import logging
import os

from yacs.config import CfgNode as CN

from graphgym.utils.io import makedirs_rm_exist
from graphgym.contrib.config import *
import graphgym.register as register

# Global config object
cfg = CN()
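
# Typical downstream usage (a minimal sketch; merge_from_file and
# merge_from_list are standard yacs CfgNode methods, and 'example.yaml' is a
# hypothetical file):
#
#   from graphgym.config import cfg
#   cfg.merge_from_file('example.yaml')             # override from YAML
#   cfg.merge_from_list(['optim.max_epoch', 100])   # override from key/value list
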
def set_cfg(cfg):
    r'''
    This function sets the default config values.
    1) Note that for an experiment, only part of the arguments will be used;
       the remaining unused arguments won't affect anything.
       So feel free to register any argument in graphgym.contrib.config.
    2) We support *at most* two levels of configs, e.g., cfg.dataset.name.
    :return: configuration used by the experiment.
    '''

    # ------------------------------------------------------------------------ #
    # Basic options
    # ------------------------------------------------------------------------ #
    # Set print destination: stdout / file / both
    cfg.print = 'both'
    # Select device: 'cpu', 'cuda:0', 'auto'
    cfg.device = 'auto'
    # Output directory
    cfg.out_dir = 'results'
    # Config destination (in OUT_DIR)
    cfg.cfg_dest = 'config.yaml'
    # Random seed
    cfg.seed = 1
    # Number of decimal places used when printing results
    cfg.round = 4
    # Tensorboard support for each run
    cfg.tensorboard_each_run = False
    # Tensorboard support for aggregated results
    cfg.tensorboard_agg = True
    # Number of additional workers for data loading
    cfg.num_workers = 0
    # Max threads used by PyTorch
    cfg.num_threads = 6
    # The metric used to select the best epoch for each run
    cfg.metric_best = 'auto'
    # argmax or argmin for aggregating results
    cfg.metric_agg = 'argmax'
    # Whether to visualize the embedding
    cfg.view_emb = False
    # Whether to log GPU memory usage
    cfg.gpu_mem = False

    # ------------------------------------------------------------------------ #
    # Globally shared variables:
    # These variables will be set dynamically based on the input dataset.
    # Do not set them directly here or in .yaml files.
    # ------------------------------------------------------------------------ #
    cfg.share = CN()
    # Size of the input dimension
    cfg.share.dim_in = 1
    # Size of the output dimension, i.e., number of labels to be predicted
    cfg.share.dim_out = 1
    # Number of dataset splits: train/val/test
    cfg.share.num_splits = 1

    # ------------------------------------------------------------------------ #
    # Dataset options
    # ------------------------------------------------------------------------ #
    cfg.dataset = CN()
    # Name of the dataset
    cfg.dataset.name = 'Cora'
    # If PyG: look for the dataset among the PyTorch Geometric datasets
    # If NetworkX/nx: load the data in NetworkX format
    cfg.dataset.format = 'PyG'
    # Dir to load the dataset from. If the dataset is downloaded, this is the
    # cache dir
    cfg.dataset.dir = './datasets'
    # Task: node, edge, graph, link_pred
    cfg.dataset.task = 'node'
    # Type of task: classification, regression, classification_binary,
    # classification_multi
    cfg.dataset.task_type = 'classification'
    # Transductive / inductive
    # Graph classification is always inductive
    cfg.dataset.transductive = True
    # Split ratios for the dataset. Len=2: train/val. Len=3: train/val/test
    cfg.dataset.split = [0.8, 0.1, 0.1]
    # Whether to shuffle the graphs before splitting
    cfg.dataset.shuffle_split = True
    # Whether to use an encoder for the node features
    cfg.dataset.node_encoder = False
    # Name of the node encoder
    cfg.dataset.node_encoder_name = 'Atom'
    # Whether to add batch norm after the node encoder
    cfg.dataset.node_encoder_bn = True
    # Whether to use an encoder for the edge features
    cfg.dataset.edge_encoder = False
    # Name of the edge encoder
    cfg.dataset.edge_encoder_name = 'Bond'
    # Whether to add batch norm after the edge encoder
    cfg.dataset.edge_encoder_bn = True
    # Dimension of the encoded features.
    # For now the node and edge encoding dimensions are the same.
    cfg.dataset.encoder_dim = 128
    # Dimension of the edge features. Updated to the real dim of the dataset
    cfg.dataset.edge_dim = 128

    # ============== Link/edge tasks only
    # all or disjoint
    cfg.dataset.edge_train_mode = 'all'
    # Used in the disjoint edge_train_mode. The proportion of edges used for
    # message passing
    cfg.dataset.edge_message_ratio = 0.8
    # The ratio of negative samples to positive samples
    cfg.dataset.edge_negative_sampling_ratio = 1.0
    # Whether to resample disjoint edges when dataset.edge_train_mode is
    # 'disjoint'
    cfg.dataset.resample_disjoint = False
    # Whether to resample negative edges at training time (link prediction only)
    cfg.dataset.resample_negative = False
    # ==============

    # Feature augmentation
    # Naming is consistent with DeepSNAP graphs:
    # node_xxx is a feature on nodes; edge_xxx is a feature on edges;
    # graph_xxx is a feature on the graph.
    # A list of tuples (feature, feature_dim)
    cfg.dataset.augment_feature = []
    cfg.dataset.augment_feature_dims = []
    # 'balanced', 'equal_width', 'bounded', 'original', 'position'
    cfg.dataset.augment_feature_repr = 'original'
    # If non-empty, this replaces the label with structural features.
    # For example, setting label = 'node_degree' causes the model to
    # replace the node labels with node degrees (overwriting previous node
    # labels).
    # Note: currently only 1 label is supported
    cfg.dataset.augment_label = ''
    cfg.dataset.augment_label_dims = 0
    # Which transformation function is applied to the dataset
    cfg.dataset.transform = 'none'
    # Whether to cache the split dataset
    # NOTE: use with caution, as the cached dataset may not have
    # exactly the same settings as the config file
    cfg.dataset.cache_save = False
    cfg.dataset.cache_load = False
    # Whether to remove the original node features from the dataset
    cfg.dataset.remove_feature = False
    # Simplify TU datasets for synthetic tasks
    cfg.dataset.tu_simple = True
    # Convert to an undirected graph (stores 2*E edges)
    cfg.dataset.to_undirected = False
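
    # For reference, this block maps one-to-one onto a YAML override such as
    # the sketch below (values are illustrative; the file is merged via the
    # standard yacs mechanism):
    #
    #   dataset:
    #     format: PyG
    #     name: Cora
    #     task: node
    #     task_type: classification
    #     split: [0.8, 0.1, 0.1]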

    # ------------------------------------------------------------------------ #
    # Training options
    # ------------------------------------------------------------------------ #
    cfg.train = CN()
    # Training (and validation) pipeline mode
    cfg.train.mode = 'standard'
    # Total graph mini-batch size
    cfg.train.batch_size = 16
    # Sampling strategy for the train loader
    cfg.train.sampler = 'full_batch'
    # Whether to sample nodes into minibatches
    cfg.train.sample_node = False
    # Number of sampled nodes per graph
    cfg.train.node_per_graph = 32
    # Radius: same, extend. same: same as cfg.gnn.layers_mp; extend: layers+1
    cfg.train.radius = 'extend'
    # Evaluate the model on test data every eval_period epochs
    cfg.train.eval_period = 10
    # Save a model checkpoint every ckpt_period epochs
    cfg.train.ckpt_period = 100
    # Resume training from the latest checkpoint in the output directory
    cfg.train.auto_resume = False
    # The epoch to resume from. -1 means resume from the latest epoch
    cfg.train.epoch_resume = -1
    # Clean checkpoints: only keep the last ckpt
    cfg.train.ckpt_clean = True
    # Number of iterations per epoch (for sampling-based loaders only)
    cfg.train.iter_per_epoch = 32
    # GraphSAINTRandomWalkSampler: random walk length
    cfg.train.walk_length = 4
    # NeighborSampler: number of sampled nodes per layer
    cfg.train.neighbor_sizes = [20, 15, 10, 5]
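
    # Example override for sampling-based training (a sketch; 'neighbor' is
    # assumed to be an accepted value of train.sampler -- check the GraphGym
    # loader code for the exact names; the other values are illustrative):
    #
    #   train:
    #     sampler: neighbor
    #     batch_size: 32
    #     iter_per_epoch: 32
    #     neighbor_sizes: [20, 15]   # one entry per message-passing hop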

    # ------------------------------------------------------------------------ #
    # Validation options
    # ------------------------------------------------------------------------ #
    cfg.val = CN()
    # Whether to sample nodes into minibatches
    cfg.val.sample_node = False
    # Sampling strategy for the val/test loader
    cfg.val.sampler = 'full_batch'
    # Number of sampled nodes per graph
    cfg.val.node_per_graph = 32
    # Radius: same, extend. same: same as cfg.gnn.layers_mp; extend: layers+1
    cfg.val.radius = 'extend'

    # ------------------------------------------------------------------------ #
    # Model options
    # ------------------------------------------------------------------------ #
    cfg.model = CN()
    # Model type to use
    cfg.model.type = 'gnn'
    # Automatically match the computational budget: match upper / lower bound
    cfg.model.match_upper = True
    # Loss function: cross_entropy, mse
    cfg.model.loss_fun = 'cross_entropy'
    # Size average for the loss function: 'mean' or 'sum'
    cfg.model.size_average = 'mean'
    # Threshold for binary classification
    cfg.model.thresh = 0.5

    # ============== Link/edge tasks only
    # Edge decoding methods:
    # - dot: compute dot(u, v) to predict a link (binary)
    # - cosine_similarity: use cosine_similarity(u, v) to predict a link
    #   (binary)
    # - concat: use u||v followed by an nn.Linear to obtain the edge embedding
    #   (multi-class)
    cfg.model.edge_decoding = 'dot'
    # ===================================
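
    # For intuition, the three decoders reduce to roughly the following
    # (a plain-PyTorch sketch, not the exact GraphGym implementation; u and v
    # are node embeddings of shape [num_edges, dim] and `lin` is a
    # hypothetical nn.Linear(2 * dim, num_classes)):
    #
    #   import torch
    #   score = (u * v).sum(dim=-1)                               # 'dot'
    #   score = torch.nn.functional.cosine_similarity(u, v, -1)   # 'cosine_similarity'
    #   logits = lin(torch.cat([u, v], dim=-1))                   # 'concat'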

    # ================== Graph tasks only
    # Pooling methods:
    # - add: global add pool
    # - mean: global mean pool
    # - max: global max pool
    cfg.model.graph_pooling = 'add'
    # ===================================

    # ------------------------------------------------------------------------ #
    # GNN options
    # ------------------------------------------------------------------------ #
    cfg.gnn = CN()
    # Number of layers before message passing
    cfg.gnn.layers_pre_mp = 0
    # Number of layers for message passing
    cfg.gnn.layers_mp = 2
    # Number of layers after message passing
    cfg.gnn.layers_post_mp = 0
    # Hidden layer dim. Automatically set if train.auto_match = True
    cfg.gnn.dim_inner = 16
    # Type of graph conv: generalconv, gcnconv, sageconv, gatconv, ...
    cfg.gnn.layer_type = 'generalconv'
    # Stage type: 'stack', 'skipsum', 'skipconcat'
    cfg.gnn.stage_type = 'stack'
    # How many layers to skip each time
    cfg.gnn.skip_every = 1
    # Whether to use batch norm
    cfg.gnn.batchnorm = True
    # Activation
    cfg.gnn.act = 'relu'
    # Dropout
    cfg.gnn.dropout = 0.0
    # Aggregation type: add, mean, max
    # Note: only for certain layers that explicitly set the aggregation type,
    # e.g., when cfg.gnn.layer_type = 'generalconv'
    cfg.gnn.agg = 'add'
    # Message passing flow: source_to_target or target_to_source
    cfg.gnn.flow = 'source_to_target'
    # Normalize the adjacency matrix
    cfg.gnn.normalize_adj = False
    # Message direction: single, both
    cfg.gnn.msg_direction = 'single'
    # How to add the message from the node itself: none, add, cat
    cfg.gnn.self_msg = 'concat'
    # Number of attention heads
    cfg.gnn.att_heads = 1
    # After concatenating the attention heads, add a linear layer
    cfg.gnn.att_final_linear = False
    # After concatenating the attention heads, add batch norm to that final
    # linear layer
    cfg.gnn.att_final_linear_bn = False
    # L2-normalize the node features after message passing
    cfg.gnn.l2norm = True
    # Randomly keep only this fraction of edges for message passing
    cfg.gnn.keep_edge = 0.5
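
    # One way keep_edge can be realized (an illustrative sketch, not
    # necessarily the layer's exact implementation): draw a Bernoulli mask
    # over the E edges and drop the rest before message passing.
    #
    #   import torch
    #   mask = torch.rand(edge_index.size(1)) < cfg.gnn.keep_edge
    #   edge_index = edge_index[:, mask]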

    # ------------------------------------------------------------------------ #
    # Optimizer options
    # ------------------------------------------------------------------------ #
    cfg.optim = CN()
    # Optimizer: sgd, adam
    cfg.optim.optimizer = 'adam'
    # Base learning rate
    cfg.optim.base_lr = 0.01
    # L2 regularization
    cfg.optim.weight_decay = 5e-4
    # SGD momentum
    cfg.optim.momentum = 0.9
    # Scheduler: none, steps, cos
    cfg.optim.scheduler = 'cos'
    # Steps for the 'steps' policy (in epochs)
    cfg.optim.steps = [30, 60, 90]
    # Learning rate multiplier for the 'steps' policy
    cfg.optim.lr_decay = 0.1
    # Maximal number of epochs
    cfg.optim.max_epoch = 200
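
    # Under the 'steps' policy the learning rate follows the usual
    # MultiStepLR semantics (a sketch of the intended behavior; the actual
    # scheduler is constructed elsewhere in GraphGym):
    #
    #   lr(epoch) = base_lr * lr_decay ** #{s in steps : s <= epoch}
    #
    # With the defaults above: 0.01 until epoch 30, 0.001 until 60,
    # 1e-4 until 90, then 1e-5.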

    # ------------------------------------------------------------------------ #
    # Batch norm options
    # ------------------------------------------------------------------------ #
    cfg.bn = CN()
    # BN epsilon
    cfg.bn.eps = 1e-5
    # BN momentum (BN momentum in PyTorch = 1 - BN momentum in Caffe2)
    cfg.bn.mom = 0.1

    # ------------------------------------------------------------------------ #
    # Memory options
    # ------------------------------------------------------------------------ #
    cfg.mem = CN()
    # Perform ReLU in place
    cfg.mem.inplace = False

    # Set user-customized cfgs
    for func in register.config_dict.values():
        func(cfg)
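

# A contributed config module typically looks like the sketch below
# ('example' and its fields are hypothetical; `register_config` is the
# helper in graphgym.register that populates `config_dict`):
#
#   from yacs.config import CfgNode as CN
#   from graphgym.register import register_config
#
#   def set_cfg_example(cfg):
#       cfg.example = CN()      # at most two levels: cfg.example.arg
#       cfg.example.arg = 1.0
#
#   register_config('example', set_cfg_example)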


def assert_cfg(cfg):
    """Checks config value invariants."""
    if cfg.dataset.task not in ['node', 'edge', 'graph', 'link_pred']:
        raise ValueError('Task {} not supported, must be one of '
                         'node, edge, graph, link_pred'.format(
                             cfg.dataset.task))
    if 'classification' in cfg.dataset.task_type and \
            cfg.model.loss_fun == 'mse':
        cfg.model.loss_fun = 'cross_entropy'
        logging.warning(
            'model.loss_fun changed to cross_entropy for classification.')
    if cfg.dataset.task_type == 'regression' and \
            cfg.model.loss_fun == 'cross_entropy':
        cfg.model.loss_fun = 'mse'
        logging.warning('model.loss_fun changed to mse for regression.')
    if cfg.dataset.task == 'graph' and cfg.dataset.transductive:
        cfg.dataset.transductive = False
        logging.warning('dataset.transductive changed to False for graph task.')
    if cfg.gnn.layers_post_mp < 1:
        cfg.gnn.layers_post_mp = 1
        logging.warning('Layers after message passing should be >= 1; '
                        'gnn.layers_post_mp changed to 1.')


def dump_cfg(cfg):
    """Dumps the config to the output directory."""
    cfg_file = os.path.join(cfg.out_dir, cfg.cfg_dest)
    with open(cfg_file, 'w') as f:
        cfg.dump(stream=f)


def update_out_dir(out_dir, fname):
    # Strip the directory and the '.yaml' suffix from the config file name
    fname = fname.split('/')[-1][:-5]
    cfg.out_dir = os.path.join(out_dir, fname, str(cfg.seed))
    # Make the output directory
    if cfg.train.auto_resume:
        os.makedirs(cfg.out_dir, exist_ok=True)
    else:
        makedirs_rm_exist(cfg.out_dir)
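
# For example (illustrative paths): with cfg.seed == 1,
# update_out_dir('results', 'configs/example.yaml') sets cfg.out_dir to
# 'results/example/1'.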


def get_parent_dir(out_dir, fname):
    # Strip the directory and the '.yaml' suffix from the config file name
    fname = fname.split('/')[-1][:-5]
    return os.path.join(out_dir, fname)


def rm_parent_dir(out_dir, fname):
    # Strip the directory and the '.yaml' suffix from the config file name
    fname = fname.split('/')[-1][:-5]
    makedirs_rm_exist(os.path.join(out_dir, fname))

set_cfg(cfg)
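
# Minimal smoke test (a sketch; running this module directly prints the
# assembled default config):
if __name__ == '__main__':
    print(cfg)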