def build_default_args_for_node_classification(dataset):
    """Return the default arguments for unsupervised node classification with GRACE.

    Defaults are merged with any project-level extras via ``get_extra_args``
    and wrapped by ``build_args_from_dict`` (presumably an argparse-style
    namespace — confirm in project utils).
    """
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.001,
        "weight_decay": 0,
        # NOTE(review): both "max_epoch" and "max_epochs" are present in the
        # original config; kept verbatim since consumers may read either key.
        "max_epoch": 1000,
        "max_epochs": 1000,
        "patience": 20,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [0],
        "num_shuffle": 5,
        "drop_edge_rates": [0.1, 0.2],
        "drop_feature_rates": [0.2, 0.3],
        "hidden_size": 128,
        "num_layers": 2,
        "proj_hidden_size": 128,
        "tau": 0.4,
        "activation": "relu",
        "sampler": "none",
        "task": "unsupervised_node_classification",
        "model": "grace",
        "dataset": dataset,
        "save_dir": "./saved",
        "enhance": None,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_graph_classification(dataset):
    """Return the default arguments for graph classification with SortPool.

    The dict is passed through ``get_extra_args`` before being converted to
    an args object by ``build_args_from_dict``.
    """
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.001,
        "weight_decay": 5e-4,
        "max_epoch": 500,
        "patience": 100,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [0],
        "hidden_size": 64,
        "degree_feature": False,
        "gamma": 0.5,
        "kfold": False,
        "uniform_feature": False,
        "train_ratio": 0.7,
        "test_ratio": 0.1,
        "num_layers": 3,
        "dropout": 0.5,
        "batch_size": 128,
        "kernel_size": 5,
        "k": 30,
        "out_channels": 32,
        "task": "graph_classification",
        "model": "sortpool",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_node_classification(dataset):
    """Return the default arguments for node classification with GRAND.

    Defaults are extended by ``get_extra_args`` and converted with
    ``build_args_from_dict``.
    """
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.01,
        "weight_decay": 5e-4,
        "max_epoch": 1000,
        "patience": 100,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [100],
        "input_dropout": 0.5,
        "hidden_dropout": 0.5,
        "hidden_size": 32,
        "dropnode_rate": 0.5,
        "order": 5,
        "tem": 0.5,
        "lam": 0.5,
        "sample": 10,
        "alpha": 0.2,
        "bn": False,
        "task": "node_classification",
        "model": "grand",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_node_classification(dataset):
    """Return the default arguments for node classification with Graph U-Net.

    Extras come from ``get_extra_args``; the result is wrapped by
    ``build_args_from_dict``.
    """
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.01,
        "weight_decay": 0.0005,
        "max_epoch": 1000,
        "patience": 100,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [1],
        "n_dropout": 0.90,
        "adj_dropout": 0.05,
        "hidden_size": 128,
        "aug_adj": False,
        "improved": False,
        "n_pool": 4,
        # One pooling rate per pooling layer (n_pool == 4).
        "pool_rate": [0.7, 0.5, 0.5, 0.4],
        "activation": "relu",
        "task": "node_classification",
        "model": "unet",
        "dataset": dataset,
        "missing_rate": -1,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_node_classification(dataset):
    """Return the default arguments for node classification with model "gcn".

    NOTE(review): the hyper-parameters (attention_type, num_heads, subheads)
    look attention/GAT-flavoured while "model" is "gcn" — presumably the
    project's "gcn" variant accepts these; verify against the model registry.
    """
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.005,
        "weight_decay": 5e-4,
        "max_epoch": 1300,
        "patience": 100,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [0],
        "dropout": 0.5,
        "hidden_size": 8,
        "attention_type": "node",
        "normalization": "row_uniform",
        "num_heads": 8,
        "nhtop": 1,
        "node_dropout": 0.5,
        "subheads": 1,
        "activation": "leaky_relu",
        "alpha": 0.2,
        "task": "node_classification",
        "model": "gcn",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_unsupervisde_graph_classification(dataset):
    """Return the default arguments for unsupervised graph classification (InfoGraph).

    NOTE(review): the function name misspells "unsupervised"; kept as-is
    because external callers reference this exact name.
    """
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.0001,
        "weight_decay": 5e-4,
        "max_epoch": 500,
        "patience": 100,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [0],
        "num_shuffle": 10,
        "epoch": 15,
        "train_num": 5000,
        "unsup": True,
        "hidden_size": 512,
        "degree_feature": False,
        "kfold": False,
        "train_ratio": 0.7,
        "test_ratio": 0.1,
        "num_layers": 1,
        "dropout": 0.5,
        "batch_size": 20,
        "target": 0,
        "task": "unsupervised_graph_classification",
        "model": "infograph",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_node_classification(dataset):
    """Return the default arguments for node classification with PPRGo."""
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.001,
        "weight_decay": 5e-4,
        "max_epoch": 500,
        "patience": 100,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [0, 1, 2],
        "dropout": 0.1,
        "hidden_size": 64,
        "alpha": 0.5,
        "num_layers": 2,
        "activation": "relu",
        "nprop_inference": 2,
        "norm": "sym",
        "eps": 1e-4,
        "k": 32,
        "eval_step": 5,
        "batch_size": 1024,
        "test_batch_size": 10240,
        "task": "node_classification",
        "model": "pprgo",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_graph_classification(dataset):
    """Return the default arguments for graph classification with PATCHY-SAN."""
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.001,
        "weight_decay": 5e-4,
        "max_epoch": 500,
        "patience": 100,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [0],
        "hidden_size": 32,
        "degree_feature": False,
        "gamma": 0.5,
        "kfold": False,
        "uniform_feature": False,
        "train_ratio": 0.7,
        "test_ratio": 0.1,
        "dropout": 0.5,
        "batch_size": 20,
        "sample": 30,
        "stride": 1,
        "neighbor": 10,
        "iteration": 5,
        "task": "graph_classification",
        "model": "patchy_san",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_node_classification(dataset):
    """Return the default arguments for node classification with SIGN."""
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.00005,
        "weight_decay": 5e-4,
        "max_epoch": 500,
        "patience": 100,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [0, 1, 2],
        "num_features": 1,
        "hidden_size": 2048,
        "out_channels": 1,
        "num_propagations": 3,
        "num_layers": 3,
        "dropout": 0.5,
        "directed": False,
        "dropedge_rate": 0.2,
        "asymm_norm": False,
        "set_diag": False,
        "remove_diag": False,
        "task": "node_classification",
        "model": "sign",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_graph_classification(dataset):
    """Return the default arguments for graph classification with HGP-SL."""
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "hidden_size": 128,
        "dropout": 0.0,
        "pooling": 0.5,
        "batch_size": 64,
        "train_ratio": 0.8,
        "test_ratio": 0.1,
        "lr": 0.001,
        "weight_decay": 0.001,
        "patience": 100,
        "max_epoch": 500,
        "sample_neighbor": True,
        "sparse_attention": True,
        "structure_learning": True,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [777],
        "task": "graph_classification",
        "model": "hgpsl",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def get_default_args():
    """Return the generic default training arguments (no model/dataset keys)."""
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "hidden_size": 16,
        "dropout": 0.5,
        "patience": 100,
        "max_epoch": 500,
        "cpu": run_on_cpu,
        "lr": 0.01,
        "device_id": [0],
        "weight_decay": 5e-4,
        "missing_rate": -1,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_node_classification(dataset):
    """Return the default arguments for node classification with DropEdge-GCN."""
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.01,
        "weight_decay": 5e-4,
        "max_epoch": 500,
        "patience": 100,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [42],
        "task": "node_classification",
        "model": "dropedge_gcn",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_multiplex_node_classification(dataset):
    """Return the default arguments for multiplex node classification with GCC.

    "epoch": 0 together with "load_path" suggests evaluation from a
    pretrained checkpoint rather than training — confirm against the GCC task.
    """
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "hidden_size": 64,
        "cpu": run_on_cpu,
        "device_id": [0],
        "enhance": None,
        "save_dir": ".",
        "seed": [0, 1, 2],
        "epoch": 0,
        "load_path": "./saved/gcc_pretrained.pth",
        "task": "multiplex_node_classification",
        "model": "gcc",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_heterogeneous_node_classification(dataset):
    """Return the default arguments for heterogeneous node classification with HAN."""
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "hidden_size": 128,
        "patience": 100,
        "max_epoch": 500,
        "cpu": run_on_cpu,
        "device_id": [0],
        "lr": 0.005,
        "weight_decay": 0.001,
        "seed": [0, 1, 2],
        "num_layers": 2,
        "task": "heterogeneous_node_classification",
        "model": "han",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_node_classification(dataset):
    """Return the default arguments for node classification with MixHop."""
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.01,
        "weight_decay": 5e-4,
        "max_epoch": 1000,
        "patience": 100,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [0, 1, 2],
        "dropout": 0.7,
        # Per-power channel widths for the two MixHop layers.
        "layer1_pows": [200, 200, 200],
        "layer2_pows": [20, 20, 20],
        "task": "node_classification",
        "model": "mixhop",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def get_default_args():
    """Return the default arguments for GDC-style diffusion preprocessing."""
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "hidden_size": 16,
        "dropout": 0.5,
        "patience": 100,
        "max_epoch": 500,
        "cpu": run_on_cpu,
        "lr": 0.01,
        "device_id": [0],
        "weight_decay": 5e-4,
        "alpha": 0.05,
        "t": 5.0,
        "k": 128,
        "eps": 0.01,
        "gdc_type": "ppr",
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_node_classification(dataset):
    """Return the default arguments for node classification with ChebNet."""
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.01,
        "weight_decay": 5e-4,
        "max_epoch": 500,
        "patience": 100,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [0, 1, 2],
        "dropout": 0.5,
        "hidden_size": 64,
        "num_layers": 2,
        "filter_size": 5,
        "task": "node_classification",
        "model": "chebyshev",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_node_classification(dataset):
    """Return the default arguments for node classification with (A)PPNP."""
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.01,
        "weight_decay": 5e-4,
        "max_epoch": 500,
        "patience": 100,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [0, 1, 2],
        "dropout": 0.5,
        "hidden_size": 64,
        # Iterative approximation by default; 'ppnp' selects the exact variant.
        "propagation_type": "appnp",
        "alpha": 0.1,
        "num_iterations": 10,
        "task": "node_classification",
        "model": "ppnp",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_node_classification(dataset):
    """Return the default arguments for node classification with GAT."""
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.005,
        "weight_decay": 5e-4,
        "max_epoch": 1000,
        "patience": 100,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [72],
        "dropout": 0.6,
        "hidden_size": 8,
        "alpha": 0.2,
        "nheads": 8,
        "task": "node_classification",
        "model": "gat",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_node_classification(dataset, missing_rate=0, num_layers=40):
    """Return the default arguments for node classification with SGC-PN.

    Unlike the sibling builders, this one exposes ``missing_rate`` and
    ``num_layers`` as keyword parameters so callers can sweep them.
    """
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.005,
        "weight_decay": 5e-4,
        "max_epoch": 1000,
        "patience": 1000,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [0, 1, 2, 3, 4],
        "missing_rate": missing_rate,
        "norm_mode": "PN",
        "norm_scale": 10,
        "dropout": 0.6,
        "num_layers": num_layers,
        "task": "node_classification",
        "model": "sgcpn",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))
def build_default_args_for_node_classification(dataset):
    """Return the default arguments for node classification with GCNII."""
    run_on_cpu = not torch.cuda.is_available()
    defaults = {
        "lr": 0.01,
        "weight_decay": 5e-4,
        # NOTE(review): both "max_epoch" and "max_epochs" appear in the
        # original config; kept verbatim since consumers may read either key.
        "max_epoch": 1000,
        "max_epochs": 1000,
        "patience": 100,
        "cpu": run_on_cpu,
        "device_id": [0],
        "seed": [42],
        "dropout": 0.5,
        "hidden_size": 256,
        "num_layers": 32,
        "lmbda": 0.5,
        # Separate weight decays (wd1/wd2) for the two GCNII parameter groups.
        "wd1": 0.001,
        "wd2": 5e-4,
        "alpha": 0.1,
        "task": "node_classification",
        "model": "gcnii",
        "dataset": dataset,
    }
    return build_args_from_dict(get_extra_args(defaults))