def main(args):
    """Train, test, and clean up a GNN model on the dataset named in *args*.

    Expected attributes on ``args``: ``gpu`` (int, negative means CPU),
    ``dataset`` (str), ``data_path`` (str), ``config_fpath`` (str),
    ``lr``, ``n_epochs``, ``weight_decay``, ``batch_size``.

    Raises:
        ValueError: if ``args.dataset`` is not one of the known datasets.
    """
    # A non-negative GPU index means "use CUDA on that device".
    cuda = args.gpu >= 0
    if cuda:
        torch.cuda.set_device(args.gpu)

    default_path = create_default_path()
    print('\n*** Set default saving/loading path to:', default_path)

    # Pick the dataset loader and task mode; membership tests replace the
    # original long chains of `==` / `or` comparisons.
    if args.dataset in (AIFB, MUTAG):
        # RDF-style datasets -> node classification.
        module = importlib.import_module(MODULE.format('dglrgcn'))
        data = module.load_dglrgcn(args.data_path)
        data = to_cuda(data) if cuda else data
        mode = NODE_CLASSIFICATION
    elif args.dataset in (MUTAGENICITY, PTC_MR, PTC_MM, PTC_FR, PTC_FM):
        # Dortmund graph-kernel datasets -> graph classification.
        module = importlib.import_module(MODULE.format('dortmund'))
        data = module.load_dortmund(args.data_path)
        data = to_cuda(data) if cuda else data
        mode = GRAPH_CLASSIFICATION
    else:
        raise ValueError('Unable to load dataset', args.dataset)

    print_graph_stats(data[GRAPH])

    config_params = read_params(args.config_fpath, verbose=True)

    # create GNN model; relational fields are optional depending on dataset.
    model = Model(g=data[GRAPH],
                  config_params=config_params[0],
                  n_classes=data[N_CLASSES],
                  n_rels=data[N_RELS] if N_RELS in data else None,
                  n_entities=data[N_ENTITIES] if N_ENTITIES in data else None,
                  is_cuda=cuda,
                  mode=mode)

    if cuda:
        model.cuda()

    # 1. Training
    app = App()
    learning_config = {'lr': args.lr,
                       'n_epochs': args.n_epochs,
                       'weight_decay': args.weight_decay,
                       'batch_size': args.batch_size,
                       'cuda': cuda}
    print('\n*** Start training ***\n')
    app.train(data, config_params[0], learning_config, default_path, mode=mode)

    # 2. Testing
    print('\n*** Start testing ***\n')
    app.test(data, default_path, mode=mode)

    # 3. Delete model checkpoint saved during training.
    remove_model(default_path)