def load_base_model(label_map, test_glist=None):
    assert cmd_args.base_model_dump is not None
    with open('%s-args.pkl' % cmd_args.base_model_dump, 'rb') as f:
        base_args = cp.load(f)
    classifier = GraphClassifier(label_map, **vars(base_args))
    if cmd_args.ctx == 'gpu':
        classifier = classifier.cuda()
    classifier.load_state_dict(torch.load(cmd_args.base_model_dump + '.model'))
    if test_glist is not None:
        test_graphs(classifier, test_glist)
    return classifier
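
# A minimal sketch (an assumption, not part of the original source) of the save step that
# would produce the checkpoint pair load_base_model expects: '<prefix>-args.pkl' holding the
# pickled constructor arguments and '<prefix>.model' holding the state dict. The function name
# save_base_model and its argument names are hypothetical; only the on-disk layout is taken
# from the loading code above.
def save_base_model(classifier, base_args, save_prefix):
    with open('%s-args.pkl' % save_prefix, 'wb') as f:
        cp.dump(base_args, f)                                    # pickled args namespace
    torch.save(classifier.state_dict(), save_prefix + '.model')  # model weights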
# generate graphs
output = Generate_dataset(GLOBAL_NUM_GRAPHS)
g_list, test_glist = load_graphs(output, GLOBAL_NUM_GRAPHS)

# base_classifier = load_base_model(label_map, g_list)
base_args = {
    'gm': 'mean_field',
    'feat_dim': 2,
    'latent_dim': 10,
    'out_dim': 20,
    'max_lv': 2,
    'hidden': 32
}
base_classifier = GraphClassifier(num_classes=20, **base_args)

env = GraphEdgeEnv(base_classifier)
print("len g_list:", len(g_list))

if cmd_args.frac_meta > 0:
    num_train = int(len(g_list) * (1 - cmd_args.frac_meta))
    agent = Agent(g_list, test_glist[num_train:], env)
else:
    agent = Agent(g_list, None, env)

if GLOBAL_PHASE == 'train':
    print("\n\nStarting Training Loop\n\n")
random.seed(cmd_args.seed)
np.random.seed(cmd_args.seed)
torch.manual_seed(cmd_args.seed)

# load the data
# label_map maps the number of connected components to a label, i.e. {1: 0, 2: 1, 3: 2}
# train_glist and test_glist are lists of S2VGraph objects; see
# https://github.com/Hanjun-Dai/pytorch_structure2vec/blob/bcf20c90f21e468f862f13e2f5809a52cd247d4e/graph_classification/util.py
label_map, train_glist, test_glist = load_er_data()

# load a saved model if specified, else create a new one
if cmd_args.saved_model is not None and cmd_args.saved_model != '':
    print('loading model from %s' % cmd_args.saved_model)
    with open('%s-args.pkl' % cmd_args.saved_model, 'rb') as f:
        base_args = cp.load(f)
    classifier = GraphClassifier(label_map, **vars(base_args))
    classifier.load_state_dict(torch.load(cmd_args.saved_model + '.model'))
else:
    classifier = GraphClassifier(label_map, **vars(cmd_args))

# move the classifier to GPU if available
if cmd_args.ctx == 'gpu':
    classifier = classifier.cuda()

# if phase is 'test', report the test loss and accuracy
if cmd_args.phase == 'test':
    test_loss = loop_dataset(test_glist, classifier, list(range(len(test_glist))))
    print('\033[93maverage test: loss %.5f acc %.5f\033[0m' % (test_loss[0], test_loss[1]))
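
# Hedged illustration (not the repo's load_er_data): the comment above says label_map maps a
# graph's number of connected components to a class index. For plain networkx graphs such a
# mapping could be built as below; build_label_map and graphs are assumed names used only for
# this example.
import networkx as nx

def build_label_map(graphs):
    # distinct component counts -> contiguous class indices, e.g. {1, 2, 3} -> {1: 0, 2: 1, 3: 2}
    counts = sorted({nx.number_connected_components(g) for g in graphs})
    return {c: i for i, c in enumerate(counts)}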
from data_util import load_pkl
from graph_common import loop_dataset, load_er_data

if __name__ == '__main__':
    random.seed(cmd_args.seed)
    np.random.seed(cmd_args.seed)
    torch.manual_seed(cmd_args.seed)

    label_map, train_glist, test_glist = load_er_data()

    if cmd_args.saved_model is not None and cmd_args.saved_model != '':
        print('loading model from %s' % cmd_args.saved_model)
        with open('%s-args.pkl' % cmd_args.saved_model, 'rb') as f:
            base_args = cp.load(f)
        classifier = GraphClassifier(label_map, **vars(base_args))
        classifier.load_state_dict(torch.load(cmd_args.saved_model + '.model'))
    else:
        classifier = GraphClassifier(label_map, **vars(cmd_args))

    if cmd_args.ctx == 'gpu':
        classifier = classifier.cuda()

    if cmd_args.phase == 'test':
        test_loss = loop_dataset(test_glist, classifier, list(range(len(test_glist))), epoch=101)
        print('\033[93maverage test: loss %.5f acc %.5f\033[0m' % (test_loss[0], test_loss[1]))

    if cmd_args.phase == 'train':
        optimizer = optim.Adam(classifier.parameters(), lr=cmd_args.learning_rate)
        train_idxes = list(range(len(train_glist)))
        best_loss = None
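
        # Hedged sketch of how the training phase above might continue (an assumption, not the
        # original code): shuffle the training indices each epoch, optimize via loop_dataset, and
        # keep the checkpoint with the lowest test loss. The optimizer keyword on loop_dataset is
        # assumed from the pytorch_structure2vec training loop referenced earlier, and
        # cmd_args.num_epochs / cmd_args.save_dir are hypothetical settings.
        for epoch in range(cmd_args.num_epochs):
            random.shuffle(train_idxes)
            loop_dataset(train_glist, classifier, train_idxes, optimizer=optimizer, epoch=epoch)
            test_loss = loop_dataset(test_glist, classifier, list(range(len(test_glist))), epoch=epoch)
            if best_loss is None or test_loss[0] < best_loss:
                best_loss = test_loss[0]
                # persist the best model in the same two-file format that the loading code expects
                torch.save(classifier.state_dict(), cmd_args.save_dir + '/epoch-best.model')
                with open(cmd_args.save_dir + '/epoch-best-args.pkl', 'wb') as f:
                    cp.dump(cmd_args, f)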