def inference(MODEL_NAME, dataset, params, net_params, model_path):
    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()

    trainset, valset, testset = dataset.train, dataset.val, dataset.test

    device = net_params['device']
    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)
    model.load_state_dict(torch.load(model_path))

    # batching exception for Diffpool
    drop_last = True if MODEL_NAME == 'DiffPool' else False

    train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True,
                              drop_last=drop_last, collate_fn=dataset.collate)
    val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False,
                            drop_last=drop_last, collate_fn=dataset.collate)
    test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False,
                             drop_last=drop_last, collate_fn=dataset.collate)

    _, train_acc = evaluate_network(model, device, train_loader)
    _, val_acc = evaluate_network(model, device, val_loader)
    _, test_acc = evaluate_network(model, device, test_loader)

    print("Train ACC: {:.4f}".format(train_acc))
    print("Val ACC: {:.4f}".format(val_acc))
    print("Test ACC: {:.4f}".format(test_acc))
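
# NOTE: a minimal usage sketch for inference() above, not part of the pipeline.
# 'LoadData', the checkpoint path, and the exact net_params contents are
# assumptions; gnn_model() will additionally need the architecture keys
# (hidden_dim, n_classes, ...) for the chosen model.
if __name__ == '__main__':
    params = {'batch_size': 128}
    net_params = {'device': 'cuda' if torch.cuda.is_available() else 'cpu',
                  'self_loop': True}   # keys read directly by inference()
    dataset = LoadData('SBM_PATTERN')  # hypothetical dataset loader
    inference('GCN', dataset, params, net_params,
              model_path='out/checkpoints/epoch_99.pkl')  # hypothetical path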
def test_pipeline(MODEL_NAME, dataset, device, verbose, out_dir):
    # Load models
    print('\n>> Loading models...')
    model_ls = load_model(out_dir, device=device, only_best=False, verbose=verbose,
                          filter=lambda df: df[(df['model'] == MODEL_NAME) &
                                               (df['dataset'] == dataset.name)])

    # Prepare dataset
    print('\n>> Preparing data...')
    if MODEL_NAME in ['GCN']:
        if model_ls[0]['net_params']['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()
    if MODEL_NAME in ['SoGCN']:
        if model_ls[0]['net_params']['undirected']:
            print("[!] Converting directed graphs to undirected graphs for SoGCN model.")
            dataset._to_undirected()

    testset = dataset.test
    print("Test Graphs: ", len(testset))

    # Batch test data
    test_loader = DataLoader(testset, batch_size=model_ls[0]['net_params']['batch_size'],
                             shuffle=False, drop_last=False, collate_fn=dataset.collate)

    # Test models
    print('\n>> Testing models...')
    acc_ls = []
    for i, item in enumerate(model_ls):
        model = item['model']
        net_params = item['net_params']

        # Set random seed
        set_random_seed(item['seed'], device)

        # Evaluate model
        _, test_acc = evaluate_network(model, device, test_loader, 0)
        acc_ls.append(test_acc)

        if verbose:
            print('\nModel #%s' % i)
            print('Test Accuracy: %s' % acc_ls[-1])

    print('\n')
    print('AVG Test Accuracy: %s, s.d.: %s' % (np.mean(acc_ls), np.std(acc_ls)))
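
# NOTE: set_random_seed() is called by test_pipeline() but not defined in this
# file; a minimal sketch of what it presumably does, mirroring the seeding
# block used verbatim by the training pipelines below. The name and signature
# are taken from the call site; the body is an assumption.
def set_random_seed(seed, device):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if device == 'cuda':
        torch.cuda.manual_seed(seed)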
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    t0 = time.time()
    per_epoch_time = []

    DATASET_NAME = dataset.name

    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()

    trainset, valset, testset = dataset.train, dataset.val, dataset.test

    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']

    # Write the network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n"""
                .format(DATASET_NAME, MODEL_NAME, params, net_params,
                        net_params['total_param']))

    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)

    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device == 'cuda':
        torch.cuda.manual_seed(params['seed'])

    print("Training Graphs: ", len(trainset))
    print("Validation Graphs: ", len(valset))
    print("Test Graphs: ", len(testset))
    print("Number of Classes: ", net_params['n_classes'])

    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'],
                           weight_decay=params['weight_decay'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)

    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []

    # batching exception for Diffpool
    drop_last = True if MODEL_NAME == 'DiffPool' else False

    train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True,
                              drop_last=drop_last, collate_fn=dataset.collate)
    val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False,
                            drop_last=drop_last, collate_fn=dataset.collate)
    test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False,
                             drop_last=drop_last, collate_fn=dataset.collate)

    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(params['epochs'])) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()

                epoch_train_loss, epoch_train_acc, optimizer = train_epoch(
                    model, optimizer, device, train_loader, epoch)
                epoch_val_loss, epoch_val_acc = evaluate_network(
                    model, device, val_loader, epoch)

                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)

                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)

                _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)

                t.set_postfix(time=time.time() - start,
                              lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)

                per_epoch_time.append(time.time() - start)

                # Saving checkpoint: keep only the two most recent epochs on disk
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                torch.save(model.state_dict(),
                           '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))

                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch - 1:
                        os.remove(file)

                scheduler.step(epoch_val_loss)

                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR EQUAL TO MIN LR SET.")
                    break

                # Stop training after params['max_time'] hours
                if time.time() - t0 > params['max_time'] * 3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping"
                          .format(params['max_time']))
                    break

    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')

    _, test_acc = evaluate_network(model, device, test_loader, epoch)
    _, train_acc = evaluate_network(model, device, train_loader, epoch)
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time() - t0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))

    writer.close()

    # Write the results in out_dir/results folder
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""
                .format(DATASET_NAME, MODEL_NAME, params, net_params, model,
                        net_params['total_param'],
                        np.mean(np.array(test_acc)) * 100,
                        np.mean(np.array(train_acc)) * 100,
                        (time.time() - t0) / 3600,
                        np.mean(per_epoch_time)))

    # send results to gmail
    try:
        from gmail import send
        subject = 'Result for Dataset: {}, Model: {}'.format(DATASET_NAME, MODEL_NAME)
        body = """Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
            .format(DATASET_NAME, MODEL_NAME, params, net_params, model,
                    net_params['total_param'],
                    np.mean(np.array(test_acc)) * 100,
                    np.mean(np.array(train_acc)) * 100,
                    (time.time() - t0) / 3600,
                    np.mean(per_epoch_time))
        send(subject, body)
    except Exception:
        pass
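
# NOTE: the checkpoint pruning inside the loop above keeps only the two most
# recent epoch_<n>.pkl files. A hypothetical standalone helper with the same
# filename parsing ('epoch_12.pkl' -> 12), shown only to make the retention
# rule explicit; the pipeline itself does not use this function.
def prune_old_checkpoints(ckpt_dir, current_epoch, keep_last=2):
    for file in glob.glob(os.path.join(ckpt_dir, '*.pkl')):
        epoch_nb = int(file.split('_')[-1].split('.')[0])
        if epoch_nb <= current_epoch - keep_last:  # same rule as `< epoch - 1`
            os.remove(file)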
def train_val_pipeline(dataset, params, net_params):
    t0 = time.time()
    per_epoch_time = []

    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    device = net_params['device']

    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device == 'cuda':
        torch.cuda.manual_seed(params['seed'])

    print("Training Graphs: ", len(trainset))
    print("Validation Graphs: ", len(valset))
    print("Test Graphs: ", len(testset))

    model = DGNNet(net_params)
    model = model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'],
                           weight_decay=params['weight_decay'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'])

    start_epoch = 0
    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []

    train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True,
                              collate_fn=dataset.collate)
    val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False,
                            collate_fn=dataset.collate)
    test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False,
                             collate_fn=dataset.collate)

    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(start_epoch, params['epochs']),
                  mininterval=params['print_epoch_interval'],
                  maxinterval=None, unit='epoch',
                  initial=start_epoch, total=params['epochs']) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()

                epoch_train_loss, epoch_train_acc, optimizer = train_epoch(
                    model, optimizer, device, train_loader, epoch,
                    net_params['augmentation'], net_params['flip'],
                    net_params['distortion'])
                epoch_val_loss, epoch_val_acc = evaluate_network(
                    model, device, val_loader, epoch)

                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)

                _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)

                t.set_postfix(time=time.time() - start,
                              lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)

                per_epoch_time.append(time.time() - start)

                scheduler.step(epoch_val_loss)

                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR EQUAL TO MIN LR SET.")
                    break

                # Stop training after params['max_time'] hours
                if time.time() - t0 > params['max_time'] * 3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping"
                          .format(params['max_time']))
                    break

    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')

    _, test_acc = evaluate_network(model, device, test_loader, epoch)
    _, val_acc = evaluate_network(model, device, val_loader, epoch)
    _, train_acc = evaluate_network(model, device, train_loader, epoch)
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Val Accuracy: {:.4f}".format(val_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time() - t0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
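
# NOTE: an illustrative params/net_params pair for the DGN pipeline above,
# covering exactly the keys it reads; every value is a placeholder assumption,
# and net_params must additionally carry whatever DGNNet itself expects.
example_params = {
    'seed': 41, 'epochs': 300, 'batch_size': 128,
    'init_lr': 1e-3, 'weight_decay': 0.0,
    'lr_reduce_factor': 0.5, 'lr_schedule_patience': 10, 'min_lr': 1e-5,
    'max_time': 12,              # hours before the time-based stop triggers
    'print_epoch_interval': 5,   # tqdm mininterval, in seconds
}
example_net_params = {
    'device': 'cpu',
    'augmentation': True, 'flip': True, 'distortion': 0.0,  # train_epoch args
}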
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, args):
    load_model = args.load_model
    aug_type_list = ['drop_nodes', 'drop_add_edges', 'noise', 'mask',
                     'subgraph', 'new', 'random', 'random2']

    DATASET_NAME = dataset.name

    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()

    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    device = net_params['device']

    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device == 'cuda':
        torch.cuda.manual_seed(params['seed'])

    print('-' * 40 + "Finetune Option" + '-' * 40)
    print("Data Name:      [{}]".format(DATASET_NAME))
    print("Model Name:     [{}]".format(MODEL_NAME))
    print("Training Graphs:[{}]".format(len(trainset)))
    print("Valid Graphs:   [{}]".format(len(valset)))
    print("Test Graphs:    [{}]".format(len(testset)))
    print("Number Classes: [{}]".format(net_params['n_classes']))
    print("Learning rate:  [{}]".format(params['init_lr']))
    print('-' * 40 + "Contrastive Option" + '-' * 40)
    print("Load model:     [{}]".format(load_model))
    print("Aug Type:       [{}]".format(aug_type_list[args.aug]))
    print("Projection head:[{}]".format(args.head))
    print('-' * 100)

    model = gnn_model(MODEL_NAME, net_params)

    if load_model:
        output_path = './001_contrastive_models'
        # output_path = './001_mask_models_03'
        save_model_dir0 = os.path.join(output_path, DATASET_NAME)
        save_model_dir1 = os.path.join(save_model_dir0, aug_type_list[args.aug])
        if args.head:
            save_model_dir1 += "_head"
        else:
            save_model_dir1 += "_no_head"
        save_model_dir2 = os.path.join(save_model_dir1, MODEL_NAME)

        load_file_name = glob.glob(save_model_dir2 + '/*.pkl')
        checkpoint = torch.load(load_file_name[-1])

        # Keep only the checkpoint weights whose names match the current model
        # (drops e.g. projection-head weights from contrastive pre-training)
        model_dict = model.state_dict()
        state_dict = {k: v for k, v in checkpoint.items() if k in model_dict.keys()}
        model.load_state_dict(state_dict)
        print('Successfully loaded pre-trained model: [{}]'.format(load_file_name[-1]))
    else:
        print('No model loaded: testing the baseline!')

    model = model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'],
                           weight_decay=params['weight_decay'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)

    # batching exception for Diffpool
    drop_last = True if MODEL_NAME == 'DiffPool' else False

    train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True,
                              drop_last=drop_last, collate_fn=dataset.collate)
    val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False,
                            drop_last=drop_last, collate_fn=dataset.collate)
    test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False,
                             drop_last=drop_last, collate_fn=dataset.collate)

    for epoch in range(params['epochs']):
        epoch_train_loss, epoch_train_acc, optimizer = train_epoch(
            model, optimizer, device, train_loader, epoch)
        epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)
        _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)

        print('-' * 80)
        print("Epoch [{}] Test Acc: [{:.4f}]".format(epoch + 1, epoch_test_acc))
        print('-' * 80)

        scheduler.step(epoch_val_loss)

        if optimizer.param_groups[0]['lr'] < params['min_lr']:
            print("\n!! LR EQUAL TO MIN LR SET.")
            break

    _, test_acc = evaluate_network(model, device, test_loader, epoch)
    _, train_acc = evaluate_network(model, device, train_loader, epoch)
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    t0 = time.time()
    per_epoch_time = []

    DATASET_NAME = dataset.name

    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']

    # Write the network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n"""
                .format(DATASET_NAME, MODEL_NAME, params, net_params,
                        net_params['total_param']))

    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)

    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device == 'cuda':
        torch.cuda.manual_seed(params['seed'])

    if hydra.is_first_execution():
        print("Training Graphs: ", len(trainset))
        print("Validation Graphs: ", len(valset))
        print("Test Graphs: ", len(testset))

    model = EIGNet(net_params)
    model = model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'],
                           weight_decay=params['weight_decay'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)

    if hydra.is_first_execution():
        start_epoch = 0
    else:
        # Resume model/optimizer/scheduler state from the last hydra checkpoint
        print('not the first exec.')
        print(t0, 'old')
        t0 -= hydra.retrieved_checkpoint.time_elapsed
        print(t0, 'new')
        start_epoch = hydra.retrieved_checkpoint.last_epoch
        states = torch.load(hydra.retrieved_checkpoint.linked_files()[0])
        model.load_state_dict(states['model'])
        optimizer.load_state_dict(states['optimizer'])
        scheduler.load_state_dict(states['scheduler'])

    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []

    train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True,
                              collate_fn=dataset.collate)
    val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False,
                            collate_fn=dataset.collate)
    test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False,
                             collate_fn=dataset.collate)

    last_hydra_checkpoint = t0

    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(start_epoch, params['epochs']),
                  mininterval=params['hydra_progress_bar_every'],
                  maxinterval=None, unit='epoch',
                  initial=start_epoch, total=params['epochs']) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()

                epoch_train_loss, epoch_train_acc, optimizer = train_epoch(
                    model, optimizer, device, train_loader, epoch,
                    net_params['augmentation'], net_params['flip'])
                epoch_val_loss, epoch_val_acc = evaluate_network(
                    model, device, val_loader, epoch)

                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)

                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)

                _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)

                t.set_postfix(time=time.time() - start,
                              lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)

                per_epoch_time.append(time.time() - start)

                scheduler.step(epoch_val_loss)

                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR EQUAL TO MIN LR SET.")
                    break

                # Stop training after params['max_time'] hours
                if time.time() - t0 > params['max_time'] * 3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping"
                          .format(params['max_time']))
                    break

                # Saving checkpoint
                if hydra.is_available() and \
                        (time.time() - last_hydra_checkpoint) > params['hydra_checkpoint_every']:
                    last_hydra_checkpoint = time.time()
                    ck_path = '/tmp/epoch_{}.pkl'.format(epoch + 1)
                    torch.save({'model': model.state_dict(),
                                'optimizer': optimizer.state_dict(),
                                'scheduler': scheduler.state_dict()}, ck_path)
                    ck = hydra.checkpoint()
                    ck.last_epoch = epoch + 1
                    ck.time_elapsed = time.time() - t0
                    # save best epoch
                    ck.link_file(ck_path)
                    ck.save_to_server()

                if hydra.is_available() and epoch % params['hydra_eta_every'] == 0:
                    hydra.set_eta(per_epoch_time[-1] * (params['epochs'] - epoch - 1))

    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')

    _, test_acc = evaluate_network(model, device, test_loader, epoch)
    _, val_acc = evaluate_network(model, device, val_loader, epoch)
    _, train_acc = evaluate_network(model, device, train_loader, epoch)
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Val Accuracy: {:.4f}".format(val_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time() - t0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))

    writer.close()

    if hydra.is_available():
        hydra.save_output({'loss': {'train': epoch_train_losses, 'val': epoch_val_losses},
                           'acc': {'train': epoch_train_accs, 'val': epoch_val_accs}},
                          'history')
        hydra.save_output({'test_acc': test_acc,
                           'train_acc': train_acc,
                           'val_acc': val_acc,
                           'total_time': time.time() - t0,
                           'avg_epoch_time': np.mean(per_epoch_time)},
                          'summary')

    # Write the results in out_dir/results folder
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""
                .format(DATASET_NAME, MODEL_NAME, params, net_params, model,
                        net_params['total_param'],
                        np.mean(np.array(test_acc)) * 100,
                        np.mean(np.array(train_acc)) * 100,
                        (time.time() - t0) / 3600,
                        np.mean(per_epoch_time)))
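
# NOTE: a hypothetical call site for the EIG pipeline above, showing the
# 4-tuple it unpacks as `dirs`; the directory layout under `out_dir` is an
# assumption, not the repo's actual convention.
dirs = (os.path.join(out_dir, 'logs'),                # root_log_dir
        os.path.join(out_dir, 'checkpoints'),         # root_ckpt_dir
        os.path.join(out_dir, 'results', 'result'),   # write_file_name
        os.path.join(out_dir, 'configs', 'config'))   # write_config_file
train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)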