def inference(MODEL_NAME, dataset, params, net_params, model_path):
    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()

    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    device = net_params['device']

    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)
    model.load_state_dict(torch.load(model_path))

    # batching exception for Diffpool
    drop_last = True if MODEL_NAME == 'DiffPool' else False

    train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True,
                              drop_last=drop_last, collate_fn=dataset.collate)
    val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False,
                            drop_last=drop_last, collate_fn=dataset.collate)
    test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False,
                             drop_last=drop_last, collate_fn=dataset.collate)

    _, train_acc = evaluate_network(model, device, train_loader)
    _, val_acc = evaluate_network(model, device, val_loader)
    _, test_acc = evaluate_network(model, device, test_loader)

    print("Train ACC: {:.4f}".format(train_acc))
    print("Val ACC: {:.4f}".format(val_acc))
    print("Test ACC: {:.4f}".format(test_acc))
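# --- Hedged usage sketch (not in the original source) ------------------------
# A minimal example of how `inference` might be invoked. The config filename,
# its keys, and the checkpoint path are illustrative assumptions; only the
# `inference` signature above and the repo's LoadData helper are taken as given.
def example_run_inference():
    import json
    with open('configs/superpixels_graph_classification_GCN_MNIST.json') as f:  # assumed path
        config = json.load(f)
    params, net_params = config['params'], config['net_params']
    net_params['device'] = 'cuda' if torch.cuda.is_available() else 'cpu'
    dataset = LoadData(config['dataset'])
    inference('GCN', dataset, params, net_params,
              model_path='out/checkpoints/RUN_/epoch_188.pkl')  # assumed checkpoint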
def view_model_param(MODEL_NAME, net_params, verbose=False):
    model = gnn_model(MODEL_NAME, net_params)
    total_param = 0
    print("MODEL DETAILS:\n")
    # print(model)
    for param in model.parameters():
        # print(param.data.size())
        total_param += np.prod(list(param.data.size()))
    print('MODEL/Total parameters:', MODEL_NAME, total_param)
    if verbose:
        print('\n== Net Params:')
        print(net_params)
        print('\n== Model Structure:')
        print(model)
    return total_param
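# --- Hedged equivalent (not in the original source) ---------------------------
# PyTorch's Tensor.numel gives the same parameter count as the np.prod loop in
# view_model_param, without the NumPy round-trip; shown only as a cross-check.
def count_parameters(model):
    # counts all parameters, trainable or not, matching view_model_param above
    return sum(p.numel() for p in model.parameters())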
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    t0 = time.time()
    per_epoch_time = []

    DATASET_NAME = dataset.name

    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()

    trainset, valset, testset = dataset.train, dataset.val, dataset.test

    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']

    # Write the network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n"""
                .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))

    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)

    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device == 'cuda':
        torch.cuda.manual_seed(params['seed'])

    print("Training Graphs: ", len(trainset))
    print("Validation Graphs: ", len(valset))
    print("Test Graphs: ", len(testset))
    print("Number of Classes: ", net_params['n_classes'])

    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'],
                           weight_decay=params['weight_decay'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)

    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []

    # batching exception for Diffpool
    drop_last = True if MODEL_NAME == 'DiffPool' else False

    train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True,
                              drop_last=drop_last, collate_fn=dataset.collate)
    val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False,
                            drop_last=drop_last, collate_fn=dataset.collate)
    test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False,
                             drop_last=drop_last, collate_fn=dataset.collate)

    # At any point you can hit Ctrl + C to break out of training early.
    epoch = 0  # guard: defined even if training is interrupted before the first epoch ends
    try:
        with tqdm(range(params['epochs'])) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)

                start = time.time()

                epoch_train_loss, epoch_train_acc, optimizer = train_epoch(
                    model, optimizer, device, train_loader, epoch)
                epoch_val_loss, epoch_val_acc = evaluate_network(
                    model, device, val_loader, epoch)

                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)

                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)

                _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)

                t.set_postfix(time=time.time() - start,
                              lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)

                per_epoch_time.append(time.time() - start)

                # Saving checkpoint
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                torch.save(model.state_dict(),
                           '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))

                # keep only the two most recent checkpoints
                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch - 1:
                        os.remove(file)

                scheduler.step(epoch_val_loss)

                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR EQUAL TO MIN LR SET.")
                    break

                # Stop training after params['max_time'] hours
                if time.time() - t0 > params['max_time'] * 3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping"
                          .format(params['max_time']))
                    break

    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')

    _, test_acc = evaluate_network(model, device, test_loader, epoch)
    _, train_acc = evaluate_network(model, device, train_loader, epoch)
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time() - t0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))

    writer.close()

    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""
                .format(DATASET_NAME, MODEL_NAME, params, net_params, model,
                        net_params['total_param'],
                        np.mean(np.array(test_acc)) * 100,
                        np.mean(np.array(train_acc)) * 100,
                        (time.time() - t0) / 3600, np.mean(per_epoch_time)))

    # send results to gmail (optional helper; silently skipped if unavailable)
    try:
        from gmail import send
        subject = 'Result for Dataset: {}, Model: {}'.format(DATASET_NAME, MODEL_NAME)
        body = """Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n""" \
            .format(DATASET_NAME, MODEL_NAME, params, net_params, model,
                    net_params['total_param'],
                    np.mean(np.array(test_acc)) * 100,
                    np.mean(np.array(train_acc)) * 100,
                    (time.time() - t0) / 3600, np.mean(per_epoch_time))
        send(subject, body)
    except Exception:
        pass
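# --- Hedged usage sketch (not in the original source) -------------------------
# How the supervised pipeline above might be wired up. The config filename and
# the four directory paths are illustrative assumptions; the tuple order only
# mirrors how `dirs` is unpacked inside train_val_pipeline. Keys such as
# 'n_classes' are assumed to already be present in the config's net_params.
def example_run_training():
    import json
    with open('configs/superpixels_graph_classification_GCN_MNIST.json') as f:  # assumed path
        config = json.load(f)
    params, net_params = config['params'], config['net_params']
    net_params['device'] = 'cuda' if torch.cuda.is_available() else 'cpu'
    dataset = LoadData(config['dataset'])
    net_params['total_param'] = view_model_param('GCN', net_params)
    out_dir = config['out_dir']                    # assumed config key
    dirs = (out_dir + 'logs/run0',                 # root_log_dir
            out_dir + 'checkpoints/run0',          # root_ckpt_dir
            out_dir + 'results/run0',              # write_file_name
            out_dir + 'configs/run0')              # write_config_file
    train_val_pipeline('GCN', dataset, params, net_params, dirs)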
perturb_indicator = "diff", perturb_mode = "mean", perturb_feature = "color" ) return parser.parse_args() prog_args = arg_parse() MNIST_test_dataset = ds.MNIST(root='PATH', train=False, download=True, transform=transforms.ToTensor()) MODEL_NAME = 'GCN' DATASET_NAME = 'MNIST' dataset = LoadData(DATASET_NAME) trainset, valset, testset = dataset.train, dataset.val, dataset.test net_params = GCN_params.net_params() model = gnn_model(MODEL_NAME, net_params) model.load_state_dict(torch.load("data/superpixels/epoch_188.pkl")) model.eval() test_loader = DataLoader(testset, batch_size=1, shuffle=False, drop_last=False, collate_fn=dataset.collate) index_to_explain = range(prog_args.start, prog_args.end) if prog_args.perturb_feature == "color": perturb_features_list = [0] elif prog_args.perturb_feature == "location": perturb_features_list = [1,2] Explanations = [] for iter, (graph, label, snorm_n, snorm_e) in enumerate(test_loader): if iter in index_to_explain:
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, args):
    load_model = args.load_model
    aug_type_list = ['drop_nodes', 'drop_add_edges', 'noise', 'mask',
                     'subgraph', 'new', 'random', 'random2']

    DATASET_NAME = dataset.name

    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()

    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    device = net_params['device']

    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device == 'cuda':
        torch.cuda.manual_seed(params['seed'])

    print('-' * 40 + "Finetune Option" + '-' * 40)
    print("Data Name: [{}]".format(DATASET_NAME))
    print("Model Name: [{}]".format(MODEL_NAME))
    print("Training Graphs:[{}]".format(len(trainset)))
    print("Valid Graphs: [{}]".format(len(valset)))
    print("Test Graphs: [{}]".format(len(testset)))
    print("Number Classes: [{}]".format(net_params['n_classes']))
    print("Learning rate: [{}]".format(params['init_lr']))
    print('-' * 40 + "Contrastive Option" + '-' * 40)
    print("Load model: [{}]".format(load_model))
    print("Aug Type: [{}]".format(aug_type_list[args.aug]))
    print("Projection head:[{}]".format(args.head))
    print('-' * 100)

    model = gnn_model(MODEL_NAME, net_params)

    if load_model:
        output_path = './001_contrastive_models'
        # output_path = './001_mask_models_03'
        save_model_dir0 = os.path.join(output_path, DATASET_NAME)
        save_model_dir1 = os.path.join(save_model_dir0, aug_type_list[args.aug])
        if args.head:
            save_model_dir1 += "_head"
        else:
            save_model_dir1 += "_no_head"
        save_model_dir2 = os.path.join(save_model_dir1, MODEL_NAME)

        load_file_name = glob.glob(save_model_dir2 + '/*.pkl')
        checkpoint = torch.load(load_file_name[-1])

        # keep only the pre-trained weights whose names match the current model;
        # strict=False so weights present in the model but absent from the
        # checkpoint (e.g. a fresh classification head) are left initialized
        model_dict = model.state_dict()
        state_dict = {k: v for k, v in checkpoint.items() if k in model_dict.keys()}
        model.load_state_dict(state_dict, strict=False)
        print('Successfully loaded pre-trained model: [{}]'.format(load_file_name[-1]))
    else:
        print('No model loaded: testing the baseline!')

    model = model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'],
                           weight_decay=params['weight_decay'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)

    # batching exception for Diffpool
    drop_last = True if MODEL_NAME == 'DiffPool' else False

    train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True,
                              drop_last=drop_last, collate_fn=dataset.collate)
    val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False,
                            drop_last=drop_last, collate_fn=dataset.collate)
    test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False,
                             drop_last=drop_last, collate_fn=dataset.collate)

    for epoch in range(params['epochs']):
        epoch_train_loss, epoch_train_acc, optimizer = train_epoch(
            model, optimizer, device, train_loader, epoch)
        epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)
        _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)

        print('-' * 80)
        print("Epoch [{}] Test Acc: [{:.4f}]".format(epoch + 1, epoch_test_acc))
        print('-' * 80)

        scheduler.step(epoch_val_loss)

        if optimizer.param_groups[0]['lr'] < params['min_lr']:
            print("\n!! LR EQUAL TO MIN LR SET.")
            break
LR EQUAL TO MIN LR SET.") break _, test_acc = evaluate_network(model, device, test_loader, epoch) _, train_acc = evaluate_network(model, device, train_loader, epoch) print("Test Accuracy: {:.4f}".format(test_acc)) print("Train Accuracy: {:.4f}".format(train_acc))
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, args):
    DATASET_NAME = dataset.name

    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()

    trainset = dataset.train
    valset = dataset.val
    testset = dataset.test
    device = net_params['device']

    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device == 'cuda':
        torch.cuda.manual_seed(params['seed'])

    # checkpoints live in './001_contrastive_models/DATASET_NAME/<aug>[_head|_no_head]/MODEL_NAME/*.pkl'
    output_path = './001_contrastive_models'
    save_model_dir0 = os.path.join(output_path, DATASET_NAME)
    save_model_dir1 = os.path.join(save_model_dir0, args.aug)
    if args.head:
        save_model_dir1 += "_head"
    else:
        save_model_dir1 += "_no_head"
    save_model_dir2 = os.path.join(save_model_dir1, MODEL_NAME)

    print('-' * 40 + "Training Option" + '-' * 40)
    print("Data Name: [{}]".format(DATASET_NAME))
    print("Model Name: [{}]".format(MODEL_NAME))
    print("Training Graphs:[{}]".format(len(trainset)))
    print("Batch Size: [{}]".format(net_params['batch_size']))
    print("Learning Rate: [{}]".format(params['init_lr']))
    print("Epoch To Train: [{}]".format(args.epochs))
    print("Model Save Dir: [{}]".format(save_model_dir2))
    print('-' * 40 + "Contrastive Option" + '-' * 40)
    print("Aug Type: [{}]".format(args.aug))
    print("Projection head:[{}]".format(args.head))
    print("Drop Proportion:[{}]".format(args.drop_percent))
    print("Temperature: [{}]".format(args.temp))
    print('-' * 100)

    model = gnn_model(MODEL_NAME, net_params)

    start_epoch = 0
    if args.resume:
        print("Resume ...")
        load_file_name = glob.glob(save_model_dir2 + '/*.pkl')[-1]
        epoch_nb = load_file_name.split('_')[-1]
        start_epoch = int(epoch_nb.split('.')[0]) + 1
        print("Successfully resumed at epoch: [{}]".format(start_epoch))

        checkpoint = torch.load(load_file_name)
        model_dict = model.state_dict()
        state_dict = {k: v for k, v in checkpoint.items() if k in model_dict.keys()}
        model.load_state_dict(state_dict)
        print('Successfully loaded resume model: [{}]'.format(load_file_name))
        print('-' * 100)

    model = model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'],
                           weight_decay=params['weight_decay'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)

    # batching exception for Diffpool
    drop_last = True if MODEL_NAME == 'DiffPool' else False

    train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True,
                              drop_last=drop_last, collate_fn=dataset.collate)
    val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=True,
                            drop_last=drop_last, collate_fn=dataset.collate)
    test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=True,
                             drop_last=drop_last, collate_fn=dataset.collate)

    run_time = 0
    for epoch in range(start_epoch, args.epochs):
        t0 = time.time()
        epoch_train_loss, optimizer = train_epoch(model, optimizer, device, train_loader,
                                                  epoch, drop_percent=args.drop_percent,
                                                  temp=args.temp, aug_type=args.aug,
                                                  head=args.head)
        epoch_time = time.time() - t0
        run_time += epoch_time

        scheduler.step(epoch_train_loss)

        print('-' * 120)
        print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' | ' +
              'Epoch [{:>2d}]: Loss [{:.4f}] Epoch Time: [{:.2f} min] Run Total Time: [{:.2f} min]'
              .format(epoch + 1, epoch_train_loss, epoch_time / 60, run_time / 60))
        print('-' * 120)

        # save to the same save_model_dir2 computed above
        if not args.debug:
            if not os.path.exists(save_model_dir2):
                os.makedirs(save_model_dir2)
            save_ckpt_path = '{}.pkl'.format(save_model_dir2 + "/" + "epoch_" + str(epoch))
            torch.save(model.state_dict(), save_ckpt_path)

            # keep only the two most recent checkpoints
            files = glob.glob(save_model_dir2 + '/*.pkl')
            for file in files:
                epoch_nb = file.split('_')[-1]
                epoch_nb = int(epoch_nb.split('.')[0])
                if epoch_nb < epoch - 1:
                    os.remove(file)
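# --- Hedged illustration (not in the original source) -------------------------
# A minimal NT-Xent (normalized temperature-scaled cross-entropy) sketch of the
# kind of loss a contrastive train_epoch with a `temp` argument typically
# minimizes over two augmented views of the same graphs. This is an assumption
# about train_epoch's internals, not a copy of it.
import torch.nn.functional as F

def nt_xent_loss(z1, z2, temp):
    # z1, z2: (batch, dim) embeddings of two augmentations of the same graphs
    z1, z2 = F.normalize(z1, dim=1), F.normalize(z2, dim=1)
    z = torch.cat([z1, z2], dim=0)                      # (2B, dim)
    sim = torch.mm(z, z.t()) / temp                     # pairwise cosine similarities
    sim.fill_diagonal_(float('-inf'))                   # exclude self-pairs
    batch = z1.size(0)
    # the positive for row i sits `batch` positions away in the concatenated view
    targets = torch.cat([torch.arange(batch, 2 * batch),
                         torch.arange(0, batch)]).to(z.device)
    return F.cross_entropy(sim, targets)

# Lower `temp` sharpens the similarity distribution, penalizing hard negatives
# more strongly; this matches the role of args.temp passed into train_epoch.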