import copy
import importlib

import numpy as np
import torch
from torch.nn import BCEWithLogitsLoss
from torch.optim import Adam
from torch.utils.data import DataLoader

# The remaining helpers (set_random_seed, load_dataset_for_classification,
# collate_molgraphs, load_model, load_mymodel, load_pretrained,
# run_a_train_epoch, run_an_eval_epoch, run_eval_epoch, EarlyStopping)
# are assumed to come from the project's own utility modules.


# Variant 1: single training run; the model is built by load_model(args)
# or restored from a pre-trained checkpoint.
def main(args):
    args['device'] = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    set_random_seed(args['random_seed'])

    # Interchangeable with other datasets
    dataset, train_set, val_set, test_set = load_dataset_for_classification(args)
    train_loader = DataLoader(train_set, batch_size=args['batch_size'],
                              collate_fn=collate_molgraphs, shuffle=True)
    val_loader = DataLoader(val_set, batch_size=args['batch_size'],
                            collate_fn=collate_molgraphs)
    test_loader = DataLoader(test_set, batch_size=args['batch_size'],
                             collate_fn=collate_molgraphs)

    if args['pre_trained']:
        args['num_epochs'] = 0
        model = load_pretrained(args['exp'])
    else:
        args['n_tasks'] = dataset.n_tasks
        model = load_model(args)
        loss_criterion = BCEWithLogitsLoss(
            pos_weight=dataset.task_pos_weights(torch.tensor(train_set.indices)).to(args['device']),
            reduction='none')
        optimizer = Adam(model.parameters(), lr=args['lr'])
        stopper = EarlyStopping(patience=args['patience'])
    model.to(args['device'])

    for epoch in range(args['num_epochs']):
        # Train
        run_a_train_epoch(args, epoch, model, train_loader, loss_criterion, optimizer)

        # Validation and early stop
        val_score = run_an_eval_epoch(args, model, val_loader)
        early_stop = stopper.step(val_score, model)
        print('epoch {:d}/{:d}, validation {} {:.4f}, best validation {} {:.4f}'.format(
            epoch + 1, args['num_epochs'], args['metric_name'],
            val_score, args['metric_name'], stopper.best_score))
        if early_stop:
            break

    if not args['pre_trained']:
        stopper.load_checkpoint(model)
    test_score = run_an_eval_epoch(args, model, test_loader)
    print('test {} {:.4f}'.format(args['metric_name'], test_score))
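
# A hypothetical driver for the variant above. Only the keys that main()
# indexes directly are listed; load_dataset_for_classification() and
# load_model() typically read further keys (e.g. the dataset and model
# names), so treat every value here as an illustrative assumption.
example_args = {
    'random_seed': 0,
    'batch_size': 128,
    'lr': 1e-3,
    'num_epochs': 100,
    'patience': 10,
    'metric_name': 'roc_auc',
    'pre_trained': False,
    'exp': 'GCN_Tox21',  # checkpoint name, only read when pre_trained is True
}
main(example_args)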
def main(args): args['device'] = "cuda" if torch.cuda.is_available() else "cpu" set_random_seed() # Interchangeable with other datasets dataset, train_set, val_set, test_set = load_dataset_for_classification( args) train_loader = DataLoader(train_set, batch_size=args['batch_size'], collate_fn=collate_molgraphs) val_loader = DataLoader(val_set, batch_size=args['batch_size'], collate_fn=collate_molgraphs) test_loader = DataLoader(test_set, batch_size=args['batch_size'], collate_fn=collate_molgraphs) if args['pre_trained']: args['num_epochs'] = 0 model = model_zoo.chem.load_pretrained(args['exp']) else: # Interchangeable with other models if args['model'] == 'GCN': model = model_zoo.chem.GCNClassifier( in_feats=args['in_feats'], gcn_hidden_feats=args['gcn_hidden_feats'], classifier_hidden_feats=args['classifier_hidden_feats'], n_tasks=dataset.n_tasks) elif args['model'] == 'GAT': model = model_zoo.chem.GATClassifier( in_feats=args['in_feats'], gat_hidden_feats=args['gat_hidden_feats'], num_heads=args['num_heads'], classifier_hidden_feats=args['classifier_hidden_feats'], n_tasks=dataset.n_tasks) loss_criterion = BCEWithLogitsLoss( pos_weight=dataset.task_pos_weights.to(args['device']), reduction='none') optimizer = Adam(model.parameters(), lr=args['lr']) stopper = EarlyStopping(patience=args['patience']) model.to(args['device']) for epoch in range(args['num_epochs']): # Train run_a_train_epoch(args, epoch, model, train_loader, loss_criterion, optimizer) # Validation and early stop val_score = run_an_eval_epoch(args, model, val_loader) early_stop = stopper.step(val_score, model) print( 'epoch {:d}/{:d}, validation {} {:.4f}, best validation {} {:.4f}'. format(epoch + 1, args['num_epochs'], args['metric_name'], val_score, args['metric_name'], stopper.best_score)) if early_stop: break if not args['pre_trained']: stopper.load_checkpoint(model) test_score = run_an_eval_epoch(args, model, test_loader) print('test {} {:.4f}'.format(args['metric_name'], test_score))
# Variant 3: sequential (lifelong/continual) training, one task at a time, with
# a lifelong-learning wrapper (e.g. TWP or LwF) around the base model and
# AP/AF reporting at the end.
def main(args):
    torch.cuda.set_device(args['gpu'])
    set_random_seed(args['random_seed'])

    dataset, train_set, val_set, test_set = load_dataset_for_classification(args)  # split sizes: 6264 / 783 / 784
    train_loader = DataLoader(train_set, batch_size=args['batch_size'],
                              collate_fn=collate_molgraphs, shuffle=True)
    val_loader = DataLoader(val_set, batch_size=args['batch_size'],
                            collate_fn=collate_molgraphs)
    test_loader = DataLoader(test_set, batch_size=args['batch_size'],
                             collate_fn=collate_molgraphs)

    if args['pre_trained']:
        args['num_epochs'] = 0
        model = load_pretrained(args['exp'])
    else:
        args['n_tasks'] = dataset.n_tasks
        if args['method'] == 'twp':
            model = load_mymodel(args)
            print(model)
        else:
            model = load_model(args)

    for name, parameters in model.named_parameters():
        print(name, ':', parameters.size())

    # Wrap the base model with the chosen lifelong-learning method
    method = args['method']
    life_model = importlib.import_module(f'LifeModel.{method}_model')
    life_model_ins = life_model.NET(model, args)

    # Full-dataset loader kept on the wrapper (used by some methods)
    data_loader = DataLoader(train_set, batch_size=len(train_set),
                             collate_fn=collate_molgraphs, shuffle=True)
    life_model_ins.data_loader = data_loader

    loss_criterion = BCEWithLogitsLoss(
        pos_weight=dataset.task_pos_weights.cuda(), reduction='none')
    model.cuda()

    score_mean = []
    score_matrix = np.zeros([args['n_tasks'], args['n_tasks']])
    prev_model = None
    for task_i in range(12):  # learn the 12 tasks sequentially
        print('\n********' + str(task_i))
        stopper = EarlyStopping(patience=args['patience'])
        for epoch in range(args['num_epochs']):
            # Train
            if args['method'] == 'lwf':
                life_model_ins.observe(train_loader, loss_criterion, task_i, args, prev_model)
            else:
                life_model_ins.observe(train_loader, loss_criterion, task_i, args)

            # Validation and early stop
            val_score = run_an_eval_epoch(args, model, val_loader, task_i)
            early_stop = stopper.step(val_score, model)
            if early_stop:
                print(epoch)
                break

        if not args['pre_trained']:
            stopper.load_checkpoint(model)
        # Row task_i: test scores on every task after learning task task_i
        score_matrix[task_i] = run_eval_epoch(args, model, test_loader)
        prev_model = copy.deepcopy(life_model_ins).cuda()

    # AP: average test performance over all tasks after the last task is learned
    print('AP: ', round(np.mean(score_matrix[-1, :]), 4))

    # AF: average change of each earlier task's score relative to the score it
    # had right after being learned (negative values indicate forgetting)
    backward = []
    for t in range(args['n_tasks'] - 1):
        b = score_matrix[args['n_tasks'] - 1][t] - score_matrix[t][t]
        backward.append(round(b, 4))
    mean_backward = round(np.mean(backward), 4)
    print('AF: ', mean_backward)
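
# Worked illustration (toy numbers) of the AP/AF bookkeeping above. Row i of
# the score matrix holds the per-task test scores measured after learning task
# i; AP averages the final row, and AF averages how much each earlier task's
# score changed relative to the score it had right after being learned.
toy = np.array([
    [0.80, 0.00, 0.00],   # after task 0
    [0.74, 0.82, 0.00],   # after task 1
    [0.70, 0.78, 0.85],   # after task 2 (final row)
])
n_toy_tasks = toy.shape[0]
toy_ap = np.mean(toy[-1, :])  # (0.70 + 0.78 + 0.85) / 3 ≈ 0.7767
toy_af = np.mean([toy[n_toy_tasks - 1, t] - toy[t, t] for t in range(n_toy_tasks - 1)])
# toy_af = ((0.70 - 0.80) + (0.78 - 0.82)) / 2 = -0.07, i.e. mild forgetting
print('toy AP:', round(toy_ap, 4), 'toy AF:', round(toy_af, 4))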