Example #1
import json

import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader

# collate_molgraphs, load_model, init_trial_path, run_a_train_epoch,
# run_an_eval_epoch and EarlyStopping are assumed to come from the project's
# utility modules (e.g. dgllife.utils and a local utils.py in DGL-LifeSci).

def main(args, exp_config, train_set, val_set, test_set):
    # Record settings
    exp_config.update({
        'model': args['model'],
        'in_feats': args['node_featurizer'].feat_size(),
        'n_tasks': args['n_tasks']
    })

    # Set up directory for saving results
    args = init_trial_path(args)

    train_loader = DataLoader(dataset=train_set,
                              batch_size=exp_config['batch_size'],
                              shuffle=True,
                              collate_fn=collate_molgraphs)
    val_loader = DataLoader(dataset=val_set,
                            batch_size=exp_config['batch_size'],
                            collate_fn=collate_molgraphs)
    test_loader = DataLoader(dataset=test_set,
                             batch_size=exp_config['batch_size'],
                             collate_fn=collate_molgraphs)
    model = load_model(exp_config).to(args['device'])

    loss_criterion = nn.SmoothL1Loss(reduction='none')
    optimizer = Adam(model.parameters(),
                     lr=exp_config['lr'],
                     weight_decay=exp_config['weight_decay'])
    stopper = EarlyStopping(patience=exp_config['patience'],
                            filename=args['trial_path'] + '/model.pth')

    for epoch in range(args['num_epochs']):
        # Train
        run_a_train_epoch(args, epoch, model, train_loader, loss_criterion,
                          optimizer)

        # Validation and early stop
        val_score = run_an_eval_epoch(args, model, val_loader)
        early_stop = stopper.step(val_score, model)
        print(
            'epoch {:d}/{:d}, validation {} {:.4f}, best validation {} {:.4f}'.
            format(epoch + 1, args['num_epochs'], args['metric'], val_score,
                   args['metric'], stopper.best_score))

        if early_stop:
            break

    stopper.load_checkpoint(model)
    test_score = run_an_eval_epoch(args, model, test_loader)
    print('test {} {:.4f}'.format(args['metric'], test_score))

    with open(args['trial_path'] + '/eval.txt', 'w') as f:
        f.write('Best val {}: {}\n'.format(args['metric'], stopper.best_score))
        f.write('Test {}: {}\n'.format(args['metric'], test_score))

    with open(args['trial_path'] + '/configure.json', 'w') as f:
        json.dump(exp_config, f, indent=2)

    return args['trial_path'], stopper.best_score
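
Both training examples in this listing depend on an EarlyStopping helper with a small interface: step(score, model) records the validation score, checkpoints the model when the score improves, and returns True once patience epochs pass without improvement, while load_checkpoint(model) restores the best weights. Below is a minimal sketch of that interface, not DGL-LifeSci's actual implementation, assuming a higher-is-better validation metric by default.

import torch

class EarlyStopping:
    """Minimal early-stopping helper matching the usage in the examples above."""

    def __init__(self, patience=10, filename='model.pth', higher_better=True):
        self.patience = patience
        self.filename = filename
        self.higher_better = higher_better
        self.counter = 0
        self.best_score = None
        self.early_stop = False

    def _improved(self, score):
        if self.best_score is None:
            return True
        return score > self.best_score if self.higher_better else score < self.best_score

    def step(self, score, model):
        # Checkpoint on improvement, otherwise count down the patience budget
        if self._improved(score):
            self.best_score = score
            torch.save({'model_state_dict': model.state_dict()}, self.filename)
            self.counter = 0
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True
        return self.early_stop

    def load_checkpoint(self, model):
        model.load_state_dict(torch.load(self.filename)['model_state_dict'])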
Example #2
    # Nested helper for hyperparameter search: args, results, main and
    # init_trial_path are captured from the enclosing scope; deepcopy comes
    # from copy and np from numpy.
    def objective(hyperparams):
        configure = deepcopy(args)
        save_path = init_trial_path(args)
        val_metrics, test_metrics = main(save_path, configure, hyperparams)

        if args['metric'] in ['roc_auc']:
            # Maximizing a non-negative score is equivalent to minimizing its negation
            val_metric_to_minimize = -1 * np.mean(val_metrics)
        else:
            val_metric_to_minimize = np.mean(val_metrics)

        results.append(
            (save_path, val_metric_to_minimize, val_metrics, test_metrics))

        return val_metric_to_minimize
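
An objective of this shape is typically handed to a Bayesian-optimization driver that calls it repeatedly and keeps the configuration with the smallest return value; in practice that driver is commonly a library such as hyperopt. The sketch below shows the same pattern end to end with a purely illustrative search space and a toy quadratic objective standing in for the real training run.

from hyperopt import fmin, hp, tpe

results = []

def toy_objective(hyperparams):
    # Stand-in for the real objective above: smaller is better
    val_metric_to_minimize = (hyperparams['lr'] - 1e-3) ** 2
    results.append((hyperparams, val_metric_to_minimize))
    return val_metric_to_minimize

best = fmin(toy_objective,
            space={'lr': hp.loguniform('lr', -9, -2)},
            algo=tpe.suggest,
            max_evals=20)
print('best hyperparameters:', best)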
Example #3
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader

# collate_molgraphs, load_model, init_trial_path, run_a_train_epoch,
# run_an_eval_epoch and EarlyStopping are assumed to come from the project's
# utility modules, as in Example #1.

def main(args, train_set, val_set, test_set):
    # Set up directory for saving results
    args = init_trial_path(args)

    train_loader = DataLoader(dataset=train_set,
                              batch_size=args['batch_size'],
                              shuffle=True,
                              collate_fn=collate_molgraphs)
    val_loader = DataLoader(dataset=val_set,
                            batch_size=args['batch_size'],
                            collate_fn=collate_molgraphs)
    test_loader = DataLoader(dataset=test_set,
                             batch_size=args['batch_size'],
                             collate_fn=collate_molgraphs)
    model = load_model(args).to(args['device'])

    loss_criterion = nn.BCEWithLogitsLoss(reduction='none')
    optimizer = Adam(model.parameters(),
                     lr=args['lr'],
                     weight_decay=args['weight_decay'])
    stopper = EarlyStopping(patience=args['patience'],
                            filename=args['trial_path'] + '/model.pth')

    for epoch in range(args['num_epochs']):
        # Train
        run_a_train_epoch(args, epoch, model, train_loader, loss_criterion,
                          optimizer)

        # Validation and early stop
        val_score = run_an_eval_epoch(args, model, val_loader)
        early_stop = stopper.step(val_score, model)
        print(
            'epoch {:d}/{:d}, validation {} {:.4f}, best validation {} {:.4f}'.
            format(epoch + 1, args['num_epochs'], args['metric'], val_score,
                   args['metric'], stopper.best_score))

        if early_stop:
            break

    # Load the best checkpoint saved by early stopping before evaluating on the test set
    stopper.load_checkpoint(model)
    test_score = run_an_eval_epoch(args, model, test_loader)
    print('test {} {:.4f}'.format(args['metric'], test_score))

    with open(args['trial_path'] + '/eval.txt', 'w') as f:
        f.write('Best val {}: {}\n'.format(args['metric'], stopper.best_score))
        f.write('Test {}: {}\n'.format(args['metric'], test_score))

    return args, stopper.best_score
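
Both examples construct their loss with reduction='none', which keeps one loss value per sample and per task instead of averaging immediately; run_a_train_epoch is then assumed to apply a label mask before averaging, a common pattern for multi-task molecular datasets where some labels are missing. A minimal sketch of that masking step, with made-up tensor shapes:

import torch
import torch.nn as nn

criterion = nn.BCEWithLogitsLoss(reduction='none')
logits = torch.randn(4, 3, requires_grad=True)   # (batch_size, n_tasks)
labels = torch.randint(0, 2, (4, 3)).float()     # binary task labels
mask = torch.randint(0, 2, (4, 3)).float()       # 1 where a label is defined

# Per-element losses, zeroed where labels are missing, then averaged
loss = (criterion(logits, labels) * mask).sum() / mask.sum()
loss.backward()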