Example #1
def class_balance(data_path: str, split_type: str):
    # Update args
    args.val_fold_index, args.test_fold_index = 1, 2
    args.split_type = 'predetermined'

    # Load data
    data = get_data(path=args.data_path,
                    smiles_column=args.smiles_column,
                    target_columns=args.target_columns)
    args.task_names = args.target_columns or get_task_names(
        path=args.data_path, smiles_column=args.smiles_column)

    # Average class sizes
    all_class_sizes = {'train': [], 'val': [], 'test': []}

    for i in range(10):
        print(f'Fold {i}')

        # Update args
        data_name = os.path.splitext(os.path.basename(data_path))[0]
        args.folds_file = f'/data/rsg/chemistry/yangk/lsc_experiments_dump_splits/data/{data_name}/{split_type}/fold_{i}/0/split_indices.pckl'

        if not os.path.exists(args.folds_file):
            print('Fold indices do not exist')
            continue

        # Split data
        train_data, val_data, test_data = split_data(
            data=data, split_type=args.split_type, args=args)

        # Determine class balance
        for data_split, split_name in [(train_data, 'train'),
                                       (val_data, 'val'), (test_data, 'test')]:
            class_sizes = get_class_sizes(data_split)
            print(f'Class sizes for {split_name}')

            for task_num, task_class_sizes in enumerate(class_sizes):
                print(
                    f'{args.task_names[task_num]} '
                    f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}'
                )

            all_class_sizes[split_name].append(class_sizes)

        print()

    # Mean and std across folds
    for split_name in ['train', 'val', 'test']:
        print(f'Average class sizes for {split_name}')

        mean_class_sizes = np.mean(all_class_sizes[split_name], axis=0)
        std_class_sizes = np.std(all_class_sizes[split_name], axis=0)

        for i, (mean_task_class_sizes, std_task_class_sizes) in enumerate(
                zip(mean_class_sizes, std_class_sizes)):
            print(
                f'{args.task_names[i]} '
                f'{", ".join(f"{cls}: {mean_size * 100:.2f}% +/- {std_size * 100:.2f}%" for cls, (mean_size, std_size) in enumerate(zip(mean_task_class_sizes, std_task_class_sizes)))}'
            )
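
A hedged aside (not part of the example): the per-task class fractions printed above can be computed directly from a 0/1 target matrix with missing labels. The helper below is an illustrative stand-in for get_class_sizes; the library's exact handling of missing values may differ.

import numpy as np

def class_fractions(targets):
    # targets: (num_molecules, num_tasks) array of 0/1 labels, NaN where missing.
    targets = np.asarray(targets, dtype=float)
    fractions = []
    for task_column in targets.T:
        labelled = task_column[~np.isnan(task_column)]
        total = max(len(labelled), 1)  # guard against an all-missing task
        fractions.append([np.sum(labelled == cls) / total for cls in (0, 1)])
    return fractions

# Toy check: two tasks, three molecules, one missing label.
print(class_fractions([[0, 1], [1, np.nan], [1, 1]]))  # [[0.33..., 0.66...], [0.0, 1.0]]
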
Example #2
def cross_validate_sklearn(args: Namespace,
                           logger: Logger = None) -> Tuple[float, float]:
    # TODO: Multi-task might not work with RF.
    info = logger.info if logger is not None else print
    args.task_names = get_task_names(args.data_path, args.data_format)
    init_seed = args.seed
    save_dir = args.save_dir

    # Run training on different random seeds for each fold
    all_scores = []
    for fold_num in range(args.num_folds):
        info(f'Fold {fold_num}')
        args.seed = init_seed + fold_num
        args.save_dir = os.path.join(save_dir, f'fold_{fold_num}')
        makedirs(args.save_dir)
        model_scores = run_sklearn(args, logger)
        all_scores.append(model_scores)
    all_scores = np.array(all_scores)

    # Report scores for each fold
    for fold_num, scores in enumerate(all_scores):
        info(
            f'Seed {init_seed + fold_num} ==> test {args.metric} = {np.nanmean(scores):.6f}'
        )

    # Report scores across folds
    avg_scores = np.nanmean(
        all_scores, axis=1)  # average score for each model across tasks
    mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
    info(f'Overall test {args.metric} = {mean_score:.6f} +/- {std_score:.6f}')

    return mean_score, std_score
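
A hedged illustration of the aggregation just reported, with made-up numbers: rows of all_scores are folds, columns are tasks; tasks are averaged within each fold first, then the mean and standard deviation are taken across folds.

import numpy as np

all_scores = np.array([[0.81, 0.79, np.nan],
                       [0.84, 0.77, 0.80],
                       [0.82, 0.78, 0.79]])

avg_scores = np.nanmean(all_scores, axis=1)  # one score per fold, tasks averaged
mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
print(f'Overall test metric = {mean_score:.6f} +/- {std_score:.6f}')
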
def plot_distribution(data_path: str, save_dir: str, bins: int):
    """
    Plots the distribution of values of a dataset.

    :param data_path: Path to data CSV file.
    :param save_dir: Directory where plot PNGs will be saved.
    :param bins: Number of bins in histogram.
    """
    # Get values
    task_names = get_task_names(data_path)
    data = get_data(data_path)
    targets = data.targets()

    # Arrange values by task
    data_size, num_tasks = len(targets), len(task_names)
    values = [[targets[i][j] for i in range(data_size)]
              for j in range(num_tasks)]

    # Plot distributions for each task
    data_name = os.path.basename(data_path).replace('.csv', '')

    for i in range(num_tasks):
        plt.clf()
        plt.hist(values[i], bins=bins)

        # Save plot
        plt.title('{} - {}'.format(data_name, task_names[i]))
        plt.xlabel(task_names[i])
        plt.ylabel('Frequency')
        plt.savefig(
            os.path.join(save_dir, '{}_{}.png'.format(data_name,
                                                      task_names[i])))
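
A minimal, hypothetical invocation (the CSV path, output directory, and bin count below are placeholders); note that plot_distribution writes into save_dir but does not create it.

import os

os.makedirs('plots', exist_ok=True)
plot_distribution(data_path='data/delaney.csv', save_dir='plots', bins=50)
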
Example #4
def cross_validate(args: Namespace,
                   logger: Logger = None) -> Tuple[float, float]:
    """k-fold cross validation"""
    info = logger.info if logger is not None else print

    # Initialize relevant variables
    init_seed = args.seed
    save_dir = args.save_dir
    task_names = get_task_names(args.data_path)
    desired_labels = get_desired_labels(args, task_names)

    # Run training on different random seeds for each fold
    all_scores = []
    for fold_num in range(args.num_folds):
        info(f'Fold {fold_num}')
        args.seed = init_seed + fold_num
        args.save_dir = os.path.join(save_dir, f'fold_{fold_num}')
        os.makedirs(args.save_dir, exist_ok=True)
        model_scores = run_training(args, logger)
        all_scores.append(model_scores)
    all_scores = np.array(all_scores)

    # Report results
    info(f'{args.num_folds}-fold cross validation')

    # Report scores for each fold
    for fold_num, scores in enumerate(all_scores):
        info(
            f'Seed {init_seed + fold_num} ==> test {args.metric} = {np.nanmean(scores):.6f}'
        )

        if args.show_individual_scores:
            for task_name, score in zip(task_names, scores):
                if task_name in desired_labels:
                    info(
                        f'Seed {init_seed + fold_num} ==> test {task_name} {args.metric} = {score:.6f}'
                    )

    # Report scores across models
    avg_scores = np.nanmean(
        all_scores, axis=1)  # average score for each model across tasks
    mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
    info(f'Overall test {args.metric} = {mean_score:.6f} +/- {std_score:.6f}')

    if args.show_individual_scores:
        for task_num, task_name in enumerate(task_names):
            if task_name in desired_labels:
                info(
                    f'Overall test {task_name} {args.metric} = '
                    f'{np.nanmean(all_scores[:, task_num]):.6f} +/- {np.nanstd(all_scores[:, task_num]):.6f}'
                )

    if args.num_chunks > 1:
        shutil.rmtree(args.chunk_temp_dir)

    return mean_score, std_score
Example #5
def cross_validate(args: TrainArgs,
                   logger: Logger = None) -> Tuple[float, float]:
    """k-fold cross validation"""
    info = logger.info if logger is not None else print

    # Initialize relevant variables
    init_seed = args.seed
    save_dir = args.save_dir
    task_names = args.target_columns or get_task_names(args.data_path)

    # Run training on different random seeds for each fold
    all_scores = []
    for fold_num in range(args.num_folds):
        info(f'Fold {fold_num}')
        args.seed = init_seed + fold_num
        args.save_dir = os.path.join(save_dir, f'fold_{fold_num}')
        makedirs(args.save_dir)
        model_scores, uncertainty_estimator = run_training(args, logger)

        # Save one model for each fold
        if uncertainty_estimator:
            process_estimator(uncertainty_estimator, logger, args, fold_num)
        all_scores.append(model_scores)
    all_scores = np.array(all_scores)

    # Report results
    info(f'{args.num_folds}-fold cross validation')

    # Report scores for each fold
    for fold_num, scores in enumerate(all_scores):
        info(
            f'Seed {init_seed + fold_num} ==> test {args.metric} = {np.nanmean(scores):.6f}'
        )

        if args.show_individual_scores:
            for task_name, score in zip(task_names, scores):
                info(
                    f'Seed {init_seed + fold_num} ==> test {task_name} {args.metric} = {score:.6f}'
                )

    # Report scores across models
    avg_scores = np.nanmean(
        all_scores, axis=1)  # average score for each model across tasks
    mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
    info(f'Overall test {args.metric} = {mean_score:.6f} +/- {std_score:.6f}')

    if args.show_individual_scores:
        for task_num, task_name in enumerate(task_names):
            info(
                f'Overall test {task_name} {args.metric} = '
                f'{np.nanmean(all_scores[:, task_num]):.6f} +/- {np.nanstd(all_scores[:, task_num]):.6f}'
            )

    return mean_score, std_score
Example #6
def predict_sklearn(args: SklearnPredictArgs):
    print('Loading data')
    data = get_data(path=args.test_path,
                    smiles_column=args.smiles_column,
                    target_columns=[])

    print('Computing morgan fingerprints')
    morgan_fingerprint = get_features_generator('morgan')
    for datapoint in tqdm(data, total=len(data)):
        datapoint.set_features(
            morgan_fingerprint(mol=datapoint.smiles,
                               radius=args.radius,
                               num_bits=args.num_bits))

    print(
        f'Predicting with an ensemble of {len(args.checkpoint_paths)} models')
    sum_preds = np.zeros((len(data), args.num_tasks))

    for checkpoint_path in tqdm(args.checkpoint_paths,
                                total=len(args.checkpoint_paths)):
        with open(checkpoint_path, 'rb') as f:
            model = pickle.load(f)

        model_preds = predict(model=model,
                              model_type=args.model_type,
                              dataset_type=args.dataset_type,
                              features=data.features())
        sum_preds += np.array(model_preds)

    # Ensemble predictions
    avg_preds = sum_preds / len(args.checkpoint_paths)
    avg_preds = avg_preds.tolist()

    print(f'Saving predictions to {args.preds_path}')
    assert len(data) == len(avg_preds)
    makedirs(args.preds_path, isfile=True)

    # Copy predictions over to data
    task_names = get_task_names(path=args.test_path)
    for datapoint, preds in zip(data, avg_preds):
        for pred_name, pred in zip(task_names, preds):
            datapoint.row[pred_name] = pred

    # Save
    with open(args.preds_path, 'w') as f:
        writer = csv.DictWriter(f, fieldnames=data[0].row.keys())
        writer.writeheader()

        for datapoint in data:
            writer.writerow(datapoint.row)
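
For context, a hedged sketch of what a 'morgan' features generator typically does with RDKit; the real get_features_generator wrapper may differ in defaults and dtype.

import numpy as np
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem

def morgan_bits(smiles: str, radius: int = 2, num_bits: int = 2048) -> np.ndarray:
    # SMILES -> molecule -> fixed-length Morgan bit vector -> numpy array.
    mol = Chem.MolFromSmiles(smiles)
    fp = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=num_bits)
    features = np.zeros((num_bits,))
    DataStructs.ConvertToNumpyArray(fp, features)
    return features

print(int(morgan_bits('CCO').sum()))  # number of set bits for ethanol
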
Example #7
def predict_sklearn(args: Namespace):
    print('Loading data')
    data = get_data(path=args.test_path)

    print('Computing morgan fingerprints')
    morgan_fingerprint = get_features_generator('morgan')
    for datapoint in tqdm(data, total=len(data)):
        datapoint.set_features(morgan_fingerprint(mol=datapoint.smiles, radius=args.radius, num_bits=args.num_bits))

    print(f'Predicting with an ensemble of {len(args.checkpoint_paths)} models')
    sum_preds = np.zeros((len(data), args.num_tasks))

    for checkpoint_path in tqdm(args.checkpoint_paths, total=len(args.checkpoint_paths)):
        with open(checkpoint_path, 'rb') as f:
            model = pickle.load(f)

        model_preds = predict(
            model=model,
            model_type=args.model_type,
            dataset_type=args.dataset_type,
            features=data.features()
        )
        sum_preds += np.array(model_preds)

    # Ensemble predictions
    avg_preds = sum_preds / len(args.checkpoint_paths)
    avg_preds = avg_preds.tolist()

    print('Saving predictions')
    assert len(data) == len(avg_preds)
    makedirs(args.preds_path, isfile=True)

    with open(args.preds_path, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['smiles'] + get_task_names(args.test_path))

        for smiles, pred in zip(data.smiles(), avg_preds):
            writer.writerow([smiles] + pred)
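
The sum-then-divide ensembling used in both predict_sklearn variants above, shown on toy numbers (values are illustrative only):

import numpy as np

model_preds = [np.array([[0.2, 0.9]]),
               np.array([[0.4, 0.7]]),
               np.array([[0.3, 0.8]])]

sum_preds = np.zeros_like(model_preds[0])
for preds in model_preds:
    sum_preds += preds
avg_preds = sum_preds / len(model_preds)  # [[0.3, 0.8]]
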
Example #8
def predict_sklearn(args: SklearnPredictArgs):

    if args.parcel_size and args.max_data_size:
        num_iterations = math.ceil(args.max_data_size / args.parcel_size)
        max_data_size = args.parcel_size
    else:
        num_iterations = 1
        max_data_size = args.max_data_size
    offset = 0

    for iteration in range(num_iterations):

        if iteration > 0:
            offset = offset + args.parcel_size
            max_data_size = max_data_size + args.parcel_size

        print('Loading data')
        data = get_data(path=args.test_path,
                        smiles_column=args.smiles_column,
                        target_columns=[],
                        max_data_size=max_data_size,
                        data_offset=offset)

        print('Computing morgan fingerprints')
        morgan_fingerprint = get_features_generator('morgan')
        for datapoint in tqdm(data, total=len(data)):
            datapoint.set_features(
                morgan_fingerprint(mol=datapoint.smiles,
                                   radius=args.radius,
                                   num_bits=args.num_bits))

        print(
            f'Predicting with an ensemble of {len(args.checkpoint_paths)} models'
        )
        sum_preds = np.zeros((len(data), args.num_tasks))

        for checkpoint_path in tqdm(args.checkpoint_paths,
                                    total=len(args.checkpoint_paths)):
            with open(checkpoint_path, 'rb') as f:
                model = pickle.load(f)

            model_preds = predict(model=model,
                                  model_type=args.model_type,
                                  dataset_type=args.dataset_type,
                                  features=data.features())
            sum_preds += np.array(model_preds)

        # Ensemble predictions
        avg_preds = sum_preds / len(args.checkpoint_paths)
        avg_preds = avg_preds.tolist()

        print(f'Saving predictions to {args.preds_path}')
        assert len(data) == len(avg_preds)
        makedirs(args.preds_path, isfile=True)

        # Copy predictions over to data
        task_names = get_task_names(path=args.test_path)
        for datapoint, preds in zip(data, avg_preds):
            for pred_name, pred in zip(task_names, preds):
                datapoint.row[pred_name] = pred

        # Save
        if iteration != 0:
            name, ext = os.path.splitext(args.preds_path)
            preds_path = "{name}.{it}{ext}".format(name=name,
                                                   it=iteration,
                                                   ext=ext)
        else:
            preds_path = args.preds_path
        with open(preds_path, 'w') as f:
            writer = csv.DictWriter(f, fieldnames=data[0].row.keys())
            writer.writeheader()

            for datapoint in data:
                writer.writerow(datapoint.row)
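
A hedged sketch of the parcel bookkeeping above: the loop walks the dataset in fixed-size windows, advancing both the offset and the size limit by parcel_size on every iteration after the first (names here are illustrative, not the library's).

import math

def parcel_windows(max_data_size: int, parcel_size: int):
    # Yield (data_offset, max_data_size) pairs matching the loop above.
    num_iterations = math.ceil(max_data_size / parcel_size)
    offset, limit = 0, parcel_size
    for iteration in range(num_iterations):
        if iteration > 0:
            offset += parcel_size
            limit += parcel_size
        yield offset, limit

print(list(parcel_windows(max_data_size=10, parcel_size=4)))
# [(0, 4), (4, 8), (8, 12)]
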
def run_training(args: Namespace, logger: Logger = None) -> List[float]:
    """
    Trains a model and returns test scores on the model checkpoint with the highest validation score.

    :param args: Arguments.
    :param logger: Logger.
    :return: A list of ensemble scores for each task.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Set GPU
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)

    # Print args
    # debug(pformat(vars(args)))

    # Get data
    debug('Loading data')
    args.task_names = get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')

    # Split data
    debug(f'Splitting data with seed {args.seed}')
    if args.separate_test_path:
        test_data = get_data(path=args.separate_test_path,
                             args=args,
                             features_path=args.separate_test_features_path,
                             logger=logger)
    if args.separate_val_path:
        val_data = get_data(path=args.separate_val_path,
                            args=args,
                            features_path=args.separate_val_features_path,
                            logger=logger)

    if args.separate_val_path and args.separate_test_path:
        train_data = data
    elif args.separate_val_path:
        train_data, _, test_data = split_data(data=data,
                                              split_type=args.split_type,
                                              sizes=(0.8, 0.0, 0.2),
                                              seed=args.seed,
                                              args=args,
                                              logger=logger)
    elif args.separate_test_path:
        train_data, val_data, _ = split_data(data=data,
                                             split_type=args.split_type,
                                             sizes=(0.8, 0.2, 0.0),
                                             seed=args.seed,
                                             args=args,
                                             logger=logger)
    else:
        print('=' * 100)
        train_data, val_data, test_data = split_data(
            data=data,
            split_type=args.split_type,
            sizes=args.split_sizes,
            seed=args.seed,
            args=args,
            logger=logger)

        ###my_code###
        train_df = get_data_df(train_data)
        train_df.to_csv(
            '~/PycharmProjects/CMPNN-master/data/24w_train_df_seed0.csv')
        val_df = get_data_df(val_data)
        val_df.to_csv(
            '~/PycharmProjects/CMPNN-master/data/24w_val_df_seed0.csv')
        test_df = get_data_df(test_data)
        test_df.to_csv(
            '~/PycharmProjects/CMPNN-master/data/24w_test_df_seed0.csv')

        ##########

    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(data)
        debug('Class sizes')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(
                f'{args.task_names[i]} '
                f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}'
            )

    if args.save_smiles_splits:
        with open(args.data_path, 'r') as f:
            reader = csv.reader(f)
            header = next(reader)

            lines_by_smiles = {}
            indices_by_smiles = {}
            for i, line in enumerate(reader):
                smiles = line[0]
                lines_by_smiles[smiles] = line
                indices_by_smiles[smiles] = i

        all_split_indices = []
        for dataset, name in [(train_data, 'train'), (val_data, 'val'),
                              (test_data, 'test')]:
            with open(os.path.join(args.save_dir, name + '_smiles.csv'),
                      'w') as f:
                writer = csv.writer(f)
                writer.writerow(['smiles'])
                for smiles in dataset.smiles():
                    writer.writerow([smiles])
            with open(os.path.join(args.save_dir, name + '_full.csv'),
                      'w') as f:
                writer = csv.writer(f)
                writer.writerow(header)
                for smiles in dataset.smiles():
                    writer.writerow(lines_by_smiles[smiles])
            split_indices = []
            for smiles in dataset.smiles():
                split_indices.append(indices_by_smiles[smiles])
            split_indices = sorted(split_indices)
            all_split_indices.append(split_indices)
        with open(os.path.join(args.save_dir, 'split_indices.pckl'),
                  'wb') as f:
            pickle.dump(all_split_indices, f)

    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(train_data)

    debug(
        f'Total size = {len(data):,} | '
        f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}'
    )

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        train_smiles, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    # Get loss and metric functions
    loss_func = get_loss_func(args)
    metric_func = get_metric_func(metric=args.metric)

    # Set up test set evaluation
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    if args.dataset_type == 'multiclass':
        sum_test_preds = np.zeros(
            (len(test_smiles), args.num_tasks, args.multiclass_num_classes))
    else:
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # Train ensemble of models
    for model_idx in range(args.ensemble_size):
        # Tensorboard writer
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)
        try:
            writer = SummaryWriter(log_dir=save_dir)
        except TypeError:  # fall back for SummaryWriter versions that take `logdir` instead of `log_dir`
            writer = SummaryWriter(logdir=save_dir)
        # Load/build model
        if args.checkpoint_paths is not None:
            debug(
                f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}'
            )
            model = load_checkpoint(args.checkpoint_paths[model_idx],
                                    current_args=args,
                                    logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = build_model(args)

        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
            model = model.cuda()

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler,
                        features_scaler, args)

        # Optimizers
        optimizer = build_optimizer(model, args)

        # Learning rate schedulers
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in range(args.epochs):
            debug(f'Epoch {epoch}')

            n_iter = train(model=model,
                           data=train_data,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           logger=logger,
                           writer=writer)
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()
            val_scores = evaluate(model=model,
                                  data=val_data,
                                  num_tasks=args.num_tasks,
                                  metric_func=metric_func,
                                  batch_size=args.batch_size,
                                  dataset_type=args.dataset_type,
                                  scaler=scaler,
                                  logger=logger)

            # Average validation score
            avg_val_score = np.nanmean(val_scores)
            debug(f'Validation {args.metric} = {avg_val_score:.6f}')
            writer.add_scalar(f'validation_{args.metric}', avg_val_score,
                              n_iter)

            if args.show_individual_scores:
                # Individual validation scores
                for task_name, val_score in zip(args.task_names, val_scores):
                    debug(
                        f'Validation {task_name} {args.metric} = {val_score:.6f}'
                    )
                    writer.add_scalar(f'validation_{task_name}_{args.metric}',
                                      val_score, n_iter)

            # Save model checkpoint if improved validation score
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score:
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, 'model.pt'), model,
                                scaler, features_scaler, args)

        # Evaluate on test set using model with best validation score
        info(
            f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}'
        )
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'),
                                cuda=args.cuda,
                                logger=logger)

        test_preds = predict(model=model,
                             data=test_data,
                             batch_size=args.batch_size,
                             scaler=scaler)
        test_scores = evaluate_predictions(preds=test_preds,
                                           targets=test_targets,
                                           num_tasks=args.num_tasks,
                                           metric_func=metric_func,
                                           dataset_type=args.dataset_type,
                                           logger=logger)

        if len(test_preds) != 0:
            sum_test_preds += np.array(test_preds)

        # Average test score
        avg_test_score = np.nanmean(test_scores)
        info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
        writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)

        if args.show_individual_scores:
            # Individual test scores
            for task_name, test_score in zip(args.task_names, test_scores):
                info(
                    f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}'
                )
                writer.add_scalar(f'test_{task_name}_{args.metric}',
                                  test_score, n_iter)

    # Evaluate ensemble on test set
    avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()

    ensemble_scores = evaluate_predictions(preds=avg_test_preds,
                                           targets=test_targets,
                                           num_tasks=args.num_tasks,
                                           metric_func=metric_func,
                                           dataset_type=args.dataset_type,
                                           logger=logger)

    # Average ensemble score
    avg_ensemble_test_score = np.nanmean(ensemble_scores)
    info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')
    writer.add_scalar(f'ensemble_test_{args.metric}', avg_ensemble_test_score,
                      0)

    # Individual ensemble scores
    if args.show_individual_scores:
        for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
            info(
                f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}'
            )

    return ensemble_scores
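
A hedged sketch of the regression target scaling used in run_training, with scikit-learn's StandardScaler standing in for the library's own scaler (which additionally tolerates missing targets): targets are standardised for training, and predictions are mapped back with the inverse transform at evaluation time.

import numpy as np
from sklearn.preprocessing import StandardScaler

train_targets = np.array([[1.2], [0.7], [2.4], [1.9]])  # toy single-task targets

scaler = StandardScaler().fit(train_targets)
scaled_targets = scaler.transform(train_targets)  # what the model trains on

preds_scaled = scaled_targets[:2]                 # pretend model output in scaled space
preds = scaler.inverse_transform(preds_scaled)    # back to the original units
print(preds)                                      # recovers [[1.2], [0.7]]
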
def predict_feature(args, logger, model, external_test_path):
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Print command line
    debug('Command line')
    debug(f'python {" ".join(sys.argv)}')

    # Print args
    debug('Args')
    debug(args)

    # Save args
    args.save(os.path.join(args.save_dir, 'args.json'))

    # Set pytorch seed for random initial weights
    torch.manual_seed(args.pytorch_seed)

    # Get data
    debug('Loading data')
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')

    external_test_data = get_data(path=external_test_path,
                                  args=args,
                                  logger=logger)

    # Split data
    debug(f'Splitting data with seed {args.seed}')
    if args.separate_test_path:
        test_data = get_data(path=args.separate_test_path,
                             args=args,
                             features_path=args.separate_test_features_path,
                             logger=logger)
    if args.separate_val_path:
        val_data = get_data(path=args.separate_val_path,
                            args=args,
                            features_path=args.separate_val_features_path,
                            logger=logger)

    if args.separate_val_path and args.separate_test_path:
        train_data = data
    elif args.separate_val_path:
        train_data, _, test_data = split_data(data=data,
                                              split_type=args.split_type,
                                              sizes=(0.8, 0.0, 0.2),
                                              seed=args.seed,
                                              args=args,
                                              logger=logger)
    elif args.separate_test_path:
        train_data, val_data, _ = split_data(data=data,
                                             split_type=args.split_type,
                                             sizes=(0.8, 0.2, 0.0),
                                             seed=args.seed,
                                             args=args,
                                             logger=logger)
    else:
        train_data, val_data, test_data = split_data(
            data=data,
            split_type=args.split_type,
            sizes=args.split_sizes,
            seed=args.seed,
            args=args,
            logger=logger)

    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(data)
        debug('Class sizes')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(
                f'{args.task_names[i]} '
                f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}'
            )

    if args.save_smiles_splits:
        save_smiles_splits(train_data=train_data,
                           val_data=val_data,
                           test_data=test_data,
                           data_path=args.data_path,
                           save_dir=args.save_dir)

    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        external_test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(train_data)

    debug(
        f'Total size = {len(data):,} | '
        f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}'
    )

    scaler = None

    # Automatically determine whether to cache
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers

    # Create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data,
                                           batch_size=args.batch_size,
                                           num_workers=num_workers,
                                           cache=cache,
                                           class_balance=args.class_balance,
                                           shuffle=False,
                                           seed=args.seed)
    external_test_loader = MoleculeDataLoader(dataset=external_test_data,
                                              batch_size=args.batch_size,
                                              num_workers=num_workers,
                                              cache=cache,
                                              class_balance=args.class_balance,
                                              shuffle=False,
                                              seed=args.seed)
    external_test_preds, external_test_feature = predict(
        model=model, data_loader=external_test_loader, scaler=scaler)
    external_test_smiles = external_test_data.smiles()
    external_test_targets = external_test_data.targets()
    return external_test_smiles, external_test_feature, external_test_preds, external_test_targets
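
A hedged sketch of the feature-scaling contract relied on above: statistics are fit once on the training features and re-applied to the external test set, with NaNs replaced by a fixed token. The helper below is illustrative, not the library's normalize_features.

import numpy as np

def fit_feature_scaler(train_features, replace_nan_token=0):
    X = np.asarray(train_features, dtype=float)
    means = np.nanmean(X, axis=0)
    stds = np.nanstd(X, axis=0)
    stds[stds == 0] = 1.0  # avoid division by zero for constant features

    def transform(features):
        Z = (np.asarray(features, dtype=float) - means) / stds
        return np.where(np.isnan(Z), replace_nan_token, Z)

    return transform

scale = fit_feature_scaler([[1.0, 2.0], [3.0, 2.0]])
print(scale([[2.0, np.nan]]))  # [[0.0, 0.0]]
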
def cross_validate(args: TrainArgs,
                   logger: Logger = None) -> Tuple[float, float]:
    """k-fold cross validation"""
    info = logger.info if logger is not None else print

    # Initialize relevant variables
    init_seed = args.seed
    save_dir = args.save_dir
    args.task_names = get_task_names(path=args.data_path,
                                     smiles_column=args.smiles_column,
                                     target_columns=args.target_columns,
                                     ignore_columns=args.ignore_columns)

    # Run training on different random seeds for each fold
    all_scores = []
    for fold_num in range(args.num_folds):
        info(f'Fold {fold_num}')
        args.seed = init_seed + fold_num
        args.save_dir = os.path.join(save_dir, f'fold_{fold_num}')
        makedirs(args.save_dir)
        model_scores = run_training(args, logger)
        all_scores.append(model_scores)
    all_scores = np.array(all_scores)

    # Report results
    info(f'{args.num_folds}-fold cross validation')

    # Report scores for each fold
    for fold_num, scores in enumerate(all_scores):
        info(
            f'Seed {init_seed + fold_num} ==> test {args.metric} = {np.nanmean(scores):.6f}'
        )

        if args.show_individual_scores:
            for task_name, score in zip(args.task_names, scores):
                info(
                    f'Seed {init_seed + fold_num} ==> test {task_name} {args.metric} = {score:.6f}'
                )

    # Report scores across models
    avg_scores = np.nanmean(
        all_scores, axis=1)  # average score for each model across tasks
    mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
    info(f'Overall test {args.metric} = {mean_score:.6f} +/- {std_score:.6f}')

    if args.show_individual_scores:
        for task_num, task_name in enumerate(args.task_names):
            info(
                f'Overall test {task_name} {args.metric} = '
                f'{np.nanmean(all_scores[:, task_num]):.6f} +/- {np.nanstd(all_scores[:, task_num]):.6f}'
            )

    # Save scores
    with open(os.path.join(save_dir, 'test_scores.csv'), 'w') as f:
        writer = csv.writer(f)
        writer.writerow([
            'Task', f'Mean {args.metric}', f'Standard deviation {args.metric}'
        ] + [f'Fold {i} {args.metric}' for i in range(args.num_folds)])

        for task_num, task_name in enumerate(args.task_names):
            task_scores = all_scores[:, task_num]
            mean, std = np.nanmean(task_scores), np.nanstd(task_scores)
            writer.writerow([task_name, mean, std] + task_scores.tolist())

    return mean_score, std_score
Example #12
def run_sklearn(args: SklearnTrainArgs, logger: Logger = None) -> List[float]:
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    debug(pformat(vars(args)))

    metric_func = get_metric_func(args.metric)

    debug('Loading data')
    data = get_data(path=args.data_path,
                    smiles_column=args.smiles_column,
                    target_columns=args.target_columns)
    args.task_names = get_task_names(path=args.data_path,
                                     smiles_column=args.smiles_column,
                                     target_columns=args.target_columns,
                                     ignore_columns=args.ignore_columns)

    if args.model_type == 'svm' and data.num_tasks() != 1:
        raise ValueError(
            f'SVM can only handle single-task data but found {data.num_tasks()} tasks'
        )

    debug(f'Splitting data with seed {args.seed}')
    # Need to have val set so that train and test sets are the same as when doing MPN
    train_data, _, test_data = split_data(data=data,
                                          split_type=args.split_type,
                                          seed=args.seed,
                                          sizes=args.split_sizes,
                                          args=args)

    debug(
        f'Total size = {len(data):,} | train size = {len(train_data):,} | test size = {len(test_data):,}'
    )

    debug('Computing morgan fingerprints')
    morgan_fingerprint = get_features_generator('morgan')
    for dataset in [train_data, test_data]:
        for datapoint in tqdm(dataset, total=len(dataset)):
            datapoint.set_features(
                morgan_fingerprint(mol=datapoint.smiles,
                                   radius=args.radius,
                                   num_bits=args.num_bits))

    debug('Building model')
    if args.dataset_type == 'regression':
        if args.model_type == 'random_forest':
            model = RandomForestRegressor(n_estimators=args.num_trees,
                                          n_jobs=-1)
        elif args.model_type == 'svm':
            model = SVR()
        else:
            raise ValueError(f'Model type "{args.model_type}" not supported')
    elif args.dataset_type == 'classification':
        if args.model_type == 'random_forest':
            model = RandomForestClassifier(n_estimators=args.num_trees,
                                           n_jobs=-1,
                                           class_weight=args.class_weight)
        elif args.model_type == 'svm':
            model = SVC()
        else:
            raise ValueError(f'Model type "{args.model_type}" not supported')
    else:
        raise ValueError(f'Dataset type "{args.dataset_type}" not supported')

    debug(model)

    model.train_args = args.as_dict()

    debug('Training')
    if args.single_task:
        scores = single_task_sklearn(model=model,
                                     train_data=train_data,
                                     test_data=test_data,
                                     metric_func=metric_func,
                                     args=args,
                                     logger=logger)
    else:
        scores = multi_task_sklearn(model=model,
                                    train_data=train_data,
                                    test_data=test_data,
                                    metric_func=metric_func,
                                    args=args,
                                    logger=logger)

    info(f'Test {args.metric} = {np.nanmean(scores)}')

    return scores
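
For orientation, the random-forest regression branch above reduces to fitting scikit-learn on Morgan bit vectors; a hedged, self-contained sketch with random stand-in data:

import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(0)
X_train = rng.integers(0, 2, size=(100, 2048))  # stand-in Morgan bit vectors
y_train = rng.random(100)                       # stand-in single-task targets

model = RandomForestRegressor(n_estimators=100, n_jobs=-1)
model.fit(X_train, y_train)
preds = model.predict(rng.integers(0, 2, size=(10, 2048)))
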
Example #13
def new_noise(args: TrainArgs, logger: Logger = None) -> List[float]:
    """
    Trains a model and returns test scores on the model checkpoint with the highest validation score.

    :param args: Arguments.
    :param logger: Logger.
    :return: A list of ensemble scores for each task.
    """

    debug = info = print

    # Get data
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()

    # Split data
    debug(f'Splitting data with seed {args.seed}')
    train_data, val_data, test_data = split_data(data=data,
                                                 split_type=args.split_type,
                                                 sizes=args.split_sizes,
                                                 seed=args.seed,
                                                 args=args,
                                                 logger=logger)

    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(train_data)

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        train_smiles, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    # Get loss and metric functions
    loss_func = neg_log_like
    metric_func = get_metric_func(metric=args.metric)

    # Set up test set evaluation
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # Automatically determine whether to cache
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers

    # Create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data,
                                           batch_size=args.batch_size,
                                           num_workers=num_workers,
                                           cache=cache)
    val_data_loader = MoleculeDataLoader(dataset=val_data,
                                         batch_size=args.batch_size,
                                         num_workers=num_workers,
                                         cache=cache)
    test_data_loader = MoleculeDataLoader(dataset=test_data,
                                          batch_size=args.batch_size,
                                          num_workers=num_workers,
                                          cache=cache)

    ###########################################
    ########## Outer loop over ensemble members
    ###########################################

    for model_idx in range(args.ensemble_start_idx,
                           args.ensemble_start_idx + args.ensemble_size):

        # load the model
        if args.method in ('map', 'swag', 'sgld'):
            model = load_checkpoint(args.checkpoint_path +
                                    f'/model_{model_idx}/model.pt',
                                    device=args.device,
                                    logger=logger)

        if args.method == 'gp':
            args.num_inducing_points = 1200
            fake_model = MoleculeModel(args)
            fake_model.featurizer = True
            feature_extractor = fake_model
            inducing_points = initial_inducing_points(train_data_loader,
                                                      feature_extractor, args)
            gp_layer = GPLayer(inducing_points, args.num_tasks)
            model = load_checkpoint(
                args.checkpoint_path + f'/model_{model_idx}/DKN_model.pt',
                device=args.device,
                logger=None,
                template=DKLMoleculeModel(MoleculeModel(args, featurizer=True),
                                          gp_layer))

        if args.method == 'dropR' or args.method == 'dropA':
            model = load_checkpoint(args.checkpoint_path +
                                    f'/model_{model_idx}/model.pt',
                                    device=args.device,
                                    logger=logger)

        if args.method == 'bbp':
            template = MoleculeModelBBP(args)
            for layer in template.children():
                if isinstance(layer, BayesLinear):
                    layer.init_rho(args.rho_min_bbp, args.rho_max_bbp)
            for layer in template.encoder.encoder.children():
                if isinstance(layer, BayesLinear):
                    layer.init_rho(args.rho_min_bbp, args.rho_max_bbp)
            model = load_checkpoint(args.checkpoint_path +
                                    f'/model_{model_idx}/model_bbp.pt',
                                    device=args.device,
                                    logger=None,
                                    template=template)

        if args.method == 'dun':
            args.prior_sig_dun = 0.05
            args.depth_min = 1
            args.depth_max = 5
            args.rho_min_dun = -5.5
            args.rho_max_dun = -5
            args.log_cat_init = 0
            template = MoleculeModelDUN(args)
            for layer in template.children():
                if isinstance(layer, BayesLinear):
                    layer.init_rho(args.rho_min_dun, args.rho_max_dun)
            for layer in template.encoder.encoder.children():
                if isinstance(layer, BayesLinear):
                    layer.init_rho(args.rho_min_dun, args.rho_max_dun)
            template.create_log_cat(args)
            model = load_checkpoint(args.checkpoint_path +
                                    f'/model_{model_idx}/model_dun.pt',
                                    device=args.device,
                                    logger=None,
                                    template=template)

        # make results_dir
        results_dir = os.path.join(args.results_dir, f'model_{model_idx}')
        makedirs(results_dir)

        # train_preds, train_targets
        train_preds = predict(model=model,
                              data_loader=train_data_loader,
                              args=args,
                              scaler=scaler,
                              test_data=False,
                              bbp_sample=False)
        train_preds = np.array(train_preds)
        # train_targets was captured (unscaled) in the regression branch above,
        # before the scaler was fit; this script assumes a regression dataset.
        train_targets = np.array(train_targets)

        # compute tstats
        tstats = np.ones((12, 3))
        for task in range(12):
            resid = train_preds[:, task] - train_targets[:, task]
            tstats[task] = np.array(stats.t.fit(resid, floc=0.0))

        ##################################
        ########## Inner loop over samples
        ##################################

        for sample_idx in range(args.samples):

            # save down
            np.savez(os.path.join(results_dir, f'tstats_{sample_idx}'), tstats)

            print('done one')
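
The stats.t.fit(resid, floc=0.0) call above returns the Student-t parameters (degrees of freedom, location, scale) for each task's residuals, with the location pinned at zero; a hedged toy example:

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
resid = rng.standard_t(df=5, size=1000)  # synthetic residuals

df_fit, loc_fit, scale_fit = stats.t.fit(resid, floc=0.0)
print(df_fit, loc_fit, scale_fit)        # loc_fit is exactly 0.0
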
def run_meta_training(args: TrainArgs, logger: Logger = None) -> List[float]:
    """
    Trains a model and returns test scores on the model checkpoint with the highest validation score.

    :param args: Arguments.
    :param logger: Logger.
    :return: A list of ensemble scores for each task.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Print command line
    debug('Command line')
    debug(f'python {" ".join(sys.argv)}')

    # Print args
    debug('Args')
    debug(args)

    # Save args
    args.save(os.path.join(args.save_dir, 'args.json'))

    # Set pytorch seed for random initial weights
    torch.manual_seed(args.pytorch_seed)

    # Get data
    debug('Loading data')
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')

    # Split data
    # debug(f'Splitting data with seed {args.seed}')
    # if args.separate_test_path:
    #     test_data = get_data(path=args.separate_test_path, args=args, features_path=args.separate_test_features_path, logger=logger)
    # if args.separate_val_path:
    #     val_data = get_data(path=args.separate_val_path, args=args, features_path=args.separate_val_features_path, logger=logger)

    # if args.separate_val_path and args.separate_test_path:
    #     train_data = data
    # elif args.separate_val_path:
    #     train_data, _, test_data = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.0, 0.2), seed=args.seed, args=args, logger=logger)
    # elif args.separate_test_path:
    #     train_data, val_data, _ = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.2, 0.0), seed=args.seed, args=args, logger=logger)
    # else:
    #     train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes, seed=args.seed, args=args, logger=logger)

    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(data)
        debug('Class sizes')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(f'{args.task_names[i]} '
                  f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}')

    # if args.save_smiles_splits:
    #     save_smiles_splits(
    #         train_data=train_data,
    #         val_data=val_data,
    #         test_data=test_data,
    #         data_path=args.data_path,
    #         save_dir=args.save_dir
    #     )

    # If this happens, then need to move this logic into the task data loader
    # when it creates the datasets! 
    # if args.features_scaling:
    #     features_scaler = train_data.normalize_features(replace_nan_token=0)
    #     val_data.normalize_features(features_scaler)
    #     test_data.normalize_features(features_scaler)
    # else:
    #     features_scaler = None

    # args.train_data_size = len(train_data)
    
    # debug(f'Total size = {len(data):,} | '
    #       f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    # if args.dataset_type == 'regression':
    #     debug('Fitting scaler')
    #     train_smiles, train_targets = train_data.smiles(), train_data.targets()
    #     scaler = StandardScaler().fit(train_targets)
    #     scaled_targets = scaler.transform(train_targets).tolist()
    #     train_data.set_targets(scaled_targets)
    # else:
    #     scaler = None

    # Get loss and metric functions
    loss_func = get_loss_func(args)
    metric_func = get_metric_func(metric=args.metric)

    # Set up test set evaluation
    # test_smiles, test_targets = test_data.smiles(), test_data.targets()
    # if args.dataset_type == 'multiclass':
    #     sum_test_preds = np.zeros((len(test_smiles), args.num_tasks, args.multiclass_num_classes))
    # else:
    #     sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # Automatically determine whether to cache
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers

    # Set up MetaTaskDataLoaders, which takes care of task splits under the hood 
    # Set up task splits into T_tr, T_val, T_test

    assert args.chembl_assay_metadata_pickle_path is not None
    with open(args.chembl_assay_metadata_pickle_path +
            'chembl_128_assay_type_to_names.pickle', 'rb') as handle:
        chembl_128_assay_type_to_names = pickle.load(handle)
    with open(args.chembl_assay_metadata_pickle_path +
            'chembl_128_assay_name_to_type.pickle', 'rb') as handle:
        chembl_128_assay_name_to_type = pickle.load(handle)

    """ 
    Copy GSK implementation of task split 
    We have 5 Task types remaining
    ADME (A)
    Toxicity (T)
    Unassigned (U) 
    Binding (B)
    Functional (F)
    resulting in 902 tasks.

    For T_val, randomly select 10 B and F tasks
    For T_test, select another 10 B and F tasks and allocate all A, T, and U
    tasks to the test split.
    For T_train, allocate the remaining B and F tasks. 

    """
    T_val_num_BF_tasks = args.meta_split_sizes_BF[0]
    T_test_num_BF_tasks = args.meta_split_sizes_BF[1]
    T_val_idx = T_val_num_BF_tasks
    T_test_idx = T_val_num_BF_tasks + T_test_num_BF_tasks

    chembl_id_to_idx = {chembl_id: idx for idx, chembl_id in enumerate(args.task_names)}

    # Shuffle B and F tasks
    randomized_B_tasks = np.copy(chembl_128_assay_type_to_names['B'])
    np.random.shuffle(randomized_B_tasks)
    randomized_B_task_indices = [chembl_id_to_idx[assay] for assay in
            randomized_B_tasks]

    randomized_F_tasks = np.copy(chembl_128_assay_type_to_names['F'])
    np.random.shuffle(randomized_F_tasks)
    randomized_F_task_indices = [chembl_id_to_idx[assay] for assay in
            randomized_F_tasks]

    # Grab B and F indices for T_val
    T_val_B_task_indices = randomized_B_task_indices[:T_val_idx]
    T_val_F_task_indices = randomized_F_task_indices[:T_val_idx]

    # Grab B and F indices for T_test
    T_test_B_task_indices = randomized_B_task_indices[T_val_idx:T_test_idx]
    T_test_F_task_indices = randomized_F_task_indices[T_val_idx:T_test_idx]
    # Grab all A, T and U indices for T_test
    T_test_A_task_indices = [chembl_id_to_idx[assay] for assay in chembl_128_assay_type_to_names['A']]
    T_test_T_task_indices = [chembl_id_to_idx[assay] for assay in chembl_128_assay_type_to_names['T']]
    T_test_U_task_indices = [chembl_id_to_idx[assay] for assay in chembl_128_assay_type_to_names['U']]

    # Slot remaining BF tasks into T_tr
    T_tr_B_task_indices = randomized_B_task_indices[T_test_idx:]
    T_tr_F_task_indices = randomized_F_task_indices[T_test_idx:]

    T_tr = [0] * len(args.task_names)
    T_val = [0] * len(args.task_names)
    T_test = [0] * len(args.task_names)

    # Now make task bit vectors
    for idx_list in (T_tr_B_task_indices, T_tr_F_task_indices):
        for idx in idx_list:
            T_tr[idx] = 1

    for idx_list in (T_val_B_task_indices, T_val_F_task_indices):
        for idx in idx_list:
            T_val[idx] = 1

    for idx_list in (T_test_B_task_indices, T_test_F_task_indices, T_test_A_task_indices, T_test_T_task_indices, T_test_U_task_indices):
        for idx in idx_list:
            T_test[idx] = 1


    """
    Random task split for testing
    task_indices = list(range(len(args.task_names)))
    np.random.shuffle(task_indices)
    train_task_split, val_task_split, test_task_split = 0.9, 0, 0.1
    train_task_cutoff = int(len(task_indices) * train_task_split)
    train_task_idxs, test_task_idxs = [0] * len(task_indices), [0] * len(task_indices)
    for idx in task_indices[:train_task_cutoff]:
        train_task_idxs[idx] = 1
    for idx in task_indices[train_task_cutoff:]:
        test_task_idxs[idx] = 1
    """

    train_meta_task_data_loader = MetaTaskDataLoader(
            dataset=data,
            tasks=T_tr,
            sizes=args.meta_train_split_sizes,
            args=args,
            logger=logger)

    val_meta_task_data_loader = MetaTaskDataLoader(
            dataset=data,
            tasks=T_val,
            sizes=args.meta_test_split_sizes,
            args=args,
            logger=logger)

    test_meta_task_data_loader = MetaTaskDataLoader(
            dataset=data,
            tasks=T_test,
            sizes=args.meta_test_split_sizes,
            args=args,
            logger=logger)

    for meta_train_batch in train_meta_task_data_loader.tasks():
        for train_task in meta_train_batch:
            print('In inner loop')
            continue

    # Train ensemble of models
    for model_idx in range(args.ensemble_size):
        # Tensorboard writer
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)
        try:
            writer = SummaryWriter(log_dir=save_dir)
        except TypeError:  # fall back for SummaryWriter versions that take `logdir` instead of `log_dir`
            writer = SummaryWriter(logdir=save_dir)

        # Load/build model
        if args.checkpoint_paths is not None:
            debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}')
            model = load_checkpoint(args.checkpoint_paths[model_idx], logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = MoleculeModel(args)

        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
        model = model.to(args.device)

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)

        # Optimizers
        optimizer = build_optimizer(model, args)

        # Learning rate schedulers
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in trange(args.epochs):
            debug(f'Epoch {epoch}')

            n_iter = train(
                model=model,
                data_loader=train_data_loader,
                loss_func=loss_func,
                optimizer=optimizer,
                scheduler=scheduler,
                args=args,
                n_iter=n_iter,
                logger=logger,
                writer=writer
            )
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()
            val_scores = evaluate(
                model=model,
                data_loader=val_data_loader,
                num_tasks=args.num_tasks,
                metric_func=metric_func,
                dataset_type=args.dataset_type,
                scaler=scaler,
                logger=logger
            )

            # Average validation score
            avg_val_score = np.nanmean(val_scores)
            debug(f'Validation {args.metric} = {avg_val_score:.6f}')
            writer.add_scalar(f'validation_{args.metric}', avg_val_score, n_iter)

            if args.show_individual_scores:
                # Individual validation scores
                for task_name, val_score in zip(args.task_names, val_scores):
                    debug(f'Validation {task_name} {args.metric} = {val_score:.6f}')
                    writer.add_scalar(f'validation_{task_name}_{args.metric}', val_score, n_iter)

            # Save model checkpoint if improved validation score
            if (args.minimize_score and avg_val_score < best_score) or \
                    (not args.minimize_score and avg_val_score > best_score):
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)

        # Evaluate on test set using model with best validation score
        info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'), device=args.device, logger=logger)
        
        test_preds = predict(
            model=model,
            data_loader=test_data_loader,
            scaler=scaler
        )
        test_scores = evaluate_predictions(
            preds=test_preds,
            targets=test_targets,
            num_tasks=args.num_tasks,
            metric_func=metric_func,
            dataset_type=args.dataset_type,
            logger=logger
        )

        if len(test_preds) != 0:
            sum_test_preds += np.array(test_preds)

        # Average test score
        avg_test_score = np.nanmean(test_scores)
        info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
        writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)

        if args.show_individual_scores:
            # Individual test scores
            for task_name, test_score in zip(args.task_names, test_scores):
                info(f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}')
                writer.add_scalar(f'test_{task_name}_{args.metric}', test_score, n_iter)
        writer.close()

    # Evaluate ensemble on test set
    avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()

    ensemble_scores = evaluate_predictions(
        preds=avg_test_preds,
        targets=test_targets,
        num_tasks=args.num_tasks,
        metric_func=metric_func,
        dataset_type=args.dataset_type,
        logger=logger
    )

    # Average ensemble score
    avg_ensemble_test_score = np.nanmean(ensemble_scores)
    info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')

    # Individual ensemble scores
    if args.show_individual_scores:
        for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
            info(f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}')

    return ensemble_scores
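
A minimal sketch (not part of the original example) of the best-checkpoint selection used in the training loop above, with val_scores_per_epoch and minimize_score as hypothetical stand-ins for the per-epoch validation scores and the args.minimize_score flag:

import numpy as np

def select_best_epoch(val_scores_per_epoch, minimize_score):
    """Return (best_score, best_epoch) from per-epoch lists of per-task validation scores."""
    best_score = float('inf') if minimize_score else -float('inf')
    best_epoch = 0
    for epoch, val_scores in enumerate(val_scores_per_epoch):
        avg_val_score = np.nanmean(val_scores)  # average over tasks, ignoring NaNs
        if (minimize_score and avg_val_score < best_score) or \
                (not minimize_score and avg_val_score > best_score):
            best_score, best_epoch = avg_val_score, epoch
    return best_score, best_epoch

# e.g. select_best_epoch([[0.81, 0.79], [0.84, 0.80]], minimize_score=False) ≈ (0.82, 1)
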
def save_test_data(args: TrainArgs, logger: Logger = None) -> None:
    """
    Loads and splits the data, then saves the test-set targets to disk.

    :param args: Arguments.
    :param logger: Logger.
    """

    debug = info = print

    # Print command line and args
    debug('Command line')
    debug(f'python {" ".join(sys.argv)}')
    debug('Args')
    debug(args)

    # Get data
    debug('Loading data')
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')

    # Split data
    debug(f'Splitting data with seed {args.seed}')
    train_data, val_data, test_data = split_data(data=data,
                                                 split_type=args.split_type,
                                                 sizes=args.split_sizes,
                                                 seed=args.seed,
                                                 args=args,
                                                 logger=logger)

    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(train_data)

    debug(
        f'Total size = {len(data):,} | '
        f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}'
    )

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        train_smiles, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    # Get loss and metric functions
    loss_func = neg_log_like
    metric_func = get_metric_func(metric=args.metric)

    # Set up test set evaluation
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # save down test targets
    np.savez('/home/willlamb/results/test_targets', np.array(test_targets))

    return None
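
save_test_data writes an unnamed array with np.savez, which stores it under the default key 'arr_0' and appends an .npz extension; a small sketch of reading the targets back (path as hard-coded above):

import numpy as np

# np.savez with positional arrays stores them as 'arr_0', 'arr_1', ...
loaded = np.load('/home/willlamb/results/test_targets.npz', allow_pickle=True)
test_targets = loaded['arr_0']  # allow_pickle in case targets contain None (object dtype)
print(test_targets.shape)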
Exemple #16
0
def run_training(args: Namespace, logger: Logger = None) -> List[float]:
    """
    Trains a model and returns test scores on the model checkpoint with the highest validation score.

    :param args: Arguments.
    :param logger: Logger.
    :return: A list of ensemble scores for each task.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Set GPU
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)

    # Print args
    debug(pformat(vars(args)))

    # Get data
    debug('Loading data')
    args.task_names = get_task_names(args.data_path)
    desired_labels = get_desired_labels(args, args.task_names)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    args.real_num_tasks = args.num_tasks - args.features_size if args.predict_features else args.num_tasks
    debug(f'Number of tasks = {args.num_tasks}')

    if args.dataset_type == 'bert_pretraining':
        data.bert_init(args, logger)

    # Split data
    if args.dataset_type == 'regression_with_binning':  # Note: for now, binning based on whole dataset, not just training set
        data, bin_predictions, regression_data = data
        args.bin_predictions = bin_predictions
        debug(f'Splitting data with seed {args.seed}')
        train_data, _, _ = split_data(data=data,
                                      split_type=args.split_type,
                                      sizes=args.split_sizes,
                                      seed=args.seed,
                                      args=args,
                                      logger=logger)
        _, val_data, test_data = split_data(regression_data,
                                            split_type=args.split_type,
                                            sizes=args.split_sizes,
                                            seed=args.seed,
                                            args=args,
                                            logger=logger)
    else:
        debug(f'Splitting data with seed {args.seed}')
        if args.separate_test_set:
            test_data = get_data(path=args.separate_test_set,
                                 args=args,
                                 features_path=args.separate_test_set_features,
                                 logger=logger)
            if args.separate_val_set:
                val_data = get_data(
                    path=args.separate_val_set,
                    args=args,
                    features_path=args.separate_val_set_features,
                    logger=logger)
                train_data = data  # nothing to split; we already got our test and val sets
            else:
                train_data, val_data, _ = split_data(
                    data=data,
                    split_type=args.split_type,
                    sizes=(0.8, 0.2, 0.0),
                    seed=args.seed,
                    args=args,
                    logger=logger)
        else:
            train_data, val_data, test_data = split_data(
                data=data,
                split_type=args.split_type,
                sizes=args.split_sizes,
                seed=args.seed,
                args=args,
                logger=logger)

    # Optionally replace test data with train or val data
    if args.test_split == 'train':
        test_data = train_data
    elif args.test_split == 'val':
        test_data = val_data

    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(data)
        debug('Class sizes')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(
                f'{args.task_names[i]} '
                f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}'
            )

        if args.class_balance:
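            # class_batch_counts: expected per-class counts in a batch of size batch_size;
            # class_weights is their inverse, so rarer classes get larger weights.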
            train_class_sizes = get_class_sizes(train_data)
            class_batch_counts = torch.Tensor(
                train_class_sizes) * args.batch_size
            args.class_weights = 1 / torch.Tensor(class_batch_counts)

    if args.save_smiles_splits:
        with open(args.data_path, 'r') as f:
            reader = csv.reader(f)
            header = next(reader)

            lines_by_smiles = {}
            indices_by_smiles = {}
            for i, line in enumerate(reader):
                smiles = line[0]
                lines_by_smiles[smiles] = line
                indices_by_smiles[smiles] = i

        all_split_indices = []
        for dataset, name in [(train_data, 'train'), (val_data, 'val'),
                              (test_data, 'test')]:
            with open(os.path.join(args.save_dir, name + '_smiles.csv'),
                      'w') as f:
                writer = csv.writer(f)
                writer.writerow(['smiles'])
                for smiles in dataset.smiles():
                    writer.writerow([smiles])
            with open(os.path.join(args.save_dir, name + '_full.csv'),
                      'w') as f:
                writer = csv.writer(f)
                writer.writerow(header)
                for smiles in dataset.smiles():
                    writer.writerow(lines_by_smiles[smiles])
            split_indices = []
            for smiles in dataset.smiles():
                split_indices.append(indices_by_smiles[smiles])
            split_indices = sorted(split_indices)
            all_split_indices.append(split_indices)
        with open(os.path.join(args.save_dir, 'split_indices.pckl'),
                  'wb') as f:
            pickle.dump(all_split_indices, f)
        return [1 for _ in range(args.num_tasks)]  # short circuit out when just generating splits

    if args.features_scaling:
        features_scaler = train_data.normalize_features(
            replace_nan_token=None if args.predict_features else 0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    if args.prespecified_chunk_dir is None:
        args.train_data_size = len(train_data)
    else:
        args.train_data_size = args.prespecified_chunks_max_examples_per_epoch

    if args.adversarial or args.moe:
        val_smiles, test_smiles = val_data.smiles(), test_data.smiles()

    debug(
        f'Total size = {len(data):,} | '
        f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}'
    )

    # Optionally truncate outlier values
    if args.truncate_outliers:
        print('Truncating outliers in train set')
        train_data = truncate_outliers(train_data)

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression' and args.target_scaling:
        debug('Fitting scaler')
        train_smiles, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    if args.moe:
        train_data = cluster_split(train_data,
                                   args.num_sources,
                                   args.cluster_max_ratio,
                                   seed=args.cluster_split_seed,
                                   logger=logger)

    # Chunk training data if too large to load in memory all at once
    if args.num_chunks > 1:
        os.makedirs(args.chunk_temp_dir, exist_ok=True)
        train_paths = []
        if args.moe:
            chunked_sources = [td.chunk(args.num_chunks) for td in train_data]
            chunks = []
            for i in range(args.num_chunks):
                chunks.append([source[i] for source in chunked_sources])
        else:
            chunks = train_data.chunk(args.num_chunks)
        for i in range(args.num_chunks):
            chunk_path = os.path.join(args.chunk_temp_dir, str(i) + '.txt')
            memo_path = os.path.join(args.chunk_temp_dir,
                                     'memo' + str(i) + '.txt')
            with open(chunk_path, 'wb') as f:
                pickle.dump(chunks[i], f)
            train_paths.append((chunk_path, memo_path))
        train_data = train_paths

    # Get loss and metric functions
    loss_func = get_loss_func(args)
    metric_func = get_metric_func(metric=args.metric, args=args)

    # Set up test set evaluation
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    if args.maml:  # TODO refactor
        test_targets = []
        for task_idx in range(len(data.data[0].targets)):
            _, task_test_data, _ = test_data.sample_maml_task(args, seed=0)
            test_targets += task_test_data.targets()

    if args.dataset_type == 'bert_pretraining':
        sum_test_preds = {
            'features':
            np.zeros((len(test_smiles), args.features_size))
            if args.features_size is not None else None,
            'vocab':
            np.zeros((len(test_targets['vocab']), args.vocab.output_size))
        }
    elif args.dataset_type == 'kernel':
        sum_test_preds = np.zeros((len(test_targets), args.num_tasks))
    else:
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    if args.maml:
        sum_test_preds = None  # annoying to determine exact size; will initialize later

    if args.dataset_type == 'bert_pretraining':
        # Only predict targets that are masked out
        test_targets['vocab'] = [
            target if mask == 0 else None
            for target, mask in zip(test_targets['vocab'], test_data.mask())
        ]

    # Train ensemble of models
    for model_idx in range(args.ensemble_size):
        # Tensorboard writer
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        os.makedirs(save_dir, exist_ok=True)
        writer = SummaryWriter(log_dir=save_dir)

        # Load/build model
        if args.checkpoint_paths is not None:
            debug(
                f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}'
            )
            model = load_checkpoint(args.checkpoint_paths[model_idx],
                                    current_args=args,
                                    logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = build_model(args)

        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
            model = model.cuda()

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler,
                        features_scaler, args)

        if args.adjust_weight_decay:
            args.pnorm_target = compute_pnorm(model)

        # Optimizers
        optimizer = build_optimizer(model, args)

        # Learning rate schedulers
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in trange(args.epochs):
            debug(f'Epoch {epoch}')

            if args.prespecified_chunk_dir is not None:
                # load some different random chunks each epoch
                train_data, val_data = load_prespecified_chunks(args, logger)
                debug('Loaded prespecified chunks for epoch')

            if args.dataset_type == 'unsupervised':  # won't work with moe
                full_data = MoleculeDataset(train_data.data + val_data.data)
                generate_unsupervised_cluster_labels(
                    build_model(args), full_data,
                    args)  # cluster with a new random init
                model.create_ffn(
                    args
                )  # reset the ffn since we're changing targets-- we're just pretraining the encoder.
                optimizer.param_groups.pop()  # remove ffn parameters
                optimizer.add_param_group({
                    'params': model.ffn.parameters(),
                    'lr': args.init_lr[1],
                    'weight_decay': args.weight_decay[1]
                })
                if args.cuda:
                    model.ffn.cuda()

            if args.gradual_unfreezing:
                if epoch % args.epochs_per_unfreeze == 0:
                    # consider just stopping early after we have nothing left to unfreeze?
                    unfroze_layer = model.unfreeze_next()
                    if unfroze_layer:
                        debug('Unfroze last frozen layer')

            n_iter = train(model=model,
                           data=train_data,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           logger=logger,
                           writer=writer,
                           chunk_names=(args.num_chunks > 1),
                           val_smiles=val_smiles if args.adversarial else None,
                           test_smiles=test_smiles
                           if args.adversarial or args.moe else None)
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()
            val_scores = evaluate(model=model,
                                  data=val_data,
                                  metric_func=metric_func,
                                  args=args,
                                  scaler=scaler,
                                  logger=logger)

            if args.dataset_type == 'bert_pretraining':
                if val_scores['features'] is not None:
                    debug(
                        f'Validation features rmse = {val_scores["features"]:.6f}'
                    )
                    writer.add_scalar('validation_features_rmse',
                                      val_scores['features'], n_iter)
                val_scores = [val_scores['vocab']]

            # Average validation score
            avg_val_score = np.nanmean(val_scores)
            debug(f'Validation {args.metric} = {avg_val_score:.6f}')
            writer.add_scalar(f'validation_{args.metric}', avg_val_score,
                              n_iter)

            if args.show_individual_scores:
                # Individual validation scores
                for task_name, val_score in zip(args.task_names, val_scores):
                    if task_name in desired_labels:
                        debug(
                            f'Validation {task_name} {args.metric} = {val_score:.6f}'
                        )
                        writer.add_scalar(
                            f'validation_{task_name}_{args.metric}', val_score,
                            n_iter)

            # Save model checkpoint if improved validation score, or always save it if unsupervised
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score or \
                    args.dataset_type == 'unsupervised':
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, 'model.pt'), model,
                                scaler, features_scaler, args)

        if args.dataset_type == 'unsupervised':
            return [0]  # rest of this is meaningless when unsupervised

        # Evaluate on test set using model with best validation score
        info(
            f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}'
        )
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'),
                                cuda=args.cuda,
                                logger=logger)

        if args.split_test_by_overlap_dataset is not None:
            overlap_data = get_data(path=args.split_test_by_overlap_dataset,
                                    logger=logger)
            overlap_smiles = set(overlap_data.smiles())
            test_data_intersect, test_data_nonintersect = [], []
            for d in test_data.data:
                if d.smiles in overlap_smiles:
                    test_data_intersect.append(d)
                else:
                    test_data_nonintersect.append(d)
            test_data_intersect, test_data_nonintersect = MoleculeDataset(
                test_data_intersect), MoleculeDataset(test_data_nonintersect)
            for name, td in [('Intersect', test_data_intersect),
                             ('Nonintersect', test_data_nonintersect)]:
                test_preds = predict(model=model,
                                     data=td,
                                     args=args,
                                     scaler=scaler,
                                     logger=logger)
                test_scores = evaluate_predictions(
                    preds=test_preds,
                    targets=td.targets(),
                    metric_func=metric_func,
                    dataset_type=args.dataset_type,
                    args=args,
                    logger=logger)
                avg_test_score = np.nanmean(test_scores)
                info(
                    f'Model {model_idx} test {args.metric} for {name} = {avg_test_score:.6f}'
                )

        if len(test_data) == 0:
            # just get some garbage results without crashing; in this case we didn't care anyway
            test_preds, test_scores = sum_test_preds, [0 for _ in range(len(args.task_names))]
        else:
            test_preds = predict(model=model,
                                 data=test_data,
                                 args=args,
                                 scaler=scaler,
                                 logger=logger)
            test_scores = evaluate_predictions(preds=test_preds,
                                               targets=test_targets,
                                               metric_func=metric_func,
                                               dataset_type=args.dataset_type,
                                               args=args,
                                               logger=logger)

        if args.maml:
            if sum_test_preds is None:
                sum_test_preds = np.zeros(np.array(test_preds).shape)

        if args.dataset_type == 'bert_pretraining':
            if test_preds['features'] is not None:
                sum_test_preds['features'] += np.array(test_preds['features'])
            sum_test_preds['vocab'] += np.array(test_preds['vocab'])
        else:
            sum_test_preds += np.array(test_preds)

        if args.dataset_type == 'bert_pretraining':
            if test_preds['features'] is not None:
                debug(
                    f'Model {model_idx} test features rmse = {test_scores["features"]:.6f}'
                )
                writer.add_scalar('test_features_rmse',
                                  test_scores['features'], 0)
            test_scores = [test_scores['vocab']]

        # Average test score
        avg_test_score = np.nanmean(test_scores)
        info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
        writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)

        if args.show_individual_scores:
            # Individual test scores
            for task_name, test_score in zip(args.task_names, test_scores):
                if task_name in desired_labels:
                    info(
                        f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}'
                    )
                    writer.add_scalar(f'test_{task_name}_{args.metric}',
                                      test_score, n_iter)

    # Evaluate ensemble on test set
    if args.dataset_type == 'bert_pretraining':
        avg_test_preds = {
            'features':
            (sum_test_preds['features'] / args.ensemble_size).tolist()
            if sum_test_preds['features'] is not None else None,
            'vocab': (sum_test_preds['vocab'] / args.ensemble_size).tolist()
        }
    else:
        avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()

    if len(test_data) == 0:  # just return some garbage when we didn't want test data
        ensemble_scores = test_scores
    else:
        ensemble_scores = evaluate_predictions(preds=avg_test_preds,
                                               targets=test_targets,
                                               metric_func=metric_func,
                                               dataset_type=args.dataset_type,
                                               args=args,
                                               logger=logger)

    # Average ensemble score
    if args.dataset_type == 'bert_pretraining':
        if ensemble_scores['features'] is not None:
            info(
                f'Ensemble test features rmse = {ensemble_scores["features"]:.6f}'
            )
            writer.add_scalar('ensemble_test_features_rmse',
                              ensemble_scores['features'], 0)
        ensemble_scores = [ensemble_scores['vocab']]

    avg_ensemble_test_score = np.nanmean(ensemble_scores)
    info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')
    writer.add_scalar(f'ensemble_test_{args.metric}', avg_ensemble_test_score,
                      0)

    # Individual ensemble scores
    if args.show_individual_scores:
        for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
            info(
                f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}'
            )

    return ensemble_scores
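
The ensemble evaluation above averages the summed per-model test predictions before re-scoring them; a minimal, self-contained version of that averaging step using plain numpy (the scoring itself is omitted):

import numpy as np

def ensemble_average(per_model_preds):
    """Average predictions from several models; each element is (n_molecules, n_tasks)."""
    stacked = np.stack([np.asarray(p, dtype=float) for p in per_model_preds])
    return stacked.mean(axis=0)

# two models, two molecules, one task
print(ensemble_average([[[1.0], [2.0]], [[3.0], [4.0]]]))  # [[2.] [3.]]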
Exemple #17
0
def cross_validate(args: TrainArgs,
                   logger: Logger = None) -> Tuple[float, float]:
    """k-fold cross validation"""
    info = logger.info if logger is not None else print

    # Initialize relevant variables
    init_seed = args.seed
    save_dir = args.save_dir
    task_names = args.target_columns or get_task_names(args.data_path)

    # Run training on different random seeds for each fold
    all_scores = []
    if args.seeds and (len(args.seeds) != args.num_folds):
        raise ValueError(
            "Num seeds provided must match the number of folds required")

    for fold_num in range(args.num_folds):
        info(f'Fold {fold_num}')
        if args.seeds:
            args.seed = args.seeds[fold_num]
        else:
            args.seed = init_seed + fold_num
        args.save_dir = os.path.join(save_dir, f'fold_{fold_num}')
        makedirs(args.save_dir)
        model_scores = run_training(args, logger)
        all_scores.append(model_scores)
    all_scores = np.array(all_scores)

    # Report results
    info(f'{args.num_folds}-fold cross validation')

    # Report scores for each fold
    for fold_num, scores in enumerate(all_scores):
        info(
            f'Seed {init_seed + fold_num} ==> test {args.metric} = {np.nanmean(scores):.6f}'
        )

        if args.show_individual_scores:
            for task_name, score in zip(task_names, scores):
                info(
                    f'Seed {init_seed + fold_num} ==> test {task_name} {args.metric} = {score:.6f}'
                )

    # Report scores across models
    avg_scores = np.nanmean(
        all_scores, axis=1)  # average score for each model across tasks
    mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
    info(f'Overall test {args.metric} = {mean_score:.6f} +/- {std_score:.6f}')
    wandb.log({
        'test_{}_mean'.format(args.metric): mean_score,
        'test_{}_std'.format(args.metric): std_score
    })

    # save results
    save_results(all_scores, [], task_names, args)

    if args.show_individual_scores:
        for task_num, task_name in enumerate(task_names):
            info(
                f'Overall test {task_name} {args.metric} = '
                f'{np.nanmean(all_scores[:, task_num]):.6f} +/- {np.nanstd(all_scores[:, task_num]):.6f}'
            )

    return mean_score, std_score
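
cross_validate reports the mean and standard deviation of the per-fold scores, where each fold is first averaged over tasks with np.nanmean; the same aggregation in isolation, assuming all_scores has shape (num_folds, num_tasks):

import numpy as np

def summarize_cv(all_scores):
    """all_scores: (num_folds, num_tasks) array of per-task test scores."""
    all_scores = np.asarray(all_scores, dtype=float)
    fold_means = np.nanmean(all_scores, axis=1)  # one average score per fold
    return float(np.nanmean(fold_means)), float(np.nanstd(fold_means))

mean_score, std_score = summarize_cv([[0.80, 0.90], [0.70, np.nan]])
print(f'{mean_score:.3f} +/- {std_score:.3f}')  # 0.775 +/- 0.075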
Exemple #18
0
def run_training(args: TrainArgs, logger: Logger = None) -> List[float]:
    """
    Trains a model and returns test scores on the model checkpoint with the highest validation score.

    :param args: Arguments.
    :param logger: Logger.
    :return: A list of ensemble scores for each task.
    """

    debug = info = print

    # Print command line and args
    debug('Command line')
    debug(f'python {" ".join(sys.argv)}')
    debug('Args')
    debug(args)

    # Save args
    args.save(os.path.join(args.save_dir, 'args.json'))

    # Get data
    debug('Loading data')
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')

    # Split data
    debug(f'Splitting data with seed {args.seed}')
    train_data, val_data, test_data = split_data(data=data,
                                                 split_type=args.split_type,
                                                 sizes=args.split_sizes,
                                                 seed=args.seed,
                                                 args=args,
                                                 logger=logger)

    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(train_data)

    debug(
        f'Total size = {len(data):,} | '
        f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}'
    )

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        train_smiles, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    # Get loss and metric functions
    loss_func = neg_log_like
    metric_func = get_metric_func(metric=args.metric)

    # Set up test set evaluation
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # Automatically determine whether to cache
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers

    # Create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data,
                                           batch_size=args.batch_size,
                                           num_workers=num_workers,
                                           cache=cache,
                                           class_balance=args.class_balance,
                                           shuffle=True,
                                           seed=args.seed)
    val_data_loader = MoleculeDataLoader(dataset=val_data,
                                         batch_size=args.batch_size,
                                         num_workers=num_workers,
                                         cache=cache)
    test_data_loader = MoleculeDataLoader(dataset=test_data,
                                          batch_size=args.batch_size,
                                          num_workers=num_workers,
                                          cache=cache)

    ###########################################
    ########## Outer loop over ensemble members
    ###########################################

    for model_idx in range(args.ensemble_start_idx,
                           args.ensemble_start_idx + args.ensemble_size):

        # Set pytorch seed for random initial weights
        torch.manual_seed(args.pytorch_seeds[model_idx])

        ######## set up all logging ########
        # make save_dir
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)

        # make results_dir
        results_dir = os.path.join(args.results_dir, f'model_{model_idx}')
        makedirs(results_dir)

        # initialise wandb
        os.environ['WANDB_MODE'] = 'dryrun'
        wandb.init(name=args.wandb_name + '_' + str(model_idx),
                   project=args.wandb_proj,
                   reinit=True)
        print('WANDB directory is:')
        print(wandb.run.dir)
        ####################################

        # Load/build model
        if args.checkpoint_path is not None:
            debug(f'Loading model {model_idx} from {args.checkpoint_path}')
            model = load_checkpoint(args.checkpoint_path +
                                    f'/model_{model_idx}/model.pt',
                                    device=args.device,
                                    logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = MoleculeModel(args)

        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
        model = model.to(args.device)

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler,
                        features_scaler, args)

        # Optimizer
        optimizer = Adam([{
            'params': model.encoder.parameters()
        }, {
            'params': model.ffn.parameters()
        }, {
            'params': model.log_noise,
            'weight_decay': 0
        }],
                         lr=args.init_lr,
                         weight_decay=args.weight_decay)

        # Learning rate scheduler
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in range(args.epochs):
            debug(f'Epoch {epoch}')

            n_iter = train(model=model,
                           data_loader=train_data_loader,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           logger=logger)
            val_scores = evaluate(model=model,
                                  data_loader=val_data_loader,
                                  args=args,
                                  num_tasks=args.num_tasks,
                                  metric_func=metric_func,
                                  dataset_type=args.dataset_type,
                                  scaler=scaler,
                                  logger=logger)

            # Average validation score
            avg_val_score = np.nanmean(val_scores)
            debug(f'Validation {args.metric} = {avg_val_score:.6f}')
            wandb.log({"Validation MAE": avg_val_score})

            # Save model checkpoint if improved validation score
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score:
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, 'model.pt'), model,
                                scaler, features_scaler, args)

            if epoch == args.noam_epochs - 1:
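                # After the initial LR schedule finishes, rebuild the optimiser at the
                # final learning rate and hold it constant for the remaining epochs.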
                optimizer = Adam([{
                    'params': model.encoder.parameters()
                }, {
                    'params': model.ffn.parameters()
                }, {
                    'params': model.log_noise,
                    'weight_decay': 0
                }],
                                 lr=args.final_lr,
                                 weight_decay=args.weight_decay)

                scheduler = scheduler_const([args.final_lr])

        # load model with best validation score
        info(
            f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}'
        )
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'),
                                device=args.device,
                                logger=logger)

        # SWAG training loop, returns swag_model
        if args.swag:
            model = train_swag(model, train_data, val_data, num_workers, cache,
                               loss_func, metric_func, scaler, features_scaler,
                               args, save_dir)

        # SGLD loop, which saves nets
        if args.sgld:
            model = train_sgld(model, train_data, val_data, num_workers, cache,
                               loss_func, metric_func, scaler, features_scaler,
                               args, save_dir)

        # GP loop
        if args.gp:
            model, likelihood = train_gp(model, train_data, val_data,
                                         num_workers, cache, metric_func,
                                         scaler, features_scaler, args,
                                         save_dir)

        # BBP
        if args.bbp:
            model = train_bbp(model, train_data, val_data, num_workers, cache,
                              loss_func, metric_func, scaler, features_scaler,
                              args, save_dir)

        # DUN
        if args.dun:
            model = train_dun(model, train_data, val_data, num_workers, cache,
                              loss_func, metric_func, scaler, features_scaler,
                              args, save_dir)

        ##################################
        ########## Inner loop over samples
        ##################################

        for sample_idx in range(args.samples):

            # draw model from SWAG posterior
            if args.swag:
                model.sample(scale=1.0, cov=args.cov_mat, block=args.block)

            # draw model from collected SGLD models
            if args.sgld:
                model = load_checkpoint(os.path.join(save_dir,
                                                     f'model_{sample_idx}.pt'),
                                        device=args.device,
                                        logger=logger)

            # make predictions
            test_preds = predict(model=model,
                                 data_loader=test_data_loader,
                                 args=args,
                                 scaler=scaler,
                                 test_data=True,
                                 bbp_sample=True)

            #######################################################################
            #######################################################################
            #####        SAVING STUFF DOWN

            if args.gp:

                # get test_preds_std (scaled back to original data)
                test_preds_std = predict_std_gp(model=model,
                                                data_loader=test_data_loader,
                                                args=args,
                                                scaler=scaler,
                                                likelihood=likelihood)

                # 1 - MEANS
                np.savez(os.path.join(results_dir, f'preds_{sample_idx}'),
                         np.array(test_preds))

                # 2 - STD, combined aleatoric and epistemic (we save down the stds, always)
                np.savez(os.path.join(results_dir, f'predsSTDEV_{sample_idx}'),
                         np.array(test_preds_std))

            else:

                # save test_preds and aleatoric uncertainties
                if args.dun:
                    log_cat = model.log_cat.detach().cpu().numpy()
                    cat = np.exp(log_cat) / np.sum(np.exp(log_cat))
                    np.savez(os.path.join(results_dir, f'cat_{sample_idx}'),
                             cat)

                    # samples from categorical dist and saves a depth MC sample
                    depth_sample = np.random.multinomial(1,
                                                         cat).nonzero()[0][0]
                    test_preds_MCdepth = predict_MCdepth(
                        model=model,
                        data_loader=test_data_loader,
                        args=args,
                        scaler=scaler,
                        d=depth_sample)
                    np.savez(
                        os.path.join(results_dir,
                                     f'predsMCDEPTH_{sample_idx}'),
                        np.array(test_preds_MCdepth))

                if args.swag:
                    log_noise = model.base.log_noise
                else:
                    log_noise = model.log_noise
                noise = np.exp(log_noise.detach().cpu().numpy()) * np.array(
                    scaler.stds)
                np.savez(os.path.join(results_dir, f'preds_{sample_idx}'),
                         np.array(test_preds))
                np.savez(os.path.join(results_dir, f'noise_{sample_idx}'),
                         noise)

            #######################################################################
            #######################################################################

            # add predictions to sum_test_preds
            if len(test_preds) != 0:
                sum_test_preds += np.array(test_preds)

            # evaluate predictions using metric function
            test_scores = evaluate_predictions(preds=test_preds,
                                               targets=test_targets,
                                               num_tasks=args.num_tasks,
                                               metric_func=metric_func,
                                               dataset_type=args.dataset_type,
                                               logger=logger)

            # compute average test score
            avg_test_score = np.nanmean(test_scores)
            info(
                f'Model {model_idx}, sample {sample_idx} test {args.metric} = {avg_test_score:.6f}'
            )

    #################################
    ########## Bayesian Model Average
    #################################
    # note: this is an average over Bayesian samples AND components in an ensemble

    # compute number of prediction iterations
    pred_iterations = args.ensemble_size * args.samples

    # average predictions across iterations
    avg_test_preds = (sum_test_preds / pred_iterations).tolist()

    # evaluate
    BMA_scores = evaluate_predictions(preds=avg_test_preds,
                                      targets=test_targets,
                                      num_tasks=args.num_tasks,
                                      metric_func=metric_func,
                                      dataset_type=args.dataset_type,
                                      logger=logger)

    # average scores across tasks
    avg_BMA_test_score = np.nanmean(BMA_scores)
    info(f'BMA test {args.metric} = {avg_BMA_test_score:.6f}')

    return BMA_scores
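
In the sampling loop above, the learned log_noise is exponentiated and rescaled by the target scaler's standard deviations before being saved; a small sketch of that conversion, assuming per-task standardised targets:

import numpy as np

def noise_in_original_units(log_noise, target_stds):
    """Map a learned per-task log-noise parameter back to the original target units."""
    return np.exp(np.asarray(log_noise, dtype=float)) * np.asarray(target_stds, dtype=float)

print(noise_in_original_units([-1.0, 0.0], [2.0, 0.5]))  # approx. [0.7358 0.5]
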
def pdts(args: TrainArgs, model_idx: int):
    """
    Preliminary experiment with PDTS (approximate Bayesian optimisation).

    We use a dataset of size 50k and run until we have trained on 15k data points;
    the batch size is 50 and we initialise with 1,000 data points.
    """

    ######## set up all logging ########
    logger = None

    # make save_dir
    save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
    makedirs(save_dir)

    # make results_dir
    results_dir = args.results_dir
    makedirs(results_dir)

    # initialise wandb
    #os.environ['WANDB_MODE'] = 'dryrun'
    wandb.init(name=args.wandb_name + '_' + str(model_idx),
               project=args.wandb_proj,
               reinit=True)
    #print('WANDB directory is:')
    #print(wandb.run.dir)
    ####################################

    ########## get data
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()

    ########## SMILES of top 1%
    top1p = np.array(MoleculeDataset(data).targets())
    top1p_idx = np.argsort(-top1p[:, 0])[:int(args.max_data_size * 0.01)]
    SMILES = np.array(MoleculeDataset(data).smiles())[top1p_idx]

    ########## initial data splits
    args.seed = args.data_seeds[model_idx]
    data.shuffle(seed=args.seed)
    sizes = args.split_sizes
    train_size = int(sizes[0] * len(data))
    train_orig = data[:train_size]
    test_orig = data[train_size:]
    train_data, test_data = copy.deepcopy(
        MoleculeDataset(train_orig)), copy.deepcopy(MoleculeDataset(test_orig))
    args.train_data_size = len(train_data)

    ########## standardising
    # features (train and test)
    features_scaler = train_data.normalize_features(replace_nan_token=0)
    test_data.normalize_features(features_scaler)
    # targets (train)
    train_targets = train_data.targets()
    test_targets = test_data.targets()
    scaler = StandardScaler().fit(train_targets)
    scaled_targets = scaler.transform(train_targets).tolist()
    train_data.set_targets(scaled_targets)

    ########## loss, metric functions
    loss_func = neg_log_like
    metric_func = get_metric_func(metric=args.metric)

    ########## data loaders
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers
    train_data_loader = MoleculeDataLoader(dataset=train_data,
                                           batch_size=args.batch_size,
                                           num_workers=num_workers,
                                           cache=cache,
                                           class_balance=args.class_balance,
                                           shuffle=True,
                                           seed=args.seed)
    test_data_loader = MoleculeDataLoader(dataset=test_data,
                                          batch_size=args.batch_size,
                                          num_workers=num_workers,
                                          cache=cache)

    ########## instantiating model, optimiser, scheduler (MAP)
    # set pytorch seed for random initial weights
    torch.manual_seed(args.pytorch_seeds[model_idx])
    # build model
    print(f'Building model {model_idx}')
    model = MoleculeModel(args)
    print(model)
    print(f'Number of parameters = {param_count(model):,}')
    if args.cuda:
        print('Moving model to cuda')
    model = model.to(args.device)
    # optimizer
    optimizer = Adam([{
        'params': model.encoder.parameters()
    }, {
        'params': model.ffn.parameters()
    }, {
        'params': model.log_noise,
        'weight_decay': 0
    }],
                     lr=args.lr,
                     weight_decay=args.weight_decay)
    # learning rate scheduler
    scheduler = scheduler_const([args.lr])

    ####################################################################
    ####################################################################
    # FIRST THOMPSON ITERATION

    ### scores array
    ptds_scores = np.ones(args.pdts_batches + 1)
    batch_no = 0

    ### fill for batch 0
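    # prop: fraction of the global top-1% molecules already present in the current training pool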
    SMILES_train = np.array(train_data.smiles())
    SMILES_stack = np.hstack((SMILES, SMILES_train))
    overlap = len(SMILES_stack) - len(np.unique(SMILES_stack))
    prop = overlap / len(SMILES)
    ptds_scores[batch_no] = prop
    wandb.log({
        "Proportion of top 1%": prop,
        "batch_no": batch_no
    },
              commit=False)

    ### train MAP posterior
    gp_switch = False
    likelihood = None
    bbp_switch = None
    n_iter = 0
    for epoch in range(args.epochs_init_map):
        n_iter = train(model=model,
                       data_loader=train_data_loader,
                       loss_func=loss_func,
                       optimizer=optimizer,
                       scheduler=scheduler,
                       args=args,
                       n_iter=n_iter,
                       bbp_switch=bbp_switch)
        # save to save_dir
        #if epoch == args.epochs_init_map - 1:
        #save_checkpoint(os.path.join(save_dir, f'model_{batch_no}.pt'), model, scaler, features_scaler, args)
    # if using BBP / GP / SWAG / SGLD, load pretrained weights from the checkpoint path
    if args.bbp or args.gp or args.swag or args.sgld:
        model = load_checkpoint(args.checkpoint_path +
                                f'/model_{model_idx}/model_{batch_no}.pt',
                                device=args.device,
                                logger=None)

    ########## BBP
    if args.bbp:
        model_bbp = MoleculeModelBBP(
            args)  # instantiate with bayesian linear layers
        for (_, param_bbp), (_, param_pre) in zip(model_bbp.named_parameters(),
                                                  model.named_parameters()):
            param_bbp.data = copy.deepcopy(
                param_pre.data.T)  # copy over parameters
        # instantiate rhos
        for layer in model_bbp.children():
            if isinstance(layer, BayesLinear):
                layer.init_rho(args.rho_min_bbp, args.rho_max_bbp)
        for layer in model_bbp.encoder.encoder.children():
            if isinstance(layer, BayesLinear):
                layer.init_rho(args.rho_min_bbp, args.rho_max_bbp)
        model = model_bbp  # name back
        # move to cuda
        if args.cuda:
            print('Moving bbp model to cuda')
            model = model.to(args.device)
        # optimiser and scheduler
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        scheduler = scheduler_const([args.lr])

        bbp_switch = 2
        n_iter = 0
        for epoch in range(args.epochs_init):
            n_iter = train(model=model,
                           data_loader=train_data_loader,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           bbp_switch=bbp_switch)

    ########## GP
    if args.gp:
        # feature_extractor
        model.featurizer = True
        feature_extractor = model
        # inducing points
        inducing_points = initial_inducing_points(train_data_loader,
                                                  feature_extractor, args)
        # GP layer
        gp_layer = GPLayer(inducing_points, args.num_tasks)
        # full DKL model
        model = copy.deepcopy(DKLMoleculeModel(feature_extractor, gp_layer))
        # likelihood (rank 0 restricts to diagonal matrix)
        likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(
            num_tasks=12, rank=0)
        # model and likelihood to CUDA
        if args.cuda:
            model.cuda()
            likelihood.cuda()
        # loss object
        loss_func = gpytorch.mlls.VariationalELBO(
            likelihood, model.gp_layer, num_data=args.train_data_size)
        # optimiser and scheduler
        params_list = [
            {
                'params': model.feature_extractor.parameters(),
                'weight_decay': args.weight_decay_gp
            },
            {
                'params': model.gp_layer.hyperparameters()
            },
            {
                'params': model.gp_layer.variational_parameters()
            },
            {
                'params': likelihood.parameters()
            },
        ]
        optimizer = torch.optim.Adam(params_list, lr=args.lr)
        scheduler = scheduler_const([args.lr])

        gp_switch = True
        n_iter = 0
        for epoch in range(args.epochs_init):
            n_iter = train(model=model,
                           data_loader=train_data_loader,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           gp_switch=gp_switch,
                           likelihood=likelihood)

    ########## SWAG
    if args.swag:
        model_core = copy.deepcopy(model)
        model = train_swag_pdts(model_core, train_data_loader, loss_func,
                                scaler, features_scaler, args, save_dir,
                                batch_no)

    ########## SGLD
    if args.sgld:
        model = train_sgld_pdts(model, train_data_loader, loss_func, scaler,
                                features_scaler, args, save_dir, batch_no)

    ### find top_idx
    top_idx = []  # needed for Thompson sampling
    sum_test_preds = np.zeros((len(test_orig), args.num_tasks))  # needed for greedy selection
    for sample in range(args.samples):

        # draw model from SWAG posterior
        if args.swag:
            model.sample(scale=1.0, cov=args.cov_mat, block=args.block)

        # retrieve sgld sample
        if args.sgld:
            model = load_checkpoint(
                args.save_dir +
                f'/model_{model_idx}/model_{batch_no}/model_{sample}.pt',
                device=args.device,
                logger=logger)

        test_preds = predict(model=model,
                             data_loader=test_data_loader,
                             args=args,
                             scaler=scaler,
                             test_data=True,
                             gp_sample=args.thompson,
                             bbp_sample=True)
        test_preds = np.array(test_preds)
        # thompson bit
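        # (for each posterior sample, walk down the molecules ranked by the sample's
        #  predicted value for task 0 and keep the highest-ranked one not selected yet;
        #  base_length below caps the running total so that each SGLD checkpoint
        #  contributes 5 picks while every other posterior sample contributes 1)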
        rank = 0

        # base length
        if args.sgld:
            base_length = 5 * sample + 4
        else:
            base_length = sample

        while args.thompson and (len(top_idx) <= base_length):
            top_unique_molecule = np.argsort(-test_preds[:, 0])[rank]
            rank += 1
            if top_unique_molecule not in top_idx:
                top_idx.append(top_unique_molecule)
        # add to sum_test_preds
        sum_test_preds += test_preds
        # print
        print('done sample ' + str(sample))
    # final top_idx
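    # Thompson: keep the indices accumulated across samples. Greedy (non-Thompson):
    # average the summed predictions over all samples and take the 50 molecules
    # with the highest mean prediction for task 0.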
    if args.thompson:
        top_idx = np.array(top_idx)
    else:
        sum_test_preds /= args.samples
        top_idx = np.argsort(-sum_test_preds[:, 0])[:50]

    ### transfer from test to train
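    # Sort the selected indices in descending order so that popping them out of
    # test_orig does not shift the positions of indices not yet processed.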
    top_idx = -np.sort(-top_idx)
    for idx in top_idx:
        train_orig.append(test_orig.pop(idx))
    train_data, test_data = copy.deepcopy(
        MoleculeDataset(train_orig)), copy.deepcopy(MoleculeDataset(test_orig))
    args.train_data_size = len(train_data)
    if args.gp:
        loss_func = gpytorch.mlls.VariationalELBO(
            likelihood, model.gp_layer, num_data=args.train_data_size)
    print(args.train_data_size)

    ### standardise features (train and test; using original features_scaler)
    train_data.normalize_features(features_scaler)
    test_data.normalize_features(features_scaler)

    ### standardise targets (train only; using original scaler)
    train_targets = train_data.targets()
    scaled_targets_tr = scaler.transform(train_targets).tolist()
    train_data.set_targets(scaled_targets_tr)

    ### create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data,
                                           batch_size=args.batch_size,
                                           num_workers=num_workers,
                                           cache=cache,
                                           class_balance=args.class_balance,
                                           shuffle=True,
                                           seed=args.seed)
    test_data_loader = MoleculeDataLoader(dataset=test_data,
                                          batch_size=args.batch_size,
                                          num_workers=num_workers,
                                          cache=cache)

    ####################################################################
    ####################################################################

    ##################################
    ########## thompson sampling loop
    ##################################

    for batch_no in range(1, args.pdts_batches + 1):
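        # Each acquisition batch: log how much of the reference set SMILES (defined
        # earlier) the training pool already contains, reported to wandb as the
        # proportion of the top 1% recovered; then retrain the posterior on the
        # enlarged training set, draw posterior samples, select new molecules, and
        # move them from test_orig to train_orig.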

        ### fill in ptds_scores
        SMILES_train = np.array(train_data.smiles())
        SMILES_stack = np.hstack((SMILES, SMILES_train))
        overlap = len(SMILES_stack) - len(np.unique(SMILES_stack))
        prop = overlap / len(SMILES)
        ptds_scores[batch_no] = prop
        wandb.log({
            "Proportion of top 1%": prop,
            "batch_no": batch_no
        },
                  commit=False)

        ### train posterior
        n_iter = 0
        for epoch in range(args.epochs):
            n_iter = train(model=model,
                           data_loader=train_data_loader,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           gp_switch=gp_switch,
                           likelihood=likelihood,
                           bbp_switch=bbp_switch)
            # save to save_dir
            #if epoch == args.epochs - 1:
            #save_checkpoint(os.path.join(save_dir, f'model_{batch_no}.pt'), model, scaler, features_scaler, args)
        # if swag, load checkpoint
        if args.swag:
            model_core = load_checkpoint(
                args.checkpoint_path +
                f'/model_{model_idx}/model_{batch_no}.pt',
                device=args.device,
                logger=None)

        ########## SWAG
        if args.swag:
            model = train_swag_pdts(model_core, train_data_loader, loss_func,
                                    scaler, features_scaler, args, save_dir,
                                    batch_no)

        ########## SGLD
        if args.sgld:
            model = train_sgld_pdts(model, train_data_loader, loss_func,
                                    scaler, features_scaler, args, save_dir,
                                    batch_no)

        ### find top_idx
        top_idx = []  # need for thom
        sum_test_preds = np.zeros(
            (len(test_orig), args.num_tasks))  # need for greedy
        for sample in range(args.samples):

            # draw model from SWAG posterior
            if args.swag:
                model.sample(scale=1.0, cov=args.cov_mat, block=args.block)

            # retrieve sgld sample
            if args.sgld:
                model = load_checkpoint(
                    args.save_dir +
                    f'/model_{model_idx}/model_{batch_no}/model_{sample}.pt',
                    device=args.device,
                    logger=logger)

            test_preds = predict(model=model,
                                 data_loader=test_data_loader,
                                 args=args,
                                 scaler=scaler,
                                 test_data=True,
                                 gp_sample=args.thompson,
                                 bbp_sample=True)
            test_preds = np.array(test_preds)
            # thompson bit
            rank = 0

            # base length
            if args.sgld:
                base_length = 5 * sample + 4
            else:
                base_length = sample

            while args.thompson and (len(top_idx) <= base_length):
                top_unique_molecule = np.argsort(-test_preds[:, 0])[rank]
                rank += 1
                if top_unique_molecule not in top_idx:
                    top_idx.append(top_unique_molecule)
            # add to sum_test_preds
            sum_test_preds += test_preds
            # print
            print('done sample ' + str(sample))
        # final top_idx
        if args.thompson:
            top_idx = np.array(top_idx)
        else:
            sum_test_preds /= args.samples
            top_idx = np.argsort(-sum_test_preds[:, 0])[:50]

        ### transfer from test to train
        top_idx = -np.sort(-top_idx)
        for idx in top_idx:
            train_orig.append(test_orig.pop(idx))
        train_data, test_data = copy.deepcopy(
            MoleculeDataset(train_orig)), copy.deepcopy(
                MoleculeDataset(test_orig))
        args.train_data_size = len(train_data)
        if args.gp:
            loss_func = gpytorch.mlls.VariationalELBO(
                likelihood, model.gp_layer, num_data=args.train_data_size)
        print(args.train_data_size)

        ### standardise features (train and test; using original features_scaler)
        train_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)

        ### standardise targets (train only; using original scaler)
        train_targets = train_data.targets()
        scaled_targets_tr = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets_tr)

        ### create data loaders
        train_data_loader = MoleculeDataLoader(
            dataset=train_data,
            batch_size=args.batch_size,
            num_workers=num_workers,
            cache=cache,
            class_balance=args.class_balance,
            shuffle=True,
            seed=args.seed)
        test_data_loader = MoleculeDataLoader(dataset=test_data,
                                              batch_size=args.batch_size,
                                              num_workers=num_workers,
                                              cache=cache)

    # save scores
    np.savez(os.path.join(results_dir, f'ptds_{model_idx}'), ptds_scores)
Example #20
0
def run_training_gnn_xgb(args: TrainArgs,
                         logger: Logger = None
                         ) -> Tuple[List[float], List, List, List, List, List, List]:
    """
    Trains a model and returns test scores on the model checkpoint with the highest validation score.

    :param args: Arguments.
    :param logger: Logger.
    :return: A list of ensemble scores for each task.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Print command line
    debug('Command line')
    debug(f'python {" ".join(sys.argv)}')

    # Print args
    debug('Args')
    debug(args)

    # Save args
    args.save(os.path.join(args.save_dir, 'args.json'))

    # Set pytorch seed for random initial weights
    torch.manual_seed(args.pytorch_seed)

    # Get data
    debug('Loading data')
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')

    # Split data
    debug(f'Splitting data with seed {args.seed}')
    if args.separate_test_path:
        test_data = get_data(path=args.separate_test_path,
                             args=args,
                             features_path=args.separate_test_features_path,
                             logger=logger)
    if args.separate_val_path:
        val_data = get_data(path=args.separate_val_path,
                            args=args,
                            features_path=args.separate_val_features_path,
                            logger=logger)

    if args.separate_val_path and args.separate_test_path:
        train_data = data
    elif args.separate_val_path:
        train_data, _, test_data = split_data(data=data,
                                              split_type=args.split_type,
                                              sizes=(0.8, 0.0, 0.2),
                                              seed=args.seed,
                                              args=args,
                                              logger=logger)
    elif args.separate_test_path:
        train_data, val_data, _ = split_data(data=data,
                                             split_type=args.split_type,
                                             sizes=(0.8, 0.2, 0.0),
                                             seed=args.seed,
                                             args=args,
                                             logger=logger)
    else:
        train_data, val_data, test_data = split_data(
            data=data,
            split_type=args.split_type,
            sizes=args.split_sizes,
            seed=args.seed,
            args=args,
            logger=logger)

    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(data)
        debug('Class sizes')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(
                f'{args.task_names[i]} '
                f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}'
            )

    if args.save_smiles_splits:
        save_smiles_splits(train_data=train_data,
                           val_data=val_data,
                           test_data=test_data,
                           data_path=args.data_path,
                           save_dir=args.save_dir)

    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(train_data)

    debug(
        f'Total size = {len(data):,} | '
        f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}'
    )

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        train_smiles, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    # Get loss and metric functions
    loss_func = get_loss_func(args)
    metric_func = get_metric_func(metric=args.metric)

    # Set up test set evaluation
    val_smiles, val_targets = val_data.smiles(), val_data.targets()
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    if args.dataset_type == 'multiclass':
        sum_test_preds = np.zeros(
            (len(test_smiles), args.num_tasks, args.multiclass_num_classes))
    else:
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # Automatically determine whether to cache
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers

    # Create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data,
                                           batch_size=args.batch_size,
                                           num_workers=num_workers,
                                           cache=cache,
                                           class_balance=args.class_balance,
                                           shuffle=True,
                                           seed=args.seed)
    val_data_loader = MoleculeDataLoader(dataset=val_data,
                                         batch_size=args.batch_size,
                                         num_workers=num_workers,
                                         cache=cache)
    test_data_loader = MoleculeDataLoader(dataset=test_data,
                                          batch_size=args.batch_size,
                                          num_workers=num_workers,
                                          cache=cache)

    # Train ensemble of models
    for model_idx in range(args.ensemble_size):
        # Tensorboard writer
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)
        try:
            writer = SummaryWriter(log_dir=save_dir)
        except TypeError:
            # older tensorboardX versions take `logdir` instead of `log_dir`
            writer = SummaryWriter(logdir=save_dir)

        # Load/build model
        if args.checkpoint_paths is not None:
            debug(
                f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}'
            )
            model = load_checkpoint(args.checkpoint_paths[model_idx],
                                    logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = MoleculeModel(args)

        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
        model = model.to(args.device)

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler,
                        features_scaler, args)

        # Optimizers
        optimizer = build_optimizer(model, args)

        # Learning rate schedulers
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in trange(args.epochs):
            debug(f'Epoch {epoch}')

            n_iter = train(model=model,
                           data_loader=train_data_loader,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           logger=logger,
                           writer=writer)
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()
            val_scores = evaluate(model=model,
                                  data_loader=val_data_loader,
                                  num_tasks=args.num_tasks,
                                  metric_func=metric_func,
                                  dataset_type=args.dataset_type,
                                  scaler=scaler,
                                  logger=logger)

            # Average validation score
            avg_val_score = np.nanmean(val_scores)
            debug(f'Validation {args.metric} = {avg_val_score:.6f}')
            writer.add_scalar(f'validation_{args.metric}', avg_val_score,
                              n_iter)

            if args.show_individual_scores:
                # Individual validation scores
                for task_name, val_score in zip(args.task_names, val_scores):
                    debug(
                        f'Validation {task_name} {args.metric} = {val_score:.6f}'
                    )
                    writer.add_scalar(f'validation_{task_name}_{args.metric}',
                                      val_score, n_iter)

            # Save model checkpoint if improved validation score
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score:
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, 'model.pt'), model,
                                scaler, features_scaler, args)

        # Evaluate on test set using model with best validation score
        info(
            f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}'
        )
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'),
                                device=args.device,
                                logger=logger)

        test_preds, _ = predict(model=model,
                                data_loader=test_data_loader,
                                scaler=scaler)
        test_scores = evaluate_predictions(preds=test_preds,
                                           targets=test_targets,
                                           num_tasks=args.num_tasks,
                                           metric_func=metric_func,
                                           dataset_type=args.dataset_type,
                                           logger=logger)

        if len(test_preds) != 0:
            sum_test_preds += np.array(test_preds)

        # Average test score
        avg_test_score = np.nanmean(test_scores)
        info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
        writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)

        if args.show_individual_scores:
            # Individual test scores
            for task_name, test_score in zip(args.task_names, test_scores):
                info(
                    f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}'
                )
                writer.add_scalar(f'test_{task_name}_{args.metric}',
                                  test_score, n_iter)
        writer.close()

    # Evaluate ensemble on test set
    avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()

    ensemble_scores = evaluate_predictions(preds=avg_test_preds,
                                           targets=test_targets,
                                           num_tasks=args.num_tasks,
                                           metric_func=metric_func,
                                           dataset_type=args.dataset_type,
                                           logger=logger)

    # Average ensemble score
    avg_ensemble_test_score = np.nanmean(ensemble_scores)
    info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')

    # Individual ensemble scores
    if args.show_individual_scores:
        for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
            info(
                f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}'
            )

    _, train_feature = predict(model=model,
                               data_loader=train_data_loader,
                               scaler=scaler)
    _, val_feature = predict(model=model,
                             data_loader=val_data_loader,
                             scaler=scaler)
    _, test_feature = predict(model=model,
                              data_loader=test_data_loader,
                              scaler=scaler)

    return ensemble_scores, train_feature, val_feature, test_feature, train_targets, val_targets, test_targets
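
A minimal usage sketch for the function above (not part of the original snippet). It assumes a single-task classification dataset with no missing target values and the same, older xgboost fit API used elsewhere in these examples; demo_gnn_xgb and its local variable names are illustrative only.

import numpy as np
import xgboost as xgb
from sklearn.metrics import roc_auc_score


def demo_gnn_xgb(args: TrainArgs) -> float:
    # Train the GNN, then reuse its learned molecular features for each split.
    _, train_X, val_X, test_X, train_y, val_y, test_y = run_training_gnn_xgb(args)

    # Fit a gradient-boosted classifier on the GNN features, early-stopping on
    # the validation split.
    clf = xgb.XGBClassifier(n_estimators=2000, max_depth=4, learning_rate=0.1,
                            objective='binary:logistic')
    clf.fit(np.array(train_X), np.array(train_y, dtype=float).ravel(),
            eval_set=[(np.array(val_X), np.array(val_y, dtype=float).ravel())],
            eval_metric='auc', early_stopping_rounds=200)

    # Score the held-out test split with ROC AUC.
    test_proba = clf.predict_proba(np.array(test_X))[:, 1]
    return roc_auc_score(np.array(test_y, dtype=float).ravel(), test_proba)
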
Example #21
0
def cross_validate(args: TrainArgs, logger: Logger = None):
    """k-fold cross validation; returns the model trained on the final fold."""
    info = logger.info if logger is not None else print

    # Initialize relevant variables
    init_seed = args.seed
    save_dir = args.save_dir
    task_names = args.target_columns or get_task_names(args.data_path)

    # Run training on different random seeds for each fold
    dmpnn_scores = []
    dmpnn_xgb_scores = []
    morgan_scores = []
    dmpnn_morgan_scores = []
    for fold_num in range(args.num_folds):
        if args.dataset_type == 'classification':
            args.data_path = f'molnet_benchmark/molnet_random_{args.protein}_c/seed{fold_num + 1}/train.csv'
            args.separate_test_path = f'molnet_benchmark/molnet_random_{args.protein}_c/seed{fold_num + 1}/val.csv'
            args.separate_val_path = f'molnet_benchmark/molnet_random_{args.protein}_c/seed{fold_num + 1}/test.csv'
        elif args.dataset_type == 'regression':
            args.data_path = f'molnet_benchmark/molnet_random_{args.protein}_r/seed{fold_num + 1}/train.csv'
            args.separate_test_path = f'molnet_benchmark/molnet_random_{args.protein}_r/seed{fold_num + 1}/val.csv'
            args.separate_val_path = f'molnet_benchmark/molnet_random_{args.protein}_r/seed{fold_num + 1}/test.csv'
        info(f'Fold {fold_num}')
        args.seed = init_seed + fold_num
        args.save_dir = os.path.join(save_dir, f'fold_{fold_num}')
        makedirs(args.save_dir)
        model_scores, model, scaler = run_training(args, logger)
        dmpnn_scores.append(model_scores)
        train_target, train_feature, val_target, val_feature, test_target, test_feature, train_smiles, val_smiles, test_smiles = get_xgboost_feature(
            args, logger, model)
        train_target = pd.DataFrame(train_target)
        train_feature = pd.DataFrame(train_feature)
        val_target = pd.DataFrame(val_target)
        val_feature = pd.DataFrame(val_feature)
        test_target = pd.DataFrame(test_target)
        test_feature = pd.DataFrame(test_feature)
        train_morgan_feature = get_morgan_feature(train_smiles)
        val_morgan_feature = get_morgan_feature(val_smiles)
        test_morgan_feature = get_morgan_feature(test_smiles)
        if args.dataset_type == 'classification':
            if test_target.shape[1] == 1:
                xgb_gbc = xgb.XGBClassifier(base_score=0.5,
                                            booster='gbtree',
                                            colsample_bylevel=1,
                                            colsample_bytree=1,
                                            gamma=1,
                                            learning_rate=0.1,
                                            max_delta_step=0,
                                            max_depth=4,
                                            min_child_weight=8,
                                            missing=None,
                                            n_estimators=2000,
                                            n_jobs=1,
                                            nthread=None,
                                            objective='binary:logistic',
                                            random_state=0,
                                            reg_alpha=0,
                                            reg_lambda=1,
                                            scale_pos_weight=1,
                                            seed=None,
                                            silent=True,
                                            subsample=0.8,
                                            tree_method='gpu_hist',
                                            n_gpus=-1)
                xgb_gbc.fit(train_feature,
                            train_target,
                            eval_set=[(val_feature, val_target)],
                            eval_metric='auc',
                            early_stopping_rounds=200)
                pre_pro = xgb_gbc.predict_proba(test_feature)[:, 1]
                fpr, tpr, threshold = roc_curve(test_target, pre_pro)
                AUC = auc(fpr, tpr)
                pre_pro = [1 if i > 0.5 else 0 for i in pre_pro]
                tn, fp, fn, tp = confusion_matrix(test_target, pre_pro).ravel()
                # Sn = TP /(TP + FN)  Sp = TN / (TN+FP)
                Sn = tp / (tp + fn)
                Sp = tn / (tn + fp)
                acc = accuracy_score(test_target, pre_pro)
                dmpnn_xgb_scores.append([AUC, Sn, Sp, acc])
                joblib.dump(xgb_gbc, 'external_test/dmpnn_xgb.model')
                xgb_gbc = xgb.XGBClassifier(base_score=0.5,
                                            booster='gbtree',
                                            colsample_bylevel=1,
                                            colsample_bytree=1,
                                            gamma=1,
                                            learning_rate=0.1,
                                            max_delta_step=0,
                                            max_depth=4,
                                            min_child_weight=8,
                                            missing=None,
                                            n_estimators=2000,
                                            n_jobs=1,
                                            nthread=None,
                                            objective='binary:logistic',
                                            random_state=0,
                                            reg_alpha=0,
                                            reg_lambda=1,
                                            scale_pos_weight=1,
                                            seed=None,
                                            silent=True,
                                            subsample=0.8,
                                            tree_method='gpu_hist',
                                            n_gpus=-1)
                xgb_gbc.fit(train_morgan_feature,
                            train_target,
                            eval_set=[(val_morgan_feature, val_target)],
                            eval_metric='auc',
                            early_stopping_rounds=200)
                pre_pro = xgb_gbc.predict_proba(test_morgan_feature)[:, 1]
                fpr, tpr, threshold = roc_curve(test_target, pre_pro)
                AUC = auc(fpr, tpr)
                pre_pro = [1 if i > 0.5 else 0 for i in pre_pro]
                tn, fp, fn, tp = confusion_matrix(test_target, pre_pro).ravel()
                # Sn = TP /(TP + FN)  Sp = TN / (TN+FP)
                Sn = tp / (tp + fn)
                Sp = tn / (tn + fp)
                acc = accuracy_score(test_target, pre_pro)
                morgan_scores.append([AUC, Sn, Sp, acc])
                joblib.dump(xgb_gbc, 'external_test/morgan_xgb.model')
                train_gcn_mor_feature = pd.concat(
                    [train_feature, train_morgan_feature], axis=1)
                val_gcn_mor_feature = pd.concat(
                    [val_feature, val_morgan_feature], axis=1)
                test_gcn_mor_feature = pd.concat(
                    [test_feature, test_morgan_feature], axis=1)
                train_gcn_mor_feature.columns = val_gcn_mor_feature.columns = test_gcn_mor_feature.columns = range(
                    train_gcn_mor_feature.shape[1])
                xgb_gbc = xgb.XGBClassifier(base_score=0.5,
                                            booster='gbtree',
                                            colsample_bylevel=1,
                                            colsample_bytree=1,
                                            gamma=1,
                                            learning_rate=0.1,
                                            max_delta_step=0,
                                            max_depth=4,
                                            min_child_weight=8,
                                            missing=None,
                                            n_estimators=2000,
                                            n_jobs=1,
                                            nthread=None,
                                            objective='binary:logistic',
                                            random_state=0,
                                            reg_alpha=0,
                                            reg_lambda=1,
                                            scale_pos_weight=1,
                                            seed=None,
                                            silent=True,
                                            subsample=0.8,
                                            tree_method='gpu_hist',
                                            n_gpus=-1)
                xgb_gbc.fit(train_gcn_mor_feature,
                            train_target,
                            eval_set=[(val_gcn_mor_feature, val_target)],
                            eval_metric='auc',
                            early_stopping_rounds=200)
                pre_pro = xgb_gbc.predict_proba(test_gcn_mor_feature)[:, 1]
                fpr, tpr, threshold = roc_curve(test_target, pre_pro)
                AUC = auc(fpr, tpr)
                pre_pro = [1 if i > 0.5 else 0 for i in pre_pro]
                tn, fp, fn, tp = confusion_matrix(test_target, pre_pro).ravel()
                Sn = tp / (tp + fn)
                Sp = tn / (tn + fp)
                acc = accuracy_score(test_target, pre_pro)
                dmpnn_morgan_scores.append([AUC, Sn, Sp, acc])
                joblib.dump(xgb_gbc, 'external_test/dmpnn_morgan_xgb.model')

            else:
                aucs = []
                for i in range(test_target.shape[1]):
                    xgb_gbc = xgb.XGBClassifier(base_score=0.5,
                                                booster='gbtree',
                                                colsample_bylevel=1,
                                                colsample_bytree=1,
                                                gamma=1,
                                                learning_rate=0.1,
                                                max_delta_step=0,
                                                max_depth=4,
                                                min_child_weight=8,
                                                missing=None,
                                                n_estimators=2000,
                                                n_jobs=1,
                                                nthread=None,
                                                objective='binary:logistic',
                                                random_state=0,
                                                reg_alpha=0,
                                                reg_lambda=1,
                                                scale_pos_weight=1,
                                                seed=None,
                                                silent=True,
                                                subsample=0.8,
                                                tree_method='gpu_hist',
                                                n_gpus=-1)
                    if max(val_target[i]) == 0 or max(
                            train_target[i]) == 0 or max(test_target[i]) == 0:
                        continue
                    xgb_gbc.fit(train_feature,
                                train_target[i],
                                eval_set=[(val_feature, val_target[i])],
                                eval_metric='auc',
                                early_stopping_rounds=100)
                    pre_pro = xgb_gbc.predict_proba(test_feature)[:, 1]
                    fpr, tpr, threshold = roc_curve(test_target[i], pre_pro)
                    AUC = auc(fpr, tpr)
                    if args.metric == "prc-auc":
                        precision, recall, _ = precision_recall_curve(
                            test_target[i], pre_pro)
                        AUC = auc(recall, precision)
                    pre_pro = [1 if p > 0.5 else 0 for p in pre_pro]
                    # score against the current task's column, not the full frame
                    tn, fp, fn, tp = confusion_matrix(test_target[i],
                                                      pre_pro).ravel()
                    Sn = tp / (tp + fn)
                    Sp = tn / (tn + fp)
                    acc = accuracy_score(test_target[i], pre_pro)
                    aucs.append([AUC, Sn, Sp, acc])
                dmpnn_xgb_scores.append([np.mean(aucs)])
        elif args.dataset_type == 'regression':
            if test_target.shape[1] == 1:
                xgb_gbc = xgb.XGBRegressor(learn_rate=0.1,
                                           max_depth=4,
                                           min_child_weight=10,
                                           gamma=1,
                                           subsample=0.8,
                                           colsample_bytree=0.8,
                                           reg_alpha=0.8,
                                           objective='reg:linear',
                                           n_estimators=2000,
                                           tree_method='gpu_hist',
                                           n_gpus=-1)
                xgb_gbc.fit(train_feature,
                            train_target,
                            eval_set=[(val_feature, val_target)],
                            eval_metric='rmse',
                            early_stopping_rounds=200)
                y_pred = xgb_gbc.predict(test_feature)
                y_pred = scaler.inverse_transform(y_pred)
                y_test = test_target.astype('float')
                MSE = mean_squared_error(y_test, y_pred)
                RMSE = MSE**0.5
                MAE = median_absolute_error(y_test, y_pred)
                dmpnn_xgb_scores.append([RMSE, MAE])
                joblib.dump(xgb_gbc, 'external_test/dmpnn_xgb.model')
                xgb_gbc = xgb.XGBRegressor(learn_rate=0.1,
                                           max_depth=4,
                                           min_child_weight=10,
                                           gamma=1,
                                           subsample=0.8,
                                           colsample_bytree=0.8,
                                           reg_alpha=0.8,
                                           objective='reg:linear',
                                           n_estimators=2000,
                                           tree_method='gpu_hist',
                                           n_gpus=-1)
                xgb_gbc.fit(train_morgan_feature,
                            train_target,
                            eval_set=[(val_morgan_feature, val_target)],
                            eval_metric='rmse',
                            early_stopping_rounds=200)
                y_pred = xgb_gbc.predict(test_morgan_feature)
                y_pred = scaler.inverse_transform(y_pred)
                MSE = mean_squared_error(y_test, y_pred)
                RMSE = MSE**0.5
                MAE = median_absolute_error(y_test, y_pred)
                morgan_scores.append([RMSE, MAE])
                joblib.dump(xgb_gbc, 'external_test/morgan_xgb.model')
                train_gcn_mor_feature = pd.concat(
                    [train_feature, train_morgan_feature], axis=1)
                val_gcn_mor_feature = pd.concat(
                    [val_feature, val_morgan_feature], axis=1)
                test_gcn_mor_feature = pd.concat(
                    [test_feature, test_morgan_feature], axis=1)
                train_gcn_mor_feature.columns = val_gcn_mor_feature.columns = test_gcn_mor_feature.columns = range(
                    train_gcn_mor_feature.shape[1])

                xgb_gbc = xgb.XGBRegressor(learn_rate=0.1,
                                           max_depth=4,
                                           min_child_weight=10,
                                           gamma=1,
                                           subsample=0.8,
                                           colsample_bytree=0.8,
                                           reg_alpha=0.8,
                                           objective='reg:linear',
                                           n_estimators=2000,
                                           tree_method='gpu_hist',
                                           n_gpus=-1)
                xgb_gbc.fit(train_gcn_mor_feature,
                            train_target,
                            eval_set=[(val_gcn_mor_feature, val_target)],
                            eval_metric='rmse',
                            early_stopping_rounds=200)
                y_pred = xgb_gbc.predict(test_gcn_mor_feature)
                y_pred = scaler.inverse_transform(y_pred)
                MSE = mean_squared_error(y_test, y_pred)
                RMSE = MSE**0.5
                MAE = median_absolute_error(y_test, y_pred)
                dmpnn_morgan_scores.append([RMSE, MAE])
                joblib.dump(xgb_gbc, 'external_test/dmpnn_morgan_xgb.model')

            else:
                MAEs = []
                for i in range(test_target.shape[1]):
                    xgb_gbc = xgb.XGBRegressor(learn_rate=0.1,
                                               max_depth=4,
                                               min_child_weight=10,
                                               gamma=1,
                                               subsample=0.8,
                                               colsample_bytree=0.8,
                                               reg_alpha=0.8,
                                               objective='reg:linear',
                                               n_estimators=2000,
                                               tree_method='gpu_hist',
                                               n_gpus=-1)
                    xgb_gbc.fit(train_feature,
                                train_target[i],
                                eval_set=[(val_feature, val_target[i])],
                                eval_metric='rmse',
                                early_stopping_rounds=200)
                    y_pred = xgb_gbc.predict(test_feature)
                    y_test = test_target[i].astype('float')
                    MSE = mean_squared_error(y_test, y_pred)
                    RMSE = MSE**0.5
                    MAE = median_absolute_error(y_test, y_pred)
                    MAEs.append([MAE, RMSE])
                dmpnn_xgb_scores.append([np.mean(MAEs)])

    dmpnn_scores = np.array(dmpnn_scores)
    # Report scores across folds
    dmpnn_scores = np.nanmean(
        dmpnn_scores, axis=1)  # average score for each fold across tasks
    dmpnn_mean_score, dmpnn_std_score = np.nanmean(dmpnn_scores), np.nanstd(
        dmpnn_scores)
    print('per-fold dmpnn test scores = ', dmpnn_scores)
    info(
        f'Overall dmpnn test {args.metric} = {dmpnn_mean_score:.6f} +/- {dmpnn_std_score:.6f}'
    )

    dmpnn_xgb_scores = np.nanmean(
        dmpnn_xgb_scores, axis=1)  # average score for each fold across tasks
    dmpnn_xgb_mean_score, dmpnn_xgb_std_score = np.nanmean(
        dmpnn_xgb_scores), np.nanstd(dmpnn_xgb_scores)
    print('per-fold dmpnn_xgb test scores = ', dmpnn_xgb_scores)
    info(
        f'Overall dmpnn_xgb test {args.metric} = {dmpnn_xgb_mean_score:.6f} +/- {dmpnn_xgb_std_score:.6f}'
    )

    morgan_scores = np.nanmean(
        morgan_scores, axis=1)  # average score for each fold across tasks
    morgan_mean_score, morgan_std_score = np.nanmean(morgan_scores), np.nanstd(
        morgan_scores)
    print('per-fold morgan test scores = ', morgan_scores)
    info(
        f'Overall morgan test {args.metric} = {morgan_mean_score:.6f} +/- {morgan_std_score:.6f}'
    )

    dmpnn_morgan_scores = np.nanmean(
        dmpnn_morgan_scores,
        axis=1)  # average score for each fold across tasks
    dmpnn_morgan_mean_score, dmpnn_morgan_std_score = np.nanmean(
        dmpnn_morgan_scores), np.nanstd(dmpnn_morgan_scores)
    print('per-fold dmpnn_morgan test scores = ', dmpnn_morgan_scores)
    info(
        f'Overall dmpnn_morgan test {args.metric} = {dmpnn_morgan_mean_score:.6f} +/- {dmpnn_morgan_std_score:.6f}'
    )
    return model
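
The classification branches above construct the same XGBClassifier four times with an identical keyword list. A small helper such as the hypothetical make_xgb_classifier below (a sketch assuming the same older xgboost version these examples target, in which keywords like silent, missing=None and n_gpus are still accepted) would keep that configuration in one place:

import xgboost as xgb


def make_xgb_classifier(**overrides) -> xgb.XGBClassifier:
    # Default configuration copied from the snippet above; callers override any
    # keyword they need to change.
    params = dict(base_score=0.5, booster='gbtree', colsample_bylevel=1,
                  colsample_bytree=1, gamma=1, learning_rate=0.1, max_delta_step=0,
                  max_depth=4, min_child_weight=8, missing=None, n_estimators=2000,
                  n_jobs=1, nthread=None, objective='binary:logistic', random_state=0,
                  reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
                  silent=True, subsample=0.8, tree_method='gpu_hist', n_gpus=-1)
    params.update(overrides)
    return xgb.XGBClassifier(**params)


# Usage: xgb_gbc = make_xgb_classifier(); the fit/evaluate blocks then differ only
# in which feature matrix (GNN, Morgan, or concatenated) they receive.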