def predict(model: nn.Module, data: MoleculeDataset, batch_size: int, scaler: StandardScaler = None, uncertainty: bool = False) -> List[List[float]]:
    """
    Makes predictions on a dataset using an ensemble of models.

    :param model: A model.
    :param data: A MoleculeDataset.
    :param batch_size: Batch size.
    :param scaler: A StandardScaler object fit on the training targets.
    :param uncertainty: Whether uncertainty values should be returned.
    :return: A list of lists of predictions. The outer list is examples while the inner
             list is tasks. When ``model.uncertainty`` is set, the raw outputs interleave
             prediction and uncertainty columns (even indices = predictions, odd indices =
             uncertainties); if ``uncertainty`` is also True, a ``(predictions,
             uncertainties)`` tuple is returned instead of a single list.
    """
    model.eval()

    preds = []

    num_iters, iter_step = len(data), batch_size

    for i in trange(0, num_iters, iter_step):
        # Prepare batch
        mol_batch = MoleculeDataset(data[i:i + batch_size])
        smiles_batch, features_batch = mol_batch.smiles(), mol_batch.features()

        # Run model
        batch = smiles_batch

        with torch.no_grad():
            batch_preds = model(batch, features_batch)

        batch_preds = batch_preds.data.cpu().numpy()

        # Collect vectors
        batch_preds = batch_preds.tolist()
        preds.extend(batch_preds)

    if model.uncertainty:
        # The network emits [pred_0, unc_0, pred_1, unc_1, ...] per example;
        # split the interleaved columns into predictions (p) and uncertainties (c).
        p = []
        c = []
        for i in range(len(preds)):
            p.append([preds[i][j] for j in range(len(preds[i])) if j % 2 == 0])
            c.append([preds[i][j] for j in range(len(preds[i])) if j % 2 == 1])

        if scaler is not None:
            # Means are inverse-transformed directly; variances scale by std**2.
            p = scaler.inverse_transform(p).tolist()
            c = (scaler.stds**2 * c).tolist()

        if uncertainty:
            return p, c

        return p

    if scaler is not None:
        # Inverse scale if regression
        preds = scaler.inverse_transform(preds).tolist()

    return preds
def load_scalers(path: str) -> Tuple[StandardScaler, StandardScaler]:
    """
    Loads the scalers a model was trained with.

    :param path: Path where model checkpoint is saved.
    :return: A tuple with the data scaler and the features scaler.
    """
    state = torch.load(path, map_location=lambda storage, loc: storage)

    # Rebuild the target scaler only when the checkpoint actually stored one.
    data_state = state['data_scaler']
    if data_state is None:
        scaler = None
    else:
        scaler = StandardScaler(data_state['means'], data_state['stds'])

    # Same for the (optional) input-features scaler; NaN features are replaced by 0.
    features_state = state['features_scaler']
    if features_state is None:
        features_scaler = None
    else:
        features_scaler = StandardScaler(features_state['means'],
                                         features_state['stds'],
                                         replace_nan_token=0)

    return scaler, features_scaler
def save_predictions(save_dir: str, train_data: MolPairDataset, val_data: MolPairDataset, test_data: MolPairDataset,
                     train_preds: List[List[float]], val_preds: List[List[float]], test_preds: List[List[float]],
                     task_names: List[str], scaler: StandardScaler = None) -> None:
    """
    Saves predictions to csv file for entire model.

    Any of the datasets can be absent. They will not be saved in that case.
    """
    # (split label, dataset, predictions) triples, written in this order.
    sections = [
        ('train', train_data, train_preds),
        ('val', val_data, val_preds),
        ('test', test_data, test_preds),
    ]

    with open(os.path.join(save_dir, 'preds.csv'), 'w') as out_file:
        writer = csv.writer(out_file)
        writer.writerow(['SMILE1', 'SMILE2', 'SPLIT']
                        + task_names
                        + ['PRED_' + task for task in task_names])

        for split_name, dataset, split_preds in sections:
            if dataset is None:
                continue

            smiles = dataset.smiles()
            targets = dataset.targets()

            # Inverse scale if regression and only for training data
            # (only the training targets were standardized).
            if split_name == 'train' and scaler is not None:
                targets = scaler.inverse_transform(targets)

            # One row per molecule pair: both SMILES, the split label,
            # the true targets, then the model predictions.
            for pair, target_row, pred_row in zip(smiles, targets, split_preds):
                writer.writerow([pair[0], pair[1], split_name] + target_row + pred_row)
def predict_MCdepth(model: nn.Module, data_loader: MoleculeDataLoader, args: TrainArgs, scaler: StandardScaler, d) -> List[List[float]]: """ makes a random prediction given a certain depth, d """ # set model to eval mode model.eval() preds = [] for batch in data_loader: batch: MoleculeDataset mol_batch, features_batch = batch.batch_graph(), batch.features() with torch.no_grad(): _, batch_preds_list, _, _ = model(mol_batch, features_batch, sample=True) batch_preds = batch_preds_list[d] batch_preds = batch_preds.data.cpu().numpy() if scaler is not None: batch_preds = scaler.inverse_transform(batch_preds) batch_preds = batch_preds.tolist() preds.extend(batch_preds) return preds
def load_scalers(
    path: str,
) -> Tuple[StandardScaler, StandardScaler, StandardScaler, StandardScaler]:
    """
    Loads the scalers a model was trained with.

    :param path: Path where model checkpoint is saved.
    :return: A tuple with the data :class:`~chemprop.data.scaler.StandardScaler`
             and features :class:`~chemprop.data.scaler.StandardScaler`, plus the
             atom-descriptor and bond-feature scalers (each ``None`` when absent).
    """
    state = torch.load(path, map_location=lambda storage, loc: storage)

    def _build_scaler(key: str, **scaler_kwargs) -> StandardScaler:
        # state.get() treats a missing key the same as an explicit None entry,
        # so older checkpoints that lack the newer scaler keys load cleanly.
        scaler_state = state.get(key)
        if scaler_state is None:
            return None
        return StandardScaler(scaler_state["means"], scaler_state["stds"], **scaler_kwargs)

    scaler = _build_scaler("data_scaler")
    # Feature-like scalers replace NaNs with 0 so missing features don't propagate.
    features_scaler = _build_scaler("features_scaler", replace_nan_token=0)
    atom_descriptor_scaler = _build_scaler("atom_descriptor_scaler", replace_nan_token=0)
    bond_feature_scaler = _build_scaler("bond_feature_scaler", replace_nan_token=0)

    return scaler, features_scaler, atom_descriptor_scaler, bond_feature_scaler
def load_scalers(
    path: str
) -> Tuple[StandardScaler, StandardScaler, StandardScaler, StandardScaler]:
    """
    Loads the scalers a model was trained with.

    :param path: Path where model checkpoint is saved.
    :return: A tuple with the data :class:`~chemprop.data.scaler.StandardScaler`
             and features :class:`~chemprop.data.scaler.StandardScaler`, plus the
             atom-descriptor and bond-feature scalers (each ``None`` when absent).
    """
    state = torch.load(path, map_location=lambda storage, loc: storage)

    def _scaler_from_state(key, **scaler_kwargs):
        # A missing key (older checkpoints) is handled the same as a stored None.
        entry = state.get(key)
        if entry is not None:
            return StandardScaler(entry['means'], entry['stds'], **scaler_kwargs)
        return None

    scaler = _scaler_from_state('data_scaler')
    # Feature scalers replace NaNs with 0 so absent feature values stay finite.
    features_scaler = _scaler_from_state('features_scaler', replace_nan_token=0)
    atom_descriptor_scaler = _scaler_from_state('atom_descriptor_scaler', replace_nan_token=0)
    bond_feature_scaler = _scaler_from_state('bond_feature_scaler', replace_nan_token=0)

    return scaler, features_scaler, atom_descriptor_scaler, bond_feature_scaler
def predict(model: nn.Module,
            data: MoleculeDataset,
            batch_size: int,
            disable_progress_bar: bool = False,
            scaler: StandardScaler = None) -> List[List[float]]:
    """
    Makes predictions on a dataset using an ensemble of models.

    :param model: A model.
    :param data: A MoleculeDataset.
    :param batch_size: Batch size.
    :param disable_progress_bar: Whether to disable the progress bar.
    :param scaler: A StandardScaler object fit on the training targets.
    :return: A list of lists of predictions. The outer list is examples while the inner list is tasks.
    """
    model.eval()

    predictions = []

    # Walk the dataset in batch-sized windows.
    for start in trange(0, len(data), batch_size, disable=disable_progress_bar):
        # Slice out the next batch of molecules.
        batch_data = MoleculeDataset(data[start:start + batch_size])
        smiles_batch, features_batch = batch_data.smiles(), batch_data.features()

        # Forward pass without gradient tracking.
        with torch.no_grad():
            raw_preds = model(smiles_batch, features_batch)

        raw_preds = raw_preds.data.cpu().numpy()

        # Undo target standardization for regression models.
        if scaler is not None:
            raw_preds = scaler.inverse_transform(raw_preds)

        predictions.extend(raw_preds.tolist())

    return predictions
def predict(model: nn.Module,
            data: MoleculeDataset,
            batch_size: int,
            scaler: StandardScaler = None) -> Tuple[List[np.ndarray], List[str]]:
    """
    Makes predictions on a dataset using an ensemble of models.

    :param model: A model whose forward pass returns a sequence of output tensors
                  (one per output head).
    :param data: A MoleculeDataset.
    :param batch_size: Batch size.
    :param scaler: A StandardScaler object fit on the training targets.
    :return: A tuple ``(preds, smiles)`` where ``preds`` is a list with one array per
             model output head (each concatenated across all examples) and ``smiles``
             is the corresponding list of input SMILES.
             (Fixed annotation: the original signature claimed ``List[List[float]]``
             but the function has always returned this tuple.)
    """
    model.eval()

    preds = []

    num_iters, iter_step = len(data), batch_size

    smiles_batch_all = []

    for i in trange(0, num_iters, iter_step):
        # Prepare batch
        mol_batch = MoleculeDataset(data[i:i + batch_size])
        smiles_batch, features_batch = mol_batch.smiles(), mol_batch.features()

        # Run model; this variant returns one tensor per output head.
        batch = smiles_batch

        with torch.no_grad():
            batch_preds = model(batch, features_batch)

        batch_preds = [x.data.cpu().numpy() for x in batch_preds]

        # Inverse scale if regression
        if scaler is not None:
            batch_preds = scaler.inverse_transform(batch_preds)

        # Collect vectors
        preds.append(batch_preds)
        smiles_batch_all.extend(smiles_batch)

    # Transpose per-batch lists into per-head lists, then concatenate over batches.
    preds = [np.concatenate(x) for x in zip(*preds)]

    return preds, smiles_batch_all
def predict(model: MoleculeModel,
            data_loader: MoleculeDataLoader,
            disable_progress_bar: bool = False,
            scaler: StandardScaler = None) -> List[List[float]]:
    """
    Makes predictions on a dataset using an ensemble of models.

    :param model: A :class:`~chemprop.models.model.MoleculeModel`.
    :param data_loader: A :class:`~chemprop.data.data.MoleculeDataLoader`.
    :param disable_progress_bar: Whether to disable the progress bar.
    :param scaler: A :class:`~chemprop.features.scaler.StandardScaler` object fit on the training targets.
    :return: A list of lists of predictions. The outer list is molecules while the inner list is tasks.
    """
    model.eval()

    preds = []

    for batch in tqdm(data_loader, disable=disable_progress_bar, leave=False):
        # Prepare batch
        # NOTE(review): target_batch is unpacked but never used below.
        batch: MoleculeDataset
        mol_batch, features_batch, target_batch, atom_descriptors_batch, atom_features_batch, bond_features_batch, smiles_batch = \
            batch.batch_graph(), batch.features(), batch.targets(), batch.atom_descriptors(), \
            batch.atom_features(), batch.bond_features(), batch.smiles_one_hot_encoding()

        # Make predictions
        with torch.no_grad():
            batch_preds = model(mol_batch, features_batch, atom_descriptors_batch,
                                atom_features_batch, bond_features_batch, smiles_batch)

        batch_preds = batch_preds.data.cpu().numpy()

        # Inverse scale if regression
        if scaler is not None:
            batch_preds = scaler.inverse_transform(batch_preds)

        # Collect vectors
        batch_preds = batch_preds.tolist()
        preds.extend(batch_preds)

    return preds
def predict(model: nn.Module,
            data_loader: MoleculeDataLoader,
            disable_progress_bar: bool = False,
            scaler: StandardScaler = None) -> List[List[float]]:
    """
    Makes predictions on a dataset using an ensemble of models.

    :param model: A model.
    :param data_loader: A MoleculeDataLoader.
    :param disable_progress_bar: Whether to disable the progress bar.
    :param scaler: A StandardScaler object fit on the training targets.
    :return: A list of lists of predictions. The outer list is examples while the inner list is tasks.
    """
    model.eval()

    all_preds = []

    for batch in tqdm(data_loader, disable=disable_progress_bar):
        batch: MoleculeDataset

        # Pull out the model inputs for this batch.
        graph_batch = batch.batch_graph()
        extra_features = batch.features()

        # Forward pass with gradients disabled.
        with torch.no_grad():
            outputs = model(graph_batch, extra_features)

        outputs = outputs.data.cpu().numpy()

        # Regression targets were standardized during training; undo that here.
        if scaler is not None:
            outputs = scaler.inverse_transform(outputs)

        all_preds.extend(outputs.tolist())

    return all_preds
def run_training(args: Namespace, logger: Logger = None) -> List[float]:
    """
    Trains a model and returns test scores on the model checkpoint with the highest validation score.

    :param args: Arguments.
    :param logger: Logger.
    :return: A list of ensemble scores for each task.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Set GPU
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)

    # Print args
# =============================================================================
#     debug(pformat(vars(args)))
# =============================================================================

    # Get data
    debug('Loading data')
    args.task_names = get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')

    # Split data: separate val/test files override the random split where provided.
    debug(f'Splitting data with seed {args.seed}')
    if args.separate_test_path:
        test_data = get_data(path=args.separate_test_path,
                             args=args,
                             features_path=args.separate_test_features_path,
                             logger=logger)
    if args.separate_val_path:
        val_data = get_data(path=args.separate_val_path,
                            args=args,
                            features_path=args.separate_val_features_path,
                            logger=logger)

    if args.separate_val_path and args.separate_test_path:
        train_data = data
    elif args.separate_val_path:
        train_data, _, test_data = split_data(data=data,
                                              split_type=args.split_type,
                                              sizes=(0.8, 0.2, 0.0),
                                              seed=args.seed,
                                              args=args,
                                              logger=logger)
    elif args.separate_test_path:
        train_data, val_data, _ = split_data(data=data,
                                             split_type=args.split_type,
                                             sizes=(0.8, 0.2, 0.0),
                                             seed=args.seed,
                                             args=args,
                                             logger=logger)
    else:
        print('=' * 100)
        train_data, val_data, test_data = split_data(data=data,
                                                     split_type=args.split_type,
                                                     sizes=args.split_sizes,
                                                     seed=args.seed,
                                                     args=args,
                                                     logger=logger)

    ###my_code###
    # NOTE(review): hard-coded, user-specific dump paths for the split dataframes --
    # this is debugging/bookkeeping code and should be made configurable or removed.
    train_df = get_data_df(train_data)
    train_df.to_csv(
        '~/PycharmProjects/CMPNN-master/data/24w_train_df_seed0.csv')
    val_df = get_data_df(val_data)
    val_df.to_csv(
        '~/PycharmProjects/CMPNN-master/data/24w_val_df_seed0.csv')
    test_df = get_data_df(test_data)
    test_df.to_csv(
        '~/PycharmProjects/CMPNN-master/data/24w_test_df_seed0.csv')
    ##########

    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(data)
        debug('Class sizes')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(
                f'{args.task_names[i]} '
                f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}'
            )

    if args.save_smiles_splits:
        # Record which source-file rows landed in each split, keyed by SMILES.
        with open(args.data_path, 'r') as f:
            reader = csv.reader(f)
            header = next(reader)

            lines_by_smiles = {}
            indices_by_smiles = {}
            for i, line in enumerate(reader):
                smiles = line[0]
                lines_by_smiles[smiles] = line
                indices_by_smiles[smiles] = i

        all_split_indices = []
        for dataset, name in [(train_data, 'train'), (val_data, 'val'),
                              (test_data, 'test')]:
            with open(os.path.join(args.save_dir, name + '_smiles.csv'), 'w') as f:
                writer = csv.writer(f)
                writer.writerow(['smiles'])
                for smiles in dataset.smiles():
                    writer.writerow([smiles])
            with open(os.path.join(args.save_dir, name + '_full.csv'), 'w') as f:
                writer = csv.writer(f)
                writer.writerow(header)
                for smiles in dataset.smiles():
                    writer.writerow(lines_by_smiles[smiles])
            split_indices = []
            for smiles in dataset.smiles():
                split_indices.append(indices_by_smiles[smiles])
            split_indices = sorted(split_indices)
            all_split_indices.append(split_indices)
        with open(os.path.join(args.save_dir, 'split_indices.pckl'), 'wb') as f:
            pickle.dump(all_split_indices, f)

    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(train_data)

    debug(
        f'Total size = {len(data):,} | '
        f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}'
    )

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        train_smiles, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    # Get loss and metric functions
    loss_func = get_loss_func(args)
    metric_func = get_metric_func(metric=args.metric)

    # Set up test set evaluation
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    if args.dataset_type == 'multiclass':
        sum_test_preds = np.zeros(
            (len(test_smiles), args.num_tasks, args.multiclass_num_classes))
    else:
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # Train ensemble of models
    for model_idx in range(args.ensemble_size):
        # Tensorboard writer (older tensorboardX versions use `logdir` instead of `log_dir`)
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)
        try:
            writer = SummaryWriter(log_dir=save_dir)
        except:
            writer = SummaryWriter(logdir=save_dir)

        # Load/build model
        if args.checkpoint_paths is not None:
            debug(
                f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}'
            )
            model = load_checkpoint(args.checkpoint_paths[model_idx],
                                    current_args=args,
                                    logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = build_model(args)

        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
            model = model.cuda()

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler,
                        features_scaler, args)

        # Optimizers
        optimizer = build_optimizer(model, args)

        # Learning rate schedulers
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in range(args.epochs):
            debug(f'Epoch {epoch}')

            n_iter = train(model=model,
                           data=train_data,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           logger=logger,
                           writer=writer)
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()
            val_scores = evaluate(model=model,
                                  data=val_data,
                                  num_tasks=args.num_tasks,
                                  metric_func=metric_func,
                                  batch_size=args.batch_size,
                                  dataset_type=args.dataset_type,
                                  scaler=scaler,
                                  logger=logger)

            # Average validation score
            avg_val_score = np.nanmean(val_scores)
            debug(f'Validation {args.metric} = {avg_val_score:.6f}')
            writer.add_scalar(f'validation_{args.metric}', avg_val_score, n_iter)

            if args.show_individual_scores:
                # Individual validation scores
                for task_name, val_score in zip(args.task_names, val_scores):
                    debug(
                        f'Validation {task_name} {args.metric} = {val_score:.6f}'
                    )
                    writer.add_scalar(f'validation_{task_name}_{args.metric}',
                                      val_score, n_iter)

            # Save model checkpoint if improved validation score
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score:
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, 'model.pt'), model,
                                scaler, features_scaler, args)

        # Evaluate on test set using model with best validation score
        info(
            f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}'
        )
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'),
                                cuda=args.cuda,
                                logger=logger)

        test_preds = predict(model=model,
                             data=test_data,
                             batch_size=args.batch_size,
                             scaler=scaler)
        test_scores = evaluate_predictions(preds=test_preds,
                                           targets=test_targets,
                                           num_tasks=args.num_tasks,
                                           metric_func=metric_func,
                                           dataset_type=args.dataset_type,
                                           logger=logger)

        # Accumulate per-model predictions for the ensemble average below.
        if len(test_preds) != 0:
            sum_test_preds += np.array(test_preds)

        # Average test score
        avg_test_score = np.nanmean(test_scores)
        info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
        writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)

        if args.show_individual_scores:
            # Individual test scores
            for task_name, test_score in zip(args.task_names, test_scores):
                info(
                    f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}'
                )
                writer.add_scalar(f'test_{task_name}_{args.metric}',
                                  test_score, n_iter)

    # Evaluate ensemble on test set
    avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()

    ensemble_scores = evaluate_predictions(preds=avg_test_preds,
                                           targets=test_targets,
                                           num_tasks=args.num_tasks,
                                           metric_func=metric_func,
                                           dataset_type=args.dataset_type,
                                           logger=logger)

    # Average ensemble score
    avg_ensemble_test_score = np.nanmean(ensemble_scores)
    info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')
    writer.add_scalar(f'ensemble_test_{args.metric}', avg_ensemble_test_score, 0)

    # Individual ensemble scores
    if args.show_individual_scores:
        for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
            info(
                f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}'
            )

    return ensemble_scores
def run_training(args: Namespace, logger: Logger = None) -> List[float]:
    """
    Trains a model and returns test scores on the model checkpoint with the highest validation score.

    Supports several special dataset types (bert_pretraining, regression_with_binning,
    kernel, unsupervised) plus maml/moe/adversarial training modes, each of which
    alters the data handling and evaluation flow below.

    :param args: Arguments.
    :param logger: Logger.
    :return: A list of ensemble scores for each task.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Set GPU
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)

    # Print args
    debug(pformat(vars(args)))

    # Get data
    debug('Loading data')
    args.task_names = get_task_names(args.data_path)
    desired_labels = get_desired_labels(args, args.task_names)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    # When features are predicted as extra targets, they don't count as "real" tasks.
    args.real_num_tasks = args.num_tasks - args.features_size if args.predict_features else args.num_tasks
    debug(f'Number of tasks = {args.num_tasks}')

    if args.dataset_type == 'bert_pretraining':
        data.bert_init(args, logger)

    # Split data
    if args.dataset_type == 'regression_with_binning':  # Note: for now, binning based on whole dataset, not just training set
        data, bin_predictions, regression_data = data
        args.bin_predictions = bin_predictions
        debug(f'Splitting data with seed {args.seed}')
        # Train on the binned data; validate/test on the raw regression data.
        train_data, _, _ = split_data(data=data,
                                      split_type=args.split_type,
                                      sizes=args.split_sizes,
                                      seed=args.seed,
                                      args=args,
                                      logger=logger)
        _, val_data, test_data = split_data(regression_data,
                                            split_type=args.split_type,
                                            sizes=args.split_sizes,
                                            seed=args.seed,
                                            args=args,
                                            logger=logger)
    else:
        debug(f'Splitting data with seed {args.seed}')
        if args.separate_test_set:
            test_data = get_data(path=args.separate_test_set,
                                 args=args,
                                 features_path=args.separate_test_set_features,
                                 logger=logger)
            if args.separate_val_set:
                val_data = get_data(
                    path=args.separate_val_set,
                    args=args,
                    features_path=args.separate_val_set_features,
                    logger=logger)
                train_data = data  # nothing to split; we already got our test and val sets
            else:
                train_data, val_data, _ = split_data(data=data,
                                                     split_type=args.split_type,
                                                     sizes=(0.8, 0.2, 0.0),
                                                     seed=args.seed,
                                                     args=args,
                                                     logger=logger)
        else:
            train_data, val_data, test_data = split_data(
                data=data,
                split_type=args.split_type,
                sizes=args.split_sizes,
                seed=args.seed,
                args=args,
                logger=logger)

    # Optionally replace test data with train or val data
    if args.test_split == 'train':
        test_data = train_data
    elif args.test_split == 'val':
        test_data = val_data

    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(data)
        debug('Class sizes')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(
                f'{args.task_names[i]} '
                f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}'
            )

        if args.class_balance:
            # Weight classes inversely to their expected per-batch frequency.
            train_class_sizes = get_class_sizes(train_data)
            class_batch_counts = torch.Tensor(train_class_sizes) * args.batch_size
            args.class_weights = 1 / torch.Tensor(class_batch_counts)

    if args.save_smiles_splits:
        # Record which source-file rows landed in each split, keyed by SMILES.
        with open(args.data_path, 'r') as f:
            reader = csv.reader(f)
            header = next(reader)

            lines_by_smiles = {}
            indices_by_smiles = {}
            for i, line in enumerate(reader):
                smiles = line[0]
                lines_by_smiles[smiles] = line
                indices_by_smiles[smiles] = i

        all_split_indices = []
        for dataset, name in [(train_data, 'train'), (val_data, 'val'),
                              (test_data, 'test')]:
            with open(os.path.join(args.save_dir, name + '_smiles.csv'), 'w') as f:
                writer = csv.writer(f)
                writer.writerow(['smiles'])
                for smiles in dataset.smiles():
                    writer.writerow([smiles])
            with open(os.path.join(args.save_dir, name + '_full.csv'), 'w') as f:
                writer = csv.writer(f)
                writer.writerow(header)
                for smiles in dataset.smiles():
                    writer.writerow(lines_by_smiles[smiles])
            split_indices = []
            for smiles in dataset.smiles():
                split_indices.append(indices_by_smiles[smiles])
            split_indices = sorted(split_indices)
            all_split_indices.append(split_indices)
        with open(os.path.join(args.save_dir, 'split_indices.pckl'), 'wb') as f:
            pickle.dump(all_split_indices, f)
        return [1 for _ in range(args.num_tasks)]  # short circuit out when just generating splits

    if args.features_scaling:
        features_scaler = train_data.normalize_features(
            replace_nan_token=None if args.predict_features else 0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(
        train_data
    ) if args.prespecified_chunk_dir is None else args.prespecified_chunks_max_examples_per_epoch

    if args.adversarial or args.moe:
        val_smiles, test_smiles = val_data.smiles(), test_data.smiles()

    debug(
        f'Total size = {len(data):,} | '
        f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}'
    )

    # Optionally truncate outlier values
    if args.truncate_outliers:
        print('Truncating outliers in train set')
        train_data = truncate_outliers(train_data)

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression' and args.target_scaling:
        debug('Fitting scaler')
        train_smiles, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    if args.moe:
        # Mixture-of-experts: split training data into source clusters.
        train_data = cluster_split(train_data,
                                   args.num_sources,
                                   args.cluster_max_ratio,
                                   seed=args.cluster_split_seed,
                                   logger=logger)

    # Chunk training data if too large to load in memory all at once
    if args.num_chunks > 1:
        os.makedirs(args.chunk_temp_dir, exist_ok=True)
        train_paths = []
        if args.moe:
            chunked_sources = [td.chunk(args.num_chunks) for td in train_data]
            chunks = []
            for i in range(args.num_chunks):
                chunks.append([source[i] for source in chunked_sources])
        else:
            chunks = train_data.chunk(args.num_chunks)
        for i in range(args.num_chunks):
            chunk_path = os.path.join(args.chunk_temp_dir, str(i) + '.txt')
            memo_path = os.path.join(args.chunk_temp_dir,
                                     'memo' + str(i) + '.txt')
            with open(chunk_path, 'wb') as f:
                pickle.dump(chunks[i], f)
            train_paths.append((chunk_path, memo_path))
        # From here on, train_data is a list of (chunk_path, memo_path) tuples.
        train_data = train_paths

    # Get loss and metric functions
    loss_func = get_loss_func(args)
    metric_func = get_metric_func(metric=args.metric, args=args)

    # Set up test set evaluation
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    if args.maml:  # TODO refactor
        test_targets = []
        for task_idx in range(len(data.data[0].targets)):
            _, task_test_data, _ = test_data.sample_maml_task(args, seed=0)
            test_targets += task_test_data.targets()

    if args.dataset_type == 'bert_pretraining':
        sum_test_preds = {
            'features':
            np.zeros((len(test_smiles), args.features_size))
            if args.features_size is not None else None,
            'vocab':
            np.zeros((len(test_targets['vocab']), args.vocab.output_size))
        }
    elif args.dataset_type == 'kernel':
        sum_test_preds = np.zeros((len(test_targets), args.num_tasks))
    else:
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    if args.maml:
        sum_test_preds = None  # annoying to determine exact size; will initialize later

    if args.dataset_type == 'bert_pretraining':
        # Only predict targets that are masked out
        test_targets['vocab'] = [
            target if mask == 0 else None
            for target, mask in zip(test_targets['vocab'], test_data.mask())
        ]

    # Train ensemble of models
    for model_idx in range(args.ensemble_size):
        # Tensorboard writer
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        os.makedirs(save_dir, exist_ok=True)
        writer = SummaryWriter(log_dir=save_dir)

        # Load/build model
        if args.checkpoint_paths is not None:
            debug(
                f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}'
            )
            model = load_checkpoint(args.checkpoint_paths[model_idx],
                                    current_args=args,
                                    logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = build_model(args)

        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
            model = model.cuda()

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler,
                        features_scaler, args)

        if args.adjust_weight_decay:
            args.pnorm_target = compute_pnorm(model)

        # Optimizers
        optimizer = build_optimizer(model, args)

        # Learning rate schedulers
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in trange(args.epochs):
            debug(f'Epoch {epoch}')
            if args.prespecified_chunk_dir is not None:
                # load some different random chunks each epoch
                train_data, val_data = load_prespecified_chunks(args, logger)
                debug('Loaded prespecified chunks for epoch')

            if args.dataset_type == 'unsupervised':  # won't work with moe
                full_data = MoleculeDataset(train_data.data + val_data.data)
                generate_unsupervised_cluster_labels(
                    build_model(args), full_data, args)  # cluster with a new random init
                model.create_ffn(
                    args
                )  # reset the ffn since we're changing targets-- we're just pretraining the encoder.
                optimizer.param_groups.pop()  # remove ffn parameters
                optimizer.add_param_group({
                    'params': model.ffn.parameters(),
                    'lr': args.init_lr[1],
                    'weight_decay': args.weight_decay[1]
                })
                if args.cuda:
                    model.ffn.cuda()

            if args.gradual_unfreezing:
                if epoch % args.epochs_per_unfreeze == 0:
                    unfroze_layer = model.unfreeze_next(
                    )  # consider just stopping early after we have nothing left to unfreeze?
                    if unfroze_layer:
                        debug('Unfroze last frozen layer')

            n_iter = train(model=model,
                           data=train_data,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           logger=logger,
                           writer=writer,
                           chunk_names=(args.num_chunks > 1),
                           val_smiles=val_smiles if args.adversarial else None,
                           test_smiles=test_smiles
                           if args.adversarial or args.moe else None)
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()
            val_scores = evaluate(model=model,
                                  data=val_data,
                                  metric_func=metric_func,
                                  args=args,
                                  scaler=scaler,
                                  logger=logger)

            if args.dataset_type == 'bert_pretraining':
                # bert validation reports features rmse separately; keep only vocab scores.
                if val_scores['features'] is not None:
                    debug(
                        f'Validation features rmse = {val_scores["features"]:.6f}'
                    )
                    writer.add_scalar('validation_features_rmse',
                                      val_scores['features'], n_iter)
                val_scores = [val_scores['vocab']]

            # Average validation score
            avg_val_score = np.nanmean(val_scores)
            debug(f'Validation {args.metric} = {avg_val_score:.6f}')
            writer.add_scalar(f'validation_{args.metric}', avg_val_score, n_iter)

            if args.show_individual_scores:
                # Individual validation scores
                for task_name, val_score in zip(args.task_names, val_scores):
                    if task_name in desired_labels:
                        debug(
                            f'Validation {task_name} {args.metric} = {val_score:.6f}'
                        )
                        writer.add_scalar(
                            f'validation_{task_name}_{args.metric}', val_score,
                            n_iter)

            # Save model checkpoint if improved validation score, or always save it if unsupervised
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score or \
                    args.dataset_type == 'unsupervised':
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, 'model.pt'), model,
                                scaler, features_scaler, args)

        if args.dataset_type == 'unsupervised':
            return [0]  # rest of this is meaningless when unsupervised

        # Evaluate on test set using model with best validation score
        info(
            f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}'
        )
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'),
                                cuda=args.cuda,
                                logger=logger)

        if args.split_test_by_overlap_dataset is not None:
            # Extra diagnostic: score separately on molecules that do / do not
            # appear in the overlap dataset.
            overlap_data = get_data(path=args.split_test_by_overlap_dataset,
                                    logger=logger)
            overlap_smiles = set(overlap_data.smiles())
            test_data_intersect, test_data_nonintersect = [], []
            for d in test_data.data:
                if d.smiles in overlap_smiles:
                    test_data_intersect.append(d)
                else:
                    test_data_nonintersect.append(d)
            test_data_intersect, test_data_nonintersect = MoleculeDataset(
                test_data_intersect), MoleculeDataset(test_data_nonintersect)
            for name, td in [('Intersect', test_data_intersect),
                             ('Nonintersect', test_data_nonintersect)]:
                test_preds = predict(model=model,
                                     data=td,
                                     args=args,
                                     scaler=scaler,
                                     logger=logger)
                test_scores = evaluate_predictions(
                    preds=test_preds,
                    targets=td.targets(),
                    metric_func=metric_func,
                    dataset_type=args.dataset_type,
                    args=args,
                    logger=logger)
                avg_test_score = np.nanmean(test_scores)
                info(
                    f'Model {model_idx} test {args.metric} for {name} = {avg_test_score:.6f}'
                )

        if len(
                test_data
        ) == 0:  # just get some garbage results without crashing; in this case we didn't care anyway
            test_preds, test_scores = sum_test_preds, [
                0 for _ in range(len(args.task_names))
            ]
        else:
            test_preds = predict(model=model,
                                 data=test_data,
                                 args=args,
                                 scaler=scaler,
                                 logger=logger)
            test_scores = evaluate_predictions(preds=test_preds,
                                               targets=test_targets,
                                               metric_func=metric_func,
                                               dataset_type=args.dataset_type,
                                               args=args,
                                               logger=logger)

        if args.maml:
            if sum_test_preds is None:
                # First model: now that we know the prediction shape, allocate the accumulator.
                sum_test_preds = np.zeros(np.array(test_preds).shape)

        if args.dataset_type == 'bert_pretraining':
            if test_preds['features'] is not None:
                sum_test_preds['features'] += np.array(test_preds['features'])
            sum_test_preds['vocab'] += np.array(test_preds['vocab'])
        else:
            sum_test_preds += np.array(test_preds)

        if args.dataset_type == 'bert_pretraining':
            if test_preds['features'] is not None:
                debug(
                    f'Model {model_idx} test features rmse = {test_scores["features"]:.6f}'
                )
                writer.add_scalar('test_features_rmse',
                                  test_scores['features'], 0)
            test_scores = [test_scores['vocab']]

        # Average test score
        avg_test_score = np.nanmean(test_scores)
        info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
        writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)

        if args.show_individual_scores:
            # Individual test scores
            for task_name, test_score in zip(args.task_names, test_scores):
                if task_name in desired_labels:
                    info(
                        f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}'
                    )
                    writer.add_scalar(f'test_{task_name}_{args.metric}',
                                      test_score, n_iter)

    # Evaluate ensemble on test set
    if args.dataset_type == 'bert_pretraining':
        avg_test_preds = {
            'features':
            (sum_test_preds['features'] / args.ensemble_size).tolist()
            if sum_test_preds['features'] is not None else None,
            'vocab': (sum_test_preds['vocab'] / args.ensemble_size).tolist()
        }
    else:
        avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()

    if len(test_data
           ) == 0:  # just return some garbage when we didn't want test data
        ensemble_scores = test_scores
    else:
        ensemble_scores = evaluate_predictions(preds=avg_test_preds,
                                               targets=test_targets,
                                               metric_func=metric_func,
                                               dataset_type=args.dataset_type,
                                               args=args,
                                               logger=logger)

    # Average ensemble score
    if args.dataset_type == 'bert_pretraining':
        if ensemble_scores['features'] is not None:
            info(
                f'Ensemble test features rmse = {ensemble_scores["features"]:.6f}'
            )
            writer.add_scalar('ensemble_test_features_rmse',
                              ensemble_scores['features'], 0)
        ensemble_scores = [ensemble_scores['vocab']]

    avg_ensemble_test_score = np.nanmean(ensemble_scores)
    info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')
    writer.add_scalar(f'ensemble_test_{args.metric}', avg_ensemble_test_score, 0)

    # Individual ensemble scores
    if args.show_individual_scores:
        for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
            info(
                f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}'
            )

    return ensemble_scores
def run_training(args: TrainArgs,
                 data: MoleculeDataset,
                 logger: Logger = None) -> Dict[str, List[float]]:
    """
    Loads data, trains a Chemprop model, and returns test scores for the model checkpoint with the highest validation score.

    :param args: A :class:`~chemprop.args.TrainArgs` object containing arguments for
                 loading data and training the Chemprop model.
    :param data: A :class:`~chemprop.data.MoleculeDataset` containing the data.
    :param logger: A logger to record output.
    :return: A dictionary mapping each metric in :code:`args.metrics` to a list of values for each task.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Set pytorch seed for random initial weights
    torch.manual_seed(args.pytorch_seed)

    # Split data
    debug(f'Splitting data with seed {args.seed}')
    if args.separate_test_path:
        test_data = get_data(path=args.separate_test_path,
                             args=args,
                             features_path=args.separate_test_features_path,
                             logger=logger)
    if args.separate_val_path:
        val_data = get_data(path=args.separate_val_path,
                            args=args,
                            features_path=args.separate_val_features_path,
                            logger=logger)

    # Only the splits not supplied via separate files are carved out of `data`.
    if args.separate_val_path and args.separate_test_path:
        train_data = data
    elif args.separate_val_path:
        train_data, _, test_data = split_data(data=data,
                                              split_type=args.split_type,
                                              sizes=(0.8, 0.0, 0.2),
                                              seed=args.seed,
                                              num_folds=args.num_folds,
                                              args=args,
                                              logger=logger)
    elif args.separate_test_path:
        train_data, val_data, _ = split_data(data=data,
                                             split_type=args.split_type,
                                             sizes=(0.8, 0.2, 0.0),
                                             seed=args.seed,
                                             num_folds=args.num_folds,
                                             args=args,
                                             logger=logger)
    else:
        train_data, val_data, test_data = split_data(
            data=data,
            split_type=args.split_type,
            sizes=args.split_sizes,
            seed=args.seed,
            num_folds=args.num_folds,
            args=args,
            logger=logger)

    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(data)
        debug('Class sizes')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(
                f'{args.task_names[i]} '
                f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}'
            )

    if args.save_smiles_splits:
        save_smiles_splits(train_data=train_data,
                           val_data=val_data,
                           test_data=test_data,
                           data_path=args.data_path,
                           save_dir=args.save_dir,
                           smiles_column=args.smiles_column)

    if args.features_scaling:
        # Fit the feature scaler on train only, then apply to val/test.
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(train_data)

    debug(
        f'Total size = {len(data):,} | '
        f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}'
    )

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        train_smiles, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    # Get loss function
    loss_func = get_loss_func(args)

    # Set up test set evaluation
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    if args.dataset_type == 'multiclass':
        sum_test_preds = np.zeros(
            (len(test_smiles), args.num_tasks, args.multiclass_num_classes))
    else:
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # Automatically determine whether to cache molecule featurizations:
    # caching is only worthwhile (and memory-safe) for small datasets.
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers

    # Create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data,
                                           batch_size=args.batch_size,
                                           num_workers=num_workers,
                                           cache=cache,
                                           class_balance=args.class_balance,
                                           shuffle=True,
                                           seed=args.seed)
    val_data_loader = MoleculeDataLoader(dataset=val_data,
                                         batch_size=args.batch_size,
                                         num_workers=num_workers,
                                         cache=cache)
    test_data_loader = MoleculeDataLoader(dataset=test_data,
                                          batch_size=args.batch_size,
                                          num_workers=num_workers,
                                          cache=cache)

    if args.class_balance:
        debug(
            f'With class_balance, effective train size = {train_data_loader.iter_size:,}'
        )

    # Train ensemble of models
    for model_idx in range(args.ensemble_size):
        # Tensorboard writer
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)
        try:
            writer = SummaryWriter(log_dir=save_dir)
        except TypeError:
            # Older tensorboardX-style writers spell the keyword `logdir`;
            # only a TypeError from the wrong keyword should trigger the fallback.
            writer = SummaryWriter(logdir=save_dir)

        # Load/build model
        if args.checkpoint_paths is not None:
            debug(
                f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}'
            )
            model = load_checkpoint(args.checkpoint_paths[model_idx],
                                    logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = MoleculeModel(args)

        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
        model = model.to(args.device)

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, MODEL_FILE_NAME), model, scaler,
                        features_scaler, args)

        # Optimizers
        optimizer = build_optimizer(model, args)

        # Learning rate schedulers
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in trange(args.epochs):
            debug(f'Epoch {epoch}')

            n_iter = train(model=model,
                           data_loader=train_data_loader,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           logger=logger,
                           writer=writer)
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()
            val_scores = evaluate(model=model,
                                  data_loader=val_data_loader,
                                  num_tasks=args.num_tasks,
                                  metrics=args.metrics,
                                  dataset_type=args.dataset_type,
                                  scaler=scaler,
                                  logger=logger)

            for metric in args.metrics:
                # Average validation score
                avg_val_score = np.nanmean(val_scores[metric])
                debug(f'Validation {metric} = {avg_val_score:.6f}')
                writer.add_scalar(f'validation_{metric}', avg_val_score,
                                  n_iter)

                if args.show_individual_scores:
                    # Individual validation scores
                    for task_name, val_score in zip(args.task_names,
                                                    val_scores[metric]):
                        debug(
                            f'Validation {task_name} {metric} = {val_score:.6f}'
                        )
                        writer.add_scalar(f'validation_{task_name}_{metric}',
                                          val_score, n_iter)

            # Save model checkpoint if improved validation score
            avg_val_score = np.nanmean(val_scores[args.metric])
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score:
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, MODEL_FILE_NAME), model,
                                scaler, features_scaler, args)

        # Evaluate on test set using model with best validation score
        info(
            f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}'
        )
        model = load_checkpoint(os.path.join(save_dir, MODEL_FILE_NAME),
                                device=args.device,
                                logger=logger)

        test_preds = predict(model=model,
                             data_loader=test_data_loader,
                             scaler=scaler)
        test_scores = evaluate_predictions(preds=test_preds,
                                           targets=test_targets,
                                           num_tasks=args.num_tasks,
                                           metrics=args.metrics,
                                           dataset_type=args.dataset_type,
                                           logger=logger)

        if len(test_preds) != 0:
            sum_test_preds += np.array(test_preds)

        # Average test score
        for metric in args.metrics:
            avg_test_score = np.nanmean(test_scores[metric])
            info(f'Model {model_idx} test {metric} = {avg_test_score:.6f}')
            writer.add_scalar(f'test_{metric}', avg_test_score, 0)

            if args.show_individual_scores:
                # Individual test scores
                for task_name, test_score in zip(args.task_names,
                                                 test_scores[metric]):
                    info(
                        f'Model {model_idx} test {task_name} {metric} = {test_score:.6f}'
                    )
                    writer.add_scalar(f'test_{task_name}_{metric}', test_score,
                                      n_iter)
        writer.close()

    # Evaluate ensemble on test set: ensemble prediction is the mean of member predictions.
    avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()

    ensemble_scores = evaluate_predictions(preds=avg_test_preds,
                                           targets=test_targets,
                                           num_tasks=args.num_tasks,
                                           metrics=args.metrics,
                                           dataset_type=args.dataset_type,
                                           logger=logger)

    for metric in args.metrics:
        # Average ensemble score
        avg_ensemble_test_score = np.nanmean(ensemble_scores[metric])
        info(f'Ensemble test {metric} = {avg_ensemble_test_score:.6f}')

        # Individual ensemble scores
        if args.show_individual_scores:
            for task_name, ensemble_score in zip(args.task_names,
                                                 ensemble_scores[metric]):
                info(
                    f'Ensemble test {task_name} {metric} = {ensemble_score:.6f}'
                )

    # Optionally save test preds
    if args.save_preds:
        test_preds_dataframe = pd.DataFrame(
            data={'smiles': test_data.smiles()})

        for i, task_name in enumerate(args.task_names):
            test_preds_dataframe[task_name] = [
                pred[i] for pred in avg_test_preds
            ]

        test_preds_dataframe.to_csv(os.path.join(args.save_dir,
                                                 'test_preds.csv'),
                                    index=False)

    return ensemble_scores
def predict(
    model: MoleculeModel,
    data_loader: MoleculeDataLoader,
    disable_progress_bar: bool = False,
    scaler: StandardScaler = None,
    return_unc_parameters: bool = False,
    dropout_prob: float = 0.0,
) -> List[List[float]]:
    """
    Makes predictions on a dataset using an ensemble of models.

    :param model: A :class:`~chemprop.models.model.MoleculeModel`.
    :param data_loader: A :class:`~chemprop.data.data.MoleculeDataLoader`.
    :param disable_progress_bar: Whether to disable the progress bar.
    :param scaler: A :class:`~chemprop.features.scaler.StandardScaler` object fit on the training targets.
    :param return_unc_parameters: A bool indicating whether additional uncertainty parameters would be returned alongside the mean predictions.
    :param dropout_prob: For use during uncertainty prediction only. The dropout
                         probability used in generating a dropout ensemble.
    :return: A list of lists of predictions. The outer list is molecules while the inner list is tasks. If returning uncertainty parameters as well,
        it is a tuple of lists of lists, of a length depending on how many uncertainty parameters are appropriate for the loss function.
    """
    model.eval()

    # Activate dropout layers to work during inference for uncertainty estimation
    # (MC-dropout style sampling: dropout stays stochastic despite model.eval()).
    if dropout_prob > 0.0:

        def activate_dropout_(model):
            return activate_dropout(model, dropout_prob)

        model.apply(activate_dropout_)

    preds = []

    var, lambdas, alphas, betas = [], [], [], [
    ]  # only used if returning uncertainty parameters

    for batch in tqdm(data_loader, disable=disable_progress_bar, leave=False):
        # Prepare batch
        batch: MoleculeDataset
        mol_batch = batch.batch_graph()
        features_batch = batch.features()
        atom_descriptors_batch = batch.atom_descriptors()
        atom_features_batch = batch.atom_features()
        bond_features_batch = batch.bond_features()

        # Make predictions
        with torch.no_grad():
            batch_preds = model(
                mol_batch,
                features_batch,
                atom_descriptors_batch,
                atom_features_batch,
                bond_features_batch,
            )

        batch_preds = batch_preds.data.cpu().numpy()

        # Split the raw network output into mean + uncertainty parameters,
        # depending on which uncertainty-aware loss the model was trained with.
        if model.loss_function == "mve":
            # MVE head outputs [means | variances] concatenated along the task axis.
            batch_preds, batch_var = np.split(batch_preds, 2, axis=1)
        elif model.loss_function == "dirichlet":
            if model.classification:
                # Pairs of Dirichlet concentration parameters per task;
                # predicted probability is alpha_positive / (sum of the pair).
                batch_alphas = np.reshape(
                    batch_preds,
                    [batch_preds.shape[0], batch_preds.shape[1] // 2, 2])
                batch_preds = batch_alphas[:, :, 1] / np.sum(
                    batch_alphas, axis=2)  # shape(data, tasks, 2)
            elif model.multiclass:
                batch_alphas = batch_preds
                batch_preds = batch_preds / np.sum(
                    batch_alphas, axis=2,
                    keepdims=True)  # shape(data, tasks, num_classes)
        elif model.loss_function == 'evidential':  # regression
            # Evidential head outputs [mean | lambda | alpha | beta] concatenated.
            batch_preds, batch_lambdas, batch_alphas, batch_betas = np.split(
                batch_preds, 4, axis=1)

        # Inverse scale if regression
        if scaler is not None:
            batch_preds = scaler.inverse_transform(batch_preds)
            if model.loss_function == "mve":
                # Variances scale with the square of the target standard deviation.
                batch_var = batch_var * scaler.stds**2
            elif model.loss_function == "evidential":
                batch_betas = batch_betas * scaler.stds**2

        # Collect vectors
        batch_preds = batch_preds.tolist()
        preds.extend(batch_preds)
        if model.loss_function == "mve":
            var.extend(batch_var.tolist())
        elif model.loss_function == "dirichlet" and model.classification:
            alphas.extend(batch_alphas.tolist())
        elif model.loss_function == "evidential":  # regression
            lambdas.extend(batch_lambdas.tolist())
            alphas.extend(batch_alphas.tolist())
            betas.extend(batch_betas.tolist())

    if return_unc_parameters:
        if model.loss_function == "mve":
            return preds, var
        elif model.loss_function == "dirichlet":
            return preds, alphas
        elif model.loss_function == "evidential":
            return preds, lambdas, alphas, betas

    return preds
def predict(model: nn.Module,
            data: MoleculeDataset,
            args: Namespace,
            scaler: StandardScaler = None,
            bert_save_memory: bool = False,
            logger: logging.Logger = None) -> List[List[float]]:
    """
    Makes predictions on a dataset using an ensemble of models.

    Handles several dataset modes via ``args.dataset_type``: plain property
    prediction, BERT-style pretraining, kernel prediction, and binned
    regression, plus MAML meta-learning and a parallel-featurization path.

    :param model: A model.
    :param data: A MoleculeDataset.
    :param args: Arguments.
    :param scaler: A StandardScaler object fit on the training targets.
    :param bert_save_memory: Store unused predictions as None to avoid unnecessary memory use.
    :param logger: Logger.
    :return: A list of lists of predictions. The outer list is examples while the inner list is tasks.
             In MAML mode, returns ``(preds, full_targets)``; in bert_pretraining mode, returns a
             dict with ``'features'`` and ``'vocab'`` keys.
    """
    model.eval()

    preds = []
    if args.dataset_type == 'bert_pretraining':
        features_preds = []

    if args.maml:
        # One sampled task per iteration (batch size 1 at the task level).
        num_iters, iter_step = data.num_tasks() * args.maml_batches_per_epoch, 1
        full_targets = []
    else:
        num_iters, iter_step = len(data), args.batch_size

    if args.parallel_featurization:
        # Featurize batches in a background process; batches arrive via queue.
        batch_queue = Queue(args.batch_queue_max_size)
        exit_queue = Queue(1)
        batch_process = Process(target=async_mol2graph,
                                args=(batch_queue, data, args, num_iters,
                                      iter_step, exit_queue, True))
        batch_process.start()
        currently_loaded_batches = []

    for i in trange(0, num_iters, iter_step):
        if args.maml:
            task_train_data, task_test_data, task_idx = data.sample_maml_task(args, seed=0)
            mol_batch = task_test_data
            smiles_batch, features_batch, targets_batch = task_train_data.smiles(), task_train_data.features(), task_train_data.targets(task_idx)
            targets = torch.Tensor(targets_batch).unsqueeze(1)
            if args.cuda:
                targets = targets.cuda()
        else:
            # Prepare batch
            if args.parallel_featurization:
                if len(currently_loaded_batches) == 0:
                    currently_loaded_batches = batch_queue.get()
                mol_batch, featurized_mol_batch = currently_loaded_batches.pop(0)
            else:
                mol_batch = MoleculeDataset(data[i:i + args.batch_size])
            smiles_batch, features_batch = mol_batch.smiles(), mol_batch.features()

        # Run model
        if args.dataset_type == 'bert_pretraining':
            batch = mol2graph(smiles_batch, args)
            batch.bert_mask(mol_batch.mask())
        else:
            batch = smiles_batch

        if args.maml:  # TODO refactor with train loop
            # One inner-loop adaptation step on the task's train split, then
            # predict on the task's test split with the adapted parameters.
            model.zero_grad()
            intermediate_preds = model(batch, features_batch)
            loss = get_loss_func(args)(intermediate_preds, targets)
            loss = loss.sum() / len(batch)
            grad = torch.autograd.grad(loss, [p for p in model.parameters() if p.requires_grad])
            theta = [p for p in model.named_parameters() if p[1].requires_grad]  # comes in same order as grad
            theta_prime = {p[0]: p[1] - args.maml_lr * grad[i] for i, p in enumerate(theta)}
            for name, nongrad_param in [p for p in model.named_parameters() if not p[1].requires_grad]:
                theta_prime[name] = nongrad_param + torch.zeros(nongrad_param.size()).to(nongrad_param)
            model_prime = build_model(args=args, params=theta_prime)
            smiles_batch, features_batch, targets_batch = task_test_data.smiles(), task_test_data.features(), task_test_data.targets(task_idx)
            # no mask since we only picked data points that have the desired target
            with torch.no_grad():
                batch_preds = model_prime(smiles_batch, features_batch)
            full_targets.extend([[t] for t in targets_batch])
        else:
            with torch.no_grad():
                if args.parallel_featurization:
                    previous_graph_input_mode = model.encoder.graph_input
                    model.encoder.graph_input = True  # force model to accept already processed input
                    batch_preds = model(featurized_mol_batch, features_batch)
                    model.encoder.graph_input = previous_graph_input_mode
                else:
                    batch_preds = model(batch, features_batch)

                if args.dataset_type == 'bert_pretraining':
                    # Model returns a dict; keep optional feature predictions
                    # aside and continue with the vocab logits.
                    if batch_preds['features'] is not None:
                        features_preds.extend(batch_preds['features'].data.cpu().numpy())
                    batch_preds = batch_preds['vocab']

                if args.dataset_type == 'kernel':
                    # Pair up consecutive outputs and score each pair.
                    batch_preds = batch_preds.view(int(batch_preds.size(0)/2), 2, batch_preds.size(1))
                    batch_preds = model.kernel_output_layer(batch_preds)

        batch_preds = batch_preds.data.cpu().numpy()

        if scaler is not None:
            batch_preds = scaler.inverse_transform(batch_preds)

        if args.dataset_type == 'regression_with_binning':
            # Convert per-bin scores to a bin index per task.
            batch_preds = batch_preds.reshape((batch_preds.shape[0], args.num_tasks, args.num_bins))
            indices = np.argmax(batch_preds, axis=2)
            preds.extend(indices.tolist())
        else:
            batch_preds = batch_preds.tolist()
            if args.dataset_type == 'bert_pretraining' and bert_save_memory:
                for atom_idx, mask_val in enumerate(mol_batch.mask()):
                    if mask_val != 0:
                        batch_preds[atom_idx] = None  # not going to predict, so save some memory when passing around
            preds.extend(batch_preds)

    if args.dataset_type == 'regression_with_binning':
        # Map bin indices back to representative bin values.
        preds = args.bin_predictions[np.array(preds)].tolist()

    if args.dataset_type == 'bert_pretraining':
        preds = {
            'features': features_preds if len(features_preds) > 0 else None,
            'vocab': preds
        }

    if args.parallel_featurization:
        exit_queue.put(0)  # dummy var to get the subprocess to know that we're done
        batch_process.join()

    if args.maml:
        # return the task targets here to guarantee alignment;
        # there's probably no reasonable scenario where we'd use MAML directly to predict something that's actually unknown
        return preds, full_targets

    return preds
def generate_scalers(self): print("Creating new molecule, atom, and bond scalers for dataset.") # Get all data features for normalization. all_molecule_features = [] all_atom_features = [] all_bond_features = [] for d in self.dataset: all_molecule_features.append(d.features) all_atom_features += d.x all_bond_features += d.edge_attr # Generate and save molecule feature scaler for data. molecule_scaler = StandardScaler(replace_nan_token=0) molecule_scaler.fit(vstack(all_molecule_features)) self.molecule_scaler = molecule_scaler with open(self.scalers_file_paths["molecule"], "wb") as f: pickle.dump(molecule_scaler, f, pickle.HIGHEST_PROTOCOL) # Generate and save atom feature scaler for data. atom_scaler = StandardScaler(replace_nan_token=0) atom_scaler.fit(vstack(all_atom_features)) self.atom_scaler = atom_scaler with open(self.scalers_file_paths["atom"], "wb") as f: pickle.dump(atom_scaler, f, pickle.HIGHEST_PROTOCOL) # Generate and save bond feature scaler for data. bond_scaler = StandardScaler(replace_nan_token=0) bond_scaler.fit(vstack(all_bond_features)) self.bond_scaler = bond_scaler with open(self.scalers_file_paths["bond"], "wb") as f: pickle.dump(bond_scaler, f, pickle.HIGHEST_PROTOCOL)
def pdts(args: TrainArgs, model_idx):
    """
    Preliminary experiment with PDTS (approximate Bayesian optimization).

    We use a data set size of 50k and run until we have trained with 15k data
    points; our acquisition batch size is 50 and we initialise with 1000 data
    points. Each acquisition batch, the top-scoring test molecules (by
    Thompson sampling or greedy mean prediction) are moved from the test pool
    into the training set, and the proportion of the dataset's true top-1%
    molecules recovered so far is logged to wandb and saved to disk.

    :param args: A :class:`~chemprop.args.TrainArgs` object with experiment settings.
    :param model_idx: Index of this ensemble member / repeat (selects seeds and save paths).
    """
    ######## set up all logging ########
    logger = None

    # make save_dir
    save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
    makedirs(save_dir)

    # make results_dir
    results_dir = args.results_dir
    makedirs(results_dir)

    # initialise wandb
    #os.environ['WANDB_MODE'] = 'dryrun'
    wandb.init(name=args.wandb_name + '_' + str(model_idx),
               project=args.wandb_proj,
               reinit=True)
    #print('WANDB directory is:')
    #print(wandb.run.dir)
    ####################################

    ########## get data
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()

    ########## SMILES of top 1%
    # Ground-truth best molecules by the first target; used to score recovery.
    top1p = np.array(MoleculeDataset(data).targets())
    top1p_idx = np.argsort(-top1p[:, 0])[:int(args.max_data_size * 0.01)]
    SMILES = np.array(MoleculeDataset(data).smiles())[top1p_idx]

    ########## initial data splits
    args.seed = args.data_seeds[model_idx]
    data.shuffle(seed=args.seed)
    sizes = args.split_sizes
    train_size = int(sizes[0] * len(data))
    # train_orig / test_orig are kept as plain lists so molecules can be
    # moved between them; train_data / test_data are deep-copied datasets
    # that get normalized without touching the originals.
    train_orig = data[:train_size]
    test_orig = data[train_size:]
    train_data, test_data = copy.deepcopy(
        MoleculeDataset(train_orig)), copy.deepcopy(MoleculeDataset(test_orig))
    args.train_data_size = len(train_data)

    ########## standardising
    # features (train and test)
    features_scaler = train_data.normalize_features(replace_nan_token=0)
    test_data.normalize_features(features_scaler)
    # targets (train)
    train_targets = train_data.targets()
    test_targets = test_data.targets()
    scaler = StandardScaler().fit(train_targets)
    scaled_targets = scaler.transform(train_targets).tolist()
    train_data.set_targets(scaled_targets)

    ########## loss, metric functions
    loss_func = neg_log_like
    metric_func = get_metric_func(metric=args.metric)

    ########## data loaders
    # Cache featurizations only for small datasets (memory trade-off).
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers

    train_data_loader = MoleculeDataLoader(dataset=train_data,
                                           batch_size=args.batch_size,
                                           num_workers=num_workers,
                                           cache=cache,
                                           class_balance=args.class_balance,
                                           shuffle=True,
                                           seed=args.seed)
    test_data_loader = MoleculeDataLoader(dataset=test_data,
                                          batch_size=args.batch_size,
                                          num_workers=num_workers,
                                          cache=cache)

    ########## instantiating model, optimiser, scheduler (MAP)
    # set pytorch seed for random initial weights
    torch.manual_seed(args.pytorch_seeds[model_idx])
    # build model
    print(f'Building model {model_idx}')
    model = MoleculeModel(args)
    print(model)
    print(f'Number of parameters = {param_count(model):,}')
    if args.cuda:
        print('Moving model to cuda')
    model = model.to(args.device)
    # optimizer (log_noise gets no weight decay)
    optimizer = Adam([{
        'params': model.encoder.parameters()
    }, {
        'params': model.ffn.parameters()
    }, {
        'params': model.log_noise,
        'weight_decay': 0
    }],
                     lr=args.lr,
                     weight_decay=args.weight_decay)
    # learning rate scheduler
    scheduler = scheduler_const([args.lr])

    ####################################################################
    ####################################################################
    # FIRST THOMPSON ITERATION

    ### scores array
    ptds_scores = np.ones(args.pdts_batches + 1)
    batch_no = 0

    ### fill for batch 0
    # Proportion of the true top-1% molecules already present in the train set.
    SMILES_train = np.array(train_data.smiles())
    SMILES_stack = np.hstack((SMILES, SMILES_train))
    overlap = len(SMILES_stack) - len(np.unique(SMILES_stack))
    prop = overlap / len(SMILES)
    ptds_scores[batch_no] = prop
    wandb.log({
        "Proportion of top 1%": prop,
        "batch_no": batch_no
    }, commit=False)

    ### train MAP posterior
    gp_switch = False
    likelihood = None
    bbp_switch = None
    n_iter = 0
    for epoch in range(args.epochs_init_map):
        n_iter = train(model=model,
                       data_loader=train_data_loader,
                       loss_func=loss_func,
                       optimizer=optimizer,
                       scheduler=scheduler,
                       args=args,
                       n_iter=n_iter,
                       bbp_switch=bbp_switch)
        # save to save_dir
        #if epoch == args.epochs_init_map - 1:
            #save_checkpoint(os.path.join(save_dir, f'model_{batch_no}.pt'), model, scaler, features_scaler, args)

    # if X load from checkpoint path
    # NOTE(review): assumes a pre-trained MAP checkpoint exists at
    # args.checkpoint_path/model_{model_idx}/model_0.pt — confirm this
    # coupling with the commented-out save above.
    if args.bbp or args.gp or args.swag or args.sgld:
        model = load_checkpoint(args.checkpoint_path +
                                f'/model_{model_idx}/model_{batch_no}.pt',
                                device=args.device,
                                logger=None)

    ########## BBP
    if args.bbp:
        model_bbp = MoleculeModelBBP(
            args)  # instantiate with bayesian linear layers
        # Copy MAP weights into the Bayesian model as the mean parameters.
        for (_, param_bbp), (_, param_pre) in zip(model_bbp.named_parameters(),
                                                  model.named_parameters()):
            param_bbp.data = copy.deepcopy(
                param_pre.data.T)  # copy over parameters
        # instantiate rhos
        for layer in model_bbp.children():
            if isinstance(layer, BayesLinear):
                layer.init_rho(args.rho_min_bbp, args.rho_max_bbp)
        for layer in model_bbp.encoder.encoder.children():
            if isinstance(layer, BayesLinear):
                layer.init_rho(args.rho_min_bbp, args.rho_max_bbp)
        model = model_bbp  # name back
        # move to cuda
        if args.cuda:
            print('Moving bbp model to cuda')
            model = model.to(args.device)
        # optimiser and scheduler
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        scheduler = scheduler_const([args.lr])

        bbp_switch = 2
        n_iter = 0
        for epoch in range(args.epochs_init):
            n_iter = train(model=model,
                           data_loader=train_data_loader,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           bbp_switch=bbp_switch)

    ########## GP
    if args.gp:
        # feature_extractor
        model.featurizer = True
        feature_extractor = model
        # inducing points
        inducing_points = initial_inducing_points(train_data_loader,
                                                  feature_extractor, args)
        # GP layer
        gp_layer = GPLayer(inducing_points, args.num_tasks)
        # full DKL model
        model = copy.deepcopy(DKLMoleculeModel(feature_extractor, gp_layer))
        # likelihood (rank 0 restricts to diagonal matrix)
        # NOTE(review): num_tasks is hard-coded to 12 here while GPLayer uses
        # args.num_tasks — confirm this is intentional and not dataset-specific.
        likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(
            num_tasks=12, rank=0)
        # model and likelihood to CUDA
        if args.cuda:
            model.cuda()
            likelihood.cuda()
        # loss object
        loss_func = gpytorch.mlls.VariationalELBO(
            likelihood, model.gp_layer, num_data=args.train_data_size)
        # optimiser and scheduler
        params_list = [
            {
                'params': model.feature_extractor.parameters(),
                'weight_decay': args.weight_decay_gp
            },
            {
                'params': model.gp_layer.hyperparameters()
            },
            {
                'params': model.gp_layer.variational_parameters()
            },
            {
                'params': likelihood.parameters()
            },
        ]
        optimizer = torch.optim.Adam(params_list, lr=args.lr)
        scheduler = scheduler_const([args.lr])

        gp_switch = True
        n_iter = 0
        for epoch in range(args.epochs_init):
            n_iter = train(model=model,
                           data_loader=train_data_loader,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           gp_switch=gp_switch,
                           likelihood=likelihood)

    ########## SWAG
    if args.swag:
        model_core = copy.deepcopy(model)
        model = train_swag_pdts(model_core, train_data_loader, loss_func,
                                scaler, features_scaler, args, save_dir,
                                batch_no)

    ########## SGLD
    if args.sgld:
        model = train_sgld_pdts(model, train_data_loader, loss_func, scaler,
                                features_scaler, args, save_dir, batch_no)

    ### find top_idx
    top_idx = []  # need for thom
    sum_test_preds = np.zeros(
        (len(test_orig), args.num_tasks))  # need for greedy
    for sample in range(args.samples):
        # draw model from SWAG posterior
        if args.swag:
            model.sample(scale=1.0, cov=args.cov_mat, block=args.block)
        # retrieve sgld sample
        if args.sgld:
            model = load_checkpoint(
                args.save_dir +
                f'/model_{model_idx}/model_{batch_no}/model_{sample}.pt',
                device=args.device,
                logger=logger)

        test_preds = predict(model=model,
                             data_loader=test_data_loader,
                             args=args,
                             scaler=scaler,
                             test_data=True,
                             gp_sample=args.thompson,
                             bbp_sample=True)
        test_preds = np.array(test_preds)

        # thompson bit: for each posterior sample, take its next-best
        # not-yet-selected molecule (SGLD contributes 5 picks per sample).
        rank = 0
        # base length
        if args.sgld:
            base_length = 5 * sample + 4
        else:
            base_length = sample
        while args.thompson and (len(top_idx) <= base_length):
            top_unique_molecule = np.argsort(-test_preds[:, 0])[rank]
            rank += 1
            if top_unique_molecule not in top_idx:
                top_idx.append(top_unique_molecule)
        # add to sum_test_preds
        sum_test_preds += test_preds
        # print
        print('done sample ' + str(sample))
    # final top_idx: Thompson keeps the sampled picks; greedy averages
    # predictions over samples and takes the top 50.
    if args.thompson:
        top_idx = np.array(top_idx)
    else:
        sum_test_preds /= args.samples
        top_idx = np.argsort(-sum_test_preds[:, 0])[:50]

    ### transfer from test to train
    # Pop in descending index order so earlier pops don't shift later indices.
    top_idx = -np.sort(-top_idx)
    for idx in top_idx:
        train_orig.append(test_orig.pop(idx))
    train_data, test_data = copy.deepcopy(
        MoleculeDataset(train_orig)), copy.deepcopy(MoleculeDataset(test_orig))
    args.train_data_size = len(train_data)
    if args.gp:
        # ELBO normalisation depends on the (now larger) training set size.
        loss_func = gpytorch.mlls.VariationalELBO(
            likelihood, model.gp_layer, num_data=args.train_data_size)
    print(args.train_data_size)

    ### standardise features (train and test; using original features_scaler)
    train_data.normalize_features(features_scaler)
    test_data.normalize_features(features_scaler)

    ### standardise targets (train only; using original scaler)
    train_targets = train_data.targets()
    scaled_targets_tr = scaler.transform(train_targets).tolist()
    train_data.set_targets(scaled_targets_tr)

    ### create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data,
                                           batch_size=args.batch_size,
                                           num_workers=num_workers,
                                           cache=cache,
                                           class_balance=args.class_balance,
                                           shuffle=True,
                                           seed=args.seed)
    test_data_loader = MoleculeDataLoader(dataset=test_data,
                                          batch_size=args.batch_size,
                                          num_workers=num_workers,
                                          cache=cache)

    ####################################################################
    ####################################################################

    ##################################
    ########## thompson sampling loop
    ##################################
    for batch_no in range(1, args.pdts_batches + 1):

        ### fill in ptds_scores
        SMILES_train = np.array(train_data.smiles())
        SMILES_stack = np.hstack((SMILES, SMILES_train))
        overlap = len(SMILES_stack) - len(np.unique(SMILES_stack))
        prop = overlap / len(SMILES)
        ptds_scores[batch_no] = prop
        wandb.log({
            "Proportion of top 1%": prop,
            "batch_no": batch_no
        }, commit=False)

        ### train posterior
        n_iter = 0
        for epoch in range(args.epochs):
            n_iter = train(model=model,
                           data_loader=train_data_loader,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           gp_switch=gp_switch,
                           likelihood=likelihood,
                           bbp_switch=bbp_switch)
            # save to save_dir
            #if epoch == args.epochs - 1:
                #save_checkpoint(os.path.join(save_dir, f'model_{batch_no}.pt'), model, scaler, features_scaler, args)

        # if swag, load checkpoint
        if args.swag:
            model_core = load_checkpoint(
                args.checkpoint_path +
                f'/model_{model_idx}/model_{batch_no}.pt',
                device=args.device,
                logger=None)

        ########## SWAG
        if args.swag:
            model = train_swag_pdts(model_core, train_data_loader, loss_func,
                                    scaler, features_scaler, args, save_dir,
                                    batch_no)

        ########## SGLD
        if args.sgld:
            model = train_sgld_pdts(model, train_data_loader, loss_func,
                                    scaler, features_scaler, args, save_dir,
                                    batch_no)

        ### find top_idx
        top_idx = []  # need for thom
        sum_test_preds = np.zeros(
            (len(test_orig), args.num_tasks))  # need for greedy
        for sample in range(args.samples):
            # draw model from SWAG posterior
            if args.swag:
                model.sample(scale=1.0, cov=args.cov_mat, block=args.block)
            # retrieve sgld sample
            if args.sgld:
                model = load_checkpoint(
                    args.save_dir +
                    f'/model_{model_idx}/model_{batch_no}/model_{sample}.pt',
                    device=args.device,
                    logger=logger)

            test_preds = predict(model=model,
                                 data_loader=test_data_loader,
                                 args=args,
                                 scaler=scaler,
                                 test_data=True,
                                 gp_sample=args.thompson,
                                 bbp_sample=True)
            test_preds = np.array(test_preds)

            # thompson bit (same acquisition logic as the first iteration)
            rank = 0
            # base length
            if args.sgld:
                base_length = 5 * sample + 4
            else:
                base_length = sample
            while args.thompson and (len(top_idx) <= base_length):
                top_unique_molecule = np.argsort(-test_preds[:, 0])[rank]
                rank += 1
                if top_unique_molecule not in top_idx:
                    top_idx.append(top_unique_molecule)
            # add to sum_test_preds
            sum_test_preds += test_preds
            # print
            print('done sample ' + str(sample))
        # final top_idx
        if args.thompson:
            top_idx = np.array(top_idx)
        else:
            sum_test_preds /= args.samples
            top_idx = np.argsort(-sum_test_preds[:, 0])[:50]

        ### transfer from test to train
        # Pop in descending index order so earlier pops don't shift later indices.
        top_idx = -np.sort(-top_idx)
        for idx in top_idx:
            train_orig.append(test_orig.pop(idx))
        train_data, test_data = copy.deepcopy(
            MoleculeDataset(train_orig)), copy.deepcopy(
                MoleculeDataset(test_orig))
        args.train_data_size = len(train_data)
        if args.gp:
            loss_func = gpytorch.mlls.VariationalELBO(
                likelihood, model.gp_layer, num_data=args.train_data_size)
        print(args.train_data_size)

        ### standardise features (train and test; using original features_scaler)
        train_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)

        ### standardise targets (train only; using original scaler)
        train_targets = train_data.targets()
        scaled_targets_tr = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets_tr)

        ### create data loaders
        train_data_loader = MoleculeDataLoader(
            dataset=train_data,
            batch_size=args.batch_size,
            num_workers=num_workers,
            cache=cache,
            class_balance=args.class_balance,
            shuffle=True,
            seed=args.seed)
        test_data_loader = MoleculeDataLoader(dataset=test_data,
                                              batch_size=args.batch_size,
                                              num_workers=num_workers,
                                              cache=cache)

    # save scores
    np.savez(os.path.join(results_dir, f'ptds_{model_idx}'), ptds_scores)
def save_test_data(args: TrainArgs,
                   logger: Logger = None,
                   save_path: str = '/home/willlamb/results/test_targets') -> List[float]:
    """
    Recreates the data split used in training and saves the test-set targets
    to disk as a compressed ``.npz`` file.

    The split is reproduced with the same seed/split settings as training so
    that the saved targets line up with the predictions saved elsewhere.

    :param args: Arguments.
    :param logger: Logger (only forwarded to data loading/splitting helpers).
    :param save_path: Destination for the saved targets (``np.savez`` appends
        the ``.npz`` extension). Defaults to the historical hard-coded
        location for backward compatibility.
    :return: ``None``; the targets are written to ``save_path``.
    """
    debug = info = print

    # Print command line and args
    debug('Command line')
    debug(f'python {" ".join(sys.argv)}')
    debug('Args')
    debug(args)

    # Get data
    debug('Loading data')
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')

    # Split data — must mirror the training-time split exactly
    debug(f'Splitting data with seed {args.seed}')
    train_data, val_data, test_data = split_data(data=data,
                                                 split_type=args.split_type,
                                                 sizes=args.split_sizes,
                                                 seed=args.seed,
                                                 args=args,
                                                 logger=logger)

    # Feature scaling side effects are kept for parity with run_training,
    # even though they do not change the targets saved below.
    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(train_data)

    debug(f'Total size = {len(data):,} | '
          f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')

    # Scale training targets only (regression); test targets stay unscaled,
    # which is why no inverse transform is needed before saving.
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        train_targets = train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    # save down test targets
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    np.savez(save_path, np.array(test_targets))

    return None
def run_training(args: TrainArgs, logger: Logger = None) -> List[float]:
    """
    Trains an ensemble of models, draws posterior samples from each trained
    member (SWAG / SGLD / GP / BBP / DUN, depending on ``args``), saves
    per-sample predictions and uncertainties to ``args.results_dir``, and
    returns the Bayesian-model-average test scores.

    :param args: Arguments.
    :param logger: Logger.
    :return: ``BMA_scores`` — a list of per-task scores for the averaged
        (ensemble x samples) test predictions.
    """
    debug = info = print

    # Print command line and args
    debug('Command line')
    debug(f'python {" ".join(sys.argv)}')
    debug('Args')
    debug(args)

    # Save args
    args.save(os.path.join(args.save_dir, 'args.json'))

    # Get data
    debug('Loading data')
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')

    # Split data
    debug(f'Splitting data with seed {args.seed}')
    train_data, val_data, test_data = split_data(data=data,
                                                 split_type=args.split_type,
                                                 sizes=args.split_sizes,
                                                 seed=args.seed,
                                                 args=args,
                                                 logger=logger)

    # Features are normalized with a scaler fit on train only
    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(train_data)

    debug(f'Total size = {len(data):,} | '
          f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        train_smiles, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    # Get loss and metric functions
    loss_func = neg_log_like
    metric_func = get_metric_func(metric=args.metric)

    # Set up test set evaluation (test targets remain unscaled)
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # Automatically determine whether to cache
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers

    # Create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data,
                                           batch_size=args.batch_size,
                                           num_workers=num_workers,
                                           cache=cache,
                                           class_balance=args.class_balance,
                                           shuffle=True,
                                           seed=args.seed)
    val_data_loader = MoleculeDataLoader(dataset=val_data,
                                         batch_size=args.batch_size,
                                         num_workers=num_workers,
                                         cache=cache)
    test_data_loader = MoleculeDataLoader(dataset=test_data,
                                          batch_size=args.batch_size,
                                          num_workers=num_workers,
                                          cache=cache)

    ###########################################
    ########## Outer loop over ensemble members
    ###########################################

    for model_idx in range(args.ensemble_start_idx,
                           args.ensemble_start_idx + args.ensemble_size):

        # Set pytorch seed for random initial weights (one seed per member)
        torch.manual_seed(args.pytorch_seeds[model_idx])

        ######## set up all logging ########
        # make save_dir
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)

        # make results_dir
        results_dir = os.path.join(args.results_dir, f'model_{model_idx}')
        makedirs(results_dir)

        # initialise wandb (dryrun keeps everything local)
        os.environ['WANDB_MODE'] = 'dryrun'
        wandb.init(name=args.wandb_name + '_' + str(model_idx),
                   project=args.wandb_proj,
                   reinit=True)
        print('WANDB directory is:')
        print(wandb.run.dir)
        ####################################

        # Load/build model
        if args.checkpoint_path is not None:
            debug(f'Loading model {model_idx} from {args.checkpoint_path}')
            model = load_checkpoint(args.checkpoint_path +
                                    f'/model_{model_idx}/model.pt',
                                    device=args.device,
                                    logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = MoleculeModel(args)

        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
        model = model.to(args.device)

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler,
                        features_scaler, args)

        # Optimizer — log_noise gets no weight decay
        optimizer = Adam([{
            'params': model.encoder.parameters()
        }, {
            'params': model.ffn.parameters()
        }, {
            'params': model.log_noise,
            'weight_decay': 0
        }],
                         lr=args.init_lr,
                         weight_decay=args.weight_decay)

        # Learning rate scheduler
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in range(args.epochs):
            debug(f'Epoch {epoch}')

            n_iter = train(model=model,
                           data_loader=train_data_loader,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           logger=logger)

            val_scores = evaluate(model=model,
                                  data_loader=val_data_loader,
                                  args=args,
                                  num_tasks=args.num_tasks,
                                  metric_func=metric_func,
                                  dataset_type=args.dataset_type,
                                  scaler=scaler,
                                  logger=logger)

            # Average validation score
            avg_val_score = np.nanmean(val_scores)
            debug(f'Validation {args.metric} = {avg_val_score:.6f}')
            wandb.log({"Validation MAE": avg_val_score})

            # Save model checkpoint if improved validation score
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score:
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, 'model.pt'), model,
                                scaler, features_scaler, args)

            # After the Noam warm-up phase, switch to a plain constant-LR Adam
            if epoch == args.noam_epochs - 1:
                optimizer = Adam([{
                    'params': model.encoder.parameters()
                }, {
                    'params': model.ffn.parameters()
                }, {
                    'params': model.log_noise,
                    'weight_decay': 0
                }],
                                 lr=args.final_lr,
                                 weight_decay=args.weight_decay)
                scheduler = scheduler_const([args.final_lr])

        # load model with best validation score
        info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'),
                                device=args.device,
                                logger=logger)

        # SWAG training loop, returns swag_model
        if args.swag:
            model = train_swag(model, train_data, val_data, num_workers,
                               cache, loss_func, metric_func, scaler,
                               features_scaler, args, save_dir)

        # SGLD loop, which saves nets
        if args.sgld:
            model = train_sgld(model, train_data, val_data, num_workers,
                               cache, loss_func, metric_func, scaler,
                               features_scaler, args, save_dir)

        # GP loop
        if args.gp:
            model, likelihood = train_gp(model, train_data, val_data,
                                         num_workers, cache, metric_func,
                                         scaler, features_scaler, args,
                                         save_dir)

        # BBP
        if args.bbp:
            model = train_bbp(model, train_data, val_data, num_workers, cache,
                              loss_func, metric_func, scaler, features_scaler,
                              args, save_dir)

        # DUN
        if args.dun:
            model = train_dun(model, train_data, val_data, num_workers, cache,
                              loss_func, metric_func, scaler, features_scaler,
                              args, save_dir)

        ##################################
        ########## Inner loop over samples
        ##################################

        for sample_idx in range(args.samples):

            # draw model from SWAG posterior
            if args.swag:
                model.sample(scale=1.0, cov=args.cov_mat, block=args.block)

            # draw model from collected SGLD models
            if args.sgld:
                model = load_checkpoint(os.path.join(save_dir,
                                                     f'model_{sample_idx}.pt'),
                                        device=args.device,
                                        logger=logger)

            # make predictions (inverse-scaled back to original target space)
            test_preds = predict(model=model,
                                 data_loader=test_data_loader,
                                 args=args,
                                 scaler=scaler,
                                 test_data=True,
                                 bbp_sample=True)

            #######################################################################
            #######################################################################
            ##### SAVING STUFF DOWN

            if args.gp:

                # get test_preds_std (scaled back to original data)
                test_preds_std = predict_std_gp(model=model,
                                                data_loader=test_data_loader,
                                                args=args,
                                                scaler=scaler,
                                                likelihood=likelihood)

                # 1 - MEANS
                np.savez(os.path.join(results_dir, f'preds_{sample_idx}'),
                         np.array(test_preds))

                # 2 - STD, combined aleatoric and epistemic (we save down the stds, always)
                np.savez(os.path.join(results_dir, f'predsSTDEV_{sample_idx}'),
                         np.array(test_preds_std))

            else:

                # save test_preds and aleatoric uncertainties
                if args.dun:
                    # softmax over depth categories
                    log_cat = model.log_cat.detach().cpu().numpy()
                    cat = np.exp(log_cat) / np.sum(np.exp(log_cat))
                    np.savez(os.path.join(results_dir, f'cat_{sample_idx}'),
                             cat)

                    # samples from categorical dist and saves a depth MC sample
                    depth_sample = np.random.multinomial(1, cat).nonzero()[0][0]
                    test_preds_MCdepth = predict_MCdepth(
                        model=model,
                        data_loader=test_data_loader,
                        args=args,
                        scaler=scaler,
                        d=depth_sample)
                    np.savez(
                        os.path.join(results_dir,
                                     f'predsMCDEPTH_{sample_idx}'),
                        np.array(test_preds_MCdepth))

                # SWAG wraps the base net, so log_noise lives one level down
                if args.swag:
                    log_noise = model.base.log_noise
                else:
                    log_noise = model.log_noise
                # aleatoric noise, rescaled to original target units
                noise = np.exp(log_noise.detach().cpu().numpy()) * np.array(
                    scaler.stds)
                np.savez(os.path.join(results_dir, f'preds_{sample_idx}'),
                         np.array(test_preds))
                np.savez(os.path.join(results_dir, f'noise_{sample_idx}'),
                         noise)

            #######################################################################
            #######################################################################

            # add predictions to sum_test_preds
            if len(test_preds) != 0:
                sum_test_preds += np.array(test_preds)

            # evaluate predictions using metric function
            test_scores = evaluate_predictions(preds=test_preds,
                                               targets=test_targets,
                                               num_tasks=args.num_tasks,
                                               metric_func=metric_func,
                                               dataset_type=args.dataset_type,
                                               logger=logger)

            # compute average test score
            avg_test_score = np.nanmean(test_scores)
            info(f'Model {model_idx}, sample {sample_idx} test {args.metric} = {avg_test_score:.6f}')

    #################################
    ########## Bayesian Model Average
    #################################
    # note: this is an average over Bayesian samples AND components in an ensemble

    # compute number of prediction iterations
    pred_iterations = args.ensemble_size * args.samples

    # average predictions across iterations
    avg_test_preds = (sum_test_preds / pred_iterations).tolist()

    # evaluate
    BMA_scores = evaluate_predictions(preds=avg_test_preds,
                                      targets=test_targets,
                                      num_tasks=args.num_tasks,
                                      metric_func=metric_func,
                                      dataset_type=args.dataset_type,
                                      logger=logger)

    # average scores across tasks
    avg_BMA_test_score = np.nanmean(BMA_scores)
    info(f'BMA test {args.metric} = {avg_BMA_test_score:.6f}')

    return BMA_scores
def new_noise(args: TrainArgs, logger: Logger = None) -> List[float]:
    """
    Loads pre-trained models (one per ensemble member, per ``args.method``),
    fits a per-task Student-t distribution to the training residuals, and
    saves the fitted t parameters (df, loc, scale) for each posterior sample.

    Fixes over the previous version:
      * the residual/statistics loops were hard-coded to 12 tasks; they now
        use ``args.num_tasks``;
      * ``train_targets`` was only bound inside the regression branch, which
        raised ``NameError`` for non-regression datasets — the unscaled
        targets are now captured before any scaling.

    :param args: Arguments.
    :param logger: Logger.
    :return: ``None``; ``tstats_*.npz`` files are written under
        ``args.results_dir``.
    """
    debug = info = print

    # Get data
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()

    # Split data
    debug(f'Splitting data with seed {args.seed}')
    train_data, val_data, test_data = split_data(data=data,
                                                 split_type=args.split_type,
                                                 sizes=args.split_sizes,
                                                 seed=args.seed,
                                                 args=args,
                                                 logger=logger)

    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(train_data)

    # Keep the *unscaled* training targets: residuals below are computed
    # against these (predict() inverse-transforms with `scaler`).
    train_targets = train_data.targets()

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    # Automatically determine whether to cache
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers

    # Create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data,
                                           batch_size=args.batch_size,
                                           num_workers=num_workers,
                                           cache=cache)
    val_data_loader = MoleculeDataLoader(dataset=val_data,
                                         batch_size=args.batch_size,
                                         num_workers=num_workers,
                                         cache=cache)
    test_data_loader = MoleculeDataLoader(dataset=test_data,
                                          batch_size=args.batch_size,
                                          num_workers=num_workers,
                                          cache=cache)

    ###########################################
    ########## Outer loop over ensemble members
    ###########################################

    for model_idx in range(args.ensemble_start_idx,
                           args.ensemble_start_idx + args.ensemble_size):

        # load the model saved by the matching training method
        if (args.method == 'map') or (args.method == 'swag') or (args.method
                                                                 == 'sgld'):
            model = load_checkpoint(args.checkpoint_path +
                                    f'/model_{model_idx}/model.pt',
                                    device=args.device,
                                    logger=logger)

        if args.method == 'gp':
            args.num_inducing_points = 1200
            fake_model = MoleculeModel(args)
            fake_model.featurizer = True
            feature_extractor = fake_model
            inducing_points = initial_inducing_points(train_data_loader,
                                                      feature_extractor, args)
            gp_layer = GPLayer(inducing_points, args.num_tasks)
            model = load_checkpoint(
                args.checkpoint_path + f'/model_{model_idx}/DKN_model.pt',
                device=args.device,
                logger=None,
                template=DKLMoleculeModel(MoleculeModel(args, featurizer=True),
                                          gp_layer))

        if args.method == 'dropR' or args.method == 'dropA':
            model = load_checkpoint(args.checkpoint_path +
                                    f'/model_{model_idx}/model.pt',
                                    device=args.device,
                                    logger=logger)

        if args.method == 'bbp':
            template = MoleculeModelBBP(args)
            for layer in template.children():
                if isinstance(layer, BayesLinear):
                    layer.init_rho(args.rho_min_bbp, args.rho_max_bbp)
            for layer in template.encoder.encoder.children():
                if isinstance(layer, BayesLinear):
                    layer.init_rho(args.rho_min_bbp, args.rho_max_bbp)
            model = load_checkpoint(args.checkpoint_path +
                                    f'/model_{model_idx}/model_bbp.pt',
                                    device=args.device,
                                    logger=None,
                                    template=template)

        if args.method == 'dun':
            # DUN hyperparameters must match those used at training time
            args.prior_sig_dun = 0.05
            args.depth_min = 1
            args.depth_max = 5
            args.rho_min_dun = -5.5
            args.rho_max_dun = -5
            args.log_cat_init = 0
            template = MoleculeModelDUN(args)
            for layer in template.children():
                if isinstance(layer, BayesLinear):
                    layer.init_rho(args.rho_min_dun, args.rho_max_dun)
            for layer in template.encoder.encoder.children():
                if isinstance(layer, BayesLinear):
                    layer.init_rho(args.rho_min_dun, args.rho_max_dun)
            template.create_log_cat(args)
            model = load_checkpoint(args.checkpoint_path +
                                    f'/model_{model_idx}/model_dun.pt',
                                    device=args.device,
                                    logger=None,
                                    template=template)

        # make results_dir
        results_dir = os.path.join(args.results_dir, f'model_{model_idx}')
        makedirs(results_dir)

        # predictions on the training set (in original target units)
        train_preds = predict(model=model,
                              data_loader=train_data_loader,
                              args=args,
                              scaler=scaler,
                              test_data=False,
                              bbp_sample=False)
        train_preds = np.array(train_preds)
        train_targets_arr = np.array(train_targets)

        # fit a zero-centred Student-t (floc=0) to residuals, per task
        tstats = np.ones((args.num_tasks, 3))
        for task in range(args.num_tasks):
            resid = train_preds[:, task] - train_targets_arr[:, task]
            tstats[task] = np.array(stats.t.fit(resid, floc=0.0))

        ##################################
        ########## Inner loop over samples
        ##################################

        for sample_idx in range(args.samples):
            # the same tstats are saved once per sample, keeping the file
            # layout consistent with the per-sample prediction files
            np.savez(os.path.join(results_dir, f'tstats_{sample_idx}'),
                     tstats)
            print('done one')
def predict(model: nn.Module,
            data: MoleculeDataset,
            batch_size: int,
            scaler: StandardScaler = None) -> List[List[float]]:
    """
    Makes predictions on a dataset and collects the model's intermediate
    fingerprints for every molecule.

    After each forward pass, ``model.output_fp`` is read; it is assumed to
    hold 5 equal-length fingerprint groups (d0, d1, d2, final, mol) for the
    current batch, stacked along the first dimension — TODO confirm against
    the model implementation.

    :param model: A model.
    :param data: A MoleculeDataset.
    :param batch_size: Batch size.
    :param scaler: A StandardScaler object fit on the training targets.
    :return: A tuple ``(preds, check_fp)``: ``preds`` is a list of lists of
        predictions (outer list: examples, inner list: tasks); ``check_fp``
        is a 5-element list ``[d0, d1, d2, final, mol]`` of fingerprint lists.
    """
    model.eval()

    preds = []
    # one accumulator per fingerprint group: d0, d1, d2, final, mol
    fp_groups = [[], [], [], [], []]

    num_iters, iter_step = len(data), batch_size

    for i in trange(0, num_iters, iter_step):
        # Prepare batch
        mol_batch = MoleculeDataset(data[i:i + batch_size])
        smiles_batch, features_batch = mol_batch.smiles(), mol_batch.features()

        # Run model
        batch = smiles_batch
        with torch.no_grad():
            batch_preds = model(batch, features_batch)

        batch_preds = batch_preds.data.cpu().numpy()

        # Inverse scale if regression
        if scaler is not None:
            batch_preds = scaler.inverse_transform(batch_preds)

        # Collect vectors
        preds.extend(batch_preds.tolist())

        # Split the stacked fingerprints into their 5 groups. The per-group
        # segment length is simply len // 5 for ANY batch (full-sized or the
        # smaller final batch); this replaces the old remainder-vs-batch_size
        # branching that compared a float division result for equality.
        each_fp = model.output_fp.tolist()
        seg = len(each_fp) // 5
        for g in range(5):
            fp_groups[g].extend(each_fp[g * seg:(g + 1) * seg])

    check_fp = fp_groups
    return preds, check_fp
def predict(model: nn.Module,
            data: MoleculeDataset,
            batch_size: int,
            sampling_size: int,
            scaler: StandardScaler = None):
    # -> Tuple[Union[List[List[float]], None], ...]:
    """
    Makes predictions on a dataset using an ensemble of models.

    The model's ``aleatoric`` and ``mc_dropout`` flags select one of four
    prediction modes; the return tuple always has 4 elements
    (preds, aleatoric uncertainties or None, epistemic uncertainties or None,
    features).

    :param model: A model.
    :param data: A MoleculeDataset.
    :param batch_size: Batch size.
    :param sampling_size: Sampling size for MC-Dropout.
    :param scaler: A StandardScaler object fit on the training targets.
    :return: A 3-length tuple for predictions, aleatoric uncertainties and
             epistemic uncertainties. Each element is a list of lists. The outer
             list is examples while the inner list is tasks. The second and/or
             the third element of the tuple can be None if not computed.
    """
    model.eval()

    preds = []
    ale_unc = []      # aleatoric (data) uncertainty, per example/task
    epi_unc = []      # epistemic (model) uncertainty, per example/task
    features = []     # NOTE(review): only populated in the aleatoric,
                      # non-MC-dropout branch; empty otherwise — confirm intended

    num_iters, iter_step = len(data), batch_size

    # if aleatoric uncertainty is enabled
    aleatoric = model.aleatoric
    # if MC-Dropout
    mc_dropout = model.mc_dropout

    for i in trange(0, num_iters, iter_step):
        # Prepare batch
        mol_batch = MoleculeDataset(data[i:i + batch_size])
        smiles_batch, features_batch = mol_batch.smiles(), mol_batch.features()

        # Run model
        batch = smiles_batch

        if not aleatoric and not mc_dropout:
            # plain single forward pass
            with torch.no_grad():
                batch_preds = model(batch, features_batch)
                batch_preds = batch_preds.data.cpu().numpy()

                # Inverse scale if regression
                if scaler is not None:
                    batch_preds = scaler.inverse_transform(batch_preds)

                # Collect vectors
                batch_preds = batch_preds.tolist()
                preds.extend(batch_preds)

        elif aleatoric and not mc_dropout:
            # model predicts a mean and a log-variance head (+ features)
            with torch.no_grad():
                #############################
                # batch feature
                batch_preds, batch_logvar, batch_feature = model(
                    batch, features_batch)
                #############################
                batch_var = torch.exp(batch_logvar)
                batch_preds = batch_preds.data.cpu().numpy()
                batch_ale_unc = batch_var.data.cpu().numpy()

                ############################
                batch_feature = batch_feature.data.cpu().numpy()
                features.extend(batch_feature)
                ############################

                # Inverse scale if regression
                if scaler is not None:
                    batch_preds = scaler.inverse_transform(batch_preds)
                    # variances need the variance-specific inverse transform
                    batch_ale_unc = scaler.inverse_transform_variance(
                        batch_ale_unc)

                # Collect vectors
                batch_preds = batch_preds.tolist()
                batch_ale_unc = batch_ale_unc.tolist()
                preds.extend(batch_preds)
                ale_unc.extend(batch_ale_unc)

        elif not aleatoric and mc_dropout:
            # epistemic uncertainty = variance over `sampling_size` stochastic
            # forward passes (NOTE(review): assumes dropout stays active after
            # model.eval() — confirm the model re-enables its dropout layers)
            with torch.no_grad():
                P_mean = []
                for ss in range(sampling_size):
                    batch_preds = model(batch, features_batch)
                    P_mean.append(batch_preds)
                batch_preds = torch.mean(torch.stack(P_mean), 0)
                batch_epi_unc = torch.var(torch.stack(P_mean), 0)
                batch_preds = batch_preds.data.cpu().numpy()
                batch_epi_unc = batch_epi_unc.data.cpu().numpy()

                # Inverse scale if regression
                if scaler is not None:
                    batch_preds = scaler.inverse_transform(batch_preds)
                    batch_epi_unc = scaler.inverse_transform_variance(
                        batch_epi_unc)

                # Collect vectors
                batch_preds = batch_preds.tolist()
                batch_epi_unc = batch_epi_unc.tolist()
                preds.extend(batch_preds)
                epi_unc.extend(batch_epi_unc)

        elif aleatoric and mc_dropout:
            # both: mean/variance over samples of the mean head (epistemic)
            # plus the averaged variance head (aleatoric)
            with torch.no_grad():
                P_mean = []
                P_logvar = []  # despite the name, stores exp(logvar) = variance
                for ss in range(sampling_size):
                    batch_preds, batch_logvar = model(batch, features_batch)
                    P_mean.append(batch_preds)
                    P_logvar.append(torch.exp(batch_logvar))
                batch_preds = torch.mean(torch.stack(P_mean), 0)
                batch_ale_unc = torch.mean(torch.stack(P_logvar), 0)
                batch_epi_unc = torch.var(torch.stack(P_mean), 0)
                batch_preds = batch_preds.data.cpu().numpy()
                batch_ale_unc = batch_ale_unc.data.cpu().numpy()
                batch_epi_unc = batch_epi_unc.data.cpu().numpy()

                # Inverse scale if regression
                if scaler is not None:
                    batch_preds = scaler.inverse_transform(batch_preds)
                    batch_ale_unc = scaler.inverse_transform_variance(
                        batch_ale_unc)
                    batch_epi_unc = scaler.inverse_transform_variance(
                        batch_epi_unc)

                # Collect vectors
                batch_preds = batch_preds.tolist()
                batch_ale_unc = batch_ale_unc.tolist()
                batch_epi_unc = batch_epi_unc.tolist()
                preds.extend(batch_preds)
                ale_unc.extend(batch_ale_unc)
                epi_unc.extend(batch_epi_unc)

    # Missing uncertainty kinds are returned as None
    if not aleatoric and not mc_dropout:
        return preds, None, None, features
    elif aleatoric and not mc_dropout:
        return preds, ale_unc, None, features
    elif not aleatoric and mc_dropout:
        return preds, None, epi_unc, features
    elif aleatoric and mc_dropout:
        return preds, ale_unc, epi_unc, features
def predict(model: nn.Module,
            data_loader: MoleculeDataLoader,
            disable_progress_bar: bool = False,
            scaler: StandardScaler = None) -> List[List[float]]:
    """
    Makes predictions on a dataset using an ensemble of models.

    The shape of the return value depends on runtime model state:
    ``model.uncertainty`` (UQ method name), ``model.mve`` (mean-variance
    estimation: interleaved mean/variance outputs), and ``model.training``
    (whether the caller left the model in training mode).

    :param model: A model.
    :param data_loader: A MoleculeDataLoader.
    :param disable_progress_bar: Whether to disable the progress bar.
    :param scaler: A StandardScaler object fit on the training targets.
    :return: A list of lists of predictions. The outer list is examples
             while the inner list is tasks. For MVE or two-output UQ methods
             a ``(predictions, variances)`` pair may be returned instead.
    """
    UQ = model.uncertainty
    # these UQ methods return (preds, logvar) per forward pass
    two_vals = UQ == 'Dropout_VI' or UQ == 'Ensemble'
    mve = model.mve
    training = model.training

    # Dropout_VI needs dropout active, so the model is deliberately NOT put
    # into eval mode for it. NOTE(review): eval mode is never restored for the
    # other methods — confirm callers reset training mode themselves.
    if UQ != 'Dropout_VI':
        model.eval()

    total_batch_preds = []
    total_var_preds = []

    for batch in tqdm(data_loader, disable=disable_progress_bar):
        # Prepare batch
        batch: MoleculeDataset
        mol_batch, features_batch = batch.batch_graph(), batch.features()

        # Make predictions
        if two_vals:
            with torch.no_grad():
                batch_preds, logvar_preds = model(mol_batch, features_batch)
                # log-variance -> variance
                var_preds = torch.exp(logvar_preds)
                var_preds = var_preds.data.cpu().numpy().tolist()
                total_var_preds.extend(var_preds)
        else:
            with torch.no_grad():
                batch_preds = model(mol_batch, features_batch)

        batch_preds = batch_preds.data.cpu().numpy()

        # Collect vectors
        batch_preds = batch_preds.tolist()
        total_batch_preds.extend(batch_preds)

    if mve:
        # MVE interleaves outputs per task: even columns are means (p),
        # odd columns are variances (c)
        p = []
        c = []
        for i in range(len(total_batch_preds)):
            p.append([
                total_batch_preds[i][j]
                for j in range(len(total_batch_preds[i])) if j % 2 == 0
            ])
            c.append([
                total_batch_preds[i][j]
                for j in range(len(total_batch_preds[i])) if j % 2 == 1
            ])
        if scaler is not None:
            p = scaler.inverse_transform(p).tolist()
            # variances scale by stds squared
            c = (scaler.stds**2 * c).tolist()
        if not training:
            return p, c
        else:
            # during training only the means are needed
            return p

    # Inverse scale if regression
    if scaler is not None:
        total_batch_preds = scaler.inverse_transform(
            total_batch_preds).tolist()

    # variances are only returned for two-output UQ methods at inference time
    if not UQ or training or not two_vals:
        return total_batch_preds
    else:
        return total_batch_preds, total_var_preds
def run_training_gnn_xgb(args: TrainArgs, logger: Logger = None) -> List[float]:
    """
    Trains an ensemble of GNN models, evaluates them on the test set, and
    additionally extracts learned features for the train/val/test sets (from
    the last trained ensemble member) for downstream use (e.g. XGBoost).

    :param args: Arguments.
    :param logger: Logger.
    :return: A 7-tuple: ensemble test scores, train/val/test features, and
             train/val/test targets.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Print command line
    debug('Command line')
    debug(f'python {" ".join(sys.argv)}')

    # Print args
    debug('Args')
    debug(args)

    # Save args
    args.save(os.path.join(args.save_dir, 'args.json'))

    # Set pytorch seed for random initial weights
    torch.manual_seed(args.pytorch_seed)

    # Get data
    debug('Loading data')
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')

    # Split data — separate val/test files override the random split
    debug(f'Splitting data with seed {args.seed}')
    if args.separate_test_path:
        test_data = get_data(path=args.separate_test_path,
                             args=args,
                             features_path=args.separate_test_features_path,
                             logger=logger)
    if args.separate_val_path:
        val_data = get_data(path=args.separate_val_path,
                            args=args,
                            features_path=args.separate_val_features_path,
                            logger=logger)

    if args.separate_val_path and args.separate_test_path:
        train_data = data
    elif args.separate_val_path:
        train_data, _, test_data = split_data(data=data,
                                              split_type=args.split_type,
                                              sizes=(0.8, 0.0, 0.2),
                                              seed=args.seed,
                                              args=args,
                                              logger=logger)
    elif args.separate_test_path:
        train_data, val_data, _ = split_data(data=data,
                                             split_type=args.split_type,
                                             sizes=(0.8, 0.2, 0.0),
                                             seed=args.seed,
                                             args=args,
                                             logger=logger)
    else:
        train_data, val_data, test_data = split_data(
            data=data,
            split_type=args.split_type,
            sizes=args.split_sizes,
            seed=args.seed,
            args=args,
            logger=logger)

    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(data)
        debug('Class sizes')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(
                f'{args.task_names[i]} '
                f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}'
            )

    if args.save_smiles_splits:
        save_smiles_splits(train_data=train_data,
                           val_data=val_data,
                           test_data=test_data,
                           data_path=args.data_path,
                           save_dir=args.save_dir)

    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(train_data)

    debug(f'Total size = {len(data):,} | '
          f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    # NOTE(review): `train_targets` (returned at the end) is only bound in this
    # branch — a classification run would raise NameError at the return; confirm
    # this entry point is regression-only.
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        train_smiles, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    # Get loss and metric functions
    loss_func = get_loss_func(args)
    metric_func = get_metric_func(metric=args.metric)

    # Set up test set evaluation
    val_smiles, val_targets = val_data.smiles(), val_data.targets()
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    if args.dataset_type == 'multiclass':
        sum_test_preds = np.zeros(
            (len(test_smiles), args.num_tasks, args.multiclass_num_classes))
    else:
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # Automatically determine whether to cache
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers

    # Create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data,
                                           batch_size=args.batch_size,
                                           num_workers=num_workers,
                                           cache=cache,
                                           class_balance=args.class_balance,
                                           shuffle=True,
                                           seed=args.seed)
    val_data_loader = MoleculeDataLoader(dataset=val_data,
                                         batch_size=args.batch_size,
                                         num_workers=num_workers,
                                         cache=cache)
    test_data_loader = MoleculeDataLoader(dataset=test_data,
                                          batch_size=args.batch_size,
                                          num_workers=num_workers,
                                          cache=cache)

    # Train ensemble of models
    for model_idx in range(args.ensemble_size):
        # Tensorboard writer (argument name differs across tensorboardX versions)
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)
        try:
            writer = SummaryWriter(log_dir=save_dir)
        except:
            writer = SummaryWriter(logdir=save_dir)

        # Load/build model
        if args.checkpoint_paths is not None:
            debug(
                f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}'
            )
            model = load_checkpoint(args.checkpoint_paths[model_idx],
                                    logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = MoleculeModel(args)

        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
        model = model.to(args.device)

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler,
                        features_scaler, args)

        # Optimizers
        optimizer = build_optimizer(model, args)

        # Learning rate schedulers
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in trange(args.epochs):
            debug(f'Epoch {epoch}')

            n_iter = train(model=model,
                           data_loader=train_data_loader,
                           loss_func=loss_func,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           args=args,
                           n_iter=n_iter,
                           logger=logger,
                           writer=writer)
            # ExponentialLR steps per epoch; other schedulers step inside train()
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()
            val_scores = evaluate(model=model,
                                  data_loader=val_data_loader,
                                  num_tasks=args.num_tasks,
                                  metric_func=metric_func,
                                  dataset_type=args.dataset_type,
                                  scaler=scaler,
                                  logger=logger)

            # Average validation score
            avg_val_score = np.nanmean(val_scores)
            debug(f'Validation {args.metric} = {avg_val_score:.6f}')
            writer.add_scalar(f'validation_{args.metric}', avg_val_score,
                              n_iter)

            if args.show_individual_scores:
                # Individual validation scores
                for task_name, val_score in zip(args.task_names, val_scores):
                    debug(
                        f'Validation {task_name} {args.metric} = {val_score:.6f}'
                    )
                    writer.add_scalar(f'validation_{task_name}_{args.metric}',
                                      val_score, n_iter)

            # Save model checkpoint if improved validation score
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score:
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, 'model.pt'), model,
                                scaler, features_scaler, args)

        # Evaluate on test set using model with best validation score
        info(
            f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}'
        )
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'),
                                device=args.device,
                                logger=logger)

        # this predict() variant returns (preds, features); features are
        # discarded here and extracted again after the ensemble loop
        test_preds, _ = predict(model=model,
                                data_loader=test_data_loader,
                                scaler=scaler)
        test_scores = evaluate_predictions(preds=test_preds,
                                           targets=test_targets,
                                           num_tasks=args.num_tasks,
                                           metric_func=metric_func,
                                           dataset_type=args.dataset_type,
                                           logger=logger)

        if len(test_preds) != 0:
            sum_test_preds += np.array(test_preds)

        # Average test score
        avg_test_score = np.nanmean(test_scores)
        info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
        writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)

        if args.show_individual_scores:
            # Individual test scores
            for task_name, test_score in zip(args.task_names, test_scores):
                info(
                    f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}'
                )
                writer.add_scalar(f'test_{task_name}_{args.metric}',
                                  test_score, n_iter)
        writer.close()

    # Evaluate ensemble on test set
    avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()

    ensemble_scores = evaluate_predictions(preds=avg_test_preds,
                                           targets=test_targets,
                                           num_tasks=args.num_tasks,
                                           metric_func=metric_func,
                                           dataset_type=args.dataset_type,
                                           logger=logger)

    # Average ensemble score
    avg_ensemble_test_score = np.nanmean(ensemble_scores)
    info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')

    # Individual ensemble scores
    if args.show_individual_scores:
        for task_name, ensemble_score in zip(args.task_names,
                                             ensemble_scores):
            info(
                f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}'
            )

    # Extract learned features for downstream models.
    # NOTE(review): `model` here is the last ensemble member's best checkpoint,
    # not an ensemble average — confirm this is the intended feature source.
    _, train_feature = predict(model=model,
                               data_loader=train_data_loader,
                               scaler=scaler)
    _, val_feature = predict(model=model,
                             data_loader=val_data_loader,
                             scaler=scaler)
    _, test_feature = predict(model=model,
                              data_loader=test_data_loader,
                              scaler=scaler)

    return ensemble_scores, train_feature, val_feature, test_feature, train_targets, val_targets, test_targets
def predict(model: nn.Module,
            data_loader: MoleculeDataLoader,
            args: TrainArgs,
            disable_progress_bar: bool = False,
            scaler: StandardScaler = None,
            test_data: bool = False,
            gp_sample: bool = False,
            bbp_sample: bool = False) -> List[List[float]]:
    """
    Makes predictions on a dataset using an ensemble of models.

    :param model: A model.
    :param data_loader: A MoleculeDataLoader.
    :param args: Arguments.
    :param disable_progress_bar: Whether to disable the progress bar.
        (NOTE(review): currently unused — the tqdm wrapper was removed upstream.)
    :param scaler: A StandardScaler object fit on the training targets.
    :param test_data: Flag indicating whether data is test data.
    :param gp_sample: If True, draw a sample from the GP predictive distribution
        instead of using its mean.
    :param bbp_sample: Forwarded to Bayesian models' forward pass to control
        weight sampling.
    :return: A list of lists of predictions. The outer list is examples while
        the inner list is tasks.
    """
    # Seed chosen once so that the SAME network is sampled across all batches
    # (Thompson sampling). int(...) avoids passing a float to randint, which
    # is deprecated in recent NumPy.
    if args.thompson:
        network_seed = np.random.randint(int(1e15))

    # Detect the model variant. hasattr() replaces the original bare
    # try/except probes, which could silently swallow unrelated errors;
    # nn.Module raises AttributeError for missing attributes, so this is
    # behaviorally equivalent for attribute detection.
    gp = hasattr(model, 'gp_layer')                  # GP readout head
    dun = hasattr(model, 'log_cat')                  # depth-uncertainty network
    bbp = any(isinstance(layer, BayesLinear) for layer in model.children())

    # Set model to eval mode (disables dropout/batch-norm updates).
    model.eval()

    # Re-enable dropout layers on test data when MC-dropout uncertainty
    # is requested via args.test_dropout.
    if args.test_dropout and test_data:
        model.apply(enable_dropout)

    preds = []

    for batch in data_loader:
        # Prepare batch
        batch: MoleculeDataset
        mol_batch, features_batch = batch.batch_graph(), batch.features()

        # Make predictions
        with torch.no_grad():
            if gp:
                # Forward pass returns a predictive distribution; either
                # sample it or take its mean.
                dist = model(mol_batch, features_batch)
                batch_preds = dist.sample() if gp_sample else dist.mean
            elif bbp:
                if dun:
                    batch_preds, _, _, _ = model(mol_batch, features_batch, sample=bbp_sample)
                else:
                    # Re-seeding per batch keeps the sampled weights identical
                    # across batches for Thompson sampling.
                    if args.thompson:
                        torch.manual_seed(network_seed)
                    batch_preds, _ = model(mol_batch, features_batch, sample=bbp_sample)
            else:
                if args.thompson:
                    torch.manual_seed(network_seed)
                batch_preds = model(mol_batch, features_batch)

            batch_preds = batch_preds.data.cpu().numpy()

        # Inverse scale if regression
        if scaler is not None:
            batch_preds = scaler.inverse_transform(batch_preds)

        # Collect vectors
        batch_preds = batch_preds.tolist()
        preds.extend(batch_preds)

    return preds