def load_checkpoint(path: str,
                    device: torch.device = None,
                    logger: logging.Logger = None) -> MoleculeModel:
    """
    Loads a model checkpoint.

    :param path: Path where checkpoint is saved.
    :param device: Device where the model will be moved.
    :param logger: A logger for recording output.
    :return: The loaded :class:`~chemprop.models.model.MoleculeModel`.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Load model and args
    state = torch.load(path, map_location=lambda storage, loc: storage)
    args = TrainArgs()
    args.from_dict(vars(state['args']), skip_unsettable=True)
    loaded_state_dict = state['state_dict']

    if device is not None:
        args.device = device

    # Build model
    model = MoleculeModel(args)
    model_state_dict = model.state_dict()

    # Skip missing parameters and parameters of mismatched size
    pretrained_state_dict = {}
    for loaded_param_name in loaded_state_dict.keys():
        # Backward compatibility for parameter names
        if re.match(r'(encoder\.encoder\.)([Wc])', loaded_param_name):
            param_name = loaded_param_name.replace('encoder.encoder', 'encoder.encoder.0')
        else:
            param_name = loaded_param_name

        # Load pretrained parameter, skipping unmatched parameters
        if param_name not in model_state_dict:
            info(f'Warning: Pretrained parameter "{loaded_param_name}" cannot be found in model parameters.')
        elif model_state_dict[param_name].shape != loaded_state_dict[loaded_param_name].shape:
            info(f'Warning: Pretrained parameter "{loaded_param_name}" '
                 f'of shape {loaded_state_dict[loaded_param_name].shape} does not match corresponding '
                 f'model parameter of shape {model_state_dict[param_name].shape}.')
        else:
            debug(f'Loading pretrained parameter "{loaded_param_name}".')
            pretrained_state_dict[param_name] = loaded_state_dict[loaded_param_name]

    # Load pretrained weights
    model_state_dict.update(pretrained_state_dict)
    model.load_state_dict(model_state_dict)

    if args.cuda:
        debug('Moving model to cuda')
        model = model.to(args.device)

    return model

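# Example usage (hypothetical paths, not part of the original module): a minimal
# sketch of restoring a trained checkpoint for inference, assuming the checkpoint
# was written by save_checkpoint() as in the training loops below.
def _example_restore_for_inference(checkpoint_path: str = 'save_dir/model_0/model.pt') -> MoleculeModel:
    """Sketch only: load a checkpoint onto CPU and switch to eval mode."""
    model = load_checkpoint(checkpoint_path, device=torch.device('cpu'))
    model.eval()  # disable dropout/batch-norm updates for prediction
    return model
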
def run_training_gnn_xgb(args: TrainArgs, logger: Logger = None) -> Tuple:
    """
    Trains a model and returns test scores on the model checkpoint with the
    highest validation score, along with the learned GNN features and the
    targets for each split (intended for downstream XGBoost training).

    :param args: Arguments.
    :param logger: Logger.
    :return: A tuple of (ensemble scores, train/val/test features, train/val/test targets).
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Print command line
    debug('Command line')
    debug(f'python {" ".join(sys.argv)}')

    # Print args
    debug('Args')
    debug(args)

    # Save args
    args.save(os.path.join(args.save_dir, 'args.json'))

    # Set pytorch seed for random initial weights
    torch.manual_seed(args.pytorch_seed)

    # Get data
    debug('Loading data')
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')

    # Split data
    debug(f'Splitting data with seed {args.seed}')
    if args.separate_test_path:
        test_data = get_data(path=args.separate_test_path, args=args,
                             features_path=args.separate_test_features_path, logger=logger)
    if args.separate_val_path:
        val_data = get_data(path=args.separate_val_path, args=args,
                            features_path=args.separate_val_features_path, logger=logger)

    if args.separate_val_path and args.separate_test_path:
        train_data = data
    elif args.separate_val_path:
        train_data, _, test_data = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.0, 0.2),
                                              seed=args.seed, args=args, logger=logger)
    elif args.separate_test_path:
        train_data, val_data, _ = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.2, 0.0),
                                             seed=args.seed, args=args, logger=logger)
    else:
        train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes,
                                                     seed=args.seed, args=args, logger=logger)

    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(data)
        debug('Class sizes')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(f'{args.task_names[i]} '
                  f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}')

    if args.save_smiles_splits:
        save_smiles_splits(train_data=train_data, val_data=val_data, test_data=test_data,
                           data_path=args.data_path, save_dir=args.save_dir)

    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(train_data)

    debug(f'Total size = {len(data):,} | '
          f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    # Targets are captured before scaling so the unscaled values can be returned below
    # (the original code only defined train_targets inside the regression branch,
    # which would raise a NameError at the return for classification data).
    train_smiles, train_targets = train_data.smiles(), train_data.targets()
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    # Get loss and metric functions
    loss_func = get_loss_func(args)
    metric_func = get_metric_func(metric=args.metric)

    # Set up test set evaluation
    val_smiles, val_targets = val_data.smiles(), val_data.targets()
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    if args.dataset_type == 'multiclass':
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks, args.multiclass_num_classes))
    else:
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # Automatically determine whether to cache
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers

    # Create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data, batch_size=args.batch_size,
                                           num_workers=num_workers, cache=cache,
                                           class_balance=args.class_balance, shuffle=True, seed=args.seed)
    val_data_loader = MoleculeDataLoader(dataset=val_data, batch_size=args.batch_size,
                                         num_workers=num_workers, cache=cache)
    test_data_loader = MoleculeDataLoader(dataset=test_data, batch_size=args.batch_size,
                                          num_workers=num_workers, cache=cache)

    # Train ensemble of models
    for model_idx in range(args.ensemble_size):
        # Tensorboard writer
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)
        try:
            writer = SummaryWriter(log_dir=save_dir)
        except:
            writer = SummaryWriter(logdir=save_dir)

        # Load/build model
        if args.checkpoint_paths is not None:
            debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}')
            model = load_checkpoint(args.checkpoint_paths[model_idx], logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = MoleculeModel(args)

        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
            model = model.to(args.device)

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)

        # Optimizers
        optimizer = build_optimizer(model, args)

        # Learning rate schedulers
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in trange(args.epochs):
            debug(f'Epoch {epoch}')

            n_iter = train(model=model, data_loader=train_data_loader, loss_func=loss_func,
                           optimizer=optimizer, scheduler=scheduler, args=args,
                           n_iter=n_iter, logger=logger, writer=writer)
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()

            val_scores = evaluate(model=model, data_loader=val_data_loader, num_tasks=args.num_tasks,
                                  metric_func=metric_func, dataset_type=args.dataset_type,
                                  scaler=scaler, logger=logger)

            # Average validation score
            avg_val_score = np.nanmean(val_scores)
            debug(f'Validation {args.metric} = {avg_val_score:.6f}')
            writer.add_scalar(f'validation_{args.metric}', avg_val_score, n_iter)

            if args.show_individual_scores:
                # Individual validation scores
                for task_name, val_score in zip(args.task_names, val_scores):
                    debug(f'Validation {task_name} {args.metric} = {val_score:.6f}')
                    writer.add_scalar(f'validation_{task_name}_{args.metric}', val_score, n_iter)

            # Save model checkpoint if improved validation score
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score:
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)

        # Evaluate on test set using model with best validation score
        info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'), device=args.device, logger=logger)

        test_preds, _ = predict(model=model, data_loader=test_data_loader, scaler=scaler)
        test_scores = evaluate_predictions(preds=test_preds, targets=test_targets, num_tasks=args.num_tasks,
                                           metric_func=metric_func, dataset_type=args.dataset_type, logger=logger)

        if len(test_preds) != 0:
            sum_test_preds += np.array(test_preds)

        # Average test score
        avg_test_score = np.nanmean(test_scores)
        info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
        writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)

        if args.show_individual_scores:
            # Individual test scores
            for task_name, test_score in zip(args.task_names, test_scores):
                info(f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}')
                writer.add_scalar(f'test_{task_name}_{args.metric}', test_score, n_iter)
        writer.close()

    # Evaluate ensemble on test set
    avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()

    ensemble_scores = evaluate_predictions(preds=avg_test_preds, targets=test_targets, num_tasks=args.num_tasks,
                                           metric_func=metric_func, dataset_type=args.dataset_type, logger=logger)

    # Average ensemble score
    avg_ensemble_test_score = np.nanmean(ensemble_scores)
    info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')

    # Individual ensemble scores
    if args.show_individual_scores:
        for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
            info(f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}')

    # Extract the learned GNN features for each split (for downstream XGBoost training)
    _, train_feature = predict(model=model, data_loader=train_data_loader, scaler=scaler)
    _, val_feature = predict(model=model, data_loader=val_data_loader, scaler=scaler)
    _, test_feature = predict(model=model, data_loader=test_data_loader, scaler=scaler)

    return ensemble_scores, train_feature, val_feature, test_feature, train_targets, val_targets, test_targets

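# Example (hypothetical): the features returned by run_training_gnn_xgb are
# intended for a downstream gradient-boosted model, as the function name
# suggests. A minimal single-task sketch, assuming xgboost is installed and
# that targets flatten to 1-D; the hyperparameters are placeholders.
def _example_fit_xgb_on_gnn_features(train_feature, train_targets, val_feature, val_targets):
    """Sketch only: fit XGBoost on frozen GNN features."""
    import xgboost as xgb  # assumed dependency, not imported by the original module

    X_train, y_train = np.array(train_feature), np.array(train_targets).ravel()
    X_val, y_val = np.array(val_feature), np.array(val_targets).ravel()
    booster = xgb.XGBRegressor(n_estimators=500, learning_rate=0.05, max_depth=6)
    booster.fit(X_train, y_train, eval_set=[(X_val, y_val)], verbose=False)
    return booster
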
def run_training(args: TrainArgs, data: MoleculeDataset, logger: Logger = None) -> Dict[str, List[float]]:
    """
    Loads data, trains a Chemprop model, and returns test scores for the model checkpoint with the highest validation score.

    :param args: A :class:`~chemprop.args.TrainArgs` object containing arguments for
                 loading data and training the Chemprop model.
    :param data: A :class:`~chemprop.data.MoleculeDataset` containing the data.
    :param logger: A logger to record output.
    :return: A dictionary mapping each metric in :code:`args.metrics` to a list of values for each task.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Set pytorch seed for random initial weights
    torch.manual_seed(args.pytorch_seed)

    # Split data
    debug(f'Splitting data with seed {args.seed}')
    if args.separate_test_path:
        test_data = get_data(path=args.separate_test_path, args=args,
                             features_path=args.separate_test_features_path,
                             atom_descriptors_path=args.separate_test_atom_descriptors_path,
                             bond_features_path=args.separate_test_bond_features_path,
                             phase_features_path=args.separate_test_phase_features_path,
                             smiles_columns=args.smiles_columns, logger=logger)
    if args.separate_val_path:
        val_data = get_data(path=args.separate_val_path, args=args,
                            features_path=args.separate_val_features_path,
                            atom_descriptors_path=args.separate_val_atom_descriptors_path,
                            bond_features_path=args.separate_val_bond_features_path,
                            phase_features_path=args.separate_val_phase_features_path,
                            smiles_columns=args.smiles_columns, logger=logger)

    if args.separate_val_path and args.separate_test_path:
        train_data = data
    elif args.separate_val_path:
        train_data, _, test_data = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.0, 0.2),
                                              seed=args.seed, num_folds=args.num_folds, args=args, logger=logger)
    elif args.separate_test_path:
        train_data, val_data, _ = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.2, 0.0),
                                             seed=args.seed, num_folds=args.num_folds, args=args, logger=logger)
    else:
        train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes,
                                                     seed=args.seed, num_folds=args.num_folds, args=args, logger=logger)

    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(data)
        debug('Class sizes')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(f'{args.task_names[i]} '
                  f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}')

    if args.save_smiles_splits:
        save_smiles_splits(data_path=args.data_path, save_dir=args.save_dir, task_names=args.task_names,
                           features_path=args.features_path, train_data=train_data, val_data=val_data,
                           test_data=test_data, smiles_columns=args.smiles_columns, logger=logger)

    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    if args.atom_descriptor_scaling and args.atom_descriptors is not None:
        atom_descriptor_scaler = train_data.normalize_features(replace_nan_token=0, scale_atom_descriptors=True)
        val_data.normalize_features(atom_descriptor_scaler, scale_atom_descriptors=True)
        test_data.normalize_features(atom_descriptor_scaler, scale_atom_descriptors=True)
    else:
        atom_descriptor_scaler = None

    if args.bond_feature_scaling and args.bond_features_size > 0:
        bond_feature_scaler = train_data.normalize_features(replace_nan_token=0, scale_bond_features=True)
        val_data.normalize_features(bond_feature_scaler, scale_bond_features=True)
        test_data.normalize_features(bond_feature_scaler, scale_bond_features=True)
    else:
        bond_feature_scaler = None

    args.train_data_size = len(train_data)

    debug(f'Total size = {len(data):,} | '
          f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        scaler = train_data.normalize_targets()
    elif args.dataset_type == 'spectra':
        debug('Normalizing spectra and excluding spectra regions based on phase')
        args.spectra_phase_mask = load_phase_mask(args.spectra_phase_mask_path)
        for dataset in [train_data, test_data, val_data]:
            data_targets = normalize_spectra(spectra=dataset.targets(), phase_features=dataset.phase_features(),
                                             phase_mask=args.spectra_phase_mask, excluded_sub_value=None,
                                             threshold=args.spectra_target_floor)
            dataset.set_targets(data_targets)
        scaler = None
    else:
        scaler = None

    # Get loss function
    loss_func = get_loss_func(args)

    # Set up test set evaluation
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    if args.dataset_type == 'multiclass':
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks, args.multiclass_num_classes))
    else:
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # Automatically determine whether to cache
    if len(data) <= args.cache_cutoff:
        set_cache_graph(True)
        num_workers = 0
    else:
        set_cache_graph(False)
        num_workers = args.num_workers

    # Create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data, batch_size=args.batch_size,
                                           num_workers=num_workers, class_balance=args.class_balance,
                                           shuffle=True, seed=args.seed)
    val_data_loader = MoleculeDataLoader(dataset=val_data, batch_size=args.batch_size, num_workers=num_workers)
    test_data_loader = MoleculeDataLoader(dataset=test_data, batch_size=args.batch_size, num_workers=num_workers)

    if args.class_balance:
        debug(f'With class_balance, effective train size = {train_data_loader.iter_size:,}')

    # Train ensemble of models
    for model_idx in range(args.ensemble_size):
        # Tensorboard writer
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)
        try:
            writer = SummaryWriter(log_dir=save_dir)
        except:
            writer = SummaryWriter(logdir=save_dir)

        # Load/build model
        if args.checkpoint_paths is not None:
            debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}')
            model = load_checkpoint(args.checkpoint_paths[model_idx], logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = MoleculeModel(args)

        # Optionally, overwrite weights:
        if args.checkpoint_frzn is not None:
            debug(f'Loading and freezing parameters from {args.checkpoint_frzn}.')
            model = load_frzn_model(model=model, path=args.checkpoint_frzn, current_args=args, logger=logger)

        debug(model)

        if args.checkpoint_frzn is not None:
            debug(f'Number of unfrozen parameters = {param_count(model):,}')
            debug(f'Total number of parameters = {param_count_all(model):,}')
        else:
            debug(f'Number of parameters = {param_count_all(model):,}')

        if args.cuda:
            debug('Moving model to cuda')
            model = model.to(args.device)

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, MODEL_FILE_NAME), model, scaler,
                        features_scaler, atom_descriptor_scaler, bond_feature_scaler, args)

        # Optimizers
        optimizer = build_optimizer(model, args)

        # Learning rate schedulers
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in trange(args.epochs):
            debug(f'Epoch {epoch}')

            n_iter = train(model=model, data_loader=train_data_loader, loss_func=loss_func,
                           optimizer=optimizer, scheduler=scheduler, args=args,
                           n_iter=n_iter, logger=logger, writer=writer)
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()

            val_scores = evaluate(model=model, data_loader=val_data_loader, num_tasks=args.num_tasks,
                                  metrics=args.metrics, dataset_type=args.dataset_type,
                                  scaler=scaler, logger=logger)

            for metric, scores in val_scores.items():
                # Average validation score
                avg_val_score = np.nanmean(scores)
                debug(f'Validation {metric} = {avg_val_score:.6f}')
                writer.add_scalar(f'validation_{metric}', avg_val_score, n_iter)

                if args.show_individual_scores:
                    # Individual validation scores
                    for task_name, val_score in zip(args.task_names, scores):
                        debug(f'Validation {task_name} {metric} = {val_score:.6f}')
                        writer.add_scalar(f'validation_{task_name}_{metric}', val_score, n_iter)

            # Save model checkpoint if improved validation score
            avg_val_score = np.nanmean(val_scores[args.metric])
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score:
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, MODEL_FILE_NAME), model, scaler,
                                features_scaler, atom_descriptor_scaler, bond_feature_scaler, args)

        # Evaluate on test set using model with best validation score
        info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
        model = load_checkpoint(os.path.join(save_dir, MODEL_FILE_NAME), device=args.device, logger=logger)

        test_preds = predict(model=model, data_loader=test_data_loader, scaler=scaler)
        test_scores = evaluate_predictions(preds=test_preds, targets=test_targets, num_tasks=args.num_tasks,
                                           metrics=args.metrics, dataset_type=args.dataset_type, logger=logger)

        if len(test_preds) != 0:
            sum_test_preds += np.array(test_preds)

        # Average test score
        for metric, scores in test_scores.items():
            avg_test_score = np.nanmean(scores)
            info(f'Model {model_idx} test {metric} = {avg_test_score:.6f}')
            writer.add_scalar(f'test_{metric}', avg_test_score, 0)

            if args.show_individual_scores and args.dataset_type != 'spectra':
                # Individual test scores
                for task_name, test_score in zip(args.task_names, scores):
                    info(f'Model {model_idx} test {task_name} {metric} = {test_score:.6f}')
                    writer.add_scalar(f'test_{task_name}_{metric}', test_score, n_iter)
        writer.close()

    # Evaluate ensemble on test set
    avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()

    ensemble_scores = evaluate_predictions(preds=avg_test_preds, targets=test_targets, num_tasks=args.num_tasks,
                                           metrics=args.metrics, dataset_type=args.dataset_type, logger=logger)

    for metric, scores in ensemble_scores.items():
        # Average ensemble score
        avg_ensemble_test_score = np.nanmean(scores)
        info(f'Ensemble test {metric} = {avg_ensemble_test_score:.6f}')

        # Individual ensemble scores
        if args.show_individual_scores:
            for task_name, ensemble_score in zip(args.task_names, scores):
                info(f'Ensemble test {task_name} {metric} = {ensemble_score:.6f}')

    # Save scores
    with open(os.path.join(args.save_dir, 'test_scores.json'), 'w') as f:
        json.dump(ensemble_scores, f, indent=4, sort_keys=True)

    # Optionally save test preds
    if args.save_preds:
        test_preds_dataframe = pd.DataFrame(data={'smiles': test_data.smiles()})
        for i, task_name in enumerate(args.task_names):
            test_preds_dataframe[task_name] = [pred[i] for pred in avg_test_preds]
        test_preds_dataframe.to_csv(os.path.join(args.save_dir, 'test_preds.csv'), index=False)

    return ensemble_scores

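# Example (hypothetical): run_training returns a dict mapping each metric in
# args.metrics to one score per task, e.g. {'rmse': [0.71, 0.64], 'r2': [0.55, 0.60]}.
# A small helper to collapse that into per-metric averages might look like:
def _example_mean_scores(ensemble_scores: Dict[str, List[float]]) -> Dict[str, float]:
    """Sketch only: average per-task scores for each metric, ignoring NaNs."""
    return {metric: float(np.nanmean(scores)) for metric, scores in ensemble_scores.items()}
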
def run_training(args: TrainArgs, data: MoleculeDataset, logger: Logger = None) -> Dict[str, List[float]]:
    """
    Loads data, trains a Chemprop model, and returns test scores for the model checkpoint with the highest validation score.

    :param args: A :class:`~chemprop.args.TrainArgs` object containing arguments for
                 loading data and training the Chemprop model.
    :param data: A :class:`~chemprop.data.MoleculeDataset` containing the data.
    :param logger: A logger to record output.
    :return: A dictionary mapping each metric in :code:`args.metrics` to a list of values for each task.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Set pytorch seed for random initial weights
    torch.manual_seed(args.pytorch_seed)

    # Split data (separate val/test paths are disabled in this variant; only
    # the default random split below is used)
    debug(f"Splitting data with seed {args.seed}")
    # if args.separate_test_path:
    #     test_data = get_data(path=args.separate_test_path, args=args,
    #                          features_path=args.separate_test_features_path,
    #                          atom_descriptors_path=args.separate_test_atom_descriptors_path,
    #                          bond_features_path=args.separate_test_bond_features_path,
    #                          smiles_columns=args.smiles_columns, logger=logger)
    # if args.separate_val_path:
    #     val_data = get_data(path=args.separate_val_path, args=args,
    #                         features_path=args.separate_val_features_path,
    #                         atom_descriptors_path=args.separate_val_atom_descriptors_path,
    #                         bond_features_path=args.separate_val_bond_features_path,
    #                         smiles_columns=args.smiles_columns, logger=logger)
    # if args.separate_val_path and args.separate_test_path:
    #     train_data = data
    # elif args.separate_val_path:
    #     train_data, _, test_data = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.0, 0.2),
    #                                           seed=args.seed, num_folds=args.num_folds, args=args, logger=logger)
    # elif args.separate_test_path:
    #     train_data, val_data, _ = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.2, 0.0),
    #                                          seed=args.seed, num_folds=args.num_folds, args=args, logger=logger)
    # else:

    # Default
    train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes,
                                                 seed=args.seed, num_folds=args.num_folds, args=args, logger=logger)

    if args.dataset_type == "classification":
        class_sizes = get_class_sizes(data)
        debug("Class sizes")
        for i, task_class_sizes in enumerate(class_sizes):
            debug(f"{args.task_names[i]} "
                  f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}')

    if args.save_smiles_splits:
        save_smiles_splits(data_path=args.data_path, save_dir=args.save_dir, task_names=args.task_names,
                           features_path=args.features_path, train_data=train_data, val_data=val_data,
                           test_data=test_data, smiles_columns=args.smiles_columns)

    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    if args.atom_descriptor_scaling and args.atom_descriptors is not None:
        atom_descriptor_scaler = train_data.normalize_features(replace_nan_token=0, scale_atom_descriptors=True)
        val_data.normalize_features(atom_descriptor_scaler, scale_atom_descriptors=True)
        test_data.normalize_features(atom_descriptor_scaler, scale_atom_descriptors=True)
    else:
        atom_descriptor_scaler = None

    if args.bond_feature_scaling and args.bond_features_size > 0:
        bond_feature_scaler = train_data.normalize_features(replace_nan_token=0, scale_bond_features=True)
        val_data.normalize_features(bond_feature_scaler, scale_bond_features=True)
        test_data.normalize_features(bond_feature_scaler, scale_bond_features=True)
    else:
        bond_feature_scaler = None

    args.train_data_size = len(train_data)

    debug(f"Total size = {len(data):,} | "
          f"train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}")

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == "regression":
        debug("Fitting scaler")
        scaler = train_data.normalize_targets()
    else:
        scaler = None

    # Get loss function
    loss_func = get_loss_func(args)

    # Set up test set evaluation
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    if args.dataset_type == "multiclass":
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks, args.multiclass_num_classes))
    else:
        sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # Automatically determine whether to cache
    if len(data) <= args.cache_cutoff:
        set_cache_graph(True)
        num_workers = 0
    else:
        set_cache_graph(False)
        num_workers = args.num_workers

    # Create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data, batch_size=args.batch_size,
                                           num_workers=num_workers, class_balance=args.class_balance,
                                           shuffle=True, seed=args.seed)
    val_data_loader = MoleculeDataLoader(dataset=val_data, batch_size=args.batch_size, num_workers=num_workers)
    test_data_loader = MoleculeDataLoader(dataset=test_data, batch_size=args.batch_size, num_workers=num_workers)

    if args.class_balance:
        debug(f"With class_balance, effective train size = {train_data_loader.iter_size:,}")

    # Train ensemble of models
    for model_idx in range(args.ensemble_size):
        # Tensorboard writer
        save_dir = os.path.join(args.save_dir, f"model_{model_idx}")
        makedirs(save_dir)
        try:
            writer = SummaryWriter(log_dir=save_dir)
        except:
            writer = SummaryWriter(logdir=save_dir)

        # Load/build model
        if args.checkpoint_paths is not None:
            debug(f"Loading model {model_idx} from {args.checkpoint_paths[model_idx]}")
            model = load_checkpoint(args.checkpoint_paths[model_idx], logger=logger)
        else:
            debug(f"Building model {model_idx}")
            model = MoleculeModel(args)

        debug(model)
        debug(f"Number of parameters = {param_count(model):,}")
        if args.cuda:
            debug("Moving model to cuda")
            model = model.to(args.device)

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, MODEL_FILE_NAME), model, scaler,
                        features_scaler, atom_descriptor_scaler, bond_feature_scaler, args)

        # Optimizers
        optimizer = build_optimizer(model, args)

        # Learning rate schedulers
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float("inf") if args.minimize_score else -float("inf")
        best_epoch, n_iter = 0, 0
        for epoch in trange(args.epochs):
            debug(f"Epoch {epoch}")

            n_iter = train(model=model, data_loader=train_data_loader, loss_func=loss_func,
                           optimizer=optimizer, scheduler=scheduler, args=args,
                           n_iter=n_iter, logger=logger, writer=writer)
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()

            val_scores = evaluate(model=model, data_loader=val_data_loader, num_tasks=args.num_tasks,
                                  metrics=args.metrics, dataset_type=args.dataset_type,
                                  scaler=scaler, logger=logger)

            for metric, scores in val_scores.items():
                # Average validation score
                avg_val_score = np.nanmean(scores)
                debug(f"Validation {metric} = {avg_val_score:.6f}")
                writer.add_scalar(f"validation_{metric}", avg_val_score, n_iter)

                if args.show_individual_scores:
                    # Individual validation scores
                    for task_name, val_score in zip(args.task_names, scores):
                        debug(f"Validation {task_name} {metric} = {val_score:.6f}")
                        writer.add_scalar(f"validation_{task_name}_{metric}", val_score, n_iter)

            # Save model checkpoint if improved validation score
            avg_val_score = np.nanmean(val_scores[args.metric])
            if (args.minimize_score and avg_val_score < best_score
                    or not args.minimize_score and avg_val_score > best_score):
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, MODEL_FILE_NAME), model, scaler,
                                features_scaler, atom_descriptor_scaler, bond_feature_scaler, args)

        # Evaluate on test set using model with best validation score
        info(f"Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}")
        model = load_checkpoint(os.path.join(save_dir, MODEL_FILE_NAME), device=args.device, logger=logger)

        test_preds = predict(model=model, data_loader=test_data_loader, scaler=scaler)
        test_scores = evaluate_predictions(preds=test_preds, targets=test_targets, num_tasks=args.num_tasks,
                                           metrics=args.metrics, dataset_type=args.dataset_type, logger=logger)

        if len(test_preds) != 0:
            sum_test_preds += np.array(test_preds)

        # Average test score
        for metric, scores in test_scores.items():
            avg_test_score = np.nanmean(scores)
            info(f"Model {model_idx} test {metric} = {avg_test_score:.6f}")
            writer.add_scalar(f"test_{metric}", avg_test_score, 0)

            if args.show_individual_scores:
                # Individual test scores
                for task_name, test_score in zip(args.task_names, scores):
                    info(f"Model {model_idx} test {task_name} {metric} = {test_score:.6f}")
                    writer.add_scalar(f"test_{task_name}_{metric}", test_score, n_iter)
        writer.close()

    # Evaluate ensemble on test set
    avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()

    ensemble_scores = evaluate_predictions(preds=avg_test_preds, targets=test_targets, num_tasks=args.num_tasks,
                                           metrics=args.metrics, dataset_type=args.dataset_type, logger=logger)

    for metric, scores in ensemble_scores.items():
        # Average ensemble score
        avg_ensemble_test_score = np.nanmean(scores)
        info(f"Ensemble test {metric} = {avg_ensemble_test_score:.6f}")

        # Individual ensemble scores
        if args.show_individual_scores:
            for task_name, ensemble_score in zip(args.task_names, scores):
                info(f"Ensemble test {task_name} {metric} = {ensemble_score:.6f}")

    # Optionally save test preds
    if args.save_preds:
        test_preds_dataframe = pd.DataFrame(data={"smiles": test_data.smiles()})
        for i, task_name in enumerate(args.task_names):
            test_preds_dataframe[task_name] = [pred[i] for pred in avg_test_preds]
        test_preds_dataframe.to_csv(os.path.join(args.save_dir, "test_preds.csv"), index=False)

    return ensemble_scores

def run_training(args: TrainArgs, logger: Logger = None) -> List[float]:
    """
    Trains a model and returns test scores on the model checkpoint with the highest validation score.

    :param args: Arguments.
    :param logger: Logger.
    :return: A list of Bayesian model average (BMA) test scores for each task.
    """
    debug = info = print

    # Print command line and args
    debug('Command line')
    debug(f'python {" ".join(sys.argv)}')
    debug('Args')
    debug(args)

    # Save args
    args.save(os.path.join(args.save_dir, 'args.json'))

    # Get data
    debug('Loading data')
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')

    # Split data
    debug(f'Splitting data with seed {args.seed}')
    train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes,
                                                 seed=args.seed, args=args, logger=logger)

    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None

    args.train_data_size = len(train_data)

    debug(f'Total size = {len(data):,} | '
          f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')

    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        train_smiles, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
    else:
        scaler = None

    # Get loss and metric functions
    loss_func = neg_log_like
    metric_func = get_metric_func(metric=args.metric)

    # Set up test set evaluation
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))

    # Automatically determine whether to cache
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers

    # Create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data, batch_size=args.batch_size,
                                           num_workers=num_workers, cache=cache,
                                           class_balance=args.class_balance, shuffle=True, seed=args.seed)
    val_data_loader = MoleculeDataLoader(dataset=val_data, batch_size=args.batch_size,
                                         num_workers=num_workers, cache=cache)
    test_data_loader = MoleculeDataLoader(dataset=test_data, batch_size=args.batch_size,
                                          num_workers=num_workers, cache=cache)

    ###########################################
    ########## Outer loop over ensemble members
    ###########################################

    for model_idx in range(args.ensemble_start_idx, args.ensemble_start_idx + args.ensemble_size):

        # Set pytorch seed for random initial weights
        torch.manual_seed(args.pytorch_seeds[model_idx])

        ######## set up all logging ########
        # make save_dir
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)

        # make results_dir
        results_dir = os.path.join(args.results_dir, f'model_{model_idx}')
        makedirs(results_dir)

        # initialise wandb
        os.environ['WANDB_MODE'] = 'dryrun'
        wandb.init(name=args.wandb_name + '_' + str(model_idx), project=args.wandb_proj, reinit=True)
        print('WANDB directory is:')
        print(wandb.run.dir)
        ####################################

        # Load/build model
        if args.checkpoint_path is not None:
            debug(f'Loading model {model_idx} from {args.checkpoint_path}')
            model = load_checkpoint(args.checkpoint_path + f'/model_{model_idx}/model.pt',
                                    device=args.device, logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = MoleculeModel(args)

        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
            model = model.to(args.device)

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)

        # Optimizer (the heteroscedastic noise parameter is excluded from weight decay)
        optimizer = Adam([{'params': model.encoder.parameters()},
                          {'params': model.ffn.parameters()},
                          {'params': model.log_noise, 'weight_decay': 0}],
                         lr=args.init_lr, weight_decay=args.weight_decay)

        # Learning rate scheduler
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in range(args.epochs):
            debug(f'Epoch {epoch}')

            n_iter = train(model=model, data_loader=train_data_loader, loss_func=loss_func,
                           optimizer=optimizer, scheduler=scheduler, args=args, n_iter=n_iter, logger=logger)

            val_scores = evaluate(model=model, data_loader=val_data_loader, args=args,
                                  num_tasks=args.num_tasks, metric_func=metric_func,
                                  dataset_type=args.dataset_type, scaler=scaler, logger=logger)

            # Average validation score
            avg_val_score = np.nanmean(val_scores)
            debug(f'Validation {args.metric} = {avg_val_score:.6f}')
            wandb.log({"Validation MAE": avg_val_score})  # logged under a fixed key regardless of args.metric

            # Save model checkpoint if improved validation score
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score:
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)

            # After the Noam warm-up phase, switch to a constant final learning rate
            if epoch == args.noam_epochs - 1:
                optimizer = Adam([{'params': model.encoder.parameters()},
                                  {'params': model.ffn.parameters()},
                                  {'params': model.log_noise, 'weight_decay': 0}],
                                 lr=args.final_lr, weight_decay=args.weight_decay)
                scheduler = scheduler_const([args.final_lr])

        # load model with best validation score
        info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'), device=args.device, logger=logger)

        # SWAG training loop, returns swag_model
        if args.swag:
            model = train_swag(model, train_data, val_data, num_workers, cache, loss_func,
                               metric_func, scaler, features_scaler, args, save_dir)

        # SGLD loop, which saves nets
        if args.sgld:
            model = train_sgld(model, train_data, val_data, num_workers, cache, loss_func,
                               metric_func, scaler, features_scaler, args, save_dir)

        # GP loop
        if args.gp:
            model, likelihood = train_gp(model, train_data, val_data, num_workers, cache,
                                         metric_func, scaler, features_scaler, args, save_dir)

        # BBP
        if args.bbp:
            model = train_bbp(model, train_data, val_data, num_workers, cache, loss_func,
                              metric_func, scaler, features_scaler, args, save_dir)

        # DUN
        if args.dun:
            model = train_dun(model, train_data, val_data, num_workers, cache, loss_func,
                              metric_func, scaler, features_scaler, args, save_dir)

        ##################################
        ########## Inner loop over samples
        ##################################

        for sample_idx in range(args.samples):

            # draw model from SWAG posterior
            if args.swag:
                model.sample(scale=1.0, cov=args.cov_mat, block=args.block)

            # draw model from collected SGLD models
            if args.sgld:
                model = load_checkpoint(os.path.join(save_dir, f'model_{sample_idx}.pt'),
                                        device=args.device, logger=logger)

            # make predictions
            test_preds = predict(model=model, data_loader=test_data_loader, args=args,
                                 scaler=scaler, test_data=True, bbp_sample=True)

            #######################################################################
            ##### SAVING STUFF DOWN

            if args.gp:
                # get test_preds_std (scaled back to original data)
                test_preds_std = predict_std_gp(model=model, data_loader=test_data_loader,
                                                args=args, scaler=scaler, likelihood=likelihood)

                # 1 - MEANS
                np.savez(os.path.join(results_dir, f'preds_{sample_idx}'), np.array(test_preds))

                # 2 - STD, combined aleatoric and epistemic (we save down the stds, always)
                np.savez(os.path.join(results_dir, f'predsSTDEV_{sample_idx}'), np.array(test_preds_std))

            else:
                # save test_preds and aleatoric uncertainties
                if args.dun:
                    log_cat = model.log_cat.detach().cpu().numpy()
                    cat = np.exp(log_cat) / np.sum(np.exp(log_cat))
                    np.savez(os.path.join(results_dir, f'cat_{sample_idx}'), cat)

                    # samples from categorical dist and saves a depth MC sample
                    depth_sample = np.random.multinomial(1, cat).nonzero()[0][0]
                    test_preds_MCdepth = predict_MCdepth(model=model, data_loader=test_data_loader,
                                                         args=args, scaler=scaler, d=depth_sample)
                    np.savez(os.path.join(results_dir, f'predsMCDEPTH_{sample_idx}'),
                             np.array(test_preds_MCdepth))

                if args.swag:
                    log_noise = model.base.log_noise
                else:
                    log_noise = model.log_noise
                noise = np.exp(log_noise.detach().cpu().numpy()) * np.array(scaler.stds)

                np.savez(os.path.join(results_dir, f'preds_{sample_idx}'), np.array(test_preds))
                np.savez(os.path.join(results_dir, f'noise_{sample_idx}'), noise)

            #######################################################################

            # add predictions to sum_test_preds
            if len(test_preds) != 0:
                sum_test_preds += np.array(test_preds)

            # evaluate predictions using metric function
            test_scores = evaluate_predictions(preds=test_preds, targets=test_targets,
                                               num_tasks=args.num_tasks, metric_func=metric_func,
                                               dataset_type=args.dataset_type, logger=logger)

            # compute average test score
            avg_test_score = np.nanmean(test_scores)
            info(f'Model {model_idx}, sample {sample_idx} test {args.metric} = {avg_test_score:.6f}')

    #################################
    ########## Bayesian Model Average
    #################################
    # note: this is an average over Bayesian samples AND components in an ensemble

    # compute number of prediction iterations
    pred_iterations = args.ensemble_size * args.samples

    # average predictions across iterations
    avg_test_preds = (sum_test_preds / pred_iterations).tolist()

    # evaluate
    BMA_scores = evaluate_predictions(preds=avg_test_preds, targets=test_targets,
                                      num_tasks=args.num_tasks, metric_func=metric_func,
                                      dataset_type=args.dataset_type, logger=logger)

    # average scores across tasks
    avg_BMA_test_score = np.nanmean(BMA_scores)
    info(f'BMA test {args.metric} = {avg_BMA_test_score:.6f}')

    return BMA_scores

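# Example (hypothetical): the Bayesian model average computed above is a plain
# mean over ensemble members and posterior samples. The same reduction in
# isolation, for a list of (n_molecules, n_tasks) prediction arrays:
def _example_bayesian_model_average(per_iteration_preds: List[np.ndarray]) -> np.ndarray:
    """Sketch only: average predictions across ensemble x posterior samples."""
    stacked = np.stack([np.asarray(p) for p in per_iteration_preds])  # (iterations, molecules, tasks)
    return stacked.mean(axis=0)
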
def pdts(args: TrainArgs, model_idx):
    """
    Preliminary experiment with PDTS (approximate batch Bayesian optimisation).

    We use a data set of 50k molecules and run until we have trained with 15k
    data points; the acquisition batch size is 50 and we initialise with 1000
    data points.
    """
    ######## set up all logging ########
    logger = None

    # make save_dir
    save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
    makedirs(save_dir)

    # make results_dir
    results_dir = args.results_dir
    makedirs(results_dir)

    # initialise wandb
    #os.environ['WANDB_MODE'] = 'dryrun'
    wandb.init(name=args.wandb_name + '_' + str(model_idx), project=args.wandb_proj, reinit=True)
    #print('WANDB directory is:')
    #print(wandb.run.dir)
    ####################################

    ########## get data
    args.task_names = args.target_columns or get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()

    ########## SMILES of top 1%
    top1p = np.array(MoleculeDataset(data).targets())
    top1p_idx = np.argsort(-top1p[:, 0])[:int(args.max_data_size * 0.01)]
    SMILES = np.array(MoleculeDataset(data).smiles())[top1p_idx]

    ########## initial data splits
    args.seed = args.data_seeds[model_idx]
    data.shuffle(seed=args.seed)
    sizes = args.split_sizes
    train_size = int(sizes[0] * len(data))
    train_orig = data[:train_size]
    test_orig = data[train_size:]
    train_data, test_data = copy.deepcopy(MoleculeDataset(train_orig)), copy.deepcopy(MoleculeDataset(test_orig))
    args.train_data_size = len(train_data)

    ########## standardising
    # features (train and test)
    features_scaler = train_data.normalize_features(replace_nan_token=0)
    test_data.normalize_features(features_scaler)
    # targets (train)
    train_targets = train_data.targets()
    test_targets = test_data.targets()
    scaler = StandardScaler().fit(train_targets)
    scaled_targets = scaler.transform(train_targets).tolist()
    train_data.set_targets(scaled_targets)

    ########## loss, metric functions
    loss_func = neg_log_like
    metric_func = get_metric_func(metric=args.metric)

    ########## data loaders
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers

    train_data_loader = MoleculeDataLoader(dataset=train_data, batch_size=args.batch_size,
                                           num_workers=num_workers, cache=cache,
                                           class_balance=args.class_balance, shuffle=True, seed=args.seed)
    test_data_loader = MoleculeDataLoader(dataset=test_data, batch_size=args.batch_size,
                                          num_workers=num_workers, cache=cache)

    ########## instantiating model, optimiser, scheduler (MAP)
    # set pytorch seed for random initial weights
    torch.manual_seed(args.pytorch_seeds[model_idx])
    # build model
    print(f'Building model {model_idx}')
    model = MoleculeModel(args)
    print(model)
    print(f'Number of parameters = {param_count(model):,}')
    if args.cuda:
        print('Moving model to cuda')
        model = model.to(args.device)
    # optimizer
    optimizer = Adam([{'params': model.encoder.parameters()},
                      {'params': model.ffn.parameters()},
                      {'params': model.log_noise, 'weight_decay': 0}],
                     lr=args.lr, weight_decay=args.weight_decay)
    # learning rate scheduler
    scheduler = scheduler_const([args.lr])

    ####################################################################
    # FIRST THOMPSON ITERATION
    ####################################################################

    ### scores array
    ptds_scores = np.ones(args.pdts_batches + 1)
    batch_no = 0

    ### fill for batch 0
    SMILES_train = np.array(train_data.smiles())
    SMILES_stack = np.hstack((SMILES, SMILES_train))
    overlap = len(SMILES_stack) - len(np.unique(SMILES_stack))
    prop = overlap / len(SMILES)
    ptds_scores[batch_no] = prop
    wandb.log({"Proportion of top 1%": prop, "batch_no": batch_no}, commit=False)

    ### train MAP posterior
    gp_switch = False
    likelihood = None
    bbp_switch = None
    n_iter = 0
    for epoch in range(args.epochs_init_map):
        n_iter = train(model=model, data_loader=train_data_loader, loss_func=loss_func,
                       optimizer=optimizer, scheduler=scheduler, args=args,
                       n_iter=n_iter, bbp_switch=bbp_switch)
        # save to save_dir
        #if epoch == args.epochs_init_map - 1:
        #    save_checkpoint(os.path.join(save_dir, f'model_{batch_no}.pt'), model, scaler, features_scaler, args)

    # if using an approximate posterior, load the MAP solution from checkpoint path
    if args.bbp or args.gp or args.swag or args.sgld:
        model = load_checkpoint(args.checkpoint_path + f'/model_{model_idx}/model_{batch_no}.pt',
                                device=args.device, logger=None)

    ########## BBP
    if args.bbp:
        model_bbp = MoleculeModelBBP(args)  # instantiate with bayesian linear layers
        for (_, param_bbp), (_, param_pre) in zip(model_bbp.named_parameters(), model.named_parameters()):
            param_bbp.data = copy.deepcopy(param_pre.data.T)  # copy over parameters
        # instantiate rhos
        for layer in model_bbp.children():
            if isinstance(layer, BayesLinear):
                layer.init_rho(args.rho_min_bbp, args.rho_max_bbp)
        for layer in model_bbp.encoder.encoder.children():
            if isinstance(layer, BayesLinear):
                layer.init_rho(args.rho_min_bbp, args.rho_max_bbp)
        model = model_bbp  # name back
        # move to cuda
        if args.cuda:
            print('Moving bbp model to cuda')
            model = model.to(args.device)
        # optimiser and scheduler
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        scheduler = scheduler_const([args.lr])

        bbp_switch = 2
        n_iter = 0
        for epoch in range(args.epochs_init):
            n_iter = train(model=model, data_loader=train_data_loader, loss_func=loss_func,
                           optimizer=optimizer, scheduler=scheduler, args=args,
                           n_iter=n_iter, bbp_switch=bbp_switch)

    ########## GP
    if args.gp:
        # feature_extractor
        model.featurizer = True
        feature_extractor = model
        # inducing points
        inducing_points = initial_inducing_points(train_data_loader, feature_extractor, args)
        # GP layer
        gp_layer = GPLayer(inducing_points, args.num_tasks)
        # full DKL model
        model = copy.deepcopy(DKLMoleculeModel(feature_extractor, gp_layer))
        # likelihood (rank 0 restricts to diagonal matrix)
        # NOTE: the task count is hard-coded to 12 here rather than taken from args.num_tasks
        likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=12, rank=0)
        # model and likelihood to CUDA
        if args.cuda:
            model.cuda()
            likelihood.cuda()
        # loss object
        loss_func = gpytorch.mlls.VariationalELBO(likelihood, model.gp_layer, num_data=args.train_data_size)
        # optimiser and scheduler
        params_list = [
            {'params': model.feature_extractor.parameters(), 'weight_decay': args.weight_decay_gp},
            {'params': model.gp_layer.hyperparameters()},
            {'params': model.gp_layer.variational_parameters()},
            {'params': likelihood.parameters()},
        ]
        optimizer = torch.optim.Adam(params_list, lr=args.lr)
        scheduler = scheduler_const([args.lr])

        gp_switch = True
        n_iter = 0
        for epoch in range(args.epochs_init):
            n_iter = train(model=model, data_loader=train_data_loader, loss_func=loss_func,
                           optimizer=optimizer, scheduler=scheduler, args=args,
                           n_iter=n_iter, gp_switch=gp_switch, likelihood=likelihood)

    ########## SWAG
    if args.swag:
        model_core = copy.deepcopy(model)
        model = train_swag_pdts(model_core, train_data_loader, loss_func, scaler,
                                features_scaler, args, save_dir, batch_no)

    ########## SGLD
    if args.sgld:
        model = train_sgld_pdts(model, train_data_loader, loss_func, scaler,
                                features_scaler, args, save_dir, batch_no)

    ### find top_idx
    top_idx = []  # need for thom
    sum_test_preds = np.zeros((len(test_orig), args.num_tasks))  # need for greedy
    for sample in range(args.samples):

        # draw model from SWAG posterior
        if args.swag:
            model.sample(scale=1.0, cov=args.cov_mat, block=args.block)

        # retrieve sgld sample
        if args.sgld:
            model = load_checkpoint(args.save_dir + f'/model_{model_idx}/model_{batch_no}/model_{sample}.pt',
                                    device=args.device, logger=logger)

        test_preds = predict(model=model, data_loader=test_data_loader, args=args, scaler=scaler,
                             test_data=True, gp_sample=args.thompson, bbp_sample=True)
        test_preds = np.array(test_preds)

        # thompson bit
        rank = 0
        # base length
        if args.sgld:
            base_length = 5 * sample + 4
        else:
            base_length = sample
        while args.thompson and (len(top_idx) <= base_length):
            top_unique_molecule = np.argsort(-test_preds[:, 0])[rank]
            rank += 1
            if top_unique_molecule not in top_idx:
                top_idx.append(top_unique_molecule)

        # add to sum_test_preds
        sum_test_preds += test_preds

        # print
        print('done sample ' + str(sample))

    # final top_idx
    if args.thompson:
        top_idx = np.array(top_idx)
    else:
        sum_test_preds /= args.samples
        top_idx = np.argsort(-sum_test_preds[:, 0])[:50]

    ### transfer from test to train (pop in descending index order so earlier
    ### pops do not shift the later indices)
    top_idx = -np.sort(-top_idx)
    for idx in top_idx:
        train_orig.append(test_orig.pop(idx))
    train_data, test_data = copy.deepcopy(MoleculeDataset(train_orig)), copy.deepcopy(MoleculeDataset(test_orig))
    args.train_data_size = len(train_data)
    if args.gp:
        loss_func = gpytorch.mlls.VariationalELBO(likelihood, model.gp_layer, num_data=args.train_data_size)
    print(args.train_data_size)

    ### standardise features (train and test; using original features_scaler)
    train_data.normalize_features(features_scaler)
    test_data.normalize_features(features_scaler)

    ### standardise targets (train only; using original scaler)
    train_targets = train_data.targets()
    scaled_targets_tr = scaler.transform(train_targets).tolist()
    train_data.set_targets(scaled_targets_tr)

    ### create data loaders
    train_data_loader = MoleculeDataLoader(dataset=train_data, batch_size=args.batch_size,
                                           num_workers=num_workers, cache=cache,
                                           class_balance=args.class_balance, shuffle=True, seed=args.seed)
    test_data_loader = MoleculeDataLoader(dataset=test_data, batch_size=args.batch_size,
                                          num_workers=num_workers, cache=cache)

    ##################################
    ########## thompson sampling loop
    ##################################

    for batch_no in range(1, args.pdts_batches + 1):

        ### fill in ptds_scores
        SMILES_train = np.array(train_data.smiles())
        SMILES_stack = np.hstack((SMILES, SMILES_train))
        overlap = len(SMILES_stack) - len(np.unique(SMILES_stack))
        prop = overlap / len(SMILES)
        ptds_scores[batch_no] = prop
        wandb.log({"Proportion of top 1%": prop, "batch_no": batch_no}, commit=False)

        ### train posterior
        n_iter = 0
        for epoch in range(args.epochs):
            n_iter = train(model=model, data_loader=train_data_loader, loss_func=loss_func,
                           optimizer=optimizer, scheduler=scheduler, args=args, n_iter=n_iter,
                           gp_switch=gp_switch, likelihood=likelihood, bbp_switch=bbp_switch)
            # save to save_dir
            #if epoch == args.epochs - 1:
            #    save_checkpoint(os.path.join(save_dir, f'model_{batch_no}.pt'), model, scaler, features_scaler, args)

        # if swag, load checkpoint
        if args.swag:
            model_core = load_checkpoint(args.checkpoint_path + f'/model_{model_idx}/model_{batch_no}.pt',
                                         device=args.device, logger=None)

        ########## SWAG
        if args.swag:
            model = train_swag_pdts(model_core, train_data_loader, loss_func, scaler,
                                    features_scaler, args, save_dir, batch_no)

        ########## SGLD
        if args.sgld:
            model = train_sgld_pdts(model, train_data_loader, loss_func, scaler,
                                    features_scaler, args, save_dir, batch_no)

        ### find top_idx
        top_idx = []  # need for thom
        sum_test_preds = np.zeros((len(test_orig), args.num_tasks))  # need for greedy
        for sample in range(args.samples):

            # draw model from SWAG posterior
            if args.swag:
                model.sample(scale=1.0, cov=args.cov_mat, block=args.block)

            # retrieve sgld sample
            if args.sgld:
                model = load_checkpoint(args.save_dir + f'/model_{model_idx}/model_{batch_no}/model_{sample}.pt',
                                        device=args.device, logger=logger)

            test_preds = predict(model=model, data_loader=test_data_loader, args=args, scaler=scaler,
                                 test_data=True, gp_sample=args.thompson, bbp_sample=True)
            test_preds = np.array(test_preds)

            # thompson bit
            rank = 0
            # base length
            if args.sgld:
                base_length = 5 * sample + 4
            else:
                base_length = sample
            while args.thompson and (len(top_idx) <= base_length):
                top_unique_molecule = np.argsort(-test_preds[:, 0])[rank]
                rank += 1
                if top_unique_molecule not in top_idx:
                    top_idx.append(top_unique_molecule)

            # add to sum_test_preds
            sum_test_preds += test_preds

            # print
            print('done sample ' + str(sample))

        # final top_idx
        if args.thompson:
            top_idx = np.array(top_idx)
        else:
            sum_test_preds /= args.samples
            top_idx = np.argsort(-sum_test_preds[:, 0])[:50]

        ### transfer from test to train (descending index order, as above)
        top_idx = -np.sort(-top_idx)
        for idx in top_idx:
            train_orig.append(test_orig.pop(idx))
        train_data, test_data = copy.deepcopy(MoleculeDataset(train_orig)), copy.deepcopy(MoleculeDataset(test_orig))
        args.train_data_size = len(train_data)
        if args.gp:
            loss_func = gpytorch.mlls.VariationalELBO(likelihood, model.gp_layer, num_data=args.train_data_size)
        print(args.train_data_size)

        ### standardise features (train and test; using original features_scaler)
        train_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)

        ### standardise targets (train only; using original scaler)
        train_targets = train_data.targets()
        scaled_targets_tr = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets_tr)

        ### create data loaders
        train_data_loader = MoleculeDataLoader(dataset=train_data, batch_size=args.batch_size,
                                               num_workers=num_workers, cache=cache,
                                               class_balance=args.class_balance, shuffle=True, seed=args.seed)
        test_data_loader = MoleculeDataLoader(dataset=test_data, batch_size=args.batch_size,
                                              num_workers=num_workers, cache=cache)

    # save scores
    np.savez(os.path.join(results_dir, f'ptds_{model_idx}'), ptds_scores)

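# Example (hypothetical): the acquisition step inside pdts, in miniature. For
# one posterior sample of predictions, Thompson-style selection takes the
# highest-scoring molecules (first target column) that were not yet acquired:
def _example_thompson_select(test_preds: np.ndarray, acquired: set, batch_size: int = 50) -> List[int]:
    """Sketch only: pick `batch_size` new pool indices by descending prediction."""
    order = np.argsort(-test_preds[:, 0])
    return [int(i) for i in order if int(i) not in acquired][:batch_size]
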
def load_checkpoint(path: str,
                    device: torch.device = None,
                    logger: logging.Logger = None,
                    template=None) -> MoleculeModel:
    """
    Loads a model checkpoint.

    :param path: Path where checkpoint is saved.
    :param device: Device where the model will be moved.
    :param logger: A logger.
    :param template: An optional pre-built model instance to load the weights into;
                     if ``None``, a new :class:`MoleculeModel` is built from the saved args.
    :return: The loaded MoleculeModel.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Load model and args
    state = torch.load(path, map_location=lambda storage, loc: storage)
    args = TrainArgs()
    args.from_dict(vars(state['args']), skip_unsettable=True)
    loaded_state_dict = state['state_dict']

    if device is not None:
        args.device = device

    # Build model
    if template is not None:
        model = template
    else:
        model = MoleculeModel(args)
    model_state_dict = model.state_dict()

    # Skip missing parameters and parameters of mismatched size
    pretrained_state_dict = {}
    for param_name in loaded_state_dict.keys():
        if param_name not in model_state_dict:
            info(f'Warning: Pretrained parameter "{param_name}" cannot be found in model parameters.')
        elif model_state_dict[param_name].shape != loaded_state_dict[param_name].shape:
            info(f'Warning: Pretrained parameter "{param_name}" '
                 f'of shape {loaded_state_dict[param_name].shape} does not match corresponding '
                 f'model parameter of shape {model_state_dict[param_name].shape}.')
        else:
            #debug(f'Loading pretrained parameter "{param_name}".')
            pretrained_state_dict[param_name] = loaded_state_dict[param_name]

    # Load pretrained weights
    model_state_dict.update(pretrained_state_dict)
    model.load_state_dict(model_state_dict)

    if args.cuda:
        debug('Moving model to cuda')
        model = model.to(args.device)

    return model

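# Example (hypothetical path): the `template` argument lets callers load saved
# weights into an already-constructed model (e.g. a SWAG base or a customized
# subclass) instead of rebuilding a MoleculeModel from the stored args:
def _example_load_into_template(args: TrainArgs, path: str = 'save_dir/model_0/model.pt') -> MoleculeModel:
    """Sketch only: reuse an existing model instance as the load target."""
    template = MoleculeModel(args)
    return load_checkpoint(path, device=args.device, template=template)
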
def run_meta_training(args: TrainArgs, logger: Logger = None) -> List[float]: """ Trains a model and returns test scores on the model checkpoint with the highest validation score. :param args: Arguments. :param logger: Logger. :return: A list of ensemble scores for each task. """ if logger is not None: debug, info = logger.debug, logger.info else: debug = info = print # Print command line debug('Command line') debug(f'python {" ".join(sys.argv)}') # Print args debug('Args') debug(args) # Save args args.save(os.path.join(args.save_dir, 'args.json')) # Set pytorch seed for random initial weights torch.manual_seed(args.pytorch_seed) # Get data debug('Loading data') args.task_names = args.target_columns or get_task_names(args.data_path) data = get_data(path=args.data_path, args=args, logger=logger) args.num_tasks = data.num_tasks() args.features_size = data.features_size() debug(f'Number of tasks = {args.num_tasks}') # Split data # debug(f'Splitting data with seed {args.seed}') # if args.separate_test_path: # test_data = get_data(path=args.separate_test_path, args=args, features_path=args.separate_test_features_path, logger=logger) # if args.separate_val_path: # val_data = get_data(path=args.separate_val_path, args=args, features_path=args.separate_val_features_path, logger=logger) # if args.separate_val_path and args.separate_test_path: # train_data = data # elif args.separate_val_path: # train_data, _, test_data = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.0, 0.2), seed=args.seed, args=args, logger=logger) # elif args.separate_test_path: # train_data, val_data, _ = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.2, 0.0), seed=args.seed, args=args, logger=logger) # else: # train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes, seed=args.seed, args=args, logger=logger) if args.dataset_type == 'classification': class_sizes = get_class_sizes(data) debug('Class sizes') for i, task_class_sizes in enumerate(class_sizes): debug(f'{args.task_names[i]} ' f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}') # if args.save_smiles_splits: # save_smiles_splits( # train_data=train_data, # val_data=val_data, # test_data=test_data, # data_path=args.data_path, # save_dir=args.save_dir # ) # If this happens, then need to move this logic into the task data loader # when it creates the datasets! 
    # Target and feature scalers are not fitted here; scaling is likewise
    # expected to move into the task data loader. The names are kept (as None)
    # because the checkpointing and evaluation calls below take them. The
    # single-task test-set evaluation setup (test_targets, sum_test_preds, and
    # the train/val/test data loaders used below) has not been ported yet and
    # also needs to come from the task data loaders.
    scaler = None
    features_scaler = None

    # Get loss and metric functions
    loss_func = get_loss_func(args)
    metric_func = get_metric_func(metric=args.metric)

    # Automatically determine whether to cache
    if len(data) <= args.cache_cutoff:
        cache = True
        num_workers = 0
    else:
        cache = False
        num_workers = args.num_workers

    # Set up MetaTaskDataLoaders, which take care of task splits under the hood
    # Set up task splits into T_tr, T_val, T_test
    assert args.chembl_assay_metadata_pickle_path is not None
    with open(args.chembl_assay_metadata_pickle_path + 'chembl_128_assay_type_to_names.pickle', 'rb') as handle:
        chembl_128_assay_type_to_names = pickle.load(handle)
    with open(args.chembl_assay_metadata_pickle_path + 'chembl_128_assay_name_to_type.pickle', 'rb') as handle:
        chembl_128_assay_name_to_type = pickle.load(handle)
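    # Expected shape of the metadata, inferred from its use below (the ChEMBL
    # IDs shown are hypothetical): the first pickle maps assay type to assay
    # names, the second is the inverse name-to-type mapping.
    #   chembl_128_assay_type_to_names = {'B': ['CHEMBL1613777', ...], 'F': [...],
    #                                     'A': [...], 'T': [...], 'U': [...]}
    #   chembl_128_assay_name_to_type = {'CHEMBL1613777': 'B', ...}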
""" import pdb; pdb.set_trace() T_val_num_BF_tasks = args.meta_split_sizes_BF[0] T_test_num_BF_tasks = args.meta_split_sizes_BF[1] T_val_idx = T_val_num_BF_tasks T_test_idx = T_val_num_BF_tasks + T_test_num_BF_tasks chembl_id_to_idx = {chembl_id: idx for idx, chembl_id in enumerate(args.task_names)} # Shuffle B and F tasks randomized_B_tasks = np.copy(chembl_128_assay_type_to_names['B']) np.random.shuffle(randomized_B_tasks) randomized_B_task_indices = [chembl_id_to_idx[assay] for assay in randomized_B_tasks] randomized_F_tasks = np.copy(chembl_128_assay_type_to_names['F']) np.random.shuffle(randomized_F_tasks) randomized_F_task_indices = [chembl_id_to_idx[assay] for assay in randomized_F_tasks] # Grab B and F indices for T_val T_val_B_task_indices = randomized_B_task_indices[:T_val_idx] T_val_F_task_indices = randomized_F_task_indices[:T_val_idx] # Grab B and F indices for T_test T_test_B_task_indices = randomized_B_task_indices[T_val_idx:T_test_idx] T_test_F_task_indices = randomized_F_task_indices[T_val_idx:T_test_idx] # Grab all A, T and U indices for T_test T_test_A_task_indices = [chembl_id_to_idx[assay] for assay in chembl_128_assay_type_to_names['A']] T_test_T_task_indices = [chembl_id_to_idx[assay] for assay in chembl_128_assay_type_to_names['T']] T_test_U_task_indices = [chembl_id_to_idx[assay] for assay in chembl_128_assay_type_to_names['U']] # Slot remaining BF tasks into T_tr T_tr_B_task_indices = randomized_B_task_indices[T_test_idx:] T_tr_F_task_indices = randomized_F_task_indices[T_test_idx:] T_tr = [0] * len(args.task_names) T_val = [0] * len(args.task_names) T_test = [0] * len(args.task_names) # Now make task bit vectors for idx_list in (T_tr_B_task_indices, T_tr_F_task_indices): for idx in idx_list: T_tr[idx] = 1 for idx_list in (T_val_B_task_indices, T_val_F_task_indices): for idx in idx_list: T_val[idx] = 1 for idx_list in (T_test_B_task_indices, T_test_F_task_indices, T_test_A_task_indices, T_test_T_task_indices, T_test_U_task_indices): for idx in idx_list: T_test[idx] = 1 """ Random task split for testing task_indices = list(range(len(args.task_names))) np.random.shuffle(task_indices) train_task_split, val_task_split, test_task_split = 0.9, 0, 0.1 train_task_cutoff = int(len(task_indices) * train_task_split) train_task_idxs, test_task_idxs = [0] * len(task_indices), [0] * len(task_indices) for idx in task_indices[:train_task_cutoff]: train_task_idxs[idx] = 1 for idx in task_indices[train_task_cutoff:]: test_task_idxs[idx] = 1 """ train_meta_task_data_loader = MetaTaskDataLoader( dataset=data, tasks=T_tr, sizes=args.meta_train_split_sizes, args=args, logger=logger) val_meta_task_data_loader = MetaTaskDataLoader( dataset=data, tasks=T_val, sizes=args.meta_test_split_sizes, args=args, logger=logger) test_meta_task_data_loader = MetaTaskDataLoader( dataset=data, tasks=T_test, sizes=args.meta_test_split_sizes, args=args, logger=logger) import pdb; pdb.set_trace() for meta_train_batch in train_meta_task_data_loader.tasks(): for train_task in meta_train_batch: print('In inner loop') continue # Train ensemble of models for model_idx in range(args.ensemble_size): # Tensorboard writer save_dir = os.path.join(args.save_dir, f'model_{model_idx}') makedirs(save_dir) try: writer = SummaryWriter(log_dir=save_dir) except: writer = SummaryWriter(logdir=save_dir) # Load/build model if args.checkpoint_paths is not None: debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}') model = load_checkpoint(args.checkpoint_paths[model_idx], logger=logger) else: 
    train_meta_task_data_loader = MetaTaskDataLoader(
        dataset=data,
        tasks=T_tr,
        sizes=args.meta_train_split_sizes,
        args=args,
        logger=logger)
    val_meta_task_data_loader = MetaTaskDataLoader(
        dataset=data,
        tasks=T_val,
        sizes=args.meta_test_split_sizes,
        args=args,
        logger=logger)
    test_meta_task_data_loader = MetaTaskDataLoader(
        dataset=data,
        tasks=T_test,
        sizes=args.meta_test_split_sizes,
        args=args,
        logger=logger)

    # Placeholder walk over the meta-training tasks (inner-loop training is
    # not implemented yet)
    for meta_train_batch in train_meta_task_data_loader.tasks():
        for train_task in meta_train_batch:
            debug('In inner loop')

    # Train ensemble of models
    for model_idx in range(args.ensemble_size):
        # Tensorboard writer
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)
        try:
            writer = SummaryWriter(log_dir=save_dir)
        except TypeError:
            # Older tensorboardX versions take ``logdir`` instead of ``log_dir``
            writer = SummaryWriter(logdir=save_dir)

        # Load/build model
        if args.checkpoint_paths is not None:
            debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}')
            model = load_checkpoint(args.checkpoint_paths[model_idx], logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = MoleculeModel(args)

        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
            model = model.to(args.device)

        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)

        # Optimizers
        optimizer = build_optimizer(model, args)

        # Learning rate schedulers
        scheduler = build_lr_scheduler(optimizer, args)

        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        for epoch in trange(args.epochs):
            debug(f'Epoch {epoch}')

            n_iter = train(
                model=model,
                data_loader=train_data_loader,
                loss_func=loss_func,
                optimizer=optimizer,
                scheduler=scheduler,
                args=args,
                n_iter=n_iter,
                logger=logger,
                writer=writer
            )
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()
            val_scores = evaluate(
                model=model,
                data_loader=val_data_loader,
                num_tasks=args.num_tasks,
                metric_func=metric_func,
                dataset_type=args.dataset_type,
                scaler=scaler,
                logger=logger
            )

            # Average validation score
            avg_val_score = np.nanmean(val_scores)
            debug(f'Validation {args.metric} = {avg_val_score:.6f}')
            writer.add_scalar(f'validation_{args.metric}', avg_val_score, n_iter)

            if args.show_individual_scores:
                # Individual validation scores
                for task_name, val_score in zip(args.task_names, val_scores):
                    debug(f'Validation {task_name} {args.metric} = {val_score:.6f}')
                    writer.add_scalar(f'validation_{task_name}_{args.metric}', val_score, n_iter)

            # Save model checkpoint if improved validation score
            if args.minimize_score and avg_val_score < best_score or \
                    not args.minimize_score and avg_val_score > best_score:
                best_score, best_epoch = avg_val_score, epoch
                save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)

        # Evaluate on test set using model with best validation score
        info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'), device=args.device, logger=logger)

        test_preds = predict(
            model=model,
            data_loader=test_data_loader,
            scaler=scaler
        )
        test_scores = evaluate_predictions(
            preds=test_preds,
            targets=test_targets,
            num_tasks=args.num_tasks,
            metric_func=metric_func,
            dataset_type=args.dataset_type,
            logger=logger
        )

        if len(test_preds) != 0:
            sum_test_preds += np.array(test_preds)

        # Average test score
        avg_test_score = np.nanmean(test_scores)
        info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
        writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)

        if args.show_individual_scores:
            # Individual test scores
            for task_name, test_score in zip(args.task_names, test_scores):
                info(f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}')
                writer.add_scalar(f'test_{task_name}_{args.metric}', test_score, n_iter)
        writer.close()

    # Evaluate ensemble on test set
    avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()

    ensemble_scores = evaluate_predictions(
        preds=avg_test_preds,
        targets=test_targets,
        num_tasks=args.num_tasks,
        metric_func=metric_func,
        dataset_type=args.dataset_type,
        logger=logger
    )

    # Average ensemble score
    avg_ensemble_test_score = np.nanmean(ensemble_scores)
    info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')

    # Individual ensemble scores
    if args.show_individual_scores:
        for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
            info(f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}')

    return ensemble_scores
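# Illustrative entry point (an assumption; mirrors chemprop-style training
# scripts rather than anything defined in this module):
if __name__ == '__main__':
    run_meta_training(args=TrainArgs().parse_args(), logger=None)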