# Evaluate a pre-trained PLUS model on the transmembrane region prediction
# task (3-class, per-amino-acid classification).

import os
import sys
import argparse

import torch

# NOTE: these import paths follow the PLUS repository layout; they are
# assumptions reconstructed from the module names used in main() and may
# need adjusting to match the surrounding codebase.
import plus.config as config
from plus.data.alphabets import Protein
import plus.data.transmembrane as transmembrane
import plus.data.dataset as dataset
import plus.model.plus_rnn as plus_rnn
import plus.model.plus_tfm as plus_tfm
import plus.model.p_elmo as p_elmo
import plus.model.mlp as mlp
from plus.train import Trainer
from plus.utils import Print, set_seeds, set_output, load_models

# The module-level parser is not shown in the original excerpt; this is a
# minimal sketch reconstructed from the args[...] keys used in main(). Flag
# names are assumptions, and the full script may define additional flags
# (e.g., pre-trained model paths consumed by set_output / load_models).
parser = argparse.ArgumentParser("Evaluate a PLUS model on a transmembrane dataset")
parser.add_argument("--data-config", required=True, help="data configuration file")
parser.add_argument("--model-config", required=True, help="model configuration file")
parser.add_argument("--lm-model-config", help="(optional) language model configuration file")
parser.add_argument("--pr-model-config", help="prediction model configuration file (RNN models only)")
parser.add_argument("--run-config", required=True, help="run configuration file")
parser.add_argument("--device", help="CUDA device(s) to use; CPU if not set")
parser.add_argument("--sanity-check", default=False, action="store_true", help="quick sanity-check run")


def main():
    set_seeds(2020)
    args = vars(parser.parse_args())

    ## load configurations
    alphabet = Protein()
    cfgs = []
    data_cfg = config.DataConfig(args["data_config"]); cfgs.append(data_cfg)
    if args["lm_model_config"] is None:
        model_cfg = config.ModelConfig(args["model_config"], input_dim=len(alphabet), num_classes=3)
        cfgs += [model_cfg]
    else:
        lm_model_cfg = config.ModelConfig(args["lm_model_config"], idx="lm_model_config", input_dim=len(alphabet))
        model_cfg = config.ModelConfig(args["model_config"], input_dim=len(alphabet),
                                       lm_dim=lm_model_cfg.num_layers * lm_model_cfg.hidden_dim * 2, num_classes=3)
        cfgs += [model_cfg, lm_model_cfg]
    if model_cfg.model_type == "RNN":
        pr_model_cfg = config.ModelConfig(args["pr_model_config"], idx="pr_model_config", model_type="MLP", num_classes=3)
        if pr_model_cfg.projection: pr_model_cfg.set_input_dim(model_cfg.embedding_dim)
        else:                       pr_model_cfg.set_input_dim(model_cfg.hidden_dim * 2)
        cfgs.append(pr_model_cfg)
    run_cfg = config.RunConfig(args["run_config"], sanity_check=args["sanity_check"]); cfgs.append(run_cfg)
    output, save_prefix = set_output(args, "eval_transmembrane_log", test=True)
    os.environ['CUDA_VISIBLE_DEVICES'] = args["device"] if args["device"] is not None else ""
    device, data_parallel = torch.device("cuda" if torch.cuda.is_available() else "cpu"), torch.cuda.device_count() > 1
    config.print_configs(args, cfgs, device, output)
    flag_rnn = (model_cfg.model_type == "RNN")
    flag_lm_model = (args["lm_model_config"] is not None)

    ## load a test dataset
    start = Print(" ".join(['start loading test datasets', data_cfg.path["test"]]), output)
    dataset_test = transmembrane.load_transmembrane(data_cfg, "test", alphabet, args["sanity_check"])
    dataset_test = dataset.Seq_dataset(*dataset_test, alphabet, run_cfg, flag_rnn, model_cfg.max_len, truncate=False)
    collate_fn = dataset.collate_sequences if flag_rnn else None
    iterator_test = torch.utils.data.DataLoader(dataset_test, run_cfg.batch_size_eval, collate_fn=collate_fn)
    end = Print(" ".join(['loaded', str(len(dataset_test)), 'sequences']), output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## initialize a model
    start = Print('start initializing a model', output)
    models_list = []  # list of lists [model, idx, flag_frz, flag_clip_grad, flag_clip_weight]
    ### model
    if not flag_rnn:        model = plus_tfm.PLUS_TFM(model_cfg)
    elif not flag_lm_model: model = plus_rnn.PLUS_RNN(model_cfg)
    else:                   model = p_elmo.P_ELMo(model_cfg)
    models_list.append([model, "", flag_lm_model, flag_rnn, False])
    ### lm_model
    if flag_lm_model:
        lm_model = p_elmo.P_ELMo_lm(lm_model_cfg)
        models_list.append([lm_model, "lm", True, False, False])
    ### pr_model
    if flag_rnn:
        pr_model = mlp.MLP(pr_model_cfg)
        models_list.append([pr_model, "pr", False, False, False])
    params, pr_params = [], []
    for model, idx, frz, _, _ in models_list:
        if frz:           continue
        elif idx != "pr": params    += [p for p in model.parameters() if p.requires_grad]
        else:             pr_params += [p for p in model.parameters() if p.requires_grad]
    load_models(args, models_list, device, data_parallel, output, tfm_cls=flag_rnn)
    get_loss = plus_rnn.get_loss if flag_rnn else plus_tfm.get_loss
    end = Print('end initializing a model', output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## setup trainer configurations
    start = Print('start setting trainer configurations', output)
    optim = torch.optim.Adam([{'params': params,    'lr': run_cfg.learning_rate},
                              {'params': pr_params, 'lr': run_cfg.pr_learning_rate}])
    tasks_list = []  # list of lists [idx, metrics_train, metrics_eval]
tasks_list.append(["cls", [], ["acc", "acc_p"]]) if not flag_lm_model: tasks_list.append(["lm", [], ["acc"]]) trainer = Trainer(models_list, get_loss, run_cfg, tasks_list, optim) trainer_args = {} trainer_args["data_parallel"] = data_parallel trainer_args["paired"] = False if flag_rnn: trainer_args["projection"] = pr_model_cfg.projection if flag_rnn: trainer_args["evaluate_cls"] = plus_rnn.evaluate_transmembrane else: trainer_args["evaluate_cls"] = plus_tfm.evaluate_cls_amino trainer_args["evaluate"] = ["cls", plus_tfm.evaluate_transmembrane] end = Print('end setting trainer configurations', output) Print("".join(['elapsed time:', str(end - start)]), output, newline=True) ## train a model start = Print('start evaluating a model', output) Print(trainer.get_headline(test=True), output) ### evaluate cls dataset_test.set_augment(False) trainer.set_exec_flags(["cls", 'lm'], [True, False]) for b, batch in enumerate(iterator_test): batch = [t.to(device) if type(t) is torch.Tensor else t for t in batch] trainer.evaluate(batch, trainer_args) if b % 10 == 0: print('# cls {:.1%} loss={:.4f}'.format( b / len(iterator_test), trainer.loss_eval), end='\r', file=sys.stderr) print(' ' * 150, end='\r', file=sys.stderr) ### evaluate lm if not flag_lm_model: dataset_test.set_augment(True) trainer.set_exec_flags(["cls", 'lm'], [False, True]) for b, batch in enumerate(iterator_test): batch = [t.to(device) if type(t) is torch.Tensor else t for t in batch] trainer.evaluate(batch, trainer_args) if b % 10 == 0: print('# lm {:.1%} loss={:.4f}'.format( b / len(iterator_test), trainer.loss_eval), end='\r', file=sys.stderr) print(' ' * 150, end='\r', file=sys.stderr) Print(trainer.get_log(test_idx="Transmembrane", args=trainer_args), output) trainer.reset() end = Print('end evaluating a model', output) Print("".join(['elapsed time:', str(end - start)]), output, newline=True)
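

# Conventional script entry point (assumed; not shown in the original excerpt).
if __name__ == "__main__":
    main()

# Example invocation (a sketch, not from the original source: the config-file
# paths are hypothetical, and the flag names follow the argparse definitions
# assumed at the top of this script):
#
#   python eval_transmembrane.py \
#       --data-config  config/data/transmembrane.json \
#       --model-config config/model/plus-rnn.json \
#       --run-config   config/run/eval.json \
#       --device       0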