Code example #1
def main():
    args = vars(parser.parse_args())
    check_args(args)
    set_seeds(2020)
    data_cfg = config.DataConfig(args["data_config"])
    model_cfg = config.ModelConfig(args["model_config"])
    run_cfg   = config.RunConfig(args["run_config"], eval=True)
    output, save_prefix = set_output(args, "evaluate_model_log")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    config.print_configs(args, [data_cfg, model_cfg, run_cfg], device, output)
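    # moving a dummy tensor to the device forces CUDA initialization up front (a no-op on CPU)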
    torch.zeros(1).to(device)

    ## Loading datasets
    start = Print(" ".join(['start loading datasets']), output)
    dataset_idxs, datasets, iterators = data_cfg.path.keys(), [], []
    for idx in dataset_idxs:
        dataset = get_dataset_from_configs(data_cfg, idx)
        iterator = torch.utils.data.DataLoader(dataset, run_cfg.batch_size, shuffle=False, pin_memory=True, num_workers=4)
        datasets.append(dataset)
        iterators.append(iterator)
        end = Print(" ".join(['loaded', str(len(dataset)), idx, 'samples']), output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## initialize a model
    start = Print('start initializing a model', output)
    model, params = get_model(model_cfg, data_cfg.with_esa)
    end = Print('end initializing a model', output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## setup trainer configurations
    start = Print('start setting trainer configurations', output)
    trainer = Trainer(model)
    trainer.load_model(args["checkpoint"], output)
    trainer.set_device(device)
    end = Print('end setting trainer configurations', output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## evaluate a model
    start = Print('start evaluating a model', output)
    for idx, dataset, iterator in zip(dataset_idxs, datasets, iterators):
        Print(" ".join(['processing', idx]), output)

        ### validation
        for B, batch in enumerate(iterator):
            trainer.evaluate(batch, device)
            if B % 5 == 0: print('# {} {:.1%}'.format(idx, B / len(iterator)), end='\r', file=sys.stderr)
        print(' ' * 150, end='\r', file=sys.stderr)

        ### save outputs
        trainer.aggregate(dataset.set_labels)
        trainer.save_outputs(idx, save_prefix)

    end = Print('end evaluating a model', output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)
    if output != sys.stdout: output.close()
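
Note: every example on this page times its stages with a Print helper that logs a message and returns the current time, so that end - start above yields an elapsed duration. The helper itself is not part of these excerpts; the following is a minimal sketch reconstructed from its call sites, not the projects' actual implementation:

from datetime import datetime

def Print(string, output, newline=False):
    # hypothetical reconstruction: write a timestamped message to `output`
    # and return the current time so callers can compute `end - start`
    now = datetime.now()
    print("%s %s" % (now.strftime("%m-%d %H:%M:%S"), string), file=output)
    if newline:
        print("", file=output)
    output.flush()
    return now
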
Code example #2
File: evaluate_model.py Project: mswzeus/DeeperHSP
def main():
    args = vars(parser.parse_args())
    check_args(args)
    set_seeds(2021)
    data_cfg = config.DataConfig(args["data_config"])
    model_cfg = config.ModelConfig(args["model_config"])
    run_cfg = config.RunConfig(args["run_config"],
                               eval=True,
                               sanity_check=args["sanity_check"])
    output, save_prefix = set_output(args, "evaluate_model_log")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    config.print_configs(args, [data_cfg, model_cfg, run_cfg], device, output)
    torch.zeros(1).to(device)

    ## Loading a dataset
    start = Print(" ".join(['start loading a dataset']), output)
    dataset_test = get_dataset_from_configs(data_cfg,
                                            "test",
                                            model_cfg.embedder,
                                            sanity_check=args["sanity_check"])
    iterator_test = torch.utils.data.DataLoader(dataset_test,
                                                run_cfg.batch_size,
                                                shuffle=False,
                                                pin_memory=True,
                                                num_workers=4)
    end = Print(
        " ".join(['loaded',
                  str(len(dataset_test)), 'dataset_test samples']), output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## initialize a model
    start = Print('start initializing a model', output)
    model, params = get_model(model_cfg, run_cfg)
    get_profile(model, dataset_test, output)
    end = Print('end initializing a model', output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## setup trainer configurations
    start = Print('start setting trainer configurations', output)
    trainer = Trainer(model)
    trainer.load_model(args["checkpoint"], output)
    trainer.set_device(device)
    end = Print('end setting trainer configurations', output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## evaluate a model
    start = Print('start evaluating a model', output)
    trainer.headline(output)
    ### validation
    for B, batch in enumerate(iterator_test):
        trainer.evaluate(batch, device)
        if B % 5 == 0:
            print('# {:.1%}'.format(B / len(iterator_test)),
                  end='\r',
                  file=sys.stderr)
    print(' ' * 150, end='\r', file=sys.stderr)

    ### print log
    trainer.save_outputs(save_prefix)
    trainer.log(data_cfg.data_idx, output)

    end = Print('end evaluating a model', output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)
    if output != sys.stdout: output.close()
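
For context, each script defines a module-level argparse parser that main() reads via vars(parser.parse_args()). A sketch of what it might look like for code example #2, with flag names mirroring the args[...] keys used above (required-ness, help strings, and any additional flags are guesses):

import argparse

parser = argparse.ArgumentParser(description="evaluate a trained model")
parser.add_argument("--data_config", required=True, help="path to the data configuration file")
parser.add_argument("--model_config", required=True, help="path to the model configuration file")
parser.add_argument("--run_config", required=True, help="path to the run configuration file")
parser.add_argument("--checkpoint", required=True, help="path to a trained model checkpoint")
parser.add_argument("--sanity_check", action="store_true", help="run on a small subset for quick debugging")
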
Code example #3
File: eval_plus_stability.py Project: fivelike/PLUS
def main():
    set_seeds(2020)
    args = vars(parser.parse_args())

    alphabet = Protein()
    cfgs = []
    data_cfg = config.DataConfig(args["data_config"])
    cfgs.append(data_cfg)
    if args["lm_model_config"] is None:
        model_cfg = config.ModelConfig(args["model_config"],
                                       input_dim=len(alphabet),
                                       num_classes=1)
        cfgs += [model_cfg]
    else:
        lm_model_cfg = config.ModelConfig(args["lm_model_config"],
                                          idx="lm_model_config",
                                          input_dim=len(alphabet))
        model_cfg = config.ModelConfig(args["model_config"],
                                       input_dim=len(alphabet),
                                       lm_dim=lm_model_cfg.num_layers *
                                       lm_model_cfg.hidden_dim * 2,
                                       num_classes=1)
        cfgs += [model_cfg, lm_model_cfg]
    if model_cfg.model_type == "RNN":
        pr_model_cfg = config.ModelConfig(args["pr_model_config"],
                                          idx="pr_model_config",
                                          model_type="MLP",
                                          num_classes=1)
        if pr_model_cfg.projection:
            pr_model_cfg.set_input_dim(model_cfg.embedding_dim)
        else:
            pr_model_cfg.set_input_dim(model_cfg.hidden_dim * 2)
        cfgs.append(pr_model_cfg)
    run_cfg = config.RunConfig(args["run_config"],
                               sanity_check=args["sanity_check"])
    cfgs.append(run_cfg)
    output, save_prefix = set_output(args, "eval_stability_log", test=True)
    os.environ['CUDA_VISIBLE_DEVICES'] = args["device"] if args["device"] is not None else ""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data_parallel = torch.cuda.device_count() > 1
    config.print_configs(args, cfgs, device, output)
    flag_rnn = (model_cfg.model_type == "RNN")
    flag_lm_model = (args["lm_model_config"] is not None)

    ## load test datasets
    start = Print(
        " ".join(['start loading a test dataset', data_cfg.path["test"]]),
        output)
    dataset_test = ss.load_stability(data_cfg, "test", alphabet,
                                     args["sanity_check"])
    dataset_test = dataset.Seq_dataset(*dataset_test, alphabet, run_cfg,
                                       flag_rnn, model_cfg.max_len)
    collate_fn = dataset.collate_sequences if flag_rnn else None
    iterator_test = torch.utils.data.DataLoader(dataset_test,
                                                run_cfg.batch_size_eval,
                                                collate_fn=collate_fn)
    end = Print(" ".join(['loaded',
                          str(len(dataset_test)), 'sequences']), output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## initialize a model
    start = Print('start initializing a model', output)
    models_list = []  # list of lists [model, idx, flag_frz, flag_clip_grad, flag_clip_weight]
    ### model
    if not flag_rnn: model = plus_tfm.PLUS_TFM(model_cfg)
    elif not flag_lm_model: model = plus_rnn.PLUS_RNN(model_cfg)
    else: model = p_elmo.P_ELMo(model_cfg)
    models_list.append([model, "", flag_lm_model, flag_rnn, False])
    ### lm_model
    if flag_lm_model:
        lm_model = p_elmo.P_ELMo_lm(lm_model_cfg)
        models_list.append([lm_model, "lm", True, False, False])
    ### pr_model
    if flag_rnn:
        pr_model = mlp.MLP(pr_model_cfg, per_seq=True)
        models_list.append([pr_model, "pr", False, True, False])
    params, pr_params = [], []
    for model, idx, frz, _, _ in models_list:
        if frz: continue
        elif idx != "pr":
            params += [p for p in model.parameters() if p.requires_grad]
        else:
            pr_params += [p for p in model.parameters() if p.requires_grad]
    load_models(args,
                models_list,
                device,
                data_parallel,
                output,
                tfm_cls=flag_rnn)
    get_loss = plus_rnn.get_loss if flag_rnn else plus_tfm.get_loss
    end = Print('end initializing a model', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## setup trainer configurations
    start = Print('start setting trainer configurations', output)
    tasks_list = []  # list of lists [idx, metrics_train, metrics_eval]
    tasks_list.append(["cls", [], ["rho", "r"]])
    if not flag_lm_model: tasks_list.append(["lm", [], ["acc"]])
    trainer = Trainer(models_list, get_loss, run_cfg, tasks_list)
    trainer_args = {}
    trainer_args["data_parallel"] = data_parallel
    trainer_args["paired"] = False
    if flag_rnn: trainer_args["projection"] = pr_model_cfg.projection
    trainer_args["regression"] = True
    if flag_rnn: trainer_args["evaluate_cls"] = plus_rnn.evaluate_cls_protein
    else: trainer_args["evaluate_cls"] = plus_tfm.evaluate_cls_protein
    end = Print('end setting trainer configurations', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## evaluate a model
    start = Print('start evaluating a model', output)
    Print(trainer.get_headline(test=True), output)

    ### evaluate cls
    dataset_test.set_augment(False)
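    # enable only the cls task for this pass; the lm task is evaluated in a separate pass below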
    trainer.set_exec_flags(["cls", 'lm'], [True, False])
    for b, batch in enumerate(iterator_test):
        batch = [t.to(device) if type(t) is torch.Tensor else t for t in batch]
        trainer.evaluate(batch, trainer_args)
        if b % 10 == 0:
            print('# cls {:.1%} loss={:.4f}'.format(b / len(iterator_test),
                                                    trainer.loss_eval),
                  end='\r',
                  file=sys.stderr)
    print(' ' * 150, end='\r', file=sys.stderr)

    ### evaluate lm
    if not flag_lm_model:
        dataset_test.set_augment(True)
        trainer.set_exec_flags(["cls", 'lm'], [False, True])
        for b, batch in enumerate(iterator_test):
            batch = [
                t.to(device) if type(t) is torch.Tensor else t for t in batch
            ]
            trainer.evaluate(batch, trainer_args)
            if b % 10 == 0:
                print('# lm {:.1%} loss={:.4f}'.format(b / len(iterator_test),
                                                       trainer.loss_eval),
                      end='\r',
                      file=sys.stderr)
        print(' ' * 150, end='\r', file=sys.stderr)

    Print(trainer.get_log(test_idx="Stability", args=trainer_args), output)
    trainer.reset()

    end = Print('end evaluating a model', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)
    output.close()
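
The expression [t.to(device) if type(t) is torch.Tensor else t for t in batch] recurs in every training and evaluation loop of the PLUS examples. A small helper (a sketch, not part of the original project) would remove that repetition and use the more idiomatic isinstance check:

import torch

def batch_to_device(batch, device):
    # move each tensor in a mixed batch to the target device,
    # leaving non-tensor entries (e.g. label lists or lengths) untouched
    return [t.to(device) if isinstance(t, torch.Tensor) else t for t in batch]
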
Code example #4
def main():
    set_seeds(2020)
    args = vars(parser.parse_args())

    alphabet = Protein()
    cfgs = []
    data_cfg = config.DataConfig(args["data_config"])
    cfgs.append(data_cfg)
    if args["lm_model_config"] is None:
        model_cfg = config.ModelConfig(args["model_config"],
                                       input_dim=len(alphabet),
                                       num_classes=3)
        cfgs += [model_cfg]
    else:
        lm_model_cfg = config.ModelConfig(args["lm_model_config"],
                                          idx="lm_model_config",
                                          input_dim=len(alphabet))
        model_cfg = config.ModelConfig(args["model_config"],
                                       input_dim=len(alphabet),
                                       lm_dim=lm_model_cfg.num_layers *
                                       lm_model_cfg.hidden_dim * 2,
                                       num_classes=3)
        cfgs += [model_cfg, lm_model_cfg]
    if model_cfg.model_type == "RNN":
        pr_model_cfg = config.ModelConfig(args["pr_model_config"],
                                          idx="pr_model_config",
                                          model_type="MLP",
                                          num_classes=3)
        if pr_model_cfg.projection:
            pr_model_cfg.set_input_dim(model_cfg.embedding_dim)
        else:
            pr_model_cfg.set_input_dim(model_cfg.hidden_dim * 2)
        cfgs.append(pr_model_cfg)
    run_cfg = config.RunConfig(args["run_config"],
                               sanity_check=args["sanity_check"])
    cfgs.append(run_cfg)
    output, save_prefix = set_output(args, "train_transmembrane_log")
    os.environ['CUDA_VISIBLE_DEVICES'] = args["device"] if args["device"] is not None else ""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data_parallel = torch.cuda.device_count() > 1
    config.print_configs(args, cfgs, device, output)
    flag_rnn = (model_cfg.model_type == "RNN")
    flag_lm_model = (args["lm_model_config"] is not None)
    flag_lm_loss = (run_cfg.lm_loss_lambda != -1)

    ## load a train dataset
    start = Print(
        " ".join(['start loading train datasets', data_cfg.path["train"]]),
        output)
    dataset_train = transmembrane.load_transmembrane(data_cfg, "train",
                                                     alphabet,
                                                     args["sanity_check"])
    dataset_train = dataset.Seq_dataset(*dataset_train,
                                        alphabet,
                                        run_cfg,
                                        flag_rnn,
                                        model_cfg.max_len,
                                        truncate=False)
    collate_fn = dataset.collate_sequences if flag_rnn else None
    iterator_train = torch.utils.data.DataLoader(dataset_train,
                                                 run_cfg.batch_size_train,
                                                 collate_fn=collate_fn,
                                                 shuffle=True)
    end = Print(" ".join(['loaded',
                          str(len(dataset_train)), 'sequences']), output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## load a dev dataset
    start = Print(
        " ".join(['start loading dev datasets', data_cfg.path["dev"]]), output)
    dataset_dev = transmembrane.load_transmembrane(data_cfg, "dev", alphabet,
                                                   args["sanity_check"])
    dataset_dev = dataset.Seq_dataset(*dataset_dev,
                                      alphabet,
                                      run_cfg,
                                      flag_rnn,
                                      model_cfg.max_len,
                                      truncate=False)
    iterator_dev = torch.utils.data.DataLoader(dataset_dev,
                                               run_cfg.batch_size_eval,
                                               collate_fn=collate_fn)
    end = Print(" ".join(['loaded',
                          str(len(dataset_dev)), 'sequences']), output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## initialize a model
    start = Print('start initializing a model', output)
    models_list = []  # list of lists [model, idx, flag_frz, flag_clip_grad, flag_clip_weight]
    ### model
    if not flag_rnn: model = plus_tfm.PLUS_TFM(model_cfg)
    elif not flag_lm_model: model = plus_rnn.PLUS_RNN(model_cfg)
    else: model = p_elmo.P_ELMo(model_cfg)
    models_list.append([model, "", flag_lm_model, flag_rnn, False])
    ### lm_model
    if flag_lm_model:
        lm_model = p_elmo.P_ELMo_lm(lm_model_cfg)
        models_list.append([lm_model, "lm", True, False, False])
    ### pr_model
    if flag_rnn:
        pr_model = mlp.MLP(pr_model_cfg)
        models_list.append([pr_model, "pr", False, False, False])
    params, pr_params = [], []
    for model, idx, frz, _, _ in models_list:
        if frz: continue
        elif idx != "pr":
            params += [p for p in model.parameters() if p.requires_grad]
        else:
            pr_params += [p for p in model.parameters() if p.requires_grad]
    load_models(args,
                models_list,
                device,
                data_parallel,
                output,
                tfm_cls=flag_rnn)
    get_loss = plus_rnn.get_loss if flag_rnn else plus_tfm.get_loss
    end = Print('end initializing a model', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## setup trainer configurations
    start = Print('start setting trainer configurations', output)
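    # two parameter groups: the main model parameters and the prediction-head (pr) parameters get separate learning rates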
    optim = torch.optim.Adam([{
        'params': params,
        'lr': run_cfg.learning_rate
    }, {
        'params': pr_params,
        'lr': run_cfg.pr_learning_rate
    }])
    tasks_list = []  # list of lists [idx, metrics_train, metrics_eval]
    tasks_list.append(["cls", [], ["acc", "acc_p"]])
    if flag_lm_loss: tasks_list.append(["lm", [], ["acc"]])
    trainer = Trainer(models_list, get_loss, run_cfg, tasks_list, optim)
    trainer_args = {}
    trainer_args["data_parallel"] = data_parallel
    trainer_args["paired"] = False
    if flag_rnn: trainer_args["projection"] = pr_model_cfg.projection
    if flag_rnn:
        trainer_args["evaluate_cls"] = plus_rnn.evaluate_transmembrane
    else:
        trainer_args["evaluate_cls"] = plus_tfm.evaluate_cls_amino
        trainer_args["evaluate"] = ["cls", plus_tfm.evaluate_transmembrane]
    end = Print('end setting trainer configurations', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## train a model
    start = Print('start training a model', output)
    Print(trainer.get_headline(), output)
    for epoch in range(run_cfg.num_epochs):
        ### train
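        # enable input augmentation only when the LM loss is being trained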
        dataset_train.set_augment(flag_lm_loss)
        for B, batch in enumerate(iterator_train):
            batch = [
                t.to(device) if type(t) is torch.Tensor else t for t in batch
            ]
            trainer.train(batch, trainer_args)
            if B % 10 == 0:
                print('# epoch [{}/{}] train {:.1%} loss={:.4f}'.format(
                    epoch + 1, run_cfg.num_epochs, B / len(iterator_train),
                    trainer.loss_train),
                      end='\r',
                      file=sys.stderr)
        print(' ' * 150, end='\r', file=sys.stderr)

        ### evaluate cls
        dataset_dev.set_augment(False)
        trainer.set_exec_flags(["cls", 'lm'], [True, False])
        for b, batch in enumerate(iterator_dev):
            batch = [
                t.to(device) if type(t) is torch.Tensor else t for t in batch
            ]
            trainer.evaluate(batch, trainer_args)
            if b % 10 == 0:
                print('# cls {:.1%} loss={:.4f}'.format(
                    b / len(iterator_dev), trainer.loss_eval),
                      end='\r',
                      file=sys.stderr)
        print(' ' * 150, end='\r', file=sys.stderr)

        ### evaluate lm
        if flag_lm_loss:
            dataset_dev.set_augment(True)
            trainer.set_exec_flags(["cls", 'lm'], [False, True])
            for b, batch in enumerate(iterator_dev):
                batch = [
                    t.to(device) if type(t) is torch.Tensor else t
                    for t in batch
                ]
                trainer.evaluate(batch, trainer_args)
                if b % 10 == 0:
                    print('# lm {:.1%} loss={:.4f}'.format(
                        b / len(iterator_dev), trainer.loss_eval),
                          end='\r',
                          file=sys.stderr)
            print(' ' * 150, end='\r', file=sys.stderr)

        ### print log and save models
        trainer.save(save_prefix)
        Print(trainer.get_log(epoch + 1, args=trainer_args), output)
        trainer.set_exec_flags(["cls", "lm"], [True, True])
        trainer.reset()
        if trainer.patience == 0: break

    end = Print('end training a model', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)
Code example #5
File: train_plus_pfam.py Project: fivelike/PLUS
def main():
    set_seeds(2020)
    args = vars(parser.parse_args())

    alphabet = Protein()
    data_cfg = config.DataConfig(args["data_config"])
    model_cfg = config.ModelConfig(args["model_config"],
                                   input_dim=len(alphabet),
                                   num_classes=2)
    run_cfg = config.RunConfig(args["run_config"],
                               sanity_check=args["sanity_check"])
    output, save_prefix = set_output(args, "train_pfam_log")
    os.environ['CUDA_VISIBLE_DEVICES'] = args["device"] if args["device"] is not None else ""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data_parallel = torch.cuda.device_count() > 1
    config.print_configs(args, [data_cfg, model_cfg, run_cfg], device, output)
    flag_rnn = (model_cfg.model_type == "RNN")
    flag_paired = ("testpairs" in data_cfg.path)

    ## load a train dataset
    start = Print(
        " ".join(['start loading a train dataset:', data_cfg.path["train"]]),
        output)
    dataset_train = pfam.load_pfam(data_cfg, "train", alphabet,
                                   args["sanity_check"])
    dataset_train = dataset.Pfam_dataset(*dataset_train,
                                         alphabet,
                                         run_cfg,
                                         flag_rnn,
                                         model_cfg.max_len,
                                         random_pairing=flag_paired,
                                         sanity_check=args["sanity_check"])
    if flag_rnn and flag_paired: collate_fn = dataset.collate_paired_sequences
    elif flag_rnn: collate_fn = dataset.collate_sequences_pelmo
    else: collate_fn = None
    iterator_train = torch.utils.data.DataLoader(dataset_train,
                                                 run_cfg.batch_size_train,
                                                 collate_fn=collate_fn,
                                                 shuffle=True)
    end = Print(" ".join(['loaded',
                          str(len(dataset_train)), 'sequences']), output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## load a test dataset
    start = Print(
        " ".join([
            'start loading a test dataset:',
            data_cfg.path["testpairs" if flag_paired else "test"]
        ]), output)
    if flag_paired:
        dataset_test = pfam.load_pfam_pairs(data_cfg, "testpairs", alphabet,
                                            args["sanity_check"])
        dataset_test = dataset.PairedPfam_dataset(*dataset_test, alphabet,
                                                  run_cfg, flag_rnn,
                                                  model_cfg.max_len)
    else:
        dataset_test = pfam.load_pfam(data_cfg, "test", alphabet,
                                      args["sanity_check"])
        dataset_test = dataset.Pfam_dataset(*dataset_test,
                                            alphabet,
                                            run_cfg,
                                            flag_rnn,
                                            model_cfg.max_len,
                                            random_pairing=flag_paired,
                                            sanity_check=args["sanity_check"])
    iterator_test = torch.utils.data.DataLoader(dataset_test,
                                                run_cfg.batch_size_eval,
                                                collate_fn=collate_fn)
    end = Print(
        " ".join(['loaded',
                  str(len(dataset_test)), 'sequence(pair)s']), output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## initialize a model
    start = Print('start initializing a model', output)
    models_list = []  # list of lists [model, idx, flag_frz, flag_clip_grad, flag_clip_weight]
    if not flag_rnn:
        model = plus_tfm.PLUS_TFM(model_cfg)
        run_cfg.set_total_steps(len(dataset_train))
    elif model_cfg.rnn_type == "B":
        model = plus_rnn.PLUS_RNN(model_cfg)
    else:
        model = p_elmo.P_ELMo_lm(model_cfg)
    models_list.append([model, "", False, flag_rnn, flag_rnn and flag_paired])
    params = []
    for model, _, frz, _, _ in models_list:
        if not frz:
            params += [p for p in model.parameters() if p.requires_grad]
    load_models(args, models_list, device, data_parallel, output)
    get_loss = plus_rnn.get_loss if flag_rnn else plus_tfm.get_loss
    end = Print('end initializing a model', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## setup trainer configurations
    start = Print('start setting trainer configurations', output)
    if flag_rnn: optim = torch.optim.Adam(params, lr=run_cfg.learning_rate)
    else: optim = get_BertAdam_optimizer(run_cfg, models_list[0][0])
    tasks_list = []  # list of lists [idx, metrics_train, metrics_eval]
    if run_cfg.lm_loss_lambda != -1: tasks_list.append(["lm", [], ["acc"]])
    if run_cfg.cls_loss_lambda != -1: tasks_list.append(["cls", [], ["acc"]])
    trainer = Trainer(models_list, get_loss, run_cfg, tasks_list, optim)
    trainer_args = {}
    trainer_args["data_parallel"] = data_parallel
    trainer_args["paired"] = flag_paired
    if flag_paired and flag_rnn:
        trainer_args["evaluate_cls"] = plus_rnn.evaluate_sfp
    elif flag_paired:
        trainer_args["evaluate_cls"] = plus_tfm.evaluate_sfp
    else:
        trainer_args["num_alphabets"] = len(alphabet)
    end = Print('end setting trainer configurations', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## train a model
    start = Print('start training a model', output)
    Print(trainer.get_headline(), output)
    for epoch in range(run_cfg.num_epochs):
        ### train
        for B, batch in enumerate(iterator_train):
            batch = [
                t.to(device) if type(t) is torch.Tensor else t for t in batch
            ]
            trainer.train(batch, trainer_args)
            if B % 10 == 0:
                print('# epoch [{}/{}] train {:.1%} loss={:.4f}'.format(
                    epoch + 1, run_cfg.num_epochs, B / len(iterator_train),
                    trainer.loss_train),
                      end='\r',
                      file=sys.stderr)

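            # every 20000 global steps (or immediately when sanity-checking), run evaluation and save a checkpoint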
            if trainer.global_step % 20000 == 0 or args["sanity_check"]:
                print(' ' * 150, end='\r', file=sys.stderr)

                ### evaluate lm
                if run_cfg.lm_loss_lambda != -1:
                    if flag_paired: dataset_test.set_augment(True)
                    trainer.set_exec_flags(["lm", "cls"], [True, False])
                    for b, batch in enumerate(iterator_test):
                        batch = [
                            t.to(device) if type(t) is torch.Tensor else t
                            for t in batch
                        ]
                        trainer.evaluate(batch, trainer_args)
                        if b % 10 == 0:
                            print('# lm {:.1%} loss={:.4f}'.format(
                                b / len(iterator_test), trainer.loss_eval),
                                  end='\r',
                                  file=sys.stderr)
                    print(' ' * 150, end='\r', file=sys.stderr)

                ### evaluate cls
                if run_cfg.cls_loss_lambda != -1:
                    dataset_test.set_augment(False)
                    trainer.set_exec_flags(["lm", "cls"], [False, True])
                    for b, batch in enumerate(iterator_test):
                        batch = [
                            t.to(device) if type(t) is torch.Tensor else t
                            for t in batch
                        ]
                        trainer.evaluate(batch, trainer_args)
                        if b % 10 == 0:
                            print('# cls {:.1%} loss={:.4f}'.format(
                                b / len(iterator_test), trainer.loss_eval),
                                  end='\r',
                                  file=sys.stderr)
                    print(' ' * 150, end='\r', file=sys.stderr)

                ### print log and save models
                trainer.save(save_prefix)
                Print(trainer.get_log(epoch + 1, args=trainer_args), output)
                trainer.set_exec_flags(["lm", "cls"], [True, True])
                trainer.reset()

    end = Print('end training a model', output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)
    output.close()
Code example #6
def main():
    set_seeds(2020)
    args = vars(parser.parse_args())

    alphabet = Protein()
    cfgs = []
    data_cfg = config.DataConfig(args["data_config"])
    cfgs.append(data_cfg)
    if args["lm_model_config"] is None:
        model_cfg = config.ModelConfig(args["model_config"],
                                       input_dim=len(alphabet),
                                       num_classes=5)
        cfgs += [model_cfg]
    else:
        lm_model_cfg = config.ModelConfig(args["lm_model_config"],
                                          idx="lm_model_config",
                                          input_dim=len(alphabet))
        model_cfg = config.ModelConfig(args["model_config"],
                                       input_dim=len(alphabet),
                                       lm_dim=lm_model_cfg.num_layers *
                                       lm_model_cfg.hidden_dim * 2,
                                       num_classes=5)
        cfgs += [model_cfg, lm_model_cfg]
    run_cfg = config.RunConfig(args["run_config"],
                               eval=True,
                               sanity_check=args["sanity_check"])
    cfgs.append(run_cfg)
    output, save_prefix = set_output(args, "eval_homology_log", test=True)
    os.environ['CUDA_VISIBLE_DEVICES'] = args["device"] if args["device"] is not None else ""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data_parallel = torch.cuda.device_count() > 1
    config.print_configs(args, cfgs, device, output)
    flag_rnn = (model_cfg.model_type == "RNN")
    flag_lm_model = (args["pretrained_lm_model"] is not None)
    flag_cm_model = (args["pretrained_cm_model"] is not None)

    ## load test datasets
    idxs_test = [key for key in data_cfg.path.keys() if "pairs" in key]
    datasets_test, iterators_test = [], []
    start = Print(" ".join(['start loading test datasets'] + idxs_test),
                  output)
    collate_fn = dataset.collate_paired_sequences if flag_rnn else None
    for idx_test in idxs_test:
        dataset_test = homology.load_homology_pairs(data_cfg, idx_test,
                                                    alphabet, flag_cm_model,
                                                    args["sanity_check"])
        dataset_test = dataset.PairedHomology_dataset(*dataset_test, alphabet,
                                                      run_cfg, flag_rnn,
                                                      model_cfg.max_len)
        iterator_test = torch.utils.data.DataLoader(dataset_test,
                                                    run_cfg.batch_size_eval,
                                                    collate_fn=collate_fn)
        datasets_test.append(dataset_test)
        iterators_test.append(iterator_test)
        end = Print(
            " ".join(['loaded',
                      str(len(dataset_test)), 'sequence pairs']), output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## initialize a model
    start = Print('start initializing a model', output)
    models_list = []  # list of lists [model, idx, flag_frz, flag_clip_grad, flag_clip_weight]
    ### model
    if not flag_rnn: model = plus_tfm.PLUS_TFM(model_cfg)
    elif not flag_lm_model: model = plus_rnn.PLUS_RNN(model_cfg)
    else: model = p_elmo.P_ELMo(model_cfg)
    models_list.append([model, "", True, False, False])
    ### lm_model
    if flag_lm_model:
        lm_model = p_elmo.P_ELMo_lm(lm_model_cfg)
        models_list.append([lm_model, "lm", True, False, False])
    ### cm_model
    if flag_cm_model:
        cm_model = cnn.ConvNet2D(model_cfg.embedding_dim)
        models_list.append([cm_model, "cm", True, False, False])
    load_models(args, models_list, device, data_parallel, output)
    get_loss = plus_rnn.get_loss if flag_rnn else plus_tfm.get_loss
    end = Print('end initializing a model', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## setup trainer configurations
    start = Print('start setting trainer configurations', output)
    tasks_list = []  # list of lists [idx, metrics_train, metrics_eval]
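    # the aupr_cl / aupr_fo / aupr_sf / aupr_fa suffixes presumably denote AUPR at the SCOP class / fold / superfamily / family levels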
    tasks_list.append([
        "cls", [],
        ["acc", "r", "rho", "aupr_cl", "aupr_fo", "aupr_sf", "aupr_fa"]
    ])
    if not flag_lm_model: tasks_list.append(["lm", [], ["acc"]])
    if flag_cm_model: tasks_list.append(["cm", [], ["pr", "re", "f1"]])
    trainer = Trainer(models_list, get_loss, run_cfg, tasks_list)
    trainer_args = {}
    trainer_args["data_parallel"] = data_parallel
    trainer_args["paired"] = True
    if flag_rnn: trainer_args["evaluate_cls"] = plus_rnn.evaluate_homology
    else: trainer_args["evaluate_cls"] = plus_tfm.evaluate_homology
    trainer_args["evaluate"] = ["cls", homology.evaluate_homology]
    end = Print('end setting trainer configurations', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## evaluate a model
    start = Print('start evaluating a model', output)
    Print(trainer.get_headline(test=True), output)
    for idx_test, dataset_test, iterator_test in zip(idxs_test, datasets_test,
                                                     iterators_test):

        ### evaluate cls and cm
        dataset_test.set_augment(False)
        trainer.set_exec_flags(["cls", 'lm', "cm"], [True, False, True])
        for b, batch in enumerate(iterator_test):
            batch = [
                t.to(device) if type(t) is torch.Tensor else t for t in batch
            ]
            trainer.evaluate(batch, trainer_args)
            if b % 10 == 0:
                print('# cls {:.1%} loss={:.4f}'.format(
                    b / len(iterator_test), trainer.loss_eval),
                      end='\r',
                      file=sys.stderr)
        print(' ' * 150, end='\r', file=sys.stderr)

        ### evaluate lm
        if not flag_lm_model:
            dataset_test.set_augment(True)
            trainer.set_exec_flags(["cls", 'lm', "cm"], [False, True, False])
            for b, batch in enumerate(iterator_test):
                batch = [
                    t.to(device) if type(t) is torch.Tensor else t
                    for t in batch
                ]
                trainer.evaluate(batch, trainer_args)
                if b % 10 == 0:
                    print('# lm {:.1%} loss={:.4f}'.format(
                        b / len(iterator_test), trainer.loss_eval),
                          end='\r',
                          file=sys.stderr)
            print(' ' * 150, end='\r', file=sys.stderr)

        Print(trainer.get_log(test_idx=idx_test, args=trainer_args), output)
        trainer.reset()

    end = Print('end evaluating a model', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)
    output.close()
Code example #7
File: run.py Project: jabalazs/implicit_emotion
def main():
    hp = arg_parser.parse_args()
    validate_args(hp)

    logger = Logger(hp, model_name='Baseline', write_mode=hp.write_mode)
    if not hp.test:
        print(f"Running experiment {logger.model_hash}.")
        if hp.write_mode != 'NONE':
            logger.write_hyperparams()
            print(f"Hyperparameters and checkpoints will be saved in "
                  f"{logger.run_savepath}")

    torch.manual_seed(hp.seed)
    torch.cuda.manual_seed_all(hp.seed)  # silently ignored if there are no GPUs

    CUDA = torch.cuda.is_available() and not hp.no_cuda
    USE_POS = hp.pos_emb_dim is not None
    SAVE_IN_SPREADSHEET = bool(hp.spreadsheet)

    corpus = IESTCorpus(config.corpora_dict,
                        hp.corpus,
                        force_reload=hp.force_reload,
                        train_data_proportion=hp.train_data_proportion,
                        dev_data_proportion=hp.dev_data_proportion,
                        batch_size=hp.batch_size,
                        lowercase=hp.lowercase,
                        use_pos=USE_POS)

    if hp.embeddings != "random":
        # Load pre-trained embeddings
        embeddings = Embeddings(
            config.embedding_dict[hp.embeddings],
            k_most_frequent=None,
            force_reload=hp.force_reload,
        )

        # Get subset of embeddings corresponding to our vocabulary
        embedding_matrix = embeddings.generate_embedding_matrix(
            corpus.lang.token2id)
        print(
            f"{len(embeddings.unknown_tokens)} words from vocabulary not found "
            f"in {hp.embeddings} embeddings. ")
    else:
        word_vocab_size = len(corpus.lang.token2id)
        embedding_matrix = np.random.uniform(-0.05,
                                             0.05,
                                             size=(word_vocab_size, 300))

    # Repeat process for character embeddings with the difference that they are
    # not pretrained

    # Initialize character embedding matrix randomly
    char_vocab_size = len(corpus.lang.char2id)
    char_embedding_matrix = np.random.uniform(-0.05,
                                              0.05,
                                              size=(char_vocab_size,
                                                    hp.char_emb_dim))

    pos_embedding_matrix = None
    if USE_POS:
        # Initialize pos embedding matrix randomly
        pos_vocab_size = len(corpus.pos_lang.token2id)
        pos_embedding_matrix = np.random.uniform(-0.05,
                                                 0.05,
                                                 size=(pos_vocab_size,
                                                       hp.pos_emb_dim))

    if hp.model_hash:
        # WARNING: This feature should be used only when testing. We
        # haven't implemented a proper way to resume training yet.
        experiment_path = os.path.join(config.RESULTS_PATH,
                                       hp.model_hash + '*')
        ext_experiment_path = glob(experiment_path)
        assert len(ext_experiment_path) == 1, 'Try providing a longer model hash'
        ext_experiment_path = ext_experiment_path[0]
        model_path = os.path.join(ext_experiment_path, 'best_model.pth')
        model = torch.load(model_path)

    else:
        num_classes = len(corpus.label2id)
        batch_size = corpus.train_batches.batch_size
        hidden_sizes = hp.lstm_hidden_size
        model = IESTClassifier(
            num_classes,
            batch_size,
            embedding_matrix=embedding_matrix,
            char_embedding_matrix=char_embedding_matrix,
            pos_embedding_matrix=pos_embedding_matrix,
            word_encoding_method=hp.word_encoding_method,
            word_char_aggregation_method=hp.word_char_aggregation_method,
            sent_encoding_method=hp.model,
            hidden_sizes=hidden_sizes,
            use_cuda=CUDA,
            pooling_method=hp.pooling_method,
            batch_first=True,
            dropout=hp.dropout,
            lstm_layer_dropout=hp.lstm_layer_dropout,
            sent_enc_dropout=hp.sent_enc_dropout,
            sent_enc_layers=hp.sent_enc_layers)

    if CUDA:
        model.cuda()

    if hp.write_mode != 'NONE':
        logger.write_architecture(str(model))

    logger.write_current_run_details(str(model))

    if hp.model == 'transformer':
        core_optimizer = torch.optim.Adam(model.parameters(),
                                          lr=0,
                                          betas=(0.9, 0.98),
                                          eps=1e-9)
        transformer_scheduler = TransformerScheduler(
            1024, factor=1, warmup_steps=hp.warmup_iters)
        optimizer = ScheduledOptim(core_optimizer, transformer_scheduler)
    else:
        # optimizer = OptimWithDecay(model.parameters(),
        #                            method=hp.optim,
        #                            initial_lr=hp.learning_rate,
        #                            max_grad_norm=hp.grad_clipping,
        #                            lr_decay=hp.learning_rate_decay,
        #                            start_decay_at=hp.start_decay_at,
        #                            decay_every=hp.decay_every)

        core_optimizer = torch.optim.Adam(
            [param for param in model.parameters() if param.requires_grad],
            lr=0,
        )

        max_iter = corpus.train_batches.num_batches * hp.epochs
        slanted_triangular_scheduler = SlantedTriangularScheduler(
            max_iter, max_lr=hp.max_lr, cut_fraction=0.1, ratio=32)
        optimizer = ScheduledOptim(core_optimizer,
                                   slanted_triangular_scheduler)

    loss_function = torch.nn.CrossEntropyLoss()

    trainer = Trainer(model,
                      optimizer,
                      loss_function,
                      num_epochs=hp.epochs,
                      use_cuda=CUDA,
                      log_interval=hp.log_interval)

    #  FIXME: This test block of code looks ugly here <2018-06-29 11:41:51, Jorge Balazs>
    if hp.test:
        if hp.model_hash is None:
            raise RuntimeError(
                'You should have provided a pre-trained model hash with the '
                '--model_hash flag')

        print(f'Testing model {model_path}')
        eval_dict = trainer.evaluate(corpus.test_batches)

        probs = np_softmax(eval_dict['output'])
        probs_filepath = os.path.join(ext_experiment_path,
                                      'test_probabilities.csv')
        np.savetxt(probs_filepath, probs, delimiter=',', fmt='%.8f')
        print(f'Saved prediction probs in {probs_filepath}')

        labels_filepath = os.path.join(ext_experiment_path,
                                       'test_predictions.txt')
        labels = [label + '\n' for label in eval_dict['labels']]
        with open(labels_filepath, 'w', encoding='utf-8') as f:
            f.writelines(labels)
        print(f'Saved prediction file in {labels_filepath}')

        representations_filepath = os.path.join(
            ext_experiment_path, 'sentence_representations.txt')

        if hp.save_sent_reprs:
            with open(representations_filepath, 'w', encoding='utf-8') as f:
                np.savetxt(representations_filepath,
                           eval_dict['sent_reprs'],
                           delimiter=' ',
                           fmt='%.8f')
        exit()

    # Main Training Loop
    writer = None
    if hp.write_mode != 'NONE':
        writer = SummaryWriter(logger.run_savepath)
    try:
        best_accuracy = None
        for epoch in tqdm(range(hp.epochs), desc='Epoch'):
            total_loss = 0

            trainer.train_epoch(corpus.train_batches, epoch, writer)
            corpus.train_batches.shuffle_examples()
            eval_dict = trainer.evaluate(corpus.dev_batches, epoch, writer)

            if hp.update_learning_rate and hp.model != 'transformer':
                # hp.update_learning_rate is not supposed to be used with
                # scheduled learning rates
                optim_updated, new_lr = trainer.optimizer.updt_lr_accuracy(
                    epoch, eval_dict['accuracy'])

                #  TODO: lr_threshold shouldn't be hardcoded
                lr_threshold = 1e-5
                if new_lr < lr_threshold:
                    tqdm.write(f'Learning rate smaller than {lr_threshold}, '
                               f'stopping.')
                    break
                if optim_updated:
                    tqdm.write(f'Learning rate decayed to {new_lr}')

            accuracy = eval_dict['accuracy']
            if not best_accuracy or accuracy > best_accuracy:
                best_accuracy = accuracy
                logger.update_results({
                    'best_valid_acc': best_accuracy,
                    'best_epoch': epoch
                })

                if hp.write_mode != 'NONE':
                    probs = np_softmax(eval_dict['output'])
                    probs_filepath = os.path.join(
                        logger.run_savepath, 'best_dev_probabilities.csv')
                    np.savetxt(probs_filepath,
                               probs,
                               delimiter=',',
                               fmt='%.8f')

                    labels_filepath = os.path.join(logger.run_savepath,
                                                   'best_dev_predictions.txt')
                    labels = [label + '\n' for label in eval_dict['labels']]
                    with open(labels_filepath, 'w', encoding='utf-8') as f:
                        f.writelines(labels)

                if hp.save_model:
                    logger.torch_save_file('best_model_state_dict.pth',
                                           model.state_dict(),
                                           progress_bar=tqdm)
                    logger.torch_save_file('best_model.pth',
                                           model,
                                           progress_bar=tqdm)
    except KeyboardInterrupt:
        pass
    finally:
        if SAVE_IN_SPREADSHEET:
            logger.insert_in_googlesheets()
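
Code example #7 drives its Adam optimizer through SlantedTriangularScheduler(max_iter, max_lr=hp.max_lr, cut_fraction=0.1, ratio=32). Assuming this implements the slanted triangular schedule of Howard & Ruder's ULMFiT, as the constructor arguments suggest, a minimal sketch could look like the following (the get_lr interface is a guess at what ScheduledOptim expects):

class SlantedTriangularScheduler:
    # slanted triangular learning-rate schedule: a short linear warm-up over
    # the first cut_fraction of training, then a long linear decay such that
    # the rate falls from max_lr back down to max_lr / ratio
    def __init__(self, max_iter, max_lr, cut_fraction=0.1, ratio=32):
        self.max_iter = max_iter
        self.max_lr = max_lr
        self.cut = max(1, int(max_iter * cut_fraction))
        self.ratio = ratio

    def get_lr(self, step):
        if step < self.cut:
            p = step / self.cut
        else:
            p = max(0.0, 1 - (step - self.cut) / max(1, self.max_iter - self.cut))
        return self.max_lr * (1 + p * (self.ratio - 1)) / self.ratio
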
Code example #8
File: train_plus_homology.py Project: fivelike/PLUS
def main():
    set_seeds(2020)
    args = vars(parser.parse_args())

    alphabet = Protein()
    cfgs = []
    data_cfg = config.DataConfig(args["data_config"]);   cfgs.append(data_cfg)
    if args["lm_model_config"] is None:
        model_cfg = config.ModelConfig(args["model_config"], input_dim=len(alphabet), num_classes=5)
        cfgs += [model_cfg]
    else:
        lm_model_cfg = config.ModelConfig(args["lm_model_config"], idx="lm_model_config", input_dim=len(alphabet))
        model_cfg = config.ModelConfig(args["model_config"], input_dim=len(alphabet),
                                       lm_dim=lm_model_cfg.num_layers * lm_model_cfg.hidden_dim * 2, num_classes=5)
        cfgs += [model_cfg, lm_model_cfg]
    run_cfg = config.RunConfig(args["run_config"], sanity_check=args["sanity_check"]);  cfgs.append(run_cfg)
    output, save_prefix = set_output(args, "train_homology_log")
    os.environ['CUDA_VISIBLE_DEVICES'] = args["device"] if args["device"] is not None else ""
    device, data_parallel = torch.device("cuda" if torch.cuda.is_available() else "cpu"), torch.cuda.device_count() > 1
    config.print_configs(args, cfgs, device, output)
    flag_rnn = (model_cfg.model_type == "RNN")
    flag_lm_model = (args["lm_model_config"] is not None)
    flag_lm_loss = (run_cfg.lm_loss_lambda != -1)
    flag_cm_loss = (run_cfg.cm_loss_lambda != -1)

    ## load a train dataset
    start = Print(" ".join(['start loading a train dataset:', data_cfg.path["train"]]), output)
    dataset_train = homology.load_homology(data_cfg, "train", alphabet, flag_cm_loss, args["sanity_check"])
    dataset_train = dataset.Homology_dataset(*dataset_train, alphabet, run_cfg, flag_rnn, model_cfg.max_len)
    sampler = dataset.HomolgySampler(dataset_train.labels, run_cfg)
    collate_fn = dataset.collate_paired_sequences if flag_rnn else None
    iterator_train = torch.utils.data.DataLoader(dataset_train, run_cfg.batch_size_train, collate_fn=collate_fn, sampler=sampler)
    end = Print(" ".join(['loaded', str(int(np.sqrt(len(dataset_train)))), 'sequences']), output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## load a dev dataset
    start = Print(" ".join(['start loading a dev dataset:', data_cfg.path["devpairs"]]), output)
    dataset_test = homology.load_homology_pairs(data_cfg, "devpairs", alphabet, flag_cm_loss, args["sanity_check"])
    dataset_test = dataset.PairedHomology_dataset(*dataset_test, alphabet, run_cfg, flag_rnn, model_cfg.max_len)
    iterator_test = torch.utils.data.DataLoader(dataset_test, run_cfg.batch_size_eval, collate_fn=collate_fn)
    end = Print(" ".join(['loaded', str(len(dataset_test)), 'sequence pairs']), output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## initialize a model
    start = Print('start initializing a model', output)
    models_list = [] # list of lists [model, idx, flag_frz, flag_clip_grad, flag_clip_weight]
    ### model
    if not flag_rnn:                model = plus_tfm.PLUS_TFM(model_cfg)
    elif not flag_lm_model:         model = plus_rnn.PLUS_RNN(model_cfg)
    else:                           model = p_elmo.P_ELMo(model_cfg)
    models_list.append([model, "", False, flag_rnn, flag_rnn])
    ### lm_model
    if flag_lm_model:
        lm_model = p_elmo.P_ELMo_lm(lm_model_cfg)
        models_list.append([lm_model, "lm", True, False, False])
    ### cm_model
    if flag_cm_loss:
        cm_model = cnn.ConvNet2D(model_cfg.embedding_dim)
        models_list.append([cm_model, "cm", False, False, True])
    params = []
    for model, _, frz, _, _ in models_list:
        if not frz: params += [p for p in model.parameters() if p.requires_grad]
    load_models(args, models_list, device, data_parallel, output, tfm_cls=flag_rnn)
    get_loss = plus_rnn.get_loss if flag_rnn else plus_tfm.get_loss
    end = Print('end initializing a model', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## setup trainer configurations
    start = Print('start setting trainer configurations', output)
    optim = torch.optim.Adam(params, lr=run_cfg.learning_rate)
    tasks_list = [] # list of lists [idx, metrics_train, metrics_eval]
    tasks_list.append(["cls", [], ["acc", "r", "rho"]])
    if flag_lm_loss: tasks_list.append(["lm", [], ["acc"]])
    if flag_cm_loss: tasks_list.append(["cm", [], ["pr", "re", "f1"]])
    trainer = Trainer(models_list, get_loss, run_cfg, tasks_list, optim)
    trainer_args = {}
    trainer_args["data_parallel"] = data_parallel
    trainer_args["paired"] = True
    if   flag_rnn: trainer_args["evaluate_cls"] = plus_rnn.evaluate_homology
    else:          trainer_args["evaluate_cls"] = plus_tfm.evaluate_homology
    trainer_args["evaluate"] = ["cls", homology.evaluate_homology]
    end = Print('end setting trainer configurations', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## train a model
    start = Print('start training a model', output)
    Print(trainer.get_headline(), output)
    for epoch in range(run_cfg.num_epochs):
        ### train
        for B, batch in enumerate(iterator_train):
            batch = [t.to(device) if type(t) is torch.Tensor else t for t in batch]
            trainer.train(batch, trainer_args)
            if B % 10 == 0: print('# epoch [{}/{}] train {:.1%} loss={:.4f}'.format(
                epoch + 1, run_cfg.num_epochs, B / len(iterator_train), trainer.loss_train), end='\r', file=sys.stderr)
        print(' ' * 150, end='\r', file=sys.stderr)

        ### evaluate cls and cm
        dataset_test.set_augment(False)
        trainer.set_exec_flags(["cls", 'lm', "cm"], [True, False, flag_cm_loss])
        for b, batch in enumerate(iterator_test):
            batch = [t.to(device) if type(t) is torch.Tensor else t for t in batch]
            trainer.evaluate(batch, trainer_args)
            if b % 10 == 0: print('# cls {:.1%} loss={:.4f}'.format(
                b / len(iterator_test), trainer.loss_eval), end='\r', file=sys.stderr)
        print(' ' * 150, end='\r', file=sys.stderr)

        ### evaluate lm
        if flag_lm_loss:
            dataset_test.set_augment(True)
            trainer.set_exec_flags(["cls", 'lm', "cm"], [False, True, False])
            for b, batch in enumerate(iterator_test):
                batch = [t.to(device) if type(t) is torch.Tensor else t for t in batch]
                trainer.evaluate(batch, trainer_args)
                if b % 10 == 0: print('# lm {:.1%} loss={:.4f}'.format(
                    b / len(iterator_test), trainer.loss_eval), end='\r', file=sys.stderr)
            print(' ' * 150, end='\r', file=sys.stderr)

        ### print log and save models
        trainer.save(save_prefix)
        Print(trainer.get_log(epoch + 1, args=trainer_args), output)
        trainer.set_exec_flags(["cls", "lm", "cm"], [True, True, True])
        trainer.reset()
        if trainer.patience == 0: break

    end = Print('end training a model', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)
    output.close()
Code example #9
File: eval_plus_pfam.py Project: fivelike/PLUS
def main():
    set_seeds(2020)
    args = vars(parser.parse_args())

    alphabet = Protein()
    data_cfg = config.DataConfig(args["data_config"])
    model_cfg = config.ModelConfig(args["model_config"],
                                   input_dim=len(alphabet),
                                   num_classes=2)
    run_cfg = config.RunConfig(args["run_config"],
                               eval=True,
                               sanity_check=args["sanity_check"])
    output, save_prefix = set_output(args, "eval_pfam_log", test=True)
    os.environ['CUDA_VISIBLE_DEVICES'] = args["device"] if args["device"] is not None else ""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data_parallel = torch.cuda.device_count() > 1
    config.print_configs(args, [data_cfg, model_cfg, run_cfg], device, output)
    flag_rnn = (model_cfg.model_type == "RNN")
    flag_paired = ("testpairs" in data_cfg.path)

    ## load a test dataset
    start = Print(
        " ".join([
            'start loading a test dataset:',
            data_cfg.path["testpairs" if flag_paired else "test"]
        ]), output)
    if flag_paired:
        dataset_test = pfam.load_pfam_pairs(data_cfg, "testpairs", alphabet,
                                            args["sanity_check"])
        dataset_test = dataset.PairedPfam_dataset(*dataset_test, alphabet,
                                                  run_cfg, flag_rnn,
                                                  model_cfg.max_len)
    else:
        dataset_test = pfam.load_pfam(data_cfg, "test", alphabet,
                                      args["sanity_check"])
        dataset_test = dataset.Pfam_dataset(*dataset_test,
                                            alphabet,
                                            run_cfg,
                                            flag_rnn,
                                            model_cfg.max_len,
                                            random_pairing=flag_paired,
                                            sanity_check=args["sanity_check"])
    if flag_rnn and flag_paired: collate_fn = dataset.collate_paired_sequences
    elif flag_rnn: collate_fn = dataset.collate_sequences_pelmo
    else: collate_fn = None
    iterator_test = torch.utils.data.DataLoader(dataset_test,
                                                run_cfg.batch_size_eval,
                                                collate_fn=collate_fn)
    end = Print(
        " ".join(['loaded',
                  str(len(dataset_test)), 'sequence(pair)s']), output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## initialize a model
    start = Print('start initializing a model', output)
    models_list = []  # list of lists [model, idx, flag_frz, flag_clip_grad, flag_clip_weight]
    if not flag_rnn: model = plus_tfm.PLUS_TFM(model_cfg)
    elif model_cfg.rnn_type == "B": model = plus_rnn.PLUS_RNN(model_cfg)
    else: model = p_elmo.P_ELMo_lm(model_cfg)
    models_list.append([model, "", True, False, False])
    load_models(args, models_list, device, data_parallel, output)
    get_loss = plus_rnn.get_loss if flag_rnn else plus_tfm.get_loss
    end = Print('end initializing a model', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## setup trainer configurations
    start = Print('start setting trainer configurations', output)
    tasks_list = []  # list of lists [idx, metrics_train, metrics_eval]
    tasks_list.append(["lm", [], ["acc"]])
    if flag_paired: tasks_list.append(["cls", [], ["acc"]])
    trainer = Trainer(models_list, get_loss, run_cfg, tasks_list)
    trainer_args = {}
    trainer_args["data_parallel"] = data_parallel
    trainer_args["paired"] = flag_paired
    if flag_paired and flag_rnn:
        trainer_args["evaluate_cls"] = plus_rnn.evaluate_sfp
    elif flag_paired:
        trainer_args["evaluate_cls"] = plus_tfm.evaluate_sfp
    else:
        trainer_args["num_alphabets"] = len(alphabet)
    end = Print('end setting trainer configurations', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## evaluate a model
    start = Print('start evaluating a model', output)
    Print(trainer.get_headline(test=True), output)

    ### evaluate lm
    if flag_paired: dataset_test.set_augment(True)
    trainer.set_exec_flags(["lm", "cls"], [True, False])
    for b, batch in enumerate(iterator_test):
        batch = [t.to(device) if type(t) is torch.Tensor else t for t in batch]
        trainer.evaluate(batch, trainer_args)
        if b % 10 == 0:
            print('# lm {:.1%} loss={:.4f}'.format(b / len(iterator_test),
                                                   trainer.loss_eval),
                  end='\r',
                  file=sys.stderr)
    print(' ' * 150, end='\r', file=sys.stderr)

    ### evaluate cls
    if flag_paired:
        dataset_test.set_augment(False)
        trainer.set_exec_flags(["lm", "cls"], [False, True])
        for b, batch in enumerate(iterator_test):
            batch = [
                t.to(device) if type(t) is torch.Tensor else t for t in batch
            ]
            trainer.evaluate(batch, trainer_args)
            if b % 10 == 0:
                print('# cls {:.1%} loss={:.4f}'.format(
                    b / len(iterator_test), trainer.loss_eval),
                      end='\r',
                      file=sys.stderr)
        print(' ' * 150, end='\r', file=sys.stderr)

    Print(trainer.get_log(test_idx="Pfam", args=trainer_args), output)
    end = Print('end evaluating a model', output)
    Print("".join(['elapsed time:', str(end - start)]), output, newline=True)
    output.close()
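
Every evaluation loop in this example repeats the same list comprehension to move tensors onto the device while passing other batch elements through. A small helper would capture the idiom; this is a refactoring sketch only (batch_to_device is an illustrative name, not defined in the repository), using isinstance(), which is more idiomatic than type(t) is torch.Tensor:

import torch

def batch_to_device(batch, device):
    # move only the tensor elements; leave metadata (lengths, ids, ...) as-is
    return [t.to(device) if isinstance(t, torch.Tensor) else t for t in batch]

Each inline comprehension above could then become batch = batch_to_device(batch, device).
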
Code Example #10
File: train_model.py  Project: mswzeus/TargetNet
def main():
    args = vars(parser.parse_args())
    check_args(args)
    set_seeds(2020)
    data_cfg = config.DataConfig(args["data_config"])
    model_cfg = config.ModelConfig(args["model_config"])
    run_cfg = config.RunConfig(args["run_config"], eval=False)
    output, save_prefix = set_output(args, "train_model_log")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    config.print_configs(args, [data_cfg, model_cfg, run_cfg], device, output)
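    # touch the device once up front, presumably to trigger CUDA context
    # initialization before any timed work starts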
    torch.zeros((1)).to(device)

    ## Loading datasets
    start = Print(" ".join(['start loading datasets']), output)
    dataset_train = get_dataset_from_configs(data_cfg, split_idx="train")
    dataset_val = get_dataset_from_configs(data_cfg, split_idx="val")
    iterator_train = torch.utils.data.DataLoader(dataset_train,
                                                 run_cfg.batch_size,
                                                 shuffle=True,
                                                 pin_memory=True,
                                                 num_workers=4)
    iterator_val = torch.utils.data.DataLoader(dataset_val,
                                               run_cfg.batch_size,
                                               shuffle=False,
                                               pin_memory=True,
                                               num_workers=4)
    end = Print(
        " ".join(['loaded',
                  str(len(dataset_train)), 'dataset_train samples']), output)
    end = Print(
        " ".join(['loaded',
                  str(len(dataset_val)), 'dataset_val samples']), output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## initialize a model
    start = Print('start initializing a model', output)
    model, params = get_model(model_cfg, data_cfg.with_esa,
                              run_cfg.dropout_rate)
    end = Print('end initializing a model', output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## setup trainer configurations
    start = Print('start setting trainer configurations', output)
    trainer = Trainer(model)
    trainer.set_device(device)
    trainer.set_optim_scheduler(run_cfg, params)
    end = Print('end setting trainer configurations', output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)

    ## train a model
    start = Print('start training a model', output)
    Print(trainer.get_headline(), output)
    for epoch in range(int(trainer.epoch), run_cfg.num_epochs):
        ### train
        for B, batch in enumerate(iterator_train):
            trainer.train(batch, device)
            if B % 5 == 0:
                print('# epoch [{}/{}] train {:.1%}'.format(
                    epoch + 1, run_cfg.num_epochs, B / len(iterator_train)),
                      end='\r',
                      file=sys.stderr)
        print(' ' * 150, end='\r', file=sys.stderr)

        ### validation
        for B, batch in enumerate(iterator_val):
            trainer.evaluate(batch, device)
            if B % 5 == 0:
                print('# epoch [{}/{}] val {:.1%}'.format(
                    epoch + 1, run_cfg.num_epochs, B / len(iterator_val)),
                      end='\r',
                      file=sys.stderr)
        print(' ' * 150, end='\r', file=sys.stderr)

        ### print log and save models
        trainer.epoch += 1
        trainer.scheduler_step()
        trainer.save_model(save_prefix)
        trainer.log("val", output)

    end = Print('end training a model', output)
    Print(" ".join(['elapsed time:', str(end - start)]), output, newline=True)
    if not output == sys.stdout: output.close()
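
All of these training and evaluation scripts call set_seeds(...) before reading any configuration, but the helper's body never appears in the snippets. A typical implementation, offered as an assumption about what it does rather than the project's actual code, seeds every RNG that PyTorch training depends on:

import random

import numpy as np
import torch

def set_seeds(seed):
    # seed Python, NumPy, and PyTorch (CPU and all GPUs) for reproducibility
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # silently ignored when no GPU is present
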
Code Example #11
File: run.py  Project: epochx/elmo4irony
def main():
    hp = arg_parser.parse_args()

    logger = Logger(hp, model_name='elmo4irony', write_mode=hp.write_mode)
    if hp.write_mode != 'NONE':
        logger.write_hyperparams()

    torch.manual_seed(hp.seed)
    torch.cuda.manual_seed_all(hp.seed)  # silently ignored if there are no GPUs

    CUDA = False
    if torch.cuda.is_available() and not hp.no_cuda:
        CUDA = True

    USE_POS = False

    corpus = ClassificationCorpus(config.corpora_dict, hp.corpus,
                                  force_reload=hp.force_reload,
                                  train_data_proportion=hp.train_data_proportion,
                                  dev_data_proportion=hp.dev_data_proportion,
                                  batch_size=hp.batch_size,
                                  lowercase=hp.lowercase,
                                  use_pos=USE_POS)

    if hp.model_hash:
        experiment_path = os.path.join(config.RESULTS_PATH, hp.model_hash + '*')
        ext_experiment_path = glob(experiment_path)
        assert len(ext_experiment_path) == 1, 'Try providing a longer model hash'
        ext_experiment_path = ext_experiment_path[0]
        model_path = os.path.join(ext_experiment_path, 'best_model.pth')
        model = torch.load(model_path)

    else:
        # Define some specific parameters for the model
        num_classes = len(corpus.label2id)
        batch_size = corpus.train_batches.batch_size
        hidden_sizes = hp.lstm_hidden_size
        model = Classifier(num_classes,
                           batch_size,
                           hidden_sizes=hidden_sizes,
                           use_cuda=CUDA,
                           pooling_method=hp.pooling_method,
                           batch_first=True,
                           dropout=hp.dropout,
                           sent_enc_dropout=hp.sent_enc_dropout,
                           sent_enc_layers=hp.sent_enc_layers)

    if CUDA:
        model.cuda()

    if hp.write_mode != 'NONE':
        logger.write_architecture(str(model))

    logger.write_current_run_details(str(model))

    optimizer = OptimWithDecay(model.parameters(),
                               method=hp.optim,
                               initial_lr=hp.learning_rate,
                               max_grad_norm=hp.grad_clipping,
                               lr_decay=hp.learning_rate_decay,
                               start_decay_at=hp.start_decay_at,
                               decay_every=hp.decay_every)

    loss_function = torch.nn.CrossEntropyLoss()

    trainer = Trainer(model, optimizer, loss_function, num_epochs=hp.epochs,
                      use_cuda=CUDA, log_interval=hp.log_interval)

    if hp.test:
        if hp.model_hash is None:
            raise RuntimeError(
                'You should have provided a pre-trained model hash with the '
                '--model_hash flag'
            )

        print(f'Testing model {model_path}')
        eval_dict = trainer.evaluate(corpus.test_batches)

        probs = np_softmax(eval_dict['output'])
        probs_filepath = os.path.join(ext_experiment_path,
                                      'test_probs.csv')
        np.savetxt(probs_filepath, probs,
                   delimiter=',', fmt='%.8f')
        print(f'Saved prediction probs in {probs_filepath}')

        labels_filepath = os.path.join(ext_experiment_path,
                                       'predictions.txt')
        labels = [label + '\n' for label in eval_dict['labels']]
        with open(labels_filepath, 'w', encoding='utf-8') as f:
            f.writelines(labels)
        print(f'Saved prediction file in {labels_filepath}')

        # We know the last segment of ext_experiment_path corresponds to the
        # model hash
        full_model_hash = os.path.basename(ext_experiment_path)

        # WARNING: We are accessing an internal method of the Logger class;
        # This could corrupt the results database if not used properly
        logger._update_in_db({'test_acc': eval_dict['accuracy'],
                              'test_f1': eval_dict['f1'],
                              'test_p': eval_dict['precision'],
                              'test_r': eval_dict['recall']},
                             experiment_hash=full_model_hash)

        exit()

    writer = None
    if hp.write_mode != 'NONE':
        writer = SummaryWriter(logger.run_savepath)
    try:
        best_accuracy = None
        for epoch in tqdm(range(hp.epochs), desc='Epoch'):

            trainer.train_epoch(corpus.train_batches, epoch, writer)
            corpus.train_batches.shuffle_examples()
            eval_dict = trainer.evaluate(corpus.dev_batches, epoch, writer)

            if hp.training_schedule == 'decay':
                optim_updated, new_lr = trainer.optimizer.updt_lr_accuracy(epoch, eval_dict['accuracy'])
                lr_threshold = 1e-5
                if new_lr < lr_threshold:
                    tqdm.write(f'Learning rate smaller than {lr_threshold}, '
                               f'stopping.')
                    break
                if optim_updated:
                    tqdm.write(f'Learning rate decayed to {new_lr}')

            if hp.training_schedule == 'decay-nie':
                optim_updated, new_lr = trainer.optimizer.update_learning_rate_nie(epoch)
                if optim_updated:
                    tqdm.write(f'Learning rate decayed to {new_lr}')

            accuracy = eval_dict['accuracy']
            if best_accuracy is None or accuracy > best_accuracy:
                best_accuracy = accuracy
                logger.update_results({'best_valid_acc': best_accuracy,
                                       'best_epoch': epoch})

                if hp.write_mode != 'NONE':
                    probs = np_softmax(eval_dict['output'])
                    probs_filepath = os.path.join(logger.run_savepath,
                                                  'best_eval_probs.csv')
                    np.savetxt(probs_filepath, probs,
                               delimiter=',', fmt='%.8f')

                    labels_filepath = os.path.join(logger.run_savepath,
                                                   'predictions_dev.txt')
                    labels = [label + '\n' for label in eval_dict['labels']]
                    with open(labels_filepath, 'w', encoding='utf-8') as f:
                        f.writelines(labels)

                if hp.save_model:
                    logger.torch_save_file('best_model_state_dict.pth',
                                           model.state_dict(),
                                           progress_bar=tqdm)
                    logger.torch_save_file('best_model.pth',
                                           model,
                                           progress_bar=tqdm)

    except KeyboardInterrupt:
        pass
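
np_softmax is applied to the raw eval_dict['output'] logits before probabilities are written to CSV, but it is not defined in this snippet. A numerically stable NumPy version consistent with that usage could look like this (an assumed reconstruction, not elmo4irony's actual code):

import numpy as np

def np_softmax(logits, axis=-1):
    # subtract the per-row max so exp() cannot overflow
    shifted = logits - logits.max(axis=axis, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=axis, keepdims=True)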