Code example #1
File: train.py Project: LittleArden/RoBERTaABSA
    def run(self, repeats=5):
        # Loss and Optimizer
        criterion = nn.CrossEntropyLoss()

        accs = []
        f1s = []
        # max_test_acc_avg = 0
        # max_f1_avg = 0
        for i in range(repeats):
            print("repeat: ", i)
            self._reset_params()
            _params = filter(lambda p: p.requires_grad,
                             self.model.parameters())
            optimizer = self.opt.optimizer(_params,
                                           lr=self.opt.learning_rate,
                                           weight_decay=self.opt.l2reg)
            max_test_acc, max_f1 = self._train(criterion, optimizer)
            print("max_test_acc: {0}     max_f1: {1}".format(
                max_test_acc, max_f1))
            accs.append(max_test_acc)
            f1s.append(max_f1)
            # max_test_acc_avg += max_test_acc
            # max_f1_avg += max_f1
            print("#" * 100)
            fitlog.finish()
        print(accs)
        print("max_test_acc_avg:{:.4f}/{:.4f}".format(np.mean(accs),
                                                      np.std(accs)))
        print(f1s)
        print("max_f1_avg:{:.4f}/{:.4f}".format(np.mean(f1s), np.std(f1s)))
Code example #2
def postprocess(args: argparse.Namespace, start: float):
    exe_time = time.time() - start
    print('Execution time: %dh:%dm:%ds.' %
          (exe_time // 3600, (exe_time // 60) % 60, exe_time % 60))
    fitlog.finish()
    args.visual_logger.close()
    print('training ends.')
Code example #3
def main():
    from config import get_config

    C, logger = get_config()

    #----- prepare data and some global variables -----
    data_train, data_test, data_valid, relations, rel_weights = load_data(
        C, logger)

    n_rel_typs, loss_func, generator = initialize(C, logger, relations,
                                                  rel_weights)
    #----- train & test -----
    trained_models = []
    for i in range(C.ensemble_size):
        model, best_valid = train(
            C,
            logger,
            data_train,
            data_valid,
            loss_func,
            generator,
            n_rel_typs,
            run_name=str(i),
            test_data=data_test,
        )

        if hasattr(model, "module"):  # unwrap DataParallel
            model = model.module

        model = model.cpu()
        trained_models.append(model)

    #----- ensemble test -----
    micro_f1, macro_f1, loss = test(
        C,
        logger,
        data_test,
        trained_models,
        loss_func,
        generator,
        mode="test",
        epoch_id=C.epoch_numb,
        run_name='final',
    )
    fitlog.add_hyper("t%.4f v%.4f" % (macro_f1, best_valid), name="result")

    #----- save ensembled model -----
    if C.model_save:
        with open(C.model_save, "wb") as fil:
            pickle.dump(trained_models, fil)
        logger.log("final model saved at %s" % C.model_save)

    #----- finish -----
    fitlog.finish()
Code example #4
    def run(self, repeats=1):
        # Loss and Optimizer
        criterion = nn.CrossEntropyLoss()

        max_test_acc_avg = 0
        max_test_f1_avg = 0
        for i in range(repeats):
            print("repeat: ", (i + 1))
            self._reset_params()
            _params = filter(lambda p: p.requires_grad,
                             self.model.parameters())
            optimizer = self.opt.optimizer(_params,
                                           lr=self.opt.learning_rate,
                                           weight_decay=self.opt.l2reg)
            max_test_acc, max_test_f1 = self._train(criterion, optimizer)
            print("max_test_acc: {0}     max_test_f1: {1}".format(
                max_test_acc, max_test_f1))
            max_test_acc_avg += max_test_acc
            max_test_f1_avg += max_test_f1
            print("#" * 100)
            fitlog.finish()
        print("max_test_acc_avg:", max_test_acc_avg / repeats)
        print("max_test_f1_avg:", max_test_f1_avg / repeats)
Code example #5
File: callback.py Project: zhdbwe/fastNLP
    def on_exception(self, exception):
        fitlog.finish(status=1)
        if self._log_exception:
            fitlog.add_other(repr(exception), name='except_info')
Code example #6
File: callback.py Project: zhdbwe/fastNLP
    def on_train_end(self):
        fitlog.finish()
Code example #7
dataset.set_input(Const.INPUT, Const.INPUT_LEN)
dataset.set_target(Const.TARGET)

testset.rename_field('words', Const.INPUT)
testset.rename_field('target', Const.TARGET)
testset.rename_field('seq_len', Const.INPUT_LEN)
testset.set_input(Const.INPUT, Const.INPUT_LEN)
testset.set_target(Const.TARGET)

train_data, dev_data = dataset.split(0.1)

loss = CrossEntropyLoss(pred=Const.OUTPUT, target=Const.TARGET)
metrics = AccuracyMetric(pred=Const.OUTPUT, target=Const.TARGET)
trainer = Trainer(model=model,
                  train_data=train_data,
                  dev_data=dev_data,
                  loss=loss,
                  batch_size=16,
                  metrics=metrics,
                  n_epochs=20,
                  callbacks=[FitlogCallback(dataset)])
trainer.train()

tester = Tester(data=testset, model=model, metrics=metrics)
tester.test()

tester = Tester(data=train_data, model=model, metrics=metrics)
tester.test()

fitlog.finish()
Code example #8
File: train.py Project: jeou/MXNetSeg
def record_hyper_params(hyper_dict):
    # NOTE: the original snippet was truncated above; this header and loop are
    # reconstructed from the record_hyper_params(conf) call further down.
    for k, v in hyper_dict.items():
        fitlog.add_hyper(value=str(v), name=str(k))
    if 'dilate' not in hyper_dict.keys():
        fitlog.add_hyper(value='-', name='dilate')
    fitlog.add_other(value=platform.system(), name='platform')


if __name__ == '__main__':

    # parse args
    args = parse_args()

    # get contexts
    ctx = get_contexts(args.ctx)

    # get initialized model by model name
    model, conf = get_model_by_name(args.model, ctx)
    model.hybridize()

    # record hyper-parameters
    record_hyper_params(conf)

    # training
    try:
        my_fitter = Fitter(model, conf, ctx, val_per_epoch=args.val)
        my_fitter.log()  # log to file
        my_fitter.fit()
        fitlog.finish()
    except Exception as e:
        fitlog.finish(status=1)
        print(e)
Code example #9
File: main.py Project: irfan11111111/Flat-ner
import fitlog

fitlog.commit(__file__)  # auto commit your codes
fitlog.add_hyper_in_file(__file__)  # record your hyperparameters
"""
Your training code here, you may use these functions to log your result:
    fitlog.add_hyper()
    fitlog.add_loss()
    fitlog.add_metric()
    fitlog.add_best_metric()
    ......
"""

fitlog.finish()  # finish the logging
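A minimal concrete version of the template above. This is only a sketch: the log directory, hyperparameter, loss values, and metric are illustrative placeholders, not taken from any of the projects shown here.

import fitlog

fitlog.set_log_dir("logs/")  # assumes a log directory prepared with `fitlog init`
fitlog.add_hyper(value=0.001, name="lr")  # illustrative hyperparameter

for step in range(100):
    loss = 1.0 / (step + 1)  # dummy loss, for illustration only
    fitlog.add_loss(loss, name="train_loss", step=step)

fitlog.add_best_metric({"dev": {"acc": 0.92}})  # illustrative metric value
fitlog.finish()  # mark this fitlog record as finished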
Code example #10
File: run.py Project: LittleArden/RoBERTaABSA
def main():
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    # Parse args
    args = parse_args()
    if args.dataset_name.endswith("/"):
        args.dataset_name = args.dataset_name[:-1]
    dataset_name = args.dataset_name
    # e.g. a path like ~/rgat/bert/11/Restaurants
    if "/" in dataset_name:
        pre_model_name, layer, dataset = dataset_name.split("/")[-3:]
    else:
        pre_model_name, dataset = "None", dataset_name
        layer = "-1"
    fitlog.add_hyper(value=pre_model_name, name="model_name")
    fitlog.add_hyper(value=dataset, name="dataset")
    fitlog.add_hyper(value=layer, name="pre_layer")
    fitlog.add_hyper(value="RGAT", name="model")

    # if 'Laptop' in args.dataset_name:
    #     assert args.lower == 0
    check_args(args)

    # Setup CUDA, GPU training
    # os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_id
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    args.device = device
    logger.info("Device is %s", args.device)

    # BERT: load the pretrained model and tokenizer; check whether it is necessary to put BERT here
    if args.embedding_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.bert_model_dir)
        args.tokenizer = tokenizer
    elif args.embedding_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.bert_model_dir)
        args.tokenizer = tokenizer

    # Load datasets and vocabs
    (
        train_dataset,
        test_dataset,
        word_vocab,
        dep_tag_vocab,
        pos_tag_vocab,
    ) = load_datasets_and_vocabs(args)

    # Build Model
    # model = Aspect_Text_Multi_Syntax_Encoding(args, dep_tag_vocab['len'], pos_tag_vocab['len'])
    if args.pure_bert:
        model = Pure_Bert(args)
    elif args.gat_roberta:
        model = Aspect_Roberta_GAT(args, dep_tag_vocab["len"],
                                   pos_tag_vocab["len"])
    elif args.gat_bert:
        model = Aspect_Bert_GAT(args, dep_tag_vocab["len"],
                                pos_tag_vocab["len"])  # R-GAT + Bert
    elif args.gat_our:
        model = Aspect_Text_GAT_ours(
            args, dep_tag_vocab["len"],
            pos_tag_vocab["len"])  # R-GAT with reshaped tree
    else:
        model = Aspect_Text_GAT_only(
            args, dep_tag_vocab["len"],
            pos_tag_vocab["len"])  # original GAT with reshaped tree

    model.to(args.device)
    # Train
    _, _, all_eval_results = train(args, train_dataset, model, test_dataset)

    print("\n\nBest Results:")
    if len(all_eval_results):
        best_eval_result = max(all_eval_results, key=lambda x: x["acc"])
        step = [
            i for i, result in enumerate(all_eval_results)
            if result == best_eval_result
        ][0]
        logger.info("Achieve at step {}/{}".format(step,
                                                   len(all_eval_results)))
        for key in sorted(best_eval_result.keys()):
            logger.info("  %s = %s", key, str(best_eval_result[key]))
        # fitlog.add_best_metric(value=best_eval_result['acc'], name='acc')
        # fitlog.add_best_metric(value=best_eval_result['f1'], name='f1')
    fitlog.finish()
Code example #11
File: run.py Project: ChaokunChang/KDD-CUP-2014
def predict(args):
    text_data = TextData()
    with open(os.path.join(args.vocab_dir, args.vocab_data), 'rb') as fin:
        text_data = pickle.load(fin)
    vocab_size = text_data.vocab_size
    class_num = text_data.class_num
    # class_num = 1
    seq_len = text_data.max_seq_len
    print("(vocab_size,class_num,seq_len):({0},{1},{2})".format(
        vocab_size, class_num, seq_len))

    test_data = text_data.test_set
    test_data.set_input('words', 'seq_len')
    test_data.set_target('target')
    test_size = test_data.get_length()
    print("test_size:{}".format(test_size))
    print("Data type:{}".format(type(test_data)))

    init_embeds = None
    model_save_path = os.path.join(args.model_dir, args.model,
                                   args.model_suffix, args.reload_model_name)
    print("Loading the model {}".format(model_save_path))
    model = torch.load(model_save_path)
    model.eval()
    print(model)
    # fall back to CPU when CUDA is not requested
    device = torch.device('cuda' if args.cuda else 'cpu')
    model.to(device)
    acc = 0.0
    output = []
    data_iterator = Batch(test_data, batch_size=args.batch_size)
    for data_x, batch_y in data_iterator:
        i_data = data_x['words'].to(device)  # Variable is deprecated; move inputs to the selected device
        pred = model(i_data)[C.OUTPUT]
        pred = pred.sigmoid()
        # print(pred.shape)
        output.append(pred.cpu().data)
    output = torch.cat(output, 0).numpy()
    print(output.shape)
    print("Predict Done.{} records".format(len(output) * args.batch_size))
    result_save_path = os.path.join(
        args.result_dir,
        args.model + args.model_suffix + args.reload_model_name)
    with open(result_save_path + ".pkl", 'wb') as f:
        pickle.dump(output, f)
    output = output.squeeze()[:, 1].tolist()
    projectid = text_data.test_projectid.values
    answers = []
    count = 0
    for i in range(len(output)):
        if output[i] > 0.5:
            count += 1
    print("pc1 < 0.5 count:{}".format(count))
    for i in range(len(projectid) - len(output)):
        output.append(0.87)  # append a scalar, not a one-element list

    df = pd.DataFrame()
    df['projectid'] = projectid
    df['y'] = output
    df.to_csv(result_save_path + ".csv", index=False)
    print("Predict Done, results saved to {}".format(result_save_path))
    # with open(result_save_path,'w') as f:

    #     for i in output:
    #         f.write()
    fitlog.finish()
Code example #12
File: run.py Project: ChaokunChang/KDD-CUP-2014
def train(args):
    text_data = TextData()
    with open(os.path.join(args.vocab_dir, args.vocab_data), 'rb') as fin:
        text_data = pickle.load(fin)
    vocab_size = text_data.vocab_size
    class_num = text_data.class_num
    # class_num = 1
    seq_len = text_data.max_seq_len
    print("(vocab_size,class_num,seq_len):({0},{1},{2})".format(
        vocab_size, class_num, seq_len))

    train_data = text_data.train_set
    val_data = text_data.val_set
    test_data = text_data.test_set
    train_data.set_input('words', 'seq_len')
    train_data.set_target('target')
    val_data.set_input('words', 'seq_len')
    val_data.set_target('target')

    test_data.set_input('words', 'seq_len')
    test_data.set_target('target')

    init_embeds = None
    if args.pretrain_model == "None":
        print("No pretrained model with be used.")
        print("vocabsize:{0}".format(vocab_size))
        init_embeds = (vocab_size, args.embed_size)
    elif args.pretrain_model == "word2vec":
        embeds_path = os.path.join(args.prepare_dir, 'w2v_embeds.pkl')
        print("Loading Word2Vec pretrained embedding from {0}.".format(
            embeds_path))
        with open(embeds_path, 'rb') as fin:
            init_embeds = pickle.load(fin)
    elif args.pretrain_model == 'glove':
        embeds_path = os.path.join(args.prepare_dir, 'glove_embeds.pkl')
        print(
            "Loading Glove pretrained embedding from {0}.".format(embeds_path))
        with open(embeds_path, 'rb') as fin:
            init_embeds = pickle.load(fin)
    elif args.pretrain_model == 'glove2wv':
        embeds_path = os.path.join(args.prepare_dir, 'glove2wv_embeds.pkl')
        print("Loading Glove2wv pretrained embedding from {0}.".format(embeds_path))
        with open(embeds_path, 'rb') as fin:
            init_embeds = pickle.load(fin)
    else:
        init_embeds = (vocab_size, args.embed_size)

    if args.model == "CNNText":
        print("Using CNN Model.")
        model = CNNText(init_embeds,
                        num_classes=class_num,
                        padding=2,
                        dropout=args.dropout)
    elif args.model == "StarTransformer":
        print("Using StarTransformer Model.")
        model = STSeqCls(init_embeds,
                         num_cls=class_num,
                         hidden_size=args.hidden_size)
    elif args.model == "MyCNNText":
        model = MyCNNText(init_embeds=init_embeds,
                          num_classes=class_num,
                          padding=2,
                          dropout=args.dropout)
        print("Using user defined CNNText")
    elif args.model == "LSTMText":
        print("Using LSTM Model.")
        model = LSTMText(init_embeds=init_embeds,
                         output_dim=class_num,
                         hidden_dim=args.hidden_size,
                         num_layers=args.num_layers,
                         dropout=args.dropout)
    elif args.model == "Bert":
        print("Using Bert Model.")
    else:
        print("Using default model: CNNText.")
        model = CNNText((vocab_size, args.embed_size),
                        num_classes=class_num,
                        padding=2,
                        dropout=0.1)
    print(model)
    if args.cuda:
        device = torch.device('cuda')
    else:
        device = None

    print("train_size:{0} ; val_size:{1} ; test_size:{2}".format(
        train_data.get_length(), val_data.get_length(),
        test_data.get_length()))

    if args.optim == "Adam":
        print("Using Adam as optimizer.")
        optimizer = fastnlp_optim.Adam(lr=0.001,
                                       weight_decay=args.weight_decay)
        if args.model_suffix == "default":
            args.model_suffix = args.optim
    else:
        print("No Optimizer will be used.")
        optimizer = None

    criterion = CrossEntropyLoss()
    metric = AccuracyMetric()
    model_save_path = os.path.join(args.model_dir, args.model,
                                   args.model_suffix)
    earlystop = EarlyStopCallback(args.patience)
    fitlog_back = FitlogCallback({"val": val_data, "train": train_data})
    trainer = Trainer(train_data=train_data,
                      model=model,
                      save_path=model_save_path,
                      device=device,
                      n_epochs=args.epochs,
                      optimizer=optimizer,
                      dev_data=val_data,
                      loss=criterion,
                      batch_size=args.batch_size,
                      metrics=metric,
                      callbacks=[fitlog_back, earlystop])
    trainer.train()
    print("Train Done.")

    tester = Tester(data=val_data,
                    model=model,
                    metrics=metric,
                    batch_size=args.batch_size,
                    device=device)
    tester.test()
    print("Test Done.")

    print("Predict the answer with best model...")
    acc = 0.0
    output = []
    data_iterator = Batch(test_data, batch_size=args.batch_size)
    for data_x, batch_y in data_iterator:
        i_data = data_x['words'].to(device) if device is not None else data_x['words']  # Variable is deprecated
        pred = model(i_data)[C.OUTPUT]
        pred = pred.sigmoid()
        # print(pred.shape)
        output.append(pred.cpu().data)
    output = torch.cat(output, 0).numpy()
    print(output.shape)
    print("Predict Done. {} records".format(len(output)))
    result_save_path = os.path.join(args.result_dir,
                                    args.model + "_" + args.model_suffix)
    with open(result_save_path + ".pkl", 'wb') as f:
        pickle.dump(output, f)
    output = output.squeeze()[:, 1].tolist()
    projectid = text_data.test_projectid.values
    answers = []
    count = 0
    for i in range(len(output)):
        if output[i] > 0.5:
            count += 1
    print("true sample count:{}".format(count))
    add_count = 0
    for i in range(len(projectid) - len(output)):
        output.append(0.13)  # append a scalar, not a one-element list
        add_count += 1
    print("Add {} default result in predict.".format(add_count))

    df = pd.DataFrame()
    df['projectid'] = projectid
    df['y'] = output
    df.to_csv(result_save_path + ".csv", index=False)
    print("Predict Done, results saved to {}".format(result_save_path))

    fitlog.finish()
Code example #13
def train(args):
    text_data = TextData()
    with open(os.path.join(args.vocab_dir, args.vocab_data), 'rb') as fin:
        text_data = pickle.load(fin)
    vocab_size = text_data.vocab_size
    class_num = text_data.class_num
    seq_len = text_data.max_seq_len
    print("(vocab_size,class_num,seq_len):({0},{1},{2})".format(
        vocab_size, class_num, seq_len))

    train_data = text_data.train_set
    test_dev_data = text_data.test_set
    train_data.set_input('words', 'seq_len')
    train_data.set_target('target')
    test_dev_data.set_input('words', 'seq_len')
    test_dev_data.set_target('target')
    test_data, dev_data = test_dev_data.split(0.2)
    # the split above is only used to obtain dev_data; keep the full set for testing
    test_data = test_dev_data
    init_embeds = None
    if args.pretrain_model == "None":
        print("No pretrained model with be used.")
        print("vocabsize:{0}".format(vocab_size))
        init_embeds = (vocab_size, args.embed_size)
    elif args.pretrain_model == "word2vec":
        embeds_path = os.path.join(args.prepare_dir, 'w2v_embeds.pkl')
        print("Loading Word2Vec pretrained embedding from {0}.".format(
            embeds_path))
        with open(embeds_path, 'rb') as fin:
            init_embeds = pickle.load(fin)
    elif args.pretrain_model == 'glove':
        embeds_path = os.path.join(args.prepare_dir, 'glove_embeds.pkl')
        print(
            "Loading Glove pretrained embedding from {0}.".format(embeds_path))
        with open(embeds_path, 'rb') as fin:
            init_embeds = pickle.load(fin)
    elif args.pretrain_model == 'glove2wv':
        embeds_path = os.path.join(args.prepare_dir, 'glove2wv_embeds.pkl')
        print("Loading Glove2wv pretrained embedding from {0}.".format(embeds_path))
        with open(embeds_path, 'rb') as fin:
            init_embeds = pickle.load(fin)
    else:
        init_embeds = (vocab_size, args.embed_size)

    if args.model == "CNNText":
        print("Using CNN Model.")
        model = CNNText(init_embeds,
                        num_classes=class_num,
                        padding=2,
                        dropout=args.dropout)
    elif args.model == "StarTransformer":
        print("Using StarTransformer Model.")
        model = STSeqCls(init_embeds,
                         num_cls=class_num,
                         hidden_size=args.hidden_size)
    elif args.model == "MyCNNText":
        model = MyCNNText(init_embeds=init_embeds,
                          num_classes=class_num,
                          padding=2,
                          dropout=args.dropout)
        print("Using user defined CNNText")
    elif args.model == "LSTMText":
        print("Using LSTM Model.")
        model = LSTMText(init_embeds=init_embeds,
                         output_dim=class_num,
                         hidden_dim=args.hidden_size,
                         num_layers=args.num_layers,
                         dropout=args.dropout)
    elif args.model == "Bert":
        print("Using Bert Model.")
    else:
        print("Using default model: CNNText.")
        model = CNNText((vocab_size, args.embed_size),
                        num_classes=class_num,
                        padding=2,
                        dropout=0.1)
    print(model)
    if args.cuda:
        device = torch.device('cuda')
    else:
        device = None

    print("train_size:{0} ; dev_size:{1} ; test_size:{2}".format(
        train_data.get_length(), dev_data.get_length(),
        test_data.get_length()))

    if args.optim == "Adam":
        print("Using Adam as optimizer.")
        optimizer = fastnlp_optim.Adam(lr=0.001,
                                       weight_decay=args.weight_decay)
        if args.model_suffix == "default":
            args.model_suffix = args.optim
    else:
        print("No Optimizer will be used.")
        optimizer = None

    criterion = CrossEntropyLoss()
    metric = AccuracyMetric()
    model_save_path = os.path.join(args.model_dir, args.model,
                                   args.model_suffix)
    earlystop = EarlyStopCallback(args.patience)
    trainer = Trainer(train_data=train_data,
                      model=model,
                      save_path=model_save_path,
                      device=device,
                      n_epochs=args.epochs,
                      optimizer=optimizer,
                      dev_data=test_data,
                      loss=criterion,
                      batch_size=args.batch_size,
                      metrics=metric,
                      callbacks=[FitlogCallback(test_data), earlystop])
    trainer.train()
    print("Train Done.")

    tester = Tester(data=test_data,
                    model=model,
                    metrics=metric,
                    batch_size=args.batch_size,
                    device=device)
    tester.test()
    print("Test Done.")
    fitlog.finish()