Example 1
def main(args):
    # create the data loader factory
    data_loader = DataLoaderFactory()

    # build the train / test / dev iterators; the per-split file paths are
    # assumed to be supplied on args (attribute names here are illustrative)
    train_data_loader = data_loader.get_input_for_ccf(
        args.train_file_path, args.batch_size, args.max_seq_length,
        args.shuffle, args.drop_last)

    test_data_loader = data_loader.get_input_for_ccf(
        args.test_file_path, args.batch_size, args.max_seq_length,
        args.shuffle, args.drop_last)

    dev_data_loader = data_loader.get_input_for_ccf(
        args.dev_file_path, args.batch_size, args.max_seq_length,
        args.shuffle, args.drop_last)

    # set up the task model
    task_model = TaskModel(num_labels=len(args.label_to_id), dropout_prob=args.dropout_prob,
                           bret_pretrainded_path=args.bert_pretrain_path)

    # optionally reload saved model parameters
    # print("loading parameters from test_model.bin")
    # task_model.load_state_dict(torch.load(os.path.join(os.path.dirname(curr_path), "model_save", "model_89.15.bin"),
    #                                       "cuda" if torch.cuda.is_available() else None))

    # set up the optimizer
    optimizer = get_optim(task_model.parameters(), args)

    # print config
    print("args", args)

    # start model training
    cls_app = ModelRunner(task_type="cls", is_bert=False, label_to_id=args.label_to_id)

    cls_app.train(total_step=args.total_step, eval_per_step=args.eval_per_step, task_model=task_model,
                  model_save_path=args.model_save_path, optimizer=optimizer,
                  train_data_loader=train_data_loader, dev_data_loader=dev_data_loader,
                  eval_label_list=list(args.label_to_id.values()), compare_param="f1",
                  eval_file_save_name=args.eval_file_save_name)

    cls_app.predict_for_ccf(dataiter=dev_data_loader, model=task_model,
                            save_file_path="test_predict_out.txt",
                            model_path=args.model_save_path, load_from_onnx=False)

    print("模型运行完成")
Example 2
def main(argv):
    data_loader = BaseLoader.get_loader_from_flags(FLAGS.data_set)
    train_set, valid_set, test_set = data_loader.load_dataset(FLAGS.num_steps, FLAGS.shuffle_train)
    model = DualStageRNN(encoder_dim=FLAGS.encoder_dim,
                         decoder_dim=FLAGS.decoder_dim,
                         num_steps=FLAGS.num_steps,
                         num_series=data_loader.num_series,
                         use_cur_exg=FLAGS.use_cur_exg)
    save_path = 'data/data_nasdaq'
    model_runner = ModelRunner(model, data_loader.label_scaler, FLAGS, save_path)

    model_runner.train(train_set, valid_set, test_set, FLAGS.max_epoch)
    # model_runner.restore('logs/ModelWrapper/lr-0.001_encoder-32_decoder-32/20190922-125838/saved_model')
    model_runner.evaluate(test_set, plot=FLAGS.plot_prediction)

    return
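
# A minimal sketch (an assumption, not taken from the original project) of how
# the FLAGS read by Example 2 could be declared with absl.flags; every default
# below is an illustrative placeholder.
from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_string("data_set", "nasdaq", "dataset key passed to BaseLoader")
flags.DEFINE_integer("num_steps", 10, "length of the input time window")
flags.DEFINE_boolean("shuffle_train", True, "shuffle the training split")
flags.DEFINE_integer("encoder_dim", 32, "hidden size of the encoder RNN")
flags.DEFINE_integer("decoder_dim", 32, "hidden size of the decoder RNN")
flags.DEFINE_boolean("use_cur_exg", False, "feed current exogenous inputs to the decoder")
flags.DEFINE_integer("max_epoch", 100, "number of training epochs")
flags.DEFINE_boolean("plot_prediction", False, "plot predictions during evaluation")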
Example 3
import argparse


def main():
    parser = argparse.ArgumentParser(description="-----[IMDB-classifier]-----")
    parser.add_argument("--sample",
                        default=False,
                        action='store_true',
                        help="flag whether use sample dataset")
    parser.add_argument(
        "--mode",
        default="train",
        help="train: train (with test) a model / test: test saved models")
    parser.add_argument("--model",
                        default="simple-gru",
                        help="available models: simple-gru, ...")
    parser.add_argument("--epoch",
                        default=10,
                        type=int,
                        help="number of max epoch")
    parser.add_argument("--learning_rate",
                        default=0.001,
                        type=float,
                        help="learning rate")
    parser.add_argument("--batch_size",
                        default=32,
                        type=int,
                        help="batch size")

    options = parser.parse_args()

    params = {
        'sample': options.sample,
        'model': options.model,
        'mode': options.mode,
        'batch_size': options.batch_size,
        'epoch': options.epoch,
        'learning_rate': options.learning_rate
    }

    model_runner = ModelRunner(params)
    if options.mode == 'train':
        print("=" * 20 + "TRAINING STARTED" + "=" * 20)
        model_runner.train()
    elif options.mode == 'test':
        print("=" * 20 + "TESTING STARTED" + "=" * 20)
        model_runner.load_model()
        model_runner.test()
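
# Assuming Example 3 lives in a script such as imdb_classifier.py (a
# hypothetical file name), it would typically be launched from the shell as
#   python imdb_classifier.py --mode train --model simple-gru --epoch 10
#   python imdb_classifier.py --mode test
if __name__ == "__main__":
    main()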