def test_run(self):
    os.system('ls -l')
    os.system('ls -l tests')
    # Substitute the test data for the demo data,
    # because running the full demo on Travis is not possible.
    if not os.path.exists('tests/slt_arctic_merlin_full'):
        os.symlink('slt_arctic_merlin_test', 'tests/slt_arctic_merlin_full')
    os.listdir('tests/slt_arctic_merlin_full')
    os.system('ls -l tests/slt_arctic_merlin_full')

    import run

    print('Overwrite the configuration to run a smoke test')
    run.cfg.id_valid_start = 8
    run.cfg.id_valid_nb = 1
    run.cfg.id_test_nb = 1
    run.cfg.train_batch_size = 2
    run.cfg.train_min_nbepochs = 1
    run.cfg.train_max_nbepochs = 5
    run.cfg.train_cancel_nodecepochs = 2
    run.cfg.model_hiddensize = 4
    run.cfg.model_nbprelayers = 1
    run.cfg.model_nbcnnlayers = 1
    run.cfg.model_nbfilters = 2
    run.cfg.model_spec_freqlen = 3
    run.cfg.model_nm_freqlen = 3
    run.cfg.model_windur = 0.020
    run.cfg.print_content()

    run.features_extraction()
    run.contexts_extraction()
    run.training(cont=False)
    run.generate('model-last.pkl')
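# A minimal sketch of the same smoke-test idea with automatic cleanup (an
# assumption, not part of the test above): unittest.mock.patch.multiple
# overrides the module-level config only inside the with-block, so the
# original values are restored for any other tests.
import unittest.mock

import run

smoke_overrides = {
    "train_batch_size": 2,
    "train_max_nbepochs": 1,
    "model_hiddensize": 4,
}

with unittest.mock.patch.multiple(run.cfg, **smoke_overrides):
    run.training(cont=False)  # trains with the shrunken configuration
# run.cfg is back to its original values here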
default="./Test/t1.png") parser.add_argument('--scale', help='Scaling factor of the model', default=2) parser.add_argument('--epoch', help='Number of epochs during training', default=100) parser.add_argument('--lr', help='Sets the learning rate', default=0.01) args = parser.parse_args() ARGS = dict() ARGS["SCALE"] = int(args.scale) main_ckpt_dir = "./checkpoints" if not os.path.exists(main_ckpt_dir): os.makedirs(main_ckpt_dir) ARGS["CKPT_dir"] = main_ckpt_dir + "/checkpoint" + "_sc" + str(args.scale) ARGS["CKPT"] = ARGS["CKPT_dir"] + "/ESPCN_ckpt_sc" + str(args.scale) ARGS["TRAINDIR"] = args.traindir ARGS["EPOCH_NUM"] = int(args.epoch) ARGS["TESTIMG"] = args.testimg ARGS["LRATE"] = float(args.lr) if args.train: run.training(ARGS) elif args.test: run.test(ARGS) elif args.export: run.export(ARGS)
train_ds = tf.data.Dataset.from_tensor_slices(train).shuffle(10000).batch(batch_size)
dev_ds = tf.data.Dataset.from_tensor_slices(dev).shuffle(2000).batch(batch_size * 2)
test_ds = tf.data.Dataset.from_tensor_slices(test).shuffle(2000).batch(batch_size * 2)

embedding_pretrained = utils.load_word2vec('data/embeddings/wiki_100.utf8',
                                           token2idx, embed_dim,
                                           'data/embeddings/embed_mat.npy')
model = LSTM_CRF(len(token2idx), embed_dim, maxlen, len(tag2idx),
                 rnn_hiden_size, embedding_pretrained)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.003)

run.training(model, train_ds, dev_ds, epochs, optimizer)
run.evaluate(model, test_ds, data_name="test set")

# save model
# print("\nsave model...")
# model.save_weights('model saved/')

# load model
# print("load model...")
# model.load_weights('model saved/')
# model.summary()

run.evaluate(model, test_ds, data_name="test set", print_score=True,
             tag_names=list(tag2idx.keys()))
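# A minimal sketch of the commented-out save/load step above, assuming
# LSTM_CRF is a tf.keras.Model subclass; the checkpoint path is hypothetical.
ckpt_path = 'saved_model/lstm_crf_ckpt'
model.save_weights(ckpt_path)  # persist the trained weights
model.load_weights(ckpt_path)  # restore them before a later evaluation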
import argparse
import time

import numpy as np
import torch


def random_seed():
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # make every run reproducible


parser = argparse.ArgumentParser(description='--BERT classification task--')
parser.add_argument('--model', type=str, default='BERT', help='model_name')
args = parser.parse_args()

if __name__ == "__main__":
    model_name = args.model  # taken from the command line
    config = Config()
    random_seed()
    start_time = time.time()

    # Load the data.
    train_Iter = utils.get_dataIter(config.train_path, config)
    dev_Iter = utils.get_dataIter(config.dev_path, config)
    test_Iter = utils.get_dataIter(config.test_path, config)
    print('Time used:', utils.get_time_dif(start_time))

    # Train the model, then predict on the test set.
    model = Model(config).to(config.device)
    run.training(config, model, train_Iter, dev_Iter)
    run.predction(config, model, test_Iter)
    print()
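# A fuller seeding helper (an assumption, not from the script above): for
# reproducible PyTorch runs, Python's own RNG is usually seeded as well and
# the cuDNN autotuner disabled, since benchmark mode may pick different
# kernels between runs.
import random

import numpy as np
import torch


def seed_everything(seed: int = 1):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)        # seed every visible GPU
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # avoid nondeterministic kernel choices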