# Test for SummarizationLoader.process; assumes it is a method of a unittest.TestCase
# subclass and that vocab_size, vocab_path, sent_max_len and doc_max_timesteps are
# defined at module level.
def test_case1(self):
    sum_loader = SummarizationLoader()
    paths = {
        "train": "testdata/train.jsonl",
        "valid": "testdata/val.jsonl",
        "test": "testdata/test.jsonl"
    }
    data = sum_loader.process(paths=paths,
                              vocab_size=vocab_size,
                              vocab_path=vocab_path,
                              sent_max_len=sent_max_len,
                              doc_max_timesteps=doc_max_timesteps)
    print(data.datasets)
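# A hedged sketch of a stronger check than print(): it assumes the object returned by
# process() exposes datasets as a dict of fastNLP DataSets keyed by split, as the rest
# of this repo does. The method name test_case1_contents is hypothetical.
def test_case1_contents(self):
    sum_loader = SummarizationLoader()
    paths = {
        "train": "testdata/train.jsonl",
        "valid": "testdata/val.jsonl",
        "test": "testdata/test.jsonl"
    }
    data = sum_loader.process(paths=paths,
                              vocab_size=vocab_size,
                              vocab_path=vocab_path,
                              sent_max_len=sent_max_len,
                              doc_max_timesteps=doc_max_timesteps)
    for split in ("train", "valid", "test"):
        self.assertIn(split, data.datasets)                # every split was built
        self.assertGreater(len(data.datasets[split]), 0)   # and is non-empty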
# Entry point for training/testing the Transformer summarization model.
# Assumes module-level imports and objects: argparse, os, datetime, logging, torch,
# torch.nn as nn, SummarizationLoader, TransformerModel, BucketSampler, DataSetIter,
# Const, logger, formatter, setup_training and run_test.
def main():
    parser = argparse.ArgumentParser(description='Transformer Model')

    # Where to find data
    parser.add_argument('--data_path', type=str,
                        default='/remote-home/dqwang/Datasets/CNNDM/train.label.jsonl',
                        help='Path expression to training data files.')
    parser.add_argument('--valid_path', type=str,
                        default='/remote-home/dqwang/Datasets/CNNDM/val.label.jsonl',
                        help='Path expression to validation data files.')
    parser.add_argument('--vocab_path', type=str,
                        default='/remote-home/dqwang/Datasets/CNNDM/vocab',
                        help='Path expression to text vocabulary file.')
    parser.add_argument('--embedding_path', type=str,
                        default='/remote-home/dqwang/Glove/glove.42B.300d.txt',
                        help='Path expression to external word embedding.')

    # Important settings
    parser.add_argument('--mode', type=str, default='train',
                        help='must be one of train/test')
    parser.add_argument('--restore_model', type=str, default='None',
                        help='Restore model for further training. [bestmodel/bestFmodel/earlystop/None]')
    parser.add_argument('--test_model', type=str, default='evalbestmodel',
                        help='choose different model to test [evalbestmodel/evalbestFmodel/trainbestmodel/trainbestFmodel/earlystop]')
    parser.add_argument('--use_pyrouge', action='store_true', default=False,
                        help='use pyrouge for evaluation')

    # Where to save output
    parser.add_argument('--save_root', type=str, default='save/',
                        help='Root directory for all models.')
    parser.add_argument('--log_root', type=str, default='log/',
                        help='Root directory for all logging.')

    # Hyperparameters
    parser.add_argument('--gpu', type=str, default='0',
                        help='GPU ID to use. For cpu, set -1 [default: 0]')
    parser.add_argument('--cuda', action='store_true', default=False, help='use cuda')
    parser.add_argument('--vocab_size', type=int, default=100000,
                        help='Size of vocabulary. These will be read from the vocabulary file in order. '
                             'If the vocabulary file contains fewer words than this number, or if this '
                             'number is set to 0, will take all words in the vocabulary file.')
    parser.add_argument('--n_epochs', type=int, default=20,
                        help='Number of epochs [default: 20]')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='Mini batch size [default: 32]')
    parser.add_argument('--word_embedding', action='store_true', default=True,
                        help='whether to use word embedding')
    parser.add_argument('--word_emb_dim', type=int, default=300,
                        help='Word embedding size [default: 300]')
    parser.add_argument('--embed_train', action='store_true', default=False,
                        help='whether to train word embedding [default: False]')
    parser.add_argument('--min_kernel_size', type=int, default=1,
                        help='kernel min length for CNN [default: 1]')
    parser.add_argument('--max_kernel_size', type=int, default=7,
                        help='kernel max length for CNN [default: 7]')
    parser.add_argument('--output_channel', type=int, default=50,
                        help='output channel: repeated times for one kernel')
    parser.add_argument('--n_layers', type=int, default=12,
                        help='Number of deeplstm layers')
    parser.add_argument('--hidden_size', type=int, default=512,
                        help='hidden size [default: 512]')
    parser.add_argument('--ffn_inner_hidden_size', type=int, default=2048,
                        help='PositionwiseFeedForward inner hidden size [default: 2048]')
    parser.add_argument('--n_head', type=int, default=8,
                        help='multihead attention number [default: 8]')
    parser.add_argument('--recurrent_dropout_prob', type=float, default=0.1,
                        help='recurrent dropout prob [default: 0.1]')
    parser.add_argument('--atten_dropout_prob', type=float, default=0.1,
                        help='attention dropout prob [default: 0.1]')
    parser.add_argument('--ffn_dropout_prob', type=float, default=0.1,
                        help='PositionwiseFeedForward dropout prob [default: 0.1]')
    parser.add_argument('--use_orthnormal_init', action='store_true', default=True,
                        help='use orthonormal init for lstm [default: True]')
    parser.add_argument('--sent_max_len', type=int, default=100,
                        help='max length of sentences (max source text sentence tokens)')
    parser.add_argument('--doc_max_timesteps', type=int, default=50,
                        help='max length of documents (max timesteps of documents)')
    parser.add_argument('--save_label', action='store_true', default=False,
                        help='whether to save the prediction labels')

    # Training
    parser.add_argument('--lr', type=float, default=0.0001, help='learning rate')
    parser.add_argument('--lr_descent', action='store_true', default=False,
                        help='learning rate descent')
    parser.add_argument('--warmup_steps', type=int, default=4000, help='warmup steps')
    parser.add_argument('--grad_clip', action='store_true', default=False,
                        help='for gradient clipping')
    parser.add_argument('--max_grad_norm', type=float, default=1.0,
                        help='for gradient clipping max gradient normalization')

    parser.add_argument('-m', type=int, default=3, help='decode summary length')
    parser.add_argument('--limited', action='store_true', default=False,
                        help='limited decode summary length')

    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.set_printoptions(threshold=50000)

    hps = args

    # File paths
    DATA_FILE = args.data_path
    VALID_FILE = args.valid_path
    VOCAB_FILE = args.vocab_path
    LOG_PATH = args.log_root

    # train_log setting
    if not os.path.exists(LOG_PATH):
        if hps.mode == "train":
            os.makedirs(LOG_PATH)
        else:
            logger.exception("[Error] Logdir %s doesn't exist. Run in train mode to create it.", LOG_PATH)
            raise Exception("[Error] Logdir %s doesn't exist. Run in train mode to create it." % LOG_PATH)
    nowTime = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    log_path = os.path.join(LOG_PATH, hps.mode + "_" + nowTime)
    file_handler = logging.FileHandler(log_path)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    logger.info("Pytorch %s", torch.__version__)
    logger.info(args)

    sum_loader = SummarizationLoader()

    if hps.mode == 'test':
        paths = {"test": DATA_FILE}
        hps.recurrent_dropout_prob = 0.0
        hps.atten_dropout_prob = 0.0
        hps.ffn_dropout_prob = 0.0
        logger.info(hps)
    else:
        paths = {"train": DATA_FILE, "valid": VALID_FILE}

    dataInfo = sum_loader.process(paths=paths,
                                  vocab_size=hps.vocab_size,
                                  vocab_path=VOCAB_FILE,
                                  sent_max_len=hps.sent_max_len,
                                  doc_max_timesteps=hps.doc_max_timesteps,
                                  load_vocab=os.path.exists(VOCAB_FILE))

    vocab = dataInfo.vocabs["vocab"]
    model = TransformerModel(hps, vocab)

    if len(hps.gpu) > 1:
        gpuid = [int(s) for s in hps.gpu.split(',')]
        model = nn.DataParallel(model, device_ids=gpuid)
        logger.info("[INFO] Use Multi-gpu: %s", hps.gpu)
    if hps.cuda:
        model = model.cuda()
        logger.info("[INFO] Use cuda")

    if hps.mode == 'train':
        trainset = dataInfo.datasets["train"]
        train_sampler = BucketSampler(batch_size=hps.batch_size, seq_len_field_name=Const.INPUT)
        train_batch = DataSetIter(batch_size=hps.batch_size, dataset=trainset, sampler=train_sampler)
        validset = dataInfo.datasets["valid"]
        validset.set_input("text", "summary")
        valid_batch = DataSetIter(batch_size=hps.batch_size, dataset=validset)
        setup_training(model, train_batch, valid_batch, hps)
    elif hps.mode == 'test':
        logger.info("[INFO] Decoding...")
        testset = dataInfo.datasets["test"]
        testset.set_input("text", "summary")
        test_batch = DataSetIter(batch_size=hps.batch_size, dataset=testset)
        run_test(model, test_batch, hps, limited=hps.limited)
    else:
        logger.error("The 'mode' flag must be one of train/test")
        raise ValueError("The 'mode' flag must be one of train/test")
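# The usual entry-point guard (assumed; it may already exist in the original file):
if __name__ == '__main__':
    main()

# Example invocations, as a hedged sketch: the script name train_transformer.py and the
# data paths are placeholders, while the flags are the ones defined above.
#
#   # train on GPU 0 with the default Transformer hyperparameters
#   python train_transformer.py --mode train --cuda --gpu 0 \
#       --data_path CNNDM/train.label.jsonl --valid_path CNNDM/val.label.jsonl \
#       --vocab_path CNNDM/vocab --batch_size 32 --n_epochs 20
#
#   # decode the test set with a previously saved model (--data_path is reused as the test file)
#   python train_transformer.py --mode test --cuda --gpu 0 \
#       --data_path CNNDM/test.label.jsonl --vocab_path CNNDM/vocab \
#       --test_model evalbestmodel --use_pyrouge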
from fastNLP.core.const import Const

from data.dataloader import SummarizationLoader
from tools.data import ExampleSet, Vocab

vocab_size = 100000
vocab_path = "test/testdata/vocab"
sent_max_len = 100
doc_max_timesteps = 50

# paths = {"train": "test/testdata/train.jsonl", "valid": "test/testdata/val.jsonl"}
paths = {
    "train": "/remote-home/dqwang/Datasets/CNNDM/train.label.jsonl",
    "valid": "/remote-home/dqwang/Datasets/CNNDM/val.label.jsonl"
}

sum_loader = SummarizationLoader()
dataInfo = sum_loader.process(paths=paths,
                              vocab_size=vocab_size,
                              vocab_path=vocab_path,
                              sent_max_len=sent_max_len,
                              doc_max_timesteps=doc_max_timesteps,
                              load_vocab_file=True)
trainset = dataInfo.datasets["train"]

vocab = Vocab(vocab_path, vocab_size)
dataset = ExampleSet(paths["train"], vocab, doc_max_timesteps, sent_max_len)

# print(trainset[0]["text"])
# print(dataset.get_example(0).original_article_sents)
# print(trainset[0]["words"])
# print(dataset[0][0].numpy().tolist())
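# A hedged consistency check between the two pipelines, mirroring the commented prints
# above. It assumes trainset stores padded word ids in the "words" field and that
# ExampleSet.__getitem__ returns the input-id tensor first, as those prints suggest;
# n_check is just a local helper variable.
n_check = min(5, len(trainset), len(dataset))
for i in range(n_check):
    loader_words = trainset[i]["words"]
    example_words = dataset[i][0].numpy().tolist()
    if loader_words != example_words:
        print("Mismatch at example %d" % i)
        break
else:
    print("First %d examples agree between SummarizationLoader and ExampleSet" % n_check)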
# Entry point for the configurable summarization model (choice of embedding, sentence
# encoder and sentence decoder). Assumes module-level imports and objects: argparse, os,
# sys, json, datetime, logging, torch, SummarizationLoader, EmbedLoader, TransformerModel,
# logger, formatter, setup_training and run_test.
def main():
    parser = argparse.ArgumentParser(description='Summarization Model')

    # Where to find data
    parser.add_argument('--data_path', type=str,
                        default='/remote-home/dqwang/Datasets/CNNDM/train.label.jsonl',
                        help='Path expression to training data files.')
    parser.add_argument('--valid_path', type=str,
                        default='/remote-home/dqwang/Datasets/CNNDM/val.label.jsonl',
                        help='Path expression to validation data files.')
    parser.add_argument('--vocab_path', type=str,
                        default='/remote-home/dqwang/Datasets/CNNDM/vocab',
                        help='Path expression to text vocabulary file.')

    # Important settings
    parser.add_argument('--mode', choices=['train', 'test'], default='train',
                        help='must be one of train/test')
    parser.add_argument('--embedding', type=str, default='glove',
                        choices=['word2vec', 'glove', 'elmo', 'bert'],
                        help='must be one of word2vec/glove/elmo/bert')
    parser.add_argument('--sentence_encoder', type=str, default='transformer',
                        choices=['bilstm', 'deeplstm', 'transformer'],
                        help='must be one of bilstm/deeplstm/transformer')
    parser.add_argument('--sentence_decoder', type=str, default='SeqLab',
                        choices=['PN', 'SeqLab'],
                        help='must be one of PN/SeqLab')
    parser.add_argument('--restore_model', type=str, default='None',
                        help='Restore model for further training. [bestmodel/bestFmodel/earlystop/None]')

    # Where to save output
    parser.add_argument('--save_root', type=str, default='save/',
                        help='Root directory for all models.')
    parser.add_argument('--log_root', type=str, default='log/',
                        help='Root directory for all logging.')

    # Hyperparameters
    parser.add_argument('--gpu', type=str, default='0',
                        help='GPU ID to use. For cpu, set -1 [default: 0]')
    parser.add_argument('--cuda', action='store_true', default=False, help='use cuda')
    parser.add_argument('--vocab_size', type=int, default=100000,
                        help='Size of vocabulary. These will be read from the vocabulary file in order. '
                             'If the vocabulary file contains fewer words than this number, or if this '
                             'number is set to 0, will take all words in the vocabulary file.')
    parser.add_argument('--n_epochs', type=int, default=20,
                        help='Number of epochs [default: 20]')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='Mini batch size [default: 32]')
    parser.add_argument('--word_embedding', action='store_true', default=True,
                        help='whether to use word embedding')
    parser.add_argument('--embedding_path', type=str,
                        default='/remote-home/dqwang/Glove/glove.42B.300d.txt',
                        help='Path expression to external word embedding.')
    parser.add_argument('--word_emb_dim', type=int, default=300,
                        help='Word embedding size [default: 300]')
    parser.add_argument('--embed_train', action='store_true', default=False,
                        help='whether to train word embedding [default: False]')
    parser.add_argument('--min_kernel_size', type=int, default=1,
                        help='kernel min length for CNN [default: 1]')
    parser.add_argument('--max_kernel_size', type=int, default=7,
                        help='kernel max length for CNN [default: 7]')
    parser.add_argument('--output_channel', type=int, default=50,
                        help='output channel: repeated times for one kernel')
    parser.add_argument('--use_orthnormal_init', action='store_true', default=True,
                        help='use orthonormal init for lstm [default: True]')
    parser.add_argument('--sent_max_len', type=int, default=100,
                        help='max length of sentences (max source text sentence tokens)')
    parser.add_argument('--doc_max_timesteps', type=int, default=50,
                        help='max length of documents (max timesteps of documents)')
    parser.add_argument('--save_label', action='store_true', default=False,
                        help='whether to save the prediction labels')

    # Training
    parser.add_argument('--lr', type=float, default=0.0001, help='learning rate')
    parser.add_argument('--lr_descent', action='store_true', default=False,
                        help='learning rate descent')
    parser.add_argument('--warmup_steps', type=int, default=4000, help='warmup steps')
    parser.add_argument('--grad_clip', action='store_true', default=False,
                        help='for gradient clipping')
    parser.add_argument('--max_grad_norm', type=float, default=10,
                        help='for gradient clipping max gradient normalization')

    # Test
    parser.add_argument('-m', type=int, default=3, help='decode summary length')
    parser.add_argument('--limited', action='store_true', default=False,
                        help='limited decode summary length')
    parser.add_argument('--test_model', type=str, default='evalbestmodel',
                        help='choose different model to test [evalbestmodel/evalbestFmodel/trainbestmodel/trainbestFmodel/earlystop]')
    parser.add_argument('--use_pyrouge', action='store_true', default=False,
                        help='use pyrouge for evaluation')

    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.set_printoptions(threshold=50000)

    # File paths
    DATA_FILE = args.data_path
    VALID_FILE = args.valid_path
    VOCAB_FILE = args.vocab_path
    LOG_PATH = args.log_root

    # train_log setting
    if not os.path.exists(LOG_PATH):
        if args.mode == "train":
            os.makedirs(LOG_PATH)
        else:
            logger.exception("[Error] Logdir %s doesn't exist. Run in train mode to create it.", LOG_PATH)
            raise Exception("[Error] Logdir %s doesn't exist. Run in train mode to create it." % LOG_PATH)
    nowTime = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    log_path = os.path.join(LOG_PATH, args.mode + "_" + nowTime)
    file_handler = logging.FileHandler(log_path)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    logger.info("Pytorch %s", torch.__version__)

    sum_loader = SummarizationLoader()
    hps = args

    if hps.mode == 'test':
        paths = {"test": DATA_FILE}
        hps.recurrent_dropout_prob = 0.0
        hps.atten_dropout_prob = 0.0
        hps.ffn_dropout_prob = 0.0
        logger.info(hps)
    else:
        paths = {"train": DATA_FILE, "valid": VALID_FILE}

    dataInfo = sum_loader.process(paths=paths,
                                  vocab_size=hps.vocab_size,
                                  vocab_path=VOCAB_FILE,
                                  sent_max_len=hps.sent_max_len,
                                  doc_max_timesteps=hps.doc_max_timesteps,
                                  load_vocab=os.path.exists(VOCAB_FILE))

    if args.embedding == "glove":
        vocab = dataInfo.vocabs["vocab"]
        embed = torch.nn.Embedding(len(vocab), hps.word_emb_dim)
        if hps.word_embedding:
            embed_loader = EmbedLoader()
            # words not found in the pretrained file keep their random initialization
            pretrained_weight = embed_loader.load_with_vocab(hps.embedding_path, vocab)
            embed.weight.data.copy_(torch.from_numpy(pretrained_weight))
            embed.weight.requires_grad = hps.embed_train
    else:
        logger.error("[ERROR] embedding To Be Continued!")
        sys.exit(1)

    if args.sentence_encoder == "transformer" and args.sentence_decoder == "SeqLab":
        model_param = json.load(open("config/transformer.config", "rb"))
        hps.__dict__.update(model_param)
        model = TransformerModel(hps, embed)
    else:
        logger.error("[ERROR] Model To Be Continued!")
        sys.exit(1)

    logger.info(hps)

    if hps.cuda:
        model = model.cuda()
        logger.info("[INFO] Use cuda")

    if hps.mode == 'train':
        dataInfo.datasets["valid"].set_target("text", "summary")
        setup_training(model, dataInfo.datasets["train"], dataInfo.datasets["valid"], hps)
    elif hps.mode == 'test':
        logger.info("[INFO] Decoding...")
        dataInfo.datasets["test"].set_target("text", "summary")
        run_test(model, dataInfo.datasets["test"], hps, limited=hps.limited)
    else:
        logger.error("The 'mode' flag must be one of train/test")
        raise ValueError("The 'mode' flag must be one of train/test")
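# The usual entry-point guard (assumed; it may already exist in the original file):
if __name__ == '__main__':
    main()

# Example invocation, as a hedged sketch: the script name train.py and the data paths are
# placeholders, while the flags are the ones defined above. Since only the glove embedding
# with the transformer encoder and SeqLab decoder is implemented so far, that combination
# is the one shown.
#
#   python train.py --mode train --cuda --gpu 0 \
#       --embedding glove --sentence_encoder transformer --sentence_decoder SeqLab \
#       --data_path CNNDM/train.label.jsonl --valid_path CNNDM/val.label.jsonl \
#       --vocab_path CNNDM/vocab --embedding_path glove.42B.300d.txt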