# NOTE(review): this line is a whitespace-collapsed fragment of a word2def/word2word
# training script. It begins mid-function (the `full_ret[...] = ret / return ...` tail
# of a padding helper whose `def` line is not visible here) and is truncated mid-call
# at the end (`partial(sq.read_word2def_data,` never closes). It is NOT valid Python
# as-is — recover the original file with real newlines before editing.
# Visible flow (assuming restored formatting — TODO confirm against the original repo):
#   1. helper tail: copies `ret` into the top-left corner of a pre-padded `full_ret`
#      buffer, then returns (full_batch, full_ret) — presumably fixed-size batch padding.
#   2. __main__ guard: builds option groups (model/train/pg/decode) via the `sq`
#      package, parses CLI args (duplicating enc:/dec: prefixed options), and unpacks
#      the merged option dicts.
#   3. `data_fn`: loads encoder/decoder/char vocabularies from `opt['data_dir']` and
#      starts building a tab-separated line reader; the `read_fn` partial is cut off.
full_ret[:ret.shape[0], :ret.shape[1]] = ret return full_batch, full_ret if __name__ == '__main__': start_time = time.time() group_default = { 'model': sq.Word2DefModel.default_opt(), 'train': sq.default_training_opt(), 'pg': sq.policy_gradient_opt(), 'decode': sq.default_decoding_opt() } parser = sq.get_common_argparser('main_word2word.py') sq.add_arg_group_defaults(parser, group_default) opt, groups = sq.parse_set_args(parser, group_default, dup_replaces=('enc:', 'dec:')) logger, all_opt = sq.init_exp_opts(opt, groups, group_default) opt, model_opt, train_opt, decode_opt, pg_opt = all_opt def data_fn(): dpath = partial(os.path.join, opt['data_dir']) enc_vocab = sq.Vocabulary.from_vocab_file(dpath('enc_vocab.txt')) dec_vocab = sq.Vocabulary.from_vocab_file(dpath('dec_vocab.txt')) char_vocab = sq.Vocabulary.from_vocab_file(dpath('char_vocab.txt')) file_list = (opt['train_file'], opt['valid_file'], opt['eval_file']) line_fn = partial(sq.read_lines, token_split=' ', part_split='\t', part_indices=(0, -1)) read_fn = partial(sq.read_word2def_data,
# NOTE(review): this line is a whitespace-collapsed fragment of a SEPARATE script
# (`main_lm.py`, a language-model runner) concatenated after the previous fragment.
# It is truncated mid-expression at the end (`data = ([ data_fn(` — the call and the
# enclosing list are never closed), so it is NOT valid Python as-is; recover the
# original file with real newlines before editing.
# Visible flow (assuming restored formatting — TODO confirm against the original repo):
#   1. __main__ guard: builds model/train/decode option groups via the `sq` package,
#      adds LM-specific CLI flags (--seq_len, --sentence_level, --training_weights —
#      note `type=bool` on an argparse flag parses any non-empty string as True;
#      flag as a likely latent bug when the full file is available), parses args,
#      and unpacks the merged option dicts.
#   2. `data_fn`: loads a single shared vocabulary (same vocab for input and output —
#      standard LM setup), binds a `sq.read_seq_data` partial with seq_len /
#      sentence_level / training-weight options, then begins building the train/
#      valid/eval data list; the expression is cut off.
if __name__ == '__main__': start_time = time.time() group_default = { 'model': sq.SeqModel.default_opt(), 'train': sq.default_training_opt(), 'decode': sq.default_decoding_opt() } parser = sq.get_common_argparser('main_lm.py') parser.add_argument('--seq_len', type=int, default=20, help=' ') parser.add_argument('--sentence_level', action='store_true', help=' ') parser.add_argument('--training_weights', type=bool, default=False, help=' ') sq.add_arg_group_defaults(parser, group_default) opt, groups = sq.parse_set_args(parser, group_default) logger, all_opt = sq.init_exp_opts(opt, groups, group_default) opt, model_opt, train_opt, decode_opt = all_opt def data_fn(): dpath = partial(os.path.join, opt['data_dir']) vocab = sq.Vocabulary.from_vocab_file(dpath('vocab.txt')) data_fn = partial(sq.read_seq_data, in_vocab=vocab, out_vocab=vocab, keep_sentence=opt['sentence_level'], seq_len=opt['seq_len'], tr_weights=opt['training_weights']) data = ([ data_fn(