Example #1
def experiment_fn(run_config, params):

    model = Model()
    estimator = tf.estimator.Estimator(
            model_fn=model.model_fn,
            model_dir=Config.train.model_dir,
            params=params,
            config=run_config)

    source_vocab = data_loader.load_vocab("source_vocab")
    target_vocab = data_loader.load_vocab("target_vocab")

    Config.data.rev_source_vocab = utils.get_rev_vocab(source_vocab)
    Config.data.rev_target_vocab = utils.get_rev_vocab(target_vocab)
    Config.data.source_vocab_size = len(source_vocab)
    Config.data.target_vocab_size = len(target_vocab)

    train_data, test_data = data_loader.make_train_and_test_set()
    train_input_fn, train_input_hook = data_loader.get_dataset_batch(train_data,
                                                                     batch_size=Config.model.batch_size,
                                                                     scope="train")
    test_input_fn, test_input_hook = data_loader.get_dataset_batch(test_data,
                                                                   batch_size=Config.model.batch_size,
                                                                   scope="test")

    train_hooks = [train_input_hook]
    if Config.train.print_verbose:
        train_hooks.append(hook.print_variables(
            variables=['train/enc_0'],
            rev_vocab=utils.get_rev_vocab(source_vocab),
            every_n_iter=Config.train.check_hook_n_iter))
        train_hooks.append(hook.print_variables(
            variables=['train/target_0', 'train/pred_0'],
            rev_vocab=utils.get_rev_vocab(target_vocab),
            every_n_iter=Config.train.check_hook_n_iter))
    if Config.train.debug:
        train_hooks.append(tf_debug.LocalCLIDebugHook())

    eval_hooks = [test_input_hook]
    if Config.train.debug:
        eval_hooks.append(tf_debug.LocalCLIDebugHook())

    experiment = tf.contrib.learn.Experiment(
        estimator=estimator,
        train_input_fn=train_input_fn,
        eval_input_fn=test_input_fn,
        train_steps=Config.train.train_steps,
        min_eval_frequency=Config.train.min_eval_frequency,
        train_monitors=train_hooks,
        eval_hooks=eval_hooks
    )
    return experiment
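
All of these examples lean on utils.get_rev_vocab to map token ids back into tokens. The helper itself is not shown on this page; as a rough sketch, assuming vocab is a token -> id dictionary (the project's actual implementation may differ), it could simply invert that mapping:

# Hypothetical sketch: invert a token -> id vocabulary so that predicted
# ids can be mapped back to tokens (as done with rev_vocab.get in Example #3).
def get_rev_vocab(vocab):
    return {token_id: token for token, token_id in vocab.items()}

With such an inverse mapping, rev_vocab.get(x, '') turns predicted ids back into tokens and silently drops any id that is not in the vocabulary.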
Example #2
File: main2.py  Project: PhantomGrapes/ds
def main(Config, mode):
    # returns a dictionary
    vocab = data_loader.load_vocab("vocab")
    Config.data.vocab_size = len(vocab)
    with open(os.path.join(Config.data.base_path, Config.data.processed_path,
                           'oov_size'),
              'r',
              encoding='utf-8') as f:
        oov_size = int(f.readline().strip())
    Config.data.oov_size = oov_size
    Config.data.vocab = vocab
    rev_vocab = utils.get_rev_vocab(vocab)
    Config.data.rev_vocab = rev_vocab
    if mode == 'train':
        # save_path = os.path.join(Config.train.model_dir)
        # if not os.path.exists(save_path):
        #     os.makedirs(save_path)
        # with open(os.path.join(save_path, 'vocab.pkl'), 'wb') as f:
        #     cPickle.dump(vocab, f)

        # define the training data
        train_X, test_X, train_y, test_y = data_loader.make_train_and_test_set(
            tsize=Config.train.size)

        model = Model(Config)
        trainer = dy.AdamTrainer(model.model)
        # model.load('model-1-final')
        global best_blue

        for e in range(Config.train.epoch):
            dev_blue = train(train_X, train_y, model, Config, test_X, test_y,
                             e, trainer)
            if dev_blue > best_blue:
                # if (e + 1) % 50 == 0:
                best_blue = dev_blue
                model.save('model-{}-{}'.format(e + 1, 'final'))
                eval(train_X, train_y, model)

    if mode == 'eval':
        # save_path = os.path.join(Config.train.model_dir)
        # with open(os.path.join(save_path, 'vocab.pkl'), 'rb') as f:
        #     vocab = cPickle.load(f)
        Config.vocab = vocab
        rev_vocab = utils.get_rev_vocab(vocab)
        Config.data.rev_vocab = rev_vocab

        test_X, test_y = data_loader.make_eval_set()

        model = Model(Config)
        model.load('model-1-final')

        eval(test_X, test_y, model)
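
Both of the examples above read and write nested attributes on a global Config object (Config.data.vocab_size, Config.train.model_dir, and so on). The config class itself is not part of this page; the following is a minimal, purely illustrative sketch of a dot-accessible namespace that would support this usage pattern (the real project presumably loads its values from a config file, as the Config(args.config) call in Example #4 suggests):

# Hypothetical sketch of a dot-accessible config namespace supporting
# nested reads and writes such as Config.data.vocab_size = len(vocab).
class _Namespace(dict):
    def __getattr__(self, name):
        # create nested namespaces on first access
        return self.setdefault(name, _Namespace())

    def __setattr__(self, name, value):
        self[name] = value

Config = _Namespace()
Config.data.vocab_size = 1000   # write a nested value
print(Config.data.vocab_size)   # read it back -> 1000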
Example #3
def chat(ids, vocab):

    X = np.array(data_loader._pad_input(ids, Config.data.max_seq_length),
                 dtype=np.int32)
    X = np.reshape(X, (1, Config.data.max_seq_length))

    predict_input_fn = tf.estimator.inputs.numpy_input_fn(x={"input_data": X},
                                                          num_epochs=1,
                                                          shuffle=False)

    estimator = _make_estimator()
    result = estimator.predict(input_fn=predict_input_fn)

    prediction = next(result)["prediction"]

    beam_width = Config.predict.get('beam_width', 0)
    if beam_width > 0:

        def select_by_score(predictions):
            p_list = list(predictions)

            scores = []
            for p in p_list:
                score = 0

                unknown_count = len(list(filter(lambda x: x == -1, p)))
                score -= 2 * unknown_count

                eos_except_last_count = len(
                    list(filter(lambda x: x == Config.data.EOS_ID, p[:-1])))
                score -= 2 * eos_except_last_count

                distinct_id_count = len(list(set(p)))
                score += 1 * distinct_id_count

                if eos_except_last_count == 0 and p[-1] == Config.data.EOS_ID:
                    score += 5

                scores.append(score)

            max_score_index = scores.index(max(scores))
            return p_list[max_score_index]

        prediction = select_by_score(prediction)

    rev_vocab = utils.get_rev_vocab(vocab)

    def to_str(sequence):
        tokens = [
            rev_vocab.get(x, '') for x in sequence if x != Config.data.PAD_ID
        ]
        return ' '.join(tokens)

    return to_str(prediction)
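
chat() (and main() in Example #4) calls a _make_estimator() helper that is not included here. Going by the Estimator construction in Examples #1 and #4, it presumably wraps the model's model_fn in a tf.estimator.Estimator; the following is a hedged sketch under that assumption, using the TF 1.x API seen throughout these examples:

# Hypothetical sketch of the missing _make_estimator() helper, assuming it
# mirrors the Estimator construction shown in Examples #1 and #4.
def _make_estimator():
    run_config = tf.estimator.RunConfig(model_dir=Config.train.model_dir)
    model = Model()
    return tf.estimator.Estimator(
        model_fn=model.model_fn,
        model_dir=Config.train.model_dir,
        params=Config.model,   # hyperparameters forwarded to model_fn (assumed)
        config=run_config)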
Example #4
File: main.py  Project: PhantomGrapes/ds
def experiment_fn(run_config, params):
    # define the estimator first
    conversation = Conversation()
    estimator = tf.estimator.Estimator(model_fn=conversation.model_fn,
                                       model_dir=Config.train.model_dir,
                                       params=params,
                                       config=run_config)

    # returns a dictionary
    vocab = data_loader.load_vocab("vocab")
    Config.data.vocab_size = len(vocab)

    # define the training data
    train_X, test_X, train_y, test_y = data_loader.make_train_and_test_set()

    train_input_fn, train_input_hook = data_loader.make_batch(
        (train_X, train_y), batch_size=Config.model.batch_size)
    test_input_fn, test_input_hook = data_loader.make_batch(
        (test_X, test_y), batch_size=Config.model.batch_size, scope="test")

    train_hooks = [train_input_hook]
    if Config.train.print_verbose:
        train_hooks.append(
            hook.print_variables(
                variables=['train/enc_0', 'train/dec_0', 'train/pred_0'],
                rev_vocab=utils.get_rev_vocab(vocab),
                every_n_iter=Config.train.check_hook_n_iter))
    if Config.train.debug:
        train_hooks.append(tf_debug.LocalCLIDebugHook())

    eval_hooks = [test_input_hook]
    if Config.train.debug:
        eval_hooks.append(tf_debug.LocalCLIDebugHook())

    # define the experiment
    experiment = tf.contrib.learn.Experiment(
        estimator=estimator,
        train_input_fn=train_input_fn,
        eval_input_fn=test_input_fn,
        train_steps=Config.train.train_steps,
        min_eval_frequency=Config.train.min_eval_frequency,
        train_monitors=train_hooks,
        eval_hooks=eval_hooks,
        eval_delay_secs=0)
    return experiment
def main(ids, vocab):
  X = np.array(data_loader._pad_input(ids, Config.data.max_seq_length), dtype=np.int32)
  X = np.reshape(X, (1, Config.data.max_seq_length))

  predict_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"enc_inputs": X},
    num_epochs=1,
    shuffle=False)

  estimator = _make_estimator()
  result = estimator.predict(input_fn=predict_input_fn)

  prediction = next(result)["prediction"]

  rev_vocab = utils.get_rev_vocab(vocab)

  def to_str(sequence):
    tokens = [
      rev_vocab.get(x, '') for x in sequence if x != Config.data.PAD_ID]
    return ' '.join(tokens)

  return to_str(prediction)

if __name__ == '__main__':
  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument('--config', type=str, default='config',
                      help='config file name')
  parser.add_argument('--src', type=str, default='example source sentence',
                      help='input source sentence')
  args = parser.parse_args()

  Config(args.config)
  Config.train.batch_size = 1

  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
  tf.logging.set_verbosity(tf.logging.ERROR)

  # set data property
  data_loader.set_max_seq_length(['train_ids.enc', 'train_ids.dec', 'test_ids.enc', 'test_ids.dec'])

  source_vocab = data_loader.load_vocab("source_vocab")
  target_vocab = data_loader.load_vocab("target_vocab")

  Config.data.rev_source_vocab = utils.get_rev_vocab(source_vocab)
  Config.data.rev_target_vocab = utils.get_rev_vocab(target_vocab)
  Config.data.source_vocab_size = len(source_vocab)
  Config.data.target_vocab_size = len(target_vocab)

  print("------------------------------------")
  print("Source: " + args.src)
  token_ids = data_loader.sentence2id(source_vocab, args.src)
  prediction = main(token_ids, target_vocab)

  print(" > Result: " + prediction)