Example 1
import os

import tensorflow as tf

import data_utils      # project-local helper modules
import model_config
import model_utils


def main(_):
    config = model_config.Config()
    with tf.Session() as sess:
        forward_only = True

        vocab_path = os.path.join(config.data_dir,
                                  'vocab%d.in' % config.input_vocab_size)

        # Load data
        vocab, vocab_rev = data_utils.load_vocabulary(vocab_path)

        # Decode one example at a time; forward_only builds the model
        # without the training ops.
        config.batch_size = 1
        model = model_utils.create_model(sess, config, forward_only)

        valid_data_path = os.path.join(
            config.data_dir, 'chat_valid_ids%d.in' % config.input_vocab_size)
        dev_set = data_utils.read_test_data_chat(valid_data_path, config)[:2]
        bucket_id = 0

        for i in range(len(dev_set[0])):
            dev_inputs, dev_inputs_length, dev_outputs, dev_outputs_length, target_weights = (
                data_utils.get_test_line(dev_set[bucket_id], i))

            predicted = model.step(sess, dev_inputs, dev_inputs_length,
                                   dev_outputs, dev_outputs_length,
                                   target_weights, forward_only)

            print("\nPrediction Results in Iteration %d : " % i)

            s = ""
            for input in dev_inputs[0]:
                s += (vocab_rev[input]) + " "
            print(s)

            s = ""
            for output in dev_outputs[0]:
                s += (vocab_rev[output]) + " "
            print(s)

            s = ""
            for i in predicted[0][0]:
                s += (vocab_rev[i]) + " "
            print(s)
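
In TensorFlow 1.x a main(_) function like this is normally started through tf.app.run(), which parses command-line flags and then calls main. A minimal sketch of the usual entry point (not part of the original snippet):

if __name__ == '__main__':
    tf.app.run()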
Example 2
_PAD = "_PAD"
_GO = "_GO"
_EOS = "_EOS"
_UNK = "_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
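
# The reserved tokens occupy the first vocabulary slots, so the *_ID
# constants below are simply their positions in _START_VOCAB.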

PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3

_WORD_SPLIT = re.compile(r"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(r"\d{3,}")

config = model_config.Config()


def basic_tokenizer(sentence):
    """Very basic tokenizer: split the sentence into a list of tokens."""
    words = []
    for space_separated_fragment in sentence.strip().split():
        words.extend(re.split(_WORD_SPLIT, space_separated_fragment))
    return [w.lower() for w in words if w]


def create_vocabulary(vocabulary_path,
                      data_path,
                      max_vocabulary_size,
                      tokenizer=None,
                      normalize_digits=True):
    """Create a vocabulary file from a data file.

    NOTE: the original snippet was truncated here; the body below is a
    sketch in the style of the standard TensorFlow seq2seq tutorial.
    """
    if os.path.exists(vocabulary_path):
        return
    vocab = {}
    with open(data_path, "r") as f:
        for line in f:
            tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)
            for w in tokens:
                # Collapse long digit runs (3+ digits) into a single "0".
                word = _DIGIT_RE.sub("0", w) if normalize_digits else w
                vocab[word] = vocab.get(word, 0) + 1
    # Reserved tokens first, then words by descending frequency.
    vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
    with open(vocabulary_path, "w") as vocab_file:
        for w in vocab_list[:max_vocabulary_size]:
            vocab_file.write(w + "\n")
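
As a quick sanity check of basic_tokenizer: punctuation is split out into separate tokens and everything is lowercased.

>>> basic_tokenizer("Hello, world! How's it going?")
['hello', ',', 'world', '!', 'how', "'", 's', 'it', 'going', '?']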
Example 3
import math
import os
import sys
import time

import tensorflow as tf

import data_utils      # project-local helper modules
import model_config
import model_utils


def main(_):
    config = model_config.Config()
    with tf.Session() as sess:
        forward_only = False

        vocab_path = os.path.join(config.data_dir,
                                  'vocab%d.in' % config.input_vocab_size)

        train_data_path = os.path.join(
            config.data_dir, 'chat_ids%d.in' % config.input_vocab_size)

        # Load data
        vocab, vocab_rev = data_utils.load_vocabulary(vocab_path)
        train_set = data_utils.read_data_chat(train_data_path, config)
        # print(train_set[0])

        if forward_only:
            config.batch_size = 1
        model = model_utils.create_model(sess, config, forward_only)

        # This is the training loop.
        steps_per_checkpoint = 100
        step_time, loss = 0.0, 0.0
        current_step = 0
        perplexity = 10000.0
        previous_losses = []

        while current_step < config.max_epoch and not forward_only:
            start_time = time.time()
            bucket_id = 0
            encoder_inputs, encoder_inputs_length, decoder_inputs, decoder_inputs_length, target_weights = (
                data_utils.get_batch(train_set[bucket_id], config))

            if len(encoder_inputs) == 0 or len(decoder_inputs) == 0:
                continue

            _, step_loss, _, _, enc_embedding, dec_embedding = model.step(
                sess, encoder_inputs, encoder_inputs_length, decoder_inputs,
                decoder_inputs_length, target_weights, forward_only)

            step_time += (time.time() - start_time) / steps_per_checkpoint
            loss += step_loss / steps_per_checkpoint
            current_step += 1

            if current_step % steps_per_checkpoint == 0:
                # Report statistics for the previous checkpoint interval;
                # perplexity is exp of the averaged cross-entropy loss.
                perplexity = math.exp(loss) if loss < 300 else float('inf')
                print(
                    "global step %d learning rate %.4f step-time %.2f perplexity %.2f loss %.2f"
                    % (model.global_step.eval(), model.learning_rate.eval(),
                       step_time, perplexity, loss))

                # Decay the learning rate if the loss has not improved over
                # the last two checkpoint intervals.
                if len(previous_losses) > 2 and loss > max(
                        previous_losses[-2:]):
                    sess.run(model.learning_rate_decay_op)

                previous_losses.append(loss)

                # Save checkpoint and zero timer and loss.
                checkpoint_path = os.path.join(config.model_dir, "model.ckpt")
                model.saver.save(sess,
                                 checkpoint_path,
                                 global_step=model.global_step)
                step_time, loss = 0.0, 0.0

                sys.stdout.flush()

        if forward_only:
            valid_data_path = os.path.join(
                config.data_dir,
                'chat_valid_ids%d.in' % config.input_vocab_size)
            dev_set = data_utils.read_data_chat(valid_data_path, config)
            print(dev_set)
            bucket_id = 0
            # for i in range(len(dev_set[0])):
            for i in range(1):
                dev_inputs, dev_inputs_length, dev_outputs, dev_outputs_length, target_weights = (
                    data_utils.get_test_line(dev_set[bucket_id], i))

                _, _, logits, predicted, enc_embedding, dec_embedding = model.step(
                    sess, dev_inputs, dev_inputs_length, dev_outputs,
                    dev_outputs_length, target_weights, forward_only)

                print("Prediction Results in Iteration %d : " % i)
                print(dev_inputs.transpose())
                print(dev_outputs.transpose())
                print(predicted.transpose())
                print("")