def run_model(data, in_memory=False):
    """
    Runs a seq2seq model.

    @param data:
        if in_memory is True:
            a tuple ([[size, incoming]], [webpage_label])
        else:
            a list of file paths
    """
    tf.reset_default_graph()
    tf.set_random_seed(123)

    # Only print small part of array
    np.set_printoptions(threshold=10)

    with tf.Session() as session:

        # with bidirectional encoder, decoder state size should be
        # 2x encoder state size
        model = Seq2SeqModel(encoder_cell=args.cell(args.encoder_hidden_states),
                             decoder_cell=args.cell(args.decoder_hidden_states),
                             seq_width=1,
                             batch_size=args.batch_size,
                             bidirectional=args.bidirectional,
                             reverse=args.reverse_traces,
                             learning_rate=args.learning_rate)

        session.run(tf.global_variables_initializer())

        loss_track = train_on_copy_task(session, model, data,
                                        batch_size=args.batch_size,
                                        batches_in_epoch=100,
                                        max_time_diff=args.max_time_diff,
                                        verbose=True)
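
A minimal sketch of how this might be called with in-memory data, assuming the script's args have already been parsed; the (size, incoming) values, the label, and the file paths below are placeholders:

# Hypothetical in-memory data in the docstring's
# ([[size, incoming]], [webpage_label]) format; values are made up.
in_memory_data = ([[512, 1], [1500, 0], [320, 1]], [7])
run_model(in_memory_data, in_memory=True)

# The default mode instead expects a list of trace file paths.
# run_model(['/path/to/trace_0', '/path/to/trace_1'], in_memory=False)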
Example No. 2
def run_model(data, in_memory=False):
    """
    Runs a seq2seq model.

    @param data:
        if in_memory is True:
            a tuple ([[size, incoming]], [webpage_label])
        else:
            a list of file paths
    """
    tf.reset_default_graph()
    tf.set_random_seed(123)

    # Only print small part of array
    np.set_printoptions(threshold=10)

    with tf.Session() as session:

        # Batch-normalised cells take an is_training flag; it is False here
        # because this run only extracts vector representations.
        if args.batch_norm:
            encoder_cell = args.cell(args.encoder_hidden_states,
                                     is_training=False)
            decoder_cell = args.cell(args.decoder_hidden_states,
                                     is_training=False)
        else:
            encoder_cell = args.cell(args.encoder_hidden_states)
            decoder_cell = args.cell(args.decoder_hidden_states)

        # with bidirectional encoder, decoder state size should be
        # 2x encoder state size
        model = Seq2SeqModel(encoder_cell=encoder_cell,
                             decoder_cell=decoder_cell,
                             seq_width=1,
                             batch_size=args.batch_size,
                             bidirectional=args.bidirectional,
                             reverse=args.reverse_traces,
                             learning_rate=args.learning_rate,
                             saved_graph=args.graph_file,
                             sess=session)

        # Variables are presumably restored from args.graph_file (passed as
        # saved_graph above), so the global initializer is not run here.
        # session.run(tf.global_variables_initializer())

        get_vector_representations(session,
                                   model,
                                   data,
                                   DATA_DIR + '/../seq2seq_cells',
                                   batch_size=args.batch_size,
                                   max_batches=None,
                                   batches_in_epoch=100,
                                   max_time_diff=args.max_time_diff,
                                   extension=args.extension)
Example No. 3
def evaluate():
  print(FLAGS.checkpoint_dir)
  data_dir = os.path.abspath(os.path.join(os.path.curdir, "data"))
  src_dict_path = os.path.join(data_dir, "src.dict")
  trg_dict_path = os.path.join(data_dir, "trg.dict")
  src_dict, trg_dict = read_vocab(src_dict_path, trg_dict_path)
  params = create_model_params({
    "src_vocab_size": len(src_dict),
    "trg_vocab_size": len(trg_dict)
  })
  data_filename = "youdao_encn_tokens_50k"
  dev_data_generator = DataGenerator(data_dir, data_filename, FLAGS.valid_batch_size,
    1, src_dict, trg_dict, FLAGS.max_seq_len)

  with tf.Graph().as_default() as g:
    tf.set_random_seed(1234)

    seq2seq = Seq2SeqModel(params)
    
    input_fn = dev_data_generator.create_input_fn_new(is_training=False)
    # The summary op returned by build_eval_model is discarded;
    # tf.summary.merge_all() below is used instead.
    _, loss, _, num_tokens = seq2seq.build_eval_model(input_fn)
    dev_op = [loss, num_tokens]

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        FLAGS.moving_average_decay)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

    while True:
      eval_once(saver, summary_writer, dev_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
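
eval_once is defined elsewhere in that project. A minimal sketch of the usual restore-and-evaluate pattern it likely follows, assuming FLAGS.checkpoint_dir holds the training checkpoints and that a single batch is enough for illustration:

def eval_once(saver, summary_writer, dev_op, summary_op):
  """Sketch only: restore the latest checkpoint and run one evaluation pass."""
  with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if not ckpt or not ckpt.model_checkpoint_path:
      print('No checkpoint found in %s' % FLAGS.checkpoint_dir)
      return
    saver.restore(sess, ckpt.model_checkpoint_path)
    global_step = int(ckpt.model_checkpoint_path.split('-')[-1])

    # A full implementation would iterate over the whole validation set;
    # this sketch evaluates a single batch.
    loss_value, token_count = sess.run(dev_op)
    print('step %d: loss = %.4f over %d tokens'
          % (global_step, loss_value, token_count))

    summary = sess.run(summary_op)
    summary_writer.add_summary(summary, global_step)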
Example No. 4
# Model parameters
num_units = 128
num_layers = 2
cell = tf.contrib.rnn.GRUCell
attention = True

# Training parameters
epochs = 500
batch_size = 512
learning_rate = 0.01
keep_prob = 0.5
optimizer = tf.train.AdamOptimizer

with tf.Session() as sess:
    seq2seq_model = Seq2SeqModel(sess, cell, num_units, num_layers,
                                 source_letter_to_int, target_letter_to_int,
                                 attention)

    train_loss_history, valid_loss_history = seq2seq_model.train(
        [train_sources, train_targets], [valid_sources, valid_targets],
        learning_rate,
        batch_size,
        epochs,
        keep_prob,
        optimizer,
        save_path='ckpt_dir/han_to_en/model',
        display_size=10,
        save_size=10)

    print(train_loss_history)
    print(valid_loss_history)
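
The two loss histories are only printed above; an optional follow-up sketch, assuming matplotlib is available, could plot them instead:

# Sketch: visualise the loss curves returned by train().
# Not part of the original example; assumes matplotlib is installed.
import matplotlib.pyplot as plt

plt.plot(train_loss_history, label='train loss')
plt.plot(valid_loss_history, label='valid loss')
plt.xlabel('logged step')
plt.ylabel('loss')
plt.legend()
plt.show()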
Example No. 5
def training():
    epochs = 100
    batch_size = 64
    rnn_size = 64
    num_layers = 2
    source_vocab_size = 5000
    target_vocab_size = 5000
    encoder_embed_size = 32
    decoder_embed_size = 32
    learning_rate = 0.001

    datas = Seq2SeqDataProcess()
    datas.convert_sentences_to_number_sequences(source_path, target_path)
    datas.save_word_mapping_info(model_dir)

    # construct the graph
    train_graph = tf.Graph()
    with train_graph.as_default():
        # construct the seq2seq model
        seq2seq = Seq2SeqModel(source_vocab_size, target_vocab_size,
                               encoder_embed_size, decoder_embed_size,
                               rnn_size, num_layers, learning_rate, batch_size)

        training_decoder_output, predicting_decoder_output = seq2seq.seq2seq_model(
            datas.target_word2int['<GO>'], datas.target_word2int['<EOS>'])

        training_logits = tf.identity(training_decoder_output.rnn_output,
                                      name='logits')
        predicting_logits = tf.identity(predicting_decoder_output.sample_id,
                                        name='predictions')

        masks = tf.sequence_mask(seq2seq.target_sequence_length,
                                 seq2seq.max_target_sequence_length,
                                 dtype=tf.float32,
                                 name='masks')

        with tf.name_scope('optimization'):
            # loss function
            cost = tf.contrib.seq2seq.sequence_loss(training_logits,
                                                    seq2seq.targets, masks)
            # optimizer
            optimizer = tf.train.AdamOptimizer(learning_rate)
            # gradient clipping
            gradients = optimizer.compute_gradients(cost)
            capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var)
                                for grad, var in gradients
                                if grad is not None]
            train_op = optimizer.apply_gradients(capped_gradients)

    # training/validation split (any other way of preparing validation data works too)
    train_source = datas.source_int_seq[batch_size:]
    train_target = datas.target_int_seq[batch_size:]
    valid_source = datas.source_int_seq[:batch_size]
    valid_target = datas.target_int_seq[:batch_size]
    (valid_targets_batch, valid_sources_batch, valid_targets_lengths,
     valid_sources_lengths) = next(
         datas.get_batches(valid_target, valid_source, batch_size))

    display_step = 50
    with tf.Session(graph=train_graph) as sess:
        sess.run(tf.global_variables_initializer())

        for epoch_i in range(1, epochs + 1):
            for batch_i, (targets_batch, sources_batch, targets_lengths,
                          sources_lengths) in enumerate(
                              datas.get_batches(train_target, train_source,
                                                batch_size)):
                _, loss = sess.run(
                    [train_op, cost],
                    feed_dict={
                        seq2seq.inputs: sources_batch,
                        seq2seq.targets: targets_batch,
                        seq2seq.learning_rate: learning_rate,
                        seq2seq.target_sequence_length: targets_lengths,
                        seq2seq.source_sequence_length: sources_lengths
                    })
                if batch_i % display_step == 0:
                    validation_loss = sess.run(
                        [cost],
                        feed_dict={
                            seq2seq.inputs: valid_sources_batch,
                            seq2seq.targets: valid_targets_batch,
                            seq2seq.learning_rate: learning_rate,
                            seq2seq.target_sequence_length: valid_targets_lengths,
                            seq2seq.source_sequence_length: valid_sources_lengths,
                        })
                    print(
                        'Epoch {:>3}/{} Batch {:>4}/{} - Training Loss: {:>6.3f} - Validation loss: {:>6.3f}'
                        .format(epoch_i, epochs, batch_i,
                                len(train_source) // batch_size, loss,
                                validation_loss[0]))

        # save the model
        saver = tf.train.Saver()
        saver.save(sess, model_dir + "/seq2seq_model.ckpt")
        print("Model Trained and Saved.")
Example No. 6
        lrt = (loss12 - loss1 - loss2).detach().numpy()
        res.append(lrt)

        print(lrt)

    return res


import numpy as np
import pandas
import matplotlib.pyplot as plt

dataframe = pandas.read_csv('data/ptbdb_normal.csv', engine='python').values

dataframe1 = pandas.read_csv('data/ptbdb_abnormal.csv', engine='python').values

row = dataframe[1, 10:150]
row1 = dataframe1[10, 10:150]
row2 = dataframe1[12, 10:150]
model = Seq2SeqModel(50)

data = np.concatenate([row, row, row, row, row, row1, row])

plt.plot(data)
plt.show()

# lrt_res = LRT(data, 2 * len(row), model)
# plt.plot(lrt_res)
# plt.show()

loss, y_pred = model.train(np.concatenate([row, row, row, row]), 50)

print(loss)

plt.plot(y_pred.detach().numpy())
Example No. 7
import tensorflow as tf
import numpy as np

from data_sampler import DataSampler
from democonfig import DemoConfig
from seq2seq import Seq2SeqModel

with tf.Session() as sess:
    config = DemoConfig()
    data = DataSampler()
    model = Seq2SeqModel(config, data)

    model.train(sess)