Example #1
# train loop:
hps = hps_model
start = time.time()
for local_step in range(hps.num_steps):
    step = rnn.sess.run(rnn.global_step)
    curr_learning_rate = (hps.learning_rate - hps.min_learning_rate) * (
        hps.decay_rate)**step + hps.min_learning_rate

    raw_z, raw_a = random_batch()
    inputs = np.concatenate((raw_z[:, :-1, :], raw_a[:, :-1, :]), axis=2)
    outputs = raw_z[:, 1:, :]  # teacher forcing: targets are z shifted one step ahead

    feed = {
        rnn.input_x: inputs,
        rnn.output_x: outputs,
        rnn.lr: curr_learning_rate
    }
    (train_cost, state, train_step, _) = rnn.sess.run(
        [rnn.cost, rnn.final_state, rnn.global_step, rnn.train_op], feed)
    if step % 20 == 0 and step > 0:
        end = time.time()
        time_taken = end - start
        start = time.time()
        output_log = "step: %d, lr: %.6f, cost: %.4f, train_time_taken: %.4f" % (
            step, curr_learning_rate, train_cost, time_taken)
        print(output_log)
        rnn.save_json(os.path.join(model_save_path, args.name + "_rnn.json"))

# save the model (don't bother with tf checkpoints; json all the way)
rnn.save_json(os.path.join(model_save_path, args.name + "_rnn.json"))
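The schedule driving curr_learning_rate above is a plain exponential decay from hps.learning_rate toward a floor of hps.min_learning_rate. A minimal standalone sketch of the same formula (the function name and the hyperparameter values are illustrative, not taken from the example):

def decayed_lr(step, lr=0.001, min_lr=0.00001, decay=0.99999):
    # (lr - min_lr) shrinks geometrically with each step, so the result
    # starts at lr and asymptotically approaches min_lr.
    return (lr - min_lr) * decay**step + min_lr

print(decayed_lr(0))       # 0.001 at step 0
print(decayed_lr(100000))  # roughly 3.7e-4, drifting toward min_lr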
Example #2
# train loop:
hps = hps_model
start = time.time()
for local_step in range(hps.num_steps):

    step = rnn.sess.run(rnn.global_step)
    curr_learning_rate = (hps.learning_rate - hps.min_learning_rate) * (
        hps.decay_rate)**step + hps.min_learning_rate

    raw_z, raw_a = random_batch()
    inputs = np.concatenate((raw_z[:, :-1, :], raw_a[:, :-1, :]), axis=2)
    outputs = raw_z[:, 1:, :]  # teacher forcing: targets are z shifted one step ahead

    feed = {
        rnn.input_x: inputs,
        rnn.output_x: outputs,
        rnn.lr: curr_learning_rate
    }
    (train_cost, state, train_step, _) = rnn.sess.run(
        [rnn.cost, rnn.final_state, rnn.global_step, rnn.train_op], feed)
    if step % 20 == 0 and step > 0:
        end = time.time()
        time_taken = end - start
        start = time.time()
        output_log = "step: %d, lr: %.6f, cost: %.4f, train_time_taken: %.4f" % (
            step, curr_learning_rate, train_cost, time_taken)
        print(output_log)

# save the model (don't bother with tf checkpoints; json all the way)
rnn.save_json(os.path.join(model_save_path, "rnn.json"))
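The only data wrangling in these loops is the teacher-forcing slice: the RNN input at step t is the pair (z_t, a_t) and the target is z_{t+1}, so both tensors are trimmed to sequence length T-1. A toy numpy check of the resulting shapes (the batch, sequence, latent, and action sizes here are made up for illustration):

import numpy as np

batch, seq_len, z_size, a_size = 4, 10, 32, 3  # illustrative sizes
raw_z = np.random.randn(batch, seq_len, z_size)
raw_a = np.random.randn(batch, seq_len, a_size)

# Pair each latent with the action taken at the same step ...
inputs = np.concatenate((raw_z[:, :-1, :], raw_a[:, :-1, :]), axis=2)
# ... and predict the latent one step ahead.
outputs = raw_z[:, 1:, :]

print(inputs.shape)   # (4, 9, 35): z and a concatenated on the feature axis
print(outputs.shape)  # (4, 9, 32)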
Example #3
    with open(os.path.join("tf_initial_z", "initial_z.json"), 'wt') as outfile:
      json.dump([initial_mu, initial_logvar], outfile, sort_keys=True, indent=0, separators=(',', ': '))

    reset_graph()
    rnn = MDNRNN(hps_model)

    # train loop:
    hps = hps_model
    start = time.time()
    for local_step in range(hps.num_steps):

      step = rnn.sess.run(rnn.global_step)
      curr_learning_rate = (hps.learning_rate-hps.min_learning_rate) * (hps.decay_rate) ** step + hps.min_learning_rate

      raw_z, raw_a = random_batch()
      print(raw_z.shape, raw_a.shape)  # sanity-check batch shapes
      inputs = np.concatenate((raw_z[:, :-1, :], raw_a[:, :-1, :]), axis=2)
      outputs = raw_z[:, 1:, :] # teacher forcing: targets are z shifted one step ahead

      feed = {rnn.input_x: inputs, rnn.output_x: outputs, rnn.lr: curr_learning_rate}
      (train_cost, state, train_step, _) = rnn.sess.run([rnn.cost, rnn.final_state, rnn.global_step, rnn.train_op], feed)
      if step % 20 == 0 and step > 0:
        end = time.time()
        time_taken = end-start
        start = time.time()
        output_log = "step: %d, lr: %.6f, cost: %.4f, train_time_taken: %.4f" % (step, curr_learning_rate, train_cost, time_taken)
        print(output_log)
        rnn.save_json(os.path.join(arglist.model_save_path, "rnn.json"))
    # save the model (don't bother with tf checkpoints; json all the way)
    rnn.save_json(os.path.join(arglist.model_save_path, "rnn.json"))
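Example #3 also writes the initial latent statistics to tf_initial_z/initial_z.json before training. Since json.dump serialized a two-element list, reading the values back is a single json.load; this round-trip sketch assumes initial_mu and initial_logvar were plain lists of floats (numpy arrays would need a .tolist() before dumping):

import json
import os

with open(os.path.join("tf_initial_z", "initial_z.json"), "r") as f:
    initial_mu, initial_logvar = json.load(f)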