Example no. 1
        symbols_in_keys = [input_one_hot(dictionary[str(train_data[i])], vocab_size)
                           for i in range(offset, offset + n_input)]
        symbols_in_keys = np.reshape(np.array(symbols_in_keys), [-1, n_input, vocab_size])
        symbols_out_onehot = input_one_hot(dictionary[str(train_data[offset + n_input])], vocab_size)
        symbols_out_onehot = np.reshape(symbols_out_onehot, [1, -1])

        tfbi_output, tfbi_state, tfcc_output, tfpreds, tfgrads_and_vars_tf_style, _, acc, loss = session.run(
            [bi_output, bi_state, cc_output, preds, grads_and_vars_tf_style,
             train_tf_style, accuracy, cost],
            feed_dict={x: symbols_in_keys, y: symbols_out_onehot})
        # Accumulate for the running averages printed every display_step steps.
        loss_total += loss
        acc_total += acc
        print("tfbi_output:",tfbi_output)
        print("tfbi_state:",tfbi_state)
        print("cc_output:",tfcc_output)
        print("tfpreds:",tfpreds)
        print("loss:",loss)
        print("tfgrads_and_vars_tf_style:",tfgrads_and_vars_tf_style)

        if (step + 1) % display_step == 0:
            print("Iter= " + str(step + 1) + ", Average Loss= " +
                  "{:.6f}".format(loss_total / display_step) + ", Average Accuracy= " +
                  "{:.2f}%".format(100 * acc_total / display_step))
            acc_total = 0
            loss_total = 0
            symbols_in = [train_data[i] for i in range(offset, offset + n_input)]
            symbols_out = train_data[offset + n_input]
            symbols_out_pred = reverse_dictionary[int(tf.argmax(tfpreds, 1).eval())]
            print("%s - Actual word:[%s] vs Predicted word:[%s]" %
                  (symbols_in, symbols_out, symbols_out_pred))
            saver.save(session,
                       get_rel_save_file(projectdir) + '%04d' % (step + 1),
                       global_step=global_step)
        step += 1
        offset += (n_input + 1)
    print("Optimization Finished!")
    print("Elapsed time: ", elapsed(time.time() - start_time))
Example no. 2

        train_op = optimizer.apply_gradients(gradients)
        tf.summary.scalar("cost", tr_cost)

print("Graph created.")

en_train = en_filtered[0:30000]
fr_train = fr_filtered[0:30000]
print("fr_train:", len(fr_train))
update_check = (len(fr_train) // batch_size // per_epoch) - 1
print("update_check:", update_check)
#checkpoint = logs_path + 'best_so_far_model.ckpt'
summary_update_loss = []
stop_early_max_count = 3

with tf.Session(graph=train_graph) as sess:
    tf_summary_writer = tf.summary.FileWriter(get_rel_save_file(projectdir),
                                              graph=train_graph)
    merged_summary_op = tf.summary.merge_all()
    sess.run(tf.global_variables_initializer())

    for epoch_i in range(1, epochs + 1):
        update_loss = 0
        batch_loss = 0
        for batch_i, (en_batch, fr_batch, en_text_len, fr_text_len) in enumerate(
                get_batches(en_train, fr_train, batch_size, fr_word2int, en_word2int)):
            before = time.time()

            (encoding_embed_inputtf, encoding_optf, encoding_sttf, rnn_inputstf,
             decoding_inputtf, decoding_embed_inputtf, logits_trtf, _,
             gradientstf, loss, summary) = sess.run(
                [
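
Both the RNN example and this sequence-to-sequence example write checkpoints and TensorBoard summaries through a get_rel_save_file helper that is not included on this page. A minimal sketch, assuming it only resolves (and creates) a save-path prefix under the project directory; the actual helper may do more:

import os

def get_rel_save_file(projectdir):
    # Hypothetical reconstruction: return a path prefix for checkpoints
    # and summaries, creating the directory if it does not exist.
    save_dir = os.path.join(projectdir, "saved_models")
    os.makedirs(save_dir, exist_ok=True)
    return os.path.join(save_dir, "model_")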
Example no. 4
def elapsed(sec):
    # Format a duration in seconds as a human-readable string.
    if sec < 60:
        return str(sec) + " sec"
    elif sec < (60 * 60):
        return str(sec / 60) + " min"
    else:
        return str(sec / (60 * 60)) + " hr"
# Launch the graph
saver = tf.train.Saver(max_to_keep=200)
with tf.Session() as session:
    session.run(init)
    step = 0
    offset = 2
    end_offset = n_input + 1
    acc_total = 0
    loss_total = 0
    print("offset:", offset)

    summary_writer = tf.summary.FileWriter(get_rel_save_file(projectdir),
                                           graph=session.graph)

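    # Slide a window of n_input one-hot encoded words over train_data;
    # the target is the single word that follows the window.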
    while step < training_iters:
        if offset > (len(train_data) - end_offset):
            offset = rnd.randint(0, n_input + 1)
        print("offset:", offset)
        symbols_in_keys = [input_one_hot(dictionary[str(train_data[i])], vocab_size)
                           for i in range(offset, offset + n_input)]
        symbols_in_keys = np.reshape(np.array(symbols_in_keys), [-1, n_input, vocab_size])
        symbols_out_onehot = input_one_hot(dictionary[str(train_data[offset + n_input])], vocab_size)
        symbols_out_onehot = np.reshape(symbols_out_onehot, [1, -1])
        tfgrads_and_vars_tf_style, _, acc, loss, onehot_pred, tfoutput, tfstate, tfout_weights, tfbiases_out = session.run(
            [grads_and_vars_tf_style, train_tf_style, accuracy, cost, pred,
             output, state, weights_out, biases_out],
            feed_dict={x: symbols_in_keys, y: symbols_out_onehot})
        loss_total += loss
        acc_total += acc
        print("tfoutput:", tfoutput, " tfstate:", tfstate)
        print("onehot_pred:", onehot_pred)