Example #1
    def train(self):
        # imports
        from time import time
        import numpy as np
        from functions import get_batch

        print('training...')

        x_train = self.loadData()
        print('\n  ** Data Shape:', np.shape(x_train), '  ** \n')
        print('data loaded...')

        print('epoch \t cost_g \t cost_d \t time')
        for i in range(self.args.maxIter):
            self.epoch += 1
            t = time()
            # Update the discriminator k times per generator update
            for j in range(self.args.k):
                Z = self.pzFn(self.args.batchSize,
                              self.args.nz).astype(self.floatX)
                X = get_batch(self.args.batchSize, x_train)
                cost_D = self.train_D(X, Z)
            # Draw a fresh noise sample for the generator update
            Z = self.pzFn(self.args.batchSize,
                          self.args.nz).astype(self.floatX)
            cost_G = self.train_G(Z)

            print(self.epoch, '\t', cost_G, '\t', cost_D, '\t', time() - t)
            self.error_g.append(cost_G)
            self.error_d.append(cost_D)

        self.save()
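
The get_batch imported from functions is not shown in this example. A minimal sketch of one plausible implementation, assuming it simply draws a random minibatch of rows from the training array (the name and behavior are assumptions, not the original helper):

import numpy as np

def get_batch(batch_size, x_train):
    # Hypothetical helper: draw batch_size rows from x_train at random
    idx = np.random.randint(0, len(x_train), size=batch_size)
    return x_train[idx]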
Example #2
accuracy = tf.reduce_mean(
    tf.cast(tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)), tf.float32))

cost_save = []
accuracy_save = []
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
init = tf.global_variables_initializer()
with tf.Session(config=config) as sess:
    sess.run(init)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y, index = func.get_batch(batch_size, data_x, data_y,
                                                 index)
        # Run optimization op (backprop)
        sess.run(opt, feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})
        if step % display_step == 0:
            # Calculate batch loss and accuracy
            cos, acc = sess.run([cost, accuracy],
                                feed_dict={
                                    x: batch_x,
                                    y: batch_y,
                                    keep_prob: 1.
                                })
            cost_save.append(cos)
            accuracy_save.append(acc)
            print("Iter " + str(step * batch_size) + ", Minibatch Loss= " +
                  "{:.6f}".format(cos) + "\nTraining Accuracy= " +
                  "{:.5f}".format(acc))
Example #3
# L2-normalize the embeddings so the matmul below yields cosine similarity
emb_vec = embd_vec / tf.sqrt(tf.reduce_sum(tf.square(embd_vec), 1, keep_dims=True))
validation_X = tf.nn.embedding_lookup(emb_vec, validation_indices)
cos_simi = tf.matmul(validation_X, tf.transpose(emb_vec))

end_time = time.time()
time_taken = end_time - start_time
print("Writing vocab to file took: %.2f" % time_taken)

hm_epochs = 200000
batch_index = 0
with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    epoch_loss = 0
    for epoch in range(1, hm_epochs+1):
        log = ""
        batch_index, batch = get_batch(batch_index, train_data, batch_size, context_word_count, window_size)
        _, batch_loss = session.run([optimizer, loss], feed_dict={X:batch[:,0], Y:batch[:,1].reshape(-1,1)})
        epoch_loss += batch_loss
        if epoch == 500 or epoch == 1000:
            log += "Loss at epoch %d is %.3f\n" % (epoch, epoch_loss / epoch)
        elif epoch % 5000 == 0:
            log += "Loss at epoch %d is %.3f\n" % (epoch, epoch_loss / epoch)
            simi = cos_simi.eval()
            for i in range(int(validation_X.shape[0])):
                word = index_word[validation_set_indices[i]]
                # Position 0 is the word itself, so take the next-closest entry
                nearest = (-simi[i, :]).argsort()[1:2]
                close_word = index_word[nearest[0]]
                log += "%s is close to %s\n" % (word, close_word)
        if log:
            print(log)
            with open(root_dir + "/logs.txt", "a") as myfile:
                myfile.write(log)
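
In this example get_batch is expected to return the advanced batch index together with an array of (center word, context word) index pairs for skip-gram training, since the feed_dict uses batch[:, 0] as inputs and batch[:, 1] as labels. A rough sketch under that assumption; the exact use of context_word_count and window_size in the original is not shown, so the pairing logic here is illustrative only:

import numpy as np

def get_batch(batch_index, train_data, batch_size, context_word_count, window_size):
    # Hypothetical skip-gram batcher over a list of word indices: pair each
    # center word with up to context_word_count neighbours inside the window.
    pairs = []
    n = len(train_data)
    while len(pairs) < batch_size:
        center = train_data[batch_index]
        lo = max(0, batch_index - window_size)
        hi = min(n, batch_index + window_size + 1)
        context = [train_data[j] for j in range(lo, hi) if j != batch_index]
        for ctx in context[:context_word_count]:
            pairs.append((center, ctx))
        batch_index = (batch_index + 1) % n
    return batch_index, np.array(pairs[:batch_size])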