Example #1
def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy)
    return result
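# Usage sketch (assumes the MNIST tutorial context, where `sess`, `xs`, and
# `prediction` already exist and `mnist` holds the dataset):
#   print(compute_accuracy(mnist.test.images, mnist.test.labels))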
Example #2
def train(model, generated_image, initial_image):
    """ Train your model.
    Don't forget to create folders for checkpoints and outputs.
    """
    skip_step = 1
    with tf.Session() as sess:
        saver = tf.train.Saver()
        ###############################
        ## TO DO:
        ## 1. initialize your variables
        ## 2. create writer to write your graph
        sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter('graphs', sess.graph)
        ###############################
        sess.run(generated_image.assign(initial_image))
        ckpt = tf.train.get_checkpoint_state(
            os.path.dirname('checkpoints/checkpoint'))
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        initial_step = model['global_step'].eval()

        start_time = time.time()
        for index in range(initial_step, ITERS):
            if index >= 5 and index < 20:
                skip_step = 10
            elif index >= 20:
                skip_step = 20

            sess.run(model['optimizer'])
            if (index + 1) % skip_step == 0:
                ###############################
                ## TO DO: obtain generated image and loss
                gen_image, total_loss, summary = sess.run([
                    generated_image, model['total_loss'], model['summary_op']
                ])
                ###############################
                gen_image = gen_image + MEAN_PIXELS
                writer.add_summary(summary, global_step=index)
                print('Step {}\n   Sum: {:5.1f}'.format(
                    index + 1, np.sum(gen_image)))
                print('   Loss: {:5.1f}'.format(total_loss))
                print('   Time: {}'.format(time.time() - start_time))
                start_time = time.time()

                filename = 'outputs/%d.png' % (index)
                utils.save_image(filename, gen_image)

                if (index + 1) % SAVE_EVERY == 0:
                    saver.save(sess, 'checkpoints/style_transfer', index)
Example #3
def get_jacobian(dof, u, frame, cur_keyframe, T):
    '''
	Returns the Jacobian of the Residual Error wrt the Pose

	Arguments:
		dof: Number of high gradient elements we are using
		u: A list containing the high gradient elements
		frame: Numpy array of the current frame
		cur_keyframe: Previous keyframe as a Keyframe class
		T: Current estimated Pose

	Returns:
		J: The required Jacobian
	'''
    T_s = get_min_rep(T)
    T_c = tf.constant(T_s)  # Flattened pose in tf
    r_s = calc_cost_jacobian(u, frame, cur_keyframe, T_c)
    with tf.Session() as sess:
        # tf.test.compute_gradient returns two Jacobians (theoretical and
        # numerical); the remaining parameters are the shapes.
        _, J = tf.test.compute_gradient(r_s, (dof, 1), T_c, (12, 1))
    return J
Example #4
# variable ops
x = tf.Variable(...)

x.initializer  # init
x.value()  # read op
x.assign(...)  # write op
x.assign_add(...)
# and more
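# Note: assign / assign_add only build ops; nothing changes until the op is
# run in a session, e.g. sess.run(x.assign(...)).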
# Initialize variables before using them, or you will run into an error (FailedPreconditionError: Attempting to use uninitialized value)
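# A minimal sketch of that failure mode (names are illustrative):
#   v = tf.Variable(10)
#   with tf.Session() as sess:
#       sess.run(v)  # FailedPreconditionError: v was never initialized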

# initialize all variables at once
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)  # run the initializer, not fetching any value

# initialize only a subset of variables
init_ab = tf.variables_initializer([a, b], name="init_ab")
with tf.Session() as sess:
    sess.run(init_ab)

# initialize each variable separately
# create variable W as 784x10 tensor, filled with zeros
W = tf.Variable(tf.zeros([784, 10]))
with tf.Session() as sess:
    sess.run(W.initializer)

# Evaluate values of variables
# W is a random 700x10 variable object
W = tf.Variable(tf.truncated_normal([700, 10]))
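# A minimal sketch of reading the value: initialize W, then call W.eval(),
# which is shorthand for sess.run(W) in the default session.
with tf.Session() as sess:
    sess.run(W.initializer)
    print(W.eval())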
Example #5
def test_model(session, x_data):
    # `accuracy` and the placeholder `x` are assumed to be defined globally;
    # naming the parameter x_data avoids shadowing the placeholder x.
    return session.run([accuracy], feed_dict={x: x_data})
Example #6
def make_prediction(session, x_data):
    # `y` and the placeholder `x` are assumed to be defined globally.
    y_pred = session.run([y], feed_dict={x: x_data})
    return y_pred
Example #7
def main():
    global EPOCHS
    # train_X, test_X, train_y, test_y = get_iris_data()

    # Saver
    name = ""

    print("Train? (y for train, n for test)")
    choice = input()
    train_flag = True
    if choice in ('n', 'N'):
        df = pd.read_csv("data/out-test.csv")
        BATCH_SIZE = df.shape[0]
        EPOCHS = 1
        train_flag = False
        name = input("Enter model file name: ")
    else:
        df = pd.read_csv("data/out-train.csv")

    cols = df.columns.values
    cols = np.delete(cols, [1])
    train_X = df.loc[:, cols].values

    train_y = df["decile_score"].values
    y_train_ = train_y
    train_y = keras.utils.np_utils.to_categorical(train_y)

    print(train_X.shape)
    print(train_y.shape)
    # exit()
    # Layer's sizes
    x_size = train_X.shape[1]  # Number of input features
    h_size_1 = 256  # Number of hidden nodes
    h_size_2 = 256  # Number of hidden nodes
    h_size_3 = 128  # Number of hidden nodes
    h_size_4 = 64  # Number of hidden nodes
    h_size_5 = 64  # Number of hidden nodes
    h_size_6 = 32  # Number of hidden nodes
    h_size_7 = 16  # Number of hidden nodes
    h_size_8 = 8  # Number of hidden nodes
    y_size = train_y.shape[1]  # Number of output classes

    # Symbols
    X = tf.placeholder("float", shape=[None, x_size])
    y = tf.placeholder("float", shape=[None, y_size])

    # Weight initializations
    w_1 = init_weights((x_size, h_size_1))
    w_2 = init_weights((h_size_1, h_size_2))
    w_3 = init_weights((h_size_2, h_size_3))
    w_4 = init_weights((h_size_3, h_size_4))
    w_5 = init_weights((h_size_4, h_size_5))
    w_6 = init_weights((h_size_5, h_size_6))
    w_7 = init_weights((h_size_6, h_size_7))
    w_8 = init_weights((h_size_7, h_size_8))
    w_9 = init_weights((h_size_8, y_size))

    # Forward propagation
    yhat = forwardprop(X, w_1, w_2, w_3, w_4, w_5, w_6, w_7, w_8, w_9)
    predict = tf.argmax(yhat, axis=1)

    # Backward propagation
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
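    # Note: softmax_cross_entropy_with_logits applies softmax internally, so
    # yhat must be raw (unscaled) logits rather than probabilities.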
    updates = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

    saver = tf.train.Saver()
    # Run SGD
    sess = tf.Session()
    if not train_flag:
        saver.restore(sess, "checkpoints/" + name)

    if train_flag:
        init = tf.global_variables_initializer()
        sess.run(init)

    for epoch in range(EPOCHS):
        # Train with each example
        if train_flag:
            for i in range(len(train_X)):
                sess.run(updates,
                         feed_dict={
                             X: train_X[i:i + 1],
                             y: train_y[i:i + 1]
                         })

        train_accuracy = np.mean(
            np.argmax(train_y, axis=1) == sess.run(predict,
                                                   feed_dict={X: train_X}))
        # test_accuracy  = np.mean(np.argmax(test_y, axis=1) ==
        #                          sess.run(predict, feed_dict={X: test_X, y: test_y}))
        print("Epoch = %d, train accuracy = %.2f%%" %
              (epoch + 1, 100. * train_accuracy))
        if train_flag:
            saver.save(sess, "checkpoints/model_epoch_" + str(epoch) + ".ckpt")

    sess.close()
Example #8
import tensorflow as tf
import numpy as np


def sample_gumbel(shape, eps=1e-20):
    """Sample from Gumbel(0, 1)"""
    U = tf.random_uniform(shape, minval=0, maxval=1)
    return -tf.log(-tf.log(U + eps) + eps)


def gumbel_softmax(logits, temperature):
    """ Draw a sample from the Gumbel-Softmax distribution"""
    y = logits + sample_gumbel(tf.shape(logits))
    return tf.nn.softmax(y / temperature)


logits = np.random.rand(3, 4).astype(np.float32)  # match tf.random_uniform's float32
gumbel_logits = gumbel_softmax(logits, 1.0)
with tf.Session() as sess:  # do not shadow the tf module with the session
    for i in range(100):
        samples = sess.run(gumbel_logits)
        if (i + 1) % 20 == 0:
            print(i + 1)
        if np.isnan(samples).any():
            print('samples contain nan')
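# Note on temperature: lower values (e.g. 0.1) push each row of the output
# toward one-hot; higher values (e.g. 10.0) push it toward uniform.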
Example #9
import tensorflow as tf

hello = tf.constant("hello")
with tf.Session() as sess:
    print(sess.run(hello))
Example #10
__author__ = 'deepika'

import tensorflow as tf
deep_learning = tf.constant('Deep Learning')

session = tf.Session()
print(session.run(deep_learning))

a = tf.constant(2)
b = tf.constant(3)
multiply = tf.multiply(a, b)  # build the multiply op; evaluate it below
print(session.run(multiply))

matrix1 = tf.constant([[3., 3.]])
matrix2 = tf.constant([[2.], [2.]])
product = tf.matmul(matrix1, matrix2)
print(session.run(product))  # [[12.]]
Example #11
import tensorflow as tf

# tf.run() does not exist; create a Session and run ops with it instead.
s = tf.Session()
print(s.run(tf.constant('hello')))
s.close()
Example #12
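# This fragment assumes a default session (e.g. sess = tf.InteractiveSession()),
# so the .eval() and .run() calls below work without passing a session explicitly.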
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.global_variables_initializer())
for i in range(500):  #Changed from 2000 to 500
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x: batch[0],
            y_: batch[1],
            keep_prob: 1.0
        })
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print("test accuracy %g" % accuracy.eval(feed_dict={
    x: mnist.test.images,
    y_: mnist.test.labels,
    keep_prob: 1.0
}))

# Feed a preprocessed image, not a filename: the placeholder x expects a
# flattened pixel array. load_image is a hypothetical helper here.
your_image = load_image("TensorFlow#3TestImage.png")
feed_dict = {x: [your_image], keep_prob: 1.0}
classification = sess.run(y_conv, feed_dict)
print(classification)
Example #13
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

# Launch a session to run TensorFlow operations
with tf.Session() as session:
    # Run the global variable initializer to initialize all variables and layers of the neural network
    session.run(tf.global_variables_initializer())

    # Run the optimizer over and over again to train the network.
    for epoch in range(training_epochs):
        # Feed the training data
        session.run(optimizer,
                    feed_dict={
                        X: X_scaled_training,
                        Y: Y_scaled_training
                    })

        # Log progress every 5 epochs.
        if epoch % 5 == 0:
            training_cost = session.run(cost,
                                        feed_dict={
                                            X: X_scaled_training,
                                            Y: Y_scaled_training
                                        })
            testing_cost = session.run(cost,
                                       feed_dict={
                                           X: X_scaled_testing,
                                           Y: Y_scaled_testing
                                       })
            print("epoch #{} - training_cost {}, testing_cost {}".format(
                epoch, training_cost, testing_cost))
Example #14
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.cast(Y, tf.int64))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Saver object
saver = tf.train.Saver()

# Start training
sess = tf.Session()

# Run the initializer
sess.run(init)

# Start the data queue
tf.train.start_queue_runners(sess)

# Training cycle
for step in range(1, num_steps + 1):

    if step % display_step == 0:
        # Run optimization and calculate batch loss and accuracy
        _, loss, acc = sess.run([train_op, loss_op, accuracy])
        print("Step " + str(step) + ", Minibatch Loss= " + \
                "{:.4f}".format(loss) + ", Training Accuracy= " + \
                "{:.3f}".format(acc))
    else:
        # Only run the optimization op (backprop)
        sess.run(train_op)
Example #15
session = tf.Session()
session.run(tf.global_variables_initializer())

epochs = 500
for epoch in range(epochs):
  n_batches = n_samples // batch_size  # integer division for a loop bound
  for batch in range(n_batches):
    start = batch * batch_size
    end = start + batch_size
    ph = {
      model_input_number1: ints_to_one_hots(samples_number1[start:end], max_numbers),
      model_input_number2: ints_to_one_hots(samples_number2[start:end], max_numbers),
      model_output: ints_to_one_hots(samples_total[start:end], max_numbers*2)
    }
    loss, _ = session.run([model_loss, model_train_op], ph)
    print("Loss {0}".format(loss))

  right = 0
  for i in range(max_numbers):
    for j in range(max_numbers):
      ph = {
        # second argument inferred from the training feeds above
        model_input_number1: ints_to_one_hots([i], max_numbers),
        model_input_number2: ints_to_one_hots([j], max_numbers),
        model_output: ints_to_one_hots([i+j], max_numbers*2)
      }
      predict_one_hot = session.run(model_predict, ph)
      predict = np.argmax(predict_one_hot)
      if predict == i+j:
        right += 1.0
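  # A sketch of the missing tail: report accuracy over all (i, j) pairs.
  print("Accuracy {0}".format(right / (max_numbers * max_numbers)))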