def do_train(sess, X_input, Y_input, X_validation, Y_validation):
    mini_batch_size = 10
    n_train = X_input.shape[0]
    graph = create_graph()
    LAYER_WEIGHTS_NAME = "layer_weights"
    LAYER_BIASES_NAME = "layer_biases"

    # Placeholders and variables for the training pass.
    bottleneck_tensor_train = tf.placeholder(
        tf.float32, [mini_batch_size, BOTTLENECK_TENSOR_SIZE],
        name=BOTTLENECK_TENSOR_TRAIN_NAME)
    ground_truth_tensor_train = tf.placeholder(
        tf.float32, [mini_batch_size, len(classes)],
        name=GROUND_TRUTH_TENSOR_TRAIN)
    layer_weights = tf.Variable(
        tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, len(classes)],
                            stddev=0.001),
        name=LAYER_WEIGHTS_NAME)
    layer_biases = tf.Variable(tf.zeros([len(classes)]),
                               name=LAYER_BIASES_NAME)

    # Placeholders for the validation pass.
    bottleneck_tensor_validation = tf.placeholder(
        tf.float32, [mini_batch_size, BOTTLENECK_TENSOR_SIZE])
    ground_truth_tensor_validation = tf.placeholder(
        tf.float32, [mini_batch_size, len(classes)])

    # Placeholders for the test pass; the test and validation sets both hold
    # 10,000 examples, so the full validation set is also fed through these.
    bottleneck_tensor_test = tf.placeholder(
        tf.float32, [10000, BOTTLENECK_TENSOR_SIZE],
        name=BOTTLENECK_TENSOR_TEST_NAME)
    ground_truth_tensor_test = tf.placeholder(
        tf.float32, [10000, len(classes)],
        name=GROUND_TRUTH_TENSOR_TEST)

    # Build the graph for the final layer.
    train_step, cross_entropy = add_final_training_ops(
        graph, bottleneck_tensor_train, ground_truth_tensor_train,
        layer_weights, layer_biases)
    evaluation_step_validation = add_evaluation_step(
        graph, bottleneck_tensor_validation, ground_truth_tensor_validation,
        layer_weights, layer_biases)
    evaluation_step_test = add_evaluation_step(
        graph, bottleneck_tensor_test, ground_truth_tensor_test,
        layer_weights, layer_biases)

    init = tf.global_variables_initializer()
    sess.run(init)

    i = 0
    epochs = 1
    for epoch in range(epochs):
        shuffledRange = np.random.permutation(n_train)
        y_one_hot_train = encode_one_hot(len(classes), Y_input)
        y_one_hot_validation = encode_one_hot(len(classes), Y_validation)
        shuffledX = X_input[shuffledRange, :]
        shuffledY = y_one_hot_train[shuffledRange]
        for Xi, Yi in iterate_mini_batches(shuffledX, shuffledY,
                                           mini_batch_size):
            _, cross_entropy_value = sess.run(
                [train_step, cross_entropy],
                feed_dict={bottleneck_tensor_train: Xi,
                           ground_truth_tensor_train: Yi})
            # Every so often, print out how well the graph is training.
            is_last_step = (i + 1 == FLAGS.how_many_training_steps)
            if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
                train_accuracy = sess.run(
                    evaluation_step_validation,
                    feed_dict={bottleneck_tensor_validation: Xi,
                               ground_truth_tensor_validation: Yi})
                validation_accuracy = sess.run(
                    evaluation_step_test,
                    feed_dict={bottleneck_tensor_test: X_validation,
                               ground_truth_tensor_test: y_one_hot_validation})
                print('%s: Step %d: Train accuracy = %.1f%%, '
                      'Cross entropy = %f, Validation accuracy = %.1f%%' %
                      (datetime.now(), i, train_accuracy * 100,
                       cross_entropy_value, validation_accuracy * 100))
            i += 1

    test_accuracy = sess.run(
        evaluation_step_test,
        feed_dict={bottleneck_tensor_test: X_test_pool3,
                   ground_truth_tensor_test: encode_one_hot(len(classes),
                                                            y_test_pool3)})
    print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
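# A hypothetical usage sketch (not part of the original source): do_train()
# above consumes serialized pool3 features, which serialize_data() further
# down writes to 'X_train_1.npy' / 'y_train_1.npy'. The 40k/10k
# train/validation split is an assumption chosen so that the 10,000-row
# validation set fits the test-sized placeholder used for validation; the
# globals X_test_pool3 / y_test_pool3 must also be loaded beforehand.
def example_run_do_train():
    X_train_pool3 = np.load('X_train_1.npy')
    y_train = np.load('y_train_1.npy')
    X_tr, y_tr = X_train_pool3[:40000], y_train[:40000]
    X_val, y_val = X_train_pool3[40000:], y_train[40000:]
    with tf.Session() as sess:
        do_train(sess, X_tr, y_tr, X_val, y_val)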
def do_train(sess, X_input, Y_input, X_validation, Y_validation,
             X_sample_pool3):
    ground_truth_tensor_name = 'ground_truth'
    mini_batch_size = 1
    n_train = X_input.shape[0]
    graph = create_graph()
    train_step, cross_entropy = add_final_training_ops(
        graph, len(classes), FLAGS.final_tensor_name,
        ground_truth_tensor_name)

    # Checkpoint only the retrained final-layer variables.
    t_vars = tf.trainable_variables()
    final_vars = [var for var in t_vars if 'final_' in var.name]
    saver = tf.train.Saver(final_vars, max_to_keep=10)

    init = tf.global_variables_initializer()
    sess.run(init)
    evaluation_step = add_evaluation_step(graph, FLAGS.final_tensor_name,
                                          ground_truth_tensor_name)

    # Get some layers we'll need to access during training.
    bottleneck_tensor = graph.get_tensor_by_name(
        ensure_name_has_port(BOTTLENECK_TENSOR_NAME))
    ground_truth_tensor = graph.get_tensor_by_name(
        ensure_name_has_port(ground_truth_tensor_name))

    # Resume the final layer from the most recent checkpoint.
    saver.restore(sess,
                  tf.train.latest_checkpoint(os.getcwd() + "/train/test2/"))
    result_tensor = graph.get_tensor_by_name(
        ensure_name_has_port(FLAGS.final_tensor_name))

    # To calculate the inception score, remove the triple quotes around the
    # block below and comment out the training loop that follows; for
    # transfer learning, leave both as they are.
    '''
    splits = 10
    preds = []
    print(X_sample_pool3)
    for Xj, Yj in iterate_mini_batches(X_sample_pool3,
                                       np.zeros([X_sample_pool3.shape[0], 10]),
                                       mini_batch_size):
        pred = sess.run(result_tensor, feed_dict={bottleneck_tensor: Xj})
        preds.append(pred)
    preds = np.concatenate(preds, 0)
    argmax = preds.argmax(axis=1)
    scores = []
    # Calculate the inception score for each predicted category.
    for i in range(splits):
        part = preds[argmax == i]
        logp = np.log(part)
        self = np.sum(part * logp, axis=1)
        cross = np.mean(np.dot(part, np.transpose(logp)), axis=1)
        diff = self - cross
        kl = np.mean(self - cross)
        kl1 = []
        for j in range(splits):
            diffj = diff[(j * diff.shape[0] // splits):
                         ((j + 1) * diff.shape[0] // splits)]
            kl1.append(np.exp(diffj.mean()))
        print("category: %s scores_mean = %.2f, scores_std = %.2f" %
              (classes[i], np.mean(kl1), np.std(kl1)))
        scores.append(np.exp(kl))
    print("scores_mean = %.2f, scores_std = %.2f" %
          (np.mean(scores), np.std(scores)))
    '''

    # Transfer-learning training loop (comment this out when calculating the
    # inception score above).
    i = 0
    epochs = 1
    for epoch in range(epochs):
        shuffledRange = np.random.permutation(n_train)
        y_one_hot_train = encode_one_hot(len(classes), Y_input)
        y_one_hot_validation = encode_one_hot(len(classes), Y_validation)
        shuffledX = X_input[shuffledRange, :]
        shuffledY = y_one_hot_train[shuffledRange]
        for Xi, Yi in iterate_mini_batches(shuffledX, shuffledY,
                                           mini_batch_size):
            sess.run(train_step,
                     feed_dict={bottleneck_tensor: Xi,
                                ground_truth_tensor: Yi})
            # Every so often, print out how well the graph is training.
            is_last_step = (i + 1 == FLAGS.how_many_training_steps)
            if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
                train_accuracy, cross_entropy_value = sess.run(
                    [evaluation_step, cross_entropy],
                    feed_dict={bottleneck_tensor: Xi,
                               ground_truth_tensor: Yi})
                if (i % 1000) == 0:
                    saver.save(sess, os.getcwd() + "/train/test2/",
                               global_step=i)
                # Accumulate per-batch accuracy; with mini_batch_size == 1
                # each run returns 0 or 1, so dividing by the number of
                # validation examples gives the mean accuracy.
                validation_accuracy = 0
                for Xj, Yj in iterate_mini_batches(X_validation,
                                                   y_one_hot_validation,
                                                   mini_batch_size):
                    validation_accuracy += sess.run(
                        evaluation_step,
                        feed_dict={bottleneck_tensor: Xj,
                                   ground_truth_tensor: Yj})
                validation_accuracy = validation_accuracy / X_validation.shape[0]
                print('%s: Step %d: Train accuracy = %.1f%%, '
                      'Cross entropy = %f, Validation accuracy = %.1f%%' %
                      (datetime.now(), i, train_accuracy * 100,
                       cross_entropy_value, validation_accuracy * 100))
            i += 1

    # Average the test accuracy over mini-batches in the same way.
    test_accuracy = 0
    for Xi, Yi in iterate_mini_batches(X_test_pool3,
                                       encode_one_hot(len(classes),
                                                      y_test_pool3),
                                       mini_batch_size):
        test_accuracy += sess.run(evaluation_step,
                                  feed_dict={bottleneck_tensor: Xi,
                                             ground_truth_tensor: Yi})
    test_accuracy = test_accuracy / X_test_pool3.shape[0]
    print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
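# For reference, a minimal sketch of the standard inception score (the
# exponentiated mean KL divergence between p(y|x) and the marginal p(y),
# averaged over splits). The commented-out block above adapts this idea into
# per-category scores grouped by argmax class; this helper is an assumption,
# not part of the original code, and expects the same (N, 10) softmax matrix
# `preds` built there.
def inception_score_sketch(preds, splits=10):
    n = preds.shape[0]
    scores = []
    for k in range(splits):
        part = preds[k * n // splits:(k + 1) * n // splits]
        p_y = np.mean(part, axis=0, keepdims=True)  # marginal p(y) for this split
        kl = np.sum(part * (np.log(part) - np.log(p_y)), axis=1)
        scores.append(np.exp(np.mean(kl)))
    return np.mean(scores), np.std(scores)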
def do_train(sess, X_input, Y_input, X_validation, Y_validation):
    ground_truth_tensor_name = 'ground_truth'
    mini_batch_size = 256
    n_train = X_input.shape[0]
    graph = create_graph()
    train_step, cross_entropy = add_final_training_ops(
        graph, len(classes), FLAGS.final_tensor_name,
        ground_truth_tensor_name)
    init = tf.global_variables_initializer()
    sess.run(init)
    evaluation_step = add_evaluation_step(graph, FLAGS.final_tensor_name,
                                          ground_truth_tensor_name)
    bottleneck_tensor = graph.get_tensor_by_name(
        ensure_name_has_port(BOTTLENECK_TENSOR_NAME))
    ground_truth_tensor = graph.get_tensor_by_name(
        ensure_name_has_port(ground_truth_tensor_name))

    i = 0
    epochs = 20
    validation_acc_vector = []
    training_acc_vector = []
    cross_entropy_vector = []
    for epoch in range(epochs):
        shuffledRange = np.random.permutation(n_train)
        y_one_hot_train = encode_one_hot(len(classes), Y_input)
        y_one_hot_validation = encode_one_hot(len(classes), Y_validation)
        shuffledX = X_input[shuffledRange, :]
        shuffledY = y_one_hot_train[shuffledRange]
        for Xi, Yi in iterate_mini_batches(shuffledX, shuffledY,
                                           mini_batch_size):
            sess.run(train_step,
                     feed_dict={bottleneck_tensor: Xi,
                                ground_truth_tensor: Yi})
            is_last_step = (i + 1 == FLAGS.how_many_training_steps)
            if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
                train_accuracy, cross_entropy_value = sess.run(
                    [evaluation_step, cross_entropy],
                    feed_dict={bottleneck_tensor: Xi,
                               ground_truth_tensor: Yi})
                validation_accuracy = sess.run(
                    evaluation_step,
                    feed_dict={bottleneck_tensor: X_validation,
                               ground_truth_tensor: y_one_hot_validation})
                print('%s: Step %d: Train accuracy = %.1f%%, '
                      'Cross entropy = %f, Validation accuracy = %.1f%%' %
                      (datetime.now(), i, train_accuracy * 100,
                       cross_entropy_value, validation_accuracy * 100))
                cross_entropy_vector.append(cross_entropy_value)
                training_acc_vector.append(train_accuracy * 100)
                validation_acc_vector.append(validation_accuracy * 100)
            i += 1

    print("cross entropy vector length is " + str(len(cross_entropy_vector)))

    # Plot the training accuracy vs. iterations.
    x_ax = np.arange(0, len(cross_entropy_vector))
    fig, ax = plt.subplots(nrows=1, ncols=1)
    ax.plot(x_ax, training_acc_vector)
    plt.xlabel('Iterations')
    plt.ylabel('Training Accuracy')
    plt.title('Training Accuracy vs Number of Iterations')
    fig.savefig('training_acc.jpg')
    plt.close(fig)

    # Plot the validation accuracy vs. iterations.
    x_ax = np.arange(0, len(cross_entropy_vector))
    fig, ax = plt.subplots(nrows=1, ncols=1)
    ax.plot(x_ax, validation_acc_vector)
    plt.xlabel('Iterations')
    plt.ylabel('Validation Accuracy')
    plt.title('Validation Accuracy vs Number of Iterations')
    fig.savefig('validation_acc.jpg')
    plt.close(fig)

    # Plot the cross-entropy error vs. iterations.
    x_ax = np.arange(0, len(cross_entropy_vector))
    fig, ax = plt.subplots(nrows=1, ncols=1)
    ax.plot(x_ax, cross_entropy_vector)
    plt.xlabel('Iterations')
    plt.ylabel('Cross Entropy value')
    plt.title('Cross Entropy Value vs Number of Iterations')
    fig.savefig('cross_entropy_value.jpg')
    plt.close(fig)

    # Calculate the test-set accuracy.
    test_accuracy = sess.run(
        evaluation_step,
        feed_dict={bottleneck_tensor: X_test_pool3,
                   ground_truth_tensor: encode_one_hot(len(classes),
                                                       y_test_pool3)})
    print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
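# encode_one_hot() and iterate_mini_batches() are defined elsewhere in the
# repo; the sketches below (hypothetical, hence the _sketch suffix) show the
# behavior the training loops above rely on: an (N, n_classes) one-hot
# matrix, and successive full (X, Y) mini-batches that match the fixed-size
# placeholders.
def encode_one_hot_sketch(n_classes, labels):
    one_hot = np.zeros((len(labels), n_classes))
    one_hot[np.arange(len(labels)), labels.astype(int)] = 1
    return one_hot

def iterate_mini_batches_sketch(X, Y, batch_size):
    # Yield only complete batches, since several placeholders above have a
    # fixed batch dimension.
    for start in range(0, X.shape[0] - batch_size + 1, batch_size):
        yield X[start:start + batch_size], Y[start:start + batch_size]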
def serialize_cifar_pool3(X, filename):
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    X_pool3 = batch_pool3_features(sess, X)
    np.save(filename, X_pool3)

def serialize_data():
    # Swap this call for input_data_sketches.read_data_sets() to test with
    # the sketches dataset instead of CIFAR-10.
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
    serialize_cifar_pool3(X_train, 'X_train_1')
    serialize_cifar_pool3(X_test, 'X_test_1')
    np.save('y_train_1', y_train)
    np.save('y_test_1', y_test)

graph = create_graph()  # Comment this line out while calculating the inception scores
serialize_data()  # Comment this line out while calculating the inception scores

# Rescale the generated samples from [-1, 1] back to [0, 255] pixel values
# and serialize their pool3 features.
X_sample = np.load('samples50k.npy').transpose(0, 2, 3, 1).astype("float")
X_sample = X_sample * 128 + 127.5
serialize_cifar_pool3(X_sample, 'X_sample_1')  # Comment this line out while calculating the inception scores
X_sample_pool3 = np.load('X_sample_1.npy')
print(X_sample_pool3)
# sys.exit()

# CIFAR-10 class labels; change these to test with sketches.
classes = np.array(['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog',
                    'horse', 'ship', 'truck'])
X_train_orig, y_train_orig, X_test_orig, y_test_orig = load_CIFAR10(cifar10_dir