for step in range(2001):
    cost_val, hy_val, _ = sess.run(
        [cost, hypothesis, train], feed_dict={X: x_data, Y: y_data})
    if step % 10 == 0:
        print(step, "Cost: ", cost_val, "\nPrediction:\n", hy_val)

# Ask my score
print("Your score will be ",
      sess.run(hypothesis, feed_dict={X: [[100, 70, 101]]}))
print("Other scores will be ",
      sess.run(hypothesis, feed_dict={X: [[60, 70, 110], [90, 100, 80]]}))

coldGraph(sess, 'lab_04_2_multi_variable_matmul_linear_regression',
          "X",
          "hypothesis",
          "save/Const:hypothesis")

'''
0 Cost:  7105.46
Prediction:
 [[ 80.82241058]
 [ 92.26364136]
 [ 93.70250702]
 [ 98.09217834]
 [ 72.51759338]]
10 Cost:  5.89726
Prediction:
 [[ 155.35159302]
 [ 181.85691833]
 [ 181.97254944]
 [ 194.21760559]
...
'''
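# NOTE: coldGraph(...) is called in every lab in this section but its
# definition is not shown here. The sketch below is only a guess at what such
# a helper might do, inferred from the call signature used above (session,
# file name, input tensor names, output tensor names, 'save/Const:<name>'
# mappings); it is NOT the author's actual implementation.

import os
import tensorflow as tf

def coldGraph(sess, name, input_names, output_names, const_mappings):
    """Hypothetical sketch: dump the session's GraphDef and a checkpoint so
    the model can be frozen later (e.g. with the freeze_graph tool). The name
    arguments are kept only so the signature matches the calls above."""
    out_dir = os.path.join('.', 'frozen')
    # Write the graph structure as text; tf.train.Saver adds the 'save/...'
    # nodes (including save/Const) that the const mappings above refer to.
    tf.train.write_graph(sess.graph_def, out_dir, name + '.pbtxt', as_text=True)
    saver = tf.train.Saver()
    saver.save(sess, os.path.join(out_dir, name + '.ckpt'))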
for i in range(len(train_y_o[:len(y_hat_train_o_list)])):
    if train_y_o[i] != y_hat_train_o_list[i]:
        file_train.write(img_list_train[i] + " Objectness Error. True: " +
                         str(train_y_o[i]) + " Prediction: " +
                         str(y_hat_train_o_list[i]) + " " +
                         str(y_hat_train_o_soft_list[i]) + "\n")
        asd = img_list_train[i].split('.')[0] + "_" + str(
            train_y_o[i]) + "_" + str(y_hat_train_o_list[i]) + "_" + str(
                y_hat_train_o_soft_list[i]) + '.jpg'
        shutil.copy(
            os.path.join(path_list_train[i], img_list_train[i]),
            os.path.join(
                '/home/leehanbeen/PycharmProjects/TypeClassifier/SavedImage/Object',
                asd))

coldGraph(sess, 'model', 'input', 'hypothesis', 'save/Const:hypothesis')

frozen = tf.graph_util.convert_variables_to_constants(
    sess, sess.graph_def,
    ["hypothesis"])  # the hypothesis from line 41 is passed in as the argument
graph_io.write_graph(
    frozen, './', 'inference_graph_type.pb',
    as_text=False)  # writes inference_graph_type.pb to the current directory

print("Test Confusion Matrix")
print(confusion_matrix(test_y[:len(y_hat_t_list)], y_hat_t_list))
print(confusion_matrix(test_y_o[:len(y_hat_o_list)], y_hat_o_list))
print("Test Type Accuracy: %.5f Test Object Accuracy: %.5f" %
      (avg_acc_t, avg_acc_o))
print("\n\n")
print("Train Confusion Matrix")
print(
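# For reference: a GraphDef frozen as above can be reloaded for inference with
# standard TF 1.x calls. A minimal sketch; the tensor names 'input:0' and
# 'hypothesis:0' follow the names passed above, and the dummy batch shape is
# purely an assumption.

import numpy as np
import tensorflow as tf

with tf.gfile.GFile('inference_graph_type.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

graph = tf.Graph()
with graph.as_default():
    # name='' keeps the original node names instead of prefixing 'import/'
    tf.import_graph_def(graph_def, name='')

x = graph.get_tensor_by_name('input:0')
y = graph.get_tensor_by_name('hypothesis:0')

with tf.Session(graph=graph) as sess:
    batch = np.zeros((1, 64, 64, 3), np.float32)  # shape is an assumption
    print(sess.run(y, feed_dict={x: batch}))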
print('Learning Finished!')

# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                          name='accuracy')
print('Accuracy:', sess.run(accuracy, feed_dict={
    X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1}))

# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
prediction = tf.argmax(hypothesis, 1, name='prediction')
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    prediction, feed_dict={X: mnist.test.images[r:r + 1], keep_prob: 1}))

coldGraph(sess, 'lab_10_5_mnist_nn_dropout',
          "X",
          "hypothesis, prediction, accuracy",
          "save/Const:hypothesis, save/Const:prediction, save/Const:accuracy")

# plt.imshow(mnist.test.images[r:r + 1].
#            reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()

'''
Epoch: 0001 cost = 0.447322626
Epoch: 0002 cost = 0.157285590
Epoch: 0003 cost = 0.121884535
Epoch: 0004 cost = 0.098128681
Epoch: 0005 cost = 0.082901778
Epoch: 0006 cost = 0.075337573
Epoch: 0007 cost = 0.069752543
Epoch: 0008 cost = 0.060884363
Epoch: 0009 cost = 0.055276413
...
'''
for step in range(10001):
    cost_val, _ = sess.run([cost, train], feed_dict={X: x_data, Y: y_data})
    if step % 200 == 0:
        print(step, cost_val)

# Accuracy report
h, c, a = sess.run([hypothesis, predicted, accuracy],
                   feed_dict={X: x_data, Y: y_data})
print("\nHypothesis: ", h, "\nCorrect (Y): ", c, "\nAccuracy: ", a)

coldGraph(sess, 'lab_05_1_logistic_regression',
          "X",
          "hypothesis, predicted, accuracy",
          "save/Const:hypothesis, save/Const:predicted, save/Const:accuracy")

'''
0 1.73078
200 0.571512
400 0.507414
600 0.471824
800 0.447585
...
9200 0.159066
9400 0.15656
9600 0.154132
9800 0.151778
10000 0.149496

Hypothesis:  [[ 0.03074029]
...
'''
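# The predicted/accuracy tensors evaluated above are defined earlier in the
# lab; in this series they are typically built by thresholding the sigmoid
# output at 0.5, along the lines of (names assumed to match):
#
#     predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32, name='predicted')
#     accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32),
#                               name='accuracy')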
print('Learning Finished!')

# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                          name='accuracy')
print('Accuracy:', sess.run(accuracy, feed_dict={
    X: mnist.test.images, Y: mnist.test.labels}))

# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
prediction = tf.argmax(hypothesis, 1, name='prediction')
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    prediction, feed_dict={X: mnist.test.images[r:r + 1]}))

coldGraph(sess, 'lab_10_3_mnist_nn_xavier',
          "X",
          "hypothesis, prediction, accuracy",
          "save/Const:hypothesis, save/Const:prediction, save/Const:accuracy")

# plt.imshow(mnist.test.images[r:r + 1].
#            reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()

'''
Epoch: 0001 cost = 0.301498963
Epoch: 0002 cost = 0.107252513
Epoch: 0003 cost = 0.064888892
Epoch: 0004 cost = 0.044463030
Epoch: 0005 cost = 0.029951642
Epoch: 0006 cost = 0.020663404
Epoch: 0007 cost = 0.015853033
Epoch: 0008 cost = 0.011764387
Epoch: 0009 cost = 0.008598264
...
'''
    sess.run(train, feed_dict={X: x_data, Y: y_data})
    if step % 100 == 0:
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}),
              sess.run([W1, W2]))

# Accuracy report
h, c, a = sess.run([hypothesis, predicted, accuracy],
                   feed_dict={X: x_data, Y: y_data})
print("\nHypothesis: ", h, "\nCorrect: ", c, "\nAccuracy: ", a)

coldGraph(sess, 'lab_09_3_xor_nn_wide_deep',
          "X",
          "hypothesis, predicted, accuracy",
          "save/Const:hypothesis, save/Const:predicted, save/Const:accuracy")

'''
Hypothesis:  [[  7.80511764e-04]
 [  9.99238133e-01]
 [  9.98379230e-01]
 [  1.55659032e-03]]
Correct:  [[ 0.]
 [ 1.]
 [ 1.]
 [ 0.]]
Accuracy:  1.0
'''
    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Learning Finished!')

# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                          name='accuracy')
print('Accuracy:', sess.run(accuracy, feed_dict={
    X: mnist.test.images, Y: mnist.test.labels}))

# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
prediction = tf.argmax(hypothesis, 1, name='prediction')
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    prediction, feed_dict={X: mnist.test.images[r:r + 1]}))

coldGraph(sess, 'lab_10_1_mnist_softmax',
          "X",
          "hypothesis, prediction, accuracy",
          "save/Const:hypothesis, save/Const:prediction, save/Const:accuracy")

# plt.imshow(mnist.test.images[r:r + 1].
#            reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()

'''
Epoch: 0001 cost = 5.888845987
Epoch: 0002 cost = 1.860620173
Epoch: 0003 cost = 1.159035648
Epoch: 0004 cost = 0.892340870
Epoch: 0005 cost = 0.751155428
Epoch: 0006 cost = 0.662484806
Epoch: 0007 cost = 0.601544010
Epoch: 0008 cost = 0.556526115
Epoch: 0009 cost = 0.521186961
...
'''
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        c, _ = sess.run([cost, optimizer],
                        feed_dict={X: batch_xs, Y: batch_ys})
        avg_cost += c / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print("Learning finished")

coldGraph(sess, 'lab_07_4_mnist_introduction',
          "X",
          "hypothesis,prediction,accuracy",
          "save/Const:hypothesis,save/Const:prediction,save/Const:accuracy")

# Test the model using test sets
print("Accuracy: ",
      accuracy.eval(session=sess,
                    feed_dict={X: mnist.test.images, Y: mnist.test.labels}))

# Get one and predict
# r = random.randint(0, mnist.test.num_examples - 1)
r = 1
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
# Minimize: Gradient Descent using derivative: W -= learning_rate * derivative
learning_rate = 0.1
gradient = tf.reduce_mean((W * X - Y) * X)
descent = W - learning_rate * gradient
update = W.assign(descent)

# Launch the graph in a session.
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())

for step in range(21):
    sess.run(update, feed_dict={X: x_data, Y: y_data})
    print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}),
          sess.run(W))

coldGraph(sess, 'lab_03_2_minimizing_cost_gradient_update',
          "X",
          "cost,hypothesis",
          "save/Const:cost, save/Const:hypothesis")

'''
0 1.93919 [ 1.64462376]
1 0.551591 [ 1.34379935]
2 0.156897 [ 1.18335962]
3 0.0446285 [ 1.09779179]
4 0.0126943 [ 1.05215561]
5 0.00361082 [ 1.0278163]
6 0.00102708 [ 1.01483536]
7 0.000292144 [ 1.00791216]
8 8.30968e-05 [ 1.00421977]
9 2.36361e-05 [ 1.00225055]
10 6.72385e-06 [ 1.00120032]
11 1.91239e-06 [ 1.00064015]
...
'''
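# Why this update works: cost = reduce_mean(square(W * X - Y)), so the exact
# derivative is d(cost)/dW = 2 * mean((W * X - Y) * X). The `gradient` above
# drops the constant factor of 2, which only rescales the effective learning
# rate; W.assign(W - learning_rate * gradient) then performs one plain
# gradient descent step, the manual equivalent of GradientDescentOptimizer.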
}))

# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
prediction = tf.argmax(logits, 1, name='prediction')
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    prediction, feed_dict={X: mnist.test.images[r:r + 1], keep_prob: 1}))

coldGraph(sess, 'lab_11_2_mnist_deep_cnn',
          "X",
          "hypothesis, prediction, accuracy",
          "save/Const:hypothesis, save/Const:prediction, save/Const:accuracy")

# plt.imshow(mnist.test.images[r:r + 1].
#            reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()

'''
Learning started. It takes sometime.
Epoch: 0001 cost = 0.385748474
Epoch: 0002 cost = 0.092017397
Epoch: 0003 cost = 0.065854684
Epoch: 0004 cost = 0.055604566
Epoch: 0005 cost = 0.045996377
Epoch: 0006 cost = 0.040913645
Epoch: 0007 cost = 0.036924479
Epoch: 0008 cost = 0.032808939
...
'''
for step in range(201):
    cost_val, W_val, _ = sess.run([cost, W, optimizer],
                                  feed_dict={X: x_data, Y: y_data})
    print(step, cost_val, W_val)

# predict
print("Prediction:", sess.run(prediction, feed_dict={X: x_test}))
# Calculate the accuracy
print("Accuracy: ", sess.run(accuracy, feed_dict={X: x_test, Y: y_test}))

coldGraph(sess, 'lab_07_1_learning_rate_and_evaluation',
          "X",
          "hypothesis, prediction, accuracy",
          "save/Const:hypothesis, save/Const:prediction, save/Const:accuracy")

'''
when lr = 1.5
0 5.73203 [[-0.30548954  1.22985029 -0.66033536]
 [-4.39069986  2.29670858  2.99386835]
 [-3.34510708  2.09743214 -0.80419564]]
1 23.1494 [[ 0.06951046  0.29449689 -0.0999819 ]
 [-1.95319986 -1.63627958  4.48935604]
 [-0.90760708 -1.65020132  0.50593793]]
2 27.2798 [[ 0.44451016  0.85699677 -1.03748143]
 [ 0.48429942  0.98872018 -0.57314301]
 [ 1.52989244  1.16229868 -4.74406147]]
3 8.668 [[ 0.12396193  0.61504567 -0.47498202]
 [ 0.22003263 -0.2470119   0.9268558 ]
 [ 0.96035379  0.41933775 -3.43156195]]
...
'''
loss = tf.reduce_sum(tf.square(hypothesis - y), name='loss')  # sum of the squares

# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss, name='train')

# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]

# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)  # reset values to wrong
for i in range(1000):
    # sess.run(train, {x: x_train, y: y_train})
    feed_dict = {x: x_train, y: y_train}
    sess.run([train], feed_dict=feed_dict)

# evaluate training accuracy
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
print("W: %s b: %s loss: %s" % (curr_W, curr_b, curr_loss))

curr_loss, curr_hypo = sess.run([loss, hypothesis], {x: 3, y: 3})
print("loss: %s, hypothesis: %s " % (curr_loss, curr_hypo))

coldGraph(sess, 'lab_02_3_linear_regression',
          "x",
          "hypothesis",
          "save/Const:hypothesis")
print('--------------')
b = sess.run(hypothesis, feed_dict={X: [[1, 3, 4, 3]]})
print(b, sess.run(tf.argmax(b, 1)))

print('--------------')
c = sess.run(hypothesis, feed_dict={X: [[1, 1, 0, 1]]})
print(c, sess.run(tf.argmax(c, 1)))

print('--------------')
all = sess.run(hypothesis, feed_dict={
    X: [[1, 11, 7, 9], [1, 3, 4, 3], [1, 1, 0, 1]]})
print(all, sess.run(tf.argmax(all, 1)))

coldGraph(sess, 'lab_06_1_softmax_classifier',
          "X",
          "hypothesis, ud_argmax",
          "save/Const:hypothesis, save/Const:ud_argmax")

'''
--------------
[[  1.38904958e-03   9.98601854e-01   9.06129117e-06]] [1]
--------------
[[ 0.93119204  0.06290206  0.0059059 ]] [0]
--------------
[[  1.27327668e-08   3.34112905e-04   9.99665856e-01]] [2]
--------------
[[  1.38904958e-03   9.98601854e-01   9.06129117e-06]
 [  9.31192040e-01   6.29020557e-02   5.90589503e-03]
 [  1.27327668e-08   3.34112905e-04   9.99665856e-01]] [1 0 2]
'''