Example #1
def train(X_test, y_test_lable):
    x_ = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    x = tf.reshape(x_, shape=[-1, 28, 28, 1])

    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    cosine, loss = lenet5_infernece.inference(x, False, regularizer,
                                              tf.argmax(y_, 1))

    # Evaluate model
    pred_max = tf.argmax(cosine, 1)
    y_max = tf.argmax(y_, 1)
    correct_pred = tf.equal(pred_max, y_max)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Initialize the TensorFlow persistence class (Saver).
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, "lenet5/lenet5_model")
        X_test = np.reshape(X_test, (X_test.shape[0],
                                     IMAGE_SIZE,
                                     IMAGE_SIZE,
                                     NUM_CHANNELS))

        acc = sess.run(accuracy, feed_dict={x: X_test, y_: y_test_lable})
        print('Test accuracy: %.2f%%' % (acc * 100))
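The constants and modules referenced above (INPUT_NODE, OUTPUT_NODE, IMAGE_SIZE, NUM_CHANNELS, REGULARIZATION_RATE, lenet5_infernece) are defined elsewhere in the project. A minimal, hypothetical driver for this function might look like the sketch below; the constant values and the random stand-in data are assumptions, not part of the original.

import numpy as np
import tensorflow as tf  # TF 1.x API, as used throughout these examples

# Assumed constants matching the placeholders above (values are guesses).
INPUT_NODE = 784           # 28 * 28 flattened MNIST-style image
OUTPUT_NODE = 10           # number of classes
IMAGE_SIZE = 28
NUM_CHANNELS = 1
REGULARIZATION_RATE = 0.0001

# Stand-in data; real code would load an actual test set.
X_test = np.random.rand(100, INPUT_NODE).astype(np.float32)
y_test_lable = np.eye(OUTPUT_NODE)[np.random.randint(0, OUTPUT_NODE, 100)]
# train(X_test, y_test_lable)  # restores "lenet5/lenet5_model" and prints accuracy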
Example #2
def initVarible():
    shuffle = True
    batch_idx = 0
    train_acc = []
    # Define the input placeholder as a 4-D tensor.
    # x_ = tf.placeholder(tf.float32, [None, INPUT_NODE],name='x-input')
    # x = tf.reshape(x_, shape=[-1, 28, 28, 1])
    x = tf.placeholder(tf.float32, [None, 28, 28, 1], name='x-input')

    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = lenet5_infernece.inference(x, True, regularizer,
                                   tf.AUTO_REUSE)  #tf.AUTO_REUSE
    global_step = tf.Variable(0, trainable=False)

    # Evaluate model
    pred_max = tf.argmax(y, 1)
    y_max = tf.argmax(y_, 1)
    correct_pred = tf.equal(pred_max, y_max)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Define the loss function, learning rate, and training procedure.

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    return global_step, shuffle, batch_idx, loss, x, y_, accuracy, train_acc
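A sketch of how the values returned by initVarible might be wired into a training step; the optimizer choice and learning rate are assumptions, not shown in the original:

# Hypothetical usage of initVarible (a sketch):
global_step, shuffle, batch_idx, loss, x, y_, accuracy, train_acc = initVarible()
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(
    loss, global_step=global_step)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # ...then feed (x, y_) batches through train_step, as the train() functions below do.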
def train(X_train, y_train_lable, X_test, y_test_lable):
    shuffle = True
    batch_idx = 0

    batch_len = int(X_train.shape[0] / BATCH_SIZE)
    test_batch_len = int(X_test.shape[0] / BATCH_SIZE)
    test_acc = []
    train_acc = []
    train_idx = np.random.permutation(batch_len)  # shuffle the batch_len (600) batch indices

    x_ = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    x = tf.reshape(x_, shape=[-1, 28, 28, 1])
    y_ = tf.placeholder(tf.float32, [None, NUM_LABELS], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    cosine, loss = lenet5_infernece.inference(x, True, regularizer,
                                              tf.argmax(y_, 1))
    global_step = tf.Variable(0, trainable=False)

    pred_max = tf.argmax(cosine, 1)
    y_max = tf.argmax(y_, 1)
    correct_pred = tf.equal(pred_max, y_max)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # learning_rate, display_step, TRAINING_STEPS, etc. are module-level constants.
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        step = 1
        print("Start training!")
        while step < TRAINING_STEPS:
            batch_shuffle_idx = train_idx[batch_idx]
            batch_xs = X_train[batch_shuffle_idx * BATCH_SIZE:
                               batch_shuffle_idx * BATCH_SIZE + BATCH_SIZE]
            batch_ys = y_train_lable[batch_shuffle_idx * BATCH_SIZE:
                                     batch_shuffle_idx * BATCH_SIZE + BATCH_SIZE]
            if batch_idx < batch_len:
                batch_idx += 1
                if batch_idx == batch_len:
                    batch_idx = 0
            else:
                batch_idx = 0

            reshaped_xs = np.reshape(batch_xs, (BATCH_SIZE,
                                                IMAGE_SIZE,
                                                IMAGE_SIZE,
                                                NUM_CHANNELS))
            _, loss_value, step = sess.run([train_step, loss, global_step],
                                           feed_dict={x: reshaped_xs, y_: batch_ys})
            if step % display_step == 0:
                X_test = np.reshape(X_test[:3000], (3000, 28, 28, 1))
                loss_value, acc = sess.run([loss, accuracy],
                                           feed_dict={x: X_test,
                                                      y_: y_test_lable[:3000]})
                print("After %d training step(s), loss on test data is %g, acc is %g"
                      % (step, loss_value, acc))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
            step += 1
        print("Optimization Finished!")
        saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME))
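The shuffled batching used above steps through whole batches in a permuted order rather than shuffling individual samples. A standalone illustration with stand-in data (sizes are assumptions):

import numpy as np

BATCH_SIZE = 100
X_train = np.arange(600 * BATCH_SIZE)            # stand-in data: 600 full batches
batch_len = X_train.shape[0] // BATCH_SIZE       # 600
train_idx = np.random.permutation(batch_len)     # permuted batch order

batch_idx = 0                                    # advances and wraps each step
batch_shuffle_idx = train_idx[batch_idx]
batch_xs = X_train[batch_shuffle_idx * BATCH_SIZE:
                   batch_shuffle_idx * BATCH_SIZE + BATCH_SIZE]
assert batch_xs.shape[0] == BATCH_SIZE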
def evaluate(X_test):
    with tf.Graph().as_default() as g:

        # Define the input placeholder as a 4-D tensor.
        x_ = tf.placeholder(tf.float32, [None, lenet5_train.INPUT_NODE],
                            name='x-input')
        x = tf.reshape(x_, shape=[-1, 28, 28, 1])

        y_ = tf.placeholder(tf.float32, [None, lenet5_train.OUTPUT_NODE],
                            name='y-input')

        regularizer = tf.contrib.layers.l2_regularizer(
            lenet5_train.REGULARIZATION_RATE)
        y = lenet5_infernece.inference(x, False, regularizer)
        global_step = tf.Variable(0, trainable=False)

        # Evaluate model
        pred_max = tf.argmax(y, 1)
        y_max = tf.argmax(y_, 1)
        correct_pred = tf.equal(pred_max, y_max)
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        test_batch_len = int(X_test.shape[0] / lenet5_train.BATCH_SIZE)
        test_acc = []

        kaggle_pred = np.array([])

        test_xs = np.reshape(
            X_test, (X_test.shape[0], lenet5_train.IMAGE_SIZE,
                     lenet5_train.IMAGE_SIZE, lenet5_train.NUM_CHANNELS))

        batchsize = lenet5_train.BATCH_SIZE

        # 'Saver' op to save and restore all the variables
        saver = tf.train.Saver()

        with tf.Session() as sess:
            saver.restore(sess, "./lenet5/lenet5_model")

            for i in range(test_batch_len):
                pred_result = sess.run(pred_max,
                                       feed_dict={
                                           x: test_xs[batchsize * i:
                                                      batchsize * i + batchsize]
                                       })
                kaggle_pred = np.append(kaggle_pred, pred_result)
            # Convert once after the loop instead of on every iteration.
            kaggle_pred = kaggle_pred.astype(int).tolist()

            print("pred_result.length:", len(kaggle_pred))
            #print("pred_result=",kaggle_pred)
            print("Save prediction result...")
            saveResult(kaggle_pred)
            return
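saveResult is not defined in these snippets. A plausible implementation for a Kaggle digit-recognizer style submission file is sketched below; the CSV layout and filename are assumptions:

import csv

def saveResult(predictions, filename="submission.csv"):
    # Assumed Kaggle format: header row, then 1-based ImageId with the predicted label.
    with open(filename, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["ImageId", "Label"])
        for i, label in enumerate(predictions, start=1):
            writer.writerow([i, label])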
Example #5
def evaluate(X_test, y_test_lable, My_Yd):
    with tf.Graph().as_default() as g:
        # Define the input placeholder as a 4-D tensor.
        x_ = tf.placeholder(tf.float32, [None, lenet5_train.INPUT_NODE],
                            name='x-input')
        x = tf.reshape(x_, shape=[-1, 28, 28, 1])

        y_ = tf.placeholder(tf.float32, [None, lenet5_train.OUTPUT_NODE],
                            name='y-input')

        regularizer = tf.contrib.layers.l2_regularizer(
            lenet5_train.REGULARIZATION_RATE)
        y = lenet5_infernece.inference(x, False, regularizer)
        global_step = tf.Variable(0, trainable=False)

        # Evaluate model
        pred_max = tf.argmax(y, 1)
        y_max = tf.argmax(y_, 1)
        correct_pred = tf.equal(pred_max, y_max)
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        batchsize = 20
        test_batch_len = int(X_test.shape[0] / batchsize)
        test_acc = []

        test_xs = np.reshape(
            X_test, (X_test.shape[0], lenet5_train.IMAGE_SIZE,
                     lenet5_train.IMAGE_SIZE, lenet5_train.NUM_CHANNELS))

        # 'Saver' op to save and restore all the variables
        saver = tf.train.Saver()
        #saver = tf.train.import_meta_graph("./mnist/mnist_model.meta")
        with tf.Session() as sess:

            saver.restore(sess, "./lenet5/lenet5_model")

            My_test_pred = sess.run(pred_max, feed_dict={x: test_xs[:20]})
            print("期望值:", My_Yd)
            print("預測值:", My_test_pred)
            My_acc = sess.run(accuracy,
                              feed_dict={
                                  x: test_xs,
                                  y_: y_test_lable
                              })
            print('Test accuracy: %.2f%%' % (My_acc * 100))
            display_result(My_test_pred)
            return
def evaluate(X_test, y_test_lable):
    with tf.Graph().as_default() as g:

        # Define the input placeholder as a 4-D tensor.
        x_ = tf.placeholder(tf.float32, [None, lenet5_train.INPUT_NODE],
                            name='x-input')
        x = tf.reshape(x_, shape=[-1, 28, 28, 1])

        y_ = tf.placeholder(tf.float32, [None, lenet5_train.OUTPUT_NODE],
                            name='y-input')

        regularizer = tf.contrib.layers.l2_regularizer(
            lenet5_train.REGULARIZATION_RATE)
        y = lenet5_infernece.inference(x, False, regularizer)
        global_step = tf.Variable(0, trainable=False)

        # Evaluate model
        pred_max = tf.argmax(y, 1)
        y_max = tf.argmax(y_, 1)
        correct_pred = tf.equal(pred_max, y_max)
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        test_batch_len = int(X_test.shape[0] / lenet5_train.BATCH_SIZE)
        test_acc = []

        test_xs = np.reshape(X_test, (X_test.shape[0],
                                      lenet5_train.IMAGE_SIZE,
                                      lenet5_train.IMAGE_SIZE,
                                      lenet5_train.NUM_CHANNELS))

        batchsize = lenet5_train.BATCH_SIZE

        # 'Saver' op to save and restore all the variables
        saver = tf.train.Saver()
        with tf.Session() as sess:

            saver.restore(sess, "./lenet5/lenet5_model")

            for i in range(test_batch_len):
                temp_acc = sess.run(accuracy,
                                    feed_dict={
                                        x: test_xs[batchsize * i:
                                                   batchsize * i + batchsize],
                                        y_: y_test_lable[batchsize * i:
                                                         batchsize * i + batchsize]
                                    })
                test_acc.append(temp_acc)
                print("Test batch ", i, ": Testing Accuracy:", temp_acc)

            t_acc = tf.reduce_mean(tf.cast(test_acc, tf.float32))
            print("Average Testing Accuracy=", sess.run(t_acc))
            return
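Building a tf.reduce_mean op just to average a Python list of floats works, but a plain NumPy mean gives the same number without touching the graph (a minor alternative, not the original code):

import numpy as np
# Equivalent to sess.run(tf.reduce_mean(tf.cast(test_acc, tf.float32))):
print("Average Testing Accuracy=", np.mean(test_acc))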
Example #7
def evaluate(X_test, y_test_lable, My_Yd):
    with tf.Graph().as_default() as g:
        # Define the input placeholder as a 4-D tensor.
        x_ = tf.placeholder(tf.float32, [None, lenet5_train.INPUT_NODE],
                            name='x-input')
        x = tf.reshape(x_, shape=[-1, 28, 28, 1])

        y_ = tf.placeholder(tf.float32, [None, lenet5_train.OUTPUT_NODE],
                            name='y-input')

        regularizer = tf.contrib.layers.l2_regularizer(
            lenet5_train.REGULARIZATION_RATE)
        y = lenet5_infernece.inference(x, False, regularizer,
                                       False)  #tf.AUTO_REUSE
        global_step = tf.Variable(0, trainable=False)

        # Evaluate model
        pred_max = tf.argmax(y, 1)
        y_max = tf.argmax(y_, 1)
        correct_pred = tf.equal(pred_max, y_max)
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        batchsize = 20
        test_batch_len = int(X_test.shape[0] / batchsize)
        test_acc = []

        test_xs = np.reshape(
            X_test, (X_test.shape[0], lenet5_train.IMAGE_SIZE,
                     lenet5_train.IMAGE_SIZE, lenet5_train.NUM_CHANNELS))

        # 'Saver' op to save and restore all the variables
        saver = tf.train.Saver()
        # saver = tf.train.import_meta_graph("./mnist/mnist_model.meta")
        with tf.Session() as sess:
            if MODEL_SELECT == 0:
                MODEL_DIR = MODEL_DIR_NUMBER
            else:
                MODEL_DIR = MODEL_DIR_LETTER

            saver.restore(sess, MODEL_DIR)

            My_test_pred = sess.run(pred_max, feed_dict={x: test_xs[:20]})
            print("期望值:", My_Yd)
            print("before cnvert My_test_pred =")
            print(My_test_pred)
            My_test_pred1 = []
            for i in range(len(My_test_pred)):
                print("My_test_pred[i] =")
                print(My_test_pred[i])

                if MODEL_SELECT == 0:
                    if My_test_pred[i] == 0:
                        My_test_pred1.append(0)
                    elif My_test_pred[i] == 1:
                        My_test_pred1.append(1)
                        print("A=")
                    elif My_test_pred[i] == 2:
                        My_test_pred1.append(2)
                    elif My_test_pred[i] == 3:
                        My_test_pred1.append(3)
                    elif My_test_pred[i] == 4:
                        My_test_pred1.append(4)
                    elif My_test_pred[i] == 5:
                        My_test_pred1.append(5)
                    elif My_test_pred[i] == 6:
                        My_test_pred1.append(6)
                    elif My_test_pred[i] == 7:
                        My_test_pred1.append(7)
                    elif My_test_pred[i] == 8:
                        My_test_pred1.append(8)
                    elif My_test_pred[i] == 9:
                        My_test_pred1.append(9)
                    elif My_test_pred[i] == 10:
                        My_test_pred1.append('A')
                    elif My_test_pred[i] == 11:
                        My_test_pred1.append('B')
                    elif My_test_pred[i] == 12:
                        My_test_pred1.append('C')
                    elif My_test_pred[i] == 13:
                        My_test_pred1.append('D')
                    elif My_test_pred[i] == 14:
                        My_test_pred1.append('E')
                    elif My_test_pred[i] == 15:
                        My_test_pred1.append('F')
                    elif My_test_pred[i] == 16:
                        My_test_pred1.append('G')
                    elif My_test_pred[i] == 17:
                        My_test_pred1.append('H')
                    elif My_test_pred[i] == 18:
                        My_test_pred1.append('I')
                    elif My_test_pred[i] == 19:
                        My_test_pred1.append('J')
                    else:
                        My_test_pred1.append('--')
                else:
                    if My_test_pred[i] == 0:
                        My_test_pred1.append('A')
                    elif My_test_pred[i] == 1:
                        My_test_pred1.append('B')
                        print("A=")
                    elif My_test_pred[i] == 2:
                        My_test_pred1.append('C')
                    elif My_test_pred[i] == 3:
                        My_test_pred1.append('D')
                    elif My_test_pred[i] == 4:
                        My_test_pred1.append('E')
                    elif My_test_pred[i] == 5:
                        My_test_pred1.append('F')
                    elif My_test_pred[i] == 6:
                        My_test_pred1.append('G')
                    elif My_test_pred[i] == 7:
                        My_test_pred1.append('H')
                    elif My_test_pred[i] == 8:
                        My_test_pred1.append('I')
                    elif My_test_pred[i] == 9:
                        My_test_pred1.append('J')
                    else:
                        My_test_pred1.append('--')

            print("預測值:", My_test_pred1)
            My_acc = sess.run(accuracy,
                              feed_dict={
                                  x: test_xs,
                                  y_: y_test_lable
                              })
            print('Test accuracy: %.2f%%' % (My_acc * 100))
            display_result(My_test_pred, My_test_pred1)
            return
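A hypothetical driver for this evaluate variant; the data shapes and the commented-out call are assumptions (real code would load 20 actual character images and their expected symbols):

import numpy as np

# Stand-in inputs: 20 flattened 28x28 images plus one-hot labels (assumed 10 classes).
X_test = np.random.rand(20, 28 * 28).astype(np.float32)
y_test_lable = np.eye(10)[np.random.randint(0, 10, 20)]
My_Yd = [int(np.argmax(row)) for row in y_test_lable]  # expected class per sample
# evaluate(X_test, y_test_lable, My_Yd)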
def train(X_train, y_train_lable, X_test, y_test_lable):
    shuffle = True
    batch_idx = 0

    batch_len = int(X_train.shape[0] / BATCH_SIZE)
    test_batch_len = int(X_test.shape[0] / BATCH_SIZE)
    test_acc = []
    train_acc = []
    train_idx = np.random.permutation(batch_len)  # shuffle the batch_len (600) batch indices
    # Define the input placeholder as a 4-D tensor.
    x_ = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    x = tf.reshape(x_, shape=[-1, 28, 28, 1])

    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = lenet5_infernece.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Evaluate model
    pred_max = tf.argmax(y, 1)
    y_max = tf.argmax(y_, 1)
    correct_pred = tf.equal(pred_max, y_max)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Define the loss function, learning rate, and training procedure.

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    if learning_rate_flag:
        learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                                   global_step,
                                                   X_train.shape[0] /
                                                   BATCH_SIZE,
                                                   LEARNING_RATE_DECAY,
                                                   staircase=True)
    else:
        learning_rate = 0.001  #Ashing test
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Initialize the TensorFlow persistence class (Saver).
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        step = 1
        print("Start  training!")
        while step < TRAINING_STEPS:
            #batch_xs, batch_ys = mnist.train.next_batch(BATCH_SIZE)
            if shuffle:
                batch_shuffle_idx = train_idx[batch_idx]
                batch_xs = X_train[batch_shuffle_idx *
                                   BATCH_SIZE:batch_shuffle_idx * BATCH_SIZE +
                                   BATCH_SIZE]
                batch_ys = y_train_lable[batch_shuffle_idx *
                                         BATCH_SIZE:batch_shuffle_idx *
                                         BATCH_SIZE + BATCH_SIZE]
            else:
                batch_xs = X_train[batch_idx *
                                   BATCH_SIZE:batch_idx * BATCH_SIZE +
                                   BATCH_SIZE]
                batch_ys = y_train_lable[batch_idx *
                                         BATCH_SIZE:batch_idx * BATCH_SIZE +
                                         BATCH_SIZE]

            if batch_idx < batch_len:
                batch_idx += 1
                if batch_idx == batch_len:
                    batch_idx = 0
            else:
                batch_idx = 0

            reshaped_xs = np.reshape(
                batch_xs, (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))

            # Fit training using batch data
            _, loss_value, step = sess.run([train_step, loss, global_step],
                                           feed_dict={
                                               x: reshaped_xs,
                                               y_: batch_ys
                                           })
            acc = sess.run(accuracy, feed_dict={x: reshaped_xs, y_: batch_ys})
            train_acc.append(acc)
            if step % display_step == 0:
                print(
                    "After %d training step(s), loss on training batch is %g,Training Accuracy=%g"
                    % (step, loss_value, acc))
            step += 1
        print("Optimization Finished!")
        train_acc_avg = tf.reduce_mean(tf.cast(train_acc, tf.float32))
        print("Average Training Accuracy=", sess.run(train_acc_avg))
        print("Save model...")
        #saver.save(sess, "./lenet5/lenet5_model")
        saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME))
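For reference, with staircase=True the decayed rate above is LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** floor(global_step / decay_steps). A quick standalone check with assumed constant values:

# Staircase exponential decay computed by hand (constants are assumed examples).
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
decay_steps = 600  # e.g. 60000 training images / BATCH_SIZE of 100

for step in (0, 600, 1200):
    lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (step // decay_steps)
    print(step, lr)  # 0.01, 0.0099, 0.009801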