Example No. 1
def test(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, fwp.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, fwp.OUTPUT_NODE])
        y = fwp.fw_propagation(x, None)

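        # Restore the exponential-moving-average (shadow) values of the trained
        # variables in place of their raw values, matching the EMA applied
        # during training.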
        ema = tf.train.ExponentialMovingAverage(bkp.EMA_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

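        # Poll for the latest checkpoint indefinitely, re-evaluating test
        # accuracy every TEST_INTVAL_IN_SECS seconds.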
        while True:
            with tf.Session() as testSess:
                ckpt = tf.train.get_checkpoint_state(bkp.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(testSess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]
                    accuracy_score = testSess.run(accuracy,
                                                  feed_dict={
                                                      x: mnist.test.images,
                                                      y_: mnist.test.labels
                                                  })
                    print("After %s steps, test accuracy is %g." %
                          (global_step, accuracy_score))
                else:
                    print("No checkpoint data found.")
                    return

            time.sleep(TEST_INTVAL_IN_SECS)
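
# Usage sketch (an assumption, not part of the original example): test() only
# needs a `mnist` object exposing test.images and test.labels. With the TF 1.x
# tutorial helper input_data, a hypothetical driver could look like this
# (the data path is illustrative):
from tensorflow.examples.tutorials.mnist import input_data

def main():
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    test(mnist)

if __name__ == "__main__":
    main()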
Example No. 2
def test(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, fwp.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, fwp.OUTPUT_NODE])
        y = fwp.fw_propagation(x, None)

        ema = tf.train.ExponentialMovingAverage(bkp.EMA_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        img_batch, label_batch = gends.get_tfRecords(TOTAL_TEST_EXAMPLES,
                                                     isTrain=False)

        while True:
            with tf.Session() as testSess:
                ckpt = tf.train.get_checkpoint_state(bkp.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(testSess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]

                    # Start the queue-runner threads that feed the TFRecords input pipeline
                    coord = tf.train.Coordinator()
                    threads = tf.train.start_queue_runners(sess=testSess,
                                                           coord=coord)

                    xs, y_s = testSess.run([img_batch, label_batch])

                    #accuracy_score = testSess.run(accuracy, feed_dict = {x: mnist.test.images, y_: mnist.test.labels})
                    accuracy_score = testSess.run(accuracy,
                                                  feed_dict={
                                                      x: xs,
                                                      y_: y_s
                                                  })
                    print("After %s steps, test accuracy is %g." %
                          (global_step, accuracy_score))

                    # Stop the queue-runner threads and wait for them to finish
                    coord.request_stop()
                    coord.join(threads)
                else:
                    print("No checkpoint data found.")
                    return

            time.sleep(TEST_INTVAL_IN_SECS)
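
# Sketch of what gends.get_tfRecords(num, isTrain) might look like (an
# assumption; the real generateds module is not shown here). It reads a
# TFRecords file and returns batched image/label tensors that the queue
# runners above keep filled. The file paths and feature keys are illustrative.
def get_tfRecords_sketch(num, isTrain=True):
    tfRecord_path = ("./data/mnist_train.tfrecords" if isTrain
                     else "./data/mnist_test.tfrecords")
    filename_queue = tf.train.string_input_producer([tfRecord_path], shuffle=True)
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            "label": tf.FixedLenFeature([10], tf.int64),    # one-hot label
            "img_raw": tf.FixedLenFeature([], tf.string)    # raw pixel bytes
        })
    img = tf.decode_raw(features["img_raw"], tf.uint8)
    img.set_shape([784])
    img = tf.cast(img, tf.float32) * (1.0 / 255)            # normalize to [0, 1]
    label = tf.cast(features["label"], tf.float32)
    img_batch, label_batch = tf.train.shuffle_batch([img, label],
                                                    batch_size=num,
                                                    num_threads=2,
                                                    capacity=1000,
                                                    min_after_dequeue=700)
    return img_batch, label_batch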
Example No. 3
def test(mnist):
    with tf.Graph().as_default() as g:
        #x  = tf.placeholder(tf.float32, [None, fwp.INPUT_NODE])
        # Likewise, adjust the dimensions of x to the 4-D shape the conv net expects
        print(mnist.test.num_examples)
        x = tf.placeholder(tf.float32, [
            MNIST_MODF_TEST_CNT, fwp.IMAGE_XY_RES, fwp.IMAGE_XY_RES,
            fwp.IMAGE_CHANS
        ])
        y_ = tf.placeholder(tf.float32, [None, fwp.OUTPUT_NODE])
        y = fwp.fw_propagation(x, False, None)

        ema = tf.train.ExponentialMovingAverage(bkp.EMA_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        while True:
            with tf.Session() as testSess:
                ckpt = tf.train.get_checkpoint_state(bkp.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(testSess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]
                    test_imgs = mnist.test.next_batch(MNIST_MODF_TEST_CNT)
                    mnist_test_images_reshaped = np.reshape(
                        test_imgs[0], [
                            MNIST_MODF_TEST_CNT, fwp.IMAGE_XY_RES,
                            fwp.IMAGE_XY_RES, fwp.IMAGE_CHANS
                        ])
                    accuracy_score = testSess.run(
                        accuracy,
                        feed_dict={
                            x: mnist_test_images_reshaped,
                            y_: test_imgs[1]
                        })
                    print("After %s steps, test accuracy is %g." %
                          (global_step, accuracy_score))
                else:
                    print("No checkpoint data found.")
                    return

            time.sleep(TEST_INTVAL_IN_SECS)
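
# Note (an assumption, not shown in the original): for standard MNIST data the
# reshape above produces [MNIST_MODF_TEST_CNT, 28, 28, 1] tensors, i.e.
# fwp.IMAGE_XY_RES = 28 and fwp.IMAGE_CHANS = 1, which is the 4-D input shape
# the LeNet-5 style fw_propagation(x, False, None) expects.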
Example No. 4
def FCN_restore_model(imgarr_pped):
	with tf.Graph().as_default() as tg:
		x = tf.placeholder(tf.float32, [1, fwp.IMAGE_XY_RES, fwp.IMAGE_XY_RES, fwp.IMAGE_CHANS])
		y = fwp.fw_propagation(x, False, None)
		resultV = tf.argmax(y, 1)

		var_avg = tf.train.ExponentialMovingAverage(bkp.EMA_DECAY)
		var_to_restore = var_avg.variables_to_restore()
		saver = tf.train.Saver(var_to_restore)

		with tf.Session() as fcnSess:
			ckpt = tf.train.get_checkpoint_state(bkp.MODEL_SAVE_PATH)
			if ckpt and ckpt.model_checkpoint_path:
				saver.restore(fcnSess, ckpt.model_checkpoint_path)
				resultV = fcnSess.run(resultV, feed_dict = {x:imgarr_pped})
				return resultV
			else:
				print("No checkpoint data found.")
				return -1
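
# Usage sketch (an assumption, not part of the original example): imgarr_pped
# is expected to be a preprocessed image of shape
# [1, fwp.IMAGE_XY_RES, fwp.IMAGE_XY_RES, fwp.IMAGE_CHANS] with float values in
# [0, 1]. A hypothetical preprocessing helper using Pillow/NumPy could be:
import numpy as np
from PIL import Image

def pre_pic_sketch(pic_path):
    img = Image.open(pic_path).convert("L")
    img = img.resize((fwp.IMAGE_XY_RES, fwp.IMAGE_XY_RES))
    arr = 255 - np.array(img)                # invert: assumes a white background
    arr = (arr / 255.0).astype(np.float32)   # normalize to [0, 1]
    return arr.reshape(1, fwp.IMAGE_XY_RES, fwp.IMAGE_XY_RES, fwp.IMAGE_CHANS)

# Example call (hypothetical image path):
# print(FCN_restore_model(pre_pic_sketch("./pic/5.png")))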
Example No. 5
def bk_propagation(mnist):
	#x = tf.placeholder(tf.float32, shape = (None, fwp.INPUT_NODE))
	# In the LeNet-5 convolutional network, the input x is a 4-D tensor:
	# [number of images, image X resolution, image Y resolution, number of channels]
	x  = tf.placeholder(tf.float32, [BATCH_SIZE, fwp.IMAGE_XY_RES, fwp.IMAGE_XY_RES, fwp.IMAGE_CHANS])
	y_ = tf.placeholder(tf.float32, shape = (None, fwp.OUTPUT_NODE))

	#X, Y_, Y_c = gends.generateds()

	y = fwp.fw_propagation(x, True, REGULARIZER)

	global_step = tf.Variable(0, trainable = False)

	#loss_mse = tf.reduce_mean(tf.square(y - y_))
	#loss = loss_mse

	ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = y, labels = tf.argmax(y_, 1))
	loss_cem = tf.reduce_mean(ce)
	loss = loss_cem

	loss_total = loss + tf.add_n(tf.get_collection("losses"))

	learning_rate = tf.train.exponential_decay(
		LR_BASE,
		global_step,
		mnist.train.num_examples / BATCH_SIZE,
		LR_DECAY,
		staircase = True)


	#train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_total)
	train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_total, global_step = global_step)

	ema = tf.train.ExponentialMovingAverage(EMA_DECAY, global_step)
	ema_op = ema.apply(tf.trainable_variables())

	with tf.control_dependencies([train_step, ema_op]):
		train_op = tf.no_op(name = "train")

	saver = tf.train.Saver()

	with tf.Session() as fcSess:
		init_op = tf.global_variables_initializer()
		fcSess.run(init_op)

		ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
		if ckpt and ckpt.model_checkpoint_path:
			saver.restore(fcSess, ckpt.model_checkpoint_path)
			xs, ys = mnist.train.next_batch(BATCH_SIZE)
			xs = np.reshape(xs, (BATCH_SIZE, fwp.IMAGE_XY_RES, fwp.IMAGE_XY_RES, fwp.IMAGE_CHANS))
			_, loss_v, step = fcSess.run([train_op, loss, global_step], feed_dict = {x: xs, y_: ys})
			print("Restored session at %d steps, loss is %g." % (step, loss_v))
		else:
			step = 0
			loss_v = float("inf")

		for i in range(step, TOTAL_STEPS):
			if i % 100 == 0:
				print("After %d steps, loss is %g." % (step, loss_v))
				saver.save(fcSess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step = global_step)

			xs, ys = mnist.train.next_batch(BATCH_SIZE)
			xs = np.reshape(xs, (BATCH_SIZE, fwp.IMAGE_XY_RES, fwp.IMAGE_XY_RES, fwp.IMAGE_CHANS))
			_, loss_v, step = fcSess.run([train_op, loss, global_step], feed_dict = {x: xs, y_: ys})
		print("After %d steps, loss is %g." % (step, loss_v))
		saver.save(fcSess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step = global_step)
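
# Note: the control_dependencies/no_op pattern above ensures that every run of
# train_op performs both the gradient update and the moving-average update. An
# equivalent, more compact form (shown only as an illustration) is:
#
#     train_op = tf.group(train_step, ema_op, name="train")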
Example No. 6
def bk_propagation(mnist):
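    # Note: this variant reads training batches from TFRecords via
    # gends.get_tfRecords, so the mnist argument is kept only for interface
    # compatibility and is not used below.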
    x = tf.placeholder(tf.float32, shape=(None, fwp.INPUT_NODE))
    y_ = tf.placeholder(tf.float32, shape=(None, fwp.OUTPUT_NODE))

    #X, Y_, Y_c = gends.generateds()

    y = fwp.fw_propagation(x, REGULARIZER)

    global_step = tf.Variable(0, trainable=False)

    #loss_mse = tf.reduce_mean(tf.square(y - y_))
    #loss = loss_mse

    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,
                                                        labels=tf.argmax(
                                                            y_, 1))
    loss_cem = tf.reduce_mean(ce)
    loss = loss_cem

    loss_total = loss + tf.add_n(tf.get_collection("losses"))

    learning_rate = tf.train.exponential_decay(LR_BASE,
                                               global_step,
                                               TOTAL_NUM_EXAMPLES / BATCH_SIZE,
                                               LR_DECAY,
                                               staircase=True)

    #train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_total)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss_total, global_step=global_step)

    ema = tf.train.ExponentialMovingAverage(EMA_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())

    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name="train")

    saver = tf.train.Saver()
    img_batch, label_batch = gends.get_tfRecords(BATCH_SIZE)

    with tf.Session() as fcSess:
        init_op = tf.global_variables_initializer()
        fcSess.run(init_op)

        # Start the queue-runner threads that feed the TFRecords input pipeline
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=fcSess, coord=coord)

        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(fcSess, ckpt.model_checkpoint_path)
            #xs, ys = mnist.train.next_batch(BATCH_SIZE)
            xs, ys = fcSess.run([img_batch, label_batch])
            _, loss_v, step = fcSess.run([train_op, loss, global_step],
                                         feed_dict={
                                             x: xs,
                                             y_: ys
                                         })
            print("Restored session at %d steps, loss is %g." % (step, loss_v))
        else:
            step = 0
            loss_v = float("inf")

        for i in range(step, TOTAL_STEPS):
            if i % 1000 == 0:
                print("After %d steps, loss is %g." % (step, loss_v))
                saver.save(fcSess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)

            #xs, ys = mnist.train.next_batch(BATCH_SIZE)
            xs, ys = fcSess.run([img_batch, label_batch])
            _, loss_v, step = fcSess.run([train_op, loss, global_step],
                                         feed_dict={
                                             x: xs,
                                             y_: ys
                                         })
        print("After %d steps, loss is %g." % (step, loss_v))
        saver.save(fcSess,
                   os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                   global_step=global_step)

        # Stop the queue-runner threads and wait for them to finish
        coord.request_stop()
        coord.join(threads)