Example no. 1
def test():
    images_dir = './image/image/'
    log_dir = './log/'

    images_cat = open("imagelist.txt")
    # store the model output for every image in the catalogue
    images_tested = []

    with tf.Graph().as_default():
        # rebuild the graph and reload the trained model
        x = tf.placeholder(tf.float32, shape=[1, 300, 400, 3])
        p = model.inference(x, 1, 10)
        logits = tf.nn.softmax(p)

        sess = tf.Session()
        tf.get_variable_scope().reuse_variables()
        ckpt = tf.train.get_checkpoint_state(log_dir)
        saver = tf.train.Saver()
        if ckpt and ckpt.model_checkpoint_path:
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Loading success! global_step = %s' % global_step)
        else:
            print('Loading failed!')

        for line in images_cat.readlines():
            image_name = line.strip('\n')
            image_array = get_one_image(images_dir + image_name)
            image_array = np.reshape(image_array, [1, 300, 400, 3])

            prediction = sess.run(logits, feed_dict={x: image_array})
            prediction = np.array(prediction, dtype='float32')
            images_tested.append([image_name, prediction])

            print(image_name)
            print(prediction)

        # query a single image interactively
        while True:
            test_file = input('Enter a test image name (z to quit): ')
            if test_file == 'z':
                break

            image_name = test_file
            image_array = get_one_image(images_dir + image_name)
            image_array = np.reshape(image_array, [1, 300, 400, 3])
            prediction = sess.run(logits, feed_dict={x: image_array})
            prediction = np.array(prediction, dtype='float32')
            test_result = []
            for sample in images_tested:
                # Euclidean distance between the softmax outputs
                distance = np.sqrt(np.sum(np.square(sample[1] - prediction)))
                distance = distance.astype('float32')
                test_result.append([sample[0], distance])

            # sort the results
            test_result = np.array(test_result)
            test_result = test_result[np.lexsort(test_result.T)]
            for i in range(10):
                print(test_result[i][0])

            images_show(test_result)
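
This example (and the ones below) calls a get_one_image helper that is not shown. A minimal sketch of what it plausibly does, assuming Pillow and the fixed 400x300 input size the placeholders use:

import numpy as np
from PIL import Image

def get_one_image(path):
    # load, force RGB, and resize to width 400 x height 300, matching the
    # [1, 300, 400, 3] placeholder shape (PIL's resize takes (width, height))
    image = Image.open(path).convert('RGB')
    image = image.resize((400, 300))
    return np.array(image, dtype=np.float32)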
Example no. 2
def run_training():
    # directories
    train_dir = "./image/"
    logs_train_dir = "./log"

    train, train_label = image_P.get_files(train_dir)
    train_batch, train_label_batch = image_P.get_batch(train,
                                                       train_label,
                                                       IMG_W,
                                                       IMG_H,
                                                       BATCH_SIZE,
                                                       CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()

    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    # saver for writing checkpoints
    saver = tf.train.Saver()

    # initialize all variables
    sess.run(tf.global_variables_initializer())

    # start the input-queue threads
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break  # the queue threads were asked to stop
            _, temp_loss, temp_acc = sess.run([train_op, train_loss, train_acc])

            # print the results every 50 steps
            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f' % (step, temp_loss, temp_acc))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            # save the model every 200 steps and at the final step
            if step % 200 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Failed!')
    finally:
        coord.request_stop()

    # wait for the queue threads to finish
    coord.join(threads)

    sess.close()
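
If training is interrupted, the checkpoints saved above can be reloaded instead of re-initializing. A minimal sketch, assuming the same graph and logs_train_dir, placed where sess.run(tf.global_variables_initializer()) is called:

ckpt = tf.train.get_checkpoint_state(logs_train_dir)
if ckpt and ckpt.model_checkpoint_path:
    # restore the weights and recover the step encoded in the checkpoint filename
    saver.restore(sess, ckpt.model_checkpoint_path)
    start_step = int(ckpt.model_checkpoint_path.split('-')[-1])
else:
    sess.run(tf.global_variables_initializer())
    start_step = 0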
Example no. 3
def train():
	with tf.Graph().as_default():
		global_step = tf.contrib.framework.get_or_create_global_step()

		images, display_images, labels = CNN_input.construct_inputs(False, True)

		logits = CNN_model.inference(True, images)

		loss = CNN_model.loss(logits, labels)

		train_op = CNN_model.train(loss, global_step)

		# Logs loss and runtime
		class _LoggerHook(tf.train.SessionRunHook):

			def begin(self):
				self._step = -1
				self._estimated_time_array = []
				for i in range(EST_TIME_SMOOTHING):
					self._estimated_time_array.append(0)

			def before_run(self, run_context):
				self._step += 1
				self._start_time = time.time()
				return tf.train.SessionRunArgs(loss)

			def get_estimated_completion(self):
				total = 0
				for i in range(EST_TIME_SMOOTHING):
					total += self._estimated_time_array[i]
				return (total / EST_TIME_SMOOTHING)

			def after_run(self, run_context, run_values):
				duration = time.time() - self._start_time
				loss_value = run_values.results

				self._estimated_time_array[self._step % EST_TIME_SMOOTHING] = float(duration * (FLAGS.max_steps - self._step)) / (60.0 * 60.0)

				if (self._step % LOGGING_STEP_INTERVAL == 0):
					log_value = '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch; %.2f est. hours to completion)' \
								% (datetime.now(), self._step, loss_value, FLAGS.batch_size / duration, float(duration), self.get_estimated_completion())
					print(log_value)
					return (log_value)

		with tf.train.MonitoredTrainingSession(
			checkpoint_dir=FLAGS.train_dir,
			hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
				   tf.train.NanTensorHook(loss),
				   _LoggerHook()],
			config=tf.ConfigProto(
				log_device_placement=FLAGS.log_device_placement)) as mon_sess:
			while not mon_sess.should_stop():
				mon_sess.run(train_op)
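
The hook above references several module-level names that the snippet does not define. A sketch of plausible definitions; the names come from the code, but the values and flag descriptions are illustrative assumptions:

import tensorflow as tf

EST_TIME_SMOOTHING = 10      # window size for smoothing the time-to-completion estimate
LOGGING_STEP_INTERVAL = 10   # print a log line every N steps

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('max_steps', 100000, 'Total batches to run.')
tf.app.flags.DEFINE_integer('batch_size', 128, 'Images per batch.')
tf.app.flags.DEFINE_string('train_dir', './train', 'Checkpoint directory.')
tf.app.flags.DEFINE_boolean('log_device_placement', False, 'Log op placement.')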
Example no. 4
def run_training():
    # log dir
    logs_train_dir = logs_dir

    # read data
    test, train, validate, test_label, train_label, validate_label = rd.read_file()

    # get batch
    train_batch, train_label_batch = rd.get_batch(train, train_label, IMG_W,
                                                  IMG_H, BATCH_SIZE, CAPACITY)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
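
Examples no. 2 and no. 4 both rely on module-level hyperparameters that are not shown. A sketch with illustrative values; only the names are taken from the code above:

N_CLASSES = 10          # number of output classes
IMG_W = 400             # input width expected by the network
IMG_H = 300             # input height
BATCH_SIZE = 16         # images per training batch
CAPACITY = 2000         # queue capacity for get_batch
MAX_STEP = 10000        # total training steps
learning_rate = 0.0001
logs_dir = './log'      # checkpoint/summary directory used in Example no. 4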
Example no. 5
def evaluate_all_image():
    '''
    Test all images against the saved model and parameters.
    Returns the global accuracy over the test image set.
    ##############################################
    ## Note that each test image must have a label so the prediction
    ## can be compared with the ground truth.
    ##############################################
    '''
    # you need to change the directories to yours.
    # test_dir = '/home/kevin/tensorflow/cats_vs_dogs/data/test/'
    # N_CLASSES = 2
    print('-------------------------')
    test, train, validate, test_label, train_label, validate_label = rd.read_file()
    BATCH_SIZE = len(test)
    print('There are %d test images in total.' % BATCH_SIZE)
    print('-------------------------')
    test_batch, test_label_batch = rd.get_batch(test, test_label, IMG_W, IMG_H,
                                                BATCH_SIZE, CAPACITY)

    logits = model.inference(test_batch, BATCH_SIZE, N_CLASSES)
    testloss = model.losses(logits, test_label_batch)
    testacc = model.evaluation(logits, test_label_batch)

    logs_train_dir = logs_dir
    saver = tf.train.Saver()

    with tf.Session() as sess:
        print("Reading checkpoints...")
        ckpt = tf.train.get_checkpoint_state(logs_train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                '-')[-1]
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Loading success, global_step is %s' % global_step)
        else:
            print('No checkpoint file found')
        print('-------------------------')
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        test_loss, test_acc = sess.run([testloss, testacc])
        print('The model\'s loss is %.2f' % test_loss)
        correct = int(BATCH_SIZE * test_acc)
        print('Correct : %d' % correct)
        print('Wrong : %d' % (BATCH_SIZE - correct))
        print('The accuracy on the test images is %.2f%%' % (test_acc * 100.0))
        coord.request_stop()
        coord.join(threads)
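
Evaluating the whole test set as a single batch (BATCH_SIZE = len(test)) can exhaust memory on large sets. A minimal sketch of batched evaluation under the same queue setup, assuming a fixed BATCH_SIZE and a num_test_images count (both illustrative):

import math

num_batches = int(math.ceil(num_test_images / float(BATCH_SIZE)))
total_acc = 0.0
for _ in range(num_batches):
    total_acc += sess.run(testacc)  # each run pulls a fresh batch from the queue
print('mean accuracy: %.2f%%' % (total_acc / num_batches * 100.0))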
Example no. 6
def main(argv=None):
    CNN_input.extract()

    images, display_images, labels = CNN_input.construct_inputs(True, True)
    logits = CNN_model.inference(False, images)

    variable_averages = tf.train.ExponentialMovingAverage(
        CNN_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    with tf.Session() as sess:
        check_point = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if check_point and check_point.model_checkpoint_path:
            # split('\\') assumes Windows-style checkpoint paths; take the
            # basename and re-join it with the checkpoint directory
            saver.restore(
                sess,
                os.path.join(FLAGS.train_dir,
                             check_point.model_checkpoint_path.split('\\')[-1]))
        else:
            raise IOError("No Checkpoint file found!")

        coord = tf.train.Coordinator()
        threads = []
        try:
            for queue_runner in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    queue_runner.create_threads(sess,
                                                coord=coord,
                                                daemon=True,
                                                start=True))

            session_result = sess.run(
                [tf.nn.softmax(logits), labels, display_images])

            ResultsPlot(session_result[2], session_result[1],
                        session_result[0])

            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)
        except Exception as e:
            print(e)
            coord.request_stop()
Example no. 7
def evaluate(run_once):
    with tf.Graph().as_default() as graph:
        images, display_images, labels = CNN_input.construct_inputs(
            True, False)

        logits = CNN_model.inference(False, images)

        top_k_op = tf.nn.in_top_k(logits, labels, 3)

        variable_averages = tf.train.ExponentialMovingAverage(
            CNN_model.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, graph)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
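
eval_once is not defined in this example. A minimal sketch of what it typically does in this evaluation pattern, assuming the FLAGS used elsewhere and numpy imported as np:

def eval_once(saver, summary_writer, top_k_op, summary_op):
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if not (ckpt and ckpt.model_checkpoint_path):
            print('No checkpoint file found')
            return
        saver.restore(sess, ckpt.model_checkpoint_path)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            predictions = sess.run(top_k_op)  # bool per example: label in top 3
            precision = np.sum(predictions) / float(predictions.size)
            print('precision @ 3 = %.3f' % precision)
            summary_writer.add_summary(sess.run(summary_op))
        finally:
            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)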
Example no. 8
def run_training():
    train_dir = "./image/"
    logs_train_dir = "./log_" + str(MAX_STEP) + "_cap_" + str(CAPACITY)

    train, train_label = image_P.get_files(train_dir)
    train_batch, train_label_batch = image_P.get_batch(train, train_label,
                                                       IMG_W, IMG_H,
                                                       BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()

    config = tf.ConfigProto(allow_soft_placement=True,
                            gpu_options=tf.GPUOptions(
                                per_process_gpu_memory_fraction=0.7,
                                allow_growth=True))

    sess = tf.Session(config=config)
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())

    tf.train.start_queue_runners(sess=sess)

    for step in np.arange(MAX_STEP + 1):
        sess.run([train_op, train_loss, train_acc])
        print(step)

    print("Finished")

    log_dir = train_dir

    images_dir = './image/'

    images_cat = open("imagelist.txt")
    # store the model output for every image in the catalogue
    images_tested = []

    num_photos = 0
    outfile = open("test-new-" + str(MAX_STEP) + "-" + str(CAPACITY) + ".txt",
                   "w")

    filelines = images_cat.readlines()

    # Build an inference path that actually consumes the placeholder: feeding
    # images into train_logits has no effect, since train_logits reads from
    # the input queue, so reuse the trained variables via a second call to
    # model.inference.
    tf.get_variable_scope().reuse_variables()
    x = tf.placeholder(tf.float32, shape=[1, 300, 400, 3])
    test_logits = model.inference(x, 1, N_CLASSES)

    for line in filelines:
        image_name = line.strip('\n')
        image_array = get_one_image(images_dir + image_name)
        image_array = np.reshape(image_array, [1, 300, 400, 3])

        prediction = sess.run(test_logits, feed_dict={x: image_array})
        prediction = np.array(prediction, dtype='float32')
        images_tested.append([image_name, prediction])

        num_photos += 1
        print("Test:" + str(num_photos))

        outfile.writelines(image_name)
        outfile.writelines(str(prediction))
        outfile.flush()  # flush instead of closing and reopening in append mode

    outfile.close()
    outfile2 = open(
        "nearesttest-" + str(MAX_STEP) + "-" + str(CAPACITY) + ".txt", "w")
    outfile2.write("result = {\n")
    outfile2.close()
    num_photos = 0
    for line in filelines:
        num_photos += 1
        print("Find Near:" + str(num_photos))
        image_name = line.strip('\n')
        image_array = get_one_image(images_dir + image_name)
        image_array = np.reshape(image_array, [1, 300, 400, 3])
        outfile2 = open(
            "nearesttest-" + str(MAX_STEP) + "-" + str(CAPACITY) + ".txt", "a")
        outfile2.write("'" + image_name + "': [\n")
        prediction = sess.run(test_logits, feed_dict={x: image_array})
        prediction = np.array(prediction, dtype='float32')

        test_result = []
        for sample in images_tested:
            distance = np.sqrt(np.sum(np.square(sample[1] - prediction)))
            distance = distance.astype('float32')
            test_result.append([sample[0], distance])

        # sort the results
        test_result = np.array(test_result)
        test_result = test_result[np.lexsort(test_result.T)]
        for i in range(11):
            outfile2.write("'" + test_result[i][0] + "', ")
        outfile2.write("],\n")
        outfile2.close()

    outfile2 = open(
        "nearesttest-" + str(MAX_STEP) + "-" + str(CAPACITY) + ".txt", "a")
    outfile2.write("}\n")
    outfile2.close()

    sess.close()
Example no. 9
def test(log_dir):
    images_dir = './image/'

    images_cat = open("imagelist.txt")
    # store the model output for every image in the catalogue
    images_tested = []

    with tf.Graph().as_default():
        # rebuild the graph and reload the trained model
        x = tf.placeholder(tf.float32, shape=[1, 300, 400, 3])
        p = model.inference(x, 1, 10)
        logits = tf.nn.softmax(p)

        sess = tf.Session()
        tf.get_variable_scope().reuse_variables()
        ckpt = tf.train.get_checkpoint_state(log_dir)
        saver = tf.train.Saver()
        # Restore the checkpoint; without this the model variables are
        # uninitialized and every sess.run below would fail.
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Loading success!')
        else:
            print('Loading failed!')

        num_photos = 0
        outfile = open("test.txt", "w")

        for line in images_cat.readlines():
            image_name = line.strip('\n')
            image_array = get_one_image(images_dir + image_name)
            image_array = np.reshape(image_array, [1, 300, 400, 3])

            prediction = sess.run(logits, feed_dict={x: image_array})
            prediction = np.array(prediction, dtype='float32')
            images_tested.append([image_name, prediction])

            num_photos += 1
            print(num_photos)

            outfile.writelines(image_name)
            outfile.writelines(str(prediction))

        outfile.close()

        # query a single image interactively
        while True:
            test_file = input('Enter a test image name (z to quit): ')
            if test_file == 'z':
                break

            image_name = test_file
            image_array = get_one_image(images_dir + image_name)
            image_array = np.reshape(image_array, [1, 300, 400, 3])
            prediction = sess.run(logits, feed_dict={x: image_array})
            prediction = np.array(prediction, dtype='float32')
            test_result = []
            for sample in images_tested:
                distance = np.sqrt(np.sum(np.square(sample[1] - prediction)))
                distance = distance.astype('float32')
                test_result.append([sample[0], distance])

            # sort the results
            test_result = np.array(test_result)
            test_result = test_result[np.lexsort(test_result.T)]
            for i in range(11):
                print(test_result[i][0])

            images_show(test_result)
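
A caveat that applies to the neighbor ranking in Examples no. 1, 8, and 9: converting the [name, distance] pairs with np.array coerces the distances to strings, so np.lexsort compares them lexicographically ('10.0' sorts before '2.0'). A safer sketch that sorts the plain Python list numerically, before any array conversion:

# sort by distance as a number, not as a string
test_result.sort(key=lambda pair: float(pair[1]))
for name, distance in test_result[:10]:
    print(name, distance)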