def run_training():
    # you need to change the directories to yours.
    s_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+/CK+YuanTu'
    T_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+/CK+X_mid'
    logs_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+'

    s_train, s_train_label = input_data.get_files(s_train_dir)
    s_train_batch, s_train_label_batch = input_data.get_batch(
        s_train, s_train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    T_train, T_train_label = input_data.get_files(T_train_dir)
    T_train_batch, T_train_label_batch = input_data.get_batch(
        T_train, T_train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    train_logits = model.inference(s_train_batch, T_train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, s_train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, s_train_label_batch)

    summary_op = tf.summary.merge_all()  # merge all summaries
    sess = tf.Session()  # create the session
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()  # saver for checkpoints (used below)
    sess.run(tf.global_variables_initializer())  # initialize all variables
    coord = tf.train.Coordinator()  # thread coordinator
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)  # start the queue runners

    # training loop
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                # run the summary op and write the result
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 800 == 0 or (step + 1) == MAX_STEP:
                # save the current model and weights to logs_train_dir; global_step is the current iteration
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
def run_training(N_CLASSES, IMG_W, IMG_H, BATCH_SIZE, MAX_STEP, CAPACITY,
                 model1_data, learning_rate, total):
    train, train_label, randomList = input_data.get_files(model1_data, total)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    return train_logits, train_batch, train_label_batch, randomList
def run_training():
    traindir = 'data/train/'
    logs_train_dir = 'logs/train/'

    train_image, train_label = input_data.get_files(traindir)
    train_batch, train_label_batch = input_data.get_batch(
        train_image, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.training(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    sess = tf.Session()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in range(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 20 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
def evaluate_one_image():
    train_dir = '/home/xiaoyi/data/dogs_vs_cats/data/train/'
    train, train_label = input_data.get_files(train_dir)
    image_array = get_one_image(train)

    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 2

        image = tf.cast(image_array, tf.float32)
        image = tf.reshape(image, [1, 208, 208, 3])
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        x = tf.placeholder(tf.float32, shape=[208, 208, 3])

        logs_train_dir = '/home/xiaoyi/data/dogs_vs_cats/logs/train/'
        saver = tf.train.Saver()

        with tf.Session() as sess:
            print('Reading checkpoints...')
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')

            prediction = sess.run(logit, feed_dict={x: image_array})
            print(prediction)
            max_index = np.argmax(prediction)
            if max_index == 0:
                print('This is a cat with possibility %.6f' % prediction[:, 0])
            else:
                print('This is a dog with possibility %.6f' % prediction[:, 1])
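# Several of the evaluation snippets in this section call a get_one_image() helper that is not
# defined here. Below is a minimal sketch of what such a helper might look like; the name, the
# random selection, and the 208x208 resize are assumptions based on how the snippets use it,
# not the original implementation.
import numpy as np
from PIL import Image

def get_one_image(train):
    """Pick one path at random from the image list and return the image as a numpy array."""
    ind = np.random.randint(0, len(train))   # random index into the file list
    image = Image.open(train[ind])           # train[ind] is assumed to be an image path
    image = image.resize((208, 208))         # match the 208x208x3 input used by the snippets above
    return np.array(image)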
def run_training():
    # dataset directory
    train_dir = r'D:\Python\mnist\test_my\big_0_1_4/'  # My dir--20170727-csq
    # logs_train_dir stores the data produced during training; view it in TensorBoard
    logs_train_dir = r'D:\PyCharm_code\Ai\Tensorflow_mooc_note\6\MinstNew\logs\train/'

    # get the image and label lists
    train, train_label = input_data.get_files(train_dir)
    # generate batches
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    # build the model
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    # get the loss
    train_loss = model.losses(train_logits, train_label_batch)
    # training op
    train_op = model.trainning(train_loss, learning_rate)
    # training accuracy
    train__acc = model.evaluation(train_logits, train_label_batch)

    # merge summaries
    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    # write summaries
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                # save the model every 2000 steps to checkpoint_path
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
def evaluate_one_image():
    # Test one image against the saved models and parameters
    train_dir = 'E:/data/17_DEG/'
    train, train_label = input_data.get_files(train_dir)
    n = len(train)
    ind = np.random.randint(0, n)
    img_dir = train[ind]

    image = Image.open(img_dir)
    image = image.resize([128, 128])
    # image = tf.random_crop(image, [96, 96, 1])  # randomly crop the image to 96 x 96
    image = tf.image.random_flip_left_right(image)
    # image = tf.image.random_brightness(image, max_delta=63)
    image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
    plt.imshow(image)
    image1 = np.array(image)

    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 3

        image = tf.cast(image1, tf.float32)
        image = tf.image.per_image_standardization(image)
        image = tf.reshape(image, [1, 96, 96, 1])
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        x = tf.placeholder(tf.float32, shape=[96, 96, 1])

        logs_train_dir = 'E:/data/logs/train/'
        saver = tf.train.Saver()

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')

            prediction = sess.run(logit, feed_dict={x: image1})
            max_index = np.argmax(prediction)
            if max_index == 0:
                print('This is BMP2 with possibility %.6f' % prediction[:, 0])
            if max_index == 1:
                print('This is BTR70 with possibility %.6f' % prediction[:, 1])
            if max_index == 2:
                print('This is T72 with possibility %.6f' % prediction[:, 2])
def evaluate_one_image():
    '''Test one image against the saved models and parameters
    '''
    # you need to change the directories to yours.
    train_dir = './train/'
    train, train_label = input_data.get_files(train_dir)
    image_array = get_one_image(train)

    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 5

        image = tf.cast(image_array, tf.float32)
        image = tf.image.per_image_standardization(image)
        image = tf.reshape(image, [1, 208, 208, 3])
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        x = tf.placeholder(tf.float32, shape=[208, 208, 3])

        # you need to change the directories to yours.
        logs_train_dir = './train_logs/'
        saver = tf.train.Saver()

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')

            prediction = sess.run(logit, feed_dict={x: image_array})
            print(prediction)
            max_index = np.argmax(prediction)
            if max_index == 0:
                print('This is a daisy with possibility %.6f' % prediction[:, 0])
            elif max_index == 1:
                print('This is a rose with possibility %.6f' % prediction[:, 1])
            elif max_index == 2:
                print('This is a sunflower with possibility %.6f' % prediction[:, 2])
            elif max_index == 3:
                print('This is a dandelion with possibility %.6f' % prediction[:, 3])
            else:
                print('This is a tulip with possibility %.6f' % prediction[:, 4])
def evaluate_random_image():
    '''Test one image against the saved models and parameters
    '''
    # you need to change the directories to yours.
    train_dir = 'data/train/'
    train, train_label = input_data.get_files(train_dir)
    random_img = get_one_random_image(train)
    evaluate_image(random_img)
def run_training():
    '''
    tf.train.Coordinator and tf.train.start_queue_runners apparently both have to be set up
    before sess.run, otherwise the queues will not run.
    The try/except/finally block is essentially boilerplate and can be reused as-is.
    '''
    # you need to change the directories to yours.
    train_dir = '/home/kevin/tensorflow/cats_vs_dogs/data/train/'
    logs_train_dir = '/home/kevin/tensorflow/cats_vs_dogs/logs/train/'

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()  # used together with the queue runners so threads can be shut down cleanly on error
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        # When done, ask the threads to stop.
        coord.request_stop()
        # Wait for threads to finish.
        coord.join(threads)
        sess.close()
def run_training():
    # dataset
    train_dir = '/Users/xcliang/PycharmProjects/cats_vs_dogs/data/train/'  # My dir--20170727-csq
    # logs_train_dir stores the data produced while training the model; view it in TensorBoard
    logs_train_dir = '/Users/xcliang/PycharmProjects/cats_vs_dogs/data/saveNet'

    # Get image and label lists
    train, train_label = input_data.get_files(train_dir)
    # Generate batches
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    # Build the model
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    # Get loss
    train_loss = model.losses(train_logits, train_label_batch)
    # Train
    train_op = model.trainning(train_loss, learning_rate)
    # Get accuracy
    train__acc = model.evaluation(train_logits, train_label_batch)

    # Merge summaries
    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    # Save summaries
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                # Save the model every 2000 steps to checkpoint_path
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
def run_evaluating():
    eval_data, eval_label = input_data.get_files(FLAGS.eval_dir)
    eval_batch, eval_label_batch = input_data.get_batch(
        eval_data, eval_label, FLAGS.height, FLAGS.width, FLAGS.batch_size,
        FLAGS.capacity)

    keep_prob = tf.placeholder(tf.float32)
    hypothesis, cross_entropy, eval_step = model.make_network(
        eval_batch, eval_label_batch, keep_prob)

    cost_sum = tf.summary.scalar("cost_eval", cross_entropy)
    eval_accuracy = tf.nn.in_top_k(hypothesis, eval_label_batch, 1)
    eval_acc = model.evaluation(hypothesis, eval_label_batch)

    saver = tf.train.Saver()
    print('Start Evaluation......')

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        total_sample_count = FLAGS.eval_steps * FLAGS.batch_size
        true_count = 0

        writer = tf.summary.FileWriter(FLAGS.log_dir)
        writer.add_graph(sess.graph)  # Show the graph
        merge_sum = tf.summary.merge_all()

        saver.restore(sess, './CNN_Homework/logs/model.ckpt-36000')

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for step in np.arange(FLAGS.eval_steps + 1):
            _, summary, eval_loss = sess.run(
                [eval_step, merge_sum, cross_entropy],
                feed_dict={keep_prob: 1.0})
            predictions, accuracy = sess.run([eval_accuracy, eval_acc],
                                             feed_dict={keep_prob: 1.0})
            writer.add_summary(summary, global_step=step)
            true_count = true_count + np.sum(predictions)

            if step % 10 == 0:
                print('step : %d, loss : %f, eval_accuracy : %f' %
                      (step, eval_loss, accuracy * 100))

        coord.request_stop()
        coord.join(threads)
        print('precision : %f' % (true_count / total_sample_count))
        sess.close()
def run_training():
    # you need to change the directories to yours.
    train_dir = 'D:/tensorflow/practicePlus/ResNet/train/'
    # val_dir = 'D:/tensorflow/practicePlus/cats_vs_dogs/test'
    logs_train_dir = 'D:/tensorflow/practicePlus/ResNet/save/'

    train, train_label = input_data.get_files(train_dir)
    # val, val_label = input_data.get_files(val_dir)
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    # val_batch, val_label_batch = input_data.get_batch(val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    # train
    train_logits = model_resnet50.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model_resnet50.losses(train_logits, train_label_batch)
    train_op = model_resnet50.trainning(train_loss, learning_rate)
    train_acc = model_resnet50.evaluation(train_logits, train_label_batch)

    # validation
    # test_logits = model.inference(val_batch, BATCH_SIZE, N_CLASSES)
    # test_loss = model.losses(test_logits, val_label_batch)
    # test_acc = model.evaluation(test_logits, val_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # batch training
    try:
        # one step, one batch
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            # print loss and accuracy every 10 steps and write the summary log at the same time
            if step % 10 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            # save the model at step 500 and at the final step
            if (step == 500) or ((step + 1) == MAX_STEP):
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
def run_training():
    # training set path
    train_dir = '/raidHDD/experimentData/Dev/Knife/hackthon/upup7/'
    # output model path
    logs_train_dir = '/raidHDD/experimentData/Dev/Knife/hackthon/modelX'

    if removeLogFIle:
        if os.path.exists(logs_train_dir):
            for logFile in os.listdir(logs_train_dir):
                os.remove("{0}/{1}".format(logs_train_dir, logFile))
            print("Deleted old log files...")

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.8  # use at most 80% of GPU memory
    sess = tf.Session(config=config)
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
                train_writer.flush()

            # only save the final model
            if (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
def main(_):
    # training data and labels
    train_data, train_label = input_data.get_files(WORK_DIRECTORY)
    train_batch, train_label_batch = input_data.next_batch(train_data, train_label)

    # define the train ops
    train_logits = model.inference(train_batch, N_LABELS, BATCH_SIZE)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.optimization(train_loss, LEARNING_RATE)
    train_acc = model.evaluation(train_logits, train_label_batch)

    # collect summaries
    summary_op = tf.summary.merge_all()
    # saver for checkpoints
    saver = tf.train.Saver()

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # write the logs
    train_writer = tf.summary.FileWriter(LOGS_DIRECTORY, sess.graph)

    # queue monitor
    coord = tf.train.Coordinator()
    # threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # train
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            # run the ops
            _, loss, accuracy = sess.run([train_op, train_loss, train_acc])

            # print progress and write it to the logs
            if step % 2 == 0:
                print(f"Step [{step}/{MAX_STEP}] Loss {loss} Accuracy {accuracy * 100.0:.2f}%")
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if (step + 1) == MAX_STEP:  # last step: save the checkpoint (step never equals MAX_STEP inside the loop)
                checkpoint_path = os.path.join(LOGS_DIRECTORY, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
                break
        print(f"Model saved! Global step = {step}")

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    sess.close()
def run_training():
    train_dir = "/home/sxy/PycharmProjects/defect2/data/train"
    logs_train_dir = "/home/sxy/PycharmProjects/defect2/logs/train-4"
    tf.reset_default_graph()

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    # test accuracy
    # test_dir = "/home/sxy/PycharmProjects/defect2/data/test"
    # test, test_label = input_data.get_files(test_dir)
    # test_batch, test_label_batch = input_data.get_batch(test,
    #                                                     test_label,
    #                                                     IMG_W,
    #                                                     IMG_H,
    #                                                     BATCH_SIZE,
    #                                                     CAPACITY)
    # test_logits = model.inference(test_batch, BATCH_SIZE, N_CLASSES)
    # train_loss = model.losses(test_logits, test_label_batch)
    # train_op = model.trainning(train_loss, learning_rate)
    # train_acc = model.evaluation(test_logits, test_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 50 == 0:
                print("Step %d, train loss = %.2f, train accuracy = %.2f%%" %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, "model.ckpt")
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print("Done training -- epoch limit reached.")
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
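# The snippets in this section target the TensorFlow 1.x API. Where older code still uses the
# pre-1.0 names, they map onto the current ones as follows (a reference sketch only, not part of
# any of the original snippets; './logs' is a placeholder directory).
import tensorflow as tf

summary_op = tf.summary.merge_all()            # formerly tf.merge_all_summaries()
writer = tf.summary.FileWriter('./logs')       # formerly tf.train.SummaryWriter(...)
init_op = tf.global_variables_initializer()    # formerly tf.initialize_all_variables()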
def run_training():
    # call input_data.get_files() to get the image list and label list
    train, train_label = input_data.get_files(train_dir)
    # get image_batch and label_batch
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    # forward pass to get the logits
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    # compute the loss
    train_loss = model.losses(train_logits, train_label_batch)
    # optimize the loss
    train_op = model.trainning(train_loss, learning_rate)
    # compute the classification accuracy from the logits
    train__acc = model.evaluation(train_logits, train_label_batch)

    # merge the graph and training summaries
    summary_op = tf.summary.merge_all()
    # create a session
    sess = tf.Session()
    # write the training logs into logs_train_dir
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    # saver for the variables
    saver = tf.train.Saver()
    # initialize the variables
    sess.run(tf.global_variables_initializer())

    # create a coordinator to manage all threads started in this session
    coord = tf.train.Coordinator()
    # start the enqueue threads (how many threads enqueue is defined in tf.train.batch)
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            # coord.should_stop() reports whether all threads should stop; once every file in the
            # queue has been dequeued, an OutOfRangeError is raised and the session's threads stop.
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            # print the loss and accuracy every 50 steps
            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            # save the trained model every 2000 steps
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    # raised when the end of the file queue is reached
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()  # ask all threads to stop
    coord.join(threads)  # join the threads with the main thread and wait for them to finish
def run_training():
    # you need to change the directories to yours.
    train_dir = './data/train/train/'
    logs_train_dir = './logs/'

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    with tf.name_scope("training"):
        train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
        train_loss = model.losses(train_logits, train_label_batch)
        train_op = model.trainning(train_loss, learning_rate)
        train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 20 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)  # save the model and its parameters into logs_train_dir

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
def evaluate_running():
    with tf.Graph().as_default():
        data_dir = './data/KTH_RGB/'
        model_dir = './model/KTH_RGB6000/'

        train_image, train_label, val_image, val_label, n_test = input_data.get_files(
            data_dir, RATIO, ret_val_num=True)
        train_batch, train_label_batch = input_data.get_batch(
            train_image, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
        val_batch, val_label_batch = input_data.get_batch(
            val_image, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

        logits = models.AlexNet(val_batch, N_CLASSES)
        top_k_op = tf.nn.in_top_k(logits, val_label_batch, 1)
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(model_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            try:
                num_iter = int(math.ceil(n_test / BATCH_SIZE))
                true_count = 0
                total_sample_count = num_iter * BATCH_SIZE
                step = 0
                while step < num_iter and not coord.should_stop():
                    val_images_, val_labels_ = sess.run([val_batch, val_label_batch])
                    predictions = sess.run([top_k_op])
                    true_count += np.sum(predictions)
                    step += 1
                precision = true_count / total_sample_count
                print('precision = %.3f' % precision)
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
def evaluate_all_image():
    start_time = time.time()
    '''
    Test all images against the saved models and parameters.
    Returns the global accuracy over the test image set.
    ##############################################
    ## Note: the test images must have labels so the predictions can be compared with the ground truth.
    ##############################################
    '''
    # you need to change the directories to yours.
    test_dir = '/Users/sherry/Documents/Study/CS 6220/HW01/HW01_20190901/01_cats_vs_dogs/data/outlier_test/'
    N_CLASSES = 2

    print('-------------------------')
    test, test_label = input_data.get_files(test_dir)
    BATCH_SIZE = len(test)
    print('There are %d test images in total..' % BATCH_SIZE)
    print('-------------------------')

    test_batch, test_label_batch = input_data.get_batch(
        test, test_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    logits = model.inference(test_batch, BATCH_SIZE, N_CLASSES)
    testloss = model.losses(logits, test_label_batch)
    testacc = model.evaluation(logits, test_label_batch)

    logs_train_dir = '/Users/sherry/Documents/Study/CS 6220/HW01/HW01_20190901/01_cats_vs_dogs/data/logs_dataset1/train/'
    saver = tf.train.Saver()

    with tf.Session() as sess:
        print("Reading checkpoints...")
        ckpt = tf.train.get_checkpoint_state(logs_train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Loading success, global_step is %s' % global_step)
        else:
            print('No checkpoint file found')
        print('-------------------------')

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        test_loss, test_acc = sess.run([testloss, testacc])
        print('The model\'s loss is %.2f' % test_loss)
        correct = int(BATCH_SIZE * test_acc)
        print('Correct : %d' % correct)
        print('Wrong : %d' % (BATCH_SIZE - correct))
        print('The accuracy on the test images is %.2f%%' % (test_acc * 100.0))
        print("------------------------ Testing time is: %s seconds -----------------------" %
              (time.time() - start_time))

        coord.request_stop()
        coord.join(threads)
        sess.close()
    print("--- Testing time is: %s seconds ---" % (time.time() - start_time))
def evaluate_one_image(test1_data, i, total, N_CLASSES):
    '''Test one image against the saved models and parameters
    '''
    train, train_label, randomList = input_data.get_files(test1_data, total)
    image_array = get_one_image(train, i)

    image = tf.cast(image_array, tf.float32)
    image = tf.image.per_image_standardization(image)
    image = tf.reshape(image, [1, 32, 32, 3])
    logit = model.inference(image, 1, N_CLASSES)
    return logit, image_array, train_label, randomList
def evaluate_one_image():
    '''Test one image against the saved models and parameters
    '''
    # you need to change the directories to yours.
    train_dir = 'E:/Code/Dog vs Cat/test/'
    train = input_data.get_files(train_dir)
    image_array = get_one_image(train)

    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 2

        image = tf.cast(image_array, tf.float32)
        image = tf.image.per_image_standardization(image)
        image = tf.reshape(image, [1, 208, 208, 3])

        x = tf.placeholder(tf.float32, shape=[1, 208, 208, 3])
        logit = model.inference(x, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        # you need to change the directories to yours.
        logs_train_dir = 'E:/Code/Dog vs Cat/log/'
        saver = tf.train.Saver()

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')

            image_ = sess.run(image)
            prediction = sess.run(logit, feed_dict={x: image_})
            print(prediction)
            max_index = np.argmax(prediction)
            if prediction[:, max_index] > 0.7:
                if max_index == 0:
                    print('This is a cat with possibility %.6f' % prediction[:, 0])
                else:
                    print('This is a dog with possibility %.6f' % prediction[:, 1])
            else:
                print('input error!')
def run_training():
    train_dir = './train/'  # training data directory
    logs_train_dir = './save_model/'  # where the trained model is stored

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES, True)  # forward pass
    train_loss = model.losses(train_logits, train_label_batch)  # loss function
    train_op = model.trainning(train_loss, learning_rate)  # training op
    train__acc = model.evaluation(train_logits, train_label_batch)  # training accuracy

    summary_op = tf.summary.merge_all()  # merge op that generates all summary data at once
    sess = tf.Session()  # create the session
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)  # TensorBoard writer
    saver = tf.train.Saver()  # model saver
    sess.run(tf.global_variables_initializer())  # initialize all variables
    coord = tf.train.Coordinator()  # thread coordinator
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    csvfile = open('csv.csv', 'w', newline='')
    writer = csv.writer(csvfile)
    writer.writerow(['name', 'label'])

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
def run_training():
    train_dir = "./data/TRAIN/"
    logs_train_dir = "./logs/"

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 100 == 0:
                print('Step:', step, 'train loss:', tra_loss, 'train accuracy:', tra_acc)
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if tra_acc > 0.95 and step > 6000:
                checkpoint_path = os.path.join(logs_train_dir, "model")
                saver.save(sess, checkpoint_path, global_step=step)
                print("train success!")
                print('Step:', step, 'train loss:', tra_loss, 'train accuracy:', tra_acc)
                coord.request_stop()

    except tf.errors.OutOfRangeError:
        print("Done training -- epoch limit reached.")
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
def run_training():
    DIR_PRE = os.getcwd() + '/'
    train_dir = DIR_PRE + 'data/train/'
    logs_train_dir = DIR_PRE + 'logs/train/'
    os.makedirs(train_dir, exist_ok=True)
    os.makedirs(logs_train_dir, exist_ok=True)

    # get the list of all image files and the corresponding label list
    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
def run_training():
    # you need to change the directories to yours.
    # train_dir = '/home/kevin/tensorflow/hams_vs_hots/data/train/'
    train_dir = 'D:/workspace/uploadPicJudge3Class/train/'
    # logs_train_dir = '/home/kevin/tensorflow/hams_vs_hots/logs/train/'
    logs_train_dir = 'D:/workspace/uploadPicJudge3Class/logs/'

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 10 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0) + ' ' +
                      datetime.datetime.now().strftime('%Y-%m-%d %H_%M_%S'))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 100 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
def run_training():
    # you need to change the directories to yours.
    train_dir = '/home/kevin/tensorflow/cats_vs_dogs/data/train/'
    logs_train_dir = '/home/kevin/tensorflow/cats_vs_dogs/logs/train/'

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
def evaluate_one_image():
    # evaluate one original test image
    train_dir = "./data/ORIGIN_TEST/"
    train, train_label = input_data.get_files(train_dir)
    image_array, image_dir = get_one_origin_image(train)

    with tf.Graph().as_default():
        BATCH_SIZE = 1

        image = tf.cast(image_array, tf.float32)
        image = tf.reshape(image, [1, IMG_H, IMG_W, 3])
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        x = tf.placeholder(tf.float32, shape=[IMG_H, IMG_W, 3])

        logs_train_dir = "./logs/"
        saver = tf.train.Saver()

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split("/")[-1].split("-")[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print("Loading success, global_step is %s" % global_step)
            else:
                print("No checkpoint file found")

            print('The test picture is :', image_dir)
            prediction = sess.run(logit, feed_dict={x: image_array})
            print(prediction)
            max_index = np.argmax(prediction)
            if max_index == 0:
                # print("This is an eosinophil cell with possibility %.6f" % prediction[:, 0])
                print("This is an eosinophil cell")
            elif max_index == 1:
                # print("This is a lymphocyte cell with possibility %.6f" % prediction[:, 1])
                print("This is a lymphocyte cell")
            elif max_index == 2:
                # print("This is a monocyte cell with possibility %.6f" % prediction[:, 2])
                print("This is a monocyte cell")
            elif max_index == 3:
                # print("This is a neutrophil cell with possibility %.6f" % prediction[:, 3])
                print("This is a neutrophil cell")
            else:
                print('cannot recognize the cell')

    cv2.waitKey(0)
    cv2.destroyAllWindows()
def evaluate_one_image():
    '''Test one image against the saved models and parameters
    '''
    # you need to change the directories to yours.
    train_dir = './data/train/train/'
    train, train_label = input_data.get_files(train_dir)
    image_array = get_one_image(train)  # pick one image at random

    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 2

        image = tf.cast(image_array, tf.float32)
        image = tf.image.per_image_standardization(image)  # standardize the image
        image = tf.reshape(image, [1, 208, 208, 3])
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)  # the last layer has no activation, so apply softmax here

        x = tf.placeholder(tf.float32, shape=[208, 208, 3])  # placeholder intended for feeding the image

        # you need to change the directories to yours.
        logs_train_dir = './logs'
        saver = tf.train.Saver()

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)  # read the model structure and parameters
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')

            # the model is ready; predict the class of the image
            prediction = sess.run(logit, feed_dict={x: image_array})
            max_index = np.argmax(prediction)  # take the class with the larger probability
            if max_index == 0:
                print('This is a cat with possibility %.6f' % prediction[:, 0])  # cat
            else:
                print('This is a dog with possibility %.6f' % prediction[:, 1])  # dog
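# In the snippet above (and in a few of the earlier ones) the graph is built on the constant
# `image` tensor, so the value fed for the placeholder `x` never actually reaches the network.
# Below is a minimal sketch of the placeholder-driven variant of the same pattern, as used by the
# 'Dog vs Cat' snippet further above; model.inference and the 208x208x3 input shape are assumptions
# carried over from the surrounding snippets, not part of the original code.
import numpy as np
import tensorflow as tf

def classify_array(image_array, logs_train_dir, n_classes=2):
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, shape=[208, 208, 3])   # build the graph on the placeholder
        image = tf.image.per_image_standardization(x)
        image = tf.reshape(image, [1, 208, 208, 3])
        logit = tf.nn.softmax(model.inference(image, 1, n_classes))
        saver = tf.train.Saver()
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            # the fed array is what now flows through the network
            return sess.run(logit, feed_dict={x: np.asarray(image_array, dtype=np.float32)})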
def run_training():
    train_dir = 'C://Users/Sizhe/Desktop/CatsvsDogs/data/train/'
    logs_train_dir = 'C://Users/Sizhe/Desktop/CatsvsDogs/data/logs/train/'

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(train, train_label,
                                                          image_width, image_height,
                                                          batch_size, capacity)

    train_logits = model.inference(train_batch, batch_size, n_class)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.training(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(max_step):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            # report every 50 steps while training
            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                      (step, tra_loss, tra_acc * 100.00))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or step == max_step - 1:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Training finished -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
def evaluate_one_image():
    '''Test one image against the saved models and parameters
    '''
    # you need to change the directories to yours.
    train_dir = '/userDocs/user000/workspaces/2018-06-30-tensorflowCNN/Data/catVsDog/train/'
    train, train_label = input_data.get_files(train_dir)
    image_array = get_one_image(train)

    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 2

        image = tf.cast(image_array, tf.float32)
        image = tf.image.per_image_standardization(image)
        image = tf.reshape(image, [1, 208, 208, 3])
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        x = tf.placeholder(tf.float32, shape=[208, 208, 3])

        # you need to change the directories to yours.
        logs_train_dir = '/userDocs/user000/workspaces/2018-06-30-tensorflowCNN/Data/catVsDog/trainedModels/'
        saver = tf.train.Saver()

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')

            prediction = sess.run(logit, feed_dict={x: image_array})
            max_index = np.argmax(prediction)
            if max_index == 0:
                print('This is a cat with possibility %.6f' % prediction[:, 0])
            else:
                print('This is a dog with possibility %.6f' % prediction[:, 1])

    plt.imshow(image_array)
    plt.show()
def run_training():
    print('let us begin....')
    train_dir = '../data/train/'
    logs_train_dir = './train/'

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_H, IMG_W, BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, lr)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()  # merge all summaries
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                print('coord stop!')
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 50 == 0:
                print('Step: %d, train_loss = %.2f, train_accuracy = %.2f\n' %
                      (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2500 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
import input_data
import model
import sys

N_CLASSES = 6
IMG_W = 256  # resize the images; if the input image is too large, training will be very slow
IMG_H = 256
BATCH_SIZE = 16
CAPACITY = 100000
MAX_STEP = 10000  # with current parameters, it is suggested to use MAX_STEP > 10k
learning_rate = 0.0001  # with current parameters, it is suggested to use a learning rate < 0.0001

s_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+/CK+YuanTu'
T_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+/CK+X_mid'
logs_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+'

s_train, s_train_label = input_data.get_files(s_train_dir)
# print(s_train)
s_train_batch, s_train_label_batch = input_data.get_batch(s_train,
                                                          s_train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
# print(s_train_label_batch)
T_train, T_train_label = input_data.get_files(T_train_dir)
T_train_batch, T_train_label_batch = input_data.get_batch(T_train,
                                                          T_train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
def training():
    train, train_label, val, val_label = input_data.get_files(train_dir, RATIO)
    train_batch, train_label_batch = input_data.get_batch(train, train_label,
                                                          IMG_W, IMG_H,
                                                          BATCH_SIZE, CAPACITY)
    val_batch, val_label_batch = input_data.get_batch(val, val_label,
                                                      IMG_W, IMG_H,
                                                      BATCH_SIZE, CAPACITY)

    logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    loss = model.losses(logits, train_label_batch)
    train_op = model.trainning(loss, learning_rate)
    acc = model.evaluation(logits, train_label_batch)

    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE])

    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        summary_op = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(train_logs_dir, sess.graph)
        val_writer = tf.summary.FileWriter(val_logs_dir, sess.graph)

        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break

                tra_images, tra_labels = sess.run([train_batch, train_label_batch])
                _, tra_loss, tra_acc = sess.run([train_op, loss, acc],
                                                feed_dict={x: tra_images, y_: tra_labels})

                if step % 50 == 0:
                    print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %
                          (step, tra_loss, tra_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)

                if step % 200 == 0 or (step + 1) == MAX_STEP:
                    val_images, val_labels = sess.run([val_batch, val_label_batch])
                    val_loss, val_acc = sess.run([loss, acc],
                                                 feed_dict={x: val_images, y_: val_labels})
                    print('** Step %d, val loss = %.2f, val accuracy = %.2f%% **' %
                          (step, val_loss, val_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    val_writer.add_summary(summary_str, step)

                if step % 2000 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(train_logs_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)