def run_training():
    # You need to change the directories to yours.
    # train_dir = '/home/kevin/tensorflow/cats_vs_dogs/data/train/'
    train_dir = 'D:/tensorflow/mydata/cat_dog2/'  # my dir -- 20170727-csq
    # logs_train_dir = '/home/kevin/tensorflow/cats_vs_dogs/logs/train/'
    logs_train_dir = 'D:/tensorflow/mylog/cat_dog2/'

    train, train_label = input_data.get_files(train_dir)
    print(train)
    print(train_label)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    print(train_batch)
    print(train_label_batch)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    print(summary_op)

    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
        saver = tf.train.Saver()

        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            for step in np.arange(MAX_STEP):
                print(step)
                if coord.should_stop():
                    break
                _, tra_loss, tra_acc = sess.run(
                    [train_op, train_loss, train_acc])

                if step % cnt_summary == 0:
                    print('Step %d, train loss = %.2f, train accuracy = %.2f%%'
                          % (step, tra_loss, tra_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)

                if step % cnt_cache == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
            coord.join(threads)
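# run_training() above relies on module-level imports and hyperparameter
# constants that are not shown in the listing. A minimal sketch of what they
# might look like -- the concrete values are assumptions, adjust them to your
# own setup:
import os
import numpy as np
import tensorflow as tf
import input_data
import model

N_CLASSES = 2        # cats vs. dogs
IMG_W = 208          # input width fed to the network (assumed)
IMG_H = 208          # input height fed to the network (assumed)
BATCH_SIZE = 16      # images per batch (assumed)
CAPACITY = 2000      # queue capacity of the input pipeline (assumed)
MAX_STEP = 10000     # total number of training steps (assumed)
learning_rate = 0.0001
cnt_summary = 50     # how often to print and log a summary (assumed)
cnt_cache = 2000     # how often to save a checkpoint (assumed)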
def evaluate():
    """Evaluate the saved model: restore the latest checkpoint and run it on
    batches read from a TFRecords file."""
    # You need to change the directories to yours.
    # img_dir = '/home/hjxu/PycharmProjects/01_cats_vs_dogs/222.jpg'
    # image_array = get_one_img(img_dir)
    with tf.Graph().as_default():
        # You need to change the directories to yours.
        logs_train_dir = 'recordstrain/'
        tfrecords_file = 'tfrecords2'

        train_batch, train_batch_d, train_label_batch = cr.read_and_decode(
            tfrecords_file, batch_size=BATCH_SIZE)
        train_batch = tf.cast(train_batch, dtype=tf.float32)
        train_batch_d = tf.cast(train_batch_d, dtype=tf.float32)
        train_label_batch = tf.cast(train_label_batch, dtype=tf.int64)

        train_logits = model1.inference(train_batch, BATCH_SIZE, N_CLASSES)
        train_loss = model1.losses(train_logits, train_label_batch)
        train_op = model1.trainning(train_loss, learning_rate)
        train_acc = model1.evaluation(train_logits, train_label_batch)

        m = np.empty(MAX_STEP)  # per-step accuracy, averaged at the end
        saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            try:
                for step in np.arange(MAX_STEP):
                    _, tra_loss, tra_acc = sess.run(
                        [train_op, train_loss, train_acc])
                    if coord.should_stop():
                        break
                    print('test: train loss = %.2f, train accuracy = %.2f%%'
                          % (tra_loss, tra_acc * 100.0))
                    m[step] = tra_acc
            finally:
                coord.request_stop()
                coord.join(threads)

    print(np.mean(m))
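# The commented-out lines in evaluate() hint at testing a single image against
# the saved model. A minimal hedged sketch of such a helper, under these
# assumptions: get_one_image and evaluate_one_image are hypothetical names not
# present in the original code, the network expects 208x208x3 inputs, and the
# checkpoint directory matches evaluate() above.
from PIL import Image

def get_one_image(img_dir):
    # Load one image and resize it to the assumed network input size.
    image = Image.open(img_dir).resize((208, 208))
    return np.array(image, dtype=np.float32)

def evaluate_one_image(img_dir, logs_train_dir='recordstrain/'):
    image_array = get_one_image(img_dir)
    with tf.Graph().as_default():
        # Build a batch of one image and reuse the inference graph.
        image = tf.reshape(tf.cast(image_array, tf.float32), [1, 208, 208, 3])
        logits = tf.nn.softmax(model1.inference(image, 1, N_CLASSES))
        saver = tf.train.Saver()
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                prediction = sess.run(logits)
                print('Predicted class: %d' % np.argmax(prediction))
            else:
                print('No checkpoint file found')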
train_dir = r'E:\PyCharmProject\mycatvsdog\PetImages'  # change to your own data directory
logs_train_dir = r'E:\PyCharmProject\mycatvsdog\log'

train, train_label = input_data1.get_file(train_dir)
dataset = input_data1.get_batch(train, train_label, BATCH_SIZE)
# iterator = dataset.make_initializable_iterator()
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()

train_batch = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_H, IMG_W, 3])
train_label_batch = tf.placeholder(tf.int32, shape=[BATCH_SIZE])

train_logits = model1.mynn_inference(train_batch, BATCH_SIZE, N_CLASSES)
train_loss = model1.losses(train_logits, train_label_batch)
train_op = model1.training(train_loss, learning_rate)
train_acc = model1.evaluation(train_logits, train_label_batch)
summary_op = tf.compat.v1.summary.merge_all()

# Line charts: left panel for accuracy, right panel for loss.
step_list = list(range(100))
cnn_list1 = []
cnn_list2 = []
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1)
ax.yaxis.grid(True)
ax.set_title('accuracy', fontsize=14, y=1.02)
ax.set_xlabel("step")
ax.set_ylabel("accuracy")
bx = fig.add_subplot(1, 2, 2)
bx.yaxis.grid(True)
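# The fragment above prepares the figure but never fills or draws cnn_list1 /
# cnn_list2. A hedged sketch of how the training loop might record and plot
# them, assuming cnn_list1 collects per-step accuracy and cnn_list2 per-step
# loss for the first len(step_list) steps:
#
# Inside the training loop (assumed):
#     if step < len(step_list):
#         cnn_list1.append(tra_acc)
#         cnn_list2.append(tra_loss)
#
# After training, draw both panels:
bx.set_title('loss', fontsize=14, y=1.02)
bx.set_xlabel("step")
bx.set_ylabel("loss")
ax.plot(step_list, cnn_list1, color='r')
bx.plot(step_list, cnn_list2, color='b')
plt.tight_layout()
plt.show()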
def run_training():
    logs_train_dir = 'G:\\dataS-UNIWARD0.4\\logs\\train'
    logs_val_dir = 'G:\\dataS-UNIWARD0.4\\logs\\val'
    tfrecords_traindir = 'G:\\dataS-UNIWARD0.4\\S_UNIWARD0.4train.tfrecords'
    tfrecords_valdir = 'G:\\dataS-UNIWARD0.4\\S_UNIWARD0.4val.tfrecords'

    # Get training and validation batches from the TFRecords files.
    train_batch, train_label_batch = input_data1.read_and_decode(
        tfrecords_traindir, BATCH_SIZE)
    val_batch, val_label_batch = input_data1.read_and_decode(
        tfrecords_valdir, BATCH_SIZE)

    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 256, 256, 1])
    y_ = tf.placeholder(tf.int32, shape=[BATCH_SIZE])

    logits = model1.inference(x, BATCH_SIZE, N_CLASSES)
    loss = model1.losses(logits, y_)
    acc = model1.evaluation(logits, y_)
    train_op = model1.trainning(loss, learning_rate)

    sess = tf.Session()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    summary_op = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    val_writer = tf.summary.FileWriter(logs_val_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            tra_images, tra_labels = sess.run([train_batch, train_label_batch])
            _, tra_loss, tra_acc = sess.run([train_op, loss, acc],
                                            feed_dict={x: tra_images,
                                                       y_: tra_labels})

            if step % 2 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%'
                      % (step, tra_loss, tra_acc * 100.0))
                # summary_str = sess.run(summary_op)
                # train_writer.add_summary(summary_str, step)

            if step % 200 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run([val_batch, val_label_batch])
                val_loss, val_acc = sess.run([loss, acc],
                                             feed_dict={x: val_images,
                                                        y_: val_labels})
                print('** Step %d, val loss = %.2f, val accuracy = %.2f%% **'
                      % (step, val_loss, val_acc * 100.0))
                # summary_str = sess.run(summary_op)
                # val_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
        coord.join(threads)
    sess.close()
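# In the graph above, loss and acc (and therefore any tf.summary nodes they
# register, assuming model1.losses/evaluation add them as in the other
# snippets) depend on the placeholders x and y_, which is likely why the
# summary-writing lines were commented out. A hedged sketch of the logging
# block with summary_op evaluated under the same feed_dict as the train step:
if step % 2 == 0:
    print('Step %d, train loss = %.2f, train accuracy = %.2f%%'
          % (step, tra_loss, tra_acc * 100.0))
    summary_str = sess.run(summary_op,
                           feed_dict={x: tra_images, y_: tra_labels})
    train_writer.add_summary(summary_str, step)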
def training():
    train, train_label, val, val_label = input_data.get_files(train_dir, RATIO)
    train_batch, train_label_batch = input_data.get_batch(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    val_batch, val_label_batch = input_data.get_batch(
        val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    logits = model1.inference(train_batch, BATCH_SIZE, N_CLASSES)
    loss = model1.losses(logits, train_label_batch)
    train_op = model1.trainning(loss, learning_rate)
    acc = model1.evaluation(logits, train_label_batch)

    # x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    # y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE])

    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        summary_op = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(train_logs_dir, sess.graph)
        val_writer = tf.summary.FileWriter(val_logs_dir, sess.graph)

        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break
                tra_images, tra_labels = sess.run(
                    [train_batch, train_label_batch])
                _, tra_loss, tra_acc = sess.run([train_op, loss, acc])
                # feed_dict={x: tra_images, y_: tra_labels})

                if step % 50 == 0:
                    print('Step %d, train loss = %.2f, train accuracy = %.2f%%'
                          % (step, tra_loss, tra_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)

                if step % 200 == 0 or (step + 1) == MAX_STEP:
                    val_loss, val_acc = sess.run([loss, acc])
                    print('** Step %d, val loss = %.2f, val accuracy = %.2f%% **'
                          % (step, val_loss, val_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    val_writer.add_summary(summary_str, step)

                if step % 2000 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(train_logs_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
            coord.join(threads)
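# A minimal entry-point sketch (an assumption -- the original listing does not
# show one), assuming train_dir, train_logs_dir, val_logs_dir, RATIO and the
# other hyperparameters are defined at module level as in the snippets above:
if __name__ == '__main__':
    training()      # train, validate, and checkpoint the model
    # evaluate()    # optionally score the latest checkpoint afterwards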