def main(_):
    ff = open(FLAGS.out_file, 'w')
    if not ff:
        raise RuntimeError('Failed to open output file!')
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.7
    config.gpu_options.allow_growth = True
    if not tf.gfile.Exists(FLAGS.data_dir):
        raise RuntimeError('Data directory does not exist!')

    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32,
                           [None, FLAGS.patch_size, FLAGS.patch_size, 3], 'x')
    y = build.net(x, False, FLAGS)
    # update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # with tf.control_dependencies(update_ops):
    pred = tf.nn.softmax(y, 1)
    with tf.name_scope("saver"):
        saver = tf.train.Saver(name="saver")

    # Read the evaluation list: each line is "<image name> <label>".
    f = open(os.path.join(FLAGS.meta_dir, FLAGS.set) + '.txt', 'r')
    image_names = []
    labels = []
    line = f.readline()
    while line:
        l = line.split(' ')
        if len(l) == 2:
            image_name = l[0]
            label = l[1]
        else:
            # Some image names contain a single space themselves.
            image_name = l[0] + ' ' + l[1]
            label = l[2]
        label = label[0:-1]  # strip the trailing newline
        image_names.append(image_name.split('.')[0] + '-' + FLAGS.extra + '.' + FLAGS.format)
        labels.append(int(label))
        line = f.readline()
    f.close()

    # Read the class-id-to-name mapping: each line is "<label> <class name>".
    f = open(os.path.join(FLAGS.meta_dir, 'spc_classes.txt'), 'r')
    meta = {}
    line = f.readline()
    while line:
        label, class_name = line.split(' ')
        class_name = class_name[0:-1]
        meta[int(label)] = class_name
        line = f.readline()
    f.close()

    confusion = np.zeros(shape=(10, 10), dtype=np.uint32)    # patch-level confusion matrix
    confusion_i = np.zeros(shape=(10, 10), dtype=np.uint32)  # image-level confusion matrix
    total = 0.
    correct = 0.
    total_p = 0.
    correct_p = 0.

    with tf.Session(config=config) as sess:
        if tf.gfile.Exists(os.path.join(FLAGS.ckpt_dir, 'checkpoint')):
            saver.restore(
                sess,
                tf.train.latest_checkpoint(FLAGS.ckpt_dir)
                if FLAGS.model_name is None
                else os.path.join(FLAGS.ckpt_dir, FLAGS.model_name))
        else:
            raise RuntimeError("Checkpoint files don't exist!")

        for i in range(len(labels)):
            label = labels[i]
            class_name = meta[label]
            image_name = image_names[i]
            full_path = os.path.join(FLAGS.data_dir, class_name, image_name)
            img = plt.imread(full_path)
            # Take one 512x512 crop, then sample FLAGS.patches patches from it.
            for img in get_patches(img, 1, 512):
                data = np.ndarray(shape=(FLAGS.patches, FLAGS.patch_size, FLAGS.patch_size, 3),
                                  dtype=np.float32)
                for n, patch in enumerate(get_patches(img, FLAGS.patches, FLAGS.patch_size)):
                    patch = standardization(patch)
                    data[n, :] = patch
                # data = standardization(data)
                prediction = sess.run(pred, feed_dict={x: data})
                prediction0 = np.argmax(prediction, 1)

                # Patch-level statistics.
                for n in prediction0:
                    if n == label:
                        correct_p = correct_p + 1
                    confusion[label, n] = confusion[label, n] + 1
                total_p = total_p + FLAGS.patches

                # Image-level prediction: sum the patch softmax scores.
                # count = np.bincount(prediction0)
                # prediction = np.argmax(count)
                prediction = np.sum(prediction, 0)
                prediction = np.argmax(prediction)
                confusion_i[label, prediction] = confusion_i[label, prediction] + 1
                print("predict %d while true label is %d." % (prediction, label), file=ff)
                ff.flush()
                total = total + 1
                if prediction == label:
                    correct = correct + 1

        print('accuracy (patch level) = %f' % (correct_p / total_p), file=ff)
        print('accuracy (image level) = %f' % (correct / total), file=ff)
        print('confusion matrix -- patch level:', file=ff)
        print(confusion, file=ff)
        print('confusion matrix -- image level:', file=ff)
        print(confusion_i, file=ff)
        print('/|\\', file=ff)
        print(' |', file=ff)
        print('actual', file=ff)
        print(' |', file=ff)
        print(' ---prediction--->', file=ff)
    ff.close()
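# NOTE: get_patches() and standardization() are called above but defined elsewhere
# in this repository. The helpers below are a minimal sketch of what the call sites
# appear to assume -- get_patches(img, n, size) yields n random size x size crops,
# and standardization(patch) rescales a patch to zero mean / unit variance. They are
# illustrative assumptions, not the original implementations.
def get_patches(img, n_patches, patch_size):
    """Yield n_patches random patch_size x patch_size crops from an H x W x C image."""
    h, w = img.shape[0], img.shape[1]
    for _ in range(n_patches):
        top = np.random.randint(0, h - patch_size + 1)
        left = np.random.randint(0, w - patch_size + 1)
        yield img[top:top + patch_size, left:left + patch_size, :]


def standardization(patch):
    """Zero-mean / unit-variance scaling, analogous to tf.image.per_image_standardization."""
    patch = patch.astype(np.float32)
    # Guard against division by zero on flat patches.
    std = max(patch.std(), 1.0 / np.sqrt(patch.size))
    return (patch - patch.mean()) / std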
def main(_):
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.95
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    # config.log_device_placement = True
    if not tf.gfile.Exists(FLAGS.data_dir):
        raise RuntimeError('Data directory does not exist!')
    # if tf.gfile.Exists(FLAGS.log_dir):
    #     tf.gfile.DeleteRecursively(FLAGS.log_dir)
    tf.gfile.MakeDirs(FLAGS.log_dir)
    # if not tf.gfile.Exists(FLAGS.ckpt_dir):
    tf.gfile.MakeDirs(os.path.join(FLAGS.ckpt_dir, 'best'))
    f = open(FLAGS.out_file, 'a')
    if not f:
        raise RuntimeError('Failed to open output file!')

    with tf.device('/cpu:0'):
        num_gpus = len(FLAGS.gpu.split(','))
        global_step = tf.Variable(FLAGS.start_step, name='global_step', trainable=False)
        # learning_rate = tf.train.exponential_decay(0.05, global_step, 2000, 0.9, staircase=True)
        learning_rate = tf.train.exponential_decay(0.1, global_step, 1000, 0.95, staircase=True)
        # learning_rate = tf.train.piecewise_constant(global_step, [24000, 48000, 72000, 108000, 144000],
        #                                             [0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001])
        tf.summary.scalar('learning rate', learning_rate)
        # opt = tf.train.AdamOptimizer(learning_rate)
        opt = tf.train.MomentumOptimizer(learning_rate, momentum=FLAGS.momentum)
        # opt = tf.train.GradientDescentOptimizer(learning_rate)
        # learning_rate = tf.train.exponential_decay(0.01, global_step, 32000, 0.1)
        # opt = tf.train.GradientDescentOptimizer(learning_rate)

        tower_grads = []
        tower_loss = []
        tower_acc = []
        tower_acc_v = []

        # Training and validation input pipelines feeding per-GPU prefetch queues.
        images, labels = input_pipeline(
            tf.train.match_filenames_once(
                os.path.join(FLAGS.data_dir, 'train', '*.tfrecords')),
            FLAGS.batch_size)
        batch_queue = tf.contrib.slim.prefetch_queue.prefetch_queue(
            [images, labels], capacity=2 * num_gpus)
        images_v, labels_v = input_pipeline(
            tf.train.match_filenames_once(
                os.path.join(FLAGS.data_dir, 'valid', '*.tfrecords')),
            128 // num_gpus)
        batch_queue_v = tf.contrib.slim.prefetch_queue.prefetch_queue(
            [images_v, labels_v], capacity=2 * num_gpus)

        for i in range(num_gpus):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('tower_%d' % i) as scope:
                    image_batch, label_batch = batch_queue.dequeue()
                    logits = build.net(image_batch, is_training, FLAGS)
                    losses.sparse_softmax_cross_entropy(labels=label_batch, logits=logits, scope=scope)
                    total_loss = losses.get_losses(scope=scope) + losses.get_regularization_losses(scope=scope)
                    total_loss = tf.add_n(total_loss)
                    grads = opt.compute_gradients(total_loss)
                    tower_grads.append(grads)
                    tower_loss.append(losses.get_losses(scope=scope))
                    with tf.name_scope('accuracy'):
                        correct_prediction = tf.equal(
                            tf.reshape(tf.argmax(logits, 1), [-1, 1]),
                            tf.cast(label_batch, tf.int64))
                        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                    tower_acc.append(accuracy)
                    tf.get_variable_scope().reuse_variables()

                    # Validation graph on the same tower, reusing the variables.
                    image_batch_v, label_batch_v = batch_queue_v.dequeue()
                    logits_v = build.net(image_batch_v, False, FLAGS)
                    correct_prediction = tf.equal(
                        tf.reshape(tf.argmax(logits_v, 1), [-1, 1]),
                        tf.cast(label_batch_v, tf.int64))
                    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                    tower_acc_v.append(accuracy)

        with tf.name_scope('scores'):
            with tf.name_scope('accuracy'):
                accuracy = tf.reduce_mean(tf.stack(tower_acc, axis=0))
            with tf.name_scope('accuracy_v'):
                accuracy_v = tf.reduce_mean(tf.stack(tower_acc_v, axis=0))
            with tf.name_scope('batch_loss'):
                batch_loss = tf.add_n(tower_loss)[0] / num_gpus
            tf.summary.scalar('loss', batch_loss)
            tf.summary.scalar('accuracy', accuracy)

        grads = average_gradients(tower_grads)
        variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step)
        variables_averages_op = variable_averages.apply(tf.trainable_variables())
        with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
            apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        train_op = tf.group(apply_gradient_op, variables_averages_op)
        # train_op = apply_gradient_op

        summary_op = tf.summary.merge_all()
        saver = tf.train.Saver(name="saver", max_to_keep=10)
        saver_best = tf.train.Saver(name='best', max_to_keep=100)

    with tf.Session(config=config) as sess:
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        if tf.gfile.Exists(os.path.join(FLAGS.ckpt_dir, 'checkpoint')):
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpt_dir))
        else:
            sess.run(tf.global_variables_initializer())
        train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)
        train_writer.flush()

        # Running averages of the last five logged accuracies.
        cache = np.ones(5, dtype=np.float32) / FLAGS.num_classes
        cache_v = np.ones(5, dtype=np.float32) / FLAGS.num_classes
        d = 1000  # logging / checkpoint interval
        best = 0
        for i in range(FLAGS.start_step, FLAGS.max_steps + 1):
            if i % d == 0:
                # Record summaries plus training and validation accuracy.
                acc, loss, summ, lr, acc_v = sess.run(
                    [accuracy, batch_loss, summary_op, learning_rate, accuracy_v],
                    feed_dict={is_training: False})
                cache[int(i / d) % 5] = acc
                cache_v[int(i / d) % 5] = acc_v
                train_writer.add_summary(summ, i)
                print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), file=f)
                print('step %d: acc(t)=%f(%f), loss=%f; acc(v)=%f(%f); lr=%e'
                      % (i, acc, cache.mean(), loss, acc_v, cache_v.mean(), lr), file=f)
                saver.save(sess, os.path.join(FLAGS.ckpt_dir, FLAGS.model_name), global_step=i)
                if acc_v > 0.90:
                    saver_best.save(sess,
                                    os.path.join(FLAGS.ckpt_dir, 'best', FLAGS.model_name),
                                    global_step=i)
                f.flush()
            sess.run(train_op, feed_dict={is_training: True})

        coord.request_stop()
        coord.join(threads)
        train_writer.close()
    f.close()
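# NOTE: average_gradients() is called above but not defined in this file. The sketch
# below follows the standard TensorFlow multi-GPU pattern (average each variable's
# gradient across the per-tower (gradient, variable) lists). It is an assumption
# about the helper, not the project's original code.
def average_gradients(tower_grads):
    """Average gradients over towers; tower_grads is a list of compute_gradients() outputs."""
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars holds ((grad_gpu0, var), (grad_gpu1, var), ...) for one variable.
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, axis=0), 0)
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads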
def main(_):
    ff = open(FLAGS.out_file, 'w')
    if not ff:
        raise RuntimeError('Failed to open output file!')
    print('fname,camera', file=ff)  # submission CSV header
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.7
    config.gpu_options.allow_growth = True

    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32,
                           [None, FLAGS.patch_size, FLAGS.patch_size, 3], 'x')
    y = build.net(x, False, FLAGS)
    # update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # with tf.control_dependencies(update_ops):
    pred = tf.nn.softmax(y, 1)

    # Restore either the moving-average (EMA) weights or the raw weights.
    if FLAGS.ema:
        variable_averages = tf.train.ExponentialMovingAverage(0.9999)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore, name='saver')
    else:
        saver = tf.train.Saver(name="saver")

    # No label file here: the test images are simply listed from the data directory.
    image_names = os.listdir(FLAGS.data_dir)

    # Read the class-id-to-name mapping: each line is "<label> <class name>".
    f = open(os.path.join(FLAGS.meta_dir, 'spc_classes.txt'), 'r')
    meta = {}
    line = f.readline()
    while line:
        label, class_name = line.split(' ')
        class_name = class_name[0:-1]
        meta[int(label)] = class_name
        line = f.readline()
    f.close()

    with tf.Session(config=config) as sess:
        if tf.gfile.Exists(os.path.join(FLAGS.ckpt_dir, 'checkpoint')):
            saver.restore(
                sess,
                tf.train.latest_checkpoint(FLAGS.ckpt_dir)
                if FLAGS.model_name is None
                else os.path.join(FLAGS.ckpt_dir, FLAGS.model_name))
        else:
            raise RuntimeError("Checkpoint files don't exist!")

        for i in range(len(image_names)):
            image_name = image_names[i]
            full_path = os.path.join(FLAGS.data_dir, image_name)
            img = plt.imread(full_path)
            if img.shape[2] == 4:  # drop the alpha channel if present
                img = img[:, :, 0:3]
            data = np.ndarray(shape=(FLAGS.patches, FLAGS.patch_size, FLAGS.patch_size, 3),
                              dtype=np.float32)
            for n, patch in enumerate(get_patches(img, FLAGS.patches, FLAGS.patch_size)):
                patch = standardization(patch)
                data[n, :] = patch
            # data = standardization(data)
            prediction = sess.run(pred, feed_dict={x: data})
            prediction = np.argmax(prediction, 1)
            # Image-level prediction: majority vote over the patch predictions.
            count = np.bincount(prediction)
            prediction = np.argmax(count)
            # Alternative: sum the patch softmax scores instead of voting.
            # prediction = np.argmax(np.sum(prediction, 0))
            print("%s,%s" % (image_name, meta[prediction]), file=ff)
            ff.flush()
    ff.close()
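# For reference, the parsing above assumes 'spc_classes.txt' holds one
# "<integer label> <class name>" pair per line, and the script emits a
# "fname,camera"-style CSV. The values below are hypothetical placeholders,
# not actual project data:
#
#   spc_classes.txt        output CSV (FLAGS.out_file)
#   ---------------        ---------------------------
#   0 camera_model_a       fname,camera
#   1 camera_model_b       image_001.tif,camera_model_b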
def main(_):
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.95
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    # config.log_device_placement = True
    if not tf.gfile.Exists(FLAGS.data_dir):
        raise RuntimeError('Data directory does not exist!')
    # if tf.gfile.Exists(FLAGS.log_dir):
    #     tf.gfile.DeleteRecursively(FLAGS.log_dir)
    tf.gfile.MakeDirs(FLAGS.log_dir)
    # if not tf.gfile.Exists(FLAGS.ckpt_dir):
    tf.gfile.MakeDirs(os.path.join(FLAGS.ckpt_dir, 'best'))
    # Append to the log when resuming from a checkpoint, otherwise start fresh.
    f = open(FLAGS.out_file + '.txt', 'a' if FLAGS.start_step != 0 else 'w')
    if not f:
        raise RuntimeError('Failed to open output file!')

    with tf.device('/cpu:0'):
        num_gpus = len(FLAGS.gpu.split(','))
        global_step = tf.Variable(FLAGS.start_step, name='global_step', trainable=False)
        # Alternative schedules that were tried:
        # learning_rate = tf.train.piecewise_constant(global_step,
        #     [500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000],
        #     [0.00001, 0.00005, 0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0])
        # Cyclic learning rate:
        # step_size = 10000
        # learning_rate = tf.train.exponential_decay(1.0, global_step, 2 * step_size, 0.5, staircase=True)
        # cycle = tf.floor(1 + tf.cast(global_step, tf.float32) / step_size / 2.)
        # xx = tf.abs(tf.cast(global_step, tf.float32) / step_size - 2. * tf.cast(cycle, tf.float32) + 1.)
        # learning_rate = 1e-4 + (1e-1 - 1e-4) * tf.maximum(0., (1 - xx)) * learning_rate
        # learning_rate = tf.train.piecewise_constant(global_step, [10000, 70000, 120000, 170000, 220000],
        #                                             [0.01, 0.1, 0.001, 0.0001, 0.00001, 0.000001])
        # learning_rate = tf.constant(0.001)
        learning_rate = tf.train.exponential_decay(0.05, global_step, 30000, 0.1, staircase=True)
        print('learning_rate = tf.train.exponential_decay(0.05, global_step, 30000, 0.1, staircase=True)',
              file=f)
        # opt = tf.train.AdamOptimizer(learning_rate)
        opt = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
        # opt = tf.train.GradientDescentOptimizer(learning_rate)
        print('opt = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)', file=f)
        print('weight decay = %e' % FLAGS.weight_decay, file=f)
        f.flush()
        tf.summary.scalar('learning rate', learning_rate)

        tower_grads = []
        tower_loss = []
        tower_acc = []

        # One reader thread per TFRecord file.
        images_t, labels_t = input_pipeline(
            tf.train.match_filenames_once(
                os.path.join(FLAGS.data_dir, 'train', '*.tfrecords')),
            FLAGS.batch_size * num_gpus,
            read_threads=len(os.listdir(os.path.join(FLAGS.data_dir, 'train'))))
        images_v, labels_v = input_pipeline(
            tf.train.match_filenames_once(
                os.path.join(FLAGS.data_dir, 'valid', '*.tfrecords')),
            (256 // num_gpus) * num_gpus,
            read_threads=len(os.listdir(os.path.join(FLAGS.data_dir, 'valid'))),
            if_train=False)

        # Batches are fed through placeholders and split evenly across the GPUs.
        image_batch0 = tf.placeholder(
            tf.float32, [None, FLAGS.patch_size, FLAGS.patch_size, channels], 'imgs')
        label_batch0 = tf.placeholder(tf.int32, [None, 1], 'labels')
        image_batch = tf.split(image_batch0, num_gpus, 0)
        label_batch = tf.split(label_batch0, num_gpus, 0)

        for i in range(num_gpus):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('tower_%d' % i) as scope:
                    logits = build.net(image_batch[i], is_training, FLAGS)
                    losses.sparse_softmax_cross_entropy(labels=label_batch[i], logits=logits, scope=scope)
                    total_loss = losses.get_losses(scope=scope) + losses.get_regularization_losses(scope=scope)
                    total_loss = tf.add_n(total_loss)
                    grads = opt.compute_gradients(total_loss)
                    tower_grads.append(grads)
                    tower_loss.append(losses.get_losses(scope=scope))
                    with tf.name_scope('accuracy'):
                        correct_prediction = tf.equal(
                            tf.reshape(tf.argmax(logits, 1), [-1, 1]),
                            tf.cast(label_batch[i], tf.int64))
                        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                    tower_acc.append(accuracy)
                    tf.get_variable_scope().reuse_variables()

        with tf.name_scope('scores'):
            with tf.name_scope('accuracy'):
                accuracy = tf.reduce_mean(tf.stack(tower_acc, axis=0))
            with tf.name_scope('batch_loss'):
                batch_loss = tf.add_n(tower_loss)[0] / num_gpus
            tf.summary.scalar('loss', batch_loss)
            tf.summary.scalar('accuracy', accuracy)

        grads = average_gradients(tower_grads)
        with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
            variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step)
            variables_averages_op = variable_averages.apply(tf.trainable_variables())
            # Batch-norm statistics must be updated together with the weights.
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
            train_op = tf.group(apply_gradient_op, variables_averages_op)
        p_relu_update = tf.get_collection('p_relu')
        # train_op = apply_gradient_op

        summary_op = tf.summary.merge_all()
        saver = tf.train.Saver(name="saver", max_to_keep=10)
        saver_best = tf.train.Saver(name='best', max_to_keep=200)

    with tf.Session(config=config) as sess:
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        if tf.gfile.Exists(os.path.join(FLAGS.ckpt_dir, 'checkpoint')):
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpt_dir))
        else:
            sess.run(tf.global_variables_initializer())
        if FLAGS.start_step != 0:
            sess.run(tf.assign(global_step, FLAGS.start_step))
        train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)
        train_writer.flush()
        valid_writer = tf.summary.FileWriter(FLAGS.log_dir + '/valid', sess.graph)
        valid_writer.flush()

        # Running averages of the last five logged accuracies.
        cache = np.ones(5, dtype=np.float32) / FLAGS.num_classes
        cache_v = np.ones(5, dtype=np.float32) / FLAGS.num_classes
        d = 1000  # logging / checkpoint interval
        best = 0

        def get_batch(set_name, on_training):
            """Pull the next batch from the requested pipeline and build a feed_dict."""
            if set_name == 'train':
                img, lb = sess.run([images_t, labels_t])
                # x = np.random.randint(0, 64)
                # y = np.random.randint(0, 64)
                # img = np.roll(np.roll(img, x, 1), y, 2)
            elif set_name == 'valid':
                img, lb = sess.run([images_v, labels_v])
            else:
                raise RuntimeError('Unknown set name')
            return {image_batch0: img, label_batch0: lb, is_training: on_training}

        for i in range(FLAGS.start_step, FLAGS.max_steps + 1):
            if i % d == 0:
                # Record summaries plus training and validation accuracy,
                # evaluated both with and without training-mode statistics.
                acc, loss, summ, lr = sess.run(
                    [accuracy, batch_loss, summary_op, learning_rate],
                    feed_dict=get_batch('train', False))
                acc2 = sess.run(accuracy, feed_dict=get_batch('train', True))
                cache[int(i / d) % 5] = acc
                acc_v, loss_v, summ_v = sess.run(
                    [accuracy, batch_loss, summary_op],
                    feed_dict=get_batch('valid', False))
                acc2_v = sess.run(accuracy, feed_dict=get_batch('valid', True))
                cache_v[int(i / d) % 5] = acc_v
                train_writer.add_summary(summ, i)
                valid_writer.add_summary(summ_v, i)
                print(('step %d, ' % i) + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), file=f)
                print('acc(t)=%f(%f), loss(t)=%f;\nacc(v)=%f(%f), loss(v)=%f; lr=%e'
                      % (acc, cache.mean(), loss, acc_v, cache_v.mean(), loss_v, lr), file=f)
                print('%f, %f' % (acc2, acc2_v), file=f)
                saver.save(sess, os.path.join(FLAGS.ckpt_dir, FLAGS.model_name), global_step=i)
                if acc_v > 0.90:
                    saver_best.save(sess,
                                    os.path.join(FLAGS.ckpt_dir, 'best', FLAGS.model_name),
                                    global_step=i)
                f.flush()
            sess.run(train_op, feed_dict=get_batch('train', True))
            sess.run(p_relu_update)

        coord.request_stop()
        coord.join(threads)
        train_writer.close()
        valid_writer.close()
    f.close()
def main(_):
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.7
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    # config.log_device_placement = True
    if not tf.gfile.Exists(FLAGS.data_dir):
        raise RuntimeError('Data directory does not exist!')
    if not tf.gfile.Exists(FLAGS.ckpt_dir):
        tf.gfile.MakeDirs(FLAGS.ckpt_dir)
    f = open(FLAGS.out_file, 'w')
    if not f:
        raise RuntimeError('Failed to open output file!')

    with tf.device('/cpu:0'):
        global_step = tf.Variable(FLAGS.start_step, name='global_step', trainable=False)
        num_gpus = len(FLAGS.gpu.split(','))
        tower_loss = []
        tower_acc = []

        images, labels = input_pipeline(
            tf.train.match_filenames_once(
                os.path.join(FLAGS.data_dir, 'valid', '*.tfrecords')),
            int(FLAGS.batch_size / num_gpus))
        image_batch = tf.placeholder(
            tf.float32, [None, FLAGS.patch_size, FLAGS.patch_size, 3], 'imgs')
        label_batch = tf.placeholder(tf.int32, [None, 1], 'labels')

        for i in range(num_gpus):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('tower_%d' % i) as scope:
                    logits = build.net(image_batch, False, FLAGS)
                    losses.sparse_softmax_cross_entropy(labels=label_batch, logits=logits, scope=scope)
                    tower_loss.append(losses.get_losses(scope=scope))
                    with tf.name_scope('accuracy'):
                        correct_prediction = tf.equal(
                            tf.reshape(tf.argmax(logits, 1), [-1, 1]),
                            tf.cast(label_batch, tf.int64))
                        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                    tower_acc.append(accuracy)
                    tf.get_variable_scope().reuse_variables()

        with tf.name_scope('scores'):
            with tf.name_scope('accuracy'):
                accuracy = tf.reduce_mean(tf.stack(tower_acc, axis=0))
            with tf.name_scope('batch_loss'):
                batch_loss = tf.add_n(tower_loss)[0]
            tf.summary.scalar('loss', batch_loss)
            tf.summary.scalar('accuracy', accuracy)

        summary_op = tf.summary.merge_all()
        # To evaluate the moving-average weights instead of the raw ones:
        # variable_averages = tf.train.ExponentialMovingAverage(0.9999)
        # variables_to_restore = variable_averages.variables_to_restore()
        # saver = tf.train.Saver(variables_to_restore, name='saver')
        saver = tf.train.Saver(name="saver")

    with tf.Session(config=config) as sess:
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        if tf.gfile.Exists(os.path.join(FLAGS.ckpt_dir, 'checkpoint')):
            saver.restore(
                sess,
                tf.train.latest_checkpoint(FLAGS.ckpt_dir)
                if FLAGS.model_name is None
                else os.path.join(FLAGS.ckpt_dir, FLAGS.model_name))
        else:
            raise RuntimeError("Checkpoint files don't exist!")
        train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test', sess.graph)
        train_writer.flush()

        for i in range(FLAGS.start_step, FLAGS.max_steps + 1):
            img, lb = sess.run([images, labels])
            acc, loss, summ = sess.run(
                [accuracy, batch_loss, summary_op],
                feed_dict={image_batch: img, label_batch: lb})
            train_writer.add_summary(summ, i)
            print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), file=f)
            print('step %d: accuracy=%f, loss=%f' % (i, acc, loss), file=f)
            f.flush()

        coord.request_stop()
        coord.join(threads)
        train_writer.close()
    f.close()
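# NOTE: all of the scripts above rely on module-level setup (imports, flags, the
# global is_training placeholder) that lives elsewhere in the repository. A minimal
# sketch of what they appear to assume is given below; the flag list is incomplete
# and every default value is an assumption, not the project's actual configuration.
import os
import time

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import losses  # losses.sparse_softmax_cross_entropy / get_losses / get_regularization_losses

import build  # project module exposing build.net(inputs, is_training, FLAGS)

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('gpu', '0', 'comma-separated list of visible GPUs')
tf.app.flags.DEFINE_string('data_dir', '', 'directory holding the images / TFRecords')
tf.app.flags.DEFINE_string('ckpt_dir', '', 'checkpoint directory')
tf.app.flags.DEFINE_string('log_dir', '', 'TensorBoard log directory')
tf.app.flags.DEFINE_string('out_file', 'out', 'text file for logs / predictions')
tf.app.flags.DEFINE_string('model_name', None, 'checkpoint file name, or None for the latest')
tf.app.flags.DEFINE_integer('patch_size', 64, 'side length of the square input patches')
tf.app.flags.DEFINE_integer('patches', 32, 'patches sampled per image at test time')
tf.app.flags.DEFINE_integer('batch_size', 128, 'per-step batch size')
tf.app.flags.DEFINE_integer('num_classes', 10, 'number of classes')
tf.app.flags.DEFINE_integer('start_step', 0, 'step to resume from')
tf.app.flags.DEFINE_integer('max_steps', 100000, 'total number of steps')

channels = 3  # assumed; the other scripts hard-code 3-channel input

# Global switch between training and inference behaviour (batch norm, dropout).
is_training = tf.placeholder(tf.bool, name='is_training')

if __name__ == '__main__':
    tf.app.run()  # parses flags and calls main()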