def train():
    """
    Train the MSCNN model on the ShanghaiTech training set.
    :return:
    """
    with tf.Graph().as_default():
        # Read the index txt file listing the training samples
        dir_file = open(FLAGS.data_train_index)
        dir_name = dir_file.readlines()

        # Parameter setup
        nums_train = len(dir_name)  # number of training samples (lines in the index file)
        global_step = tf.Variable(0, trainable=False)  # global step used for learning-rate decay

        # Placeholders for the training data
        image = tf.placeholder("float")
        label = tf.placeholder("float")
        avg_loss1 = tf.placeholder("float")

        # Model construction / training setup
        # predicts = mscnn.inference(image)  # build the original MSCNN model
        predicts = mscnn.inference_bn(image)  # build the improved MSCNN model
        loss = mscnn.loss(predicts, label)  # compute the loss
        train_op = mscnn.train(loss, global_step, nums_train)  # obtain the training op

        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement))  # create a session
        saver = tf.train.Saver(tf.global_variables())  # create a saver

        init = tf.global_variables_initializer()
        sess.run(init)  # initialize all model variables

        checkpoint_dir = tf.train.get_checkpoint_state(FLAGS.model_dir)
        if checkpoint_dir and checkpoint_dir.model_checkpoint_path:
            saver.restore(sess, checkpoint_dir.model_checkpoint_path)
        else:
            print('Checkpoint file not found')

        summary_op = tf.summary.merge_all()  # merge all summaries
        add_avg_loss_op = mscnn.add_avg_loss(avg_loss1)  # op recording the average loss
        summary_writer = tf.summary.FileWriter(
            FLAGS.train_log, graph=sess.graph)  # create a summary writer

        # Training parameters
        steps = 100000
        avg_loss = 0
        shuffled = []
        for step in range(steps):
            index = step % nums_train
            if index == 0:  # reshuffle the sample order at the start of each epoch
                shuffled = random.sample(range(nums_train), nums_train)
            num_batch = shuffled[index]
            file_name = dir_name[num_batch]
            im_name, gt_name = file_name.split(' ')
            gt_name = gt_name.split('\n')[0]  # strip the trailing newline

            # Training data (image)
            batch_xs = cv2.imread(FLAGS.data_train_im + im_name)
            batch_xs = np.array(batch_xs, dtype=np.float32)
            batch_xs = batch_xs.reshape(1, len(batch_xs), -1, 3)

            # Training data (density map)
            batch_ys = np.array(np.load(FLAGS.data_train_gt + gt_name))
            batch_ys = np.array(batch_ys, dtype=np.float32)
            batch_ys = batch_ys.reshape(1, len(batch_ys), -1)

            # Run one training step, then fetch the loss and predicted density map
            _, loss_value = sess.run([train_op, loss],
                                     feed_dict={image: batch_xs, label: batch_ys})
            output = sess.run(predicts, feed_dict={image: batch_xs})
            avg_loss += loss_value

            # Write summary data
            if step % 100 == 0:
                summary_str = sess.run(summary_op,
                                       feed_dict={image: batch_xs,
                                                  label: batch_ys,
                                                  avg_loss1: avg_loss / 100})
                summary_writer.add_summary(summary_str, step)
                avg_loss = 0

            if step % 10 == 0:
                print("avg_loss:%.7f\t counting:%.7f\t predict:%.7f" %
                      (loss_value, batch_ys.sum(), output.sum()))
                sess.run(add_avg_loss_op, feed_dict={avg_loss1: loss_value})

            # Save model parameters
            if step % 2000 == 0:
                checkpoint_path = os.path.join(FLAGS.model_dir, 'skip_mcnn.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

            # Dump the predicted density map (for inspection)
            if step % 500 == 0:
                out_path = os.path.join(FLAGS.output_dir, str(step) + "out.npy")
                np.save(out_path, output)
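# train() and evaluate() read everything through FLAGS. Below is a minimal
# sketch of the flag definitions they assume: the flag names are taken from
# the code above, but every default value is a hypothetical placeholder, not
# the project's actual directory layout.
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('data_train_index', './data/train_index.txt',
                           'Index txt: one "<image> <density_map>" pair per line.')
tf.app.flags.DEFINE_string('data_train_im', './data/train_im/',
                           'Directory containing the training images.')
tf.app.flags.DEFINE_string('data_train_gt', './data/train_gt/',
                           'Directory containing the .npy training density maps.')
tf.app.flags.DEFINE_string('data_test_index', './data/test_index.txt',
                           'Index txt for the test split, same format.')
tf.app.flags.DEFINE_string('data_test_im', './data/test_im/',
                           'Directory containing the test images.')
tf.app.flags.DEFINE_string('data_test_gt', './data/test_gt/',
                           'Directory containing the .npy test density maps.')
tf.app.flags.DEFINE_string('model_dir', './model/',
                           'Directory for saving/restoring checkpoints.')
tf.app.flags.DEFINE_string('train_log', './log/',
                           'Directory for TensorBoard summaries.')
tf.app.flags.DEFINE_string('output_dir', './output/',
                           'Directory for dumped density-map predictions.')
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            'Whether to log op/device placement.')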
def evaluate():
    """
    Evaluate the MSCNN model on the ShanghaiTech test set.
    :return:
    """
    # Build the graph
    images = tf.placeholder("float")
    labels = tf.placeholder("float")
    predict_op = mscnn.inference_bn(images)
    loss_op = mscnn.loss(predict_op, labels)

    # Load the model parameters
    saver = tf.train.Saver()
    sess = tf.Session()

    # Initialize the model variables in a fresh session, then restore
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    checkpoint_dir = tf.train.get_checkpoint_state(FLAGS.model_dir)
    if checkpoint_dir and checkpoint_dir.model_checkpoint_path:
        saver.restore(sess, checkpoint_dir.model_checkpoint_path)
    else:
        print('Checkpoint file not found')
        return False

    dir_file = open(FLAGS.data_test_index)
    dir_names = dir_file.readlines()

    step = 0
    sum_all_mae = 0
    sum_all_mse = 0
    for file_name in dir_names:
        step += 1
        im_name, gt_name = file_name.split(' ')
        gt_name = gt_name.split('\n')[0]  # strip the trailing newline

        # Test data (image)
        batch_xs = cv2.imread(FLAGS.data_test_im + im_name)
        batch_xs = np.array(batch_xs, dtype=np.float32)
        batch_xs = batch_xs.reshape(1, len(batch_xs), -1, 3)

        # Test data (density map)
        batch_ys = np.array(np.load(FLAGS.data_test_gt + gt_name))
        batch_ys = np.array(batch_ys, dtype=np.float32)
        batch_ys = batch_ys.reshape(1, len(batch_ys), -1)

        start = time.perf_counter()  # time.clock() was removed in Python 3.8
        predict = sess.run([predict_op], feed_dict={images: batch_xs})
        loss_value = sess.run(loss_op, feed_dict={images: batch_xs, labels: batch_ys})
        end = time.perf_counter()

        gt_count = batch_ys.sum()
        pred_count = np.sum(predict[0])
        print("time: %s\t loss_value: %s\t counting:%.7f\t predict:%.7f\t diff:%.7f" %
              ((end - start), loss_value, gt_count, pred_count, gt_count - pred_count))
        sum_ab = abs(gt_count - pred_count)
        sum_all_mae += sum_ab
        sum_all_mse += sum_ab ** 2

    avg_mae = sum_all_mae / len(dir_names)
    # Per crowd-counting convention, "MSE" is the root of the mean squared error
    avg_mse = (sum_all_mse / len(dir_names)) ** 0.5
    print("MAE: %.7f\t MSE:%.7f" % (avg_mae, avg_mse))
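# The metrics printed by evaluate() are the standard crowd-counting MAE and
# MSE over per-image count totals. A standalone sketch of the same
# computation, assuming gt_counts and pred_counts are hypothetical lists of
# ground-truth and predicted counts (not names from the original code):
import numpy as np

def mae_mse(gt_counts, pred_counts):
    diff = np.abs(np.asarray(gt_counts, dtype=np.float64) -
                  np.asarray(pred_counts, dtype=np.float64))
    mae = diff.mean()                  # mean absolute error
    mse = np.sqrt((diff ** 2).mean())  # root of mean squared error ("MSE" in the literature)
    return mae, mse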
def open_image(im_path, image_size_limit=None):
    # (Function header and image loading reconstructed from the call below;
    # the original snippet starts mid-function.)
    image = np.array(cv2.imread(im_path), dtype=np.float32)
    if image_size_limit:
        # Downscale so the longer side does not exceed image_size_limit
        longer_dimension = np.max(image.shape)
        scaling = np.min(
            (longer_dimension, image_size_limit)) / longer_dimension
        if scaling < 1:
            image = cv2.resize(
                image,
                (int(image.shape[1] * scaling), int(image.shape[0] * scaling)),
                interpolation=cv2.INTER_AREA)
    image = image.reshape(1, image.shape[0], image.shape[1], 3)
    return image


images = tf.placeholder('float')
predict_op = mscnn.inference_bn(images)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    checkpoint_dir = tf.train.get_checkpoint_state('./model')
    saver.restore(sess, checkpoint_dir.model_checkpoint_path)

    # image = open_image('./Data_original/Data_im/test_im/IMG_3_A.jpg')
    # image = open_image('./Data_original/Data_im/test_im/IMG_170_B.jpg')
    image = open_image('/Users/erichuang/Downloads/test.jpg', image_size_limit=1000)
    density_map = sess.run([predict_op], feed_dict={images: image})
    crowd_count = np.sum(density_map).round().astype(int)
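    # Hypothetical follow-up (not in the original script): report the count
    # and render the predicted density map for inspection. matplotlib is an
    # assumed extra dependency here.
    import matplotlib.pyplot as plt

    print('predicted crowd count: %d' % crowd_count)
    plt.imshow(np.squeeze(density_map), cmap='jet')
    plt.title('predicted crowd count: %d' % crowd_count)
    plt.savefig('density_map.png')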
def train():
    """
    Train the MSCNN model on the ShanghaiTech training set
    (batched variant of the loop above).
    :return:
    """
    with tf.Graph().as_default():
        # Read the index txt file listing the training samples
        dir_file = open(FLAGS.data_train_index)
        dir_name = dir_file.readlines()

        # Parameter setup
        nums_train = len(dir_name)  # number of training samples (lines in the index file)
        global_step = tf.Variable(0, trainable=False)  # global step used for learning-rate decay

        # Placeholders for the training data
        image = tf.placeholder("float")
        label = tf.placeholder("float")
        avg_loss = tf.placeholder("float")

        # Model construction / training setup
        predicts = mscnn.inference_bn(image)  # build the improved MSCNN model
        loss = mscnn.loss(predicts, label)  # compute the loss
        train_op = mscnn.train(loss, global_step, nums_train)  # obtain the training op

        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement))  # create a session
        saver = tf.train.Saver(tf.global_variables())  # create a saver

        init = tf.global_variables_initializer()
        sess.run(init)  # initialize all model variables

        checkpoint_dir = tf.train.get_checkpoint_state(FLAGS.model_dir)
        if checkpoint_dir and checkpoint_dir.model_checkpoint_path:
            saver.restore(sess, checkpoint_dir.model_checkpoint_path)
        else:
            print('Checkpoint file not found')

        summary_op = tf.summary.merge_all()  # merge all summaries
        add_avg_loss_op = mscnn.add_avg_loss(avg_loss)  # op recording the average loss
        summary_writer = tf.summary.FileWriter(FLAGS.train_log,
                                               graph=sess.graph)  # create a summary writer

        # Training parameters
        steps = 100000
        avg_loss_1 = 0
        for step in range(steps):
            if step < nums_train * 10:
                # For the first 10 epochs, walk through the samples in order,
                # wrapping with a modulo so the indices stay in range
                num_batch = [(step + i) % nums_train
                             for i in range(FLAGS.batch_size)]
            else:
                # Afterwards, draw batch_size samples at random without replacement
                num_batch = random.sample(range(nums_train), FLAGS.batch_size)

            xs, ys = [], []
            for index in num_batch:
                # Resolve the image / density-map paths
                file_name = dir_name[index]
                im_name, gt_name = file_name.split(' ')
                gt_name = gt_name.split('\n')[0]  # strip the trailing newline

                # Training data (image); batching assumes all images share one size
                batch_xs = cv2.imread(FLAGS.data_train_im + im_name)
                batch_xs = np.array(batch_xs, dtype=np.float32)

                # Training data (density map)
                batch_ys = np.array(np.load(FLAGS.data_train_gt + gt_name))
                batch_ys = np.array(batch_ys, dtype=np.float32)
                batch_ys = batch_ys.reshape(
                    [batch_ys.shape[0], batch_ys.shape[1], -1])

                xs.append(batch_xs)
                ys.append(batch_ys)

            np_xs = np.array(xs)
            np_ys = np.array(ys)[:, :, :, 0]

            # Run one training step, then fetch the loss and predicted density maps
            _, loss_value = sess.run([train_op, loss],
                                     feed_dict={image: np_xs, label: np_ys})
            output = sess.run(predicts, feed_dict={image: np_xs})
            avg_loss_1 += loss_value

            # Write summary data
            if step % 100 == 0:
                summary_str = sess.run(summary_op,
                                       feed_dict={image: np_xs,
                                                  label: np_ys,
                                                  avg_loss: avg_loss_1 / 100})
                summary_writer.add_summary(summary_str, step)
                avg_loss_1 = 0

            # Print progress every step
            print("avg_loss:%.7f\t counting:%.7f\t predict:%.7f" %
                  (loss_value, np_ys.sum(), output.sum()))
            sess.run(add_avg_loss_op, feed_dict={avg_loss: loss_value})

            # Save model parameters
            if step % 2000 == 0:
                checkpoint_path = os.path.join(FLAGS.model_dir, 'skip_mcnn.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

            # Dump the predicted density maps (for inspection)
            if step % 500 == 0:
                out_path = os.path.join(FLAGS.output_dir, str(step) + "out.npy")
                np.save(out_path, output)
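# The mscnn module itself is not shown. Inferred from the calls above, it
# exposes inference_bn(images), loss(predicts, labels),
# train(loss, global_step, num_examples), and add_avg_loss(avg_loss).
# A plausible sketch of add_avg_loss (an assumption, not the repo's code):
# it registers a scalar summary fed from the avg_loss placeholder so the
# 100-step average loss shows up in TensorBoard.
def add_avg_loss(avg_loss):
    return tf.summary.scalar('avg_loss', avg_loss)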