Example #1
    def __init__(self):
        self.set_gpu(1)

        self.img_path = ''
        self.model_path = './model/MCNN_model/v1-2050'
        # crop_size = 256

        # placeholders (define the input variables)
        self.input_img_placeholder = tf.placeholder(
            tf.float32, shape=(None, None, None, 3))
        self.density_map_placeholder = tf.placeholder(tf.float32,
                                                      shape=(None, None, None,
                                                             1))

        self.inference_density_map = multi_column_cnn(
            self.input_img_placeholder)
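Example #1 calls self.set_gpu(1) (and the later examples call a module-level set_gpu) before building the graph, but the helper itself is not shown in this listing. A minimal sketch of such a helper, assuming it does nothing more than pin the process to one CUDA device via CUDA_VISIBLE_DEVICES (the real helper may differ):

import os

def set_gpu(gpu_id):
    # Hypothetical sketch: make only the selected GPU visible to TensorFlow.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)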
Example #2
def infer():
    set_gpu(1)

    img_path = 'D:\\YourZhouProject\\mcnn_project\\pytorch_mcnn\\part_A_final\\test_data\\images\\IMG_78.jpg'
    model_path = 'D:\\YourZhouProject\\mcnn_project\\tf_mcnn\\work\\ckpts2\\mcnn\\v1-1425'
    # crop_size = 256

    ori_crowd_img = cv.imread(img_path)
    # ori_crowd_img = cv.resize(ori_crowd_img, (256, 256))
    # h, w = ori_crowd_img.shape[0], ori_crowd_img.shape[1]
    img = ori_crowd_img.reshape(
        (ori_crowd_img.shape[0], ori_crowd_img.shape[1],
         ori_crowd_img.shape[2]))

    # placeholders
    input_img_placeholder = tf.placeholder(tf.float32,
                                           shape=(None, None, None, 3))
    density_map_placeholder = tf.placeholder(tf.float32,
                                             shape=(None, None, None, 1))

    inference_density_map = multi_column_cnn(input_img_placeholder)

    saver = tf.train.Saver()

    time_start = time.time()
    with tf.Session() as sess:
        saver.restore(sess, model_path)
        result = sess.run(
            inference_density_map,
            feed_dict={input_img_placeholder: [(img - 127.5) / 128]})

    time_over = time.time() - time_start
    print(time_over)

    num = result.sum()
    print(num)
    dmap_img = result[0, :, :, 0]

    final_img = image_processing(dmap_img)
    final_img = image_add_heatmap(ori_crowd_img, final_img, 0.5)

    cv.putText(final_img, "P : " + str(int(num)), (50, 50),
               cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv.imshow("really", final_img)

    cv.waitKey(0)
    cv.destroyAllWindows()
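image_processing and image_add_heatmap are presumably project utilities and are not reproduced in this listing. A plausible sketch of the heat-map overlay step, assuming it simply colorizes the density map and alpha-blends it onto the original frame (the signature mirrors the call above; the real implementation may differ):

import cv2 as cv
import numpy as np

def image_add_heatmap(frame, density_map, alpha=0.5):
    # Hypothetical sketch: resize the density map to the frame, scale it to
    # 8-bit, apply a JET color map and alpha-blend it over the original image.
    dmap = cv.resize(density_map, (frame.shape[1], frame.shape[0]))
    dmap = cv.normalize(dmap, None, 0, 255, cv.NORM_MINMAX).astype(np.uint8)
    heat = cv.applyColorMap(dmap, cv.COLORMAP_JET)
    return cv.addWeighted(frame, 1.0, heat, alpha, 0)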
Example #3
def infer():
    set_gpu(0)
    cap = cv.VideoCapture("D:/YourZhouDownloads/FFPUT/school_2/school_01.mp4")
    # img_path = 'D:\\YourZhouProject\\mcnn_project\\pytorch_mcnn\\part_A_final\\test_data\\images\\IMG_45.jpg'
    model_path = 'D:\\YourZhouProject\\mcnn_project\\tf_mcnn\\work\\ckpts\\mcnn\\v1-2200'
    # crop_size = 256

    # placeholders
    input_img_placeholder = tf.placeholder(tf.float32,
                                           shape=(None, None, None, 3))
    density_map_placeholder = tf.placeholder(tf.float32,
                                             shape=(None, None, None, 1))

    inference_density_map = multi_column_cnn(input_img_placeholder)

    while True:
        ret, image = cap.read()
        if not ret:
            # stop when the stream ends or a frame cannot be read
            break
        ori_crowd_img = cv.flip(image, 1)

        # ori_crowd_img = cv.imread(img_path)
        # h, w = ori_crowd_img.shape[0], ori_crowd_img.shape[1]
        img = ori_crowd_img.reshape(
            (ori_crowd_img.shape[0], ori_crowd_img.shape[1],
             ori_crowd_img.shape[2]))

        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, model_path)
            result = sess.run(
                inference_density_map,
                feed_dict={input_img_placeholder: [(img - 127.5) / 128]})

        print(result.sum())
        dmap_img = result[0, :, :, 0]
        # utils.show_density_map(dmap_img)

        final_img = image_processing(dmap_img)
        final_img = image_add_heatmap(ori_crowd_img, final_img, 0.3)

        cv.imshow("really", final_img)
        if (cv.waitKey(1) == 27):
            cv.destroyAllWindows()
            break
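Note that Example #3 creates a tf.train.Saver, opens a tf.Session and restores the checkpoint again for every single frame, which makes the loop much slower than it needs to be. A restructured sketch that restores once and reuses the session inside the capture loop (same helpers and preprocessing as above; the name infer_video is illustrative):

def infer_video(video_path, model_path):
    set_gpu(0)
    input_img_placeholder = tf.placeholder(tf.float32,
                                           shape=(None, None, None, 3))
    inference_density_map = multi_column_cnn(input_img_placeholder)
    saver = tf.train.Saver()
    cap = cv.VideoCapture(video_path)
    with tf.Session() as sess:
        # restore the checkpoint once, not per frame
        saver.restore(sess, model_path)
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            result = sess.run(
                inference_density_map,
                feed_dict={input_img_placeholder: [(frame - 127.5) / 128]})
            heat = image_add_heatmap(frame,
                                     image_processing(result[0, :, :, 0]), 0.3)
            cv.imshow("really", heat)
            if cv.waitKey(1) == 27:
                break
    cap.release()
    cv.destroyAllWindows()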
Example #4
def test():
    cfig = ConfigFactory()
    #set_gpu(0)
    dataset = 'A'
    # training dataset
    img_root_dir = cfig.data_root_dir + r'part_' + dataset + r'_final/train_data/images/'
    gt_root_dir = cfig.data_root_dir + r'part_' + dataset + r'_final/train_data/ground_truth/'
    # testing dataset
    val_img_root_dir = cfig.data_root_dir + r'part_' + dataset + r'_final/test_data/images/'
    val_gt_root_dir = cfig.data_root_dir + r'part_' + dataset + r'_final/test_data/ground_truth/'

    # placeholders
    input_img_placeholder = tf.placeholder(tf.float32,
                                           shape=(None, None, None, 3))
    density_map_placeholder = tf.placeholder(tf.float32,
                                             shape=(None, None, None, 1))

    # network generation
    inference_density_map = multi_column_cnn(input_img_placeholder)

    # density map loss
    density_map_loss = 0.5 * tf.reduce_sum(
        tf.square(tf.subtract(density_map_placeholder, inference_density_map)))

    # jointly training
    joint_loss = density_map_loss
    # optimizer = tf.train.MomentumOptimizer(configs.learing_rate, momentum=configs.momentum).minimize(joint_loss)
    # adam optimizer
    # optimizer = tf.train.AdamOptimizer(cfig.lr).minimize(joint_loss)

    init = tf.global_variables_initializer()

    file_path = cfig.log_router

    # training log route
    if not os.path.exists(file_path):
        os.makedirs(file_path)

    # model saver route
    if not os.path.exists(cfig.ckpt_router):
        os.makedirs(cfig.ckpt_router)
    log = open(cfig.log_router + cfig.name + r'_training.logs',
               mode='a+',
               encoding='utf-8')

    saver = tf.train.Saver(max_to_keep=cfig.max_ckpt_keep)
    ckpt = tf.train.get_checkpoint_state(cfig.ckpt_router)

    # start session
    sess = tf.Session()

    if ckpt and ckpt.model_checkpoint_path:
        print('load model', ckpt.model_checkpoint_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(init)

    data_loader = ImageDataLoader(img_root_dir,
                                  gt_root_dir,
                                  shuffle=False,
                                  downsample=False,
                                  pre_load=False)
    data_loader_val = ImageDataLoader(val_img_root_dir,
                                      val_gt_root_dir,
                                      shuffle=False,
                                      downsample=False,
                                      pre_load=False)

    absolute_error = 0.0
    square_error = 0.0
    file_index = 1
    for blob in data_loader_val:
        img, gt_dmp, gt_count = blob['data'], blob['gt_density'], blob[
            'crowd_count']
        feed_dict = {
            input_img_placeholder: (img - 127.5) / 128,
            density_map_placeholder: gt_dmp
        }
        inf_dmp, loss = sess.run([inference_density_map, joint_loss],
                                 feed_dict=feed_dict)
        print(blob['fname'], gt_count.sum(), inf_dmp.sum(), loss)
        #print(absolute_error,square_error)
        absolute_error = absolute_error + np.abs(
            np.subtract(gt_count.sum(), inf_dmp.sum())).mean()
        square_error = square_error + np.power(
            np.subtract(gt_count.sum(), inf_dmp.sum()), 2).mean()
        file_index = file_index + 1
        show_map(img[0, :, :, 0])
        show_density_map(inf_dmp[0, :, :, 0])
        show_density_map(gt_dmp[0, :, :, 0])
    mae = absolute_error / data_loader_val.num_samples
    rmse = np.sqrt(square_error / data_loader_val.num_samples)
    print(str('MAE_' + str(mae) + '_MSE_' + str(rmse)))
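The test loop above accumulates, per image, the absolute and squared difference between the ground-truth count and the sum of the predicted density map, then reports MAE and RMSE. The same metrics in isolation, as a small NumPy-only sketch (array names are illustrative):

import numpy as np

def count_metrics(gt_counts, pred_counts):
    # MAE and RMSE over per-image crowd counts, mirroring the accumulation in test()
    gt = np.asarray(gt_counts, dtype=np.float64)
    pred = np.asarray(pred_counts, dtype=np.float64)
    mae = np.abs(gt - pred).mean()
    rmse = np.sqrt(np.mean((gt - pred) ** 2))
    return mae, rmse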
Example #5
def train():
    set_gpu(0)
    dataset = 'A'
    # training dataset
    img_root_dir = r'D:/people_all/ShanghaiTech/part_' + dataset + r'_final/train_data/images/'
    gt_root_dir = r'D:/people_all/ShanghaiTech/part_' + dataset + r'_final/train_data/ground_truth/'
    # testing dataset
    val_img_root_dir = r'D:/people_all/ShanghaiTech/part_' + dataset + r'_final/test_data/images/'
    val_gt_root_dir = r'D:/people_all/ShanghaiTech/part_' + dataset + r'_final/test_data/ground_truth/'

    # training dataset file list
    img_file_list = os.listdir(img_root_dir)
    gt_img_file_list = os.listdir(gt_root_dir)

    # testing dataset file list
    val_img_file_list = os.listdir(val_img_root_dir)
    val_gt_file_list = os.listdir(val_gt_root_dir)

    cfig = ConfigFactory()

    # placeholders
    input_img_placeholder = tf.placeholder(tf.float32,
                                           shape=(None, None, None, 3))
    density_map_placeholder = tf.placeholder(tf.float32,
                                             shape=(None, None, None, 1))

    # network generation
    inference_density_map = multi_column_cnn(input_img_placeholder)

    # density map loss
    density_map_loss = 0.5 * tf.reduce_sum(
        tf.square(tf.subtract(density_map_placeholder, inference_density_map)))

    # jointly training
    joint_loss = density_map_loss
    # optimizer = tf.train.MomentumOptimizer(configs.learing_rate, momentum=configs.momentum).minimize(joint_loss)
    # adam optimizer
    optimizer = tf.train.AdamOptimizer(cfig.lr).minimize(joint_loss)

    init = tf.global_variables_initializer()

    file_path = cfig.log_router

    # training log route
    if not os.path.exists(file_path):
        os.makedirs(file_path)

    # model saver route
    if not os.path.exists(cfig.ckpt_router):
        os.makedirs(cfig.ckpt_router)
    log = open(cfig.log_router + cfig.name + r'_training.logs',
               mode='a+',
               encoding='utf-8')

    saver = tf.train.Saver(max_to_keep=cfig.max_ckpt_keep)
    ckpt = tf.train.get_checkpoint_state(cfig.ckpt_router)

    # start session
    sess = tf.Session()
    if ckpt and ckpt.model_checkpoint_path:
        print('load model', ckpt.model_checkpoint_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        # only initialize from scratch when no checkpoint is available,
        # otherwise the restored weights would be overwritten
        sess.run(init)

    # start training
    for i in range(cfig.total_iters):
        # iterate over all training images
        for file_index in range(len(img_file_list)):
            img_path = img_root_dir + img_file_list[file_index]
            gt_path = gt_root_dir + 'GT_' + img_file_list[file_index].split(
                r'.')[0] + '.mat'

            img, gt_dmp, gt_count = read_crop_train_data(img_path,
                                                         gt_path,
                                                         scale=4)

            feed_dict = {
                input_img_placeholder: (img - 127.5) / 128,
                density_map_placeholder: gt_dmp
            }

            _, inf_dmp, loss = sess.run(
                [optimizer, inference_density_map, joint_loss],
                feed_dict=feed_dict)
            format_time = str(
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
            format_str = 'step %d, joint loss=%.5f, inference= %.5f, gt=%d'
            log_line = format_time, img_file_list[file_index], format_str % (
                i * len(img_file_list) + file_index, loss, inf_dmp.sum(),
                gt_count)
            log.writelines(str(log_line) + '\n')
            print(log_line)

            # covert graph to pb file
            # tf.train.write_graph(sess.graph_def, "./", 'graph.pb' + str(file_index), as_text=True)
            # test whether add new operation dynamically
            # sess.graph.finalize()
        # saver.save(sess, cfig.ckpt_router + '/v1', global_step=i)

        if i % 25 == 0:
            saver.save(sess, cfig.ckpt_router + '/v1', global_step=i)
            val_log = open(cfig.log_router + cfig.name + r'_validating_' +
                           str(i) + '_.logs',
                           mode='w',
                           encoding='utf-8')
            absolute_error = 0.0
            square_error = 0.0
            # validation
            for file_index in range(len(val_img_file_list)):
                img_path = val_img_root_dir + val_img_file_list[file_index]
                gt_path = val_gt_root_dir + 'GT_' + val_img_file_list[
                    file_index].split(r'.')[0] + '.mat'
                # gt_path = val_gt_root_dir + 'GT_' + val_img_file_list[file_index].split(r'.')[0]
                img, gt_dmp, gt_count = read_test_data(img_path,
                                                       gt_path,
                                                       scale=4)

                feed_dict = {
                    input_img_placeholder: (img - 127.5) / 128,
                    density_map_placeholder: gt_dmp
                }
                # evaluation only: do not run the optimizer on the test set
                inf_dmp, loss = sess.run([inference_density_map, joint_loss],
                                         feed_dict=feed_dict)

                format_time = str(
                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
                format_str = 'step %d, joint loss=%.5f, inference= %.5f, gt=%d'
                absolute_error = absolute_error + np.abs(
                    np.subtract(gt_count, inf_dmp.sum())).mean()
                square_error = square_error + np.power(
                    np.subtract(gt_count, inf_dmp.sum()), 2).mean()
                log_line = format_time, val_img_file_list[
                    file_index], format_str % (file_index, loss, inf_dmp.sum(),
                                               gt_count)
                val_log.writelines(str(log_line) + '\n')
                print(log_line)
            mae = absolute_error / len(val_img_file_list)
            rmse = np.sqrt(square_error / len(val_img_file_list))
            val_log.writelines(
                str('MAE_' + str(mae) + '_MSE_' + str(rmse)) + '\n')
            val_log.close()
            print(str('MAE_' + str(mae) + '_MSE_' + str(rmse)))
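read_crop_train_data and read_test_data are project helpers not shown in this listing; they load an image together with its GT_*.mat annotation and build the ground-truth density map. Assuming the standard ShanghaiTech part_A/part_B annotation layout, the head coordinates behind such a GT_*.mat file can be read roughly like this (a sketch, not the repo's actual loader):

import numpy as np
from scipy.io import loadmat

def load_head_points(gt_mat_path):
    # Sketch assuming the usual ShanghaiTech layout: 'image_info' wraps an
    # (N, 2) array of (x, y) head coordinates for the annotated image.
    mat = loadmat(gt_mat_path)
    points = mat['image_info'][0][0][0][0][0]
    return np.asarray(points)

# the ground-truth crowd count is then simply the number of annotated heads:
# gt_count = load_head_points(gt_path).shape[0]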
Example #6
def train():
    cfig = ConfigFactory()
    #set_gpu(0)
    dataset = 'A'
    # training dataset
    img_root_dir = cfig.data_root_dir + r'part_' + dataset + r'_final/train_data/images/'
    gt_root_dir = cfig.data_root_dir + r'part_' + dataset + r'_final/train_data/ground_truth/'
    # testing dataset
    val_img_root_dir = cfig.data_root_dir + r'part_' + dataset + r'_final/test_data/images/'
    val_gt_root_dir = cfig.data_root_dir + r'part_' + dataset + r'_final/test_data/ground_truth/'

    # placeholders
    input_img_placeholder = tf.placeholder(tf.float32,
                                           shape=(None, None, None, 3))
    density_map_placeholder = tf.placeholder(tf.float32,
                                             shape=(None, None, None, 1))

    # network generation
    inference_density_map = multi_column_cnn(input_img_placeholder)

    # density map loss
    density_map_loss = 0.5 * tf.reduce_sum(
        tf.square(tf.subtract(density_map_placeholder, inference_density_map)))

    # jointly training
    joint_loss = density_map_loss
    # optimizer = tf.train.MomentumOptimizer(configs.learing_rate, momentum=configs.momentum).minimize(joint_loss)
    # adam optimizer
    optimizer = tf.train.AdamOptimizer(cfig.lr).minimize(joint_loss)

    init = tf.global_variables_initializer()

    file_path = cfig.log_router

    # training log route
    if not os.path.exists(file_path):
        os.makedirs(file_path)

    # model saver route
    if not os.path.exists(cfig.ckpt_router):
        os.makedirs(cfig.ckpt_router)
    log = open(cfig.log_router + cfig.name + r'_training.logs',
               mode='a+',
               encoding='utf-8')

    saver = tf.train.Saver(max_to_keep=cfig.max_ckpt_keep)
    ckpt = tf.train.get_checkpoint_state(cfig.ckpt_router)

    # start session
    sess = tf.Session()
    if ckpt and ckpt.model_checkpoint_path:
        print('load model', ckpt.model_checkpoint_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(init)

    data_loader = ImageDataLoader(img_root_dir,
                                  gt_root_dir,
                                  shuffle=True,
                                  downsample=True,
                                  pre_load=True)
    data_loader_val = ImageDataLoader(val_img_root_dir,
                                      val_gt_root_dir,
                                      shuffle=False,
                                      downsample=False,
                                      pre_load=True)
    # start training
    for i in range(cfig.start_iters, cfig.total_iters):
        # training
        index = 1
        for blob in data_loader:
            img, gt_dmp, gt_count = blob['data'], blob['gt_density'], blob[
                'crowd_count']
            feed_dict = {
                input_img_placeholder: (img - 127.5) / 128,
                density_map_placeholder: gt_dmp
            }
            _, inf_dmp, loss = sess.run(
                [optimizer, inference_density_map, joint_loss],
                feed_dict=feed_dict)
            format_time = str(
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
            format_str = 'step %d, joint loss=%.5f, inference= %.5f, gt=%d'
            log_line = format_time, blob['fname'], format_str % (
                i * data_loader.num_samples + index, loss, inf_dmp.sum(),
                gt_count)
            log.writelines(str(log_line) + '\n')
            print(log_line)
            index = index + 1

        if i % 50 == 0:
            val_log = open(cfig.log_router + cfig.name + r'_validating_' +
                           str(i) + '_.logs',
                           mode='w',
                           encoding='utf-8')
            absolute_error = 0.0
            square_error = 0.0
            file_index = 1
            for blob in data_loader_val:
                img, gt_dmp, gt_count = blob['data'], blob['gt_density'], blob[
                    'crowd_count']
                feed_dict = {
                    input_img_placeholder: (img - 127.5) / 128,
                    density_map_placeholder: gt_dmp
                }
                inf_dmp, loss = sess.run([inference_density_map, joint_loss],
                                         feed_dict=feed_dict)
                format_time = str(
                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
                format_str = 'step %d, joint loss=%.5f, inference= %.5f, gt=%d'
                absolute_error = absolute_error + np.abs(
                    np.subtract(gt_count, inf_dmp.sum())).mean()
                square_error = square_error + np.power(
                    np.subtract(gt_count, inf_dmp.sum()), 2).mean()
                log_line = format_time, blob['fname'], format_str % (
                    file_index, loss, inf_dmp.sum(), gt_count)
                val_log.writelines(str(log_line) + '\n')
                print(log_line)
                file_index = file_index + 1
            mae = absolute_error / data_loader_val.num_samples
            rmse = np.sqrt(square_error / data_loader_val.num_samples)
            val_log.writelines(
                str('MAE_' + str(mae) + '_MSE_' + str(rmse)) + '\n')
            val_log.close()
            print(str('MAE_' + str(mae) + '_MSE_' + str(rmse)))
            saver.save(sess, cfig.ckpt_router + '/v1', global_step=i + 1)