Example 1
def test_lanenet(image_path, weights_path, use_gpu, image_list, batch_size, save_dir):
    """
    :param image_path: input passed to the test DataSet
    :param weights_path: checkpoint to restore
    :param use_gpu: run the session on the GPU if truthy
    :param image_list: list of test image paths (used to compute the number of batches)
    :param batch_size: number of images per inference batch
    :param save_dir: root directory for the prediction outputs
    :return:
    """

    test_dataset = lanenet_data_processor_test.DataSet(image_path, batch_size)
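    # Image paths are fed in as strings; tf.map_fn runs test_dataset.process_img on each
    # path to produce the float32 input batch for the network.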
    input_tensor = tf.placeholder(dtype=tf.string, shape=[None], name='input_tensor')
    imgs = tf.map_fn(test_dataset.process_img, input_tensor, dtype=tf.float32)
    phase_tensor = tf.constant('test', tf.string)

    net = lanenet_merge_model.LaneNet()
    binary_seg_ret, instance_seg_ret = net.test_inference(imgs, phase_tensor, 'lanenet_loss')
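    # Restore every global variable except the last one created; only these are loaded from the checkpoint.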
    initial_var = tf.global_variables()
    final_var = initial_var[:-1]
    saver = tf.train.Saver(final_var)
    # Set sess configuration
    if use_gpu:
        sess_config = tf.ConfigProto(device_count={'GPU': 1})
    else:
        sess_config = tf.ConfigProto(device_count={'GPU': 0})
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'
    sess = tf.Session(config=sess_config)
    with sess.as_default():
        sess.run(tf.global_variables_initializer())
        # import IPython
        # IPython.embed()  # interactive debugging hook, disabled so the script runs unattended
        saver.restore(sess=sess, save_path=weights_path)
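        # Each batch feeds a list of image paths and fetches the per-lane probability maps
        # together with the per-lane existence scores.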
        for i in range(math.ceil(len(image_list) / batch_size)):
            print(i)
            paths = test_dataset.next_batch()
            instance_seg_image, existence_output = sess.run([binary_seg_ret, instance_seg_ret],
                                                            feed_dict={input_tensor: paths})
            for cnt, image_name in enumerate(paths):
                print(image_name)
                parent_path = os.path.dirname(image_name)
                directory = os.path.join(save_dir, 'vgg_SCNN_DULR_w9', parent_path)
                if not os.path.exists(directory):
                    os.makedirs(directory)
                file_exist = open(os.path.join(directory, os.path.basename(image_name)[:-3] + 'exist.txt'), 'w')
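                # For each of the four lanes: save its probability map as <name>_<k>_avg.png and
                # write 1/0 to the .exist.txt file depending on whether the existence score exceeds 0.5.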
                for cnt_img in range(4):
                    cv2.imwrite(os.path.join(directory, os.path.basename(image_name)[:-4] + '_' + str(cnt_img + 1) + '_avg.png'),
                            (instance_seg_image[cnt, :, :, cnt_img + 1] * 255).astype(int))
                    if existence_output[cnt, cnt_img] > 0.5:
                        file_exist.write('1 ')
                    else:
                        file_exist.write('0 ')
                file_exist.close()
    sess.close()
    return
Example 2
def test_lanenet(image_path, weights_path, use_gpu, image_list, batch_size):
    """

    :param image_path:
    :param weights_path:
    :param use_gpu:
    :return:
    """

    test_dataset = lanenet_data_processor_test.DataSet(image_path)

    input_tensor = tf.placeholder(dtype=tf.float32,
                                  shape=[batch_size, 288, 800, 3],
                                  name='input_tensor')
    phase_tensor = tf.constant('test', tf.string)

    net = lanenet_merge_model.LaneNet(phase=phase_tensor, net_flag='vgg')
    binary_seg_ret, instance_seg_ret = net.inference(input_tensor=input_tensor,
                                                     name='lanenet_loss')

    initial_var = tf.global_variables()
    final_var = initial_var[:-1]

    saver = tf.train.Saver(final_var)

    # Set sess configuration
    if use_gpu:
        sess_config = tf.ConfigProto(device_count={'GPU': 1})
    else:
        sess_config = tf.ConfigProto(device_count={'GPU': 0})
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'

    sess = tf.Session(config=sess_config)

    with sess.as_default():

        sess.run(tf.global_variables_initializer())

        saver.restore(sess=sess, save_path=weights_path)
        for i in range(int(len(image_list) / batch_size)):
            print(i)
            gt_imgs = test_dataset.next_batch(CFG.TRAIN.BATCH_SIZE)
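            # Resize each image to the network input resolution and subtract the VGG channel mean.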
            gt_imgs = [
                cv2.resize(tmp,
                           dsize=(CFG.TRAIN.IMG_WIDTH, CFG.TRAIN.IMG_HEIGHT),
                           dst=tmp,
                           interpolation=cv2.INTER_CUBIC) for tmp in gt_imgs
            ]
            gt_imgs = [(tmp - VGG_MEAN) for tmp in gt_imgs]

            instance_seg_image, existence_output = sess.run(
                [binary_seg_ret, instance_seg_ret],
                feed_dict={input_tensor: gt_imgs})

            for cnt in range(batch_size):
                image_name = image_list[i * batch_size + cnt]
                image_prefix = image_name[:-10]
                directory = 'predicts_SCNN_test_final/vgg_SCNN_DULR_w9' + image_prefix
                if not os.path.exists(directory):
                    os.makedirs(directory)
                file_exist = open(
                    directory + image_name[-10:-4] + '.exist.txt', 'w')
                for cnt_img in range(4):
                    cv2.imwrite(
                        directory + image_name[-10:-4] + '_' +
                        str(cnt_img + 1) + '_avg.png',
                        (instance_seg_image[cnt, :, :, cnt_img + 1] *
                         255).astype(int))
                    if existence_output[cnt, cnt_img] > 0.5:
                        file_exist.write('1 ')
                    else:
                        file_exist.write('0 ')

                file_exist.close()

    sess.close()

    return
Example 3
def test_lanenet(image_path, weights_path, use_gpu, image_list, batch_size,
                 save_dir):
    """
    :param image_path:
    :param weights_path:  ***
    :param use_gpu:
    :return:
    """
    print("6666666666666")
    global total_img
    test_dataset = lanenet_data_processor_test.DataSet(image_path, batch_size)
    input_tensor = tf.placeholder(dtype=tf.string,
                                  shape=[None],
                                  name='input_tensor')
    imgs = tf.map_fn(test_dataset.process_img, input_tensor, dtype=tf.float32)
    phase_tensor = tf.constant('test', tf.string)  # string constant

    net = lanenet_merge_model.LaneNet()

    binary_seg_ret, instance_seg_ret = net.test_inference(
        imgs, phase_tensor, 'lanenet_loss')
    initial_var = tf.global_variables()
    final_var = initial_var[:-1]
    print(len(final_var))  # 85
    # Pass the variables as a list:
    saver = tf.train.Saver(final_var)
    # Set sess configuration
    if use_gpu:
        sess_config = tf.ConfigProto(device_count={'GPU': 1})
    else:
        sess_config = tf.ConfigProto(device_count={'GPU': 0})
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'
    sess = tf.Session(config=sess_config)
    with sess.as_default():
        sess.run(tf.global_variables_initializer())
        # save_path: path where the checkpoint (.ckpt) parameters were previously saved.
        # There is a mismatch between the graph and the checkpoint being loaded.
        saver.restore(sess=sess, save_path=weights_path)
        print("Model restored.")
        #print(len(image_list) / batch_size)   #1.0
        for i in range(int(math.ceil(len(image_list) / batch_size))):
            print("i: ", i)
            paths = test_dataset.next_batch()
            #print(paths)    # 1.jpg ~ 8.jpg

            instance_seg_image, existence_output = sess.run(
                [binary_seg_ret, instance_seg_ret],
                feed_dict={input_tensor: paths})
            #print('instance_seg_image shape: ',instance_seg_image.shape)

            #image_list_epoch = [cv2.imread(tmp, cv2.IMREAD_COLOR) for tmp in paths]
            #image_list_epoch = [tmp - VGG_MEAN for tmp in image_list_epoch]
            #instance_seg_image, existence_output = sess.run([binary_seg_ret, instance_seg_ret],
            #feed_dict={input_tensor: image_list_epoch})

            for cnt, image_name in enumerate(paths):
                total_img = np.zeros([288, 800])
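                # total_img accumulates the masks of every lane whose existence score exceeds 0.5;
                # it is written out below as <name>_total_img.png.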
                #print(image_name)
                parent_path = os.path.dirname(image_name)
                #print(parent_path)  /Users/wutong/Downloads/test_set/clips/0601/1494453197736907986
                #                   //Users/wutong/Downloads/train_set/clips/0313-1/8580/20.jpg
                ph = parent_path.split('/')
                directory = os.path.join(save_dir, 'cable_pic', ph[-1])

                if not os.path.exists(directory):
                    os.makedirs(directory)

                file_exist = open(
                    os.path.join(
                        directory,
                        os.path.basename(image_name)[:-3] + 'exist.txt'), 'w')
                for cnt_img in range(4):  # 4 lines
                    cv2.imwrite(
                        os.path.join(
                            directory,
                            os.path.basename(image_name)[:-4] + '_' +
                            str(cnt_img + 1) + '_avg.png'),
                        (instance_seg_image[cnt, :, :, cnt_img + 1] *
                         255).astype(int))
                    if existence_output[
                            cnt,
                            cnt_img] > 0.5:  # >0.5 suppose that have a line
                        #file_exist.write('%s ' % existence_output[cnt, cnt_img])
                        file_exist.write('1 ')
                        total_img += (
                            instance_seg_image[cnt, :, :, cnt_img + 1] *
                            255).astype(int)
                    else:
                        file_exist.write('0 ')

                cv2.imwrite(
                    os.path.join(
                        directory,
                        os.path.basename(image_name)[:-4] + '_' +
                        'total_img.png'), total_img)

                file_exist.close()
    sess.close()
    return
Example 4
def test_lanenet(image_path, weights_path, use_gpu, image_list, batch_size, save_dir):

    """
    :param image_path:
    :param weights_path:
    :param use_gpu:
    :return:
    """
    print("saving to "+save_dir) 
    test_dataset = lanenet_data_processor_test.DataSet(image_path, batch_size)
    input_tensor = tf.placeholder(dtype=tf.string, shape=[None], name='input_tensor')
    imgs = tf.map_fn(test_dataset.process_img, input_tensor, dtype=tf.float32)
    phase_tensor = tf.constant('test', tf.string)

    net = lanenet_merge_model.LaneNet()
    binary_seg_ret, instance_seg_ret = net.test_inference(imgs, phase_tensor, 'lanenet_loss')
    initial_var = tf.global_variables()
    final_var = initial_var[:-1]
    saver = tf.train.Saver(final_var)
    # Set sess configuration
    if use_gpu:
        sess_config = tf.ConfigProto(device_count={'GPU': 1})
    else:
        sess_config = tf.ConfigProto(device_count={'GPU': 0})
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TEST.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'
    sess = tf.Session(config=sess_config)
    
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter(os.path.join(save_dir,"result.avi"),fourcc,30.0, (1920,1080))
    print( "OPEN VIDEO FILE", out.isOpened())
    with sess.as_default():
        sess.run(tf.global_variables_initializer())
        saver.restore(sess=sess, save_path=weights_path)
        for i in range(math.ceil(len(image_list) / batch_size)):
            print(i)
            paths = test_dataset.next_batch()
            instance_seg_image, existence_output = sess.run([binary_seg_ret, instance_seg_ret],
                                                            feed_dict={input_tensor: paths})
            for cnt, image_name in enumerate(paths):
                print(image_name)
                parent_path = os.path.dirname(image_name)
                directory = os.path.join(save_dir, 'vgg_SCNN_DULR_w9', parent_path)
                if not os.path.exists(directory):
                    os.makedirs(directory)
                file_exist = open(os.path.join(directory, os.path.basename(image_name)[:-3] + 'exist.txt'), 'w')
                ori_img = cv2.imread(os.path.join(directory, os.path.basename(image_name)),1)
                for cnt_img in range(4):
                    cv2.imwrite(os.path.join(directory, os.path.basename(image_name)[:-4] + '_' + str(cnt_img + 1) + '_avg.png'),
                            (instance_seg_image[cnt, :, :, cnt_img + 1] * 255).astype('uint8'))
                    ori_size_lane = (cv2.resize(instance_seg_image[cnt, :, :, cnt_img + 1], ( ori_img.shape[1], ori_img.shape[0])) * 255).astype('uint8')
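                    # Threshold the resized lane map: responses below (mean - std) are zeroed out below.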
                    mean = np.mean(ori_size_lane)
                    std = np.std(ori_size_lane)
                    ori_size_lane = np.where(ori_size_lane>(mean - std), ori_size_lane,0)
                    if existence_output[cnt, cnt_img] > 0.5:
                        file_exist.write('1 ')
                        for channel, color in enumerate(LANE_COLOR[cnt_img]):
                            if color == 1:
                                #ori_img[:,:,channel] = np.where(ori_size_lane == 1, ori_img[:,:,channel] ,ori_size_lane )
                                # add the lane response into this color channel and saturate at 255
                                ori_img[:, :, channel] = np.clip(
                                    ori_img[:, :, channel].astype(int) + ori_size_lane, 0, 255).astype('uint8')
                    else:
                        file_exist.write('0 ')
                file_exist.close()
                out.write(ori_img)
                cv2.imwrite(os.path.join(directory, os.path.basename(image_name)[:-4] + '_lane.png'),ori_img)
    sess.close()
    out.release()
    return
Example 5
def test_lanenet(image_path, weights_path, use_gpu, image_list, batch_size,
                 save_dir):
    """
    :param image_path:
    :param weights_path:
    :param use_gpu:
    :return:
    """
    src_image = cv2.imread('./00330.jpg')

    #print(src_image.shape[0], src_image.shape[1])
    #src_image = cv2.resize(src_image, (1640,590), interpolation=cv2.INTER_AREA)
    w, h = src_image.shape[:2]
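    # Note: shape[:2] is (rows, cols), so w actually holds the image height and h the width as used below.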
    test_dataset = lanenet_data_processor_test.DataSet(image_path, batch_size)
    input_tensor = tf.placeholder(dtype=tf.string,
                                  shape=[None],
                                  name='input_tensor')
    imgs = tf.map_fn(test_dataset.process_img, input_tensor, dtype=tf.float32)
    phase_tensor = tf.constant('test', tf.string)

    net = lanenet_merge_model.LaneNet()
    binary_seg_ret, instance_seg_ret = net.test_inference(
        imgs, phase_tensor, 'lanenet_loss')

    initial_var = tf.global_variables()
    final_var = initial_var[:-1]
    saver = tf.train.Saver(final_var)
    # Set sess configuration
    if use_gpu:
        sess_config = tf.ConfigProto(device_count={'GPU': 1})
    else:
        sess_config = tf.ConfigProto(device_count={'GPU': 0})
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'
    sess = tf.Session(config=sess_config)
    with sess.as_default():
        sess.run(tf.global_variables_initializer())
        saver.restore(sess=sess, save_path=weights_path)
        for i in range(math.ceil(len(image_list) / batch_size)):
            #print(i)
            paths = test_dataset.next_batch()
            instance_seg_image, existence_output = sess.run(
                [binary_seg_ret, instance_seg_ret],
                feed_dict={input_tensor: paths})
            for cnt, image_name in enumerate(paths):
                #print(image_name)
                parent_path = os.path.dirname(image_name)
                directory = os.path.join(save_dir, 'vgg_SCNN_DULR_w9',
                                         parent_path)
                if not os.path.exists(directory):
                    os.makedirs(directory)
                file_exist = open(
                    os.path.join(
                        directory,
                        os.path.basename(image_name)[:-3] + 'exist.txt'), 'w')
                for cnt_img in range(4):
                    coordinate_tmp = np.zeros((1, 30))
                    #print(coordinate_tmp)

                    img = (instance_seg_image[cnt, :, :, cnt_img + 1] *
                           255).astype(int)
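                    # Sample 30 rows (spaced 20 pixels apart in the original image, working upward);
                    # on each, take the column of the strongest response and keep it if it exceeds 0.3 of full scale.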
                    #print(w, h)
                    for i in range(30):
                        #print(i)
                        lineId = math.ceil(288 - i * 20 / w * 288) - 1
                        #print(lineId)
                        #print(img.shape)
                        img_line = img[lineId]
                        #print(type(img_line))
                        value = np.max(img_line)
                        id = np.where(img_line == value)
                        #print(id[0])
                        #print(value, id[0][0])
                        if (value / 255 > 0.3):
                            coordinate_tmp[0][i] = id[0][0]

                    if np.sum(coordinate_tmp > 0) < 2:
                        coordinate_tmp = np.zeros((1, 30))
                    #print(coordinate_tmp)
                    #print(np.sum(coordinate_tmp>0))
                    for i in range(30):
                        if coordinate_tmp[0][i] > 0:
                            cv2.circle(src_image,
                                       (int(coordinate_tmp[0][i] * h / 800),
                                        int(w - i * 20)), 6, (0, 0, 255), -1)
                        #line = score(lineId,:)
                    #[value, id] = max(line)
                    #if double(value)/255 > thr
                    #    coordinate(i) = id
                    #end
                    #end
                    cv2.imwrite(
                        os.path.join(
                            directory,
                            os.path.basename(image_name)[:-4] + '_' +
                            str(cnt_img + 1) + '_avg.png'),
                        (instance_seg_image[cnt, :, :, cnt_img + 1] *
                         255).astype(int))
                    if existence_output[cnt, cnt_img] > 0.5:
                        print('\n\n\n')
                        print(existence_output[cnt, cnt_img])
                        print('\n\n\n')
                        file_exist.write('1 ')
                    else:
                        file_exist.write('0 ')
                file_exist.close()
            cv2.imshow('img', src_image)
            cv2.waitKey(0)
    sess.close()
    return
Example 6
def test_lanenet(dataset_dir, image_path, weights_path, use_gpu, image_list, batch_size, save_dir):
    """
    :param dataset_dir: dataset directory passed to the test DataSet
    :param image_path: input passed to the test DataSet
    :param weights_path: checkpoint to restore
    :param use_gpu: run the session on the GPU if truthy
    :param image_list: list of test image paths (used to compute the number of batches)
    :param batch_size: number of images per inference batch
    :param save_dir: name of the output subdirectory created next to each input image
    :return:
    """
    
    test_dataset = lanenet_data_processor_test.DataSet(image_path, batch_size, dataset_dir)
    input_tensor = tf.placeholder(dtype=tf.string, shape=[None], name='input_tensor')
    imgs = tf.map_fn(test_dataset.process_img, input_tensor, dtype=tf.float32)
    phase_tensor = tf.constant('test', tf.string)

    net = lanenet_merge_model.LaneNet()
    binary_seg_ret, instance_seg_ret = net.test_inference(imgs, phase_tensor, 'lanenet_loss')
    initial_var = tf.global_variables()
    final_var = initial_var[:-1]
    saver = tf.train.Saver(final_var)
    # Set sess configuration
    if use_gpu:
        sess_config = tf.ConfigProto(device_count={'GPU': 1})
    else:
        sess_config = tf.ConfigProto(device_count={'GPU': 0})
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'
    sess = tf.Session(config=sess_config)
    with sess.as_default():
        sess.run(tf.global_variables_initializer())
        saver.restore(sess=sess, save_path=weights_path)
        for i in range(math.ceil(len(image_list) / batch_size)):
            print("Inferring batch {} with batch size {}".format(i, batch_size))
            paths = test_dataset.next_batch()
            instance_seg_image, existence_output = sess.run([binary_seg_ret, instance_seg_ret],
                                                            feed_dict={input_tensor: paths})
            # pudb.set_trace()
            for cnt, input_image_path in enumerate(paths):
                print("Generating output for input image {}".format(input_image_path))
                input_image = cv2.imread(input_image_path)
                input_image = cv2.resize(input_image, (CFG.TRAIN.IMG_WIDTH, CFG.TRAIN.IMG_HEIGHT))

                all_lanes_image = np.zeros((CFG.TRAIN.IMG_HEIGHT, CFG.TRAIN.IMG_WIDTH))
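                # all_lanes_image accumulates the four per-lane probability maps into a single grayscale overlay.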

                parent_path = os.path.dirname(input_image_path)
                output_dir = os.path.join(parent_path, save_dir)
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir)

                file_exist = open(os.path.join(output_dir, os.path.basename(input_image_path)[:-3] + 'exist.txt'), 'w')
                for cnt_img in range(4):
                    filename = os.path.join(output_dir, os.path.basename(input_image_path)[:-4] + '_' + str(cnt_img + 1) + '_avg.png')
                    lane_image = (instance_seg_image[cnt, :, :, cnt_img + 1] * 255).astype(int)
                    cv2.imwrite(filename, lane_image)
                    all_lanes_image = all_lanes_image + lane_image
                    if existence_output[cnt, cnt_img] > 0.5:
                        file_exist.write('1 ')
                    else:
                        file_exist.write('0 ')
                file_exist.close()

                # Make all_lanes_image into color image (3 channels)
                all_lanes_color_image = np.zeros((CFG.TRAIN.IMG_HEIGHT, CFG.TRAIN.IMG_WIDTH, 3))
                all_lanes_color_image[:, :, 2] = all_lanes_image[:, :]

                all_lanes_color_image_path = os.path.join(output_dir, os.path.basename(input_image_path)[:-3] + 'lanes.jpg')
                resized_input_image_path = os.path.join(output_dir, os.path.basename(input_image_path)[:-3] + 'resized_input.jpg')
                overlay_image_path = os.path.join(output_dir, os.path.basename(input_image_path)[:-3] + 'overlay.jpg')

                cv2.imwrite(resized_input_image_path, input_image)
                cv2.imwrite(all_lanes_color_image_path, all_lanes_color_image)
                cv2.imwrite(overlay_image_path, all_lanes_color_image + input_image)


    sess.close()
    return
Example 7
def test_lanenet(image_path, weights_path, use_gpu, image_list, batch_size):
    """
    :param image_path:
    :param weights_path:
    :param use_gpu:
    :return:
    """
    print('image_path:', image_path)
    img = cv2.imread('./1.jpg')
    test_dataset = lanenet_data_processor_test.DataSet(image_path, batch_size)
    input_tensor = tf.placeholder(dtype=tf.string,
                                  shape=[None],
                                  name='input_tensor')
    imgs = tf.map_fn(test_dataset.process_img, input_tensor, dtype=tf.float32)
    phase_tensor = tf.constant('test', tf.string)
    print('\n\n\n')
    print(imgs)
    print('\n\n\n')
    net = lanenet_merge_model.LaneNet()
    binary_seg_ret, instance_seg_ret = net.test_inference(
        imgs, phase_tensor, 'lanenet_loss')
    initial_var = tf.global_variables()
    final_var = initial_var[:-1]
    saver = tf.train.Saver(final_var)
    # Set sess configuration
    if use_gpu:
        sess_config = tf.ConfigProto(device_count={'GPU': 1})
    else:
        sess_config = tf.ConfigProto(device_count={'GPU': 0})
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'
    sess = tf.Session(config=sess_config)
    with sess.as_default():
        sess.run(tf.global_variables_initializer())
        saver.restore(sess=sess, save_path=weights_path)
        for i in range(math.ceil(len(image_list) / batch_size)):
            paths = test_dataset.next_batch()
            instance_seg_image, existence_output = sess.run(
                [binary_seg_ret, instance_seg_ret],
                feed_dict={input_tensor: paths})

            print('shape:', instance_seg_image.shape)
            for cnt, image_name in enumerate(paths):
                print('image:', image_name)
                parent_path = os.path.dirname(image_name)
                directory = os.path.join('result', parent_path)
                if not os.path.exists(directory):
                    os.makedirs(directory)
                file_exist = open(
                    os.path.join(
                        directory,
                        os.path.basename(image_name)[:-3] + 'exist.txt'), 'w')
                print('file_exist:', file_exist)

                print('img shape: ', img.shape)
                #img = cv2.resize(img, (800, 288), interpolation=cv2.INTER_CUBIC)
                for cnt_img in range(4):
                    if existence_output[cnt, cnt_img] > 0.5:
                        obj_mask = (
                            instance_seg_image[cnt, :, :, cnt_img + 1] *
                            255).astype(int)
                        h_scale = img.shape[0] / obj_mask.shape[0]
                        w_scale = img.shape[1] / obj_mask.shape[1]
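                        # Map mask coordinates back to the original image resolution and
                        # mark every confident pixel (mask value > 150) with a colored dot.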

                        for i in range(obj_mask.shape[0]):
                            for j in range(obj_mask.shape[1]):
                                if (obj_mask[i][j] > 150):
                                    pt1 = h_scale * i
                                    pt2 = w_scale * j
                                    cv2.circle(img, (int(pt2), int(pt1)), 1,
                                               color_options(cnt_img + 1), 0)
                                    #cv2.circle(obj_mask, (i, j), 1, (0,0,255), 0)
                                    #print(i, ' ', j, ' ', obj_mask[i][j])
                    else:
                        continue

                    coordinate_tmp = np.zeros((1, 30))
                    print(coordinate_tmp)
                    cv2.imwrite(
                        os.path.join(
                            directory,
                            os.path.basename(image_name)[:-4] + '_' +
                            str(cnt_img + 1) + '_avg.png'),
                        (instance_seg_image[cnt, :, :, cnt_img + 1] *
                         255).astype(int))
                    print(existence_output[cnt, cnt_img])
                    if existence_output[cnt, cnt_img] > 0.5:
                        file_exist.write('1 ')
                    else:
                        file_exist.write('0 ')
                file_exist.close()
                cv2.imwrite('obj.jpg', img)
                cv2.imshow('obj', img)
                cv2.waitKey(0)
    sess.close()
    return
Example 8
def test_lanenet(image_path, weights_path, use_gpu):
    """

    :param image_path:
    :param weights_path:
    :param use_gpu:
    :return:
    """

    test_dataset = lanenet_data_processor_test.DataSet('test_culane_img.txt')

    input_tensor = tf.placeholder(dtype=tf.float32,
                                  shape=[8, 288, 800, 3],
                                  name='input_tensor')
    phase_tensor = tf.constant('test', tf.string)

    net = lanenet_merge_model.LaneNet(phase=phase_tensor, net_flag='vgg')
    binary_seg_ret, instance_seg_ret = net.inference(input_tensor=input_tensor,
                                                     name='lanenet_loss')

    saver = tf.train.Saver()

    # Set sess configuration
    if use_gpu:
        sess_config = tf.ConfigProto(device_count={'GPU': 1})
    else:
        sess_config = tf.ConfigProto(device_count={'GPU': 0})
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'
    lanes_extra_file = open('lanes_file.txt', 'w')

    sess = tf.Session(config=sess_config)

    with sess.as_default():

        saver.restore(sess=sess, save_path=weights_path)
        for i in range(int(len(image_path) / 8)):
            print(i)

            gt_imgs = test_dataset.next_batch(CFG.TRAIN.BATCH_SIZE)
            gt_imgs = [
                cv2.resize(tmp,
                           dsize=(CFG.TRAIN.IMG_WIDTH, CFG.TRAIN.IMG_HEIGHT),
                           dst=tmp,
                           interpolation=cv2.INTER_LINEAR) for tmp in gt_imgs
            ]
            gt_imgs = [tmp - VGG_MEAN for tmp in gt_imgs]

            instance_seg_image, existence_output = sess.run(
                [binary_seg_ret, instance_seg_ret],
                feed_dict={input_tensor: gt_imgs})

            cv2.imwrite('test_img/' + str(i) + '_origin.png', gt_imgs[0])
            cv2.imwrite('test_img/' + str(i) + '_ins.png',
                        instance_seg_image[0] * 50)
            print(np.unique(instance_seg_image[0]))
            print(existence_output[0])

    sess.close()
    lanes_extra_file.close()

    return
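For context, here is a minimal driver sketch showing how the six-argument variants above might be invoked. It is not taken from any of the examples; the argument names and the assumption that image_path is a text file listing one image per line are hypothetical.

# Hypothetical driver for the six-argument test_lanenet variants above (a sketch, not from the source repos).
import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_path', type=str, required=True,
                        help='text file listing the test images, one path per line (assumed format)')
    parser.add_argument('--weights_path', type=str, required=True, help='checkpoint to restore')
    parser.add_argument('--use_gpu', type=int, default=1)
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--save_dir', type=str, default='./output')
    return parser.parse_args()

if __name__ == '__main__':
    args = parse_args()
    # Read the image list once so the number of batches can be computed.
    with open(args.image_path) as f:
        image_list = [line.strip() for line in f if line.strip()]
    test_lanenet(args.image_path, args.weights_path, args.use_gpu,
                 image_list, args.batch_size, args.save_dir)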