Example 1
def _test_generate_anchors_one_layer():
    """
    Test the _generate_anchors_one_layer method by visualizing its anchors on an image.
    """
    import util
    image_shape = (512, 512)
    h_I, w_I = image_shape
    stride = 256
    feat_shape = (h_I / stride, w_I / stride)
    h_l, w_l = feat_shape
    anchors = _generate_anchors_one_layer(h_I, w_I, h_l, w_l, gamma = 1.5)
    assert anchors.shape == (h_l, w_l, 4)
    mask = util.img.black(image_shape)
    for x in xrange(w_l):
        for y in xrange(h_l):
            cx, cy, w, h = anchors[y, x, :]
            xmin = (cx - w / 2)
            ymin = (cy - h / 2)
            
            xmax = (cx + w / 2)
            ymax = (cy + h / 2)
            
            cxy = (int(cx), int(cy))
            util.img.circle(mask, cxy, 3, color = 255)
            util.img.rectangle(mask, (xmin, ymin), (xmax, ymax), color = 255)
    
    util.sit(mask)
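
Note: _generate_anchors_one_layer itself is not shown on this page. A minimal sketch consistent with the assertion above (one (cx, cy, w, h) anchor per feature-map cell, centered on the cell, with side length gamma times the stride) might look like this; the actual pixel_link implementation may differ.

import numpy as np

def _generate_anchors_one_layer(h_I, w_I, h_l, w_l, gamma=1.5):
    """Sketch only: one anchor per feature-map cell, centered on the
    cell, with side length gamma * stride. h_l and w_l are the integer
    feature-map height and width."""
    y, x = np.mgrid[0:h_l, 0:w_l]
    stride_y = h_I * 1.0 / h_l
    stride_x = w_I * 1.0 / w_l
    cx = (x + 0.5) * stride_x
    cy = (y + 0.5) * stride_y
    w = np.full_like(cx, stride_x * gamma)
    h = np.full_like(cy, stride_y * gamma)
    return np.stack([cx, cy, w, h], axis=-1)  # shape (h_l, w_l, 4)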
Example 2
def _test_generate_anchors_one_layer():
    """
    Test the _generate_anchors_one_layer method by visualizing its anchors on an image.
    """
    import util
    image_shape = (512, 512)
    h_I, w_I = image_shape
    stride = 256
    feat_shape = (h_I / stride, w_I / stride)
    h_l, w_l = feat_shape
    anchors = _generate_anchors_one_layer(h_I, w_I, h_l, w_l, gamma=1.5)
    assert anchors.shape == (h_l, w_l, 4)
    mask = util.img.black(image_shape)
    for x in range(int(w_l)):
        for y in range(int(h_l)):
            cx, cy, w, h = anchors[y, x, :]
            xmin = (cx - w / 2)
            ymin = (cy - h / 2)

            xmax = (cx + w / 2)
            ymax = (cy + h / 2)

            cxy = (int(cx), int(cy))
            util.img.circle(mask, cxy, 3, color=255)
            util.img.rectangle(mask, (xmin, ymin), (xmax, ymax), color=255)

    util.sit(mask)
Example 3
def test():
    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)

    global_step = slim.get_or_create_global_step()
    with tf.name_scope('evaluation_%dx%d' %
                       (FLAGS.eval_image_height, FLAGS.eval_image_width)):
        with tf.variable_scope(tf.get_variable_scope(), reuse=False):
            image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
            image_shape = tf.placeholder(dtype=tf.int32, shape=[3])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
                image,
                None,
                None,
                None,
                None,
                out_shape=config.image_shape,
                data_format=config.data_format,
                is_training=False)
            b_image = tf.expand_dims(processed_image, axis=0)

            # build model and loss
            net = pixel_link_symbol.PixelLinkNet(b_image, is_training=False)
            masks = pixel_link.tf_decode_score_map_to_mask_in_batch(
                net.pixel_pos_scores, net.link_pos_scores)

    sess_config = tf.ConfigProto(log_device_placement=False,
                                 allow_soft_placement=True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore(
            tf.trainable_variables())
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()

    saver = tf.train.Saver(var_list=variables_to_restore)
    with tf.Session(config=sess_config) as sess:
        saver.restore(sess, util.tf.get_latest_ckpt(FLAGS.checkpoint_path))

        files = util.io.ls(FLAGS.dataset_dir)

        for image_name in files:
            file_path = util.io.join_path(FLAGS.dataset_dir, image_name)
            image_data = util.img.imread(file_path)
            link_scores, pixel_scores, mask_vals = sess.run(
                [net.link_pos_scores, net.pixel_pos_scores, masks],
                feed_dict={image: image_data})
            h, w, _ = image_data.shape

            def resize(img):
                return util.img.resize(img,
                                       size=(w, h),
                                       interpolation=cv2.INTER_NEAREST)

            def get_bboxes(mask):
                return pixel_link.mask_to_bboxes(mask, image_data.shape)

            def draw_bboxes(img, bboxes, color):
                for bbox in bboxes:
                    points = np.reshape(bbox, [4, 2])
                    cnts = util.img.points_to_contours(points)
                    util.img.draw_contours(img,
                                           contours=cnts,
                                           idx=-1,
                                           color=color,
                                           border_width=1)

            image_idx = 0
            pixel_score = pixel_scores[image_idx, ...]
            mask = mask_vals[image_idx, ...]

            bboxes_det = get_bboxes(mask)

            mask = resize(mask)
            pixel_score = resize(pixel_score)

            draw_bboxes(image_data, bboxes_det, util.img.COLOR_RGB_RED)
            #             print util.sit(pixel_score)
            #             print util.sit(mask)
            print(util.sit(image_data))
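
Note: pixel_link.mask_to_bboxes is not reproduced on this page. Conceptually it turns the decoded instance mask into one rotated box per text instance; a hypothetical OpenCV-based equivalent, assuming the mask marks each instance with a distinct positive integer label, could be:

import cv2
import numpy as np

def mask_to_bboxes_sketch(mask, image_shape):
    """Hypothetical stand-in for pixel_link.mask_to_bboxes: returns one
    flattened 4-point rotated box per labeled component in the mask."""
    h_I, w_I = image_shape[0:2]
    # cast to uint8 so cv2.resize accepts the label map (sketch only)
    mask = cv2.resize(np.asarray(mask, np.uint8), (w_I, h_I),
                      interpolation=cv2.INTER_NEAREST)
    bboxes = []
    for label in np.unique(mask):
        if label == 0:  # 0 is background
            continue
        ys, xs = np.where(mask == label)
        points = np.float32(np.column_stack([xs, ys]))
        rect = cv2.minAreaRect(points)   # ((cx, cy), (w, h), angle)
        box = cv2.boxPoints(rect)        # 4 corner points, float32
        bboxes.append(np.int32(box).reshape(-1))  # 8 values per box
    return bboxes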
Example 4
def test():
    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)

    global_step = slim.get_or_create_global_step()
    with tf.name_scope('evaluation_%dx%d' % (FLAGS.eval_image_height, FLAGS.eval_image_width)):
        with tf.variable_scope(tf.get_variable_scope(), reuse=False):
            image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
            image_shape = tf.placeholder(dtype=tf.int32, shape=[3, ])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(image, None, None, None, None,
                                                                                 out_shape=config.image_shape,
                                                                                 data_format=config.data_format,
                                                                                 is_training=False)
            b_image = tf.expand_dims(processed_image, axis=0)

            # build model and loss
            net = pixel_link_symbol.PixelLinkNet(b_image, is_training=False)
            masks = pixel_link.tf_decode_score_map_to_mask_in_batch(
                net.pixel_pos_scores, net.link_pos_scores)

    sess_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore(
            tf.trainable_variables())
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()

    saver = tf.train.Saver(var_list=variables_to_restore)
    with tf.Session(config=sess_config) as sess:
        saver.restore(sess, util.tf.get_latest_ckpt(FLAGS.checkpoint_path))

        files = util.io.ls(FLAGS.dataset_dir)
        rows = [["image", "id", "xMin", "xMax", "yMin", "yMax"]]
        for image_name in files:
            file_path = util.io.join_path(FLAGS.dataset_dir, image_name)
            image_data = util.img.imread(file_path)
            link_scores, pixel_scores, mask_vals = sess.run(
                [net.link_pos_scores, net.pixel_pos_scores, masks],
                feed_dict={image: image_data})
            h, w, _ = image_data.shape

            def resize(img):
                return util.img.resize(img, size=(w, h),
                                       interpolation=cv2.INTER_NEAREST)

            def get_bboxes(mask):
                return pixel_link.mask_to_bboxes(mask, image_data.shape)

            def draw_bboxes(img, bboxes, color):
                for bbox in bboxes:
                    points = np.reshape(bbox, [4, 2])
                    cnts = util.img.points_to_contours(points)
                    util.img.draw_contours(img, contours=cnts,
                                           idx=-1, color=color, border_width=1)

            def get_box_info(img, bboxes, name):
                boxes = []
                for box_id, bbox in enumerate(bboxes):
                    points = np.reshape(bbox, [4, 2])
                    x = [points[0][0], points[1][0], points[2][0], points[3][0]]
                    y = [points[0][1], points[1][1], points[2][1], points[3][1]]
                    boxes.append([name, box_id + 1, min(x),
                                  max(x), min(y), max(y)])
                return boxes

            image_idx = 0
            pixel_score = pixel_scores[image_idx, ...]
            mask = mask_vals[image_idx, ...]

            bboxes_det = get_bboxes(mask)

            mask = resize(mask)
            pixel_score = resize(pixel_score)
            bbox = get_box_info(image_data, bboxes_det, image_name)
            rows += bbox
            draw_bboxes(image_data, bboxes_det, util.img.COLOR_RGB_RED)
            #             print util.sit(pixel_score)
            #             print util.sit(mask)
            print(util.sit(image_data))

        def writeCSV(boxes):
            with open('/Users/ci.chen/temp/no-use/images/result.csv', 'w',
                      newline='') as csv_file:
                writer = csv.writer(csv_file)
                writer.writerows(boxes)

        writeCSV(rows)
Example 5
def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    batch_size = FLAGS.batch_size
    with tf.Graph().as_default():
        # Select the dataset.
        dataset = dataset_factory.get_dataset(FLAGS.dataset_name,
                                              FLAGS.dataset_split_name,
                                              FLAGS.dataset_dir)

        util.proc.set_proc_name(FLAGS.model_name + '_' + FLAGS.dataset_name)

        # =================================================================== #
        # Create a dataset provider and batches.
        # =================================================================== #
        with tf.device('/cpu:0'):
            with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
                provider = slim.dataset_data_provider.DatasetDataProvider(
                    dataset,
                    num_readers=FLAGS.num_readers,
                    common_queue_capacity=20 * batch_size,
                    common_queue_min=10 * batch_size,
                    shuffle=True)
            # Get for SSD network: image, labels, bboxes.
            [image, shape, gignored, gbboxes, x1, x2, x3, x4, y1, y2, y3,
             y4] = provider.get([
                 'image', 'shape', 'object/ignored', 'object/bbox',
                 'object/oriented_bbox/x1', 'object/oriented_bbox/x2',
                 'object/oriented_bbox/x3', 'object/oriented_bbox/x4',
                 'object/oriented_bbox/y1', 'object/oriented_bbox/y2',
                 'object/oriented_bbox/y3', 'object/oriented_bbox/y4'
             ])
            gxs = tf.transpose(tf.stack([x1, x2, x3, x4]))  # shape = (N, 4)
            gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
            image = tf.identity(image, 'input_image')
            # Pre-processing image, labels and bboxes.
            image_shape = (FLAGS.train_image_size, FLAGS.train_image_size)
            image, gignored, gbboxes, gxs, gys = \
                ssd_vgg_preprocessing.preprocess_image(image, gignored, gbboxes, gxs, gys,
                                                       out_shape=image_shape,
                                                       is_training=True)
            gxs = gxs * tf.cast(image_shape[1], gxs.dtype)
            gys = gys * tf.cast(image_shape[0], gys.dtype)
            gorbboxes = tfe_seglink.tf_min_area_rect(gxs, gys)
            image = tf.identity(image, 'processed_image')

            with tf.Session() as sess:
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                i = 0
                while i < 2:
                    i += 1
                    image_data, label_data, bbox_data, xs_data, ys_data, orbboxes = \
                        sess.run([image, gignored, gbboxes, gxs, gys, gorbboxes])
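                    # add back the per-channel means subtracted during preprocessing
                    # (approx. VGG means) to recover a displayable uint8 image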
                    image_data = image_data + [123., 117., 104.]
                    image_data = np.asarray(image_data, np.uint8)
                    h, w = image_data.shape[0:-1]
                    bbox_data = bbox_data * [h, w, h, w]
                    I_bbox = image_data.copy()
                    I_xys = image_data.copy()
                    I_orbbox = image_data.copy()

                    for idx in range(bbox_data.shape[0]):

                        def draw_bbox():
                            y1, x1, y2, x2 = bbox_data[idx, :]
                            util.img.rectangle(I_bbox, (x1, y1), (x2, y2),
                                               color=util.img.COLOR_WHITE)

                        def draw_xys():
                            points = list(zip(xs_data[idx, :], ys_data[idx, :]))
                            cnts = util.img.points_to_contours(points)
                            util.img.draw_contours(I_xys,
                                                   cnts,
                                                   -1,
                                                   color=util.img.COLOR_GREEN)

                        def draw_orbbox():
                            orbox = orbboxes[idx, :]
                            import cv2
                            rect = ((orbox[0], orbox[1]), (orbox[2], orbox[3]),
                                    orbox[4])
                            box = cv2.boxPoints(rect)  # cv2.cv.BoxPoints in OpenCV 2.x
                            box = np.int0(box)
                            cv2.drawContours(I_orbbox, [box], 0,
                                             util.img.COLOR_RGB_RED, 1)

                        draw_bbox()
                        draw_xys()
                        draw_orbbox()

                    print(util.sit(I_bbox))
                    print(util.sit(I_xys))
                    print(util.sit(I_orbbox))
                    print('check the images and make sure that bboxes '
                          'in different colors are the same.')
                coord.request_stop()
                coord.join(threads)
Example 6
def test():
    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)
    
    global_step = slim.get_or_create_global_step()
    with tf.name_scope('evaluation_%dx%d'%(FLAGS.eval_image_height, FLAGS.eval_image_width)):
        with tf.variable_scope(tf.get_variable_scope(), reuse = False):
            image = tf.placeholder(dtype=tf.int32, shape = [None, None, 3])
            image_shape = tf.placeholder(dtype = tf.int32, shape = [3, ])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(image, None, None, None, None, 
                                                       out_shape = config.image_shape,
                                                       data_format = config.data_format, 
                                                       is_training = False)
            b_image = tf.expand_dims(processed_image, axis = 0)

            # build model and loss
            net = pixel_link_symbol.PixelLinkNet(b_image, is_training = False)
            masks = pixel_link.tf_decode_score_map_to_mask_in_batch(
                net.pixel_pos_scores, net.link_pos_scores)
            
    sess_config = tf.ConfigProto(log_device_placement = False, allow_soft_placement = True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction
    
    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore(
                tf.trainable_variables())
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()
        
    
    saver = tf.train.Saver(var_list = variables_to_restore)
    with tf.Session(config=sess_config) as sess:
        saver.restore(sess, util.tf.get_latest_ckpt(FLAGS.checkpoint_path))
        
        files = util.io.ls(FLAGS.dataset_dir)
        
        for image_name in files:
            file_path = util.io.join_path(FLAGS.dataset_dir, image_name)
            image_data = util.img.imread(file_path)
            link_scores, pixel_scores, mask_vals = sess.run(
                    [net.link_pos_scores, net.pixel_pos_scores, masks],
                    feed_dict = {image: image_data})
            h, w, _ = image_data.shape
            def resize(img):
                return util.img.resize(img, size = (w, h), 
                                       interpolation = cv2.INTER_NEAREST)
            
            def get_bboxes(mask):
                return pixel_link.mask_to_bboxes(mask, image_data.shape)
            
            def draw_bboxes(img, bboxes, color):
                for bbox in bboxes:
                    points = np.reshape(bbox, [4, 2])
                    cnts = util.img.points_to_contours(points)
                    util.img.draw_contours(img, contours = cnts,
                                           idx = -1, color = color, border_width = 1)
            image_idx = 0
            pixel_score = pixel_scores[image_idx, ...]
            mask = mask_vals[image_idx, ...]

            bboxes_det = get_bboxes(mask)
            
            mask = resize(mask)
            pixel_score = resize(pixel_score)

            draw_bboxes(image_data, bboxes_det, util.img.COLOR_RGB_RED)
#             print util.sit(pixel_score)
#             print util.sit(mask)
            print(util.sit(image_data))
Example 7
def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    batch_size = FLAGS.batch_size
    with tf.Graph().as_default():
        # Select the dataset.
        dataset = dataset_factory.get_dataset(
            FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)

        util.proc.set_proc_name(FLAGS.model_name + '_' + FLAGS.dataset_name)


        # =================================================================== #
        # Create a dataset provider and batches.
        # =================================================================== #
        with tf.device('/cpu:0'):
            with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
                provider = slim.dataset_data_provider.DatasetDataProvider(
                    dataset,
                    num_readers=FLAGS.num_readers,
                    common_queue_capacity=20 * batch_size,
                    common_queue_min=10 * batch_size,
                    shuffle=True)
            # Get for SSD network: image, labels, bboxes.
            [image, shape, gignored, gbboxes, x1, x2, x3, x4, y1, y2, y3, y4] = provider.get(['image', 'shape',
                                                             'object/ignored',
                                                             'object/bbox', 
                                                             'object/oriented_bbox/x1',
                                                             'object/oriented_bbox/x2',
                                                             'object/oriented_bbox/x3',
                                                             'object/oriented_bbox/x4',
                                                             'object/oriented_bbox/y1',
                                                             'object/oriented_bbox/y2',
                                                             'object/oriented_bbox/y3',
                                                             'object/oriented_bbox/y4'
                                                             ])
            gxs = tf.transpose(tf.stack([x1, x2, x3, x4])) #shape = (N, 4)
            gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
            image = tf.identity(image, 'input_image')
            # Pre-processing image, labels and bboxes.
            image_shape = (FLAGS.train_image_size, FLAGS.train_image_size)
            image, gignored, gbboxes, gxs, gys = \
                            ssd_vgg_preprocessing.preprocess_image(image, gignored, gbboxes, gxs, gys, 
                                                               out_shape=image_shape,
                                                               is_training = True)
            gxs = gxs * tf.cast(image_shape[1], gxs.dtype)
            gys = gys * tf.cast(image_shape[0], gys.dtype)
            gorbboxes = tfe_seglink.tf_min_area_rect(gxs, gys)
            image = tf.identity(image, 'processed_image')
            
            with tf.Session() as sess:
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                i = 0
                while i < 2:
                    i += 1
                    image_data, label_data, bbox_data, xs_data, ys_data, orbboxes = \
                                 sess.run([image, gignored, gbboxes, gxs, gys, gorbboxes])
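                    # add back the per-channel means subtracted during preprocessing
                    # (approx. VGG means) to recover a displayable uint8 image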
                    image_data = image_data + [123., 117., 104.]
                    image_data = np.asarray(image_data, np.uint8)
                    h, w = image_data.shape[0:-1]
                    bbox_data = bbox_data * [h, w, h, w]
                    I_bbox = image_data.copy()
                    I_xys = image_data.copy()
                    I_orbbox = image_data.copy()
                    
                    for idx in range(bbox_data.shape[0]):
                        
                        def draw_bbox():
                            y1, x1, y2, x2 = bbox_data[idx, :]
                            util.img.rectangle(I_bbox, (x1, y1), (x2, y2), color = util.img.COLOR_WHITE)
                        
                        def draw_xys():
                            points = zip(xs_data[idx, :], ys_data[idx, :])
                            cnts = util.img.points_to_contours(points)
                            util.img.draw_contours(I_xys, cnts, -1, color = util.img.COLOR_GREEN)

                        def draw_orbbox():
                            orbox = orbboxes[idx, :]
                            import cv2
                            rect = ((orbox[0], orbox[1]), (orbox[2], orbox[3]), orbox[4])
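                            # cv2.cv.BoxPoints is the OpenCV 2.x API; OpenCV 3+ renamed it to cv2.boxPoints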
                            box = cv2.cv.BoxPoints(rect)
                            box = np.int0(box)
                            cv2.drawContours(I_orbbox, [box], 0, util.img.COLOR_RGB_RED, 1)
                        
                        draw_bbox()
                        draw_xys()
                        draw_orbbox()
                        
                    print(util.sit(I_bbox))
                    print(util.sit(I_xys))
                    print(util.sit(I_orbbox))
                    print('check the images and make sure that bboxes '
                          'in different colors are the same.')
                coord.request_stop()
                coord.join(threads)
Example 8
def test():
    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)
    output_dir = FLAGS.output_path

    global_step = slim.get_or_create_global_step()
    with tf.name_scope('evaluation_%dx%d' %
                       (FLAGS.eval_image_height, FLAGS.eval_image_width)):
        with tf.variable_scope(tf.get_variable_scope(), reuse=False):
            image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
            image_shape = tf.placeholder(dtype=tf.int32, shape=[3])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
                image,
                None,
                None,
                None,
                None,
                out_shape=config.image_shape,
                data_format=config.data_format,
                is_training=False)
            b_image = tf.expand_dims(processed_image, axis=0)

            # build model and loss
            net = pixel_link_symbol.PixelLinkNet(b_image, is_training=False)
            masks = pixel_link.tf_decode_score_map_to_mask_in_batch(
                net.pixel_pos_scores, net.link_pos_scores)

    sess_config = tf.ConfigProto(log_device_placement=False,
                                 allow_soft_placement=True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore(
            tf.trainable_variables())
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()

    saver = tf.train.Saver(var_list=variables_to_restore)
    with tf.Session(config=sess_config) as sess:
        saver.restore(sess, util.tf.get_latest_ckpt(FLAGS.checkpoint_path))

        files = util.io.ls(FLAGS.dataset_dir)

        for image_name in files:

            if os.path.isfile(os.path.join(output_dir, image_name + ".png")):
                continue

            file_path = util.io.join_path(FLAGS.dataset_dir, image_name)
            image_data = util.img.imread(file_path)

            image_data, scale = resize_im(image_data,
                                          scale=768,
                                          max_scale=1280)

            start_tf_time = time.time()
            link_scores, pixel_scores, mask_vals = sess.run(
                [net.link_pos_scores, net.pixel_pos_scores, masks],
                feed_dict={image: image_data})
            end_tf_time = time.time()
            with open(os.path.join('pkl', image_name) + '.pkl', 'wb') as f:
                cPickle.dump(link_scores, f, protocol=-1)
                cPickle.dump(pixel_scores, f, protocol=-1)
                cPickle.dump(mask_vals, f, protocol=-1)

            h, w, _ = image_data.shape

            def resize(img):
                return util.img.resize(img,
                                       size=(w, h),
                                       interpolation=cv2.INTER_NEAREST)

            def get_bboxes(mask):
                return pixel_link.mask_to_bboxes(mask, image_data.shape)

            def draw_bboxes(img, bboxes, color):
                for bbox in bboxes:
                    points = np.reshape(bbox, [4, 2])
                    cnts = util.img.points_to_contours(points)
                    util.img.draw_contours(img,
                                           contours=cnts,
                                           idx=-1,
                                           color=color,
                                           border_width=4)

            image_idx = 0
            pixel_score = pixel_scores[image_idx, ...]
            mask = mask_vals[image_idx, ...]
            start_post_time = time.time()
            bboxes_det = get_bboxes(mask)
            end_post_time = time.time()

            print("Tensorflow inference time:", end_tf_time - start_tf_time)
            print("Post filtering time:", end_post_time - start_post_time)

            mask = resize(mask)
            pixel_score = resize(pixel_score)

            draw_bboxes(image_data, bboxes_det, util.img.COLOR_RGB_RED)
            #             print util.sit(pixel_score)
            #             print util.sit(mask)
            #             output_dir = os.path.join("test_output",'%.1f'%FLAGS.pixel_conf_threshold+"_"+'%.1f'%FLAGS.pixel_conf_threshold)

            if not os.path.exists(output_dir):
                os.mkdir(output_dir)
            print(util.sit(image_data,
                           format='bgr',
                           path=os.path.join(output_dir, image_name + ".png")))
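
Note: resize_im used above is not defined on this page. In text-detection codebases such a helper typically scales the shorter image side to scale while capping the longer side at max_scale, and returns both the resized image and the scale factor; a sketch under that assumption:

import cv2

def resize_im(im, scale=768, max_scale=1280):
    """Hypothetical resize_im: shorter side scaled to `scale`, longer
    side capped at `max_scale`. Returns (resized_image, factor)."""
    f = float(scale) / min(im.shape[0], im.shape[1])
    if max_scale is not None and f * max(im.shape[0], im.shape[1]) > max_scale:
        f = float(max_scale) / max(im.shape[0], im.shape[1])
    resized = cv2.resize(im, None, fx=f, fy=f,
                         interpolation=cv2.INTER_LINEAR)
    return resized, f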