def create_dataset_batch_queue(dataset):
    from preprocessing import ssd_vgg_preprocessing

    with tf.device('/cpu:0'):
        with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=1000 * config.batch_size,
                common_queue_min=700 * config.batch_size,
                shuffle=True)
        # Fetch image, labels and bboxes for the SSD network.
        [image, glabel, gbboxes, x1, x2, x3, x4, y1, y2, y3, y4] = provider.get([
                                                         'image',
                                                         'object/label',
                                                         'object/bbox', 
                                                         'object/oriented_bbox/x1',
                                                         'object/oriented_bbox/x2',
                                                         'object/oriented_bbox/x3',
                                                         'object/oriented_bbox/x4',
                                                         'object/oriented_bbox/y1',
                                                         'object/oriented_bbox/y2',
                                                         'object/oriented_bbox/y3',
                                                         'object/oriented_bbox/y4'
                                                         ])
        gxs = tf.transpose(tf.stack([x1, x2, x3, x4])) #shape = (N, 4)
        gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
        image = tf.identity(image, 'input_image')
        
        # Pre-processing image, labels and bboxes.
        image, glabel, gbboxes, gxs, gys = \
                ssd_vgg_preprocessing.preprocess_image(
                       image, glabel, gbboxes, gxs, gys, 
                       out_shape = config.train_image_shape,
                       data_format = config.data_format, 
                       use_rotation = config.use_rotation,
                       is_training = True)
        image = tf.identity(image, 'processed_image')
        
        # calculate ground truth
        pixel_cls_label, pixel_cls_weight, \
        pixel_link_label, pixel_link_weight = \
            pixel_link.tf_cal_gt_for_single_image(gxs, gys, glabel)
        
        # batch them
        with tf.name_scope(FLAGS.dataset_name + '_batch'):
            b_image, b_pixel_cls_label, b_pixel_cls_weight, \
            b_pixel_link_label, b_pixel_link_weight = \
                tf.train.batch(
                    [image, pixel_cls_label, pixel_cls_weight, 
                        pixel_link_label, pixel_link_weight],
                    batch_size = config.batch_size_per_gpu,
                    num_threads= FLAGS.num_preprocessing_threads,
                    capacity = 500)
        with tf.name_scope(FLAGS.dataset_name + '_prefetch_queue'):
            batch_queue = slim.prefetch_queue.prefetch_queue(
                [b_image, b_pixel_cls_label, b_pixel_cls_weight, 
                    b_pixel_link_label, b_pixel_link_weight],
                capacity = 50) 
    return batch_queue    
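
A minimal sketch of how such a queue is typically consumed, assuming TF 1.x queue runners; `run_training_loop` and the single `sess.run` call are illustrative only, not part of the original example:

def run_training_loop(dataset):
    # Hypothetical consumer of the pipeline above (sketch, not from the source).
    batch_queue = create_dataset_batch_queue(dataset)
    (b_image, b_pixel_cls_label, b_pixel_cls_weight,
     b_pixel_link_label, b_pixel_link_weight) = batch_queue.dequeue()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        images = sess.run(b_image)  # fetch one preprocessed batch
        coord.request_stop()
        coord.join(threads)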
Example #2
def create_dataset_batch_queue(dataset):
    from preprocessing import ssd_vgg_preprocessing

    with tf.device('/cpu:0'):
        with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=1000 * config.batch_size,
                common_queue_min=700 * config.batch_size,
                shuffle=True)
        # Fetch image, labels and bboxes for the SSD network.
        [image, glabel, gbboxes, x1, x2, x3, x4, y1, y2, y3, y4] = provider.get([
                                                         'image',
                                                         'object/label',
                                                         'object/bbox', 
                                                         'object/oriented_bbox/x1',
                                                         'object/oriented_bbox/x2',
                                                         'object/oriented_bbox/x3',
                                                         'object/oriented_bbox/x4',
                                                         'object/oriented_bbox/y1',
                                                         'object/oriented_bbox/y2',
                                                         'object/oriented_bbox/y3',
                                                         'object/oriented_bbox/y4'
                                                         ])
        gxs = tf.transpose(tf.stack([x1, x2, x3, x4])) #shape = (N, 4)
        gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
        image = tf.identity(image, 'input_image')
        
        # Pre-processing image, labels and bboxes.
        image, glabel, gbboxes, gxs, gys = \
                ssd_vgg_preprocessing.preprocess_image(
                       image, glabel, gbboxes, gxs, gys, 
                       out_shape = config.train_image_shape,
                       data_format = config.data_format, 
                       use_rotation = config.use_rotation,
                       is_training = True)
        image = tf.identity(image, 'processed_image')
        
        # calculate ground truth
        pixel_cls_label, pixel_cls_weight, \
        pixel_link_label, pixel_link_weight = \
            pixel_link_train.tf_cal_gt_for_single_image(gxs, gys, glabel)
        
        # batch them
        with tf.name_scope(FLAGS.dataset_name + '_batch'):
            b_image, b_pixel_cls_label, b_pixel_cls_weight, \
            b_pixel_link_label, b_pixel_link_weight = \
                tf.train.batch(
                    [image, pixel_cls_label, pixel_cls_weight, 
                        pixel_link_label, pixel_link_weight],
                    batch_size = config.batch_size_per_gpu,
                    num_threads= FLAGS.num_preprocessing_threads,
                    capacity = 500)
        with tf.name_scope(FLAGS.dataset_name + '_prefetch_queue'):
            batch_queue = slim.prefetch_queue.prefetch_queue(
                [b_image, b_pixel_cls_label, b_pixel_cls_weight, 
                    b_pixel_link_label, b_pixel_link_weight],
                capacity = 50) 
    return batch_queue    
Example #3
def test():
    with tf.name_scope('test'):
        image = tf.placeholder(dtype=tf.int32, shape = [None, None, 3])
        image_shape = tf.placeholder(dtype = tf.int32, shape = [3, ])
        processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(image, None, None, None, None,
                                                   out_shape = config.image_shape,
                                                   data_format = config.data_format,
                                                   is_training = False)
        b_image = tf.expand_dims(processed_image, axis = 0)
        net = pixel_link_symbol.PixelLinkNet(b_image, is_training = False)
        global_step = slim.get_or_create_global_step()


    sess_config = tf.ConfigProto(log_device_placement = False, allow_soft_placement = True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)
    logdir = util.io.join_path(checkpoint_dir, 'test', FLAGS.dataset_name + '_' +FLAGS.dataset_split_name)

    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore()
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()

    saver = tf.train.Saver(var_list = variables_to_restore)


    image_names = util.io.ls(FLAGS.dataset_dir)
    image_names.sort()

    checkpoint = FLAGS.checkpoint_path
    checkpoint_name = util.io.get_filename(str(checkpoint))
    dump_path = util.io.join_path(logdir, checkpoint_name)
    txt_path = util.io.join_path(dump_path,'test')

    with tf.Session(config = sess_config) as sess:
        saver.restore(sess, checkpoint)

        for image_name in tqdm(image_names):
            image_data = util.img.imread(
                util.io.join_path(FLAGS.dataset_dir, image_name), rgb = True)
            image_name = image_name.split('.')[0]
            pixel_pos_scores, link_pos_scores = sess.run(
                [net.pixel_pos_scores, net.link_pos_scores],
                feed_dict={image: image_data})
            to_txt(txt_path,
                    image_name, image_data,
                    pixel_pos_scores, link_pos_scores)
Example #4
def create_dataset_batch_queue(dataset):
    # Keep the input pipeline on the CPU.
    with tf.device('/cpu:0'):
        # tf.name_scope adds a common name prefix to the ops created inside it;
        # it applies to tf.Variable but not to tf.get_variable.
        with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
            # Read data from the dataset provider.
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=50 * config.batch_size,
                common_queue_min=30 * config.batch_size,
                shuffle=True)
        # Fetch image, labels and bboxes for the SSD network.
        [image, gignored, gbboxes, x1, x2, x3, x4, y1, y2, y3, y4] = provider.get([
                                                         'image',
                                                         'object/ignored',
                                                         'object/bbox', 
                                                         'object/oriented_bbox/x1',
                                                         'object/oriented_bbox/x2',
                                                         'object/oriented_bbox/x3',
                                                         'object/oriented_bbox/x4',
                                                         'object/oriented_bbox/y1',
                                                         'object/oriented_bbox/y2',
                                                         'object/oriented_bbox/y3',
                                                         'object/oriented_bbox/y4'
                                                         ])
        # Stack the four corner coordinates with tf.stack(), then
        # tf.transpose() turns the result into shape (N, 4).
        gxs = tf.transpose(tf.stack([x1, x2, x3, x4])) #shape = (N, 4)
        gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
        image = tf.identity(image, 'input_image')
        
        # Pre-processing image, labels and bboxes.
        image, gignored, gbboxes, gxs, gys = ssd_vgg_preprocessing.preprocess_image(image, gignored, gbboxes, gxs, gys,
                                                           out_shape = config.image_shape,
                                                           data_format = config.data_format, 
                                                           is_training = True)
        image = tf.identity(image, 'processed_image')
        
        # Calculate the ground-truth labels.
        seg_label, seg_loc, link_label = seglink.tf_get_all_seglink_gt(gxs, gys, gignored)
        
        # Batch them: tf.train.batch() assembles a batch from a list
        # (or dict) of tensors.
        b_image, b_seg_label, b_seg_loc, b_link_label = tf.train.batch(
            [image, seg_label, seg_loc, link_label],
            batch_size = config.batch_size_per_gpu,
            num_threads= FLAGS.num_preprocessing_threads,
            capacity = 50)

        # prefetch_queue() prefetches the batched tensors into a queue.
        batch_queue = slim.prefetch_queue.prefetch_queue(
            [b_image, b_seg_label, b_seg_loc, b_link_label],
            capacity = 50) 
    return batch_queue    
Example #5
def preprocessing_fn(image,
                     labels,
                     bboxes,
                     out_shape,
                     data_format='NHWC',
                     **kwargs):
    # `is_training` is not an argument here: this function is defined inside a
    # factory that supplies it, so it is captured by closure.
    return ssd_vgg_preprocessing.preprocess_image(image,
                                                  labels,
                                                  bboxes,
                                                  out_shape,
                                                  data_format=data_format,
                                                  is_training=is_training,
                                                  **kwargs)
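
Since `is_training` is undeclared above, this function presumably lives inside a preprocessing factory that captures it by closure; a sketch of such a wrapper (the name `get_preprocessing` is an assumption, mirroring SSD-TensorFlow's preprocessing_factory):

def get_preprocessing(is_training=False):
    # Assumed factory: `is_training` is captured by the inner function below.
    def preprocessing_fn(image, labels, bboxes, out_shape,
                         data_format='NHWC', **kwargs):
        return ssd_vgg_preprocessing.preprocess_image(
            image, labels, bboxes, out_shape,
            data_format=data_format, is_training=is_training, **kwargs)
    return preprocessing_fn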
Example #6
def create_dataset_batch_queue(dataset):
    batch_size = config.batch_size
    with tf.device('/cpu:0'):
        with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=20 * batch_size,
                common_queue_min=10 * batch_size,
                shuffle=True)
        # Fetch image, labels and bboxes for the SSD network.
        [image, shape, gignored, gbboxes, x1, x2, x3, x4, y1, y2, y3, y4] = provider.get([
                                                         'image', 'shape',
                                                         'object/ignored',
                                                         'object/bbox', 
                                                         'object/oriented_bbox/x1',
                                                         'object/oriented_bbox/x2',
                                                         'object/oriented_bbox/x3',
                                                         'object/oriented_bbox/x4',
                                                         'object/oriented_bbox/y1',
                                                         'object/oriented_bbox/y2',
                                                         'object/oriented_bbox/y3',
                                                         'object/oriented_bbox/y4'
                                                         ])
        gxs = tf.transpose(tf.stack([x1, x2, x3, x4])) #shape = (N, 4)
        gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
        image = tf.identity(image, 'input_image')
        
        # Pre-processing image, labels and bboxes.
        image, gignored, gbboxes, gxs, gys = ssd_vgg_preprocessing.preprocess_image(
                                                           image, gignored, gbboxes, gxs, gys, 
                                                           out_shape = config.image_shape,
                                                           data_format = config.data_format, 
                                                           is_training = True)
        image = tf.identity(image, 'processed_image')
        
        # calculate ground truth
        seg_label, seg_offsets, link_label = seglink.tf_get_all_seglink_gt(gxs, gys, gignored)

        # batch them
        b_image, b_seg_label, b_seg_offsets, b_link_label = tf.train.batch(
            [image, seg_label, seg_offsets, link_label],
            batch_size = config.batch_size_per_gpu,
            num_threads=FLAGS.num_preprocessing_threads,
            capacity = 50)
            
        batch_queue = slim.prefetch_queue.prefetch_queue(
            [b_image, b_seg_label, b_seg_offsets, b_link_label],
            capacity = 50) 
    return batch_queue    
Example #7
def create_dataset_batch_queue(dataset):
    batch_size = config.batch_size
    with tf.device('/cpu:0'):
        with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=20 * batch_size,
                common_queue_min=10 * batch_size,
                shuffle=True)
        # Fetch image, labels and bboxes for the SSD network.
        [image, shape, gignored, gbboxes, x1, x2, x3, x4, y1, y2, y3,
         y4] = provider.get([
             'image', 'shape', 'object/ignored', 'object/bbox',
             'object/oriented_bbox/x1', 'object/oriented_bbox/x2',
             'object/oriented_bbox/x3', 'object/oriented_bbox/x4',
             'object/oriented_bbox/y1', 'object/oriented_bbox/y2',
             'object/oriented_bbox/y3', 'object/oriented_bbox/y4'
         ])
        gxs = tf.transpose(tf.stack([x1, x2, x3, x4]))  # shape = (N, 4)
        gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
        image = tf.identity(image, 'input_image')

        # Pre-processing image, labels and bboxes.
        image, gignored, gbboxes, gxs, gys = ssd_vgg_preprocessing.preprocess_image(
            image,
            gignored,
            gbboxes,
            gxs,
            gys,
            out_shape=config.image_shape,
            data_format=config.data_format,
            is_training=True)
        image = tf.identity(image, 'processed_image')

        # calculate ground truth
        seg_label, seg_offsets, link_label = seglink.tf_get_all_seglink_gt(
            gxs, gys, gignored)

        # batch them
        b_image, b_seg_label, b_seg_offsets, b_link_label = tf.train.batch(
            [image, seg_label, seg_offsets, link_label],
            batch_size=batch_size,
            num_threads=FLAGS.num_preprocessing_threads,
            capacity=50)

        batch_queue = slim.prefetch_queue.prefetch_queue(
            [b_image, b_seg_label, b_seg_offsets, b_link_label], capacity=50)
    return batch_queue
Example #8
    def __init__(self):
        self._config_initialization()
        # build up computation graph
        image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
        processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(image, None, None, None, None,
                                                                             out_shape=config.image_shape,
                                                                             data_format=config.data_format,
                                                                             is_training=False)
        b_image = tf.expand_dims(processed_image, axis=0)

        # build model and loss
        net = pixel_link_symbol.PixelLinkNet(b_image, is_training=False)
        masks = pixel_link.tf_decode_score_map_to_mask_in_batch(
            net.pixel_pos_scores, net.link_pos_scores)
        self._image = image
        self._net = net
        self._masks = masks
        # end of build up

        global_step = slim.get_or_create_global_step()
        sess_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
        if FLAGS.gpu_memory_fraction < 0:
            sess_config.gpu_options.allow_growth = True
        elif FLAGS.gpu_memory_fraction > 0:
            sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

        checkpoint_dir = FLAGS.checkpoint_path

        if FLAGS.using_moving_average:
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay)
            variables_to_restore = variable_averages.variables_to_restore()
            variables_to_restore[global_step.op.name] = global_step
        else:
            variables_to_restore = slim.get_variables_to_restore()

        saver = tf.train.Saver(var_list=variables_to_restore)

        sess = tf.Session(config=sess_config)
        saver.restore(sess, checkpoint_dir)
        self._sess = sess
Example #9
def load_net_for_inference():
    global_step = slim.get_or_create_global_step()
    with tf.name_scope('output'):  # formerly 'evaluation_%dx%d' % (FLAGS.eval_image_height, FLAGS.eval_image_width)
        with tf.variable_scope(tf.get_variable_scope(), reuse=False):
            image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
            image_shape = tf.placeholder(dtype=tf.int32, shape=[3, ])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
                image,
                None,
                None,
                None,
                None,
                out_shape=config.image_shape,
                data_format=config.data_format,
                is_training=False)
            b_image = tf.expand_dims(processed_image, axis=0)

            # build model and loss
            net = pixel_link_symbol.PixelLinkNet(b_image, is_training=False)
            #masks = pixel_link.tf_decode_score_map_to_mask_in_batch(
            #    net.pixel_pos_scores, net.link_pos_scores)

    sess_config = tf.ConfigProto(log_device_placement=False,
                                 allow_soft_placement=True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore(
            tf.trainable_variables())
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()

    saver = tf.train.Saver(var_list=variables_to_restore)
    return net, saver, image
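
A hedged usage sketch for the loader above; `checkpoint_path` and `image_data` are placeholders for a trained checkpoint and an HxWx3 image array:

net, saver, image = load_net_for_inference()
with tf.Session() as sess:
    saver.restore(sess, checkpoint_path)  # checkpoint_path: an assumption
    pixel_scores, link_scores = sess.run(
        [net.pixel_pos_scores, net.link_pos_scores],
        feed_dict={image: image_data})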
Example #10
def get_batch(dataset_dir,
			  num_readers,
			  batch_size,
			  out_shape,
			  net,
			  anchors,
			  num_preprocessing_threads,
			  file_pattern = '*.tfrecord',
			  is_training = True):
	
	dataset = sythtextprovider.get_datasets(dataset_dir,file_pattern = file_pattern)

	provider = slim.dataset_data_provider.DatasetDataProvider(
				dataset,
				num_readers=num_readers,
				common_queue_capacity=20 * batch_size,
				common_queue_min=10 * batch_size,
				shuffle=True)
	
	[image, shape, glabels, gbboxes] = provider.get(['image', 'shape',
											 'object/label',
											 'object/bbox'])

	image, glabels, gbboxes, num = \
		ssd_vgg_preprocessing.preprocess_image(image, glabels, gbboxes,
			out_shape, is_training=is_training)

	gclasses, glocalisations, gscores = \
		net.bboxes_encode(glabels, gbboxes, anchors, num)

	batch_shape = [1] + [len(anchors)] * 3


	r = tf.train.batch(
		tf_utils.reshape_list([image, gclasses, glocalisations, gscores]),
		batch_size=batch_size,
		num_threads=num_preprocessing_threads,
		capacity=5 * batch_size)

	b_image, b_gclasses, b_glocalisations, b_gscores = \
		tf_utils.reshape_list(r, batch_shape)

	return [b_image, b_gclasses, b_glocalisations, b_gscores]
Example #11
    def __init__(self):
        self.image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
        image_shape = tf.placeholder(dtype=tf.int32, shape=[3, ])
        processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
            self.image,
            None,
            None,
            None,
            None,
            out_shape=(768, 768),
            data_format='NHWC',
            is_training=False)
        b_image = tf.expand_dims(processed_image, axis=0)

        # build model and loss
        self.net = pixel_link_symbol.PixelLinkNet(b_image, is_training=False)
        self.masks = pixel_link.tf_decode_score_map_to_mask_in_batch(
            self.net.pixel_pos_scores, self.net.link_pos_scores)
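
Assuming this `__init__` belongs to a detector class (`PixelLinkDetector` below is a hypothetical name), inference would look roughly like:

detector = PixelLinkDetector()
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, checkpoint_path)  # checkpoint_path: an assumption
    masks = sess.run(detector.masks,
                     feed_dict={detector.image: image_data})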
Example #12
def main(_):
    image_shape = (FLAGS.export_image_height, FLAGS.export_image_width)
    config.load_config(FLAGS.train_dir)
    config.init_config(image_shape, 
                       batch_size = 1, 
                       pixel_conf_threshold = 0.8,
                       link_conf_threshold = 0.8,
                       num_gpus = 1, 
                   )

    image = tf.placeholder(dtype=tf.int32, shape = [None, None, 3], name='input_image')
    processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(image, None, None, None, None, 
                                                   out_shape = config.image_shape,
                                                   data_format = config.data_format, 
                                                   is_training = False)
    b_image = tf.expand_dims(processed_image, axis = 0)
    net = pixel_link_symbol.PixelLinkNet(b_image, is_training = True)
    pixel_pos_scores = tf.identity(net.pixel_pos_scores, name='pixel_pos_scores')
    link_pos_scores = tf.identity(net.link_pos_scores, name='link_pos_scores')

    saver = tf.train.Saver()

    ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
    checkpoint_exists = ckpt and ckpt.model_checkpoint_path
    if not checkpoint_exists:
        tf.logging.info('No checkpoint found in FLAGS.train_dir')
        return


    with tf.Session() as sess:
        saver.restore(sess, ckpt.model_checkpoint_path)
        output_graph_def = graph_util.convert_variables_to_constants(sess, sess.graph_def, output_node_names=['pixel_pos_scores', 'link_pos_scores'])

        with tf.gfile.FastGFile(FLAGS.output_file, mode='wb') as f:
            print('writing file: ' + FLAGS.output_file)
            f.write(output_graph_def.SerializeToString())
            print('Write finished!')
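
A sketch of loading the frozen graph written above (assuming TF 1.x); the tensor names match the placeholder and tf.identity names defined in main:

with tf.gfile.GFile(FLAGS.output_file, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    input_image = graph.get_tensor_by_name('input_image:0')
    pixel_pos_scores = graph.get_tensor_by_name('pixel_pos_scores:0')
    link_pos_scores = graph.get_tensor_by_name('link_pos_scores:0')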
Example #13
def read_dataset(dataset):
    with tf.name_scope(FLAGS.dataset_name + '_' + FLAGS.dataset_split_name +
                       '_data_provider'):
        provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset, num_readers=FLAGS.num_readers, shuffle=False)

    [
        image, shape, filename, gignored, gbboxes, x1, x2, x3, x4, y1, y2, y3,
        y4
    ] = provider.get([
        'image', 'shape', 'filename', 'object/ignored', 'object/bbox',
        'object/oriented_bbox/x1', 'object/oriented_bbox/x2',
        'object/oriented_bbox/x3', 'object/oriented_bbox/x4',
        'object/oriented_bbox/y1', 'object/oriented_bbox/y2',
        'object/oriented_bbox/y3', 'object/oriented_bbox/y4'
    ])
    gxs = tf.transpose(tf.stack([x1, x2, x3, x4]))  #shape = (N, 4)
    gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
    image = tf.identity(image, 'input_image')

    # Pre-processing image, labels and bboxes.
    image, gignored, gbboxes, gxs, gys = ssd_vgg_preprocessing.preprocess_image(
        image,
        gignored,
        gbboxes,
        gxs,
        gys,
        out_shape=config.image_shape,
        data_format=config.data_format,
        is_training=False)
    image = tf.identity(image, 'processed_image')

    # calculate ground truth
    seg_label, seg_loc, link_gt = seglink.tf_get_all_seglink_gt(
        gxs, gys, gignored)

    return image, seg_label, seg_loc, link_gt, filename, shape, gignored, gxs, gys
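
A minimal sketch of pulling one preprocessed sample from this pipeline, assuming TF 1.x queue runners and a `dataset` built elsewhere:

image, seg_label, seg_loc, link_gt, filename, shape, gignored, gxs, gys = \
    read_dataset(dataset)
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    image_v, filename_v = sess.run([image, filename])  # one sample per call
    coord.request_stop()
    coord.join(threads)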
Example #14
def read_dataset(dataset):
    with tf.name_scope(FLAGS.dataset_name + '_' + FLAGS.dataset_split_name + '_data_provider'):
        provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset,
            num_readers=FLAGS.num_readers,
            shuffle=False)
        
    [image, shape, filename, gignored, gbboxes, x1, x2, x3, x4, y1, y2, y3, y4] = provider.get([
                                                     'image', 'shape', 'filename',
                                                     'object/ignored',
                                                     'object/bbox', 
                                                     'object/oriented_bbox/x1',
                                                     'object/oriented_bbox/x2',
                                                     'object/oriented_bbox/x3',
                                                     'object/oriented_bbox/x4',
                                                     'object/oriented_bbox/y1',
                                                     'object/oriented_bbox/y2',
                                                     'object/oriented_bbox/y3',
                                                     'object/oriented_bbox/y4'
                                                     ])
    gxs = tf.transpose(tf.stack([x1, x2, x3, x4])) #shape = (N, 4)
    gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
    image = tf.identity(image, 'input_image')
    
    # Pre-processing image, labels and bboxes.
    image, gignored, gbboxes, gxs, gys = ssd_vgg_preprocessing.preprocess_image(
                                                       image, gignored, gbboxes, gxs, gys, 
                                                       out_shape = config.image_shape,
                                                       data_format = config.data_format, 
                                                       is_training = False)
    image = tf.identity(image, 'processed_image')
    
    # calculate ground truth
    seg_label, seg_loc, link_gt = seglink.tf_get_all_seglink_gt(gxs, gys, gignored)
        
    return image, seg_label, seg_loc, link_gt, filename, shape, gignored, gxs, gys
Example #15
def eval():
    with tf.name_scope('test'):
        with tf.variable_scope(tf.get_variable_scope(),
                               reuse=True):  # the variables have been created in config.init_config
            image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
            image_shape = tf.placeholder(dtype=tf.int32, shape=[3, ])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(image, None, None, None, None,
                                                                                 out_shape=config.image_shape,
                                                                                 data_format=config.data_format,
                                                                                 is_training=False)
            b_image = tf.expand_dims(processed_image, axis=0)
            b_shape = tf.expand_dims(image_shape, axis=0)
            net = seglink_symbol.SegLinkNet(inputs=b_image, data_format=config.data_format)
            bboxes_pred = seglink.tf_seglink_to_bbox(net.seg_scores, net.link_scores,
                                                     net.seg_offsets,
                                                     image_shape=b_shape,
                                                     seg_conf_threshold=config.seg_conf_threshold,
                                                     link_conf_threshold=config.link_conf_threshold)

    image_names = util.io.ls(FLAGS.dataset_dir)

    sess_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)
    logdir = util.io.join_path(FLAGS.checkpoint_path, 'test', FLAGS.dataset_name + '_' + FLAGS.dataset_split_name)

    saver = tf.train.Saver()
    if util.io.is_dir(FLAGS.checkpoint_path):
        checkpoint = util.tf.get_latest_ckpt(FLAGS.checkpoint_path)
    else:
        checkpoint = FLAGS.checkpoint_path

    tf.logging.info('testing %s', checkpoint)

    with tf.Session(config=sess_config) as sess:
        saver.restore(sess, checkpoint)
        checkpoint_name = util.io.get_filename(str(checkpoint))
        dump_path = util.io.join_path(logdir, checkpoint_name,
                                      'seg_link_conf_th_%f_%f' % (
                                      config.seg_conf_threshold, config.link_conf_threshold))

        txt_path = util.io.join_path(dump_path, 'txt')
        zip_path = util.io.join_path(dump_path, '%s_seg_link_conf_th_%f_%f.zip' % (
        checkpoint_name, config.seg_conf_threshold, config.link_conf_threshold))

        # write detection result as txt files
        def write_result_as_txt(image_name, bboxes, path):
            filename = util.io.join_path(path, 'res_%s.txt' % (image_name))
            lines = []
            for b_idx, bbox in enumerate(bboxes):
                values = [int(v) for v in bbox]
                line = "%d, %d, %d, %d, %d, %d, %d, %d\n" % tuple(values)
                lines.append(line)
            util.io.write_lines(filename, lines)
            print('result has been written to:', filename)

        for iter, image_name in enumerate(image_names):
            image_data = util.img.imread(util.io.join_path(FLAGS.dataset_dir, image_name), rgb=True)
            image_name = image_name.split('.')[0]
            image_bboxes = sess.run([bboxes_pred], feed_dict={image: image_data, image_shape: image_data.shape})
            print('%d/%d: %s' % (iter + 1, len(image_names), image_name))
            write_result_as_txt(image_name, image_bboxes[0], txt_path)
Example #16
def test():
    with tf.name_scope('test'):
        image = tf.placeholder(dtype=tf.int32, shape = [None, None, 3])
        image_shape = tf.placeholder(dtype = tf.int32, shape = [3, ])
        processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(image, None, None, None, None, 
                                                   out_shape = config.image_shape,
                                                   data_format = config.data_format, 
                                                   is_training = False)
        b_image = tf.expand_dims(processed_image, axis = 0)
        net = pixel_link_symbol.PixelLinkNet(b_image, is_training = True)
        global_step = slim.get_or_create_global_step()

    
    sess_config = tf.ConfigProto(log_device_placement = False, allow_soft_placement = True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction
    
    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)
    logdir = util.io.join_path(checkpoint_dir, 'test', FLAGS.dataset_name + '_' +FLAGS.dataset_split_name)

    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore()
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()
    
    saver = tf.train.Saver(var_list = variables_to_restore)
    
    
    image_names = util.io.ls(FLAGS.dataset_dir)
    image_names.sort()
    
    checkpoint = FLAGS.checkpoint_path
    checkpoint_name = util.io.get_filename(str(checkpoint))
    dump_path = util.io.join_path(logdir, checkpoint_name)
    txt_path = util.io.join_path(dump_path,'txt')        
    zip_path = util.io.join_path(dump_path, checkpoint_name + '_det.zip')
    
    with tf.Session(config = sess_config) as sess:
        saver.restore(sess, checkpoint)

        for iter, image_name in enumerate(image_names):
            image_data = util.img.imread(
                util.io.join_path(FLAGS.dataset_dir, image_name), rgb = True)
            image_name = image_name.split('.')[0]
            pixel_pos_scores, link_pos_scores = sess.run(
                [net.pixel_pos_scores, net.link_pos_scores],
                feed_dict={image: image_data})
               
            print('%d/%d: %s' % (iter + 1, len(image_names), image_name))
            to_txt(txt_path,
                    image_name, image_data, 
                    pixel_pos_scores, link_pos_scores)

            
    # create zip file for icdar2015
    cmd = 'cd %s; zip -j %s %s/*' % (dump_path, zip_path, txt_path)
    print(cmd)
    util.cmd.cmd(cmd)
    print('zip file created:', util.io.join_path(dump_path, zip_path))
Example #17
def eval():
    
    with tf.name_scope('test'):
        with tf.variable_scope(tf.get_variable_scope(), reuse = True):  # the variables have been created in config.init_config
            image = tf.placeholder(dtype=tf.int32, shape = [None, None, 3])
            image_shape = tf.placeholder(dtype = tf.int32, shape = [3, ])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(image, None, None, None, None, 
                                                       out_shape = config.image_shape,
                                                       data_format = config.data_format, 
                                                       is_training = False)
            b_image = tf.expand_dims(processed_image, axis = 0)
            b_shape = tf.expand_dims(image_shape, axis = 0)
            net = seglink_symbol.SegLinkNet(inputs = b_image, data_format = config.data_format)
            bboxes_pred = seglink.tf_seglink_to_bbox(net.seg_scores, net.link_scores, 
                                                     net.seg_offsets, 
                                                     image_shape = b_shape, 
                                                     seg_conf_threshold = config.seg_conf_threshold,
                                                     link_conf_threshold = config.link_conf_threshold)

    image_names = util.io.ls(FLAGS.dataset_dir)
    
    sess_config = tf.ConfigProto(log_device_placement = False, allow_soft_placement = True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction
    
    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)
    logdir = util.io.join_path(FLAGS.checkpoint_path, 'test', FLAGS.dataset_name + '_' +FLAGS.dataset_split_name)
    
    saver = tf.train.Saver()
    if util.io.is_dir(FLAGS.checkpoint_path):
        checkpoint = util.tf.get_latest_ckpt(FLAGS.checkpoint_path)
    else:
        checkpoint = FLAGS.checkpoint_path
        
    tf.logging.info('testing %s', checkpoint)

    with tf.Session(config = sess_config) as sess:
        saver.restore(sess, checkpoint)
        checkpoint_name = util.io.get_filename(str(checkpoint))
        dump_path = util.io.join_path(logdir, checkpoint_name, 
                                      'seg_link_conf_th_%f_%f'%(config.seg_conf_threshold, config.link_conf_threshold))
        
        txt_path = util.io.join_path(dump_path,'txt')
        zip_path = util.io.join_path(dump_path, '%s_seg_link_conf_th_%f_%f.zip'%(checkpoint_name, config.seg_conf_threshold, config.link_conf_threshold))
        
        # write detection result as txt files
        def write_result_as_txt(image_name, bboxes, path):
            filename = util.io.join_path(path, 'res_%s.txt' % (image_name))
            lines = []
            for b_idx, bbox in enumerate(bboxes):
                values = [int(v) for v in bbox]
                line = "%d, %d, %d, %d, %d, %d, %d, %d\n" % tuple(values)
                lines.append(line)
            util.io.write_lines(filename, lines)
            print('result has been written to:', filename)
          
        for iter, image_name in enumerate(image_names):
            image_data = util.img.imread(util.io.join_path(FLAGS.dataset_dir, image_name), rgb = True)
            image_name = image_name.split('.')[0]
            image_bboxes = sess.run([bboxes_pred], feed_dict = {image:image_data, image_shape:image_data.shape})
            print('%d/%d: %s' % (iter + 1, len(image_names), image_name))
            write_result_as_txt(image_name, image_bboxes[0], txt_path)
                
        # create zip file for icdar2015
        cmd = 'cd %s; zip -j %s %s/*' % (dump_path, zip_path, txt_path)
        print(cmd)
        print(util.cmd.cmd(cmd))
        print('zip file created:', util.io.join_path(dump_path, zip_path))
Example #18
def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.DEBUG)
    with tf.Graph().as_default():
        # Config model_deploy. Keep TF Slim Models structure.
        # Useful if you want to use multiple GPUs and/or servers in the future.
        deploy_config = model_deploy.DeploymentConfig(
            num_clones=FLAGS.num_clones,
            clone_on_cpu=FLAGS.clone_on_cpu,
            replica_id=0,
            num_replicas=1,
            num_ps_tasks=0)
        # Create global_step.
        with tf.device(deploy_config.variables_device()):
            global_step = slim.create_global_step()

        # Select the dataset.
        dataset = traffic_light.get_split(
            FLAGS.dataset_split_name, FLAGS.dataset_dir)

        # Get the SSD network and its anchors.
        ssd_class = ssd_vgg_512.SSDNet
        ssd_params = ssd_class.default_params._replace(num_classes=FLAGS.num_classes)
        ssd_net = ssd_class(ssd_params)
        ssd_shape = ssd_net.params.img_shape
        ssd_anchors = ssd_net.anchors(ssd_shape)

        tf_utils.print_configuration(FLAGS.__flags, ssd_params,
                                     dataset.data_sources, FLAGS.train_dir)
        # ================================================================== #
        # Create a dataset provider and batches.
        # ================================================================== #
        with tf.device(deploy_config.inputs_device()):
            with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
                provider = slim.dataset_data_provider.DatasetDataProvider(
                    dataset,
                    num_readers=FLAGS.num_readers,
                    common_queue_capacity=20 * FLAGS.batch_size,
                    common_queue_min=10 * FLAGS.batch_size,
                    shuffle=True)
            # Fetch image, labels and bboxes for the SSD network.
            [image, shape, glabels, gbboxes] = provider.get(['image', 'shape',
                                                             'object/label',
                                                             'object/bbox'])
            # Pre-processing image, labels and bboxes.
            image, glabels, gbboxes = \
                ssd_vgg_preprocessing.preprocess_image(image, glabels, gbboxes,
                                        out_shape=ssd_shape,
                                        data_format=DATA_FORMAT,
                                        is_training = True)
            # Encode groundtruth labels and bboxes.
            gclasses, glocalisations, gscores = \
                ssd_net.bboxes_encode(glabels, gbboxes, ssd_anchors)
            batch_shape = [1] + [len(ssd_anchors)] * 3

            # Training batches and queue.
            r = tf.train.batch(
                tf_utils.reshape_list([image, gclasses, glocalisations, gscores]),
                batch_size=FLAGS.batch_size,
                num_threads=FLAGS.num_preprocessing_threads,
                capacity=5 * FLAGS.batch_size)
            b_image, b_gclasses, b_glocalisations, b_gscores = \
                tf_utils.reshape_list(r, batch_shape)

            # Intermediate queueing: unique batch computation pipeline for all
            # GPUs running the training.
            batch_queue = slim.prefetch_queue.prefetch_queue(
                tf_utils.reshape_list([b_image, b_gclasses, b_glocalisations, b_gscores]),
                capacity=2 * deploy_config.num_clones)

        # ================================================================== #
        # Define the model running on every GPU.
        # ================================================================== #
        def clone_fn(batch_queue):
            """Allows data parallelism by creating multiple
            clones of network_fn."""
            # Dequeue batch.
            b_image, b_gclasses, b_glocalisations, b_gscores = \
                tf_utils.reshape_list(batch_queue.dequeue(), batch_shape)

            # Construct SSD network.
            arg_scope = ssd_net.arg_scope(weight_decay=FLAGS.weight_decay,
                                          data_format=DATA_FORMAT)
            with slim.arg_scope(arg_scope):
                predictions, localisations, logits, end_points = \
                    ssd_net.net(b_image, is_training=True)
            # Add loss function.
            ssd_net.losses(logits, localisations,
                           b_gclasses, b_glocalisations, b_gscores,
                           match_threshold=FLAGS.match_threshold,
                           negative_ratio=FLAGS.negative_ratio,
                           alpha=FLAGS.loss_alpha,
                           label_smoothing=FLAGS.label_smoothing)
            return end_points

        # Gather initial summaries.
        summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

        # ================================================================== #
        # Add summaries from first clone.
        # ================================================================== #
        clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
        first_clone_scope = deploy_config.clone_scope(0)
        # Gather update_ops from the first clone. These contain, for example,
        # the updates for the batch_norm variables created by network_fn.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)

        # Add summaries for end_points.
        end_points = clones[0].outputs
        for end_point in end_points:
            x = end_points[end_point]
            summaries.add(tf.summary.histogram('activations/' + end_point, x))
            summaries.add(tf.summary.scalar('sparsity/' + end_point,
                                            tf.nn.zero_fraction(x)))
        # Add summaries for losses and extra losses.
        for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
            summaries.add(tf.summary.scalar(loss.op.name, loss))
        for loss in tf.get_collection('EXTRA_LOSSES', first_clone_scope):
            summaries.add(tf.summary.scalar(loss.op.name, loss))

        # Add summaries for variables.
        for variable in slim.get_model_variables():
            summaries.add(tf.summary.histogram(variable.op.name, variable))

        # ================================================================== #
        # Configure the moving averages.
        # ================================================================== #
        if FLAGS.moving_average_decay:
            moving_average_variables = slim.get_model_variables()
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay, global_step)
        else:
            moving_average_variables, variable_averages = None, None

        # ================================================================== #
        # Configure the optimization procedure.
        # ================================================================== #
        with tf.device(deploy_config.optimizer_device()):
            learning_rate = tf_utils.configure_learning_rate(FLAGS,
                                                             dataset.num_samples,
                                                             global_step)
            optimizer = tf_utils.configure_optimizer(FLAGS, learning_rate)
            summaries.add(tf.summary.scalar('learning_rate', learning_rate))

        if FLAGS.moving_average_decay:
            # Update ops executed locally by trainer.
            update_ops.append(variable_averages.apply(moving_average_variables))

        # Variables to train.
        variables_to_train = tf_utils.get_variables_to_train(FLAGS)

        # Compute the total loss and the gradients across all clones.
        total_loss, clones_gradients = model_deploy.optimize_clones(
            clones,
            optimizer,
            var_list=variables_to_train)
        # Add total_loss to summary.
        summaries.add(tf.summary.scalar('total_loss', total_loss))

        # Create gradient updates.
        grad_updates = optimizer.apply_gradients(clones_gradients,
                                                 global_step=global_step)
        update_ops.append(grad_updates)
        update_op = tf.group(*update_ops)
        train_tensor = control_flow_ops.with_dependencies([update_op], total_loss,
                                                          name='train_op')

        # Add the summaries from the first clone. These contain the summaries
        summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
                                           first_clone_scope))
        # Merge all summaries together.
        summary_op = tf.summary.merge(list(summaries), name='summary_op')

        # ================================================================== #
        # Kicks off the training.
        # ================================================================== #
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
        config = tf.ConfigProto(log_device_placement=False,
                                gpu_options=gpu_options)
        saver = tf.train.Saver(max_to_keep=5,
                               keep_checkpoint_every_n_hours=0.2,
                               write_version=2,
                               pad_step_number=False)
        slim.learning.train(
            train_tensor,
            logdir=FLAGS.train_dir,
            master='',
            is_chief=True,
            init_fn=tf_utils.get_init_fn(FLAGS),
            summary_op=summary_op,
            number_of_steps=FLAGS.max_number_of_steps,
            log_every_n_steps=FLAGS.log_every_n_steps,
            save_summaries_secs=FLAGS.save_summaries_secs,
            saver=saver,
            save_interval_secs=FLAGS.save_interval_secs,
            session_config=config,
            sync_optimizer=None)
Example #19
def eval_model(config, FLAGS, para_list=None, is_log=False):
    image_size = config['image_size']
    with tf.Graph().as_default():
        # image_ph = tf.placeholder(dtype=tf.uint8, shape=[
        #                           None, None, 3], name='input')
        path_ph = tf.placeholder(dtype=tf.string)

        raw_data = tf.read_file(path_ph)
        image = tf.image.decode_jpeg(raw_data, channels=3)
        image.set_shape((None, None, 3))
        out_shape = (image_size['h'],
                     image_size['w']) if image_size['fixed_size'] else None
        scale = 1.0 if image_size['fixed_size'] else image_size['scale']
        image_process, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
            image, scale=scale, out_shape=out_shape, is_training=False)

        image_process = tf.expand_dims(image_process, 0)
        seg_maps, _ = model(image_process, is_training=False)

        # rescale seg_maps to origin size
        seg_map_list = []
        for i in range(config['n']):
            seg_map_list.append(
                tf.image.resize_images(
                    seg_maps[:, :, :, i:i + 1],
                    [tf.shape(image)[0],
                     tf.shape(image)[1]]))

        # choose the complete map as mask, apply to shrink map
        mask = tf.greater_equal(seg_map_list[0], config['threshold'])
        mask = tf.to_float(mask)
        seg_map_list = [seg_map * mask for seg_map in seg_map_list]

        chp_name = util.io.get_filename(config['ckpt'])

        dump_path = util.io.join_path(config['log_dir'], 'test',
                                      FLAGS.train_name)
        # os.system('rm -r {}'.format(dump_path))

        global_step = tf.train.get_or_create_global_step()
        # global_step = tf.Variable(0,trainable=False)
        # Variables to restore: moving avg. or normal weights.

        if FLAGS.using_moving_average:
            variable_averages = tf.train.ExponentialMovingAverage(0.9999)
            variables_to_restore = variable_averages.variables_to_restore()
            variables_to_restore[global_step.op.name] = global_step

            filter_variable = {}
            for var in variables_to_restore:
                if var.find('deformable/Variable') == -1:
                    filter_variable[var] = variables_to_restore[var]
        else:
            # variables_to_restore = slim.get_variables_to_restore()
            variables_to_restore = tf.get_collection(
                tf.GraphKeys.GLOBAL_VARIABLES)
            filter_variable = []
            for var in variables_to_restore:
                if var.name.find('deformable/Variable') == -1:
                    filter_variable.append(var)

        saver = tf.train.Saver(filter_variable)

        with tf.name_scope('debug'):
            # tf.summary.image('input',tf.expand_dims(image,0))
            tf.summary.image('process', image_process)
            for i in range(config['n']):
                tf.summary.image(('%d_' % i + seg_map_list[i].op.name),
                                 seg_map_list[i])

        summary = tf.summary.merge_all()

        tfconfig = tf.ConfigProto(allow_soft_placement=True)
        tfconfig.gpu_options.allow_growth = True
        # config.gpu_options.per_process_gpu_memory_fraction = 0.4
        with tf.Session(config=tfconfig) as sess:
            trans_var = tf.get_collection('transform')
            init_transform = tf.variables_initializer(trans_var)
            sess.run(init_transform)

            # ckpt='/workspace/lupu/PSENet/Logs/train/run_bat-b/model.ckpt-21579'
            # for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
            #     print(var)
            # if var.name.find('BatchNorm')!=-1:
            #   chkp.print_tensors_in_checkpoint_file(config['ckpt'], tensor_name=var.name.split(':')[0], all_tensors=False)

            saver.restore(sess, config['ckpt'])
            print('restore model from: ', config['ckpt'])

            # coord = tf.train.Coordinator()
            # threads = tf.train.start_queue_runners(coord=coord)
            sum_writer = tf.summary.FileWriter(dump_path, graph=sess.graph)

            files = tf.gfile.Glob(os.path.join(config['test_dir'], '*.jpg'))
            try:
                files_sorted = sorted(
                    files,
                    key=lambda path: int(path.split('/')[-1].split('.')[0]))
            except (ValueError, IndexError):
                files_sorted = sorted(
                    files,
                    key=lambda path: int(path.split('_')[-1].split('.')[0]))
            pbar = tqdm.tqdm(total=len(files_sorted))
            for iter, file_name in enumerate(files_sorted):
                pbar.update(1)
                # image_data = util.img.imread(
                #     util.io.join_path(config['test_dir'], image_name), rgb=True)
                image_name = os.path.basename(file_name)
                image_name = image_name.split('.')[0]

                if is_log:
                    # NOTE: useful func, analysis tf graph run time
                    run_options = tf.RunOptions(
                        trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()
                    # the input size must be a multiple of 32 (the stride?)
                    image_arr, segment_maps, summ = sess.run(
                        [image, seg_map_list, summary],
                        feed_dict={path_ph: file_name},
                        options=run_options,
                        run_metadata=run_metadata)

                    # segment_maps=(segment_maps>0.5).astype(np.float32)

                    sum_writer.add_summary(summ, global_step=iter)
                    # add mermory and time info in graph
                    sum_writer.add_run_metadata(run_metadata, 'step%d' % iter)
                else:
                    # segment_maps is list
                    segment_maps = sess.run(seg_map_list,
                                            feed_dict={path_ph: file_name})

                para_list = [(config['threshold_kernel'], config['threshold'],
                              config['aver_score'])
                             ] if para_list is None else para_list

                pbar_para = tqdm.tqdm(total=len(para_list))
                for index, para in enumerate(para_list):
                    pbar_para.update(1)
                    config['threshold_kernel'], config['threshold'], config[
                        'aver_score'] = para[0], para[1], para[2]
                    config['id'] = 0 if len(para_list) == 1 else index + 1
                    infer_path = util.io.join_path(
                        dump_path, chp_name + '_' + str(config['id']))

                    txt_path = util.io.join_path(infer_path, 'txt_result')
                    zip_path = util.io.join_path(infer_path, 'detect.zip')
                    imgs_path = util.io.join_path(infer_path, 'image_log')

                    result_map = process_map(segment_maps,
                                             config['threshold_kernel'],
                                             config['threshold'])

                    bboxes, scores = map_to_bboxes(
                        segment_maps,
                        result_map,
                        image_size,
                        aver_score=config['aver_score'])

                    if is_log:
                        log_to_file(imgs_path, file_name, image_arr, bboxes,
                                    segment_maps)

                    write_to_file(bboxes, image_name, txt_path)
                pbar_para.close()
                # plt.show()
            pbar.close()

    # ====================================
    # Logging .....

    for index, para in enumerate(para_list):
        config['threshold_kernel'], config['threshold'], config[
            'aver_score'] = para[0], para[1], para[2]
        config['id'] = config['id'] if len(para_list) == 1 else index + 1
        infer_path = util.io.join_path(dump_path,
                                       chp_name + '_' + str(config['id']))

        txt_path = util.io.join_path(infer_path, 'txt_result')
        zip_path = util.io.join_path(infer_path, 'detect.zip')

        flags_log = tf.app.flags.FLAGS.flag_values_dict()
        with open(os.path.join(infer_path, 'flags.json'), 'w') as f:
            json.dump(flags_log, f, indent=2)
        with open(os.path.join(infer_path, 'config.json'), 'w') as f:
            json.dump(config, f, indent=2)

        cmd = 'cd %s;zip -j %s %s/*' % (infer_path, os.path.basename(zip_path),
                                        'txt_result')
        zip_path = util.io.get_absolute_path(zip_path)
        infer_path = util.io.get_absolute_path(infer_path)
        # print(cmd)
        util.cmd.cmd(cmd)
        # print("zip file created: ", zip_path)

        os.chdir('./metric')
        para = {'g': 'gt.zip', 's': zip_path, 'o': infer_path}
        import script
        res = None
        try:
            res = script.eval(para)
        except Exception:
            print('eval error!')
        finally:
            os.chdir('../')
        with open(os.path.join(infer_path, 'result.json'), 'w') as f:
            json.dump(res, f, indent=2)
Example #20
def test():
    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)

    global_step = slim.get_or_create_global_step()
    with tf.name_scope('evaluation_%dx%d' % (FLAGS.eval_image_height, FLAGS.eval_image_width)):
        with tf.variable_scope(tf.get_variable_scope(), reuse=False):
            image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
            image_shape = tf.placeholder(dtype=tf.int32, shape=[3, ])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(image, None, None, None, None,
                                                                                 out_shape=config.image_shape,
                                                                                 data_format=config.data_format,
                                                                                 is_training=False)
            b_image = tf.expand_dims(processed_image, axis=0)

            # build model and loss
            net = pixel_link_symbol.PixelLinkNet(b_image, is_training=False)
            masks = pixel_link.tf_decode_score_map_to_mask_in_batch(
                net.pixel_pos_scores, net.link_pos_scores)

    sess_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore(
            tf.trainable_variables())
        variables_to_restore[global_step.op.name] = global_step
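        # variables_to_restore maps each EMA shadow name back to the live
        # variable, so the Saver below loads the moving-average weights.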
    else:
        variables_to_restore = slim.get_variables_to_restore()

    saver = tf.train.Saver(var_list=variables_to_restore)
    with tf.Session() as sess:
        saver.restore(sess, util.tf.get_latest_ckpt(FLAGS.checkpoint_path))

        files = util.io.ls(FLAGS.dataset_dir)
        rows = [["image", "id", "xMin", "xMax", "yMin", "yMax"]]
        for image_name in files:
            file_path = util.io.join_path(FLAGS.dataset_dir, image_name)
            image_data = util.img.imread(file_path)
            link_scores, pixel_scores, mask_vals = sess.run(
                [net.link_pos_scores, net.pixel_pos_scores, masks],
                feed_dict={image: image_data})
            h, w, _ = image_data.shape

            def resize(img):
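                # INTER_NEAREST keeps the mask's discrete labels intact;
                # a smoothing interpolation would blend label values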
                return util.img.resize(img, size=(w, h),
                                       interpolation=cv2.INTER_NEAREST)

            def get_bboxes(mask):
                return pixel_link.mask_to_bboxes(mask, image_data.shape)

            def draw_bboxes(img, bboxes, color):
                for bbox in bboxes:
                    points = np.reshape(bbox, [4, 2])
                    cnts = util.img.points_to_contours(points)
                    util.img.draw_contours(img, contours=cnts,
                                           idx=-1, color=color, border_width=1)

            def get_box_info(img, bboxes, name):
                boxes = []
                for box_id, bbox in enumerate(bboxes):
                    points = np.reshape(bbox, [4, 2])
                    x = [points[0][0], points[1][0], points[2][0], points[3][0]]
                    y = [points[0][1], points[1][1], points[2][1], points[3][1]]
                    boxes.append([name, box_id + 1, min(x),
                                  max(x), min(y), max(y)])
                return boxes

            image_idx = 0
            pixel_score = pixel_scores[image_idx, ...]
            mask = mask_vals[image_idx, ...]

            bboxes_det = get_bboxes(mask)

            mask = resize(mask)
            pixel_score = resize(pixel_score)
            bbox = get_box_info(image_data, bboxes_det, image_name)
            rows += bbox
            draw_bboxes(image_data, bboxes_det, util.img.COLOR_RGB_RED)
            # print(util.sit(pixel_score))
            # print(util.sit(mask))
            print(util.sit(image_data))

        def write_csv(boxes):
            with open('/Users/ci.chen/temp/no-use/images/result.csv', 'w') as csv_file:
                writer = csv.writer(csv_file)
                writer.writerows(boxes)

        write_csv(rows)
Example #21
def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    batch_size = FLAGS.batch_size
    with tf.Graph().as_default():
        # Select the dataset.
        dataset = dataset_factory.get_dataset(FLAGS.dataset_name,
                                              FLAGS.dataset_split_name,
                                              FLAGS.dataset_dir)

        util.proc.set_proc_name(FLAGS.model_name + '_' + FLAGS.dataset_name)

        # =================================================================== #
        # Create a dataset provider and batches.
        # =================================================================== #
        with tf.device('/cpu:0'):
            with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
                provider = slim.dataset_data_provider.DatasetDataProvider(
                    dataset,
                    num_readers=FLAGS.num_readers,
                    common_queue_capacity=20 * batch_size,
                    common_queue_min=10 * batch_size,
                    shuffle=True)
            # Get for SSD network: image, labels, bboxes.
            [image, shape, gignored, gbboxes, x1, x2, x3, x4, y1, y2, y3,
             y4] = provider.get([
                 'image', 'shape', 'object/ignored', 'object/bbox',
                 'object/oriented_bbox/x1', 'object/oriented_bbox/x2',
                 'object/oriented_bbox/x3', 'object/oriented_bbox/x4',
                 'object/oriented_bbox/y1', 'object/oriented_bbox/y2',
                 'object/oriented_bbox/y3', 'object/oriented_bbox/y4'
             ])
            gxs = tf.transpose(tf.stack([x1, x2, x3, x4]))  # shape = (N, 4)
            gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
            image = tf.identity(image, 'input_image')
            # Pre-processing image, labels and bboxes.
            image_shape = (FLAGS.train_image_size, FLAGS.train_image_size)
            image, gignored, gbboxes, gxs, gys = \
                ssd_vgg_preprocessing.preprocess_image(image, gignored, gbboxes, gxs, gys,
                                                       out_shape=image_shape,
                                                       is_training=True)
            gxs = gxs * tf.cast(image_shape[1], gxs.dtype)
            gys = gys * tf.cast(image_shape[0], gys.dtype)
            gorbboxes = tfe_seglink.tf_min_area_rect(gxs, gys)
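            # gorbboxes: minimum-area rectangles as (cx, cy, w, h, angle),
            # the same rotated-rect layout cv2 consumes in draw_orbbox below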
            image = tf.identity(image, 'processed_image')

            with tf.Session() as sess:
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                i = 0
                while i < 2:
                    i += 1
                    image_data, label_data, bbox_data, xs_data, ys_data, orbboxes = \
                        sess.run([image, gignored, gbboxes, gxs, gys, gorbboxes])
                    image_data = image_data + [123., 117., 104.]
                    image_data = np.asarray(image_data, np.uint8)
                    h, w = image_data.shape[0:-1]
                    bbox_data = bbox_data * [h, w, h, w]
                    I_bbox = image_data.copy()
                    I_xys = image_data.copy()
                    I_orbbox = image_data.copy()

                    for idx in range(bbox_data.shape[0]):

                        def draw_bbox():
                            y1, x1, y2, x2 = bbox_data[idx, :]
                            util.img.rectangle(I_bbox, (x1, y1), (x2, y2),
                                               color=util.img.COLOR_WHITE)

                        def draw_xys():
                            points = zip(xs_data[idx, :], ys_data[idx, :])
                            cnts = util.img.points_to_contours(points)
                            util.img.draw_contours(I_xys,
                                                   cnts,
                                                   -1,
                                                   color=util.img.COLOR_GREEN)

                        def draw_orbbox():
                            orbox = orbboxes[idx, :]
                            import cv2
                            rect = ((orbox[0], orbox[1]), (orbox[2], orbox[3]),
                                    orbox[4])
                            # cv2.boxPoints in OpenCV 3+, cv2.cv.BoxPoints in OpenCV 2.x
                            box = cv2.boxPoints(rect) if hasattr(
                                cv2, 'boxPoints') else cv2.cv.BoxPoints(rect)
                            box = np.int0(box)
                            cv2.drawContours(I_orbbox, [box], 0,
                                             util.img.COLOR_RGB_RED, 1)

                        draw_bbox()
                        draw_xys()
                        draw_orbbox()

                    print(util.sit(I_bbox))
                    print(util.sit(I_xys))
                    print(util.sit(I_orbbox))
                    print(
                        'check the images and make sure that bboxes in different colors are the same.'
                    )
                coord.request_stop()
                coord.join(threads)
def create_model():
    output_graph = 'frozen_model.pb'
    config_initialization()
    global_step = slim.get_or_create_global_step()
    with tf.name_scope('evaluation_%dx%d' %
                       (FLAGS.eval_image_height, FLAGS.eval_image_width)):
        with tf.variable_scope(tf.get_variable_scope(), reuse=False):
            image = tf.placeholder(dtype=tf.int32,
                                   shape=[None, None, 3],
                                   name='net/input_images')
            image_shape = tf.placeholder(dtype=tf.int32, shape=[
                3,
            ])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
                image,
                None,
                None,
                None,
                None,
                out_shape=[360, 640],
                data_format=config.data_format,
                is_training=False)
            b_image = tf.expand_dims(processed_image, axis=0)

            # build model and loss
            net = pixel_link_symbol.PixelLinkNet(b_image, is_training=False)
            # b_image, b_pixel_cls_label, b_pixel_cls_weight, \
            # b_pixel_link_label, b_pixel_link_weight = batch_queue.dequeue()
            # net.build_loss(
            #     pixel_cls_labels,
            #     pixel_cls_weights,
            #     pixel_link_labels,
            #     pixel_link_weights,
            #     do_summary)

    variable_averages = tf.train.ExponentialMovingAverage(
        FLAGS.moving_average_decay)
    variables_to_restore = variable_averages.variables_to_restore(
        tf.trainable_variables())
    variables_to_restore[global_step.op.name] = global_step
    # open sess and then save the model.
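    # NOTE: variables_to_restore above is built but not passed to this Saver,
    # so the checkpoint saved below holds the raw (non-EMA) weights.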

    saver = tf.train.Saver()
    total_parameters = 0

    for variable in tf.trainable_variables():
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        variable_parameters = 1
        print("name:", variable.name)
        print("shape:", shape)

        for dim in shape:
            variable_parameters *= dim.value
        print('variable:', variable_parameters)
        total_parameters += variable_parameters
    print('total:', total_parameters)

    # builder = tf.saved_model.builder.SavedModelBuilder('test/')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # for op in sess.graph.get_operations():
        #     print(op.name)

        print(tf.contrib.framework.get_variables_to_restore())
        # builder.add_meta_graph_and_variables(sess,
        #                                      [tag_constants.TRAINING],
        #                                      signature_def_map=None,
        #                                      assets_collection=None)
        # tf.train.write_graph(sess.graph_def, './save/', 'mobile_net_0.01.pbtxt')
        saver.save(sess, 'save/mobile_net_0.01.ckpt')
        tf.train.write_graph(sess.graph_def,
                             '.',
                             'mobile_net_0.01' + '.pb',
                             as_text=False)
        graph_def = sess.graph_def

        from tensorflow.python.platform import gfile
        # graph_def is serialized in binary form, so write it as .pb
        # (a .pbtxt would require tf.train.write_graph(..., as_text=True))
        with gfile.GFile('./save/mobile_net_0.01.pb', 'wb') as f:
            f.write(graph_def.SerializeToString())
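        # A minimal sketch of producing the 'frozen_model.pb' declared above
        # (assumption: the real output node names must be read from the graph;
        # the name below is a placeholder, not the actual PixelLink output):
        #   from tensorflow.python.framework import graph_util
        #   frozen_graph_def = graph_util.convert_variables_to_constants(
        #       sess, sess.graph_def, ['some/output_node'])
        #   with gfile.GFile(output_graph, 'wb') as f:
        #       f.write(frozen_graph_def.SerializeToString())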
def test():
    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)

    global_step = slim.get_or_create_global_step()
    with tf.name_scope('evaluation_%dx%d' %
                       (FLAGS.eval_image_height, FLAGS.eval_image_width)):
        with tf.variable_scope(tf.get_variable_scope(), reuse=False):
            image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
            image_shape = tf.placeholder(dtype=tf.int32, shape=[
                3,
            ])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
                image,
                None,
                None,
                None,
                None,
                out_shape=config.image_shape,
                data_format=config.data_format,
                is_training=False)
            b_image = tf.expand_dims(processed_image, axis=0)

            # build model and loss
            net = pixel_link_symbol.PixelLinkNet(b_image, is_training=False)
            masks = pixel_link.tf_decode_score_map_to_mask_in_batch(
                net.pixel_pos_scores, net.link_pos_scores)

    sess_config = tf.ConfigProto(log_device_placement=False,
                                 allow_soft_placement=True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore(
            tf.trainable_variables())
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()

    saver = tf.train.Saver(var_list=variables_to_restore)
    with tf.Session() as sess:
        saver.restore(sess, util.tf.get_latest_ckpt(FLAGS.checkpoint_path))

        files = util.io.ls(FLAGS.dataset_dir)

        for image_name in files:
            file_path = util.io.join_path(FLAGS.dataset_dir, image_name)
            image_data = util.img.imread(file_path)
            link_scores, pixel_scores, mask_vals = sess.run(
                [net.link_pos_scores, net.pixel_pos_scores, masks],
                feed_dict={image: image_data})
            h, w, _ = image_data.shape

            def resize(img):
                return util.img.resize(img,
                                       size=(w, h),
                                       interpolation=cv2.INTER_NEAREST)

            def get_bboxes(mask):
                return pixel_link.mask_to_bboxes(mask, image_data.shape)

            def draw_bboxes(img, bboxes, color):
                for bbox in bboxes:
                    points = np.reshape(bbox, [4, 2])
                    cnts = util.img.points_to_contours(points)
                    util.img.draw_contours(img,
                                           contours=cnts,
                                           idx=-1,
                                           color=color,
                                           border_width=1)

            image_idx = 0
            pixel_score = pixel_scores[image_idx, ...]
            mask = mask_vals[image_idx, ...]

            bboxes_det = get_bboxes(mask)

            mask = resize(mask)
            pixel_score = resize(pixel_score)

            import os
            ID = file_path.split('/')[-1].split('.')[0]
            '''
            txt_file = os.path.join('/data/VOC/train/tax_2/Txts3', '%s.txt' % ID)
            with open(txt_file, 'w') as f:
                count = 0
                for box in bboxes_det:
                    count = 1
                    x1, y1, x2, y2, x3, y3, x4, y4 = box
                    l_1 = int(math.sqrt((x1-x2)**2 + (y1-y2)**2))
                    l_2 = int(math.sqrt((x2-x3)**2 + (y2-y3)**2))

                    pts1 = np.float32([[box[0], box[1]], [box[2], box[3]], [box[6], box[7]], [box[4], box[5]]])

                    if l_1 < l_2:
                        width = l_2
                        height = l_1
                        pts2 = np.float32([[0, 0], [height, 0], [0, width], [height, width]])
                        M = cv2.getPerspectiveTransform(pts1, pts2)
                        ROI = cv2.warpPerspective(image_data, M, (height, width))
                        ROI = np.rot90(ROI)
                    else:
                        width = l_1
                        height = l_2
                        pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
                        M = cv2.getPerspectiveTransform(pts1, pts2)
                        ROI = cv2.warpPerspective(image_data, M, (width, height))

                    nh, nw, nc = ROI.shape
                    # if nw /float(nh) > 5.:
                    #     cv2.imwrite('/data_sdd/crop/process_tax/crop_0104/vin_train/%s_%d.jpg' % (ID, count), ROI)
                    f.write('%d,%d,%d,%d,%d,%d,%d,%d,vin\n' % (x1, y1, x2, y2, x3, y3, x4, y4))
             '''
            draw_bboxes(image_data, bboxes_det, util.img.COLOR_RGB_RED)

def preprocessing_fn(image, labels, bboxes, xs, ys,
                     out_shape, data_format='NHWC', **kwargs):
    # 'is_training' is presumably captured from an enclosing
    # get_preprocessing(is_training=...) factory in the original source
    return ssd_vgg_preprocessing.preprocess_image(
        image, labels, bboxes, out_shape, xs, ys, data_format=data_format,
        is_training=is_training, **kwargs)
Example #25
def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    batch_size = FLAGS.batch_size
    with tf.Graph().as_default():
        # Select the dataset.
        dataset = dataset_factory.get_dataset(
            FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)

        util.proc.set_proc_name(FLAGS.model_name + '_' + FLAGS.dataset_name)


        # =================================================================== #
        # Create a dataset provider and batches.
        # =================================================================== #
        with tf.device('/cpu:0'):
            with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
                provider = slim.dataset_data_provider.DatasetDataProvider(
                    dataset,
                    num_readers=FLAGS.num_readers,
                    common_queue_capacity=20 * batch_size,
                    common_queue_min=10 * batch_size,
                    shuffle=True)
            # Get for SSD network: image, labels, bboxes.
            [image, shape, gignored, gbboxes, x1, x2, x3, x4, y1, y2, y3, y4] = provider.get(['image', 'shape',
                                                             'object/ignored',
                                                             'object/bbox', 
                                                             'object/oriented_bbox/x1',
                                                             'object/oriented_bbox/x2',
                                                             'object/oriented_bbox/x3',
                                                             'object/oriented_bbox/x4',
                                                             'object/oriented_bbox/y1',
                                                             'object/oriented_bbox/y2',
                                                             'object/oriented_bbox/y3',
                                                             'object/oriented_bbox/y4'
                                                             ])
            gxs = tf.transpose(tf.stack([x1, x2, x3, x4])) #shape = (N, 4)
            gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
            image = tf.identity(image, 'input_image')
            # Pre-processing image, labels and bboxes.
            image_shape = (FLAGS.train_image_size, FLAGS.train_image_size)
            image, gignored, gbboxes, gxs, gys = \
                            ssd_vgg_preprocessing.preprocess_image(image, gignored, gbboxes, gxs, gys, 
                                                               out_shape=image_shape,
                                                               is_training = True)
            gxs = gxs * tf.cast(image_shape[1], gxs.dtype)
            gys = gys * tf.cast(image_shape[0], gys.dtype)
            gorbboxes = tfe_seglink.tf_min_area_rect(gxs, gys)
            image = tf.identity(image, 'processed_image')
            
            with tf.Session() as sess:
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                i = 0
                while i < 2:
                    i += 1
                    image_data, label_data, bbox_data, xs_data, ys_data, orbboxes = \
                                 sess.run([image, gignored, gbboxes, gxs, gys, gorbboxes])
                    image_data = image_data + [123., 117., 104.]
                    image_data = np.asarray(image_data, np.uint8)
                    h, w = image_data.shape[0:-1]
                    bbox_data = bbox_data * [h, w, h, w]
                    I_bbox = image_data.copy()
                    I_xys = image_data.copy()
                    I_orbbox = image_data.copy()
                    
                    for idx in range(bbox_data.shape[0]):
                        
                        def draw_bbox():
                            y1, x1, y2, x2 = bbox_data[idx, :]
                            util.img.rectangle(I_bbox, (x1, y1), (x2, y2), color = util.img.COLOR_WHITE)
                        
                        def draw_xys():
                            points = zip(xs_data[idx, :], ys_data[idx, :])
                            cnts = util.img.points_to_contours(points)
                            util.img.draw_contours(I_xys, cnts, -1, color=util.img.COLOR_GREEN)

                        def draw_orbbox():
                            orbox = orbboxes[idx, :]
                            import cv2
                            rect = ((orbox[0], orbox[1]), (orbox[2], orbox[3]), orbox[4])
                            # cv2.boxPoints in OpenCV 3+, cv2.cv.BoxPoints in OpenCV 2.x
                            box = cv2.boxPoints(rect) if hasattr(
                                cv2, 'boxPoints') else cv2.cv.BoxPoints(rect)
                            box = np.int0(box)
                            cv2.drawContours(I_orbbox, [box], 0, util.img.COLOR_RGB_RED, 1)
                        
                        draw_bbox()
                        draw_xys()
                        draw_orbbox()
                        
                    print(util.sit(I_bbox))
                    print(util.sit(I_xys))
                    print(util.sit(I_orbbox))
                    print('check the images and make sure that bboxes in different colors are the same.')
                coord.request_stop()
                coord.join(threads)
def test():
    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)
    
    global_step = slim.get_or_create_global_step()
    with tf.name_scope('evaluation_%dx%d'%(FLAGS.eval_image_height, FLAGS.eval_image_width)):
        with tf.variable_scope(tf.get_variable_scope(), reuse = False):
            image = tf.placeholder(dtype=tf.int32, shape = [None, None, 3])
            image_shape = tf.placeholder(dtype = tf.int32, shape = [3, ])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(image, None, None, None, None, 
                                                       out_shape = config.image_shape,
                                                       data_format = config.data_format, 
                                                       is_training = False)
            b_image = tf.expand_dims(processed_image, axis = 0)

            # build model and loss
            net = pixel_link_symbol.PixelLinkNet(b_image, is_training = False)
            masks = pixel_link.tf_decode_score_map_to_mask_in_batch(
                net.pixel_pos_scores, net.link_pos_scores)
            
    sess_config = tf.ConfigProto(log_device_placement = False, allow_soft_placement = True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction
    
    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore(
                tf.trainable_variables())
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()
        
    
    saver = tf.train.Saver(var_list = variables_to_restore)
    with tf.Session() as sess:
        saver.restore(sess, util.tf.get_latest_ckpt(FLAGS.checkpoint_path))
        
        files = util.io.ls(FLAGS.dataset_dir)
        
        for image_name in files:
            file_path = util.io.join_path(FLAGS.dataset_dir, image_name)
            image_data = util.img.imread(file_path)
            link_scores, pixel_scores, mask_vals = sess.run(
                    [net.link_pos_scores, net.pixel_pos_scores, masks],
                    feed_dict = {image: image_data})
            h, w, _ = image_data.shape
            def resize(img):
                return util.img.resize(img, size = (w, h), 
                                       interpolation = cv2.INTER_NEAREST)
            
            def get_bboxes(mask):
                return pixel_link.mask_to_bboxes(mask, image_data.shape)
            
            def draw_bboxes(img, bboxes, color):
                for bbox in bboxes:
                    points = np.reshape(bbox, [4, 2])
                    cnts = util.img.points_to_contours(points)
                    util.img.draw_contours(img, contours = cnts, 
                           idx = -1, color = color, border_width = 1)
            image_idx = 0
            pixel_score = pixel_scores[image_idx, ...]
            mask = mask_vals[image_idx, ...]

            bboxes_det = get_bboxes(mask)
            
            mask = resize(mask)
            pixel_score = resize(pixel_score)

            draw_bboxes(image_data, bboxes_det, util.img.COLOR_RGB_RED)
            # print(util.sit(pixel_score))
            # print(util.sit(mask))
            print(util.sit(image_data))
Example #27
def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        tf_global_step = slim.get_or_create_global_step()

        # =================================================================== #
        # Dataset + SSD model + Pre-processing
        # =================================================================== #
        dataset = pascalvoc_2012.get_split(FLAGS.dataset_split_name, FLAGS.dataset_dir)

        # Get the SSD network and its anchors.
        ssd_class = ssd_vgg_300.SSDNet
        ssd_params = ssd_class.default_params._replace(num_classes=FLAGS.num_classes)
        ssd_net = ssd_class(ssd_params)

        # Evaluation shape and associated anchors: eval_image_size
        ssd_shape = ssd_net.params.img_shape
        ssd_anchors = ssd_net.anchors(ssd_shape)

        # =================================================================== #
        # Create a dataset provider and batches.
        # =================================================================== #
        with tf.device('/cpu:0'):
            with tf.name_scope('data_provider'):
                provider = slim.dataset_data_provider.DatasetDataProvider(
                    dataset,
                    common_queue_capacity=2 * FLAGS.batch_size,
                    common_queue_min=FLAGS.batch_size,
                    shuffle=False)
            # Get for SSD network: image, labels, bboxes.
            [image, shape, glabels, gbboxes] = provider.get(['image', 'shape',
                                                             'object/label',
                                                             'object/bbox'])
            if FLAGS.remove_difficult:
                [gdifficults] = provider.get(['object/difficult'])
            else:
                gdifficults = tf.zeros(tf.shape(glabels), dtype=tf.int64)

            # Pre-processing image, labels and bboxes.
            image, glabels, gbboxes, gbbox_img = \
                ssd_vgg_preprocessing.preprocess_image(image, glabels, gbboxes,
                                       out_shape=ssd_shape,
                                       data_format=DATA_FORMAT,
                                       resize=FLAGS.eval_resize,
                                       difficults=None)

            # Encode groundtruth labels and bboxes.
            gclasses, glocalisations, gscores = \
                ssd_net.bboxes_encode(glabels, gbboxes, ssd_anchors)
            batch_shape = [1] * 5 + [len(ssd_anchors)] * 3
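            # batch_shape drives tf_utils.reshape_list below: the first five
            # entries are single tensors, the last three are lists with one
            # tensor per anchor layer.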

            # Evaluation batch.
            r = tf.train.batch(
                tf_utils.reshape_list([image, glabels, gbboxes, gdifficults, gbbox_img,
                                       gclasses, glocalisations, gscores]),
                batch_size=FLAGS.batch_size,
                num_threads=FLAGS.num_preprocessing_threads,
                capacity=5 * FLAGS.batch_size,
                dynamic_pad=True)
            (b_image, b_glabels, b_gbboxes, b_gdifficults, b_gbbox_img, b_gclasses,
             b_glocalisations, b_gscores) = tf_utils.reshape_list(r, batch_shape)

        # =================================================================== #
        # SSD Network + Outputs decoding.
        # =================================================================== #
        dict_metrics = {}
        arg_scope = ssd_net.arg_scope(data_format=DATA_FORMAT)
        with slim.arg_scope(arg_scope):
            predictions, localisations, logits, end_points = \
                ssd_net.net(b_image, is_training=False)
        # Add losses functions.
        ssd_net.losses(logits, localisations,
                       b_gclasses, b_glocalisations, b_gscores)

        # Performing post-processing on CPU: loop-intensive, usually more efficient.
        with tf.device('/device:CPU:0'):
            # Detected objects from SSD output.
            localisations = ssd_net.bboxes_decode(localisations, ssd_anchors)
            rscores, rbboxes = \
                ssd_net.detected_bboxes(predictions, localisations,
                                        select_threshold=FLAGS.select_threshold,
                                        nms_threshold=FLAGS.nms_threshold,
                                        clipping_bbox=None,
                                        top_k=FLAGS.select_top_k,
                                        keep_top_k=FLAGS.keep_top_k)
            # Compute TP and FP statistics.
            num_gbboxes, tp, fp, rscores = \
                tfe.bboxes_matching_batch(rscores.keys(), rscores, rbboxes,
                                          b_glabels, b_gbboxes, b_gdifficults,
                                          matching_threshold=FLAGS.matching_threshold)

        # Variables to restore
        variables_to_restore = slim.get_variables_to_restore()

        # =================================================================== #
        # Evaluation metrics.
        # =================================================================== #
        with tf.device('/device:CPU:0'):
            dict_metrics = {}
            # First add all losses.
            for loss in tf.get_collection(tf.GraphKeys.LOSSES):
                dict_metrics[loss.op.name] = slim.metrics.streaming_mean(loss)
            # Extra losses as well.
            for loss in tf.get_collection('EXTRA_LOSSES'):
                dict_metrics[loss.op.name] = slim.metrics.streaming_mean(loss)

            # Add metrics to summaries and Print on screen.
            for name, metric in dict_metrics.items():
                # summary_name = 'eval/%s' % name
                summary_name = name
                op = tf.summary.scalar(summary_name, metric[0], collections=[])
                # op = tf.Print(op, [metric[0]], summary_name)
                tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

            # FP and TP metrics.
            tp_fp_metric = tfe.streaming_tp_fp_arrays(num_gbboxes, tp, fp, rscores)
            for c in tp_fp_metric[0].keys():
                dict_metrics['tp_fp_%s' % c] = (tp_fp_metric[0][c],
                                                tp_fp_metric[1][c])

            # Add to summaries precision/recall values.
            aps_voc07 = {}
            aps_voc12 = {}
            for c in tp_fp_metric[0].keys():
                # Precision and recall values.
                prec, rec = tfe.precision_recall(*tp_fp_metric[0][c])

                # Average precision VOC07.
                v = tfe.average_precision_voc07(prec, rec)
                summary_name = 'AP_VOC07/%s' % c
                op = tf.summary.scalar(summary_name, v, collections=[])
                # op = tf.Print(op, [v], summary_name)
                tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
                aps_voc07[c] = v

                # Average precision VOC12.
                v = tfe.average_precision_voc12(prec, rec)
                summary_name = 'AP_VOC12/%s' % c
                op = tf.summary.scalar(summary_name, v, collections=[])
                # op = tf.Print(op, [v], summary_name)
                tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
                aps_voc12[c] = v

            # Mean average precision VOC07.
            summary_name = 'AP_VOC07/mAP'
            mAP = tf.add_n(list(aps_voc07.values())) / len(aps_voc07)
            op = tf.summary.scalar(summary_name, mAP, collections=[])
            op = tf.Print(op, [mAP], summary_name)
            tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

            # Mean average precision VOC12.
            summary_name = 'AP_VOC12/mAP'
            mAP = tf.add_n(list(aps_voc12.values())) / len(aps_voc12)
            op = tf.summary.scalar(summary_name, mAP, collections=[])
            op = tf.Print(op, [mAP], summary_name)
            tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
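            # mAP here is the plain arithmetic mean of the per-class APs
            # accumulated in aps_voc07/aps_voc12 above.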

        # for i, v in enumerate(l_precisions):
        #     summary_name = 'eval/precision_at_recall_%.2f' % LIST_RECALLS[i]
        #     op = tf.summary.scalar(summary_name, v, collections=[])
        #     op = tf.Print(op, [v], summary_name)
        #     tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

        # Split into values and updates ops.
        names_to_values, names_to_updates = slim.metrics.aggregate_metric_map(dict_metrics)

        # =================================================================== #
        # Evaluation loop.
        # =================================================================== #


        # Number of batches...
        if FLAGS.max_num_batches:
            num_batches = FLAGS.max_num_batches
        else:
            num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))

        if not FLAGS.wait_for_checkpoints:
            if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
                checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
            else:
                checkpoint_path = FLAGS.checkpoint_path
            tf.logging.info('Evaluating %s' % checkpoint_path)

            # Standard evaluation loop.
            start = time.time()
            slim.evaluation.evaluate_once(
                master=FLAGS.master,
                checkpoint_path=checkpoint_path,
                logdir=FLAGS.eval_dir,
                num_evals=num_batches,
                eval_op=flatten(list(names_to_updates.values())),
                variables_to_restore=variables_to_restore)
            # Log time spent.
            elapsed = time.time() - start
            print('Time spent : %.3f seconds.' % elapsed)
            print('Time spent per BATCH: %.3f seconds.' % (elapsed / num_batches))

        else:
            checkpoint_path = FLAGS.checkpoint_path
            tf.logging.info('Evaluating %s' % checkpoint_path)

            # Waiting loop.
            slim.evaluation.evaluation_loop(
                master=FLAGS.master,
                checkpoint_dir=checkpoint_path,
                logdir=FLAGS.eval_dir,
                num_evals=num_batches,
                eval_op=flatten(list(names_to_updates.values())),
                variables_to_restore=variables_to_restore,
                eval_interval_secs=60,
                max_number_of_evaluations=np.inf,
                timeout=None)
Example #28
def test():
    outfile = os.path.join(FLAGS.output_dir, 'DECT_result.txt')
    if os.path.exists(outfile):
        os.remove(outfile)
    wfile = open(outfile, 'w')
    # print(">> scale_resize", FLAGS.scale_resize, type(FLAGS.scale_resize))

    avg_conf_thresh = float(FLAGS.pixel_conf_threshold +
                            FLAGS.link_conf_threshold) / 2

    global_step = slim.get_or_create_global_step()
    # with tf.name_scope('evaluation_%dx%d'%(FLAGS.eval_image_height, FLAGS.eval_image_width)):
    with tf.name_scope('evaluation_%dx%d' % (0000, 0000)):
        with tf.variable_scope(tf.get_variable_scope(), reuse=False):
            image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
            image_shape = tf.placeholder(dtype=tf.int32, shape=[
                3,
            ])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
                image,
                None,
                None,
                None,
                None,
                out_shape=config.image_shape,
                data_format=config.data_format,
                do_resize=False,
                is_training=False)
            b_image = tf.expand_dims(processed_image, axis=0)

            # build model and loss
            net = pixel_link_symbol.PixelLinkNet(b_image, is_training=False)
            masks = pixel_link.tf_decode_score_map_to_mask_in_batch(
                net.pixel_pos_scores, net.link_pos_scores)

    sess_config = tf.ConfigProto(log_device_placement=False,
                                 allow_soft_placement=True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore(
            tf.trainable_variables())
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()

    timer = [[], [], [], [],
             []]  ## load_image, pad_image, inference, cal_box, total
    saver = tf.train.Saver(var_list=variables_to_restore)
    with tf.Session() as sess:
        saver.restore(sess, FLAGS.checkpoint_path)

        files = os.listdir(FLAGS.dataset_dir)

        for image_name in files:
            sp1 = time.time()
            file_path = os.path.join(FLAGS.dataset_dir, image_name)
            origin_image_data = cv2.imread(file_path)
            sp2 = time.time()
            '''padding to avoid distort'''
            # image_data = cv_pad(image_data, config.image_shape)
            if FLAGS.scale_resize != 1:
                image_data = scale_resize(origin_image_data,
                                          FLAGS.scale_resize)
            else:
                image_data = origin_image_data
            sp3 = time.time()

            link_scores, pixel_scores, mask_vals = sess.run(
                [net.link_pos_scores, net.pixel_pos_scores, masks],
                feed_dict={image: image_data})
            h, w, _ = image_data.shape
            sp4 = time.time()

            def resize(img):
                # cv2.resize takes dsize positionally (there is no 'size' kwarg)
                return cv2.resize(img, (w, h),
                                  interpolation=cv2.INTER_NEAREST)

            def get_bboxes(mask):
                return pixel_link.mask_to_bboxes(mask, image_data.shape)

            def points_to_contour(points):
                contours = [[list(p)] for p in points]
                return np.asarray(contours, dtype=np.int32)

            def points_to_contours(points):
                return np.asarray([points_to_contour(points)])
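            # the two helpers above rebuild the contour layout cv2 expects:
            # int32 point arrays of shape (num_points, 1, 2)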

            def draw_bboxes(img, bboxes, color):
                for bbox in bboxes:
                    points = np.reshape(bbox, [4, 2])
                    cnts = points_to_contours(points)
                    # cv2.drawContours signature: (image, contours, contourIdx, color, thickness)
                    cv2.drawContours(img, cnts, -1, color, 1)

            image_idx = 0
            pixel_score = pixel_scores[image_idx, ...]
            mask = mask_vals[image_idx, ...]

            bboxes_det = get_bboxes(mask)
            _bboxes_det = revert_dectbox(bboxes_det, FLAGS.scale_resize)
            sp5 = time.time()
            # print ">> bboxes_det:",type(bboxes_det), bboxes_det
            # print ">> _bboxes_det:",type(_bboxes_det), _bboxes_det

            mask = resize(mask)
            pixel_score = resize(pixel_score)

            draw_bboxes(origin_image_data, _bboxes_det, (0, 0, 255))
            cv2.imwrite(
                os.path.join(FLAGS.output_dir,
                             'out_' + os.path.basename(file_path)),
                origin_image_data)

            nameID = image_name.split('.')[0]
            for bbox in _bboxes_det:
                # print "nameID, bbox", nameID, bbox
                _bbox = list(bbox)
                wfile.write("{}\t{}\t{}\n".format(nameID, avg_conf_thresh,
                                                  _bbox))

            ## timer accumulate
            timer[0].append(sp2 - sp1)
            timer[1].append(sp3 - sp2)
            timer[2].append(sp4 - sp3)
            timer[3].append(sp5 - sp4)
            timer[4].append(sp5 - sp1)
            print('{}:{}\t{}:{}\t{}:{}\t{}:{}\t{}:{}\n'.format(
                'Load', round(sp2 - sp1, 3), 'Pad', round(sp3 - sp2, 3),
                'Infer', round(sp4 - sp3, 3), 'Post', round(sp5 - sp4, 3),
                'Total', round(sp5 - sp1, 3)))
        print('\nAvg Timer Stat:')
        print('{}:{}\t{}:{}\t{}:{}\t{}:{}\t{}:{}\n'.format(
            'Load', round(np.mean(timer[0]), 3), 'Pad', round(np.mean(timer[1]), 3),
            'Infer', round(np.mean(timer[2]), 3), 'Post', round(np.mean(timer[3]), 3),
            'Total', round(np.mean(timer[4]), 3)))
        wfile.close()
Example #29
def test(checkpoint_path):
    with tf.name_scope('test'):
        image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
        image_shape = tf.placeholder(dtype=tf.int32, shape=[
            3,
        ])
        processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
            image,
            None,
            None,
            None,
            None,
            out_shape=config.image_shape,
            data_format=config.data_format,
            is_training=False)
        b_image = tf.expand_dims(processed_image, axis=0)
        net = pixel_link_symbol.PixelLinkNet(b_image, is_training=True)
        global_step = slim.get_or_create_global_step()

    sess_config = tf.ConfigProto(log_device_placement=False,
                                 allow_soft_placement=True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

    checkpoint_dir = util.io.get_dir(checkpoint_path)
    logdir = util.io.join_path(
        checkpoint_dir, 'test',
        FLAGS.dataset_name + '_' + FLAGS.dataset_split_name)

    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore()
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()

    saver = tf.train.Saver(var_list=variables_to_restore)

    image_names = util.io.ls(FLAGS.dataset_dir)
    image_names.sort()

    checkpoint_name = util.io.get_filename(str(checkpoint_path))
    dump_path = util.io.join_path(logdir, checkpoint_name)
    txt_path = util.io.join_path(dump_path, 'txt')
    zip_path = util.io.join_path(dump_path, checkpoint_name + '_det.zip')

    with tf.Session(config=sess_config) as sess:
        saver.restore(sess, checkpoint_path)

        for iter, image_name in enumerate(image_names):
            image_data = util.img.imread(util.io.join_path(
                FLAGS.dataset_dir, image_name),
                                         rgb=True)

            scale = calculate_scale(image_data)
            image_data = cv2.resize(
                image_data, (FLAGS.eval_image_width, FLAGS.eval_image_height),
                interpolation=cv2.INTER_AREA)

            image_name = image_name.split('.')[0]

            score_nodes = [net.pixel_pos_scores, net.link_pos_scores]
            if net.pixel_pos_scores_add is not None:
                score_nodes.extend(
                    [net.pixel_pos_scores_add, net.link_pos_scores_add])

            results = sess.run(score_nodes, feed_dict={image: image_data})

            print('%d/%d: %s' % (iter + 1, len(image_names), image_name))
            to_txt(txt_path, image_name, image_data, results, scale)

    # create zip file for icdar2015
    cmd = 'cd %s;zip -j %s %s/*' % (dump_path, zip_path, txt_path)
    print(cmd)
    util.cmd.cmd(cmd)
    print('zip file created: ', util.io.join_path(dump_path, zip_path))
Example #30
def text_detection():
    cropped_dir = args.crop_dir
    if os.path.exists(cropped_dir):
        shutil.rmtree(cropped_dir)
    os.makedirs(cropped_dir)

    checkpoint_dir = util.io.get_dir(args.checkpoint_path)

    # global_step = slim.get_or_create_global_step()
    with tf.name_scope('evaluation_%dx%d' %
                       (args.eval_image_height, args.eval_image_width)):
        with tf.variable_scope(tf.get_variable_scope(), reuse=False):
            image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
            image_shape = tf.placeholder(dtype=tf.int32, shape=[
                3,
            ])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
                image,
                None,
                None,
                None,
                None,
                out_shape=config.image_shape,
                data_format=config.data_format,
                is_training=False)
            b_image = tf.expand_dims(processed_image, axis=0)

            # build model and loss
            net = pixel_link_symbol.PixelLinkNet(b_image, is_training=False)
            masks = pixel_link.tf_decode_score_map_to_mask_in_batch(
                net.pixel_pos_scores, net.link_pos_scores)

    sess_config = tf.ConfigProto(log_device_placement=False,
                                 allow_soft_placement=True)
    if args.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif args.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = args.gpu_memory_fraction

    # Variables to restore: moving avg. or normal weights.
    # if args.using_moving_average:
    variable_averages = tf.train.ExponentialMovingAverage(
        args.moving_average_decay)
    variables_to_restore = variable_averages.variables_to_restore(
        tf.trainable_variables())
    # variables_to_restore[global_step.op.name] = global_step
    # else:
    #     variables_to_restore = slim.get_variables_to_restore()

    saver = tf.train.Saver(var_list=variables_to_restore)
    with tf.Session() as sess:
        saver.restore(sess, util.tf.get_latest_ckpt(args.checkpoint_path))

        files = util.io.ls(args.dataset_dir)
        txt_folder = args.txt_dir
        if os.path.exists(txt_folder):
            shutil.rmtree(txt_folder)
        os.makedirs(txt_folder)
        for image_name in files:

            file_path = util.io.join_path(args.dataset_dir, image_name)
            image_format = [
                '.jpg', '.JPG', '.png', '.PNG', '.jpeg', '.JPEG', '.gif', '.GIF'
            ]
            if any(file_path.endswith(ext) for ext in image_format):
                ### subfolder
                subfolder_name = image_name.replace('.jpg', '')
                subfolder_path = os.path.join(cropped_dir, subfolder_name)
                os.mkdir(subfolder_path)

                image_data = util.img.imread(file_path)
                ## list boxes
                coord_boxes = []
                ## original width & height
                org_height = int(image_data.shape[0])
                org_width = int(image_data.shape[1])

                ### txt
                txt_name = image_name.replace('.jpg', '.txt')
                txt_path = os.path.join(txt_folder, txt_name)
                txt_file = open(txt_path, 'a')
                info_org_img = '{"image_name": ' + '"%s"' % image_name + ', ' + '"width":' + str(
                    org_width) + ', ' + '"height": ' + str(org_height) + '}\n'
                txt_file.write(info_org_img)

                link_scores, pixel_scores, mask_vals = sess.run(
                    [net.link_pos_scores, net.pixel_pos_scores, masks],
                    feed_dict={image: image_data})
                h, w, _ = image_data.shape

                def resize(img):
                    return util.img.resize(img,
                                           size=(1280, 768),
                                           interpolation=cv2.INTER_NEAREST)

                def get_bboxes(mask):
                    return pixel_link.mask_to_bboxes(mask, image_data.shape)

                def draw_bboxes(img, bboxes, color):
                    i = 0
                    for bbox in bboxes:
                        ### top_right -> top_left -> bottom_left -> bottom_right
                        values = [int(v) for v in bbox]
                        x_max = max(
                            [values[0], values[2], values[4], values[6]])
                        x_min = min(
                            [values[0], values[2], values[4], values[6]])
                        y_max = max(
                            [values[1], values[3], values[5], values[7]])
                        y_min = min(
                            [values[1], values[3], values[5], values[7]])
                        ### map coordinates from the 1280x768 inference size back to the original image
                        x_max = int(x_max * org_width / 1280)
                        x_min = int(x_min * org_width / 1280)
                        y_max = int(y_max * org_height / 768)
                        y_min = int(y_min * org_height / 768)

                        h = y_max - y_min
                        w = x_max - x_min

                        top_left = (x_min - 7, y_min)
                        bbox = [
                            x_max, y_min, x_min, y_min, x_min, y_max, x_max,
                            y_max
                        ]

                        points = np.reshape(bbox, [4, 2])
                        cnts = util.img.points_to_contours(points)
                        util.img.draw_contours(img,
                                               contours=cnts,
                                               idx=-1,
                                               color=color,
                                               border_width=1)

                        new_img = img[(y_min):y_min + h, (x_min):x_min + w]
                        tmp_1 = image_name.replace('.jpg', '')
                        img_crop_name = tmp_1 + "_" + str(i) + '.jpg'
                        img_crop_path = os.path.join(subfolder_path,
                                                     img_crop_name)
                        cv2.imwrite(img_crop_path, new_img)
                        cv2.putText(img,
                                    '%s' % (str(i)),
                                    top_left,
                                    cv2.FONT_HERSHEY_SIMPLEX,
                                    0.5, (0, 128, 255),
                                    1,
                                    lineType=cv2.LINE_AA)

                        ### record the crop in the txt file; "id" matches the
                        ### index in the crop's filename
                        info_crop_img = '{"image_name": "%s", "id": %d, "x": %d, "y": %d, "width": %d, "height": %d}\n' % (
                            img_crop_name, i, x_min, y_min, w, h)
                        txt_file.write(info_crop_img)
                        i = i + 1

                ### txt_file stays open here; it is closed after draw_bboxes runs below

                def get_temp_path(name=''):
                    path = os.path.join(args.visual_dir, image_name)
                    return path.replace('.jpg', '.png')
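
                ### the helper below mirrors util.sit ("save image temporarily");
                ### `plt` and `_img` are assumed to be the plotting/image modules
                ### from the bundled `util` package, not matplotlib or cv2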

                def sit(img=None, format='rgb', path=None, name=""):
                    if path is None:
                        path = get_temp_path(name)
                    if img is None:
                        plt.save_image(path)
                        return path

                    if format == 'bgr':
                        img = _img.bgr2rgb(img)
                    if type(img) == list:
                        plt.show_images(images=img,
                                        path=path,
                                        show=False,
                                        axis_off=True,
                                        save=True)
                    else:
                        plt.imwrite(path, img)

                    return path

                image_idx = 0
                pixel_score = pixel_scores[image_idx, ...]
                mask = mask_vals[image_idx, ...]
                ###
                bboxes_det = get_bboxes(mask)
                coord_boxes.append(bboxes_det)
                draw_bboxes(image_data, bboxes_det, util.img.COLOR_RGB_RED)
                txt_file.close()
                print(sit(image_data))
            else:
                continue
Beispiel #31
def main(_):
    tf.logging.set_verbosity(tf.logging.DEBUG)
    with tf.Graph().as_default():
        # Create global_step.
        global_step = slim.create_global_step()

        # Select the dataset.
        dataset = pascalvoc_2012.get_split('train', FLAGS.dataset_dir)

        # Get the SSD network and its anchors.
        ssd_class = ssd_vgg_300.SSDNet
        ssd_params = ssd_class.default_params._replace(
            num_classes=FLAGS.num_classes)
        ssd_net = ssd_class(ssd_params)
        ssd_shape = ssd_net.params.img_shape
        # Compute the positions and sizes of all anchor boxes: [anchor = (x, y, h, w), ...]
        ssd_anchors = ssd_net.anchors(ssd_shape)
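        # ssd_anchors is a per-feature-layer list of anchor positions and
        # sizes, matched against the ground truth during encoding below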

        # =================================================================== #
        # Create a dataset provider and batches.
        # =================================================================== #
        with tf.name_scope('pascalvoc_2012_data_provider'):
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=20 * FLAGS.batch_size,
                common_queue_min=10 * FLAGS.batch_size,
                shuffle=True)
        # Get for SSD network: image, labels, bboxes.
        [image, shape, glabels, gbboxes
         ] = provider.get(['image', 'shape', 'object/label', 'object/bbox'])
        # Pre-processing image, labels and bboxes.
        image, glabels, gbboxes = \
            ssd_vgg_preprocessing.preprocess_image(image, glabels, gbboxes,
                                                   out_shape=ssd_shape,
                                                   data_format=DATA_FORMAT,
                                                   is_training=True)
        # Encode groundtruth labels and bboxes.
        gclasses, glocalisations, gscores = \
            ssd_net.bboxes_encode(glabels, gbboxes, ssd_anchors)
        batch_shape = [1] + [len(ssd_anchors)] * 3
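        # batch_shape records the nesting: one image plus, per feature layer,
        # one entry each for gclasses/glocalisations/gscores; reshape_list
        # flattens and restores the nested lists around tf.train.batch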

        # Training batches and queue.
        r = tf.train.batch(tf_utils.reshape_list(
            [image, gclasses, glocalisations, gscores]),
                           batch_size=FLAGS.batch_size,
                           num_threads=FLAGS.num_preprocessing_threads,
                           capacity=5 * FLAGS.batch_size)
        b_image, b_gclasses, b_glocalisations, b_gscores = \
            tf_utils.reshape_list(r, batch_shape)

        # Intermediate queueing
        batch_queue = slim.prefetch_queue.prefetch_queue(tf_utils.reshape_list(
            [b_image, b_gclasses, b_glocalisations, b_gscores]),
                                                         capacity=2)

        # Dequeue batch.
        b_image, b_gclasses, b_glocalisations, b_gscores = \
            tf_utils.reshape_list(batch_queue.dequeue(), batch_shape)

        # Construct SSD network.
        # Read the network's default parameters.
        arg_scope = ssd_net.arg_scope(weight_decay=FLAGS.weight_decay,
                                      data_format=DATA_FORMAT)
        with slim.arg_scope(arg_scope):
            predictions, localisations, logits, end_points = \
                ssd_net.net(b_image, is_training=True)
        # Add loss function.
        ssd_net.losses(logits,
                       localisations,
                       b_gclasses,
                       b_glocalisations,
                       b_gscores,
                       match_threshold=FLAGS.match_threshold,
                       negative_ratio=FLAGS.negative_ratio,
                       alpha=FLAGS.loss_alpha,
                       label_smoothing=0.0)

        # Gather initial summaries.
        summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

        # =================================================================== #
        # Add summaries.
        # =================================================================== #
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

        # Add summaries for end_points.
        for end_point in end_points:
            x = end_points[end_point]
            summaries.add(tf.summary.histogram('activations/' + end_point, x))
            summaries.add(
                tf.summary.scalar('sparsity/' + end_point,
                                  tf.nn.zero_fraction(x)))
        # Add summaries for losses and extra losses.
        for loss in tf.get_collection(tf.GraphKeys.LOSSES):
            summaries.add(tf.summary.scalar(loss.op.name, loss))
        for loss in tf.get_collection('EXTRA_LOSSES'):
            summaries.add(tf.summary.scalar(loss.op.name, loss))

        # Add summaries for variables.
        for variable in slim.get_model_variables():
            summaries.add(tf.summary.histogram(variable.op.name, variable))

        # =================================================================== #
        # Configure the optimization procedure.
        # =================================================================== #
        learning_rate = tf_utils.configure_learning_rate(
            FLAGS, dataset.num_samples, global_step)
        optimizer = tf.train.AdamOptimizer(learning_rate,
                                           beta1=0.9,
                                           beta2=0.999,
                                           epsilon=1.0)
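        # the unusually large Adam epsilon (1.0) is kept from the source;
        # it damps update magnitudes early in training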
        summaries.add(tf.summary.scalar('learning_rate', learning_rate))

        # Variables to train.
        variables_to_train = tf_utils.get_variables_to_train(FLAGS)

        # Compute gradients and build the train tensor and summary op below.
        total_loss = tf.add_n(tf.get_collection(tf.GraphKeys.LOSSES))
        gradients = optimizer.compute_gradients(total_loss,
                                                var_list=variables_to_train)
        # Add total_loss to summary.
        summaries.add(tf.summary.scalar('total_loss', total_loss))

        # Create gradient updates.
        grad_updates = optimizer.apply_gradients(gradients,
                                                 global_step=global_step)
        update_ops.append(grad_updates)
        # Group all update ops into a single operation.
        update_op = tf.group(*update_ops)
        # Make sure all update ops have run before total_loss is fetched.
        train_tensor = control_flow_ops.with_dependencies([update_op],
                                                          total_loss,
                                                          name='train_op')

        # Add the summaries from the first clone. These contain the summaries
        # created by the model and the losses.
        summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES))
        # Merge all summaries together.
        summary_op = tf.summary.merge(list(summaries), name='summary_op')

        saver = tf.train.Saver(max_to_keep=5,
                               keep_checkpoint_every_n_hours=1.0,
                               write_version=2,
                               pad_step_number=False)
        slim.learning.train(train_tensor,
                            logdir=FLAGS.train_dir,
                            init_fn=tf_utils.get_init_fn(FLAGS),
                            summary_op=summary_op,
                            number_of_steps=FLAGS.max_number_of_steps,
                            log_every_n_steps=FLAGS.log_every_n_steps,
                            save_summaries_secs=FLAGS.save_summaries_secs,
                            saver=saver,
                            save_interval_secs=FLAGS.save_interval_secs)

def test():
    if config.model_type in ('vgg16', 'vgg16_dssd'):
        from nets import pixel_link_symbol1 as pixel_link_symbol
    else:  # 'vgg16_dssd_ssd'
        from nets import pixel_link_symbol2 as pixel_link_symbol

    with tf.name_scope('test'):
        image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
        image_shape = tf.placeholder(dtype=tf.int32, shape=[
            3,
        ])
        processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
            image,
            None,
            None,
            None,
            None,
            out_shape=config.image_shape,
            data_format=config.data_format,
            is_training=False)
        b_image = tf.expand_dims(processed_image, axis=0)
        net = pixel_link_symbol.PixelLinkNet(b_image, is_training=True)
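        # NOTE: is_training=True is kept from the source; for a pure
        # inference graph is_training=False would be the usual choice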
        global_step = slim.get_or_create_global_step()

    sess_config = tf.ConfigProto(log_device_placement=False,
                                 allow_soft_placement=True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)
    logdir = util.io.join_path(
        checkpoint_dir, 'test',
        FLAGS.dataset_name + '_' + FLAGS.dataset_split_name)

    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore()
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()

    saver = tf.train.Saver(var_list=variables_to_restore)

    image_names = util.io.ls(FLAGS.dataset_dir)
    image_names.sort()
    # image_names = image_names[1100:]

    checkpoint = FLAGS.checkpoint_path
    checkpoint_name = util.io.get_filename(str(checkpoint))
    dump_path = util.io.join_path('/home/zhy/pixel_link/test/mtwi_2018/',
                                  checkpoint_name + '-dssd-eval')
    txt_path = util.io.join_path(dump_path, 'txt')
    zip_path = util.io.join_path(dump_path, checkpoint_name + '_det.zip')

    with tf.Session(config=sess_config) as sess:
        saver.restore(sess, checkpoint)

        for iter, image_name in enumerate(image_names):
            # image_name = 'TB210QccTAlyKJjSZFhXXc8XFXa_!!2227306651.jpg.jpg'
            image_data = util.img.imread(util.io.join_path(
                FLAGS.dataset_dir, image_name),
                                         rgb=True)
            # image_name = image_name.split('.jpg')[0]
            image_name = image_name[0:-4]
            pixel_pos_scores, link_pos_scores = sess.run(
                [net.pixel_pos_scores, net.link_pos_scores],
                feed_dict={image: image_data})
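            # decode_batch fuses the pixel and link scores into per-word
            # instance masks; [0, ...] below selects the single image in the batch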

            mask = pixel_link.decode_batch(pixel_pos_scores,
                                           link_pos_scores)[0, ...]

            # bboxes = pixel_link.mask_to_bboxes(mask, image_data.shape)
            # pp.show_result(image_data.copy(), bboxes)
            bboxes = pp.modify_mask_to_bboxes(mask, image_data.shape)
            # pp.show_result(image_data.copy(), bboxes)

            print '%d/%d: %s' % (iter + 1, len(image_names), image_name)

            to_txt(txt_path, image_name, image_data, bboxes)

    final_precision, final_recall, final_score = \
        zhy_evaluator.zhy_evaluate(txt_path, '/home/zhy/pixel_link/dataset/mtwi_2018/data_train_eval_split/split_eval/txt_eval')
    print final_precision, final_recall, final_score

def test():
    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)

    global_step = slim.get_or_create_global_step()
    with tf.name_scope('evaluation_%dx%d' %
                       (FLAGS.eval_image_height, FLAGS.eval_image_width)):
        with tf.variable_scope(tf.get_variable_scope(), reuse=False):
            image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
            image_shape = tf.placeholder(dtype=tf.int32, shape=[
                3,
            ])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
                image,
                None,
                None,
                None,
                None,
                out_shape=config.image_shape,
                data_format=config.data_format,
                is_training=False)
            b_image = tf.expand_dims(processed_image, axis=0)

            # build model and loss
            net = pixel_link_symbol.PixelLinkNet(b_image, is_training=False)
            masks = pixel_link.tf_decode_score_map_to_mask_in_batch(
                net.pixel_pos_scores, net.link_pos_scores)
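            # the decoder thresholds the pixel/link scores and groups positive
            # pixels into connected components, one mask per text instance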

    sess_config = tf.ConfigProto(log_device_placement=False,
                                 allow_soft_placement=True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore(
            tf.trainable_variables())
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()

    saver = tf.train.Saver(var_list=variables_to_restore)
    with tf.Session(config=sess_config) as sess:
        saver.restore(sess, util.tf.get_latest_ckpt(FLAGS.checkpoint_path))

        files = util.io.ls(FLAGS.dataset_dir)

        for image_name in files:
            file_path = util.io.join_path(FLAGS.dataset_dir, image_name)
            image_data = util.img.imread(file_path)
            link_scores, pixel_scores, mask_vals = sess.run(
                [net.link_pos_scores, net.pixel_pos_scores, masks],
                feed_dict={image: image_data})
            h, w, _ = image_data.shape

            def resize(img):
                return util.img.resize(img,
                                       size=(w, h),
                                       interpolation=cv2.INTER_NEAREST)

            def get_bboxes(mask):
                return pixel_link.mask_to_bboxes(mask, image_data.shape)

            def draw_bboxes(img, bboxes, color):
                for bbox in bboxes:
                    points = np.reshape(bbox, [4, 2])
                    cnts = util.img.points_to_contours(points)
                    util.img.draw_contours(img,
                                           contours=cnts,
                                           idx=-1,
                                           color=color,
                                           border_width=1)

            image_idx = 0
            pixel_score = pixel_scores[image_idx, ...]
            mask = mask_vals[image_idx, ...]

            bboxes_det = get_bboxes(mask)

            mask = resize(mask)
            pixel_score = resize(pixel_score)

            draw_bboxes(image_data, bboxes_det, util.img.COLOR_RGB_RED)
            #             print util.sit(pixel_score)
            #             print util.sit(mask)
            print util.sit(image_data)

def test():
    with tf.name_scope('test'):
        image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
        processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
            image,
            None,
            None,
            None,
            None,
            out_shape=config.image_shape,
            data_format=config.data_format,
            is_training=False)
        b_image = tf.expand_dims(processed_image, axis=0)
        net = pixel_link_symbol.PixelLinkNet(b_image, is_training=True)
        global_step = slim.get_or_create_global_step()

    sess_config = tf.ConfigProto(log_device_placement=False,
                                 allow_soft_placement=True)
    sess_config.gpu_options.allow_growth = True

    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)
    logdir = util.io.join_path(
        checkpoint_dir, 'test',
        FLAGS.dataset_name + '_' + FLAGS.dataset_split_name)

    # Variables to restore: moving avg. or normal weights.
    variable_averages = tf.train.ExponentialMovingAverage(
        FLAGS.moving_average_decay)
    variables_to_restore = variable_averages.variables_to_restore()
    variables_to_restore[global_step.op.name] = global_step

    saver = tf.train.Saver(var_list=variables_to_restore)

    image_names = util.io.ls(FLAGS.dataset_dir)
    image_names.sort()

    checkpoint = FLAGS.checkpoint_path
    checkpoint_name = util.io.get_filename(str(checkpoint))
    dump_path = util.io.join_path(logdir, checkpoint_name)
    txt_path = util.io.join_path(dump_path, 'txt')
    zip_path = util.io.join_path(dump_path, checkpoint_name + '_det.zip')

    with tf.Session(config=sess_config) as sess:
        saver.restore(sess, checkpoint)

        for iter, image_name in enumerate(image_names):
            image_data = util.img.imread(util.io.join_path(
                FLAGS.dataset_dir, image_name),
                                         rgb=True)
            image_name = image_name.split('.')[0]
            pixel_pos_scores, link_pos_scores = sess.run(
                [net.pixel_pos_scores, net.link_pos_scores],
                feed_dict={image: image_data})

            print '%d/%d: %s' % (iter + 1, len(image_names), image_name)
            to_txt(txt_path, image_name, image_data, pixel_pos_scores,
                   link_pos_scores)

    # create zip file for icdar2015
    cmd = 'cd %s;zip -j %s %s/*' % (dump_path, zip_path, txt_path)
    print cmd
    util.cmd.cmd(cmd)
    print "zip file created: ", util.io.join_path(dump_path, zip_path)
Beispiel #35
def test():
    with tf.name_scope('test'):
        image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
        image_shape = tf.placeholder(dtype=tf.int32, shape=[
            3,
        ])
        processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
            image,
            None,
            None,
            None,
            None,
            out_shape=config.image_shape,
            data_format=config.data_format,
            is_training=False)
        b_image = tf.expand_dims(processed_image, axis=0)
        net = pixel_link_symbol.PixelLinkNet(b_image, is_training=True)
        global_step = slim.get_or_create_global_step()

    sess_config = tf.ConfigProto(log_device_placement=False,
                                 allow_soft_placement=True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore()
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()

    saver = tf.train.Saver(var_list=variables_to_restore)

    video_names = util.io.ls(FLAGS.dataset_dir)
    video_names.sort()

    checkpoint = FLAGS.checkpoint_path
    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)
    output_dir = util.io.get_dir(FLAGS.output_dir)

    with tf.Session(config=sess_config) as sess:
        saver.restore(sess, checkpoint)

        for iter, video_name in enumerate(video_names):
            basename = os.path.splitext(os.path.basename(video_name))[0]
            vidcap = cv2.VideoCapture(
                util.io.join_path(FLAGS.dataset_dir, video_name))
            length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
            step = 100
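            # only every `step`-th frame is run through the network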

            success, image_data = vidcap.read()
            count = 1
            while success:
                if count % step != 0:
                    success, image_data = vidcap.read()
                    count += 1
                    continue

                pixel_pos_scores, link_pos_scores = sess.run(
                    [net.pixel_pos_scores, net.link_pos_scores],
                    feed_dict={image: image_data})

                image_name = basename + '-' + str(count) + '.jpg'

                to_txt(output_dir, image_name, image_data, pixel_pos_scores,
                       link_pos_scores)

                success, image_data = vidcap.read()
                count += 1

            print('%d/%d: %s' % (iter + 1, len(video_names), video_name))
Beispiel #36
def test():
    checkpoint_dir = util.io.get_dir(FLAGS.checkpoint_path)
    output_dir = FLAGS.output_path

    global_step = slim.get_or_create_global_step()
    with tf.name_scope('evaluation_%dx%d' %
                       (FLAGS.eval_image_height, FLAGS.eval_image_width)):
        with tf.variable_scope(tf.get_variable_scope(), reuse=False):
            image = tf.placeholder(dtype=tf.int32, shape=[None, None, 3])
            image_shape = tf.placeholder(dtype=tf.int32, shape=[
                3,
            ])
            processed_image, _, _, _, _ = ssd_vgg_preprocessing.preprocess_image(
                image,
                None,
                None,
                None,
                None,
                out_shape=config.image_shape,
                data_format=config.data_format,
                is_training=False)
            b_image = tf.expand_dims(processed_image, axis=0)

            # build model and loss
            net = pixel_link_symbol.PixelLinkNet(b_image, is_training=False)
            masks = pixel_link.tf_decode_score_map_to_mask_in_batch(
                net.pixel_pos_scores, net.link_pos_scores)

    sess_config = tf.ConfigProto(log_device_placement=False,
                                 allow_soft_placement=True)
    if FLAGS.gpu_memory_fraction < 0:
        sess_config.gpu_options.allow_growth = True
    elif FLAGS.gpu_memory_fraction > 0:
        sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction

    # Variables to restore: moving avg. or normal weights.
    if FLAGS.using_moving_average:
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay)
        variables_to_restore = variable_averages.variables_to_restore(
            tf.trainable_variables())
        variables_to_restore[global_step.op.name] = global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()

    saver = tf.train.Saver(var_list=variables_to_restore)
    with tf.Session(config=sess_config) as sess:
        saver.restore(sess, util.tf.get_latest_ckpt(FLAGS.checkpoint_path))

        files = util.io.ls(FLAGS.dataset_dir)

        for image_name in files:

            if os.path.isfile(os.path.join(output_dir, image_name + ".png")):
                continue

            file_path = util.io.join_path(FLAGS.dataset_dir, image_name)
            image_data = util.img.imread(file_path)

            image_data, scale = resize_im(image_data,
                                          scale=768,
                                          max_scale=1280)
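            # resize_im (assumed to be the usual CTPN-style helper) scales the
            # short side toward 768 px while capping the long side at 1280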

            start_tf_time = time.time()
            link_scores, pixel_scores, mask_vals = sess.run(
                [net.link_pos_scores, net.pixel_pos_scores, masks],
                feed_dict={image: image_data})
            end_tf_time = time.time()
            # cache the raw network outputs for later analysis; make sure the
            # 'pkl' directory exists first
            if not os.path.exists('pkl'):
                os.makedirs('pkl')
            f = open(os.path.join('pkl', image_name) + '.pkl', 'wb')
            cPickle.dump(link_scores, f, protocol=-1)
            cPickle.dump(pixel_scores, f, protocol=-1)
            cPickle.dump(mask_vals, f, protocol=-1)
            f.close()

            h, w, _ = image_data.shape

            def resize(img):
                return util.img.resize(img,
                                       size=(w, h),
                                       interpolation=cv2.INTER_NEAREST)

            def get_bboxes(mask):
                return pixel_link.mask_to_bboxes(mask, image_data.shape)

            def draw_bboxes(img, bboxes, color):
                for bbox in bboxes:
                    points = np.reshape(bbox, [4, 2])
                    cnts = util.img.points_to_contours(points)
                    util.img.draw_contours(img,
                                           contours=cnts,
                                           idx=-1,
                                           color=color,
                                           border_width=4)

            image_idx = 0
            pixel_score = pixel_scores[image_idx, ...]
            mask = mask_vals[image_idx, ...]
            start_post_time = time.time()
            bboxes_det = get_bboxes(mask)
            end_post_time = time.time()

            print("Tensorflow inference time:", end_tf_time - start_tf_time)
            print("Post filtering time:", end_post_time - start_post_time)

            mask = resize(mask)
            pixel_score = resize(pixel_score)

            draw_bboxes(image_data, bboxes_det, util.img.COLOR_RGB_RED)
            #             print util.sit(pixel_score)
            #             print util.sit(mask)
            #             output_dir = os.path.join("test_output",'%.1f'%FLAGS.pixel_conf_threshold+"_"+'%.1f'%FLAGS.pixel_conf_threshold)

            if not os.path.exists(output_dir):
                os.mkdir(output_dir)
            print util.sit(image_data,
                           format='bgr',
                           path=os.path.join(output_dir, image_name + ".png"))