示例#1
0
def create_dataset_batch_queue(dataset):
    """Build a prefetched queue of preprocessed training batches.

    Reads single samples from `dataset` via a shuffling slim data provider,
    preprocesses each sample, computes the pixel-link ground-truth maps, and
    batches everything behind a prefetch queue. All ops are pinned to the CPU
    so the GPU(s) only see ready-made batches.

    Args:
        dataset: a slim Dataset usable by DatasetDataProvider.

    Returns:
        A slim prefetch queue whose dequeue yields
        (image, pixel_cls_label, pixel_cls_weight,
         pixel_link_label, pixel_link_weight) batches.
    """
    from preprocessing import ssd_vgg_preprocessing

    with tf.device('/cpu:0'):
        with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
            data_provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=1000 * config.batch_size,
                common_queue_min=700 * config.batch_size,
                shuffle=True)

        # One decoded sample: image, labels, axis-aligned bboxes, and the four
        # corner coordinates (x1..x4, y1..y4) of each oriented bbox.
        corner_keys = ['object/oriented_bbox/%s%d' % (axis, i)
                       for axis in ('x', 'y') for i in (1, 2, 3, 4)]
        sample = data_provider.get(
            ['image', 'object/label', 'object/bbox'] + corner_keys)
        image, glabel, gbboxes = sample[:3]
        gxs = tf.transpose(tf.stack(sample[3:7]))  # shape = (N, 4)
        gys = tf.transpose(tf.stack(sample[7:11]))
        image = tf.identity(image, 'input_image')

        # Preprocess image, labels and bboxes (resize, optional rotation, ...).
        image, glabel, gbboxes, gxs, gys = ssd_vgg_preprocessing.preprocess_image(
            image, glabel, gbboxes, gxs, gys,
            out_shape=config.train_image_shape,
            data_format=config.data_format,
            use_rotation=config.use_rotation,
            is_training=True)
        image = tf.identity(image, 'processed_image')

        # Per-pixel ground truth for classification and linking.
        (pixel_cls_label, pixel_cls_weight,
         pixel_link_label, pixel_link_weight) = \
            pixel_link.tf_cal_gt_for_single_image(gxs, gys, glabel)

        # Assemble batches from the single-sample tensors.
        with tf.name_scope(FLAGS.dataset_name + '_batch'):
            batched = tf.train.batch(
                [image, pixel_cls_label, pixel_cls_weight,
                 pixel_link_label, pixel_link_weight],
                batch_size=config.batch_size_per_gpu,
                num_threads=FLAGS.num_preprocessing_threads,
                capacity=500)

        # Prefetch so consumers never stall on input.
        with tf.name_scope(FLAGS.dataset_name + '_prefetch_queue'):
            batch_queue = slim.prefetch_queue.prefetch_queue(
                batched, capacity=50)
    return batch_queue
示例#2
0
def create_dataset_batch_queue(dataset):
    """Create the training input pipeline: provider -> preprocess -> batch -> prefetch.

    Samples are drawn (shuffled) from `dataset`, run through the SSD-VGG
    preprocessing, converted into pixel-link ground-truth tensors, batched,
    and exposed through a prefetch queue. Everything lives on /cpu:0.

    Args:
        dataset: a slim Dataset consumable by DatasetDataProvider.

    Returns:
        A slim prefetch queue of
        (image, pixel_cls_label, pixel_cls_weight,
         pixel_link_label, pixel_link_weight) batches.
    """
    from preprocessing import ssd_vgg_preprocessing

    with tf.device('/cpu:0'):
        with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=1000 * config.batch_size,
                common_queue_min=700 * config.batch_size,
                shuffle=True)

        # Fetch one decoded record.
        (image, glabel, gbboxes,
         x1, x2, x3, x4,
         y1, y2, y3, y4) = provider.get(['image',
                                         'object/label',
                                         'object/bbox',
                                         'object/oriented_bbox/x1',
                                         'object/oriented_bbox/x2',
                                         'object/oriented_bbox/x3',
                                         'object/oriented_bbox/x4',
                                         'object/oriented_bbox/y1',
                                         'object/oriented_bbox/y2',
                                         'object/oriented_bbox/y3',
                                         'object/oriented_bbox/y4'])
        # Stack the four per-box corner coordinates: shape = (N, 4).
        gxs = tf.transpose(tf.stack([x1, x2, x3, x4]))
        gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
        image = tf.identity(image, 'input_image')

        # Preprocess image, labels and bboxes for training.
        (image, glabel, gbboxes,
         gxs, gys) = ssd_vgg_preprocessing.preprocess_image(
            image, glabel, gbboxes, gxs, gys,
            out_shape=config.train_image_shape,
            data_format=config.data_format,
            use_rotation=config.use_rotation,
            is_training=True)
        image = tf.identity(image, 'processed_image')

        # Derive the dense ground-truth maps for this single image.
        (pixel_cls_label, pixel_cls_weight,
         pixel_link_label, pixel_link_weight) = \
            pixel_link.tf_cal_gt_for_single_image(gxs, gys, glabel)

        # Batch, then stage batches in a prefetch queue.
        with tf.name_scope(FLAGS.dataset_name + '_batch'):
            batch_tensors = tf.train.batch(
                [image, pixel_cls_label, pixel_cls_weight,
                 pixel_link_label, pixel_link_weight],
                batch_size=config.batch_size_per_gpu,
                num_threads=FLAGS.num_preprocessing_threads,
                capacity=500)
        with tf.name_scope(FLAGS.dataset_name + '_prefetch_queue'):
            batch_queue = slim.prefetch_queue.prefetch_queue(
                batch_tensors, capacity=50)
    return batch_queue
示例#3
0
def create_dataset_batch_queue_multiphase_multislice_mask(dataset):
    """Build the multi-phase (NC/ART/PV) + mask training input pipeline.

    Like the single-image pipeline, but each sample carries three phase images
    plus a mask, all of which are preprocessed jointly so they stay spatially
    aligned, then batched and staged behind a prefetch queue on /cpu:0.

    Args:
        dataset: a slim Dataset consumable by DatasetDataProvider.

    Returns:
        A slim prefetch queue of
        (nc_image, art_image, pv_image, mask_image,
         pixel_cls_label, pixel_cls_weight,
         pixel_link_label, pixel_link_weight) batches.
    """
    from preprocessing import ssd_vgg_preprocessing_multiphase_multislice_mask

    with tf.device('/cpu:0'):
        with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=1000 * config.batch_size,
                common_queue_min=700 * config.batch_size,
                shuffle=True)

        # One decoded sample: three phase images, mask, shape, labels,
        # axis-aligned bboxes, and oriented-bbox corner coordinates.
        item_keys = [
            'nc_image', 'art_image', 'pv_image', 'mask_image', 'shape',
            'object/label',
            'object/bbox',
            'object/oriented_bbox/x1',
            'object/oriented_bbox/x2',
            'object/oriented_bbox/x3',
            'object/oriented_bbox/x4',
            'object/oriented_bbox/y1',
            'object/oriented_bbox/y2',
            'object/oriented_bbox/y3',
            'object/oriented_bbox/y4']
        items = provider.get(item_keys)
        (nc_image, art_image, pv_image,
         mask_image, shape, glabel, gbboxes) = items[:7]
        gxs = tf.transpose(tf.stack(items[7:11]))  # shape = (N, 4)
        gys = tf.transpose(tf.stack(items[11:15]))
        nc_image = tf.identity(nc_image, 'input_nc_image')
        art_image = tf.identity(art_image, 'input_art_image')
        pv_image = tf.identity(pv_image, 'input_pv_image')
        mask_image = tf.identity(mask_image, 'mask_image')

        print('nc image: ', nc_image)
        print('art image: ', art_image)
        print('pv image: ', pv_image)
        print('mask image: ', mask_image)
        # The mask must undergo the exact same transforms as the phase images,
        # so all four are preprocessed in a single joint call.
        (nc_image, art_image, pv_image, mask_image,
         glabel, gbboxes, gxs, gys) = \
            ssd_vgg_preprocessing_multiphase_multislice_mask.preprocess_image_multiphase_multislice_mask(
                nc_image, art_image, pv_image, mask_image, glabel, gbboxes, gxs, gys,
                out_shape=config.train_image_shape,
                data_format=config.data_format,
                use_rotation=config.use_rotation,
                is_training=True)
        nc_image = tf.identity(nc_image, 'processed_nc_image')
        art_image = tf.identity(art_image, 'processed_art_image')
        pv_image = tf.identity(pv_image, 'processed_pv_image')
        mask_image = tf.identity(mask_image, 'processed_mask_image')

        # Dense ground-truth maps for pixel classification and linking.
        (pixel_cls_label, pixel_cls_weight,
         pixel_link_label, pixel_link_weight) = \
            pixel_link.tf_cal_gt_for_single_image(gxs, gys, glabel)

        # Batch the per-sample tensors, then prefetch the batches.
        with tf.name_scope(FLAGS.dataset_name + '_batch'):
            batched = tf.train.batch(
                [nc_image, art_image, pv_image, mask_image,
                 pixel_cls_label, pixel_cls_weight,
                 pixel_link_label, pixel_link_weight],
                batch_size=config.batch_size_per_gpu,
                num_threads=FLAGS.num_preprocessing_threads,
                capacity=500)
        with tf.name_scope(FLAGS.dataset_name + '_prefetch_queue'):
            batch_queue = slim.prefetch_queue.prefetch_queue(
                batched, capacity=50)
    return batch_queue