def create_dataset_batch_queue(dataset):
    # Keep the input pipeline on the CPU
    with tf.device('/cpu:0'):
        # tf.name_scope only prefixes op names; it does not rename variables
        # created with tf.get_variable (tf.variable_scope is needed for that)
        with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
            # Read samples from the dataset
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=50 * config.batch_size,
                common_queue_min=30 * config.batch_size,
                shuffle=True)
        # Get the inputs for the SSD-style network: image, labels, bboxes.
        [image, gignored, gbboxes, x1, x2, x3, x4, y1, y2, y3, y4] = provider.get([
                                                         'image',
                                                         'object/ignored',
                                                         'object/bbox', 
                                                         'object/oriented_bbox/x1',
                                                         'object/oriented_bbox/x2',
                                                         'object/oriented_bbox/x3',
                                                         'object/oriented_bbox/x4',
                                                         'object/oriented_bbox/y1',
                                                         'object/oriented_bbox/y2',
                                                         'object/oriented_bbox/y3',
                                                         'object/oriented_bbox/y4'
                                                         ])
        # tf.stack() stacks the per-corner coordinate tensors;
        # tf.transpose() then gives shape (N, 4)
        gxs = tf.transpose(tf.stack([x1, x2, x3, x4])) #shape = (N, 4)
        gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
        image = tf.identity(image, 'input_image')
        
        # Pre-processing image, labels and bboxes.
        image, gignored, gbboxes, gxs, gys = ssd_vgg_preprocessing.preprocess_image(image, gignored, gbboxes, gxs, gys,
                                                           out_shape = config.image_shape,
                                                           data_format = config.data_format, 
                                                           is_training = True)
        image = tf.identity(image, 'processed_image')
        
        # calculate ground-truth segment labels, segment offsets, and link labels
        seg_label, seg_loc, link_label = seglink.tf_get_all_seglink_gt(gxs, gys, gignored)
        
        # batch them
        # tf.train.batch(): assemble a batch from a list (or dict) of tensors
        b_image, b_seg_label, b_seg_loc, b_link_label = tf.train.batch(
            [image, seg_label, seg_loc, link_label],
            batch_size = config.batch_size_per_gpu,
            num_threads= FLAGS.num_preprocessing_threads,
            capacity = 50)

        # prefetch_queue(): prefetch the batched tensors into a queue
        batch_queue = slim.prefetch_queue.prefetch_queue(
            [b_image, b_seg_label, b_seg_loc, b_link_label],
            capacity = 50) 
    return batch_queue    
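A minimal usage sketch (not part of the original example) of how the returned batch_queue might be consumed when building the training graph; SegLinkNet and build_loss below are hypothetical placeholders for the model and loss construction.

# Hedged sketch: dequeue the prefetched batch and feed it to the network.
# SegLinkNet and build_loss are illustrative names, not from the code above.
batch_queue = create_dataset_batch_queue(dataset)
with tf.device('/gpu:0'):
    # dequeue() returns tensors in the order they were enqueued:
    # [b_image, b_seg_label, b_seg_loc, b_link_label]
    b_image, b_seg_label, b_seg_loc, b_link_label = batch_queue.dequeue()
    net = SegLinkNet(b_image, is_training=True)                    # hypothetical model
    loss = build_loss(net, b_seg_label, b_seg_loc, b_link_label)   # hypothetical loss builder
    train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)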
Example #2
def create_dataset_batch_queue(dataset):
    batch_size = config.batch_size
    with tf.device('/cpu:0'):
        with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=20 * batch_size,
                common_queue_min=10 * batch_size,
                shuffle=True)
        # Get the inputs for the SSD-style network: image, labels, bboxes.
        [image, shape, gignored, gbboxes, x1, x2, x3, x4, y1, y2, y3, y4] = provider.get([
                                                         'image', 'shape',
                                                         'object/ignored',
                                                         'object/bbox', 
                                                         'object/oriented_bbox/x1',
                                                         'object/oriented_bbox/x2',
                                                         'object/oriented_bbox/x3',
                                                         'object/oriented_bbox/x4',
                                                         'object/oriented_bbox/y1',
                                                         'object/oriented_bbox/y2',
                                                         'object/oriented_bbox/y3',
                                                         'object/oriented_bbox/y4'
                                                         ])
        gxs = tf.transpose(tf.stack([x1, x2, x3, x4])) #shape = (N, 4)
        gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
        image = tf.identity(image, 'input_image')
        
        # Pre-processing image, labels and bboxes.
        image, gignored, gbboxes, gxs, gys = ssd_vgg_preprocessing.preprocess_image(
                                                           image, gignored, gbboxes, gxs, gys, 
                                                           out_shape = config.image_shape,
                                                           data_format = config.data_format, 
                                                           is_training = True)
        image = tf.identity(image, 'processed_image')
        
        # calculate ground truth
        seg_label, seg_offsets, link_label = seglink.tf_get_all_seglink_gt(gxs, gys, gignored)

        # batch them
        b_image, b_seg_label, b_seg_offsets, b_link_label = tf.train.batch(
            [image, seg_label, seg_offsets, link_label],
            batch_size = config.batch_size_per_gpu,
            num_threads=FLAGS.num_preprocessing_threads,
            capacity = 50)
            
        batch_queue = slim.prefetch_queue.prefetch_queue(
            [b_image, b_seg_label, b_seg_offsets, b_link_label],
            capacity = 50) 
    return batch_queue    
Example #3
def create_dataset_batch_queue(dataset):
    batch_size = config.batch_size
    with tf.device('/cpu:0'):
        with tf.name_scope(FLAGS.dataset_name + '_data_provider'):
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=20 * batch_size,
                common_queue_min=10 * batch_size,
                shuffle=True)
        # Get the inputs for the SSD-style network: image, labels, bboxes.
        [image, shape, gignored, gbboxes, x1, x2, x3, x4, y1, y2, y3,
         y4] = provider.get([
             'image', 'shape', 'object/ignored', 'object/bbox',
             'object/oriented_bbox/x1', 'object/oriented_bbox/x2',
             'object/oriented_bbox/x3', 'object/oriented_bbox/x4',
             'object/oriented_bbox/y1', 'object/oriented_bbox/y2',
             'object/oriented_bbox/y3', 'object/oriented_bbox/y4'
         ])
        gxs = tf.transpose(tf.stack([x1, x2, x3, x4]))  # shape = (N, 4)
        gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
        image = tf.identity(image, 'input_image')

        # Pre-processing image, labels and bboxes.
        image, gignored, gbboxes, gxs, gys = ssd_vgg_preprocessing.preprocess_image(
            image,
            gignored,
            gbboxes,
            gxs,
            gys,
            out_shape=config.image_shape,
            data_format=config.data_format,
            is_training=True)
        image = tf.identity(image, 'processed_image')

        # calculate ground truth
        seg_label, seg_offsets, link_label = seglink.tf_get_all_seglink_gt(
            gxs, gys, gignored)

        # batch them
        b_image, b_seg_label, b_seg_offsets, b_link_label = tf.train.batch(
            [image, seg_label, seg_offsets, link_label],
            batch_size=batch_size,
            num_threads=FLAGS.num_preprocessing_threads,
            capacity=50)

        batch_queue = slim.prefetch_queue.prefetch_queue(
            [b_image, b_seg_label, b_seg_offsets, b_link_label], capacity=50)
    return batch_queue
Example #4
def read_dataset(dataset):
    with tf.name_scope(FLAGS.dataset_name + '_' + FLAGS.dataset_split_name +
                       '_data_provider'):
        provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset, num_readers=FLAGS.num_readers, shuffle=False)

    [
        image, shape, filename, gignored, gbboxes, x1, x2, x3, x4, y1, y2, y3,
        y4
    ] = provider.get([
        'image', 'shape', 'filename', 'object/ignored', 'object/bbox',
        'object/oriented_bbox/x1', 'object/oriented_bbox/x2',
        'object/oriented_bbox/x3', 'object/oriented_bbox/x4',
        'object/oriented_bbox/y1', 'object/oriented_bbox/y2',
        'object/oriented_bbox/y3', 'object/oriented_bbox/y4'
    ])
    gxs = tf.transpose(tf.stack([x1, x2, x3, x4]))  #shape = (N, 4)
    gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
    image = tf.identity(image, 'input_image')

    # Pre-processing image, labels and bboxes.
    image, gignored, gbboxes, gxs, gys = ssd_vgg_preprocessing.preprocess_image(
        image,
        gignored,
        gbboxes,
        gxs,
        gys,
        out_shape=config.image_shape,
        data_format=config.data_format,
        is_training=False)
    image = tf.identity(image, 'processed_image')

    # calculate ground truth
    seg_label, seg_loc, link_gt = seglink.tf_get_all_seglink_gt(
        gxs, gys, gignored)

    return image, seg_label, seg_loc, link_gt, filename, shape, gignored, gxs, gys
Example #5
def read_dataset(dataset):
    with tf.name_scope(FLAGS.dataset_name +'_'  + FLAGS.dataset_split_name + '_data_provider'):
        provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset,
            num_readers=FLAGS.num_readers,
            shuffle=False)
        
    [image, shape, filename, gignored, gbboxes, x1, x2, x3, x4, y1, y2, y3, y4] = provider.get([
                                                     'image', 'shape', 'filename',
                                                     'object/ignored',
                                                     'object/bbox', 
                                                     'object/oriented_bbox/x1',
                                                     'object/oriented_bbox/x2',
                                                     'object/oriented_bbox/x3',
                                                     'object/oriented_bbox/x4',
                                                     'object/oriented_bbox/y1',
                                                     'object/oriented_bbox/y2',
                                                     'object/oriented_bbox/y3',
                                                     'object/oriented_bbox/y4'
                                                     ])
    gxs = tf.transpose(tf.stack([x1, x2, x3, x4])) #shape = (N, 4)
    gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
    image = tf.identity(image, 'input_image')
    
    # Pre-processing image, labels and bboxes.
    image, gignored, gbboxes, gxs, gys = ssd_vgg_preprocessing.preprocess_image(
                                                       image, gignored, gbboxes, gxs, gys, 
                                                       out_shape = config.image_shape,
                                                       data_format = config.data_format, 
                                                       is_training = False)
    image = tf.identity(image, 'processed_image')
    
    # calculate ground truth
    seg_label, seg_loc, link_gt = seglink.tf_get_all_seglink_gt(gxs, gys, gignored)
        
    return image, seg_label, seg_loc, link_gt, filename, shape, gignored, gxs, gys
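A minimal sketch, assuming a standard TF1 queue-runner setup, of how read_dataset might be driven at evaluation time; num_eval_samples and the model call are placeholders, not part of the original code.

# Hedged sketch: run the evaluation input pipeline with queue runners.
image, seg_label, seg_loc, link_gt, filename, shape, gignored, gxs, gys = read_dataset(dataset)
with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for _ in range(num_eval_samples):  # num_eval_samples: placeholder sample count
            img, labels, name = sess.run([image, seg_label, filename])
            # ...run the trained model on `img` and compare its output with `labels`
    finally:
        coord.request_stop()
        coord.join(threads)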