# Example 1 (score: 0)
def _convert_to_example(image_data, label):
    """Build an Example proto holding an encoded image and its encoded label.

    Args:
      image_data: bytes, the serialized/encoded image.
      label: bytes, the serialized/encoded label (e.g. a segmentation mask).
    Returns:
      tf.train.Example with 'image/encoded' and 'label/encoded' features.
    """
    features = {
        'image/encoded': bytes_feature(image_data),
        'label/encoded': bytes_feature(label),
    }
    return tf.train.Example(features=tf.train.Features(feature=features))
def _convert_to_example(image_data, shape, imname):
    """Build an Example proto for a NIfTI ('nii') image volume.

    Args:
      image_data: bytes, the encoded image volume.
      shape: sequence of 3 ints; height/width are read from shape[1]/shape[2]
          and channels from shape[0] (channels-first layout — TODO confirm
          against the producer of `shape`).
      imname: bytes, the image name stored under 'image/name'.
    Returns:
      Example proto.
    """
    example = tf.train.Example(features=tf.train.Features(
        feature={
            'image/height': int64_feature(shape[1]),
            'image/width': int64_feature(shape[2]),
            'image/channels': int64_feature(shape[0]),
            'image/shape': int64_feature(shape),
            # Fix: BytesList needs bytes on Python 3; every other format tag
            # in this file is a bytes literal (b'jpg', b'JPEG', b'RAW'), and
            # the original str 'nii' would raise TypeError in bytes_feature.
            'image/format': bytes_feature(b'nii'),
            'image/encoded': bytes_feature(image_data),
            'image/name': bytes_feature(imname),
        }))
    return example
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
    """Converts the given filenames to a TFRecord dataset.

    Args:
        split_name: The name of the dataset, either 'train' or 'validation'.
        filenames: A list of absolute paths to png or jpg images.
        class_names_to_ids: A dictionary from class names (strings) to ids
            (integers).
        dataset_dir: The directory where the converted datasets are stored.
    """
    assert split_name in ['train', 'validation']

    # ceil rounds UP, so every image is covered and only the final shard may
    # hold fewer than num_per_shard images.  (The original comment claimed
    # ceil rounds down, which is incorrect.)
    num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))

    with tf.Graph().as_default():
        image_reader = ImageReader()

        with tf.Session('') as sess:

            for shard_id in range(_NUM_SHARDS):
                output_filename = _get_dataset_filename(dataset_dir, split_name, shard_id)

                with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
                    start_ndx = shard_id * num_per_shard
                    end_ndx = min((shard_id + 1) * num_per_shard, len(filenames))
                    for i in range(start_ndx, end_ndx):
                        sys.stdout.write('\r>> Converting image %d/%d shard %d' % (i+1, len(filenames), shard_id))
                        sys.stdout.flush()

                        # Read the raw encoded bytes.  Use a context manager so
                        # the file handle is closed deterministically — the
                        # original left it open until garbage collection.
                        with tf.gfile.FastGFile(filenames[i], 'rb') as f:
                            image_data = f.read()
                        height, width = image_reader.read_image_dims(sess, image_data)

                        # The class label comes from the image's parent
                        # directory name.
                        class_name = os.path.basename(os.path.dirname(filenames[i]))
                        class_id = class_names_to_ids[class_name]

                        example = tf.train.Example(features=tf.train.Features(feature={
                                                    'image/encoded': dataset_utils.bytes_feature(image_data),
                                                    'image/format': dataset_utils.bytes_feature(b'jpg'),
                                                    'image/class/label': dataset_utils.int64_feature(class_id),
                                                    'image/height': dataset_utils.int64_feature(height),
                                                    'image/width': dataset_utils.int64_feature(width)}))
                        tfrecord_writer.write(example.SerializeToString())

    sys.stdout.write('\n')
    sys.stdout.flush()
# Example 4 (score: 0)
def _convert_to_example(image_data, shape, labels, labels_text, bboxes,
                        filename):
    """Build an Example proto for an image example.

    Args:
      image_data: string, PNG encoding of RGB image;
      shape: 3 integers, image shape in pixels;
      labels: list of integers, identifier for the ground truth;
      labels_text: list of strings, human-readable labels;
      bboxes: list of bounding boxes; each box is a list of integers
          [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to the
          same label as the image label.
      filename: string, name of the source image file.
    Returns:
      Example proto
    """
    # Transpose the per-box rows into four per-coordinate columns.  When
    # bboxes is empty the transpose yields no columns, so each coordinate
    # falls back to an empty list (same behavior as the original iterator
    # with a [] default).
    columns = [list(col) for col in zip(*bboxes)]
    while len(columns) < 4:
        columns.append([])
    xmin, ymin, xmax, ymax = columns[:4]

    example = tf.train.Example(features=tf.train.Features(
        feature={
            'image/height': int64_feature(shape[0]),
            'image/width': int64_feature(shape[1]),
            'image/channels': int64_feature(shape[2]),
            'image/shape': int64_feature(shape),
            'image/format': bytes_feature(b'JPEG'),
            'image/encoded': bytes_feature(image_data),
            'image/class/label': int64_feature(labels),
            'image/class/text': bytes_feature(labels_text),
            'image/object/bbox/xmin': float_feature(xmin),
            'image/object/bbox/ymin': float_feature(ymin),
            'image/object/bbox/xmax': float_feature(xmax),
            'image/object/bbox/ymax': float_feature(ymax),
            'image/object/class/label': int64_feature(labels),
            'image/filename': bytes_feature(filename.encode()),
        }))
    return example
# Example 5 (score: 0)
def _convert_to_example(nc_roi_data,
                        art_roi_data,
                        pv_roi_data,
                        nc_patch_data,
                        art_patch_data,
                        pv_patch_data,
                        label,
                        attribute_flag=False,
                        attrs=None):
    """Build an Example proto for a three-phase (NC/ART/PV) image sample.

    Args:
      nc_roi_data, art_roi_data, pv_roi_data: bytes, JPEG-encoded ROI images
          for the non-contrast, arterial and portal-venous phases.
      nc_patch_data, art_patch_data, pv_patch_data: bytes, JPEG-encoded patch
          images for the same three phases.
      label: integer class label.
      attribute_flag: when True, also store `attrs` under 'images/attrs'.
      attrs: iterable of floats; required when attribute_flag is True.
    Returns:
      Example proto.
    """
    # The original duplicated the entire feature dict across both branches,
    # differing only in the optional 'images/attrs' entry.  Build the shared
    # part once and add the extra entry conditionally.
    feature = {
        'images/label': int64_feature(label),
        'images/nc_roi': bytes_feature(nc_roi_data),
        'images/art_roi': bytes_feature(art_roi_data),
        'images/pv_roi': bytes_feature(pv_roi_data),
        'images/nc_patch': bytes_feature(nc_patch_data),
        'images/art_patch': bytes_feature(art_patch_data),
        'images/pv_patch': bytes_feature(pv_patch_data),
        'images/format': bytes_feature(b'JPEG'),
    }
    if attribute_flag:
        # print('attrs is ', attrs)
        feature['images/attrs'] = float_feature(list(attrs))
    return tf.train.Example(features=tf.train.Features(feature=feature))
# Example 6 (score: 0)
def _convert_to_example(nc_roi_data, art_roi_data, pv_roi_data, nc_patch_data,
                        art_patch_data, pv_patch_data, attr, label):
    """Build an Example proto storing six raw float32 images plus attributes.

    Args:
      nc_roi_data, art_roi_data, pv_roi_data: H x W x C arrays, ROI images of
          the non-contrast, arterial and portal-venous phases.
      nc_patch_data, art_patch_data, pv_patch_data: H x W x C arrays, patch
          images of the same three phases.
      attr: iterable of floats, per-sample attributes.
      label: integer class label.
    Returns:
      Example proto.
    """
    image_format = b'RAW'
    feature = {
        'images/attrs': float_feature(list(attr)),
        'images/label': int64_feature(label),
        'images/format': bytes_feature(image_format),
    }
    # The original repeated an identical shape/convert/feature stanza six
    # times; loop over the named images instead.
    images = (
        ('nc_roi', nc_roi_data),
        ('art_roi', art_roi_data),
        ('pv_roi', pv_roi_data),
        ('nc_patch', nc_patch_data),
        ('art_patch', art_patch_data),
        ('pv_patch', pv_patch_data),
    )
    for name, data in images:
        height, width, _ = np.shape(data)
        feature['images/' + name] = np_array_feature(
            np.asarray(data, np.float32))
        # NOTE(review): the channel count is hard-coded to 3 as in the
        # original, even though the array's own channel dimension is
        # discarded — confirm downstream readers always expect 3 channels.
        feature['images/%s/shape' % name] = int64_feature([height, width, 3])
    return tf.train.Example(features=tf.train.Features(feature=feature))