def create_tf_record(output_filename, label_map_dict, annotations_dir,
                     image_dir, examples):
    """Creates a TFRecord file from examples.

    Args:
        output_filename: Path to where output file is saved.
        label_map_dict: The label map dictionary.
        annotations_dir: Directory where annotation files are stored.
        image_dir: Directory where image files are stored.
        examples: Examples to parse and save to tf record.
    """
    writer = tf.python_io.TFRecordWriter(output_filename)
    for idx, example in enumerate(examples):
        if idx % 100 == 0:
            print('On image {} of {}'.format(idx, len(examples)))
        path = os.path.join(annotations_dir, example + '.xml')
        print('processing...', example)
        if not os.path.exists(path):
            print('Could not find {}, ignoring example.'.format(path))
            continue
        with tf.gfile.GFile(path, 'r') as fid:
            try:
                xml_str = fid.read()
                xml = etree.fromstring(xml_str)
                data = dataset_util.recursive_parse_xml_to_dict(
                    xml)['annotation']
                tf_example = dict_to_tf_example(data, label_map_dict,
                                                image_dir)
                writer.write(tf_example.SerializeToString())
            except Exception as e:
                print('Failed to process example {}: {}'.format(example, e))
    writer.close()
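
A minimal driver sketch for the function above, assuming the dataset_util helper used elsewhere in these examples; the 70/30 split, seed, and path variables are illustrative, not from the original.

# Hypothetical driver: shuffle the example list, split it into train/val,
# and write one TFRecord file per split via create_tf_record above.
import random

examples_list = dataset_util.read_examples_list(examples_path)
random.seed(42)  # illustrative seed for a reproducible split
random.shuffle(examples_list)
num_train = int(0.7 * len(examples_list))  # illustrative 70/30 split

create_tf_record(os.path.join(output_dir, 'train.record'), label_map_dict,
                 annotations_dir, image_dir, examples_list[:num_train])
create_tf_record(os.path.join(output_dir, 'val.record'), label_map_dict,
                 annotations_dir, image_dir, examples_list[num_train:])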
Example 2
def create_tf_record(images_path,
                     output_path,
                     images_dir_name='images',
                     annotation_dir_name='xml'):

    label_map_dict = {"person": 1, "face": 2}

    logging.info('Creating %s', output_path)

    writer = tf.python_io.TFRecordWriter(output_path)

    for idx in range(len(images_path)):
        if idx % 100 == 0:
            logging.info('On image %d of %d', idx, len(images_path))
        # xml_path = xmls_path[idx]
        image_path = images_path[idx]
        xml_path = image_path.replace('/{}/'.format(images_dir_name),
                                      '/{}/'.format(annotation_dir_name))
        xml_path = xml_path.replace('.jpg', '.xml')

        if os.path.exists(xml_path):
            with tf.gfile.GFile(xml_path, 'r') as fid:
                xml_str = fid.read()
            xml = etree.fromstring(xml_str)
            data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

            tf_example = dict_to_tf_example(data, image_path, label_map_dict)
            writer.write(tf_example.SerializeToString())
        else:
            # Skip images without annotations; to keep them as background
            # examples instead, drop this `continue` and write
            # background_tf_example(image_path) here.
            continue

    writer.close()
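
A possible call site for this variant, sketched under the assumption that the .jpg files sit in an images/ directory with a sibling xml/ directory (the function's default directory names); the dataset path is illustrative.

# Hypothetical call site: gather image paths with glob and write one file.
import glob

images_path = sorted(glob.glob('/data/faces/images/*.jpg'))  # illustrative
create_tf_record(images_path, '/data/faces/train.record')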
Example 3
def main(_):
    data_dir = FLAGS.data_dir

    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)

    label_map_dict = labels  # `labels` is assumed to be a module-level name->id dict

    logging.info('Reading dataset.')

    annotations_dir = os.path.join(data_dir, FLAGS.annotations_dir)
    imgs_dir = os.path.join(data_dir, FLAGS.imgs_dir)
    img_list = os.listdir(imgs_dir)
    for idx, example in enumerate(img_list):
        if idx % 100 == 0:
            logging.info('On image %d of %d', idx, len(img_list))
        example = os.path.splitext(example)[0]
        path = os.path.join(annotations_dir, example + '.xml')
        with tf.gfile.GFile(path, 'r') as fid:
            xml_str = fid.read()
        xml = etree.fromstring(xml_str)
        data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

        tf_example = dict_to_tf_example(data, FLAGS.data_dir, label_map_dict,
                                        FLAGS.ignore_difficult_instances)
        writer.write(tf_example.SerializeToString())

    writer.close()
    print('Done!')
Example 4
def main(_):
  if FLAGS.set not in SETS:
    raise ValueError('set must be in : {}'.format(SETS))

  data_dir = FLAGS.data_dir
  label_map_path = os.path.join(data_dir, "labels.pbtxt")
  examples_path = os.path.join(data_dir, "files.txt")
  annotations_dir = os.path.join(data_dir, "annotations")

  label_map_dict = label_map_util.get_label_map_dict(label_map_path)
  examples_list = dataset_util.read_examples_list(examples_path)

  writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
  for idx, example in enumerate(examples_list):
    if idx % 100 == 0:
      logging.info('On image %d of %d', idx, len(examples_list))
    path = os.path.join(annotations_dir, example + '.xml')
    with tf.gfile.GFile(path, 'r') as fid:
      xml_str = fid.read()
    xml = etree.fromstring(xml_str)
    data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
    tf_example = dict_to_tf_example(data, FLAGS.data_dir, label_map_dict, FLAGS.ignore_difficult_instances)
    writer.write(tf_example.SerializeToString())

  writer.close()
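
A quick sanity check that the written file round-trips, sketched with tf.python_io.tf_record_iterator, the TF1 reader that matches the TF1 writer API used throughout these examples.

# Hypothetical verification: parse the first few records back and print
# the stored image dimensions.
for i, record in enumerate(tf.python_io.tf_record_iterator(FLAGS.output_path)):
    parsed = tf.train.Example()
    parsed.ParseFromString(record)
    feat = parsed.features.feature
    print(feat['image/height'].int64_list.value[0],
          feat['image/width'].int64_list.value[0])
    if i >= 4:
        break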
Example 5
def main(_):
    if FLAGS.set not in SETS:
        raise ValueError('set must be in : {}'.format(SETS))

    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
    xmlDir = FLAGS.xml_dir
    logging.info('Reading from %s.', xmlDir)

    if FLAGS.datasetFlag == 'custom':
        xml_path_ = get_all_path_iterative(xmlDir)
        print('processing custom data...')
    elif FLAGS.datasetFlag == 'coco':
        xml_path_ = []
        xmlList = os.listdir(xmlDir)
        for xmlInstance in xmlList:
            print(xmlInstance)
            xmlPath = os.path.join(xmlDir, xmlInstance)
            xml_path_.append(xmlPath)
    else:
        raise ValueError('unknown datasetFlag: {}'.format(FLAGS.datasetFlag))

    print('found %d xml files' % len(xml_path_))
    xmlId = 0
    for xmlPath in xml_path_:
        if xmlId % 1000 == 0:
            logging.info('On xml %d of %d', xmlId, len(xml_path_))
        with tf.gfile.GFile(xmlPath, 'r') as fid:
            xmlStr = fid.read()
        xmlTree = etree.fromstring(xmlStr)
        xmlData = dataset_util.recursive_parse_xml_to_dict(
            xmlTree)['annotation']

        if 'object' not in xmlData:
            print('skipping %s: no object in the xml' % xmlPath)
            continue
        dict_to_tf_example(writer, xmlData)
        xmlId += 1

    print('total xml processed: %d' % xmlId)
    print('total good person count: %d' % _all_num)
    writer.close()
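
get_all_path_iterative is not defined in this listing; a plausible implementation, under the assumption that it collects every .xml path in a directory tree:

# Hypothetical helper: recursively collect all .xml file paths under root.
def get_all_path_iterative(root):
    paths = []
    for dirpath, _, filenames in os.walk(root):
        for fn in filenames:
            if fn.endswith('.xml'):
                paths.append(os.path.join(dirpath, fn))
    return paths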
Example 6
def main(_):
    if FLAGS.set not in SETS:
        raise ValueError('set must be in : {}'.format(SETS))

    data_dir = FLAGS.data_dir

    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)

    all_boxes = []
    all_small_boxes_count = 0

    logging.info('Reading from Ferrari dataset.')
    examples_path = os.path.join(data_dir, 'training_split_files',
                                 FLAGS.set + '.txt')
    annotations_dir = os.path.join(data_dir, 'SSD_Training_Data',
                                   FLAGS.annotations_dir)
    examples_list = dataset_util.read_examples_list(examples_path)
    for idx, example in enumerate(examples_list):
        if idx % 100 == 0:
            logging.info('On image %d of %d', idx, len(examples_list))
        path = os.path.join(annotations_dir, example + '.xml')
        path = path.replace('.mp4', '_mp4')
        try:
            with tf.gfile.GFile(path, 'rb') as fid:
                xml_str = fid.read()
            xml = etree.fromstring(xml_str)
        except Exception as e:
            # Replaces the original ipdb.set_trace() debug traps: log and
            # skip files that are missing or fail to parse.
            logging.warning('Skipping %s: %s', path, e)
            continue
        data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

        tf_example, boxes, small_boxes_count = dict_to_tf_example(
            data, FLAGS.data_dir, FLAGS.ignore_difficult_instances)
        writer.write(tf_example.SerializeToString())
        all_boxes += boxes
        all_small_boxes_count += small_boxes_count

    writer.close()

    with open('{}_all_boxes.pkl'.format(FLAGS.set), 'wb') as f:
        pickle.dump(all_boxes, f)
Example 7
def process(set_path, output_path):
    # label_map_path, annotations_dir, and images_dir are module-level
    # globals in the original script (not parameters here).
    writer = tf.python_io.TFRecordWriter(output_path)
    label_map_dict = get_label_map_dict_from_json(label_map_path)

    examples_path = set_path
    examples_list = dataset_util.read_examples_list(examples_path)
    for idx, example in enumerate(examples_list):
        if idx % 100 == 0:
            logging.info('On image %d of %d', idx, len(examples_list))
        path = os.path.join(annotations_dir, example + '.xml')
        with tf.gfile.GFile(path, 'r') as fid:
            xml = ET.parse(fid).getroot()
        data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
        tf_example = dict_to_tf_example(path, data, images_dir, label_map_dict,
                                        FLAGS.ignore_difficult_instances)
        writer.write(tf_example.SerializeToString())

    writer.close()
Example 8
def main(_):
    if FLAGS.year not in YEARS:
        raise ValueError('year must be in : {}'.format(YEARS))

    data_dir = FLAGS.data_dir
    years = ['VOC2007', 'VOC2012']
    if FLAGS.year != 'merged':
        years = [FLAGS.year]
    output_dir = FLAGS.output_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    for year in years:
        for split in SETS:
            filename = os.path.join(output_dir,
                                    '{}_{}.tfrecord'.format(year, split))
            if os.path.exists(filename):
                if FLAGS.overwrite or os.path.getsize(filename) == 0:
                    os.remove(filename)
                else:
                    print('skipped %s.' % filename)
                    continue
            writer = tf.python_io.TFRecordWriter(filename)
            print('Reading from PASCAL %s %s dataset.' % (year, split))
            examples_path = os.path.join(data_dir, year, 'ImageSets', 'Main',
                                         split + '.txt')
            annotations_dir = os.path.join(data_dir, year, 'Annotations')
            examples_list = dataset_util.read_examples_list(examples_path)
            for example in tqdm(examples_list):
                try:
                    path = os.path.join(annotations_dir, example + '.xml')
                    with tf.gfile.GFile(path, 'r') as fid:
                        xml_str = fid.read()
                    xml = etree.fromstring(xml_str)
                    data = dataset_util.recursive_parse_xml_to_dict(
                        xml)['annotation']
                    tf_example = dict_to_tf_example(
                        data, FLAGS.data_dir, FLAGS.ignore_difficult_instances)
                    writer.write(tf_example.SerializeToString())
                except Exception as e:
                    print(str(e))
            writer.close()
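
For large splits, the same write loop is often sharded across several output files; a sketch reusing the loop's variable names, with the shard count and round-robin assignment as assumptions rather than part of the original script.

# Hypothetical sharded variant: round-robin examples across num_shards
# writers named like train.tfrecord-00000-of-00010.
num_shards = 10  # illustrative
writers = [tf.python_io.TFRecordWriter('%s-%05d-of-%05d' %
                                       (filename, i, num_shards))
           for i in range(num_shards)]
for idx, example in enumerate(examples_list):
    path = os.path.join(annotations_dir, example + '.xml')
    with tf.gfile.GFile(path, 'r') as fid:
        xml_str = fid.read()
    xml = etree.fromstring(xml_str)
    data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
    tf_example = dict_to_tf_example(
        data, FLAGS.data_dir, FLAGS.ignore_difficult_instances)
    writers[idx % num_shards].write(tf_example.SerializeToString())
for w in writers:
    w.close()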
Example 9
def main(_):
    data_dir = FLAGS.data_dir

    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)

    label_map_dict = {
        # "person": 1,
        "face": 1
    }

    # load list image files and xml files
    images_dir = os.path.join(data_dir, FLAGS.images_dir)
    print(data_dir)
    print(images_dir)

    images_path = glob.glob(os.path.join(images_dir, '*.jpg'))

    xmls_dir = os.path.join(data_dir, FLAGS.annotations_dir)
    xmls_path = glob.glob(os.path.join(xmls_dir, '*.xml'))
    print(data_dir)
    print(xmls_dir)

    print(len(images_path))
    print(len(xmls_path))
    assert len(images_path) == len(xmls_path)

    for idx in range(len(images_path)):
        if idx % 100 == 0:
            logging.info('On image %d of %d', idx, len(images_path))
        xml_path = xmls_path[idx]
        with tf.gfile.GFile(xml_path, 'r') as fid:
            xml_str = fid.read()
        xml = etree.fromstring(xml_str)
        data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

        image_path = images_path[idx]
        tf_example = dict_to_tf_example(data, image_path, label_map_dict,
                                        FLAGS.ignore_difficult_instances)
        writer.write(tf_example.SerializeToString())

    writer.close()
Example 10
def main(year, data_dir, annotations_dir, set, ignore_difficult_instances):
  if set not in SETS:
    raise ValueError('set must be in : {}'.format(SETS))
  if year not in YEARS:
    raise ValueError('year must be in : {}'.format(YEARS))

  years = ['VOC2007', 'VOC2012']
  if year != 'merged':
    years = [year]

  # output_path and label_map_path are module-level globals in the original
  # script (they are not parameters of this main()).
  writer = tf.python_io.TFRecordWriter(output_path)

  label_map_dict = label_map_util.get_label_map_dict(label_map_path)

  for year in years:
    logging.info('Reading from PASCAL %s dataset.', year)
    examples_path = os.path.join(data_dir, year, 'ImageSets', 'Main', set + '.txt')
    examples_list = dataset_util.read_examples_list(examples_path)
    for idx, example in enumerate(examples_list):
      if idx % 100 == 0:
        logging.info('On image %d of %d', idx, len(examples_list))
      path = os.path.join(annotations_dir, example + '.xml')
      with tf.gfile.GFile(path, 'r') as fid:
        xml_str = fid.read()
      xml = etree.fromstring(xml_str.encode('utf-8'))
      data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

      tf_example = dict_to_tf_example(year, data, data_dir,
                                      label_map_dict, example,
                                      ignore_difficult_instances)
      writer.write(tf_example.SerializeToString())

  writer.close()
Example 11
def main(_):
    if FLAGS.set not in SETS:
        raise ValueError('set must be in : {}'.format(SETS))

    data_dir = FLAGS.data_dir

    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)

    label_map_dict = get_label_map_dict(FLAGS.label_map_path)

    logging.info('Reading from PASCAL-like customized dataset.')
    examples_path = os.path.join(data_dir, 'ImageSets', 'Main',
                                 FLAGS.set + '.txt')
    annotations_dir = os.path.join(data_dir, FLAGS.annotations_dir)
    examples_list = dataset_util.read_examples_list(examples_path)
    for idx, example in enumerate(examples_list):
        if idx % 100 == 0:
            logging.info('On image %d of %d', idx, len(examples_list))
        path = os.path.join(annotations_dir, example + '.xml')
        with tf.gfile.GFile(path, 'r') as fid:
            xml_str = fid.read()
        xml = etree.fromstring(xml_str)
        data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

        tf_example = dict_to_tf_example(data, FLAGS.data_dir,
                                        label_map_dict,
                                        FLAGS.ignore_difficult_instances)
        writer.write(tf_example.SerializeToString())

    writer.close()
Example 12
def create_tf_record(images_path,
                     output_path,
                     images_dir_name='images',
                     annotation_dir_name='xml'):

    label_map_dict = {
        'person': 1,
        'face': 2,
        'potted plant': 3,
        'tvmonitor': 4,
        'chair': 5,
        'microwave': 6,
        'refrigerator': 7,
        'book': 8,
        'clock': 9,
        'vase': 10,
        'dining table': 11,
        'bear': 12,
        'bed': 13,
        'stop sign': 14,
        'truck': 15,
        'car': 16,
        'teddy bear': 17,
        'skis': 18,
        'oven': 19,
        'sports ball': 20,
        'baseball glove': 21,
        'tennis racket': 22,
        'handbag': 23,
        'backpack': 24,
        'bird': 25,
        'boat': 26,
        'cell phone': 27,
        'train': 28,
        'sandwich': 29,
        'bowl': 30,
        'surfboard': 31,
        'laptop': 32,
        'mouse': 33,
        'keyboard': 34,
        'bus': 35,
        'cat': 36,
        'airplane': 37,
        'zebra': 38,
        'tie': 39,
        'traffic light': 40,
        'apple': 41,
        'baseball bat': 42,
        'knife': 43,
        'cake': 44,
        'wine glass': 45,
        'cup': 46,
        'spoon': 47,
        'banana': 48,
        'donut': 49,
        'sink': 50,
        'toilet': 51,
        'broccoli': 52,
        'skateboard': 53,
        'fork': 54,
        'carrot': 55,
        'couch': 56,
        'remote': 57,
        'scissors': 58,
        'bicycle': 59,
        'sheep': 60,
        'bench': 61,
        'bottle': 62,
        'orange': 63,
        'elephant': 64,
        'motorcycle': 65,
        'horse': 66,
        'hot dog': 67,
        'frisbee': 68,
        'umbrella': 69,
        'dog': 70,
        'kite': 71,
        'pizza': 72,
        'fire hydrant': 73,
        'suitcase': 74,
        'cow': 75,
        'giraffe': 76,
        'snowboard': 77,
        'parking meter': 78,
        'toothbrush': 79,
        'toaster': 80,
        'hair drier': 81,
        'pottedplant': 82,
        'sofa': 83,
        'diningtable': 84,
        'motorbike': 85,
        'aeroplane': 86
    }

    logging.info('Creating %s', output_path)

    writer = tf.python_io.TFRecordWriter(output_path)

    for idx in range(len(images_path)):
        if idx % 100 == 0:
            logging.info('process id: %d', os.getpid())
            logging.info('On image %d of %d', idx, len(images_path))
        # xml_path = xmls_path[idx]
        image_path = images_path[idx]
        xml_path = image_path.replace('/{}/'.format(images_dir_name),
                                      '/{}/'.format(annotation_dir_name))
        xml_path = xml_path.replace('.jpg', '.xml')

        if os.path.exists(xml_path):
            with tf.gfile.GFile(xml_path, 'r') as fid:
                xml_str = fid.read()
            xml = etree.fromstring(xml_str)
            data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

            tf_example = dict_to_tf_example(data, image_path, label_map_dict)
            writer.write(tf_example.SerializeToString())
        else:
            # Skip images without annotations; to keep them as background
            # examples instead, drop this `continue` and write
            # background_tf_example(image_path) here.
            continue

    writer.close()
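
The per-process logging inside the loop suggests this variant is launched from multiple worker processes; a sketch of such a driver, with the chunking scheme, worker count, and paths as assumptions.

# Hypothetical multiprocessing driver: stripe the image list across
# num_workers processes, each writing its own shard file.
import glob
import multiprocessing

all_images = sorted(glob.glob('/data/coco/images/*.jpg'))  # illustrative
num_workers = 4  # illustrative
jobs = []
for i in range(num_workers):
    p = multiprocessing.Process(
        target=create_tf_record,
        args=(all_images[i::num_workers],
              '/data/coco/train.record-%05d-of-%05d' % (i, num_workers)))
    p.start()
    jobs.append(p)
for p in jobs:
    p.join()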
Example 13
def _process_image_files(thread_index, ranges, name, exports, num_shards):
    """Processes and saves a subset of exports as TFRecord files in one thread.
    Args:
    thread_index: Integer thread identifier within [0, len(ranges)].
    ranges: A list of pairs of integers specifying the ranges of the dataset to
      process in parallel.
    name: Unique identifier specifying the dataset.
    exports: List of dictionaries corresponding to elements in dataset.
    num_shards: Integer number of shards for the output files.
    """
    # Each thread produces N shards where N = num_shards / num_threads. For
    # instance, if num_shards = 128, and num_threads = 2, then the first thread
    # would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)

    shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

    counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_root, 'TFRecords', output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)

        shard_counter = 0
        images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in images_in_shard:
            path = os.path.join(FLAGS.output_root, 'Annotations', exports[i] + '.xml')

            with tf.gfile.GFile(path, 'r') as fid:
                xml_str = fid.read()

            xml = etree.fromstring(xml_str)

            data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

            example = dict_to_tf_example(data, FLAGS.output_root, class_index_map)

            if example is not None:
                writer.write(example.SerializeToString())
                shard_counter += 1
                counter += 1

            if not counter % 1000:
                print("%s [thread %d]: Processed %d of %d items in thread batch." %
                      (datetime.now(), thread_index, counter, num_images_in_thread))
                sys.stdout.flush()

        writer.close()
        print("%s [thread %d]: Wrote %d images with annotations to %s" %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()

    # Thread-level summary, printed once after all shards are written
    # (the original printed it once per shard inside the loop).
    print("%s [thread %d]: Wrote %d images with annotations to %d shards." %
          (datetime.now(), thread_index, counter, num_shards_per_batch))
    sys.stdout.flush()
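
The thread driver for _process_image_files is not shown; a sketch modeled on the usual multi-threaded TFRecord build pattern, with the thread and shard counts as illustrative assumptions (num_shards must be divisible by the number of threads to satisfy the assert above).

# Hypothetical driver: split `exports` (the full list of annotation
# basenames, assumed defined) into contiguous ranges, one per thread,
# and run _process_image_files over each range in parallel.
import threading

num_threads = 2   # illustrative
num_shards = 10   # illustrative; divisible by num_threads

spacing = np.linspace(0, len(exports), num_threads + 1).astype(int)
ranges = [[spacing[i], spacing[i + 1]] for i in range(num_threads)]

coord = tf.train.Coordinator()
threads = []
for thread_index in range(num_threads):
    args = (thread_index, ranges, 'train', exports, num_shards)
    t = threading.Thread(target=_process_image_files, args=args)
    t.start()
    threads.append(t)
coord.join(threads)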
Example 14
label_dir = '/dataset/CES_2018/SSD_Training_Data/All_SSD/All_Labels/'
img_dir = '/dataset/CES_2018/SSD_Training_Data/All_SSD/All_Images/'

label_files = os.listdir(label_dir)

for lf in label_files:
    if 'TURN' in lf:
        # image_f = os.path.join(img_dir, lf.split('.xml')[0] + '.jpg')
        label_f = os.path.join(label_dir, lf)

        with tf.gfile.GFile(label_f, 'rb') as fid:
            xml_str = fid.read()

        xml = etree.fromstring(xml_str)
        data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

        # im = imread(image_f)

        width = int(data['size']['width'])
        height = int(data['size']['height'])

        if width == 3800 and height == 2100:
            with open(label_f, 'r') as f:
                xml = f.read()

            # Read the whole file rather than readlines()[0], which silently
            # dropped everything after the first line of multi-line XML.
            xml = xml.replace('<width>3800</width>', '<width>3840</width>')
            xml = xml.replace('<height>2100</height>', '<height>2160</height>')

            with open(label_f, 'w') as f:
                f.write(xml)
Example 15
    def createTFExample(self):
        """Convert XML derived dict to tf.Example proto.
        Notice that this function normalizes the bounding box coordinates provided
        by the raw data.
        Args: None
        Returns:
            example: The converted tf.Example.
        """
        with tf.io.gfile.GFile(self.xml, 'r') as fid:
            xml_str = fid.read()
        xml = etree.fromstring(xml_str)
        data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
        # the image might be processed in a different location
        # so overwrite the path to the input image path for consistency
        data['path'] = self.jpg if self.crop == '' else self.__cropImage(data)

        print(f"Processing image {data['path']}")

        width = int(data['size']['width'])
        height = int(data['size']['height'])
        filename = data['filename'].encode('utf8')
        with tf.io.gfile.GFile(data['path'], 'rb') as fid:
            encoded_image_data = fid.read()
        image_format = 'jpeg'.encode('utf8')

        # List of normalized left x coordinates in bounding box (1 per box)
        xmins = []
        # List of normalized right x coordinates in bounding box (1 per box)
        xmaxs = []
        # List of normalized top y coordinates in bounding box (1 per box)
        ymins = []
        # List of normalized bottom y coordinates in bounding box (1 per box)
        ymaxs = []
        # List of string class name of bounding box (1 per box)
        classes_text = []
        classes_id = []  # List of integer class id of bounding box (1 per box)

        image = util.loadImage(data['path'])

        for obj in data['object']:
            if obj['name'] not in classes or not self.__isValidBox(
                    obj, width, height):
                print('Unexpected object: ' + str(obj) + ' in ' + data['path'])
                continue
            xmins.append(float(obj['bndbox']['xmin']) / width)
            ymins.append(float(obj['bndbox']['ymin']) / height)
            xmaxs.append(float(obj['bndbox']['xmax']) / width)
            ymaxs.append(float(obj['bndbox']['ymax']) / height)
            classes_text.append(obj['name'].encode('utf8'))
            classes_id.append(getClassID(obj['name']))
            util.drawBox(image, self.__encodeBox(obj['bndbox']))

        util.saveImage(image,
                       str(data['path']).replace(".jpg", "-with-boxes.jpg"))

        tf_example = tf.train.Example(features=tf.train.Features(
            feature={
                'image/height':
                dataset_util.int64_feature(height),
                'image/width':
                dataset_util.int64_feature(width),
                'image/filename':
                dataset_util.bytes_feature(filename),
                'image/source_id':
                dataset_util.bytes_feature(filename),
                'image/encoded':
                dataset_util.bytes_feature(encoded_image_data),
                'image/format':
                dataset_util.bytes_feature(image_format),
                'image/object/bbox/xmin':
                dataset_util.float_list_feature(xmins),
                'image/object/bbox/ymin':
                dataset_util.float_list_feature(ymins),
                'image/object/bbox/xmax':
                dataset_util.float_list_feature(xmaxs),
                'image/object/bbox/ymax':
                dataset_util.float_list_feature(ymaxs),
                'image/object/class/text':
                dataset_util.bytes_list_feature(classes_text),
                'image/object/class/label':
                dataset_util.int64_list_feature(classes_id),
            }))
        return tf_example
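
A possible write loop over these converter objects; the enclosing class is not shown above, so the ExampleBuilder name and its (xml, jpg, crop) constructor are assumptions made for illustration.

# Hypothetical usage: build one tf.Example per annotation file and write
# them all to a single TFRecord.
import glob

writer = tf.io.TFRecordWriter('train.record')  # illustrative output path
for xml_path in sorted(glob.glob('annotations/*.xml')):  # illustrative dir
    jpg_path = xml_path.replace('annotations', 'images').replace('.xml', '.jpg')
    builder = ExampleBuilder(xml_path, jpg_path, crop='')  # assumed class
    writer.write(builder.createTFExample().SerializeToString())
writer.close()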