Example 1
    def __init__(self,
                 batch_size,
                 input_shape,
                 json_path,
                 classes,
                 num_parallel_calls=2,
                 prefetch_size=2):
        """Build the evaluation input pipeline over a JSON-annotated dataset.

        Args:
            batch_size: Number of samples per evaluation batch.
            input_shape: Input image shape; only the first two entries
                (height, width) parameterize the transform — TODO confirm
                the expected ordering against `create_transform_parameters`.
            json_path: Path to the dataset annotation JSON file.
            classes: Class list forwarded to the dataset/cache helpers.
            num_parallel_calls: Parallelism for the `dataset.map` stage.
            prefetch_size: Number of batches to prefetch.
        """
        self.batch_size = batch_size
        self.input_shape = input_shape
        self.json_path = json_path
        self.num_parallel_calls = num_parallel_calls
        self.prefetch_size = prefetch_size

        ObjectDetectorJson.init_cache(self.json_path,
                                      cache_type='NONE',
                                      classes=classes)

        dataset, self.dataset_size = ObjectDetectorJson.create_dataset(
            self.json_path, classes=classes)
        _, self.transform_param = MobileNetSSD.create_transform_parameters(
            *input_shape[:2])
        # Evaluation pipeline: no training-time augmentation.
        self.transformer = AnnotatedDataTransformer(self.transform_param,
                                                    is_training=False)

        print('Total evaluation steps: {}'.format(
            math.ceil(self.dataset_size / self.batch_size)))

        # Named functions instead of lambda assignments (PEP 8, E731).
        def transform_fn(value):
            return ObjectDetectorJson.transform_fn(value, self.transformer)

        def map_fn(value):
            # tf.py_func bridges the Python-side transform into the TF graph.
            return tf.py_func(transform_fn, [value], (tf.float32, tf.string))

        self.dataset = dataset.map(map_fn,
                                   num_parallel_calls=num_parallel_calls)
        self.dataset = self.dataset.batch(
            self.batch_size).prefetch(prefetch_size)
Example 2
    def input_fn(self):
        """Return one `(images, annotations)` tensor pair for training.

        Shuffles and repeats the training dataset, batches it, applies the
        per-batch Python transform via `tf.py_func`, and prefetches.

        Returns:
            images: Float tensor with static shape
                `[batch_size] + input_shape`.
            annotations: String tensor of per-image annotations.
        """
        # Named functions instead of lambda assignments (PEP 8, E731).
        def transform_fn(value):
            return ObjectDetectorJson.transform_fn(
                value, self.train_transformer, cache_type=self.cache_type)

        def transform_batch_fn(value):
            # Transform every sample, then split into parallel lists.
            transformed = [transform_fn(val) for val in value]
            images = [img for img, _ in transformed]
            annotations = [annot for _, annot in transformed]
            return images, annotations

        def map_fn_batch(value):
            return tf.py_func(transform_batch_fn, [value],
                              (tf.float32, tf.string))

        dataset = self.train_dataset.apply(
            tf.contrib.data.shuffle_and_repeat(buffer_size=self.dataset_size))
        # Batch before map so the py_func runs once per batch, not per sample.
        dataset = dataset.batch(self.batch_size).map(
            map_fn_batch, num_parallel_calls=self.num_parallel_calls)
        dataset = dataset.prefetch(self.prefetch_size)

        images, annotation = dataset.make_one_shot_iterator().get_next()

        # tf.py_func loses static shape information; restore it for the model.
        images.set_shape([self.batch_size] + list(self.input_shape))
        return images, annotation
Example 3
  def sample_data(json_path, num_samples, input_shape, classes, seed=666):
    """Deterministically sample and transform items from an annotation file.

    Args:
      json_path: Path to the dataset annotation JSON file.
      num_samples: Number of items to draw; `0` returns `None`.
      input_shape: Input image shape; only the first two entries
        (height, width) parameterize the transform.
      classes: Class list forwarded to the dataset/cache helpers.
      seed: RNG seed — fixed default makes the subset reproducible.

    Returns:
      A list of transformed samples (with the original image attached),
      or `None` when `num_samples` is zero.
    """
    if num_samples == 0:
      return None

    data_iter, _ = ObjectDetectorJson.json_iterator(json_path, classes)
    data = list(data_iter())

    ObjectDetectorJson.init_cache(json_path, cache_type='NONE', classes=classes)

    # Private Random instance so global random state is untouched.
    rng = random.Random(seed)
    selected_items = rng.sample(range(len(data)), num_samples)

    _, transform_param = MobileNetSSD.create_transform_parameters(*input_shape[:2])
    transformer = AnnotatedDataTransformer(transform_param, is_training=False)

    return [
        ObjectDetectorJson.transform_fn(data[i], transformer,
                                        add_original_image=True)
        for i in selected_items
    ]