Example #1
    def __init__(self, data_dir=None):

        dataset_builder = tfds.builder("cars196:2.*.*", data_dir=data_dir)
        dataset_builder.download_and_prepare()

        # Defines dataset specific train/val/trainval/test splits.
        tfds_splits = {}
        tfds_splits["train"] = "train[:{}%]".format(TRAIN_SPLIT_PERCENT)
        tfds_splits["val"] = "train[{}%:]".format(TRAIN_SPLIT_PERCENT)
        tfds_splits["trainval"] = "train"
        tfds_splits["test"] = "test"

        # Creates a dict with example counts for each split.
        num_samples_splits = {}
        trainval_count = dataset_builder.info.splits["train"].num_examples
        test_count = dataset_builder.info.splits["test"].num_examples
        num_samples_splits["train"] = (TRAIN_SPLIT_PERCENT *
                                       trainval_count) // 100
        num_samples_splits["val"] = trainval_count - num_samples_splits["train"]
        num_samples_splits["trainval"] = trainval_count
        num_samples_splits["test"] = test_count
        super(CarsData, self).__init__(
            dataset_builder=dataset_builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            # Note: Export only image and label tensors with their original types.
            base_preprocess_fn=base.make_get_tensors_fn(["image", "label"]),
            num_classes=dataset_builder.info.features["label"].num_classes)
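The percent-based slicing above is the recurring pattern in these examples: a single TFDS "train" split is carved into train/val by percentage, and the example counts are derived with matching arithmetic. A minimal sketch of that bookkeeping, where TRAIN_SPLIT_PERCENT = 80 and the example count are assumptions chosen purely for illustration:

# Sketch of the percent-based split bookkeeping used above.
# TRAIN_SPLIT_PERCENT = 80 and trainval_count = 8144 are assumptions;
# the real values come from the module and from dataset_builder.info.
TRAIN_SPLIT_PERCENT = 80
trainval_count = 8144

tfds_splits = {
    "train": "train[:{}%]".format(TRAIN_SPLIT_PERCENT),  # "train[:80%]"
    "val": "train[{}%:]".format(TRAIN_SPLIT_PERCENT),    # "train[80%:]"
}
train_count = (TRAIN_SPLIT_PERCENT * trainval_count) // 100
val_count = trainval_count - train_count
# The derived counts partition the original train split exactly.
assert train_count + val_count == trainval_count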
Example #2
    def __init__(self, config="tfds", data_dir=None):

        if config == "tfds":
            dataset_builder = tfds.builder("sun397/tfds:4.*.*",
                                           data_dir=data_dir)
            dataset_builder.download_and_prepare()

            tfds_splits = {
                "train": "train",
                "val": "validation",
                "test": "test",
                "trainval": "train+validation",
            }
            # Creates a dict with example counts.
            num_samples_splits = {
                "test": dataset_builder.info.splits["test"].num_examples,
                "train": dataset_builder.info.splits["train"].num_examples,
                "val": dataset_builder.info.splits["validation"].num_examples,
            }
            num_samples_splits["trainval"] = (num_samples_splits["train"] +
                                              num_samples_splits["val"])
        else:
            raise ValueError("No supported config %r for Sun397Data." % config)

        super(Sun397Data, self).__init__(
            dataset_builder=dataset_builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            # Note: Export only image and label tensors with their original types.
            base_preprocess_fn=base.make_get_tensors_fn(["image", "label"]),
            num_classes=dataset_builder.info.features["label"].num_classes)
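The "train+validation" string works because TFDS split strings can be concatenated with `+`. A small sketch of that behavior, using `mnist` only as a stand-in because it is quick to prepare:

import tensorflow_datasets as tfds

# "train+test" reads both splits back to back, mirroring the
# "train+validation" string used for "trainval" above.
builder = tfds.builder("mnist")
builder.download_and_prepare()
ds = builder.as_dataset(split="train+test")
combined = (builder.info.splits["train"].num_examples +
            builder.info.splits["test"].num_examples)
print(combined)  # 70000, analogous to num_samples_splits["trainval"]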
Example #3
  def __init__(self, num_classes=10, data_dir=None):
    dataset_builder = tfds.builder("caltech101:3.*.*", data_dir=data_dir)
    dataset_builder.download_and_prepare()

    # Defines dataset specific train/val/trainval/test splits.
    tfds_splits = {}
    tfds_splits["train"] = "train[:{}%]".format(_TRAIN_SPLIT_PERCENT)
    tfds_splits["val"] = "train[{}%:]".format(_TRAIN_SPLIT_PERCENT)
    tfds_splits["trainval"] = "train"
    tfds_splits["test"] = "test"

    # Creates a dict with example counts for each split.
    trainval_count = dataset_builder.info.splits[tfds.Split.TRAIN].num_examples
    train_count = (_TRAIN_SPLIT_PERCENT * trainval_count) // 100
    test_count = dataset_builder.info.splits[tfds.Split.TEST].num_examples
    num_samples_splits = dict(
        train=train_count,
        val=trainval_count - train_count,
        trainval=trainval_count,
        test=test_count)

    super(Caltech101, self).__init__(
        dataset_builder=dataset_builder,
        tfds_splits=tfds_splits,
        num_samples_splits=num_samples_splits,
        num_preprocessing_threads=400,
        shuffle_buffer_size=3000,
        base_preprocess_fn=base.make_get_tensors_fn(("image", "label")),
        num_classes=dataset_builder.info.features["label"].num_classes)
Example #4
    def __init__(self, features=("image", "label")):

        dataset_builder = tfds.builder("imagenet2012:5.*.*")

        # Defines dataset specific train/val/trainval/test splits.
        # Note that the test split of the "imagenet2012" dataset is not
        # available, so we use the validation split as the test set and
        # split the original train split into new train and val splits.
        tfds_splits = {}
        tfds_splits["train"] = "train[:{}%]".format(TRAIN_SPLIT_PERCENT)
        tfds_splits["val"] = "train[{}%:]".format(TRAIN_SPLIT_PERCENT)
        tfds_splits["trainval"] = "train"
        tfds_splits["test"] = "validation"

        # Creates a dict with example counts.
        num_samples_splits = {}
        trainval_count = dataset_builder.info.splits["train"].num_examples
        test_count = dataset_builder.info.splits["validation"].num_examples
        num_samples_splits["train"] = (TRAIN_SPLIT_PERCENT *
                                       trainval_count) // 100
        num_samples_splits["val"] = trainval_count - num_samples_splits["train"]
        num_samples_splits["trainval"] = trainval_count
        num_samples_splits["test"] = test_count

        super(ImageNetData, self).__init__(
            dataset_builder=dataset_builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            # Note: Export only image and label tensors with their original types.
            base_preprocess_fn=base.make_get_tensors_fn(features),
            filter_fn=self._get_filter_fn(),
            num_classes=dataset_builder.info.features["label"].num_classes)
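`self._get_filter_fn()` is not shown in this snippet, so the sketch below is purely hypothetical: it only illustrates the shape a `filter_fn` takes, i.e. a predicate from one example dict to a scalar boolean tensor, as used by `tf.data.Dataset.filter`.

import tensorflow as tf

def make_label_filter_fn(allowed_labels):
  # Hypothetical example; the real _get_filter_fn() is not shown above
  # and may do something entirely different.
  allowed = tf.constant(allowed_labels, dtype=tf.int64)

  def filter_fn(example):
    # Keep the example iff its label is in `allowed`.
    return tf.reduce_any(tf.equal(example["label"], allowed))

  return filter_fn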
Example #5
  def test_make_get_tensors_fn(self):
    input_dict = {'tens1': 1, 'tens2': 2, 'tens3': 3}
    # Normal case.
    fn = base.make_get_tensors_fn(output_tensors=['tens1', 'tens2'])
    self.assertTrue(callable(fn))
    self.assertEqual(fn(input_dict), {'tens1': 1, 'tens2': 2})

    # One output tensor is not specified in the input dict.
    fn = base.make_get_tensors_fn(output_tensors=['tens1', 'tens2', 'tens4'])
    self.assertTrue(callable(fn))
    with self.assertRaises(KeyError):
      fn(input_dict)

    # Empty output.
    fn = base.make_get_tensors_fn(output_tensors=())
    self.assertTrue(callable(fn))
    self.assertEqual(fn(input_dict), {})
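The test above pins down the contract of `base.make_get_tensors_fn`: it returns a callable that projects a feature dict onto the requested keys and raises `KeyError` on a missing key. A minimal implementation consistent with that test (a sketch; the library's actual code may differ):

def make_get_tensors_fn(output_tensors):
  def get_tensors_fn(tensor_dict):
    # The dict comprehension raises KeyError for absent keys, matching
    # the assertRaises(KeyError) case in the test above.
    return {name: tensor_dict[name] for name in output_tensors}
  return get_tensors_fn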
Example #6
    def __init__(self,
                 config="btgraham-300",
                 heavy_train_augmentation=False,
                 data_dir=None):
        """Initializer for Diabetic Retinopathy dataset.

    Args:
      config: Name of the TFDS config to use for this dataset.
      heavy_train_augmentation: If True, use heavy data augmentation on the
        training data. Recommended to achieve SOTA.
      data_dir: directory for downloading and storing the data.
    """
        config_and_version = config + ":3.*.*"
        dataset_builder = tfds.builder(
            "diabetic_retinopathy_detection/{}".format(config_and_version),
            data_dir=data_dir)
        self._config = config
        self._heavy_train_augmentation = heavy_train_augmentation

        dataset_builder.download_and_prepare()

        # Defines dataset specific train/val/trainval/test splits.
        tfds_splits = {
            "train": "train",
            "val": "validation",
            "trainval": "train+validation",
            "test": "test",
            "train800": "train[:800]",
            "val200": "validation[:200]",
            "train800val200": "train[:800]+validation[:200]",
        }

        # Creates a dict with example counts for each split.
        train_count = dataset_builder.info.splits["train"].num_examples
        val_count = dataset_builder.info.splits["validation"].num_examples
        test_count = dataset_builder.info.splits["test"].num_examples
        num_samples_splits = {
            "train": train_count,
            "val": val_count,
            "trainval": train_count + val_count,
            "test": test_count,
            "train800": 800,
            "val200": 200,
            "train800val200": 1000,
        }

        super(RetinopathyData, self).__init__(
            dataset_builder=dataset_builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            # Note: Export only image and label tensors with their original types.
            base_preprocess_fn=base.make_get_tensors_fn(["image", "label"]),
            num_classes=dataset_builder.info.features["label"].num_classes)
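A hypothetical instantiation, assuming this class is importable as RetinopathyData; the constructor arguments are exactly the ones shown above, and the data_dir path is illustrative:

# Hypothetical usage sketch.
dataset = RetinopathyData(config="btgraham-300",
                          heavy_train_augmentation=True,
                          data_dir="/tmp/tfds")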
Example #7
  def __init__(self, data_dir=None, train_split_percent=None):

    dataset_builder = tfds.builder("oxford_iiit_pet:3.*.*", data_dir=data_dir)
    dataset_builder.download_and_prepare()
    train_split_percent = train_split_percent or TRAIN_SPLIT_PERCENT

    # Creates a dict with example counts for each split.
    trainval_count = dataset_builder.info.splits[tfds.Split.TRAIN].num_examples
    test_count = dataset_builder.info.splits[tfds.Split.TEST].num_examples
    num_samples_splits = {
        "train": (train_split_percent * trainval_count) // 100,
        "val": trainval_count - (train_split_percent * trainval_count) // 100,
        "trainval": trainval_count,
        "test": test_count,
        "train800": 800,
        "val200": 200,
        "train800val200": 1000,
    }

    # Defines dataset specific train/val/trainval/test splits.
    tfds_splits = {
        "train": "train[:{}]".format(num_samples_splits["train"]),
        "val": "train[{}:]".format(num_samples_splits["train"]),
        "trainval": tfds.Split.TRAIN,
        "test": tfds.Split.TEST,
        "train800": "train[:800]",
        "val200": "train[{}:{}]".format(
            num_samples_splits["train"], num_samples_splits["train"]+200),
        "train800val200": "train[:800]+train[{}:{}]".format(
            num_samples_splits["train"], num_samples_splits["train"]+200),
    }

    super(OxfordIIITPetData, self).__init__(
        dataset_builder=dataset_builder,
        tfds_splits=tfds_splits,
        num_samples_splits=num_samples_splits,
        num_preprocessing_threads=400,
        shuffle_buffer_size=10000,
        # Note: Export only image and label tensors with their original types.
        base_preprocess_fn=base.make_get_tensors_fn(["image", "label"]),
        num_classes=dataset_builder.info.features["label"].num_classes)
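Unlike the percent strings in Example #1, this class slices by absolute indices, which lets the fixed `val200` window start exactly where the new train split ends. A sketch of that arithmetic with illustrative values (trainval_count and the 80% default are assumptions):

TRAIN_SPLIT_PERCENT = 80
trainval_count = 3680  # illustrative only

train_count = (TRAIN_SPLIT_PERCENT * trainval_count) // 100  # 2944
splits = {
    "train": "train[:{}]".format(train_count),  # "train[:2944]"
    "val": "train[{}:]".format(train_count),    # "train[2944:]"
    # val200 begins at the train cut, so it is disjoint from "train".
    "val200": "train[{}:{}]".format(train_count, train_count + 200),
}
assert splits["val200"] == "train[2944:3144]"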
Example #8
    def __init__(self, data_dir=None):

        dataset_builder = tfds.builder("dtd:3.*.*", data_dir=data_dir)
        dataset_builder.download_and_prepare()

        # Defines dataset specific train/val/trainval/test splits.
        tfds_splits = {
            "train": "train",
            "val": "validation",
            "trainval": "train+validation",
            "test": "test",
            "train800": "train[:800]",
            "val200": "validation[:200]",
            "train800val200": "train[:800]+validation[:200]",
        }

        # Creates a dict with example counts for each split.
        train_count = dataset_builder.info.splits["train"].num_examples
        val_count = dataset_builder.info.splits["validation"].num_examples
        test_count = dataset_builder.info.splits["test"].num_examples
        num_samples_splits = {
            "train": train_count,
            "val": val_count,
            "trainval": train_count + val_count,
            "test": test_count,
            "train800": 800,
            "val200": 200,
            "train800val200": 1000,
        }

        super(DTDData, self).__init__(
            dataset_builder=dataset_builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            # Note: Export only image and label tensors with their original types.
            base_preprocess_fn=base.make_get_tensors_fn(["image", "label"]),
            num_classes=dataset_builder.info.features["label"].num_classes)
Example #9
    def __init__(self,
                 num_classes=10,
                 data_dir=None,
                 train_split_percent=None):

        if num_classes == 10:
            dataset_builder = tfds.builder("cifar10:3.*.*", data_dir=data_dir)
        elif num_classes == 100:
            dataset_builder = tfds.builder("cifar100:3.*.*", data_dir=data_dir)
        else:
            raise ValueError(
                "Number of classes must be 10 or 100, got {}".format(
                    num_classes))

        dataset_builder.download_and_prepare()

        train_split_percent = train_split_percent or TRAIN_SPLIT_PERCENT

        # Creates a dict with example counts for each split.
        trainval_count = dataset_builder.info.splits["train"].num_examples
        test_count = dataset_builder.info.splits["test"].num_examples
        num_samples_splits = {
            "train": (train_split_percent * trainval_count) // 100,
            "val": (trainval_count -
                    (train_split_percent * trainval_count) // 100),
            "trainval": trainval_count,
            "test": test_count,
            "train800": 800,
            "val200": 200,
            "train800val200": 1000,
        }

        # Defines dataset specific train/val/trainval/test splits.
        tfds_splits = {
            "train": "train[:{}]".format(num_samples_splits["train"]),
            "val": "train[{}:]".format(num_samples_splits["train"]),
            "trainval": "train",
            "test": "test",
            "train800": "train[:800]",
            "val200": "train[{}:{}]".format(
                num_samples_splits["train"],
                num_samples_splits["train"] + 200),
            "train800val200": "train[:800]+train[{}:{}]".format(
                num_samples_splits["train"],
                num_samples_splits["train"] + 200),
        }

        super(CifarData, self).__init__(
            dataset_builder=dataset_builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            # Note: Export only image and label tensors with their original types.
            base_preprocess_fn=base.make_get_tensors_fn(
                ["image", "label", "id"]),
            num_classes=dataset_builder.info.features["label"].num_classes)
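All of these classes follow the same recipe: derive split strings and example counts from `dataset_builder.info`, then pass both to the shared base class. A sketch of how the percent-based variant could be factored into one helper (the helper name is invented, not part of the library):

def make_percent_splits(dataset_builder, train_split_percent):
  # Hypothetical helper consolidating the pattern repeated above.
  trainval_count = dataset_builder.info.splits["train"].num_examples
  test_count = dataset_builder.info.splits["test"].num_examples
  train_count = (train_split_percent * trainval_count) // 100
  tfds_splits = {
      "train": "train[:{}%]".format(train_split_percent),
      "val": "train[{}%:]".format(train_split_percent),
      "trainval": "train",
      "test": "test",
  }
  num_samples_splits = {
      "train": train_count,
      "val": trainval_count - train_count,
      "trainval": trainval_count,
      "test": test_count,
  }
  return tfds_splits, num_samples_splits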