예제 #1
0
def test_camvid():
    """Check Camvid class count, epoch sizes and segmentation batch shapes."""
    batch_size = 3
    image_size = [256, 512]

    train = DatasetIterator(
        Camvid(batch_size=batch_size,
               pre_processor=ResizeWithMask(image_size)))
    val = DatasetIterator(
        Camvid(subset="validation",
               batch_size=batch_size,
               pre_processor=ResizeWithMask(image_size)))

    assert train.num_classes == 11
    assert train.num_per_epoch == 367
    assert val.num_per_epoch == 101

    for _ in range(STEP_SIZE):
        images, labels = train.feed()
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        # Segmentation masks share the spatial size of the images.
        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == image_size[0]
        assert labels.shape[2] == image_size[1]
예제 #2
0
def test_open_images_v4_object_detection():
    """Feed a few batches from OpenImagesV4BoundingBox and check array shapes."""
    batch_size = 1
    image_size = [256, 256]

    iterator = DatasetIterator(
        OpenImagesV4BoundingBox(batch_size=batch_size,
                                pre_processor=ResizeWithGtBoxes(image_size)))

    max_boxes = iterator.num_max_boxes
    assert max_boxes == OpenImagesV4BoundingBox.count_max_boxes()

    for _ in range(5):
        images, labels = iterator.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        # One row per (padded) ground-truth box, 5 values per row.
        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == max_boxes
        assert labels.shape[2] == 5
예제 #3
0
def test_hue():
    """Visual check: feed batches through the Hue augmentor and display them."""
    batch_size = 3
    image_size = [256, 512]

    iterator = DatasetIterator(Pascalvoc2007(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=Hue((-10, 10)),
    ))

    for _ in range(5):
        images, labels = iterator.feed()
        _show_images_with_boxes(images, labels)
예제 #4
0
def test_filp_top_bottom():
    """Visual check: feed batches through the FlipTopBottom augmentor.

    NOTE(review): "filp" in the name is likely a typo for "flip"; it is kept
    unchanged so test collection and any external references are unaffected.
    """
    batch_size = 3
    image_size = [256, 512]

    iterator = DatasetIterator(Pascalvoc2007(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=FlipTopBottom(),
    ))

    for _ in range(5):
        images, labels = iterator.feed()
        _show_images_with_boxes(images, labels)
예제 #5
0
def test_ytfaces_facial_landmarks_detection():
    """Check YoutubeFacialLandmarks batch shapes for train and validation.

    Labels are produced by JointsToGaussianHeatmap, so their spatial size is
    the image size divided by ``stride`` and the channel count equals the
    number of joints. The original test duplicated the whole loop body for
    each subset; both subsets share one loop here.
    """
    batch_size = 1
    image_size = [256, 320]
    stride = 2
    num_joints = 68

    pre_processor = Sequence([
        ResizeWithJoints(image_size=image_size),
        JointsToGaussianHeatmap(image_size=image_size,
                                num_joints=num_joints,
                                stride=stride)
    ])

    # The same shape contract must hold for both subsets.
    for subset in ("train", "validation"):
        dataset = DatasetIterator(
            YoutubeFacialLandmarks(subset=subset,
                                   batch_size=batch_size,
                                   pre_processor=pre_processor))

        for _ in range(5):
            images, labels = dataset.feed()

            assert isinstance(images, np.ndarray)
            assert images.shape[0] == batch_size
            assert images.shape[1] == image_size[0]
            assert images.shape[2] == image_size[1]
            assert images.shape[3] == 3

            # Heatmaps are downscaled by the stride, one channel per joint.
            assert isinstance(labels, np.ndarray)
            assert labels.shape[0] == batch_size
            assert labels.shape[1] == image_size[0] // stride
            assert labels.shape[2] == image_size[1] // stride
            assert labels.shape[3] == num_joints
예제 #6
0
def test_mscoco_object_detection_person():
    """Check MscocoObjectDetectionPerson epoch sizes, max boxes, batch shapes.

    Fixes two smells from the original: ``num_max_boxes`` was redundantly
    recomputed via ``count_max_boxes()`` after already being asserted equal,
    and the per-batch assert loop was duplicated for train and validation.
    """
    batch_size = 3
    image_size = [256, 512]

    num_max_boxes = 14
    num_train = 38699
    num_val = 18513

    dataset = DatasetIterator(MscocoObjectDetectionPerson(
        batch_size=batch_size, pre_processor=ResizeWithGtBoxes(image_size)))

    assert MscocoObjectDetectionPerson.count_max_boxes() == num_max_boxes
    assert dataset.num_max_boxes == num_max_boxes
    assert dataset.num_per_epoch == num_train

    val_dataset = DatasetIterator(MscocoObjectDetectionPerson(
        subset="validation",
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size)))
    assert val_dataset.num_per_epoch == num_val

    # Both subsets must produce identically shaped batches.
    for iterator in (dataset, val_dataset):
        for _ in range(STEP_SIZE):
            images, labels = iterator.feed()
            assert isinstance(images, np.ndarray)
            assert images.shape[0] == batch_size
            assert images.shape[1] == image_size[0]
            assert images.shape[2] == image_size[1]
            assert images.shape[3] == 3

            # One row per (padded) ground-truth box, 5 values per row.
            assert isinstance(labels, np.ndarray)
            assert labels.shape[0] == batch_size
            assert labels.shape[1] == num_max_boxes
            assert labels.shape[2] == 5
예제 #7
0
def test_camvid_custom():
    """Check the custom CamVid dummy: ignore index, class count, label colors."""
    batch_size = 1

    train_dataset = DummyCamvidCustom(subset="train", batch_size=batch_size)
    # The ignore class index is checked on the raw dataset, before wrapping.
    assert train_dataset.ignore_class_idx == 3
    train_dataset = DatasetIterator(train_dataset)

    test_dataset = DatasetIterator(
        DummyCamvidCustom(subset="validation", batch_size=batch_size))

    assert train_dataset.num_classes == 10
    assert len(train_dataset.label_colors) == 10
    _test_camvid_basic(train_dataset, test_dataset)
예제 #8
0
def test_widerface():
    """Check WiderFace epoch sizes with a capped ``max_boxes`` and batch shapes.

    The per-batch assert loop was duplicated verbatim for the train and
    validation iterators; both now share one loop.
    """
    batch_size = 3
    image_size = [160, 160]

    num_max_boxes = 3
    num_train = 7250
    num_val = 1758

    dataset = DatasetIterator(
        WiderFace(batch_size=batch_size,
                  max_boxes=num_max_boxes,
                  pre_processor=ResizeWithGtBoxes(image_size)))
    assert dataset.num_max_boxes == num_max_boxes
    assert dataset.num_per_epoch == num_train

    val_dataset = DatasetIterator(
        WiderFace(subset="validation",
                  batch_size=batch_size,
                  max_boxes=num_max_boxes,
                  pre_processor=ResizeWithGtBoxes(image_size)))
    assert val_dataset.num_per_epoch == num_val

    # Identical shape contract for both subsets.
    for iterator in (dataset, val_dataset):
        for _ in range(STEP_SIZE):
            images, labels = iterator.feed()
            assert isinstance(images, np.ndarray)
            assert images.shape[0] == batch_size
            assert images.shape[1] == image_size[0]
            assert images.shape[2] == image_size[1]
            assert images.shape[3] == 3

            # One row per (padded) box, 5 values per row.
            assert isinstance(labels, np.ndarray)
            assert labels.shape[0] == batch_size
            assert labels.shape[1] == num_max_boxes
            assert labels.shape[2] == 5
예제 #9
0
def test_ssd_random_crop():
    """Check that SSDRandomCrop keeps ground-truth boxes inside image bounds.

    The bound asserts previously hard-coded 512 and 256; they now reference
    ``image_size`` so the test stays correct if the size is changed.
    """
    batch_size = 3
    image_size = [256, 512]  # [height, width]
    dataset = DatasetIterator(Pascalvoc2007(
        batch_size=batch_size, pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=SSDRandomCrop(),
    ))

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
        # Column 2 is bounded by the width, column 3 by the height.
        assert np.all(labels[:, :, 2] <= image_size[1])
        assert np.all(labels[:, :, 3] <= image_size[0])
예제 #10
0
def test_custom_has_validation_open_images_v4_object_detection():
    """Check DummyHasValidation epoch sizes, class count and batch shapes.

    The per-batch assert loop was duplicated verbatim for the train and
    validation iterators; both now share one loop.
    """
    batch_size = 8
    image_size = [196, 128]

    train_dataset = DatasetIterator(
        DummyHasValidation(subset="train", batch_size=batch_size,
                           pre_processor=ResizeWithGtBoxes(image_size)))
    validation_dataset = DatasetIterator(
        DummyHasValidation(subset="validation", batch_size=batch_size,
                           pre_processor=ResizeWithGtBoxes(image_size)))

    num_max_boxes = validation_dataset.num_max_boxes
    assert num_max_boxes == DummyHasValidation.count_max_boxes()

    assert train_dataset.num_per_epoch == 10
    assert validation_dataset.num_per_epoch == 16
    assert len(train_dataset.classes) == 44
    assert len(validation_dataset.classes) == 44

    # Identical shape contract for both subsets.
    for iterator in (train_dataset, validation_dataset):
        for _ in range(3):
            images, labels = iterator.feed()

            assert isinstance(images, np.ndarray)
            assert images.shape[0] == batch_size
            assert images.shape[1] == image_size[0]
            assert images.shape[2] == image_size[1]
            assert images.shape[3] == 3

            # One row per (padded) box, 5 values per row.
            assert isinstance(labels, np.ndarray)
            assert labels.shape[0] == batch_size
            assert labels.shape[1] == num_max_boxes
            assert labels.shape[2] == 5
예제 #11
0
def test_cityscapes():
    """Smoke-test Cityscapes: class count, label colors and one fed batch."""
    batch_size = 1

    train_dataset = DatasetIterator(
        Cityscapes(subset="train", batch_size=batch_size))
    # Constructed to mirror the train setup; only the train iterator is fed.
    test_dataset = DatasetIterator(
        Cityscapes(subset="validation", batch_size=batch_size))

    assert train_dataset.num_classes == 34
    assert len(train_dataset.label_colors) == 34

    images, labels = train_dataset.feed()
    assert images.shape[0] == batch_size
    assert labels.shape[0] == batch_size
예제 #12
0
def test_camvid_custom_without_test_dataset():
    """The validation split must partition the 5 samples by validation_size."""
    batch_size = 5
    validation_size = 0.2

    train_dataset = DatasetIterator(DummyCamvidCustomWithoutTestDataset(
        subset="train", batch_size=batch_size,
        validation_size=validation_size))
    test_dataset = DatasetIterator(DummyCamvidCustomWithoutTestDataset(
        subset="validation",
        batch_size=batch_size,
        validation_size=validation_size))

    # 5 samples total, split according to the validation ratio.
    assert train_dataset.num_per_epoch == 5 * (1 - validation_size)
    assert test_dataset.num_per_epoch == 5 * (validation_size)
예제 #13
0
def test_mscoco_2017_single_pose_estimation():
    """Check MscocoSinglePersonKeypoints epoch sizes and joint-label shapes.

    BUGFIX: the validation dataset was previously instantiated as
    MscocoObjectDetectionPerson — an apparent copy-paste from the detection
    test — while this test targets keypoints. It now uses
    MscocoSinglePersonKeypoints for both subsets. The duplicated assert loop
    is also shared between train and validation.
    """
    batch_size = 3
    image_size = [160, 160]

    num_train = 149813
    num_val = 6352

    num_joints = 17
    num_dimensions = 2

    dataset = DatasetIterator(MscocoSinglePersonKeypoints(
        batch_size=batch_size, pre_processor=ResizeWithJoints(image_size)))
    assert dataset.num_per_epoch == num_train

    val_dataset = DatasetIterator(MscocoSinglePersonKeypoints(
        subset="validation",
        batch_size=batch_size,
        pre_processor=ResizeWithJoints(image_size)))
    assert val_dataset.num_per_epoch == num_val

    for iterator in (dataset, val_dataset):
        for _ in range(STEP_SIZE):
            images, labels = iterator.feed()
            assert isinstance(images, np.ndarray)
            assert images.shape[0] == batch_size
            assert images.shape[1] == image_size[0]
            assert images.shape[2] == image_size[1]
            assert images.shape[3] == 3

            # One row per joint: num_dimensions coordinates plus one extra
            # column (presumably visibility — verify against the dataset).
            assert isinstance(labels, np.ndarray)
            assert labels.shape[0] == batch_size
            assert labels.shape[1] == num_joints
            assert labels.shape[2] == num_dimensions + 1
예제 #14
0
def setup_dataset(config, subset, rank):
    """Build the dataset named in *config* and wrap it in a DatasetIterator.

    Args:
        config: Experiment config exposing ``DATASET_CLASS`` and a ``DATASET``
            mapping of constructor keyword arguments (upper-cased keys).
        subset: Dataset subset name, e.g. "train" or "validation".
        rank: Seed passed to the iterator (typically the process rank).

    Returns:
        DatasetIterator over the constructed dataset.
    """
    DatasetClass = config.DATASET_CLASS
    dataset_kwargs = {key.lower(): val for key, val in config.DATASET.items()}
    # BUGFIX: pop enable_prefetch BEFORE constructing the dataset; previously
    # it was popped afterwards, so it was still forwarded to DatasetClass as
    # an unexpected keyword argument.
    enable_prefetch = dataset_kwargs.pop("enable_prefetch", False)
    dataset = DatasetClass(subset=subset, **dataset_kwargs)
    return DatasetIterator(dataset, seed=rank, enable_prefetch=enable_prefetch)
예제 #15
0
def test_cityscapes():
    """Check Cityscapes class count, epoch sizes and segmentation batch shapes.

    BUGFIX: the validation dataset passed the misspelled keyword
    ``pre_precessor``, so no resize pre-processor was applied (or the
    constructor rejected it). It is also wrapped in a DatasetIterator now,
    matching the train dataset and the other tests in this file.
    """
    batch_size = 3
    image_size = [256, 512]
    dataset = DatasetIterator(Cityscapes(batch_size=batch_size,
                                         pre_processor=Resize(image_size)))

    val_dataset = DatasetIterator(Cityscapes(subset="validation",
                                             batch_size=batch_size,
                                             pre_processor=Resize(image_size)))

    assert dataset.num_classes == 34
    assert dataset.num_per_epoch == 2975
    assert val_dataset.num_per_epoch == 500

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        # Segmentation masks share the spatial size of the images.
        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == image_size[0]
        assert labels.shape[2] == image_size[1]
예제 #16
0
def test_dataset_iterator_batch_order():
    """Assert the iterator yields identical data whether enable_prefetch is true or false.

    (Docstring typos fixed: "enabele_prefetch ture" -> "enable_prefetch ...
    true".)
    """
    batch_size = 8
    dataset = Dummy(subset="train", batch_size=batch_size)
    dataset_iterator = DatasetIterator(dataset, seed=10, enable_prefetch=False)
    prefetch_dataset_iterator = DatasetIterator(dataset,
                                                seed=10,
                                                enable_prefetch=True)

    # Both iterators share the same seed, so every batch must match exactly.
    for _ in range(30):
        images, labels = next(dataset_iterator)
        prefetch_images, prefetch_labels = next(prefetch_dataset_iterator)

        assert np.all(images == prefetch_images)
        assert np.all(labels == prefetch_labels)
예제 #17
0
def test_camvid():
    """Smoke-test the dummy CamVid dataset: ignore index, classes, one batch."""
    batch_size = 1

    train_dataset = DummyCamvid(subset="train", batch_size=batch_size)
    # The plain CamVid dummy configures no ignore class.
    assert train_dataset.ignore_class_idx is None
    train_dataset = DatasetIterator(train_dataset)

    test_dataset = DatasetIterator(
        DummyCamvid(subset="validation", batch_size=batch_size))

    assert train_dataset.num_classes == 11
    assert len(train_dataset.label_colors) == 12

    images, labels = train_dataset.feed()
    assert images.shape[0] == 1
    assert labels.shape[0] == 1

    _test_camvid_basic(train_dataset, test_dataset)
예제 #18
0
def test_sequence():
    """Visual check: chain several augmentors via Sequence and display batches."""
    batch_size = 3
    image_size = [256, 512]

    chained_augmentor = Sequence([
        FlipLeftRight(),
        FlipTopBottom(),
        SSDRandomCrop(),
    ])

    iterator = DatasetIterator(Pascalvoc2007(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=chained_augmentor,
    ))

    for _ in range(5):
        images, labels = iterator.feed()
        _show_images_with_boxes(images, labels)
예제 #19
0
def test_dataset_iterator_batch_size():
    """The iterator must honour the dataset's batch size for every batch.

    The original duplicated the whole body for batch sizes 8 and 32; a single
    loop over both sizes removes the copy-paste.
    """
    for batch_size in (8, 32):
        dataset = Dummy(subset="train", batch_size=batch_size)
        dataset_iterator = DatasetIterator(dataset)

        for _ in range(10):
            images, labels = next(dataset_iterator)
            assert images.shape[0] == batch_size
            assert labels.shape[0] == batch_size
예제 #20
0
def test_can_iterate(set_test_environment, subset):
    """Iterate a full epoch of Div2k and check image shapes; labels are empty."""
    batch_size = 1
    image_size = (100, 100)

    div2k = Div2k(subset, batch_size=batch_size)
    iterator = DatasetIterator(div2k)

    # One feed per sample covers the whole epoch.
    for _ in range(len(div2k)):
        images, labels = iterator.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        # Div2k carries no labels: the batch's single entry is None.
        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels[0] is None
예제 #21
0
def test_camvid_custom_without_ignore():
    """Without an ignore class, all 12 classes and label colors are kept."""
    batch_size = 1

    dataset = DummyCamvidCustomWithoutIgnoreClass(subset="train",
                                                  batch_size=batch_size)
    assert dataset.ignore_class_idx is None

    iterator = DatasetIterator(dataset)
    assert iterator.num_classes == 12
    assert len(iterator.label_colors) == 12
예제 #22
0
def test_open_images_v4_classification():
    """Feed a few batches from OpenImagesV4Classification and check shapes."""
    batch_size = 1
    image_size = [256, 256]

    iterator = DatasetIterator(
        OpenImagesV4Classification(batch_size=batch_size,
                                   pre_processor=Resize(image_size)))

    for _ in range(5):
        images, labels = iterator.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        # One value per class for every sample in the batch.
        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == iterator.num_classes
예제 #23
0
def setup_dataset(config, subset, rank):
    """helper function from lmnet/train.py to setup the data iterator"""
    dataset_class = config.DATASET_CLASS
    dataset_kwargs = {key.lower(): val for key, val in config.DATASET.items()}
    # BUGFIX: pop enable_prefetch BEFORE constructing the dataset. Previously
    # it was popped afterwards (and only for the train subset), so it stayed
    # in dataset_kwargs and was forwarded to the dataset class as an
    # unexpected keyword argument.
    enable_prefetch = dataset_kwargs.pop("enable_prefetch", False)
    # TODO (Neil): Enable both train and validation
    # For some reasons processes are not terminated cleanly, enable prefetch ONLY for the train dataset.
    if subset != 'train':
        enable_prefetch = False
    dataset = dataset_class(subset=subset, **dataset_kwargs)
    return DatasetIterator(dataset, seed=rank, enable_prefetch=enable_prefetch)
예제 #24
0
def test_caltech101():
    """Feed batches from Caltech101 and check image and label shapes."""
    batch_size = 3
    image_size = [256, 512]

    iterator = DatasetIterator(
        Caltech101(batch_size=batch_size,
                   pre_processor=Resize(image_size)))

    assert iterator.num_classes == 101

    for _ in range(STEP_SIZE):
        images, labels = iterator.feed()
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        # One value per class for every sample in the batch.
        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == iterator.num_classes
예제 #25
0
def test_contrast():
    """Visual check: feed batches through the Contrast augmentor."""
    batch_size = 3
    image_size = [256, 512]

    iterator = DatasetIterator(LmThingsOnATable(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=Contrast((0.0, 2.0)),
    ))

    for _ in range(5):
        images, labels = iterator.feed()
        _show_images_with_boxes(images, labels)
예제 #26
0
def test_filp_left_right():
    """Visual check: feed batches through the FlipLeftRight augmentor.

    NOTE(review): "filp" in the name is likely a typo for "flip"; it is kept
    unchanged so test collection and any external references are unaffected.
    """
    batch_size = 3
    image_size = [256, 512]

    iterator = DatasetIterator(LmThingsOnATable(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=FlipLeftRight(),
    ))

    for _ in range(5):
        images, labels = iterator.feed()
        _show_images_with_boxes(images, labels)
예제 #27
0
def test_custom_open_images_v4_object_detection():
    """Check the Dummy dataset's train/validation split and batch shapes."""
    validation_size = 0.2
    batch_size = 1
    image_size = [256, 128]

    train_dataset = DatasetIterator(
        Dummy(batch_size=batch_size,
              validation_size=validation_size,
              pre_processor=ResizeWithGtBoxes(image_size)))
    validation_dataset = DatasetIterator(
        Dummy(batch_size=batch_size,
              subset="validation",
              validation_size=validation_size,
              pre_processor=ResizeWithGtBoxes(image_size)))

    num_max_boxes = train_dataset.num_max_boxes
    assert num_max_boxes == Dummy.count_max_boxes()

    # 10 samples total, split according to the validation ratio.
    assert train_dataset.num_per_epoch == 10 * (1 - validation_size)
    assert validation_dataset.num_per_epoch == 10 * (validation_size)

    # 13 feeds exceed the 8-sample train epoch, so wrap-around is exercised.
    for _ in range(13):
        images, labels = train_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        # One row per (padded) box, 5 values per row.
        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
예제 #28
0
파일: evaluate.py 프로젝트: ki-lm/blueoil
def setup_dataset(config, subset, seed):
    """Build the configured dataset and wrap it in a DatasetIterator.

    Args:
        config: Experiment config exposing ``DATASET_CLASS`` and a ``DATASET``
            mapping of constructor keyword arguments (upper-cased keys).
        subset: Dataset subset name, e.g. "train" or "validation".
        seed: Seed passed to the iterator.

    Returns:
        DatasetIterator over the constructed dataset.
    """
    DatasetClass = config.DATASET_CLASS
    dataset_kwargs = {key.lower(): val for key, val in config.DATASET.items()}

    # If there is a settings for TFDS, TFDS dataset class will be used.
    tfds_kwargs = dataset_kwargs.pop("tfds_kwargs", {})
    if tfds_kwargs:
        if issubclass(DatasetClass, ObjectDetectionBase):
            DatasetClass = TFDSObjectDetection
        else:
            DatasetClass = TFDSClassification

    # BUGFIX: pop enable_prefetch BEFORE constructing the dataset; previously
    # it was popped afterwards, so it was still forwarded to DatasetClass as
    # an unexpected keyword argument.
    enable_prefetch = dataset_kwargs.pop("enable_prefetch", False)
    dataset = DatasetClass(subset=subset, **dataset_kwargs, **tfds_kwargs)
    return DatasetIterator(dataset, seed=seed, enable_prefetch=enable_prefetch)
예제 #29
0
def test_pascalvoc_2007_2012_no_skip_difficult():
    """Check Pascalvoc20072012 with skip_difficult=False: counts and shapes.

    The per-batch assert loop was duplicated verbatim for the train and
    validation iterators; both now share one loop.
    """
    batch_size = 3
    image_size = [256, 512]
    num_max_boxes = 56

    num_train_val_2007 = 2501 + 2510
    num_train_val_2012 = 5717 + 5823
    num_test_2007 = 4952

    dataset = DatasetIterator(Pascalvoc20072012(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        skip_difficult=False,
    ))

    assert dataset.num_max_boxes == num_max_boxes
    assert Pascalvoc20072012.count_max_boxes(
        skip_difficult=False) == num_max_boxes
    assert dataset.num_per_epoch == num_train_val_2007 + num_train_val_2012

    val_dataset = DatasetIterator(Pascalvoc20072012(
        subset="validation",
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        skip_difficult=False))
    assert val_dataset.num_per_epoch == num_test_2007

    # Identical shape contract for both subsets.
    for iterator in (dataset, val_dataset):
        for _ in range(STEP_SIZE):
            images, labels = iterator.feed()
            assert isinstance(images, np.ndarray)
            assert images.shape[0] == batch_size
            assert images.shape[1] == image_size[0]
            assert images.shape[2] == image_size[1]
            assert images.shape[3] == 3

            # One row per (padded) box, 5 values per row.
            assert isinstance(labels, np.ndarray)
            assert labels.shape[0] == batch_size
            assert labels.shape[1] == num_max_boxes
            assert labels.shape[2] == 5
예제 #30
0
def test_mscoco_segmentation():
    """Feed batches from MscocoSegmentation and check image and mask shapes."""
    batch_size = 3
    image_size = [256, 512]

    iterator = DatasetIterator(
        MscocoSegmentation(batch_size=batch_size,
                           pre_processor=ResizeWithMask(image_size)))

    for _ in range(STEP_SIZE):
        images, labels = iterator.feed()
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        # Segmentation masks share the spatial size of the images.
        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == image_size[0]
        assert labels.shape[2] == image_size[1]