def test_repeated_random_crop_returns_different_images():
    # OPEN AN IMAGE
    kitti_data_source: KittiDataSource = KittiDataSource(KITTI_BASE_DIR)
    train_data: List[Tuple[str, str]] = kitti_data_source.get_train_data()
    test_camera_image_path: str = train_data[0][0]
    test_camera_image: Image.Image = Image.open(test_camera_image_path)

    # CROP IT TWICE
    random_crop: RandomCrop = RandomCrop(target_size=(16, 16))
    cropped_1: List[Image.Image] = random_crop(test_camera_image)
    cropped_2: List[Image.Image] = random_crop(test_camera_image)

    cropped_1_np: np.ndarray = from_pil_to_np(cropped_1[0])
    cropped_2_np: np.ndarray = from_pil_to_np(cropped_2[0])

    assert not np.array_equal(cropped_1_np, cropped_2_np)
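The RandomCrop implementation itself is not shown in these examples. A minimal sketch consistent with how the tests call it (a callable that accepts one image or a list, draws a single random offset, and crops everything at that offset) might look like this; the details are assumptions, not the actual code:

import random
from typing import List, Tuple, Union
from PIL import Image

class RandomCrop:
    def __init__(self, target_size: Tuple[int, int]):
        self.target_size = target_size

    def __call__(self, images: Union[Image.Image, List[Image.Image]]) -> List[Image.Image]:
        # ACCEPT A SINGLE IMAGE OR A LIST; ALWAYS RETURN A LIST
        if isinstance(images, Image.Image):
            images = [images]
        crop_width, crop_height = self.target_size
        # ONE RANDOM OFFSET SHARED BY ALL IMAGES, SO IMAGE AND MASK STAY ALIGNED
        left = random.randint(0, images[0].width - crop_width)
        top = random.randint(0, images[0].height - crop_height)
        return [image.crop((left, top, left + crop_width, top + crop_height))
                for image in images]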
Code example #2
def test_label_integrity():
    """
    Check that the resized version of the original labels can be reconstructed from the matrix which will be the
    neural network's input
    """

    kitti_data_source: KittiDataSource = KittiDataSource(KITTI_BASE_DIR)
    train_data_generator: DataGenerator = DataGenerator(
        data_sources=[kitti_data_source],
        phase='train',
        batch_size=4,
        transformation=Crop((256, 256)),
        target_size=(256, 256),
        active_labels=[0, 1],
        random_seed=42)

    original_image, original_labels, _ = train_data_generator.get_batch(0)[0]
    resized_original = Resize((256, 256))(original_labels)[0]
    resized_original_array = from_pil_to_np(resized_original)

    resized_split = split_label_image(resized_original_array,
                                      CityscapesLabels.ALL)
    resized_merged = merge_label_images(resized_split, CityscapesLabels.ALL)

    assert (resized_original_array == resized_merged).all()
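split_label_image and merge_label_images are not defined in these snippets; a sketch of the round trip the assertion relies on, assuming integer label ids as in active_labels=[0, 1], could be:

import numpy as np

def split_label_image(label_image: np.ndarray, labels) -> np.ndarray:
    # ONE BINARY PLANE PER CLASS, STACKED ALONG THE LAST AXIS
    return np.stack([(label_image == label).astype(np.uint8) for label in labels],
                    axis=-1)

def merge_label_images(split: np.ndarray, labels) -> np.ndarray:
    # INVERT THE SPLIT: EVERY PIXEL TAKES THE LABEL OF ITS ACTIVE PLANE
    merged = np.zeros(split.shape[:-1], dtype=np.uint8)
    for index, label in enumerate(labels):
        merged[split[..., index] == 1] = label
    return merged

The round trip is lossless as long as every pixel value in the label image appears in the label list, which holds here because the test uses CityscapesLabels.ALL.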
Code example #3
def test_resize_and_crop():
    # DUMMY IMAGE - SQUARE IMAGE WITH A HORIZONTAL WHITE BAND
    dummy_image: np.ndarray = np.zeros((12, 12, 3), dtype=np.uint8)
    dummy_image[3:9, :, :] = (1, 1, 1)

    # EXPECTED - JUST THE WHITE IMAGE
    expected: np.ndarray = np.ones(shape=(200, 100, 3), dtype=np.uint8)

    actual: Image.Image = resize_and_crop(dummy_image, (200, 100))
    actual_array: np.ndarray = from_pil_to_np(actual).swapaxes(0, 1)

    assert (expected == actual_array).all()
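resize_and_crop is likewise not shown; the test implies a scale-to-cover-then-center-crop behaviour with a (width, height) target size. A hedged sketch, where NEAREST resampling is an assumption that keeps the binary band values exact:

from PIL import Image
import numpy as np

def resize_and_crop(image, target_size):
    # ACCEPT NUMPY INPUT, AS THE TEST ABOVE DOES
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)
    target_width, target_height = target_size
    # SCALE SO THE IMAGE COVERS THE TARGET, THEN CENTER-CROP THE OVERHANG
    scale = max(target_width / image.width, target_height / image.height)
    resized = image.resize((round(image.width * scale), round(image.height * scale)),
                           Image.NEAREST)
    left = (resized.width - target_width) // 2
    top = (resized.height - target_height) // 2
    return resized.crop((left, top, left + target_width, top + target_height))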
Code example #4
def test_image_pad_resize():
    # WIDE WHITE IMAGE
    dummy_image: Image.Image = Image.new(mode='RGB',
                                         size=(200, 100),
                                         color=(1, 1, 1))

    # EXPECTED
    expected: np.ndarray = np.zeros((12, 12, 3), dtype=np.uint8)
    expected[3:9, :, :] = (1, 1, 1)

    actual: Image.Image = pad_and_resize(dummy_image, (12, 12))
    actual_array: np.ndarray = from_pil_to_np(actual)

    assert (expected == actual_array).all()
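pad_and_resize would then be the inverse fit: pad to a square with black, keeping the original centered, then downscale. Again a sketch under the same resampling assumption:

from PIL import Image

def pad_and_resize(image: Image.Image, target_size):
    # PAD TO A SQUARE WITH BLACK, KEEPING THE ORIGINAL CENTERED
    side = max(image.size)
    padded = Image.new(mode='RGB', size=(side, side), color=(0, 0, 0))
    padded.paste(image, ((side - image.width) // 2, (side - image.height) // 2))
    # NEAREST KEEPS THE BAND VALUES EXACT
    return padded.resize(target_size, Image.NEAREST)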
Code example #5
    def __getitem__(self, index) -> Tuple[np.ndarray, Dict[str, np.ndarray], Dict[str, np.ndarray]]:
        # SIMPLE 4-DIMENSIONAL MATRIX, FIRST DIMENSION IS THE BATCH
        batch_images: np.ndarray = np.zeros((self.batch_size,) + self.target_size + (3,))
        # THIS IS A DICTIONARY, ALL MATRICES ARE ZEROED, EXCEPT FOR THE ACTIVE TARGETS
        batch_masks: Dict[str, np.ndarray] = {}
        for data_source in self.data_sources:
            batch_mask_shape: Tuple[int, int, int, int] = (self.batch_size,) + self.target_size + (len(self.classes),)
            batch_masks[data_source.get_name()] = np.zeros(batch_mask_shape)
        # THIS IS A DICTIONARY OF 1-D VECTORS
        batch_sample_weights: Dict[str, np.ndarray] = {}
        for data_source in self.data_sources:
            batch_sample_weights[data_source.get_name()] = np.zeros(shape=self.batch_size)

        # LOOP THROUGH ORIGINAL DATA
        original_batch = self.get_batch(index)
        for batch_index, instance in enumerate(original_batch):
            image, mask, ds_name = instance

            # TRANSFORM IMAGE AND MASK
            transformed = self.transformation([image, mask])
            transformed_image: Image.Image = transformed[0]
            transformed_mask: Image.Image = transformed[1]

            # STORE IMAGE
            image_array: np.ndarray = from_pil_to_np(transformed_image) / 255
            batch_images[batch_index] = image_array

            # STORE MASK
            mask_array: np.ndarray = from_pil_to_np(transformed_mask)
            prepared_mask: np.ndarray = split_label_image(mask_array, self.classes)
            batch_masks[ds_name][batch_index] = prepared_mask

            # STORE SAMPLE WEIGHTS
            batch_sample_weights[ds_name][batch_index] = 1.0

        return batch_images, batch_masks, batch_sample_weights
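Indexing the generator therefore yields batch-shaped arrays directly. A quick shape check, given a DataGenerator constructed as in the tests above (the 'kitti' key matching KittiDataSource.get_name() is an assumption taken from those tests):

images, masks, sample_weights = generator[0]
# IMAGES: (BATCH, HEIGHT, WIDTH, 3), SCALED TO [0, 1]
assert images.shape == (generator.batch_size,) + generator.target_size + (3,)
# ONE MASK TENSOR PER DATA SOURCE, ONE PLANE PER ACTIVE CLASS
assert masks['kitti'].shape == (generator.batch_size,) + generator.target_size + (len(generator.classes),)
# SAMPLE WEIGHTS: 1.0 WHERE THE INSTANCE CAME FROM THAT SOURCE, ELSE 0.0
assert sample_weights['kitti'].shape == (generator.batch_size,)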
Code example #6
def test_argmax_on_split_images():
    kitti_data_source: KittiDataSource = KittiDataSource(KITTI_BASE_DIR)
    train_data_generator: DataGenerator = DataGenerator(
        data_sources=[kitti_data_source],
        phase='train',
        batch_size=4,
        transformation=Crop((256, 256)),
        target_size=(256, 256),
        active_labels=CityscapesLabels.ALL,
        random_seed=42)

    original_image, original_labels, _ = train_data_generator.get_batch(0)[0]
    cropped_original_labels = Crop((256, 256))(original_labels)[0]
    cropped_original_labels_np = from_pil_to_np(cropped_original_labels)

    input_image, input_labels, _ = train_data_generator[0]
    input_labels = input_labels['kitti']
    input_labels = input_labels[0]
    input_labels_merged = np.argmax(input_labels, axis=-1)

    assert (input_labels_merged == cropped_original_labels_np).all()
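Note that np.argmax over the split planes recovers the original label ids only because every Cityscapes label is active, so the plane index coincides with the label id. A toy check of that identity:

import numpy as np

label_image = np.array([[0, 2], [1, 0]])
# SPLIT INTO ONE BINARY PLANE PER LABEL ID 0..2
planes = np.stack([(label_image == label) for label in (0, 1, 2)], axis=-1)
# ARGMAX OVER THE PLANES RETURNS THE PLANE INDEX,
# WHICH EQUALS THE LABEL ID ONLY WHEN ALL IDS ARE ACTIVE AND IN ORDER
assert (np.argmax(planes, axis=-1) == label_image).all()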
Code example #7
    labels = CityscapesLabels.ALL
    index = 6

    # CREATE GENERATOR
    data_sources: List[DataSource] = [KittiDataSource(KITTI_BASE_DIR)]
    generator: DataGenerator = DataGenerator(data_sources=data_sources,
                                             phase='train',
                                             transformation=Fit((256, 256)),
                                             batch_size=1,
                                             target_size=(256, 256),
                                             active_labels=labels)

    # ORIGINAL IMAGES FROM THE GENERATOR
    original_image, original_labels, _ = generator.get_batch(index)[0]
    original_image_np: np.ndarray = from_pil_to_np(original_image)
    original_labels_np: np.ndarray = from_pil_to_np(original_labels)
    original_labels_rgb: np.ndarray = generate_semantic_rgb(original_labels_np)

    # PRE-PROCESSED IMAGES FROM THE GENERATOR
    image_batch, labels_batch, _ = generator[index]

    # GET SINGLE IMAGES FROM BATCH
    input_image = image_batch[0] * 255
    input_image = input_image.astype(np.uint8)
    input_labels = labels_batch['kitti'][0]
    input_labels = input_labels.astype(np.uint8)

    # COLORIZE LABELS
    merged_labels = merge_label_images(input_labels, labels)
    input_labels_rgb: np.ndarray = generate_semantic_rgb(merged_labels)
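The snippet stops before displaying anything; a minimal side-by-side view of the pre-processed image and its colorized labels, in the style of the next example, might be:

import matplotlib.pyplot as plt

fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.set_title('Input')
ax1.imshow(input_image)
ax2.set_title('Semantic')
ax2.imshow(input_labels_rgb)
plt.show()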
Code example #8
        target_size=image_shape,
        transformation=Crop(image_shape),
        batch_size=4,
        active_labels=CityscapesLabels.ALL)
    input_images, input_labels, _ = validation_generator[0]
    original_batch = validation_generator.get_batch(0)
    original_image, original_labels, _ = original_batch[0]
    original_size: Tuple[int, int] = original_image.size

    # GENERATE A PREDICTION
    predicted: np.ndarray = model.predict(input_images)[0]
    # CONVERT TO LABEL IMAGES
    predicted_labels: np.ndarray = np.argmax(predicted.squeeze(), -1)

    # VISUALIZE ORIGINAL, TARGET AND PREDICTED
    original_labels_array = from_pil_to_np(original_labels)
    original_labels_rgb = generate_semantic_rgb(original_labels_array)
    predicted_rgb = generate_semantic_rgb(predicted_labels)

    # PLOT IMAGES
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3)

    # ORIGINAL IMAGE
    ax1.set_title('Original')
    ax1.imshow(original_image)
    # FULL SEGMENTATION
    ax2.set_title('Semantic: Original')
    ax2.imshow(original_labels_rgb)

    # PREDICTED SEGMENTATION
    ax3.set_title('Semantic: Predicted')