Example #1
0
def johnson_alahi_li_2016_content_transform(
    edge_size: int = 256, multiple: int = 16, impl_params: bool = True,
) -> ComposedTransform:
    """Build the content-image transform from Johnson, Alahi, and Li (2016).

    The pipeline crops the image from the top-left so both sides are a
    multiple of ``multiple``, resizes it to ``edge_size x edge_size``, and
    expands single-channel images to fake grayscale.

    Args:
        edge_size: Target edge size of the resized image. Defaults to ``256``.
        multiple: Both image sides are first truncated to a multiple of this
            value. Defaults to ``16``.
        impl_params: If ``True``, a ``CaffePreprocessing`` step is appended
            to the pipeline. Defaults to ``True``.

    Returns:
        The composed transform.
    """

    class TopLeftCropToMultiple(Transform):
        # Crops from the top-left so both sides become multiples of `multiple`.
        def __init__(self, multiple: int):
            super().__init__()
            self.multiple = multiple

        def calculate_size(self, image: torch.Tensor) -> Tuple[int, int]:
            # Round each side down to the nearest multiple.
            old_height, old_width = extract_image_size(image)
            new_height = old_height - old_height % self.multiple
            new_width = old_width - old_width % self.multiple
            return new_height, new_width

        # FIX: annotation was `torch.tensor` (the factory function); the
        # correct type is `torch.Tensor`.
        def forward(self, image: torch.Tensor) -> torch.Tensor:
            size = self.calculate_size(image)
            return top_left_crop(image, size)

    class OptionalGrayscaleToFakegrayscale(Transform):
        # Expands single-channel images to fake grayscale; passes others through.
        def forward(self, input_image: torch.Tensor) -> torch.Tensor:
            is_grayscale = extract_num_channels(input_image) == 1
            if is_grayscale:
                return grayscale_to_fakegrayscale(input_image)
            else:
                return input_image

    transforms = [
        TopLeftCropToMultiple(multiple),
        Resize((edge_size, edge_size)),
        OptionalGrayscaleToFakegrayscale(),
    ]
    if impl_params:
        transforms.append(CaffePreprocessing())

    return ComposedTransform(*transforms)
Example #2
0
def li_wand_2016_preprocessor() -> CaffePreprocessing:
    """Return the Caffe-style preprocessing used by Li and Wand (2016)."""
    preprocessor = CaffePreprocessing()
    return preprocessor
Example #3
0
def ulyanov_et_al_2016_preprocessor() -> CaffePreprocessing:
    """Return the Caffe-style preprocessing used by Ulyanov et al. (2016)."""
    preprocessor = CaffePreprocessing()
    return preprocessor
Example #4
0
def johnson_alahi_li_2016_preprocessor() -> CaffePreprocessing:
    """Return the Caffe-style preprocessing used by Johnson, Alahi, and Li (2016)."""
    preprocessor = CaffePreprocessing()
    return preprocessor
Example #5
0
def gatys_et_al_2017_preprocessor() -> CaffePreprocessing:
    """Return the Caffe-style preprocessing used by Gatys et al. (2017)."""
    preprocessor = CaffePreprocessing()
    return preprocessor
Example #6
0
def gatys_ecker_bethge_2015_preprocessor() -> CaffePreprocessing:
    """Return the Caffe-style preprocessing used by Gatys, Ecker, and Bethge (2015).

    CONSISTENCY: the return annotation is narrowed from the looser
    ``nn.Module`` to ``CaffePreprocessing`` to match every other
    ``*_preprocessor`` factory in this file; the returned object is
    unchanged, so this is backward-compatible for callers.
    """
    return CaffePreprocessing()