def test_get_conv(subtests):
    """Smoke-test ``paper.conv``: module class and basic hyper-parameters."""
    in_channels = out_channels = 3
    kernel_size = 3
    stride = 1

    for params in generate_param_combinations(
        padding=(None, 1), upsample=(True, False)
    ):
        with subtests.test(**params):
            conv = paper.conv(
                in_channels, out_channels, kernel_size, stride=stride, **params
            )

            # Upsampling variants are realized as transposed convolutions.
            expected_cls = nn.ConvTranspose2d if params["upsample"] else nn.Conv2d
            assert isinstance(conv, expected_cls)

            with subtests.test("in_channels"):
                assert conv.in_channels == in_channels

            with subtests.test("out_channels"):
                assert conv.out_channels == out_channels

            with subtests.test("kernel_size"):
                assert conv.kernel_size == misc.to_2d_arg(kernel_size)

            with subtests.test("stride"):
                assert conv.stride == misc.to_2d_arg(stride)

            padding = params["padding"]
            if padding is not None:
                with subtests.test("padding"):
                    assert conv.padding == misc.to_2d_arg(padding)
def extract_patches2d(
    x: torch.Tensor,
    patch_size: Union[int, Sequence[int]],
    stride: Union[int, Sequence[int]] = 1,
) -> torch.Tensor:
    """Extract 2-dimensional patches from a batched image tensor.

    Args:
        x: Input tensor of shape ``B x C x H x W``.
        patch_size: Patch size; a scalar is broadcast to both spatial dimensions.
        stride: Stride; a scalar is broadcast to both spatial dimensions.
    """
    assert x.dim() == 4
    size_2d = to_2d_arg(patch_size)
    stride_2d = to_2d_arg(stride)
    return _extract_patchesnd(x, size_2d, stride_2d)
def test_style_loss(subtests, impl_params):
    """Check ``paper.style_loss`` against the paper's hyper-parameters."""
    style_loss = paper.style_loss(impl_params=impl_params)
    assert isinstance(style_loss, pystiche.loss.MultiLayerEncodingLoss)

    hyper_parameters = paper.hyper_parameters(impl_params=impl_params).style_loss

    losses = tuple(style_loss.children())

    with subtests.test("losses"):
        assert all(isinstance(loss, paper.MRFLoss) for loss in losses)

    # Collect the per-layer attributes in one pass; the starred unpack keeps the
    # original failure mode (ValueError) if there are no child losses.
    layers, layer_weights, patch_size, stride = zip(
        *[
            (loss.encoder.layer, loss.score_weight, loss.patch_size, loss.stride)
            for loss in losses
        ]
    )

    with subtests.test("layers"):
        assert layers == hyper_parameters.layers

    with subtests.test("layer_weights"):
        assert layer_weights == pytest.approx((1.0,) * len(layers))

    with subtests.test("patch_size"):
        assert patch_size == (misc.to_2d_arg(hyper_parameters.patch_size),) * len(
            layers
        )

    with subtests.test("stride"):
        assert stride == (misc.to_2d_arg(hyper_parameters.stride),) * len(layers)

    with subtests.test("score_weight"):
        assert style_loss.score_weight == pytest.approx(hyper_parameters.score_weight)
def __init__(
    self,
    kernel_size: Union[Tuple[int, int], int],
    stride: Optional[Union[Tuple[int, int], int]] = None,
    **kwargs: Any,
) -> None:
    """Normalize ``kernel_size`` and ``stride`` to 2-tuples before delegating.

    ``stride`` defaults to ``kernel_size`` (the usual pooling convention).
    """
    kernel_size = to_2d_arg(kernel_size)
    if stride is None:
        # No explicit stride: pool with non-overlapping windows.
        stride = kernel_size
    else:
        stride = to_2d_arg(stride)
    super().__init__(kernel_size, stride=stride, **kwargs)
def __init__(
    self,
    encoder: Encoder,
    patch_size: Union[int, Sequence[int]],
    stride: Union[int, Sequence[int]] = 1,
    target_transforms: Optional[Iterable[Transform]] = None,
    score_weight: float = 1.0,
):
    """Store patch extraction parameters after initializing the base operator."""
    super().__init__(encoder, score_weight=score_weight)
    # Scalars are broadcast to (height, width) pairs.
    self.patch_size = to_2d_arg(patch_size)
    self.stride = to_2d_arg(stride)
    # Optional transformations applied to the target before matching patches.
    self.target_transforms = target_transforms
def __init__(
    self,
    encoder: Encoder,
    patch_size: Union[int, Sequence[int]],
    stride: Union[int, Sequence[int]] = 1,
    target_transforms: Optional[Iterable[Transform]] = None,
    score_weight: float = 1.0,
    num_scale_steps: Optional[int] = None,
    scale_step_width: Optional[float] = None,
    num_rotation_steps: Optional[int] = None,
    rotation_step_width: Optional[float] = None,
):
    """Initialize the MRF operator.

    The ``num_*_steps`` / ``*_step_width`` parameters are deprecated; passing
    any of them emits a :class:`UserWarning` and builds ``target_transforms``
    via ``scale_and_rotate_transforms`` instead.
    """
    deprecated_args = (
        num_scale_steps,
        scale_step_width,
        num_rotation_steps,
        rotation_step_width,
    )
    # Generator expression instead of materializing a throwaway list in any().
    if any(arg is not None for arg in deprecated_args):
        msg = build_deprecation_message(
            (
                "Parametrizing target transformations with any of "
                "num_scale_steps, scale_step_width, num_rotation_steps, or "
                "rotation_step_width through the constructor of MRFOperator"
            ),
            "0.4.0",
            info=(
                "Please provide an iterable of transformations via the parameter "
                "target_transforms. You can retain the old functionality with "
                "MRFOperator.rotate_and_scale_transforms()."
            ),
        )
        warnings.warn(msg, UserWarning)
        # Fall back to the historical defaults for any parameter left unset.
        target_transforms = self.scale_and_rotate_transforms(
            num_scale_steps=0 if num_scale_steps is None else num_scale_steps,
            scale_step_width=5e-2 if scale_step_width is None else scale_step_width,
            num_rotate_steps=0 if num_rotation_steps is None else num_rotation_steps,
            rotate_step_width=10.0
            if rotation_step_width is None
            else rotation_step_width,
        )

    super().__init__(encoder, score_weight=score_weight)
    # Scalars are broadcast to (height, width) pairs.
    self.patch_size = to_2d_arg(patch_size)
    self.stride = to_2d_arg(stride)
    self.target_transforms = target_transforms
def __init__(
    self,
    encoder: Encoder,
    patch_size: Union[int, Sequence[int]],
    stride: Union[int, Sequence[int]] = 1,
    num_scale_steps: int = 0,
    scale_step_width: float = 5e-2,
    num_rotation_steps: int = 0,
    rotation_step_width: float = 10,
    score_weight: float = 1.0,
):
    """Store patch-matching and scale/rotation augmentation parameters."""
    super().__init__(encoder, score_weight=score_weight)
    # Scalars are broadcast to (height, width) pairs.
    self.patch_size = to_2d_arg(patch_size)
    self.stride = to_2d_arg(stride)
    # Augmentation grid: number of steps and step width per transformation.
    self.num_scale_steps = num_scale_steps
    self.scale_step_width = scale_step_width
    self.num_rotation_steps = num_rotation_steps
    self.rotation_step_width = rotation_step_width
def test_to_2d_arg():
    """``to_2d_arg`` broadcasts scalars, passes 2-tuples through, converts
    2-element lists, and rejects sequences of the wrong length."""
    # A scalar is broadcast to a pair.
    assert misc.to_2d_arg(0) == (0, 0)

    # A 2-tuple passes through unchanged.
    assert misc.to_2d_arg((0, 0)) == (0, 0)

    # A 2-element list is converted to a tuple.
    assert misc.to_2d_arg([0] * 2) == (0, 0)

    # A sequence of the wrong length is rejected.
    with pytest.raises(RuntimeError):
        misc.to_2d_arg((0,))
def __init__(
    self,
    encoder: enc.Encoder,
    patch_size: Union[int, Sequence[int]],
    *,
    stride: Union[int, Sequence[int]] = 1,
    target_transforms: Optional[Iterable[nn.Module]] = None,
    input_guide: Optional[torch.Tensor] = None,
    target_image: Optional[torch.Tensor] = None,
    target_guide: Optional[torch.Tensor] = None,
    score_weight: float = 1.0,
):
    """Forward guide/target state to the base loss and store patch parameters."""
    super().__init__(
        encoder=encoder,
        input_guide=input_guide,
        target_image=target_image,
        target_guide=target_guide,
        score_weight=score_weight,
    )
    # Scalars are broadcast to (height, width) pairs.
    self.patch_size = to_2d_arg(patch_size)
    self.stride = to_2d_arg(stride)
    # Optional transformations applied to the target before matching patches.
    self.target_transforms = target_transforms
def test_AutoPadAvgPool2d(subtests, auto_pad_pool_params, input_image):
    """Auto-padded average pooling shrinks each side to ``side // stride``."""
    image_size = extract_image_size(input_image)
    for params in auto_pad_pool_params:
        with subtests.test(**params):
            pool = utils.AutoPadAvgPool2d(**params)
            output_image = pool(input_image)

            stride_2d = to_2d_arg(params["stride"])
            expected = tuple(
                side // stride for side, stride in zip(image_size, stride_2d)
            )
            assert extract_image_size(output_image) == expected
def rescale(
    image: torch.Tensor,
    factor: Union[float, Tuple[float, float]],
    interpolation_mode: str = "bilinear",
) -> torch.Tensor:
    """Rescale ``image`` by ``factor``.

    A scalar factor is applied to both dimensions; a pair is interpreted as
    ``(vertical, horizontal)``. The target size is rounded to whole pixels.
    """
    old_height, old_width = extract_image_size(image)
    factor_vert, factor_horz = to_2d_arg(factor)
    new_size = (round(old_height * factor_vert), round(old_width * factor_horz))
    resized = resize(image, new_size, interpolation_mode=interpolation_mode)
    return cast(torch.Tensor, resized)
def test_Transformer(subtests, input_image):
    """Check the transformer's structure and that it preserves the image size."""
    levels = 5
    for impl_params in (True, False):
        with subtests.test(impl_params=impl_params):
            transformer = paper.Transformer(levels, impl_params=impl_params)

            with subtests.test("pyramid"):
                assert isinstance(transformer[0], SequentialWithOutChannels)

            with subtests.test("output_conv"):
                expected_cls = nn.Conv2d if impl_params else paper.ConvBlock
                assert isinstance(transformer[1], expected_cls)

                # With impl_params the output conv is bare; otherwise it is the
                # first module inside a ConvBlock.
                output_conv = transformer[1] if impl_params else transformer[1][0]
                assert output_conv.out_channels == 3
                assert output_conv.kernel_size == misc.to_2d_arg(1)
                assert output_conv.stride == misc.to_2d_arg(1)

            with subtests.test("forward size"):
                assert input_image.size() == transformer(input_image).size()
def test_AutoPadConvTranspose2d(subtests, auto_pad_conv_params, input_image):
    """Auto-padded transposed conv grows each side to ``side * stride``."""
    in_channels = out_channels = extract_num_channels(input_image)
    image_size = extract_image_size(input_image)
    for params in auto_pad_conv_params:
        with subtests.test(**params):
            conv = utils.AutoPadConvTranspose2d(in_channels, out_channels, **params)
            output_image = conv(input_image)

            stride_2d = to_2d_arg(params["stride"])
            expected = tuple(
                side * stride for side, stride in zip(image_size, stride_2d)
            )
            assert extract_image_size(output_image) == expected
def test_ConvBlock(subtests):
    """A ConvBlock is conv -> instance norm -> ReLU with the given parameters."""
    in_channels = out_channels = 3
    kernel_size = 3
    stride = 1

    conv_block = paper.ConvBlock(in_channels, out_channels, kernel_size, stride=stride)

    assert isinstance(conv_block, SequentialWithOutChannels)
    # Exactly three stages: conv, norm, activation.
    assert len(conv_block) == 3

    with subtests.test("conv"):
        conv = conv_block[0]
        assert isinstance(conv, nn.Conv2d)
        assert conv.in_channels == in_channels
        assert conv.out_channels == out_channels
        assert conv.kernel_size == misc.to_2d_arg(kernel_size)
        assert conv.stride == misc.to_2d_arg(stride)

    with subtests.test("norm"):
        assert isinstance(conv_block[1], nn.InstanceNorm2d)

    with subtests.test("activation"):
        assert isinstance(conv_block[2], nn.ReLU)
def extract_normalized_patches2d(
    input: torch.Tensor,
    patch_size: Union[int, Sequence[int]],
    stride: Union[int, Sequence[int]],
) -> torch.Tensor:
    r"""Extract 2-dimensional patches from the input with normalized gradient.

    If ``stride >= patch_size``, this behaves just like
    :func:`pystiche.extract_patches2d`. Otherwise, the gradient of the input
    is normalized such that every value is divided by the number of patches it
    appears in.

    Examples:
        >>> import torch
        >>> import pystiche
        >>> input = torch.ones(1, 1, 4, 4).requires_grad_(True)
        >>> target = torch.zeros(1, 1, 4, 4).detach()
        >>> # without normalized gradient
        >>> input_patches = pystiche.extract_patches2d(
        ...     input, patch_size=2, stride=1
        ... )
        >>> target_patches = pystiche.extract_patches2d(
        ...     target, patch_size=2, stride=1
        ... )
        >>> loss = 0.5 * torch.sum((input_patches - target_patches) ** 2.0)
        >>> loss.backward()
        >>> input.grad
        tensor([[[[1., 2., 2., 1.],
                  [2., 4., 4., 2.],
                  [2., 4., 4., 2.],
                  [1., 2., 2., 1.]]]])

        >>> import torch
        >>> import pystiche
        >>> import pystiche_papers.li_wand_2016 as paper
        >>> input = torch.ones(1, 1, 4, 4).requires_grad_(True)
        >>> target = torch.zeros(1, 1, 4, 4).detach()
        >>> # with normalized gradient
        >>> input_patches = paper.extract_normalized_patches2d(
        ...     input, patch_size=2, stride=1
        ... )
        >>> target_patches = pystiche.extract_patches2d(
        ...     target, patch_size=2, stride=1
        ... )
        >>> loss = 0.5 * torch.sum((input_patches - target_patches) ** 2.0)
        >>> loss.backward()
        >>> input.grad
        tensor([[[[1., 1., 1., 1.],
                  [1., 1., 1., 1.],
                  [1., 1., 1., 1.],
                  [1., 1., 1., 1.]]]])

    Args:
        input: Input tensor of shape :math:`B \times C \times H \times W`
        patch_size: Patch size
        stride: Stride
    """
    patch_size = misc.to_2d_arg(patch_size)
    stride = misc.to_2d_arg(stride)
    # Normalize the gradient along each spatial dimension (dims 2 and 3).
    spatial_dims = range(2, input.dim())
    for dim, size, step in zip(spatial_dims, patch_size, stride):
        input = normalize_unfold_grad(input, dim, size, step)
    return pystiche.extract_patches2d(input, patch_size, stride)
def _create_motif_scaling_matrix(
    factor: Union[float, Tuple[float, float]]) -> torch.Tensor:
    """Build a 3x3 homogeneous scaling matrix from a (vertical, horizontal) factor."""
    factor_vert, factor_horz = to_2d_arg(factor)
    # Homogeneous coordinates: x scales by the horizontal factor, y by the vertical.
    rows = (
        (factor_horz, 0.0, 0.0),
        (0.0, factor_vert, 0.0),
        (0.0, 0.0, 1.0),
    )
    return torch.tensor(rows, dtype=torch.float32)