Example #1
def li_wand_2016_perceptual_loss(
    impl_params: bool = True,
    multi_layer_encoder: Optional[MultiLayerEncoder] = None,
    content_loss_kwargs: Optional[Dict[str, Any]] = None,
    style_loss_kwargs: Optional[Dict[str, Any]] = None,
    regularization_kwargs: Optional[Dict[str, Any]] = None,
) -> PerceptualLoss:
    if multi_layer_encoder is None:
        multi_layer_encoder = li_wand_2016_multi_layer_encoder()

    if content_loss_kwargs is None:
        content_loss_kwargs = {}
    content_loss = li_wand_2016_content_loss(
        impl_params=impl_params,
        multi_layer_encoder=multi_layer_encoder,
        **content_loss_kwargs,
    )

    if style_loss_kwargs is None:
        style_loss_kwargs = {}
    style_loss = li_wand_2016_style_loss(
        impl_params=impl_params,
        multi_layer_encoder=multi_layer_encoder,
        **style_loss_kwargs,
    )

    if regularization_kwargs is None:
        regularization_kwargs = {}
    regularization = li_wand_2016_regularization(
        impl_params=impl_params, **regularization_kwargs
    )

    return PerceptualLoss(content_loss, style_loss, regularization=regularization)
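
The factory above only assembles the criterion; it still has to be moved to a device and given its target images before it can score an input. A minimal usage sketch, assuming ``device``, ``content_image``, ``style_image`` and ``input_image`` are defined elsewhere (the method names follow the later examples in this collection):

criterion = li_wand_2016_perceptual_loss()
criterion = criterion.to(device)

# register the target images, then score a candidate image
criterion.set_content_image(content_image)
criterion.set_style_image(style_image)
loss = criterion(input_image)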
Example #2
def gatys_ecker_bethge_2015_perceptual_loss(
    impl_params: bool = True,
    multi_layer_encoder: Optional[MultiLayerEncoder] = None,
    content_loss_kwargs: Optional[Dict[str, Any]] = None,
    style_loss_kwargs: Optional[Dict[str, Any]] = None,
) -> PerceptualLoss:
    if multi_layer_encoder is None:
        multi_layer_encoder = gatys_ecker_bethge_2015_multi_layer_encoder(
            impl_params=impl_params)

    if content_loss_kwargs is None:
        content_loss_kwargs = {}
    content_loss = gatys_ecker_bethge_2015_content_loss(
        impl_params=impl_params,
        multi_layer_encoder=multi_layer_encoder,
        **content_loss_kwargs,
    )

    if style_loss_kwargs is None:
        style_loss_kwargs = {}
    style_loss = gatys_ecker_bethge_2015_style_loss(
        impl_params=impl_params,
        multi_layer_encoder=multi_layer_encoder,
        **style_loss_kwargs,
    )

    return PerceptualLoss(content_loss, style_loss)
Example #3
def ulyanov_et_al_2016_perceptual_loss(
    impl_params: bool = True,
    instance_norm: bool = True,
    stylization: bool = True,
    multi_layer_encoder: Optional[MultiLayerEncoder] = None,
    content_loss_kwargs: Optional[Dict[str, Any]] = None,
    style_loss_kwargs: Optional[Dict[str, Any]] = None,
) -> PerceptualLoss:
    if multi_layer_encoder is None:
        multi_layer_encoder = ulyanov_et_al_2016_multi_layer_encoder()

    if style_loss_kwargs is None:
        style_loss_kwargs = {}
    style_loss = ulyanov_et_al_2016_style_loss(
        impl_params=impl_params,
        instance_norm=instance_norm,
        stylization=stylization,
        multi_layer_encoder=multi_layer_encoder,
        **style_loss_kwargs,
    )

    if stylization:
        if content_loss_kwargs is None:
            content_loss_kwargs = {}
        content_loss = ulyanov_et_al_2016_content_loss(
            impl_params=impl_params,
            instance_norm=instance_norm,
            multi_layer_encoder=multi_layer_encoder,
            **content_loss_kwargs,
        )
    else:
        content_loss = None

    return PerceptualLoss(content_loss, style_loss)
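
Note the ``stylization`` flag above: with ``stylization=False`` no content loss is created and the criterion only scores the style, i.e. plain texture synthesis. A hedged sketch of that mode (``style_image`` is assumed to be defined elsewhere):

texture_criterion = ulyanov_et_al_2016_perceptual_loss(stylization=False)
texture_criterion.set_style_image(style_image)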
Example #4
def johnson_alahi_li_2016_perceptual_loss(
    impl_params: bool = True,
    instance_norm: bool = True,
    style: Optional[str] = None,
    multi_layer_encoder: Optional[MultiLayerEncoder] = None,
    content_loss_kwargs: Optional[Dict[str, Any]] = None,
    style_loss_kwargs: Optional[Dict[str, Any]] = None,
    total_variation_kwargs: Optional[Dict[str, Any]] = None,
) -> JohnsonAlahiLi2016PerceptualLoss:
    if multi_layer_encoder is None:
        multi_layer_encoder = johnson_alahi_li_2016_multi_layer_encoder(
            impl_params=impl_params)

    if content_loss_kwargs is None:
        content_loss_kwargs = {}
    content_loss = johnson_alahi_li_2016_content_loss(
        impl_params=impl_params,
        instance_norm=instance_norm,
        style=style,
        multi_layer_encoder=multi_layer_encoder,
        **content_loss_kwargs,
    )

    if style_loss_kwargs is None:
        style_loss_kwargs = {}
    style_loss = johnson_alahi_li_2016_style_loss(
        impl_params=impl_params,
        instance_norm=instance_norm,
        style=style,
        multi_layer_encoder=multi_layer_encoder,
        **style_loss_kwargs,
    )

    if total_variation_kwargs is None:
        total_variation_kwargs = {}
    regularization = johnson_alahi_li_2016_regularization(
        instance_norm=instance_norm, style=style, **total_variation_kwargs)

    return PerceptualLoss(content_loss,
                          style_loss,
                          regularization=regularization)
Example #5
def get_style_op(encoder, layer_weight):
    patch_size = 3
    return MRFOperator(encoder,
                       patch_size,
                       stride=2,
                       score_weight=layer_weight)


style_loss = MultiLayerEncodingOperator(
    multi_layer_encoder,
    style_layers,
    get_style_op,
    score_weight=style_weight,
)

criterion = PerceptualLoss(content_loss, style_loss).to(device)
print(criterion)

########################################################################################
# In contrast to the prior examples, we want to perform an NST on multiple resolutions.
# In ``pystiche`` this is handled by an :class:`~pystiche.pyramid.ImagePyramid`. The
# resolutions are selected by specifying the ``edge_sizes`` of the images on each
# level. The optimization is performed for ``num_steps`` on each of these levels.
#
# The resizing of all images, i.e. the ``input_image`` and the target images
# (``content_image`` and ``style_image``), is handled by the ``pyramid``. For that we
# need to register the perceptual loss (``criterion``) in the ``resize_targets``; see
# the sketch after this note.
#
# .. note::
#
#   By default the ``edge_sizes`` correspond to the shorter ``edge`` of the images.
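#
# A minimal sketch of the pyramid setup described above; the concrete ``edge_sizes``
# and ``num_steps`` values are illustrative only, not taken from the example.

from pystiche.pyramid import ImagePyramid

# two levels: a coarse pass followed by a finer one; registering the criterion as a
# resize target keeps its stored target images in sync with the current resolution
edge_sizes = (500, 700)
num_steps = (500, 200)
pyramid = ImagePyramid(edge_sizes, num_steps, resize_targets=(criterion,))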
Example #6
style_layers = ("relu1_1", "relu2_1", "relu3_1", "relu4_1", "relu5_1")
style_weight = 1e4


def get_style_op(encoder, layer_weight):
    return GramOperator(encoder, score_weight=layer_weight)


style_loss = MultiLayerEncodingOperator(
    multi_layer_encoder,
    style_layers,
    get_style_op,
    score_weight=style_weight,
)

criterion = PerceptualLoss(content_loss, style_loss).to(device)
print(criterion)

########################################################################################
# We set the target images for the optimization ``criterion``.

criterion.set_content_image(content_image)
criterion.set_style_image(style_image)

########################################################################################
# We perform the unguided NST and show the result.

starting_point = "content"
input_image = get_input_image(starting_point, content_image=content_image)

# assuming the loop takes the input image and the criterion as its first two arguments
output_image = default_image_optim_loop(input_image, criterion)
Example #7
def criterion_update_fn(  # type: ignore[misc]
    input_image: torch.Tensor,
    criterion: loss_.PerceptualLoss,
) -> None:
    criterion.set_content_image(input_image)
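
This update function is meant to be called by a training loop once per batch: before the criterion scores the stylized output, the content loss is re-targeted to the current input. A hypothetical sketch of such a loop (``image_loader``, ``device`` and ``transformer`` are placeholders, not part of the example above):

for input_image in image_loader:
    input_image = input_image.to(device)
    # re-target the content loss to the current batch
    criterion_update_fn(input_image, criterion)
    output_image = transformer(input_image)
    loss = criterion(output_image)
    # ... followed by the usual zero_grad() / backward / optimizer step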