Code example #1
File: _loss.py  Project: jbueltemeier/pystiche_papers
def content_loss(
    impl_params: bool = True,
    multi_layer_encoder: Optional[enc.MultiLayerEncoder] = None,
    hyper_parameters: Optional[HyperParameters] = None,
) -> loss.FeatureReconstructionLoss:
    r"""Content loss from :cite:`JAL2016`.

    Args:
        impl_params: Switch the behavior and hyper-parameters between the reference
            implementation of the original authors and what is described in the paper.
            For details see :ref:`here <johnson_alahi_li_2016-impl_params>`.
        multi_layer_encoder: Pretrained :class:`~pystiche.enc.MultiLayerEncoder`. If
            omitted, the default
            :func:`~pystiche_papers.johnson_alahi_li_2016.multi_layer_encoder` is used.
        hyper_parameters: If omitted,
            :func:`~pystiche_papers.johnson_alahi_li_2016.hyper_parameters` is used.

    """
    if multi_layer_encoder is None:
        multi_layer_encoder = _multi_layer_encoder(impl_params=impl_params)

    if hyper_parameters is None:
        hyper_parameters = _hyper_parameters()

    return loss.FeatureReconstructionLoss(
        multi_layer_encoder.extract_encoder(
            hyper_parameters.content_loss.layer),
        score_weight=hyper_parameters.content_loss.score_weight,
    )
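
A hedged usage sketch (not part of the excerpt; the `torch` import and random images are illustrative only):

import torch

criterion = content_loss()  # defaults: reference-implementation behavior
target_image = torch.rand(1, 3, 256, 256)  # placeholder content target
input_image = torch.rand(1, 3, 256, 256)
criterion.set_target_image(target_image)  # same API as in example #3 below
score = criterion(input_image)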
Code example #2
    def test_set_style_image(self):
        torch.manual_seed(0)
        image = torch.rand(1, 1, 100, 100)
        content_loss = loss.FeatureReconstructionLoss(
            enc.SequentialEncoder((nn.Conv2d(1, 1, 1),))
        )
        style_loss = loss.FeatureReconstructionLoss(
            enc.SequentialEncoder((nn.Conv2d(1, 1, 1),))
        )

        perceptual_loss = loss.PerceptualLoss(content_loss, style_loss)
        perceptual_loss.set_style_image(image)

        actual = style_loss.target_image
        desired = image
        ptu.assert_allclose(actual, desired)
Code example #3
File: test_comparison.py  Project: pystiche/pystiche
    def test_call(self, encoder):
        torch.manual_seed(0)
        target_image = torch.rand(1, 3, 128, 128)
        input_image = torch.rand(1, 3, 128, 128)

        loss = loss_.FeatureReconstructionLoss(encoder)
        loss.set_target_image(target_image)

        actual = loss(input_image)
        desired = F.mse_loss(encoder(input_image), encoder(target_image))
        ptu.assert_allclose(actual, desired)
Code example #4
File: model.py  Project: stjordanis/lightning-flash
def _get_perceptual_loss(
    self,
    *,
    backbone: str,
    content_layer: str,
    content_weight: float,
    style_layers: Sequence[str],
    style_weight: float,
) -> loss.PerceptualLoss:
    mle, _ = cast(enc.MultiLayerEncoder, self.backbones.get(backbone)())
    content_loss = loss.FeatureReconstructionLoss(
        mle.extract_encoder(content_layer), score_weight=content_weight
    )
    style_loss = loss.MultiLayerEncodingLoss(
        mle,
        style_layers,
        lambda encoder, layer_weight: self._modified_gram_loss(
            encoder, score_weight=layer_weight
        ),
        layer_weights="sum",
        score_weight=style_weight,
    )
    return loss.PerceptualLoss(content_loss, style_loss)
Code example #5
File: _loss.py  Project: jbueltemeier/pystiche_papers
def content_loss(
    impl_params: bool = True,
    instance_norm: bool = True,
    multi_layer_encoder: Optional[enc.MultiLayerEncoder] = None,
    hyper_parameters: Optional[HyperParameters] = None,
) -> loss.FeatureReconstructionLoss:
    r"""Content loss from :cite:`ULVL2016,UVL2017`.

    Args:
        impl_params: Switch the behavior and hyper-parameters between the reference
            implementation of the original authors and what is described in the paper.
            For details see :ref:`here <ulyanov_et_al_2016-impl_params>`.
        instance_norm: Switch the behavior and hyper-parameters between both
            publications of the original authors. For details see
            :ref:`here <ulyanov_et_al_2016-instance_norm>`.
        multi_layer_encoder: Pretrained :class:`~pystiche.enc.MultiLayerEncoder`. If
            omitted, :func:`~pystiche_papers.ulyanov_et_al_2016.multi_layer_encoder`
            is used.
        hyper_parameters: Hyper parameters. If omitted,
            :func:`~pystiche_papers.ulyanov_et_al_2016.hyper_parameters` is used.

    .. seealso::

        - :class:`pystiche.loss.FeatureReconstructionLoss`
    """
    if multi_layer_encoder is None:
        multi_layer_encoder = _multi_layer_encoder()

    if hyper_parameters is None:
        hyper_parameters = _hyper_parameters(impl_params=impl_params,
                                             instance_norm=instance_norm)

    return loss.FeatureReconstructionLoss(
        multi_layer_encoder.extract_encoder(
            hyper_parameters.content_loss.layer),
        score_weight=hyper_parameters.content_loss.score_weight,
    )
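
A hedged sketch of the two switches (flag values are illustrative; the interpretation follows the argument descriptions above):

# Paper-described behavior of the earlier of the two publications instead of
# the reference implementation, per the docstring.
criterion = content_loss(impl_params=False, instance_norm=False)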
Code example #6
# Perceptual Loss
# ---------------
#
# The core components of every NST are the ``content_loss`` and the ``style_loss``.
# Combined they make up the perceptual loss, i.e. the optimization criterion.
#
# In this example we use the :class:`~pystiche.loss.FeatureReconstructionLoss`
# introduced by Mahendran and Vedaldi :cite:`MV2015` as ``content_loss``. We first
# extract the ``content_encoder`` that generates encodings from the ``content_layer``.
# Together with the ``content_weight`` we can construct the ``content_loss``.

content_layer = "relu4_2"
content_encoder = multi_layer_encoder.extract_encoder(content_layer)
content_weight = 1e0
content_loss = loss.FeatureReconstructionLoss(
    content_encoder, score_weight=content_weight
)
print(content_loss)


########################################################################################
# We use the :class:`~pystiche.loss.GramLoss` introduced by Gatys, Ecker, and Bethge
# :cite:`GEB2016` as ``style_loss``. Unlike before, we use multiple ``style_layers``.
# The individual losses can be conveniently bundled in a
# :class:`~pystiche.loss.MultiLayerEncodingLoss`.

style_layers = ("relu1_1", "relu2_1", "relu3_1", "relu4_1", "relu5_1")
style_weight = 1e3


def get_style_op(encoder, layer_weight):
    # Hedged completion of the truncated snippet: the prose above names
    # GramLoss as the per-layer style loss.
    return loss.GramLoss(encoder, score_weight=layer_weight)
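

########################################################################################
# The excerpt is truncated here; a hedged continuation (sketch, not verbatim from the
# gallery): bundle ``get_style_op`` into a
# :class:`~pystiche.loss.MultiLayerEncodingLoss` and combine both losses into a
# :class:`~pystiche.loss.PerceptualLoss`, reusing the constructors shown in examples
# #2 and #4. ``multi_layer_encoder`` is assumed to be defined earlier in the script.

style_loss = loss.MultiLayerEncodingLoss(
    multi_layer_encoder, style_layers, get_style_op, score_weight=style_weight
)
perceptual_loss = loss.PerceptualLoss(content_loss, style_loss)
print(perceptual_loss)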
Code example #7
def get_guided_perceptual_loss():
    content_loss = loss.FeatureReconstructionLoss(
        enc.SequentialEncoder((nn.Conv2d(1, 1, 1),))
    )
    style_loss = ops.MultiRegionOperator(regions, get_op)
    return loss.GuidedPerceptualLoss(content_loss, style_loss)
Code example #8
        raise ValueError(
            add_suggestion(
                f"Unknown multi-layer encoder '{mle_str}'.",
                word=mle_str,
                possibilities=MLES.keys(),
            )) from error

    return mle_fn()


# This needs to be filled manually, since some losses such as MRF need more parameters
# than just encoder and score_weight
LOSSES = {
    "featurereconstruction": (
        "FeatureReconstruction",
        lambda encoder, score_weight: loss.FeatureReconstructionLoss(
            encoder, score_weight=score_weight),
    ),
    "gram": (
        "Gram",
        lambda encoder, score_weight: loss.GramLoss(encoder,
                                                    score_weight=score_weight),
    ),
    "mrf": (
        "MRF",
        lambda encoder, score_weight: loss.MRFLoss(
            encoder, patch_size=3, stride=2, score_weight=score_weight),
    ),
}
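
A hedged lookup sketch (the key, `encoder`, and the weight value are illustrative placeholders, not part of the excerpt):

# Resolve a loss factory from the registry and instantiate it.
name, make = LOSSES["gram"]
criterion = make(encoder, 1e3)  # encoder: any pystiche encoder instance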


def make_loss(
Code example #9
File: test_comparison.py  Project: pystiche/pystiche
def test_repr_smoke(self, encoder):
    assert isinstance(repr(loss_.FeatureReconstructionLoss(encoder)), str)