Code example #1
0
def test_PerceptualLoss_set_content_image():
    """Setting the content image on a PerceptualLoss forwards it to the content operator."""
    torch.manual_seed(0)
    content_image = torch.rand(1, 1, 100, 100)

    def single_channel_op():
        # 1x1 single-channel conv encoder keeps spatial size and channel count.
        return ops.FeatureReconstructionOperator(
            enc.SequentialEncoder((nn.Conv2d(1, 1, 1),))
        )

    content_loss = single_channel_op()
    style_loss = single_channel_op()

    joint_loss = loss.PerceptualLoss(content_loss, style_loss)
    joint_loss.set_content_image(content_image)

    # The image handed to the joint loss must land on the content operator.
    ptu.assert_allclose(content_loss.target_image, content_image)
Code example #2
0
def test_FeatureReconstructionOperator_call():
    """Calling the operator yields the MSE between encoded input and encoded target."""
    torch.manual_seed(0)
    target = torch.rand(1, 3, 128, 128)
    current = torch.rand(1, 3, 128, 128)
    encoder = enc.SequentialEncoder((nn.Conv2d(3, 3, 1),))

    op = ops.FeatureReconstructionOperator(encoder)
    op.set_target_image(target)

    # The operator score must match a manual MSE on the encoded images.
    expected = mse_loss(encoder(current), encoder(target))
    ptu.assert_allclose(op(current), expected)
Code example #3
0
File: model.py  Project: charlesjhill/lightning-flash
 def _get_perceptual_loss(
     self,
     *,
     backbone: str,
     content_layer: str,
     content_weight: float,
     style_layers: Sequence[str],
     style_weight: float,
 ) -> loss.PerceptualLoss:
     """Build a :class:`loss.PerceptualLoss` from a registered backbone.

     Args:
         backbone: key into ``self.backbones`` selecting the encoder factory.
         content_layer: layer whose features drive the content loss.
         content_weight: score weight of the content operator.
         style_layers: layers contributing to the style loss.
         style_weight: score weight of the combined style operator.

     Returns:
         A perceptual loss combining feature reconstruction (content) and
         per-layer gram losses (style) on the same multi-layer encoder.
     """
     # The backbone factory returns a pair; only the first element is the
     # multi-layer encoder. Unpack first and cast only that element — the
     # original cast wrongly claimed the whole pair was a MultiLayerEncoder.
     mle, _ = self.backbones.get(backbone)()
     mle = cast(enc.MultiLayerEncoder, mle)
     content_loss = ops.FeatureReconstructionOperator(
         mle.extract_encoder(content_layer), score_weight=content_weight
     )
     style_loss = ops.MultiLayerEncodingOperator(
         mle,
         style_layers,
         lambda encoder, layer_weight: self._modified_gram_loss(
             encoder, score_weight=layer_weight
         ),
         layer_weights="sum",
         score_weight=style_weight,
     )
     return loss.PerceptualLoss(content_loss, style_loss)
Code example #4
0
 def get_guided_perceptual_loss():
     """Assemble a GuidedPerceptualLoss with a minimal content encoder."""
     # Content branch: feature reconstruction on a 1x1 single-channel conv.
     encoder = enc.SequentialEncoder((nn.Conv2d(1, 1, 1),))
     content_loss = ops.FeatureReconstructionOperator(encoder)
     # Style branch: one operator per region, each built by get_op.
     style_loss = ops.MultiRegionOperator(regions, get_op)
     return loss.GuidedPerceptualLoss(content_loss, style_loss)
Code example #5
0
show_image(style_image)

########################################################################################
# Unguided image optimization
# ---------------------------
#
# As a baseline we use a default NST with a
# :class:`~pystiche.ops.FeatureReconstructionOperator` as ``content_loss`` and
# :class:`~pystiche.ops.GramOperator` s as ``style_loss``.

multi_layer_encoder = enc.vgg19_multi_layer_encoder()

# Content branch: reconstruct the features of a single deep VGG layer.
content_layer = "relu4_2"
content_weight = 1e0
content_encoder = multi_layer_encoder.extract_encoder(content_layer)
content_loss = ops.FeatureReconstructionOperator(
    content_encoder, score_weight=content_weight
)

# Style branch: Gram-matrix statistics taken across several shallow-to-deep layers.
style_layers = ("relu1_1", "relu2_1", "relu3_1", "relu4_1", "relu5_1")
style_weight = 1e4


def get_style_op(encoder, layer_weight):
    """Create a Gram-matrix style operator for a single encoder layer."""
    gram_op = ops.GramOperator(encoder, score_weight=layer_weight)
    return gram_op


# Combine one Gram operator per style layer into a single multi-layer style loss.
style_loss = ops.MultiLayerEncodingOperator(
    multi_layer_encoder, style_layers, get_style_op, score_weight=style_weight
)