Code Example #1
File: utils.py  Project: sourcery-ai-bot/pystiche
# Imports assumed from the surrounding module (not shown in this excerpt):
from torch import nn

import pystiche
from pystiche.enc import MultiLayerEncoder, vgg19_multi_layer_encoder


def gatys_ecker_bethge_2015_multi_layer_encoder(
    impl_params: bool = True,
) -> MultiLayerEncoder:
    multi_layer_encoder = vgg19_multi_layer_encoder(
        weights="caffe", preprocessing=False, allow_inplace=True
    )
    # The reference implementation keeps the max pooling layers; the paper
    # reports that average pooling yields slightly better results.
    if impl_params:
        return multi_layer_encoder

    for name, module in multi_layer_encoder.named_children():
        if isinstance(module, nn.MaxPool2d):
            multi_layer_encoder._modules[name] = nn.AvgPool2d(
                **pystiche.pool_module_meta(module)
            )
    return multi_layer_encoder
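``pystiche.pool_module_meta`` extracts the constructor arguments of a pooling layer so that an equivalent layer of another type can be built. A minimal sketch of the idea (hypothetical reimplementation, not pystiche's actual code):

# Hypothetical sketch of a pool_module_meta-style helper: read the
# constructor arguments off an existing pooling module.
from torch import nn

def pool_meta(module: nn.MaxPool2d) -> dict:
    return {
        "kernel_size": module.kernel_size,
        "stride": module.stride,
        "padding": module.padding,
    }

# Build an average pooling layer with the same geometry as a max pooling layer.
avg_pool = nn.AvgPool2d(**pool_meta(nn.MaxPool2d(kernel_size=2, stride=2)))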
Code Example #2
# Imports assumed from the surrounding module (not shown in this excerpt):
from torch import nn

from pystiche import enc, meta


def multi_layer_encoder(impl_params: bool = True) -> enc.MultiLayerEncoder:
    r"""Multi-layer encoder from :cite:`GEB2016`.

    Args:
        impl_params: If ``False``, the :class:`~torch.nn.MaxPool2d` layers in
            the ``multi_layer_encoder`` are exchanged for :class:`~torch.nn.AvgPool2d`.

    """
    # TODO: check if the encoder used inplace ops
    multi_layer_encoder_ = enc.vgg19_multi_layer_encoder(
        framework="caffe", internal_preprocessing=False, allow_inplace=True
    )
    if impl_params:
        return multi_layer_encoder_

    for name, module in multi_layer_encoder_.named_children():
        if isinstance(module, nn.MaxPool2d):
            multi_layer_encoder_._modules[name] = nn.AvgPool2d(
                **meta.pool_module_meta(module)
            )
    return multi_layer_encoder_
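A brief usage sketch contrasting the two configurations (hypothetical, assuming the function above is importable):

# Hypothetical usage: with impl_params=False (the paper configuration) no
# max pooling should remain in the encoder.
from torch import nn

mle_impl = multi_layer_encoder(impl_params=True)
mle_paper = multi_layer_encoder(impl_params=False)
assert any(isinstance(m, nn.MaxPool2d) for m in mle_impl.modules())
assert not any(isinstance(m, nn.MaxPool2d) for m in mle_paper.modules())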
Code Example #3
# Additional imports assumed from the full example script (not shown in this
# excerpt); module paths follow the pystiche version this snippet targets:
import pystiche
from pystiche.demo import demo_images
from pystiche.enc import vgg19_multi_layer_encoder
from pystiche.misc import get_device
from pystiche.ops import FeatureReconstructionOperator, MRFOperator
from pystiche.optim import default_image_pyramid_optim_loop
from pystiche.pyramid import ImagePyramid

print(f"I'm working with pystiche=={pystiche.__version__}")

device = get_device()
print(f"I'm working with {device}")

images = demo_images()
images.download()

########################################################################################
# First we define a :class:`~pystiche.loss.perceptual.PerceptualLoss` that is used
# as the optimization ``criterion``.

multi_layer_encoder = vgg19_multi_layer_encoder()

content_layer = "relu4_2"
content_encoder = multi_layer_encoder.extract_single_layer_encoder(
    content_layer)
content_weight = 1e0
content_loss = FeatureReconstructionOperator(content_encoder,
                                             score_weight=content_weight)

style_layers = ("relu3_1", "relu4_1")
style_weight = 2e0


def get_style_op(encoder, layer_weight):
    patch_size = 3
    # The excerpt is truncated here; a plausible completion based on
    # pystiche's MRF examples (the stride value is an assumption):
    return MRFOperator(encoder, patch_size, stride=2, score_weight=layer_weight)
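The excerpt breaks off before the ``criterion`` announced above is assembled. A hedged sketch of the continuation, assuming the old pystiche API (``MultiLayerEncodingOperator`` and ``PerceptualLoss`` are assumed imports, not shown in the excerpt):

# Hypothetical continuation: combine the content and style terms into the
# perceptual loss that serves as optimization criterion.
from pystiche.loss import PerceptualLoss
from pystiche.ops import MultiLayerEncodingOperator

style_loss = MultiLayerEncodingOperator(
    multi_layer_encoder, style_layers, get_style_op, score_weight=style_weight
)
criterion = PerceptualLoss(content_loss, style_loss).to(device)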
Code Example #4
# .. note::
#
#   By default, :func:`~pystiche.enc.vgg19_multi_layer_encoder` adds an
#   ``internal_preprocessing`` step so that the user can simply pass the image
#   as is, without worrying about it. We disable this here to enable a comparison.
#
# .. note::
#
#   By default, :func:`~pystiche.enc.vgg19_multi_layer_encoder` disallows in-place
#   operations since after they are carried out, the previous encoding is no longer
#   accessible. In order to enable a fair performance comparison, we allow them here,
#   since they are also used in :func:`~torchvision.models.vgg19`.
#
# .. note::
#
#   The fully connected stage of the original VGG19 architecture requires the input
#   to be exactly 224 pixels wide and high :cite:`SZ2014`. Since this requirement
#   usually cannot be met in an NST, the built-in multi-layer encoder comprises only
#   the size-invariant convolutional stage. Thus, we use only ``vgg19().features``
#   to enable a comparison.

# Imports assumed from the full example (not shown in this excerpt); ``device``
# and ``fdifftimeit`` are defined earlier in the full example:
import torch
from torchvision import models

from pystiche import enc

seq = models.vgg19()
mle = enc.vgg19_multi_layer_encoder(
    pretrained=False, internal_preprocessing=False, allow_inplace=True
)
mle.load_state_dict(seq.state_dict())

input = torch.rand((4, 3, 256, 256), device=device)

assert torch.allclose(mle(input), seq.features(input))
print(fdifftimeit(lambda: seq.features(input), lambda: mle(input)))
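``fdifftimeit`` is defined earlier in the full example. A minimal sketch of such a helper, assuming it simply reports the average runtime of both callables (hypothetical reimplementation, not pystiche's actual code):

# Hypothetical stand-in for fdifftimeit: time two callables with timeit and
# report their average runtimes in milliseconds.
import timeit

def fdifftimeit_sketch(seq_fn, mle_fn, number=10) -> str:
    seq_time = timeit.timeit(seq_fn, number=number) / number
    mle_time = timeit.timeit(mle_fn, number=number) / number
    return f"seq: {seq_time * 1e3:.2f} ms, mle: {mle_time * 1e3:.2f} ms"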
Code Example #5
File: utils.py  Project: sourcery-ai-bot/pystiche
def li_wand_2016_multi_layer_encoder() -> MultiLayerEncoder:
    return vgg19_multi_layer_encoder(weights="caffe",
                                     preprocessing=False,
                                     allow_inplace=True)
Code Example #6
File: utils.py  Project: jbueltemeier/pystiche
def ulyanov_et_al_2016_multi_layer_encoder() -> MultiLayerEncoder:
    return vgg19_multi_layer_encoder(weights="caffe", allow_inplace=True)
Code Example #7
def multi_layer_encoder() -> enc.MultiLayerEncoder:
    r"""Multi-layer encoder from :cite:`LW2016`."""
    return enc.vgg19_multi_layer_encoder(framework="caffe",
                                         internal_preprocessing=False,
                                         allow_inplace=True)
Code Example #8
# Imports assumed from the full example (not shown in this excerpt):
from pystiche import enc
from pystiche.misc import get_device

device = get_device()
print(f"I'm working with {device}")


########################################################################################
# Multi-layer Encoder
# -------------------
# The ``content_loss`` and the ``style_loss`` operate on the encodings of an image
# rather than on the image itself. These encodings are generated by a pretrained
# encoder. Since we will be using encodings from multiple layers, we load a
# multi-layer encoder. In this example we use the
# :func:`~pystiche.enc.vgg19_multi_layer_encoder`, which is based on the VGG19
# architecture introduced by Simonyan and Zisserman :cite:`SZ2014`.

multi_layer_encoder = enc.vgg19_multi_layer_encoder()
print(multi_layer_encoder)


########################################################################################
# Perceptual Loss
# ---------------
#
# The core components of every NST are the ``content_loss`` and the ``style_loss``.
# Combined they make up the perceptual loss, i.e. the optimization criterion.
#
# In this example we use the :class:`~pystiche.loss.FeatureReconstructionLoss`
# introduced by Mahendran and Vedaldi :cite:`MV2015` as ``content_loss``. We first
# extract the ``content_encoder`` that generates encodings from the ``content_layer``.
# Together with the ``content_weight`` we can construct the ``content_loss``.
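The excerpt ends before that code. A hedged sketch of the construction the text describes, following the pystiche API the example documents (the layer and weight values mirror those used elsewhere in this section):

# Hypothetical continuation: extract the content encoder and build the
# content loss from it.
from pystiche import loss

content_layer = "relu4_2"
content_encoder = multi_layer_encoder.extract_encoder(content_layer)
content_weight = 1e0
content_loss = loss.FeatureReconstructionLoss(
    content_encoder, score_weight=content_weight
)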
Code Example #9
File: utils.py  Project: sourcery-ai-bot/pystiche
def gatys_et_al_2017_multi_layer_encoder() -> MultiLayerEncoder:
    return vgg19_multi_layer_encoder(weights="caffe",
                                     preprocessing=False,
                                     allow_inplace=True)