Code example #1
def test_show_image_smoke(subtests, mocker, test_image_file, test_image):
    mocker.patch("pystiche.image.io._show_pil_image")
    image_.show_image(test_image)

    with subtests.test(image=test_image_file):
        image_.show_image(test_image_file)

    with subtests.test(image=None):
        with pytest.raises(TypeError):
            image_.show_image(None)

    with subtests.test(size=100):
        image_.show_image(test_image, size=100)

    with subtests.test(size=(100, 200)):
        image_.show_image(test_image, size=(100, 200))
Code example #2
#
#   By default the ``edge_sizes`` correspond to the shorter ``edge`` of the images. To
#   change that you can pass ``edge="long"``. For fine-grained control you can also
#   pass a sequence comprising ``"short"`` and ``"long"`` to select the ``edge`` for
#   each level separately.

edge_sizes = (300, 550)
num_steps = 200
pyramid = ImagePyramid(edge_sizes, num_steps, resize_targets=(criterion, ))
print(pyramid)
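########################################################################################
# A hedged aside, not part of the original example: the ``edge`` argument described
# above can also be passed explicitly, either once or per level. Only the keyword name
# ``edge`` is assumed here beyond what the text above states.

pyramid_long = ImagePyramid(edge_sizes, num_steps, edge="long", resize_targets=(criterion,))
pyramid_per_level = ImagePyramid(
    edge_sizes, num_steps, edge=("short", "long"), resize_targets=(criterion,)
)
print(pyramid_per_level)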

########################################################################################
# Next up, we load and show the images that will be used in the NST.

content_image = images["bird2"].read(device=device)
show_image(content_image, title="Content image")

########################################################################################

style_image = images["mosaic"].read(device=device)
show_image(style_image, title="Style image")

########################################################################################
# Although the images will be automatically resized during the optimization, you might
# want to resize them beforehand: if you are working with large source images, you
# might run out of memory while setting up the targets of the perceptual loss. In that
# case it is good practice to resize the images upfront to the largest size the
# ``pyramid`` will handle.

top_level = pyramid[-1]
content_image = top_level.resize_image(content_image)
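########################################################################################
# The paragraph above talks about resizing "the images"; presumably the ``style_image``
# is handled the same way. The line below is an assumption that simply mirrors the
# ``content_image`` call above.

style_image = top_level.resize_image(style_image)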
Code example #3
print(f"I'm working with pystiche=={pystiche.__version__}")

device = get_device()
print(f"I'm working with {device}")

########################################################################################
# As a first step, we load and show the images that will be used in the NST.

images = demo_images()
images.download()
size = 500

########################################################################################

content_image = images["castle"].read(size=size, device=device)
show_image(content_image)

########################################################################################

style_image = images["church"].read(size=size, device=device)
show_image(style_image)

########################################################################################
# Unguided image optimization
# ---------------------------
#
# As a baseline we use a default NST with a
# :class:`~pystiche.ops.FeatureReconstructionOperator` as ``content_loss`` and
# :class:`~pystiche.ops.GramOperator` s as ``style_loss``.

multi_layer_encoder = vgg19_multi_layer_encoder()
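########################################################################################
# A hedged sketch, not part of the original snippet, of how the two losses named above
# are typically assembled with the ``pystiche.ops`` API. The layer names, the weights,
# the helper ``get_style_op``, and the exact method and constructor signatures are
# assumptions for illustration only.

from pystiche import ops

content_encoder = multi_layer_encoder.extract_encoder("relu4_2")
content_loss = ops.FeatureReconstructionOperator(content_encoder, score_weight=1e0)

style_layers = ("relu1_1", "relu2_1", "relu3_1", "relu4_1", "relu5_1")

def get_style_op(encoder, layer_weight):
    # one GramOperator per selected layer of the multi-layer encoder
    return ops.GramOperator(encoder, score_weight=layer_weight)

style_loss = ops.MultiLayerEncodingOperator(
    multi_layer_encoder, style_layers, get_style_op, score_weight=1e3
)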
Code example #4
)

criterion = loss.PerceptualLoss(content_loss, style_loss).to(device)
print(criterion)

########################################################################################
# Next up, we load and show the images that will be used in the NST.

images = demo.images()
images.download()
size = 500

########################################################################################

content_image = images["bird2"].read(size=size, device=device)
show_image(content_image, title="Content image")
criterion.set_content_image(content_image)

########################################################################################

style_image = images["mosaic"].read(size=size, device=device)
show_image(style_image, title="Style image")
criterion.set_style_image(style_image)

########################################################################################
# Image optimization without pyramid
# ----------------------------------
#
# As a baseline we use a standard image optimization without pyramid.

starting_point = "content"
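########################################################################################
# A hedged sketch of how the baseline optimization usually proceeds from here. The
# helpers ``get_input_image`` and ``image_optimization`` (from ``pystiche.misc`` and
# ``pystiche.optim``) and the value of ``num_steps`` are assumptions for illustration.

from pystiche import misc, optim

input_image = misc.get_input_image(starting_point, content_image=content_image)
output_image = optim.image_optimization(input_image, criterion, num_steps=500)
show_image(output_image, title="Output image")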
Code example #5
perceptual_loss = perceptual_loss.to(device)
print(perceptual_loss)


########################################################################################
# Training
# --------
#
# As a first step, we load the style image that will be used to train the
# ``transformer``.

images = demo.images()
size = 500

style_image = images["paint"].read(size=size, device=device)
show_image(style_image)


########################################################################################
# The ``transformer`` is trained similarly to other models in PyTorch. In every
# optimization step a batch of content images is drawn from a dataset; it serves as the
# input for the ``transformer`` as well as the ``content_image`` for the
# ``perceptual_loss``. While the ``style_image`` only has to be set once, the
# ``content_image`` has to be reset in every iteration.
#
# While this can be done with a boilerplate optimization loop, ``pystiche`` provides
# :func:`~pystiche.optim.multi_epoch_model_optimization`, which handles the above for
# you (a hedged sketch of this call follows the note below).
#
# .. note::
#
#   If the ``perceptual_loss`` is a :class:`~pystiche.loss.PerceptualLoss`, as is the
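########################################################################################
# A hedged sketch of the training call described above. ``image_loader``, the number of
# ``epochs``, and the exact signature of ``multi_epoch_model_optimization`` are
# assumptions for illustration; check the ``pystiche.optim`` docs before relying on it.

transformer = multi_epoch_model_optimization(
    image_loader,       # batches of content images drawn from a dataset
    transformer,        # the model being trained
    perceptual_loss,    # criterion; its content image is reset in every step
    epochs=5,
)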
Code example #6
File: test_io.py Project: pystiche/pystiche
def test_show_image_size_smoke(patch_show_pil_image, test_image, size):
    image_.show_image(test_image, size=size)
Code example #7
File: test_io.py Project: pystiche/pystiche
def test_show_image_invalid_type():
    with pytest.raises(TypeError):
        image_.show_image(None)
Code example #8
File: test_io.py Project: pystiche/pystiche
def test_show_image_file_smoke(patch_show_pil_image, test_image_file):
    image_.show_image(test_image_file)
Code example #9
File: test_io.py Project: pystiche/pystiche
def test_show_image_tensor_smoke(patch_show_pil_image, test_image):
    image_.show_image(test_image)