Example #1
def test_image_optimization_optimizer_preprocessor():
    input_image = torch.empty(1)
    criterion = nn.Module()
    optimizer = optim.default_image_optimizer(input_image)
    preprocessor = nn.Module()

    with pytest.raises(RuntimeError):
        optim.image_optimization(input_image,
                                 criterion,
                                 optimizer,
                                 preprocessor=preprocessor)
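The ``RuntimeError`` in this test is expected: a ``preprocessor`` replaces ``input_image``
before the optimization starts, so an optimizer that was already built for the original
tensor would update the wrong parameters, and ``image_optimization`` rejects the
combination. Below is a minimal sketch of the two ways the ``optimizer`` argument can be
passed; it only assumes that ``pystiche`` is installed and does not run an optimization.

import torch

from pystiche import optim

input_image = torch.rand(1, 3, 32, 32)

# An optimizer *instance* is tied to this exact tensor ...
optimizer_instance = optim.default_image_optimizer(input_image)

# ... whereas passing the factory itself lets image_optimization build the optimizer
# only after the preprocessor has produced the tensor that is actually optimized.
optimizer_getter = optim.default_image_optimizer

With a ``preprocessor``, only the getter form is accepted, which is exactly what the test
above asserts.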
Example #2
def test_default_image_optim_loop(optim_asset_loader):
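    # `optim_asset_loader` is a test fixture that loads a recorded asset bundling the
    # inputs, the optimization parameters, and the expected output image.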
    asset = optim_asset_loader("default_image_optim_loop")

    actual = optim.image_optimization(
        asset.input.image,
        asset.input.perceptual_loss,
        optimizer=asset.params.get_optimizer,
        num_steps=asset.params.num_steps,
        quiet=True,
    )
    desired = asset.output.image
    ptu.assert_allclose(actual, desired, rtol=1e-4)
Example #3
def nst(
    content_image: torch.Tensor,
    style_image: torch.Tensor,
    impl_params: bool = True,
    hyper_parameters: Optional[HyperParameters] = None,
    quiet: bool = False,
) -> torch.Tensor:
    r"""NST from :cite:`GEB2016`.

    Args:
        content_image: Content image for the NST.
        style_image: Style image for the NST.
        impl_params: Switch the behavior and hyper-parameters between the reference
            implementation of the original authors and what is described in the paper.
            For details see :ref:`here <gatys_ecker_bethge_2016-impl_params>`.
        hyper_parameters: If omitted,
            :func:`~pystiche_papers.gatys_ecker_bethge_2016.hyper_parameters` is used.
        quiet: If ``True``, no information is logged during the optimization. Defaults
            to ``False``.
    """
    if hyper_parameters is None:
        hyper_parameters = _hyper_parameters()

    device = content_image.device

    criterion = perceptual_loss(impl_params=impl_params,
                                hyper_parameters=hyper_parameters)
    criterion = criterion.to(device)

    input_image = misc.get_input_image(
        starting_point=hyper_parameters.nst.starting_point,
        content_image=content_image)

    preprocessor = _preprocessor().to(device)
    postprocessor = _postprocessor().to(device)

    criterion.set_content_image(preprocessor(content_image))
    criterion.set_style_image(preprocessor(style_image))

    torch.autograd.set_detect_anomaly(True)
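    # Note: anomaly detection (enabled above) helps locate NaN/Inf-producing operations
    # during backprop, but it slows the optimization down noticeably; it is usually
    # only enabled for debugging.
    # `optimizer` below is not defined in this excerpt; presumably it refers to the
    # module-level optimizer factory defined alongside this function in the package
    # (Gatys et al. optimize with L-BFGS).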

    return optim.image_optimization(
        input_image,
        criterion,
        optimizer=optimizer,
        num_steps=hyper_parameters.nst.num_steps,
        preprocessor=preprocessor,
        postprocessor=postprocessor,
        quiet=quiet,
    )
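For context, here is a minimal usage sketch for ``nst``. It is an illustration, not part
of the excerpt above: the ``pystiche.demo`` image collection and the image names are
assumptions, and any pair of content/style tensors on the same device works just as well.

import torch

from pystiche import demo
from pystiche_papers.gatys_ecker_bethge_2016 import nst

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Assumed demo helpers; substitute your own images if these names are unavailable.
images = demo.images()
content_image = images["bird1"].read(size=500, device=device)
style_image = images["paint"].read(size=500, device=device)

output_image = nst(content_image, style_image, quiet=False)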
Example #4
def test_default_image_optim_loop_processing(optim_asset_loader):
    asset = optim_asset_loader("default_image_optim_loop_processing")

    actual = optim.image_optimization(
        asset.input.image,
        asset.input.criterion,
        optimizer=asset.params.get_optimizer,
        num_steps=asset.params.num_steps,
        preprocessor=asset.params.preprocessor,
        postprocessor=asset.params.postprocessor,
        quiet=True,
    )
    desired = asset.output.image
    ptu.assert_allclose(actual, desired, rtol=1e-4)
Example #5
def main(raw_args: Optional[List[str]] = None) -> None:
    args = parse_args(raw_args)
    config = make_config(args)
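    # `config` bundles the parsed CLI options together with the loaded images and the
    # assembled perceptual loss (see `make_config`, not shown in this excerpt).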

    config.perceptual_loss.set_content_image(config.content_image)
    config.perceptual_loss.set_style_image(config.style_image)

    output_image = image_optimization(config.input_image,
                                      config.perceptual_loss,
                                      num_steps=config.num_steps)

    if not config.output_image_specified:
        print(f"Saving result to {config.output_image}")
    write_image(output_image, config.output_image)
# .. note::
#
#   If you want to start from a white noise image instead, use
#   ``starting_point = "random"``:
#
#   .. code-block:: python
#
#     starting_point = "random"
#     input_image = get_input_image(starting_point, content_image=content_image)


########################################################################################
# Finally we run the NST with the :func:`~pystiche.optim.image_optimization` for
# ``num_steps=500`` steps.
#
# In every step the ``perceptual_loss`` is calculated and propagated backward to the
# pixels of the ``input_image``. If ``optimizer`` is not specified, as is the case
# here, the :func:`~pystiche.optim.default_image_optimizer`, i.e.
# :class:`~torch.optim.LBFGS`, is used.

output_image = optim.image_optimization(input_image, perceptual_loss, num_steps=500)
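########################################################################################
# If you want to control the optimizer yourself, it can be passed explicitly. The
# snippet below is only an illustrative sketch (it is not part of the original example)
# and simply spells out the default, i.e. an :class:`~torch.optim.LBFGS` instance
# created by :func:`~pystiche.optim.default_image_optimizer`:
#
#   .. code-block:: python
#
#     optimizer = optim.default_image_optimizer(input_image)
#     output_image = optim.image_optimization(
#         input_image, perceptual_loss, optimizer=optimizer, num_steps=500
#     )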


########################################################################################
# After the NST is complete, we show the result.

# sphinx_gallery_thumbnail_number = 4
show_image(output_image, title="Output image")


########################################################################################
# Conclusion
# ----------
#
# If you started with the basic NST example without ``pystiche``, this example
# hopefully convinced you that ``pystiche`` is a helpful tool. But this was just the
# beginning.
criterion = loss.PerceptualLoss(content_loss, style_loss).to(device)
print(criterion)

########################################################################################
# We set the target images for the optimization ``criterion``.

criterion.set_content_image(content_image)
criterion.set_style_image(style_image)

########################################################################################
# We perform the unguided NST and show the result.

starting_point = "content"
input_image = get_input_image(starting_point, content_image=content_image)

output_image = optim.image_optimization(input_image, criterion, num_steps=500)

########################################################################################

show_image(output_image)

########################################################################################
# While the result is not completely unreasonable, the building has a strong bluish
# cast that looks unnatural. Since the optimization was unconstrained, the color of the
# sky was used for the building. In the remainder of this example we will solve this by
# dividing the images into multiple separate regions.

########################################################################################
# Guided image optimization
# -------------------------
#