Code Example #1
def test_TotalVariationLoss(input_image):
    loss = paper.TotalVariationLoss()
    actual = loss(input_image)

    desired = F.total_variation_loss(input_image, reduction="sum")

    assert actual == ptu.approx(desired)
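These snippets are taken from the pystiche test suite, so their module-level imports are omitted. The following is a rough sketch of the imports they assume; the exact module paths, in particular those behind paper, utils, and F, are assumptions and differ between pystiche / pystiche_papers releases.

# Imports assumed by the snippets on this page; module paths are approximate
# and may differ between pystiche / pystiche_papers releases.
from math import sqrt

import pytest
import pytorch_testing_utils as ptu  # ptu.approx: pytest.approx-style comparisons for tensors

import torch
from torch import nn, optim
from torch.nn.functional import mse_loss

import pystiche
from pystiche import misc, ops
import pystiche.loss.functional as F  # pystiche.ops.functional in older releases

# "paper" stands for one of the paper replication packages, e.g.
# import pystiche_papers.gatys_ecker_bethge_2016 as paper
# "utils" refers to the utility module of the corresponding paper package.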
Code Example #2
def test_approx():
    rel = 1e-6
    abs = 0.0
    approx_value = ptu.approx(torch.tensor(1.0), rel=rel, abs=abs)

    # 1.0 + 0.9 * rel lies within the relative tolerance, so the comparison holds in both directions
    assert 1.0 + 0.9 * rel == approx_value
    assert approx_value == 1.0 + 0.9 * rel
Code Example #3
def test_MRFLoss(subtests, multi_layer_encoder_with_layer, target_image,
                 input_image):
    multi_layer_encoder, layer = multi_layer_encoder_with_layer
    encoder = multi_layer_encoder.extract_encoder(layer)
    target_enc = encoder(target_image)
    input_enc = encoder(input_image)

    patch_size = 3
    stride = 1
    configs = ((True, 1.0 / 2.0), (False, 1.0))
    for impl_params, score_correction_factor in configs:
        with subtests.test(impl_params=impl_params):

            loss = paper.MRFLoss(encoder,
                                 patch_size,
                                 impl_params=impl_params,
                                 stride=stride)
            loss.set_target_image(target_image)
            actual = loss(input_image)

            extract_patches2d = (paper.extract_normalized_patches2d if
                                 impl_params else pystiche.extract_patches2d)
            target_repr = extract_patches2d(target_enc, patch_size, stride)
            input_repr = extract_patches2d(input_enc, patch_size, stride)

            score = F.mrf_loss(input_repr,
                               target_repr,
                               reduction="sum",
                               batched_input=True)
            desired = score * score_correction_factor

            assert actual == ptu.approx(desired)
Code Example #4
def test_value_range_loss_zero():
    torch.manual_seed(0)
    input = torch.rand(1, 3, 128, 128)

    actual = F.value_range_loss(input)
    desired = 0.0
    assert desired == ptu.approx(actual)
Code Example #5
def test_MultiLayerEncodingLoss(subtests, multi_layer_encoder_with_layer,
                                target_image, input_image):
    multi_layer_encoder, layer = multi_layer_encoder_with_layer
    encoder = multi_layer_encoder.extract_encoder(layer)
    target_repr = pystiche.gram_matrix(encoder(target_image), normalize=True)
    input_repr = pystiche.gram_matrix(encoder(input_image), normalize=True)

    configs = ((True, 1.0), (False, 1.0 / 4.0))
    for impl_params, score_correction_factor in configs:
        with subtests.test(impl_params=impl_params):
            loss = paper.MultiLayerEncodingLoss(
                multi_layer_encoder,
                (layer, ),
                lambda encoder, layer_weight: pystiche.loss.GramLoss(
                    encoder, score_weight=layer_weight),
                impl_params=impl_params,
            )
            loss.set_target_image(target_image)
            actual = loss(input_image)

            score = mse_loss(
                input_repr,
                target_repr,
            )
            desired = score * score_correction_factor

            assert actual == ptu.approx(desired)
Code Example #6
File: test_container.py Project: pystiche/pystiche
    def test_op_weights_str(self):
        class TestOperator(ops.Operator):
            def process_input_image(self, image):
                pass

        def get_op(name, score_weight):
            return TestOperator(score_weight=score_weight)

        names = [str(idx) for idx in range(3)]
        op_weights_config = ("sum", "mean")

        desireds = (float(len(names)), 1.0)
        for op_weights, desired in zip(op_weights_config, desireds):
            same_operator_container = ops.SameOperatorContainer(
                names, get_op, op_weights=op_weights
            )
            actual = sum(
                [getattr(same_operator_container, name).score_weight for name in names]
            )
            assert actual == ptu.approx(desired)

        with pytest.raises(ValueError):
            ops.SameOperatorContainer(
                names, get_op, op_weights="invalid",
            )
Code Example #7
def test_nonnegsqrt():
    vals = (-1.0, 0.0, 1.0, 2.0)
    desireds = (0.0, 0.0, 1.0, sqrt(2.0))

    for val, desired in zip(vals, desireds):
        x = torch.tensor(val, requires_grad=True)
        y = pystiche.nonnegsqrt(x)

        assert y == ptu.approx(desired)
Code Example #8
File: test_math.py Project: pystiche/pystiche
    def test_main(self):
        vals = (-1.0, 0.0, 1.0, 2.0)
        desireds = (0.0, 0.0, 1.0, sqrt(2.0))

        for val, desired in zip(vals, desireds):
            x = torch.tensor(val)
            y = pystiche.nonnegsqrt(x)

            assert y == ptu.approx(desired)
Code Example #9
def test_nonnegsqrt_grad():
    vals = (-1.0, 0.0, 1.0, 2.0)
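    # expected gradients: 0 for non-positive inputs, 1 / (2 * sqrt(x)) for positive ones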
    desireds = (0.0, 0.0, 1.0 / 2.0, 1.0 / (2.0 * sqrt(2.0)))

    for val, desired in zip(vals, desireds):
        x = torch.tensor(val, requires_grad=True)
        y = pystiche.nonnegsqrt(x)
        y.backward()

        assert x.grad == ptu.approx(desired)
Code Example #10
def test_get_input_image_tensor_style():
    starting_point = "style"
    image = torch.tensor(0.0)

    actual = misc.get_input_image(starting_point, style_image=image)
    desired = image
    assert actual == ptu.approx(desired)

    with pytest.raises(RuntimeError):
        misc.get_input_image(starting_point, content_image=image)
Code Example #11
    def test_mul(self):
        losses = (2.0, 3.0)
        factor = 4.0

        loss_dict = pystiche.LossDict([(f"loss{idx}", torch.tensor(val))
                                       for idx, val in enumerate(losses)])
        loss_dict = loss_dict * factor

        for idx, loss in enumerate(losses):
            actual = float(loss_dict[f"loss{idx}"])
            desired = loss * factor
            assert actual == ptu.approx(desired)
Code Example #12
def test_total_variation_loss():
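    # a checkerboard makes every horizontally and vertically adjacent pixel pair differ by exactly 1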
    def get_checkerboard(size):
        return ((torch.arange(size**2).view(size, size) +
                 torch.arange(size).view(size, 1)) % 2).bool()

    size = 128
    checkerboard = get_checkerboard(size)
    input = checkerboard.float().view(1, 1, size, size).repeat(1, 3, 1, 1)

    actual = F.total_variation_loss(input)
    desired = 2.0
    assert desired == ptu.approx(actual)
Code Example #13
def test_TotalVariationLoss(subtests, input_image):
    configs = ((True, 1.0 / 2.0), (False, 1.0))
    for impl_params, score_correction_factor in configs:
        with subtests.test(impl_params=impl_params):
            loss = paper.TotalVariationLoss(impl_params=impl_params)
            actual = loss(input_image)

            score = F.total_variation_loss(input_image,
                                           exponent=loss.exponent,
                                           reduction="sum")

            desired = score * score_correction_factor

            assert actual == ptu.approx(desired)
Code Example #14
def test_optimizer(subtests, input_image):
    params = input_image
    optimizer = paper.optimizer(params)

    assert isinstance(optimizer, optim.LBFGS)
    assert len(optimizer.param_groups) == 1

    param_group = optimizer.param_groups[0]

    with subtests.test(msg="optimization params"):
        assert len(param_group["params"]) == 1
        assert param_group["params"][0] is params

    with subtests.test(msg="optimizer properties"):
        assert param_group["lr"] == ptu.approx(1.0)
        assert param_group["max_iter"] == 1
Code Example #15
def test_optimizer(subtests, input_image):
    transformer = nn.Conv2d(3, 3, 1)
    params = tuple(transformer.parameters())
    optimizer = paper.optimizer(transformer)

    assert isinstance(optimizer, optim.Adam)
    assert len(optimizer.param_groups) == 1

    param_group = optimizer.param_groups[0]

    with subtests.test(msg="optimization params"):
        assert len(param_group["params"]) == len(params)
        for actual, desired in zip(param_group["params"], params):
            assert actual is desired

    with subtests.test(msg="optimizer properties"):
        assert param_group["lr"] == ptu.approx(1e-3)
Code Example #16
def test_DelayedExponentialLR():
    transformer = nn.Conv2d(3, 3, 1)
    gamma = 0.1
    delay = 2
    num_steps = 5
    optimizer = paper.optimizer(transformer)
    lr_scheduler = paper.DelayedExponentialLR(optimizer, gamma, delay)

    param_group = optimizer.param_groups[0]
    base_lr = param_group["lr"]
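    # the learning rate stays at its base value for the first delay steps and decays by gamma on every step thereafter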
    for i in range(num_steps):
        if i >= delay:
            base_lr *= gamma

        param_group = optimizer.param_groups[0]
        assert param_group["lr"] == ptu.approx(base_lr)
        optimizer.step()
        lr_scheduler.step()
Code Example #17
File: test_container.py Project: pystiche/pystiche
    def test_op_weights_seq(self):
        class TestOperator(ops.Operator):
            def process_input_image(self, image):
                pass

        def get_op(name, score_weight):
            return TestOperator(score_weight=score_weight)

        names, op_weights = zip(*[(str(idx), float(idx) + 1.0) for idx in range(3)])

        same_operator_container = ops.SameOperatorContainer(
            names, get_op, op_weights=op_weights
        )

        for name, score_weight in zip(names, op_weights):
            actual = getattr(same_operator_container, name).score_weight
            desired = score_weight
            assert actual == ptu.approx(desired)
Code Example #18
def test_FeatureReconstructionLoss(subtests, multi_layer_encoder_with_layer,
                                   target_image, input_image):
    multi_layer_encoder, layer = multi_layer_encoder_with_layer
    encoder = multi_layer_encoder.extract_encoder(layer)
    target_enc = encoder(target_image)
    input_enc = encoder(input_image)

    configs = ((True, "mean"), (False, "sum"))
    for impl_params, loss_reduction in configs:
        with subtests.test(impl_params=impl_params):
            loss = paper.FeatureReconstructionLoss(
                encoder,
                impl_params=impl_params,
            )
            loss.set_target_image(target_image)
            actual = loss(input_image)

            desired = mse_loss(input_enc, target_enc, reduction=loss_reduction)

            assert actual == ptu.approx(desired)
Code Example #19
    def test_main(self, subtests, multi_layer_encoder_with_layer, target_image,
                  input_image):
        multi_layer_encoder, layer = multi_layer_encoder_with_layer
        encoder = multi_layer_encoder.extract_encoder(layer)
        target_enc = encoder(target_image)
        input_enc = encoder(input_image)

        configs = ((True, "mean", 1.0), (False, "sum", 1.0 / 2.0))
        for impl_params, loss_reduction, score_correction_factor in configs:
            with subtests.test(impl_params=impl_params):
                loss = paper.FeatureReconstructionLoss(encoder,
                                                       impl_params=impl_params)
                loss.set_target_image(target_image)
                actual = loss(input_image)

                score = mse_loss(input_enc,
                                 target_enc,
                                 reduction=loss_reduction)
                desired = score * score_correction_factor

                assert actual == ptu.approx(desired)
Code Example #20
def test_GramLoss(subtests, multi_layer_encoder_with_layer, target_image,
                  input_image):
    multi_layer_encoder, layer = multi_layer_encoder_with_layer
    encoder = multi_layer_encoder.extract_encoder(layer)

    configs = ((True, True), (False, False))
    for (impl_params, normalize_by_num_channels) in configs:
        with subtests.test(impl_params=impl_params):
            target_repr = pystiche.gram_matrix(encoder(target_image),
                                               normalize=True)
            input_repr = pystiche.gram_matrix(encoder(input_image),
                                              normalize=True)
            intern_target_repr = (target_repr / target_repr.size()[-1] if
                                  normalize_by_num_channels else target_repr)
            intern_input_repr = (input_repr / input_repr.size()[-1]
                                 if normalize_by_num_channels else input_repr)
            loss = paper.GramLoss(encoder, impl_params=impl_params)
            loss.set_target_image(target_image)
            actual = loss(input_image)

            desired = mse_loss(intern_input_repr, intern_target_repr)

            assert actual == ptu.approx(desired, rel=1e-3)
Code Example #21
def test_ulyanov_et_al_2016_optimizer(subtests, impl_params, instance_norm):
    transformer = nn.Conv2d(3, 3, 1)
    params = tuple(transformer.parameters())

    hyper_parameters = paper.hyper_parameters(
        impl_params=impl_params, instance_norm=instance_norm).optimizer

    optimizer = paper.optimizer(transformer,
                                impl_params=impl_params,
                                instance_norm=instance_norm)

    assert isinstance(optimizer, optim.Adam)
    assert len(optimizer.param_groups) == 1

    param_group = optimizer.param_groups[0]

    with subtests.test(msg="optimization params"):
        assert len(param_group["params"]) == len(params)
        for actual, desired in zip(param_group["params"], params):
            assert actual is desired

    with subtests.test(msg="optimizer properties"):
        assert param_group["lr"] == ptu.approx(hyper_parameters.lr)
Code Example #22
def test_GramLoss(subtests, multi_layer_encoder_with_layer, target_image,
                  input_image):
    multi_layer_encoder, layer = multi_layer_encoder_with_layer
    encoder = multi_layer_encoder.extract_encoder(layer)
    target_repr = pystiche.gram_matrix(encoder(target_image), normalize=True)
    input_repr = pystiche.gram_matrix(encoder(input_image), normalize=True)

    configs = ((True, target_repr.size()[1]), (False, 1.0))
    for impl_params, extra_num_channels_normalization in configs:
        with subtests.test(impl_params=impl_params):
            loss = paper.GramLoss(
                encoder,
                impl_params=impl_params,
            )
            loss.set_target_image(target_image)
            actual = loss(input_image)

            score = mse_loss(
                input_repr,
                target_repr,
            )
            desired = score / extra_num_channels_normalization**2

            assert actual == ptu.approx(desired, rel=1e-3)
Code Example #23
def test_ResidualBlock(double_module):
    input = torch.tensor(1.0)
    model = utils.ResidualBlock(double_module)
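    # with the default identity shortcut: double_module(1.0) + 1.0 == 3.0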
    assert model(input) == ptu.approx(3.0)
Code Example #24
def test_ResidualBlock_shortcut(double_module):
    input = torch.tensor(1.0)
    model = utils.ResidualBlock(double_module,
                                shortcut=utils.ResidualBlock(double_module))
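    # with a residual block as shortcut: 2 * 1.0 + (2 * 1.0 + 1.0) == 5.0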
    assert model(input) == ptu.approx(5.0)