Exemplo n.º 1
0
def test_dssim_channels_last(dummy):  # pylint:disable=unused-argument
    """ Basic test for DSSIM Loss """
    original_format = K.image_data_format()
    K.set_image_data_format('channels_last')
    for side, k_size in zip([32, 33], [2, 3]):
        shape = [side, side, 3]
        batch_a = np.random.random_sample(4 * side * side * 3).reshape([4] + shape)
        batch_b = np.random.random_sample(4 * side * side * 3).reshape([4] + shape)

        # A tiny 2-layer conv net is enough to exercise the loss in fit()
        model = Sequential([
            Conv2D(32, (3, 3),
                   padding='same',
                   input_shape=shape,
                   activation='relu'),
            Conv2D(3, (3, 3),
                   padding='same',
                   input_shape=shape,
                   activation='relu')])
        optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=losses.DSSIMObjective(kernel_size=k_size),
                      metrics=['mse'],
                      optimizer=optimizer)
        model.fit(batch_a, batch_b, batch_size=2, epochs=1, shuffle='batch')

        # Identical inputs should score a DSSIM of 0.0
        dssim = losses.DSSIMObjective(kernel_size=k_size)
        assert_allclose(0.0,
                        K.eval(dssim(K.constant(batch_a, 'float32'),
                                     K.constant(batch_a, 'float32'))),
                        atol=1e-4)

        # Maximally different constant inputs should score 0.5
        dssim = losses.DSSIMObjective(kernel_size=k_size)
        assert_allclose(0.5,
                        K.eval(dssim(K.zeros([4] + shape),
                                     K.ones([4] + shape))),
                        atol=1e-4)

    K.set_image_data_format(original_format)
Exemplo n.º 2
0
 def __init__(self, config: dict) -> None:
     """ Store the configuration and build the loss lookup table.

     Parameters
     ----------
     config: dict
         Configuration options; stored on the instance for later use
     """
     logger.debug("Initializing %s", self.__class__.__name__)
     self._config = config
     # Maps a configuration key to its loss implementation. Insertion order
     # matches the original keyword order and is preserved by dict.
     self._loss_dict = {"gmsd": losses.GMSDLoss(),
                        "l_inf_norm": losses.LInfNorm(),
                        "laploss": losses.LaplacianPyramidLoss(),
                        "logcosh": k_losses.logcosh,
                        "ms_ssim": losses.MSSIMLoss(),
                        "mae": k_losses.mean_absolute_error,
                        "mse": k_losses.mean_squared_error,
                        "pixel_gradient_diff": losses.GradientLoss(),
                        "ssim": losses.DSSIMObjective(),
                        "smooth_loss": losses.GeneralizedLoss()}
     self._mask_channels = self._get_mask_channels()
     self._inputs: List[keras.layers.Layer] = []
     self._names: List[str] = []
     self._funcs: Dict[str, Callable] = {}
     logger.debug("Initialized: %s", self.__class__.__name__)
Exemplo n.º 3
0
    if get_backend() == "amd" and isinstance(loss_func, losses.GMSDLoss):
        pytest.skip("GMSD Loss is not currently compatible with PlaidML")
    y_a = K.variable(np.random.random((2, 16, 16, 3)))
    y_b = K.variable(np.random.random((2, 16, 16, 3)))
    objective_output = loss_func(y_a, y_b)
    if get_backend() == "amd":
        assert K.eval(objective_output).shape == output_shape
    else:
        output = objective_output.numpy()
        assert output.dtype == "float32" and not np.isnan(output)


# Penalized-loss parametrization: the shared _PARAMS extended with the stock
# Keras losses (per-pixel output shape) and DSSIM (empty output shape).
_PLPARAMS = _PARAMS + [
    (k_losses.mean_absolute_error, (2, 16, 16)),
    (k_losses.mean_squared_error, (2, 16, 16)),
    (k_losses.logcosh, (2, 16, 16)),
    (losses.DSSIMObjective(), ())]
_PLIDS = ["GeneralizedLoss", "GradientLoss", "GMSDLoss", "LInfNorm", "mae",
          "mse", "logcosh", "DSSIMObjective"]
# Tag each test id with the active backend for clearer pytest output
_PLIDS = ["{}[{}]".format(name, get_backend().upper()) for name in _PLIDS]


@pytest.mark.parametrize(["loss_func", "output_shape"], _PLPARAMS, ids=_PLIDS)
def test_penalized_loss(loss_func, output_shape):
    """ Test penalized loss wrapper works as expected """
    if get_backend() == "amd":
        if isinstance(loss_func, losses.GMSDLoss):
            pytest.skip("GMSD Loss is not currently compatible with PlaidML")
        if hasattr(loss_func, "__name__") and loss_func.__name__ == "logcosh":
            pytest.skip(
Exemplo n.º 4
0
    """ Basic shape tests for loss functions. """
    if get_backend() == "amd" and isinstance(loss_func, losses.GMSDLoss):
        pytest.skip("GMSD Loss is not currently compatible with PlaidML")
    y_a = K.variable(np.random.random((2, 16, 16, 3)))
    y_b = K.variable(np.random.random((2, 16, 16, 3)))
    objective_output = loss_func(y_a, y_b)
    if get_backend() == "amd":
        assert K.eval(objective_output).shape == output_shape
    else:
        output = objective_output.numpy()
        assert output.dtype == "float32" and not np.isnan(output)


# Loss-wrapper parametrization: every custom loss plus the stock Keras ones.
_LWPARAMS = [losses.GeneralizedLoss(),
             losses.GradientLoss(),
             losses.GMSDLoss(),
             losses.LInfNorm(),
             k_losses.mean_absolute_error,
             k_losses.mean_squared_error,
             k_losses.logcosh,
             losses.DSSIMObjective(),
             losses.MSSSIMLoss()]
_LWIDS = ["GeneralizedLoss", "GradientLoss", "GMSDLoss", "LInfNorm", "mae",
          "mse", "logcosh", "DSSIMObjective", "MS-SSIM"]
# Tag each test id with the active backend for clearer pytest output
_LWIDS = [f"{name}[{get_backend().upper()}]" for name in _LWIDS]


@pytest.mark.parametrize("loss_func", _LWPARAMS, ids=_LWIDS)
def test_loss_wrapper(loss_func):
    """ Test penalized loss wrapper works as expected """
    # The "amd" backend (PlaidML) cannot run some of the parametrized losses,
    # so those combinations are skipped up front.
    if get_backend() == "amd":
        if isinstance(loss_func, losses.GMSDLoss):
            pytest.skip("GMSD Loss is not currently compatible with PlaidML")
        if isinstance(loss_func, losses.MSSSIMLoss):
            pytest.skip("MS-SSIM Loss is not currently compatible with PlaidML")
        # logcosh is a plain function, not a class, so it cannot be matched
        # with isinstance; identify it by __name__ instead
        if hasattr(loss_func, "__name__") and loss_func.__name__ == "logcosh":
            pytest.skip("LogCosh Loss is not currently compatible with PlaidML")
Exemplo n.º 5
0
    y_b = K.variable(np.random.random((2, 16, 16, 3)))
    objective_output = loss_func(y_a, y_b)
    if get_backend() == "amd":
        assert K.eval(objective_output).shape == output_shape
    else:
        output = objective_output.numpy()
        assert output.dtype == "float32" and not np.isnan(output)


# Loss-wrapper parametrization: the custom losses plus the stock Keras ones.
_LWPARAMS = [losses.GeneralizedLoss(),
             losses.GradientLoss(),
             losses.GMSDLoss(),
             losses.LInfNorm(),
             k_losses.mean_absolute_error,
             k_losses.mean_squared_error,
             k_losses.logcosh,
             losses.DSSIMObjective()]
_LWIDS = ["GeneralizedLoss", "GradientLoss", "GMSDLoss", "LInfNorm", "mae",
          "mse", "logcosh", "DSSIMObjective"]
# Tag each test id with the active backend for clearer pytest output
_LWIDS = ["{}[{}]".format(name, get_backend().upper()) for name in _LWIDS]


@pytest.mark.parametrize("loss_func", _LWPARAMS, ids=_LWIDS)
def test_loss_wrapper(loss_func):
    """ Test penalized loss wrapper works as expected """
    if get_backend() == "amd":
        if isinstance(loss_func, losses.GMSDLoss):
            pytest.skip("GMSD Loss is not currently compatible with PlaidML")
        if hasattr(loss_func, "__name__") and loss_func.__name__ == "logcosh":