def test_EncoderNormalizer(kwargs):
    """Round-trip check for EncoderNormalizer under the given config.

    Invalid combinations (log_scale together with coerce_positive) must be
    rejected by the constructor; otherwise fit_transform followed by
    inverse_transform must either stay non-negative (coerce_positive) or
    reproduce the input within tolerance.
    """
    data = torch.rand(100)
    merged = dict(
        method="standard",
        log_scale=False,
        coerce_positive=False,
        center=True,
        log_zero_value=0.0,
    )
    merged.update(kwargs)
    kwargs = merged

    coerce_positive = kwargs["coerce_positive"]
    if coerce_positive and kwargs["log_scale"]:
        # mutually exclusive options — constructor is expected to assert
        with pytest.raises(AssertionError):
            EncoderNormalizer(**kwargs)
        return

    normalizer = EncoderNormalizer(**kwargs)
    if coerce_positive:
        # shift below zero so positivity coercion is actually exercised
        data = data - 0.5

    roundtrip = normalizer.inverse_transform(normalizer.fit_transform(data))
    if coerce_positive:
        assert (roundtrip >= 0).all(), "Inverse transform should yield only positive values"
    else:
        assert torch.isclose(
            roundtrip, data, atol=1e-5
        ).all(), "Inverse transform should reverse transform"
def test_EncoderNormalizer(kwargs):
    """Round-trip check for EncoderNormalizer under the given config.

    Positive-only transformations (relu/softplus/log1p) must keep the
    round-tripped values non-negative; all other configs must reproduce the
    input within tolerance.
    """
    data = torch.rand(100)
    params = {"method": "standard", "center": True, **kwargs}
    normalizer = EncoderNormalizer(**params)

    transformation = params.get("transformation")
    if transformation in ("relu", "softplus"):
        # shift below zero so the positivity coercion is actually exercised
        data = data - 0.5

    roundtrip = normalizer.inverse_transform(normalizer.fit_transform(data))
    if transformation in ("relu", "softplus", "log1p"):
        assert (roundtrip >= 0).all(), "Inverse transform should yield only positive values"
    else:
        assert torch.isclose(
            roundtrip, data, atol=1e-5
        ).all(), "Inverse transform should reverse transform"
def test_EncoderNormalizer(kwargs):
    """Round-trip check for EncoderNormalizer under the given config.

    The data tensor may be supplied via kwargs["data"]; defaults are filled
    into the incoming dict in place (setdefault/pop, as callers expect).
    Positive-only transformations (relu/softplus/log1p) must keep the
    round-tripped values non-negative; all other configs must reproduce the
    input within tolerance.
    """
    kwargs.setdefault("method", "standard")
    kwargs.setdefault("center", True)
    kwargs.setdefault("data", torch.rand(100))
    data = kwargs.pop("data")

    normalizer = EncoderNormalizer(**kwargs)
    # fit_transform may return a non-tensor type — normalize before inverting
    transformed = torch.as_tensor(normalizer.fit_transform(data))
    restored = normalizer.inverse_transform(transformed)

    if kwargs.get("transformation") in ("relu", "softplus", "log1p"):
        assert (restored >= 0).all(), "Inverse transform should yield only positive values"
    else:
        assert torch.isclose(
            restored,
            torch.as_tensor(data),
            atol=1e-5,
        ).all(), "Inverse transform should reverse transform"