Example #1
    def test_gumbel_softmax_distribution(self):
        # 5-categorical Gumbel-Softmax.
        param_space = Float(shape=(5, ), main_axes="B")
        values_space = Float(shape=(5, ), main_axes="B")

        gumble_softmax_distribution = GumbelSoftmax(temperature=1.0)

        # Batch of size=2 and deterministic (True).
        input_ = param_space.sample(2)
        expected = softmax(input_)
        # Sample n times; the deterministic draw should always return the softmax of the logits.
        for _ in range(50):
            out = gumble_softmax_distribution.sample(input_,
                                                     deterministic=True)
            check(out, expected)
            out = gumble_softmax_distribution.sample_deterministic(input_)
            check(out, expected)

        # Batch of size=1 and non-deterministic -> expect roughly the vector of probs.
        input_ = param_space.sample(1)
        expected = softmax(input_)
        outs = []
        for _ in range(100):
            out = gumble_softmax_distribution.sample(input_)
            outs.append(out)
            out = gumble_softmax_distribution.sample_stochastic(input_)
            outs.append(out)

        check(np.mean(outs, axis=0), expected, decimals=1)

        return  # TODO: Figure out the Gumbel-Softmax log-prob calculation (our current implementation does not match the paper's formula).

        def gumbel_log_density(y, probs, num_categories, temperature=1.0):
            # Density of the Gumbel-Softmax (concrete) distribution, Eq. (1) in https://arxiv.org/pdf/1611.01144.pdf.
            density = np.math.factorial(num_categories - 1) * np.math.pow(temperature, num_categories - 1) * \
                (np.sum(probs / np.power(y, temperature), axis=-1) ** -num_categories) * \
                np.prod(probs / np.power(y, temperature + 1.0), axis=-1)
            return np.log(density)

        # Test log-likelihood outputs.
        input_ = param_space.sample(3)
        values = values_space.sample(3)
        expected = gumbel_log_density(values,
                                      softmax(input_),
                                      num_categories=param_space.shape[0])

        out = gumble_softmax_distribution.log_prob(input_, values)
        check(out, expected)
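
A minimal numpy sketch of the Gumbel-Softmax sampling this test exercises (a reference re-implementation under the assumption that the inputs are logits; the deterministic path reduces to a plain softmax of the logits, which is exactly what the expected value above encodes):

import numpy as np

def gumbel_softmax_sample(logits, temperature=1.0, deterministic=False, rng=np.random):
    def softmax(x):
        e = np.exp(x - np.max(x, axis=-1, keepdims=True))
        return e / np.sum(e, axis=-1, keepdims=True)

    if deterministic:
        # Deterministic draw: no Gumbel noise, just the softmax of the logits.
        return softmax(logits)
    # Stochastic draw: perturb the logits with Gumbel(0, 1) noise, then apply a
    # temperature-scaled softmax (the "concrete" relaxation from arXiv:1611.01144).
    u = rng.uniform(low=1e-10, high=1.0, size=np.shape(logits))
    gumbel_noise = -np.log(-np.log(u))
    return softmax((logits + gumbel_noise) / temperature)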
Example #2
    def test_categorical(self):
        # Create 5 categorical distributions of 3 categories each.
        param_space = Float(shape=(5, 3), low=-1.0, high=2.0, main_axes="B")
        values_space = Int(3, shape=(5, ), main_axes="B")

        # The Component to test.
        categorical = Categorical()

        # Batch of size=3 and deterministic (True).
        input_ = param_space.sample(3)
        expected = np.argmax(input_, axis=-1)
        # Sample n times; the deterministic draw should always return the argmax (max-likelihood category).
        for _ in range(10):
            out = categorical.sample(input_, deterministic=True)
            check(out, expected)
            out = categorical.sample_deterministic(input_)
            check(out, expected)

        # Batch of size=3 and non-deterministic -> expect roughly the mean.
        input_ = param_space.sample(3)
        outs = []
        for _ in range(100):
            out = categorical.sample(input_, deterministic=False)
            outs.append(out)
            out = categorical.sample_stochastic(input_)
            outs.append(out)

        check(np.mean(outs), 1.0, decimals=0)

        input_ = param_space.sample(1)
        probs = softmax(input_)
        values = values_space.sample(1)

        # Test log-likelihood outputs.
        out = categorical.log_prob(input_, values)
        check(out,
              np.log(
                  np.array([[
                      probs[0][0][values[0][0]], probs[0][1][values[0][1]],
                      probs[0][2][values[0][2]], probs[0][3][values[0][3]],
                      probs[0][4][values[0][4]]
                  ]])),
              decimals=4)

        # Test entropy outputs.
        out = categorical.entropy(input_)
        expected_entropy = -np.sum(probs * np.log(probs), axis=-1)
        check(out, expected_entropy)
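
The manual per-index lookups in the log-likelihood check above can be written more compactly; a small sketch of the same reference math in numpy (assuming probs of shape (batch, 5, 3) and integer values of shape (batch, 5)):

import numpy as np

def categorical_log_prob(probs, values):
    # Log-probability of each chosen category: pick along the last (category) axis.
    picked = np.take_along_axis(probs, values[..., None], axis=-1)[..., 0]
    return np.log(picked)

def categorical_entropy(probs):
    # Entropy per distribution: H = -sum_i p_i * log(p_i) over the category axis.
    return -np.sum(probs * np.log(probs), axis=-1)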
Example #3
    def test_layer_network_with_container_output_space_and_one_distribution(self):
        input_space = Float(-1.0, 1.0, shape=(5,), main_axes="B")
        output_space = Dict({"a": Float(shape=(2, 3)), "b": Int(3)}, main_axes="B")
        # Using keras layer as network spec.
        layer = tf.keras.layers.Dense(10)

        nn = Network(
            network=layer,
            output_space=output_space,
            # Only the "a" output component gets a distribution; "b" (Int) stays a plain output.
            distributions=dict(a=True)
        )
        # Simple call -> Should return sample dict with "a"->float(2,3) and "b"->int(3,).
        input_ = input_space.sample(1000)
        result = nn(input_)
        check(np.mean(result["a"]), 0.0, decimals=0)
        check(np.mean(np.sum(softmax(result["b"]), axis=-1)), 1.0, decimals=5)

        # Call with value -> Should return likelihood of "a"-value and output for "b"-value.
        input_ = input_space.sample(3)
        value = output_space.sample(3)
        result, likelihood = nn(input_, value)
        self.assertTrue(result["a"] is None)  # a is None b/c value was already given for likelihood calculation
        self.assertTrue(result["b"].shape == (3,))  # b is the (batched) output values for the given int-numbers
        self.assertTrue(result["b"].dtype == np.float32)
        self.assertTrue(likelihood.shape == (3,))  # (total) likelihood is some float
        self.assertTrue(likelihood.dtype == np.float32)

        # Extract only the "b" value-output (one output for each int category).
        # Also: No likelihood output b/c "a" was invalidated.
        del value["a"]
        value["b"] = None
        result = nn(input_, value)
        self.assertTrue(result["a"] is None)
        self.assertTrue(result["b"].shape == (3, 3))
        self.assertTrue(result["b"].dtype == np.float32)

        value = output_space.sample(3)
        value["a"] = None
        del value["b"]
        result = nn(input_, value)
        self.assertTrue(result is None)
Example #4
    def test_func_api_network_with_primitive_int_output_space_and_distribution(self):
        input_space = Float(-1.0, 1.0, shape=(3,), main_axes="B")
        output_space = Int(5, main_axes="B")

        # Using keras functional API to create network.
        i = tf.keras.layers.Input(shape=(3,))
        d = tf.keras.layers.Dense(10)(i)
        e = tf.keras.layers.Dense(5)(i)
        o = tf.concat([d, e], axis=-1)
        network = tf.keras.Model(inputs=i, outputs=o)

        # Use default distributions (i.e. categorical for Int).
        nn = Network(
            network=network,
            output_space=output_space,
            distributions="default"
        )
        input_ = input_space.sample(1000)
        result = nn(input_)
        # Check the sample for a proper mean value.
        check(np.mean(result), 2, decimals=0)

        # Function call with value -> Expect probabilities for given int-values.
        input_ = input_space.sample(6)
        values = output_space.sample(6)
        result = nn(input_, values)
        weights = nn.get_weights()
        expected = dense(np.concatenate(
            [dense(input_, weights[0], weights[1]), dense(input_, weights[2], weights[3])],
            axis=-1
        ), weights[4], weights[5])
        expected = softmax(expected)
        expected = np.sum(expected * one_hot(values, depth=output_space.num_categories), axis=-1)

        check(result, expected)

        # Function call with "likelihood" option set -> Expect sample plus probabilities for sampled int-values.
        input_ = input_space.sample(1000)
        sample, probs = nn(input_, likelihood=True)
        check(np.mean(sample), 2, decimals=0)
        check(np.mean(probs), 1.0 / output_space.num_categories, decimals=1)
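
dense and one_hot above appear to be small numpy reference helpers from the test utilities; a minimal sketch of what they would have to compute for the expected value to match the Keras forward pass (names and exact signatures are assumptions, not taken from the library):

import numpy as np

def dense(x, weights, biases):
    # Reference forward pass of a Dense layer without activation: x @ W + b.
    return np.matmul(x, weights) + biases

def one_hot(indices, depth):
    # One-hot encode integer indices along a new trailing axis of size depth.
    return np.eye(depth)[np.asarray(indices, dtype=int)]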
Example #5
    def test_neg_log_likelihood_loss_function_w_container_space(self):
        parameters_space = Dict(
            {
                # Keep the stddev params within a sane range (just like our adapters clip the raw NN output).
                "a": Tuple(
                    Float(shape=(2, 3)),            # means
                    Float(0.5, 1.0, shape=(2, 3))   # stddevs in [0.5, 1.0]
                ),  # normal
                "b": Float(shape=(4,), low=-1.0, high=1.0)  # logits for a 4-way categorical
            },
            main_axes="B")

        labels_space = Dict({
            "a": Float(shape=(2, 3)),
            "b": Int(4)
        }, main_axes="B")

        loss_function = NegLogLikelihoodLoss(
            distribution=get_default_distribution_from_space(labels_space))

        parameters = parameters_space.sample(2)
        # Softmax the discrete params.
        probs_b = softmax(parameters["b"])
        labels = labels_space.sample(2)

        # Expected loss per item: negative sum of all log-likelihoods.
        log_prob_per_item_a = np.sum(
            np.log(sts.norm.pdf(labels["a"], parameters["a"][0], parameters["a"][1])),
            axis=(-1, -2))
        log_prob_per_item_b = np.array([
            np.log(probs_b[0][labels["b"][0]]),
            np.log(probs_b[1][labels["b"][1]])
        ])

        expected_loss_per_item = -(log_prob_per_item_a + log_prob_per_item_b)

        out = loss_function(parameters, labels)
        check(out, expected_loss_per_item, decimals=4)
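
A compact restatement of the expected per-item loss above, assuming sts refers to scipy.stats: the Normal log-densities for the "a" part are summed over the (2, 3) event dimensions, the Categorical log-probability of the labelled class is added for the "b" part, and the total is negated:

import numpy as np
import scipy.stats as sts

def expected_neg_log_likelihood(parameters, labels, probs_b):
    # Normal part: independent Normal(mean, stddev) per element of the (2, 3) block.
    log_prob_a = np.sum(
        sts.norm.logpdf(labels["a"], parameters["a"][0], parameters["a"][1]),
        axis=(-1, -2))
    # Categorical part: log-probability of the labelled category, per batch item.
    log_prob_b = np.log(
        np.take_along_axis(probs_b, labels["b"][:, None], axis=-1)[:, 0])
    # Negative joint log-likelihood per batch item.
    return -(log_prob_a + log_prob_b)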
Example #6
    def test_joint_cumulative_distribution(self):
        param_space = Dict(
            {
                "a": Float(shape=(4,)),  # logits for a 4-way categorical
                "b": Dict({
                    "ba": Tuple([Float(shape=(3,)),
                                 Float(0.1, 1.0, shape=(3,))]),  # 3-variate normal
                    "bb": Tuple([Float(shape=(2,)),
                                 Float(shape=(2,))]),  # beta, scaled to [-1, 1]
                    "bc": Tuple([Float(shape=(4,)),
                                 Float(0.1, 1.0, shape=(4,))]),  # normal (dim=4)
                })
            },
            main_axes="B")

        values_space = Dict(
            {
                "a": Int(4),
                "b": Dict({
                    "ba": Float(shape=(3,)),
                    "bb": Float(shape=(2,)),
                    "bc": Float(shape=(4,))
                })
            },
            main_axes="B")

        low, high = -1.0, 1.0
        cumulative_distribution = JointCumulativeDistribution(
            distributions={
                "a": Categorical(),
                "b": {
                    "ba": MultivariateNormal(),
                    "bb": Beta(low=low, high=high),
                    "bc": Normal()
                }
            })

        # Batch of size=2 and deterministic (True).
        input_ = param_space.sample(2)
        input_["a"] = softmax(input_["a"])
        expected_mean = {
            "a": np.argmax(input_["a"], axis=-1),
            "b": {
                "ba": input_["b"]["ba"][0],  # [0] = mean
                # Mean of a Beta distribution: 1 / (1 + beta/alpha), scaled by (high - low) and shifted by low.
                "bb": (1.0 / (1.0 + input_["b"]["bb"][1] / input_["b"]["bb"][0])) * (high - low) + low,
                "bc": input_["b"]["bc"][0],  # [0] = mean
            }
        }
        # Sample n times, expect always mean value (deterministic draw).
        for _ in range(20):
            out = cumulative_distribution.sample(input_, deterministic=True)
            check(out, expected_mean)
            out = cumulative_distribution.sample_deterministic(input_)
            check(out, expected_mean)

        # Batch of size=1 and non-deterministic -> expect roughly the mean.
        input_ = param_space.sample(1)
        input_["a"] = softmax(input_["a"])
        expected_mean = {
            "a": np.sum(input_["a"] * np.array([0, 1, 2, 3])),
            "b": {
                "ba": input_["b"]["ba"][0],  # [0] = mean
                # Mean of a Beta distribution: 1 / (1 + beta/alpha), scaled by (high - low) and shifted by low.
                "bb": (1.0 / (1.0 + input_["b"]["bb"][1] / input_["b"]["bb"][0])) * (high - low) + low,
                "bc": input_["b"]["bc"][0],  # [0] = mean
            }
        }

        outs = []
        for _ in range(500):
            out = cumulative_distribution.sample(input_)
            outs.append(out)
            out = cumulative_distribution.sample_stochastic(input_)
            outs.append(out)

        check(np.mean(np.stack([o["a"][0] for o in outs], axis=0), axis=0),
              expected_mean["a"],
              atol=0.3)
        check(np.mean(np.stack([o["b"]["ba"][0] for o in outs], axis=0),
                      axis=0),
              expected_mean["b"]["ba"][0],
              decimals=1)
        check(np.mean(np.stack([o["b"]["bb"][0] for o in outs], axis=0),
                      axis=0),
              expected_mean["b"]["bb"][0],
              decimals=1)
        check(np.mean(np.stack([o["b"]["bc"][0] for o in outs], axis=0),
                      axis=0),
              expected_mean["b"]["bc"][0],
              decimals=1)

        # Test log-likelihood outputs.
        params = param_space.sample(1)
        params["a"] = softmax(params["a"])
        # Make sure beta-values are within 0.0 and 1.0 for the numpy calculation (which doesn't have scaling).
        values = values_space.sample(1)
        log_prob_beta = np.log(
            beta.pdf(values["b"]["bb"], params["b"]["bb"][0],
                     params["b"]["bb"][1]))
        # Now do the scaling for b/bb (beta values).
        values["b"]["bb"] = values["b"]["bb"] * (high - low) + low
        expected_log_llh = np.log(params["a"][0][values["a"][0]]) + \
            np.sum(np.log(norm.pdf(values["b"]["ba"][0], params["b"]["ba"][0], params["b"]["ba"][1]))) + \
            np.sum(log_prob_beta) + \
            np.sum(np.log(norm.pdf(values["b"]["bc"][0], params["b"]["bc"][0], params["b"]["bc"][1])))

        out = cumulative_distribution.log_prob(params, values)
        check(out, expected_log_llh, decimals=0)
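
A short note on the Beta mean used in the expected values above: for Beta(alpha, beta) the mean on [0, 1] is alpha / (alpha + beta), which equals 1 / (1 + beta/alpha); rescaling it into [low, high] gives the expression in the test. A tiny sanity check:

import numpy as np

def scaled_beta_mean(alpha, beta_param, low=-1.0, high=1.0):
    mean_01 = alpha / (alpha + beta_param)   # mean of Beta(alpha, beta) on [0, 1]
    return mean_01 * (high - low) + low      # rescaled into [low, high]

alpha, beta_param = 2.0, 0.5
assert np.isclose(scaled_beta_mean(alpha, beta_param),
                  (1.0 / (1.0 + beta_param / alpha)) * 2.0 - 1.0)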
Example #7
    def test_mixture(self):
        # Create a mixture distribution consisting of 3 bivariate normals weighted by an internal
        # categorical distribution.
        num_distributions = 3
        num_events_per_multivariate = 2  # 2=bivariate
        param_space = Dict(
            {
                "categorical": Float(shape=(num_distributions,), low=-1.5, high=2.3),
                "parameters0": Tuple(
                    Float(shape=(num_events_per_multivariate,)),  # mean
                    Float(shape=(num_events_per_multivariate,), low=0.5, high=1.0),  # diag
                ),
                "parameters1": Tuple(
                    Float(shape=(num_events_per_multivariate,)),  # mean
                    Float(shape=(num_events_per_multivariate,), low=0.5, high=1.0),  # diag
                ),
                "parameters2": Tuple(
                    Float(shape=(num_events_per_multivariate,)),  # mean
                    Float(shape=(num_events_per_multivariate,), low=0.5, high=1.0),  # diag
                ),
            },
            main_axes="B")
        values_space = Float(shape=(num_events_per_multivariate,), main_axes="B")
        # The Component to test.
        mixture = MixtureDistribution(
            # Try different spec types.
            MultivariateNormal(),
            "multi-variate-normal",
            "multivariate_normal")

        # Batch of size=1 and deterministic (True).
        input_ = param_space.sample(1)
        # Make probs for categorical.
        categorical_probs = softmax(input_["categorical"])

        # Note: Usually, a deterministic draw would return the max-likelihood value,
        # i.e. the mean of the component picked by argmax(categorical). This
        # implementation instead returns the categorical-prob-weighted sum of the
        # component means, which is what the expected value below encodes.

        # The mean value is a 2D vector (bivariate distribution).
        expected = categorical_probs[:, 0:1] * input_["parameters0"][0] + \
            categorical_probs[:, 1:2] * input_["parameters1"][0] + \
            categorical_probs[:, 2:3] * input_["parameters2"][0]

        for _ in range(20):
            out = mixture.sample(input_, deterministic=True)
            check(out, expected)
            out = mixture.sample_deterministic(input_)
            check(out, expected)

        # Batch of size=1 and non-deterministic -> expect roughly the mean.
        input_ = param_space.sample(1)
        # Make probs for categorical.
        categorical_probs = softmax(input_["categorical"])
        expected = categorical_probs[:, 0:1] * input_["parameters0"][0] + \
            categorical_probs[:, 1:2] * input_["parameters1"][0] + \
            categorical_probs[:, 2:3] * input_["parameters2"][0]
        outs = []
        for _ in range(500):
            out = mixture.sample(input_, deterministic=False)
            outs.append(out)
            out = mixture.sample_stochastic(input_)
            outs.append(out)
        check(np.mean(np.array(outs), axis=0), expected, decimals=1)

        return
        # TODO: prob/log-prob tests for Mixture.

        # Test log-likelihood outputs (against scipy).
        for i in range(20):
            params = param_space.sample(1)
            # Make sure categorical params are softmaxed.
            category_probs = softmax(params["categorical"][0])
            values = values_space.sample(1)
            expected = 0.0
            v = []
            for j in range(3):
                v.append(
                    multivariate_normal.pdf(
                        values[0],
                        mean=params["parameters{}".format(j)][0][0],
                        cov=params["parameters{}".format(j)][1][0]))
                expected += category_probs[j] * v[-1]
            out = mixture.prob(params, values)
            check(out[0], expected, atol=0.1)

            expected = np.zeros(shape=(3, ))
            for j in range(3):
                expected[j] = np.log(category_probs[j]) + np.log(
                    multivariate_normal.pdf(
                        values[0],
                        mean=params["parameters{}".format(j)][0][0],
                        cov=params["parameters{}".format(j)][1][0]))
            expected = np.log(np.sum(np.exp(expected)))
            out = mixture.log_prob(params, values)
            print("{}: out={} expected={}".format(i, out, expected))
            check(out, np.array([expected]), atol=0.25)
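
The (currently skipped) log-prob check at the end combines per-component log-densities via a log-sum-exp; scipy.special.logsumexp does the same thing in a numerically stable way. A minimal sketch of that reference computation, with the second tuple element treated as the diagonal covariance exactly as in the scipy calls above:

import numpy as np
from scipy.special import logsumexp
from scipy.stats import multivariate_normal

def mixture_log_prob(category_probs, means, diag_covs, value):
    # log p(x) = logsumexp_j [ log w_j + log N(x | mean_j, diag_cov_j) ]
    per_component = [
        np.log(category_probs[j]) +
        multivariate_normal.logpdf(value, mean=means[j], cov=diag_covs[j])
        for j in range(len(category_probs))
    ]
    return logsumexp(per_component)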