Example 1
    def __init__(
        self,
        k: int = 2,
        shape: Union[int, List[int]] = [],
        posterior=Categorical,
        prior=None,
        transform=None,
        initializer={"probs": xavier},
        var_transform={"probs": O.additive_logistic_transform},
        name="CategoricalParameter",
    ):

        # Check type of k
        if not isinstance(k, int):
            raise TypeError("k must be an integer")
        if k < 2:
            raise ValueError("k must be >1")

        # Make shape a list
        if isinstance(shape, int):
            shape = [shape]

        # Use a uniform prior
        if prior is None:
            prior = Categorical(O.ones(shape) / float(k))

        # Create shape of underlying variable array
        shape = shape + [k - 1]

        # Initialize the parameter
        super().__init__(
            shape=shape,
            posterior=posterior,
            prior=prior,
            transform=transform,
            initializer=initializer,
            var_transform=var_transform,
            name=name,
        )

        # shape should correspond to the sample shape
        self.shape = shape
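
For context, this constructor builds a k-way categorical parameter whose posterior lives on k - 1 free values pushed through the additive-logistic (softmax-style) transform, which is why the underlying variable shape gains a trailing k - 1 dimension. A minimal usage sketch, assuming a probflow-style import path:

    from probflow.parameters import CategoricalParameter  # assumed path

    # A length-5 vector of 3-way categorical parameters with a uniform prior
    theta = CategoricalParameter(k=3, shape=5, name="theta")
    draw = theta()  # a sample of category assignments from the posterior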
Example 2
# Assumed imports for this test (exact module paths may differ):
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp

from probflow.distributions import Categorical  # assumed import path

tfd = tfp.distributions


def is_close(a, b, tol=1e-3):
    """Approximate scalar equality (assumed test helper)."""
    return np.abs(a - b) < tol


def test_Categorical():
    """Tests the Categorical distribution (TensorFlow backend)."""

    # Create the distribution
    dist = Categorical(tf.constant([0.0, 1.0, 2.0]))

    # Check default params
    assert isinstance(dist.logits, tf.Tensor)
    assert dist.probs is None

    # Call should return backend obj
    assert isinstance(dist(), tfd.Categorical)

    # Test methods
    zero = np.array([0.0])
    one = np.array([1.0])
    two = np.array([2.0])
    assert dist.prob(zero).numpy() < dist.prob(one).numpy()
    assert dist.prob(one).numpy() < dist.prob(two).numpy()
    assert dist.log_prob(zero).numpy() < dist.log_prob(one).numpy()
    assert dist.log_prob(one).numpy() < dist.log_prob(two).numpy()

    # Mean should return the mode!
    assert dist.mean().numpy() == 2

    # Test sampling
    samples = dist.sample()
    assert isinstance(samples, tf.Tensor)
    assert samples.ndim == 0
    samples = dist.sample(10)
    assert isinstance(samples, tf.Tensor)
    assert samples.ndim == 1
    assert samples.shape[0] == 10

    # Should be able to set params
    dist = Categorical(probs=tf.constant([0.1, 0.7, 0.2]))
    assert isinstance(dist.probs, tf.Tensor)
    assert dist.logits is None
    assert is_close(dist.prob(zero).numpy(), 0.1)
    assert is_close(dist.prob(one).numpy(), 0.7)
    assert is_close(dist.prob(two).numpy(), 0.2)
    assert dist.mean().numpy() == 1

    # But only with Tensor-like objs
    with pytest.raises(TypeError):
        dist = Categorical("lalala")
    with pytest.raises(TypeError):
        dist = Categorical()

    # Should use the last dim if passed a Tensor arg
    dist = Categorical(probs=tf.constant([
        [0.1, 0.7, 0.2],
        [0.8, 0.1, 0.1],
        [0.01, 0.01, 0.98],
        [0.3, 0.3, 0.4],
    ]))
    a1 = tf.constant([0.0, 1.0, 2.0, 2.0])
    a2 = tf.constant([2.0, 1.0, 0.0, 0.0])
    assert is_close(dist.prob(a1).numpy()[0], 0.1)
    assert is_close(dist.prob(a1).numpy()[1], 0.1)
    assert is_close(dist.prob(a1).numpy()[2], 0.98)
    assert is_close(dist.prob(a1).numpy()[3], 0.4)
    assert is_close(dist.prob(a2).numpy()[0], 0.2)
    assert is_close(dist.prob(a2).numpy()[1], 0.1)
    assert is_close(dist.prob(a2).numpy()[2], 0.01)
    assert is_close(dist.prob(a2).numpy()[3], 0.3)

    # And ensure sample dims are correct
    samples = dist.sample()
    assert isinstance(samples, tf.Tensor)
    assert samples.ndim == 1
    assert samples.shape[0] == 4
    samples = dist.sample(10)
    assert isinstance(samples, tf.Tensor)
    assert samples.ndim == 2
    assert samples.shape[0] == 10
    assert samples.shape[1] == 4
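
The ordering assertions at the top of this test hold because the wrapper's first positional argument is logits, and softmax is monotone in each logit. A quick check of the implied probabilities in plain numpy:

    import numpy as np

    logits = np.array([0.0, 1.0, 2.0])
    probs = np.exp(logits) / np.exp(logits).sum()
    print(probs)  # ~[0.090, 0.245, 0.665], increasing with the logit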
Example 3
def __call__(self, x):
    x = to_tensor(x)
    # The network emits k - 1 free logits; a column of zeros is added
    # for the reference class before constructing the Categorical.
    return Categorical(O.insert_col_of(self.network(x), 0))
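
Fixing one logit to zero removes the softmax's spare degree of freedom: adding a constant to every logit leaves the distribution unchanged, so only k - 1 logits need to be learned. A hypothetical numpy stand-in for what insert_col_of(vals, 0) does (the real O.insert_col_of is not shown in these examples):

    import numpy as np

    def insert_col_of(vals, val):
        # Hypothetical sketch: attach a constant column to a 2-D array
        col = np.full((vals.shape[0], 1), float(val))
        return np.concatenate([col, vals], axis=1)

    z = np.array([[0.3, -1.2]])   # k - 1 = 2 free logits for k = 3 classes
    print(insert_col_of(z, 0))    # [[ 0.   0.3 -1.2]]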
Example 4
def __call__(self, x):
    x = to_tensor(x)
    # Linear (softmax-regression) head: the weights produce k - 1 logit
    # columns and the zero column supplies the k-th.
    return Categorical(O.insert_col_of(x @ self.weights() + self.bias(), 0))
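
A quick shape check of that linear head in numpy (dimensions are illustrative):

    import numpy as np

    n, d, k = 8, 4, 3                # batch size, features, classes
    x = np.random.randn(n, d)
    w = np.random.randn(d, k - 1)    # only k - 1 logit columns are learned
    b = np.random.randn(k - 1)
    logits = x @ w + b               # (8, 2); the zero column makes it (8, 3)
    print(logits.shape)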
Example 5
def __call__(self, x):
    return Categorical(O.add_col_of(x @ self.weights() + self.bias(), 0))
Example 6
def __call__(self, x):
    return Categorical(O.add_col_of(self.network(x), 0))
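
Examples 5 and 6 mirror Examples 4 and 3 but call O.add_col_of instead of O.insert_col_of; given the identical call signature and usage, this looks like an earlier or alternate name for the same zero-column operation (an assumption, since the ops module itself is not shown here). Note that these two variants also skip the to_tensor(x) conversion.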
Example 7
# Assumed imports for the PyTorch-backend version of the same test:
import numpy as np
import pytest
import torch
import torch.distributions as tod

from probflow.distributions import Categorical  # assumed import path


def is_close(a, b, tol=1e-3):
    """Approximate scalar equality (assumed test helper)."""
    return np.abs(a - b) < tol


def test_Categorical():
    """Tests the Categorical distribution (PyTorch backend)."""

    # Create the distribution
    dist = Categorical(torch.tensor([0.0, 1.0, 2.0]))

    # Check default params
    assert isinstance(dist.logits, torch.Tensor)
    assert dist.probs is None

    # Call should return backend obj
    assert isinstance(dist(), tod.categorical.Categorical)

    # Test methods
    zero = torch.zeros([1])
    one = torch.ones([1])
    two = 2.0 * torch.ones([1])
    assert dist.prob(zero).numpy() < dist.prob(one).numpy()
    assert dist.prob(one).numpy() < dist.prob(two).numpy()
    assert dist.log_prob(zero).numpy() < dist.log_prob(one).numpy()
    assert dist.log_prob(one).numpy() < dist.log_prob(two).numpy()
    """
    # Mean should return the mode!
    assert dist.mean().numpy() == 2
    #NOTE: pytorch doesn't implement mean()
    """

    # Test sampling
    samples = dist.sample()
    assert isinstance(samples, torch.Tensor)
    assert samples.ndim == 0
    samples = dist.sample(10)
    assert isinstance(samples, torch.Tensor)
    assert samples.ndim == 1
    assert samples.shape[0] == 10

    # Should be able to set params
    dist = Categorical(probs=torch.tensor([0.1, 0.7, 0.2]))
    assert isinstance(dist.probs, torch.Tensor)
    assert dist.logits is None
    assert is_close(dist.prob(zero).numpy(), 0.1)
    assert is_close(dist.prob(one).numpy(), 0.7)
    assert is_close(dist.prob(two).numpy(), 0.2)

    # But only with Tensor-like objs
    with pytest.raises(TypeError):
        dist = Categorical("lalala")

    # Should use the last dim if passed a Tensor arg
    dist = Categorical(probs=torch.tensor([
        [0.1, 0.7, 0.2],
        [0.8, 0.1, 0.1],
        [0.01, 0.01, 0.98],
        [0.3, 0.3, 0.4],
    ]))
    v1 = torch.tensor([0, 1, 2, 2])
    v2 = torch.tensor([2, 1, 0, 0])
    assert is_close(dist.prob(v1).numpy()[0], 0.1)
    assert is_close(dist.prob(v1).numpy()[1], 0.1)
    assert is_close(dist.prob(v1).numpy()[2], 0.98)
    assert is_close(dist.prob(v1).numpy()[3], 0.4)
    assert is_close(dist.prob(v2).numpy()[0], 0.2)
    assert is_close(dist.prob(v2).numpy()[1], 0.1)
    assert is_close(dist.prob(v2).numpy()[2], 0.01)
    assert is_close(dist.prob(v2).numpy()[3], 0.3)

    # And ensure sample dims are correct
    samples = dist.sample()
    assert isinstance(samples, torch.Tensor)
    assert samples.ndim == 1
    assert samples.shape[0] == 4
    samples = dist.sample(10)
    assert isinstance(samples, torch.Tensor)
    assert samples.ndim == 2
    assert samples.shape[0] == 10
    assert samples.shape[1] == 4
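
For reference, the shape checks at the end match how plain torch.distributions.Categorical behaves; the wrapper presumably converts the integer passed to sample(10) into a sample-shape tuple:

    import torch

    d = torch.distributions.Categorical(probs=torch.tensor([
        [0.1, 0.7, 0.2],
        [0.8, 0.1, 0.1],
    ]))
    s = d.sample((10,))   # torch takes a sample_shape tuple, not an int
    print(s.shape)        # torch.Size([10, 2]): sample shape x batch shape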