Example #1
    def __init__(
        self,
        shape: Union[int, List[int]],
        weight_posterior: Type[BaseDistribution] = Deterministic,
        bias_posterior: Type[BaseDistribution] = Deterministic,
        weight_prior: BaseDistribution = Normal(0, 1),
        bias_prior: BaseDistribution = Normal(0, 1),
        weight_initializer: Dict[str, Callable] = {"loc": xavier},
        bias_initializer: Dict[str, Callable] = {"loc": xavier},
        name="BatchNormalization",
    ):

        # Create the parameters
        self.weight = Parameter(
            shape=shape,
            posterior=weight_posterior,
            prior=weight_prior,
            initializer=weight_initializer,
            name=name + "_weight",
        )
        self.bias = Parameter(
            shape=shape,
            posterior=bias_posterior,
            prior=bias_prior,
            initializer=bias_initializer,
            name=name + "_bias",
        )
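
The weight and bias created here are the learnable per-feature scale and shift of a batch-normalization layer. A minimal sketch of the standard forward transform they feed into (the actual __call__ is not shown in this example, so this is the assumed textbook form):

    import numpy as np

    def batch_norm(x, weight, bias, eps=1e-5):
        # Normalize each feature to zero mean / unit variance, then
        # rescale by the learned weight and shift by the learned bias
        mean = x.mean(axis=0)
        var = x.var(axis=0)
        return weight * (x - mean) / np.sqrt(var + eps) + bias

    x = np.random.randn(32, 4)
    print(batch_norm(x, np.ones(4), np.zeros(4)).std(axis=0))  # ~1 per feature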
Example #2
 def __call__(self, x):
     x = to_tensor(x)
     if self.heteroscedastic:
         p = x @ self.weights() + self.bias()
         m_preds = p[..., :, :self.d_o]
         s_preds = O.exp(p[..., :, self.d_o:])
         return Normal(m_preds, s_preds)
     else:
         return Normal(x @ self.weights() + self.bias(), self.std())
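
The slicing above splits the 2 * d_o output columns into means and exponentiated standard deviations. A small numeric illustration of that split, in plain NumPy with the same indexing:

    import numpy as np

    d_o = 2
    p = np.arange(8.0).reshape(1, 4)  # one row, 2 * d_o columns
    m_preds = p[..., :d_o]            # first half -> predicted means
    s_preds = np.exp(p[..., d_o:])    # second half -> stds, made positive via exp
    print(m_preds)  # [[0. 1.]]
    print(s_preds)  # [[ 7.389... 20.085...]]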
Example #3
 def __call__(self, x):
     x = to_tensor(x)
     if self.heteroscedastic:
         p = self.network(x)
         m_preds = p[..., :, : self.d_out]
         s_preds = O.exp(p[..., :, self.d_out :])
         return Normal(m_preds, s_preds)
     else:
         return Normal(self.network(x), self.std())
Example #4
def test_r_squared():
    """Tests probflow.utils.metrics.r_squared"""

    # Predictive dist
    preds = tf.constant([0, 1, 2, 2, 2], dtype=tf.float32)
    pred_dist = Normal(preds, 1)
    y_true = np.array([0, 1, 2, 3, 4]).astype("float32")

    # Compare metric
    assert is_close(metrics.r_squared(y_true, pred_dist.mean()), 0.5)
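
The expected value of 0.5 follows directly from the definition of the coefficient of determination:

    # R^2 = 1 - SS_res / SS_tot
    # residuals  = y_true - preds = [0, 0, 0, 1, 2]        -> SS_res = 0+0+0+1+4 = 5
    # deviations = y_true - mean(y_true) = [-2,-1, 0, 1, 2] -> SS_tot = 4+1+0+1+4 = 10
    # R^2 = 1 - 5/10 = 0.5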
Example #5
 def __call__(self, x):
     x = to_tensor(x)
     if self.heteroscedastic:
         p = self.network(x)
         Nd = int(p.shape[-1] / 2)
         m_preds = p[..., :, 0:Nd]
         s_preds = O.exp(p[..., :, Nd:2 * Nd])
         return Normal(m_preds, s_preds)
     else:
         return Normal(self.network(x), self.std())
Example #6
def test_sum_squared_error():
    """Tests probflow.utils.metrics.sum_squared_error"""

    # Predictive dist
    preds = tf.constant([0, 1, 2, 0, 0, 0], dtype=tf.float32)
    pred_dist = Normal(preds, 1)
    y_true = np.array([0, 0, 0, 0, 1, 2]).astype("float32")

    # Compare metric
    assert is_close(metrics.sum_squared_error(y_true, pred_dist.mean()), 10.0)
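
Again the expected value is simple arithmetic:

    # differences = y_true - preds = [0, -1, -2, 0, 1, 2]
    # sum of squares = 0 + 1 + 4 + 0 + 1 + 4 = 10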
Example #7
    def __init__(self,
                 shape=1,
                 posterior=Normal,
                 prior=Normal(0, 1),
                 transform=None,
                 initializer={
                     'loc': xavier,
                     'scale': scale_xavier
                 },
                 var_transform={
                     'loc': None,
                     'scale': O.softplus
                 },
                 min: float = 0.0,
                 max: float = 1.0,
                 name='BoundedParameter'):

        # Check bounds
        if min > max:
            raise ValueError('min is larger than max')

        # Create the transform based on the bounds
        if transform is None:
            transform = lambda x: min + (max - min) * O.sigmoid(x)

        # Create the parameter
        super().__init__(shape=shape,
                         posterior=posterior,
                         prior=prior,
                         transform=transform,
                         initializer=initializer,
                         var_transform=var_transform,
                         name=name)
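
The default transform squashes an unconstrained posterior into (min, max) with a scaled sigmoid, so every sample respects the bounds by construction. A hypothetical usage sketch (assuming this class is exported as pf.BoundedParameter):

    import probflow as pf

    # A parameter constrained to (0, 1), e.g. a mixing coefficient
    rate = pf.BoundedParameter(min=0.0, max=1.0, name="mixing_coef")
    sample = rate()  # posterior sample, guaranteed to lie inside the bounds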
Example #8
    def __init__(
        self,
        k: Union[int, List[int]],
        d: Union[int, List[int]],
        posterior: Type[BaseDistribution] = Deterministic,
        prior: BaseDistribution = Normal(0, 1),
        initializer: Dict[str, Callable] = {"loc": xavier},
        name: str = "Embedding",
    ):

        # Convert to list if not already
        if isinstance(k, int):
            k = [k]
        if isinstance(d, int):
            d = [d]

        # Check values
        if len(k) != len(d):
            raise ValueError("d and k must be the same length")
        if any(e < 1 for e in k):
            raise ValueError("k must be >0")
        if any(e < 1 for e in d):
            raise ValueError("d must be >0")

        # Create the parameters
        self.embeddings = [
            Parameter(
                shape=[k[i], d[i]],
                posterior=posterior,
                prior=prior,
                initializer=initializer,
                name=name + "_" + str(i),
            ) for i in range(len(d))
        ]
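
A hypothetical usage sketch (assuming this class is exported as pf.Embedding): two categorical features with vocabulary sizes 1000 and 50, embedded into 16 and 4 dimensions respectively:

    import probflow as pf

    emb = pf.Embedding([1000, 50], [16, 4])
    # emb.embeddings is a list of two Parameters with shapes [1000, 16] and [50, 4]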
Example #9
        def __call__(self, x):
            w = self.weight()
            b = self.bias()
            s = self.std()
            m = x * w + b

            # check shapes are as expected
            if self._is_training:
                assert x.ndim == 2
                assert x.shape[0] == 1
                assert x.shape[1] == 50
                assert w.shape[0] == 5
                assert w.shape[1] == 1
                assert b.shape[0] == 5
                assert b.shape[1] == 1
                assert s.shape[0] == 5
                assert s.shape[1] == 1
                assert m.shape[0] == 5
                assert m.shape[1] == 50
            else:  # predicting
                assert x.ndim == 1
                assert x.shape[0] == 11
                assert w.shape[0] == 1
                assert b.shape[0] == 1
                assert s.shape[0] == 1
                assert m.shape[0] == 11

            return Normal(m, s)
Example #10
    def __init__(
        self,
        shape: Union[int, List[int]],
        center_by: str = "all",
        name="CenteredParameter",
    ):

        # Get a list representing the shape
        if isinstance(shape, int):
            shape = [shape]
        if len(shape) == 1:
            shape += [1]
        if len(shape) > 2:
            raise ValueError(
                "Only vector and matrix CenteredParameters are supported")

        # Get the untransformed shape of the parameters
        if center_by == "row":
            K = shape[1]
            raw_shape = [K - 1, shape[0]]
        elif center_by == "column":
            K = shape[0]
            raw_shape = [K - 1, shape[1]]
        else:
            K = shape[0] * shape[1]
            raw_shape = [K - 1, 1]

        # Prior on the untransformed parameters
        scale = float(1.0 / np.sqrt(1 - 1.0 / K))
        prior = Normal(0, scale)

        # Precompute matrix by which we'll multiply the untransformed params
        A = np.eye(K)
        A[-1, :] = -1.0
        A[-1, -1] = 0.0
        Q, _ = np.linalg.qr(A)
        self._A_qr = to_default_dtype(to_tensor(Q[:, :-1]))

        # Transform function
        def A_qr_transform(u):
            if center_by == "row":
                return O.transpose(self._A_qr @ u)
            elif center_by == "all" and shape[1] > 1:
                new_shape = list(u.shape)  # to handle samples / n_mc > 1
                new_shape[-1] = shape[-1]
                new_shape[-2] = shape[-2]
                return O.reshape(self._A_qr @ u, new_shape)
            else:
                return self._A_qr @ u

        super().__init__(
            shape=raw_shape,
            posterior=Normal,
            prior=prior,
            transform=A_qr_transform,
            name=name,
        )
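
The QR trick is easiest to see numerically: the retained columns of Q form an orthonormal basis of the zero-sum subspace, so any vector they produce sums to (numerically) zero. A standalone NumPy check of the same construction:

    import numpy as np

    K = 5
    A = np.eye(K)
    A[-1, :] = -1.0
    A[-1, -1] = 0.0
    Q, _ = np.linalg.qr(A)

    u = np.random.randn(K - 1, 1)  # K-1 unconstrained parameters
    v = Q[:, :-1] @ u              # K transformed parameters
    print(v.sum())                 # ~0, up to floating-point error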
Example #11
def test_HiddenMarkovModel():
    """Tests hidden Markov model distribution"""

    # Create the distribution (3 states)
    initial = tf.random.normal([3])
    transition = tf.random.normal([3, 3])
    observation = Normal(tf.random.normal([3]), tf.exp(tf.random.normal([3])))
    steps = 5
    dist = HiddenMarkovModel(initial, transition, observation, steps)

    # Should fail with incorrect args
    with pytest.raises(TypeError):
        HiddenMarkovModel("lala", transition, observation, steps)
    with pytest.raises(TypeError):
        HiddenMarkovModel(initial, "lala", observation, steps)
    with pytest.raises(TypeError):
        HiddenMarkovModel(initial, transition, observation, "lala")
    with pytest.raises(ValueError):
        HiddenMarkovModel(initial, transition, observation, -1)

    # Call should return backend obj
    assert isinstance(dist(), tfd.HiddenMarkovModel)

    # Test sampling
    samples = dist.sample()
    assert isinstance(samples, tf.Tensor)
    assert samples.ndim == 1
    assert samples.shape[0] == 5
    samples = dist.sample(10)
    assert isinstance(samples, tf.Tensor)
    assert samples.ndim == 2
    assert samples.shape[0] == 10
    assert samples.shape[1] == 5

    # Test methods
    probs = dist.prob([-1.0, 1.0, 0.0, 0.0, 0.0])
    assert probs.ndim == 0
    probs = dist.prob(np.random.randn(7, 5))
    assert probs.ndim == 1
    assert probs.shape[0] == 7

    # Should also work with a backend distribution
    observation = tfd.Normal(tf.random.normal([3]),
                             tf.exp(tf.random.normal([3])))
    dist = HiddenMarkovModel(initial, transition, observation, steps)

    # Test sampling
    samples = dist.sample()
    assert isinstance(samples, tf.Tensor)
    assert samples.ndim == 1
    assert samples.shape[0] == 5
    samples = dist.sample(10)
    assert isinstance(samples, tf.Tensor)
    assert samples.ndim == 2
    assert samples.shape[0] == 10
    assert samples.shape[1] == 5
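
For reference, the backend object returned by dist() corresponds to a TensorFlow Probability construction along these lines (a sketch; it assumes the raw initial and transition tensors are treated as Categorical logits):

    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions

    hmm = tfd.HiddenMarkovModel(
        initial_distribution=tfd.Categorical(logits=tf.random.normal([3])),
        transition_distribution=tfd.Categorical(logits=tf.random.normal([3, 3])),
        observation_distribution=tfd.Normal(
            tf.random.normal([3]), tf.exp(tf.random.normal([3]))
        ),
        num_steps=5,
    )
    print(hmm.sample().shape)  # (5,): one observation per step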
Example #12
    def __init__(
        self,
        shape: Union[int, List[int]] = 1,
        posterior: Type[BaseDistribution] = Normal,
        prior: BaseDistribution = Normal(0, 1),
        transform: Callable = None,
        initializer: Dict[str, Callable] = {
            "loc": xavier,
            "scale": scale_xavier,
        },
        var_transform: Dict[str, Callable] = {
            "loc": None,
            "scale": O.softplus,
        },
        name: str = "Parameter",
    ):

        # Make shape a list
        if isinstance(shape, int):
            shape = [shape]

        # Check values
        if any(e < 1 for e in shape):
            raise ValueError("all shapes must be >0")

        # Assign attributes
        self.shape = shape
        self.posterior_fn = posterior
        self.prior = prior
        self.transform = transform if transform else lambda x: x
        self.initializer = initializer
        self.name = name
        self._static_samples_uuid = None
        self.var_transform = {
            n: (f if f else lambda x: x) for (n, f) in var_transform.items()
        }

        # Create variables for the variational distribution
        self.untransformed_variables = dict()
        for var, init in initializer.items():

            # Int or float initializer: fill the whole array with that value
            if isinstance(init, (int, float)):
                initial_value = O.full(shape, init)
            else:  # TODO: should also support numpy arrays + backend tensors
                initial_value = init(shape)

            # Create the variables
            self.untransformed_variables[var] = O.new_variable(initial_value)
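
A hypothetical usage sketch (assuming the standard pf.Parameter export): each call draws a fresh reparameterized sample from the variational posterior built on the loc/scale variables created above:

    import probflow as pf

    w = pf.Parameter(shape=[3, 1], name="w")
    s1, s2 = w(), w()          # two independent posterior samples
    mean = w.posterior_mean()  # summary of the posterior (assumed helper)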
Example #13
    def __init__(
        self,
        shape: Union[int, List[int]] = 1,
        posterior: Type[BaseDistribution] = Normal,
        prior: BaseDistribution = Normal(0, 1),
        transform: Callable = None,
        initializer: Dict[str, Callable] = {
            "loc": xavier,
            "scale": scale_xavier,
        },
        var_transform: Dict[str, Callable] = {
            "loc": None,
            "scale": O.softplus,
        },
        name: str = "Parameter",
    ):

        # Make shape a list
        if isinstance(shape, int):
            shape = [shape]

        # Check values
        if any(e < 1 for e in shape):
            raise ValueError("all shapes must be >0")

        # Assign attributes
        self.shape = shape
        self.posterior_fn = posterior
        self.prior = prior
        self.transform = transform if transform else lambda x: x
        self.initializer = initializer
        self.name = name
        self.var_transform = {
            n: (f if f else lambda x: x)
            for (n, f) in var_transform.items()
        }

        # Create variables for the variational distribution
        self.untransformed_variables = dict()
        for var, init in initializer.items():
            if get_backend() == "pytorch":
                import torch

                self.untransformed_variables[var] = torch.nn.Parameter(
                    init(shape))
            else:
                import tensorflow as tf

                self.untransformed_variables[var] = tf.Variable(init(shape))
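
This variant chooses the raw variable type from the active backend, so the same Parameter code runs under both frameworks. A sketch of switching backends (assuming ProbFlow's pf.set_backend):

    import probflow as pf

    pf.set_backend("pytorch")     # untransformed variables become torch.nn.Parameter
    pf.set_backend("tensorflow")  # ...or tf.Variable under TensorFlow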
Example #14
 def __init__(self,
              shape=1,
              posterior=Deterministic,
              prior=Normal(0, 1),
              transform=None,
              initializer={'loc': xavier},
              var_transform={'loc': None},
              name='DeterministicParameter'):
     super().__init__(shape=shape,
                      posterior=posterior,
                      prior=prior,
                      transform=transform,
                      initializer=initializer,
                      var_transform=var_transform,
                      name=name)
Example #15
 def __init__(
     self,
     shape=1,
     posterior=Normal,
     prior=Normal(0, 1),
     transform=O.softplus,
     initializer={"loc": xavier, "scale": scale_xavier},
     var_transform={"loc": None, "scale": O.softplus},
     name="PositiveParameter",
 ):
     super().__init__(
         shape=shape,
         posterior=posterior,
         prior=prior,
         transform=transform,
         initializer=initializer,
         var_transform=var_transform,
         name=name,
     )
Example #16
    def __init__(self,
                 k: int,
                 d: int,
                 posterior: Type[BaseDistribution] = Deterministic,
                 prior: BaseDistribution = Normal(0, 1),
                 initializer: Dict[str, Callable] = {'loc': xavier},
                 name: str = 'Embeddings'):

        # Check types
        if k < 1:
            raise ValueError('k must be >0')
        if d < 1:
            raise ValueError('d must be >0')

        # Create the parameters
        self.embeddings = Parameter(shape=[k, d],
                                    posterior=posterior,
                                    prior=prior,
                                    initializer=initializer,
                                    name=name)
Example #17
 def __init__(self,
              shape=1,
              posterior=Normal,
              prior=Normal(0, 1),
              transform=O.softplus,
              initializer={
                  'loc': xavier,
                  'scale': scale_xavier
              },
              var_transform={
                  'loc': None,
                  'scale': O.softplus
              },
              name='PositiveParameter'):
     super().__init__(shape=shape,
                      posterior=posterior,
                      prior=prior,
                      transform=transform,
                      initializer=initializer,
                      var_transform=var_transform,
                      name=name)
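
The softplus transform is what keeps this parameter positive: it maps any real number into (0, ∞). A quick numeric check:

    import numpy as np

    def softplus(x):
        return np.log1p(np.exp(x))  # log(1 + e^x), always > 0

    print(softplus(np.array([-10.0, 0.0, 10.0])))
    # [4.54e-05, 0.693..., 10.000...]: even large negative inputs stay positive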
Example #18
 def __call__(self, x):
     return Normal(self.module(x), self.std())
Example #19
 def __call__(self):
     return Normal(self.mean(), self.std())
Example #20
 def __call__(self, x):
     return Normal(self.net(x), 1.0)
Example #21
 def __call__(self, x):
     return Normal(self.network(x), self.std())
Example #22
 def __call__(self, x):
     w = self.weight()
     b = self.bias()
     s = self.std()
     m = x @ w + b
     return Normal(m, s)
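
Examples #18 through #30 are all variations on the same pattern: __call__ maps inputs to a predictive distribution. A minimal end-to-end sketch of fitting a model like the one above (assuming ProbFlow's standard ContinuousModel, Parameter, ScaleParameter, fit, and predict APIs):

    import numpy as np
    import probflow as pf

    class LinearRegression(pf.ContinuousModel):
        def __init__(self, d):
            self.weight = pf.Parameter([d, 1], name="weight")
            self.bias = pf.Parameter([1, 1], name="bias")
            self.std = pf.ScaleParameter([1, 1], name="std")

        def __call__(self, x):
            return pf.Normal(x @ self.weight() + self.bias(), self.std())

    x = np.random.randn(100, 3).astype("float32")
    y = (x @ np.ones((3, 1)) + 0.1 * np.random.randn(100, 1)).astype("float32")

    model = LinearRegression(3)
    model.fit(x, y, epochs=10)
    preds = model.predict(x)  # posterior-predictive means, shape (100, 1)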
Example #23
 def __call__(self, x):
     x = torch.tensor(x)
     return Normal(x @ self.weight() + self.bias(), self.std())
Example #24
 def __call__(self, x):
     return Normal(x * self.weight() + self.bias(), 0.1)
Example #25
 def __call__(self, x):
     x = to_tensor(x)
     return Normal(self.net(x), self.std())
Example #26
 def __call__(self, x):
     x = x[self.cols].values
     x = to_tensor(x)
     return Normal(x @ self.weight() + self.bias(), self.std())
Example #27
 def __call__(self, x):
     return Normal(x @ self.weight() + self.bias(), self.std())
Example #28
 def __call__(self, x):
     x = torch.tensor(x)
     return Normal(self.module(x), self.std())
Example #29
 def __call__(self, x):
     return Normal(x, 1.0)
Example #30
 def __call__(self, x):
     # can't check shapes because tracing ignores this code,
     # so just check that it runs
     return Normal(x * self.weight() + self.bias(), self.std())