Example #1
def get_transforms(cache_size):
    transforms = [
        AbsTransform(cache_size=cache_size),
        ExpTransform(cache_size=cache_size),
        PowerTransform(exponent=2,
                       cache_size=cache_size),
        PowerTransform(exponent=torch.tensor(5.).normal_(),
                       cache_size=cache_size),
        PowerTransform(exponent=torch.tensor(5.).normal_(),
                       cache_size=cache_size),
        SigmoidTransform(cache_size=cache_size),
        TanhTransform(cache_size=cache_size),
        AffineTransform(0, 1, cache_size=cache_size),
        AffineTransform(1, -2, cache_size=cache_size),
        AffineTransform(torch.randn(5),
                        torch.randn(5),
                        cache_size=cache_size),
        AffineTransform(torch.randn(4, 5),
                        torch.randn(4, 5),
                        cache_size=cache_size),
        SoftmaxTransform(cache_size=cache_size),
        SoftplusTransform(cache_size=cache_size),
        StickBreakingTransform(cache_size=cache_size),
        LowerCholeskyTransform(cache_size=cache_size),
        CorrCholeskyTransform(cache_size=cache_size),
        ComposeTransform([
            AffineTransform(torch.randn(4, 5),
                            torch.randn(4, 5),
                            cache_size=cache_size),
        ]),
        ComposeTransform([
            AffineTransform(torch.randn(4, 5),
                            torch.randn(4, 5),
                            cache_size=cache_size),
            ExpTransform(cache_size=cache_size),
        ]),
        ComposeTransform([
            AffineTransform(0, 1, cache_size=cache_size),
            AffineTransform(torch.randn(4, 5),
                            torch.randn(4, 5),
                            cache_size=cache_size),
            AffineTransform(1, -2, cache_size=cache_size),
            AffineTransform(torch.randn(4, 5),
                            torch.randn(4, 5),
                            cache_size=cache_size),
        ]),
        ReshapeTransform((4, 5), (2, 5, 2)),
        IndependentTransform(
            AffineTransform(torch.randn(5),
                            torch.randn(5),
                            cache_size=cache_size),
            1),
        CumulativeDistributionTransform(Normal(0, 1)),
    ]
    transforms += [t.inv for t in transforms]
    return transforms
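As a quick sanity check (a sketch, not part of the original suite), any bijective transform t in this list should satisfy t.inv(t(x)) == x for x in its domain. The snippet below exercises only ExpTransform and a fixed AffineTransform, whose domains are all of R, to avoid per-transform domain handling:

import torch
from torch.distributions.transforms import AffineTransform, ExpTransform

x = torch.randn(4, 5)
for t in [ExpTransform(), AffineTransform(1.0, 2.0)]:
    y = t(x)
    assert torch.allclose(t.inv(y), x, atol=1e-6)   # round trip through the inverse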
Example #2
 def __init__(self, loc, scale, validate_args=None):
     self.loc, self.scale = broadcast_all(loc, scale)
     finfo = _finfo(self.loc)
     if isinstance(loc, Number) and isinstance(scale, Number):
         base_dist = Uniform(finfo.tiny, 1 - finfo.eps)
     else:
         base_dist = Uniform(self.loc.new(self.loc.size()).fill_(finfo.tiny), 1 - finfo.eps)
     transforms = [ExpTransform().inv, AffineTransform(loc=0, scale=-torch.ones_like(self.scale)),
                   ExpTransform().inv, AffineTransform(loc=loc, scale=-self.scale)]
     super(Gumbel, self).__init__(base_dist, transforms, validate_args=validate_args)
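Read in order, the four transforms map u ~ Uniform(0, 1) to log(u), then -log(u), then log(-log(u)), then loc - scale * log(-log(u)), which is exactly inverse-CDF sampling for Gumbel(loc, scale). A small numeric sketch of the same map:

import torch
from torch.distributions import Gumbel

torch.manual_seed(0)
u = torch.rand(100_000)
loc, scale = 1.0, 2.0
x = loc - scale * torch.log(-torch.log(u))   # the composed transform chain above
print(x.mean())                              # roughly loc + scale * 0.5772 ~= 2.15
print(Gumbel(loc, scale).mean)               # tensor(2.1544)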
Example #3
 def __init__(self, scale, alpha, validate_args=None):
     self.scale, self.alpha = broadcast_all(scale, alpha)
     base_dist = Exponential(self.alpha, validate_args=validate_args)
     transforms = [ExpTransform(), AffineTransform(loc=0, scale=self.scale)]
     super(Pareto, self).__init__(base_dist,
                                  transforms,
                                  validate_args=validate_args)
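The pair of transforms says: if E ~ Exponential(alpha), then scale * exp(E) ~ Pareto(scale, alpha), since P(scale * exp(E) > x) = exp(-alpha * log(x / scale)) = (scale / x)**alpha for x >= scale. A quick sampling sketch:

import torch
from torch.distributions import Exponential, Pareto

torch.manual_seed(0)
scale, alpha = 2.0, 3.0
e = Exponential(alpha).sample((100_000,))
x = scale * torch.exp(e)              # ExpTransform followed by AffineTransform, as above
print(x.min())                        # >= scale, the lower bound of the Pareto support
print(x.mean())                       # roughly alpha * scale / (alpha - 1) = 3.0
print(Pareto(scale, alpha).mean)      # tensor(3.)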
Example #4
def get_transforms(cache_size):
    transforms = [
        AbsTransform(cache_size=cache_size),
        ExpTransform(cache_size=cache_size),
        PowerTransform(exponent=2,
                       cache_size=cache_size),
        PowerTransform(exponent=torch.tensor(5.).normal_(),
                       cache_size=cache_size),
        SigmoidTransform(cache_size=cache_size),
        TanhTransform(cache_size=cache_size),
        AffineTransform(0, 1, cache_size=cache_size),
        AffineTransform(1, -2, cache_size=cache_size),
        AffineTransform(torch.randn(5),
                        torch.randn(5),
                        cache_size=cache_size),
        AffineTransform(torch.randn(4, 5),
                        torch.randn(4, 5),
                        cache_size=cache_size),
        SoftmaxTransform(cache_size=cache_size),
        StickBreakingTransform(cache_size=cache_size),
        LowerCholeskyTransform(cache_size=cache_size),
        CorrCholeskyTransform(cache_size=cache_size),
        ComposeTransform([
            AffineTransform(torch.randn(4, 5),
                            torch.randn(4, 5),
                            cache_size=cache_size),
        ]),
        ComposeTransform([
            AffineTransform(torch.randn(4, 5),
                            torch.randn(4, 5),
                            cache_size=cache_size),
            ExpTransform(cache_size=cache_size),
        ]),
        ComposeTransform([
            AffineTransform(0, 1, cache_size=cache_size),
            AffineTransform(torch.randn(4, 5),
                            torch.randn(4, 5),
                            cache_size=cache_size),
            AffineTransform(1, -2, cache_size=cache_size),
            AffineTransform(torch.randn(4, 5),
                            torch.randn(4, 5),
                            cache_size=cache_size),
        ]),
    ]
    transforms += [t.inv for t in transforms]
    return transforms
Example #5
    def __init__(self,
                 dim,
                 act=nn.ReLU(),
                 num_hiddens=[50],
                 nout=1,
                 conf=dict()):
        nn.Module.__init__(self)
        BNN.__init__(self)
        self.dim = dim
        self.act = act
        self.num_hiddens = num_hiddens
        self.nout = nout
        self.steps_burnin = conf.get('steps_burnin', 2500)
        self.steps = conf.get('steps', 2500)
        self.keep_every = conf.get('keep_every', 50)
        self.batch_size = conf.get('batch_size', 32)
        self.warm_start = conf.get('warm_start', False)

        self.lr_weight = np.float32(conf.get('lr_weight', 1e-3))
        self.lr_noise = np.float32(conf.get('lr_noise', 1e-3))
        self.lr_lambda = np.float32(conf.get('lr_lambda', 1e-3))
        self.alpha_w = torch.as_tensor(1. * conf.get('alpha_w', 6.))
        self.beta_w = torch.as_tensor(1. * conf.get('beta_w', 6.))
        self.alpha_n = torch.as_tensor(1. * conf.get('alpha_n', 6.))
        self.beta_n = torch.as_tensor(1. * conf.get('beta_n', 6.))
        self.noise_level = conf.get('noise_level', None)
        if self.noise_level is not None:
            prec = 1 / self.noise_level**2
            prec_var = (prec * 0.25)**2
            self.beta_n = torch.as_tensor(prec / prec_var)
            self.alpha_n = torch.as_tensor(prec * self.beta_n)
            print("Reset alpha_n = %g, beta_n = %g" %
                  (self.alpha_n, self.beta_n))

        self.prior_log_lambda = TransformedDistribution(
            Gamma(self.alpha_w, self.beta_w),
            ExpTransform().inv)  # distribution of the log of a Gamma variable
        self.prior_log_precision = TransformedDistribution(
            Gamma(self.alpha_n, self.beta_n),
            ExpTransform().inv)

        self.log_lambda = nn.Parameter(torch.tensor(0.))
        self.log_precs = nn.Parameter(torch.zeros(self.nout))
        self.nn = NN(dim, self.act, self.num_hiddens, self.nout)

        self.init_nn()
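TransformedDistribution(Gamma(a, b), ExpTransform().inv) is the distribution of log(G) for G ~ Gamma(a, b); by the change of variables y = log(g), its log density is Gamma.log_prob(exp(y)) + y. A brief check of that identity (a sketch, independent of the class above):

import torch
from torch.distributions import Gamma, TransformedDistribution
from torch.distributions.transforms import ExpTransform

alpha, beta = torch.tensor(6.), torch.tensor(6.)
log_gamma = TransformedDistribution(Gamma(alpha, beta), ExpTransform().inv)

y = torch.tensor(0.3)                            # a point on the log scale
print(log_gamma.log_prob(y))
print(Gamma(alpha, beta).log_prob(y.exp()) + y)  # identical up to rounding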
Example #6
 def expand(self, batch_shape, _instance=None):
     new = self._get_checked_instance(RelaxedOneHotCategorical, _instance)
     base_dist = self.base_dist.expand(batch_shape)
     super(RelaxedOneHotCategorical, new).__init__(base_dist,
                                                   ExpTransform(),
                                                   validate_args=False)
     new._validate_args = self._validate_args
     return new
Example #7
 def __init__(self,
              temperature,
              probs=None,
              logits=None,
              validate_args=None):
     super(RelaxedOneHotCategorical,
           self).__init__(ExpRelaxedCategorical(temperature, probs, logits),
                          ExpTransform(),
                          validate_args=validate_args)
Example #8
 def expand(self, batch_shape, _instance=None):
     new = self._get_checked_instance(LogNormal, _instance)
     batch_shape = torch.Size(batch_shape)
     base_dist = self.base_dist.expand(batch_shape)
     super(LogNormal, new).__init__(base_dist,
                                    ExpTransform(),
                                    validate_args=False)
     new._validate_args = self._validate_args
     return new
Example #9
def test_compose_transform_shapes():
    transform0 = ExpTransform()
    transform1 = SoftmaxTransform()
    transform2 = LowerCholeskyTransform()

    assert transform0.event_dim == 0
    assert transform1.event_dim == 1
    assert transform2.event_dim == 2
    assert ComposeTransform([transform0, transform1]).event_dim == 1
    assert ComposeTransform([transform0, transform2]).event_dim == 2
    assert ComposeTransform([transform1, transform2]).event_dim == 2
Example #10
    def _embed_posterior(self, x, epsilon):
        for xi in x:
            if xi not in self.loc:
                # Randomly initialize to break symmetry and prevent posteriors from
                # starting in the same spot
                self._loc[xi] = torch.nn.Parameter({
                    "random": torch.randn,
                    "prior": torch.zeros
                }[self.unseen_policy](self.d_out))
                self._scale[xi] = Param({
                    "random": lambda s: 0.1 * torch.ones(s),
                    "prior": torch.ones
                }[self.unseen_policy](self.d_out),
                                        transform=ExpTransform())

        # NB "epsilon" takes care of whether self.random or not.
        return torch.stack([
            self.loc[xi] + self.scale[xi].transform() * epsilon[xi] for xi in x
        ])
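Param(..., transform=ExpTransform()) keeps the per-embedding scale positive by storing an unconstrained tensor and mapping it through exp; Param itself belongs to the surrounding library. A self-contained sketch of the same idea using only torch (PositiveParam is a name invented for this sketch):

import torch
from torch.distributions.transforms import ExpTransform

class PositiveParam(torch.nn.Module):
    """Store an unconstrained tensor; expose a strictly positive view of it."""
    def __init__(self, init_value):
        super().__init__()
        self.transform = ExpTransform()
        # store the pre-image (log) of the requested initial value
        self.raw = torch.nn.Parameter(self.transform.inv(torch.as_tensor(init_value)))

    def forward(self):
        return self.transform(self.raw)   # always > 0, differentiable w.r.t. self.raw

scale = PositiveParam(0.1 * torch.ones(8))
print(scale())                            # positive values close to 0.1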
Example #11
    def __init__(self,
                 dim,
                 act=nn.ReLU(),
                 num_hiddens=[50],
                 nout=1,
                 conf=dict()):
        nn.Module.__init__(self)
        BNN.__init__(self)
        self.dim = dim
        self.act = act
        self.num_hiddens = num_hiddens
        self.nout = nout
        self.steps_burnin = conf.get('steps_burnin', 2500)
        self.steps = conf.get('steps', 2500)
        self.keep_every = conf.get('keep_every', 50)
        self.batch_size = conf.get('batch_size', 32)
        self.warm_start = conf.get('warm_start', False)

        self.lr_weight = conf.get('lr_weight', 2e-2)
        self.lr_noise = conf.get('lr_noise', 1e-1)
        self.alpha_n = torch.as_tensor(1. * conf.get('alpha_n', 1e-2))
        self.beta_n = torch.as_tensor(1. * conf.get('beta_n', 1e-1))

        # The user can specify a noise level; if given, it overrides alpha_n and beta_n.
        self.noise_level = conf.get('noise_level', None)
        if self.noise_level is not None:
            prec = 1 / self.noise_level**2
            prec_var = (prec * 0.25)**2
            self.beta_n = torch.as_tensor(prec / prec_var)
            self.alpha_n = torch.as_tensor(prec * self.beta_n)
            print("Reset alpha_n = %g, beta_n = %g" %
                  (self.alpha_n, self.beta_n))

        self.prior_log_precision = TransformedDistribution(
            Gamma(self.alpha_n, self.beta_n),
            ExpTransform().inv)

        self.log_precs = nn.Parameter(torch.zeros(self.nout))
        self.nn = NN(dim, self.act, self.num_hiddens, self.nout)
        self.gain = 5. / 3  # Assume tanh activation

        self.init_nn()
Example #12
 def __init__(self, log10_low, log10_high):
     base_dist = torch.distributions.Uniform(
         np.log(10) * log10_low,
         np.log(10) * log10_high)
     super(LogUniform, self).__init__(base_dist, [ExpTransform()])
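Scaling the log10 bounds by ln(10) puts the base Uniform on the natural-log range, so exponentiating spreads samples log-uniformly over [10**log10_low, 10**log10_high]. A sampling sketch:

import numpy as np
import torch
from torch.distributions import TransformedDistribution, Uniform
from torch.distributions.transforms import ExpTransform

log10_low, log10_high = -3.0, 1.0
base = Uniform(np.log(10) * log10_low, np.log(10) * log10_high)
log_uniform = TransformedDistribution(base, [ExpTransform()])

s = log_uniform.sample((100_000,))
print(s.min(), s.max())     # stays within [1e-3, 10]
print(s.log10().mean())     # roughly (log10_low + log10_high) / 2 = -1.0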
Example #13
 def __init__(self, loc, scale):
     super(LogNormal, self).__init__(Normal(loc, scale), ExpTransform())
Example #14
 def __init__(self, loc, scale, validate_args=None):
     super(LogNormal, self).__init__(Normal(loc, scale), ExpTransform(), validate_args=validate_args)
Example #15
 def __init__(self, loc, scale, censMsk, validate_args=None):
     super(LogNormalCens, self).__init__(NormalCens(loc, scale, censMsk), ExpTransform(), validate_args=validate_args)
Example #16
 def __init__(self, loc, scale, validate_args=None):
     base_dist = Normal(loc, scale, validate_args=validate_args)
     super(LogNormal, self).__init__(base_dist,
                                     ExpTransform(),
                                     validate_args=validate_args)
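Wrapping Normal(loc, scale) with ExpTransform is how torch.distributions.LogNormal itself is built, and the resulting log_prob picks up the log-Jacobian term: log p(y) = Normal.log_prob(log y) - log y. A quick comparison against the built-in class:

import torch
from torch.distributions import LogNormal, Normal, TransformedDistribution
from torch.distributions.transforms import ExpTransform

loc, scale = torch.tensor(0.5), torch.tensor(1.2)
manual = TransformedDistribution(Normal(loc, scale), ExpTransform())
builtin = LogNormal(loc, scale)

y = torch.tensor([0.1, 1.0, 5.0])
print(manual.log_prob(y))
print(builtin.log_prob(y))   # matches the manual construction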
Example #17
def _transform_to_positive_ordered_vector(constraint):
    return ComposeTransform([OrderedTransform(), ExpTransform()])
Example #18

def test_compose_transform_shapes():
    transform0 = ExpTransform()
    transform1 = SoftmaxTransform()
    transform2 = LowerCholeskyTransform()

    assert transform0.event_dim == 0
    assert transform1.event_dim == 1
    assert transform2.event_dim == 2
    assert ComposeTransform([transform0, transform1]).event_dim == 1
    assert ComposeTransform([transform0, transform2]).event_dim == 2
    assert ComposeTransform([transform1, transform2]).event_dim == 2


transform0 = ExpTransform()
transform1 = SoftmaxTransform()
transform2 = LowerCholeskyTransform()
base_dist0 = Normal(torch.zeros(4, 4), torch.ones(4, 4))
base_dist1 = Dirichlet(torch.ones(4, 4))
base_dist2 = Normal(torch.zeros(3, 4, 4), torch.ones(3, 4, 4))


@pytest.mark.parametrize('batch_shape, event_shape, dist', [
    ((4, 4), (), base_dist0),
    ((4,), (4,), base_dist1),
    ((4, 4), (), TransformedDistribution(base_dist0, [transform0])),
    ((4,), (4,), TransformedDistribution(base_dist0, [transform1])),
    ((4,), (4,), TransformedDistribution(base_dist0, [transform0, transform1])),
    ((), (4, 4), TransformedDistribution(base_dist0, [transform0, transform2])),
    ((4,), (4,), TransformedDistribution(base_dist0, [transform1, transform0])),
Example #19
class TransformMixIn:
    """Mixin for providing pre- and post-processing capabilities to encoders.

    Class should have a ``transformation`` attribute to indicate how to preprocess data.
    """

    # Dict of PyTorch functions that transform and inversely transform values.
    # An "inverse" entry is required if "reverse" is not the exact inverse of "forward".
    TRANSFORMATIONS = {
        "log":
        dict(forward=_clipped_log,
             reverse=torch.exp,
             inverse_torch=ExpTransform()),
        "log1p":
        dict(forward=torch.log1p,
             reverse=torch.exp,
             inverse=torch.expm1,
             inverse_torch=Expm1Transform()),
        "logit":
        dict(forward=_clipped_logit,
             reverse=_clipped_sigmoid,
             inverse_torch=SigmoidTransform()),
        "count":
        dict(forward=_plus_one,
             reverse=F.softplus,
             inverse=_minus_one,
             inverse_torch=MinusOneTransform()),
        "softplus":
        dict(forward=softplus_inv,
             reverse=F.softplus,
             inverse_torch=SoftplusTransform()),
        "relu":
        dict(forward=_identity,
             reverse=F.relu,
             inverse=_identity,
             inverse_torch=ReLuTransform()),
        "sqrt":
        dict(forward=torch.sqrt,
             reverse=_square,
             inverse_torch=PowerTransform(exponent=2.0)),
    }

    @classmethod
    def get_transform(
        cls, transformation: Union[str,
                                   Dict[str,
                                        Callable]]) -> Dict[str, Callable]:
        """Return transformation functions.

        Args:
            transformation (Union[str, Dict[str, Callable]]): name of transformation or
                dictionary with transformation information.

        Returns:
            Dict[str, Callable]: dictionary with transformation functions (forward, reverse, inverse and inverse_torch)
        """
        return cls.TRANSFORMATIONS.get(transformation, transformation)

    def preprocess(
        self, y: Union[pd.Series, pd.DataFrame, np.ndarray, torch.Tensor]
    ) -> Union[np.ndarray, torch.Tensor]:
        """
        Preprocess input data (e.g. take log).

        Uses the ``transformation`` attribute to determine how to apply the transform.

        Returns:
            Union[np.ndarray, torch.Tensor]: return rescaled series with type depending on input type
        """
        if self.transformation is None:
            return y

        if isinstance(y, torch.Tensor):
            y = self.get_transform(self.transformation)["forward"](y)
        else:
            # convert first to tensor, then transform and then convert to numpy array
            if isinstance(y, (pd.Series, pd.DataFrame)):
                y = y.to_numpy()
            y = torch.as_tensor(y)
            y = self.get_transform(self.transformation)["forward"](y)
            y = np.asarray(y)
        return y

    def inverse_preprocess(
        self, y: Union[pd.Series, np.ndarray, torch.Tensor]
    ) -> Union[np.ndarray, torch.Tensor]:
        """
        Inverse preprocess re-scaled data (e.g. take exp).

        Uses the ``transformation`` attribute to determine how to apply the inverse transform.

        Returns:
            Union[np.ndarray, torch.Tensor]: return rescaled series with type depending on input type
        """
        if self.transformation is None:
            pass
        elif isinstance(y, torch.Tensor):
            y = self.get_transform(self.transformation)["reverse"](y)
        else:
            # convert first to tensor, then transform and then convert to numpy array
            y = torch.as_tensor(y)
            y = self.get_transform(self.transformation)["reverse"](y)
            y = np.asarray(y)
        return y
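A minimal usage sketch (the encoder class below is invented for illustration; it only declares a transformation and mixes in TransformMixIn). Note that for "log1p" the registered "reverse" is torch.exp rather than the exact inverse torch.expm1, which is precisely the distinction the TRANSFORMATIONS comment describes:

import torch

class Log1pEncoder(TransformMixIn):
    # Toy encoder: only declares which transformation to apply.
    transformation = "log1p"

enc = Log1pEncoder()
y = torch.tensor([0.0, 1.0, 9.0])
z = enc.preprocess(y)                              # torch.log1p(y)
print(enc.inverse_preprocess(z))                   # torch.exp(z) == y + 1 (the "reverse" map)
print(enc.get_transform("log1p")["inverse"](z))    # torch.expm1(z) recovers y exactly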