Code example #1
 def __init__(self, concentration1, concentration0, validate_args=None):
     base_distribution = Beta(concentration1,
                              concentration0,
                              validate_args=validate_args)
     super(RescaledBeta,
           self).__init__(base_distribution=base_distribution,
                          transforms=AffineTransform(loc=-1., scale=2.))
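A minimal, self-contained sketch of the same pattern (the class name RescaledBeta and the imports are assumed from the snippet): AffineTransform(loc=-1., scale=2.) maps the (0, 1) support of the Beta base onto (-1, 1) via y = -1 + 2x, and log_prob automatically picks up the -log(2) Jacobian correction.

import torch
from torch.distributions import AffineTransform, Beta, TransformedDistribution

class RescaledBeta(TransformedDistribution):
    def __init__(self, concentration1, concentration0, validate_args=None):
        base = Beta(concentration1, concentration0, validate_args=validate_args)
        super().__init__(base, AffineTransform(loc=-1., scale=2.))

d = RescaledBeta(torch.tensor(2.0), torch.tensor(2.0))
samples = d.sample((5,))     # values lie in (-1, 1)
log_p = d.log_prob(samples)  # Beta log-density minus log(2)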
Code example #2
    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        r"""
        Construct the associated distribution, given the collection of
        constructor arguments and, optionally, a scale tensor.

        Parameters
        ----------
        distr_args
            Constructor arguments for the underlying Distribution type.
        loc
            Optional tensor, of the same shape as the
            batch_shape+event_shape of the resulting distribution.
        scale
            Optional tensor, of the same shape as the
            batch_shape+event_shape of the resulting distribution.
        """
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            transform = AffineTransform(
                loc=0.0 if loc is None else loc,
                scale=1.0 if scale is None else scale,
            )
            return TransformedDistribution(distr, [transform])
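This method illustrates a recurring pattern in several of the examples below (#9, #11, #12, #15, #16): the base distribution is parametrized on normalized data, and the data location and scale are reattached by wrapping the result in an AffineTransform, so that log-probabilities automatically include the -log|scale| Jacobian term.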
Code example #3
    def __init__(self, base_distribution: Distribution, loc=None, scale=None):

        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution,
                         [AffineTransform(self.loc, self.scale)])
Code example #4
 def __init__(self, flatparam, low, high):
     self.low = low
     self.high = high
     squash = TanhTransform(cache_size=1)
     shift = AffineTransform((high + low) / 2, (high - low) / 2,
                             cache_size=1,
                             event_dim=1)
     super().__init__(DiagNormal(flatparam), [squash, shift])
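A quick sanity check on the order of the transforms: TanhTransform first squashes the base sample into (-1, 1), and the affine map then sends x to (high + low)/2 + x * (high - low)/2, whose range is exactly (low, high).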
Code example #5
 def __init__(self, base_dist, normalization_mean, normalization_std):
     self.loc_tensor = torch.tensor(normalization_mean).float().reshape(
         (1, )).to(device)
     self.scale_tensor = torch.tensor(normalization_std).float().reshape(
         (1, )).to(device)
     normalization_transform = AffineTransform(loc=self.loc_tensor,
                                               scale=self.scale_tensor)
     super().__init__(base_dist, normalization_transform)
Code example #6
File: affine.py Project: HaoWen6588/pyfilter
def _define_transdist(loc: torch.Tensor, scale: torch.Tensor,
                      inc_dist: Distribution, ndim: int):
    loc, scale = torch.broadcast_tensors(loc, scale)

    shape = loc.shape[:-ndim] if ndim > 0 else loc.shape

    return TransformedDistribution(inc_dist.expand(shape),
                                   AffineTransform(loc, scale, event_dim=ndim))
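The helper broadcasts loc and scale to a common shape, strips the trailing ndim event dimensions to recover the batch shape, expands the increment distribution to that batch shape, and applies the affine map with event_dim=ndim so the log-Jacobian is summed over the event dimensions. A hypothetical usage sketch (names and values invented for illustration):

import torch
from torch.distributions import Normal

loc = torch.randn(5)
scale = torch.rand(5) + 0.1
inc_dist = Normal(0.0, 1.0)
td = _define_transdist(loc, scale, inc_dist, ndim=0)  # batch_shape == (5,)
x = td.sample()                                       # one draw per batch element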
Code example #7
File: arm.py Project: HEmile/storchastic
 def __init__(self, loc: float, scale: float):
     # Copied from https://pytorch.org/docs/stable/distributions.html#torch.distributions.transformed_distribution.TransformedDistribution
     super().__init__(
         Uniform(0, 1),
         transforms=[
             SigmoidTransform().inv,
             AffineTransform(loc=loc, scale=scale)
         ],
     )
Code example #8
 def __init__(self, loc, scale, validate_args=None):
     base_dist = Uniform(t.zeros(loc.shape),
                         t.ones(loc.shape),
                         validate_args=validate_args)
     if not base_dist.batch_shape:
         base_dist = base_dist.expand([1])
     super(Logistic, self).__init__(
         base_dist, [SigmoidTransform().inv,
                     AffineTransform(loc, scale)])
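Examples #7 and #8 both use the textbook construction of the logistic distribution: if U ~ Uniform(0, 1), then logit(U) is standard logistic, so loc + scale * logit(U) ~ Logistic(loc, scale). A minimal standalone sketch:

import torch
from torch.distributions import (AffineTransform, SigmoidTransform,
                                 TransformedDistribution, Uniform)

loc, scale = 2.0, 0.5
logistic = TransformedDistribution(
    Uniform(0.0, 1.0),
    [SigmoidTransform().inv, AffineTransform(loc, scale)])
x = logistic.sample((10_000,))
assert abs(x.mean().item() - loc) < 0.1  # Logistic(loc, scale) has mean loc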
Code example #9
    def distribution(
        self, distr_args, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        distr = Independent(Normal(*distr_args), 1)

        if scale is None:
            return distr
        else:
            return TransformedDistribution(distr, [AffineTransform(loc=0, scale=scale)])
Code example #10
 def __init__(self, base: Distribution, lower=-0.1, upper=1.1):
     assert lower < 0. and upper > 1., "You need to specify lower < 0 and upper > 1"
     super(Stretched,
           self).__init__(base,
                          AffineTransform(loc=lower, scale=upper - lower))
     self.lower = lower
     self.upper = upper
     self.loc = lower
     self.scale = upper - lower
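The defaults lower = -0.1 and upper = 1.1 suggest this is the "stretch" step of a stretch-and-rectify construction (as in the Hard Concrete or Hard Kumaraswamy distributions): a base distribution on (0, 1) is stretched to (lower, upper) so that, after clamping back to [0, 1], the endpoints 0 and 1 carry non-zero probability mass.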
Code example #11
    def distribution(
        self, distr_args, scale: Optional[torch.Tensor] = None
    ) -> Distribution:

        if scale is None:
            return self.distr_cls(*distr_args)
        else:
            distr = self.distr_cls(*distr_args)
            return TransformedDistribution(distr, [AffineTransform(loc=0, scale=scale)])
Code example #12
    def distribution(
        self, distr_args, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        loc, scale_tri = distr_args
        distr = MultivariateNormal(loc=loc, scale_tril=scale_tri)

        if scale is None:
            return distr
        else:
            return TransformedDistribution(distr, [AffineTransform(loc=0, scale=scale)])
Code example #13
File: params.py Project: kuperov/ptvi
def global_param(
    prior: Prior = None,
    name: str = None,
    transform: Union[Transform, str] = None,
    rename: str = None,
):
    """Define a scalar global model parameter.

    Args:
        prior: parameter prior, a Distribution object
        name: optional, name for parameter
        transform: optional transformation to apply (its domain should be an
                   unconstrained space)
        rename: optional, name of parameter in unconstrained space
    """
    if prior is None:
        prior = ImproperPrior()
    if rename is not None and transform is None:
        raise Exception("rename requires a transform")
    if transform is None:
        return ModelParameter(name=name, prior=prior)
    transform_desc = "transformed"
    if isinstance(transform, str):
        transform_desc = transform
        if transform == "log":
            transform = torch.distributions.ExpTransform().inv
            if rename is None and name is not None:
                rename = f"log{name}"
        elif transform == "exp":
            raise Exception("Use 'log' to constrain parameters > 0")
        elif transform == "logit":
            transform = torch.distributions.SigmoidTransform().inv
            if rename is None and name is not None:
                rename = f"logit{name}"
        elif transform == "slogit":
            # nasty and hacky attempt to avoid saturating the logistic transform
            inv_transform = ComposeTransform(
                [AffineTransform(loc=0, scale=1e-4),
                 SigmoidTransform()])
            transform = inv_transform.inv
            if rename is None and name is not None:
                rename = f"slogit{name}"
        elif transform == "sigmoid":
            raise Exception("Use 'logit' to constrain parameters to (0, 1)")
        else:
            raise Exception(f"Unknown transform {transform}")
    if rename is None and name is not None:
        rename = f"{transform_desc}_{name}"
    return TransformedModelParameter(
        name=name,
        prior=prior,
        transform=transform,
        transformed_name=rename,
        transform_desc=transform_desc,
    )
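Note the naming convention here: the transform string names the map to unconstrained space, so "log" (implemented as ExpTransform().inv) is the right choice for positive parameters and "logit" (SigmoidTransform().inv) for parameters in (0, 1), while "exp" and "sigmoid" are deliberately rejected with a pointer to the correct name.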
Code example #14
def test_log_prob_d2(eta):
    dist = LKJCorrCholesky(2, torch.tensor([eta]))
    test_dist = TransformedDistribution(Beta(eta, eta),
                                        AffineTransform(loc=-1., scale=2.0))

    samples = dist.sample(torch.Size([100]))
    lp = dist.log_prob(samples)
    x = samples[..., 1, 0]
    tst = test_dist.log_prob(x)

    assert_tensors_equal(lp, tst, prec=1e-6)
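This test leans on the d = 2 identity for the LKJ distribution: a 2x2 correlation matrix is fully determined by the single off-diagonal entry r = L[1, 0] of its Cholesky factor, and under LKJ(eta) that entry is distributed as 2 * Beta(eta, eta) - 1, which is exactly the Beta base rescaled by AffineTransform(loc=-1., scale=2.0).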
Code example #15
 def distribution(
     self,
     distr_args,
     loc: Optional[torch.Tensor] = 0,
     scale: Optional[torch.Tensor] = None,
 ) -> PiecewiseLinear:
     if scale is None:
         return self.distr_cls(*distr_args)
     else:
         distr = self.distr_cls(*distr_args)
         return TransformedPiecewiseLinear(
             distr, [AffineTransform(loc=loc, scale=scale)])
Code example #16
    def distribution(self,
                     distr_args,
                     scale: Optional[torch.Tensor] = None) -> Distribution:
        mix_logits, loc, dist_scale = distr_args

        distr = MixtureSameFamily(Categorical(logits=mix_logits),
                                  Normal(loc, dist_scale))
        if scale is None:
            return distr
        else:
            return TransformedDistribution(
                distr, [AffineTransform(loc=0, scale=scale)])
Code example #17
File: test_lkj.py Project: yufengwa/pyro
def test_log_prob_d2(concentration):
    dist = LKJCholesky(2, torch.tensor([concentration]))
    test_dist = TransformedDistribution(Beta(concentration, concentration),
                                        AffineTransform(loc=-1., scale=2.0))

    samples = dist.sample(torch.Size([100]))
    lp = dist.log_prob(samples)
    x = samples[..., 1, 0]
    tst = test_dist.log_prob(x)
    # LKJ prevents inf values in log_prob
    lp[tst == math.inf] = math.inf  # substitute inf for comparison
    assert_tensors_equal(lp, tst, prec=1e-3)
Code example #18
File: affine.py Project: merz9b/pyfilter
    def i_sample(self, shape=None, as_dist=False):
        shape = size_getter(shape)

        dist = TransformedDistribution(
            self.noise0.expand(shape),
            AffineTransform(self.i_mean(),
                            self.i_scale(),
                            event_dim=self._event_dim))

        if as_dist:
            return dist

        return dist.sample()
Code example #19
    def distribution(self,
                     distr_args,
                     scale: Optional[torch.Tensor] = None) -> Distribution:
        # Unpack into `dist_scale`: reusing the name `scale` would shadow the
        # method argument and defeat the `scale is None` check below.
        mix_logits, loc, dist_scale = distr_args

        comp_distr = Normal(loc, dist_scale)
        if scale is None:
            return MixtureSameFamily(Categorical(logits=mix_logits),
                                     comp_distr)
        else:
            scaled_comp_distr = TransformedDistribution(
                comp_distr, [AffineTransform(loc=0, scale=scale)])
            return MixtureSameFamily(Categorical(logits=mix_logits),
                                     scaled_comp_distr)
Code example #20
File: affine.py Project: merz9b/pyfilter
    def _define_transdist(self, loc, scale):
        """
        Helper method for defining the transition density
        :param loc: The mean
        :type loc: torch.Tensor
        :param scale: The scale
        :type scale: torch.Tensor
        :return: Distribution
        :rtype: Distribution
        """

        loc, scale = torch.broadcast_tensors(loc, scale)

        shape = _get_shape(loc, self.ndim)

        return TransformedDistribution(
            self.noise.expand(shape),
            AffineTransform(loc, scale, event_dim=self._event_dim))
Code example #21
def test_torch_transform(ctx):
    """try using torch.Transform in combination with bgflow.Flow"""
    torch.manual_seed(10)
    x = torch.randn(10, 3, **ctx)
    flow = SequentialFlow([
        TorchTransform(IndependentTransform(SigmoidTransform(), 1)),
        TorchTransform(
            AffineTransform(loc=torch.randn(3, **ctx),
                            scale=2.0 + torch.rand(3, **ctx),
                            event_dim=1), ),
        BentIdentity(),
        # test the reinterpret_batch_ndims arguments
        TorchTransform(SigmoidTransform(), 1)
    ])
    z, dlogp = flow.forward(x)
    y, neg_dlogp = flow.forward(z, inverse=True)
    tol = 1e-7 if ctx["dtype"] is torch.float64 else 1e-5
    assert torch.allclose(x, y, atol=tol)
    assert torch.allclose(dlogp, -neg_dlogp, atol=tol)
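The assertions check the two basic flow invariants: the forward/inverse round trip reproduces the input, and the log-Jacobian accumulated on the forward pass is the exact negative of the one accumulated on the inverse pass.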
Code example #22
    def distribution(
        self,
        distr_args,
        scale: Optional[torch.Tensor] = None,
    ) -> ImplicitQuantile:

        args_proj = self.get_args_proj(self.in_features)
        implicit_quantile_function = args_proj.proj.eval()
        distr = self.distr_cls(
            implicit_quantile_function=implicit_quantile_function,
            taus=list(args_proj.buffers())[0],
            nn_output=list(args_proj.buffers())[1],
            predicted_quantiles=distr_args,
        )
        if scale is None:
            return distr
        else:
            return TransformedImplicitQuantile(
                distr, [AffineTransform(loc=0, scale=scale)])
Code example #23
    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = 0,
        scale: Optional[torch.Tensor] = None,
    ) -> ISQF:
        """
        function outputing the distribution class
        distr_args: distribution arguments
        loc: shift to the data mean
        scale: scale to the data
        """

        distr_args, qk_x = self.reshape_spline_args(distr_args, self.qk_x)

        distr = self.distr_cls(*distr_args, qk_x, self.tol)

        if scale is None:
            return distr
        else:
            return TransformedISQF(distr,
                                   [AffineTransform(loc=loc, scale=scale)])
Code example #24
    def distribution(
        self,
        picnn: torch.nn.Module,
        hidden_state: torch.Tensor,
        loc: Optional[torch.Tensor] = 0,
        scale: Optional[torch.Tensor] = None,
    ) -> MQF2Distribution:

        distr = self.distr_cls(
            picnn,
            hidden_state,
            prediction_length=self.prediction_length,
            threshold_input=self.threshold_input,
            es_num_samples=self.es_num_samples,
            is_energy_score=self.is_energy_score,
            beta=self.beta,
        )

        if scale is None:
            return distr
        else:
            return TransformedMQF2Distribution(
                distr, [AffineTransform(loc=loc, scale=scale)])
Code example #25
def get_x_corr_params(x_max,
                      n_points,
                      C,
                      K=50,
                      lr=1e-2,
                      T=10000,
                      path_to_file=None,
                      symmetric=True,
                      early_stop=-1):
    """
	C : the variance on X_normal
	symmetric: enforce symmetric model; in practice use mean of model and mirrored model; returned parameters then include the mirrored copies (so have 2K components)
	"""
    torch.set_default_tensor_type('torch.DoubleTensor')
    base_distribution = Uniform(0, 1)
    transforms = [SigmoidTransform().inv, AffineTransform(loc=0, scale=1)]
    logistic = TransformedDistribution(base_distribution, transforms)
    mus0 = 0.1 * torch.randn(K)
    #mus0[K//2:] = -mus0[:K//2]
    mus = mus0.detach().requires_grad_(True)
    sigmas0 = 0.1 * torch.randn(K)
    #sigmas0[K//2:] = sigmas0[:K//2]
    sigmas = sigmas0.detach().requires_grad_(True)
    pis0 = torch.rand(K)  #0.2*
    pis = pis0.detach().requires_grad_(True)
    normal_sigma = torch.sqrt(torch.ones(1) * C)

    x_log = torch.linspace(-x_max, x_max, n_points)
    y_log = logistic.log_prob(x_log)
    params = [mus, sigmas, pis]
    optimizer = torch.optim.Adam(params, lr=lr)

    min_loss = 10**5
    counter = 0
    for i in range(T):
        optimizer.zero_grad()
        loss = loss_func(params, x_log, normal_sigma, y_log)
        if loss < min_loss:
            min_loss = loss
            counter = 0
        else:
            counter += 1
            if early_stop == counter:
                print('Stopping early..')
                break
        if i % 1000 == 0:
            print('loss: {}, iter: {}/{}'.format(loss.detach().numpy(), i, T))
        loss.backward(retain_graph=True)
        optimizer.step()

    mus, sigmas, pis = params
    mus = mus.data.numpy()
    sigmas = np.exp(sigmas.data.numpy())
    pis = torch.softmax(pis, dim=-1).data.numpy()
    if symmetric:
        mus = np.concatenate((mus, -mus))
        sigmas = np.concatenate((sigmas, sigmas))
        pis = np.concatenate((.5 * pis, .5 * pis))

    if path_to_file is None:
        #fname = '../Corr_MoG/X_corr_{}_{}_{}_torch.pickle'.format(n_points,x_max,C)
        fname = './X_corr/X_corr_{}_{}_{}_torch.pickle'.format(
            n_points, x_max, C)
    else:
        fname = path_to_file
    if path_to_file != 'no_write':
        pickle.dump([mus, sigmas, pis], open(fname, 'wb'))
        print('Wrote params to {}'.format(fname))
    return [mus, sigmas, pis]
Code example #26
File: torus_dbn.py Project: ahmadsalim/jeffreys
def torus_dbn(phis=None,
              psis=None,
              lengths=None,
              num_sequences=None,
              num_states=55,
              prior_conc=0.1,
              prior_loc=0.0,
              prior_length_shape=100.,
              prior_length_rate=100.,
              prior_kappa_min=10.,
              prior_kappa_max=1000.):
    # From https://pyro.ai/examples/hmm.html
    with ignore_jit_warnings():
        if lengths is not None:
            assert num_sequences is None
            num_sequences = int(lengths.shape[0])
        else:
            assert num_sequences is not None
    transition_probs = pyro.sample(
        'transition_probs',
        dist.Dirichlet(
            torch.ones(num_states, num_states, dtype=torch.float) *
            num_states).to_event(1))
    length_shape = pyro.sample('length_shape',
                               dist.HalfCauchy(prior_length_shape))
    length_rate = pyro.sample('length_rate',
                              dist.HalfCauchy(prior_length_rate))
    phi_locs = pyro.sample(
        'phi_locs',
        dist.VonMises(
            torch.ones(num_states, dtype=torch.float) * prior_loc,
            torch.ones(num_states, dtype=torch.float) *
            prior_conc).to_event(1))
    phi_kappas = pyro.sample(
        'phi_kappas',
        dist.Uniform(
            torch.ones(num_states, dtype=torch.float) * prior_kappa_min,
            torch.ones(num_states, dtype=torch.float) *
            prior_kappa_max).to_event(1))
    psi_locs = pyro.sample(
        'psi_locs',
        dist.VonMises(
            torch.ones(num_states, dtype=torch.float) * prior_loc,
            torch.ones(num_states, dtype=torch.float) *
            prior_conc).to_event(1))
    psi_kappas = pyro.sample(
        'psi_kappas',
        dist.Uniform(
            torch.ones(num_states, dtype=torch.float) * prior_kappa_min,
            torch.ones(num_states, dtype=torch.float) *
            prior_kappa_max).to_event(1))
    element_plate = pyro.plate('elements', 1, dim=-1)
    with pyro.plate('sequences', num_sequences, dim=-2) as batch:
        if lengths is not None:
            lengths = lengths[batch]
            obs_length = lengths.float().unsqueeze(-1)
        else:
            obs_length = None
        state = 0
        sam_lengths = pyro.sample('length',
                                  dist.TransformedDistribution(
                                      dist.GammaPoisson(
                                          length_shape, length_rate),
                                      AffineTransform(0., 1.)),
                                  obs=obs_length)
        if lengths is None:
            lengths = sam_lengths.squeeze(-1).long()
        for t in pyro.markov(range(lengths.max())):
            with poutine.mask(mask=(t < lengths).unsqueeze(-1)):
                state = pyro.sample(f'state_{t}',
                                    dist.Categorical(transition_probs[state]),
                                    infer={'enumerate': 'parallel'})
                if phis is not None:
                    obs_phi = Vindex(phis)[batch, t].unsqueeze(-1)
                else:
                    obs_phi = None
                if psis is not None:
                    obs_psi = Vindex(psis)[batch, t].unsqueeze(-1)
                else:
                    obs_psi = None
                with element_plate:
                    pyro.sample(f'phi_{t}',
                                dist.VonMises(phi_locs[state],
                                              phi_kappas[state]),
                                obs=obs_phi)
                    pyro.sample(f'psi_{t}',
                                dist.VonMises(psi_locs[state],
                                              psi_kappas[state]),
                                obs=obs_psi)
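Wrapping GammaPoisson in the identity AffineTransform(0., 1.) looks like a no-op, but it changes the distribution's reported support from non-negative integers to the reals, presumably so that the float-valued obs_length tensor passes validation at the 'length' sample site.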
Code example #27
def init_trans(dist, kappa, gamma, sigma):
    return TransformedDistribution(dist, AffineTransform(gamma, sigma / (2 * kappa).sqrt()))
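sigma / (2 * kappa).sqrt() is the stationary standard deviation of an Ornstein-Uhlenbeck process with mean-reversion rate kappa and diffusion coefficient sigma, so this helper appears to rescale a standardized base distribution into the process's stationary law, centered at gamma.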