Example #1
def MoG_prop_APT_training_vars(prop, n_train_round, n_components):
    """Precompute per-round proposal quantities for APT training.

    For each round and mixture component, returns precision-times-mean
    vectors (Pm), precision matrices (P), log-determinants of the
    precisions, log mixture weights and quadratic forms m'Pm.
    """
    if isinstance(prop, dd.Uniform):
        # uniform proposal: all Gaussian terms vanish, weights are uniform
        prop_Pms = np.zeros((n_train_round, n_components, prop.ndim))
        prop_Ps = np.zeros((n_train_round, n_components, prop.ndim, prop.ndim))
        prop_ldetPs = np.zeros((n_train_round, n_components))
        prop_las = np.full((n_train_round, n_components),
                           np.log(1.0 / n_components))
        prop_QFs = np.zeros((n_train_round, n_components))
        return prop_Pms, prop_Ps, prop_ldetPs, prop_las, prop_QFs

    if isinstance(prop, dd.Gaussian):
        # wrap a single Gaussian as a one-component MoG
        prop = dd.MoG(a=np.ones(1), xs=[prop])
    assert isinstance(prop, dd.MoG), "input must be Gaussian, Uniform or MoG"
    if prop.n_components == 1:
        # replicate a lone component so there are n_components identical slots
        prop = dd.MoG(a=np.ones(n_components) / n_components,
                      xs=[prop.xs[0] for _ in range(n_components)])

    assert prop.n_components == n_components, "invalid number of components"
    prop_Pms = repnewax(np.stack([x.Pm for x in prop.xs], axis=0),
                        n_train_round)
    prop_Ps = repnewax(np.stack([x.P for x in prop.xs], axis=0), n_train_round)
    prop_ldetPs = repnewax(np.stack([x.logdetP for x in prop.xs], axis=0),
                           n_train_round)
    prop_las = repnewax(np.log(prop.a), n_train_round)
    prop_QFs = repnewax(
        np.stack([np.sum(x.Pm * x.m) for x in prop.xs], axis=0), n_train_round)
    return prop_Pms, prop_Ps, prop_ldetPs, prop_las, prop_QFs
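
A minimal usage sketch (hypothetical; assumes numpy as np and a delfi-style
import delfi.distribution as dd, and that dd.Uniform takes lower/upper
bounds): for a uniform proposal the returned arrays are zero-initialized,
with shapes that follow directly from the branch above.

prop = dd.Uniform(lower=np.zeros(2), upper=np.ones(2))  # 2-dim uniform proposal
Pms, Ps, ldetPs, las, QFs = MoG_prop_APT_training_vars(
    prop, n_train_round=3, n_components=4)
assert Pms.shape == (3, 4, 2) and Ps.shape == (3, 4, 2, 2)
assert ldetPs.shape == las.shape == QFs.shape == (3, 4)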
Example #2
    def gen_single(self, param):
        # See BaseSimulator for docstring
        param = np.asarray(param).reshape(-1)
        assert param.ndim == 1
        assert param.shape[0] == self.dim_param

        if self.bimodal:
            # one mixture component centered at +param, one at -param
            sample = dd.MoG(a=self.a, ms=[(-1) ** p * param for p in range(2)],
                            Ss=self.noise_cov, seed=self.gen_newseed()).gen(1)
        else:
            # both components centered on param
            sample = dd.MoG(a=self.a, ms=[param for _ in range(2)],
                            Ss=self.noise_cov, seed=self.gen_newseed()).gen(1)
        if self.return_abs:
            sample = np.abs(sample)

        return {'data': sample.reshape(-1)}
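
A hedged call sketch (hypothetical; assumes this method belongs to a
simulator such as sims.GaussMixture from Example #8 and that the
constructor arguments below exist):

m = sims.GaussMixture(dim=2, bimodal=True)  # assumed constructor arguments
out = m.gen_single(np.array([1.0, -0.5]))
out['data'].shape  # (2,): one draw near +param or -param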
Example #3
def test_mixture_of_gaussians_1d():
    N = 1000
    m = [1.]
    S = [[3.]]
    ms = [m, m]
    Ss = [S, S]
    dist = dd.MoG(a=[0.5, 0.5], ms=ms, Ss=Ss, seed=seed)
    samples = dist.gen(N)
    logprobs = dist.eval(samples)

    assert samples.shape == (N, 1)
    assert logprobs.shape == (N,)
Example #4
    def _backup_predict_from_Gaussian_prop(self, mog, thresh=0.):
        """Predict posterior given x

        Predicts posteriors from the attached MDN and corrects for
        mismatch between the prior and the proposal prior when the
        latter is given by a Gaussian object.

        Can still be used if the result would have improper covariances:
        directions with negative precision in the posterior are replaced
        by the corresponding proposal precisions.

        Parameters
        ----------
        mog : mixture of Gaussian object
            Uncorrected MoG posterior estimate
        thresh : float
            Threshold for precisions of the MoG components.
            Pick >0 for added numerical stability.
        """

        proposal, prior = self.generator.proposal, self.generator.prior
        assert isinstance(proposal, Gaussian) and isinstance(prior, Gaussian)

        xs_new = []
        for c in mog.xs:

            # corrected precision matrix
            Pc = c.P - proposal.P + prior.P

            # spectrum and eigenvectors of the (symmetric) corrected
            # precision matrix; eigh guarantees real-valued output
            Lu, Q = np.linalg.eigh(Pc)
            # precisions along eigenvectors of corrected precision matrix
            Lp = np.diag((Q.T.dot(proposal.P).dot(Q)))

            # identify degenerate precisions
            idx = np.where(Lu <= thresh)[0]

            # replace degenerate precisions with those from proposal
            L = Lu.copy()
            if idx.size > 0:
                L[idx] = np.maximum(Lp[idx], thresh)

            # recompute means and covariances
            S = Q.dot(np.diag(1. / L)).dot(Q.T)
            m = S.dot(c.Pm - proposal.Pm + prior.Pm)

            xs_new.append(Gaussian(m=m, S=S))

        return dd.MoG(xs=xs_new, a=mog.a)
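
A standalone sketch of the repair step above (pure numpy, made-up values):
a negative eigendirection of the corrected precision Pc is replaced by the
proposal's precision along that direction, yielding a proper covariance.

import numpy as np

Pc = np.array([[2.0, 0.0], [0.0, -0.5]])     # corrected precision; one negative direction
P_prop = np.array([[1.0, 0.0], [0.0, 4.0]])  # proposal precision

Lu, Q = np.linalg.eigh(Pc)                   # Pc is symmetric, so eigh applies
Lp = np.diag(Q.T.dot(P_prop).dot(Q))         # proposal precisions along the same directions
L = np.where(Lu <= 0.0, np.maximum(Lp, 0.0), Lu)
S = Q.dot(np.diag(1.0 / L)).dot(Q.T)         # repaired covariance is positive definite
assert np.all(np.linalg.eigvalsh(S) > 0)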
Example #5
    def predict(self, *args, **kwargs):
        p = super().predict(*args, **kwargs)

        if self.round > 0 and self.proposal_used[-1] in ['gaussian', 'mog']:
            assert self.network.density == 'mog' and isinstance(p, dd.MoG)
            P_offset = np.eye(p.ndim) * self.Ptol
            # add the prior precision to each posterior component if needed
            if self.add_prior_precision and isinstance(self.generator.prior,
                                                       dd.Gaussian):
                P_offset += self.generator.prior.P
            p = dd.MoG(a=p.a,
                       xs=[
                           dd.Gaussian(m=x.m, P=x.P + P_offset, seed=x.seed)
                           for x in p.xs
                       ])

        return p
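
A small illustration (pure numpy, made-up values) of why the Ptol jitter
helps: adding Ptol * I shifts every eigenvalue of a precision matrix up by
Ptol, keeping nearly singular components invertible.

import numpy as np

P = np.array([[1.0, 0.999], [0.999, 1.0]])  # nearly singular precision
Ptol = 1e-3
P_reg = P + np.eye(2) * Ptol                # eigenvalues ~[0.001, 1.999] -> ~[0.002, 2.0]
assert np.all(np.linalg.eigvalsh(P_reg) > np.linalg.eigvalsh(P))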
Example #6
    def get_mog(self, stats, deterministic=True):
        """Return the conditional MoG at the given input location

        Parameters
        ----------
        stats : np.array
            single input location
        deterministic : bool
            if True, the mean weights of the Bayesian network are used
        """
        assert stats.shape[0] == 1, 'stats.shape[0] needs to be 1'

        comps = self.eval_comps(stats, deterministic)
        a = comps['a'][0]
        ms = [comps['m' + str(i)][0] for i in range(self.n_components)]
        Us = [comps['U' + str(i)][0] for i in range(self.n_components)]

        return dd.MoG(a=a, ms=ms, Us=Us, seed=self.gen_newseed())
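
A hypothetical call pattern (assumes mdn is a trained network exposing
this method and x_obs is a 1-d array of summary statistics):

stats = x_obs.reshape(1, -1)    # get_mog expects a single row
posterior = mdn.get_mog(stats)  # dd.MoG over parameters at x_obs
theta = posterior.gen(1000)     # 1000 posterior samples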
Example #7
    def get_mog(self, stats, n_samples=None):
        """Return the conditional MoG at location x

        Parameters
        ----------
        stats : np.array
            single input location
        n_samples : None or int
            ...
        """
        assert stats.shape[0] == 1, 'stats.shape[0] needs to be 1'

        comps = self.eval_comps(stats)
        a = comps['a'][0]
        ms = [comps['m' + str(i)][0] for i in range(self.n_components)]
        Us = [comps['U' + str(i)][0] for i in range(self.n_components)]

        return dd.MoG(a=a, ms=ms, Us=Us, seed=self.gen_newseed())
Example #8
def test_rng_repeatability():
    mu = np.atleast_1d([0.0])
    S = np.atleast_2d(1.0)

    # distributions
    pG = dd.Gaussian(m=mu, S=S)
    check_repeatability_dist(pG)

    pMoG = dd.MoG(a=np.array([0.25, 0.75]), ms=[mu, mu], Ss=[S, S])
    check_repeatability_dist(pMoG)

    # simulators
    mG = sims.Gauss()
    check_repeatability_sim(mG, np.zeros(mG.dim_param).reshape(-1, 1))

    mMoG = sims.GaussMixture()
    check_repeatability_sim(mMoG, np.zeros(mMoG.dim_param).reshape(-1, 1))

    # generators
    g = gen.Default(model=mMoG, prior=pMoG, summary=Identity())
    check_repeatability_gen(g)

    # inference methods
    # we're going to create each one with a different deepcopy of g to make
    # sure there are no side effects, e.g. changes to the proposal
    x0 = g.gen(1, verbose=False)[1]
    inf_opts = dict(obs=x0,
                    n_components=2,
                    n_hiddens=[5, 5],
                    verbose=False,
                    pilot_samples=0)

    yB_nosvi = inf.Basic(deepcopy(g), svi=False, **inf_opts)
    check_repeatability_infer(yB_nosvi)

    yB_svi = inf.Basic(deepcopy(g), svi=True, **inf_opts)
    check_repeatability_infer(yB_svi)

    # skip CDELFI for now since it might crash if we don't use the prior
    #yC = inf.CDELFI(deepcopy(g), **inf_opts)
    #check_repeatability_infer(yC)

    yS = inf.SNPE(deepcopy(g), prior_mixin=0.5, **inf_opts)
    check_repeatability_infer(yS)
Example #9
    def gen_single(self, param):
        # See BaseSimulator for docstring
        param = np.asarray(param).reshape(-1)
        assert param.ndim == 1
        assert param.shape[0] == self.dim_param

        q_moving = dd.Gaussian(m=param,
                               S=self.noise_cov,
                               seed=self.gen_newseed())
        q_distractors = dd.MoG(a=self.a,
                               ms=self.ms,
                               Ss=self.Ss,
                               seed=self.gen_newseed())

        samples = []
        for _ in range(self.n_samples):
            # draw from the simulator's seeded RNG (self.rng, as set up by
            # the base class) rather than the global numpy RNG, so that
            # generated data stay repeatable under a fixed seed
            if self.rng.rand() < self.p_true:
                samples.append(q_moving.gen(1))
            else:
                samples.append(q_distractors.gen(1))

        return {'data': np.concatenate(samples, axis=0)}
Example #10
    def _predict_from_MoG_prop(self, x, threshold=0.01):
        """Predict posterior given x

        Predicts posteriors from the attached MDN and corrects for
        mismatch between the prior and the proposal prior when the
        latter is given by a Gaussian mixture with multiple components.

        Assumes the proposal mixture components are well-separated, so
        that each posterior component can be corrected locally using
        only the closest proposal component.

        Parameters
        ----------
        x : array
            Stats for which to compute the posterior
        threshold : float
            Threshold for pruning MoG components (percent of posterior mass)
        """
        # mog is posterior given proposal prior
        mog = super(CDELFI, self).predict(x)  # via super

        proposal, prior = self.generator.proposal, self.generator.prior
        assert isinstance(prior, Gaussian)

        ldetP0, d0 = logdet(prior.P), prior.m.dot(prior.Pm)
        means = np.vstack([c.m for c in proposal.xs])

        xs_new, a_new = [], []
        for j, c in enumerate(mog.xs):

            # greedily pair each posterior component with the proposal
            # component whose mean is closest in Euclidean distance
            # (should probably at least use Mahalanobis distance)
            dists = np.sum((means - np.atleast_2d(c.m))**2, axis=1)
            i = np.argmin(dists)

            c_prop = proposal.xs[i]
            a_prop = proposal.a[i]

            # correct means and covariances of individual proposals
            c_post = (c * prior) / c_prop

            # correct mixture coefficients a[i]

            # prefactors
            log_a = np.log(mog.a[j]) - np.log(a_prop)
            # determinants
            log_a += 0.5 * (logdet(c.P) + ldetP0 - logdet(c_prop.P) -
                            logdet(c_post.P))
            # Mahalanobis distances
            log_a -= 0.5 * c.m.dot(c.Pm)
            log_a -= 0.5 * d0
            log_a += 0.5 * c_prop.m.dot(c_prop.Pm)
            log_a += 0.5 * c_post.m.dot(c_post.Pm)
            a_i = np.exp(log_a)

            xs_new.append(c_post)
            a_new.append(a_i)

        a_new = np.array(a_new)
        # the weights are defined only up to \tilde{p}(x) / p(x), so normalize
        a_new /= a_new.sum()

        mog = dd.MoG(xs=xs_new, a=a_new)
        mog.prune_negligible_components(threshold=threshold)

        return mog
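
Written out, the weight update in the loop above just collects the Gaussian
normalizing constants of c * prior / c_prop; in LaTeX, with P denoting
precisions, m means, subscript 0 the prior, and prop/post the matched
proposal and corrected components:

\log a_j^{new} = \log\frac{a_j}{a_i^{prop}}
    + \tfrac{1}{2}\big(\log\det P_j + \log\det P_0
                       - \log\det P_i^{prop} - \log\det P_j^{post}\big)
    - \tfrac{1}{2}\big(m_j^\top P_j m_j + m_0^\top P_0 m_0
                       - m_i^{prop\top} P_i^{prop} m_i^{prop}
                       - m_j^{post\top} P_j^{post} m_j^{post}\big)

after which the a_j^{new} are normalized to sum to one, exactly as in the
code.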