Example 1
    def _sample_mu_k_Sigma_k(self, k):
        """Draw posterior updates for `mu_k` and `Sigma_k`.
        """
        W_k = self.W[self.Z == k]
        N_k = W_k.shape[0]

        if N_k > 0:
            W_k_bar = W_k.mean(axis=0)
            diff = W_k - W_k_bar
            mu_post = (self.prior_obs * self.mu0) + (N_k * W_k_bar)
            mu_post /= (N_k + self.prior_obs)
            SSE = np.dot(diff.T, diff)
            prior_diff = W_k_bar - self.mu0
            SSE_prior = np.outer(prior_diff, prior_diff)
            nu_post = self.nu0 + N_k
            lambda_post = self.prior_obs + N_k
            Psi_post = self.Psi0 + SSE
            Psi_post += ((self.prior_obs * N_k) / lambda_post) * SSE_prior

            self.Sigma[k] = invwishart.rvs(nu_post, Psi_post)
            cov = self.Sigma[k] / lambda_post
            self.mu[k] = self.rng.multivariate_normal(mu_post, cov)
        else:
            self.Sigma[k] = invwishart.rvs(self.nu0, self.Psi0)
            cov = self.Sigma[k] / self.prior_obs
            self.mu[k] = self.rng.multivariate_normal(self.mu0, cov)
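The method above implements the standard Normal-inverse-Wishart conditional. As a self-contained reference, here is a minimal sketch of the same draw as a free function; the names and the rng argument are illustrative, not part of the original class:

import numpy as np
from scipy.stats import invwishart

def sample_niw_posterior(W_k, mu0, prior_obs, nu0, Psi0, rng):
    # Conditional NIW draw given the N_k rows of W assigned to one component.
    N_k = W_k.shape[0]
    W_bar = W_k.mean(axis=0)
    diff = W_k - W_bar
    prior_diff = W_bar - mu0
    lambda_post = prior_obs + N_k
    nu_post = nu0 + N_k
    mu_post = (prior_obs * mu0 + N_k * W_bar) / lambda_post
    Psi_post = (Psi0 + diff.T @ diff
                + (prior_obs * N_k / lambda_post) * np.outer(prior_diff, prior_diff))
    Sigma = invwishart.rvs(nu_post, Psi_post)                    # Sigma | W_k
    mu = rng.multivariate_normal(mu_post, Sigma / lambda_post)   # mu | Sigma, W_k
    return mu, Sigma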
Example 2
def predictB(
        nIter, nTakes, nSim, seed,
        zetaMu, zetaSi, psiB, omegaB, psiW, omegaW,
        indID, obsID, altID, chosen,
        xRnd):
    
    np.random.seed(seed)
    
    ###
    #Prepare data
    ###
    
    nRnd = xRnd.shape[1]
    
    xList = [xRnd]
    (xList,
     nInd, nObs, nRow,
     chosenIdx, nonChosenIdx,
     rowsPerInd, rowsPerObs,
     _, map_avail_to_obs) = prepareData(xList, indID, obsID, chosen)
    xRnd = xList[0]
    
    sim_xRnd = np.tile(xRnd, (nSim, 1))
    sim_rowsPerObs = np.tile(rowsPerObs, (nSim,))
    sim_map_avail_to_obs = scipy.sparse.kron(scipy.sparse.eye(nSim), map_avail_to_obs)

    ###
    #Prediction
    ###
    
    pPred = np.zeros((nRow + nObs,))  
    vFix = 0 
    
    zetaCh = np.linalg.cholesky(zetaSi)
    
    for i in np.arange(nIter):
        zeta_tmp = zetaMu + zetaCh @ np.random.randn(nRnd,)
        chB_tmp = np.linalg.cholesky(invwishart.rvs(omegaB, psiB).reshape((nRnd, nRnd)))
        chW_tmp = np.linalg.cholesky(invwishart.rvs(omegaW, psiW).reshape((nRnd, nRnd)))
        
        pPred_iter = np.zeros((nRow + nObs,))
        
        for j in np.arange(nSim * nTakes):
            betaRndInd = zeta_tmp.reshape((1,nRnd)) + (chB_tmp @ np.random.randn(nRnd, nInd)).T
            betaRndInd_perRow = np.tile(np.repeat(betaRndInd, rowsPerInd, axis = 0), (nSim, 1))
                
            for k in np.arange(nTakes):
                betaRndObs = (chW_tmp @ np.random.randn(nRnd, nObs * nSim)).T
                betaRnd = betaRndInd_perRow + np.repeat(betaRndObs, sim_rowsPerObs, axis = 0)
                vRnd = np.sum(sim_xRnd * betaRnd, axis = 1)
                    
                pPred_take = pPredMxl(vFix, vRnd, sim_map_avail_to_obs, nSim, chosenIdx, nonChosenIdx)
                pPred_iter += pPred_take 
        
        pPred += pPred_iter / (nSim * nTakes**2)
        
    pPred /= nIter  
    return pPred
Example 3
 def sample_prior_hyperparameters(self, X_mean, X_cov, d):
     mulinha = multivariate_normal.rvs(mean=X_mean, cov=X_cov)
     Sigmalinha = invwishart.rvs(df=d, scale=d * X_cov)
     while not matrix_is_well_conditioned(Sigmalinha):
         Sigmalinha = invwishart.rvs(df=d, scale=d * X_cov)
     Hlinha = wishart.rvs(df=d, scale=X_cov / d)
     while not matrix_is_well_conditioned(Hlinha):
         Hlinha = wishart.rvs(df=d, scale=X_cov / d)
     sigmalinha = invgamma.rvs(1, scale=1 / d) + d  # scale must be passed by keyword; rvs's second positional arg is loc
     return mulinha, Sigmalinha, Hlinha, sigmalinha
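The helper matrix_is_well_conditioned is not shown in this example. A plausible stand-in (an assumption, not the original helper) accepts a matrix when its condition number stays below a threshold:

import numpy as np

def matrix_is_well_conditioned(M, max_cond=1e6):
    # hypothetical implementation; the threshold is a guess
    return np.linalg.cond(M) < max_cond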
Example 4
def testConditional():
  D1 = 2
  D2 = 3

  C = iw.rvs(10, np.eye(D1))
  Ci = np.linalg.inv(C)

  u = mvn.rvs(np.zeros(D1))

  mu = mvn.rvs(np.zeros(D1+D2))
  mu1 = mu[:D1]
  mu2 = mu[D1:]

  Sigma = iw.rvs(20, np.eye(D1 + D2))
  Sigma11 = Sigma[:D1,:D1]
  Sigma12 = Sigma[:D1,D1:]
  Sigma21 = Sigma[D1:,:D1]
  Sigma22 = Sigma[D1:,D1:]

  # x = mvn.rvs(mu, Sigma)
  # x1 = x[:D1]
  # x2 = x[D1:]
  
  # x1 | x2
  Ci_Sigma11_CiT = Ci @ Sigma11 @ Ci.T
  Ci_Sigma12 = Ci @ Sigma12
  Ci_mu1_u = Ci @ (mu1 - u)
  modifiedSigma = np.block([ [Ci_Sigma11_CiT, Ci_Sigma12], [Ci_Sigma12.T, Sigma22]])
  modifiedMu = np.concatenate((Ci_mu1_u, mu2))

  nSamples = 10000
  # draw from (x1, x2) joint, get x1 marginal
  x1x2 = mvn.rvs(modifiedMu, modifiedSigma, size=nSamples)
  x1 = x1x2[:,:D1]

  # draw from (Cx1 + u, x2) joint, get x1 marginal
  Cx1_u_x2 = mvn.rvs(mu, Sigma, size=nSamples)
  y = Cx1_u_x2[:,:D1]
  x1_ = np.stack([ Ci @ (y[n] - u) for n in range(nSamples) ])

  print(np.mean(x1,axis=0))
  print(np.cov(x1.T))

  print(np.mean(x1_,axis=0))
  print(np.cov(x1_.T))

  plt.scatter(*x1.T, s=1, color='b', alpha=0.5)
  plt.scatter(*x1_.T, s=1, color='g', alpha=0.5)
  plt.show()
Example 5
def sampler(wk_inv, vk, betak, mk, scaler, n_samples):
    """
    :param k: int
    :param wk: [K, dim, dim]
    :param vk: [K,]
    :param betak: [K,]
    :param mk: [K, dim]
    :return:
    """
    K = vk.shape[0]
    means = []
    stds = []
    for k in range(K):
        samples = []
        inv_lambda_k = invwishart.rvs(df=vk[k],
                                      scale=wk_inv[k],
                                      size=n_samples)
        for sample in range(n_samples):
            mu_k = multivariate_normal.rvs(mean=mk[k],
                                           cov=np.reciprocal(betak[k]) *
                                           inv_lambda_k[sample],
                                           size=1)
            x = multivariate_normal.rvs(mean=mu_k,
                                        cov=inv_lambda_k[sample],
                                        size=1)
            samples.append(x)
        samples = scaler.inverse_transform(np.array(samples))
        means.append(np.mean(samples, axis=0))
        stds.append(np.std(samples, axis=0))
    return means, stds
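A possible call with illustrative shapes; `scaler` can be any object exposing inverse_transform, e.g. a fitted sklearn StandardScaler:

import numpy as np
from sklearn.preprocessing import StandardScaler

K, dim = 2, 3
scaler = StandardScaler().fit(np.random.randn(100, dim))
means, stds = sampler(wk_inv=np.stack([np.eye(dim)] * K),  # IW scale matrices
                      vk=np.full(K, dim + 2.0),            # degrees of freedom
                      betak=np.ones(K),
                      mk=np.zeros((K, dim)),
                      scaler=scaler,
                      n_samples=50)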
Example 6
    def sample_parameters(self, prior=None):
        """ Sample parameters
        Args:
            prior (dict): (optional)
                mean_mean (ndarray): mean for mean
                mean_sd (ndarray): standard deviation for mean
                cov_psi (ndarray): scale matrix parameter for inverse Wishart
                cov_nu (double): df parameter for inverse Wishart
                df_alpha (double): shape for Gamma
                df_beta (double): rate for Gamma
        """
        if prior is None:
            prior = {}
        if not isinstance(prior, dict):
            raise TypeError("Prior must be dict not '{0}'".format(type(prior)))
        mean_mean = prior.get("mean_mean", np.zeros(self.num_dim))
        mean_sd = prior.get("mean_sd", np.ones(self.num_dim))
        cov_psi = prior.get("cov_psi", np.eye(self.num_dim))
        cov_nu = prior.get("cov_nu", self.num_dim + 2)
        df_alpha = prior.get("df_alpha", 8.0)
        df_beta = prior.get("df_beta", 4.0)

        mean = np.random.normal(size=self.num_dim) * mean_sd + mean_mean
        cov = invwishart.rvs(df=cov_nu, scale=cov_psi)
        df = gamma.rvs(a=df_alpha, scale=1.0 / df_beta)
        parameters = Map(mean=mean, cov=cov, df=df)
        return parameters
Example 7
    def __init__(self, nb_states, obs_dim, act_dim, nb_lags=1, **kwargs):
        self.nb_states = nb_states
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.nb_lags = nb_lags

        # self.A = npr.randn(self.nb_states, self.obs_dim, self.obs_dim * self.nb_lags)
        # self.B = npr.randn(self.nb_states, self.obs_dim, self.act_dim)
        # self.c = npr.randn(self.nb_states, self.obs_dim, )
        # self._sigma_chol = 5. * npr.randn(self.nb_states, self.obs_dim, self.obs_dim)

        self.A = np.zeros(
            (self.nb_states, self.obs_dim, self.obs_dim * self.nb_lags))
        self.B = np.zeros((self.nb_states, self.obs_dim, self.act_dim))
        self.c = np.zeros((self.nb_states, self.obs_dim))
        self._sigma_chol = np.zeros(
            (self.nb_states, self.obs_dim, self.obs_dim))

        for k in range(self.nb_states):
            _sigma = invwishart.rvs(self.obs_dim + 1, np.eye(self.obs_dim))
            # the elementwise product with the identity keeps only the
            # diagonal of the sampled covariance for the noise Cholesky factor
            self._sigma_chol[k] = np.linalg.cholesky(_sigma *
                                                     np.eye(self.obs_dim))
            self.A[k] = mvn.rvs(mean=None,
                                cov=1e2 * _sigma,
                                size=(self.obs_dim * self.nb_lags, )).T
            self.B[k] = mvn.rvs(mean=None,
                                cov=1e2 * _sigma,
                                size=(self.act_dim, )).T
            self.c[k] = mvn.rvs(mean=None, cov=1e2 * _sigma, size=(1, ))
Example 8
    def __init__(self, nb_states, obs_dim, act_dim,
                 nb_lags=1, degree=1, **kwargs):

        assert nb_lags > 0

        self.nb_states = nb_states
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.nb_lags = nb_lags

        self.degree = degree
        self.feat_dim = int(sc.special.comb(self.degree + self.obs_dim, self.degree)) - 1
        self.basis = PolynomialFeatures(self.degree, include_bias=False)

        # self.K = npr.randn(self.nb_states, self.act_dim, self.feat_dim)
        # self.kff = npr.randn(self.nb_states, self.act_dim)
        # self._sigma_chol = 5. * npr.randn(self.nb_states, self.act_dim, self.act_dim)

        self.K = np.zeros((self.nb_states, self.act_dim, self.feat_dim))
        self.kff = np.zeros((self.nb_states, self.act_dim))
        self._sigma_chol = np.zeros((self.nb_states, self.act_dim, self.act_dim))

        for k in range(self.nb_states):
            _sigma = invwishart.rvs(self.act_dim + 1, np.eye(self.act_dim))
            self._sigma_chol[k] = np.linalg.cholesky(_sigma * np.eye(self.act_dim))
            self.K[k] = mvn.rvs(mean=None, cov=1e2 * _sigma, size=(self.feat_dim, )).T
            self.kff[k] = mvn.rvs(mean=None, cov=1e2 * _sigma, size=(1, ))
Example 9
def next_g0_k(mu0, Si0, nu, invASq, R):
    mu_k = mu0 + np.linalg.cholesky(Si0) @ np.random.randn(R, )
    iwDiagA_k = np.random.gamma(1 / 2, 1 / invASq)
    Sigma_k = np.array(
        invwishart.rvs(nu + R - 1, 2 * nu * np.diag(iwDiagA_k)).reshape(
            (R, R)))
    return mu_k, Sigma_k, iwDiagA_k
Example 10
def next_Sigma(x, mu, nu, iwDiagA, diagCov, nInd, R):
    xS = x.reshape((nInd, R)) - mu.reshape((1, R))
    Sigma = np.array(
        invwishart.rvs(nu + nInd + R - 1,
                       2 * nu * np.diag(iwDiagA) + xS.T @ xS)).reshape((R, R))
    if diagCov: Sigma = np.diag(np.diag(Sigma))
    return Sigma
Example 11
    def draw_first_part(self):
        """Function for the first step draw
        """
        # get from class
        a0 = self.a0
        A0 = self.A0
        b0 = self.b0
        B0 = self.B0
        v0 = self.v0
        S0 = self.S0
        data = self.__data_loader()
        # store value
        #        theta = pd.DataFrame(index=range(R), columns=['a0', 'a1', 'b_mean'])
        #        w = pd.DataFrame(index=range(R), columns=['w0', 'w1'])
        #        SIGMA = pd.DataFrame(
        #            index=range(R), columns=[
        #                'e_sig', 'en_sig', 'n_sig'])
        # initial value
        theta_ini = np.random.multivariate_normal(a0, A0)
        w_ini = np.random.multivariate_normal(b0, B0)
        from scipy.stats import invwishart
        SIGMA_ini = invwishart.rvs(v0, S0)

        theta_r, data = self.__draw_theta(theta_ini, w_ini, SIGMA_ini, data)
        # draw_w
        w_r = self.__draw_w(w_ini, theta_r, SIGMA_ini, data)
        # draw_SIGMA
        SIGMA_r = self.__draw_SIGMA(theta_r, w_r, data)
        print('First step draw')
        # return
        return theta_r, w_r, SIGMA_r
Example 12
 def __draw_SIGMA(self, theta_r, w_r, data):
     """Function for drawing w given theta and w
     """
     # e_hat and n_hat
     one_jt = pd.Series(1, index=np.arange(len(data['z_jt'])))
     X_jt = pd.concat([one_jt, data['x_jt'], data['p_jt']], axis=1)
     Z_jt = pd.concat([one_jt, data['z_jt']], axis=1)
     e_hat = data['d_jt'] - np.dot(X_jt, theta_r)
     n_hat = data['p_jt'] - np.dot(Z_jt, w_r)
     # S_hat
     JT = len(n_hat)
     S_hat = np.zeros([2, 2])
     for jt in range(JT):
         e_hat_jt = e_hat[jt]
         n_hat_jt = n_hat[jt]
         # rank-one update; np.matrix is deprecated, np.outer is equivalent
         e_n_hat_jt = np.array([e_hat_jt, n_hat_jt])
         S_hat += np.outer(e_n_hat_jt, e_n_hat_jt)
     S_hat /= JT
     # v1 and S1
     v1 = 2 + JT
     I = np.identity(2)
     S1 = (2 * I + JT * S_hat) / (2 + JT)
     # draw SIGMA
     from scipy.stats import invwishart
     SIGMA_r = invwishart.rvs(v1, S1)
     return SIGMA_r
Example 13
    def reset_parameter(self, trans_var_init='raw', init_var_scale=1.0):

        to_init_transition_mu = self.transition_mu.unsqueeze(0)
        nn.init.xavier_normal_(to_init_transition_mu)
        self.transition_mu.data = to_init_transition_mu.squeeze(0)
        if trans_var_init == 'raw':
            # note: the variance initialization needs care here
            nn.init.uniform_(self.transition_cho)
            weight = self.transition_cho.data - 0.5
            weight = torch.tril(weight)
            # weight = self.atma(weight)
            self.transition_cho.data = weight + init_var_scale * torch.eye(
                2 * self.dim)
        elif trans_var_init == 'wishart':
            transition_var = invwishart.rvs(self.dim,
                                            np.eye(self.dim) / self.dim,
                                            size=self.t_comp_num,
                                            random_state=None)
            self.transition_cho.data = torch.from_numpy(
                np.linalg.cholesky(transition_var)).float()
        else:
            raise ValueError("Error transition init method")
        nn.init.xavier_normal_(self.output_mu)
        nn.init.uniform_(self.output_cho)
Example 14
 def sample_mu_Sigma_new(self, mu0, Sigma0, size):
     # assumes `cholesky` returns the upper factor (the scipy.linalg
     # default), so z @ sig0_chol has covariance Sigma0
     sig0_chol = cholesky(Sigma0)
     mu_new = mu0 + normal(size=(size, mu0.shape[0])) @ sig0_chol
     Sigma_new = invwishart.rvs(df=self.priors.Sigma.nu,
                                scale=self.priors.Sigma.psi,
                                size=size)
     return mu_new, Sigma_new
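The right-multiplication normal(...) @ sig0_chol only yields covariance Sigma0 if cholesky returns the upper-triangular factor (the scipy.linalg default); numpy's lower factor would need a transpose. A quick standalone check of that convention:

import numpy as np
from scipy.linalg import cholesky  # upper-triangular factor by default

Sigma0 = np.array([[2.0, 0.6], [0.6, 1.0]])
U = cholesky(Sigma0)  # Sigma0 == U.T @ U
z = np.random.default_rng(0).standard_normal((100000, 2))
emp_cov = np.cov((z @ U).T)  # empirical covariance of z @ U
assert np.allclose(emp_cov, Sigma0, atol=0.05)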
Example 15
 def sample_Sigma(self, mu, alphas):
     n = alphas.shape[0]
     diff = np.log(alphas) - mu
     C = sum(np.outer(diff[i], diff[i]) for i in range(n))
     _psi = self.priors.Sigma.psi + C * self.inv_temper_temp
     _nu  = self.priors.Sigma.nu + n * self.inv_temper_temp
     return invwishart.rvs(df = _nu, scale = _psi)
Example 16
def test2():
    np.random.seed(0)
    nside = 8
    ell = np.arange(3. * nside)
    Cltrue = np.zeros_like(ell)
    Cltrue[2:] = 1 / ell[2:]**2
    m = hp.synfast(Cltrue, nside)
    Clhat = hp.anafast(m)

    sigma_l = Clhat

    plt.plot(ell[2:], Cltrue[2:])
    plt.plot(ell[2:], Clhat[2:], '.')

    # Let's replace this with inverse-gamma so the transition to inverse-wishart
    # is simpler
    #for i in range(100):
    #    rho_l = np.random.randn(len(sigma_l)-2, len(sigma_l))
    #    plt.plot(ell, sigma_l/(rho_l**2).mean(axis=0), 'k.', alpha=0.1,
    #            zorder=-1)
    for l in range(2, len(ell)):
        alpha = (2 * l - 1) / 2
        beta = (2 * l + 1) * sigma_l[l] / 2
        #cl_draw = invgamma.rvs(alpha, scale=beta, size=1000)
        cl_draw = invwishart.rvs(df=alpha * 2, scale=beta * 2, size=1000)
        plt.plot([l] * 1000, cl_draw, 'k.', alpha=0.01, zorder=-1)

    plt.yscale('log')
    plt.savefig('test2.pdf')
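The swap from invgamma to invwishart above rests on the one-dimensional identity: an inverse-Wishart with p = 1 reduces to an inverse-gamma, IW(df, scale) = IG(df/2, scale/2). A minimal standalone check:

import numpy as np
from scipy.stats import invgamma, invwishart

a, b = 4.5, 2.0
grid = np.linspace(0.1, 5.0, 50)
# invwishart(df=2a, scale=2b) matches invgamma(a, scale=b) pointwise
assert np.allclose(invgamma.pdf(grid, a, scale=b),
                   invwishart.pdf(grid, df=2 * a, scale=2 * b))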
Example 17
    def sample_prior(self, proj_2_prob=False):
        """
        Samples Probability matrix from the prior

        Parameters
        ----------
        proj_2_prob: Bool: if TRUE  returns the projection to the probability simplex

        Returns
        -------
            x - Dict: keys: mu, Sigma, Psi, Pi (optional)
            mu: [MxK_prime] Normal sample from the prior
            Sigma: [MxM] Inverse Wishart sample from the prior
            Psi: [MxK_prime] Normal sample from the prior distribution
            Pi: [M x K] Probability matrix over K categories
        """
        Psi = []
        Sigma = invwishart.rvs(self.hyper['nu_0'], inv(self.hyper['W_0']))
        mu = np.zeros((self.M, self.K_prime))
        for k in range(self.K_prime):
            mu[:,
               k] = multivariate_normal(self.hyper['mu_0'][:, k],
                                        1 / self.hyper['lambda_0'][k] * Sigma)
            Psi.append(multivariate_normal(mu[:, k], Sigma))

        # Pi = stick_breaking(np.array(Psi))

        if proj_2_prob:
            Pi = stick_breaking(np.array(Psi))
            x = {'Psi': Psi, 'Pi': Pi, 'mu': mu, 'Sigma': Sigma}
        else:
            x = {'Psi': Psi, 'mu': mu, 'Sigma': Sigma}

        return x
Example 18
    def test_inferNormalConditional_diag(s):
        dx1 = 2
        dx2 = 3
        d = dx1 + dx2

        H = lie.so2.alg(np.random.rand())
        b = mvn.rvs(np.zeros(dx1), 5 * np.eye(dx1))
        x2 = mvn.rvs(np.zeros(dx2), 5 * np.eye(dx2))
        u = mvn.rvs(np.zeros(d), 5 * np.eye(d))
        S = np.diag(np.diag(iw.rvs(2 * d, 10 * np.eye(d))))
        Si = np.linalg.inv(S)

        # analytically infer x1 ~ N(mu, Sigma)
        mu, Sigma = SED.inferNormalConditional(x2, H, b, u, S)

        # determine mu as MAP estimate
        def nllNormalConditional(v):
            val = np.concatenate((H.dot(v) + b, x2))
            t1 = val - u
            return 0.5 * t1.dot(Si).dot(t1)

        g = nd.Gradient(nllNormalConditional)
        map_est = so.minimize(nllNormalConditional,
                              np.zeros(dx1),
                              method='BFGS',
                              jac=g)

        norm = np.linalg.norm(map_est.x - mu)
        # print(f'norm: {norm:.12f}')
        assert norm < 1e-2, f'SED.inferNormalConditional bad, norm {norm:.6f}'
Example 19
    def sampleMeansAndVariancesConditioned(self, data, posterior,
                                           gaussianNumber):
        tmpGmm = GMM(
            [1],
            self.numberOfContrasts,
            self.useDiagonalCovarianceMatrices,
            initialHyperMeans=np.array([self.hyperMeans[gaussianNumber]]),
            initialHyperMeansNumberOfMeasurements=np.array(
                [self.hyperMeansNumberOfMeasurements[gaussianNumber]]),
            initialHyperVariances=np.array(
                [self.hyperVariances[gaussianNumber]]),
            initialHyperVariancesNumberOfMeasurements=np.array(
                [self.hyperVariancesNumberOfMeasurements[gaussianNumber]]))
        tmpGmm.initializeGMMParameters(data, posterior)
        tmpGmm.fitGMMParameters(data, posterior)
        N = posterior.sum()

        # Murphy, page 134 with v0 = hyperVarianceNumberOfMeasurements - numberOfContrasts - 2
        variance = invwishart.rvs(
            N + tmpGmm.hyperVariancesNumberOfMeasurements[0] -
            self.numberOfContrasts - 2, tmpGmm.variances[0] *
            (tmpGmm.hyperVariancesNumberOfMeasurements[0] + N))

        # If numberOfContrasts is 1, force variance to be a (1, 1) array
        if self.numberOfContrasts == 1:
            variance = np.atleast_2d(variance)

        if self.useDiagonalCovarianceMatrices:
            variance = np.diag(np.diag(variance))

        mean = np.random.multivariate_normal(
            tmpGmm.means[0],
            variance / (tmpGmm.hyperMeansNumberOfMeasurements[0] + N)).reshape(
                -1, 1)
        return mean, variance
Example 20
    def draw_pi0_sigma0(self, x_0):
        """
        Draws from pi_0, sigma_0 | x_0

        :returns: draw from the posterior
                pi_0, sigma_0 | x_0 ~ NIW(mu_1, lambda_1, Psi_1, nu_1),
            where the posterior's parameters are
                mu_1 = (lambda mu_0 + x_0) / (lambda + 1)
                lambda_1 = lambda + 1
                nu_1 = nu + 1
                Psi_1 = Psi + lambda / (lambda + 1) (x_0 - mu_0)(x_0 - mu_0)^T.
        """
        # Compute posterior's parameters
        mu_1 = (self.p_pi0_sigma0["lambda"] * self.p_pi0_sigma0["mu_0"] + x_0) \
                                                                        / (self.p_pi0_sigma0["lambda"] + 1.0)
        lambda_1 = self.p_pi0_sigma0["lambda"] + 1.0
        nu_1 = self.p_pi0_sigma0["nu"] + 1.0
        # rank-one update: lambda/(lambda+1) * (x_0 - mu_0)(x_0 - mu_0)^T
        diff = x_0 - self.p_pi0_sigma0["mu_0"]
        Psi_1 = self.p_pi0_sigma0["Psi"] + self.p_pi0_sigma0["lambda"] / (self.p_pi0_sigma0["lambda"] + 1.0) \
                                                            * np.outer(diff, diff)

        # Draw from the inv-Wishart
        sigma_0 = invwishart.rvs(nu_1, Psi_1)
        # Draw from the normal
        pi_0 = mvn_sample(mu_1, sigma_0 / lambda_1)

        return pi_0, sigma_0
Example 21
    def reset_parameter(self,
                        trans_cho_method='random',
                        output_cho_scale=0,
                        t_cho_scale=0):
        # transition mu init
        to_init_transition_mu = self.transition_mu.unsqueeze(0)
        nn.init.xavier_normal_(to_init_transition_mu)
        self.transition_mu.data = to_init_transition_mu.squeeze(0)
        # transition var init
        if trans_cho_method == 'random':
            nn.init.uniform_(self.transition_cho)
            weight = self.transition_cho.data - 0.5
            weight = torch.tril(weight)
            self.transition_cho.data = weight + t_cho_scale * torch.eye(
                2 * self.dim)
        elif trans_cho_method == 'wishart':
            transition_var = invwishart.rvs(2 * self.dim, (4 * self.dim + 1) *
                                            np.eye(2 * self.dim),
                                            size=self.t_comp_num,
                                            random_state=None)
            self.transition_cho.data = torch.from_numpy(
                np.linalg.cholesky(transition_var)).float()
        else:
            raise ValueError("Error transition init method")
        # output mu init
        if self.gaussian_decode:
            nn.init.xavier_normal_(self.output_mu)

            # output var init
            if output_cho_scale == 0:
                nn.init.uniform_(self.output_cho, a=0.1, b=1.0)
            else:
                nn.init.constant_(self.output_cho, output_cho_scale)
        else:
            nn.init.xavier_normal_(self.decode_layer.weight)
Example 22
def create_dataset(n_dim,
                   n_clust,
                   n_tasks,
                   n_entities,
                   seed=None,
                   pi_samp=None,
                   Si_samp=None,
                   mu_samp=None):
    """
    Create the amortised clustering dataset
    :param n_dim: number of dimensions
    :param n_clust: pair (lo,hi) number of clusters uniformly in the range(lo,hi)
    :param n_tasks: number of tasks
    :param n_entities: pair (lo,hi) number of entities uniformly in the range(lo,hi)
    :param seed: random seed
    :return: data set
    """
    if seed is not None:
        np.random.seed(seed)

    tasks = []
    for i in range(n_tasks):

        n_clust_ = np.random.randint(*n_clust)
        Si = np.zeros((n_clust_, n_dim, n_dim))
        mu = np.zeros((n_clust_, n_dim))
        x = []
        idx = []

        n_ent = np.random.randint(*n_entities)

        if pi_samp is not None:
            pi = pi_samp(n_clust_)
        else:
            pi = np.ones(n_clust_) / n_clust_

        for j, n in enumerate(*multinomial.rvs(n_ent, pi, 1)):
            if Si_samp is not None:
                Si[j] = Si_samp(n_dim)
            else:
                Si[j] = invwishart.rvs(4, 0.05 * np.eye(n_dim))

            if mu_samp is not None:
                mu[j] = mu_samp(n_dim)
            else:
                mu[j] = np.random.randn(n_dim)
            if n > 0:
                x.append(
                    multivariate_normal.rvs(mu[j], Si[j], size=[n]).astype(
                        np.float32).reshape(n, -1))
                idx.append(j * np.ones(n, dtype=np.int64))

        j = np.random.permutation(n_ent)
        x = np.concatenate(x, 0)[j]
        idx = np.concatenate(idx, 0)[j]

        tasks.append((x, idx, mu, Si))

    return tasks
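A possible invocation (argument values are illustrative; assumes numpy and the scipy.stats samplers used inside are imported):

# three 2-D tasks, 2-4 clusters and 50-99 entities per task
tasks = create_dataset(n_dim=2, n_clust=(2, 5), n_tasks=3,
                       n_entities=(50, 100), seed=0)
x, idx, mu, Si = tasks[0]  # points, cluster labels, cluster means, covariances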
Example 23
    def sampleMeansAndVariancesConditioned(self,
                                           data,
                                           posterior,
                                           gaussianNumber,
                                           constraints=None):
        tmpGmm = GMM(
            [1],
            self.numberOfContrasts,
            self.useDiagonalCovarianceMatrices,
            initialHyperMeans=np.array([self.hyperMeans[gaussianNumber]]),
            initialHyperMeansNumberOfMeasurements=np.array(
                [self.hyperMeansNumberOfMeasurements[gaussianNumber]]),
            initialHyperVariances=np.array(
                [self.hyperVariances[gaussianNumber]]),
            initialHyperVariancesNumberOfMeasurements=np.array(
                [self.hyperVariancesNumberOfMeasurements[gaussianNumber]]))
        tmpGmm.initializeGMMParameters(data, posterior)
        tmpGmm.fitGMMParameters(data, posterior)
        N = posterior.sum()

        # Murphy, page 134 with v0 = hyperVarianceNumberOfMeasurements - numberOfContrasts - 2
        variance = invwishart.rvs(
            N + tmpGmm.hyperVariancesNumberOfMeasurements[0] -
            self.numberOfContrasts - 2, tmpGmm.variances[0] *
            (tmpGmm.hyperVariancesNumberOfMeasurements[0] + N))

        # If numberOfContrasts is 1, force variance to be a (1, 1) array
        if self.numberOfContrasts == 1:
            variance = np.atleast_2d(variance)

        if self.useDiagonalCovarianceMatrices:
            variance = np.diag(np.diag(variance))

        mean = np.random.multivariate_normal(
            tmpGmm.means[0],
            variance / (tmpGmm.hyperMeansNumberOfMeasurements[0] + N)).reshape(
                -1, 1)
        if constraints is not None:

            def truncsample(mean, var, lower, upper):
                from scipy.stats import truncnorm

                # print("Sampling from truncnorm: mean=%.4f, var=%.4f, bounds = (%.4f,%.4f)"%(mean,var,lower,upper))
                a, b = (lower - mean) / np.sqrt(var), (upper -
                                                       mean) / np.sqrt(var)
                try:
                    ts = truncnorm.rvs(a, b, loc=mean, scale=np.sqrt(var))
                except Exception:
                    return lower  #TODO: Find out how to deal with samples being out of bounds
                # print("Sampled = %.4f"%ts)
                return ts

            for constraint in constraints:
                mean_idx, bounds = constraint
                mean[mean_idx] = truncsample(
                    tmpGmm.means[0][mean_idx], variance[mean_idx, mean_idx] /
                    (tmpGmm.hyperMeansNumberOfMeasurements[0] + N), bounds[0],
                    bounds[1])
        return mean, variance
Example 24
 def _next_Sigma(self, gamma, mu, a, nu):
     """ Updates Sigma. """
     diff = gamma - mu
     Sigma = (invwishart.rvs(nu + self.N + self.n_rnd - 1,
                             2 * nu * np.diag(a) + diff.T @ diff))\
         .reshape((self.n_rnd, self.n_rnd))
     SigmaInv = np.linalg.inv(Sigma)
     return Sigma, SigmaInv
Example 25
def next_Omega(paramRnd, zeta, nu, iwDiagA, diagCov, nRnd, nInd):
    betaS = paramRnd - zeta
    Omega = np.array(
        invwishart.rvs(nu + nInd + nRnd - 1,
                       2 * nu * np.diag(iwDiagA) + betaS.T @ betaS)).reshape(
                           (nRnd, nRnd))
    if diagCov: Omega = np.diag(np.diag(Omega))
    return Omega
Example 26
def next_Sigma(x, mu, nu, iwDiagA, diagCov, nInd, R):
    xS = x - mu
    Sigma = np.array(invwishart.rvs(nu + nInd + R - 1, 2 * nu * np.diag(iwDiagA) + xS.T @ xS)).reshape((R, R))
    if diagCov:
        # diagonalise only the first two coordinates and keep the
        # trailing block of the covariance intact
        Sigma_tmp = np.array(Sigma)
        Sigma = np.diag(np.diag(Sigma_tmp))
        Sigma[2:, 2:] = Sigma_tmp[2:, 2:]
    return Sigma
Example 27
 def __draw_b_std(self, coefficient_r, coefficient_mean_r):
     """Function for drawing the std at the r-round
     """
     I = self.I
     df = 1 + I
     scale = (I + np.var(coefficient_r)) / df
     coefficient_std_draw = invwishart.rvs(df, scale)
     return coefficient_std_draw
Example 28
        def generate_covariance(true_mu, dims, df):
            S = (np.tril(iw.rvs(df, 1, size=dims**2).reshape(dims, dims))) * df
            cov = np.dot(S, S.T)
            while (abs(np.linalg.det(cov)) < 1.5):
                cov = cov + 0.5 * np.diag(np.diag(cov))
            mu = np.random.multivariate_normal(true_mu, cov, 1)[0]

            return mu, cov
Example 29
def generate_covariance(true_mu, dims, df):
    S = np.tril(iw.rvs(df, 1, size=dims**2).reshape(dims, dims))
    cov = np.dot(S, S.T)
    while (np.linalg.det(cov) < 1):
        cov = cov * 2
    mu = np.random.multivariate_normal(true_mu, cov, 1)[0]

    return mu, cov
Example 30
    def test_wishart_invwishart_2D_rvs(self):
        dim = 3
        df = 10

        # Construct a simple non-diagonal positive definite matrix
        scale = np.eye(dim)
        scale[0,1] = 0.5
        scale[1,0] = 0.5

        # Construct frozen Wishart and inverse Wishart random variables
        w = wishart(df, scale)
        iw = invwishart(df, scale)

        # Get the generated random variables from a known seed
        np.random.seed(248042)
        w_rvs = wishart.rvs(df, scale)
        np.random.seed(248042)
        frozen_w_rvs = w.rvs()
        np.random.seed(248042)
        iw_rvs = invwishart.rvs(df, scale)
        np.random.seed(248042)
        frozen_iw_rvs = iw.rvs()

        # Manually calculate what it should be, based on the Bartlett (1933)
        # decomposition of a Wishart into D A A' D', where D is the Cholesky
        # factorization of the scale matrix and A is the lower triangular matrix
        # with the square root of chi^2 variates on the diagonal and N(0,1)
        # variates in the lower triangle.
        np.random.seed(248042)
        covariances = np.random.normal(size=3)
        variances = np.r_[
            np.random.chisquare(df),
            np.random.chisquare(df-1),
            np.random.chisquare(df-2),
        ]**0.5

        # Construct the lower-triangular A matrix
        A = np.diag(variances)
        A[np.tril_indices(dim, k=-1)] = covariances

        # Wishart random variate
        D = np.linalg.cholesky(scale)
        DA = D.dot(A)
        manual_w_rvs = np.dot(DA, DA.T)

        # inverse Wishart random variate
        # Supposing that the inverse wishart has scale matrix `scale`, then the
        # random variate is the inverse of a random variate drawn from a Wishart
        # distribution with scale matrix `inv_scale = np.linalg.inv(scale)`
        iD = np.linalg.cholesky(np.linalg.inv(scale))
        iDA = iD.dot(A)
        manual_iw_rvs = np.linalg.inv(np.dot(iDA, iDA.T))

        # Test for equality
        assert_allclose(w_rvs, manual_w_rvs)
        assert_allclose(frozen_w_rvs, manual_w_rvs)
        assert_allclose(iw_rvs, manual_iw_rvs)
        assert_allclose(frozen_iw_rvs, manual_iw_rvs)
Example 32
def Base_distribution_sampling(sample_mean, dim): # NIW (prior)
    kappa_0 = 0.1
    Lam_0 = np.eye(dim)
    nu_0 = dim + 2

    # NIW prior: Cov ~ IW(nu_0, Lam_0), mu | Cov ~ N(sample_mean, Cov / kappa_0);
    # only the mean's covariance is scaled by 1/kappa_0, not the returned covariance
    Cov_sampled = invwishart.rvs(df=nu_0, scale=Lam_0)
    mu_sampled = np.random.multivariate_normal(sample_mean, Cov_sampled / kappa_0)

    return mu_sampled, Cov_sampled
Example 33
def generate(alpha, K, N, So, nuo, muo, kappao):
    d = muo.shape[0]
    x = np.zeros((N, d))
    z = np.zeros(N, dtype=int)
    sigma = []
    mu = []

    for k in range(K):
        sigmak = invwishart.rvs(df=nuo, scale=So)
        sigma.append(sigmak)
        muk = multivariate_normal(muo, 1/kappao*sigmak, 1)[0]
        mu.append(muk)

    pi = dirichlet(np.ones(K)*alpha)
    for i in range(N):
        z[i] = choice(K, 1, p=pi)[0]
        x[i, :] = multivariate_normal(mu[z[i]], sigma[z[i]], 1)[0]
    return x, z
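A possible call to generate, with illustrative hyperparameters for a 2-D, 3-component mixture (assumes numpy plus the numpy.random and scipy.stats samplers used inside are imported):

import numpy as np

x, z = generate(alpha=1.0, K=3, N=500,
                So=np.eye(2), nuo=4, muo=np.zeros(2), kappao=0.1)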
Example 34
def normal_inverse_wishart_draw(mu0,beta0,v0,W0):
    sigma = invwishart.rvs(df=v0, scale=W0)
    sigma = sigma if sigma.shape != () else numpy.array([[sigma]])
    mu = multivariate_normal_draw(mu=mu0,sigma=sigma/beta0)
    return (mu,sigma)
Example 35
def Base_distribution_posterior_sampling(X_cl, N_cl, old_mu, old_cov, dim=2):
    kappa_0 = 0.1
    nu_0 = dim + 2
    m0 = old_mu
    x_bar = np.mean(X_cl,axis=0)
    kappa_n = kappa_0 + N_cl
    nu_n = nu_0 + N_cl

    if len(X_cl) != N_cl:
        print("something wrong")
        return None

    if N_cl == 1:
        x = X_cl[0]
        # print 'here'
        k0 = 0.01
        mN = (k0 / (k0 + N_cl))*m0 + (N_cl/(k0+N_cl)) * x_bar
        # SN = np.dot(np.reshape(x-old_mu,(dim,1)),np.reshape(x,(dim,1)).T)
        # _,SN,_ = np.linalg.svd(SN)
        # SN = np.diag(SN)
        SN = np.eye(dim)
        try:
            iSN = np.linalg.inv(SN)
        except:
            iSN = np.linalg.inv(SN + 1e-6*np.eye(dim))

        try:
            mu_new = np.random.multivariate_normal(mN, SN /(nu_n - dim + 1) )
        except:
            mu_new = np.random.multivariate_normal(mN, SN /(nu_n - dim + 1) + 1e-6*np.eye(dim) )
        cov_new = invwishart.rvs(df=nu_n, scale=iSN)
        _,cov_new,_ = np.linalg.svd(cov_new)
        cov_new = np.diag(cov_new)
        return mu_new,cov_new
    else:
        Cov_est = np.zeros((dim,dim))
        for idx in range(N_cl):
            # print X_cl[idx]
            diff = np.reshape(X_cl[idx]-x_bar,(dim,1))
            Cov_est += np.dot(diff,diff.T)
        ''' NIW posterior param '''
        mN = (kappa_0 / (kappa_0 + N_cl))*m0 + (N_cl/(kappa_0+N_cl)) * x_bar
        SN = old_cov + Cov_est
        x_bar_ = np.reshape(x_bar, (dim, 1))
        old_mu_ = np.reshape(old_mu, (dim, 1))
        SN += (kappa_0 * N_cl) / (kappa_0 + N_cl) * np.dot(x_bar_ - old_mu_, (x_bar_ - old_mu_).T)
        _,SN,_ = np.linalg.svd(SN)
        SN = np.diag(SN)

        try:
            iSN = np.linalg.inv(SN)
        except:
            iSN = np.linalg.inv(SN + 1e-6*np.eye(dim))
        try:
            mu_new = np.random.multivariate_normal(mN, SN /(nu_n - dim + 1) )
        except:
            mu_new = np.random.multivariate_normal(mN, SN /(nu_n - dim + 1) + 1e-6*np.eye(dim) )
        try:
            cov_new = invwishart.rvs(df=nu_n, scale=iSN)
        except:
            cov_new = np.eye(dim)
            # print iSN, np.linalg.det(iSN)
        _,cov_new,_ = np.linalg.svd(cov_new)
        cov_new = np.diag(cov_new)
        return mu_new,cov_new