Example #1
    def __init__(self, V, K, X=None, b=None, sigmasq_b=1.0,
                 sigmasq_prior_prms=None, name=None):
        self.V, self.K = V, K

        # Initialize prior
        sigmasq_prior_prms = sigmasq_prior_prms if sigmasq_prior_prms is not None else {}
        self.sigmasq_x_prior = self._sigmasq_x_prior_class(K, **sigmasq_prior_prms)
        self.sigmasq_b = sigmasq_b

        # Initialize parameters
        self.X = np.sqrt(self.sigmasq_x) * npr.randn(V, K) if X is None else X * np.ones((V, K))

        self.b = np.zeros((V, V)) if b is None else b * np.ones((V, V))

        # Models encapsulate data
        # A:  observed adjacency matrix
        # m:  mask for network n specifying which features to use
        # mask: mask specifying which entries in A were observed/hidden
        self.As = []
        self.ms = []
        self.masks = []

        # Polya-gamma RNGs
        num_threads = get_omp_num_threads()
        seeds = npr.randint(2 ** 16, size=num_threads)
        self.ppgs = [PyPolyaGamma(seed) for seed in seeds]

        # Name the model
        self.name = name if name is not None else "lsm_K{}".format(K)
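
The ppgs pool built above is what pypolyagamma.pgdrawvpar consumes for multi-threaded draws (as in the later examples on this page). A minimal sketch with toy vector sizes; the sizes are assumptions:

import numpy as np
import numpy.random as npr
import pypolyagamma
from pypolyagamma import PyPolyaGamma, get_omp_num_threads

num_threads = get_omp_num_threads()
seeds = npr.randint(2 ** 16, size=num_threads)
ppgs = [PyPolyaGamma(seed) for seed in seeds]

n = 1000
b = np.ones(n)       # PG shape parameters
c = npr.randn(n)     # PG tilting parameters
out = np.empty(n)    # pgdrawvpar writes the draws here in place
pypolyagamma.pgdrawvpar(ppgs, b, c, out)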
Example #2
def sample_aux_vars(betas, num_matches, time_range, covariates=None):
    pg = PyPolyaGamma()
    if covariates is None:
        # identity_matrix is not shown in this excerpt; np.eye is the NumPy
        # equivalent (presumably one-hot covariates per player).
        covariates = np.eye(len(betas))

    if covariates.ndim == 2:
        num_players = len(covariates)
        aux_vars = [
            np.matrix([
                [
                    pg.pgdraw(num_matches[t][i, j],
                              (covariates[i] - covariates[j]).dot(betas[t]))
                    #entries
                    for j in range(num_players)  # columns
                ] for i in range(num_players)  # rows
            ]) for t in time_range  # index of matrix-list
        ]
    else:
        num_players = len(covariates[0])
        aux_vars = [
            np.matrix([
                [
                    pg.pgdraw(num_matches[t][i, j],
                              (covariates[t][i] - covariates[t][j]).dot(
                                  betas[t]))
                    #entries
                    for j in range(num_players)  # columns
                ] for i in range(num_players)  # rows
            ]) for t in time_range  # index of matrix-list
        ]

    return aux_vars
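
Each matrix entry above is one pgdraw call: omega ~ PG(b, c) with b the number of matches between players i and j at time t and c the logit (x_i - x_j) . beta_t. A minimal standalone check of that call, with hypothetical values:

from pypolyagamma import PyPolyaGamma

pg = PyPolyaGamma(0)
# b = 4 matches played, c = 0.3 logit difference between the two players
omega = pg.pgdraw(4.0, 0.3)

Example #3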
def pg_spike_train(X, Y, C, Omega, D_out, nthreads=None, N=1, neg_bin=False):
    """
    Sample Polya-Gamma wy|Y,C,D,X where Y are spike trains and X are the continuous latent states
    :param X: List of continuous latent states
    :param Y: list of spike trains
    :param C: emission parameters. bias parameter is appended to last column.
    :param Omega: list used for storing polya-gamma variables
    :param D_out: Dimension of output i..e number of neurons
    :param nthreads: Number of threads for parallel sampling.
    :param N: Maximum number of spikes N for a binomial distribution, or number of failures in negative binomial
    :param neg_bin: Boolean flag dictating whether likelihood is negative binomial
    :return:
    """
    for idx in range(len(X)):
        T = X[idx][0, 1:].size
        b = N * np.ones(T * D_out)
        if neg_bin:
            b += Y[idx].flatten(order='F')
        if nthreads is None:
            nthreads = n_cpu
        out = np.empty(T * D_out)
        # Ignore the first point of the time series
        V = C[:, :-1] @ X[idx][:, 1:] + C[:, -1][:, na]

        seeds = np.random.randint(2**16, size=nthreads)
        ppgs = [PyPolyaGamma(seed) for seed in seeds]

        pypolyagamma.pgdrawvpar(ppgs, b, V.flatten(order='F'), out)
        Omega[idx] = out.reshape((D_out, T), order='F')

    return Omega
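
Once Omega is sampled, the augmentation makes the conditional of each emission row Gaussian. Below is a minimal sketch of that downstream update for a single neuron d under an isotropic normal prior; the prior and the helper name are assumptions, since the surrounding model code is not shown here:

import numpy as np

def emission_row_update(X_latent, y_d, omega_d, N=1, prior_prec=1.0):
    # X_latent: (D_in, T) latent states; y_d: (T,) spike counts for neuron d;
    # omega_d: (T,) Polya-Gamma draws for neuron d (one row of Omega above).
    Xa = np.vstack([X_latent, np.ones(X_latent.shape[1])])  # append bias covariate
    kappa = y_d - N / 2.0                                   # kappa(y) = y - N/2
    V = np.linalg.inv((Xa * omega_d) @ Xa.T + prior_prec * np.eye(Xa.shape[0]))
    m = V @ (Xa @ kappa)
    return np.random.multivariate_normal(m, V)

Example #4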
def pg_tree_posterior(states, omega, R, path, depth, nthreads=None):
    '''
    Sample Polya-Gamma w_n,t|x_t,z_{t+1}, where the subscript n denotes the hyperplane
    being augmented with the Polya-Gamma variable. This augments all of the logistic
    regressions encountered while traversing down the tree.
    :param states: continuous latent states; a list of numpy arrays
    :param omega: list for storing the Polya-Gamma variables
    :param R: normal vectors of the hyperplanes, where the bias term is the last element of each array; a list of arrays
    :param path: path taken through the tree at time t; a list of numpy arrays
    :param depth: maximum depth of the tree
    :param nthreads: number of threads for parallel sampling; defaults to cpu_count()
    :return: a list of PG draws, one array per time series
    '''
    for idx in range(len(states)):
        T = states[idx][0, :].size
        b = np.ones(T * (depth - 1))
        if nthreads is None:
            nthreads = cpu_count()
        v = np.ones((depth - 1, T))
        out = np.empty(T * (depth - 1))
        #Compute parameters for conditional
        for d in range(depth - 1):
            for t in range(T):
                index = int(path[idx][d, t] - 1)  # Find which node you went through
                v[d, t] = np.matmul(R[d][:-1, index], np.array(states[idx][:, t])) + R[d][-1, index]
        seeds = np.random.randint(2 ** 16, size=nthreads)
        ppgs = [PyPolyaGamma(seed) for seed in seeds]
        #Sample in parallel
        pypolyagamma.pgdrawvpar(ppgs, b, v.flatten(order='F'), out)
        omega[idx] = out.reshape((depth - 1, T), order='F')

    return omega
Example #5
    def __init__(self, rng, data, n_burn, n_iters, latent_dim, n_clusters,
                 n_rffs, dp_prior_obs, dp_df, disp_prior, bias_var):
        """Initialize base class for logistic RFLVMs.
        """
        # `_BaseRFLVM` will call `_init_specific_params`, and these need to be
        # set first.
        self.disp_prior = disp_prior
        self.bias_var = bias_var

        super().__init__(rng=rng,
                         data=data,
                         n_burn=n_burn,
                         n_iters=n_iters,
                         latent_dim=latent_dim,
                         n_clusters=n_clusters,
                         n_rffs=n_rffs,
                         dp_prior_obs=dp_prior_obs,
                         dp_df=dp_df)

        # Polya-gamma augmentation.
        self.pg = PyPolyaGamma()
        prior_Sigma = np.eye(self.M + 1)
        prior_Sigma[-1, -1] = np.sqrt(self.bias_var)
        self.inv_B = np.linalg.inv(prior_Sigma)
        mu_A_b = np.zeros(self.M + 1)
        self.inv_B_b = self.inv_B @ mu_A_b
        self.omega = np.empty(self.Y.shape)

        # Linear coefficients `beta`.
        b0 = np.zeros(self.M + 1)
        B0 = np.eye(self.M + 1)
        self.beta = self.rng.multivariate_normal(b0, B0, size=self._j_func())
Example #6
    def __init__(self, nrows, ncols, ndepth, pg_seed=42, **kwargs):
        super().__init__(nrows, ncols, ndepth, **kwargs)

        # Initialize the Polya-Gamma sampler
        from pypolyagamma import PyPolyaGamma
        self.pg = PyPolyaGamma(seed=pg_seed)
        self.nu2 = np.zeros((nrows, ncols, ndepth))
        self.nu2_flat = np.zeros(np.prod(self.nu2.shape))
        self.sample_nu2 = True
Example #7
def nb_fit_bayes(Z):
    from pypolyagamma import PyPolyaGamma
    from scipy.stats import norm
    results = []
    pgr = PyPolyaGamma(seed=0)
    model_logr = np.zeros(Z.shape[0])
    model_Psi = np.zeros(Z.shape)
    model_r = np.exp(model_logr)
    model_P = ilogit(model_Psi)
    prior_logr_sd = 100.
    Omegas = np.zeros_like(Z)
    for step in range(3000):
        # Random-walk MCMC for log(r)
        for mcmc_step in range(30):
            candidate_logr = model_logr + np.random.normal(
                0, 1, size=Z.shape[0])
            candidate_r = np.exp(candidate_logr)
            accept_prior = norm.logpdf(
                candidate_logr, loc=0, scale=prior_logr_sd) - norm.logpdf(
                    model_logr, loc=0, scale=prior_logr_sd)
            accept_likelihood = negBinomRatio(Z,
                                              candidate_r[:, np.newaxis],
                                              model_r[:, np.newaxis],
                                              model_P,
                                              model_P,
                                              log=True).sum(axis=1)
            accept_probs = np.exp(
                np.clip(accept_prior + accept_likelihood, -10, 1))
            accept_indices = np.random.random(size=Z.shape[0]) <= accept_probs
            model_logr[accept_indices] = candidate_logr[accept_indices]
            model_r = np.exp(model_logr)

        # Polya-Gamma sampler -- Marginal test version only
        N_ij = Z + model_r[:, np.newaxis]
        for i in range(Z.shape[0]):
            pgr.pgdrawv(N_ij[i], np.repeat(model_Psi[i, 0], Z.shape[1]),
                        Omegas[i])

        # Sample the logits using only the expressed values -- Marginal test version only
        v = 1 / (Omegas.sum(axis=1) + 1 / 100.**2)
        m = v * (Z.sum(axis=1) - Z.shape[1] * model_r) / 2.
        model_Psi = np.random.normal(loc=m, scale=np.sqrt(v))[:, np.newaxis]
        model_P = ilogit(model_Psi)

        if step > 1000 and (step % 2) == 0:
            results.append([model_r, model_P[:, 0]])
            # print(model_r, model_P[:,0])
    return np.array(results)
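
The snippet relies on two helpers defined elsewhere in that project, ilogit and negBinomRatio. A plausible minimal sketch of both, assuming the standard NB(r, p) parameterization with success probability p (the originals may differ):

import numpy as np
from scipy.special import gammaln

def ilogit(x):
    # Inverse logit (sigmoid).
    return 1.0 / (1.0 + np.exp(-x))

def negBinomRatio(z, r_new, r_old, p_new, p_old, log=True):
    # Log-ratio of NB(z; r, p) likelihoods; the z! terms cancel.
    log_ratio = (gammaln(z + r_new) - gammaln(r_new)
                 - gammaln(z + r_old) + gammaln(r_old)
                 + r_new * np.log(1.0 - p_new) - r_old * np.log(1.0 - p_old)
                 + z * (np.log(p_new) - np.log(p_old)))
    return log_ratio if log else np.exp(log_ratio)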
Example #8
    def _smpl_fn(cls, rng, b, c, size):
        pg = PyPolyaGamma(rng.randint(2 ** 16))

        if not size and b.shape == c.shape == ():
            return pg.pgdraw(b, c)
        else:
            b, c = np.broadcast_arrays(b, c)
            out_shape = b.shape + tuple(size or ())
            smpl_val = np.empty(out_shape, dtype="double")
            b = np.tile(b, tuple(size or ()) + (1,))
            c = np.tile(c, tuple(size or ()) + (1,))
            pg.pgdrawv(
                np.asarray(b.flat).astype("double", copy=True),
                np.asarray(c.flat).astype("double", copy=True),
                np.asarray(smpl_val.flat),
            )
            return smpl_val
Example #9
    def logisticAndReject(self, X, Y):
        pg = PyPolyaGamma()  # use N(0, I) prior
        n = X.shape[0]
        # Output layer
        out_fit = LinearRegression(fit_intercept=False).fit(
            self.layers[self.nlayer - 1].h, Y)
        self.layers[self.nlayer].W = out_fit.coef_
        # Hidden layers
        for l in range(self.nlayer - 1, 0, -1):
            #    for j in range(self.hid_dim):
            # Draw prior beta
            #prior = np.random.normal(0, 1, size = self.hid_dim)
            # Draw latent w
            #w = np.zeros(n)
            #for k in range(n):
            #    w[k] = pg.pgdraw(1, np.dot(self.layers[l-1].h[k,:], prior))
            # Draw posterior beta
            #kappa = self.layers[l].h[:,j] - 0.5
            #omega = np.diag(w)
            #Vw = np.linalg.inv(np.dot(np.dot(np.transpose(self.layers[l].h), omega), self.layers[l].h) + np.eye(self.hid_dim))
            #mw = np.dot(Vw, np.dot(np.transpose(self.layers[l].h), kappa))
            #self.layers[l].W[:,j] = np.random.multivariate_normal(mw, Vw)

            # Propose
            propW = np.zeros(self.layers[l].W.shape)
            logalpha = 0
            for j in range(self.hid_dim):
                hid_fit = LogisticRegression(fit_intercept=False).fit(
                    self.layers[l - 1].h, self.layers[l].h[:, j])
                propW[:, j] = hid_fit.coef_ + np.random.normal(size=len(propW[:, j]))
                prop_hW = expit(np.dot(self.layers[l - 1].h, propW[:, j]))
                curr_hW = expit(
                    np.dot(self.layers[l - 1].h, self.layers[l].W[:, j]))
                # Accept-Reject
                logalpha = sum(
                    self.layers[l].h[:, j] * np.log(prop_hW / curr_hW) +
                    (1 - self.layers[l].h[:, j]) * np.log((1 - prop_hW) /
                                                          (1 - curr_hW)))
                if np.log(np.random.uniform()) < logalpha:
                    self.layers[l].W[:, j] = propW[:, j]
Example #10
    def _sample_reference_posterior(
        self,
        num_samples: int,
        num_observation: Optional[int] = None,
    ) -> torch.Tensor:
        from pypolyagamma import PyPolyaGamma
        from tqdm import tqdm

        self.dim_data = 10
        # stimulus_I = torch.load(self.path / "files" / "stimulus_I.pt")
        design_matrix = torch.load(self.path / "files" / "design_matrix.pt")
        true_parameters = self.get_true_parameters(num_observation)
        self.raw = True
        observation_raw = self.get_observation(num_observation)
        self.raw = False

        mcmc_num_samples_warmup = 25000
        mcmc_thinning = 25
        mcmc_num_samples = mcmc_num_samples_warmup + mcmc_thinning * num_samples

        pg = PyPolyaGamma()
        X = design_matrix.numpy()
        obs = observation_raw.numpy()
        Binv = self.prior_params["precision_matrix"].numpy()

        sample = true_parameters.numpy().reshape(-1)  # Init at true parameters
        samples = []
        for j in tqdm(range(mcmc_num_samples)):
            psi = np.dot(X, sample)
            w = np.array([pg.pgdraw(1, b) for b in psi])
            O = np.diag(w)  # noqa: E741
            V = np.linalg.inv(np.dot(np.dot(X.T, O), X) + Binv)
            m = np.dot(V, np.dot(X.T, obs.reshape(-1) - 1 * 0.5))
            sample = np.random.multivariate_normal(np.ravel(m), V)
            samples.append(sample)
        samples = np.asarray(samples).astype(np.float32)
        samples_subset = samples[mcmc_num_samples_warmup::mcmc_thinning, :]

        reference_posterior_samples = torch.from_numpy(samples_subset)

        return reference_posterior_samples
Example #11
    def rng_fn(cls, rng, b, c, size):
        pg = PyPolyaGamma(rng.randint(2**16))

        if not size and b.shape == c.shape == ():
            return pg.pgdraw(b, c)
        else:
            b, c = np.broadcast_arrays(b, c)
            size = tuple(size or ())

            if len(size) > 0:
                b = np.broadcast_to(b, size)
                c = np.broadcast_to(c, size)

            smpl_val = np.empty(b.shape, dtype="double")

            pg.pgdrawv(
                np.asarray(b.flat).astype("double", copy=True),
                np.asarray(c.flat).astype("double", copy=True),
                np.asarray(smpl_val.flat),
            )
            return smpl_val
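
Outside the wrapper class, the vectorized branch reduces to one pgdrawv call over flat double arrays. A standalone sketch with hypothetical values:

import numpy as np
from pypolyagamma import PyPolyaGamma

rng = np.random.RandomState(0)
pg = PyPolyaGamma(rng.randint(2 ** 16))

b = np.full(5, 2.0)               # PG shape parameters
c = np.full(5, -0.5)              # PG tilting parameters
out = np.empty(5, dtype="double")
pg.pgdrawv(b, c, out)             # fills `out` with PG(b, c) draws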
Example #12
    def logisticAndReject(self, X, Y):
        pg = PyPolyaGamma()  # use N(0, I) prior
        n = X.shape[0]
        # Output layer
        #out_fit = LinearRegression(fit_intercept = False).fit(self.layers[self.nlayer-1].h, Y)
        #self.layers[self.nlayer].W = out_fit.coef_
        prior = np.random.normal(0, 1, size=self.hid_dim)
        w = np.zeros(n)
        for k in range(n):
            w[k] = pg.pgdraw(
                1, np.dot(self.layers[self.nlayer - 1].h[k, :], prior))
        kappa = self.layers[self.nlayer].h[:, 0] - 0.5
        omega = np.diag(w)
        Vw = np.linalg.inv(
            np.dot(np.dot(np.transpose(self.layers[self.nlayer].h), omega),
                   self.layers[self.nlayer].h) + 1)[0]
        mw = Vw * np.dot(np.transpose(self.layers[self.nlayer].h), kappa)[0]
        self.layers[self.nlayer].W[:, 0] = np.random.normal(mw, Vw)
        # Hidden layers
        for l in range(self.nlayer - 1, 0, -1):
            for j in range(self.hid_dim):
                # Draw prior beta
                curr = np.random.normal(0, 1, size=self.hid_dim)
                for t in range(self.mc_iter):
                    # Draw latent w
                    w = np.zeros(n)
                    for k in range(n):
                        w[k] = pg.pgdraw(
                            1, np.dot(self.layers[l - 1].h[k, :], curr))
                    # Draw posterior beta
                    kappa = self.layers[l].h[:, j] - 0.5
                    omega = np.diag(w)
                    Vw = np.linalg.inv(
                        np.dot(np.dot(np.transpose(self.layers[l].h), omega),
                               self.layers[l].h) + np.eye(self.hid_dim))
                    mw = np.dot(Vw,
                                np.dot(np.transpose(self.layers[l].h), kappa))
                    curr = np.random.multivariate_normal(mw, Vw)
                self.layers[l].W[:, j] = curr
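Example #13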
def pg_spike_train(X, C, Omega, D_out, nthreads=None):
    '''
    Sample Polya-Gamma wy|Y,C,D,X where Y are spike trains and X are the continuous latent states.
    :param X: continuous latent states
    :param C: emission parameters; the bias parameter is appended as the last column
    :param Omega: list used for storing the Polya-Gamma variables
    :param D_out: dimension of the output, i.e. number of neurons
    :param nthreads: number of threads for parallel sampling; defaults to cpu_count()
    :return: Polya-Gamma samples from the conditional posterior, as a list of numpy arrays
    '''
    for idx in range(len(X)):
        T = X[idx][0, 1:].size
        b = np.ones(T * D_out)
        if nthreads is None:
            nthreads = cpu_count()
        out = np.empty(T * D_out)
        V = C[:, :-1] @ X[idx][:, 1:] + C[:, -1][:, na]  # Ignore the initial point of the time series

        seeds = np.random.randint(2 ** 16, size=nthreads)
        ppgs = [PyPolyaGamma(seed) for seed in seeds]

        pypolyagamma.pgdrawvpar(ppgs, b, V.flatten(order='F'), out)
        Omega[idx] = out.reshape((D_out, T), order='F')

    return Omega
Example #14
    def _init_embedding_aux_params(self):
        self.pg = PyPolyaGamma()
        self.gamma = np.empty((self.n_topics, self.n_words))
        self.gamma_sum_ax1 = np.zeros(self.n_topics)
        self.SIGMA_inv = np.empty(
            (self.n_topics, self.embedding_size, self.embedding_size))
        self.b_cgam = np.empty((self.n_topics, self.n_words))
        self.b_cgam_sum_ax1 = np.zeros(self.n_topics)
        self.MU = np.empty((self.n_topics, self.embedding_size))
        for k in range(self.n_topics):
            for word_index in range(self.n_words):
                self.gamma[k, word_index] = self.pg.pgdraw(
                    1, self.pi[k, word_index])
                self.gamma_sum_ax1[k] += self.gamma[k, word_index]

            self.SIGMA_inv[k] = np.matmul(self.f_outer.T,
                                          self.gamma[k]) + self.sig_I_lamb_inv
            self.b_cgam[k] = self.b[k] - .5 - self.c[k] * self.gamma[k]
            self.b_cgam_sum_ax1[k] = np.sum(self.b_cgam[k])

        self.b_cgam_f = np.matmul(self.b_cgam, self.f)
        for k in range(self.n_topics):
            SIGMA_k = np.linalg.inv(self.SIGMA_inv[k])
            self.MU[k] = np.matmul(SIGMA_k, self.b_cgam_f[k])
Example #15
    def __init__(self, *args, **kwargs):
        super(GibbSampler, self).__init__(*args, **kwargs)

        self.pg = PyPolyaGamma(seed=np.random.randint(2 ** 16, size=None))
Example #16
    def set_seed(self, seed):
        self.np_random.seed(seed)
        # Draw the child seeds from the generator seeded above so that the
        # whole call is deterministic.
        pg_seed = self.np_random.randint(1, 1 + np.iinfo(np.uint32).max)
        ts_seed = self.np_random.randint(1, 1 + np.iinfo(np.uint32).max)
        self.pg = PyPolyaGamma(seed=pg_seed)
        self.ts = ExpTiltedStableDist(seed=ts_seed)
Example #17
    def __init__(self,
                 X,
                 cov_params,
                 base_measure,
                 lmbda=None,
                 burnin=1000,
                 num_integration_points=1000,
                 max_iterations=2000,
                 update_hyperparams=True,
                 update_basemeasure=True,
                 nthreads=1,
                 gp_mu=0,
                 sample_hyperparams_iter=10):
        """ Initialises class of Gibbs sampler. Sampled data is saved in
        dictionary 'self.data'.

        The dictionary self.data contains all the sampled data. 'X' are the
        locations (observations and latent), 'g' the GP at these locations,
        'lmbda' the max rate of latent Poisson process, 'cov_params' the kernel
        parameters, 'M' the number of latent events, 'time' the time for
        samples, 'bm_params' the base measure parameters, 'gp_mu' the mean of
        the GP prior.

        :param X: Data.
        :type X: numpy.ndarray [instances x features]
        :param cov_params: Kernel hyperparameters. List with first entry the
        prefactor and second a D-dimensional array with length scales.
        :type cov_params: list
        :param base_measure:
        :type base_measure: BaseMeasure
        :param lmbda: Initial value for max. Poisson rate. If None
        it will be equal to number of data points. Default is None.
        :type lmbda: float
        :param burnin: Number of iteration before the posterior will be
        sampled. Default=1000.
        :type burnin: int
        :param num_integration_points: Number of integration points. Only
        used for predictive likelihood. Default=1000.
        :type num_integration_points: int
        :param max_iterations: Number of iterations the posterior is sampled.
        Default=2000.
        :type max_iterations: int
        :param update_hyperparams: Whether GP hyperparameters should be
        sampled. Default=True.
        :type update_hyperparams: bool
        :param update_basemeasure: Whether base measure parameters should be
        sampled. Can only be done for certain base measure ('normal',
        'laplace', 'standard_t'). Default=True.
        :type update_basemeasure: bool
        :param nthreads: Number of threads used for PG sampling. Default=1.
        :type nthreads: int
        :param gp_mu: Mean of GP prior.
        :type gp_mu: float
        :param sample_hyperparams_iter: Hyperparameters are sampled every
        x-th step. Default=10.
        :type sample_hyperparams_iter: int
        """
        self.max_iterations = int(max_iterations)
        self.D = X.shape[1]
        self.cov_params = cov_params
        self.X = X
        self.N = self.X.shape[0]
        self.base_measure = base_measure
        self.noise = 1e-4

        if lmbda is None:
            self.lmbda = self.N / 1.
        else:
            self.lmbda = lmbda
        seeds = numpy.random.randint(2**16, size=nthreads)
        self.pg = [PyPolyaGamma(seed) for seed in seeds]
        self.M = int(self.lmbda)
        self.M_save = numpy.empty(self.max_iterations)
        # Position of all events (first N are the actual observed ones)
        self.X_all = numpy.empty([self.N + self.M, self.D])
        self.X_all[:self.N] = self.X
        self.X_all[self.N:] = base_measure.sample_density(self.M)
        self.marks = numpy.empty(self.N + self.M)
        self.K = self.cov_func(self.X_all, self.X_all)
        self.K += self.noise * numpy.eye(self.K.shape[0])
        self.L = numpy.linalg.cholesky(self.K)
        self.L_inv = solve_triangular(self.L,
                                      numpy.eye(self.L.shape[0]),
                                      lower=True,
                                      check_finite=False)
        self.K_inv = self.L_inv.T.dot(self.L_inv)
        self.gp_mu = gp_mu
        self.pred_log_likelihood = []
        self.g = numpy.zeros([self.N + self.M])
        # Probability of insertion or deletion proposal
        self.num_iterations = 0
        self.burnin = int(burnin)
        self.num_integration_points = num_integration_points
        self.place_integration_points()
        self.update_hyperparams = update_hyperparams
        self.update_basemeasure = update_basemeasure
        self.update_hyperparams_iter = sample_hyperparams_iter

        self.data = {
            'X': [],
            'g': [],
            'lmbda': [],
            'cov_params': [],
            'M': [],
            'time': [],
            'bm_params': [],
            'gp_mu': []
        }
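
The cov_params layout documented in the docstring above, spelled out with toy values for 2-D data (the values are illustrative):

import numpy

# [prefactor, per-dimension length scales]
cov_params = [1.0, numpy.array([0.5, 2.0])]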
Example #18
def pg_mcmc(true_params, obs, duration=100, dt=1, seed=None,
            prior_dist=None):
    """Polya-Gamma sampler for GLM

    Returns
    -------
    array : samples from posterior
    """

    if prior_dist is None:
        prior_dist = smoothing_prior(n_params=true_params.size, seed=seed)

    # seeding
    np.random.seed(seed)
    pg = PyPolyaGamma()  # seed=seed

    # observation
    I = obs['I'].reshape(1,-1)
    S_obs = obs['data'].reshape(-1)

    # simulation protocol
    num_param_inf = len(true_params)
    dt = 1  # NOTE: the code below assumes unit time bins, so the dt argument is effectively ignored
    t = np.arange(0, duration, dt)

    N = 1   # Number of trials
    M = num_param_inf-1   # Length of the filter

    # build covariate matrix X, such that X * h returns convolution of x with filter h
    X = np.zeros(shape=(len(t), M))
    for j in range(M):
        X[j:,j] = I[0,0:len(t)-j]

    # prior
    # smoothing prior on h; N(0, 1) on b0. Smoothness encouraged by penalizing
    # 2nd order differences of elements of filter
    #prior_dist = prior(n_params=true_params.size, seed=seed)
    Binv = prior_dist.P

    # The sampler consists of two iterative Gibbs updates
    # 1) sample auxiliary variables: w ~ PG(N, psi)
    # 2) sample parameters: beta ~ N(m, V); V = inv(X'O X + Binv), m = V*(X'k), k = y - N/2
    nsamp = 500000   # samples to evaluate the posterior

    # add a column of 1s to the covariate matrix X, in order to model the offset too
    X = np.concatenate((np.ones(shape=(len(t), 1)), X), axis=1)

    beta = true_params*1.
    BETA = np.zeros((M+1,nsamp))

    for j in tqdm(range(1, nsamp)):
        psi = np.dot(X, beta)
        w = np.array([pg.pgdraw(N, b) for b in psi])
        O = np.diag(w)

        V = np.linalg.inv(np.dot(np.dot(X.T, O), X) + Binv)
        m = np.dot(V, np.dot(X.T, S_obs - N * 0.5))

        beta = np.random.multivariate_normal(np.ravel(m), V)

        BETA[:,j] = beta

    # burn-in
    burn_in = 100000
    BETA_sub_samp = BETA[:, burn_in:nsamp:30]

    # return sampling results
    return BETA_sub_samp
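
A hypothetical invocation, only to show the expected obs layout; note that nsamp is hard-coded to 500000 inside, so a real run is expensive:

import numpy as np

duration = 100
true_params = np.zeros(10)                           # 9-tap filter plus offset
obs = {
    'I': np.random.randn(duration),                  # stimulus trace
    'data': np.random.binomial(1, 0.5, duration),    # binary spike train
}
posterior_samples = pg_mcmc(true_params, obs, duration=duration, seed=42)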
Example #19
import argparse

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

from polyagamma import polyagamma
from pypolyagamma import PyPolyaGamma

sns.set_style("darkgrid")

rng = np.random.default_rng(0)
pg = PyPolyaGamma(0)

data = {
    "devroye": None,
    "alternate": None,
    "gamma": None,
    "saddle": None,
    "$pypolyagamma$": None
}


def plot_densities(h=1, z=0, size=1000):
    for method in data:
        if method == "$pypolyagamma$":
            data[method] = [pg.pgdraw(h, z) for _ in range(size)]
        else:
            # The call is truncated in the original snippet; completing it with
            # the sample count is an assumption about the intended arguments.
            data[method] = polyagamma(h=h, z=z, method=method, size=size)
Example #20
def s_blk(g_num, b_mu_ll, q_mu, b_v, q_v, b_mat_mu, q_arr, b_mu_lk, b_mat_v,
          ob, g_ij, z_i):
    #sample b_lk q
    n_lk, n_lk1, m_l, m_l1 = get_nlk(ob, g_num, g_ij, z_i)
    for l in range(g_num):
        for k in range(l, g_num):
            samplenum = 100
            if l == k:
                b = np.zeros((samplenum, 2))
                b[0, 0] = b_mu_ll
                b[0, 1] = q_mu
                mu = np.array([b_mu_ll, q_mu])
                var = np.array(([b_v, 0], [0, q_v]))
                pg = PyPolyaGamma(seed=0)
                omegas = np.ones(2)
                x = np.array(([1, 0], [1, 1]))
                k_arr = np.array(
                    [n_lk1[l, l] - n_lk[l, l] / 2, m_l1[l] - m_l[l] / 2])

                for t in range(1, samplenum):

                    omegas[0] = pg.pgdraw(n_lk[l, l], b[t - 1, 0])
                    omegas[1] = pg.pgdraw(m_l[l], np.sum(b[t - 1, :]))
                    omega = np.array(([omegas[0], 0], [0, omegas[1]]))
                    v = inv(
                        np.dot(np.dot(np.transpose(x), omega), x) + inv(var))
                    m = np.dot(
                        v,
                        np.dot(np.transpose(x), np.transpose(k_arr)) +
                        np.dot(inv(var), mu))
                    s = npr.multivariate_normal(m, v)
                    b[t, 0] = np.copy(s[0])
                    b[t, 1] = np.copy(s[1])
                b_mat_mu[l, l] = np.sum(b[50:samplenum, 0]) / (samplenum - 50)
                q_arr[l] = np.sum(b[50:samplenum, 1]) / (samplenum - 50)

            else:
                b = np.zeros((samplenum, 2))
                b[0, 0] = b_mu_lk
                b[0, 1] = b_mu_lk
                mu = np.array([b_mu_lk, b_mu_lk])
                var = np.copy(b_mat_v[:, :, l, k])
                pg = PyPolyaGamma(seed=0)
                omegas = np.ones(2)
                k_arr = np.array([
                    n_lk1[l, k] - n_lk[l, k] / 2, n_lk1[k, l] - n_lk[k, l] / 2
                ])
                x = np.array(([1, 0], [0, 1]))
                for t in range(1, samplenum):
                    omegas[0] = pg.pgdraw(n_lk[l, k], b[t - 1, 0])
                    omegas[1] = pg.pgdraw(n_lk[k, l], b[t - 1, 1])
                    omega = np.array(([omegas[0], 0], [0, omegas[1]]))

                    v = inv(
                        np.dot(np.dot(np.transpose(x), omega), x) + inv(var))
                    m = np.dot(
                        v,
                        np.dot(np.transpose(x), np.transpose(k_arr)) +
                        np.dot(inv(var), mu))
                    s = npr.multivariate_normal(m, v)
                    b[t, 0] = np.copy(s[0])
                    b[t, 1] = np.copy(s[1])
                b_mat_mu[l, k] = np.sum(b[50:samplenum, 0]) / (samplenum - 50)
                b_mat_mu[k, l] = np.sum(b[50:samplenum, 1]) / (samplenum - 50)