def resample_omega(self, augmented_data_list):
    """
    Resample omega from its conditional Polya-gamma distribution
    """
    K = self.K
    for data in augmented_data_list:
        x = data["x"]
        T = data["T"]

        # TODO: Fix this hack
        if "z" in data:
            z = data["z"]
        elif "states" in data:
            z = data["states"].stateseq
        else:
            raise Exception("Could not find latent states in augmented data!")

        psi = z.dot(self.C.T) + self.mu[None, :]
        N = N_vec(x).astype(float)
        tmp_omg = np.zeros(N.size)
        ppg.pgdrawvpar(self.ppgs, N.ravel(), psi.ravel(), tmp_omg)
        data["omega"] = tmp_omg.reshape((T, K - 1))

        # Clip out zeros
        data["omega"] = np.clip(data["omega"], 1e-8, np.inf)
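# Several of the snippets here rely on N_vec and kappa_vec helpers from the
# stick-breaking representation of the multinomial (the K-1 conditional
# binomials behind the PG augmentation). The sketch below is consistent with
# how they are used above -- counts along the last axis -- but it is an
# illustration under those assumptions, not the library's actual source.
import numpy as np

def N_vec(x):
    """Remaining counts b_k = N - sum_{j<k} x_j for the K-1 stick-breaking
    binomials of each multinomial count vector (row-wise sketch)."""
    x = np.atleast_2d(x)
    N = x.sum(axis=-1, keepdims=True)
    return np.concatenate((N, N - np.cumsum(x, axis=-1)[:, :-2]), axis=-1)

def kappa_vec(x):
    """kappa_k = x_k - b_k / 2, the Polya-gamma 'kappa' of each binomial."""
    x = np.atleast_2d(x)
    return x[:, :-1] - N_vec(x) / 2.0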
def resample_auxiliary_variables(self):
    C, D, ed = self.C, self.D, self.emission_distn
    psi = self.gaussian_states.dot(C.T) + self.inputs.dot(D.T) + ed.b.T
    b = ed.b_func(self.data)

    import pypolyagamma as ppg
    ppg.pgdrawvpar(self.ppgs, b.ravel(), psi.ravel(), self.omega.ravel())
def sample_w(self):
    """
    Sample the augmenting w parameters from their conditional posterior
    distribution. For details about the augmentation, see the paper.

    :return: samples of w_i from a Polya-gamma distribution, as a list of
        lists of arrays: num_images x num_subjects x T(image, subject).
    """
    nthreads = pypolyagamma.get_omp_num_threads()
    seeds = np.random.randint(2**16, size=nthreads)
    ppgs = [pypolyagamma.PyPolyaGamma(seed) for seed in seeds]

    w = []
    for i in range(len(self.saliencies_ts)):
        w.append([])
        for saliency_ts in self.saliencies_ts[i]:
            T = saliency_ts.shape[0]
            A = np.ones(T)
            w_is = np.zeros(T)
            pypolyagamma.pgdrawvpar(
                ppgs, A,
                np.abs(self.b.value * (saliency_ts - self.s_0.value)),
                w_is)
            w[-1].append(w_is)
    return w
def pg_spike_train(X, Y, C, Omega, D_out, nthreads=None, N=1, neg_bin=False):
    """
    Sample Polya-Gamma wy|Y,C,D,X where Y are spike trains and X are the
    continuous latent states.

    :param X: list of continuous latent states
    :param Y: list of spike trains
    :param C: emission parameters; the bias parameter is appended as the last column
    :param Omega: list used for storing Polya-gamma variables
    :param D_out: dimension of the output, i.e. the number of neurons
    :param nthreads: number of threads for parallel sampling
    :param N: maximum number of spikes for a binomial distribution, or number
        of failures for a negative binomial
    :param neg_bin: boolean flag dictating whether the likelihood is negative binomial
    :return: list of Polya-gamma samples, one array per time series
    """
    for idx in range(len(X)):
        T = X[idx][0, 1:].size
        b = N * np.ones(T * D_out)
        if neg_bin:
            b += Y[idx].flatten(order='F')
        if nthreads is None:
            nthreads = n_cpu

        out = np.empty(T * D_out)
        # Ignore the first point of the time series
        V = C[:, :-1] @ X[idx][:, 1:] + C[:, -1][:, na]

        seeds = np.random.randint(2**16, size=nthreads)
        ppgs = [PyPolyaGamma(seed) for seed in seeds]
        pypolyagamma.pgdrawvpar(ppgs, b, V.flatten(order='F'), out)
        Omega[idx] = out.reshape((D_out, T), order='F')
    return Omega
def pg_tree_posterior(states, omega, R, path, depth, nthreads=None):
    '''
    Sample Polya-Gamma w_{n,t}|x_t,z_{t+1}, where the subscript n denotes the
    hyperplane being augmented with the Polya-Gamma variable. This augments
    all of the logistic regressions traversed on the way down the tree.

    :param states: continuous latent states; a list of numpy arrays
    :param omega: list for storing the Polya-gamma variables
    :param R: normal vectors of the hyperplanes, with the bias term as the
        last element of each array; a list of arrays
    :param path: path taken through the tree at each time t; a list of numpy arrays
    :param depth: maximum depth of the tree
    :return: a list of PG draws, one per time series
    '''
    for idx in range(len(states)):
        T = states[idx][0, :].size
        b = np.ones(T * (depth - 1))
        if nthreads is None:
            nthreads = cpu_count()

        v = np.ones((depth - 1, T))
        out = np.empty(T * (depth - 1))

        # Compute the parameters of the conditional
        for d in range(depth - 1):
            for t in range(T):
                # Find which node was traversed at this depth
                index = int(path[idx][d, t] - 1)
                v[d, t] = np.matmul(R[d][:-1, index],
                                    np.array(states[idx][:, t])) + R[d][-1, index]

        seeds = np.random.randint(2**16, size=nthreads)
        ppgs = [PyPolyaGamma(seed) for seed in seeds]

        # Sample in parallel
        pypolyagamma.pgdrawvpar(ppgs, b, v.flatten(order='F'), out)
        omega[idx] = out.reshape((depth - 1, T), order='F')
    return omega
def _resample_b(self):
    V = self.V

    # Sample auxiliary variables. We could be more efficient here,
    # since we only need the lower triangular part.
    XXTs = [(self.X * m).dot(self.X.T) for m in self.ms]
    psis = [self.b + XXT for XXT in XXTs]
    omegas = []
    for psi in psis:
        omega = np.zeros(V**2)
        pgdrawvpar(self.ppgs, np.ones(V**2), psi.ravel(), omega)
        omegas.append(omega.reshape((V, V)))

    # Sample b
    J = 1.0 / (self.sigmasq_b + 1e-8)
    h = 0.0
    for A, mask, XXT, omega in zip(self.As, self.masks, XXTs, omegas):
        J += omega * mask
        h += (A - 0.5 - omega * XXT) * mask

    sigmasq = 1. / J
    mu = sigmasq * h
    self.b = mu + np.sqrt(sigmasq) * npr.randn(V, V)

    # Symmetrize -- only keep the lower triangular part
    L = np.tril(np.ones((V, V)), k=-1)
    self.b = self.b * L + self.b.T * L.T
def resample_omega(self):
    pgdrawvpar(
        self.ppgs,
        N_vec(self.time_word_topic_counts, axis=1).astype('float64').ravel(),
        self.psi.ravel(),
        self.omega.ravel())
    np.clip(self.omega, 1e-32, np.inf, out=self.omega)
def sample_marks(self):
    """
    Samples Polya-Gamma variables (at observed and latent events).
    """
    self.marks = numpy.empty(self.N + self.M)
    pgdrawvpar(self.pg, numpy.ones(self.N + self.M), self.g, self.marks)
def resample_omega(z, x):
    # Resample with Jesse Windle's ported code
    b = 1. / T
    omega = np.zeros(1)
    psi = z
    ppg.pgdrawvpar(ppgs, np.array([b]), np.array([psi]), omega)
    return omega[0]
def _info_form_heldout_log_likelihood(self, X, M=10):
    """
    We can analytically integrate out z (latent states) given omega. To
    estimate the heldout log likelihood of a data sequence, we Monte Carlo
    integrate over omega, where omega is drawn from the prior.

    :param X: heldout data sequence
    :param M: number of Monte Carlo samples for integrating out omega
    :return: estimated heldout log likelihood and its bootstrap standard deviation
    """
    # assert len(self.data_list) == 1, "TODO: Support more than 1 data set"
    T, K = X.shape
    assert K == self.K
    kappa = kappa_vec(X)
    N = N_vec(X)

    # Compute the data-specific normalization constant from the
    # augmented multinomial distribution
    Z_mul = (gammaln(N + 1) - gammaln(X[:, :-1] + 1)
             - gammaln(N - X[:, :-1] + 1)).sum()
    Z_mul += (-N * np.log(2.)).sum()

    # Monte Carlo integrate wrt omega ~ PG(N, 0)
    import pypolyagamma as ppg
    hlls = np.zeros(M)
    for m in range(M):
        # Sample omega using the emission distribution's samplers
        omega = np.zeros(N.size)
        ppg.pgdrawvpar(self.emission_distn.ppgs, N.ravel(),
                       np.zeros(N.size), omega)
        omega = omega.reshape((T, K - 1))

        # Exactly integrate out the latent states z using message passing.
        # The "data" are the normal potentials from the PG augmentation.
        states = MultinomialLDSStates(model=self, data=X)
        conditional_mean = kappa / np.clip(omega, 1e-64, np.inf) \
            - self.emission_distn.mu[None, :]
        conditional_prec = np.zeros((T, K - 1, K - 1))
        for t in range(T):
            conditional_prec[t, :, :] = np.diag(omega[t, :])

        Z_lds = states.info_log_likelihood(conditional_mean, conditional_prec)

        # Sum them up to get the heldout log likelihood for this omega
        hlls[m] = Z_mul + Z_lds

    # Take the log of the average to get the log likelihood
    hll = logsumexp(hlls) - np.log(M)

    # Use the bootstrap to compute error bars
    samples = np.random.choice(hlls, size=(100, M), replace=True)
    hll_samples = logsumexp(samples, axis=1) - np.log(M)
    std_hll = hll_samples.std()

    return hll, std_hll
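# The averaging step above takes the log of a Monte Carlo average on the log
# scale via the standard log-mean-exp trick. As a standalone illustration:
import numpy as np
from scipy.special import logsumexp

def log_mean_exp(log_vals):
    """Numerically stable log(mean(exp(log_vals)))."""
    log_vals = np.asarray(log_vals)
    return logsumexp(log_vals) - np.log(log_vals.size)

# e.g. averaging M = 3 likelihood samples given on the log scale
print(log_mean_exp([-1000.0, -1001.0, -1002.0]))  # ~ -1000.69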
def resample_omega(self, x):
    """
    Resample omega from its conditional Polya-gamma distribution
    """
    assert x.ndim == 2

    # Sum the N's (i.e. the b's in the denominator)
    N = x.sum()
    ppg.pgdrawvpar(self.ppgs, N * np.ones(self.K), self.rho, self.omega)
def omega(self, X, y):
    """
    In the Polya-gamma augmentation, the precision is given by an
    auxiliary variable that we must sample.
    """
    import pypolyagamma as ppg
    psi = self.activation(X)
    omega = np.zeros(y.size)
    ppg.pgdrawvpar(self.ppgs, self.b_func(y).ravel(), psi.ravel(), omega)
    return omega.reshape(y.shape)
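# For context, the sampled omega typically feeds a conjugate Gaussian update
# of the regression weights. Below is a minimal sketch of one Gibbs sweep for
# Bayesian logistic regression under a N(0, I / prior_prec) prior; X, y, w,
# and the function name are illustrative assumptions, not part of the class
# above.
import numpy as np
import pypolyagamma as ppg

def gibbs_logistic_sweep(X, y, w, pg, prior_prec=1.0):
    """One PG-augmented Gibbs sweep. X is (n, d), y is in {0, 1}^n, w is the
    current weight vector, and pg is a PyPolyaGamma sampler."""
    n, d = X.shape
    psi = X.dot(w)

    # 1. Sample omega_i ~ PG(1, psi_i) for each observation (serial sampler)
    omega = np.zeros(n)
    pg.pgdrawv(np.ones(n), psi, omega)

    # 2. w | omega, y is Gaussian with precision X' diag(omega) X + prior and
    #    mean solving  precision * mu = X' kappa,  where kappa = y - 1/2
    kappa = y - 0.5
    prec = X.T.dot(omega[:, None] * X) + prior_prec * np.eye(d)
    mu = np.linalg.solve(prec, X.T.dot(kappa))
    L = np.linalg.cholesky(prec)
    return mu + np.linalg.solve(L.T, np.random.randn(d))

# Usage sketch:
#   sampler = ppg.PyPolyaGamma(0)
#   w = np.zeros(X.shape[1])
#   for _ in range(num_iters):
#       w = gibbs_logistic_sweep(X, y, w, sampler)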
def resample_omega(self, x):
    """
    Resample omega from its conditional Polya-gamma distribution
    """
    assert x.ndim == 2
    N = N_vec(x)

    # Sum the N's (i.e. the b's in the denominator)
    NN = N.sum(0).astype(float)
    ppg.pgdrawvpar(self.ppgs, NN, self.psi, self.omega)
def resample_omega(self):
    # Resample the omega's given N and psi
    for data in self.data_list:
        M = data["M"]
        N = data["N"]
        psi = data["psi"] + self.mu[None, :]

        # Go through each GP and resample omega given the likelihood
        tmp_omg = np.zeros(N.size)
        ppg.pgdrawvpar(self.ppgs, N.ravel(), psi.ravel(), tmp_omg)
        data["omega"] = tmp_omg.reshape((M, self.K - 1))
def resample(self, augmented_data_list, temperature=1.0):
    """
    Resample omega given xi and psi, then resample psi given
    omega, X, w, and sigma
    """
    for augmented_data in augmented_data_list:
        psi = self.activation.compute_psi(augmented_data)

        # Resample with Jesse Windle's ported code
        b = self.b(augmented_data) * temperature
        ppg.pgdrawvpar(self.ppgs, b.ravel(), psi.ravel(),
                       augmented_data["omega"].ravel())

        # Update kappa for the new temperature
        a = self.a(augmented_data) * temperature
        augmented_data["kappa"] = a - b / 2.0
def monte_carlo_approx(M=100000):
    # Verify the Polya-gamma integral identity
    #   (e^psi)^a / (1 + e^psi)^b
    #     = 2^{-b} e^{(a - b/2) psi} E_{w ~ PG(b, 0)}[e^{-w psi^2 / 2}]
    # by Monte Carlo. psi, a, and b are assumed to be defined at module scope.
    ppgs = initialize_polya_gamma_samplers()

    # Compute the left hand side analytically
    loglhs = psi * a - b * np.log1p(np.exp(psi))

    # Compute the right hand side with Monte Carlo
    omegas = np.ones(M)
    ppg.pgdrawvpar(ppgs, b * np.ones(M), np.zeros(M), omegas)
    logrhss = -b * np.log(2) + (a - b / 2.) * psi - 0.5 * omegas * psi**2
    logrhs = logsumexp(logrhss) - np.log(M)

    print("Monte Carlo")
    print("log LHS: ", loglhs)
    print("log RHS: ", logrhs)
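# A self-contained version of the check above, with illustrative values for
# psi, a, and b, and with the samplers built the same way as in the other
# snippets (initialize_polya_gamma_samplers is assumed to do the same):
import numpy as np
import pypolyagamma as ppg
from scipy.special import logsumexp

def check_pg_identity(psi=1.5, a=3.0, b=5.0, M=100000):
    nthreads = ppg.get_omp_num_threads()
    seeds = np.random.randint(2**16, size=nthreads)
    ppgs = [ppg.PyPolyaGamma(seed) for seed in seeds]

    # LHS: log[ (e^psi)^a / (1 + e^psi)^b ]
    loglhs = psi * a - b * np.log1p(np.exp(psi))

    # RHS: log[ 2^{-b} e^{(a - b/2) psi} E_{w ~ PG(b, 0)}[e^{-w psi^2 / 2}] ]
    omegas = np.zeros(M)
    ppg.pgdrawvpar(ppgs, b * np.ones(M), np.zeros(M), omegas)
    logrhss = -b * np.log(2) + (a - b / 2.) * psi - 0.5 * omegas * psi**2
    logrhs = logsumexp(logrhss) - np.log(M)

    return loglhs, logrhs  # the two should agree up to Monte Carlo error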
def test_parallel(verbose=False):
    # Call the parallel vectorized version
    np.random.seed(0)
    n = 5
    nthreads = 8
    v3 = np.zeros(n)
    a = 14 * np.ones(n)
    b = 0 * np.ones(n)
    seeds = np.random.randint(2**16, size=nthreads)
    ppgs = [pypolyagamma.PyPolyaGamma(seed) for seed in seeds]
    pypolyagamma.pgdrawvpar(ppgs, a, b, v3)

    if verbose:
        print(v3)

    return True
def sample_w_i(S, J_i):
    """
    :param S: observation matrix
    :param J_i: neuron i's couplings
    :return: samples for w_i from a Polya-gamma distribution
    """
    nthreads = pypolyagamma.get_omp_num_threads()
    seeds = np.random.randint(2**16, size=nthreads)
    ppgs = [pypolyagamma.PyPolyaGamma(seed) for seed in seeds]

    T = S.shape[0]
    A = np.ones(T)
    w_i = np.zeros(T)
    pypolyagamma.pgdrawvpar(ppgs, A, np.dot(S, J_i), w_i)
    return w_i
def _resample_Omega(self, As=[]):
    """
    Sample auxiliary Polya-gamma variables for each adjacency matrix

    :param As: list of adjacency matrices
    :return: list of Polya-gamma matrices, one per adjacency matrix
    """
    Omegas = []
    for A in As:
        tmp = np.empty(A.size, dtype=np.float64)
        # pgdrawvpar expects float64 arrays for the shape parameter b
        ppg.pgdrawvpar(self.ppgs, np.ones(A.size, dtype=np.float64),
                       self.Mu.ravel("C"), tmp)
        Omega = tmp.reshape((self.N, self.N), order="C")
        Omegas.append(Omega)
    return Omegas
def test_parallel(verbose=False):
    # Call the parallel vectorized version
    np.random.seed(0)
    n = 5
    nthreads = pypolyagamma.get_omp_num_threads()
    v3 = np.zeros(n)
    a = 14 * np.ones(n)
    b = 0 * np.ones(n)
    seeds = np.random.randint(2**16, size=nthreads)
    ppgs = [pypolyagamma.PyPolyaGamma(seed) for seed in seeds]
    pypolyagamma.pgdrawvpar(ppgs, a, b, v3)

    if verbose:
        print(v3)

    return True
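# For comparison, a minimal standalone script contrasting the serial pgdrawv
# method with the module-level parallel pgdrawvpar call; the sizes and seeds
# are arbitrary. Both draw PG(1, 0) variables, whose mean is 1/4.
import numpy as np
import pypolyagamma

n = 100000
b = np.ones(n)
z = np.zeros(n)

# Serial: a single sampler fills the whole output vector
pg = pypolyagamma.PyPolyaGamma(0)
out_serial = np.zeros(n)
pg.pgdrawv(b, z, out_serial)

# Parallel: one independently seeded sampler per OMP thread
nthreads = pypolyagamma.get_omp_num_threads()
seeds = np.random.randint(2**16, size=nthreads)
ppgs = [pypolyagamma.PyPolyaGamma(seed) for seed in seeds]
out_par = np.zeros(n)
pypolyagamma.pgdrawvpar(ppgs, b, z, out_par)

print(out_serial.mean(), out_par.mean())  # both should be close to 0.25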
def _resample_omegav(self, v):
    V = self.V
    notv = np.ones(V, dtype=bool)
    notv[v] = False

    omegas = []
    for n, (A, m) in enumerate(zip(self.As, self.ms)):
        # Scale the covariates by the mask
        Xnotv = self.X[notv] * m
        xv = self.X[v]
        psi = self.b[v][notv] + Xnotv.dot(xv)

        bb = np.ones(V - 1)
        omega = np.zeros(V - 1)
        pgdrawvpar(self.ppgs, bb, psi, omega)
        omegas.append(omega)
    return omegas
def resample_transition_auxiliary_variables(self):
    # Resample the auxiliary variable for the transition matrix
    trans_distn = self.trans_distn
    prev_state = one_hot(self.stateseq[:-1], self.num_states)
    next_state = one_hot(self.stateseq[1:], self.num_states)

    A = trans_distn.A[:, :self.num_states]
    C = trans_distn.A[:, self.num_states:self.num_states + self.D_latent]
    # D = trans_distn.A[:, self.num_states + self.D_latent:]
    b = trans_distn.b

    psi = (prev_state.dot(A.T)
           + self.covariates.dot(C.T)
           + b.T
           # + self.inputs.dot(D.T)
           )

    b_pg = trans_distn.b_func(next_state[:, :-1])

    import pypolyagamma as ppg
    ppg.pgdrawvpar(self.ppgs, b_pg.ravel(), psi.ravel(),
                   self.trans_omegas.ravel())
def augment_data(self, augmented_data):
    """
    Add a matrix of augmented counts

    :param augmented_data: dict containing the count matrix S
    """
    S = augmented_data["S"]
    T = S.shape[0]
    assert S.shape[1] == self.N

    # Initialize the auxiliary variables with a draw from the PG(1, 0) prior
    augmented_data["omega"] = np.empty((T, self.N))
    ppg.pgdrawvpar(self.ppgs, np.ones(T * self.N), np.zeros(T * self.N),
                   augmented_data["omega"].ravel())

    # Precompute kappa (assuming that it is constant given the data).
    # That is, we can only do this if xi is not resampled.
    augmented_data["kappa"] = self.a(augmented_data) - self.b(augmented_data) / 2.0
def resample_auxiliary_variables(self):
    # TODO: move this to cython
    T, C, D, ed = self.T, self.C, self.D, self.emission_distn
    data, size, indices, indptr = \
        self.masked_data, self.masked_data.size, \
        self.masked_data.indices, self.masked_data.indptr

    psi = np.zeros(size)
    offset = 0
    for t in range(T):
        for n in indices[indptr[t]:indptr[t + 1]]:
            psi[offset] = self.gaussian_states[t].dot(C[n])
            psi[offset] += self.inputs[t].dot(D[n])
            psi[offset] += ed.b[n]
            offset += 1

    psi = csr_matrix((psi, indices, indptr), shape=data.shape)
    b = ed.b_func(data)

    # Allocate vector for omega
    self.omega = np.zeros(size)
    ppg.pgdrawvpar(self.ppgs, b.data, psi.data, self.omega)
    self.omega = csr_matrix((self.omega, indices, indptr), shape=data.shape)
def pg_spike_train(X, C, Omega, D_out, nthreads=None):
    '''
    Sample Polya-Gamma wy|Y,C,D,X where Y are spike trains and X are the
    continuous latent states.

    :param X: continuous latent states
    :param C: emission parameters; the bias parameter is appended as the last column
    :param Omega: list used for storing Polya-gamma variables
    :param D_out: dimension of the output, i.e. the number of neurons
    :return: Polya-gamma samples from the conditional posterior, in a list of
        numpy arrays
    '''
    for idx in range(len(X)):
        T = X[idx][0, 1:].size
        b = np.ones(T * D_out)
        if nthreads is None:
            nthreads = cpu_count()

        out = np.empty(T * D_out)
        # Ignore the initial point of the time series
        V = C[:, :-1] @ X[idx][:, 1:] + C[:, -1][:, na]

        seeds = np.random.randint(2**16, size=nthreads)
        ppgs = [PyPolyaGamma(seed) for seed in seeds]
        pypolyagamma.pgdrawvpar(ppgs, b, V.flatten(order='F'), out)
        Omega[idx] = out.reshape((D_out, T), order='F')
    return Omega
""" Call the different sample methods """ import numpy as np np.random.seed(0) import pypolyagamma as pypolyagamma rng = pypolyagamma.PyRNG(0) ppg = pypolyagamma.PyPolyaGamma(np.random.randint(2**16)) # # Call the single sample # v1 = ppg.pgdraw(1.,1.) # print v1 # # # Call the vectorized version n = 5 # v2 = np.zeros(n) a = 14 * np.ones(n, dtype=np.float) b = 0 * np.ones(n, dtype=np.float) # ppg.pgdrawv(a, b, v2) # print v2 # Call the parallel vectorized version # n = 5 nthreads = 8 v3 = np.zeros(n) seeds = np.random.randint(2**16, size=nthreads) ppgs = [pypolyagamma.PyPolyaGamma(seed) for seed in seeds] pypolyagamma.pgdrawvpar(ppgs, a, b, v3) print(v3)
def resample_omega(self):
    pgdrawvpar(
        self.ppgs,
        N_vec(self.doc_topic_counts).astype('float64').ravel(),
        self.psi.ravel(),
        self.omega.ravel())
    np.clip(self.omega, 1e-32, np.inf, out=self.omega)
def draw_w(self):
    """w: L-by-N; augmented latent variable"""
    ns = np.ones(self.N, dtype=float)

    # Draw the Polya-gamma variables in parallel, one row at a time
    for l in range(self.L):
        ppg.pgdrawvpar(self.ppgs, ns, self.psi[l, :], self.w[l, :])
def _resample_auxiliary_vars(self):
    import pypolyagamma as ppg
    b, psi = self.b, self.psi
    ppg.pgdrawvpar(self.ppgs, b.ravel(), psi.ravel(), self.omega.ravel())
def resample_omega():
    ppg.pgdrawvpar(ppgs, np.ones(T * N), psi.ravel(), omega.ravel())
""" Call the different sample methods """ import numpy as np np.random.seed(0) import pypolyagamma as ppg # Call the parallel vectorized version n = 16 b = 2 * np.ones(n, dtype=np.float) z = 0 * np.ones(n, dtype=np.float) v3 = np.zeros(n) # # print "Different seeds" # nthreads = 1 # seeds = np.random.randint(2**16, size=nthreads) # ppgs = [ppg.PyPolyaGamma(seed) for seed in seeds] # ppg.pgdrawvpar(ppgs, b, z, v3) # print v3 # Now try it where they all have the same seed print "Same seed" nthreads = ppg.get_omp_num_threads() print "N threads: ", nthreads seeds = 5 * np.ones(nthreads, dtype=np.uint) ppgs = [ppg.PyPolyaGamma(seed) for seed in seeds] ppg.pgdrawvpar(ppgs, b, z, v3) print v3
def resample(self, psi):
    ppg.pgdrawvpar(self.ppgs, self.b.ravel(), psi.ravel(), self.omega.ravel())
def test_parallel2():
    """Test pgdrawvpar for every combination of n relative to nthreads and
    nthreads relative to the number of available OMP threads."""
    num_threads = pypolyagamma.get_omp_num_threads()
    if num_threads < 2:
        return

    np.random.seed(0)

    def draw(nthreads, n):
        v3 = np.zeros(n)
        a = 14 * np.ones(n)
        b = 0 * np.ones(n)
        seeds = np.random.randint(2**16, size=nthreads)
        ppgs = [pypolyagamma.PyPolyaGamma(seed) for seed in seeds]
        pypolyagamma.pgdrawvpar(ppgs, a, b, v3)

    # Cases 1-9: n < nthreads, n > nthreads, and n == nthreads, each with
    # nthreads equal to, below, and above the number of OMP threads
    for dn in (-1, +1, 0):
        for nthreads in (num_threads, num_threads - 1, num_threads + 1):
            draw(nthreads, nthreads + dn)

    return True