Example #1
 def draw(self, K = 10, N = 1*10**5, m = 3, gaussian = False):
     
     if self.seed is not None:
         np.random.seed(self.seed)
  
     alphas = gamma.rvs(5, size=m)               # shape parameter
     #print(sum(alphas))                              # equivalent sample size
     self.p = dirichlet.rvs(alpha = alphas, size = 1)[0]
     self.phi_is = multinomial.rvs(1, self.p, size=N)       # draw from categorical p.m.f
     
     self.x_draws = np.zeros((N,K))
     self.hyper_loc, self.hyper_scale, self.thetas, self.var, self.covs, self.rdraws = dict(), dict(), dict(), tuple(), tuple(), tuple()
     
     for i in range(m):
     
           self.hyper_loc["mean"+str(i+1)] = norm.rvs(size = 1, loc = 0, scale = 5)
           self.hyper_scale["scale"+str(i+1)] = 1/gamma.rvs(5, size=1)
           
           self.thetas["mean"+str(i+1)] = norm.rvs(size = K, loc = self.hyper_loc["mean"+str(i+1)], 
                       scale = self.hyper_scale["scale"+str(i+1)])
           self.thetas["Sigma"+str(i+1)] = np.eye(K)*(1/gamma.rvs(5, size=K))
           self.thetas["nu"+str(i+1)] = randint.rvs(K+2, K+10, size=1)[0]
     
           if gaussian:
              self.covs += (self.thetas['Sigma'+str(i+1)], )
           else:
              self.covs += (wishart.rvs(df = self.thetas['nu'+str(i+1)], scale = self.thetas['Sigma'+str(i+1)], size=1),)
              self.var += (self.thetas["nu"+str(i+1)]/(self.thetas["nu"+str(i+1)]-2)*self.covs[i],)       # variance covariance matrix of first Student-t component
           self.rdraws += (np.random.multivariate_normal(self.thetas["mean"+str(i+1)], self.covs[i], N),)
     
           self.Phi = np.tile(self.phi_is[:,i], K).reshape(K,N).T              # repeat phi vector to match with random matrix
           self.x_draws += np.multiply(self.Phi, self.rdraws[i])                
     return self.x_draws
Example #2
def part_b(fname=fname):
    fname += '_b'
    n0_list = np.arange(1,250)
    n = int(1e5)
    alpha=237
    beta=20 
    pA = gamma.rvs(alpha, scale=1.0/beta, size=n)
    Pr = np.zeros(n0_list.size) 
    for j, n0 in enumerate(n0_list):
        y = 0.0
        alpha=12*n0+113
        beta=n0+13 
        pB = gamma.rvs(alpha, scale=1.0/beta, size=n)
        for i in range(n):
            if (pB[i] < pA[i]):
                y += 1.0
        Pr[j] = y/float(n)
    plt.figure()
    plt.plot(n0_list, Pr, lw=1.5)
    plt.xlabel(r'$n_0$',fontsize=labelFontSize)
    plt.ylabel(r'$Pr(\theta_B < \theta_A \mid y_A, y_B)$',fontsize=labelFontSize)
    plt.title('4.2b',fontsize=titleFontSize)
    plt.xticks(fontsize=tickFontSize)
    plt.yticks(fontsize=tickFontSize)
    plt.savefig(fname+'.'+imgFmt, format=imgFmt)
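The inner counting loop above can be replaced by a single vectorized comparison. A sketch of the equivalent statement, reusing the same pA and pB draw arrays:

    # fraction of joint posterior draws with theta_B < theta_A
    Pr[j] = np.mean(pB < pA)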
Example #3
    def generate_posterior_predictive_simplex_3d(self, cols, m = 20):
        keep_idx = np.array(sample(range(self.nSamp), self.nDat), dtype = int)

        delta = self.samples.delta[keep_idx]
        alpha = [self.samples.alpha[i][:,cols] for i in keep_idx]
        beta  = [self.samples.beta[i][:,cols] for i in keep_idx]
        eta   = self.samples.eta[keep_idx]
        mu    = self.samples.mu[keep_idx][:,cols]
        Sigma = self.samples.Sigma[keep_idx][:,cols][:,:,cols]

        postpred = np.empty((self.nDat, len(cols)))
        for n in range(self.nDat):
            dmax = delta[n].max()
            njs = cu.counter(delta[n], dmax + 1 + m)
            ljs = njs + (njs == 0) * eta[n] / m
            new_log_alpha = mu[n].reshape(1,-1) + \
                    (cholesky(Sigma[n]) @ normal.rvs(size = (len(cols), m))).T
            new_alpha = np.exp(new_log_alpha)
            if cols[0] == 0:
                new_beta = np.hstack((
                    np.ones((m, 1)),
                    gamma.rvs(a = 2., scale = 1/2., size = (m, len(cols) - 1))
                    ))
            else:
                new_beta = gamma.rvs(a = 2., scale = 1/2, size = (m, len(cols)))
            prob = ljs / ljs.sum()
            a = np.vstack((alpha[n], new_alpha))
            b = np.vstack((beta[n], new_beta))
            delta_new = np.random.choice(range(dmax + 1 + m), 1, p = prob)
            postpred[n] = gamma.rvs(a = a[delta_new], scale = 1 / b[delta_new])
        np.nan_to_num(postpred, copy = False)
        return (postpred.T / postpred.sum(axis = 1)).T
Example #4
def gibbs(x, E = 5200, BURN_IN = 200,  frequency=100):
    
    # Initialize the chain
    n       = int(round(uniform.rvs()*N))
    lambda1 = gamma.rvs(a,scale=1./b)
    lambda2 = gamma.rvs(a,scale=1./b)
    
    # Store the samples
    chain_n       = np.array([0.]*(E-BURN_IN))
    chain_lambda1 = np.array([0.]*(E-BURN_IN))
    chain_lambda2 = np.array([0.]*(E-BURN_IN))
    
    for e in range(E):
        if e % frequency == 0:
            print(f'At iteration {e}')
        # sample lambda1 and lambda2 from their posterior conditionals, Equation 8 and Equation 9, respectively.
        lambda1 = gamma.rvs(a+sum(x[0:n]), scale=1./(n+b))
        lambda2 = gamma.rvs(a+sum(x[n:N]), scale=1./(N-n+b))
    
        # sample n, Equation 10
        mult_n = np.zeros(N)  # float array; an int dtype would truncate the log-weights
        for i in range(N):
            mult_n[i] = sum(x[0:i])*log(lambda1) - i*lambda1 + sum(x[i:N])*log(lambda2) - (N-i)*lambda2
        mult_n = exp(mult_n-max(mult_n))
        n      = np.where(multinomial(1,mult_n/sum(mult_n),size=1)==1)[1][0]
    
        # store
        if e>=BURN_IN:
            chain_n[e-BURN_IN]       = n
            chain_lambda1[e-BURN_IN] = lambda1
            chain_lambda2[e-BURN_IN] = lambda2
            
    return (chain_lambda1,chain_lambda2,chain_n)
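The sampler above reads the data x, its length N, and the Gamma prior parameters a and b from the enclosing scope. A minimal driver sketch, with illustrative synthetic change-point data and prior values:

# Hedged usage sketch for gibbs(); x, N, a, b are the module-level names
# the sampler expects, and all values here are illustrative only.
import numpy as np
from numpy import log, exp
from numpy.random import multinomial
from scipy.stats import gamma, uniform

N = 50
x = np.concatenate([np.random.poisson(2.0, 25),
                    np.random.poisson(6.0, 25)])  # rate change at t = 25
a, b = 2.0, 1.0  # Gamma(a, rate=b) prior on both rates

chain_l1, chain_l2, chain_n = gibbs(x, E=1200, BURN_IN=200)
print(np.mean(chain_l1), np.mean(chain_l2), np.median(chain_n))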
Example #5
def part_a(fname=fname):
    fname = fname + '_a'
    nsamps = int(5000)
    yA = np.zeros(nsamps)
    yB = np.zeros(nsamps)
    for i in range(nsamps):   # do Monte Carlo
        thetaA = gamma.rvs(56, scale=1.0/59)
        yA[i] = poisson.rvs(thetaA)    
        thetaB = gamma.rvs(307, scale=1.0/219)
        yB[i] = poisson.rvs(thetaB)
    
    bins = range(9)
    plt.figure()
    plt.hist(yA, bins=bins, density=True)
    plt.xlabel(r'$\tilde{Y}_A$',fontsize=labelFontSize)
    plt.ylabel(r'$p(\tilde{Y}_A \mid \mathbf{y}_A)$',fontsize=labelFontSize)
    plt.title(r"4.8a  Bachelor's Degree",fontsize=titleFontSize)
    plt.xticks(bins, fontsize=tickFontSize)
    plt.yticks(fontsize=tickFontSize)
    plt.savefig(fname+'bach_density.'+imgFmt, format=imgFmt)
    print(yA)
    
    plt.figure()
    plt.hist(yB, bins=bins, density=True)
    plt.xlabel(r'$\tilde{Y}_B$',fontsize=labelFontSize)
    plt.ylabel(r'$p(\tilde{Y}_B \mid \mathbf{y}_B)$',fontsize=labelFontSize)
    plt.title(r"4.8a  No Bachelor's Degree",fontsize=titleFontSize)
    plt.xticks(bins, fontsize=tickFontSize)
    plt.yticks(fontsize=tickFontSize)
    plt.savefig(fname+'nobach_density.'+imgFmt, format=imgFmt)
Example #6
def updateHyperparameters(r, zeta, xi, tau, niu, ellInverse, rhoSquare, means,
                          lambdaInverse, A):
    D = A.shape[1]
    M = len(tau)
    inverseRhoSquare = 1.0 / rhoSquare

    for m in range(0, M):
        for d in range(0, D):
            ellInverse[m][d] = gamma.rvs((1 + r[m]) / 2.0,
                                         scale=2.0 / (lambdaInverse[m][d] + zeta[d]),
                                         size=1)  # assumed Gamma(shape, rate) update; rate passed via scale

        inverseSigmam = np.linalg.inv(returnCovarianceMatrix(lambdaInverse[m]))
        sigmaHat = np.linalg.inv(inverseRhoSquare * np.identity(D) +
                                 tau[m] * inverseSigmam)
        newmean = np.dot(
            sigmaHat,
            inverseRhoSquare * niu + tau[m] * np.dot(inverseSigmam, means[m]))

        if np.isnan(sigmaHat).any():
            sigmaHat = 0.0001 * np.identity(sigmaHat.shape[0])
        try:
            xi[m] = multivariate_normal.rvs(mean=newmean, cov=sigmaHat, size=1)
        except ValueError:
            print("stop")
        alpha = D + 1
        beta = 1.0 / rhoSquare + np.dot(
            np.dot((means[m] - xi[m]), inverseSigmam), (means[m] - xi[m]))
        tau[m] = gamma.rvs(alpha / 2.0, scale=2.0 / beta, size=1)  # rate beta/2 expressed via scale

    #gibbs sampling, so we accept
    return xi, tau, ellInverse
Example #7
def run_line_nikita(tries, workers, chr, n, a):
    res = []

    def append_to_res():
        bpg = g.make_bg_real_data_graph()
        res.append(
            [n, breaks, chr,
             bpg.d(),
             bpg.b(),
             bpg.p_odd(),
             bpg.p_even()] + [bpg.p_m(m) for m in range(p_m_max)] + [bpg.c()] +
            [bpg.c_m(m) for m in range(1, c_m_max)])

    for i in range(int(tries / workers)):
        print(i, "of", int(tries / workers), ";",
              round(i / tries * workers * 100, 1), "%", datetime.now())
        if chr == 0:
            dist = gamma.rvs(a, size=n)
            dist /= sum(dist)
            g = DirichletBPGraph(n, dist)
        else:
            dist = gamma.rvs(a, size=(n + chrs))
            dist /= sum(dist)
            g = GenomeGraph(n, chr)
        breaks = 0

        for x_index, x in enumerate(xs):
            append_to_res()
            while breaks <= int(round(x * n / 2)):
                # print("____K =", breaks + 1, "of", int(round(X_MAX * n / 2)))
                g.do_k2_break()
                breaks += 1
            append_to_res()
    # print(ys)
    return res
Example #8
def simulate_gamma(psth, trials, duration, num_trials=20):

    #rescale the ISIs
    dt = 0.001
    rs_isis = []
    for trial in trials:
        if len(trial) < 1:
            continue
        csum = np.cumsum(psth) * dt
        for k, ti in enumerate(trial[1:]):
            tj = trial[k]
            if ti > duration or tj > duration or ti < 0.0 or tj < 0.0:
                continue
            ti_index = int((ti / duration) * len(psth))
            tj_index = int((tj / duration) * len(psth))
            #print 'k=%d, ti=%0.6f, tj=%0.6f, duration=%0.3f' % (k, ti, tj, duration)
            #print '  ti_index=%d, tj_index=%d, len(psth)=%d, len(csum)=%d' % (ti_index, tj_index, len(psth), len(csum))
            #get rescaled time as difference in cumulative intensity
            ui = csum[ti_index] - csum[tj_index]
            if ui < 0.0:
                print('ui < 0! ui=%0.6f, csum[ti]=%0.6f, csum[tj]=%0.6f' % (
                    ui, csum[ti_index], csum[tj_index]))
            else:
                rs_isis.append(ui)
    rs_isis = np.array(rs_isis)
    rs_isi_x = np.arange(rs_isis.min(), rs_isis.max(), 1e-5)

    #fit a gamma distribution to the rescaled ISIs
    gamma_alpha, gamma_loc, gamma_beta = gamma.fit(rs_isis)
    gamma_pdf = gamma.pdf(rs_isi_x,
                          gamma_alpha,
                          loc=gamma_loc,
                          scale=gamma_beta)
    print('Rescaled ISI Gamma Fit Params: alpha=%0.3f, beta=%0.3f, loc=%0.3f' % (
        gamma_alpha, gamma_beta, gamma_loc))

    #simulate new trials using rescaled ISIs
    new_trials = []
    for nt in range(num_trials):
        ntrial = []
        next_rs_time = gamma.rvs(gamma_alpha, loc=gamma_loc, scale=gamma_beta)
        csum = 0.0
        for t_index, pval in enumerate(psth):
            csum += pval * dt
            if csum >= next_rs_time:
                #spike!
                t = t_index * dt
                ntrial.append(t)
                #reset integral and generate new rescaled ISI
                csum = 0.0
                next_rs_time = gamma.rvs(gamma_alpha,
                                         loc=gamma_loc,
                                         scale=gamma_beta)
        new_trials.append(ntrial)
    #plt.figure()
    #plt.hist(rs_isis, bins=20, normed=True)
    #plt.plot(rs_isi_x, gamma_pdf, 'r-')
    #plt.title('Rescaled ISIs')

    return new_trials
Example #9
def generer ():
    graphe = Graphe()
    i = 0
    L = list()
    Delta = list()
    while i < 10:
        graphe.ajouter_sommet()
        L.append([])
        Delta.append([])
        i += 1

    graphe.tirage_A()
    i = 0
    cascade = SetCascade(graphe,200)

    j = 1
    pile_depart = []
    pile_depart.append((int(floor(10*random())), 5., gamma.rvs(8)))
    while j < 10:
        gam = gamma.rvs(8)
        inserer_dans_liste((j, float(j*5), gam),pile_depart)
        inserer_dans_liste((j,float(j*5)+gam, - gam),pile_depart)
        j += 1

    creer_cascade_exp(graphe,L,cascade.T,Delta,pile_depart)

    enregistrer_matrix_adj(graphe,'adj.txt')
    enregistrer_cascades(L,Delta,'casc.txt')
Example #10
    def random(cls,
               L=1,
               avg_mu=1.0,
               alphabet='nuc',
               pi_dirichlet_alpha=1,
               W_dirichlet_alpha=3.0,
               mu_gamma_alpha=3.0):
        """
        Creates a random GTR model

        Parameters
        ----------
        L : int, optional
            number of sites for which to generate a model
        avg_mu : float
           Substitution rate
        alphabet : str
           Alphabet name (should be standard: 'nuc', 'nuc_gap', 'aa', 'aa_gap')
        pi_dirichlet_alpha : float, optional
            parameter of dirichlet distribution
        W_dirichlet_alpha : float, optional
            parameter of dirichlet distribution
        mu_gamma_alpha : float, optional
            shape parameter of the gamma distribution of site-specific rates

        Returns
        -------
        GTR_site_specific
            model with randomly sampled frequencies
        """

        from scipy.stats import gamma
        alphabet = alphabets[alphabet]
        gtr = cls(alphabet=alphabet, seq_len=L)
        n = gtr.alphabet.shape[0]

        # Dirichlet distribution == l_1 normalized vector of samples of the Gamma distribution
        if pi_dirichlet_alpha:
            pi = 1.0 * gamma.rvs(pi_dirichlet_alpha, size=(n, L))
        else:
            pi = np.ones((n, L))

        pi /= pi.sum(axis=0)
        if W_dirichlet_alpha:
            tmp = 1.0 * gamma.rvs(W_dirichlet_alpha, size=(n, n))
        else:
            tmp = np.ones((n, n))
        tmp = np.tril(tmp, k=-1)
        W = tmp + tmp.T

        if mu_gamma_alpha:
            mu = gamma.rvs(mu_gamma_alpha, size=(L, ))
        else:
            mu = np.ones(L)

        gtr.assign_rates(mu=mu, pi=pi, W=W)
        gtr.mu *= avg_mu / np.mean(gtr.average_rate())

        return gtr
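The comment above relies on the identity that normalizing i.i.d. Gamma(alpha) draws yields a Dirichlet sample. A minimal sketch of that equivalence, with illustrative sizes:

import numpy as np
from scipy.stats import gamma, dirichlet

alpha, n, L = 1.0, 4, 3
g = gamma.rvs(alpha, size=(n, L))
pi = g / g.sum(axis=0)                          # each column is a Dirichlet(1,...,1) draw
print(pi.sum(axis=0))                           # columns sum to 1
print(dirichlet.rvs(alpha=np.ones(n), size=L))  # equivalent per-column draws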
Example #11
def rprior(size, hyperparameters):
    """ returns untransformed parameters """
    sigma = 1 / gamma.rvs(hyperparameters["sigma_shape"], scale = hyperparameters["sigma_scale"], size = size)
    tau = 1 / gamma.rvs(hyperparameters["tau_shape"], scale = hyperparameters["tau_scale"], size = size)
    parameters = zeros((2, size))
    parameters[0, :] = sigma
    parameters[1, :] = tau
    return parameters
Example #13
def rprior(size, hyperparameters):
    """ returns untransformed parameters """
    sigma_w = 1 / gamma.rvs(hyperparameters["sigma_w_shape"], scale =
            hyperparameters["sigma_w_scale"], size = size)
    sigma_v = 1 / gamma.rvs(hyperparameters["sigma_v_shape"], scale = hyperparameters["sigma_v_scale"], size = size)
    parameters = zeros((2, size))
    parameters[0, :] = sigma_w
    parameters[1, :] = sigma_v
    return parameters
Example #14
    def draw(self, K=10, N=1 * 10**5, m=3, gaussian=False):
        """
        Inputs:
        -------
        N: sample size
        K: Dimension of Normal/Student distr.
        m: number of mixture components
        """
        np.random.seed(self.seed)
        self.st0 = np.random.get_state()  # get initial state of RNG
        #np.random.set_state(self.st0)
        print("Drawing from", m, "component mixture distribution.")
        alphas = gamma.rvs(5, size=m)  # shape parameter
        #print(sum(alphas))                              # equivalent sample size
        self.p = dirichlet.rvs(alpha=alphas, size=1)[0]
        self.phi_is = multinomial.rvs(1, self.p,
                                      size=N)  # draw from categorical p.m.f

        self.x_draws = np.zeros((N, K))
        self.hyper_loc, self.hyper_scale, self.thetas, self.var, self.covs, self.rdraws = dict(
        ), dict(), dict(), tuple(), tuple(), tuple()

        for i in range(m):

            self.hyper_loc["mean" + str(i + 1)] = norm.rvs(size=1,
                                                           loc=0,
                                                           scale=5)
            self.hyper_scale["scale" + str(i + 1)] = 1 / gamma.rvs(5, size=1)

            self.thetas["mean" + str(i + 1)] = norm.rvs(
                size=K,
                loc=self.hyper_loc["mean" + str(i + 1)],
                scale=self.hyper_scale["scale" + str(i + 1)])
            self.thetas["Sigma" +
                        str(i + 1)] = np.eye(K) * (1 / gamma.rvs(5, size=K))
            self.thetas["nu" + str(i + 1)] = randint.rvs(K + 2, K + 10,
                                                         size=1)[0]

            if gaussian:
                self.covs += (self.thetas['Sigma' + str(i + 1)], )
            else:
                self.covs += (wishart.rvs(df=self.thetas['nu' + str(i + 1)],
                                          scale=self.thetas['Sigma' +
                                                            str(i + 1)],
                                          size=1), )
                self.var += (
                    self.thetas["nu" + str(i + 1)] /
                    (self.thetas["nu" + str(i + 1)] - 2) * self.covs[i],
                )  # variance covariance matrix of first Student-t component
            self.rdraws += (np.random.multivariate_normal(
                self.thetas["mean" + str(i + 1)], self.covs[i], N), )

            self.Phi = np.tile(self.phi_is[:, i], K).reshape(
                K, N).T  # repeat phi vector to match with random matrix
            self.x_draws += np.multiply(self.Phi, self.rdraws[i])

        return self.x_draws, np.argmax(self.phi_is, 1)  # X, latent
Example #15
 def rvs(self, n=None):
     """ Returns independent observations of this random variable.
     """
     a = self.__a
     b = self.__b
     if n is None:
         return gamma.rvs(a, scale=b)
     else:
         return [gamma.rvs(a, scale=b) for i in range(n)]
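The list comprehension above draws one observation at a time; scipy can vectorize this with the size argument. A sketch assuming the same shape a and scale b:

from scipy.stats import gamma

a, b = 2.0, 3.0
draws = gamma.rvs(a, scale=b, size=10)  # ten i.i.d. observations in one call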
Example #16
 def send(self, socket, address, pkt, indice):
     if self._simulate:
         ret = self.send_simulate(socket, address, pkt, indice)
         if ret == 0:
             self._delay = gamma.rvs(self._alpha_d, scale=self._scale_d, size=1000)
             self._evento = gamma.rvs(self._alpha_e, scale=self._scale_e, size=1000)
             self._perdita = gamma.rvs(self._alpha_p, scale=self._scale_p, size=1000)
     else:  # there will be a real delay and a real loss
         pkt = pkt + (bytearray(struct.pack("d", time.time())))
         self.send_pkt(socket, address, pkt)
Example #17
 def levy(self, n, beta=1.5):
     # Mantegna's algorithm: sigma comes from the gamma *function*
     # (math.gamma), not from random gamma draws, and it scales u.
     sigma = math.pow(
         math.gamma(1 + beta) * sin(beta * pi / 2) /
         (math.gamma((1 + beta) / 2) * beta * math.pow(2, (beta - 1) / 2)),
         1 / beta)
     u = np.random.normal(0, sigma, n)
     v = np.random.normal(0, 1, n)
     z = u / np.power(np.abs(v), (1 / beta))
     return z
Example #18
 def sample(self):
     if self.max_range is None:
         return self.post_process(
             gamma.rvs(self.shape, self.loc, self.scale))
     else:
         x = self.max_range + 1
         while x > self.max_range:
             x = self.post_process(
                 gamma.rvs(self.shape, self.loc, self.scale))
         return x
Example #19
def credint(a,b,c,d,conf=0.95):
  nit=1000000
  eps=0.5
  A=gamma.rvs(a+eps,size=nit)
  B=gamma.rvs(b+eps,size=nit)
  C=gamma.rvs(c+eps,size=nit)
  D=gamma.rvs(d+eps,size=nit)
  l=A*D/(B*C)
  l.sort()
  return (l[int((1-conf)/2*nit)], l[nit//2], l[int((1+conf)/2*nit)])
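credint draws four independent Gamma variables (the +0.5 acts as a Jeffreys-style prior on each count) and returns the lower bound, median, and upper bound of the Monte Carlo distribution of the ratio (A/B)/(C/D). A hedged usage sketch with hypothetical counts:

lo, med, hi = credint(12, 400, 9, 398, conf=0.95)
print(lo, med, hi)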
Example #20
def ClimateInit():
    """
    Generates initial values for theta
    from its prior distribution.
    theta = [alpha1, phi2, psi2, sig_T2, sig_V1, sig_C1].
    """
    alpha1 = gamma.rvs(a=0.01, scale=1 / 3.33e-6)
    phi2 = gamma.rvs(a=0.01, scale=1 / 5e-4)
    psi2 = gamma.rvs(a=0.01, scale=1 / 4.34783e-8)
    return array([alpha1, phi2, psi2])
Example #21
def ProductionInit():
    """
    Generates initial values for theta
    from its prior distribution.
    theta = [nu, a, b, sigma_F].
    """
    nu = random.random()
    a = gamma.rvs(a=0.01, scale=1 / 3.03e-3)
    b = random.random()
    sigma_F = gamma.rvs(a=16, scale=1 / 16)
    return array([nu, a, b, sigma_F])
Example #22
	def sample_from_prior(self, periods=1, length=720):
		#now sample from distribution,
		#start by drawing alpha hats and sigma hats for the distribution
		alpha_hats = gamma.rvs(a=self.shape_alpha, 
			loc=self.location_alpha, scale=self.scale_alpha, size=periods)
		sigma_hats = gamma.rvs(a=self.shape_sigma,
		 loc=self.location_sigma, scale=self.scale_sigma, size=periods)
		gamma_params = [i for i in zip(alpha_hats, sigma_hats)]
		month_sims = [self.generate_winds(i, length) for i in gamma_params]
		results=[month_sims, gamma_params]
		return(results) #returns a list of monthly simulated data
Example #23
def gen_graph_mal_workers(dataset, num_mal_workers, avg_labels, ansfile): 
    ag,bg,cg = getDistributionNormalData(ansfile, dataset)
    if dataset == 'product': 
        #number of tasks per mal_workers
        
        samples = gamma.rvs(ag,bg,cg, num_mal_workers)
        while np.mean(samples) > avg_labels + 10: 
            samples = gamma.rvs(ag,bg,cg, num_mal_workers)
        
        print(np.mean(samples))
    
    return samples
Example #24
def simulate_gamma(psth, trials, duration, num_trials=20):

    #rescale the ISIs
    dt = 0.001
    rs_isis = []
    for trial in trials:
        if len(trial) < 1:
            continue
        csum = np.cumsum(psth)*dt
        for k,ti in enumerate(trial[1:]):
            tj = trial[k]
            if ti > duration or tj > duration or ti < 0.0 or tj < 0.0:
                continue
            ti_index = int((ti / duration) * len(psth))
            tj_index = int((tj / duration) * len(psth))
            #print 'k=%d, ti=%0.6f, tj=%0.6f, duration=%0.3f' % (k, ti, tj, duration)
            #print '  ti_index=%d, tj_index=%d, len(psth)=%d, len(csum)=%d' % (ti_index, tj_index, len(psth), len(csum))
            #get rescaled time as difference in cumulative intensity
            ui = csum[ti_index] - csum[tj_index]
            if ui < 0.0:
                print('ui < 0! ui=%0.6f, csum[ti]=%0.6f, csum[tj]=%0.6f' % (ui, csum[ti_index], csum[tj_index]))
            else:
                rs_isis.append(ui)
    rs_isis = np.array(rs_isis)
    rs_isi_x = np.arange(rs_isis.min(), rs_isis.max(), 1e-5)

    #fit a gamma distribution to the rescaled ISIs
    gamma_alpha,gamma_loc,gamma_beta = gamma.fit(rs_isis)
    gamma_pdf = gamma.pdf(rs_isi_x, gamma_alpha, loc=gamma_loc, scale=gamma_beta)
    print('Rescaled ISI Gamma Fit Params: alpha=%0.3f, beta=%0.3f, loc=%0.3f' % (gamma_alpha, gamma_beta, gamma_loc))

    #simulate new trials using rescaled ISIs
    new_trials = []
    for nt in range(num_trials):
        ntrial = []
        next_rs_time = gamma.rvs(gamma_alpha, loc=gamma_loc,scale=gamma_beta)
        csum = 0.0
        for t_index,pval in enumerate(psth):
            csum += pval*dt
            if csum >= next_rs_time:
                #spike!
                t = t_index*dt
                ntrial.append(t)
                #reset integral and generate new rescaled ISI
                csum = 0.0
                next_rs_time = gamma.rvs(gamma_alpha, loc=gamma_loc,scale=gamma_beta)
        new_trials.append(ntrial)
    #plt.figure()
    #plt.hist(rs_isis, bins=20, normed=True)
    #plt.plot(rs_isi_x, gamma_pdf, 'r-')
    #plt.title('Rescaled ISIs')

    return new_trials
Example #25
def sample(G, n, t=3, alpha_portion=0.9, seed=None, node_order=None):
    """
  Sample <n> random values according to a distribution specified by G.
  Each value is a len(node_order) size vector of positive reals.

  If a node in G has no parents, its value is drawn according to Gamma(t^alpha_portion, t^(1-alpha_portion)).
  t is a reparameterization so that t is a location parameter.
  The default alpha_portion is chosen to be high so that the distribution is less uniform and more peaked around that location.

  Nodes which are present in <node_order> but not <G> also have values drawn according to the aforementioned distribution.

  A node Y which has parents X_1, ..., X_k is specified by Y = \sum_k Gamma(X_k^alpha_portion, X_k^(1-alpha_portion))
  Y also has a Gamma distribution but it is not easily expressed. For Y = \sum_k Gamma(alpha_k, beta),
  Y ~ Gamma(\sum_k alpha_k, beta). The Y here is different but close.
  """
    loc = 0
    # TODO assumes connected
    if not G.is_directed():
        raise FactorLibException('G must be directed')
    if seed is not None:
        np.random.seed(seed)
    if node_order is None:
        node_order = G.nodes()

    m = len(node_order)
    rv = np.zeros((m, n))
    node_to_ind = {}
    for i, node in enumerate(node_order):
        node_to_ind[node] = i

    roots = [v for v, d in dict(G.in_degree()).items() if d == 0]  # avoid shadowing n; works with networkx 2.x degree views
    for j in range(n):
        for root in roots:
            root_ind = node_to_ind[root]
            alpha = math.pow(t, alpha_portion)
            beta = math.pow(t, 1 - alpha_portion)
            rv[root_ind, j] = gamma.rvs(alpha, loc, beta)
            for source, target in nx.dfs_edges(G, root):
                source_ind = node_to_ind[source]
                target_ind = node_to_ind[target]
                alpha = math.pow(rv[source_ind, j], alpha_portion)
                beta = math.pow(rv[source_ind, j], 1 - alpha_portion)
                rv[target_ind, j] += gamma.rvs(alpha, loc, beta)

    other_nodes = set(node_order) - set(G.nodes())
    for j in range(n):
        for node in other_nodes:
            node_ind = node_to_ind[node]
            alpha = math.pow(t, alpha_portion)
            beta = math.pow(t, 1 - alpha_portion)
            rv[node_ind, j] = gamma.rvs(alpha, loc, beta)

    return rv
Example #26
 def move(i, sample):
     if i == 0:
         return gamma.rvs(a + sum(x[0:int(sample[2])]),
                          scale=1. / (int(sample[2]) + b))
     elif i == 1:
         return gamma.rvs(a + sum(x[int(sample[2]):N]),
                          scale=1. / (N - int(sample[2]) + b))
     elif i == 2:
         mult_n = np.zeros(N)  # float array; an int dtype would truncate the log-weights
         for i in range(N):
             mult_n[i] = sum(x[0:i])*log(sample[0]) - i*sample[0]\
                         + sum(x[i:N])*log(sample[1]) - (N-i)*sample[1]
         mult_n = exp(mult_n - max(mult_n))
         return np.where(multinomial(1, mult_n /
                                     sum(mult_n), size=1) == 1)[1][0]
Example #27
File: gsm.py Project: afcarl/isa
    def initialize(self, method='cauchy'):
        if method.lower() == 'student':
            self.scales = 1. / sqrt(gamma.rvs(1, 0, 1, size=self.num_scales))

        elif method.lower() == 'cauchy':
            self.scales = 1. / sqrt(gamma.rvs(0.5, 0, 2, size=self.num_scales))

        elif method.lower() == 'laplace':
            self.scales = rayleigh.rvs(size=self.num_scales)

        else:
            raise ValueError(
                'Unknown initialization method \'{0}\'.'.format(method))

        self.normalize()
Example #28
def gamma_expect(f,
                 point,
                 index_points,
                 index_random,
                 parameters_dist,
                 n_samples=N_SAMPLES,
                 double=False):
    """
    Computes the expectation of f(z), where z=(point, x) which is equal to:
        mean(f((point, x)): x in domain_random), where
    z[index_points[i]] = point[i].

    If double is True, it computes the mean over all the points. Used for the variance.

    :param f: function
    :param point: np.array(1xk)
    :param index_points: [int]
    :param index_random: [int]
    :param parameters_dist: {'scale':[float], 'a': [int]}
    :param n_samples: int
    :param double: boolean
    :return: np.array
    """

    if double:
        n_samples = 100

    a = parameters_dist['a'][0]
    scale = parameters_dist['scale'][0]

    dim_w = len(index_random)

    new_points = np.zeros((n_samples, len(index_random) + point.shape[1]))
    new_points[:, index_points] = np.repeat(point, n_samples, axis=0)
    random = gamma.rvs(a, scale=scale, size=(n_samples, dim_w))
    new_points[:, index_random] = random

    if double:
        random_2 = gamma.rvs(a, scale=scale, size=(n_samples, dim_w))
        new_points_2 = new_points.copy()
        new_points_2[:, index_random] = random_2
        new_points = np.concatenate([new_points, new_points_2], axis=1)
        values = f(new_points)
        return np.mean(values)

    values = f(new_points)

    return np.mean(values, axis=0)
Example #29
    def random(cls,
               L=1,
               avg_mu=1.0,
               alphabet='nuc',
               pi_dirichlet_alpha=1,
               W_dirichlet_alpha=3.0,
               mu_gamma_alpha=3.0):
        """
        Creates a random GTR model

        Parameters
        ----------

         avg_mu : float
            Substitution rate

         alphabet : str
            Alphabet name (should be standard: 'nuc', 'nuc_gap', 'aa', 'aa_gap')


        """
        from scipy.stats import gamma
        alphabet = alphabets[alphabet]
        gtr = cls(alphabet=alphabet, seq_len=L)
        n = gtr.alphabet.shape[0]

        if pi_dirichlet_alpha:
            pi = 1.0 * gamma.rvs(pi_dirichlet_alpha, size=(n, L))
        else:
            pi = np.ones((n, L))

        pi /= pi.sum(axis=0)
        if W_dirichlet_alpha:
            tmp = 1.0 * gamma.rvs(W_dirichlet_alpha, size=(n, n))
        else:
            tmp = np.ones((n, n))
        tmp = np.tril(tmp, k=-1)
        W = tmp + tmp.T

        if mu_gamma_alpha:
            mu = gamma.rvs(mu_gamma_alpha, size=(L, ))
        else:
            mu = np.ones(L)

        gtr.assign_rates(mu=mu, pi=pi, W=W)
        gtr.mu *= avg_mu / np.mean(gtr.mu)

        return gtr
Example #30
def _clayton(M, N, alpha):
    if(alpha<0):
        raise ValueError('Alpha must be >=0 for Clayton Copula Family')
    if(N<2):
        raise ValueError('Dimensionality Argument [N] must be an integer >= 2')
    elif(N==2):
        u1 = uniform.rvs(size=M)
        p = uniform.rvs(size=M)
        if(alpha<np.spacing(1)):
            u2 = p
        else:
            u2 = u1*np.power((np.power(p,(-alpha/(1.0+alpha))) - 1 + np.power(u1,alpha)),(-1.0/alpha))
        
        U = np.column_stack((u1,u2))
    else:
        # Algorithm 1 described in both the SAS Copula Procedure, as well as the
        # paper: "High Dimensional Archimedean Copula Generation Algorithm"
        U = np.empty((M,N))
        for ii in range(0,M):
            shape = 1.0/alpha
            loc = 0
            scale = 1
            v = gamma.rvs(shape)
            
            # sample N independent uniform random variables
            x_i = uniform.rvs(size=N)
            t = -1*np.log(x_i)/v
            if(alpha<0):
                tmp = np.maximum(0, 1.0-t)
            else:
                tmp = 1.0 + t
            
            U[ii,:] = np.power(tmp, -1.0/alpha)

    return U
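A hedged usage sketch for _clayton, with illustrative arguments; the function expects numpy plus scipy's uniform and gamma in scope:

import numpy as np
from scipy.stats import uniform, gamma

U = _clayton(M=1000, N=3, alpha=2.0)  # 1000 draws from a 3-d Clayton copula
print(U.shape)                        # (1000, 3); each margin is Uniform(0, 1)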
Example #31
 def ts(self, n): 
     Z = G.rvs(2, scale=2, size=n) 
     X = np.empty(n)
     X[0] = self.beta / (1 - self.alpha)  # Stationary mean
     for t in range(1, n): 
         X[t] = self.beta + self.alpha * X[t-1] + self.s * Z[t]
     return X
Example #32
def gamma_test_case():
    '''
    Runs a test case with simulated data from a normal distribution.
    '''
    obs, fa, dur = [], [], []
    delta_angle = np.arange(180)
    for n in range(15):
        mode = piecewise_predictor(
            delta_angle,
            100 + plt.randn()*20,
            250 + plt.randn()*20,
            1 + plt.randn()/2.0,
            -1 + plt.randn()/2.0)
        a, b = np_gamma_params(mode, 10)
        for _ in range(10):
            d = gamma.rvs(a=a, scale=1.0/b)
            fa.append(delta_angle)
            dur.append(d)
            obs.append(d*0+n)
    dur, fa, obs = np.concatenate(dur), np.concatenate(fa), np.concatenate(obs)
    m = gamma_model(dur, fa, obs.astype(int))
    trace = sample_model(m, 5000)
    predict(trace, 5, 2500 )
    plt.figure()
    traceplot(trace, 2, 2500)
    return dur, fa, obs, trace
Example #33
def run(src_dir, mod, random_state=1234):

    if isinstance(src_dir, str):
        mat, labels_arr = load_mat_and_labels(src_dir, mod)
    else:
        mat, labels_arr = (src_dir, mod)

    masker = SimpleMaskerPipeline(threshold=.2)
    svc = SVC(kernel='linear')

    pipeline = Pipeline([('masker', masker),
                         ('anova', SelectKBest(k=500)),
                         ('svc', svc)])

    c_range = gamma.rvs(size=100, a=1.99, random_state=random_state)

    param_dist = {"svc__C": c_range}

    n_iter = 100
    cv = StratifiedShuffleSplit(labels_arr, n_iter=n_iter, test_size=1/6.0, random_state=random_state)

    total_runs = n_iter
    scorer = verbose_scorer(total_runs)

    search = RandomizedSearchCV(pipeline, param_distributions=param_dist, cv=cv, scoring=scorer,
                                random_state=random_state)
    search.fit(mat, labels_arr)

    return search
Example #34
def gen_new_proposal(network, funds, supply, trigger_func):
    j = len([node for node in network.nodes])
    network.add_node(j)
    network.nodes[j]['type'] = "proposal"

    network.nodes[j]['conviction'] = 0
    network.nodes[j]['status'] = 'candidate'
    network.nodes[j]['age'] = 0

    rescale = scale_factor * funds
    r_rv = gamma.rvs(3, loc=0.001, scale=rescale)
    network.nodes[j]['funds_requested'] = r_rv

    network.nodes[j]['trigger'] = trigger_func(r_rv, funds, supply)

    participants = get_nodes_by_type(network, 'participant')
    proposing_participant = np.random.choice(participants)

    for i in participants:
        network.add_edge(i, j)
        if i == proposing_participant:
            network.edges[(i, j)]['affinity'] = 1
        else:
            rv = np.random.rand()
            a_rv = 1 - 4 * (1 - rv) * rv  #polarized distribution
            network.edges[(i, j)]['affinity'] = a_rv

        network.edges[(i, j)]['conviction'] = 0
        network.edges[(i, j)]['tokens'] = 0
    return network
Example #35
async def iot_handler(websocket, path):
    await websocket.send(motd)
    # mode_query = "What kind of sensor would you like? (temperature,occupancy)"
    # await websocket.send(mode_query)

    # mode = await websocket.recv()
    mode = "all"

    rooms = get_simulated_rooms()

    while True:
        await asyncio.sleep(erlang.rvs(1, 0, size=1))

        room = random.choice(list(rooms.keys()))
        dat = {"time": datetime.now().isoformat()}

        if mode.startswith(("all", "tem")):
            dat["temperature"] = cauchy.rvs(loc=rooms[room]["loc"],
                                            scale=rooms[room]["scale"],
                                            size=1).tolist()
        if mode.startswith(("all", "occ")):
            dat["occupancy"] = poisson.rvs(rooms[room]["occ"], size=1).tolist()
        if mode.startswith(("all", "co")):
            dat["co2"] = gamma.rvs(rooms[room]["co"], size=1).tolist()

        await websocket.send(json.dumps({room: dat}))
Example #36
    def add_chemical_noise(self, nb_of_noise_peaks, noise_fraction):
        """
        Adds additional peaks with uniform distribution in the m/z domain
        and gamma distribution in the intensity domain. The spectrum does NOT need
        to be normalized. Accordingly, the method does not normalize the intensity afterwards!
        noise_fraction controls the amount of noise signal in the spectrum.
        nb_of_noise_peaks controls the number of peaks added.

        Return: list
            A boolean list indicating if a given peak corresponds to noise
        """
        span = min(x[0] for x in self.confs), max(x[0] for x in self.confs)
        span_increase = 1.2  # increase the mass range by a factor of 1.2
        span = [
            span_increase * x + (1 - span_increase) * sum(span) / 2
            for x in span
        ]
        noisex = uniform.rvs(loc=span[0],
                             scale=span[1] - span[0],
                             size=nb_of_noise_peaks)
        noisey = gamma.rvs(a=2, scale=2, size=nb_of_noise_peaks)
        noisey /= sum(noisey)
        signal = sum(x[1] for x in self.confs)
        noisey *= signal * noise_fraction / (1 - noise_fraction)
        noise = [(x, y) for x, y in zip(noisex, noisey)]
        self.confs += noise
        self.sort_confs()
        self.merge_confs()
        return [
            True if mz in noisex else False
            for mz in [x[0] for x in self.confs]
        ]
Example #37
def poissonGammaRVS(lambda_, alpha, beta):
    # Generate Poisson-Gamma random numbers for parameters (lambda, alpha, beta)
    Po = poisson.rvs(mu=lambda_)  # Poisson draws
    res = np.ones_like(lambda_) * Po
    res[Po > 0] = gamma.rvs(alpha * Po[Po > 0],
                            scale=1 / beta[Po > 0])  # Gamma(N*alpha, rate beta); rate passed via scale
    return res
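A hedged usage sketch with illustrative element-wise rates; beta is passed as an array because the function indexes it with the mask Po > 0:

import numpy as np
from scipy.stats import poisson, gamma

lam = np.array([0.5, 2.0, 5.0])
beta = np.array([1.0, 1.0, 2.0])
print(poissonGammaRVS(lam, alpha=2.0, beta=beta))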
Example #38
    def sample_forecast_max_hail(self, dist_model_name, condition_model_name,
                                 num_samples, condition_threshold=0.5, query=None):
        """
        Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes.

        Hail sizes are sampled from each predicted gamma distribution. The total number of samples equals
        num_samples * area of the hail object. To get the maximum hail size for each realization, the maximum
        value within each area sample is used.

        Args:
            dist_model_name: Name of the distribution machine learning model being evaluated
            condition_model_name: Name of the hail/no-hail model being evaluated
            num_samples: Number of maximum hail samples to draw
            condition_threshold: Threshold for drawing hail samples
            query: A str that selects a subset of the data for evaluation

        Returns:
            A numpy array containing maximum hail samples for each forecast object.
        """
        if query is not None:
            dist_forecasts = self.matched_forecasts["dist"][dist_model_name].query(query)
            dist_forecasts = dist_forecasts.reset_index(drop=True)
            condition_forecasts = self.matched_forecasts["condition"][condition_model_name].query(query)
            condition_forecasts = condition_forecasts.reset_index(drop=True)
        else:
            dist_forecasts = self.matched_forecasts["dist"][dist_model_name]
            condition_forecasts = self.matched_forecasts["condition"][condition_model_name]
        max_hail_samples = np.zeros((dist_forecasts.shape[0], num_samples))
        areas = dist_forecasts["Area"].values
        for f in np.arange(dist_forecasts.shape[0]):
            condition_prob = condition_forecasts.loc[f, self.forecast_bins["condition"][0]]
            if condition_prob >= condition_threshold:
                max_hail_samples[f] = np.sort(gamma.rvs(*dist_forecasts.loc[f, self.forecast_bins["dist"]].values,
                                                        size=(num_samples, areas[f])).max(axis=1))
        return max_hail_samples
Example #39
def run(full, target_col, random_state=1234, c_range_alpha=.05, c_range_size=100, normalize=False,
        score_fn=r2_score):

    svr = linearSVRPermuteCoefFactory()
    
    pipeline_steps = [('svr', svr)]

    pipeline = Pipeline(pipeline_steps)

    c_range = gamma.rvs(size=c_range_size, a=c_range_alpha, random_state=random_state)

    param_dist = {"svr__C": c_range}

    data, target = separate(full, target_col)
    
    if normalize:
        data = scale(data)

    n_iter = 100
    cv = ShuffleSplit(len(target), n_iter=n_iter, test_size=1/6.0, random_state=random_state)

    total_runs = n_iter
    scorer = verbose_scorer(total_runs, score_fn)

    search = RandomizedSearchCV(pipeline, param_distributions=param_dist, cv=cv, scoring=scorer,
                                random_state=random_state)

    search.fit(data, target)

    return search
Example #40
def add_proposals_and_relationships_to_network(
        n: nx.DiGraph, proposals: int, funding_pool: float,
        token_supply: float) -> nx.DiGraph:
    participant_count = len(n)
    for i in range(proposals):
        j = participant_count + i
        n.add_node(j, type="proposal", conviction=0, status="candidate", age=0)

        r_rv = gamma.rvs(3, loc=0.001, scale=10000)
        n.nodes[j]['funds_requested'] = r_rv
        n.nodes[j]['trigger'] = trigger_threshold(r_rv, funding_pool,
                                                  token_supply)

        for i in range(participant_count):
            n.add_edge(i, j)
            rv = np.random.rand()
            a_rv = 1 - 4 * (1 - rv) * rv  #polarized distribution
            n.edges[(i, j)]['affinity'] = a_rv
            n.edges[(i, j)]['tokens'] = 0
            n.edges[(i, j)]['conviction'] = 0
            n.edges[(i, j)]['type'] = 'support'


        # Conflict Rate is a potential variable to optimize
        # Relative Influence is a potential variable to optimize
        n = initial_conflict_network(n, rate=.25)
        n = initial_social_network(n, scale=1)
    return n
Example #41
    def __get_distribution_scale_parameter():
        """
        Get sample shape parameter for the gamma distribution of firing rate over objects.
        See derivation in kurtosis_fit.py.

        :rtype : scale parameter.
        """
        return float(gamma.rvs(37.4292, scale=0.062, loc=0))
Example #42
def make_fake_data(times, omega, A0, A1, B1, s2mu, B, VB, Vsigma, S, VS):
    '''
    Implement the full generative model from the paper.  Is this
    correct?
    '''
    mus = mu(times, omega, A0, A1, B1)
    sigma2s = Gamma.rvs(S**2/VS, scale=VS/S, size=times.size)
    sobs2s = np.array([Gamma.rvs(m**2/Vsigma, scale=Vsigma/m, size=1)[0] for m in sigma2s])
    fobss = mus + np.sqrt(sigma2s + s2mu) * Norm.rvs(size=times.size)
    bs = B + np.sqrt(VB) * Norm.rvs(size=times.size)
    good = fobss > bs
    bad = good == False
    f0 = 1.e-6
    ms = flux2mag(fobss[good])
    ferrs = np.sqrt(sobs2s[good])
    merrs = 0.5 * (flux2mag(fobss[good] - ferrs) - flux2mag(fobss[good] + ferrs))
    return times[good], ms, merrs, times[bad]
Example #43
    def propose(self):
        ret = copy(self)
        ret.value = gamma.rvs(self.value * self.proposal_scale, scale=1./self.proposal_scale)

        fb = gamma.logpdf(ret.value, self.value * self.proposal_scale, scale=1./self.proposal_scale) -\
             gamma.logpdf(self.value, ret.value * self.proposal_scale, scale=1./self.proposal_scale)

        return ret, fb
Example #44
    def __init__(self, value=None, a=1.0, scale=1.0, proposal_scale=1.0, **kwargs):
        Stochastic.__init__(self, value=value, **kwargs)
        self.a = a
        self.scale = scale
        self.proposal_scale = proposal_scale

        if value is None:
            self.set_value(gamma.rvs(a, scale=scale))
Example #45
def N_sample(x , mu, sigma, prior_normal, prior_gamma, trace=0):
    """
    sample mu and sigma from a normal distribution using the usual theory
    
    arguments:
    ----------
    x - data vector
    mu - mean
    sigma - sd
    prior_normal, a 2-vector (prior_mu, prior_sigma)
    prior_gamma, a 2-vector (prior_a, prior_b)
    
    output:
    ------
    a 2-vector containing the sampled values of mu and sigma
    """	
    n = len(x)
    prior_mu = prior_normal[0]
    prior_sigma = prior_normal[1]
    prior_a = prior_gamma[0]
    prior_b = prior_gamma[1]

    if trace >= 2:
        print("sampling from normal distribution", "\n")

    v = 1 / (prior_sigma**-2 + n*sigma**-2)
    m = v * (prior_mu / prior_sigma**2 + np.sum(x) / sigma**2 )
    sampled_mu = norm.rvs(loc=m,scale=np.sqrt(v),size=1)

    if trace >= 2:
        print("SD: " + str(np.sqrt(v)) + " computed from\n")
        print("- prior sd: " + str(prior_sigma) + " \n")
        print("- current sd:" + str(sigma) + " \n")
        print("- N:" + str(n) + " \n")
        print("mean: " + str(m) + " \n")
        print("previous value of mean:" + str(mu) + " \n")
        print("sampled value of mean:" + str(sampled_mu) + " \n")

    if trace >= 2:
        print("sampling from gamma distribution \n")

    a = prior_a + n / 2
    b = prior_b + np.sum((x - mu)**2) / 2
    sampled_sigma = np.sqrt(1 / gamma.rvs(a=a, scale=1 / b, size=1))  # tau ~ Gamma(shape=a, rate=b), so scale = 1/b
    
    if trace >= 2:
        print("a: " + str(a))
        print("b: " + str(b))
        print("previous value of sigma: " + str(sigma))
        print("sampled value of sigma:", sampled_sigma)
    
    return np.array([sampled_mu, sampled_sigma])
Example #46
    def __init__(self, list_of_objects):
        """
        A statistical model of selectivity & max spike rate distribution based on:

        Lehky, S. R., Kiani, R., Esteky, H., & Tanaka, K. (2011). Statistics of
            visual responses in primate inferotemporal cortex to object stimuli.
            Journal of Neurophysiology, 106(3), 1097–117.

        This paper shows (with a large passive-viewing IT dataset) that selectivity
        (kurtosis of neuron responses to different images) is lower than sparseness
        (kurtosis of population responses to each image). They propose a simple
        model (see pg. 1112) that explains this difference in terms of heterogeneity
        of spike rate distributions.

        They model these spike rate distributions as gamma functions. We don't directly use
        Lehky et al.'s distribution of gamma PDF parameters, since probably some of their
        variability was due to stimulus parameters such as size, position, etc. being
        non-optimal for many neurons. We do use their shape-parameter distribution, but we
        use a different scale parameter distribution that approximates that of Lehky et al.
        after being scaled by a realistic distribution of scale factors for non-optimal size,
        position, etc. For derivation of scale factors see kurtosis_fit.py

        Additionally a function is provided to get the max firing rate of the neuron. Once
        parameters of the gamma distribution over the objects is calculated, we take the point at
        which the CDF = 0.99 as the maximum.
        """
        self.type = 'kurtosis'

        self.a = self.__get_distribution_shape_parameter()
        self.b = self.__get_distribution_scale_parameter()

        obj_preferences = gamma.rvs(self.a, loc=0, scale=self.b, size=len(list_of_objects))
        obj_preferences = obj_preferences / self.get_max_firing_rate()

        self.objects = {item: obj_preferences[item_idx]
                        for item_idx, item in enumerate(list_of_objects)}

        # TODO: Remove this code and function, above is a faster way to generate object preferences
        # self.objects = {item: self.__get_object_preference(np.float(np.random.uniform(size=1)))
        #                 for item in list_of_objects}

        self.activity_fraction_measured = \
            calculate_activity_fraction(np.array(list(self.objects.values())))

        # To calculate absolute activity fraction, the stimuli set consists of all objects the
        # neuron responds. Model this by getting firing rates distributed over the entire cdf
        #  with a small step
        rates_distribution_for_cdf = np.linspace(start=0, stop=1, num=1000, endpoint=False)
        rates_all_obj = self.__get_object_preference(rates_distribution_for_cdf)

        self.activity_fraction_absolute = \
            calculate_activity_fraction(rates_all_obj)

        # Calculate the excess kurtosis of the neuron
        self.kurtosis_absolute = 6.0 / self.a
        self.kurtosis_measured = calculate_kurtosis(np.array(list(self.objects.values())))
Example #47
	def initialize(self, method='student'):
		"""
		Randomly initializes parameters.
		"""

		if method.lower() == 'student':
			self.scales = 1. / sqrt(gamma.rvs(1, 0, 1, size=self.num_components))
			self.means *= 0.

		elif method.lower() == 'cauchy':
			self.scales = 1. / sqrt(gamma.rvs(0.5, 0, 2, size=self.num_components))
			self.means *= 0.

		elif method.lower() == 'laplace':
			self.scales = rayleigh.rvs(size=self.num_components)
			self.means *= 0.

		else:
			raise ValueError('Unknown initialization method \'{0}\'.'.format(method))
Example #48
    def random(cls, L=1, avg_mu=1.0, alphabet='nuc', pi_dirichlet_alpha=1,
               W_dirichlet_alpha=3.0, mu_gamma_alpha=3.0):
        """
        Creates a random GTR model

        Parameters
        ----------

         avg_mu : float
            Substitution rate

         alphabet : str
            Alphabet name (should be standard: 'nuc', 'nuc_gap', 'aa', 'aa_gap')


        """
        from scipy.stats import gamma
        alphabet=alphabets[alphabet]
        gtr = cls(alphabet=alphabet, seq_len=L)
        n = gtr.alphabet.shape[0]

        if pi_dirichlet_alpha:
            pi = 1.0*gamma.rvs(pi_dirichlet_alpha, size=(n,L))
        else:
            pi = np.ones((n,L))

        pi /= pi.sum(axis=0)
        if W_dirichlet_alpha:
            tmp = 1.0*gamma.rvs(W_dirichlet_alpha, size=(n,n))
        else:
            tmp = np.ones((n,n))
        tmp = np.tril(tmp,k=-1)
        W = tmp + tmp.T

        if mu_gamma_alpha:
            mu = gamma.rvs(mu_gamma_alpha, size=(L,))
        else:
            mu = np.ones(L)

        gtr.assign_rates(mu=mu, pi=pi, W=W)
        gtr.mu *= avg_mu/np.mean(gtr.mu)

        return gtr
Example #49
    def __get_distribution_shape_parameter():
        """
        Get sample shape parameter for the gamma distribution of firing rates over objects.
        See derivation in kurtosis_fit.py.

        :rtype : shape parameter.
        """
        shape_param = float(gamma.rvs(4.0, scale=0.5, loc=0))

        return np.maximum(1.01, shape_param)  # Avoid making PDF go to infinity at zero spike rate.
Example #50
def sampler(i):
   
   d = epsilon + 1
   while (d > epsilon):

       proposed_mu = np.random.normal(hyper_mu , hyper_sigma , 1) #draw from the prior over the mean
       proposed_tau = gamma.rvs(1. , size = 1)                    #draw from the prior over precision
       x = np.random.normal(proposed_mu , proposed_tau**-.5 , n)  #forward model
       d = rho(data , x)                                          #distance metric
   #print i
   return proposed_mu , proposed_tau
Example #51
def rprior(size, hyperparameters):
    """ returns untransformed parameters """
    eps = 1 / gamma.rvs(hyperparameters["eps_shape"], scale = hyperparameters["eps_scale"], size = size)
    w = 1 / gamma.rvs(hyperparameters["w_shape"], scale = hyperparameters["w_scale"], size = size)
    n0 = truncnorm.rvs((hyperparameters["n0_a"] - hyperparameters["n0_mean"]) / hyperparameters["n0_sd"], \
            (hyperparameters["n0_b"] - hyperparameters["n0_mean"]) / hyperparameters["n0_sd"], size = size, \
            loc = hyperparameters["n0_mean"], scale = hyperparameters["n0_sd"])
    r = random.exponential(scale = hyperparameters["r_scale"], size = size)
    K = truncnorm.rvs((hyperparameters["K_a"] - hyperparameters["K_mean"]) / hyperparameters["K_sd"], \
            (hyperparameters["K_b"] - hyperparameters["K_mean"]) / hyperparameters["K_sd"], size = size, \
            loc = hyperparameters["K_mean"], scale = hyperparameters["K_sd"])
    theta = random.exponential(scale = hyperparameters["theta_scale"], size = size)
    parameters = zeros((6, size))
    parameters[0, :] = eps
    parameters[1, :] = w
    parameters[2, :] = log(n0)
    parameters[3, :] = r
    parameters[4, :] = K
    parameters[5, :] = theta
    return parameters
Example #52
def consume(fruits, filled, gutsize = 15, alfa = 10, delta = 9):
    '''
    What will the structure of ingested seeds and passage times look like? Think about this later.
    '''
    fruits = fruits.item()
    #filled = filled.item()
    
    hip = (alfa * fruits) / (delta + fruits)
    frac = (1 - filled/gutsize) * gutsize
    amount = int(min(hip, frac))
    
    time = gamma.rvs(3, scale = 9, size = 1).item()
    
    return amount, time
Example #53
 def sample_obs_max_hail(self, dist_model_name, num_samples, query=None):
     if query is not None:
         dist_obs = self.matched_forecasts["dist"][dist_model_name].query(query)
         dist_obs = dist_obs.reset_index(drop=True)
     else:
         dist_obs = self.matched_forecasts["dist"][dist_model_name]
     max_hail_samples = np.zeros((dist_obs.shape[0], num_samples))
     areas = dist_obs["Area"].values
     for f in np.arange(dist_obs.shape[0]):
         dist_params = dist_obs.loc[f, self.type_cols["dist"]].values
         if dist_params[0] > 0:
             max_hail_samples[f] = np.sort(gamma.rvs(*dist_params,
                                                     size=(num_samples, areas[f])).max(axis=1))
     return max_hail_samples
Example #54
def perch_time(nind, time_left, shape = 4, scale = 1.25):
    '''
    
    '''
    #left = time_left    
    
    time = gamma.rvs(shape, scale = scale, size = nind)
    
    time = np.minimum(time, time_left)  # elementwise cap at the remaining time

    return time
Example #55
def sample_posterior(Nsample):
    
    xbar , s = np.mean(data) , np.var(data)
    hyper_t = 1./hyper_sigma**2.
    a_pos = n/2. + hyper_a
    scale_pos  = 1./(1 + n*s/2.)
    
    m = np.zeros((Nsample))
    t = np.zeros((Nsample))
    
    for i in range(Nsample):
   
        t[i] = gamma.rvs(a_pos, loc=0, scale=scale_pos) 
        sigma_pos = (n*t[i] + hyper_t)**-.5
        mean_pos = (n*xbar*t[i] + hyper_mu*hyper_t)/(n*t[i] + hyper_t)
        m[i] = norm.rvs(loc = mean_pos, scale=sigma_pos)
    
    return m, t
Example #56
def generate_uncertainties(N, dist='Gamma', rseed=None):
    """
    This function generates uncertainties for the white noise component
    in the synthetic light curve. 
    
    Parameters
    ---------
    N: positive integer
        Length of the returned uncertainty vector
    dist: {'EMG', 'Gamma'}
        Probability density function (PDF) used to generate the 
        uncertainties
    rseed:
        Seed for the random number generator
        
    Returns
    -------
    s: ndarray
        Vector containing the uncertainties
    expected_s_2: float
        Expectation of the square of s computed analytically
        
    """
    np.random.seed(rseed)  
    #print(dist)
    if dist == 'EMG':  # Exponential modified Gaussian
        # the mean of a EMG rv is mu + 1/(K*sigma)
        # the variance of a EMG rv is sigma**2 + 1/(K*sigma)**2
        K = 1.824328605481941
        sigma = 0.05*0.068768312946785953
        mu = 0.05*0.87452567616276777
        # IMPORTANT NOTE
        # These parameters were obtained after fitting uncertainties
        # coming from 10,000 light curves of the VVV survey
        expected_s_2 = sigma**2 + mu**2 + 2*K*mu*sigma + 2*K**2*sigma**2 
        s = exponnorm.rvs(K, loc=mu, scale=sigma, size=N)
    elif dist == 'Gamma':
        # The mean of a gamma rv is k*sigma
        # The variance of a gamma rv is k*sigma**2
        k = 3.0
        sigma = 0.05/k  #  mean=0.05, var=0.05**2/k
        s = gamma.rvs(k, loc=0.0, scale=sigma, size=N)
        expected_s_2 = k*(1+k)*sigma**2  
    return s, expected_s_2
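A hedged usage sketch: draw a Gamma uncertainty vector and compare the analytic expectation of s**2 with its Monte Carlo estimate (argument values are illustrative):

s, expected_s2 = generate_uncertainties(10000, dist='Gamma', rseed=0)
print(expected_s2, np.mean(s**2))  # the two should agree closely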
Example #57
    def genRandomWebLogs(self):
        """
            Method for generating random web log data.
        """

        self.count += 1
        if self.randomWebLogsWindowOpenedFlag == False:

            self.randomWebLogsWindowOpenedFlag = True # set window opened
            global RandomWebLogsWindow

            def toggleFlag():
                self.randomWebLogsWindowOpenedFlag = False # set window closed
                RandomWebLogsWindow.destroy()

            RandomWebLogsWindow = tk.Toplevel(self)
            RandomWebLogsWindow.minsize(300, 500)
            RandomWebLogsWindow.geometry("300x500+100+100")
            RandomWebLogsWindow.title("Random web log data")
            RandomWebLogsWindow.config(bd=5)
            RandomWebLogsWindow.protocol("WM_DELETE_WINDOW", toggleFlag)

            x = sp.arange(1, 31 * 24) # 1 month of traffic data
            y = sp.array(200 * (sp.sin(2 * sp.pi * x / (7 * 24))), dtype=int)
            y = y + gamma.rvs(15, loc=0, scale=100, size=len(x))  # promote y to float; in-place += would fail on the int array
            y += 2 * sp.exp(x / 100.0)
            y = sp.ma.array(y, mask=[y < 0])
            sp.savetxt(os.path.join("sample_data", "sample_web_traffic.tsv"), list(zip(x, y)), delimiter="\t", fmt="%s")
            model = TableModel() # create a new TableModel for table data
            table = TableCanvas(RandomWebLogsWindow, model=model, editable=False) # create a new TableCanvas for showing the table
            table.createTableFrame()
            tableData = {} # dictionary for storing table data
            for k, v in list(zip(x,y)):
                tableData[uuid.uuid4()] = {'Hour': str(k), 'Hits': str(v)}
            model.importDict(tableData)
            table.resizeColumn(0, 100)
            table.resizeColumn(1, 100)
            table.sortTable(columnName='Hour')
            table.redrawTable()

        else:
            RandomWebLogsWindow.deiconify()  
Example #58
def place_field(xmax=100, firing_rate=0.1, baseline=0.0001, **kwargs):
    """
    Creates a 1D Gaussian place field with center pos and
    covariance matrix. The max is scaled to desired firing_rate.
    Baseline gives the baseline firing rate.

    :return pdf: Probability density function
    """
    if 'preloaded' in kwargs:
        pos = kwargs['preloaded'][0]
        var = kwargs['preloaded'][1]
        n_modes = len(pos)
    else:
        n_modes = floor(gamma.rvs(3, 0, 1))
        if n_modes < 1.:
            n_modes = 1
        if n_modes > 4:
            n_modes = 4
        pos = random.uniform(1, xmax, n_modes)
        var = random.uniform(1.5, xmax / 10, n_modes)

    gauss_m = list()
    for p, v in zip(pos, var):
        mv = norm(p, v)
        scale = mv.pdf(p)
        gauss_m.append((mv, scale))

    def pdf(arena):
        prob = 0.
        for g in gauss_m:
            prob += g[0].pdf(arena) / g[1]
        prob /= n_modes
        fr = firing_rate * prob + baseline
        return fr

    def info():
        parameters = (pos, var)
        return parameters

    return pdf, info
Example #59
def run_class(full, target_col, random_state=1234, c_range_alpha=1.99, c_range_size=100):
    svc = LinearSVC()

    pipeline = Pipeline([('svc', svc)])

    c_range = gamma.rvs(size=c_range_size, a=c_range_alpha, random_state=random_state)

    param_dist = {"svc__C": c_range}

    data, target = separate(full, target_col)
    target_c = target > 0

    n_iter = 100
    cv = ShuffleSplit(len(target), n_iter=n_iter, test_size=1/6.0, random_state=random_state)

    total_runs = n_iter
    scorer = verbose_scorer(total_runs)

    search = RandomizedSearchCV(pipeline, param_distributions=param_dist, cv=cv, scoring=scorer,
                                random_state=random_state)

    search.fit(data, target_c)

    return search