Code example #1
File: definitions_2.py Project: haru0422kr/CoalSim
def recur_traversal(output1, output2, sample, mu, n):
    if sample.left is None and sample.right is None:
        if n == 1:
            output1 = output1 + str(sample.identity) + ':{0:.{1}f}'.format(sample.time, 10)
            sample.mutations = poisson.rvs(mu * sample.time)
            output2 = output2 + str(sample.identity) + ':' + str(sample.mutations)
            return output1, output2
        else:
            output2 = output2 + str(sample.identity) + ':' + str(sample.mutations)
            return output1, output2
    current = sample.right
    output1, output2 = recur_traversal((output1 + '('), (output2 + '('), current, mu, n)
    if n > 1:
        current, children_list = update_children(current, current.children_list)
        current.children_list = children_list
    while current.next != sample.left:
        current = current.next
        output1, output2 = recur_traversal(output1 + ', ', output2 + ', ' , current, mu, n)
        if n > 1:
            current, children_list = update_children(current, current.children_list)
            current.children_list = children_list
    current = sample.left
    output1, output2 = recur_traversal((output1 + ', '), (output2 + ', '), current, mu, n)
    if n == 1:
        output1 = output1 + ')' + str(sample.identity) + ':{0:.{1}f}'.format(sample.time, 10)
        sample.mutations = poisson.rvs(mu * sample.time)
        output2 = output2 + ')' + str(sample.identity) + ':' + str(sample.mutations)
        current, children_list = update_children(current, current.children_list)
        current.children_list = children_list
    else:
        output2 = output2 + ')'
        if sample.mutations > 0:
            output2 = output2 + str(sample.identity) + ':' + str(sample.mutations)
    return output1, output2    
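The sampling step shared by the leaf and internal branches above is poisson.rvs(mu * sample.time): mutations accumulate at rate mu per unit time, so a branch of length t carries a Poisson(mu * t) count. A minimal standalone sketch (values made up):

from scipy.stats import poisson

mu = 2.0           # mutation rate per unit time (assumed)
branch_time = 0.7  # branch length in coalescent time units (assumed)
print(poisson.rvs(mu * branch_time))  # Poisson-distributed mutation count for this branch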
Code example #2
File: scenarioscript.py Project: duy/dispersy
        def __poisson_churn(self):
            while True:
                delay = poisson.rvs(self.__poisson_online_mu)
                if self._community is None:
                    if __debug__:
                        dprint("poisson wants us online for the next ", delay, " seconds")
                    self.log("scenario-poisson", state="online", duration=delay)
                    self._community = self.community_class.load_community(
                        self._master_member, *self.community_args, **self.community_kargs
                    )
                else:
                    if __debug__:
                        dprint("poisson wants us online for the next ", delay, " seconds (we are already online)")
                    self.log("scenario-poisson", state="stay-online", duration=delay)
                yield float(delay)

                delay = poisson.rvs(self.__poisson_offline_mu)
                if self._community is None:
                    if __debug__:
                        dprint("poisson wants us offline for the next ", delay, " seconds (we are already offline)")
                    self.log("scenario-poisson", state="stay-offline", duration=delay)
                else:
                    if __debug__:
                        dprint("poisson wants us offline for the next ", delay, " seconds")
                    self.log("scenario-poisson", state="offline", duration=delay)
                    self._community.unload_community()
                    self._community = None
                yield float(delay)
Code example #3
def initialize_d(X,Y,lbda,eta,lhs_len,maxlhs,nruleslen, Volume, Y2length):
    m = float('inf')
    while m>=len(X):
        m = poisson.rvs(lbda) #sample the length of the list from Poisson(lbda), truncated at len(X)
    #prepare the list
    d_t = []
    empty_rulelens = [r for r in range(1,maxlhs+1) if r not in nruleslen]
    used_rules = []
    for i in range(m):
        #Sample a rule size.
        r = 0
        while r==0 or r > maxlhs or r in empty_rulelens:
            r = poisson.rvs(eta) #Sample the rule size from Poisson(eta), truncated at 0 and maxlhs and not using empty rule lens
        #Now sample a rule of that size uniformly at random
        rule_cands = [j for j,lhslen in enumerate(lhs_len) if lhslen == r and j not in used_rules]
        random.shuffle(rule_cands)
        j = rule_cands[0]
        #And add it in
        d_t.append(j)
        used_rules.append(j)
        assert lhs_len[j] == r
        if len(rule_cands) == 1:
            empty_rulelens.append(r)
    #Done adding rules. We have added m rules. Finish up.
    d_t.append(0) #all done
    d_t.extend([i for i in range(len(X)) if i not in d_t])
    R_t = d_t.index(0)
    assert R_t == m
    #Figure out what rules are used to classify what points
    N_t, Volume_t = compute_rule_usage(d_t,R_t,X,Y, Volume, Y2length)
    return d_t,R_t,N_t, Volume_t
Code example #4
File: 11-10.py Project: haru0422kr/CoalSim
def recurTraversal(mean_sep_time, sample):
    #base case
    global total_branch_length, total_mutations
    weight = 0
    if sample.left is None and sample.right is None:
        total_branch_length += sample.time
        identity = str(sample.getIdentity())
        if 'A' not in identity:
            k = 1
        else:
            k = len(sample.descendent_list)
        weight = (k * (sample_size - k)) / comb(sample_size, 2)
        mean_sep_time = mean_sep_time + (weight * sample.time)
        sample.mutations = poisson.rvs(mu * sample.time)
        total_mutations += sample.getMutations()
        return mean_sep_time
    mean_sep_time = recurTraversal(mean_sep_time, sample.right)
    current = sample.right
    while current.next != None:
        mean_sep_time = recurTraversal(mean_sep_time, current.next)
        current = current.next
    total_branch_length += sample.time
    
    identity = str(sample.getIdentity())
    if 'A' not in identity:
        k = 1
    else:
        k = len(sample.descendent_list)
    weight = (k * (sample_size - k)) / comb(sample_size, 2)
    mean_sep_time = mean_sep_time + (weight * sample.time)
    sample.mutations = poisson.rvs(mu * sample.time)
    total_mutations += sample.getMutations()
    return mean_sep_time
Code example #5
        def __poisson_churn(self):
            while True:
                delay = float(poisson.rvs(self.__poisson_online_mu))
                self.scenario_churn("online", delay)
                yield delay

                delay = float(poisson.rvs(self.__poisson_offline_mu))
                self.scenario_churn("offline", delay)
                yield delay
Code example #6
File: simulate_spec.py Project: me-manu/eblstud
    def simulateNexcess(self, expBkg, alpha, numSim = 1, flux = False, Tobs = 1.):
	"""
	Simulate the number of excess events in each energy bin.
	The expected number of signal events is calculated by nPhotBin and stored in self.nPhot; the energy bin bounds are given in self.EbinBounds.
    
	Arguments
	---------
	expBkg:	n-dim array (same energy bins as self.nPhot), expected number of background counts
	alpha:	float, ratio between ON and OFF exposure

	kwargs
	------
	numSim:	integer, number of simulations
	flux:	boolean, if true, return simulated number of counts divided by exposure / effective area and bin width
	Tobs:	float, observation time in seconds (if exposure is given instead of effective area, this should be one, default = 1.)

	Returns:
	--------
	(numSim x EbinBounds.shape[0]) - dim array with poissonian random numbers for the excess events 
	(2 x numSim x EbinBounds.shape[0]) - dim array with lo, up errors for poissonian random numbers for the excess events 
	(numSim x EbinBounds.shape[0]) - dim masked array with Li & Ma significances, mask: (fON > 0.) & (fOFF > 0.)
	"""
	dummy = np.ones(numSim)			# dummy array for right shape	
	nn,dd = meshgrid(self.nPhot,dummy)	# nn: numsim x self.nPhot.shape dim array with self.nPhot in each row
	bb,dd = meshgrid(expBkg,dummy)	# bb: numsim x expBkg.shape dim array with expBkg in each row
	fON   = poisson.rvs(nn + bb)			# do the random number generation
	fOFF  = poisson.rvs(bb / alpha)			# do the random number generation
	fExcess	= fON - alpha * fOFF

#	S	= np.ma.masked_array(li_ma(np.ma.masked_array(fON, mask = fON <= 0.),
#					    np.ma.masked_array(fOFF, mask = fON <= 0.),alpha), 
#				    mask = (fON <= 0.) & (fOFF <= 0.)
#				    )
	S	= li_ma(fON * (fON > 0.) + 1e-5 * (fON <= 0.),
			fOFF * (fOFF > 0.) + 1e-5 * (fOFF <= 0.),alpha) 
				    

	maskON	= fON	> self.GAUSL
	maskOFF	= fOFF	> self.GAUSL

	dfExcess = np.array([
		    sqrt((fON * maskON + self.poisLo[fON.astype(int) * invert(maskON)] * invert(maskON) ) + \
			    alpha**2. * (fOFF * maskOFF + self.poisLo[fOFF.astype(int) * invert(maskOFF)] * invert(maskOFF) )),
		    sqrt((fON * maskON + self.poisUp[fON.astype(int) * invert(maskON)] * invert(maskON) ) + \
			    alpha**2. * (fOFF * maskOFF + self.poisUp[fOFF.astype(int) * invert(maskOFF)] * invert(maskOFF) ))
		    ])
	if flux:
	    return fExcess / self.dEbin / self.expAve / Tobs, dfExcess / self.dEbin / self.expAve / Tobs, S
	else:
	    dfExcess = sqrt(fON + alpha**2. * fOFF)
	    return fExcess , dfExcess, S
Code example #7
    def _poisson_distribution(self, first_occupation_moment, seed=None, **kwargs):
        """ Poisson distribution used to draw Monte Carlo occupation statistics
        for satellite-like populations in which per-halo abundances are unbounded.

        Parameters
        ----------
        first_occupation_moment : array
            Array giving the first moment of the occupation distribution function.

        seed : int, optional
            Random number seed used to generate the Monte Carlo realization.
            Default is None.

        Returns
        -------
        mc_abundance : array
            Integer array giving the number of galaxies in each row of the input table.
        """
        np.random.seed(seed=seed)
        # The scipy built-in Poisson number generator raises an exception
        # if its input is zero, so here we impose a simple workaround
        first_occupation_moment = np.where(first_occupation_moment <=0,
            model_defaults.default_tiny_poisson_fluctuation, first_occupation_moment)

        result = poisson.rvs(first_occupation_moment)
        if 'table' in kwargs:
            kwargs['table']['halo_num_'+self.gal_type] = result
        return result
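The zero-guard above can be reproduced outside the class; here 1e-10 stands in for model_defaults.default_tiny_poisson_fluctuation, whose actual value is not shown in this excerpt:

import numpy as np
from scipy.stats import poisson

first_occupation_moment = np.array([0.0, 0.5, 3.2])
# replace non-positive means with a tiny positive value before drawing
safe = np.where(first_occupation_moment <= 0, 1e-10, first_occupation_moment)
mc_abundance = poisson.rvs(safe)  # integer abundances; the guarded zeros stay (almost surely) zero
print(mc_abundance)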
Code example #8
File: dmdd.py Project: joezuntz/dmdd
    def simulate_data(self):
        """
        Do Poisson simulation of data according to scattering model's dR/dQ.
        """
        Nexpected = self.model_N
        if Nexpected > 0:
            npts = 10000
            Nevents = poisson.rvs(Nexpected)
      
            Qgrid = np.linspace(self.experiment.Qmin,self.experiment.Qmax,npts)
            efficiency = self.experiment.efficiency(Qgrid)
            pdf = self.model.dRdQ(Qgrid,**self.dRdQ_params) * efficiency / self.model_R
            cdf = pdf.cumsum()
            cdf /= cdf.max()
            u = random.rand(Nevents)
            Q = np.zeros(Nevents)
            for i in np.arange(Nevents):
                Q[i] = Qgrid[np.absolute(cdf - u[i]).argmin()]
        else:
            Q = np.array([])
            Nevents = 0
            Nexpected = 0

        if not self.silent:
            print "simulated: %i events (expected %.0f)." % (Nevents,Nexpected)
        return Q
Code example #9
File: lumfun.py Project: imcgreer/simqso
 def _fast_sample(self,Mrange,zrange,p,**kwargs):
     verbose = kwargs.pop('verbose',0)
     if verbose > 1:
         print('using fast sample for QLF')
     skyfrac = kwargs.get('skyArea',skyDeg2) / skyDeg2
     eps_M,eps_z = 0.05,0.10
     magLimPad = 0.2
     full_Mrange = Mrange(zrange)
     nM = int(-np.diff(full_Mrange) / eps_M)
     nz = int(np.diff(zrange) / eps_z)
     Medges = np.linspace(full_Mrange[0],full_Mrange[1],nM)
     zedges = np.linspace(zrange[0],zrange[1],nz)
     # XXX shouldn't assume evenly spaced bins here
     dM = -np.diff(Medges)[0]
     dz = np.diff(zedges)[0]
     Mbins = Medges[:-1] + np.diff(Medges)/2
     zbins = zedges[:-1] + np.diff(zedges)/2
     Mlim_z = np.array([ Mrange(z)[0] for z in zbins ])
     dVdzdO = self.cosmo.differential_comoving_volume(zbins).value
     V_ij = dVdzdO * dz * dM * skyfrac * 4*np.pi
     Mi,zj = np.meshgrid(Mbins,zbins,indexing='ij')
     Phi_ij = self.Phi(Mi,zj)
     N_ij = Phi_ij * V_ij
     N_ij = poisson.rvs(N_ij)
     N_ij[Mi>Mlim_z+magLimPad] = 0
     ij = np.where(N_ij > 0)
     Mz = [ ( np.repeat(M,n), np.repeat(z,n) )
               for M,z,n in zip(Mi[ij],zj[ij],N_ij[ij]) ]
     M,z = np.hstack(Mz)
     M += dM * (np.random.rand(len(M)) - 0.5)
     z += dz * (np.random.rand(len(M)) - 0.5)
     if verbose > 1:
         print('to generate {} quasars'.format(len(M)))
     return M,z
Code example #10
File: LLHStatistics.py Project: olivas/pisa
def get_random_map(template):
    '''
    Gets an event map with integer entries from non-integer entries
    (in general) in the template, varied according to Poisson
    statistics.
    '''
    return poisson.rvs(template)
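This works as a one-liner because poisson.rvs broadcasts over an array of expectations and returns an integer array of the same shape. A sketch with a made-up template:

import numpy as np
from scipy.stats import poisson

template = np.array([[0.5, 2.3], [10.0, 4.2]])  # expected (non-integer) counts per bin
random_map = poisson.rvs(template)              # integer counts, same shape as template
print(random_map)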
Code example #11
 def _generate_sample_from_state(self, state, random_state=None):
     res = []
     for dim in range(self.n_features):
         erg = round(sum([poisson.rvs(self.p[dim][comp][state]) * self.c[dim][comp][state] for comp in range(self.distr_magnitude)]))
         res.append(erg)
     
     return np.array(res)
Code example #12
File: simulation.py Project: dougwt/runway-sim
    def __init__(self, numCustomers=100):
        """Initializes the simulation."""
        self.numCustomers = numCustomers
        self.customers = []

        # initialize State Variables
        self.clock = 0
        self.Idle = 0
        self.Busy = 1
        self.s1 = self.Idle
        self.s2 = self.Idle
        self.q1 = 0
        self.q2 = 0

        ### additional values to be tracked during simulation

        # average waiting time for all customers
        self.averageWaitingTime = 0
        self.averageQ1Time = 0
        self.averageQ2Time = 0

        # avg waiting time for customers who wait
        self.averageWaitTimeWhoWait = 0
        self.averageQ1TimeWait = 0
        self.averageQ2TimeWait = 0

        # average service time for all customers
        self.averageServiceTime = 0
        self.averageService1Time = 0
        self.averageService2Time = 0

        # avg interarrival time for all customers
        self.averageInterarrrivalTime = 0

        # avg total system time for all customers
        self.averageSystemTime = 0

        # probability a customer has to wait
        self.waitProbability = 0

        # percentage of time server is idle
        self.idleProbability = 0

        # queue sizes at the moment each customer arrived
        self.q1sizes = {}
        self.q2sizes = {}

        ### generate values for random variables

        self.interarrivalTimeValues = poisson.rvs(4, 0, size=numCustomers).tolist()
        self.serviceTime1Values = expon.rvs(6, size=numCustomers).tolist()
        self.serviceTime2Values = expon.rvs(8, size=numCustomers).tolist()
        self.balkValues = [random.random() for x in xrange(numCustomers)]

        # print self.interarrivalTimeValues
        # print expon.rvs(6, 0, size=numCustomers)
        # print self.serviceTime2Values
        # print self.balkValues

        self.populate() # Ready. Set. Go!
Code example #13
File: test_toolbox.py Project: spacepy/spacepy
 def test_poisson_fit(self):
     """Make sure that we get the right Poisson fit answer"""
     numpy.random.seed(8675309)
     ans = 20
     data = poisson.rvs(ans, size=1000)
     res = tb.poisson_fit(data)
     self.assertEqual(ans, numpy.round(res.x))
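As a cross-check that needs no spacepy: the maximum-likelihood estimate of a Poisson rate is the sample mean, so the fitted value should round to the same answer as the raw mean (sketch under that assumption):

import numpy
from scipy.stats import poisson

numpy.random.seed(8675309)
data = poisson.rvs(20, size=1000)
print(numpy.round(data.mean()))  # the Poisson MLE of the rate; expect 20.0 here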
Code example #14
File: xcnograph.py Project: ltobalina/cellnopt
    def _random_poisson_graph(self, n=10, mu=2.5, ratio=0.9, 
            remove_unconnected=True, 
            remove_self_loops=True,  Nsignals=5, Nstimuli=5):
        from scipy.stats import poisson
        z = [poisson.rvs(mu) for i in range(0,n)]
        G = nx.expected_degree_graph(z)
        self.clear()

        # converts to strings
        edges = [(unicode(e[0]), unicode(e[1])) for e in G.edges()]
        assert ratio >= 0
        assert ratio <= 1

        N = int(len(edges)* ratio)
        edges_pos = edges[0:N]
        edges_neg = edges[N:]
        self.add_edges_from(edges_pos, link="+")
        self.add_edges_from(edges_neg, link="-")

        # remove self loop first
        if remove_self_loops:
            self.remove_self_loops()

        if not remove_unconnected:
            # add all nodes (even though they may be unconnected)
            self.add_nodes_from(G.nodes())

        ranks = self.get_same_rank()
        sources = ranks[0]
        sinks = ranks[max(ranks.keys())]
        Nstim = min(len(sources), Nstimuli)
        Nsignals = min(len(sinks), Nsignals)
        self._stimuli = sources[0:Nstim]
        self._signals = sinks[0:Nsignals]
        self.set_default_node_attributes()
Code example #15
def imod2snrconv(magdata, bandmod):
	from scipy import signal
	from photutils.isophote import EllipseGeometry
	from photutils.isophote import Ellipse
	from photutils.isophote import build_ellipse_model
	
	cr = csstpkg.mag2cr(magdata['MOD_' + bandmod], band=bandmod)
	cr300poiss = poisson.rvs(cr * texp, size=1)
	
	agalaxy = csstpkg.galser(lumtot=cr300poiss, reff=magdata['reff'], nser=magdata['nser'], ellip=1-magdata['Eab'])
	# agalaxy.plotmodel()
	
	# Generating convolution image:
	apsf = csstpkg.psfgauss()
	normpsf = apsf.gauss()[2]/np.sum(apsf.gauss()[2])
	agalconv = signal.fftconvolve(agalaxy.sermod()[2], normpsf, mode='same')
	
	# FITS file generating:
	hduagalaxy = fits.PrimaryHDU(agalaxy.sermod()[2])
	hduagalaxy.writeto('test_agalaxy.fits',overwrite=True)
	# hduapsf = fits.PrimaryHDU(normpsf)
	# hduapsf.writeto('test_apsf.fits', overwrite=True)
	
	hdr = fits.Header()
	hdr['UVUDFID']=magdata['ID']
	hduagalconv = fits.PrimaryHDU(data=agalconv,header=hdr)
	hduagalconv.writeto('test_agalconv.fits', overwrite=True)

	# aperture photometry and SNR:
	photcr = csstpkg.aptrphot(agalconv, origcen=agalaxy.orig0, reff=magdata['reff'], ellip=magdata['Eab'])
	snr = csstpkg.cr2snr(photcr['aperture_sum'] / texp, npix=math.pi * magdata['reff'] ** 2,
	                     bsky=csstpkg.backsky[bandmod], poiss=False)
	
	return snr
Code example #16
def scale(n,t,max_files=1,tag="default"):
    m = 0

    while m<max_files:
        outfilename = "toy_%s_%05d.dat" % (tag,m)
        f = open(outfilename,'w+')
        output = ""
        
        xpts = np.array([])
        ypts = np.array([])
        #ypts_new = np.array([])
        ypts_pois = np.array([])
        n.seek(0)
        for line in n:
            vals = line.split()
            x = float(vals[0])
            y = float(vals[1])

            xpts = np.append(xpts,x)
            ypts = np.append(ypts,y)
            
        # Use the scaling factor as a Poisson input.
        # In other words, don't generate the same number of events each time.
        # Instead, the number of events will be random for each one, but will
        # be distributed according to a Poisson distribution.
        #scaling_factor = poisson.rvs(int(t))
        # ACTUALLY, MAYBE WE DON'T NEED TO DO THIS PART
        scaling_factor = t
        #print "number of events for this sample: %d" % (scaling_factor)    

        for i in ypts:
            new_y = i*(scaling_factor/sum(ypts))
            #ypts_new = np.append(ypts_new,new_y)

            #rv = poisson(new_y)
            R = 0.0
            if np.ceil(new_y)>0:
                R = poisson.rvs(np.ceil(new_y))

            ypts_pois = np.append(ypts_pois,R)

        # This is to make sure that we get the same number of entries as our
        # new_scaling (Poisson total for this toy).
        # I *think* this works. It will give you floats for the bin heights,
        # but I think that's OK for these toy tests.
        # ACTUALLY MAYBE THIS IS NOT OK. 
        #nentries= sum(ypts_pois)
        #new_scaling = scaling_factor/nentries
        #ypts_pois *= new_scaling

        print "number of events for this sample: %d" % (sum(ypts_pois))

                
        for x,y in zip(xpts,ypts_pois):
            output = "%f %f\n" % (x,y)     
            f.write(output)
            
        f.close()

        m += 1
Code example #17
File: rain_disagg.py Project: neel9102/ambhas
 def disaggregate(self,rf):
     len_rf = len(rf)        
     # generating rainfall from t h to t/2 h
     rf_pre = np.zeros((1,len_rf*2))
     for j in range(1):
         for i in xrange(0,len_rf*2,2):
             W = self.A*(self.lp[1])**poisson.rvs(1, size=2)
             W[W<0] = 1e-6
             rf_pre[j,i] = rf[int(i/2)]*W[0]/(W[0]+W[1])
             rf_pre[j,i+1] = rf[int(i/2)]*W[1]/(W[0]+W[1])              
                          
     
     rf_pre = np.mean(rf_pre, axis=0)
     
     # rounding up the simulated rainfall to the least count of raingauge 
     for i in xrange(0,len_rf*2,2):
         if np.mod(rf_pre[i],0.5) !=0:
             TB = np.mod(rf_pre[i],0.5)
         else:
             TB = 0
         
         rf_pre[i] -= TB
         rf_pre[i+1] += TB
         
         
     
     return rf_pre
Code example #18
def main():
    prob_region_boundaries = [500, 750, 2500, 5000, 10000]
    num_reads = 50000
    genome_size = prob_region_boundaries[-1]
    latent_variance = compute_latent_variance(prob_region_boundaries)
    latent_mean = 1 / float(genome_size)
    print 'true latent mean: {0}, latent variance: {1}'.format(latent_mean,
        latent_variance)
    print 'true jaffe stat: {0}'.format(latent_variance / float(latent_mean**2))
    var_samples = []
    est_var_samples_var = []
    jaffe_samples = []
    jaffe_est_vars = []
    jaffe_est_vars2 = []
    for k in range(100):
        #count_samples = generate_counts(prob_region_boundaries, num_reads)
        count_samples = poisson.rvs(num_reads / float(genome_size),
            size=num_reads)
        
        count_mean = compute_sample_mean(count_samples)
        count_var = compute_sample_variance(count_samples)
        var_samples.append(count_var)

        jaffe_stat = (((count_var / count_mean**2) - (1 / count_mean)) +
            (1 / float(num_reads))) * (float(num_reads) / float(num_reads - 1))
        jaffe_samples.append(jaffe_stat)
   
   
        count_kurtosis = kurtosis(count_samples)
        e_vsv = count_var ** 2 * (2 / float(len(count_samples) - 1) +
            count_kurtosis / float(len(count_samples)))
        est_var_samples_var.append(e_vsv)
        
        jvar = (float(num_reads**2) / float((num_reads - 1)**2) *
            count_mean**(-4) * e_vsv)

        jaffe_est_vars.append(jvar)

    var_samples_mean = compute_sample_mean(var_samples)
    var_samples_var = compute_sample_variance(var_samples)
    
    print ('count var mean: {0}, count var variance: {1}'
        .format(var_samples_mean, var_samples_var))
    mean_est_var_samples_var = compute_sample_mean(est_var_samples_var)
    print 'mean var estimate: {0}'.format(mean_est_var_samples_var)
    
    #for v in range(len(est_var_samples_var)):
    #    print (est_var_samples_var[v] / var_samples_var)
    
    mean_jaffe_stat = compute_sample_mean(jaffe_samples)
    var_jaffe_stat = compute_sample_variance(jaffe_samples)
    print 'mean jaffe stat: {0}, var jaffe stat: {1}'.format(mean_jaffe_stat,
        var_jaffe_stat)
    mean_jaffe_est_vars = compute_sample_mean(jaffe_est_vars)
    var_jaffe_est_vars = compute_sample_variance(jaffe_est_vars)
    print ('mean jaffe var est: {0} var jaffe var est: {1}'
        .format(mean_jaffe_est_vars, var_jaffe_est_vars))
    
    return 0
Code example #19
File: regression.py Project: sheridanz/SFPedRisk
def sim_data(data,xvars): # simulate Poisson data from model 
    regr = bilin_regr(data,xvars,plot=False)
    X = shape_xvars(data[xvars])
    predicted = regr.predict(X)
    rsim = poisson.rvs(predicted)
    plt.plot(data['pvtraf'],regr.predict(shape_xvars(data[xvars])))
    plt.scatter(data['pvtraf'],rsim)
    plt.show()
Code example #20
File: csstpkg_nser_poiss.py Project: solatale/pycode
def cr2snr(crs, t=texp, npix=npix85, bsky=0.1, bdet=bdet0, nread=nread0, rn=rn0, poiss=False):
	# print 'Input Count Rate:', crs
	if poiss==True:
		cr300 = poisson.rvs(crs*t,size=1)
		# print 'crt 300s & sample: ', crs * t, cr300poiss
	else:
		cr300 = crs*t
	snrcal = cr300/(cr300+npix*(bsky+bdet)*t+npix*nread*rn**2)**0.5
	# print 'area:',npix
	return snrcal
Code example #21
File: fast_sbm.py Project: tbmbob/spectral-vs-bp
from random import randrange
from networkx import Graph  # assumed: the original file's imports are not shown
from scipy.stats import poisson

def fast_sbm(c_cc, c_cp, c_pp, n):
  mav_cc = c_cc * n / 4
  mav_cp = c_cp * n / 2
  mav_pp = c_pp * n / 4

  m_in1 = poisson.rvs(mav_cc)
  m_in2 = poisson.rvs(mav_pp)
  m_out = poisson.rvs(mav_cp)

  G = Graph()
  for i in range(n):
    G.add_node(i)

  # Generate first comm edges
  counter = 0
  while counter < m_in1:
    u = randrange(0,n//2)
    v = randrange(0,n//2)
    if u != v:
      G.add_edge(u,v)
      counter += 1

  # Generate second comm edges
  counter = 0
  while counter < m_in2:
    u = randrange(n//2,n)
    v = randrange(n//2,n)
    if u != v:
      G.add_edge(u,v)
      counter += 1

  # Generate between comm edges
  counter = 0
  while counter < m_out:
    u = randrange(0,n//2)
    v = randrange(n//2,n)
    if u != v:
      G.add_edge(u,v)
      counter += 1

  # Return the generated graph
  return G
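A usage sketch for the block model above (parameter values made up): nodes 0..n//2-1 form the core, the rest the periphery, and c_cc, c_cp, c_pp set the expected within-core, core-periphery, and within-periphery edge densities.

G = fast_sbm(c_cc=8.0, c_cp=2.0, c_pp=1.0, n=200)
print(G.number_of_nodes(), G.number_of_edges())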
Code example #22
    def go(self):
        plotter = Plotter("outdir", "sim", clean=False)
        grid = Grid(plotter)

        for num_of_agents in poisson.rvs(5, size=42):
            agents = [Agent.build() for i in xrange(num_of_agents)]
            # print agents
            print "\nStep {0}\n".format(self.step)
            grid.push(agents)
            grid.tick()
            self.step += 1
Code example #23
File: LLHStatistics.py Project: mamday/pisa
def get_random_map(template, seed=None):
    """
    Gets an event map with integer entries from non-integer entries
    (in general) in the template, varied according to Poisson
    statistics.
    """
    #Set the seed if given
    if not seed is None:
        np.random.seed(seed=seed)

    return poisson.rvs(template)
Code example #24
File: fast_sbm.py Project: tbmbob/uncertain-networks
def fast_sbm(c_in, c_out, n):
  mav_in = c_in * n / 4.
  mav_out = c_out * n / 2.

  m_in1 = poisson.rvs(mav_in)
  m_in2 = poisson.rvs(mav_in)
  m_out = poisson.rvs(mav_out)

  G = Graph()
  for i in range(n):
    G.add_node(i)

  # Generate first comm edges
  counter = 0
  while counter < m_in1:
    u = randrange(0,n//2)
    v = randrange(0,n//2)
    if u != v:
      G.add_edge(u,v)
      counter += 1

  # Generate second comm edges
  counter = 0
  while counter < m_in2:
    u = randrange(n//2,n)
    v = randrange(n//2,n)
    if u != v:
      G.add_edge(u,v)
      counter += 1

  # Generate between comm edges
  counter = 0
  while counter < m_out:
    u = randrange(0,n//2)
    v = randrange(n//2,n)
    if u != v:
      G.add_edge(u,v)
      counter += 1

  # Return the generated graph
  return G
Code example #25
File: cox_process.py Project: JoyceYa/edward
def build_toy_dataset(N, V):
  """A simulator mimicking the data set from 2015-2016 NBA season with
  308 NBA players and ~150,000 shots."""
  L = np.tril(np.random.normal(2.5, 0.1, size=[V, V]))
  K = np.matmul(L, L.T)
  x = np.zeros([N, V])
  for n in range(N):
    f_n = multivariate_normal.rvs(cov=K, size=1)
    for v in range(V):
      x[n, v] = poisson.rvs(mu=np.exp(f_n[v]), size=1)

  return x
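The doubly stochastic ("Cox") structure above has two stages: a multivariate normal draw sets per-cell log-rates, then counts are Poisson given those rates. A stripped-down sketch of a single row with a toy covariance:

import numpy as np
from scipy.stats import multivariate_normal, poisson

V = 4
K = np.eye(V)                       # toy positive-definite covariance (assumed)
f = multivariate_normal.rvs(cov=K)  # latent Gaussian log-intensities
counts = poisson.rvs(mu=np.exp(f))  # Poisson counts given exp(f)
print(counts)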
Code example #26
from scipy.stats import poisson

def add_gc_bias(meancoverages, targetcoverage):
	rand=poisson.rvs(targetcoverage)
	cumprob=poisson.cdf(rand,targetcoverage) # cdf(x, mu, loc=0)	Cumulative density function.
	
	toret=[]
	for cov in meancoverages:
		if cov==0:
			toret.append(0)
		else:
			t=int(poisson.ppf(cumprob,cov)) # ppf(q, mu, loc=0)	Percent point function (inverse of cdf percentiles).
			toret.append(t)
	return toret
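Given the function above, a usage sketch with made-up per-position mean coverages: the single draw from Poisson(targetcoverage) fixes a quantile, and ppf maps that same quantile onto each position's own Poisson, so all positions shift coherently while zeros stay zero.

biased = add_gc_bias([12.0, 0, 30.5], targetcoverage=20)
print(biased)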
Code example #27
File: 15_DeliQueue.py Project: amaggi/am_bayes
def setup_queue(lam, mu, tol_time, time_limit):

    # set up arrival intervals and serving times as poisson processes
    arrival_intervals = poisson.rvs(lam, size=nsamp)
    serving_times = poisson.rvs(mu, size=nsamp)
    tolerance_times = poisson.rvs(tol_time, size=nsamp)

    # calculate arrival times
    arrival_times = np.empty(nsamp, dtype=float)
    arrival_times[0] = arrival_intervals[0]
    for i in xrange(nsamp-1):
        arrival_times[i+1] = arrival_times[i] + arrival_intervals[i+1]

    # find number of customers arriving before closing time
    n_customers = np.searchsorted(arrival_times, time_limit) 

    # cut the arrays down to size
    arrival_times = np.resize(arrival_times, n_customers)
    serving_times = np.resize(serving_times, n_customers)

    return arrival_times, serving_times, tolerance_times
Code example #28
def simulate_spikes(tuning_curve, rx, ry):
    """
    Compute firing rate for each neuron given place field center
    and sample number of observed spikes in one time unit.
    """
    rates = []
    obs_spikes = []
    for n, pfield in enumerate(tuning_curve):
        rate = pfield((rx, ry))
        spikes = poisson.rvs(rate)
        rates.append(rate)
        obs_spikes.append(spikes)
    return rates, obs_spikes
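A usage sketch with a single made-up Gaussian place field (peak 20 Hz at (0.5, 0.5)); the function evaluates each field at the given position and draws one Poisson spike count per neuron. It assumes scipy's poisson is imported, as in the excerpt's original file.

import numpy as np

pfield = lambda pos: 20.0 * np.exp(-((pos[0] - 0.5)**2 + (pos[1] - 0.5)**2) / 0.02)
rates, spikes = simulate_spikes([pfield], rx=0.4, ry=0.6)
print(rates, spikes)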
Code example #29
def imod2snr(magdata, bandmod):
	cr = csstpkg.mag2cr(magdata['MOD_'+bandmod], band=bandmod)
	cr300poiss = poisson.rvs(cr * texp, size=1)
	
	agalaxy = csstpkg.galser(lumtot=cr300poiss, reff=magdata['reff'],\
	                         nser=magdata['nser'], ellip=magdata['Eab'])
	photcr = agalaxy.aperphot()

	snr = csstpkg.cr2snr(photcr['aperture_sum'] / texp,\
                         npix=math.pi * magdata['reff'] ** 2,\
                         bsky=csstpkg.backsky[bandmod], poiss=False)

	return snr
Code example #30
def make_neighbors(mu, nnum):
    """creates x, y coordinates for neighbor galaxies"""
    # radial distance
    r = poisson.rvs(mu, size=nnum)

    # angular position
    ang = np.random.uniform(low=0.0, high=2.0*np.pi, size=nnum)

    #convert to x, y
    x = r*np.cos(ang)
    y = r*np.sin(ang)
    
    return x,y
Code example #31
# -*- coding: utf-8 -*-
"""
Created on Fri Feb  2 18:00:46 2018

@author: Gabo
"""

import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import poisson, uniform

c14std, c12std_min, c12std_max = 178., 61621875000000., 61490625000000.
c14m, c12m_min, c12m_max = 212.4090909,
c14f, c12f_min, c12f_max = 36.66666667, 96813333333333., 78416666666667.

N = 1e4
rm = poisson.rvs(c14std, size=N) / uniform.rvs
Code example #32
File: central_limit.py Project: nickhand/astro250
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
from scipy.stats import poisson

# sum n random variables
n=1000
# take the sum
trials = 1000

mu=3

# generate random variables
randomsamples = poisson.rvs(mu,size=[n,trials])
# sum 
sums = np.sum(randomsamples,axis=0)
average_sums = sums*1./n # ARP: why multiply by inverse, rather than just divide?
print 'mean:' + `np.mean(average_sums)` + ' std dev: ' + `np.std(average_sums)`
print 'expected mean:' + `mu` + ' expected std dev:' + `np.sqrt(mu*1./n)`
# ARP: spot on.

z_scores = (average_sums-mu)/np.sqrt(mu*1./n)
plt.hist(z_scores,100, normed=True) # ARP: does this improve with n?

# plot from z=-5 to z=5
z = np.linspace(-5,5,500)
#plot the normal pdf
plt.plot(z,norm.pdf(z))
plt.show() # ARP: looks like you were missing this...
Code example #33
 def number_of_new_phytomers():
     mean_y = theta1
     new_phytomers = poisson.rvs(mean_y)
     return new_phytomers
Code example #34
# -*- coding: utf-8 -*-
"""
Created on Tue Jan  7 10:54:18 2020

@author: rsholes
"""

#import matplotlib.pyplot as plt
#from IPython.display import Math, Latex
#from IPython.core.display import Image
import seaborn as sns

sns.set(color_codes=True)
sns.set(rc={'figure.figsize':(10,10)})

#import uniform distribution
from scipy.stats import poisson

#random numbers from uniform distribution

n = 10000
start = 0
data_poisson = poisson.rvs(size=n, loc=start, mu=3)

ax = sns.distplot(data_poisson, bins=30, kde=False, color ='skyblue', hist_kws ={'linewidth': 0.2, 'alpha' :1})
ax.set(xlabel='Poisson Distribution', ylabel='Frequency')
Code example #35
'''
Created on Jun 11, 2018

@author: Balakrishna Akuleti
'''

from scipy.stats import poisson
import seaborn as sb
import matplotlib.pyplot as plt

data_poission = poisson.rvs(mu=4, size=10000)
ax = sb.distplot(data_poission,
                 kde=True,
                 color='green',
                 hist_kws={
                     "linewidth": 25,
                     'alpha': 1
                 })

ax.set(xlabel='Poisson', ylabel='Frequency')
plt.show()
Code example #36
import csv
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats  import poisson
from scipy.stats import binom
import sys

sys.stdout = open('hybrid1results.txt', 'w')

poissonSimulations = []
tempSum = 0
averages = []
for s in range(10):
    print("Poisson Simulation " + str(s+1) + ":")
    data_poisson = poisson.rvs(mu = 3, size = 100)
    poissonSimulations.append(data_poisson)
    for x in range(100):
        print(str(data_poisson[x]))
        tempSum += data_poisson[x]
    print("Average of Sim " + str(s+1) + ": " + str(tempSum/100))
    print()
    averages.append(tempSum/100)
    tempSum = 0
    
sumAverages = 0
for i in range(len(averages)):
    sumAverages += averages[i]
print("Average of the averages of each Poisson Simulation: " + str(sumAverages/len(averages)))
a = np.array(averages).astype(np.float)
print("Standard deviation of the averages of each Poisson Simulation: " + str(np.std(a)))
print("Difference between mean value and expected value (3.0): " + str(abs(3-sumAverages/len(averages))))
Code example #37
activate_this = "/Users/stefano.romano/DataScience/bin/activate_this.py"
execfile(activate_this, dict(__file__ = activate_this))

import numpy as np
import pymc as pm
from matplotlib import pyplot as plt

from scipy.stats import poisson as pois

sample_size = 500
mus  = [2, 10, 23]
colors = ["r", "b", "g"]
plt.clf()
for i in range(3):
    samples = pois.rvs(mus[i], size = sample_size)
    means = np.array([np.mean(samples[:k]) for k in xrange(1, sample_size)])  # start at 1: mean of an empty slice is undefined
    plt.plot((means - (mus[i]))/float(mus[i]), color = colors[i],
             label = r"Relative convergence of $\mu_%d$" %i, lw = 2)


plt.axhline(y=0, ls = "--", color = "k", lw = 3)
plt.ylim(-0.2, 0.2)
plt.legend()

## Aggregated geographical data example
from numpy.random import random_integers as dunif
from scipy.stats import norm
import pandas as pd
from pandas import DataFrame
countries = np.concatenate([np.repeat(i, dunif(100, 1500)) for i in xrange(5000)])
height_data = DataFrame({"height": norm.rvs(150, 15, size = len(countries)),
Code example #38
    print('P(more than 10 trains) = {}'.format(poisson.sf(10, mu)))
    print('P(more than 11 trains) = {}'.format(poisson.sf(11, mu)))

    # Add new observations
    new_obs = np.array([
        13, 14, 11, 10, 11, 13, 13, 9, 11, 14, 12, 11, 12, 14, 8, 13, 10, 14,
        12, 13, 10, 9, 14, 13, 11, 14, 13, 14
    ])

    obs = np.concatenate([obs, new_obs])
    mu = np.mean(obs)

    print('mu = {}'.format(mu))

    # Repeat the analysis of the same probabilities
    print('P(more than 8 trains) = {}'.format(poisson.sf(8, mu)))
    print('P(more than 9 trains) = {}'.format(poisson.sf(9, mu)))
    print('P(more than 10 trains) = {}'.format(poisson.sf(10, mu)))
    print('P(more than 11 trains) = {}'.format(poisson.sf(11, mu)))

    # Generate 2000 samples from the Poisson process
    syn = poisson.rvs(mu, size=2000)

    # Plot the complete distribution
    fig, ax = plt.subplots(figsize=(14, 7), frameon=False)

    sns.distplot(syn, kde=True, color="b", ax=ax)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.show()
Code example #39
File: NRRP_CV.py Project: NataliBarros/MVR_project
N = 50
minDist_array = []  # save the 50 closest-to-in-vitro CV profiles
minNRRP = []
L_min = []

for iter in range(N):
    print 'ITERATION # %s' % iter
    EPSP_amp2_dic = {}
    CV_arr2_dic = {}
    NRRPdic = {}

    for l in lambda_values:
        print 'computing CV for lambda = %.1f' % l
        NRRPdic['%.1f' % l] = []
        nrrp = poisson.rvs(l, size=100, loc=1)
        # print nrrp
        nrrp_mean = np.mean(nrrp)
        nrrp_std = np.std(nrrp)
        NRRPdic['%.1f' % l].append(nrrp_mean)
        NRRPdic['%.1f' % l].append(nrrp_std)
        CV2_arr = []
        EPSP2_arr = []
        for a, n in zip(range(1, 100), nrrp):
            try:
                file = 'noise_simulation_new03_%s.h5' % a
                raw_data2 = h5py.File(RAW_DATA_PATH2 + file)
                if n > 24:
                    sample_connection2 = raw_data2['nrrp24'].value
                else:
                    sample_connection2 = raw_data2['nrrp%s' % n].value
Code example #40
def expression_split(number, number_of_subsections, distribution, seed,
                     min_random_number_desired):
    split_number_list = []
    cumulative_sum_of_random_numbers = 0
    current_subsection = 1
    max_random_number = int(number / number_of_subsections)
    if isinstance(seed, int):
        seed = np.random.RandomState(seed)
    else:
        seed = np.random.RandomState()
    if min_random_number_desired < number:
        if min_random_number_desired > max_random_number:
            #             print("WARNING: Cannot have min number as {} and split {} in {} subsections".format(min_random_number_desired, number, number_of_subsections))
            number_of_subsections = int(
                np.floor(number / min_random_number_desired))
            return expression_split(number, number_of_subsections,
                                    distribution, seed,
                                    min_random_number_desired)

        elif distribution == 'uniform':
            split_num1 = uniform.rvs(size=number_of_subsections,
                                     loc=10,
                                     scale=20,
                                     random_state=seed)
        elif distribution == 'gamma':
            split_num1 = gamma.rvs(a=5,
                                   size=number_of_subsections,
                                   random_state=seed)
        elif distribution == 'exponential':
            split_num1 = expon.rvs(scale=1,
                                   loc=0,
                                   size=number_of_subsections,
                                   random_state=seed)
        elif distribution == 'poisson':
            split_num1 = poisson.rvs(mu=3,
                                     size=number_of_subsections,
                                     random_state=seed)
        try:
            split_num1 = [
                int(number * v / sum(split_num1)) for v in split_num1
            ]
        except:
            # Error may occur when split_num1 = [0]
            return expression_split(number, number_of_subsections, distribution,
                                    seed, min_random_number_desired)
        if len(split_num1) > 1:
            num_test = [
                1 for v in split_num1 if v <= min_random_number_desired
            ]
            if sum(num_test) > int(0.25 * len(split_num1)):
                return expression_split(number,
                                        int(number_of_subsections * 0.75),
                                        distribution, seed,
                                        min_random_number_desired)
        for i in range(len(split_num1)):
            if split_num1[i] < min_random_number_desired:
                split_num1[i] = min_random_number_desired
        split_num1[-1] = number - sum(split_num1[:-1])
        if sum([1 for v in split_num1 if v <= 0]) >= 1:
            return expression_split(number, int(number_of_subsections * 0.75),
                                    distribution, seed,
                                    min_random_number_desired)
        return split_num1
    else:
        #         print('WARNING : minimum depth is greater than provided number and cannot be split.')
        return expression_split(number, 1, distribution, seed, number)
Code example #41
File: simulate.py Project: jan-glx/MOFA
    def generateData(self,
                     W,
                     Z,
                     Tau,
                     Mu,
                     likelihood,
                     missingness=0.0,
                     missing_view=False):
        """ Initialisation of observations 

        PARAMETERS
        ----------
        W (list of length M where each element is a np array with shape (Dm,K)): weights
        Z (np array with shape (N,K): latent variables
        Tau (list of length M where each element is a np array with shape (Dm,)): precision of the normally-distributed noise
        Mu (list of length M where each element is a np array with shape (Dm,)): feature-wise means
        likelihood (str): type of likelihood
        missingness (float): percentage of missing values
        """

        Y = [s.zeros((self.N, self.D[m])) for m in range(self.M)]

        if likelihood == "gaussian":
            # Vectorised
            for m in range(self.M):
                Y[m] = s.dot(Z, W[m].T) + Mu[m] + norm.rvs(
                    loc=0, scale=1 / s.sqrt(Tau[m]), size=[self.N, self.D[m]])
            # Non-vectorised, slow
            # for m in range(self.M):
            # for n in range(self.N):
            # for d in range(self.D[m]):
            # Y[m][n,d] = s.dot(Z[n,:],W[m][d,:].T) + Mu[m][d] + norm.rvs(loc=0,scale=1/s.sqrt(Tau[m][d]))

        elif likelihood == "warp":
            raise NotImplementedError()
            # for m in range(self.M):
            #     Y[m] = s.exp(s.dot(Z,W[m].T) + Mu[m] + norm.rvs(loc=0, scale=1/s.sqrt(Tau[m]), size=[self.N, self.D[m]]))

        # Sample observations using a poisson likelihood
        elif likelihood == "poisson":

            ## Unvectorised
            # for m in range(self.M):
            #     for n in range(self.N):
            #         for d in range(self.D[m]):
            #             f = s.dot(Z[n,:],W[m][d,:].T)
            #             # f = s.dot(Z[n,:],W[m][d,:].T) + norm.rvs(loc=0,scale=s.sqrt(1/Tau[m][d]))
            #             rate = s.log(1+s.exp(f))
            #             # Sample from the Poisson distribution
            #             # Y[m][n,d] = poisson.rvs(rate)
            #             # Use the more likely values
            #             Y[m][n,d] = s.special.round(rate)

            ## Vectorised
            for m in range(self.M):
                F = s.dot(Z, W[m].T)
                rate = s.log(1 + s.exp(F))

                # Without noise
                # Y[m] = s.special.round(rate)

                # With noise, sample from the Poisson distribution
                Y[m] = poisson.rvs(rate).astype(float)

        # Sample observations using a bernoulli likelihood
        elif likelihood == "bernoulli":
            for m in range(self.M):

                ## Vectorised
                f = sigmoid(s.dot(Z, W[m].T))

                # without noise
                # Y[m] = s.special.round(f)

                # with noise
                Y[m] = bernoulli.rvs(f).astype(float)

                ## Unvectorised

                # for n in range(self.N):
                # for d in range(self.D[m]):
                # Without noise
                # f = sigmoid( s.dot(Z[n,:],W[m][d,:].T) )

                # With noise
                # Y[m][n,d] = bernoulli.rvs(f)
                # Use the more likely state
                # Y[m][n,d] = s.special.round(f)

        # Introduce missing values into the data
        if missingness > 0.0:
            for m in range(self.M):
                nas = s.random.choice(range(self.N * self.D[m]),
                                      size=int(missingness * self.N *
                                               self.D[m]),
                                      replace=False)
                tmp = Y[m].flatten()
                tmp[nas] = s.nan
                Y[m] = tmp.reshape((self.N, self.D[m]))
        if missing_view > 0.0:  # percentage of samples missing a view
            # select samples missing one view
            n_missing = s.random.choice(range(self.N),
                                        int(missing_view * self.N),
                                        replace=False)
            Y[0][n_missing, :] = s.nan

        # Convert data to pandas data frame
        for m in range(self.M):
            Y[m] = pd.DataFrame(data=Y[m])

        return Y
Code example #42
 def draw(self):
     return min(poisson.rvs(self.p), self.trunc)
Code example #43
from scipy.stats import poisson
import seaborn as sb

n = 100000
x = poisson.rvs(mu=5.2, size =n )

sum=0
count = 0
num=0
while num<n:
    sum = sum + x[num]
    if x[num]< 2:
        count = count+1
    num = num+1

print(count/n)
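The loop estimates P(X < 2) by simulation; the analytic counterpart, using the poisson import already in scope, is the CDF at 1:

print(poisson.cdf(1, mu=5.2))  # P(X <= 1), which count/n above approximates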
Code example #44
File: cpu_stress_pod.py Project: thomasaab/teg
def run_module():
    # define available arguments/parameters a user can pass to the module
    module_args = dict(
        namespace=dict(type='str', required=True),
        pod=dict(type='str', required=True),
        amount=dict(type='int', required=True),
        duration=dict(type='int', required=True),
    )

    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)

    rc = 0
    stderr = "err"
    stderr_lines = ["errl1", "errl2"]
    stdout = "out"
    stdout_lines = ["outl1", "outl1"]

    module.log(msg='test!!!!!!!!!!!!!!!!!')

    namespace = module.params['namespace']
    amount = module.params['amount']

    result = dict(
        changed=True,
        stdout=stdout,
        stdout_lines=stdout_lines,
        stderr=stderr,
        stderr_lines=stderr_lines,
        rc=rc,
    )

    result['fact'] = random.choice(FACTS).format(
        name=module.params['namespace'])

    # random numbers from poisson distribution
    n = amount
    a = 0
    duration = module.params['duration']
    load_kubernetes_config()
    configuration = client.Configuration()
    configuration.assert_hostname = False
    client.api_client.ApiClient(configuration=configuration)

    podName = module.params['pod']
    if (podName == 'random poisson'):
        data_poisson = poisson.rvs(mu=10, size=n, loc=a)
        counts, bins, bars = plt.hist(data_poisson)
        plt.close()
        for experiment in counts:
            pod_list = get_pods(namespace=namespace)
            aux_li = []
            for fil in pod_list.items:
                if fil.status.phase == "Running":
                    aux_li.append(fil)
            pod_list = aux_li

            # From the Running pods I randomly choose those to die
            # based on the histogram length
            print("-------")
            print("Pod list length: " + str(len(pod_list)))
            print("Number of pods to get: " + str(int(experiment)))
            print("-------")
            # In the case of the experiment being longer than the pod list,
            # then the maximum will be the length of the pod list
            if (int(experiment) > len(pod_list)):
                to_be_cpu = random.sample(pod_list, len(pod_list))
            else:
                to_be_cpu = random.sample(pod_list, int(experiment))

            for pod in to_be_cpu:
                inyect_cpu(pod.metadata.name, pod.metadata.namespace, module,
                           duration)
            global_kill.append((datetime.datetime.now(), int(experiment)))
            # time.sleep(10)
            print(datetime.datetime.now())
    else:
        pod = get_pod_by_name(namespace=namespace, name=podName)
        inyect_cpu(pod.metadata.name, pod.metadata.namespace, module, duration)
    print("Ending histogram execution")

    if module.check_mode:
        return result

    module.exit_json(**result)
Code example #45
File: iet_plots.py Project: rosafilgueira/VarPy
def rate_histogram(obj1,
                   model=None,
                   interval=None,
                   t_lims=None,
                   lon_lims=None,
                   lat_lims=None,
                   z_lims=None,
                   Mc=None):
    """
    Plot a histogram of earthquake rates
    
    Args:
        obj1: a varpy object containing event catalogue data
        model: option to fit and bootstrap CoIs for model. Existing options: Poisson
        interval: bin width (default is daily)
        t_lims: [t_min, t_max] defining time axis limits
        lon_lims: [lon_min, lon_max] defining x-axis limits        
        lat_lims: [lat_min, lat_max] defining y-axis limits
        z_lims: [z_min, z_max] defining depth range
        Mc: magnitude cut-off
    
    Returns:
        fig1: a png image of the resulting plot
    """
    if obj1.type == 'volcanic':
        data = obj1.ecvd.dataset
        header = obj1.ecvd.header
    else:
        data = obj1.ecld.dataset
        header = obj1.ecld.header

    if t_lims is not None:
        try:
            t_min = conversion.date2int(t_lims[0])
            t_max = conversion.date2int(t_lims[1])
        except:
            t_min = float(t_lims[0])
            t_max = float(t_lims[1])
            pass
        data = data[logical_and(data[:, header.index('datetime')] >= t_min,
                                data[:, header.index('datetime')] < t_max), :]

    if lon_lims is not None:
        data = data[
            logical_and(data[:, header.index('longitude')] >= lon_lims[0],
                        data[:, header.index('longitude')] < lon_lims[1]), :]

    if lat_lims is not None:
        data = data[
            logical_and(data[:, header.index('latitude')] >= lat_lims[0],
                        data[:, header.index('latitude')] < lat_lims[1]), :]

    if z_lims is not None:
        data = data[logical_and(data[:, header.index('depth')] >= z_lims[0],
                                data[:, header.index('depth')] < z_lims[1]), :]

    if Mc is not None:
        data = data[data[:, header.index('magnitude')] >= Mc, :]

    dt_data = data[:, header.index('datetime')]

    if t_lims is None:
        t_min = floor(dt_data.min())
        t_max = ceil(dt_data.max())

    if interval is not None:
        bin_width = interval
    else:
        bin_width = 1.

    der_bins = arange(t_min, t_max + bin_width, bin_width)

    ders, der_bes = histogram(dt_data, der_bins)

    rate_bins = arange(-0.5, ders.max() + 1.5)
    mid_rate_bins = rate_bins[:-1] + diff(rate_bins) / 2.
    rate_freqs, rate_bes = histogram(ders, rate_bins)

    fig1 = plt.figure(1, figsize=(8, 6))
    ax1 = fig1.add_subplot(111, axisbg='lightgrey')

    ax1.bar(mid_rate_bins,
            rate_freqs,
            color='grey',
            edgecolor='darkgrey',
            align='center')

    if model is not None:
        der_mean = mean(ders)

        #Bootstrap 95% COIs
        rate_bstps = 1000

        rates_bstps = zeros((len(rate_bins) - 1, rate_bstps))

        for j in range(rate_bstps):
            if model == 'Poisson':
                model_sim = poisson.rvs(der_mean, size=len(ders))

            rates_bstps[:, j], model_bes = histogram(model_sim, rate_bins)

        poisson_coi_95 = scoreatpercentile(rates_bstps.transpose(), 95, axis=0)
        poisson_coi_5 = scoreatpercentile(rates_bstps.transpose(), 5, axis=0)

        ax1.plot(
            mid_rate_bins,
            poisson.pmf(mid_rate_bins, der_mean) * diff(rate_bins) * len(ders),
            '-or')
        ax1.plot(mid_rate_bins, poisson_coi_95, 'r:')
        ax1.plot(mid_rate_bins, poisson_coi_5, 'r:')

    ax1.set_xlabel('Rate', fontsize=8)
    ax1.set_ylabel('Frequency', fontsize=8)

    ax1.xaxis.set_ticks_position('bottom')

    png_name = obj1.figure_path + '/rate_histogram.png'
    eps_name = obj1.figure_path + '/rate_histogram.eps'
    plt.savefig(png_name)
    plt.savefig(eps_name)
Code example #46
File: synth.py Project: Ahdhn/OpenSubdiv
from scipy.stats import poisson
from networkx import expected_degree_graph  # assumed: the original file's imports are not shown

def gen_graph(mu, num_nodes):
    expected_degrees = poisson.rvs(mu, size=num_nodes)
    return expected_degree_graph(expected_degrees)
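A usage sketch (values made up): each node gets a Poisson-distributed expected degree, and networkx wires up edges to match those expectations.

G = gen_graph(mu=2.5, num_nodes=100)
print(G.number_of_nodes(), G.number_of_edges())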
Code example #47
import numpy as np
from scipy.stats import poisson, binom

def vectores1(n, lambda1, p):
    x = poisson.rvs(lambda1, size=n)
    y = np.empty(shape=n)
    for i in range(n):
        y[i] = binom.rvs(x[i], p, size=1)
    return np.column_stack((x, y))
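A quick check of the thinning property this encodes: if X ~ Poisson(lambda) and Y | X ~ Binomial(X, p), then marginally Y ~ Poisson(lambda * p), so the second column's mean should land near lambda1 * p.

xy = vectores1(20000, lambda1=4.0, p=0.3)
print(xy[:, 1].mean())  # close to 4.0 * 0.3 = 1.2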
Code example #48
File: code_8.13.py Project: rohitn/BMAD
import numpy as np
import pymc3 as pm  # assumed: the excerpt's earlier imports are not shown
from scipy.stats import norm, uniform, poisson

# Data
np.random.seed(1656)  # set seed to replicate example
N = 2000  # number of obs in model
NGroups = 10

x1 = uniform.rvs(size=N)
x2 = uniform.rvs(size=N)

Groups = np.array([200 * [i] for i in range(NGroups)]).flatten()
a = norm.rvs(loc=0, scale=0.5, size=NGroups)
eta = 1 + 0.2 * x1 - 0.75 * x2 + a[list(Groups)]
mu = np.exp(eta)

y = poisson.rvs(mu, size=N)

with pm.Model() as model:
    # Define priors
    sigma_a = pm.Uniform('sigma_a', 0, 100)
    beta1 = pm.Normal('beta1', 0, sd=100)
    beta2 = pm.Normal('beta2', 0, sd=100)
    beta3 = pm.Normal('beta3', 0, sd=100)

    # priors for random intercept (RI) parameters
    a_param = pm.Normal(
        'a_param',
        np.repeat(0, NGroups),  # mean
        sd=np.repeat(sigma_a, NGroups),  # standard deviation
        shape=NGroups)  # number of RI parameters
Code example #49
def mock(density=[], boxsize=100, Npart=100):
    from scipy.stats import poisson
    dim = np.shape(np.shape(density))[0]
    Nmesh = np.shape(density)[0]
    ll = boxsize / Nmesh
    density = poisson.rvs(Npart * density / np.sum(density))

    i = 0
    j = 0
    k = 0

    if (dim == 3):
        xpoints = np.random.uniform(low=i * ll,
                                    high=(i + 1) * ll,
                                    size=(density[i, j, k]))
        ypoints = np.random.uniform(low=j * ll,
                                    high=(j + 1) * ll,
                                    size=(density[i, j, k]))
        zpoints = np.random.uniform(low=k * ll,
                                    high=(k + 1) * ll,
                                    size=(density[i, j, k]))
        points = np.transpose(np.vstack((xpoints, ypoints, zpoints)))
    else:
        xpoints = np.random.uniform(low=i * ll,
                                    high=(i + 1) * ll,
                                    size=(density[i, j]))
        ypoints = np.random.uniform(low=j * ll,
                                    high=(j + 1) * ll,
                                    size=(density[i, j]))
        points = np.transpose(np.vstack((xpoints, ypoints)))
    for i in range(1, Nmesh):
        for j in range(Nmesh):
            if (dim == 3):
                for k in range(Nmesh):
                    xpoints = np.random.uniform(low=i * ll,
                                                high=(i + 1) * ll,
                                                size=(density[i, j, k]))
                    ypoints = np.random.uniform(low=j * ll,
                                                high=(j + 1) * ll,
                                                size=(density[i, j, k]))
                    zpoints = np.random.uniform(low=k * ll,
                                                high=(k + 1) * ll,
                                                size=(density[i, j, k]))
                    points = np.vstack(
                        (points,
                         np.transpose(np.vstack((xpoints, ypoints, zpoints)))))
            else:
                xpoints = np.random.uniform(low=i * ll,
                                            high=(i + 1) * ll,
                                            size=(density[i, j]))
                ypoints = np.random.uniform(low=j * ll,
                                            high=(j + 1) * ll,
                                            size=(density[i, j]))
                points = np.vstack(
                    (points, np.transpose(np.vstack((xpoints, ypoints)))))
    print('number point samples:', np.shape(points)[0])

    if (dim == 3):
        rans = np.random.uniform(low=0.0, high=boxsize, size=(10 * Npart, 3))
    if (dim == 2):
        rans = np.random.uniform(low=0.0, high=boxsize, size=(10 * Npart, 2))

    return points, rans
Code example #50
#brazil = 1
#x = "credit"
#print('%(x)s-%(germany)d : %(brazil)d' % (vars()))
#credit-7 : 1


#dict_test = {'x':"credit", 'germany':7, 'brazil':1}
#print('{tk[x]}-{tk[germany]} : {tk[brazil]}'.format(tk = dict_test))
#credit-7 : 1


# Simulate scores with a Poisson distribution

from scipy.stats import poisson

r = poisson.rvs(3, size=1000)[0]



# Team attack and defence strengths are as follows:

#alpha,beta
#Arg,5,2
#Nig,3,3


# Simulate match scores

from scipy.stats import poisson
import pandas as pd  # read team scoring-rate and conceding-rate parameters
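The snippet cuts off before the simulation itself; purely as an assumed sketch of what the comments describe, each team's goals could be drawn from a Poisson whose mean combines its attack rating with the opponent's defence (this pairing of the alpha/beta values is illustrative, not from the original):

from scipy.stats import poisson

arg_goals = poisson.rvs(5.0 / 3.0)  # Arg attack 5 vs Nig defence 3 (assumed form)
nig_goals = poisson.rvs(3.0 / 5.0)  # Nig attack 3 vs Arg defence 5 (assumed form)
print(arg_goals, nig_goals)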
Code example #51
def nu_floor(sig_low, sig_high, n_sigs=10, model="sigma_si", mass=6., fnfp=1.,
             element='germanium', exposure=1., delta=0., GF=False, time_info=False,
             file_tag='', n_runs=20):

    sig_list = np.logspace(np.log10(sig_low), np.log10(sig_high), n_sigs) 

    testq = 0
	
    for sigmap in sig_list:

        coupling = "fnfp" + model[5:]

        print('Run Info:')
        print('Experiment: ', element)
        print('Model: ', model)
        print('Coupling: ', coupling, fnfp)
        print('Mass: {:.0f}, Sigma: {:.2e}'.format(mass, sigmap))

        file_info = path + '/Saved_Files/'
        file_info += element + '_' + model + '_' + coupling + '_{:.0f}'.format(fnfp)
        file_info += '_Exposure_{:.1f}_tonyr_DM_Mass_{:.0f}_GeV'.format(exposure, mass)
        file_info += file_tag + '.dat'
        print('Output File: ', file_info)
        print('\n')
        experiment_info, Qmin, Qmax = Element_Info(element)

        drdq_params = default_rate_parameters.copy()
        drdq_params['element'] = element
        drdq_params['mass'] = mass
        drdq_params[model] = sigmap
        drdq_params[coupling] = fnfp
        drdq_params['delta'] = delta
        drdq_params['GF'] = GF
        drdq_params['time_info'] = time_info

        # 3\sigma for Chi-square Dist with 1 DoF means q = 9.0
        q_goal = 9.0

        # make sure there are enough points for numerical accuracy/stability
        er_list = np.logspace(np.log10(Qmin), np.log10(Qmax), 500)
        time_list = np.zeros_like(er_list)

        dm_spec = dRdQ(er_list, time_list, **drdq_params) * 10. ** 3. * s_to_yr
        dm_rate = R(Qmin=Qmin, Qmax=Qmax, **drdq_params) * 10. ** 3. * s_to_yr * exposure
        dm_pdf = dm_spec / dm_rate
        cdf_dm = dm_pdf.cumsum()
        cdf_dm /= cdf_dm.max()
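        # cdf_dm tabulates the CDF over er_list; the event loop below inverts
        # it (via argmin) to draw simulated recoil energies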
        dm_events_sim = int(dm_rate * exposure)
        
        # TODO generalize beyond B8
        nu_comp = ['B8','hep']
		
        # neutrino ER spectrum
		
        nuspec = np.zeros(2, dtype=object)
        nu_rate = np.zeros(2, dtype=object)
        nu_pdf = np.zeros(2, dtype=object)
        cdf_nu = np.zeros(2, dtype=object)
        Nu_events_sim = np.zeros(2)
		
        nuspec[0] = np.zeros_like(er_list)
        nuspec[1] = np.zeros_like(er_list)
		
        for iso in experiment_info:
            nuspec[0] += Nu_spec().nu_rate(nu_comp[0], er_list, iso)
            nuspec[1] += Nu_spec().nu_rate(nu_comp[1], er_list, iso)

        nu_rate[0] = np.trapz(nuspec[0], er_list)
        nu_pdf[0] = nuspec[0] / nu_rate[0]
        cdf_nu[0] = nu_pdf[0].cumsum()
        cdf_nu[0] /= cdf_nu[0].max()
        Nu_events_sim[0] = int(nu_rate[0] * exposure)
		
        nu_rate[1] = np.trapz(nuspec[1], er_list)
        nu_pdf[1] = nuspec[1] / nu_rate[1]
        cdf_nu[1] = nu_pdf[1].cumsum()
        cdf_nu[1] /= cdf_nu[1].max()
        Nu_events_sim[1] = int(nu_rate[1] * exposure)
		
        nevts_n = np.zeros(2)
        nevent_dm = 0

        tstat_arr = np.zeros(n_runs)
        # While loop goes here. Fill tstat_arr for new sims and extract median/mean
        nn = 0
		
        while nn < n_runs:

            print('Run {:.0f} of {:.0f}'.format(nn + 1, n_runs))
            nevts_dm = poisson.rvs(int(dm_events_sim))
            nevts_n[0] = poisson.rvs(int(Nu_events_sim[0])) 
            nevts_n[1] = poisson.rvs(int(Nu_events_sim[1])) 
            if not QUIET:
                print('Predicted Number of Nu events: {}'.format(Nu_events_sim[0] + Nu_events_sim[1]))
                print('Predicted Number of DM events: {}'.format(dm_events_sim))

            # Simulate events
            print('ev_nu1:{}  ev_nu2:{} ev_dm:{}'.format(nevts_n[0], nevts_n[1], nevts_dm))

            Nevents = int(nevts_n[0] + nevts_n[1] + nevts_dm)
            if not QUIET:
                print('Simulating {:.0f} events...'.format(Nevents))
            u = random.rand(Nevents)
            # Generalize to a rejection-sampling algorithm for the time implementation
            e_sim = np.zeros(Nevents)
            for i in range(Nevents):
                if i < int(nevts_n[0]):
                    e_sim[i] = er_list[np.absolute(cdf_nu[0] - u[i]).argmin()]
                elif i < int(nevts_n[0] + nevts_n[1]):  # cumulative threshold for the second nu component
                    e_sim[i] = er_list[np.absolute(cdf_nu[1] - u[i]).argmin()]
                else:
                    e_sim[i] = er_list[np.absolute(cdf_dm - u[i]).argmin()]
            times = np.zeros_like(e_sim)
            # print(e_sim)

            if not QUIET:
                print('Running Likelihood Analysis...')
            # Minimize likelihood -- make sure this minimization doesn't fail; consider using gradient info
            like_init_nodm = Likelihood_analysis(model, coupling, mass, 0., fnfp,
                                                 exposure, element, experiment_info, e_sim, times,
                                                 Qmin=Qmin, Qmax=Qmax, time_info=time_info, GF=False)
            # x0: one normalization entry per neutrino component
            max_nodm = minimize(like_init_nodm.likelihood, np.array([0., 0.]), args=(np.array([-100.])), tol=0.01)
            # print(max_nodm)

            like_init_dm = Likelihood_analysis(model, coupling, mass, 1., fnfp,
                                               exposure, element, experiment_info, e_sim, times,
                                               Qmin=Qmin, Qmax=Qmax, time_info=time_info, GF=False)
            print(sigmap, type(sigmap))
            
            # x0: the neutrino normalizations, then log10 of the DM cross-section
            max_dm = minimize(like_init_dm.like_multi_wrapper, np.array([0., 0., np.log10(sigmap)]), tol=0.01,
                              jac=False)

            if not QUIET:
                print('BF Neutrino normalization without DM: {:.2e}'.format(10.**max_nodm.x[0]))
                print('BF Neutrino normalization with DM: {:.2e}'.format(10.**max_dm.x[0]))
                # the DM cross-section is the third fit parameter
                print('BF DM sigma_p: {:.2e} \n\n'.format(10.**max_dm.x[2]))

            test_stat = np.max([max_nodm.fun - max_dm.fun, 0.])

            pval = chi2.sf(test_stat,1)

            if not QUIET:
                print('TS: ', test_stat)
                print('p-value: ', pval)

            tstat_arr[nn] = test_stat
            nn += 1

        print('FINISHED CYCLE \n')
        print('True DM mass: ', mass)
        print('True DM sigma_p: ', sigmap)
        print('Median Q: {:.2f}'.format(np.median(tstat_arr)))
        print('Mean Q: {:.2f}'.format(np.mean(tstat_arr)))
		
        testq = np.mean(tstat_arr)

        print('testq (mean, end of cycle): {}'.format(testq))

        if testq > 20:
            print('testq: {} --> BREAK'.format(testq))
            
            break
        
        elif testq > 1:
            print('testq: {} --> WRITE'.format(testq))
            
            if os.path.exists(file_info):
                load_old = np.loadtxt(file_info)
                new_arr = np.vstack((load_old, np.array([np.log10(sigmap), np.mean(tstat_arr)])))
                new_arr = new_arr[new_arr[:, 0].argsort()]
                np.savetxt(file_info, new_arr)
            else:
                np.savetxt(file_info, np.array([np.log10(sigmap), np.mean(tstat_arr)]))  # mean, matching the appended rows above
        

    return
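
# The per-event argmin scan above is an O(N) inverse-CDF lookup per draw. A
# vectorized inverse-transform sketch using np.searchsorted; the names here
# are illustrative, not from the source.
import numpy as np

def sample_from_tabulated_cdf(grid, cdf, n, rng=np.random.default_rng()):
    # grid: abscissa values; cdf: monotone array normalized so cdf[-1] == 1
    u = rng.uniform(size=n)
    idx = np.searchsorted(cdf, u)  # first bin whose CDF reaches u
    return grid[np.clip(idx, 0, len(grid) - 1)]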
Code example #52
def sample_episodes(numepisodes, physics):

    episodes = []

    total_events, num_reasonable_events = 0, 0

    for epinum in range(numepisodes):

        events = []
        detections = []
        assocs = []

        # first generate all the events

        numevents = poisson.rvs(physics.lambda_e * 4 * pi * physics.R**2 *
                                physics.T)
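        # expected count = event rate per unit area-time x Earth's surface
        # area (4*pi*R^2) x time window T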

        for evnum in range(numevents):

            # longitude is uniform from -180 to 180
            evlon = uniform.rvs(-180, 360)
            # sin(latitude) is uniform from -1 to 1
            evlat = degrees(arcsin(uniform.rvs(-1, 2)))
            # magnitude has an exponential distribution as per Gutenberg-Richter law
            while True:
                evmag = expon.rvs(physics.mu_m, physics.theta_m)
                # magnitude saturates at some maximum value,
                # re-sample if we exceed the max
                if evmag > physics.gamma_m:
                    continue
                else:
                    break

            # time is uniform
            evtime = uniform.rvs(0, physics.T)

            event = Event(evlon, evlat, evmag, evtime)

            events.append(event)

            truedets = []

            #print ("event mag %f" % event.mag)

            # for each event generate its set of true detections
            for stanum, station in enumerate(STATIONS):

                dist = compute_distance((station.lon, station.lat),
                                        (event.lon, event.lat))
                sta_to_ev_az = compute_azimuth((station.lon, station.lat),
                                               (event.lon, event.lat))

                detprob = logistic(physics.mu_d0[stanum] +
                                   physics.mu_d1[stanum] * event.mag +
                                   physics.mu_d2[stanum] * dist)

                #print ("stanum %d dist %f detprob %f" % (stanum, dist, detprob))

                # is detected ?
                if bernoulli.rvs(detprob):

                    dettime = laplace.rvs(
                        event.time + compute_travel_time(dist) +
                        physics.mu_t[stanum], physics.theta_t[stanum])

                    # Note: the episode only has detections within the first T
                    # seconds. Late arriving detections will not be available.
                    if dettime < physics.T:
                        degdiff = laplace.rvs(physics.mu_z[stanum],
                                              physics.theta_z[stanum])
                        detaz = (sta_to_ev_az + degdiff + 360) % 360

                        detslow = laplace.rvs(
                            compute_slowness(dist) + physics.mu_s[stanum],
                            physics.theta_s[stanum])

                        while True:
                            # resample if the detection amplitude is infinite
                            try:
                                detamp = exp(
                                    norm.rvs(
                                        physics.mu_a0[stanum] +
                                        physics.mu_a1[stanum] * event.mag +
                                        physics.mu_a2[stanum] * dist,
                                        physics.sigma_a[stanum]))

                            except FloatingPointError:
                                continue

                            # disallow zero or infinite amplitudes
                            if detamp == 0 or isinf(detamp):
                                continue
                            break

                        truedets.append(len(detections))
                        detections.append(
                            Detection(stanum, dettime, detaz, detslow, detamp))

            assocs.append(truedets)

            total_events += 1

            if len(truedets) >= 2:
                num_reasonable_events += 1

        # now generate the false detections
        for stanum in range(len(STATIONS)):
            numfalse = poisson.rvs(physics.lambda_f[stanum] * physics.T)

            for dnum in range(numfalse):
                dettime = uniform.rvs(0, physics.T)
                detaz = uniform.rvs(0, 360)
                detslow = uniform.rvs(
                    compute_slowness(180),
                    compute_slowness(0) - compute_slowness(180))

                while True:
                    # resample if the detection amplitude is infinite
                    try:
                        detamp = exp(
                            cauchy.rvs(physics.mu_f[stanum],
                                       physics.theta_f[stanum]))
                    except FloatingPointError:
                        continue

                    # disallow zero or infinite amplitudes
                    if detamp == 0 or isinf(detamp):
                        continue
                    break

                detections.append(
                    Detection(stanum, dettime, detaz, detslow, detamp))

        episodes.append(Episode(events, detections, assocs))

    print("{:d} events generated".format(total_events))
    print("{:.1f} % events have at least two detections".format(
        100 * num_reasonable_events / total_events))
    return episodes
Code example #53
def poisson_distribution(select_size, power=10):
    return poisson.rvs(power, size=select_size)
"""Assignment8.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1LbUpvNyTZ6qFHaKWOOkHeb2sZgD2i8dt
"""

import numpy as np
from scipy.stats import poisson

# number of time intervals
sample_size = 100000

# simulating job arrivals at the workstation
arrival_data = poisson.rvs(mu=12, size=sample_size)
# simulating job completions at the workstation
service_data = poisson.rvs(mu=15, size=sample_size)
# print(arrival_data - service_data)

# calculating the number of jobs at the workstation
jobs_at_machine = [0]
for x in range(1, sample_size):
    jobs_tentative = jobs_at_machine[x - 1] + arrival_data[x] - service_data[x]
    if jobs_tentative >= 0:
        jobs_at_machine.append(jobs_tentative)
    else:
        jobs_at_machine.append(jobs_at_machine[x - 1])
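
# The hardcoded 4 below is presumably the steady-state M/M/1 value:
# rho = 12/15, so mean jobs = rho / (1 - rho) = 4.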
print(
    "Experimental and theoretical expected number of jobs at workstation: {} -- {}"
    .format(np.sum(jobs_at_machine) / sample_size, 4))
Code example #55
File: utility_old.py  Project: xuhuifan/SDREM
def initialize_model(dataR, dataR_H, dataNum, KK, LL, feaMat):
    # Input:
    # dataR: positive relational data # positive edges x 2
    # KK: number of communities
    # LL: number of features
    # feaMat: feature matrix N X K

    # Output:
    # M: Poisson distribution parameter in generating X_{ik}
    # X_i: latent counts for node i
    # Z_ik: latent integer summary, calculated as \sum_{j,k_2} Z_{ij,kk_2}
    # Z_k1k2: latent integer summary, calculated as \sum_{i,j} Z_{ij,k_1k_2}
    # pis: LL X N X KK: layer-wise mixed-membership distributions
    # FT: F X K, feature transition coefficients
    # betas: LL X N X N: layer-wise information propagation coefficient
    # Lambdas: community compatibility matrix
    # QQ: scaling parameters for Lambdas
    # scala_val: not used at the moment

    pis = np.zeros((LL, dataNum, KK))

    betas = gamma.rvs(1, 1, size=(LL - 1, dataNum, dataNum))
    FT = gamma.rvs(1, 1, size=(feaMat.shape[1], KK))

    pis_ll = np.dot(feaMat, FT) + 0.1

    psi_inte = gamma.rvs(a=pis_ll / (1 + 0.01), scale=1)
    psi_inte = psi_inte / (np.sum(psi_inte, axis=1)[:, np.newaxis]) + 1e-6
    pis[-1] = psi_inte / (np.sum(psi_inte, axis=1)[:, np.newaxis])

    for ll in np.arange(LL - 2, -1, -1):  #  From LL-2 to 0

        psi_ll = np.dot(betas[ll].T, pis[ll + 1])

        psi_ll += 0.01
        psi_inte = gamma.rvs(a=psi_ll / (1 + 0.01), scale=1)
        psi_inte = psi_inte / (np.sum(psi_inte, axis=1)[:, np.newaxis]) + 1e-6
        pis[ll] = psi_inte / (np.sum(psi_inte, axis=1)[:, np.newaxis])

    # for ii in range(dataNum):
    #     pis[-1][ii] = dirichlet.rvs(pis_ll[ii])
    #
    # for ll in np.arange(LL-2, -1, -1):  #  From LL-2 to 0
    #     psi_ll = np.dot(betas[ll].T, pis[ll+1])     ########################### update here
    #     psi_ll += 0.1 #
    #     for ii in range(dataNum):
    #         pis[ll, ii] = dirichlet.rvs(psi_ll[ii])

    M = dataNum
    X_i = poisson.rvs(M * pis[0]).astype(int)

    ################################
    ################################

    R_KK = np.ones((KK, KK)) / (KK**2)
    np.fill_diagonal(R_KK, 1 / KK)
    Lambdas = gamma.rvs(a=R_KK, scale=1)

    # k_Lambda = 1/KK
    # c_val_Lambda = 1
    # r_k = gamma.rvs(a = k_Lambda, scale = 1, size = KK)/c_val_Lambda
    #
    # Lambdas = np.dot(r_k.reshape((-1, 1)), r_k.reshape((1, -1)))
    # epsilon = 1
    # np.fill_diagonal(Lambdas, epsilon*r_k)

    ################################
    ################################

    Z_ik = np.zeros((dataNum, KK), dtype=int)
    Z_k1k2 = np.zeros((KK, KK), dtype=int)
    for ii in range(len(dataR)):
        pois_lambda = (X_i[dataR[ii][0]][:, np.newaxis] *
                       X_i[dataR[ii][1]][np.newaxis, :]) * Lambdas
        total_val = positive_poisson_sample(np.sum(pois_lambda))

        new_counts = np.random.multinomial(
            total_val,
            pois_lambda.reshape((-1)) / np.sum(pois_lambda)).reshape((KK, KK))
        Z_k1k2 += new_counts
        Z_ik[dataR[ii][0]] += np.sum(new_counts, axis=1)
        Z_ik[dataR[ii][1]] += np.sum(new_counts, axis=0)

    return M, X_i, Z_ik, Z_k1k2, pis, FT, betas, Lambdas
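
# positive_poisson_sample above is project code not shown here. A common way
# to draw a zero-truncated Poisson count looks like this sketch (not
# necessarily the project's implementation):
import numpy as np
from scipy.stats import poisson

def positive_poisson_sketch(lam, rng=np.random.default_rng()):
    # draw U uniformly from (P(X = 0), 1] and invert the Poisson CDF,
    # so the returned count is always >= 1
    u = rng.uniform(low=np.exp(-lam), high=1.0)
    return int(poisson.ppf(u, lam))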
Code example #56
print("Con, M = 40, n = 5 y N = 3:")
variable = hypergeom.rvs(Mvar, nvar, Nvar, size=size)
a3, b3 = np.unique(variable, return_counts=True)

c3 = b3 / size
f3 = c3[1]

print(
    "Probability of finding exactly one defective component\nin 10000000 simulations:",
    f3)

print("-----------------------------------------------------------------")
print("Ejercicio 3)\n")

print("Con lambda = 1:")
z = poisson.rvs(1, size=size)

a4, b4 = np.unique(z, return_counts=True)

c4 = b4 / size
poiss0 = (c4[0])
poiss1 = (c4[1])
poiss2 = (c4[2])
poiss3 = (c4[3])
poiss4 = (c4[4])
poiss5 = (c4[5])
poiss6 = (c4[6])

print("para k(overflow floods in 100 years) = 0 con 10000000 simulaciones: ",
      poiss0)
print("para k(overflow floods in 100 years) = 1 con 10000000 simulaciones: ",
Code example #57
File: resp_family.py  Project: wuyuchong/glm_lasso
def _resp_poisson(x, beta):
    eta = np.dot(x, beta)
    mu = np.exp(eta)
    return poisson.rvs(mu)
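
# A minimal usage sketch; the design matrix and coefficients below are made up
# for illustration.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 3))        # hypothetical design matrix
beta = np.array([0.2, -0.1, 0.3])  # hypothetical coefficients
print(_resp_poisson(X, beta))      # one Poisson draw per row of X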
Code example #58
hb = hb.merge(right = c_event_user_pivot, how = 'left', left_on = 'org:resource' , right_index = True)

cluster_user = hb.filter(['case','event','cluster'])
cluster_user_pivot = cluster_user.pivot_table(values = 'case', index = 'cluster', columns = 'event',  aggfunc='count', fill_value = 0)


# creating a poisson distribution for the incoming events

hb_create_times = hb[hb['event'] == 'FIN'].filter(['completeTime']).sort_values(['completeTime'])
hb_timespan = hb_create_times.iloc[len(hb_create_times)-1,0] - hb_create_times.iloc[0,0]
mean_time_between_cases = hb_timespan / len(hb_create_times)
mean_hours_between_cases = mean_time_between_cases.total_seconds() / 60.0 / 60.0
print('Mean time between cases is: ' + str(round(mean_hours_between_cases,2)) + ' hours')

from scipy.stats import poisson
data_poisson = poisson.rvs(mu=mean_hours_between_cases, size=len(hb_create_times))
print(data_poisson)

import seaborn as sns
ax = sns.distplot(data_poisson,
                  bins=30,
                  kde=False,
                  color='skyblue',
                  hist_kws={"linewidth": 15,'alpha':1})
ax.set(xlabel='Poisson Distribution', ylabel='Frequency')
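
# note: distplot is deprecated in recent seaborn releases;
# sns.histplot(data_poisson, bins=30) gives an equivalent view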


# Calculating throughput times for the entering of distributions in Arena
min_completion_times = pd.read_csv('C:/Users/vince_000/Documents/GitHub/Sim_V_MzW/Data/Created/min_completion_times.txt')
min_completion_times['min_completion_time'] = min_completion_times['min_completion_time'].astype("datetime64") 
Code example #59
def generate_synthetic_activity_data():

    actions = ["Sin_5", "MA1", "Sin_1"]
    # transition matrix
    T = np.array([[0.4, 0.3, 0.3],
                  [0.7, 0.2, 0.1],
                  [0.4, 0.1, 0.5]])
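    # each row of T sums to 1: T[i, j] = P(next activity = j | current = i)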

    acts = [Sinusoidal(freq=5), MA1(), Sinusoidal(freq=2)]

    act_samples = []
    for i in range(len(acts)):
        act_samples.append(acts[i].get_samples(0.0, 100))

    iters = 2000
    avg_durs = [30, 30, 30]
    activity = 0  # start with A1
    starts = list()
    activities = list()
    samples = np.zeros(0)
    start = 0
    curr = 0.
    for i in range(iters):
        starts.append(start)
        activities.append(activity)
        dur = poisson.rvs(avg_durs[activity])
        curr_samples = acts[activity].get_samples(curr, dur)
        samples = np.append(samples, curr_samples)
        curr = curr_samples[-1]
        start += dur
        if False:
            activity += 1  # maybe lookup transition matrix later
            activity = 0 if activity >= len(actions) else activity
        else:
            activity = np.argmax(rnd.multinomial(1, T[activity, :]))

    n = len(samples)
    logger.debug("n: %d" % n)
    logger.debug("samples:\n%s" % str(list(samples)))

    # save the generated data to file(s)
    output_path = "./temp/timeseries"
    write_to_file(samples, "%s/samples_%d.csv" % (output_path, iters), add_row_index=True, fmt=["%3.6f"])
    write_to_file(np.array(activities), "%s/activities_%d.csv" % (output_path, iters), add_row_index=True, fmt=["%d"])
    write_to_file(np.array(starts), "%s/starts_%d.csv" % (output_path, iters), add_row_index=True, fmt=["%d"])

    pdfpath = "temp/timeseries/timeseries_simulation.pdf"
    dp = DataPlotter(pdfpath=pdfpath, rows=3, cols=1)

    for i in range(len(acts)):
        ns = len(act_samples[i])
        pl = dp.get_next_plot()
        plt.title("Simulated Time Series (%s)" % actions[i], fontsize=8)
        pl.set_xlim([0, ns])
        pl.plot(np.arange(0, ns), act_samples[i], 'b-')

    pl = dp.get_next_plot()
    plt.title("Simulated Time Series", fontsize=8)
    pl.set_xlim([0, n])
    pl.plot(np.arange(0, n), samples, 'b-')
    if False:
        for x in starts:
            pl.axvline(x, color='red', linestyle='solid')

    dp.close()
Code example #60
File: alpha_beta_plots.py  Project: ryu577/stochproc
from stochproc.count_distributions.compound_poisson import CompoundPoisson
from stochproc.hypothesis.rate_test import rateratio_test, rateratio_test_two_sided

# mpl.rcParams.update({'text.color' : "white",
#                         'axes.labelcolor' : "white",
#                         'xtick.color' : "white",
#                         'ytick.color' : "white",
#                         "axes.edgecolor" : "white"})

# fig, ax = plt.subplots(facecolor='black')
# #ax.set_axis_bgcolor("black")
# ax.set_facecolor("black")
#fig, ax = plt.subplots()

dist_rvs_compound = lambda lmb,t: CompoundPoisson.rvs_s(lmb*t,32,.3,compound='binom')
dist_rvs_poisson = lambda lmb,t: poisson.rvs(lmb*t)

def plot_tests_on_distributions():
    alphas1,betas1,alpha_hats1 = run_simulns(fn=dist_rvs_poisson)
    alphas2,betas2,alpha_hats2 = run_simulns(fn=dist_rvs_poisson, hypoth_fn=rateratio_test_two_sided)
    #alphas2,betas2,alpha_hats2 = run_simulns(fn=dist_rvs_compound, n_sim=50000)
    alphas3,betas3,alpha_hats3 = run_simulns(fn=dist_rvs_interarrivalw, n_sim=5000)
    alphas4,betas4,alpha_hats4 = run_simulns(fn=dist_rvs_interarrivalw, n_sim=5000, scale=25.0)
    alphas5,betas5,alpha_hats5 = run_simulns(fn=dist_rvs_interarrivalw, n_sim=5000, scale=1/10.0)

    plt.plot(alphas1,betas1,label='UMP poisson on poisson')
    plt.plot(alphas2,betas2,label='two-sided UMP poisson on poisson')
    plt.plot(alphas3,betas3,label='UMP poisson on interarrival weibull')
    plt.plot(alphas4,betas4,label='UMP poisson sc:25.0 on interarrival weibull')
    plt.plot(alphas5,betas5,label='UMP poisson sc:0.1 on interarrival weibull')
    plt.xlabel('Alpha')