Example #1
    def fitted_plots(self):
        n = self.count_data.number_of_observations()

        m = 16
        count_range = np.array(range(m))
        nbs = np.repeat(np.array(count_range), self.num_segments).reshape([self.num_segments, m], order='F')  # S-by-m
        segment_probs = self.segment_probs.reshape(self.num_segments, 1)  # make sure shape is S-by-1

        figs = list()
        for cat in range(self.count_data.num_categories):
            counts_observed = [(self.count_data.arr[:, cat] == count).sum() for count in count_range]

            poisson_means = self.lambdas[:, cat].reshape(self.num_segments, 1)  # S-by-1
            poisson_pmf = poisson(poisson_means).pmf(nbs)  # S-by-m
            if self.inflated_zeros:
                deflate_probs = self.deflate_probs[:, cat].reshape(self.num_segments, 1)  # S-by-1
                poisson_pmf *= deflate_probs
                poisson_pmf += (1-deflate_probs) * poisson(1e-2).pmf(nbs)
            counts_predicted = (poisson_pmf * segment_probs).sum(axis=0) * n

            fig = plt.figure()
            plt.plot(count_range, counts_predicted, '--D', label="Fitted", zorder=4)
            plt.bar(count_range-0.45, counts_observed, width=0.9, color='lightgray', label="Observed", linewidth=1, edgecolor="gray",zorder=3)
            plt.grid(axis='y', which='major', color=(0.1, 0.1, 0.1), linestyle=':',zorder=0)
            plt.xlabel("Count of %s" % self.count_data.category_names[cat], fontsize=16)
            plt.ylabel("Number of observations", fontsize=16)
            plt.xticks(range(16), fontsize=13)
            plt.tick_params('both', length=0, width=1, which='major')
            plt.title("Aggregate Comparison at NumSegments=%d" % self.num_segments,fontsize=18)
            plt.legend(fontsize=16)
            plt.gca().set_facecolor((0.98, 0.98, 0.99))  # set_axis_bgcolor was removed in matplotlib 2.x
            plt.xlim(-1,15.9)
            figs.append(fig)
Example #2
def test_intensity():
    from ctapipe.image.toymodel import Gaussian

    np.random.seed(0)
    geom = CameraGeometry.from_name('LSTCam')

    x, y = u.Quantity([0.2, 0.3], u.m)
    width = 0.05 * u.m
    length = 0.15 * u.m
    intensity = 50
    psi = '30d'

    # make a toymodel shower model
    model = Gaussian(x=x, y=y, width=width, length=length, psi=psi)

    image, signal, noise = model.generate_image(
        geom, intensity=intensity, nsb_level_pe=5,
    )

    # test if signal reproduces given cog values
    assert np.average(geom.pix_x.to_value(u.m), weights=signal) == approx(0.2, rel=0.15)
    assert np.average(geom.pix_y.to_value(u.m), weights=signal) == approx(0.3, rel=0.15)

    # test if signal reproduces given width/length values
    cov = np.cov(geom.pix_x.value, geom.pix_y.value, aweights=signal)
    eigvals, eigvecs = np.linalg.eigh(cov)

    assert np.sqrt(eigvals[0]) == approx(width.to_value(u.m), rel=0.15)
    assert np.sqrt(eigvals[1]) == approx(length.to_value(u.m), rel=0.15)

    # test if total intensity lies within the central 90 percent Poisson interval
    assert poisson(intensity).ppf(0.05) <= signal.sum() <= poisson(intensity).ppf(0.95)
Example #3
def poisson_best_fit(dataset, board_id=0):
    """ Returns the poisson fit for a sample set
    :param board_id: Board identifier.
    :param dataset: Data values.
    """
    # poisson_model = smf.poisson(AVG_TESTPROD_COLUMN + " ~ 1", data=dataset)
    poisson_model = sm.Poisson(dataset[AVG_TESTPROD_COLUMN], np.ones_like(dataset[AVG_TESTPROD_COLUMN]))

    result = poisson_model.fit()
    lmbda = np.exp(result.params)

    poisson_dist = stats.poisson(lmbda.values)
    lower_poisson_dist = stats.poisson(np.exp(result.conf_int().values)[0, 0])
    higher_poisson_dist = stats.poisson(np.exp(result.conf_int().values)[0, 1])

    print('result.params ', result.params)
    print('lmbda ', lmbda)
    print(result.summary())

    testprod_samples = dataset[AVG_TESTPROD_COLUMN]
    print('testprod_samples.mean ', testprod_samples.mean())

    plot_discrete_distributions(testprod_samples, [{"dist": poisson_dist,
                                                    "color": "red",
                                                    "name": "Poisson Fit"},
                                                   {"dist": lower_poisson_dist,
                                                    "color": "green",
                                                    "name": "Poisson Fit Lower"},
                                                   {"dist": higher_poisson_dist,
                                                    "color": "steelblue",
                                                    "name": "Poisson Fit Higher"}], board_id)

    return poisson_dist
Example #4
def test_intensity():
    from .. import toymodel

    np.random.seed(0)

    geom = CameraGeometry.from_name('LSTCam')

    width = 0.05
    length = 0.15
    intensity = 50

    # make a toymodel shower model
    model = toymodel.generate_2d_shower_model(
        centroid=(0.2, 0.3),
        width=width, length=length,
        psi='30d',
    )

    image, signal, noise = toymodel.make_toymodel_shower_image(
        geom, model.pdf, intensity=intensity, nsb_level_pe=5,
    )

    # test if signal reproduces given cog values
    assert np.average(geom.pix_x.value, weights=signal) == approx(0.2, rel=0.15)
    assert np.average(geom.pix_y.value, weights=signal) == approx(0.3, rel=0.15)

    # test if signal reproduces given width/length values
    cov = np.cov(geom.pix_x.value, geom.pix_y.value, aweights=signal)
    eigvals, eigvecs = np.linalg.eigh(cov)

    assert np.sqrt(eigvals[0]) == approx(width, rel=0.15)
    assert np.sqrt(eigvals[1]) == approx(length, rel=0.15)

    # test if total intensity lies within the central 90 percent Poisson interval
    assert poisson(intensity).ppf(0.05) <= signal.sum() <= poisson(intensity).ppf(0.95)
Example #5
def pmm_to_cluster(patientToGenes, classes, lam, p_k):
    clusterToPatient = {}

    for k in classes:
        clusterToPatient[k] = set()

    clusterToPatient[-1] = set()


    for patient in patientToGenes:
        d = len(patientToGenes[patient])

        max_class = -1
        max_ll = -np.inf
        for k in classes:
            # log posterior (up to a constant): log prior + log likelihood
            ll = np.log(p_k[k]) + np.log(poisson(lam[k]).pmf(d))
            if ll > -np.inf and ll > max_ll:
                max_class = k
                max_ll = ll


        clusterToPatient[max_class].add(patient)

    for cluster in clusterToPatient:
        if not clusterToPatient[cluster]:
            clusterToPatient[cluster].add('EMPTY PATIENTS')

    return clusterToPatient
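A quick way to sanity-check pmm_to_cluster is to feed it a toy two-component mixture; the data below are made up for illustration, not from the original repo:

import numpy as np
from scipy.stats import poisson

patientToGenes = {'p1': {'g1', 'g2'},
                  'p2': {'g%d' % i for i in range(12)}}
classes = [0, 1]
lam = [2.0, 10.0]   # per-cluster Poisson rates
p_k = [0.5, 0.5]    # mixing weights
print(pmm_to_cluster(patientToGenes, classes, lam, p_k))
# p1 (2 genes) should join the lam=2 cluster and p2 (12 genes) the lam=10 one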
Example #6
def gradWB(new,objVOI,BN,keep):
    points=objVOI._points
    kern=objVOI._k
    alpha1=0.5*((kern.alpha[0:n1])**2)/scaleAlpha**2
    alpha2=0.5*((kern.alpha[n1:n1+n2])**2)/scaleAlpha**2
    variance0=kern.variance
    wNew=new[0,n1:n1+n2].reshape((1,n2))
    gradWBarray=np.zeros([len(keep),n2])
    M=len(keep)
    parameterLamb=parameterSetsPoisson
   # quantil=int(poisson.ppf(.99999999,max(parameterLamb)))
   # expec=np.array([i for i in xrange(quantil)])
   # logproductExpectations=0.0
  #  a=range(n2)
    X=new[0,0:n1]
    W=new[0,n1:n1+n2]
   
    for i in range(n2):
        logproductExpectations=0.0
        a=list(range(n2))
        del a[i]
        for r in a:
            G=poisson(parameterLamb[r])
            temp=G.dist.expect(lambda z: np.exp(-alpha2[r]*((z-W[r])**2)),G.args)
            logproductExpectations+=np.log(temp)
        G=poisson(parameterLamb[i])
        temp=G.dist.expect(lambda z: -2.0*alpha2[i]*(-z+W[i])*np.exp(-alpha2[i]*((z-W[i])**2)),G.args)
        productExpectations=np.exp(logproductExpectations)*temp
        for j in range(M):
            gradWBarray[j,i]=np.log(variance0)-np.sum(alpha1*((points[keep[j],:]-X)**2))
            gradWBarray[j,i]=np.exp(gradWBarray[j,i])*productExpectations
    return gradWBarray
Example #7
def metro_exp_poison(chute=[1], N=1000):
    valores = chute
    taxa = []

    priori = expon(1)

    for i in range(N):
        expo_aux = expon(1)
        valor = expo_aux.rvs()

        U = random.rand()

        x_dado_y = poisson(valores[-1])
        y_dado_x = poisson(valor)

        teste = ( priori.pdf(valor) * x_dado_y.pmf(int(valores[-1])) ) / ( priori.pdf(valores[-1]) * y_dado_x.pmf(int(valor)) )

        if min([teste,1]) > U:
            valores.append(valor)
            taxa.append(1)
        else:
            valores.append(valores[-1])
            taxa.append(0)

    return {"valores":valores , "taxa":sum(taxa)/len(taxa)}
Example #8
def SalehValenzuela(**kwargs):
    """ generic Saleh and Valenzuela Model

    Parameters
    ----------

    Lam : clusters Poisson Process parameter (ns)
    lam : rays Poisson Process parameter (ns)
    Gam : clusters exponential decay factor
    gam : rays exponential decay factor
    tauM : maximum delay


    """
    defaults = { 'Lam' : 10.,
                 'lam' : 5.,
                 'Gam' : 30.,
                 'gam' : 5. ,
                 'tauM': 1000.}

    for k in defaults:
        if k not in kwargs:
            kwargs[k]=defaults[k]

    Lam = kwargs['Lam']
    lam = kwargs['lam']
    Gam = kwargs['Gam']
    gam = kwargs['gam']
    tauM = kwargs['tauM']
    Nc = int(tauM/Lam)
    Nr = int(tauM/lam)

    # cluster / ray inter-arrival times: assumed exponential with means Lam
    # and lam (e1, e2 and the bound T were undefined in the snippet and are
    # reconstructed here on that assumption)
    e1 = st.expon(scale=Lam)
    e2 = st.expon(scale=lam)
    T = tauM
    # cluster times of arrival
    tc   = np.cumsum(e1.rvs(Nc))
    tc   = tc[np.where(tc<T)]
    Nc   = len(tc)
    tauc = np.kron(tc,np.ones((1,Nr)))[0,:]
    # ray times of arrival; column-major ravel keeps each cluster's rays together
    taur = np.cumsum(e2.rvs((Nr,Nc)),axis=0).ravel(order='F')
    # exponential decays of cluster and rays
    etc = np.exp(-tauc/(1.0*Gam))
    etr = np.exp(-taur/(1.0*gam))
    et = etc*etr
    tau = tauc+taur
    # filtering < T and reordering in delay domain
    mask = np.where(tau<T)
    tau = tau[mask]
    et = et[mask]
    u = np.argsort(tau)
    taus = tau[u]
    ets = et[u]
    # limiting in delay domain
    v = np.where(taus<tauM)[0]
    taus = taus[v]
    ets = ets[v]
    SVir = bs.Bsignal(taus,ets)
    return(SVir)
Example #9
    def test_multivariate_gaussian(self):
        from scipy.stats import poisson, norm

        po_normal = poisson(10)
        po_anomaly = poisson(25)

        po_normal2 = poisson(2)
        po_anomaly2 = poisson(3)

        gs_normal = norm(1, 12)
        gs_anomaly = norm(2, 30)

        normal_len = 10000
        anomaly_len = 15

        data = np.column_stack(
            [
                [1] * (normal_len + anomaly_len),
                list(po_normal.rvs(normal_len)) + list(po_anomaly.rvs(anomaly_len)),
                list(po_normal2.rvs(normal_len)) + list(po_anomaly2.rvs(anomaly_len)),
                list(gs_normal.rvs(normal_len)) + list(gs_anomaly.rvs(anomaly_len)),
            ]
        )
        anomaly_detector = pyisc.AnomalyDetector(
            component_models=[
                pyisc.P_PoissonOnesided(1, 0),  # columns 1 and 0
                pyisc.P_Poisson(2, 0),  # columns 2 and 0
                pyisc.P_Gaussian(3)  # column 3
            ],
            output_combination_rule=pyisc.cr_max
        )

        anomaly_detector.fit(data)
        # The call above should fail this test if the problem still occurs:
        '''
        ---------------------------------------------------------------------------
AssertionError                            Traceback (most recent call last)
<ipython-input-5-ecd0c0a2a8d4> in <module>()
----> 1 anomaly_detector.fit(data);

C:\ProgramData\Anaconda3\envs\pyISC_py27\lib\site-packages\_pyisc_modules\BaseISC.pyc in fit(self, X, y)
    313         
    314
--> 315         return self._fit(X,y)
    316
    317     def _fit(self,X,y=None):

C:\ProgramData\Anaconda3\envs\pyISC_py27\lib\site-packages\_pyisc_modules\BaseISC.pyc in _fit(self, X, y)
    352
    353             if data_object is not None:
--> 354                 assert self._max_index < data_object.length()  # ensure that data distribution has not to large index into the data
    355
    356                 return self._fit(data_object)

AssertionError:
        '''

        assert True
Example #10
    def test_timer(self):
        #Test a basic timer
        print()
        print("=====Testing RandomTimer with immediate = False=======")
        dp.Session.new()
        sim = dp.Simulation()            
        model1 = dp.model.Component("Timer_Test_Model_A")
        sim.model = model1   

        sim.config.seed = 731                  
        dist1 = stats.poisson(10)
        cb_class = TimerCallback()
        model1.add_component(dp.model.RandomTimer("timer",
                                                  dist1,
                                                  cb_class.call))
        self.assertEqual(len(model1.components), 1)
        
        results = sim.irunf(100)
         
        trace1 = results.trace
        self.assertEqual(trace1[0]['time'], 7)
        self.assertEqual(trace1[1]['time'], 15)
        self.assertEqual(trace1[2]['time'], 23)
         
        #Test timer with immediate = True
        print()
        print("=====Testing RandomTimer with immediate = True======")
        model2 = dp.model.Component("Timer_Test_Model_B")
        dist2 = stats.poisson(150)
        model2.add_component(
                   dp.model.RandomTimer("Timer", dist2, timerCB_function))
        sim2 = dp.Simulation(model2)
        sim2.config.seed = 704
        results = sim2.irunf(1000)
        trace2 = results.trace
        self.assertEqual(trace2[0]['time'], 142)
        self.assertEqual(trace2[1]['time'], 285)
        self.assertEqual(len(trace2), 3)
         
        #Test timer with Priority.LATE
        print()
        print("=====Testing Priority Attribute =======================")
        session = dp.Session.new()
        model3 = dp.model.Component("Timer_Test_Model_C")
 
        dist3 = drand.get_empirical_pmf([5, 10], [0.3, 0.7])
        model3.add_component(
                   dp.model.RandomTimer("timer", dist3, timerCB_function,
                                priority = dp.LATE))
        session.sim = sim3 = dp.Simulation(model3)
        session.config.seed = 731        
        results = sim3.irunf(100)
        trace3 = results.trace
        self.assertEqual(trace3[0]['priority'], 1)
        self.assertEqual(trace3[1]['priority'], 1)
        self.assertEqual(trace3[2]['interval'], 10)
        self.assertEqual(trace3[4]['interval'], 10)
        self.assertEqual(trace3[10]['interval'], 10)
Example #11
def test_single_bin():
    conf = test_conf(mc=True, analysis_space=[['x', [-40,40 ]]])

    lf = BinnedLogLikelihood(conf)
    lf.add_rate_parameter('s0')
    lf.prepare()

    # Make a single event at x=0
    lf.set_data(np.zeros(1,
                         dtype=[('x', float), ('source', int)]))

    assert lf() == stats.poisson(1000).logpmf(1)
    assert lf(s0_rate_multiplier=5.4) == stats.poisson(5400).logpmf(1)
Example #12
def test_twobin_mc():
    conf = test_conf(mc=True, analysis_space=[['x', [-40, 0, 40]]])

    lf = BinnedLogLikelihood(conf)
    lf.add_rate_parameter('s0')
    lf.prepare()

    # Make 100 events at x=1
    lf.set_data(np.ones(100,
                        dtype=[('x', float), ('source', int)]))

    assert almost_equal(lf(),
                        stats.poisson(500).logpmf(100) + stats.poisson(500).logpmf(0),
                        1e-2)
Example #13
def upper_bound(mean, alpha):
    fun = poisson(mu=mean).cdf
    i = 0
    while True:
        if fun(i) > 1 - alpha/2.0:
            return i
        i += 1
Example #14
def lower_bound(mean, alpha):
    fun = poisson(mu=mean).cdf
    i = 0
    while True:
        if fun(i) > alpha/2.0:
            return max(i - 1, 0)
        i += 1
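Taken together, lower_bound and upper_bound bracket an approximate (1 - alpha) central interval. A small check against scipy's built-in interval method; the two can differ by one count at the edges because of strict vs. non-strict CDF comparisons:

from scipy.stats import poisson

mean, alpha = 10.0, 0.05
print(lower_bound(mean, alpha), upper_bound(mean, alpha))
print(poisson(mu=mean).interval(1 - alpha))  # scipy's equivalent interval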
Example #15
def generate_hits(n, nstrain, ntarget, ngene, prDecoy, lambdaTrue,
                  lambdaError=0., pFail=0., residual=1e-06,
                  targetProbs=None):
    '''generate lists of hit counts in real targets vs. nontargets in
    multihit screen, using probability of hitting target
    gene(s) conditioned on requiring at least one such hit.
    decoyCounts vectors are offset by poissonRange.nmin'''
    prTarget = PoissonRange(ntarget * lambdaTrue, residual / nstrain, 1)
    prTarget.renormalize() # condition on at least one hit in target region
    targetN = prTarget.sample_totals(nstrain, n)
    if targetProbs: # user-supplied target size vector
        targetProbs = [(p * (1. - pFail)) for p in targetProbs] + [pFail]
    else: # uniform target sizes
        targetProbs = ((1. - pFail) / ntarget,) * ntarget + (pFail,)
    targetProbs = numpy.array(targetProbs)
    if lambdaError:
        poisErr = stats.poisson(nstrain * lambdaError)
        targetNoise = poisErr.rvs((n, ntarget))
    decoyCounts = numpy.random.multinomial(ngene - ntarget,
                                           prDecoy.pmf, size=n)
    for i in range(n): # analyze replicates
        targetHits = numpy.random.multinomial(targetN[i], targetProbs)
        if lambdaError:
            hits = targetHits[:ntarget] + targetNoise[i]
        else:
            hits = targetHits[:ntarget]
        nmax = hits.max()
        if nmax < ntarget: # list all counts from 0 to nmax
            targetCounts = [(hits == j).sum() for j in range(nmax + 1)]
        else: # use efficient container for sparse list
            targetCounts = HitCountList(hits)
        yield targetCounts, decoyCounts[i]
Example #16
    def AverageUpperLimit(self, bkg):
        """
        For a number of events b, compute the average upper limit. That is:
        UL = Sum Po(n;b) * Upper (n,b)
        """
        ### The Poisson distribution, Po(n;b), is defined only for b>0.
        ### Therefore, this method returns 0 if bkg is negative, and uses
        ### a number close to 0 for the computation if bkg=0.
        if bkg<0.:
            return 0.
        elif bkg==0.:
            bkg=1.E-5

        ### We'll compute the sum in the range [-5sigma, +5sigma] around
        ### the mean, where sigma is the standard deviation of the Poisson
        ### distribution.
        sigma = math.sqrt(bkg)
        nmin = max(0,  int(bkg-5.*sigma))   # Use 0 if nmin<0
        nmax = max(20, int(bkg+5.*sigma)+1) # Use at least 20 for low means
        #print "nmin=%f, nmax=%f" % (nmin,nmax)

        po = poisson(bkg)
        UL = 0.

        for i in range(nmin, nmax):
            pmf = po.pmf(i)
            ul = self.FC.CalculateUpperLimit(i, bkg)
            #print "i=%i, Po(i)=%f, U(i,b)=%f" % (i, pmf, ul)
            UL += pmf * ul

        return UL
Example #17
def B(x,XW,n1,n2):
    x=np.array(x).reshape((x.shape[0],n1))
    results=np.zeros(x.shape[0])
    parameterLamb=parameterSetsPoisson
    X=XW[0:n1]
    W=XW[n1:n1+n2]
    alpha2=0.5*((kernel.alpha[n1:n1+n2])**2)/scaleAlpha**2
    alpha1=0.5*((kernel.alpha[0:n1])**2)/scaleAlpha**2
    variance0=kernel.variance
    
    logproductExpectations=0.0
    print("let's see B")
    print("x is")
    print(x)
    print("parameters")
    print(parameterLamb)
    print("X")
    print(X)
    print("W")
    print(W)
    print("alphas,variance")
    print(alpha1,alpha2,variance0)

    for j in range(n2):
        G=poisson(parameterLamb[j])
        temp=G.dist.expect(lambda z: np.exp(-alpha2[j]*((z-W[j])**2)),G.args)
        logproductExpectations+=np.log(temp)
    for i in range(x.shape[0]):
        results[i]=logproductExpectations+np.log(variance0)-np.sum(alpha1*((x[i,:]-X)**2))
    return np.exp(results)
Example #18
	def gen_data(self,erange,step,L=-1):
		if (L < 0):
			L = self.L
		higgs = self.higgs
		beam = self.beam
		seff = self.seff
		isrcorr = self.isrcorr
		b = self.bkg
		x = []
		y = []
		yerr = []
		nstep = int(erange/step)
		self.nstep=nstep
		lstep = L/(2.0*nstep)
		self.lstep=lstep
		s = seff*isrcorr*higgs_smear(higgs,beam,higgs[0])
		for i in range(-nstep,nstep+1):
			ecm = higgs[0] + step*i
			s = seff*isrcorr*higgs_smear(higgs,beam,ecm)
			mu = lstep*(s+b)
			N = stats.poisson(mu).rvs()
			x.append(ecm)
			y.append(N)
			if(N>0):
				yerr.append(math.sqrt(N))
			else:
				yerr.append(1)
		out = [ x, y, yerr ]
		self.x = np.array(x)
		self.y = np.array(y)
		self.yerr = np.array(yerr)
		return out
Example #19
def test_BeestonBarlowSingleBin():

    instructions_mc = [dict(n_events=32, x=0.5)]
    data, n_mc = make_data(instructions_mc)

    conf = test_conf(default_source_class=FixedSampleSource,
                     events_per_day=32/5,
                     analysis_space=[['x', [0, 1]]],
                     data=data)

    likelihood_config = {'model_statistical_uncertainty_handling': 'bb_single',
                         'bb_single_source': 0}
    lf = BinnedLogLikelihood(conf, likelihood_config=likelihood_config)
    lf.prepare()
    assert lf.n_model_events is not None

    # Make a single event at x=0
    lf.set_data(np.zeros(2, dtype=[('x', float), ('source', int)]))

    assert lf.n_model_events is not None
    assert almost_equal(28.0814209, beeston_barlow_root2(np.array([32]), 0.2, np.array([1]), np.array([2])))

    # A = beeston_barlow_root2(np.array([32]), 0.2, np.array([0]), np.array([2]))
    A = (2+32)/(1+0.2)

    assert almost_equal(lf(), stats.poisson(0.2*A).logpmf(2))
Example #20
def _poisson_inputs(data):
    observed = np.asanyarray(data)
    
    #All possible frequencies from [min(observed) to max(observed)].
    #Values not present in observed have frequency = 0. The frequency
    #of a value is accessed by all_freqs[value].
    all_freqs = np.bincount(observed)
    all_values = np.arange(len(all_freqs))
    
    #Estimating the mean of the Poisson
    aux = (all_freqs * all_values).sum()
    total = all_freqs.sum()
    estimated_mean = aux / total

    #Computes expected frequencies in ascending order of the values
    #First for all values till the one before the last
    dist = stats.poisson(estimated_mean)
    probabilites = np.apply_along_axis(dist.pmf, 0, all_values[:-1])
    last_value = all_values[-1]
    
    #Add greater or equal last one
    geq_last = dist.sf(last_value) + dist.pmf(last_value)
    probabilites = np.append(probabilites, geq_last)
    sum_probs = probabilites.sum()

    #Now the arrays are matched (each index is the frequency of the same value)
    expected_freqs = total * probabilites
    
    return all_freqs, expected_freqs
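The two arrays line up index-by-index, which is the shape scipy's chi-square goodness-of-fit test expects; a hedged usage sketch (the toy data and the ddof=1 choice for the estimated mean are mine, not the original caller's):

import numpy as np
from scipy import stats

data = np.array([0, 1, 1, 2, 2, 2, 3, 4, 1, 0, 2, 3])
observed_freqs, expected_freqs = _poisson_inputs(data)
chi2, p = stats.chisquare(observed_freqs, expected_freqs, ddof=1)
print(chi2, p)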
Example #21
def test_multi_bin_single_dim():
    instructions_mc = [dict(n_events=24, x=0.5),
                       dict(n_events=56, x=1.5)]
    data, n_mc = make_data(instructions_mc)

    conf = test_conf(events_per_day=42,
                     analysis_space=[['x', [0, 1, 5]]], default_source_class=FixedSampleSource, data=data)

    lf = BinnedLogLikelihood(conf)
    lf.add_rate_parameter('s0')

    instructions_data = [dict(n_events=18, x=0.5),
                         dict(n_events=70, x=1.5)]
    data, _ = make_data(instructions_data)
    lf.set_data(data)

    mus = [42 / n_mc * instructions_mc[i]['n_events']
           for i in range(len(instructions_mc))]
    seen = [instructions_data[i]['n_events']
            for i in range(len(instructions_data))]

    assert almost_equal(lf(),
                        np.sum([stats.poisson(mu).logpmf(seen_in_bin)
                                for mu, seen_in_bin in zip(mus, seen)]),
                        1e-6)
Example #22
 def __init__(self, stack):
     super(ImpressionViewer, self).__init__()
     self.stack = stack
     self.mean_requests_per_batch = 300
     self.batch_index = 0
     self.batch_size_dist = poisson(self.mean_requests_per_batch)
     self.batch_target = self.batch_size_dist.rvs(1)[0]
Example #23
def show_poisson_views():
    """Show different views of a Poisson distribution"""
    
    sns.set_palette(sns.color_palette('muted'))
    
    fig, ax = plt.subplots(3,1)
    
    k = np.arange(25)
    pd = stats.poisson(10)
    setFonts(12)
    
    ax[0].plot(k, pd.pmf(k),'x-')
    ax[0].set_title('Poisson distribution', fontsize=24)
    ax[0].set_xticklabels([])
    ax[0].set_ylabel('PMF (X)')
    
    ax[1].plot(k, pd.cdf(k))
    ax[1].set_xlabel('X')
    ax[1].set_ylabel('CDF (X)')
    
    y = np.linspace(0,1,100)
    ax[2].plot(y, pd.ppf(y))
    ax[2].set_xlabel('X')
    ax[2].set_ylabel('PPF (X)')
    
    plt.tight_layout()
    plt.show()
Example #24
def fig2():
    '''
        Plot a histogram of the probability that we cut fibers (no overlaps)
        and compare it with the binomial distribution.
        Set n_sim >> 1
    '''
    figure( 2 )
    delta = 0.
    p = probability_cut_nooverlaps( spec.l_x, fib.lf, delta )

    rvb = binom( fib.n, p )
    rvp = poisson( fib.n * p )
    rvn = norm( fib.n * p, sqrt( fib.n * p * ( 1 - p ) ) )

    graph_from = floor( bin_mean - 4 * bin_stdv )
    graph_to = floor( bin_mean + 4 * bin_stdv ) + 1


    x = arange( graph_from , graph_to )
    plot( x, n_sim * rvb.pmf( x ), color = 'red', linewidth = 2, label = 'Binomial' )
    plot( x, n_sim * rvp.pmf( x ), color = 'green', linewidth = 2, label = 'Poisson' )
    plot( x, n_sim * rvn.pdf( x ), color = 'blue', linewidth = 2, label = 'Normal' )
    #plot( x, 20 * rv.pmf( x ) )

    pdf, bins, patches = hist( v, n_sim, density = False ) #, facecolor='green', alpha=1
    #set_xlim( bin_mean - 2 * bin_stdv, bin_mean + 2 * bin_stdv )
    #plot( sx, sy, 'rx' )   # centroids
    #print sum( pdf * diff( bins ) )
    legend()
    draw()
Example #25
def prob_noise_above_th(rate, T, m):
    """Returns the probability that noise is above the burst search threshold.

    Basically, it is the probability that a Poisson process with rate "rate"
    has "m" or more events in a time window "T".
    """
    return poisson(rate*T).sf(m-1)
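A worked call with toy numbers (assuming poisson is imported from scipy.stats, as the function requires): a 30 kcps background rate and a 0.5 ms window give rate*T = 15 expected counts, so reaching m = 30 counts is rare:

rate, T, m = 30e3, 0.5e-3, 30
print(prob_noise_above_th(rate, T, m))   # P(Poisson(15) >= 30), roughly 4e-4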
Example #26
def show_poisson():
    """Show different views of a Poisson distribution"""
    
    fig, ax = plt.subplots(3,1)
    
    k = np.arange(25)
    pd = stats.poisson(10)
    mystyle.set(12)
    
    ax[0].plot(k, pd.pmf(k),'x-')
    ax[0].set_title('Poisson distribution')
    ax[0].set_xticklabels([])
    ax[0].set_ylabel('PMF (X)')
    
    ax[1].plot(k, pd.cdf(k))
    ax[1].set_xlabel('X')
    ax[1].set_ylabel('CDF (X)')
    
    y = np.linspace(0,1,100)
    ax[2].plot(y, pd.ppf(y))
    ax[2].set_xlabel('X')
    ax[2].set_ylabel('PPF (X)')
    
    plt.tight_layout()
    plt.show()
Example #27
def plot_poisson():
    fig, ax = plt.subplots(1, 1)

    # This is prediction for Wawrinka in 2014
    mu = 7.869325

    x = np.arange(poisson.ppf(0.01, mu), poisson.ppf(0.999, mu))
    ax.plot(x, poisson.pmf(x, mu), 'wo', ms=8, label='poisson pmf')
    ax.vlines(x, 0, poisson.pmf(x, mu),
              colors=['b', 'b', 'b', 'b', 'b', 'r', 'r', 'r', 'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g'], lw=5, alpha=0.5)

    rv = poisson(mu)
    ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1, label='frozen pmf')

    plt.title("Stanislas Wawrinka")
    plt.xlabel('# QF+ Finishes in 2014')
    plt.ylabel('Probability')

    prob0 = poisson.cdf(6, mu)
    prob123 = poisson.cdf(9, mu) - poisson.cdf(6, mu)
    probAbove3 = poisson.cdf(10000, mu) - poisson.cdf(9, mu)
    print(prob0)
    print(prob123)
    print(probAbove3)

    plt.show()
Example #28
    def __init__(self):
        """
        Initialise the model with defaults.

        """
        super(MLE, self).__init__()

        # Metadata
        self.appliance = None
        self.stats = []
        self.units = None
        self.resistive = False
        self.thDelta = 0
        self.thLikelihood = 0
        self.sample_period = None
        self.sampling_method = None
        # FEATURES:
        self.onpower = {'name': 'gmm', 'model': mixture.GMM(n_components=1)}
        self.offpower = {'name': 'gmm', 'model': mixture.GMM(n_components=1)}
        self.duration = {'name': 'poisson', 'model': poisson(0)}

        # Trainings:
        self.onpower_train = pd.DataFrame(columns=['onpower'])
        self.offpower_train = pd.DataFrame(columns=['offpower'])
        self.duration_train = pd.DataFrame(columns=['duration'])

        # Constrains
        self.powerNoise = 0    # Background noise in the main
        self.powerPair = 0  # Max diff between onpower and offpower
        self.timeWindow = 0        # To avoid high computation
Example #29
def B(x,XW,n1,n2,kernel,logproductExpectations=None):
    """Computes B(x)=\int\Sigma_{0}(x,w,XW[0:n1],XW[n1:n1+n2])dp(w).
      
       Args:
          x: Vector of points where B is evaluated
          XW: Point (x,w)
          n1: Dimension of x
          n2: Dimension of w
          kernel
          logproductExpectations: Vector with the logarithm
                                  of the product of the
                                  expectations of
                                  np.exp(-alpha2[j]*((z-W[i,j])**2))
                                  where W[i,:] is a point in the history.
          
    """
    x=np.array(x).reshape((x.shape[0],n1))
    results=np.zeros(x.shape[0])
    parameterLamb=parameterSetsPoisson
    X=XW[0:n1]
    inda=n1+n2
    W=XW[n1:inda]
    alpha2=0.5*((kernel.alpha[n1:n1+n2])**2)/scaleAlpha**2
    alpha1=0.5*((kernel.alpha[0:n1])**2)/scaleAlpha**2
    variance0=kernel.variance
    if logproductExpectations is None:
        logproductExpectations=0.0
        for j in range(n2):
            G=poisson(parameterLamb[j])
            temp=G.dist.expect(lambda z: np.exp(-alpha2[j]*((z-W[j])**2)),G.args)
            logproductExpectations+=np.log(temp)
    for i in range(x.shape[0]):
        results[i]=logproductExpectations+np.log(variance0)-np.sum(alpha1*((x[i,:]-X)**2))
    return np.exp(results)
Example #30
def mean_variance_demo(reruns=35, maxn=100):
    means = []
    stds = []
    ns = range(2, maxn + 1)
    # use a poisson dist
    mu = 5

    for r in range(reruns):

        if r % 10 == 0:
            print('On rerun {} of {}'.format(r, reruns))

        means.append([])
        stds.append([])

        # show convergence of mean and std
        for n in ns:
            rvs = stats.poisson(mu).rvs(size=n)
            means[r].append( np.mean(rvs) )
            stds[r].append( np.std(rvs) )

    np_means = np.asarray(means)
    np_stds = np.asarray(stds)

    return np_means, np_stds
Example #31
 def _do_estep(self, X, S):
     """
     E-step
     """
     N = X.shape[0]
     K = self.n_components
     r = np.zeros([N, K])
     for n in range(N):
         norm = [
             multivariate_normal(self.means[j], self.covs[j])
             for j in range(K)
         ]
         pois = [poisson(self.rates[j]) for j in range(K)]
         for k in range(K):
             r[n][k] = self.weights[k] * norm[k].pdf(X[n]) * pois[k].pmf(
                 S[n])
         r[n, :] = r[n, :] / np.sum(r[n, :])
     self.r = r
     return self
Example #32
    def pmf(self, input_values, x):
        """Calculates the probability mass function of the distribution at point x.

        Parameters
        ----------
        input_values: list
            List of input parameters, in the same order as specified in the InputConnector passed to the init function
        x: integer
            The point at which the pmf should be evaluated.

        Returns
        -------
        Float
            The evaluated pmf at point x.
        """

        pmf = poisson(int(input_values[0])).pmf(x)
        self.calculated_pmf = pmf
        return pmf
Example #33
 def __init__(self,cdts,Rcut,hyp,Dist,centre_init):
     """
     Constructor of the logposteriorModule
     """
     rad,thet        = Deg2pc(cdts,centre_init,Dist)
     c,r,t,self.Rmax = TruncSort(cdts,rad,thet,Rcut)
     self.pro        = c[:,2]
     self.cdts       = c[:,:2]
     self.Dist       = Dist
     #------------- poisson ----------------
     self.quadrants  = [0,np.pi/2.0,np.pi,3.0*np.pi/2.0,2.0*np.pi]
     self.poisson    = st.poisson(len(r)/4.0)
     #-------------- priors ----------------
     self.Prior_0    = st.norm(loc=centre_init[0],scale=hyp[0])
     self.Prior_1    = st.norm(loc=centre_init[1],scale=hyp[1])
     self.Prior_2    = st.halfcauchy(loc=0,scale=hyp[2])
     self.Prior_3    = st.uniform(loc=0.01,scale=hyp[3])
     self.Prior_4    = st.uniform(loc=0,scale=2)
     print("Module Initialized")
Example #34
def distributions_example12():
    plt.figure(figsize=(13, 10))

    n = 10_000

    plt.suptitle(f'Poisson Distribution, {n:,} simulations')
    all_bins = [[0, 7], [0, 21], [0, 21], [60, 140, 10]]
    for i, mu in enumerate([1, 5, 10, 100]):
        plt.subplot(2, 2, i + 1)
        x = stats.poisson(mu).rvs(n)
        bins = all_bins[i]
        plt.hist(x,
                 bins=range(*bins),
                 align='left',
                 edgecolor='black',
                 color='white')
        plt.title(rf'$\mu$={mu}')
        if i > 1:
            plt.xlabel('Number of events')
        plt.ylabel('')
Example #35
def compare_binom_poisson(mu=4, n1=8, n2=50):
    """
    二项分布与泊松分布的比较
    :param mu: 泊松分布的参数,保持mu不变
    :param n1: 第一个二项分布中的实验次数,n比较小
    :param n2: 第二个二项分布中的实验次数,n比较大
    :return:
    """
    # 为了具有可比性, 利用mu = n * p, 计算p
    p1 = mu / n1  # 二项分布中的参数,单次实验成功的概率
    p2 = mu / n2
    poisson_dist = stats.poisson(mu)  # 初始化泊松分布
    binom_dist1 = stats.binom(n1, p1)  # 初始化第一个二项分布
    binom_dist2 = stats.binom(n2, p2)  # 初始化第二个二项分布

    # compute the pmf
    X = np.arange(poisson_dist.ppf(0.0001), poisson_dist.ppf(0.9999))
    y_po = poisson_dist.pmf(X)
    print(X)
    print(y_po)
    y_bi1 = binom_dist1.pmf(X)
    y_bi2 = binom_dist2.pmf(X)

    # plots
    # First group
    # when n is small and p is large, the two differ noticeably
    plt.figure(1)
    plt.subplot(211)
    plt.plot(X, y_bi1, 'b-', label='binom1 (n={}, p={})'.format(n1, p1))
    plt.plot(X, y_po, 'r--', label='poisson (mu={})'.format(mu))
    plt.ylabel('Probability')
    plt.title('Comparing PMF of Poisson Dist. and Binomial Dist.')
    plt.legend(loc='best', frameon=False)

    # second group
    # when n is large and p is small, the two are very similar
    plt.subplot(212)
    plt.plot(X, y_bi2, 'b-', label='binom1 (n={}, p={})'.format(n2, p2))
    plt.plot(X, y_po, 'r--', label='poisson (mu={})'.format(mu))
    plt.ylabel('Probability')
    plt.legend(loc='best', frameon=False)
    plt.show()
Example #36
def checkFitCalls(mData, vParameter, sDistribution):
    """
    Purpose: to check the fit of the Poisson distribution to the observed data
        
    Input:
        -mCalls, matrix of integers, represents calls received per hour (columns) and day (rows)
        -vL, vector of doubles, represents lambda parameter needed for the Poisson distribution for each hour
    
    Output:
        -plots for each hour, containing 
            in the first subplot: the Poisson distribution given the lambda and a histogram of the observed amount of calls in that hour
            in the second subplot: a QQ plot showing the goodness of fit
        -results from a chi-squared test
    """
    vLabels = [
        '7-8', '8-9', '9-10', '10-11', '11-12', '12-13', '13-14', '14-15',
        '15-16', '16-17', '17-18', '18-19', '19-20', '20-21'
    ]
    iM = len(mData[:, 0])
    iN = len(mData[0])

    for i in range(iN):
        vData = mData[:, i]
        dParameter = vParameter[i]

        dMin = np.min(vData)
        dMax = np.max(vData)
        dStepSize = dMax / iM
        vK = np.arange(dMax)
        #vK= np.linspace(dMin, dMax, iM)
        vDistribution = st.poisson.pmf(vK, dParameter)
        sLabel = vLabels[i]

        plt.figure()
        plt.subplot(1, 2, 1)
        plotDistribution(vData, vDistribution, sDistribution)
        plt.subplot(1, 2, 2)
        st.probplot(vData, dist=st.poisson(dParameter), plot=plt)
        plt.suptitle(sLabel)
        plt.grid()
        plt.tight_layout()
        plt.show()
Example #37
def generate_linear_data(N, n, n_rel=-1, data_rel_var=1.0 ,data_irrel_var=1.0, noise_var=0.5, correlated=False, standardized=True):
    '''
    N: number of data points
    n: dimension of data (excluding intercept)
    n_rel: number of relevant predictor variables
    noise_var: variance of the noise
    standardized: returns a standardized model if true
    '''
    noise = np.random.randn(N)*noise_var
    if n_rel==-1:
        n_rel=n
    # relevant x
    X_rel = np.random.randn(N,n_rel)*data_rel_var
    ones = np.ones(N)
    X = np.c_[ones, X_rel]
    beta = np.random.randn(n_rel+1) ## intercept is included
    if n-n_rel >0:
        X_irrrel = np.random.randn(N,n-n_rel)*data_irrel_var
        X=np.c_[X,X_irrrel]
        beta_irrel = np.zeros(n-n_rel)
        beta=np.concatenate((beta,beta_irrel), axis=0)
    if correlated:
        rvs = stats.poisson(30, loc=10).rvs
        rs = random_sparse(n, n, density=0.1, data_rvs=rvs)
        # rs = np.random.binomial(1,0.1,size=(n,n)).astype('int')
        rs+=np.eye(n).astype('int')
        print(rs)
        X[:,1:] = X[:, 1:].dot(rs)
    y = X.dot(beta)+noise #+ 0.005*(X**2).dot(beta)
    # y = (X**2).dot(beta)+noise
    
    if not standardized:
        return [X,y,beta]
    
    dataset=np.c_[X,y][:, 1:] # remove intercept term
    mean = dataset.mean(axis=0)
    sq_diff = (dataset-mean)**2
    sq_diff = sq_diff.sum(axis=0)/(N-1)
    std = np.sqrt(sq_diff)
    
    beta_reg=beta[1:]*std[:-1]/std[-1]
    dataset = (dataset-mean)/(std*np.sqrt(N-1))
    return [dataset[:,:-1], dataset[:, -1], beta_reg]
Example #38
def parseGeneInfo(fileName, recRate=0):
    '''
    parse input gene file (*.tsv) and return dict obj of gene info
    '''
    info = {'pos': [], 'maf': [], 'annotation': [], 'd_idx': [], 'd_maf': []}
    try:
        reader = csv.reader(open(fileName, 'r'), delimiter='\t')
        rows = list(reader)
    except Exception:
        raise ValueError("Inappropriate argument value --genes")
    info['chr'] = [int(i[0]) for i in rows]
    info['pos'] = [int(i[1]) for i in rows]
    info['maf'] = [float(i[2]) for i in rows]
    info['annotation'] = [bool(int(i[4])) for i in rows]
    if recRate:
        # read the last column as recombination rate (unit: CM - centimorgan)
        info['cm'] = [float(i[5]) for i in rows]
        info['num_rec'] = [(info['cm'][i + 1] - info['cm'][i]) / 100.
                           for i in range(len(info['cm']) - 1)]
        # assume that number of crossing overs between any two adjacent var sites can be modeled by a Poisson distribution (absence of interference)
        from scipy.stats import poisson
        import math
        info['prob_rec'] = [0]
        for mu in info['num_rec']:
            rv = poisson(mu)
            pmf = rv.pmf(0)
            info['prob_rec'].append(0 if math.isnan(pmf) else 1 - pmf)
        # info['prob_rec'] = [0] + [(info['cm'][i+1]-info['cm'][i])/100. for i in range(len(info['cm'])-1)]
        from operator import mul
        from functools import reduce  # reduce is not a builtin in Python 3
        info['rec_rate'] = 1 - reduce(mul, [1 - i
                                            for i in info['prob_rec']], 1)
    info['d_idx'] = [i for i, j in enumerate(info['annotation']) if j]
    info['nd_idx'] = [i for i, j in enumerate(info['annotation']) if not j]
    info['d_maf'] = [info['maf'][i] for i in info['d_idx']]
    cumu_dMaf = [
        sum(info['d_maf'][:i]) for i in range(1,
                                              len(info['d_maf']) + 1)
    ]
    sum_dMaf = sum(info['d_maf'])
    info['cumuProbs_dMaf'] = [i / sum_dMaf for i in cumu_dMaf]
    return info
Example #39
 def generate_event(self, x_src, t_src=0, N_src=10, b=1):
     '''
     generates one event
     
     Parameters:
     
     x_src : float
         Source position
     t_src : float
         Source time
     N_src : int
         Amount of photons sent out
      b : float
          perpendicular distance off of the sensor line

      Returns:

      Ns : array
          observed number of photons per detector, x-position of detectors, indices of detectors
     ts : list
         observed photon times, x-position of detectors, indices of detectors
     '''
     Ns = []
     Ns_sensor_idx = []
     Ns_sensor_x = []
     ts = []
     ts_sensor_idx = []
     ts_sensor_x = []
     lambda_ds = lambda_d(self.detector_xs, x_src, b, N_src)
     for i, x in enumerate(self.detector_xs):
         N_exp = lambda_ds[i]
         N_obs = stats.poisson(mu=N_exp).rvs()
         Ns.append(N_obs)
         Ns_sensor_x.append(x)
         Ns_sensor_idx.append(i)
         if N_obs > 0:
             pulse_times = self.get_p_d(x, t_src, x_src, b).rvs(size=N_obs)
             ts.extend(pulse_times)
             ts_sensor_idx.extend([i] * N_obs)
             ts_sensor_x.extend([x] * N_obs)
     return np.array([Ns, Ns_sensor_x, Ns_sensor_idx
                      ]).T, np.array([ts, ts_sensor_x, ts_sensor_idx]).T
Example #40
def N_test_poisson(num_obs_events: int, rupture_rate: float,
                   conf_interval: float) -> dict:

    conf_min, conf_max = poisson(rupture_rate).interval(conf_interval)

    test_pass = conf_min <= num_obs_events <= conf_max

    test_res = "Pass" if test_pass else "Fail"
    logging.info(f"N-Test: {test_res}")

    test_result = {
        "conf_interval_pct": conf_interval,
        "conf_interval": (conf_min, conf_max),
        "inv_time_rate": rupture_rate,
        "n_obs_earthquakes": num_obs_events,
        "test_res": test_res,
        "test_pass": bool(test_pass),
    }

    return test_result
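A hedged usage sketch (the counts and rate below are made up; the snippet assumes poisson and logging are already imported): 5 observed ruptures against a forecast rate of 8.0 over the catalog duration, tested at 95% confidence:

res = N_test_poisson(num_obs_events=5, rupture_rate=8.0, conf_interval=0.95)
print(res['conf_interval'], res['test_pass'])   # expect roughly (3.0, 14.0) -> True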
Example #41
def section8new():
    tsamp = 0.05
    nsamp = 1000
    time = timestamp()
    name = 'section8-' + time + '-{0}-{1}'.format(tsamp, nsamp)
    counts = get_counts(tsamp, nsamp)
    save_data(name, counts)
    mean = sum(counts) / len(counts)
    h = histogram(counts, name)
    hr = h[0]
    hist = h[1]
    prob = []
    total_hist = sum(hist)
    for i in range(len(hist)):
        p = float(hist[i]) / total_hist
        prob.append(p)
    plt.figure()
    plt.plot(hr, prob, '-o')
    plt.plot(hr, poisson(hr, mean), '-o')
    plt.show()
Example #42
 def setUp(self):
     '''
     Saves the current random state for later recovery, sets the random seed
     to get reproducible results and manually constructs a mixed vine.
     '''
     # Save random state for later recovery
     self.random_state = np.random.get_state()
     # Set fixed random seed
     np.random.seed(0)
     # Manually construct mixed vine
     self.dim = 3  # Dimension
     self.vine = MixedVine(self.dim)
     # Specify marginals
     self.vine.set_marginal(0, norm(0, 1))
     self.vine.set_marginal(1, poisson(5))
     self.vine.set_marginal(2, gamma(2, 0, 4))
     # Specify pair copulas
     self.vine.set_copula(1, 0, GaussianCopula(0.5))
     self.vine.set_copula(1, 1, FrankCopula(4))
     self.vine.set_copula(2, 0, ClaytonCopula(5))
Example #43
def plot_poison_pmf(mean_calculated, title):
    # mu: the mean calculated from the counts (sum divided by length)
    fig, ax = plt.subplots(1, 1)
    mu = mean_calculated
    mean, var, skew, kurt = poisson.stats(mu, moments='mvsk')
    x = np.arange(poisson.ppf(0.01, mu), poisson.ppf(0.99, mu))
    ax.plot(x, poisson.pmf(x, mu), 'bo', ms=8, label='poisson pmf')
    ax.vlines(x, 0, poisson.pmf(x, mu), colors='b', lw=5, alpha=0.5)
    rv = poisson(mu)
    ax.vlines(x,
              0,
              rv.pmf(x),
              colors='k',
              linestyles='-',
              lw=1,
              label='frozen pmf')
    ax.legend(loc='best', frameon=False)
    plt.title(title)
    interactive(False)
    plt.show()
Example #44
def recDistribution():
    data = loadFeatures()
    recs = array(data['recs'], float)
    mu = numpy.average(recs)
    print(mu)
    dist = poisson(mu)
    dist2 = geom((1.0 / mu))
    dist3 = pareto(recs)
    x = numpy.arange(1, numpy.amax(recs))
    h = plt.hist(recs, bins=range(40), density=True)
    plt.plot(dist3[0], dist3[1], color='yellow', label='Pareto', linewidth=3)
    plt.plot(x, dist.pmf(x), color='black', label='Poisson', linewidth=3)
    plt.plot(x, dist2.pmf(x), color='red', label='Geometric', linewidth=3)
    plt.legend()
    plt.xlabel('Recommendation Count')
    plt.ylabel('Actual Value (% of Data) / Probability')
    plt.legend()
    plt.suptitle('Fitting Rec. Count')
    plt.xlim(0, 40)
    plt.show()
Example #45
def score_genes(geneSNPdict, nsample, totalSize, geneLengths,
                useBonferroni=True):
    '''Uses binomial scoring for unpooled library data (i.e. each library
    tag is a single sample.'''
    if useBonferroni:
        correction = len(geneSNPdict)
    else:
        correction = 1.
    nSNP = sum([len(l) for l in geneSNPdict.values()]) # total in all samples
    mu = float(nSNP) / (nsample * totalSize) # SNP density per exome nucleotide
    geneScores = []
    for geneID, snps in geneSNPdict.items():
        d = {}
        for snp in snps: # only count one snp per sample
            d[snp.tag] = 0
        pois = stats.poisson(mu * geneLengths[geneID]) # pmf for one sample
        pmf = stats.binom(nsample, pois.sf(0))
        geneScores.append((correction * pmf.sf(len(d) - 1), geneID))
    geneScores.sort()
    return geneScores
Example #46
def graficar_poi(x, v_mu):
    plt.ylabel('Probability that X occurs')
    plt.xlabel('Random variable X')
    plt.title('Poisson distribution')
    y1 = []
    y2 = []
    poi = poisson(v_mu)
    for num in range(0, x):
        y1.append(poi.pmf(num))  # probability mass function
    for num in range(0, x):
        y2.append(poi.cdf(num))  # cumulative distribution function

    # prob = poi.pmf(20)  # example of a point probability
    plt.grid(True)
    plt.minorticks_on()
    plt.plot(y1, lw=2.0, label="f (x)", color="blue")
    plt.plot(y2, lw=2.0, label="F (x)", color="orange")
    # plt.plot([20], [prob], marker='o', markersize=6, color="red")
    plt.legend()
    plt.show()
Example #47
    def _rvs(self, p, mu, phi):
        p = np.array(p, ndmin=1)
        if not (p > 1).all() & (p < 2).all():
            raise ValueError('p only valid for 1 < p < 2')
        size, rndm = self._size, self._random_state
        rate = est_kappa(mu, p) / phi
        scale = est_gamma(phi, p, mu)
        shape = -est_alpha(p)
        N = poisson(rate).rvs(size=size, random_state=rndm)
        mask = N > 0
        if not np.isscalar(scale) and len(scale) == len(mask):
            scale = scale[mask]
        if not np.isscalar(shape) and len(shape) == len(mask):
            shape = shape[mask]

        rvs = gamma(a=N[mask] * shape, scale=scale).rvs(size=np.sum(mask),
                                                        random_state=rndm)
        rvs2 = np.zeros(N.shape, dtype=rvs.dtype)
        rvs2[mask] = rvs
        return rvs2
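_rvs is the standard compound Poisson-gamma (Tweedie, 1 < p < 2) sampler: draw N ~ Poisson, then sum N gamma variates, leaving an atom at zero when N == 0. A self-contained sketch of that construction with illustrative constants (est_kappa, est_gamma and est_alpha above are the class's own estimators and are not reproduced here):

import numpy as np
from scipy.stats import gamma, poisson

rng = np.random.default_rng(0)
rate, shape, scale = 3.0, 1.5, 2.0          # illustrative values, not estimated
N = poisson(rate).rvs(size=1000, random_state=rng)
mask = N > 0
samples = np.zeros(N.shape)
samples[mask] = gamma(a=N[mask] * shape, scale=scale).rvs(size=mask.sum(),
                                                          random_state=rng)
print(samples.mean(), (samples == 0).mean())
# mean ~ rate*shape*scale = 9, zero mass ~ exp(-rate) ~ 0.05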
Example #48
    def test_PDFSampler(self):
        np.random.seed(seed=42)
        pdf = norm(0, 10)
        gaussian = PDFSampler(pdf)
        f1 = Factory(self.ts, gaussian)
        series = f1.create()
        self.assertEqual(series[0], 4.9671415301123272)

        np.random.seed(seed=42)
        mu = 1.6
        pdf = poisson(mu)
        pois = PDFSampler(pdf)

        f2 = Factory(self.ts, pois)
        series = f2.create()
        self.assertEqual(series[0], 3.0)
        self.assertEqual(series[-1], 0.0)

        f3 = Factory(self.ts, pois + gaussian)
        series = f3.create()
Example #49
def sample_forward_model(n,
                         report_dists,
                         spatial_dists=None,
                         n_samples=1,
                         report_bits=1,
                         dist_bits=1,
                         mech_dist=1,
                         sz=8,
                         spacing=np.pi / 4,
                         encoding_rate=None):
    if encoding_rate is not None:
        n_enc_stim = min(sts.poisson(encoding_rate).rvs(), n)
        enc_stim_inds = np.sort(
            np.random.choice(range(n), n_enc_stim, replace=False))
        n = n_enc_stim
        report_dists = report_dists[enc_stim_inds]
        if spatial_dists is not None:
            spatial_dists = spatial_dists[enc_stim_inds]
    else:
        enc_stim_inds = range(n)
    if 0 in enc_stim_inds:
        out = _get_ae_probabilities(n,
                                    report_dists,
                                    spatial_dists=spatial_dists,
                                    report_bits=report_bits,
                                    dist_bits=dist_bits,
                                    mech_dist=mech_dist,
                                    sz=sz,
                                    spacing=spacing)
        report_distortion, ae_probs = out
        ae_probs[0] = 1 - np.sum(ae_probs[1:n])
        if ae_probs[0] < 0:
            ae_probs[0] = 0
            ae_probs[1:n] = ae_probs[1:n] / np.sum(ae_probs[1:n])
        resps = np.random.choice(range(n), n_samples, p=ae_probs)
        means = report_dists[resps]
        var = sts.norm(0, np.sqrt(report_distortion)).rvs(means.shape)
        samples = means + var
    else:
        samples = sts.uniform(-np.pi, 2 * np.pi).rvs(n_samples)
    return samples
Example #50
    def test_plus_by_temporal_jitter(self):
        recipe_1 = {'start': '2018-01-01', 'end': '2018-01-02', 'delta': '1 h'}

        ts1 = TemporalTemplate(recipe_1)
        np.random.seed(seed=42)

        mu = 0.6
        rv = poisson(mu)
        j = TemporalJitter(pdf=rv, resolution='m')

        ts_all = ts1 + j
        self.assertEqual(ts_all.length, ts1.length,
                         'The summed by delta must not change the length.')
        self.assertEqual(
            ts_all.ticks[1], pd.to_datetime('2018-01-01 01:02:00'),
            'The jitter must move the second ticks by 2 minutes.')

        np.random.seed(seed=42)
        rv = norm(0, 10)
        j = TemporalJitter(pdf=rv, resolution='m')

        ts_all = ts1 + j
        self.assertEqual(ts_all.length, ts1.length,
                         'The summed by delta must not change the length.')
        self.assertEqual(ts_all.ticks[0],
                         pd.to_datetime('2018-01-01 00:04:00'),
                         'The jitter must move the first ticks by 4 minutes.')
        self.assertEqual(ts_all.ticks[-1],
                         pd.to_datetime('2018-01-01 23:55:00'),
                         'The jitter must move the last ticks by 5 minutes.')

        np.random.seed(seed=42)
        ts_all = j + ts1
        self.assertEqual(ts_all.length, ts1.length,
                         'The summed by delta must not change the length.')
        self.assertEqual(ts_all.ticks[0],
                         pd.to_datetime('2018-01-01 00:04:00'),
                         'The jitter must move the first ticks by 4 minutes.')
        self.assertEqual(ts_all.ticks[-1],
                         pd.to_datetime('2018-01-01 23:55:00'),
                         'The jitter must move the last ticks by 5 minutes.')
Example #51
 def __init__(
     self,
     initial_inventory: int,
     time_steps: int,
     price_lambda_pairs: Sequence[Tuple[float, float]]
 ):
     self.initial_inventory = initial_inventory
     self.time_steps = time_steps
     self.price_lambda_pairs = price_lambda_pairs
     distrs = [poisson(l) for _, l in price_lambda_pairs]
     prices = [p for p, _ in price_lambda_pairs]
     self.single_step_mdp: FiniteMarkovDecisionProcess[int, int] =\
         FiniteMarkovDecisionProcess({
             s: {i: Categorical(
                 {(s - k, prices[i] * k):
                  (distrs[i].pmf(k) if k < s else 1 - distrs[i].cdf(s - 1))
                  for k in range(s + 1)})
                 for i in range(len(prices))}
             for s in range(initial_inventory + 1)
         })
     self.mdp = finite_horizon_MDP(self.single_step_mdp, time_steps)
Example #52
def simulate(N, beta0, X=None):
    """Generate data for testing a poisson GLM.

    Args:
        N:      number of variates
        beta0:  true regression coefficient vector
        X:      covariate matrix (optional)

    Returns:
        (y, X) as a tuple
    """
    k = beta0.shape[0]
    assert k >= 1
    if X is None:
        X = stats.norm(0, 0.1).rvs(size=[N, k])
        X[:, 0] = 1.0
    else:
        assert X.shape == (N, k)
    eta = X @ beta0
    y = stats.poisson(np.exp(eta)).rvs()
    return (y, X)
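Usage sketch: the fitted coefficients should recover beta0. The GLM fit below is my choice of checker, not part of the original snippet, and assumes statsmodels is installed:

import numpy as np
import statsmodels.api as sm

beta0 = np.array([0.5, -0.2])
y, X = simulate(1000, beta0)
fit = sm.GLM(y, X, family=sm.families.Poisson()).fit()
print(fit.params)   # should be close to [0.5, -0.2]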
Example #53
def pmm_get_likelihood(patientToGenes, lam, p_k):
    D = [len(patientToGenes[p]) for p in patientToGenes]
    numCases = len(D)
    classes = range(len(lam))

    ll_kd = np.array(
        [[np.log(p_k[k]) + np.log(poisson(lam[k]).pmf(d)) for d in D]
         for k in classes])
    likelihood_sums = np.zeros(numCases)

    for i in range(numCases):
        likelihood_sums[i] = sum([
            (np.exp(ll_kd[k][i]) if ll_kd[k][i] > -np.inf else 0)
            for k in classes
        ])

    # complete log likelihood

    ll_new = sum(np.log(np.array([ls for ls in likelihood_sums if ls > 0])))

    return ll_new
Example #54
 def __init__(self,
              neur_means,
              neur_stds=None,
              model='poisson',
              eps=.000001):
     self.distribs = {}
     self.model = model
     self.eps = eps
     for cond in neur_means.keys():
         self.distribs[cond] = np.zeros(neur_means[cond].shape,
                                        dtype='object')
          for i in range(neur_means[cond].shape[0]):
              for j in range(neur_means[cond].shape[1]):
                 if model.lower() == 'poisson':
                     d = ssts.poisson(neur_means[cond][i, j])
                 elif model.lower() == 'gaussian':
                     d = ssts.norm(loc=neur_means[cond][i, j],
                                   scale=neur_stds[cond][i, j] + eps)
                 else:
                     raise RuntimeError('distribution not recognized')
                 self.distribs[cond][i, j] = d
Example #55
def Hist_Poisson():
    poisson_param = 10  # parameters
    poisson_label = "Poisson distribution"
    poisson_color = "violet"
    for size in selection_size:
        fig, ax = plt.subplots(1, 1)
        pdf = poisson(poisson_param)
        random_values = poisson.rvs(poisson_param, size=size)
        ax.hist(random_values,
                density=True,
                histtype=HIST_TYPE,
                alpha=hist_visibility,
                color=poisson_color)
        # Create_plot(ax, poisson_label, pdf, size)  # Poisson is handled differently
        x = np.arange(poisson.ppf(0.01, 10), poisson.ppf(0.99, 10))
        ax.plot(x, pdf.pmf(x), LINE, lw=line_width)
        ax.set_xlabel(poisson_label)
        ax.set_ylabel(Y_LABEL)
        ax.set_title(TITLE + str(size))
        plt.grid()
        plt.show()
Example #56
    def _poisson_likelihood_2d(self, model, bins=10):
        """ Bin the model samples in 2D and renormalise to match the number of observations. This
        fixes the rate paramter of the Poisson distribution in each bin. The likelihood is
        evaluated by calculating the Poisson probability of the observed counts in each bin.
        """
        hist_range = parameter_ranges['uae'], parameter_ranges['rec']

        uae_obs = self._observations["mueff_av"].values
        rec_obs = self._observations["rec_arcsec"].values
        obs, xedges, yedges = np.histogram2d(uae_obs, rec_obs, range=hist_range, bins=bins)

        # Rescale model by number of observations
        model = model.astype("float") * self._observations.shape[0]

        # Calculate Poisson probability for each bin
        obs = obs.reshape(-1).astype("float")
        model = model.reshape(-1)
        probs = stats.poisson(mu=model).pmf(obs)

        # Return overall log likelihood
        return np.log(probs).sum()
Example #57
 def init_parameters(self):
     # Init weights
     if self.init_method == 'x': # Xavier            
         torch.nn.init.xavier_uniform_(self.weight)
     elif self.init_method == 'k': # Kaiming
         torch.nn.init.kaiming_uniform_(self.weight)
     elif self.init_method == 'p': # Poisson
         mu=self.kernel_size[0]/2 
         dist = poisson(mu)
         x = np.arange(0, self.kernel_size[0])
         y = np.expand_dims(dist.pmf(x),1)
         w = signal.convolve2d(y, y.transpose(), 'full')
         w = torch.Tensor(w).type_as(self.weight)
         w = torch.unsqueeze(w,0)
         w = torch.unsqueeze(w,1)
         w = w.repeat(self.out_channels, 1, 1, 1)
         w = w.repeat(1, self.in_channels, 1, 1)
         self.weight.data = w + torch.rand(w.shape)
         
     # Init bias
     self.bias = torch.nn.Parameter(torch.zeros(self.out_channels)+0.01)
Example #58
def multiple_test(indices: np.ndarray, contact_array: np.ndarray,
                  lambda_array: np.ndarray, fdrs: Tuple[float],
                  sigs: Tuple[float],
                  method: str) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Conduct poisson test on each pixel and multiple test correction for all tests.

    :param indices:
    :param contact_array: np.ndarray. 1-d array containing the observed contacts (number of counts) of all pixels.
    :param lambda_array: np.ndarray. 2-d array containing the lambdas (background) of all pixels.
    'lambda_array.shape[0]' denotes the number of backgrounds and 'lambda_array.shape[1]' denotes the number of pixels.
    :param fdrs: tuple. Tuple of fdrs to control the false discovery rate for each background.
    Length of this tuple must equal 'lambda_array.shape[0]'.
    :param sigs: tuple. Tuple of padj thresholds. Length of this tuple must equal 'lambda_array.shape[0]'.
    :param method: str. Method used for multiple testing. For supported methods see: statsmodels.stats.multitest.
    :return: Tuple[pvals, padjs, rejects]. pvals and padjs are both 2-d ndarrays with the same shape as 'lambda_array'.
    rejects is a Boolean ndarray of the same shape; each value represents whether to reject the null hypothesis.
    """
    num_test, len_test = lambda_array.shape
    pvals = np.full((num_test, len_test), 1, float)
    padjs = np.full((num_test, len_test), 1, float)
    rejects = np.full((num_test, len_test), False, bool)

    for test_i in range(num_test):
        for _, end, lambda_mask in lambda_chunks(lambda_array[test_i]):
            chunk_size = lambda_mask.sum()
            if chunk_size == 0:
                continue
            # poisson_model = stats.poisson(np.ones(chunk_size) * end)
            poisson_model = stats.poisson(lambda_array[test_i, lambda_mask])
            _pvals = 1 - poisson_model.cdf(contact_array[lambda_mask])
            reject, _padjs, _, _ = multitest.multipletests(pvals=_pvals,
                                                           alpha=fdrs[test_i],
                                                           method=method)
            rejects[test_i][lambda_mask] = reject
            padjs[test_i][lambda_mask] = _padjs
            pvals[test_i][lambda_mask] = _pvals

    rejects = rejects & (padjs < np.array(sigs)[:, None])

    return pvals, padjs, rejects
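For a single background row, the core of multiple_test reduces to a vectorised Poisson upper-tail test followed by multiple-test correction. A minimal standalone sketch with toy arrays (lambda_chunks is the original repo's helper and is not needed here):

import numpy as np
from scipy import stats
from statsmodels.stats import multitest

contacts = np.array([3, 10, 2, 25, 4])          # observed counts per pixel
lambdas = np.array([4.0, 4.0, 3.0, 5.0, 4.0])   # background lambda per pixel
pvals = stats.poisson(lambdas).sf(contacts)     # same as 1 - cdf(contacts)
reject, padjs, _, _ = multitest.multipletests(pvals, alpha=0.1, method='fdr_bh')
print(pvals, padjs, reject)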
Example #59
def generate_hits(n,
                  nstrain,
                  ntarget,
                  ngene,
                  prDecoy,
                  lambdaTrue,
                  lambdaError=0.,
                  pFail=0.,
                  residual=1e-06,
                  targetProbs=None):
    '''generate lists of hit counts in real targets vs. nontargets in
    multihit screen, using probability of hitting target
    gene(s) conditioned on requiring at least one such hit.
    decoyCounts vectors are offset by poissonRange.nmin'''
    prTarget = PoissonRange(ntarget * lambdaTrue, residual / nstrain, 1)
    prTarget.renormalize()  # condition on at least one hit in target region
    targetN = prTarget.sample_totals(nstrain, n)
    if targetProbs:  # user-supplied target size vector
        targetProbs = [(p * (1. - pFail)) for p in targetProbs] + [pFail]
    else:  # uniform target sizes
        targetProbs = ((1. - pFail) / ntarget, ) * ntarget + (pFail, )
    targetProbs = numpy.array(targetProbs)
    if lambdaError:
        poisErr = stats.poisson(nstrain * lambdaError)
        targetNoise = poisErr.rvs((n, ntarget))
    decoyCounts = numpy.random.multinomial(ngene - ntarget,
                                           prDecoy.pmf,
                                           size=n)
    for i in range(n):  # analyze replicates
        targetHits = numpy.random.multinomial(targetN[i], targetProbs)
        if lambdaError:
            hits = targetHits[:ntarget] + targetNoise[i]
        else:
            hits = targetHits[:ntarget]
        nmax = hits.max()
        if nmax < ntarget:  # list all counts from 0 to nmax
            targetCounts = [(hits == j).sum() for j in range(nmax + 1)]
        else:  # use efficient container for sparse list
            targetCounts = HitCountList(hits)
        yield targetCounts, decoyCounts[i]
Example #60
def output_prob_state(k,
                      l=None,
                      c=0.4,
                      gamma_loc=10,
                      gamma_scale=11,
                      show_lambda=False):
    """
    k is the number of basis/transformations in the current state
    l is the lambda parameter for poisson
    c is constant taken to be 0.4
    
    bk = c min (1, p(k+1)/p(k))
    dk = c min (1, p(k)/p(k+1))
    pk = 1-bk-dk
    
    If we have 1 or 0 basis function always return birth prob to be 1. 
    
    This is for calculating:
    
    `b_k`, `d_k`, `p_k` respectively
    """

    if l is None:
        # generate a sample l from Gamma
        l = gamma.rvs(gamma_loc + k, gamma_scale, size=1)[0]
    if show_lambda:
        print("Lambda selected to be: {}".format(l))

    if k <= 1:
        return 1, 0, 0

    poisson_obj = poisson(l)
    birth = c * min(1, (poisson_obj.pmf(k + 1) / poisson_obj.pmf(k)))
    death = c * min(1, (poisson_obj.pmf(k) / poisson_obj.pmf(k + 1)))
    change = 1.0 - birth - death

    # output the birth, death and change probabilities

    return birth, death, change
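With a fixed lambda the probabilities can be checked by hand: for k = 5 and l = 4, pmf(6)/pmf(5) = l/(k+1) = 4/6, so birth = 0.4 * 4/6 ~ 0.267, death = 0.4 (the ratio exceeds 1 and is clipped) and change ~ 0.333. A quick call (assumes poisson is imported from scipy.stats, as elsewhere in these examples):

b, d, p = output_prob_state(5, l=4.0)
print(b, d, p, b + d + p)   # ~0.267 0.4 ~0.333 1.0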