def get_ui(self, params: List[ndarray], bounds: Tuple[float, float]) -> ndarray:
    mean = params[0] * params[1]
    return [
        poisson.ppf(bounds[0], mu=mean),
        poisson.ppf(bounds[1], mu=mean)
    ]
def apply_shape_settings(self):
    self._PERIOD_WITH_COMPUTED_LIMITS = int(
        config.LIMITER_FULL_COLLECTION_PERIOD_S * 1.2)
    self._MIN_SUBMITS_IN_TIME = \
        [poisson.ppf(config.LIMITER_MIN_SUBMITS_PERCENTILE,
                     i * config.LIMITER_TARGET_SUBMISSION_RATE
                     * config.LIMITER_MIN_OK_SUBMITS_RATIO)
         for i in range(self._PERIOD_WITH_COMPUTED_LIMITS + 1)]
    self._MAX_SUBMITS_IN_TIME = \
        [poisson.ppf(config.LIMITER_MAX_SUBMITS_PERCENTILE,
                     i * config.LIMITER_TARGET_SUBMISSION_RATE
                     * config.LIMITER_MAX_OK_SUBMITS_RATIO)
         for i in range(self._PERIOD_WITH_COMPUTED_LIMITS + 1)]
    # Find the first non-zero expected value of minimal submission count
    for i in range(1, config.LIMITER_FULL_COLLECTION_PERIOD_S):
        # At least one submit is expected in the i-th second
        if self._MIN_SUBMITS_IN_TIME[i] >= 1:
            self._NO_SHARE_RECALCULATION_PERIOD = i
            break
    # If we didn't find the right spot, the full collection period is used
    # (should never happen)
    else:
        self._NO_SHARE_RECALCULATION_PERIOD = \
            config.LIMITER_FULL_COLLECTION_PERIOD_S
        log.error("Non-zero minimal submissions not found")
    # Recalculate collection period
    self._HALF_COLLECTION_PERIOD = int(
        config.LIMITER_FULL_COLLECTION_PERIOD_S / 2)
    self._FINE_TUNE_UPPER_RATE = config.LIMITER_TARGET_SUBMISSION_RATE * \
        config.LIMITER_FINE_TUNE_UPPER_RATIO
    self._FINE_TUNE_LOWER_RATE = config.LIMITER_TARGET_SUBMISSION_RATE * \
        config.LIMITER_FINE_TUNE_LOWER_RATIO
def plot_total(
        self,
        kwargs: Dict = dict(ls='', marker='.'),
        line_kwargs: Dict = dict(),
        fill_kwargs: Dict = dict()
) -> None:
    r"""Plot the SFS using matplotlib

    Args:
        kwargs: keyword arguments for scatter plot
        line_kwargs: keyword arguments for expectation line
        fill_kwargs: keyword arguments for marginal fill
    """
    x = self.X.sum(1, keepdims=True)
    plt.plot(range(1, len(x) + 1), x, **kwargs)
    if self.η is not None:
        if 'label' in kwargs:
            del kwargs['label']
        if self.μ is not None:
            z = self.μ.Z.sum(1)
        else:
            z = np.ones_like(self.η.y)
        ξ = self.L.dot(z)
        plt.plot(range(1, self.n), ξ, **line_kwargs)
        ξ_lower = poisson.ppf(.025, ξ)
        ξ_upper = poisson.ppf(.975, ξ)
        plt.fill_between(range(1, self.n), ξ_lower, ξ_upper, **fill_kwargs)
    plt.xlabel('sample frequency')
    plt.ylabel(r'variant count')
    plt.tight_layout()
def rentA(self):
    prob_dict = {}
    x = np.arange(poisson.ppf(0.0001, 3), poisson.ppf(0.9999, 3))
    for i in x:
        prob_dict[i] = poisson.pmf(i, 3)
    return prob_dict
def returnB(self):
    prob_dict = {}
    x = np.arange(poisson.ppf(0.0001, 2), poisson.ppf(0.9999, 2))
    for i in x:
        prob_dict[i] = poisson.pmf(i, 2)
    return prob_dict
def locate_termination(samfile, chrom, strand, gene_exons):
    '''
    Returns the window termination location of the gene in gene_exons
    '''
    # Initialize params
    threshold = 0.01
    s_t = 75
    s_w = 100
    l_g = 1000
    n_w = (l_g - s_w) // s_t + 1  # number of windows (integer count)
    if strand == '-':
        start = gene_exons['LOC1'].iloc[0]
        end = start + 1000
        N = samfile.count(chrom, start=start, end=end)
        N_w = N * s_w / l_g
        m = poisson.ppf(threshold, N_w)
    else:
        end = gene_exons['LOC2'].iloc[-1]
        start = end - 1000
        N = samfile.count(chrom, start=start, end=end)
        N_w = N * s_w / l_g
        m = poisson.ppf(threshold, N_w)
    if m != 0:
        return slide_window(samfile, chrom, start, end, strand,
                            n_w, m, s_w, s_t)
    else:
        return 1
def probf_baharev(df1, df2, noncen, fcrit):
    x = 1 - special.btdtri(df1, df2, fcrit)
    eps = 1.0e-7
    itr_cnt = 0
    f = None
    while itr_cnt <= 10:
        mu = noncen / 2.0
        ql = poisson.ppf(eps, mu)
        qu = poisson.ppf(1 - eps, mu)
        k = qu
        c = beta.cdf(x, df1 + k, df2)
        d = x * (1.0 - x) / (df1 + k - 1.0) * beta.pdf(x, df1 + k - 1, df2, 0)
        p = poisson.pmf(k, mu)
        f = p * c
        p = k / mu * p
        k = qu - 1
        while k >= ql:
            c = c + d
            d = (df1 + k) / (x * (df1 + k + df2 - 1)) * d
            f = f + p * c
            p = k / mu * p
            k = k - 1
        itr_cnt = itr_cnt + 1
        if itr_cnt == 11:
            print("newton iteration failed")
    return f
def plot_poisson():
    fig, ax = plt.subplots(1, 1)
    # This is prediction for Wawrinka in 2014
    mu = 7.869325
    x = np.arange(poisson.ppf(0.01, mu), poisson.ppf(0.999, mu))
    ax.plot(x, poisson.pmf(x, mu), 'wo', ms=8, label='poisson pmf')
    ax.vlines(x, 0, poisson.pmf(x, mu),
              colors=['b', 'b', 'b', 'b', 'b',
                      'r', 'r', 'r',
                      'g', 'g', 'g', 'g', 'g', 'g', 'g', 'g'],
              lw=5, alpha=0.5)
    rv = poisson(mu)
    ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
              label='frozen pmf')
    plt.title("Stanislas Wawrinka")
    plt.xlabel('# QF+ Finishes in 2014')
    plt.ylabel('Probability')
    prob0 = poisson.cdf(6, mu)
    prob123 = poisson.cdf(9, mu) - poisson.cdf(6, mu)
    probAbove3 = poisson.cdf(10000, mu) - poisson.cdf(9, mu)
    print(prob0)
    print(prob123)
    print(probAbove3)
    plt.show()
def tpoissrnd(lam):
    # lam may be a scalar or an array; both branches sample a
    # zero-truncated Poisson (x >= 1) by the inverse-CDF method
    if not np.isscalar(lam):
        x = np.ones(len(lam))
        ind = lam > 1e-5  # below this value x = 1 with high probability
        if np.any(ind):
            n_ = sum(ind)
            lam_ = lam[ind]
            x[ind] = poisson.ppf(
                np.exp(-lam_) + np.multiply(np.random.rand(n_),
                                            1 - np.exp(-lam_)),
                lam_)
    else:
        x = 1
        if lam > 1e-5:
            x = poisson.ppf(
                np.exp(-lam) + np.random.rand() * (1 - np.exp(-lam)), lam)
    return x
def run(p, tMax, fBest, rateIncreaseRatio, outputFilename):
    alpha = 0.005
    badBlocks = genAttackBlocks(alpha, p, fBest, rateIncreaseRatio, tMax)
    # for i in xrange(len(badBlocks)):
    #     print("Block at %.2f, bounds: (%.2f, %.2f)" %
    #           ((badBlocks[i],) +
    #            poissonInterval(i, alpha, p * badBlocks[i])))
    # print("Actual rate: %.2f" % (len(badBlocks) / badBlocks[-1]))

    # Generate CSV
    output = ""
    output += "t, GoodFreqLow, GoodFreqAvg, GoodFreqHigh, BadFreqAvg, AdvantageRatio\n"
    pointCount = 100
    badBlockI = 0
    for t in linspace(0, badBlocks[-2], num=pointCount)[1:]:
        while badBlocks[badBlockI + 1] <= t:
            badBlockI += 1
        output += "%.2f, %.2f, %.2f, %.2f, %.2f, %.2f\n" % (
            t,                                  # Current time
            poisson.ppf(0.05, t * fBest * p),   # Number of good blocks: lower bound
            round(t * fBest * p),               # Rounded number of good blocks estimate
            poisson.ppf(0.95, t * fBest * p),   # Number of good blocks: upper bound
            round(badBlockI),                   # Number of bad blocks
            float(badBlockI) / (t * fBest * p)  # Advantage ratio
        )
    outputFile = open(outputFilename, "w")
    outputFile.write(output)
    outputFile.close()
def poisson_and_sim_events(mu, size=10000):
    """
    This function is used for calculating the Poisson probabilities and for
    randomly generating new events from the Poisson distribution for a
    number of years for the scenario we want to check.

    :param mu: the average annual number of storms for the scenario
    :param size: the number of years
    :return:
        sim_ev: the time series of new simulated events
        x: array of potential numbers of hurricanes per year based on the mu
    """
    # Getting the Poisson probabilities
    # Array of potential numbers of hurricanes per year
    x = np.arange(poisson.ppf(0.01, mu), poisson.ppf(0.99, mu) * 2)
    # Probability of each of x occurring
    prob = poisson.cdf(x, mu)
    # Checking accuracy of cdf and ppf
    print(np.allclose(x, poisson.ppf(prob, mu)))  # must be True

    # Generating random numbers from the Poisson distribution;
    # size chooses how many years of events we want
    sim_ev = poisson.rvs(mu, size=size)
    return sim_ev, x
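# Hypothetical usage of the function above (mu = 2 storms/year and
# size = 500 are illustrative values, not from the source):
sim_ev, x = poisson_and_sim_events(mu=2, size=500)
print(sim_ev[:10])  # first ten simulated annual event counts
print(x)            # candidate yearly counts covered by the scenario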
def model():
    µ = 4
    x = np.arange(poisson.ppf(0.01, µ), poisson.ppf(0.99, µ))
    y = poisson.pmf(x, µ)
    prior_hist = plot.hist(x, np.arange(0, 10), weights=y,
                           align="left", rwidth=0.8)
def test_poisson(self):
    mu = 0.6
    mean, var, skew, kurt = poisson.stats(mu, moments='mvsk')
    self.assertEqual(mean, 0.6)
    self.assertEqual(var, 0.6)
    self.assertEqual(skew, 1.2909944487358056)
    self.assertEqual(kurt, 1.6666666666666667)
    n = np.array([0., 1., 2.])
    x = np.arange(poisson.ppf(0.01, mu), poisson.ppf(0.99, mu))
    self.assertTrue(np.array_equal(n, x))
def ppf_cached(y, cache={}):
    x = round(y, 4)
    if x == 0:
        return poisson.ppf(0.99999999, y)
    if x in cache:
        return cache[x]
    ppf = poisson.ppf(0.99999999, x)
    if np.isnan(ppf) or ppf == np.inf:
        print("wtf:", y, x)
    cache[x] = max(ppf, 1)
    return ppf
def test_round_poisson(self):
    random_state = RandomState()
    avg = 3.4
    samples = 1000
    rounds = [random_state.round_poisson(avg) for i in range(samples)]
    obs_avg = np.mean(rounds)
    # quantile bounds for the sample mean (avoid shadowing min/max builtins)
    lower = poisson.ppf(0.001, mu=avg)
    upper = poisson.ppf(0.999, mu=avg)
    self.assertGreater(obs_avg, lower)
    self.assertLess(obs_avg, upper)
def generate_data(self, copulas, lambdas=None):
    arr = []
    if lambdas is None:
        # if no lambdas are passed, randomly generate dependence
        lambdas = np.random.uniform(0.5, 6, size=len(copulas))
    firstArr = self.removeNans(copulas[0])
    arr_poisson = np.array(poisson.ppf(firstArr, lambdas[0]))
    for i in range(len(copulas)):
        poiss = np.array(
            poisson.ppf(self.removeNans(copulas[i]), lambdas[i]))
        arr.append(poiss)
    return np.asarray(arr)
def forward_epi_step(self, dB: int = 0):
    # get previous state
    S, E, I, R, D, N = (vector[-1] for vector in
                        (self.S, self.E, self.I, self.R, self.D, self.N))

    # update state
    Rt = self.Rt0 * float(S) / float(N)
    b = np.exp(self.gamma * (Rt - 1))

    rate_T = max(0, self.b[-1] * self.dT[-1])
    num_cases = poisson.rvs(rate_T)
    self.upper_CI.append(poisson.ppf(self.CI, rate_T))
    self.lower_CI.append(poisson.ppf(1 - self.CI, rate_T))

    E += num_cases
    S -= num_cases

    rate_I = self.sigma * E
    num_inf = poisson.rvs(rate_I)
    E -= num_inf
    I += num_inf

    rate_D = self.m * self.gamma * I
    num_dead = poisson.rvs(rate_D)
    D += num_dead

    rate_R = (1 - self.m) * self.gamma * I
    num_recov = poisson.rvs(rate_R)
    R += num_recov

    I -= (num_dead + num_recov)

    if S < 0: S = 0
    if E < 0: E = 0
    if I < 0: I = 0
    if R < 0: R = 0
    if D < 0: D = 0

    N = S + E + I + R
    beta = (num_cases * N) / (b * S * I)

    # update state vectors
    self.Rt.append(Rt)
    self.b.append(b)
    self.S.append(S)
    self.E.append(E)
    self.I.append(I)
    self.R.append(R)
    self.D.append(D)
    self.N.append(N)
    self.beta.append(beta)
    self.dT.append(num_cases)
    self.total_cases.append(E + I + R + D)
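# Standalone illustration of the CI band appended above (rate_T = 120 and
# CI = 0.95 are made-up values, not from the source): poisson.ppf gives
# quantiles of the expected new-case count around its mean.
from scipy.stats import poisson

rate_T, CI = 120, 0.95
print(poisson.ppf(1 - CI, rate_T), poisson.ppf(CI, rate_T))  # ≈ 102, 138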
def durations(self):
    """
    The sd of the duration distribution should probably also be generated
    stochastically. Taking 20 ms as one unit, values around 20, 100, 150,
    and 200 are typical; note, however, that the "intrinsic duration" of
    /u/ is especially short.
    See [poisson] for how poisson is used.

    [poisson]: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.poisson.html
    """
    eps = np.finfo(float).eps
    mu_range = np.arange(poisson.ppf(eps, min(self.poisson_params)),
                         poisson.ppf(1 - eps, max(self.poisson_params)))
    return np.array([poisson.pmf(mu_range, pa)
                     for pa in self.poisson_params])
def poissonFunc():
    mu = 10
    for i in range(len(size)):
        n = size[i]
        fig, ax = plt.subplots(1, 1)
        ax.set_title("Poisson distribution, n = " + str(n))
        x = np.arange(poisson.ppf(0.01, mu), poisson.ppf(0.99, mu))
        ax.plot(x, poisson(mu).pmf(x), 'b-', ms=8)
        r = poisson.rvs(mu, size=n)
        ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
        plt.show()
def poisNumbers():
    for size in sizes:
        fig, ax = plt.subplots(1, 1)
        ax.hist(poisson.rvs(10, size=size), histtype='stepfilled',
                alpha=0.5, color='blue', density=True)
        x = np.arange(poisson.ppf(0.01, 10), poisson.ppf(0.99, 10))
        ax.plot(x, poisson(10).pmf(x), '-')
        ax.set_title('PoisNumbers n = ' + str(size))
        ax.set_xlabel('PoisNumbers')
        ax.set_ylabel('density')
        plt.grid()
        plt.show()
    return
def Poisson():
    for s in size:
        den = poisson(10)
        hist = poisson.rvs(10, size=s)
        fig, ax = plt.subplots(1, 1)
        ax.hist(hist, density=True, alpha=0.6)
        x = np.arange(poisson.ppf(0.01, 10), poisson.ppf(0.99, 10))
        ax.plot(x, den.pmf(x), LINE_TYPE, lw=1.5)
        ax.set_xlabel("POISSON")
        ax.set_ylabel("DENSITY")
        ax.set_title("SIZE: " + str(s))
        plt.grid()
        plt.show()
def ppf_mtc(y, N, cache={}):
    pp = 1.0 - (args.pvalue / N)
    if pp == 1.0:
        pp = 1.0 - np.finfo(float).resolution
    x = round(y, 4)
    if x == 0:
        ppf = poisson.ppf(pp, y)
        if np.isnan(ppf) or np.isinf(ppf):
            print("wtf:", y, x, ppf, (1.0 - (args.pvalue / N)), N)
        return ppf
    if x in cache:
        return cache[x]
    ppf = poisson.ppf(pp, x)
    cache[x] = max(ppf, 1)
    return ppf
def _get_prediction(self, rates, data, percentile):
    flag = list()
    for ind, rate in enumerate(rates):
        if rate > 0.0:
            lower = poisson.ppf(percentile, rate)
            upper = poisson.ppf(1 - percentile, rate)
            if data[ind] < lower or data[ind] > upper:
                flag.append(1)
            else:
                flag.append(0)
        else:
            rospy.logwarn("Occurrence rate is %.2f" % rate)
            flag.append(-1)
    return flag
def qNBI(q: float, location: np.ndarray, scale: np.ndarray):
    """Quantile function."""
    n = 1 / scale
    p = n / (n + location)
    if len(scale) > 1:
        quant = np.where(scale > 1e-04,
                         nbinom.ppf(q=q, n=n, p=p),
                         poisson.ppf(q=q, mu=location))
    else:
        quant = poisson.ppf(q=q, mu=location) if scale < 1e-04 \
            else nbinom.ppf(q=q, n=n, p=p)
    return quant
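# Why the scale < 1e-04 fallback above is reasonable (an illustrative
# check, not from the source): as scale shrinks, n = 1/scale grows and
# the negative binomial quantile approaches the Poisson quantile with
# the same mean.
from scipy.stats import nbinom, poisson

mu = 12.0
for scale in [1.0, 0.1, 1e-3]:
    n = 1 / scale
    p = n / (n + mu)
    print(scale, nbinom.ppf(0.95, n=n, p=p), poisson.ppf(0.95, mu))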
def index():
    if request.method == 'GET':
        if app.vars['firstTime']:
            return render_template('intro_beforeMap.html')
        else:
            try:
                rtDF = getRealTimeDockStatusData(app.vars['url2scrapeRT'])
                mergedDF = (app.vars['stations']).merge(
                    rtDF, 'inner', 'terminalname')
                mergedDF['muBikesW'] = mergedDF['bikeDemand'] * \
                    app.vars['window'] / 60.
                mergedDF['muDocksW'] = mergedDF['dockDemand'] * \
                    app.vars['window'] / 60.
                mergedDF['ppf0005_B'] = 1 + poisson.ppf(
                    0.9995, mergedDF['muBikesW']).astype(int)
                mergedDF['ppf0005_D'] = 1 + poisson.ppf(
                    0.9995, mergedDF['muDocksW']).astype(int)
                mergedDF['pEmpty'] = mergedDF.apply(
                    lambda x: pOutage(x['muBikesW'], x['muDocksW'],
                                      x['nbbikes']), axis=1)
                mergedDF['pFull'] = mergedDF.apply(
                    lambda x: pOutage(x['muDocksW'], x['muBikesW'],
                                      x['nbemptydocks']), axis=1)
                sendGJ = df_to_geojson(
                    mergedDF,
                    ['terminalname', 'name', 'nbbikes', 'nbemptydocks',
                     'bikeDemand', 'dockDemand', 'ppf0005_B', 'ppf0005_D',
                     'pEmpty', 'pFull'],
                    lat='lat', lon='long')
                return render_template('withMap.html',
                                       num=app.vars['window'],
                                       gjFC_StationData=sendGJ)
            except:
                return render_template('withoutMap.html',
                                       num=app.vars['window'],
                                       gjFC_StationData=app.vars['gjS'])
    else:  # request was a POST
        tempInput = request.form['myWindow']
        app.vars['firstTime'] = False
        try:
            # limit to one hour
            app.vars['window'] = min([abs(int(float(tempInput))), 60])
        except:
            # default to 15 minutes if input cannot be converted to numeric
            app.vars['window'] = 15
        return redirect('/')
def plot_total(
        self,
        kwargs: Dict = dict(ls="", marker="."),
        line_kwargs: Dict = dict(),
        fill_kwargs: Dict = dict(),
        folded: bool = False,
) -> None:
    r"""Plot the SFS using matplotlib

    Args:
        kwargs: keyword arguments for scatter plot
        line_kwargs: keyword arguments for expectation line
        fill_kwargs: keyword arguments for marginal fill
        folded: if ``True``, plot the folded SFS and fit
    """
    if self.X.ndim == 1:
        x = self.X
    else:
        x = self.X.sum(1)
    if folded:
        x = utils.fold(x)
    plt.plot(range(1, len(x) + 1), x, **kwargs)
    if self.η is not None:
        if "label" in kwargs:
            del kwargs["label"]
        if self.μ is not None:
            self.η.check_grid(self.μ)
            z = self.μ.Z.sum(1)
        else:
            z = self.mu0 * np.ones_like(self.η.y)
        ξ = self.L.dot(z)
        if folded:
            ξ = utils.fold(ξ)
        else:
            if self.r is None:
                raise TypeError("ancestral state misidentification rate "
                                "is not inferred, do you want "
                                "folded=True?")
            ξ = (1 - self.r) * ξ + self.r * self.AM_freq @ ξ
        plt.plot(range(1, len(ξ) + 1), ξ, **line_kwargs)
        ξ_lower = poisson.ppf(0.025, ξ)
        ξ_upper = poisson.ppf(0.975, ξ)
        plt.fill_between(range(1, len(ξ) + 1), ξ_lower, ξ_upper,
                         **fill_kwargs)
    plt.xlabel("sample frequency")
    plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True))
    plt.ylabel(r"variant count")
    plt.tight_layout()
def parallel_forward_epi_step(self, dB: int = 0, num_sims=10000):
    # get previous state
    S, I, R, D, N = (vector[-1].copy() for vector in
                     (self.S, self.I, self.R, self.D, self.N))

    # update state
    Rt = self.Rt0 * S / N
    b = np.exp(self.gamma * (Rt - 1))

    rate_T = (self.b[-1] * self.dT[-1]).clip(0)
    num_cases = poisson.rvs(rate_T, size=num_sims)
    self.upper_CI.append(poisson.ppf(self.CI, rate_T))
    self.lower_CI.append(poisson.ppf(1 - self.CI, rate_T))

    I += num_cases
    S -= num_cases

    rate_D = self.m * self.gamma * I
    num_dead = poisson.rvs(rate_D, size=num_sims)
    D += num_dead

    rate_R = (1 - self.m) * self.gamma * I
    num_recov = poisson.rvs(rate_R, size=num_sims)
    R += num_recov

    I -= (num_dead + num_recov)

    S = S.clip(0)
    I = I.clip(0)
    D = D.clip(0)
    N = S + I + R

    beta = (num_cases * N) / (b * S * I)

    # update state vectors
    self.Rt.append(Rt)
    self.b.append(b)
    self.S.append(S)
    self.I.append(I)
    self.R.append(R)
    self.D.append(D)
    self.dR.append(num_recov)
    self.dD.append(num_dead)
    self.N.append(N)
    self.beta.append(beta)
    self.dT.append(num_cases)
    self.total_cases.append(I + R + D)
def ztp(N, lambda_):
    """Zero-truncated Poisson distribution"""
    temp = [poisson.pmf(0, item) for item in lambda_]
    p = [uniform.rvs(loc=item, scale=1 - item) for item in temp]
    ztp = [int(poisson.ppf(p[i], lambda_[i])) for i in range(len(p))]
    return np.array(ztp)
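# A minimal check of the zero-truncation above (assumes the imports the
# snippet relies on: numpy as np and scipy.stats' poisson and uniform).
# Drawing U ~ Uniform(P(X=0), 1) and inverting the Poisson CDF can never
# land in the zero class, so every sample is >= 1.
rates = np.full(1000, 0.3)   # small rate where a plain Poisson is mostly 0
samples = ztp(len(rates), rates)
assert (samples >= 1).all()
print(samples.mean())        # ≈ 0.3 / (1 - exp(-0.3)) ≈ 1.16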
def test_statistics(self):
    # This is a statistical test that has a non-zero chance of failure
    # during normal operation. Thus, we set the random seed to a value that
    # creates a realization passing the test.
    np.random.seed(seed=12345)

    for rate in [self.rate_profile, self.rate_profile.rescale(kHz)]:
        spiketrain = stgen.inhomogeneous_poisson_process(rate)
        intervals = isi(spiketrain)

        # Computing expected statistics and percentiles
        expected_spike_count = (np.sum(rate) *
                                rate.sampling_period).simplified
        percentile_count = poisson.ppf(.999, expected_spike_count)
        expected_min_isi = (1 / np.min(rate))
        expected_max_isi = (1 / np.max(rate))
        percentile_min_isi = expon.ppf(.999, expected_min_isi)
        percentile_max_isi = expon.ppf(.999, expected_max_isi)

        # Testing (each should fail 1 every 1000 times)
        self.assertLess(spiketrain.size, percentile_count)
        self.assertLess(np.min(intervals), percentile_min_isi)
        self.assertLess(np.max(intervals), percentile_max_isi)

        # Testing t_start t_stop
        self.assertEqual(rate.t_stop, spiketrain.t_stop)
        self.assertEqual(rate.t_start, spiketrain.t_start)

    # Testing type
    spiketrain_as_array = stgen.inhomogeneous_poisson_process(
        rate, as_array=True)
    self.assertTrue(isinstance(spiketrain_as_array, np.ndarray))
    self.assertTrue(isinstance(spiketrain, neo.SpikeTrain))
def findOptimalPolicy(self):
    # algorithm on Page 659
    ystar = poisson.ppf(self.b / (self.b + self.h),
                        self.mu).astype(int)  # base stock level
    s = ystar - 1    # upper bound for s
    S_0 = ystar + 0  # lower bound for S_0
    # calculate the optimal s for S fixed at its lower bound S0
    while self.c(s, S_0) > self.G(s):
        s -= 1
    s_0 = s  # optimal value of s for S0
    c0 = self.c(s_0, S_0)  # costs for this starting value
    S0 = S_0  # S0 = S^0 of the paper
    S = S0 + 1
    while self.G(S) <= c0:
        if self.c(s, S) < c0:
            S0 = S + 0
            while self.c(s, S0) <= self.G(s + 1):
                s += 1
            c0 = self.c(s, S0)
        S += 1
    # print(str(s) + " " + str(S))
    self.s_star = s
    self.S_star = S0
    return s, S0
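# The starting point ystar above is the discrete newsvendor quantile,
# the smallest y with F(y) >= b / (b + h); illustrative numbers (b = 9,
# h = 1, mu = 10 are not from the source):
from scipy.stats import poisson

print(poisson.ppf(9 / (9 + 1), mu=10))  # 14.0, the 0.9 quantile of Poisson(10)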
def findOptimalPolicy(self):
    # algorithm on Page 659
    ystar = poisson.ppf(self.b / (self.b + self.h),
                        self.mu).astype(int)  # base stock level
    s = ystar - 1    # upper bound for s
    S_0 = ystar + 0  # lower bound for S_0
    # calculate the optimal s for S fixed at its lower bound S0
    self.execution_path.append([s, S_0])
    while self.c(s, S_0) > self.G(s):
        s -= 1
        self.execution_path.append([s, S_0])
    s_0 = s  # optimal value of s for S0
    c0 = self.c(s_0, S_0)  # costs for this starting value
    S0 = S_0  # S0 = S^0 of the paper
    S = S0 + 1
    self.execution_path.append([s, S])
    while self.G(S) <= c0:
        if self.c(s, S) < c0:
            S0 = S + 0
            while self.c(s, S0) <= self.G(s + 1):
                s += 1
                self.execution_path.append([s, S0])
            c0 = self.c(s, S0)
        self.execution_path.append([s, S])
        S += 1
    # print(np.array(self.execution_path))
    # self.plot()
    self.s_star = s
    self.S_star = S0
    return s, S0
def qpois(p, mu):
    """Calculates the quantile function of the Poisson distribution"""
    from scipy.stats import poisson
    result = poisson.ppf(q=p, mu=mu)
    return result
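# Hypothetical usage of the R-style wrapper above: qpois returns the
# smallest k with P(X <= k) >= p for X ~ Poisson(mu), as a float.
print(qpois(0.95, 10))           # 15.0, since P(X <= 15) ≈ 0.951
print(qpois([0.5, 0.99], 10))    # vectorized over p: [10. 18.]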
def ztpoisson(N, lambda_par):
    """Zero-truncated Poisson distribution."""
    temp = poisson.pmf(0, lambda_par)
    p = [uniform.rvs(loc=item, scale=1 - item) for item in temp]
    ztp = [int(poisson.ppf(p[i], lambda_par[i])) for i in range(N)]
    return np.array(ztp)
def add_gc_bias(meancoverages, targetcoverage):
    rand = poisson.rvs(targetcoverage)
    # cdf(x, mu, loc=0): cumulative distribution function
    cumprob = poisson.cdf(rand, targetcoverage)
    toret = []
    for cov in meancoverages:
        if cov == 0:
            toret.append(0)
        else:
            # ppf(q, mu, loc=0): percent point function (inverse of cdf)
            t = int(poisson.ppf(cumprob, cov))
            toret.append(t)
    return toret
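# Illustrative call (made-up coverages, not from the source): a single
# quantile is drawn once at the target coverage and reused for every
# window, so a lucky or unlucky draw shifts all windows coherently
# rather than independently.
print(add_gc_bias([0, 10, 20, 40], targetcoverage=30))
# zero-coverage windows stay 0; the rest share one Poisson quantile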
def replenishment(stock, open_orders, quantile, sales_prog):
    '''
    :param stock: stock
    :param open_orders: open incoming orders
    :param quantile: chosen quantile
    :param sales_prog: forecast for demand
    :return: order
    '''
    sales_quant = poisson.ppf(quantile / 100., sales_prog + 1.E-3)
    order = max(0, sales_quant - (stock + open_orders))
    return stochastic_round(order)
def get_num_trips(self, lam):
    """
    Samples a poisson distribution with the given lambda and returns the
    number of trips produced from the dist. Returns -1 if lambda = 0 ->
    undefined function.
    """
    probability = random.random()
    while probability == 0:
        probability = random.random()
    num_trips = poisson.ppf(probability, lam.value)
    if numpy.isnan(num_trips):
        # TODO: Should we do something here?
        num_trips = -1
    return int(num_trips)
def _V(self, price, t, n):
    p = self.sales_prob(price, t)
    _sum = 0
    for i in range(int(poisson.ppf(0.9999, p)) + 1):
        if i > n:
            break
        pi = poisson.pmf(i, p)
        today_profit = min(n, i) * price
        holding_costs = n * self.L
        _, V_future = self.V(t + 1, max(0, n - i))
        exp_future_profits = self.delta * V_future
        _sum += pi * (today_profit - holding_costs + exp_future_profits)
    return _sum
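# The loop bound above truncates the infinite Poisson demand support at
# its 0.9999 quantile; a quick look at how much mass that keeps (lam = 3
# is an arbitrary example rate, not from the source):
from scipy.stats import poisson

lam = 3.0
k_max = int(poisson.ppf(0.9999, lam))
print(k_max, poisson.cdf(k_max, lam))  # 11, ≈ 0.9999 of the mass retained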
def cnv(bamfile, windowSize, probability, genomeindexfile):
    bam = pysam.Samfile(bamfile, 'rb')
    chr = None
    allBaseSums = []
    listForArray = []
    zScores = []
    zScoreIndex = []
    giantList = []
    possibleCNVList = []
    upperLimit = 0
    lowerLimit = 0
    for col in bam.pileup():
        pos = col.pos
        cov = col.n
        if bam.getrname(col.tid) != chr:
            chr = bam.getrname(col.tid)
            baseSum = cov
            numw = int(pos / windowSize)  # which window
        else:
            if int(pos / windowSize) == numw:
                baseSum += cov
            else:
                if baseSum >= windowSize * 3:
                    allBaseSums += [(chr, numw * windowSize,
                                     (numw + 1) * windowSize, baseSum)]
                    listForArray += [baseSum]
                numw = int(pos / windowSize)
                baseSum = cov
    if baseSum >= windowSize * 3:
        allBaseSums += [(chr, numw * windowSize,
                         (numw + 1) * windowSize, baseSum)]
        listForArray += [baseSum]
    average = np.mean(listForArray)  # new lambda for poisson distribution
    upperLimit = np.percentile(listForArray, probability * 100)
    lowerLimit = np.percentile(listForArray, (1 - probability) * 100)
    cutOff = poisson.ppf(probability, average)
    for (chr, start, end, baseSum) in allBaseSums:
        if baseSum > cutOff:
            possibleCNVList += [(chr, start, end, baseSum)]
    """
    stdDev = np.std(listForArray)
    for (chr, start, end, baseSum) in allBaseSums:
        zScoreTemp = ((baseSum - average) / float(stdDev))
        # all windows' base sums and z scores
        giantList += [(chr, start, end, baseSum, zScoreTemp)]
        if (zScoreTemp >= determinedZScore) or (zScoreTemp <= -determinedZScore):
            possibleCNVList += [(chr, start, end, baseSum, zScoreTemp)]
    """
    cnvList = findThreeInARow(possibleCNVList)
    return allBaseSums, cnvList, upperLimit, lowerLimit
def kleinauGP(lL, K, p, l, h, L):
    """
    Calculates the reorder point and quantity parameters from
    [KleinauThonemann2004]_ full Genetic Programming solution.

    :param lL: demand distribution expected value
    :param K: order setup cost
    :param p: backorder penalty
    :param l: total demand
    :param h: holding cost
    :param L: lead time
    :returns: reorder point, reorder quantity
    :rtype: tuple
    """
    r = poisson.ppf(1 - math.sqrt(h / p), mu=l * L)
    Q = math.sqrt(L + (K / h) +
                  math.sqrt(2.1029 * l * (K + h) * math.sqrt(K / h)))
    return r, Q
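# Illustrative call with made-up cost parameters (none of these values
# come from the source; the body only uses l*L, so lL can be left None):
r, Q = kleinauGP(lL=None, K=100, p=9, l=5, h=1, L=2)
print(r, Q)  # r is the 1 - sqrt(1/9) ≈ 0.667 quantile of Poisson(10)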
def _V(price, t, n):
    x = np.hstack((price, competitor_prices,
                   rank(price, competitor_prices))).reshape(1, -1)
    # sales_prob = round(sales_model(x)[0])
    sales_prob = sales_model(x)[0]
    _sum = 0
    # TODO: Check here
    pi_sum = 0
    for i in range(int(poisson.ppf(0.9999, sales_prob)) + 1):
        pi = poisson.pmf(i, sales_prob)
        pi_sum += pi
        today_profit = min(n, i) * price
        holding_costs = n * L
        _, V_future = V(t + 1, max(0, n - i))
        exp_future_profits = delta * V_future
        _sum += pi * (today_profit - holding_costs + exp_future_profits)
    return _sum
temp_bin_no_spatial_outliers = temp_bin[temp_bin['cluster_label'] != -1]
temp_bin_no_spatial_outliers.rename(
    columns={'created_time': 'num_posts_per_time_slot'}, inplace=True)
posts_per_time_period = temp_bin_no_spatial_outliers.groupby(
    ['num_posts_per_time_slot', 'day_of_week',
     'hour_of_day']).count()['cluster_label'].reset_index()


def arrival_statistics(time_bin, day_of_week):
    dist_posts_per_time_period = posts_per_time_period[
        (posts_per_time_period['hour_of_day'] == time_bin) &
        (posts_per_time_period['day_of_week'] == day_of_week)
    ]['cluster_label'].reset_index()['cluster_label']
    emp_dist = (dist_posts_per_time_period /
                dist_posts_per_time_period.sum()).values
    mu = np.dot(np.array(range(0, len(emp_dist))), emp_dist)
    return mu


post_threshhold = np.zeros([bins_in_day, 7])
for ii in range(0, bins_in_day):
    for jj in range(0, 7):
        post_threshhold[ii][jj] = np.ceil(
            poisson.ppf(0.9999, arrival_statistics(ii, jj)))
# print(post_threshhold)

df2 = last_day_df[['post_id', 'created_time', 'lat', 'longitude',
                   'stand_res_url']]
day_of_week = datetime.datetime.fromtimestamp(
    max(df2['created_time'])).weekday()
df2['created_time'] = df2['created_time'].apply(
    lambda x: int(datetime.datetime.fromtimestamp(x).hour))
# Bin by some number of hours (integer division onto 4-hour bins)
time_bin_hours = 4
df2['created_time'] = df2['created_time'].apply(
    lambda x: x // time_bin_hours)
df2['cluster_label'] = df2[['lat', 'longitude']].apply(
    lambda x: check_spatial_membership(x['lat'], x['longitude']), axis=1)
def __call__(self, cube):
    # self.m is the Poisson mean, so it must be passed as the shape
    # parameter mu (poisson.ppf has no default for mu; `loc` would only
    # shift the support)
    return poisson.ppf(cube, mu=self.m)
def _rvs(self, mu):
    # draw U ~ Uniform[P(X=0), 1) and invert the Poisson CDF, so the
    # sample always lands above the zero class (zero-truncated Poisson)
    return poisson.ppf(uniform(low=poisson.pmf(0, mu)), mu)
# from https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.stats.poisson.html#scipy.stats.poisson
from scipy.stats import poisson
import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots(1, 1)
mu = 0.6
mean, var, skew, kurt = poisson.stats(mu, moments='mvsk')
x = np.arange(poisson.ppf(0.01, mu), poisson.ppf(0.99, mu))
ax.plot(x, poisson.pmf(x, mu), 'bo', ms=8, label='poisson pmf')
ax.vlines(x, 0, poisson.pmf(x, mu), colors='b', lw=5, alpha=0.5)
rv = poisson(mu)
ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
          label='frozen pmf')
ax.legend(loc='best', frameon=False)
plt.show()
def _ppf(self, q, mu):
    # rescale q into [P(X=0), 1) before inverting the plain Poisson CDF,
    # which skips the zero class: the zero-truncated quantile function
    return poisson.ppf(poisson.sf(0, mu) * q + poisson.pmf(0, mu), mu)
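# A standalone check of the shifted-quantile identity above: rescaling q
# into [P(X=0), 1) before inverting skips the zero class for any q > 0.
import numpy as np
from scipy.stats import poisson

mu = 1.5
qs = np.linspace(0.01, 0.99, 5)
print(poisson.ppf(poisson.sf(0, mu) * qs + poisson.pmf(0, mu), mu))  # all >= 1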
### upper bounds for X
upperX = np.zeros(n1)
temBikes = bikeData[:, 2]
for i in range(n1):
    temp = cluster[i]
    indsTemp = np.array([a[0] for a in temp])
    upperX[i] = np.sum(temBikes[indsTemp])

prob = 0
U = np.zeros(nDays)
L = np.zeros(nDays)
for i in range(nDays):
    temp = poisson.ppf([0.95], poissonParameters[i])[0]
    temp2 = poisson.ppf([0.001], poissonParameters[i])[0]
    U[i] = temp
    L[i] = temp2
print(L)
print(U)

f = open("percentilesDays.txt", "w")
np.savetxt(f, L)
np.savetxt(f, U)
f.close()

prob = 0
for i in range(nDays):
    for j in range(int(L[i]), int(U[i]) + 1):