def random_branch(romes, rho, m, mean_hirings, siret='.', nb='.'):
    return Branch(rd.choice(romes), rd.choice(rho),
                  m[rd.choice(range(len(m)))],
                  rd.chisquare(mean_hirings),
                  siret=siret, nb=nb)
def random_modi(size):
    x = randn(size)
    range_y = np.exp2(0.3 * x**3 + np.sqrt(abs(x)))
    range_y[np.where(range_y > 1.3)] = 0.23 * \
        chisquare(13, np.where(range_y > 1.3)[0].size)
    factor_y = range_y / 3.0 * randn(size) * 0.1
    return factor_y
def rwish(shape, scale, samples=1):
    '''
    Generate random samples from the Wishart distribution.

    Based on rwish() in the MCMCpack R package. That package is licensed
    under GPL-v3 and can be found at:
    http://cran.r-project.org/web/packages/MCMCpack/index.html

    :attr:`shape` is the shape parameter, a real number.
    :attr:`scale` is the scale parameter, a square matrix whose dimensions
        must not be greater than the shape parameter.
    :attr:`samples` is the number of random samples to return.
    '''
    if len(scale.shape) != 2:
        if scale.shape[0] != 1:
            raise ValueError('Scale parameter must be a 2-D matrix')
    if scale.shape[0] != scale.shape[1]:
        raise ValueError('Scale parameter must be a square matrix')
    p = scale.shape[0]
    if shape < p:
        raise ValueError(
            'Shape parameter must be equal to or greater than the number of dimensions.'
        )
    chol = la.cholesky(scale)
    result = np.zeros((samples, p, p))
    for i in range(samples):
        # Bartlett-style construction: chi-square variates on the diagonal,
        # standard normals in the upper triangle.
        z = np.eye(p)
        z = z * np.sqrt(npr.chisquare([x + shape for x in range(0, -p, -1)], p))
        if p > 1:
            pseq = range(1, p)
            # Integer division so the size argument stays an int in Python 3.
            z[np.triu_indices(p, 1)] = np.random.normal(size=p * (p - 1) // 2)
        a = chol.dot(z)
        result[i] = a.dot(a.T)
    return result
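# A minimal sanity-check sketch (an addition, not part of the original source): it
# assumes SciPy is installed and uses scipy.stats.wishart as a reference point for
# rwish() above. For a Wishart draw W with shape (degrees of freedom) k and scale S,
# E[W] = k * S, so the empirical mean of many draws should approach k * S for either
# sampler; k and S below are hypothetical example values.
import numpy as np
from scipy.stats import wishart

k, S = 5.0, np.eye(3)
reference = wishart(df=k, scale=S).rvs(size=5000)
print(np.allclose(reference.mean(axis=0), k * S, atol=0.2))  # approximately k * S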
def chisquareAverage(numSamples=100000):
    runningSum = 0
    for _ in range(numSamples):
        runningSum += chisquare(2)
    return runningSum / numSamples
def generate(self, S) -> np.ndarray:
    gaussian = rd.multivariate_normal(np.zeros(N), self.cov, S)
    chi2 = rd.chisquare(self.nu, (S, 1))
    LogScenarios = gaussian / np.sqrt(self.nu / chi2) + np.array(self.mean)
    return np.concatenate(((1 + self.r) * np.ones((S, 1)),
                           np.exp(LogScenarios)), axis=1)
def MiChisquare(dof):
    """Chi-square distribution function.

    dof: degrees of freedom
    """
    global manflag
    if not manflag:
        setManual()
    return np.random.chisquare(dof, 1)
def chi2_mean_std(self, mean=1., std=0.1):
    '''
    Chi-squared random variable with given mean and standard deviation.
    '''
    scale = 2. * mean / std
    nu = mean * scale
    return npr.chisquare(nu) / scale
def get_dist_num(args):
    dist = args[0]
    for i in range(len(args[1:])):
        args[i + 1] = float(args[1:][i])
    if dist == 'EXP':
        return exponential(args[1])
    elif dist == 'NOR':
        return normal(loc=args[1], scale=args[2])  # loc = mean, scale = standard deviation
    elif dist == 'TRI':
        return triangular(args[1], args[2], args[3])
    elif dist == 'UNI':
        return uniform(low=args[1], high=args[2])
    elif dist == 'BET':
        return beta(args[1], args[2])
    elif dist == 'WEI':
        return weibull(args[1])
    elif dist == 'CAU':  # CAU: Cauchy (not implemented)
        return 0
    elif dist == 'CHI':
        return chisquare(args[1])
    elif dist == 'ERL':  # ERL: Erlang (not implemented)
        return 0
    elif dist == 'GAM':
        return gamma(args[1], scale=args[2])
    elif dist == 'LOG':
        return lognormal(mean=args[1], sigma=args[2])
    elif dist == 'PAR':
        return pareto(args[1])
    elif dist == 'STU':
        return standard_t(args[1])
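# Hypothetical usage sketch for get_dist_num() above (not from the original source):
# the first element selects the distribution code and the remaining string elements
# are its numeric parameters. It assumes the numpy.random functions referenced above
# were imported bare, e.g. `from numpy.random import exponential, normal, chisquare`.
print(get_dist_num(['CHI', '3']))        # one chi-square draw with 3 degrees of freedom
print(get_dist_num(['NOR', '10', '2']))  # one normal draw with mean 10, std dev 2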
def estimate_sigma_e(s_e, y, X, b, Z, u, tau_e, Tau_e, family_indices):
    """ Updates the estimate for sigma_e """
    y_copy = y.copy()
    X_copy = X.copy()
    Z_copy = Z.copy()
    S_t = 0
    for family_ind in family_indices:
        # Calculate S_t for every group m separately
        y_subset = np.squeeze(y_copy[family_ind])
        X_subset = X_copy[family_ind]
        Z_subset = Z_copy[family_ind]
        s_e_i = s_e[family_ind][0]
        S_t_m = calculate_sum(s_e_i, y_subset, X_subset, b, Z_subset, u)
        S_t = S_t + S_t_m
    numerator = (tau_e * Tau_e) + S_t
    df = tau_e + len(y)
    sigma_estimate = numerator / chisquare(df=df)
    return sigma_estimate
def estimate_s_e(sigma_e, nu_e, y, X, b, Z, u, family_indices):
    """
    Generates s_e for each i, where i = {1, ..., m} and m is the number of
    groups. In this case m is the number of families.
    """
    # Initialize s_e
    s_e = np.zeros(len(y))
    for ind in family_indices:
        y_copy = np.squeeze(y.copy())
        X_copy = X.copy()
        Z_copy = Z.copy()
        # Calculate S_e from the observations belonging to this group
        y_subset = y_copy[ind]
        X_subset = X_copy[ind, :]
        Z_subset = Z_copy[ind, :]
        S_e = calculate_S_e(sigma_e, nu_e, y_subset, X_subset, b, Z_subset, u)
        # Calculate the estimate for family i
        df = nu_e + len(ind)
        s_e_i = chisquare(df) / S_e
        # Update s_e for the ith family
        s_e[ind] = s_e_i
    return s_e
def generate_Wishart(n, V):
    """
    Generate a sample from the Wishart density.

    Parameters
    ----------
    n: float,
       the number of degrees of freedom of the Wishart density
    V: array of shape (p, p),
       the scale matrix of the Wishart density

    Returns
    -------
    W: array of shape (p, p),
       the draw from the Wishart density
    """
    icv = cholesky(V)
    p = V.shape[0]
    A = nr.randn(p, p)
    for i in range(p):
        # Lower-triangular Bartlett factor: chi-square diagonal, normals below.
        A[i, i:] = 0
        A[i, i] = np.sqrt(nr.chisquare(n - i))
    R = np.dot(icv, A)
    W = np.dot(R, R.T)
    return W
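# Quick Monte Carlo check (an addition, not part of the original code), assuming the
# aliases used above: `import numpy.random as nr` and `from numpy.linalg import cholesky`.
# Since E[W] = n * V for a Wishart(n, V) draw, averaging many draws from
# generate_Wishart should approximately recover n * V.
import numpy as np

V = np.array([[2.0, 0.5], [0.5, 1.0]])
n = 7
mean_W = sum(generate_Wishart(n, V) for _ in range(4000)) / 4000
print(np.round(mean_W, 2), "vs", n * V)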
def distribution():
    sample_size = 500
    rn1 = npr.standard_normal(sample_size)
    rn2 = npr.normal(100, 20, sample_size)
    rn3 = npr.chisquare(df=0.5, size=sample_size)
    rn4 = npr.poisson(lam=1.0, size=sample_size)
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(10, 8))
    ax1.hist(rn1, bins=25, stacked=True)
    ax1.set_title('Standard normal')
    ax1.set_ylabel('frequency')
    ax2.hist(rn2, bins=25)
    ax2.set_title('Normal 100,20')
    ax3.hist(rn3, bins=25)
    ax3.set_title('Chi squared')
    ax3.set_ylabel('frequency')
    ax4.hist(rn4, bins=25)
    ax4.set_title('Poisson')
    plt.show()
def generateStudentTScenarios(NbScenarios=1000, nu=3, start=None, end=None, seed=None):
    """
    Generates random scenarios based on a multivariate Student t distribution
    of the log returns.

    - NbScenarios: int - Number of scenarios to compute
    - nu: int - Degrees of freedom of the Student t distribution
    - start/end: period - Period on which to compute the variance-covariance matrix
    - seed: int - Seed for random generation
    """
    LocalData = Data[start:end]
    LocalReturns = (LocalData / LocalData.shift() - 1)[1:]
    MeanLocalReturns = LocalReturns.mean()
    CovLocalReturns = LocalReturns.cov()
    if seed is not None:
        rd.seed(seed)
    gaussian = rd.multivariate_normal(np.zeros(len(Data.columns)), CovLocalReturns, NbScenarios)
    chi2 = rd.chisquare(nu, (NbScenarios, 1))
    scenarios = gaussian / np.sqrt(nu / chi2) + np.array(MeanLocalReturns)
    probas = np.ones(NbScenarios) / NbScenarios
    return scenarios, probas
def estimate_sigma_u(s_u, y, X, b, Z, u, tau_u, Tau_u):
    covariance_u = calculate_uAu(Z.copy(), u.copy())
    numerator = tau_u * Tau_u + s_u * covariance_u
    df = tau_u + Z.shape[1]
    updated_sigma_u = numerator / chisquare(df=df)
    return updated_sigma_u
def estimate_s_u(Z, u, sigma_u, nu_u):
    """ Generates s_u for each familial random effect. """
    df = nu_u + Z.shape[1]
    scaler = calculate_covariance_u(Z.copy(), u.copy(), sigma_u, nu_u)
    s_u_estimate = chisquare(df) / scaler
    return s_u_estimate
def simulateCopula(simulations=10, type='g', rho=float, lamda=tuple,
                   tDof=4, basketSize=5, useGPU=False):
    result = []
    """
    $\tau = F^{-1}(u) = -\frac{\log(1-u)}{\lambda}$
    """
    print('simulating t distribution' if type == 't' else 'simulating gaussian dist')
    for z in range(0, simulations):
        # for the t distribution we use the same method but
        # sample from the chi-squared distribution;
        # if GPU is enabled, hand over to the GPU to provide the random number sample
        if useGPU and type == 'g':
            z1, z2, z3, z4, z5 = rng.getPseudoRandomNumbers_Standard_cuda(basketSize)
        else:
            z1, z2, z3, z4, z5 = random.chisquare(tDof, size=basketSize) if type == 't' \
                else random.normal(size=5)
            # z1, z2, z3, z4, z5 = chi2.rvs(1, size=5) if type == 't' else random.normal(size=5)
        x1 = z1
        # using the factorised copula procedure
        # $A_i = w_i Z + \sqrt{1 - w_i^2}\,\epsilon_i$
        x2, x3, x4, x5 = [z1 * rho + sqrt(1 - square(rho)) * zn for zn in [z2, z3, z4, z5]]
        # convert to uniform variables via the cdf of the relevant
        # distribution (t or normal)
        if type == 't':
            u1, u2, u3, u4, u5 = [t.cdf(x, 1) for x in [x1, x2, x3, x4, x5]]
        else:
            u1, u2, u3, u4, u5 = [norm.cdf(x) for x in [x1, x2, x3, x4, x5]]
        u = [u1, u2, u3, u4, u5]
        # $\tau_i = -\frac{\log(1-u_i)}{\lambda_i}$
        tau1, tau2, tau3, tau4, tau5 = [-log(1 - u) / lamda[index] for index, u in enumerate(u)]
        result.append({'z1': z1, 'z2': z2, 'z3': z3, 'z4': z4, 'z5': z5,
                       'x1': x1, 'x2': x2, 'x3': x3, 'x4': x4, 'x5': x5,
                       'u1': u1, 'u2': u2, 'u3': u3, 'u4': u4, 'u5': u5,
                       'tau1': tau1, 'tau2': tau2, 'tau3': tau3, 'tau4': tau4, 'tau5': tau5})
    return DataFrame(result)
def SigmaPrior():
    Lambda = 0.2
    m = 5
    DegreeOfFreedom = n_obs + m - 1
    sigma_sq_inv = rand.chisquare(DegreeOfFreedom)
    sigma_sq = dict()
    sigma_sq['Value'] = float(m * Lambda) / sigma_sq_inv
    sigma_sq['Lambda'] = Lambda
    sigma_sq['m'] = m
    return sigma_sq
def chi2_mean_std(self, mean=1., std=0.1):
    """
    Chi-squared random variable with given mean and standard deviation.

    :param mean: target mean
    :param std: target standard deviation
    :return: a single scaled chi-squared draw
    """
    scale = 2. * mean / std
    nu = mean * scale
    return npr.chisquare(nu) / scale
def gen_logit(N):
    Data = []
    for n in range(N):
        x1 = nprd.normal() * 1.414 + 1
        x2 = nprd.chisquare(2)
        d_star = beta_0 + beta_1 * x1 + beta_2 * x2
        p_star = Logistic(d_star)
        d = (1 if nprd.uniform() < p_star else 0)
        Data.append((d, x1, x2))
    return Data
def time_to_mutation_rate(tree):
    if not hasattr(GC, "NUMPY_SEEDED"):
        from numpy.random import seed as numpy_seed
        numpy_seed(seed=GC.random_number_seed)
        GC.random_number_seed += 1
        GC.NUMPY_SEEDED = True
    t = read_tree_newick(tree)
    for node in t.traverse_preorder():
        if node.edge_length is not None:
            node.edge_length *= chisquare(df=GC.tree_rate_df)
    return str(t)
def UpdateSigma():
    Alpha = Parameters.Alpha['Value']
    Lambda = Parameters.Sigma_Sq['Lambda']
    m = Parameters.Sigma_Sq['m']
    v = Log_H - Alpha[0] - Alpha[1] * Log_Lag_H
    Numerator = m * Lambda + np.sum(np.square(v))
    Chi2Draw = rand.chisquare(df=m + len(v) - 1)
    NewValue = Numerator / Chi2Draw
    NewSigma_Sq = Parameters.Sigma_Sq.copy()
    NewSigma_Sq['Value'] = NewValue
    return NewSigma_Sq
def empiricalEW(i, w, p, numSamples=100000):
    runningSum = 0
    for _ in range(numSamples):
        x = chisquare(2)
        eigPart = (p * (1 - p) * x) / (2 * w)
        runningSum += log(eigPart + 1, 2)
    return runningSum / numSamples
def _loop_gain_ll(self, cur_select, cur_update):
    """Loop to fetch latitude/longitude information."""
    failure = 0
    while cur_select.rownumber < cur_select.rowcount:
        try:
            sample_info = cur_select.fetchone()
            self._gain_ll(sample_info, cur_update)
            i = chisquare(0.5)
            time.sleep(i)
        except:
            failure += 1
            print('Failed to fetch latitude/longitude; cumulative failed samples: %d' % failure)
        finally:
            self.conn.commit()
def PlotRandomSVD(nrow=51, ncol=51, std=1, dist='normal'):
    if dist == 'normal':
        Q = random.normal(0, std, nrow * ncol).reshape(nrow, ncol)
    elif dist == 'chisq':
        Q = random.chisquare(std, nrow * ncol).reshape(nrow, ncol)
    else:
        raise ValueError(dist + " Unknown dist choice")
    U, S, V = np.linalg.svd(np.matrix(Q), full_matrices=True)
    plt.plot(np.arange(S.shape[0]), S)
    plt.xlabel('Descending Sorted Order')
    plt.ylabel('%s Random Eigen Values' % (dist.upper()))
def random_numbers():
    print(npr.rand(10))
    print(npr.rand(5, 5))
    # interval a to b, here 5 to 10
    a = 5
    b = 10
    print(npr.rand(10) * (b - a) + a)

    sample_size = 500
    rn1 = npr.rand(sample_size, 3)
    rn2 = npr.randint(0, 10, sample_size)
    rn3 = npr.sample(size=sample_size)
    a = [0, 25, 50, 75, 100]
    rn4 = npr.choice(a, size=sample_size)
    print("rn1 : %s" % rn1)
    print("rn2 : %s" % rn2)
    print("rn3 : %s" % rn3)
    print("rn4 : %s" % rn4)

    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(7, 7))
    ax1.hist(rn1, bins=25, stacked=True)
    ax1.set_title("rand")
    ax1.set_ylabel("frequency")
    ax1.grid(False)
    ax2.hist(rn2, bins=25)
    ax2.set_title("randint")
    ax2.grid(True)
    ax3.hist(rn3, bins=25)
    ax3.set_title("sample")
    ax3.set_ylabel("frequency")
    ax4.hist(rn4, bins=25)
    ax4.set_title("choice")
    ax4.grid(True)
    plt.show()

    # distributions
    sample_size = 500
    rn1 = npr.standard_normal(sample_size)
    rn2 = npr.normal(100, 20, sample_size)
    rn3 = npr.chisquare(df=0.5, size=sample_size)
    rn4 = npr.poisson(lam=1.0, size=sample_size)
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
    ax1.hist(rn1, bins=25)
    ax1.set_title("standard normal")
    ax1.set_ylabel("frequency")
    ax1.grid(True)
    ax2.hist(rn2, bins=25)
    ax2.set_title("normal(100, 20)")
    ax2.set_xlabel("hi")
    ax2.grid(True)
    ax3.hist(rn3)
    ax3.set_title("chi square")
    ax3.set_ylabel("frequency")
    ax3.grid(True)
    ax4.hist(rn4, bins=100)
    ax4.set_title("Poisson")
    plt.show()
def rvs(self):
    # Random normal variates for off-diagonal elements
    n_tril = self.dim * (self.dim - 1) // 2
    covariances = npr.normal(size=n_tril).reshape((n_tril,))
    # Random chi-square variates for diagonal elements
    variances = (np.r_[[npr.chisquare(self.nu - (i + 1) + 1, size=1)**0.5
                        for i in range(self.dim)]].reshape((self.dim,)).T)
    A = np.zeros((self.dim, self.dim))
    # Input the covariances
    tril_idx = np.tril_indices(self.dim, k=-1)
    A[tril_idx] = covariances
    # Input the variances
    diag_idx = np.diag_indices(self.dim)
    A[diag_idx] = variances
    T = np.dot(self.psi_chol, A)
    return np.dot(T, T.T)
def distribute(args):
    n = args.s
    feats = args.f
    if n >= 10**6:
        csv_name = f"{int(n / 10**6)}M"
    else:
        csv_name = f"{int(n / 10**3)}K"
    base = np.random.normal(loc=.5, scale=.5, size=n)
    correlated = []
    epsilons = [np.random.normal(loc=.5, scale=.5, size=n) for _ in range(feats)]
    labels = list(string.ascii_lowercase.upper()[0:10])
    uniform = random.uniform(size=(feats, n))
    normal = random.normal(loc=.5, scale=5, size=(feats, n))
    chisquare = random.chisquare(df=feats, size=(feats, n))
    for index, i in enumerate(epsilons):
        if index == 0:
            correlated.append(base)
        else:
            correlated.append(base + random.uniform(0.5, 1) * i)
    for index, x in enumerate(correlated):
        correlated[index] = (x - min(x)) / (max(x) - min(x))
    for index, x in enumerate(normal):
        normal[index] = (x - min(x)) / (max(x) - min(x))
    for index, x in enumerate(chisquare):
        chisquare[index] = (x - min(x)) / (max(x) - min(x))
    uniform_df = pd.DataFrame(data=uniform.T, columns=labels[0:feats])
    normal_df = pd.DataFrame(data=normal.T, columns=labels[0:feats])
    chisquare_df = pd.DataFrame(data=chisquare.T, columns=labels[0:feats])
    correlated_df = pd.DataFrame(data=np.array(correlated).T, columns=labels[0:feats])
    uniform_df.to_csv(f"{csv_name}x{feats}_uniform.csv", index=False)
    normal_df.to_csv(f"{csv_name}x{feats}_normal.csv", index=False)
    chisquare_df.to_csv(f"{csv_name}x{feats}_chisquare.csv", index=False)
    correlated_df.to_csv(f"{csv_name}x{feats}_correlated.csv", index=False)
def generate_Wishart(n, V):
    """
    Generate a sample from the Wishart distribution.

    Parameters
    ----------
    n: scalar, the number of degrees of freedom (dofs)
    V: array of shape (p, p), the scale matrix

    Returns
    -------
    W: array of shape (p, p), the Wishart draw
    """
    from numpy.linalg import cholesky
    L = cholesky(V)
    p = V.shape[0]
    A = nr.randn(p, p)
    a = np.array([np.sqrt(nr.chisquare(n - i)) for i in range(p)])
    for i in range(p):
        A[i, i:] = 0
        A[i, i] = a[i]
    R = np.dot(L, A)
    W = np.dot(R, R.T)
    return W
def rvs(self, size=1):
    # Random normal variates for off-diagonal elements
    n_tril = self.dim * (self.dim - 1) // 2
    covariances = npr.normal(size=n_tril).reshape((n_tril,))
    # Random chi-square variates for diagonal elements
    variances = (np.r_[[npr.chisquare(self.nu - (i + 1) + 1, size=1)**0.5
                        for i in range(self.dim)]].reshape((self.dim,)).T)
    A = np.zeros((self.dim, self.dim))
    # Input the covariances
    tril_idx = np.tril_indices(self.dim, k=-1)
    A[tril_idx] = covariances
    # Input the variances
    diag_idx = np.diag_indices(self.dim)
    A[diag_idx] = variances
    eye = np.eye(self.dim)
    L, lower = sc.linalg.cho_factor(self.psi, lower=True)
    inv_scale = sc.linalg.cho_solve((L, lower), eye)
    C = sc.linalg.cholesky(inv_scale, lower=True)
    trtrs = get_lapack_funcs(('trtrs'), (A,))
    T = np.dot(C, A)
    if self.dim > 1:
        T, _ = trtrs(T, eye, lower=True)
    else:
        T = 1. / T
    return np.dot(T.T, T)
ax3.hist(rn3, bins=25)
ax3.set_title('sample')
ax3.set_ylabel('frequency')
ax3.grid(True)
ax4.hist(rn4, bins=25)
ax4.set_title('choice')
ax4.grid(True)
plt.show()

## Generating random numbers from distributions.
# A few selected functions from the numpy random library
sample_size = 500
rn1 = npr.standard_normal(sample_size)          # standard normal distribution
rn2 = npr.normal(100, 20, sample_size)          # normal distribution
rn3 = npr.chisquare(df=0.5, size=sample_size)   # chi-square distribution
rn4 = npr.poisson(lam=1.0, size=sample_size)    # Poisson distribution

fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(7, 7))
ax1.hist(rn1, bins=25)
ax1.set_title('standard normal')
ax1.set_ylabel('frequency')
ax1.grid(True)
ax2.hist(rn2, bins=25)
ax2.set_title('normal (100,20)')
ax2.grid(True)
ax3.hist(rn3, bins=25)
ax3.set_title('chi square')
ax3.set_ylabel('frequency')
ax3.grid(True)
def _numpy(self, loc=0.0, scale=1.0, size=(1,)):
    return lambda: nr.chisquare(df=self.df, size=size) * scale + loc
# chi-square distribution
# used as a basis to verify a hypothesis
# It has two parameters:
# df - degrees of freedom
# size - the shape of the returned array

# Draw a sample from the chi-square distribution with 2 degrees of freedom and shape 2x3
from numpy import random

x = random.chisquare(df=2, size=(2, 3))
print(x)

# visualization of the chi-square distribution
# from numpy import random
import matplotlib.pyplot as plt
import seaborn as sns

sns.distplot(random.chisquare(df=1, size=1000), hist=False)
plt.show()
import numpy.random as rnd
from matplotlib import pyplot as plt

labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
          11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
          21, 22, 23, 24, 25, 26, 27, 28, 29, 30]

rnd01 = rnd.normal(0, 2, 30)
rnd02 = rnd.chisquare(1, 30)
rnd03 = rnd.exponential(1, 30)

plt.subplot(1, 3, 1)
plt.scatter(x=labels, y=rnd01)
plt.xlabel('normal')
plt.subplot(1, 3, 2)
plt.scatter(x=labels, y=rnd02)
plt.xlabel('chisquare')
plt.subplot(1, 3, 3)
plt.scatter(x=labels, y=rnd03)
plt.xlabel('exponential')
plt.show()
def np_chi_square_distribution():
    x = random.chisquare(df=2, size=(2, 3))
    print(x)
    sns.distplot(random.chisquare(df=1, size=1000), hist=False)
    plt.show()
def rstudent(mu, lam, alpha):
    X = R.chisquare(alpha, mu.shape)
    Z = R.standard_normal(mu.shape)
    return mu + Z * sqrt(alpha / X) / sqrt(lam)
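# Illustrative check (an addition, assuming `R` is numpy.random and `sqrt` is numpy.sqrt):
# rstudent(mu, lam, alpha) draws Student-t variates with location mu, precision lam and
# alpha degrees of freedom, so for alpha = 5, mu = 0 and lam = 1 the sample standard
# deviation should be close to sqrt(alpha / (alpha - 2)) = sqrt(5 / 3).
import numpy as np
import numpy.random as R
from numpy import sqrt

draws = rstudent(np.zeros(100000), 1.0, 5.0)
print(np.std(draws), sqrt(5.0 / 3.0))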
# normalizedH = h / sqrt(n)
e1 = np.linalg.eig(normalizedC)
# e2 = np.linalg.eig(normalizedH)
# print(e1[0])
# print(e2[0])
# x1 = sorted(np.multiply(e1[0], np.conjugate(e1[0])))
# x2 = sorted(np.multiply(e2[0], np.conjugate(e2[0])))
x1 = np.multiply(e1[0], np.conjugate(e1[0]))
# x2 = np.multiply(e2[0], np.conjugate(e2[0]))
# print(x1)
x3 = chisquare(2, int((n - 1) / 2))
# print(x3.shape)
x3 = sorted(np.repeat(x3, 2))
# print(x3)
pl.plot(x1[:-1], "b-")
# pl.plot(x2[:-1], "r-")
# pl.plot(x3, "g-")
pl.show()
sys.exit()

if False:
def f(x, n):
    """Chi-squared probability density function."""
    return ((exp(-x / 2)) * x**((n / 2) - 1)) / ((2**(n / 2)) * sp.gamma(n / 2))


def Prob(chi, n):
    """Return the probability that X**2 exceeds a given limit."""
    # return 1 - integrate.quad(lambda x: f(x, n), 0, alpha)[0]
    return st.chi2.sf(chi, n)


# Main
N = 10000          # number of repetitions
n = 10.            # degrees of freedom
mu = 130           # mean
sigma = sqrt(n)    # standard deviation
x_arr = linspace(0, 50, 1000, endpoint=False)  # grid of x values
chi = chisquare(n, N)  # random chi-squared sample

# Draw the figure
plt.figure(1, figsize=[15, 8])
# plt.subplot(111)
# plt.hist(chi, bins=300, label='data', normed=True)
# plt.plot(x_arr, f(x_arr, n), '-', color='r', label='chi-squared function')
# plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
plt.subplot(111)
plt.axis([0, 50, 0, 0.16])
plt.hist(distribucion(chiqua, mu, sigma, n, N), bins=100, label='data', normed=True)
plt.plot(x_arr, f(x_arr, n), '-', color='r', label='chi-squared function')
# plt.plot(x_arr, f(x_arr, n * 3.5), '-', color='y', label='new approximation of the chi-squared function')
plt.legend(loc=1)
# The chi-square distribution is used as a basis to verify hypotheses.
# It has two parameters:
# df - degrees of freedom
# size - shape of the returned array
from numpy import random
import matplotlib.pyplot as plt
import seaborn as sns

arr1 = random.chisquare(df=1000, size=10)
print(arr1)

sns.distplot(arr1, hist=False)
plt.show()
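# Added illustration: for a chi-square distribution with df = k the mean is k and the
# variance is 2k, so a large sample drawn with df=1000 should average close to 1000.
big_sample = random.chisquare(df=1000, size=100000)
print(big_sample.mean(), big_sample.var())   # roughly 1000 and 2000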
ax2.set_title('randint')
ax2.grid(True)
ax3.hist(rn3, bins=25)
ax3.set_title('sample')
ax3.set_ylabel('frequency')
ax3.grid(True)
ax4.hist(rn4, bins=25)
ax4.set_title('choice')
ax4.grid(True)

# Visualize random draws from distributions
sample_size = 500
rn1 = npr.standard_normal(sample_size)
rn2 = npr.normal(100, 20, sample_size)
rn3 = npr.chisquare(df=0.5, size=sample_size)
rn4 = npr.poisson(lam=1.0, size=sample_size)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(7, 7))
ax1.hist(rn1, bins=25, stacked=True)
ax1.set_title('standard normal')
ax1.set_ylabel('frequency')
ax1.grid(True)
ax2.hist(rn2, bins=25)
ax2.set_title('normal(100,20)')
ax2.grid(True)
ax3.hist(rn3, bins=25)
ax3.set_title('chi square')
ax3.set_ylabel('frequency')
ax3.grid(True)
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt

sample_size = 500
rn1 = npr.standard_normal(sample_size)
rn2 = npr.normal(100, 20, sample_size)
rn3 = npr.chisquare(df=0.5, size=sample_size)
rn4 = npr.poisson(lam=1.0, size=sample_size)
print(rn1)
print(rn2)
print(rn3)
print(rn4)

fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(7, 7))
ax1.hist(rn1, bins=25)
ax1.set_title('standard normal')
ax1.set_ylabel('frequency')
ax1.grid(True)
ax2.hist(rn2, bins=25)
ax2.set_title('normal(100, 20)')
ax2.grid(True)
ax3.hist(rn3, bins=25)
ax3.set_title('chi square')
ax3.set_ylabel('frequency')
ax3.grid(True)
ax4.hist(rn4, bins=25)
ax4.set_title('Poisson')
ax4.grid(True)