def __init__(self, tilt):
    super(MySource, self).__init__(tilt)
    # not much to do here
    self.stddev = 0.0033 / 3
    self.mean = tilt
    self.anggenerator = norm(loc=self.mean, scale=self.stddev)

    # make a pdf based on the sum of four Lorentzians
    ka11 = cauchy(loc=8047.837, scale=2.285)
    ka12 = cauchy(loc=8045.637, scale=3.358)
    ka21 = cauchy(loc=8027.994, scale=2.667)
    ka22 = cauchy(loc=8026.504, scale=3.571)

    # noinspection PyMethodOverriding
    class kadist(rv_continuous):
        def _pdf(self, x):
            return (0.957 * ka11.pdf(x) + 0.090 * ka12.pdf(x) +
                    0.334 * ka21.pdf(x) + 0.111 * ka22.pdf(x)) / 1.492

        def _cdf(self, x):
            return (0.957 * ka11.cdf(x) + 0.090 * ka12.cdf(x) +
                    0.334 * ka21.cdf(x) + 0.111 * ka22.cdf(x)) / 1.492

    self.egenerator = kadist()
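# Usage sketch (illustrative, not from the original source; assumes the full
# MySource class is defined): the two frozen generators can be sampled
# directly. Sampling from kadist works because rv_continuous numerically
# inverts the supplied _cdf.
src = MySource(tilt=0.0)
angles = src.anggenerator.rvs(size=1000)    # Gaussian spread of beam tilts
energies = src.egenerator.rvs(size=1000)    # Cu K-alpha photon energies (eV)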
def main():
    # Define our test distribution: a mix of Cauchy-distributed variables
    np.random.seed(0)
    x = np.concatenate([stats.cauchy(-5, 1.8).rvs(500),
                        stats.cauchy(-4, 0.8).rvs(2000),
                        stats.cauchy(-1, 0.3).rvs(500),
                        stats.cauchy(2, 0.8).rvs(1000),
                        stats.cauchy(4, 1.5).rvs(500)])

    # truncate values to a reasonable range
    x = x[(x > -15) & (x < 15)]

    pl.figure()
    pl.hist(x, bins=10, density=True)

    # Histogram with more bins
    pl.figure()
    pl.hist(x, bins=100, density=True)

    pl.figure()
    # plot a standard histogram in the background, with alpha transparency
    pl.hist(x, bins=200, histtype='stepfilled', alpha=0.2, density=True)
    # plot an adaptive-width histogram on top
    pl.hist(x, bins=bayesian_blocks(x), color='black', histtype='step',
            density=True)

    pl.show()
def cauchyNumbers():
    for size in sizes:
        fig, ax = plt.subplots(1, 1)
        ax.hist(cauchy.rvs(size=size), histtype='stepfilled',
                alpha=0.5, color='blue', density=True)
        x = np.linspace(cauchy().ppf(0.01), cauchy().ppf(0.99), 100)
        ax.plot(x, cauchy().pdf(x), '-')
        ax.set_title('CauchyNumbers n = ' + str(size))
        ax.set_xlabel('CauchyNumbers')
        ax.set_ylabel('density')
        plt.grid()
        plt.show()
    return
def dataY(k, l):
    if k == 1:
        return scs.norm(0, l).pdf(dataX(k))
    if k == 2:
        return scs.norm(0, l).cdf(dataX(k))
    if k == 3:
        return scs.cauchy().pdf(dataX(k))
    if k == 4:
        return scs.cauchy().cdf(dataX(k))
    if k == 5:
        return scs.gamma(l).pdf(dataX(k))
    if k == 6:
        return scs.gamma(l).cdf(dataX(k))
def __init__(self, sad_h5_file, sad_key="SAD",
             compute_norm=True, recompute_norm=False):
    self.sad_h5_file = sad_h5_file
    self.sad_h5_open = h5py.File(self.sad_h5_file, "r")
    self.sad_matrix = self.sad_h5_open[sad_key]
    self.num_snps, self.num_targets = self.sad_matrix.shape

    self.target_ids = np.array(
        [tl.decode("UTF-8") for tl in self.sad_h5_open["target_ids"]])
    self.target_labels = np.array(
        [tl.decode("UTF-8") for tl in self.sad_h5_open["target_labels"]])

    # read SAD percentile indexes into memory
    self.pct_sad = np.array(self.sad_h5_open["SAD_pct"])

    # read percentiles
    self.percentiles = np.around(self.sad_h5_open["percentiles"], 3)
    self.percentiles = np.append(self.percentiles, self.percentiles[-1])

    if compute_norm:
        # fit, if not present
        if recompute_norm or "target_cauchy_fit_loc" not in self.sad_h5_open:
            self.fit_cauchy()

        # make target-specific fit Cauchys
        target_cauchy_fit_params = zip(
            self.sad_h5_open["target_cauchy_fit_loc"],
            self.sad_h5_open["target_cauchy_fit_scale"],
        )
        self.target_cauchy_fit = [cauchy(*cp) for cp in target_cauchy_fit_params]

        # choose normalizing values, if not present
        if recompute_norm or "target_cauchy_norm_loc" not in self.sad_h5_open:
            self.norm_cauchy()

        # make target-specific normalizing Cauchys
        target_cauchy_norm_params = zip(
            self.sad_h5_open["target_cauchy_norm_loc"],
            self.sad_h5_open["target_cauchy_norm_scale"],
        )
        self.target_cauchy_norm = [cauchy(*cp) for cp in target_cauchy_norm_params]
def get_characteristic(distrib, n):
    if distrib == "Standard normal":
        x_sample = st.norm(0, 1)
    elif distrib == "Uniform":
        x_sample = st.uniform(loc=-3 ** 0.5, scale=2 * (3 ** 0.5))
    elif distrib == "Cauchy":
        x_sample = st.cauchy(loc=0, scale=1)
    elif distrib == "Laplace":
        x_sample = st.laplace(loc=0, scale=(2 ** (-0.5)))
    elif distrib == "Poisson":
        x_sample = st.poisson(mu=7)
    else:
        x_sample = np.ndarray(shape=(1, n))

    result = defaultdict(list)
    for k in range(1000):
        x_n = x_sample.rvs(n)
        result["sample_mean"].append(np.mean(x_n))
        result["med"].append(np.median(x_n))
        result["range"].append((np.amax(x_n) + np.amin(x_n)) / 2)
        result["quart_range"].append(quartile_range(x_n, n))
        result["trunc_mean"].append(trunc_mean(x_n, n))
    return result
def ajuste_lorentz(x, a, b, A, mu, sigma):
    '''
    Fit of a straight line minus a Lorentz profile; the parameters
    (a, b) belong to the line and (A, mu, sigma) to the Lorentz profile.
    '''
    y = (a + b * x) - A * scist.cauchy(loc=mu, scale=sigma).pdf(x)
    return y
def __init__(self, m_latent, b_latent, sigma):
    self._sigma = sigma
    self._dist = stats.cauchy(scale=sigma, loc=0)
    super(CauchyData, self).__init__(m_latent, b_latent)
def Modelo_lorentz(x, a, b, A, mu, sigma):
    """
    Returns values of the Lorentzian model function evaluated at a
    point or set of points.
    """
    f = (a * x + b) - A * stats.cauchy(loc=mu, scale=sigma).pdf(x)
    return f
def adjust_noise(optimizer, epoch, args, noise_dict):
    keys = sorted(noise_dict.keys())
    select = keys[0]
    for k in keys:
        select = k
        if k > epoch:
            break

    if args.noisemodel == 'johnson':
        noise_model = st.johnsonsu(*noise_dict[select])
    if args.noisemodel == 'gaussian':
        print("GAUSSIAN NOISE set")
        noise_model = st.norm(*noise_dict[select])
    if args.noisemodel == 'laplace':
        noise_model = st.laplace(*noise_dict[select])
    if args.noisemodel == 'cauchy':
        noise_model = st.cauchy(*noise_dict[select])
    if args.noisemodel == 'gennorm':
        noise_model = st.gennorm(*noise_dict[select])
    if args.noisemodel == 'studentt':
        noise_model = st.t(*noise_dict[select])

    print('noise model is', args.noisemodel, noise_dict[select])
    optimizer.noise_generator = noise_model
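# An equivalent selection of the schedule key using bisect (a sketch, not from
# the original, with the same semantics as the loop above: pick the first
# threshold strictly greater than epoch, falling back to the largest key).
import bisect

def select_noise_key(noise_dict, epoch):
    keys = sorted(noise_dict.keys())
    i = bisect.bisect_right(keys, epoch)
    return keys[min(i, len(keys) - 1)]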
def get_functions():
    rv_n = norminvgauss(1, 0)
    rv_l = laplace(scale=1 / m.sqrt(2), loc=0)
    rv_p = poisson(10)
    rv_c = cauchy()
    rv_u = uniform(loc=-m.sqrt(3), scale=2 * m.sqrt(3))
    return [rv_n, rv_l, rv_p, rv_c, rv_u]
def cauchy_CLT(outer_n=100, inner_n=20, density=False, showMe=False):
    dist = stats.cauchy()
    f = plt.figure(figsize=(20, 10))
    ax1 = f.add_subplot(121)
    ax2 = f.add_subplot(122)

    x1 = np.linspace(dist.ppf(0.01), dist.ppf(0.99), 100)
    ax1.plot(x1, dist.pdf(x1))

    n_bins = int(np.log10(outer_n) * 50)
    x_bar = np.array([np.mean(dist.rvs(size=inner_n)) for i in range(outer_n)])
    if not density:
        ax2.hist(x_bar, bins=n_bins, density=True, label='sample means')
    else:
        sns.kdeplot(x_bar, ax=ax2, shade=True, label='sample means')

    if not showMe:
        x = np.linspace(stats.norm.ppf(0.001), stats.norm.ppf(0.999), 100)
        ax2.plot(x, stats.norm.pdf(x), color='black', label='standard normal')
    else:
        ax2.plot(x1, dist.pdf(x1), label='Cauchy')

    ax2.legend()
    sns.despine(fig=f, trim=True)
    plt.show()
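# Why the CLT comparison above fails (illustration, not in the original): the
# Cauchy distribution is stable, so the mean of n i.i.d. standard-Cauchy draws
# is again standard Cauchy and never approaches a normal.
import numpy as np
from scipy import stats

means = stats.cauchy().rvs(size=(5000, 20), random_state=0).mean(axis=1)
print(stats.kstest(means, stats.cauchy().cdf))  # large p-value: still Cauchy(0, 1)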
def testCauchyLogPDFMultidimensional(self):
    batch_size = 6
    loc = tf.constant([[3.0, -3.0]] * batch_size, dtype=tf.float32)
    scale = tf.constant(
        [[np.sqrt(10.0, dtype=np.float32),
          np.sqrt(15.0, dtype=np.float32)]] * batch_size)
    x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
    cauchy = tfd.Cauchy(loc=loc, scale=scale, validate_args=True)

    log_pdf = cauchy.log_prob(x)
    log_pdf_values = self.evaluate(log_pdf)
    self.assertEqual(log_pdf.shape, (6, 2))
    self.assertAllEqual(self.evaluate(cauchy.batch_shape_tensor()), log_pdf.shape)
    self.assertAllEqual(self.evaluate(cauchy.batch_shape_tensor()),
                        self.evaluate(log_pdf).shape)
    self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
    self.assertAllEqual(cauchy.batch_shape, self.evaluate(log_pdf).shape)

    pdf = cauchy.prob(x)
    pdf_values = self.evaluate(pdf)
    self.assertEqual(pdf.shape, (6, 2))
    self.assertAllEqual(self.evaluate(cauchy.batch_shape_tensor()), pdf.shape)
    self.assertAllEqual(self.evaluate(cauchy.batch_shape_tensor()),
                        pdf_values.shape)
    self.assertAllEqual(cauchy.batch_shape, pdf.shape)
    self.assertAllEqual(cauchy.batch_shape, pdf_values.shape)

    expected_log_pdf = sp_stats.cauchy(self.evaluate(loc),
                                       self.evaluate(scale)).logpdf(x)
    self.assertAllClose(expected_log_pdf, log_pdf_values)
    self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testCauchyLogPDF(self):
    batch_size = 6
    loc = tf.constant([3.0] * batch_size)
    scale = tf.constant([np.sqrt(10.0, dtype=np.float32)] * batch_size)
    x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
    cauchy = tfd.Cauchy(loc=loc, scale=scale, validate_args=True)

    log_pdf = cauchy.log_prob(x)
    self.assertAllEqual(self.evaluate(cauchy.batch_shape_tensor()), log_pdf.shape)
    self.assertAllEqual(self.evaluate(cauchy.batch_shape_tensor()),
                        self.evaluate(log_pdf).shape)
    self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
    self.assertAllEqual(cauchy.batch_shape, self.evaluate(log_pdf).shape)

    pdf = cauchy.prob(x)
    self.assertAllEqual(self.evaluate(cauchy.batch_shape_tensor()), pdf.shape)
    self.assertAllEqual(self.evaluate(cauchy.batch_shape_tensor()),
                        self.evaluate(pdf).shape)
    self.assertAllEqual(cauchy.batch_shape, pdf.shape)
    self.assertAllEqual(cauchy.batch_shape, self.evaluate(pdf).shape)

    expected_log_pdf = sp_stats.cauchy(self.evaluate(loc),
                                       self.evaluate(scale)).logpdf(x)
    self.assertAllClose(expected_log_pdf, self.evaluate(log_pdf))
    self.assertAllClose(np.exp(expected_log_pdf), self.evaluate(pdf))
def __init__(self, loc=0, scale=1):
    super(Cauchy, self).__init__()
    self._distr = stats.cauchy(loc=loc, scale=scale)
    self._loc = loc
    self._scale = scale
    self._lower_bound = 0
    self._upper_bound = 1.5 / (math.pi * self._scale)
def __init__(self, seed=42, num_inputs=11, kind='chisq', balance=0.5,
             noise='gauss', spread=0.1):
    """
    Initialize the instance

    :param seed: Int. The seed for the numpy random state.
    :param num_inputs: Int. Number of inputs in the input vector of each event.
    :param kind: String. Type of event stream to generate.
    :param balance: Float between 0.0 and 1.0. Fraction of the 0 class in the
        event stream.
    :param noise: String. One of 'gauss', 'uniform'.
    :param spread: Float. Spread of the noise in terms of percentiles of the
        distribution.
    :return:
    """
    assert isinstance(seed, int)
    assert isinstance(num_inputs, int)
    assert kind in ('chisq', 'cauchy')
    assert noise in ('gauss', 'uniform')
    assert isinstance(balance, float)
    assert (balance >= 0.) and (balance <= 1.)

    self.seed = seed
    np.random.seed(seed)
    self.num_inputs = num_inputs
    self.kind = kind

    if kind == 'chisq':
        self.cutoff = chi2.isf(balance, df=num_inputs)
        self.spread = (chi2.isf(balance + spread / 2., df=num_inputs) -
                       chi2.isf(balance - spread / 2., df=num_inputs))
    elif kind == 'cauchy':
        # only implemented for an even number of inputs
        assert self.num_inputs % 2 == 0
        self.cauchy = cauchy(0., np.sqrt(self.num_inputs) / np.pi)
        self.cutoff = self.cauchy.isf(balance)
        self.spread = (self.cauchy.isf(balance - spread / 2.) -
                       self.cauchy.isf(balance + spread / 2.))
        print('spread', self.spread)

    self.noise = noise
def distributions():
    return [
        norm(loc=0, scale=1),
        laplace(scale=1 / numpy.sqrt(2), loc=0),
        poisson(10),
        cauchy(),
        uniform(loc=-numpy.sqrt(3), scale=2 * numpy.sqrt(3)),
    ]
def drawCauchy(size):
    fc = cauchy(0, 1)
    x = np.linspace(fc.ppf(0.01), fc.ppf(0.99), 1000)
    plt.plot(x, fc.pdf(x), 'k', lw=2)
    bins = fc.rvs(size=size)
    plt.tight_layout()
    sb.histplot(bins, stat="density", alpha=0.5, color="blue")
def cell1():
    grid = np.linspace(-5, 5, 1000).reshape((-1, 1))
    draw_likelihood(sps.norm(loc=grid).pdf, grid,
                    [[-1, 1], [-5, 5], [-1, 5]], '$\\mathcal{N}(\\theta, 1)$')
    draw_likelihood(sps.expon(loc=grid).pdf, grid,
                    [[1, 2], [0.1, 1], [1, 10]], '$Exp(\\theta)$')
    draw_likelihood(sps.uniform(scale=grid).pdf, grid,
                    [[0.2, 0.8], [0.5, 1], [0.5, 1.3]], '$U[0, \\theta]$')
    draw_likelihood(sps.binom(n=5, p=grid).pmf, grid,
                    [[0, 1], [5, 5], [0, 5]], '$Bin(5, \\theta)$')
    draw_likelihood(sps.poisson(mu=grid).pmf, grid,
                    [[0, 1], [0, 10], [5, 10]], '$Pois(\\theta)$')
    draw_likelihood(sps.cauchy(loc=grid).pdf, grid,
                    [[-0.5, 0.5], [-2, 2], [-4, 0, 4]], '$Cauchy(\\theta)$')
def make_sing_multtrip(nu_max):
    import numpy as np
    from scipy.stats import cauchy

    freqs = np.arange(1800, 4000, 4)
    N = freqs.size
    lp = cauchy(0).pdf(np.arange(-1, 1, 1. / 2))
    other_mode = 25
    i = int(np.where(nu_max == freqs)[0][0])
    act_offset = 8  # fixed distance
    half = lp.size // 2

    spec = np.zeros(N)
    spec[i - half:i + half] = 2 * lp
    pos = np.arange(i - half, i + half)
    spec[pos - act_offset] = 0.5 * spec[pos]
    spec[pos + act_offset] = 0.5 * spec[pos]

    pos = np.arange(i - half, i + half) + other_mode
    spec[pos] = 2 * lp
    spec[pos - act_offset] = 0.5 * spec[pos]
    spec[pos + act_offset] = 0.5 * spec[pos]

    pos = np.arange(i - half, i + half) - other_mode
    spec[pos] = 2 * lp
    spec[i - lp.size - 5:i - 5] = 1 * lp
    spec[pos - act_offset] = 0.5 * spec[pos]
    spec[pos + act_offset] = 0.5 * spec[pos]

    # note: randint(1, high=2) always returns 1, as in the original
    rand_noise = np.random.random(N) * np.random.randint(1, high=2)
    sig = spec
    sig /= np.max(sig)  # sig aliases spec, so this normalizes spec in place
    sig_noise = spec + rand_noise
    sig_noise /= np.max(sig_noise)
    return (sig, sig_noise)
class DistHandler:
    """
    Handles five distributions: Normal(0, 1), Cauchy(0, 1),
    Laplace(0, 1/sqrt(2)), Poisson(10), Uniform(-sqrt(3), sqrt(3)).
    """
    distributions = {
        'Normal': ss.norm(loc=0, scale=1),
        'Cauchy': ss.cauchy(loc=0, scale=1),
        'Laplace': ss.laplace(loc=0, scale=1. / sqrt(2.)),
        'Poisson': ss.poisson(mu=10),
        'Uniform': ss.uniform(loc=-sqrt(3.), scale=2 * sqrt(3.)),
    }

    @staticmethod
    def get_sample(dist_name, n, with_random_seed=False):
        dist = DistHandler.distributions[dist_name]
        if with_random_seed:
            return dist.rvs(n, random_state=RANDOM_SEED)
        return dist.rvs(n)

    @staticmethod
    def get_pdf_data(sample, dist_name, bounds=None, n_splits=1000):
        dist = DistHandler.distributions[dist_name]
        sample_min = sample.min()
        sample_max = sample.max()
        if dist_name == 'Poisson':
            x = np.arange(sample_max + 1) if not bounds else np.arange(*bounds)
            y = dist.pmf(x)
        else:
            x = (np.linspace(sample_min, sample_max, n_splits)
                 if not bounds else np.linspace(*bounds, n_splits))
            y = dist.pdf(x)
        return x, y

    @staticmethod
    def get_cdf_data(sample, dist_name, linspace_params, n_splits=1000):
        # the CDF is evaluated the same way for all five distributions
        dist = DistHandler.distributions[dist_name]
        x = np.linspace(*linspace_params, n_splits)
        y = dist.cdf(x)
        return x, y
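# Example usage of DistHandler (a sketch, not from the original; assumes numpy,
# matplotlib, and the module constant RANDOM_SEED are available as above).
sample = DistHandler.get_sample('Cauchy', 1000)
x, y = DistHandler.get_pdf_data(sample, 'Cauchy', bounds=(-10, 10))
plt.hist(sample[np.abs(sample) < 10], bins=50, density=True, alpha=0.5)
plt.plot(x, y)
plt.show()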
def test_pdf():
    """
    Test pdf.
    """
    cauchy_benchmark = stats.cauchy(np.array([3.0]), np.array([[2.0], [4.0]]))
    expect_pdf = cauchy_benchmark.pdf([1.0, 2.0]).astype(np.float32)
    pdf = Prob()
    output = pdf(Tensor([1.0, 2.0], dtype=dtype.float32))
    tol = 1e-6
    assert (np.abs(output.asnumpy() - expect_pdf) < tol).all()
def test_log_cdf():
    """
    Test log cdf.
    """
    cauchy_benchmark = stats.cauchy(np.array([3.0]), np.array([[2.0], [4.0]]))
    expect_logcdf = cauchy_benchmark.logcdf([1.0, 2.0]).astype(np.float32)
    logcdf = LogCDF()
    output = logcdf(Tensor([1.0, 2.0], dtype=dtype.float32))
    tol = 5e-5
    assert (np.abs(output.asnumpy() - expect_logcdf) < tol).all()
def test_log_survival():
    """
    Test log_survival.
    """
    cauchy_benchmark = stats.cauchy(np.array([3.0]), np.array([[2.0], [4.0]]))
    expect_log_survival = cauchy_benchmark.logsf([1.0, 2.0]).astype(np.float32)
    log_survival = LogSF()
    output = log_survival(Tensor([1.0, 2.0], dtype=dtype.float32))
    tol = 2e-5
    assert (np.abs(output.asnumpy() - expect_log_survival) < tol).all()
def test_log_likelihood():
    """
    Test log_pdf.
    """
    cauchy_benchmark = stats.cauchy(np.array([3.0]), np.array([[2.0], [4.0]]))
    expect_logpdf = cauchy_benchmark.logpdf([1.0, 2.0]).astype(np.float32)
    logprob = LogProb()
    output = logprob(Tensor([1.0, 2.0], dtype=dtype.float32))
    tol = 1e-6
    assert (np.abs(output.asnumpy() - expect_logpdf) < tol).all()
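# A pure-scipy sketch of the broadcasting used by the four benchmarks above
# (not part of the original tests): loc has shape (1,) and scale shape (2, 1),
# so evaluating at two points gives a (2, 2) array; it also checks the
# identity logsf(x) = log(1 - cdf(x)).
import numpy as np
from scipy import stats

bench = stats.cauchy(np.array([3.0]), np.array([[2.0], [4.0]]))
print(bench.logpdf([1.0, 2.0]).shape)  # (2, 2)
np.testing.assert_allclose(bench.logsf([1.0, 2.0]),
                           np.log1p(-bench.cdf([1.0, 2.0])))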
def __init__(self, tilt):
    super(Monoangle, self).__init__(tilt)
    self.tilt = tilt

    # make a pdf based on the sum of four Lorentzians
    ka11 = cauchy(loc=8047.837, scale=2.285)
    ka12 = cauchy(loc=8045.637, scale=3.358)
    ka21 = cauchy(loc=8027.994, scale=2.667)
    ka22 = cauchy(loc=8026.504, scale=3.571)

    # noinspection PyMethodOverriding
    class kadist(rv_continuous):
        def _pdf(self, x):
            return (0.957 * ka11.pdf(x) + 0.090 * ka12.pdf(x) +
                    0.334 * ka21.pdf(x) + 0.111 * ka22.pdf(x)) / 1.492

        def _cdf(self, x):
            return (0.957 * ka11.cdf(x) + 0.090 * ka12.cdf(x) +
                    0.334 * ka21.cdf(x) + 0.111 * ka22.cdf(x)) / 1.492

    self.egenerator = kadist()
def __init__(self, location=None, scale=None):
    self.location = location
    self.scale = scale
    self.bounds = np.array([-np.inf, np.inf])
    self.skewness = np.nan
    self.kurtosis = np.nan
    if self.scale is not None:
        self.x_range_for_pdf = np.linspace(-15 * self.scale, 15 * self.scale,
                                           RECURRENCE_PDF_SAMPLES)
        self.parent = cauchy(loc=self.location, scale=self.scale)
        self.mean = np.mean(self.get_samples(m=1000))
        self.variance = np.var(self.get_samples(m=1000))
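# Caveat sketch (not in the original): the Cauchy distribution has no finite
# mean or variance, so the sample-based self.mean and self.variance above can
# change wildly from one batch of 1000 samples to the next.
import numpy as np
from scipy.stats import cauchy

print([np.mean(cauchy(0, 1).rvs(1000, random_state=s)) for s in range(3)])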
def __init__(self, n_dimensions, mean, scale, cov=0):
    # "mean"
    self.n_dimensions = n_dimensions
    self.mean = mean
    self.scale = scale
    self.cov = cov
    if self.cov == 0:  # independent
        self.dist = stats.cauchy([self.mean] * self.n_dimensions,
                                 [self.scale] * self.n_dimensions)
    else:
        raise NotImplementedError
def Cauchy(self):
    rv = sts.cauchy()
    # evaluate the CDF on the array defined in the constructor
    cdf = rv.cdf(self.arr)
    print('Array of values following the Cauchy distribution law:\n', cdf)
    plt.title('Cauchy distribution')
    plt.plot(self.arr, cdf)
    plt.ylabel('$F(x)$')
    plt.xlabel('$x$')
    plt.grid(True)
    plt.show()
def Cauchy():
    for s in size:
        density = cauchy()
        histogram = cauchy.rvs(size=s)
        fig, ax = plt.subplots(1, 1)
        ax.hist(histogram, density=True, alpha=0.6)
        x = np.linspace(density.ppf(0.01), density.ppf(0.99), 100)
        ax.plot(x, density.pdf(x), LINE_TYPE, lw=1.5)
        ax.set_xlabel("CAUCHY")
        ax.set_ylabel("DENSITY")
        ax.set_title("SIZE: " + str(s))
        plt.grid()
        plt.show()
def __init__(self, location=None, scale=None):
    self.location = location
    self.scale = scale
    self.bounds = np.array([-np.inf, np.inf])
    self.skewness = np.nan
    self.kurtosis = np.nan
    if self.scale is not None:
        self.x_range_for_pdf = np.linspace(-15 * self.scale, 15 * self.scale,
                                           RECURRENCE_PDF_SAMPLES)
        self.parent = cauchy(loc=self.location, scale=self.scale)
        self.mean = np.mean(self.getSamples(m=1000))
        self.variance = np.var(self.getSamples(m=1000))
def Hist_Cauchy():
    cauchy_scale = 1  # parameters
    cauchy_loc = 0
    cauchy_label = "Cauchy distribution"
    cauchy_color = "grey"
    for size in selection_size:
        fig, ax = plt.subplots(1, 1)
        pdf = cauchy()
        random_values = cauchy.rvs(size=size)
        ax.hist(random_values, density=True, histtype=HIST_TYPE,
                alpha=hist_visibility, color=cauchy_color)
        Create_plot(ax, cauchy_label, pdf, size)
def genF(i):
    """ F in (0, 1] """
    lst = []
    while len(lst) < i.np:
        temp = cauchy(i.u_f, 0.1).rvs()
        if temp >= 1:
            lst.append(1)
        elif temp <= 0:
            continue
        else:
            lst.append(temp)
    i.fa = lst[:]
    return
def gen_f(self):
    """ F in (0, 1] """
    lst = []
    while len(lst) < self.np:
        temp = cauchy(self.u_f, 0.1).rvs()
        if temp >= 1:
            lst.append(1)
        elif temp <= 0:
            continue
        else:
            lst.append(temp)
    self.fa = lst[:]
    return
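# A vectorized sketch of the same truncated draw (assumption: identical
# semantics to gen_f above -- values >= 1 are clipped to 1, values <= 0 are
# redrawn), avoiding one scipy call per accepted sample.
import numpy as np
from scipy.stats import cauchy

def sample_f(u_f, size):
    out = np.empty(0)
    while out.size < size:
        draw = cauchy(u_f, 0.1).rvs(size=size)
        out = np.concatenate([out, np.minimum(draw, 1.0)[draw > 0]])
    return out[:size]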
import numpy as np
import pandas as pd
from scipy.stats import cauchy

freqs = np.arange(1800, 4000, 5)
N = freqs.size
lp = cauchy(0).pdf(np.arange(-1, 1, 1. / 10))
nu_max = 0
offset = 0
half = lp.size // 2

for i in range(offset + lp.size + 5, N - lp.size - 5 - offset):
    act_offset = 0
    spec = np.zeros(N)
    spec[i - half:i + half] = 1 * lp
    spec[i - lp.size - 5:i - 5] = .5 * lp
    pos = np.arange(i - lp.size - 5, i - 5, 1)
    spec[pos - act_offset] = spec[pos]
    spec[i + 5:i + lp.size + 5] = .5 * lp
    rand_noise = np.random.random(N) * np.random.randint(1, high=3)
    df = pd.DataFrame({'Frequency': freqs,
                       'l0': spec,
                       'noise': rand_noise})
    df.to_csv('/home/rakesh/Fake_Data/Test/Spec_numax_%d.csv' % (freqs[i]))
def lorentz(x, C, mu, sigma):
    return C * cauchy(loc=mu, scale=sigma).pdf(x)
    xi = np.asarray(xi)
    n = xi.size
    shape = np.broadcast(sigma, mu).shape
    xi = xi.reshape(xi.shape + tuple([1 for s in shape]))
    return ((n - 1) * np.log(sigma) -
            np.sum(np.log(sigma ** 2 + (xi - mu) ** 2), 0))


# ----------------------------------------------------------------------
# Draw the sample from a Cauchy distribution
np.random.seed(44)
mu_0 = 0
gamma_0 = 2
xi = cauchy(mu_0, gamma_0).rvs(10)

# ----------------------------------------------------------------------
# Perform MCMC:

# set up our Stochastic variables, mu and gamma
mu = pymc.Uniform('mu', -5, 5)
log_gamma = pymc.Uniform('log_gamma', -10, 10, value=0)


@pymc.deterministic
def gamma(log_gamma=log_gamma):
    return np.exp(log_gamma)


# set up our observed variable x
x = pymc.Cauchy('x', mu, gamma, observed=True, value=xi)
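# Sketch of actually running the sampler with the pymc 2.x API used above
# (iteration counts here are illustrative, not from the original).
S = pymc.MCMC([mu, log_gamma, gamma, x])
S.sample(iter=25000, burn=5000)
mu_trace = S.trace('mu')[:]
gamma_trace = S.trace('gamma')[:]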
residuals = residuals * 180 / np.pi
pi = 180  # residuals are now in degrees

# x axis for plotting
x = np.linspace(-pi, pi, 1000)

c_loc, c_gamma = cauchy.fit(residuals)
fwhm = 2 * c_gamma
g_mu_bad, g_sigma_bad = norm.fit(residuals)
g_mu, g_sigma = norm.fit(residuals[np.abs(residuals) < 10])

plt.hist(residuals, bins='auto', label='Histogram', density=True, alpha=.7)
plt.plot(
    x, cauchy(c_loc, c_gamma).pdf(x),
    label='Lorentz: FWHM $=${:.3f}'.format(fwhm),
    linewidth=2
)
plt.plot(
    x, norm(g_mu_bad, g_sigma_bad).pdf(x),
    label=r'Unrestricted Gauss: $\sigma =$ {:.3f}'.format(g_sigma_bad),
    linewidth=2
)
plt.plot(
    x, norm(g_mu, g_sigma).pdf(x),
    label=r'$\pm 10$ deg Gauss: $\sigma =$ {:.3f}'.format(g_sigma),
    linewidth=2
)
import numpy as np
from scipy.stats import cauchy
import matplotlib.pyplot as plt

n = 1000
distribution = cauchy()
fig, ax = plt.subplots()
data = distribution.rvs(n)

if 0:
    ax.plot(list(range(n)), data, 'bo', alpha=0.5)
    ax.vlines(list(range(n)), 0, data, lw=0.2)
    ax.set_title("{} observations from the Cauchy distribution".format(n))

if 1:
    # == Compute sample mean at each n == #
    sample_mean = np.empty(n)
    for i in range(n):
        sample_mean[i] = np.mean(data[:i + 1])  # include the i-th observation

    # == Plot == #
    ax.plot(list(range(n)), sample_mean, 'r-', lw=3, alpha=0.6,
            label=r'$\bar X_n$')
    ax.plot(list(range(n)), [0] * n, 'k--', lw=0.5)
    ax.legend()

fig.show()
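# Contrast sketch (not in the original; reuses data, n, and ax from the
# snippet above): unlike the running mean, the running median of Cauchy data
# does settle down near the location parameter 0.
running_median = np.array([np.median(data[:i + 1]) for i in range(n)])
ax.plot(list(range(n)), running_median, 'g-', lw=2, alpha=0.6,
        label='running median')
ax.legend()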
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)

# ------------------------------------------------------------
# Define the distribution parameters to be plotted
gamma_values = [0.5, 1.0, 2.0]
linestyles = ["-", "--", ":"]
mu = 0
x = np.linspace(-10, 10, 1000)

# ------------------------------------------------------------
# plot the distributions
fig, ax = plt.subplots(figsize=(5, 3.75))

for gamma, ls in zip(gamma_values, linestyles):
    dist = cauchy(mu, gamma)
    plt.plot(x, dist.pdf(x), ls=ls, color="black",
             label=r"$\mu=%i,\ \gamma=%.1f$" % (mu, gamma))

plt.xlim(-4.5, 4.5)
plt.ylim(0, 0.65)

plt.xlabel("$x$")
plt.ylabel(r"$p(x|\mu,\gamma)$")
plt.title("Cauchy Distribution")
plt.legend()
plt.show()
eta01 = 3        # centers of the Lorentzian distributions for excitability
eta02 = -5
k01 = -3         # centers of the Lorentzian distributions for synaptic strength
k02 = 5
deltaeta1 = 1    # degree of heterogeneity for excitability
deltak1 = 1      # degree of heterogeneity for synaptic strength
deltaeta2 = 0.5
deltak2 = 0.5
sharp = 2        # sharpness of the synaptic function

an_values = {2: 2 / 3,  # normalization constants, pre-calculated
             3: 2 / 5,
             5: 8 / 63,
             9: 128 / 12155,
             15: 2048 / 9694845}
an = an_values[sharp]  # normalization constant for the chosen sharpness

dist1 = cauchy(k01, deltak1)  # Lorentzian distributions
dist2 = cauchy(k02, deltak2)
k1 = dist1.rvs(size=halfn).tolist()  # initialize the k-value for all neurons
k2 = dist2.rvs(size=halfn).tolist()
dist3 = cauchy(eta01, deltaeta1)
dist4 = cauchy(eta02, deltaeta2)
eta1 = dist3.rvs(size=halfn).tolist()
eta2 = dist4.rvs(size=halfn).tolist()
print('Initial Values Loaded')

foldername = input('Enter Folder Name: ')  # raw_input in the original Python 2
os.makedirs(foldername)


def main():
    t = ti  # t is the time variable
    xi = xi.reshape(xi.shape + tuple([1 for s in shape]))
    return ((n - 1) * np.log(gamma) -
            np.sum(np.log(gamma ** 2 + (xi - mu) ** 2), 0))


# ------------------------------------------------------------
# Define the grid and compute logL
gamma = np.linspace(0.1, 5, 70)
mu = np.linspace(-5, 5, 70)

np.random.seed(44)
mu0 = 0
gamma0 = 2
xi = cauchy(mu0, gamma0).rvs(10)

logL = cauchy_logL(xi, gamma[:, np.newaxis], mu)
logL -= logL.max()

# ------------------------------------------------------------
# Find the max and print some information
i, j = np.where(logL >= np.max(logL))

print("mu from likelihood:", mu[j])
print("gamma from likelihood:", gamma[i])
print()

med, sigG = median_sigmaG(xi)
print("mu from median:", med)
print("gamma from quartiles:", sigG / 1.483)  # Equation 3.54
tout = 0.001  # output interval

#### Model Variables ####
eta0 = -0.5      # center of the Lorentzian distribution for excitability
k0 = 0.5         # center of the Lorentzian distribution for synaptic strength
deltaeta = 0.5   # degree of heterogeneity for excitability
deltak = 0.1     # degree of heterogeneity for synaptic strength
sharp = 2        # sharpness of the synaptic function

an_values = {2: 2 / 3,  # normalization constants, pre-calculated
             3: 2 / 5,
             5: 8 / 63,
             9: 128 / 12155,
             15: 2048 / 9694845}
an = an_values[sharp]  # normalization constant for the chosen sharpness

dist1 = cauchy(k0, deltak)  # Lorentzian distribution
k = dist1.rvs(size=n).tolist()  # initialize the k-value for all neurons
dist2 = cauchy(eta0, deltaeta)
eta = dist2.rvs(size=n).tolist()
print('Initial Values Loaded')

foldername = input('Enter Folder Name: ')  # raw_input in the original Python 2
os.makedirs(foldername)


def main():
    t = ti                # t is the time variable
    theta = [None] * n    # one entry per neuron
    t_output = [t]        # t_output collects data to be plotted
    load = 0
    loadstep = tout / (tf - ti)
def lorentz(A, mu, sigma, x):
    return A * stats.cauchy(loc=mu, scale=sigma).pdf(x)
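# Fitting sketch (illustrative; assumes lorentz and its scipy.stats import
# from above, and the data here are synthetic): scipy.optimize.curve_fit
# expects the independent variable first, so wrap lorentz accordingly.
import numpy as np
from scipy import optimize

xdata = np.linspace(-5, 5, 200)
ydata = lorentz(2.0, 0.0, 0.5, xdata) + 0.01 * np.random.randn(xdata.size)
popt, pcov = optimize.curve_fit(
    lambda x, A, mu, sigma: lorentz(A, mu, sigma, x),
    xdata, ydata, p0=[1.0, 0.0, 1.0])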
print "hello world!" # Define our test distribution: a mix of Cauchy-distributed variables import numpy as np from scipy import stats np.random.seed(0) x = np.concatenate([stats.cauchy(-5, 1.8).rvs(500), stats.cauchy(-4, 0.8).rvs(2000), stats.cauchy(-1, 0.3).rvs(500), stats.cauchy(2, 0.8).rvs(1000), stats.cauchy(4, 1.5).rvs(500)]) # truncate values to a reasonable range x = x[(x > -15) & (x < 15)]
def case2(indexes=CASE_2_ATTRIBUTE_INDEXES, output=True):
    # Python 3 note: requires "from functools import reduce" at module top
    accuracy_in_each_turn = list()
    precision_in_each_turn_spam = list()
    recall_in_each_turn_spam = list()
    precision_in_each_turn_ham = list()
    recall_in_each_turn_ham = list()

    m = np.loadtxt(open("resources/normalized_data.csv", "rb"), delimiter=',')
    shuffled = np.random.permutation(m)

    valid.validate_cross_validation(NUMBER_OF_ROUNDS, TRAIN_TEST_RATIO)

    # equiprobable priors
    prior_spam = 0.5
    prior_ham = 0.5

    for i in range(NUMBER_OF_ROUNDS):
        # we're using cross-validation, so each iteration we take a different
        # slice of the data to serve as test set
        train_set, test_set = prep.split_sets(shuffled, TRAIN_TEST_RATIO, i)

        # parameter estimation,
        # but now we take 10 attributes into consideration
        sample_means_word_spam = list()
        sample_means_word_ham = list()
        sample_variances_word_spam = list()
        sample_variances_word_ham = list()

        for attr_index in indexes:
            sample_means_word_spam.append(
                nb.take_mean_spam(train_set, attr_index, SPAM_ATTR_INDEX))
            sample_means_word_ham.append(
                nb.take_mean_ham(train_set, attr_index, SPAM_ATTR_INDEX))
            sample_variances_word_spam.append(
                nb.take_variance_spam(train_set, attr_index, SPAM_ATTR_INDEX))
            sample_variances_word_ham.append(
                nb.take_variance_ham(train_set, attr_index, SPAM_ATTR_INDEX))

        # sample standard deviations from sample variances
        sample_std_devs_spam = [v ** 0.5 for v in sample_variances_word_spam]
        sample_std_devs_ham = [v ** 0.5 for v in sample_variances_word_ham]

        hits = 0.0
        misses = 0.0

        # number of instances correctly evaluated as spam
        correctly_is_spam = 0.0
        # total number of spam instances
        is_spam = 0.0
        # total number of instances evaluated as spam
        guessed_spam = 0.0
        # number of instances correctly evaluated as ham
        correctly_is_ham = 0.0
        # total number of ham instances
        is_ham = 0.0
        # total number of instances evaluated as ham
        guessed_ham = 0.0

        # now we test the hypothesis against the test set
        for row in test_set:
            # that is, the product of all the conditional probabilities of the
            # words given the class; I know it's a bit confusing, but if you
            # look carefully it's neat to do it all in a single line! =)
            product_of_all_conditional_probs_spam = reduce(
                lambda acc, cur: acc * stats.cauchy(
                    sample_means_word_spam[cur],
                    sample_std_devs_spam[cur]).pdf(row[indexes[cur]]),
                range(10), 1)
            # no need to divide by the normalization term, since we only want
            # to know which posterior is larger!
            posterior_spam = prior_spam * product_of_all_conditional_probs_spam

            product_of_all_conditional_probs_ham = reduce(
                lambda acc, cur: acc * stats.cauchy(
                    sample_means_word_ham[cur],
                    sample_std_devs_ham[cur]).pdf(row[indexes[cur]]),
                range(10), 1)
            posterior_ham = prior_ham * product_of_all_conditional_probs_ham

            # whichever is greater - that will be our prediction
            if posterior_spam > posterior_ham:
                guess = 1
            else:
                guess = 0

            if row[SPAM_ATTR_INDEX] == guess:
                hits += 1
            else:
                misses += 1

            # we'll use these to calculate metrics
            if row[SPAM_ATTR_INDEX] == 1:
                is_spam += 1
                if guess == 1:
                    guessed_spam += 1
                    correctly_is_spam += 1
                else:
                    guessed_ham += 1
            else:
                is_ham += 1
                if guess == 1:
                    guessed_spam += 1
                else:
                    guessed_ham += 1
                    correctly_is_ham += 1

        # accuracy = number of correctly evaluated instances /
        #            number of instances
        accuracy = hits / (hits + misses)

        # precision_spam = number of instances correctly evaluated as spam /
        #                  number of spam instances
        # (avoid division by zero in case nothing was found)
        if is_spam == 0:
            precision_spam = 0
        else:
            precision_spam = correctly_is_spam / is_spam

        # recall_spam = number of instances correctly evaluated as spam /
        #               number of instances evaluated as spam
        if guessed_spam == 0:
            recall_spam = 0
        else:
            recall_spam = correctly_is_spam / guessed_spam

        # precision_ham = number of instances correctly evaluated as ham /
        #                 number of ham instances
        if is_ham == 0:
            precision_ham = 0
        else:
            precision_ham = correctly_is_ham / is_ham

        # recall_ham = number of instances correctly evaluated as ham /
        #              number of instances evaluated as ham
        if guessed_ham == 0:
            recall_ham = 0
        else:
            recall_ham = correctly_is_ham / guessed_ham

        accuracy_in_each_turn.append(accuracy)
        precision_in_each_turn_spam.append(precision_spam)
        recall_in_each_turn_spam.append(recall_spam)
        precision_in_each_turn_ham.append(precision_ham)
        recall_in_each_turn_ham.append(recall_ham)

    # calculation of means for each metric at the end
    mean_accuracy = np.mean(accuracy_in_each_turn)
    std_dev_accuracy = np.std(accuracy_in_each_turn)
    variance_accuracy = np.var(accuracy_in_each_turn)

    mean_precision_spam = np.mean(precision_in_each_turn_spam)
    std_dev_precision_spam = np.std(precision_in_each_turn_spam)
    variance_precision_spam = np.var(precision_in_each_turn_spam)

    mean_recall_spam = np.mean(recall_in_each_turn_spam)
    std_dev_recall_spam = np.std(recall_in_each_turn_spam)
    variance_recall_spam = np.var(recall_in_each_turn_spam)

    mean_precision_ham = np.mean(precision_in_each_turn_ham)
    std_dev_precision_ham = np.std(precision_in_each_turn_ham)
    variance_precision_ham = np.var(precision_in_each_turn_ham)

    mean_recall_ham = np.mean(recall_in_each_turn_ham)
    std_dev_recall_ham = np.std(recall_in_each_turn_ham)
    variance_recall_ham = np.var(recall_in_each_turn_ham)

    if output:
        print("\033[1;32m")
        print('=============================================')
        print('CASE 2 - TEN ATTRIBUTES - USING CAUCHY MODEL')
        print('=============================================')
        print("\033[00m")
        print('MEAN ACCURACY: ' + str(round(mean_accuracy, 5)))
        print('STD. DEV. OF ACCURACY: ' + str(round(std_dev_accuracy, 5)))
        print('VARIANCE OF ACCURACY: ' + str(round(variance_accuracy, 8)))
        print('')
        print('MEAN PRECISION FOR SPAM: ' + str(round(mean_precision_spam, 5)))
        print('STD. DEV. OF PRECISION FOR SPAM: ' + str(round(std_dev_precision_spam, 5)))
        print('VARIANCE OF PRECISION FOR SPAM: ' + str(round(variance_precision_spam, 8)))
        print('')
        print('MEAN RECALL FOR SPAM: ' + str(round(mean_recall_spam, 5)))
        print('STD. DEV. OF RECALL FOR SPAM: ' + str(round(std_dev_recall_spam, 5)))
        print('VARIANCE OF RECALL FOR SPAM: ' + str(round(variance_recall_spam, 8)))
        print('')
        print('MEAN PRECISION FOR HAM: ' + str(round(mean_precision_ham, 5)))
        print('STD. DEV. OF PRECISION FOR HAM: ' + str(round(std_dev_precision_ham, 5)))
        print('VARIANCE OF PRECISION FOR HAM: ' + str(round(variance_precision_ham, 8)))
        print('')
        print('MEAN RECALL FOR HAM: ' + str(round(mean_recall_ham, 5)))
        print('STD. DEV. OF RECALL FOR HAM: ' + str(round(std_dev_recall_ham, 5)))
        print('VARIANCE OF RECALL FOR HAM: ' + str(round(variance_recall_ham, 8)))
def fake_spec(nu_min, nu_max, df, delt_nu, numax, modes):
    import numpy as np
    import pandas as pd
    from scipy.stats import cauchy, norm

    min_freq = nu_min
    max_freq = nu_max
    span = nu_max - nu_min
    N = int(span / df) + 1
    Freq_axis = np.arange(min_freq, max_freq + 1, df)
    freq_bins = np.arange(0, 120, 1)  # freq bins from 20 to 100 muHz in steps of 1
    lmodes = 2
    D0 = 1.5
    pos1 = np.zeros(lmodes + 1, dtype=int)  # int dtype so entries can index arrays
    epsilon = 0

    spec = np.zeros(N)   # l=0 spectrum
    spec2 = np.zeros(N)  # l=2
    spec1 = np.zeros(N)  # l=1

    gmean = ((numax - np.mean(Freq_axis)) / df) / Freq_axis.size
    gp = norm(gmean, 0.25 * 2).pdf(np.arange(-1, 1, 2. / N))  # Gaussian profile for envelope
    lp = cauchy(3.0).pdf(np.arange(-2, 2, 2. / 60))   # Lorentzian profile for monopole
    lp2 = cauchy(3.0).pdf(np.arange(-2, 2, 2. / 60))  # Lorentzian profile for dipole and quadrupole modes

    for i in range(1, modes + 1):
        if i == 1:
            spec[spec.size // 2] = 1.0
            spec2[spec.size // 2 + 100] = 0.5
            spec1[spec.size // 2 - 100] = 0.5
        if i >= 1:
            spec[spec.size // (2 ** i) + epsilon] = 1
            spec2[spec.size // (2 ** i) + 4 * lp.size - epsilon] = 0.5
            spec2[spec.size // (2 ** i) + pos1[2]] = 0.5
            spec1[spec.size // (2 ** i) - 4 * lp.size + epsilon] = 0.5
            spec[spec.size - spec.size // (2 ** i)] = 1
            spec2[spec.size - spec.size // (2 ** i) + epsilon] = 0.5
            spec1[spec.size - (spec.size // (2 ** i) + 4 * lp.size) - epsilon] = 0.5
            spec2[spec.size - (spec.size // (2 ** i) + pos1[1])] = 0.5

    # Now add the Lorentzian profile
    for i in range(N):
        if spec[i] == 1.:
            # check for all values equal to monopole
            # TODO: find non-zero value
            spec[i] = 0
            spec[i - lp.size // 2:i + lp.size // 2] += lp
    for i in range(N):
        if spec2[i] == 0.5:
            # check for dipole/quadrupole
            spec2[i] = 0
            spec2[i - lp2.size // 2:i + lp2.size // 2] += lp2
    for i in range(N):
        if spec1[i] == 0.5:
            spec1[i] = 0
            spec1[i - lp2.size // 2:i + lp2.size // 2] += lp2

    # random noise (uniform draws with a random integer amplitude)
    rand_noise = np.random.random(N) * np.random.randint(1, high=3)
    print(np.size(rand_noise))

    spec_comb = spec + spec1 + spec2
    dat_frame = pd.DataFrame({'Frequency': Freq_axis[0:N],
                              'l0': spec,
                              'l1': spec1,
                              'l2': spec2,
                              'noise': rand_noise})
    dat_frame.to_csv('/home/rakesh/Fake_Data/Spec_numax_%d_modes_%d.csv'
                     % (numax, modes))
def modelo2(x, params):
    A, mu, sigma, a, b = params
    return -A * cauchy(loc=mu, scale=sigma).pdf(x) + a * x + b
# -*- coding: UTF-8 -*-
# Example of distributions from Scipy's stats module
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import cauchy, gamma, maxwell, norm

SAMPLE_SIZE = 100
x = np.linspace(-5, 5, 1000)

# Initialize the distributions
cauchy_dist = cauchy()
gamma_dist = gamma(1, scale=2.)
maxwell_dist = maxwell()
norm_dist = norm()

# Compute the probability density function of each distribution
cauchy_pdf = cauchy_dist.pdf(x)
gamma_pdf = gamma_dist.pdf(x)
maxwell_pdf = maxwell_dist.pdf(x)
norm_pdf = norm_dist.pdf(x)

fig = plt.figure("Distributions")
sp1 = fig.add_subplot(221, title='Cauchy distribution')
sp2 = fig.add_subplot(222, title='Gamma distribution')
sp3 = fig.add_subplot(223, title='Maxwell distribution')
from __future__ import division
import numpy as np
from scipy.stats import t, beta, lognorm, expon, gamma, poisson, cauchy
import matplotlib.pyplot as plt

n = 100
dists = {"student's t with 10 dof": t(10),
         "beta(2,2)": beta(2, 2),
         "lognormal LN(0,1/2)": lognorm(0.5),
         "gamma(5,1/2)": gamma(5, scale=2),
         "poisson(4)": poisson(4),
         "exp(1)": expon(1),
         "cauchy": cauchy()}

D = 3
fig, ax = plt.subplots(D, 1, figsize=(10, 10))
plt.subplots_adjust(hspace=0.5)

for d in range(D):
    name = np.random.choice(list(dists.keys()))  # list() needed on Python 3
    X = dists[name].rvs(n)
    Xbar = np.cumsum(X) / range(1, n + 1)
    ax[d].plot(range(1, n + 1), X, 'bo', alpha=0.5, markersize=4)
    ax[d].vlines(range(1, n + 1), dists[name].mean(), X, 'b', alpha=0.5)
    ax[d].plot(range(1, n + 1), Xbar, 'g', lw=2,
               label=r'$\bar X_n$ for $X_i \sim $ ' + name)
    ax[d].plot(range(1, n + 1), np.ones(n) * dists[name].mean(), 'k--',
               label=r'$\mu$', lw=2)
    ax[d].legend(loc=3, ncol=2, mode='expand',
                 bbox_to_anchor=(.05, 1.02, .9, .102))
post_mean_l = laplace_mean[3] / laplace_ml[3]

# Use results to plot a Gaussian PDF here.

# Print using string formatting:
print('Beta case:')
print('Marg. like.: {:10.4e} (quad), {:10.4e} (Laplace)'.format(
    bi.mlike, laplace_ml[3]))
print('Posterior mean: {:4.2f} (quad), {:4.2f} (Laplace)'.format(
    bi.post_mean, post_mean_l))
print()

# -------------------------------------------------------------------------------
# 3rd case: Cauchy, const prior
x0, d = 5., 3.
data = stats.cauchy(x0, d).rvs(5)
flat_pdf = .001  # e.g., for prior range 1e3

cli = CauchyLocationInference(d, data, flat_pdf, (-15., 25.))

cfig = figure()
cli.plot(alpha=.5)
xlim(-10, 15.)
xlabel('$x_0$')
ylabel('Posterior PDF')
title('Cauchy case; CDF method')

samps = []
for i in range(10000):
    samps.append(cli.samp_cdf())
samps = array(samps)
from scipy import stats
from astroML.density_estimation import KDE, KNeighborsDensity
from astroML.plotting import hist

# ------------------------------------------------------------
# Generate our data: a mix of several Cauchy distributions
#  this is the same data used in the Bayesian Blocks figure
np.random.seed(0)
N = 10000
mu_gamma_f = [(5, 1.0, 0.1),
              (7, 0.5, 0.5),
              (9, 0.1, 0.1),
              (12, 0.5, 0.2),
              (14, 1.0, 0.1)]
true_pdf = lambda x: sum([f * stats.cauchy(mu, gamma).pdf(x)
                          for (mu, gamma, f) in mu_gamma_f])
x = np.concatenate([stats.cauchy(mu, gamma).rvs(int(f * N))
                    for (mu, gamma, f) in mu_gamma_f])
np.random.shuffle(x)
x = x[x > -10]
x = x[x < 30]

# ------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(8, 8))
fig.subplots_adjust()

N_values = (500, 5000)
subplots = (211, 212)
k_values = (10, 100)
    if I_gauss == 0:
        O_CG = np.inf
        err_O_CG = np.inf
    else:
        O_CG = I_cauchy / I_gauss
        # propagate the quadrature errors of both integrals into the ratio
        err_O_CG = O_CG * np.sqrt((err_gauss / I_gauss) ** 2 +
                                  (err_cauchy / I_cauchy) ** 2)

    return (I_gauss, err_gauss), (I_cauchy, err_cauchy), (O_CG, err_O_CG)


# ------------------------------------------------------------
# Draw points from a Cauchy distribution
np.random.seed(44)
mu = 0
gamma = 2
xi = cauchy(mu, gamma).rvs(100)

# ------------------------------------------------------------
# compute the odds ratio for the first 10 points
((I_gauss, err_gauss),
 (I_cauchy, err_cauchy),
 (O_CG, err_O_CG)) = calculate_odds_ratio(xi[:10])

print("Results for first 10 points:")
print("  L(M = Cauchy) = %.2e +/- %.2e" % (I_cauchy, err_cauchy))
print("  L(M = Gauss)  = %.2e +/- %.2e" % (I_gauss, err_gauss))
print("  O_{CG} = %.3g +/- %.3g" % (O_CG, err_O_CG))

# ------------------------------------------------------------
# calculate the results as a function of number of points
Nrange = np.arange(10, 101, 2)
y2 = rv2.cdf(x)
y3 = rv3.cdf(x)

# plot the cdf
plt.clf()
plt.plot(x, y1, lw=3, label='scale=5')
plt.plot(x, y2, lw=3, label='scale=3')
plt.plot(x, y3, lw=3, label='scale=7')
plt.xlabel('X', fontsize=20)
plt.ylabel('CDF', fontsize=15)
plt.legend()
plt.savefig('/home/tomer/articles/python/tex/images/norm_cdf.png')

# generate frozen Cauchy, chi, exponential and uniform distributions
rv1 = st.cauchy(loc=0, scale=5)
rv2 = st.chi(2, loc=0, scale=8)
rv3 = st.expon(loc=0, scale=7)
rv4 = st.uniform(loc=0, scale=20)

# estimate the pdf at some points
y1 = rv1.pdf(x)
y2 = rv2.pdf(x)
y3 = rv3.pdf(x)
y4 = rv4.pdf(x)

# plot the pdf
plt.clf()
plt.plot(x, y1, lw=3, label='Cauchy')
plt.plot(x, y2, lw=3, label='Chi')
plt.plot(x, y3, lw=3, label='Exponential')