def laplaceFunc():
    # `size` is expected to be a module-level list of sample sizes
    for n in size:
        fig, ax = plt.subplots(1, 1)
        ax.set_title("Laplace distribution, n = " + str(n))
        x = np.linspace(laplace.ppf(0.01), laplace.ppf(0.99), 100)
        ax.plot(x, laplace.pdf(x), 'b-', lw=5, alpha=0.6)
        r = laplace.rvs(size=n)
        ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
        plt.show()
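# A minimal driver for the function above (a sketch, not from the original source;
# `size` is assumed to be the module-level list of sample sizes, and the values
# below are illustrative):
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import laplace

size = [20, 60, 100]
laplaceFunc()  # one histogram-vs-pdf figure per sample size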
def obtain_ell(self, p=0):
    if p == 0.99499:
        p = 0.995 * 0.85
        p = 0.99575  # note: overwrites the previous assignment
    m = self.args.m
    eps = self.args.epsilon
    delta = 1 / self.args.n**2
    b = eps / (2 * math.log(1 / delta))
    noise = 'lap'
    beta_lt = 0.3 * 0.02
    ss, p_percentile = self.obtain_ss(m, p, b)
    if noise == 'lap':
        inv_cdf = laplace.ppf(1 - beta_lt)
        a = eps / 2
        ns = laplace.rvs()
    else:
        inv_cdf = norm.ppf(1 - beta_lt)
        a = eps / math.sqrt(-math.log(delta))
        ns = norm.rvs()
    kappa = 1 / (1 - (math.exp(b) - 1) * inv_cdf / a)
    tau = p_percentile + kappa * ss / a * (ns + inv_cdf)
    return max(0, tau)
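# Quick sanity check (a standalone sketch, not part of the class above): the
# `inv_cdf = laplace.ppf(1 - beta_lt)` term is the (1 - beta_lt)-quantile of a
# standard Laplace draw, so the noise term `ns = laplace.rvs()` stays below it
# with probability about 1 - beta_lt.
import numpy as np
from scipy.stats import laplace

beta_lt = 0.3 * 0.02
bound = laplace.ppf(1 - beta_lt)
samples = laplace.rvs(size=100_000, random_state=0)
print(bound, np.mean(samples <= bound))  # empirical frequency ~ 1 - beta_lt = 0.994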
def bounds(self, bootstrap=False):
    if not bootstrap:
        _bounds = []
        for a in self.interval_widths:
            edge = (1 - a) / 2.0
            _bounds.append(laplace.ppf([edge, 1 - edge], 0.0, self.scale))
        return _bounds
    else:
        return super().bounds(bootstrap)
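# Standalone illustration of the non-bootstrap branch (a sketch; the
# `interval_widths` and `scale` values below are made up for demonstration):
from scipy.stats import laplace

interval_widths = [0.5, 0.9, 0.99]
scale = 2.0
for a in interval_widths:
    edge = (1 - a) / 2.0
    lo, hi = laplace.ppf([edge, 1 - edge], 0.0, scale)
    print(a, lo, hi)  # symmetric central interval covering a fraction `a` of the mass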
def generate_det_candidates(physics, det, numperturb):
    """
    Generate multiple candidates for an event from a detection by perturbing
    the detection's observed slowness and azimuth. The numperturb parameter
    controls the number of perturbations to each of them. Note, we will
    generate approximately numperturb^2 candidates.

    The size of each perturbation will be determined by the uncertainty in
    the azimuth and slowness.
    """
    # We will perturb the azimuth and slowness along evenly spaced percentile
    # values. However, we must always try the 50th percentile as well
    # because that is the unperturbed value.
    percentiles = list(np.linspace(.1, .9, numperturb))
    if .5 not in percentiles:
        percentiles.append(.5)

    # Using the percentiles compute the appropriate perturbation to
    # azimuth and slowness.
    delaz_values = [
        laplace.ppf(perc, loc=physics.mu_z[det.stanum],
                    scale=physics.theta_z[det.stanum])
        for perc in percentiles
    ]
    delslow_values = [
        laplace.ppf(perc, loc=physics.mu_s[det.stanum],
                    scale=physics.theta_s[det.stanum])
        for perc in percentiles
    ]

    candidates = []
    for delslow in delslow_values:
        for delaz in delaz_values:
            event = invert_detection(physics, det, delslow, delaz)
            candidates.append(event)

    return candidates
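# Toy illustration of the percentile grid above (the loc/scale numbers are
# hypothetical; the real values come from the station-specific `physics` model):
import numpy as np
from scipy.stats import laplace

numperturb = 3
percentiles = list(np.linspace(.1, .9, numperturb))
if .5 not in percentiles:
    percentiles.append(.5)
print(percentiles)                                    # [0.1, 0.5, 0.9]
print(laplace.ppf(percentiles, loc=0.0, scale=5.0))   # median plus symmetric perturbations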
def compute_noise_level_percentile(location, epsilon, j, delta, percentile):
    # Equation:
    #   b = 2 * J * \Delta\eta / \epsilon
    b = (2 * j * delta) / epsilon
    return laplace.ppf(percentile, scale=b, loc=location)
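# Worked example (the parameter values are illustrative only): with epsilon = 1.0,
# J = 5 and sensitivity delta = 0.1, the Laplace scale is b = 2 * 5 * 0.1 / 1.0 = 1.0,
# so the 95th-percentile noise level around location 0 is laplace.ppf(0.95, scale=1.0).
from scipy.stats import laplace

print(compute_noise_level_percentile(location=0.0, epsilon=1.0, j=5, delta=0.1,
                                     percentile=0.95))
# ~ 2.30, i.e. -ln(2 * 0.05) for a unit-scale Laplace distribution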
def quantile(self, X, q):
    return self.noiseless(X) + laplace.ppf(q, scale=2)
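# Sketch of how such a quantile method yields a prediction interval (the mean
# prediction below is a stand-in for `self.noiseless(X)`, not the original model):
# the additive Laplace noise has scale 2, so the central 95% interval is
# mean + laplace.ppf([0.025, 0.975], scale=2).
from scipy.stats import laplace

mean_prediction = 10.0                          # hypothetical noiseless(X)
lo = mean_prediction + laplace.ppf(0.025, scale=2)
hi = mean_prediction + laplace.ppf(0.975, scale=2)
print(lo, hi)  # ~ (4.0, 16.0)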
def tune_scale(self, u: float, y: float):
    self.scale = abs(u - y) / laplace.ppf(q=((self.p + 1) / 2))
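# Why (p + 1) / 2: for a zero-mean Laplace variable, P(|X| <= t) = p exactly when
# t is the ((p + 1) / 2)-quantile, so dividing the observed residual |u - y| by that
# quantile yields the scale at which the residual sits on the p-coverage boundary.
# Quick check (p, u and y below are illustrative):
from scipy.stats import laplace

p, u, y = 0.9, 12.0, 10.0
scale = abs(u - y) / laplace.ppf((p + 1) / 2)
print(laplace.cdf(abs(u - y), scale=scale) - laplace.cdf(-abs(u - y), scale=scale))  # ~ 0.9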
def sample_model(args, betaTCVAE):
    """ Sample the model """
    test_x = get_data(args, "test")
    x = []
    xhat = []
    z = []
    lbl = []
    if args.dataset.lower() == "helium":
        rad = []

    for ii, x_test in enumerate(test_x):
        x.extend(x_test[0])
        logits, _, (z_mean, log_var) = betaTCVAE.model(x_test)
        xhat.extend(tf.math.sigmoid(logits))

        # we "comb" the mean and the std of every dist in z together
        # so that z looks like: [z1_mean, z1_std, z2_mean, z2_std, ...]
        z_std = tf.math.exp(log_var * .5)
        z_combed = np.zeros([np.shape(z_std)[0], int(args.z_dim * 2)])
        z_combed[:, ::2] = z_mean
        z_combed[:, 1::2] = z_std
        z.extend(z_combed)
        # alternative: concatenate mean and std instead of interleaving
        # z.extend(np.hstack([z_mean, z_std]))

        if args.dataset.lower() == "helium":
            lbl.extend(x_test[-2])
            rad.extend(x_test[-1])
        else:
            lbl.extend(x_test[-1])

    z = np.array(z).reshape(2000, int(args.z_dim * 2))  # note: assumes a test set of 2000 samples
    if args.dataset.lower() == "helium":
        lbl = np.array(lbl).reshape([-1])
        rad = np.array(rad).reshape([-1])
        lbl = (lbl, rad)
    else:
        lbl = np.array(lbl).reshape([-1])

    if args.z_dim <= 6:
        if args.prior.lower() == "normal":
            grid_x = norm.ppf(np.linspace(0.05, 0.95, 25))
        if args.prior.lower() == "laplace":
            grid_x = laplace.ppf(np.linspace(0.05, 0.95, 25))
        z_ppf = np.vstack([pp.reshape(-1) for pp in np.meshgrid(grid_x, grid_x)]).T

        if args.z_dim == 2:
            x_decoded = betaTCVAE.decode(z_ppf, apply_sigmoid=True)
            return (x, xhat, x_decoded), z, lbl

        if 2 < args.z_dim <= 6:
            # We make grids for all 2-d combinations, while keeping the other dims fixed.
            # Fixations are ppf(0.05), ppf(0.25), ppf(0.5), ppf(0.75) and ppf(0.95).
            # This means we end up with (z_dim choose 2) * 5 grid maps -> with z_dim = 6, we get 75 images
            x_decoded = []
            if args.prior.lower() == "normal":
                fixed_z = norm.ppf([0.05, 0.25, 0.5, 0.75, 0.95])
            if args.prior.lower() == "laplace":
                fixed_z = laplace.ppf([0.05, 0.25, 0.5, 0.75, 0.95])
            for comb in combinations(np.arange(args.z_dim, dtype=int), 2):
                for fixed_latent in fixed_z:
                    sample_z = np.ones([625, args.z_dim]) * fixed_latent
                    sample_z[:, int(comb[0])] = z_ppf[:, 0]
                    sample_z[:, int(comb[1])] = z_ppf[:, 1]
                    x_decoded.append(betaTCVAE.decode(sample_z, apply_sigmoid=True))
            return (x, xhat, x_decoded), z, lbl

    return (x, xhat), z, lbl
import numpy as np
from scipy.stats import laplace
import matplotlib.pyplot as plt

fig, ax = plt.subplots(1, 1)
mean, var, skew, kurt = laplace.stats(moments='mvsk')
x = np.linspace(laplace.ppf(0.01), laplace.ppf(0.99), 100)
ax.plot(x, laplace.pdf(x), 'r-', lw=5, alpha=0.6, label='laplace pdf')
rv = laplace()
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
vals = laplace.ppf([0.001, 0.5, 0.999])
np.allclose([0.001, 0.5, 0.999], laplace.cdf(vals))
r = laplace.rvs(size=1000)
# ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)  # `normed` was removed in Matplotlib 3.x
# ax.legend(loc='best', frameon=False)
plt.show()
    # (tail of a gaussian_kernel helper whose definition is truncated above)
    return numerator / denominator


def foo(data, sigma, x):
    # kernel density estimate at x: average of Gaussian kernels centred on the data
    total = 0
    for xi in data:
        total += gaussian_kernel(sigma, x, xi)
    return (1.0 / len(data)) * total  # was hard-coded to 1/1000, the sample size used below


def get_data_y(data_x, sigma):
    return [foo(data_x, sigma, x) for x in data_x]


sigmas = np.linspace(0.01, 1.0, 9)
data_x = sorted([np.random.laplace() for i in range(1000)])
real_laplace = np.linspace(laplace.ppf(0.01), laplace.ppf(0.99), 100)

fig, ax = plt.subplots(nrows=3, ncols=3)
i = 0
for row in ax:
    for col in row:
        data_y = get_data_y(data_x, sigmas[i])
        col.plot(data_x, data_y, markersize=2)
        col.plot(real_laplace, laplace.pdf(real_laplace), markersize=3)
        col.set_title('sigma = ' + str(sigmas[i]))
        i += 1
plt.show()
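# An equivalent and shorter route, if relying on SciPy's built-in KDE is acceptable
# (this replaces the hand-rolled kernel sum above; it is not from the original script):
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import laplace, gaussian_kde

data_x = np.sort(np.random.laplace(size=1000))
grid = np.linspace(laplace.ppf(0.01), laplace.ppf(0.99), 100)
kde = gaussian_kde(data_x, bw_method=0.2)   # bw_method plays the role of sigma above
plt.plot(grid, kde(grid), label='KDE')
plt.plot(grid, laplace.pdf(grid), label='Laplace pdf')
plt.legend()
plt.show()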
import math
import cProfile
import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
from scipy import interpolate
from scipy.stats import laplace

# The scale variable
b = (1 / 3)

# Set up the distribution and plot it
fig, ax = plt.subplots(1, 1)
mean, var, skew, kurt = laplace.stats(moments='mvsk')
x = np.linspace(laplace.ppf(0.01), laplace.ppf(0.99), 10000)

deku = []
test_2_container = []

# Generate the uniform random variables.
test_1 = np.random.uniform(-1, 1, 1000)
test_2 = np.random.uniform(-1, 1, 100)

# For loop to transform the uniform data (the snippet is truncated here)
for x in test_1:
    if x <= 0:
        add_me = 2 * b * math.log(abs(2 * x))
    else:
        add_me = -1 * b * math.log(abs(-2 * x + 2))
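# For reference, the standard inverse-CDF transform for a Laplace(0, b) variable
# (a sketch independent of the loop above, which is cut off mid-body): draw
# u ~ Uniform(-1/2, 1/2) and map it through the inverse CDF.
import numpy as np

b = 1 / 3
u = np.random.uniform(-0.5, 0.5, 1000)
samples = -b * np.sign(u) * np.log(1 - 2 * np.abs(u))
# `samples` now follows a Laplace distribution with scale b, which can be
# checked against scipy.stats.laplace.pdf(x, scale=b).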
def gen(df, config, params, output_path):
    df = df[~df['remove']]

    figures_path = create_fig_folder(output_path, 'figures')
    fig_names = []

    exp_names = np.sort(df[config['col_names']['raw_file']].unique())
    num_experiments = len(exp_names)

    # ceil PEP to 1
    df.loc[df[config['col_names']['pep']] > 1, config['col_names']['pep']] = 1

    # split PEP into 10 bins, for coloring points later
    # pep_col_code = pd.cut(df[config['col_names']['pep']], 10)
    pep_col_code = pd.cut(df[config['col_names']['pep']], np.linspace(0, 1, 11))

    # generate figures for each experiment
    for exp in range(0, num_experiments):
        logger.info('Generating Summary for Experiment {} | {}'.format(exp + 1, exp_names[exp]))

        exp_params = params['exp'].iloc[exp]
        # exp_indices = params['pair']['muij_to_exp'] == exp
        exp_inds = (df['exp_id'] == exp) & (~pd.isnull(df['pep_new']))

        # don't plot if there aren't any points
        if np.sum(exp_inds) == 0:
            continue

        predicted = df['muij'][exp_inds].values
        predicted_sd = df['sigmaij'][exp_inds].values
        mus = df['mu'][exp_inds].values

        # observed values
        observed = df[config['col_names']['retention_time']][exp_inds].values
        obs_peps = df[config['col_names']['pep']][exp_inds].values
        obs_code = pep_col_code[exp_inds].values
        residual = observed - predicted

        # plot the 2-segment linear fit of mus to observed RTs
        plt.subplot(121)
        plt.scatter(mus, observed, s=1, color='black')
        plt.plot([0, exp_params['split_point']],
                 [exp_params['beta_0'],
                  (exp_params['split_point'] * exp_params['beta_1']) + exp_params['beta_0']],
                 color='red')
        plt.plot([exp_params['split_point'], 300],
                 [(exp_params['split_point'] * exp_params['beta_1']) + exp_params['beta_0'],
                  (exp_params['split_point'] * exp_params['beta_1'])
                  + ((300 - exp_params['split_point']) * exp_params['beta_2'])
                  + exp_params['beta_0']],
                 color='green')
        plt.plot(np.repeat(exp_params['split_point'], 2), [-100, 300],
                 color='blue', linestyle='dashed')
        plt.axis([0, mus.max() + 10, exp_params['beta_0'] - 10, observed.max() + 10])
        plt.title('Experiment {} - {}'.format(exp, exp_names[exp]))
        plt.xlabel('Reference RT (min)')
        plt.ylabel('Observed RT (min)')

        # plot residuals, quantiles of residuals, and color points by PEP
        plt.subplot(122)
        plt.scatter(predicted, residual, s=4,
                    c=pep_col_code.cat.codes.values[exp_inds], alpha=0.5)
        plt.plot([0, exp_params['split_point']], [0, 0], color='red')
        plt.plot([exp_params['split_point'], 300], [0, 0], color='green')
        plt.plot(np.repeat(exp_params['split_point'], 2), [-100, 300],
                 color='blue', linestyle='dashed')

        # confidence intervals, 2.5% and 97.5%
        conf_x = predicted[np.argsort(predicted)]
        conf_2p5 = laplace.ppf(0.025, loc=0, scale=predicted_sd)[np.argsort(predicted)]
        conf_97p5 = laplace.ppf(0.975, loc=0, scale=predicted_sd)[np.argsort(predicted)]
        plt.plot(conf_x, conf_2p5, color='red')
        plt.plot(conf_x, conf_97p5, color='red')

        plt.axis([predicted.min() - 5, predicted.max() + 5,
                  residual.min() - 5, residual.max() + 5])
        plt.ylim(np.min(conf_2p5) - 0.1, np.max(conf_97p5) + 0.1)
        plt.xlim(conf_x[0], conf_x[-1])

        cbar = plt.colorbar()
        cbar.set_label('Spectral PEP (Error Probability)')
        cbar.ax.set_yticklabels(pep_col_code.cat.categories.values)

        plt.xlabel('Inferred RT (min)')
        plt.ylabel('Residual RT (min)')

        # add some space between subplots
        plt.subplots_adjust(hspace=0.3, wspace=0.35, bottom=0.2, right=0.85)

        # finalize and save figure
        fig = plt.gcf()
        fig.set_size_inches(7, 3.5)

        _fname = 'alignment_{}_{}.png'.format(str(exp), exp_names[exp])
        fname = os.path.join(figures_path, _fname)
        logger.info('Saving figure to {} ...'.format(fname))
        fig.savefig(fname, dpi=160)
        fig_names.append(os.path.join('figures', _fname))

        plt.close()
        fig.clf()

    return fig_names
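# The residual band above comes straight from the Laplace quantile function: for each
# point with fitted scale sigma_ij, the central 95% band around a zero residual is
# laplace.ppf([0.025, 0.975], loc=0, scale=sigma_ij). A minimal standalone check
# (the sigma values below are made up):
import numpy as np
from scipy.stats import laplace

predicted_sd = np.array([0.5, 1.0, 2.0])
lower = laplace.ppf(0.025, loc=0, scale=predicted_sd)
upper = laplace.ppf(0.975, loc=0, scale=predicted_sd)
print(lower, upper)  # symmetric bands that widen with the per-point scale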
# Note: the function name shadows scipy.stats.laplace, so the distribution is
# imported under an alias here to keep `ppf` pointing at the SciPy object.
from scipy.stats import laplace as laplace_dbn


def laplace(shape, scale):
    ppf = lambda x: laplace_dbn.ppf(x, loc=0, scale=scale)
    rand = randomization.laplace(shape, scale)
    return randomization_ppf(rand, ppf)
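# The idea behind pairing a sampler with its ppf: feeding uniform draws through
# laplace.ppf reproduces Laplace noise, which is what a ppf-based randomization
# relies on. (Sketch only; `randomization` and `randomization_ppf` above are
# defined elsewhere and not reproduced here.)
import numpy as np
from scipy.stats import laplace as laplace_dbn

scale = 0.5
u = np.random.uniform(size=10_000)
samples = laplace_dbn.ppf(u, loc=0, scale=scale)
print(np.mean(np.abs(samples)))  # ~ scale, the mean absolute value of a Laplace(0, scale) draw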
from scipy.stats import laplace
from scipy.stats import cauchy
from scipy.stats import uniform
from scipy.stats import poisson
from scipy.stats import norm
import matplotlib.pyplot as plt
from math import sqrt, floor, exp, pi
import math
import numpy as np

fig, ax = plt.subplots(1, 3)

r = laplace.rvs(size=20)
x = np.linspace(min(laplace.ppf(0.01), min(r)), max(laplace.ppf(0.99), max(r)), 100)
ax[0].plot(x, laplace.cdf(x, 0, sqrt(2)))
ax[0].hist(r, density=True, bins=floor(len(r)), histtype='step', cumulative=True)
ax[0].set_xlabel('x')
ax[0].set_title('Laplace distribution, n=20')

r2 = laplace.rvs(size=60)
x2 = np.linspace(min(laplace.ppf(0.01), min(r2)), max(laplace.ppf(0.99), max(r2)), 100)
ax[1].plot(x2, laplace.cdf(x2, 0, sqrt(2)))
ax[1].hist(r2, density=True, bins=floor(len(r2)), histtype='step',
           cumulative=True)  # (snippet truncated here; presumably mirrors the n=20 panel)