def add_contaminate_data(data, params):
    """Add contaminated (outlier) trials to the data."""
    # fraction of contaminants that are uniform guesses (default: all);
    # dict.has_key() was removed in Python 3
    gamma = params['gamma'] if 'gamma' in params else 1
    t_min = max(np.abs(data['rt'])) + 0.5
    t_max = t_min + 3
    pi = params['pi']
    n_cont = max(int(len(data) * pi), 2)
    n_unif = max(int(n_cont * gamma), 2)
    n_other = n_cont - n_unif
    l_data = list(range(len(data)))
    cont_idx = random.sample(l_data, n_cont)
    unif_idx = cont_idx[:n_unif]
    other_idx = cont_idx[n_unif:]

    # create guesses; note the field-first indexing: fancy indexing a
    # structured array (data[idx]['rt'] = ...) assigns into a copy
    response = np.round(uniform.rvs(0, 1, size=n_unif))
    data['rt'][unif_idx] = uniform.rvs(0, t_max, size=n_unif) * response
    # the original overwrote 'rt' with the responses here; presumably the
    # 'response' field was intended
    data['response'][unif_idx] = response
    data['rt'][unif_idx[0]] = min(abs(data['rt'])) / 2.
    data['rt'][unif_idx[1]] = max(abs(data['rt'])) + 0.8

    # create late responses
    response = (np.sign(gen_rts(params, n_other)) + 1) / 2
    data['rt'][other_idx] = uniform.rvs(t_min, t_max, size=n_other) * response
    return data
def setup(self):
    #########
    # PART 1: Make model calcium data
    #########

    # Data parameters
    RATE = 1           # mean firing rate of poisson spike train (Hz)
    STEPS = 100        # number of time steps in data
    STEPS_LONG = 5000  # number of time steps in the longer time series
    TAU = 0.6          # time constant of calcium indicator (seconds)
    DELTAT = 1. / 30   # time step duration (seconds); float literal avoids
                       # integer division under Python 2
    self.sigma = 0.1   # standard deviation of gaussian noise
    SEED = 2222        # random number generator seed

    # Make poisson spike trains
    self.spikes = sima.spikes.get_poisson_spikes(
        deltat=DELTAT, rate=RATE, steps=STEPS, seed=SEED)
    # longer time series for parameter estimation
    self.spikes_long = sima.spikes.get_poisson_spikes(
        deltat=DELTAT, rate=RATE, steps=STEPS_LONG, seed=SEED)

    # Convolve with an exponential kernel to make the calcium signal
    np.random.seed(SEED)
    self.gamma = 1 - (DELTAT / TAU)
    CALCIUM = signal.lfilter([1], [1, -self.gamma], self.spikes)
    CALCIUM_LONG = signal.lfilter([1], [1, -self.gamma], self.spikes_long)

    # Make fluorescence traces with random gaussian noise and a random baseline
    self.fluors = CALCIUM + norm.rvs(
        scale=self.sigma, size=STEPS) + uniform.rvs()
    self.fluors_long = CALCIUM_LONG + norm.rvs(
        scale=self.sigma, size=STEPS_LONG) + uniform.rvs()
def _clayton(M, N, alpha):
    if alpha < 0:
        raise ValueError('Alpha must be >=0 for Clayton Copula Family')
    if N < 2:
        raise ValueError('Dimensionality Argument [N] must be an integer >= 2')
    elif N == 2:
        u1 = uniform.rvs(size=M)
        p = uniform.rvs(size=M)
        if alpha < np.spacing(1):
            # independence copula in the limit alpha -> 0
            u2 = p
        else:
            u2 = u1 * np.power(
                np.power(p, -alpha / (1.0 + alpha)) - 1 + np.power(u1, alpha),
                -1.0 / alpha)
        U = np.column_stack((u1, u2))
    else:
        # Algorithm 1 described in both the SAS Copula Procedure and the
        # paper "High Dimensional Archimedean Copula Generation Algorithm"
        U = np.empty((M, N))
        for ii in range(M):
            shape = 1.0 / alpha
            v = gamma.rvs(shape)
            # sample N independent uniform random variables
            x_i = uniform.rvs(size=N)
            t = -1 * np.log(x_i) / v
            # alpha < 0 was rejected above, so the Clayton generator
            # inverse reduces to (1 + t)**(-1/alpha)
            U[ii, :] = np.power(1.0 + t, -1.0 / alpha)
    return U
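# Usage sketch (illustrative, not from the original source; assumes numpy and
# scipy.stats are imported as in the functions above): draw from the bivariate
# Clayton copula and compare the empirical Kendall's tau with the theoretical
# value alpha / (alpha + 2).
from scipy.stats import kendalltau

U = _clayton(M=5000, N=2, alpha=2.0)
tau_emp, _ = kendalltau(U[:, 0], U[:, 1])
print("empirical tau: %.3f (theoretical: %.3f)" % (tau_emp, 2.0 / (2.0 + 2)))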
def _frank(M, N, alpha):
    if N < 2:
        raise ValueError('Dimensionality Argument [N] must be an integer >= 2')
    elif N == 2:
        u1 = uniform.rvs(size=M)
        p = uniform.rvs(size=M)
        if abs(alpha) > math.log(sys.float_info.max):
            # limiting cases: comonotonic (u2 = u1) for alpha -> +inf,
            # countermonotonic (u2 = 1 - u1) for alpha -> -inf; the original
            # tested (u1 < 0), which is never true for a uniform draw
            u2 = (alpha < 0) + np.sign(alpha) * u1
        elif abs(alpha) > math.sqrt(np.spacing(1)):
            u2 = -1 * np.log(
                (np.exp(-alpha * u1) * (1 - p) / p + np.exp(-alpha)) /
                (1 + np.exp(-alpha * u1) * (1 - p) / p)) / alpha
        else:
            # alpha ~ 0: independence
            u2 = p
        U = np.column_stack((u1, u2))
    else:
        # Algorithm 1 described in both the SAS Copula Procedure and the
        # paper "High Dimensional Archimedean Copula Generation Algorithm"
        if alpha <= 0:
            raise ValueError('For N>=3, alpha >0 in Frank Copula')
        U = np.empty((M, N))
        for ii in range(M):
            p = -1.0 * np.expm1(-1 * alpha)
            if p == 1:  # boundary case protection
                p = 1 - np.spacing(1)
            v = logser.rvs(p, size=1)
            # sample N independent uniform random variables
            x_i = uniform.rvs(size=N)
            t = -1 * np.log(x_i) / v
            U[ii, :] = -1.0 * np.log1p(
                np.exp(-t) * np.expm1(-1.0 * alpha)) / alpha
    return U
def generate_random_uniform_timeseries(rmin, rmax, ndays=100, num=100):
    """
    Generate random uniform 2d data in a time series.

    parameters:
        rmin: int - the minimum value of the range
        rmax: int - the maximum value of the range
        ndays: int - number of days in the time series
        num: int - number of locations per day
    returns:
        numpy array of coordinates with day index
    """
    # First create an empty array of the proper size
    a = np.zeros([ndays * num, 3])
    # column 0 holds the day index, repeated `num` times per day
    a[:, 0] = np.repeat(np.arange(ndays), num)
    # scipy's uniform takes (loc, scale), i.e. U(loc, loc + scale), so the
    # scale must be rmax - rmin to cover [rmin, rmax]
    a[:, 1] = uniform.rvs(rmin, rmax - rmin, size=ndays * num)
    a[:, 2] = uniform.rvs(rmin, rmax - rmin, size=ndays * num)
    return a
def ep2_rvs(mu, sigma, alpha, size=1):
    # sample from the exponential power (EP2) distribution via a beta-gamma
    # representation with a random sign
    u = uniform.rvs(loc=0, scale=1, size=size)
    b = beta.rvs(1. / alpha, 1 - 1. / alpha, size=size)
    r = np.sign(uniform.rvs(loc=0, scale=1, size=size) - .5)
    z = r * (-alpha * b * np.log(u))**(1. / alpha)
    return z
def __init__(self, likelihood_info):
    self.n_modes = likelihood_info.get("n_modes", 1)
    self.delay = likelihood_info.get("delay", 0)
    if "mean" in likelihood_info and "cov" in likelihood_info:
        # Try to make sense of mean and cov
        mean = np.array(likelihood_info["mean"])
        cov = np.array(likelihood_info["cov"])
        if self.n_modes == 1:
            if len(mean.shape) == 1:
                mean = np.array([mean])
            if len(cov.shape) == 2:
                cov = np.array([cov])
        self.d = mean.shape[1]
        assert (mean.shape == (self.n_modes, self.d) and
                cov.shape == (self.n_modes, self.d, self.d)), (
            "Inconsistent mean and covmat given. "
            "See documentation for the Gaussian likelihood.")
        lims = np.array(())
    elif "limits" in likelihood_info:
        lims = np.array(likelihood_info["limits"])
        if len(lims.shape) == 1:
            lims = np.array([lims])
        self.d = lims.shape[0]
        assert lims.shape == (self.d, 2), (
            "Inconsistent parameter limits given. "
            "See documentation for the Gaussian likelihood.")
        # draw random means inside the limits and a random covariance with
        # randomly signed correlations
        mean = np.zeros((self.n_modes, self.d))
        cov = np.zeros((self.n_modes, self.d, self.d))
        for i in range(self.n_modes):
            mean[i] = [uniform.rvs() * (xmax - xmin) + xmin
                       for (xmin, xmax) in lims]
            stds = [uniform.rvs() * (xmax - xmin) * 0.09 + 0.01
                    for (xmin, xmax) in lims]
            for j in range(self.d):
                cov[i, j, j] = stds[j]**2
                for k in range(j + 1, self.d):
                    cov[i, j, k] = stds[j] * stds[k] * (uniform.rvs() * 2 - 1)
                    cov[i, k, j] = cov[i, j, k]
    else:
        raise ValueError(
            "Not enough info specified for the likelihood 'gaussian': "
            "Either 'mean' and 'cov', OR simply 'limits', must be specified.")
    self.gaussians = [multivariate_normal(mean[i], cov[i])
                      for i in range(self.n_modes)]
    print("[Likelihood: Gaussian] Initialised with %d mode(s) and a delay "
          "of %f sec" % (self.n_modes, self.delay))
    # `lims != np.array(())` raises for non-trivial arrays; test size instead
    if lims.size:
        print("[Likelihood: Gaussian] Gaussians were randomly picked, "
              "so here is the YAML for testing:")
        print('"""\nlikelihoods:\n - name: gaussian\n n_modes: %d'
              % self.n_modes)
        print(" mean:")
        for i in range(self.n_modes):
            print(" - ", mean[i].tolist())
        print(" cov:")
        for i in range(self.n_modes):
            print(" - ", cov[i].tolist())
        print(' delay: %d\n"""' % self.delay)
def parse_distribution(params, dist=False):
    distribName = params[0]
    params[1], params[2] = float(params[1]), float(params[2])
    if distribName == 'norm':
        if len(params) != 3:
            displaymessage("Normal distribution takes two parameters",
                           end_execution=False)
            out = 1
        loc, scale = params[1], params[2]
        if dist:
            out = lambda x: norm.rvs(loc, scale)
        else:
            # redraw until the value is non-negative, giving up after 100 tries
            loop_counter = 0
            out = norm.rvs(loc, scale)
            while out < 0:
                out = norm.rvs(loc, scale)
                loop_counter += 1
                if loop_counter > 100:
                    displaymessage("Distribution too far in the negative. "
                                   "Set to default 1.", end_execution=False)
                    out = 1
                    break
    elif distribName == 'uniform':
        # constant between loc and loc+scale
        if len(params) != 3:
            displaymessage("Uniform distribution takes two parameters",
                           end_execution=False)
            out = 1
        loc, scale = params[1], params[2]
        if loc + scale <= 0:
            displaymessage("Distribution lies entirely to the left of the "
                           "y-axis. Changed to default value 1.",
                           end_execution=False)
            out = 1
        else:
            # the original left `out` unset after clipping a negative loc;
            # clip, then fall through to draw the value
            if loc < 0:
                displaymessage("Uniform distribution takes negative values.",
                               end_execution=False)
                loc = max(loc, 0)
            if dist:
                out = lambda x: uniform.rvs(loc, scale)
            else:
                out = uniform.rvs(loc, scale)
    elif distribName == 'beta':
        if len(params) != 5:
            displaymessage("Beta distribution takes four parameters",
                           end_execution=False)
            out = 1
        a, b, loc, scale = (params[1], params[2],
                            float(params[3]), float(params[4]))
        if dist:
            out = lambda x: beta.rvs(a, b, loc, scale)
        else:
            out = beta.rvs(a, b, loc, scale)
    return out
def generateToy():
    np.random.seed(12345)
    fig, ax = plt.subplots(4, sharex=True)

    powerlaw_arg = 2
    triang_arg = 0.7
    n_samples = 500

    # simple line with slope 1, from 0 to 1: powerlaw.pdf(x, a) = a * x**(a-1)
    frozen_powerlaw = powerlaw(powerlaw_arg)
    # triangle with peak at 0.7: up-sloping from loc to (loc + c*scale), then
    # down-sloping from (loc + c*scale) to (loc + scale)
    frozen_triangle = triang(triang_arg)
    frozen_uniform = uniform(0.2, 0.5)
    frozen_uniform2 = uniform(0.3, 0.2)

    x = np.linspace(0, 1)
    # integer division keeps the sizes valid under Python 3
    signal = np.random.normal(0.5, 0.1, n_samples // 2)
    data_frame = pd.DataFrame({
        'powerlaw': powerlaw.rvs(powerlaw_arg, size=n_samples),
        'triangle': triang.rvs(triang_arg, size=n_samples),
        'uniform': np.concatenate(
            (uniform.rvs(0.2, 0.5, size=n_samples // 2),
             uniform.rvs(0.3, 0.2, size=n_samples // 2))),
        'powerlaw_signal': np.concatenate(
            (powerlaw.rvs(powerlaw_arg, size=n_samples // 2), signal))})

    ax[0].plot(x, frozen_powerlaw.pdf(x), 'k-', lw=2, label='powerlaw pdf')
    hist(data_frame['powerlaw'], bins=100, normed=True, histtype='stepfilled',
         alpha=0.2, label='100 bins', ax=ax[0])
    ax[0].legend(loc='best')

    ax[1].plot(x, frozen_triangle.pdf(x), 'k-', lw=2, label='triangle pdf')
    hist(data_frame['triangle'], bins=100, normed=True, histtype='stepfilled',
         alpha=0.2, label='100 bins', ax=ax[1])
    hist(data_frame['triangle'], bins='blocks', fitness='poly_events',
         normed=True, histtype='stepfilled', alpha=0.2, label='b blocks',
         ax=ax[1])
    ax[1].legend(loc='best')

    hist(data_frame['powerlaw_signal'], bins=100, normed=True,
         histtype='stepfilled', alpha=0.2, label='100 bins', ax=ax[2])
    ax[2].legend(loc='best')

    ax[3].plot(x, frozen_uniform.pdf(x) + frozen_uniform2.pdf(x), 'k-', lw=2,
               label='uniform pdf')
    hist(data_frame['uniform'], bins=100, normed=True, histtype='stepfilled',
         alpha=0.2, label='100 bins', ax=ax[3])
    ax[3].legend(loc='best')

    plt.show()
    fig.savefig('plots/toy_plots.png')
def get_poisson_spikes(seed=11111, rate=5, steps=1000, deltat=1 / 30):
    """
    Generate a poisson spike train

    Parameters
    ----------
    seed : int, optional
        Random number generator seed.
    rate : int
        Mean firing rate across the spike train (in Hz).
    steps : int
        Number of time steps in spike train.
    deltat : float
        Width of each time bin (in seconds).

    Returns
    -------
    spikes : array
        Array of length equal to steps containing binary values.
    """
    np.random.seed(seed)
    spikes = np.zeros(steps)
    # each bin independently fires with probability rate * deltat
    spikes[[step for step in range(steps)
            if uniform.rvs() <= rate * deltat]] = 1.0
    return spikes
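# Quick check (an illustrative sketch, not part of the original source): with
# rate * deltat = 5/30, roughly one bin in six should contain a spike.
spikes = get_poisson_spikes(seed=11111, rate=5, steps=6000, deltat=1. / 30)
print("empirical spike probability: %.3f (expected ~%.3f)"
      % (spikes.mean(), 5 / 30.))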
def test_v_uniform(self):
    distro = Distribution(
        uniform.rvs(loc=0, scale=2, size=100,
                    random_state=np.random.RandomState(2)))
    assert_equal(distro.v, None)
def _test_anneal(self, objfunc, schedule, probfunc, iterations):
    ''' our actual annealing method '''
    bests = []
    k_current = 1
    # get our initial state from a latin hypercube sample of the search space
    dim = len(objfunc.maxs)
    params = [(objfunc.mins[i], objfunc.maxs[i] - objfunc.mins[i])
              for i in range(dim)]
    seeds = np.array(lhs([uniform] * dim, params, 1, True,
                         np.identity(dim))).T[0]
    state = self._get_state(objfunc, [0.0] * dim, seeds)
    state = [s + objfunc.mins[i] for i, s in enumerate(state)]
    best = np.array(state)
    start_val = best_val = objfunc.eval(best)
    t_current = self.t_initial
    bests.append(best_val)
    for _ in range(iterations):
        k_current += 1
        t_current = schedule(k_current)
        state = self._get_state(objfunc, state, [1.0] * dim)
        current_val = objfunc.eval(state)
        # accept the move stochastically, as in simulated annealing
        if uniform.rvs() < probfunc(t_current, current_val, best_val, dim):
            best = state
            best_val = current_val
        bests.append(best_val)
    return np.array(bests)
def ep_rvs(mu=0, alpha=1, beta=1, size=1):
    # inverse-transform sampling for the exponential power distribution,
    # using the inverse of the regularized lower incomplete gamma function
    u = uniform.rvs(loc=0, scale=1, size=size)
    z = 2 * np.abs(u - 1. / 2)
    z = gammaincinv(1. / beta, z)
    y = mu + np.sign(u - 1. / 2) * alpha * z**(1. / beta)
    return y
def test_negative_pdf(self):
    distro = Distribution(
        uniform.rvs(loc=0, scale=1, size=100,
                    random_state=np.random.RandomState(1)))
    assert_almost_equal(distro.negative_pdf(0), [-0.5805], decimal=4)
def _anneal(self, objfunc, schedule, probfunc):
    ''' our actual annealing method '''
    k_current = 1
    # get our initial state from a latin hypercube sample of the search space;
    # the original referenced an undefined name `func` here
    dim = len(objfunc.maxs)
    params = [(objfunc.mins[i], objfunc.maxs[i] - objfunc.mins[i])
              for i in range(dim)]
    seeds = np.array(lhs([uniform] * dim, params, 1, True,
                         np.identity(dim))).T[0]
    state = self._get_state(objfunc, [0.0] * dim, seeds)
    state = [s + objfunc.mins[i] for i, s in enumerate(state)]
    best = np.array(state)
    best_val = objfunc.eval(best)
    t_current = self.t_initial
    while t_current > self.tfinal:  # run until temperature is low enough
        k_current += 1
        t_current = schedule(k_current)
        state = self._get_state(objfunc, state, [1.0] * dim)
        current_val = objfunc.eval(state)
        if uniform.rvs() < probfunc(t_current, current_val, best_val, dim):
            best = state
            best_val = current_val
    return best
def test_logpdf(self):
    distro = Distribution(
        uniform.rvs(loc=0, scale=1, size=100,
                    random_state=np.random.RandomState(1)))
    assert_almost_equal(distro.logpdf(0), np.log([0.5805]), decimal=4)
def rand_frag_size(dist_string):
    dist_list = dist_string.split(',')
    if dist_list[0] == 'skewed-normal':
        rand_size = abs(int(round(skewnormdist.rvs(
            float(dist_list[3]),
            loc=float(dist_list[1]),
            scale=float(dist_list[2])))))
    elif dist_list[0] == 'normal':
        rand_size = abs(int(round(normdist.rvs(
            loc=float(dist_list[1]),
            scale=float(dist_list[2])))))
    elif dist_list[0] == 'uniform':
        rand_size = abs(int(round(uniformdist.rvs(
            loc=float(dist_list[1]),
            scale=float(dist_list[2])))))
    elif dist_list[0] == 'truncated-normal':
        rand_size = abs(int(round(truncnormdist.rvs(
            float(dist_list[3]), float(dist_list[4]),
            loc=float(dist_list[1]),
            scale=float(dist_list[2])))))
    else:
        # without this guard an unknown name raised UnboundLocalError
        raise ValueError('Unknown distribution: {}'.format(dist_list[0]))
    return rand_size
def bm_1switch_d(n=1000, x=0, mus=[0, -.1], sigmas=[1, 1.2], cut=-1, dt=.1,
                 dr=.05):
    """
    Generate a brownian motion that transitions through a regime. The first
    set of parameters applies as long as the process is above the cutpoint,
    and the second set applies below it. Otherwise parameters are as defined
    in the simple brownian motion function bm_basic, but with a death rate
    as in the basic brownian motion with death.
    """
    vals = np.zeros((n, 1))
    devents = np.zeros((n, 1))
    for k in range(n):
        if uniform.rvs() < dt * dr:
            x = 0
            devents[k] = 1
        elif x >= cut:
            # scipy's scale is a standard deviation, so the increment std
            # must be sigma * sqrt(dt), not sigma**2 * dt
            x = x + mus[0] * dt + norm.rvs(scale=sigmas[0] * np.sqrt(dt))
            vals[k] = x
        else:
            x = x + mus[1] * dt + norm.rvs(scale=sigmas[1] * np.sqrt(dt))
            vals[k] = x
    return vals, devents
def bm_2switch_d(n=1000, x=0, mus=[0, -.1], sigmas=[1, 1.2], cut=[-1, -2],
                 dt=.1, dr=.05):
    """
    Generate a brownian motion that transitions through a middle regime where
    risk-taking behavior occurs. The first set of parameters applies while
    the process is above the first cutpoint or below the second; the second
    set applies in between. Otherwise parameters are as defined in the simple
    brownian motion function bm_basic.
    """
    vals = np.zeros((n, 1))
    devents = np.zeros((n, 1))
    for k in range(n):
        if uniform.rvs() < dt * dr:
            x = 0
            devents[k] = 1  # record the death event, as in bm_1switch_d
        elif x >= cut[0] or x < cut[1]:
            # scipy's scale is a standard deviation: sigma * sqrt(dt)
            x = x + mus[0] * dt + norm.rvs(scale=sigmas[0] * np.sqrt(dt))
            vals[k] = x
        else:
            x = x + mus[1] * dt + norm.rvs(scale=sigmas[1] * np.sqrt(dt))
            vals[k] = x
    return vals, devents
def generate_image(fwhm=2):
    """
    Parameters
    ----------
    fwhm : int
        The desired FWHM for the Gaussian kernel used in smoothing a
        uniform RVS

    Returns
    -------
    simg : nibabel.nifti1.Nifti1Image
        A Nifti image with smooth uniform noise
    """
    # Load generic neurovault image
    neurovault_entry = neurovault.fetch_neurovault_auditory_computation_task()
    img_path = neurovault_entry.images
    img = nib.load(img_path.pop())

    # Generate uniform noise, smoothed with a gaussian kernel
    rv = uniform.rvs(size=np.array(img.shape))
    # convert the FWHM to the gaussian's standard deviation
    sigma = fwhm / np.sqrt(8 * np.log(2))
    srv = filters.gaussian_filter(rv, sigma=sigma)

    # Add generated noise to the neurovault image
    new_data = img.get_fdata() + srv
    simg = nilearn.image.new_img_like(img, new_data)
    return simg
def generate_p0(df_config, nwalkers, fix=None):
    config = df_config.copy()
    adopts_logprior = config.prior == "log"
    _a = config.lo.values
    _b = config.hi.values
    a = _a[adopts_logprior]
    b = _b[adopts_logprior]
    loc = _a[~adopts_logprior]
    scale = (_b - _a)[~adopts_logprior]
    p0 = np.empty((nwalkers, len(config)))
    # uniform priors: sample U(lo, hi); log priors: sample log-uniformly
    p0[:, ~adopts_logprior] = uniform.rvs(
        size=(nwalkers, (~adopts_logprior).sum()), loc=loc, scale=scale)
    p0[:, adopts_logprior] = loguniform.rvs(
        size=(nwalkers, adopts_logprior.sum()), a=a, b=b)
    if fix is not None:
        deleted_index = [list(config.name).index(pname) for pname in fix]
        p0 = np.delete(p0, deleted_index, axis=1)
    return p0
def bm_basic_d(n=1000, x=0, mu=0, sigma=1, dt=.1, dr=.05):
    """
    Generate a basic brownian motion with n observations, with a flow
    probability of death, in which event the process starts over again at 0.
    x is the starting point, mu is the mean of the process, and sigma is the
    standard deviation of the process. The step size is dt, and n is the
    number of draws. The flow probability of death is dt*dr.
    """
    vals = np.zeros((n, 1))
    devents = np.zeros((n, 1))
    for k in range(n):
        if uniform.rvs() < dt * dr:
            x = 0
            devents[k] = 1
        else:
            # scipy's scale is a standard deviation: sigma * sqrt(dt)
            x = x + mu * dt + norm.rvs(scale=sigma * np.sqrt(dt))
            vals[k] = x
    return vals, devents
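# Usage sketch (illustrative, not from the original source): simulate one
# path and count deaths; with dt * dr = 0.005 we expect about 5 deaths in
# 1000 steps.
vals, devents = bm_basic_d(n=1000, mu=0.1, sigma=1, dt=.1, dr=.05)
print("deaths observed: %d" % int(devents.sum()))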
def test_repr_uniform(self):
    distro = Distribution(
        uniform.rvs(loc=0, scale=1, size=100,
                    random_state=np.random.RandomState(2)))
    assert_equal(distro.__repr__(), '(4.364(+5.354/-4.098))e-1')
def test_repr_uniform_diff_precision(self):
    distro = Distribution(
        uniform.rvs(loc=0, scale=1, size=100,
                    random_state=np.random.RandomState(2)))
    assert_equal(distro.__repr__(precision=2), '(4.36(+5.35/-4.10))e-1')
def test_not_normal(self):
    distro = Distribution(
        uniform.rvs(loc=0, scale=1, size=100,
                    random_state=np.random.RandomState(1)))
    assert_equal(distro.normal, False)
def _homoGen(self, t0, tMax, rate):
    # homogeneous Poisson process: exponential inter-arrival times obtained
    # by inverse-transform sampling of uniform draws
    times = [t0]
    while times[-1] < tMax:
        times.append(times[-1] - (1 / rate) * log(uniform.rvs(0., 1.)))
    self.bkgTimes = times
def sampling_exponential_distribution(L, lambd):
    deviation = 0.0
    true_mean = 1 / lambd  # mu_y
    repetitions = 1000
    for _ in range(repetitions):
        # invariant: deviation = sum of |mu_y - y_hat| over all y_hat values
        # previously calculated

        # get L numbers from the uniform distribution
        z = uniform.rvs(size=L)
        # map uniform values through the inverse CDF y(x) = -(1/lambd)*log(1-x);
        # the original used Python 2 tuple-argument lambda syntax
        y = [-(1 / lambd) * log(1 - x) for x in z]
        # calculate the sample mean y_hat
        sample_mean = sum(y) / L
        # add the new |mu_y - y_hat| to the deviation
        deviation = deviation + abs(true_mean - sample_mean)
    # return the average deviation over all repetitions
    return deviation / repetitions
def random_cov(ranges, O_std_min=1e-2, O_std_max=1, n_modes=1, mpi_warn=True):
    """
    Returns a random covariance matrix, with standard deviations sampled
    log-uniformly from the length of the parameter ranges times ``O_std_min``
    and ``O_std_max``, and a random correlation structure.

    The output of this function can be used directly as the value of the
    option ``cov`` of the :class:`likelihoods.gaussian`.

    If ``n_modes>1``, returns a list of such matrices.
    """
    if get_mpi_size() and mpi_warn:
        print("WARNING! Using with MPI: different processes will produce "
              "different random results.")
    dim = len(ranges)
    scales = np.array([r[1] - r[0] for r in ranges])
    cov = []
    for i in range(n_modes):
        # log-uniform stds in [scale * O_std_min, scale * O_std_max]
        stds = scales * 10**(uniform.rvs(
            size=dim, loc=np.log10(O_std_min),
            scale=np.log10(O_std_max / O_std_min)))
        this_cov = np.diag(stds).dot(
            (random_correlation.rvs(dim * stds / sum(stds))
             if dim > 1 else np.eye(1)).dot(np.diag(stds)))
        # Symmetrize (numerical noise is usually introduced in the last step)
        cov += [(this_cov + this_cov.T) / 2]
    if n_modes == 1:
        cov = cov[0]
    return cov
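# Usage sketch (illustrative; assumes the module-level imports used by
# random_cov above): a 3-parameter covariance whose standard deviations lie
# between 1% and 100% of each parameter range.
ranges = [(0, 1), (-5, 5), (10, 20)]
cov = random_cov(ranges, O_std_min=1e-2, O_std_max=1, mpi_warn=False)
print(np.sqrt(np.diag(cov)))  # per-parameter standard deviations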
def ztp(N, lambda_):
    """Zero-truncated Poisson distribution"""
    # inverse-CDF trick: sample p uniformly above P(X=0) so that zero can
    # never be drawn, then invert the Poisson CDF
    temp = [poisson.pmf(0, item) for item in lambda_]
    p = [uniform.rvs(loc=item, scale=1 - item) for item in temp]
    ztp = [int(poisson.ppf(p[i], lambda_[i])) for i in range(len(p))]
    return np.array(ztp)
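# Usage sketch (illustrative): every draw is >= 1 by construction, since p
# is sampled above the probability mass at zero.
draws = ztp(5, [0.5, 1.0, 2.0, 4.0, 8.0])
print(draws, (draws >= 1).all())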
def move(self, global_best, alpha, beta):
    # standard PSO update: pull the velocity toward the particle's own best
    # and the global best, then clamp velocity and position to bounds
    for i, coord in enumerate(self.velocity):
        t_min = self.func.mins[i] * 0.5
        t_max = self.func.maxs[i] * 0.5
        tval = (coord
                + (alpha * uniform.rvs()) * (self.best[i] - self.position[i])
                + (beta * uniform.rvs()) * (global_best[i] - self.position[i]))
        self.velocity[i] = (t_min if tval < t_min
                            else t_max if tval > t_max else tval)
    for i, coord in enumerate(self.position):
        tval = coord + self.velocity[i]
        self.position[i] = (self.func.mins[i] if tval < self.func.mins[i]
                            else self.func.maxs[i] if tval > self.func.maxs[i]
                            else tval)
def _try_drift(self):
    self.examples_in_current_macro_round += 1
    if self.examples_in_current_macro_round == self.number_of_nodes:
        self.examples_in_current_macro_round = 0
        if uniform.rvs(loc=0.0, scale=1.0) < self.drift_prob:
            print("DRIFT!!!")
            self.set_random_parameters()
            self._generate_drift_event()
def decentralize(Pdiff, no_individuals):
    # the original unpacked Pdiff.shape into (no_snps, no_individuals),
    # silently overwriting the no_individuals argument; keep the argument
    no_snps = Pdiff.shape[0]
    p0s = uniform.rvs(size=no_snps)
    P = (Pdiff.T + p0s).T
    if no_individuals > 0:
        P = discretize(P, no_individuals) * 1.0 / no_individuals
        p0s = discretize(p0s, no_individuals) * 1.0 / no_individuals
    return P, p0s
def UniformDis(self, size, dis):
    """
    :param size: the number of variables
    :param dis: distribution range (scipy's uniform is U(loc, loc + scale),
                so values fall in [1, 1 + dis))
    :return: array of random integers
    """
    # avoid shadowing the built-in `set`
    samples = np.array(uniform.rvs(1, dis, size)).astype(int)
    return samples
def rvs(self, size=1):
    # draw unit-cube samples, then rescale each dimension to its bounds
    scalers = [MinMaxScaler(bound) for bound in self._bounds]
    samples = uniform.rvs(size=(size, self.ndim))
    samples = [scaler.fit_transform(sample[:, None]).T
               for scaler, sample in zip(scalers, samples.T)]
    samples = np.vstack(samples).T
    return samples
def _rvs(self, p, q):
    # important to define _rvs explicitly, otherwise sampling is extremely
    # slow; exponentiating a uniform draw over the log-bounds gives a
    # log-uniform variate
    size = self._size
    loc = p
    scale = q - p
    return np.exp(uniform.rvs(loc=loc, scale=scale, size=size))
def ztpoisson(N, lambda_par):
    """Zero-truncated Poisson distribution."""
    # sample p uniformly above P(X=0) so zero can never be drawn, then invert
    # the Poisson CDF; lambda_par must provide N rate parameters
    lam = np.atleast_1d(lambda_par)
    temp = poisson.pmf(0, lam)
    p = [uniform.rvs(loc=item, scale=1 - item) for item in temp]
    ztp = [int(poisson.ppf(p[i], lam[i])) for i in range(N)]
    return np.array(ztp)
def jeuDeDonnees_schioler_bruite_inhomogene(size, a1=-.75, b1=-.25, a2=1,
                                            b2=1.25, a3=None, b3=None,
                                            sigma=0.2):
    '''
    Compute the schioler function defined as follows:
    | On the x-axis the points follow an inhomogeneous uniform distribution,
    | i.e. X is drawn on [a1, b1] U [a2, b2] (and optionally [a3, b3]);
    | y = f(x) + delta where:
    |   f(x) = sin(pi.x) on ]-1, 1[
    |   f(x) = 0 on [-2, -1] U [1, 2]
    | with delta a noise following a normal distribution N(0; sigma**2)
    |
    | X, Y = schioler(size, sigma)
    |
    | size  : number of data points (abscissas) to generate
    | sigma : sigma of the normal law N(0; sigma**2) followed by the noise
    |         added to the data (0.2 is the default value, matching the
    |         problem statement)
    | Outputs:
    | X : the randomly drawn abscissa values
    | Y : the output values of the function
    '''
    if a3 is None and b3 is None:
        if not (a1 < b1 < a2 < b2):
            raise ValueError("a1 < b1 < a2 < b2 not satisfied.")
        p1 = (b1 - a1) / ((b1 - a1) + (b2 - a2))
        size1 = binom.rvs(size, p1)
        size2 = size - size1
        X = np.concatenate((uniform.rvs(0, 1, size1) * (b1 - a1) + a1,
                            uniform.rvs(0, 1, size2) * (b2 - a2) + a2))
    else:
        if not (a1 < b1 < a2 < b2 < a3 < b3):
            raise ValueError("a1 < b1 < a2 < b2 < a3 < b3 not satisfied.")
        p1 = (b1 - a1) / ((b1 - a1) + (b2 - a2) + (b3 - a3))
        p2 = (b2 - a2) / ((b1 - a1) + (b2 - a2) + (b3 - a3))
        size1 = binom.rvs(size, p1)
        size2 = binom.rvs(size, p2)
        size3 = size - size2 - size1
        X = np.concatenate((uniform.rvs(0, 1, size1) * (b1 - a1) + a1,
                            uniform.rvs(0, 1, size2) * (b2 - a2) + a2,
                            uniform.rvs(0, 1, size3) * (b3 - a3) + a3))
    X = np.sort(X)
    return X, schioler_bruite(X, sigma=sigma)
def distributions(size):
    # draw samples from five reference distributions, each with comparable
    # location and scale
    n = norminvgauss.rvs(1, 0, size=size)
    l = laplace.rvs(size=size, scale=1 / m.sqrt(2), loc=0)
    p = poisson.rvs(10, size=size)
    c = cauchy.rvs(size=size)
    u = uniform.rvs(size=size, loc=-m.sqrt(3), scale=2 * m.sqrt(3))
    counted_distributions = [n, l, p, c, u]
    return counted_distributions
def phantom(self, cam_pose, relpos):
    if uniform.rvs() < self.phantom_prob:  # uniform draw on [0, 1)
        # ignore the true landmark observation (relpos): with probability
        # phantom_prob, a phantom appears inside a circle of radius 5 m
        pos = np.array(self.phantom_dist.rvs()).T
        return self.observation_function(cam_pose, pos)
    else:
        return relpos
def _rstable0(alpha):
    U = uniform.rvs(size=1)
    while True:
        # generate non-zero exponential random variable
        W = expon.rvs(size=1)
        if W != 0:
            break
    return np.power(_A(math.pi * U, alpha) / np.power(W, 1.0 - alpha),
                    1.0 / alpha)
def Friedman_function_gen(dimNum, dataNum):
    # Friedman #1 benchmark: only the first five features carry signal
    xdata = uniform.rvs(size=(dataNum, dimNum))
    ydata = (10 * np.sin(np.pi * xdata[:, 0] * xdata[:, 1])
             + 20 * (xdata[:, 2] - 0.5)**2
             + 10 * xdata[:, 3] + 5 * xdata[:, 4]
             + norm.rvs(size=dataNum))
    return xdata, ydata
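# Usage sketch (illustrative): generate a Friedman #1 data set; the variance
# of y exceeds the unit noise variance because of the signal terms.
X, y = Friedman_function_gen(dimNum=10, dataNum=200)
print(X.shape, y.shape, y.var() > 1)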
def doJump(p=0.7):
    """Returns whether a human has jumped or not with probability p."""
    return uniform.rvs() <= p
def sep_rvs(mu=0, sigma=1, nu=0, tau=2, size=1):
    # skew exponential power: draw a symmetric EP2 variate and flip its sign
    # with a probability governed by the skewness parameter nu
    y = ep2_rvs(0, 1, tau, size=size)
    w = np.sign(y) * np.abs(y)**(tau / 2) * nu * np.sqrt(2. / tau)
    r = -np.sign(uniform.rvs(loc=0, scale=1, size=size)
                 - scipy.stats.norm.cdf(w))
    z = r * y
    return mu + sigma * z
def sample_via_cdf(x, p, nsamp):
    # inverse-transform sampling from a tabulated density p(x):
    # build the normalized cumulative distribution...
    cdf = cumtrapz(p, x, initial=0)
    cdf = cdf / cdf.max()
    # ...invert it by interpolation...
    interp = interp1d(cdf, x)
    # ...and push uniform samples through the inverse CDF
    cdf_samp = uniform.rvs(size=nsamp)
    return interp(cdf_samp)
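# Usage sketch (illustrative; assumes numpy and the scipy imports used above):
# sample from the triangular density p(x) = 2x on [0, 1]; the sample mean
# should be near the analytic mean of 2/3.
x = np.linspace(0, 1, 1000)
samples = sample_via_cdf(x, 2 * x, nsamp=10000)
print("sample mean: %.3f (expected ~0.667)" % samples.mean())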
def drawTheta(thetaCurr):
    """
    This function provides proposed values for theta, given a current value.
    It currently uses only a uniform distribution centered on the current
    value. The size of the window is specified below (propWindowSize).
    """
    propWindowSize = 0.1
    # min = loc, max = loc + scale
    newTheta = uniform.rvs(loc=thetaCurr - (propWindowSize / 2.0),
                           scale=propWindowSize)
    return newTheta
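# Usage sketch (illustrative): the proposal is a symmetric random walk, so
# every proposed value lies within half a window (0.05) of the current one.
theta = 1.0
proposal = drawTheta(theta)
print(abs(proposal - theta) <= 0.05)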
def get_stars_formed(ra, dec, t_min, t_max, v_sys, dist, N_size=512):
    """
    Get the normalization constant for stars formed at ra and dec

    Parameters
    ----------
    ra : float
        right ascension input (decimals)
    dec : float
        declination input (decimals)
    t_min : float
        minimum time for a star to have been formed (Myr)
    t_max : float
        maximum time for a star to have been formed (Myr)
    v_sys : float
        Systemic velocity of system (km/s)
    dist : float
        Distance to the star forming region (km)

    Returns
    -------
    SFR : float
        Star formation normalization constant
    """
    ran_phi = 2.0 * np.pi * uniform.rvs(size=N_size)

    c_1 = 3.0 / np.pi / (t_max - t_min)**3 * (dist / v_sys)**2
    ran_x = uniform.rvs(size=N_size)
    ran_t_b = (3.0 * ran_x / (c_1 * np.pi * (v_sys / dist)**2))**(1.0 / 3.0) \
        + t_min

    theta_c = v_sys / dist * (ran_t_b - t_min)
    c_2 = 1.0 / (np.pi * theta_c**2)
    ran_y = uniform.rvs(size=N_size)
    ran_theta = np.sqrt(ran_y / (c_2 * np.pi))

    ran_ra = c.rad_to_deg * ran_theta * np.cos(ran_phi) \
        / np.cos(c.deg_to_rad * dec) + ra
    ran_dec = c.rad_to_deg * ran_theta * np.sin(ran_phi) + dec

    # Specific star formation rate (Msun/Myr/steradian)
    SFR = sf_history.get_SFH(ran_ra, ran_dec, ran_t_b / (c.yr_to_sec * 1.0e6),
                             sf_history.smc_coor, sf_history.smc_sfh)
    return np.mean(SFR)
def SimulateUniforms(self):
    """Simulate draws by transforming uniforms through GHat and Invert."""
    V = self.V()
    Z = uniform.rvs(size=self.size)
    print("Z", Z)
    X = [-math.log(z) / V for z in Z]
    Y = [self.GHat(x) for x in X]
    print(Y)
    # the original left this line commented out, which made `return T`
    # a NameError; restore the inversion step
    T = [self.Invert(y) for y in Y]
    return T
def _get_state(self, objfunc, state, params):
    ''' set up a (perturbed) state '''
    def adjust_offset(value, minval, maxval):
        # keep within bounds
        return (maxval if value > maxval
                else minval if value < minval else value)

    # random offsets in [-0.5, 0.5) for each parameter
    seeds = np.array([uniform.rvs() - 0.5 for param in params])
    state = np.array([adjust_offset(s + seeds[i], objfunc.mins[i],
                                    objfunc.maxs[i])
                      for i, s in enumerate(state)])
    return state
def acceptance_rule(self, proposal, previous):
    # Metropolis acceptance: accept if the density ratio beats a uniform draw
    u = uniform.rvs()
    if self.debug:
        print(u)
        print(self.pdf(proposal) / self.pdf(previous) > u)
    if self.pdf(proposal) / self.pdf(previous) > u:
        return proposal
    else:
        return previous
def new_weights(self):
    """Get a new weight matrix"""
    from scipy.stats import uniform
    from numpy.random import permutation
    W = N.empty((self.N_CA, self.N_EC), 'd')
    # weights are uniform on [0, 2*mu_W) for a C_W fraction of the inputs,
    # and zero for the rest
    Wdist = uniform.rvs(size=self.N_EC, loc=0, scale=2 * self.mu_W)
    Wdist[int(self.C_W * self.N_EC):] = 0
    # each row gets the same weight distribution in a random order
    for Wi in W:
        Wi[:] = permutation(Wdist)
    return W
def move_random(self):
    ''' moves a little random bit '''
    alpha = self.pop.alpha
    for i, coord in enumerate(self.coords):
        # calc the temp value to set as coord
        tval = coord + (alpha * (uniform.rvs() - 0.5))
        # set as coord if within bounds
        self.coords[i] = (self.func.mins[i] if tval < self.func.mins[i]
                          else self.func.maxs[i] if tval > self.func.maxs[i]
                          else tval)
def sample(z):
    # obtain a new sample from the conditional proposal distribution
    z_star = proposal_sample(z)
    # Metropolis-Hastings ratio: target ratio times the proposal correction
    a = target_pdf(z_star) / target_pdf(z)
    b = proposal_cpdf(z, z_star) / proposal_cpdf(z_star, z)
    # accept or reject the new sample
    if uniform.rvs() < min(1, a * b):
        accepted = 1
        z = z_star
    else:
        accepted = 0
    return (accepted, z)
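# Usage sketch (illustrative; target_pdf, proposal_sample and proposal_cpdf
# are stand-ins defined here, not part of the original source): a short
# Metropolis-Hastings chain targeting a standard normal with a gaussian
# random-walk proposal.
from scipy.stats import norm

target_pdf = lambda z: norm.pdf(z)
proposal_sample = lambda z: norm.rvs(loc=z, scale=0.5)
proposal_cpdf = lambda z, z_given: norm.pdf(z, loc=z_given, scale=0.5)

z, chain = 0.0, []
for _ in range(1000):
    _, z = sample(z)
    chain.append(z)
print("chain mean: %.2f (target mean 0)" % np.mean(chain))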
def acc_rej_sample(k, p, q, trunc, N):
    '''
    Acceptance-rejection sampling.
    :param N: number of samples
    '''
    z = norm.rvs(loc=q[0], scale=q[1], size=N)  # sample from the proposal
    mu = uniform.rvs(size=N)  # sample from the uniform distribution
    # accept-reject step: keep z where u <= p(z) / (k * q(z))
    z = z[(mu <= norm.pdf(z, p[0], p[1]) / (k * norm.pdf(z, q[0], q[1])))]
    # truncate to the requested interval
    z = z[z >= trunc[0]]
    z = z[z <= trunc[1]]
    return z
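# Usage sketch (illustrative): draw from N(0, 1) truncated to [0, 2] using a
# wider N(0, 2) proposal; k = 3 bounds p(z) / q(z), whose maximum here is 2.
z = acc_rej_sample(k=3.0, p=(0, 1), q=(0, 2), trunc=(0, 2), N=10000)
print("accepted %d samples in [%.2f, %.2f]" % (len(z), z.min(), z.max()))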
def buildMat(_M=1, _N=1, _pConJ=0.1, _pConR=1, _J=1., _sdJ=0., _R=10.,
             _sdR=0., _dist="bernoulli", _isSym=True, _isUni=False,
             _type="all"):
    #### pick values ####
    # note: the distribution name was spelled "bernouilli" in the original;
    # it is corrected consistently here
    minDim = min(_N, _M)
    if _dist == "bernoulli":
        temp = np.triu(bernoulli.rvs(_pConJ, size=(_M, _N)))
        temp[:minDim, :minDim] = (temp[:minDim, :minDim]
                                  - diag(diagonal(temp))
                                  + diag(bernoulli.rvs(_pConR, size=minDim)))
    elif _dist == "poisson":
        temp = np.triu(poisson.rvs(_pConJ, size=(_M, _N)))
        temp[:minDim, :minDim] = (temp[:minDim, :minDim]
                                  - diag(diagonal(temp))
                                  + diag(poisson.rvs(_pConR, size=minDim)))
    elif _dist == "uniform":
        temp = _J + _sdJ * (0.5 - np.triu(uniform.rvs(size=(_M, _N))))
        temp[:minDim, :minDim] = (temp[:minDim, :minDim]
                                  - diag(diagonal(temp))
                                  + diag(_R + _sdR *
                                         (0.5 - uniform.rvs(size=minDim))))
    elif _dist == "expon":
        temp = np.triu(expon.rvs(_J, size=(_M, _N)))
        temp[:minDim, :minDim] = (temp[:minDim, :minDim]
                                  - diag(diagonal(temp))
                                  + diag(expon.rvs(_R, size=minDim)))
    elif _dist == "norm":
        temp = _sdJ * np.triu(norm.rvs(_J, size=(_M, _N)))
        temp[:minDim, :minDim] = (temp[:minDim, :minDim]
                                  - diag(diagonal(temp))
                                  + diag(_sdR * norm.rvs(_R, size=minDim)))
    #### symmetrize matrix ####
    if _isSym:
        if _N == _M:
            temp = (_J * (temp.T + temp - 2 * diag(diagonal(temp)))
                    + _R * diag(diagonal(temp)))
        else:
            print("buildMat : N!=M, cannot 'symmetrize' the matrix")
    #### unitarize matrix ####
    if _isUni:
        if _isSym:
            print("buildMat : WARNING : isUni=True, the matrix will not "
                  "be symmetric...")
        temp, s, VT = svd(temp)
    #### return dense or sparse matrix ####
    if _type == "all":
        return temp
    elif _type == "sparse":
        return scipy.sparse.lil_matrix(temp)