def g():
    d = len(Mu)
    assert Mu.shape == (d,), "Mu must be a vector"
    assert A.shape == (d, d), "A must be a square matrix"
    assert (A.T == A).all(), "and symmetric"
    assert V.shape == (d, d), "V must be a square matrix"
    assert (V.T == V).all(), "and symmetric"
    a = chol(A)
    v = chol(V)
    B = dot(V, inv(V + A))
    _a2 = V - dot(B, V)
    _a2 = chol(_a2)
    Y, U = array([0.0] * d), array([0.0] * d)
    for i in range(n + burnin):
        for _ in range(thin):  # skip `thin` updates between retained draws
            # sample Y | U ~ N(U, V)
            Y = U + dot(v, random.normal(size=d))
            # sample U | Y ~ N(A(A+V)^-1(Y-Mu) + Mu, A - A(A+V)^-1 A)
            U = dot(B, (Mu - Y)) + Y + dot(_a2, random.normal(size=d))
        if i >= burnin:
            yield [U, Y]
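# --- Usage sketch (not from the original source) ---------------------------
# A minimal driver for the Gibbs sampler above, assuming g() closes over the
# module-level names Mu, A, V, n, burnin and thin, and that chol, inv, dot,
# array and random are the NumPy routines bound below. The concrete values
# are illustrative only.
import numpy as np
from numpy import array, dot, random
from numpy.linalg import inv, cholesky as chol

Mu = np.zeros(2)                 # mean of U
A = np.eye(2)                    # covariance of U
V = 0.5 * np.eye(2)              # covariance of Y given U
n, burnin, thin = 1000, 100, 5

draws = list(g())                # n retained (U, Y) pairs after burn-in
U_samples = np.array([u for u, y in draws])
print(U_samples.mean(axis=0))    # should be close to Mu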
def whitenoise(data, wpow=10**(-11), fs=2.5 * 10**6):
    l = len(data[0])
    sig_noise = sqrt(wpow * fs / 2)
    out1 = data[0] + sig_noise * random.normal(0, 1, l)
    out2 = data[1] + sig_noise * random.normal(0, 1, l)
    out3 = data[2] + sig_noise * random.normal(0, 1, l)
    out4 = data[3] + sig_noise * random.normal(0, 1, l)
    # datagenerator.writefile(out1, out2, out3, out4)
    return out1, out2, out3, out4
def testRewardFunction(self, x, typ, noise=0.000001):
    if typ == "growSin":
        return (sin((x - self.rangeMin) / 3.0) + 1.5 + x / self.distRange) / 4.0 + random.normal(0, noise)
    if typ == "rastrigin":
        n = x / self.distRange * 10.0
        if abs(n) > 5.0:
            n = 0.0
        # FIXME: imprecise reimplementation of the Rastrigin function that exists already
        # in rl/environments/functions...
        return (20.0 + n ** 2 - 10.0 * cos(2.0 * 3.1416 * n)) / 55.0 + random.normal(0, noise)
    if typ == "singleGaus":
        return self.getStND(x) + random.normal(0, noise)
    return 0.0
def quadthermo_agent_gen():
    targetT = spr.normal(18, 2)
    tolerance = 2
    absmax = max(spr.normal(21, 1), targetT + tolerance)  # no support
    absmin = min(spr.gamma(4, 1), targetT - tolerance)  # no support
    vec = [0.0, 0.21, 0.135, 0.205, 0.115, 0.115, 0.095, 0.065, 0.03, 0.03]
    r = sp.random.uniform()
    nres = 1
    i = 0
    while i < 7 and r > vec[i]:
        r -= vec[i]
        i += 1
    nres = i + 1
    occ = active.get_occ_p(nres)
    cons = []
    for p in occ:
        p.extend([targetT, tolerance])
        cons.append(p)
    q = sp.exp(sp.random.normal(-9.3, 2.0))
    fa = 28.39 + sp.random.gamma(shape=2.099, scale=28.696)  # floor area from CABE dwelling survey
    flat = sp.random.uniform(0, 1) < 0.365  # flat or house
    if flat:
        U = 3.3 * sp.sqrt(fa)  # insulation in W/K per floor area
    else:
        U = 3.6 * sp.sqrt(fa) + 0.14 * fa
    k = U  # insulation in W/K
    cm = 1000 * sp.exp(sp.random.normal(5.5, 0.35))  # thermal capacity in J
    P = sp.random.uniform(6000, 15000)  # power in W
    Prequ = k * 20
    if Prequ > P:
        print("!!!!!!!!!!!")  # required power exceeds the sampled rating
    s = str(["quadthermo_agent", absmax, absmin, cons, q, P, cm, k])
    return s
def thompson_sampling(y, std):
    """
    Thompson sampling was first described by Thompson in 1933 as a solution
    to the multi-arm bandit problem.

    Thompson, W. 1933. "On the likelihood that one unknown probability
    exceeds another in view of the evidence of two samples". Biometrika.
    25(3/4): 285-294.

    Parameters
    ----------
    y : 1D numpy array
        The mean of the surrogate model at all test points used in the
        optimization.
    std : 1D numpy array
        The standard deviation from the surrogate model at all test points
        used in the optimization.

    Returns
    -------
    nu_star : float
        The maximum value from the Thompson sampling.
    x_star : integer
        The index of the test point with the maximum value.
    tsVal : 1D numpy array
        Sampled values for all test points.
    """
    tsVal = random.normal(loc=y, scale=std)
    nu_star = np.max(tsVal)
    x_star = int(np.where(tsVal == nu_star)[0])
    return nu_star, x_star, tsVal
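# --- Usage sketch (not from the original source) ---------------------------
# Assuming `random` is numpy.random as in the function above; the surrogate
# means and standard deviations are made-up numbers for three test points.
import numpy as np
from numpy import random

y = np.array([0.2, 0.5, 0.4])       # surrogate mean at each test point
std = np.array([0.05, 0.30, 0.10])  # surrogate std at each test point
nu_star, x_star, tsVal = thompson_sampling(y, std)
# x_star favors points that are promising (high mean) or uncertain (high std)
print(nu_star, x_star, tsVal)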
def main():
    ITERATIONS = 100
    mc = zeros(ITERATIONS)
    og = zeros(ITERATIONS)
    QS = 10
    colors = [
        [0, 0, 0],
        [0, 0, 1],
        [0, 1, 0],
        [1, 0, 0],
        [0, 1, 1],
        [1, 0, 1],
        [0, 1, 1],
    ]
    for qpre in range(QS):
        q = qpre + 2
        for it in range(ITERATIONS):
            W = random.normal(0, 0.1, [q, q])
            WI = random.uniform(-.1, .1, [q, 1])
            mc[it] = sum(memory_capacity(W, WI, memory_max=200, runs=1, iterations_coef_measure=5000)[0][:q + 2])
            og[it] = matrix_orthogonality(W)
            print(qpre, QS, it, ITERATIONS)
        plt.scatter(og, mc, marker='+', label=q, c=(colors[qpre % len(colors)]))
    plt.xlabel("orthogonality")
    plt.ylabel("memory capacity")
    plt.grid(True)
    plt.legend()
    plt.show()
def find_conservative_MPPS(self, n_MPP=2, n_MPP_tries=100, err_g_max=0.2):
    """
    Search for MPPs

    Input:
    n_MPP       - try to find this number of MPPs
    n_MPP_tries - max number of tries in search for new MPP
    err_g_max   - convergence criterion in MPP search
    """
    k = 0
    U_MPP = []
    for i in range(n_MPP_tries):
        u0 = random.normal(size=(self.X_dist.dim))
        self.e = self.E_conservative[random.randint(len(self.E_conservative))]
        conv, u_MPP = self.MPP_search(u0=u0, N_max=100, err_g_max=err_g_max)
        if conv:
            k += 1
            U_MPP.append(u_MPP)
        if k >= n_MPP:
            break
    return np.array(U_MPP)
def generate_random_large_scale_field(self):
    """ mg is a gmg object """
    from scipy import random

    # level on which the random field is generated
    # this controls the scale of the field
    # current limitation: this has to be done on a level for which
    # each node has the whole domain
    flev = self.nlevs - 1  # min(6, self.nlevs-1)

    # gaussian noise parameters
    mu = 0.
    sigma = 1.

    # generate it on rank==0 then broadcast it
    # so that every rank has the same field
    if (self.myrank == 0):
        forc = random.normal(mu, sigma, (self.grid[flev].mv, self.grid[flev].nv))
        forc = forc * self.grid[flev].msk
    else:
        forc = None
    forc = MPI.COMM_WORLD.bcast(forc, root=0)

    # interpolate it on the finest grid
    self.x[flev][:, :] = forc
    for lev in range(flev - 1, -1, -1):
        if self.grid[lev].flag == 'peak':
            coarsetofine(self.grid[lev + 1], self.grid[lev], self.x[lev + 1], self.x[lev])
        else:
            coarsetofine(self.grid[lev + 1], self.grid[lev], self.x[lev + 1], self.r[lev])
            self.x[lev] += self.r[lev]
        self.grid[lev].smooth(self.x[lev], self.b[lev], 2)  # 15oct changed ,1 to ,2
    return self.x[0]
def initialize_x(N, init_type='normal'):
    '''
    Initialize an array of length N filled with probability parameters.

    init_type: str
        Accepts 'equal', 'normal', and 'uniform'
    '''
    x = np.zeros(N)
    initialize = True
    while initialize:
        if init_type == 'normal':
            x = random.normal(p, min(p, (1 - p)) / 2, N)
        elif init_type == 'uniform':
            x = random.uniform(pmin, pmax, N)
        elif init_type == 'equal':
            x = np.full(N, p)
        else:
            print('init_type is wrong')
            exit()
        if np.all(x <= pmax) and np.all(x >= pmin):
            initialize = False
        else:
            initialize = True
    return x
def generate_random_small_scale_field(self):
    """ mg is a gmg object """
    from scipy import random

    # level on which the random field is generated
    # this controls the scale of the field
    # current limitation: this has to be done on a level for which
    # each node has the whole domain
    lev = 0  # min(6, self.nlevs-1)

    # gaussian noise parameters
    mu = 0.
    sigma = 1.

    # generate it on rank==0 then broadcast it
    # so that every rank has the same field
    forc = random.normal(mu, sigma, (self.grid[lev].mv, self.grid[lev].nv))

    # smooth twice on the finest grid
    self.x[lev][:, :] = forc
    self.grid[lev].smooth(self.x[lev], self.b[lev], 2)  # 15oct changed ,1 to ,2
    return self.x[0]
def Run_MC(self, N_MC):
    """
    Crude Monte Carlo

    TODO: if N_MC = None -> run until cov is acceptable

    Output:
    beta - Reliability index (for comparison with FORM)
    pof  - Probability of failure
    cov  - Coefficient of variation
    """
    # Sample in U space
    U = random.normal(size=(N_MC, self.X_dist.dim))

    # Transform to X space
    X = self.X_dist.U_to_X(U)

    # Compute limit state and pof
    g = self.G(X)
    I = (g < 0) * 1
    pof = I.mean()

    # Coefficient of variation
    if pof > 0:
        var = pof * (1 - pof) / N_MC
        cov = np.sqrt(var) / pof
    else:
        cov = 1

    # Reliability index
    beta = -stats.norm.ppf(pof)

    return beta, pof, cov
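# --- Standalone sketch (not from the original source) ----------------------
# The same crude Monte Carlo estimator on a toy limit state g(x) = 3 - x with
# x ~ N(0, 1), so U space and X space coincide and the exact answer is
# pof = 1 - Phi(3) ~ 1.35e-3 (beta = 3). Illustrative only.
import numpy as np
from numpy import random
from scipy import stats

N_MC = 10**6
U = random.normal(size=N_MC)
I = (3.0 - U < 0)                                        # failure indicator
pof = I.mean()
cov = np.sqrt(pof * (1 - pof) / N_MC) / pof if pof > 0 else 1
beta = -stats.norm.ppf(pof)
print(beta, pof, cov)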
def newEpisode(self):
    if self.learning:
        params = ravel(self.explorationlayer.module.params)
        target = ravel(sum(self.history.getSequence(self.history.getNumSequences() - 1)[2]) / 500)
        if target != 0.0:
            self.gp.addSample(params, target)
            if len(self.gp.trainx) > 20:
                self.gp.trainx = self.gp.trainx[-20:, :]
                self.gp.trainy = self.gp.trainy[-20:]
                self.gp.noise = self.gp.noise[-20:]
            self.gp._calculate()

            # get new parameters where mean was highest
            max_cov = diag(self.gp.pred_cov).max()
            indices = where(diag(self.gp.pred_cov) == max_cov)[0]
            pick = indices[random.randint(len(indices))]
            new_param = self.gp.testx[pick]

            # check if that one exists already in gp training set
            if len(where(self.gp.trainx == new_param)[0]) > 0:
                # add some normal noise to it
                new_param += random.normal(0, 1, len(new_param))

            self.explorationlayer.module._setParameters(new_param)
        else:
            self.explorationlayer.drawRandomWeights()

    # don't call StateDependentAgent.newEpisode() because it randomizes the params
    LearningAgent.newEpisode(self)
def step(self):
    self.sensors = random.normal(loc=self.sensors * self.A + self.action * self.b, scale=0.001).flatten()
    if self.hasRenderer():
        self.getRenderer().updateData(self.sensors)
        if self.delay:
            time.sleep(self.tau)
def drop_object(self):
    """Drops a random object (box, sphere) into the scene."""
    # choose between boxes and spheres
    if random.uniform() > 0.5:
        (body, geom) = self._create_sphere(self.space, 10, 0.4)
    else:
        (body, geom) = self._create_box(self.space, 10, 0.5, 0.5, 0.5)
    # randomize position slightly
    body.setPosition((random.normal(-6.5, 0.5), 6.0, random.normal(-6.5, 0.5)))
    # body.setPosition( (0.0, 3.0, 0.0) )
    # randomize orientation slightly
    # theta = random.uniform(0, 2*pi)
    # ct = cos(theta)
    # st = sin(theta)
    # rotate body and append to (body, geom) tuple list
    # body.setRotation([ct, 0., -st, 0., 1., 0., st, 0., ct])
    self.body_geom.append((body, geom))
def keplerSim(tau, e, T0, K, w, sig, tlo, thi, n):
    dt = (thi - tlo) / (n - 1)
    data = zeros((n, 2), Float)
    data[:, 0] = r.uniform(tlo, thi, (n))
    # for i in range(n):
    #     data[i,0] = tlo + i*dt
    data[:, 1] = v_rad(K, w, tau, e, T0, data[:, 0]) + r.normal(0., sig, (n))
    print("Created data.")
    return data
def GaussianRandomInitializer(gridShape, sigma=0.2, seed=None, slipSystem=None, slipPlanes=None, slipDirections=None, vacancy=None, smectic=None):
    oldgrid = copy.copy(gridShape)

    if len(gridShape) == 1:
        gridShape = (128,)
    if len(gridShape) == 2:
        gridShape = (128, 128)
    if len(gridShape) == 3:
        gridShape = (128, 128, 128)

    """ Returns a random initial set of fields of class type PlasticityState """
    if slipSystem == 'gamma':
        state = SlipSystemState.SlipSystemState(gridShape, slipPlanes=slipPlanes, slipDirections=slipDirections)
    elif slipSystem == 'betaP':
        state = SlipSystemBetaPState.SlipSystemState(gridShape, slipPlanes=slipPlanes, slipDirections=slipDirections)
    else:
        if vacancy is not None:
            state = VacancyState.VacancyState(gridShape, alpha=vacancy)
        elif smectic is not None:
            state = SmecticState.SmecticState(gridShape)
        else:
            state = PlasticityState.PlasticityState(gridShape)

    field = state.GetOrderParameterField()
    Ksq_prime = FourierSpaceTools.FourierSpaceTools(gridShape).kSq * (-sigma**2 / 4.)

    if seed is None:
        seed = 0
    n = 0

    random.seed(seed)

    Ksq = FourierSpaceTools.FourierSpaceTools(gridShape).kSq.numpy_array()
    for component in field.components:
        temp = random.normal(scale=gridShape[0], size=gridShape)
        ktemp = fft.rfftn(temp) * (sqrt(pi) * sigma)**len(gridShape) * exp(-Ksq * sigma**2 / 4.)
        field[component] = numpy.real(fft.irfftn(ktemp))
        # field[component] = GenerateGaussianRandomArray(gridShape, temp, sigma)
        n += 1

    """
    t, s = LoadState("2dstate32.save", 0)
    for component in field.components:
        for j in range(0, 32):
            field[component][:,:,j] = s.betaP[component].numpy_array()
    """

    # To make seed consistent across grid sizes and convergence comparison
    gridShape = copy.copy(oldgrid)
    if gridShape[0] != 128:
        state = ResizeState(state, gridShape[0], Dim=len(gridShape))
    state = ReformatState(state)
    state.ktools = FourierSpaceTools.FourierSpaceTools(gridShape)

    return state
def fill_region(l, r, sigma, v):
    if (l == r or l == r - 1):
        pass
    else:
        m = int(round((r + l) * 0.5))
        a = v[l] + (v[r] - v[l]) * (m - l) / float(r - l)
        s = sigma * sqrt((m - l) * (r - m) / float(r - l))
        v[m] = a + s * random.normal()
        fill_region(l, m, sigma, v)
        fill_region(m, r, sigma, v)
def drawSample(self):
    sum = 0.0
    rndFakt = random.random()
    # NOTE: if the mixture weights sum to less than 1, the loop can finish
    # without a break and `x` is left unbound
    for g in range(self.numOGaus):
        sum += self.sigmo(self.alpha[g])
        if rndFakt < sum:
            if self.sigma[g] < self.minSig:
                self.sigma[g] = self.minSig
            x = random.normal(self.mue[g], self.sigma[g])
            break
    return x
def perturbation(self):
    """ Generate a difference vector with the given standard deviations """
    # print self.sigList
    # print "_*_*_*_*_*_*_*_"
    # raw_input("Press Enter to continue")
    # time.sleep(3)
    return random.normal(0., self.sigList)
def generate_data(N=100, true_params=secret_true_params, seed=42):
    x = np.linspace(-2.5, 2.5, N)
    y1 = my_model(x, *true_params)
    y2 = 1.0 * random.normal(size=N)

    # Create the data
    data = np.array([x, y1 + y2]).T

    # Shuffle the data
    permuted_data = random.permutation(data)

    # Save the data
    np.savetxt("dataN%d.txt" % N, data)

    return data
def add_noise(self, temp):
    """ Add per-pixel Gaussian random noise. """
    self.noise = random.normal(0, temp, [self.npix, self.npix])
    self.Fnoise = ft.fftshift(ft.fft2(self.noise))
    self.Txy = self.Txy + self.noise
    self.Fxy = ft.fftshift(ft.fft2(self.Txy))
    self.Clnoise = ((temp * self.mapsize_rad / self.npix) * self.Bl)**-2.e0
    self.Pknoise = np.interp(self.modk, self.k, self.Clnoise)
def generate_lin_regression(n, d, sigma):
    """
    See cgd.pdf
    """
    X = random.randn(n, d)
    for i in range(d):
        X[:, i] /= numpy.max(numpy.abs(X[:, i]))
    beta = random.uniform(low=0, high=1, size=d)
    e = numpy.array(random.normal(0, sigma, n))
    y = X.dot(beta) + e.T
    return (X, y, beta, e)
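# --- Usage sketch (not from the original source) ---------------------------
# A quick sanity check of the generator above, assuming `random` is
# numpy.random: recover beta by ordinary least squares and compare.
import numpy
from numpy import random

X, y, beta, e = generate_lin_regression(n=500, d=5, sigma=0.01)
beta_hat = numpy.linalg.lstsq(X, y, rcond=None)[0]
print(numpy.max(numpy.abs(beta_hat - beta)))  # small for small sigma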
def generate_lin_regression_nikolaenko(n, d):
    """
    Generates a synthetic linear regression instance as in
    "Privacy-Preserving Ridge Regression on Hundreds of Millions of Records"
    """
    X = random.uniform(low=-1, high=1, size=(n, d))
    beta = random.uniform(low=-1, high=1, size=d)
    mu, sigma = 0, 1  # mean and standard deviation
    e = numpy.array(random.normal(mu, sigma, n))
    y = X.dot(beta) + e.T
    return (X, y, beta, e)
def run(self):
    self.households.reset_reading()
    counter = 0
    for h in self.households:
        counter += 1
        wtp = max(0.0, random.normal(7.348656, 9.407043))
        h.SetField("wtp_extreme_heat", wtp)
        if counter % 100000 == 0:
            self.households.sync()
            self.__container.set_next_by_index(counter)
    self.households.finalise()
def sample(self, N):
    """
    Generate N samples
    """
    # Standard normal samples
    U = random.normal(size=(N, self.dim))

    # Sample random means with equal probability
    random_means = self.means[list(random.randint(self.m, size=N)), :]

    return U + random_means
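# --- Standalone sketch (not from the original source) ----------------------
# The method above draws from an equal-weight Gaussian mixture with unit
# covariance centered on self.means. The same idea with made-up means; the
# names below are illustrative only.
import numpy as np
from numpy import random

means = np.array([[-3.0, 0.0], [3.0, 0.0]])     # m = 2 components in 2D
m, dim = means.shape
N = 1000
samples = random.normal(size=(N, dim)) + means[random.randint(m, size=N), :]
print(samples.mean(axis=0))                      # near the average of the means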
def __init__(self, statedim, actiondim, sigma=-2.):
    Explorer.__init__(self, actiondim, actiondim)

    self.statedim = statedim
    self.actiondim = actiondim

    # initialize parameters to sigma
    ParameterContainer.__init__(self, actiondim, stdParams=0)
    self.sigma = [sigma] * actiondim

    # exploration matrix (linear function)
    self.explmatrix = random.normal(0., expln(self.sigma), (statedim, actiondim))

    # store last state
    self.state = None
def drawSample(self, dm):
    sum = 0.0
    rndFakt = random.random()
    if dm == "max":
        # NOTE: as in drawSample above, `x` is unbound if the loop never breaks
        for g in range(self.numOGaus):
            sum += self.sigmo(self.alpha[g])
            if rndFakt < sum:
                if self.sigma[g] < self.minSig:
                    self.sigma[g] = self.minSig
                x = random.normal(self.mue[g], self.sigma[g])
                break
        return x
    if dm == "dist":
        return rndFakt * self.distRange + self.rangeMin
    return 0.0
def Run_MCIS(self, N_MC, u0=None):
    """
    MC with importance sampling

    TODO: if N_MC = None -> run until cov is acceptable

    Output:
    beta - Reliability index (for comparison with FORM)
    pof  - Probability of failure
    cov  - Coefficient of variation
    """
    # Get MPP in U-space
    if u0 is None:
        conv, MPP_u = self.MPP_search()
    else:
        conv, MPP_u = True, u0

    # Sample in U space
    U = random.normal(size=(N_MC, self.X_dist.dim))
    pdf_U = np.prod(stats.norm.pdf(U), 1)

    # Shift samples to design point
    U_shifted = U + MPP_u
    pdf_U_shifted = np.prod(stats.norm.pdf(U_shifted), 1)

    # Transform to X space
    X = self.X_dist.U_to_X(U_shifted)

    # Evaluate limit state
    g = self.G(X).flatten()
    I = (g < 0) * 1

    # Estimate pof
    q = I * (pdf_U_shifted / pdf_U)
    pof = (1 / N_MC) * q.sum()

    # .. and CoV
    if pof > 0:
        var = (1 / N_MC) * (1 / (N_MC - 1)) * ((q - pof)**2).sum()
        cov = np.sqrt(var) / pof
    else:
        cov = 1

    # Reliability index
    beta = -stats.norm.ppf(pof)

    return beta, pof, cov
def randomlist(self, mode=1, size=1000, low=0, high=100):
    """
    Generate a list following a given distribution.

    IN:  distribution model code (see the modes below); sequence size; sequence range
    OUT: a sequence following the requested distribution
    """
    x = []
    if mode == 1:
        x = random.randint(low, high, size=size)
    if mode == 2:
        x = [float(v) for v in random.normal(loc=(low + high) / 2.0, scale=(high - low) / 6.0, size=size)]
    if mode == 3:
        x = [int(v) for v in random.exponential(scale=1, size=size) + low]
    if mode == 4:
        x = [int(v) for v in random.pareto(1, size=size) + low]
    if mode == 5:
        x = [int(v) for v in random.poisson(lam=(low + high) / 2.0, size=size)]
    # x = random.choice(x, size=100)
    return x
def model(times):
    t_fold, t_fold_model = self.period_folding(times, available, m, m_err, out_dict)
    data = empty(0)
    rms = empty(0)
    for time in t_fold_model:
        # we're going to create a window around the desired time and sample
        # a gaussian distribution around that time
        period = 1.0 / f
        # altering this. originally 1/3
        assert period < available.ptp() * 1.5, (
            "period is greater than ####SEE VARIABLE CONTSTRAINT#### of the duration of available data"
        )
        # window is 2% of the period
        passed = False
        for x in arange(0.01, 0.1, 0.01):
            t_min = time - x * period
            t_max = time + x * period
            # picks the available times that are within that window
            window = logical_and((t_fold < t_max), (t_fold > t_min))
            try:
                # there must be more than # points in the window for this to work:
                assert window.sum() >= 2, str(time)  # jhiggins changed sum from 5 to 2
            except AssertionError:
                continue
            else:
                passed = True
                break
        assert passed, "No adequate window found"
        m_window = m[window]
        mean_window = mean(m_window)
        std_window = std(m_window)
        # now we're ready to sample that distribution and create our point
        new = (random.normal(loc=mean_window, scale=std_window, size=1))[0]
        data = append(data, new)
        rms = append(rms, std_window)
    period_folded_model_file = open("period_folded_model.txt", "w")
    # model_file = open("model.txt", "w")
    for n in range(len(t_fold_model)):
        period_folded_model_file.write("%f\t%f\t%f\n" % (t_fold_model[n], data[n], rms[n]))
        # model_file.write("%f\t%f\t%f\n" % (available[n], data[n], rms[n]))
    # model_file.close()
    period_folded_model_file.close()
    return {"flux": data, "rms": rms}
def __init__(self, mapsize=10.e0, pixels=1024, cosm=Cosmology()):
    """
    Constructor.

    Default will create a 10x10 degree FOV with 1024 pixels
    and a WMAP7 Cosmology.
    """
    self.cosm = cosm
    self.mapsize_deg = mapsize  # map size in real domain
    self.mapsize_rad = np.deg2rad(self.mapsize_deg)
    self.fsky = (mapsize**2.e0) / 41253.e0
    self.npix = pixels
    self.Fmapsize = 1.e0 / self.mapsize_rad  # map size in Fourier domain
    self.pixsize = self.mapsize_rad / self.npix  # pixel size in radians
    self.pixsize_deg = self.mapsize_deg / self.npix  # pixel size in degrees
    # range of axes
    self.mapaxis = (self.mapsize_deg / self.npix) * np.arange(-self.mapsize_deg / 2.e0, self.mapsize_deg / 2.e0, 1)
    self.Fmapaxis = 1.e0 / self.mapaxis
    # define k-space
    self.krange = self.Fmapsize * np.arange(-self.npix / 2.e0, self.npix / 2.e0, 1)
    self.kx, self.ky = np.meshgrid(self.krange, self.krange)
    self.modk = sqrt(self.kx**2.e0 + self.ky**2.e0)
    self.Txy = random.normal(0, 1, [self.npix, self.npix])  # Gaussian random field
    self.Txy = self.Txy - np.mean(self.Txy)
    self.Fxy = ft.fftshift(ft.fft2(self.Txy))  # Fourier domain GRF
    self.Fxy = self.Fxy / sqrt(np.var(self.Fxy))
    self.build_Pk(self.cosm)  # Get flat-sky P(k) for cosmology
    self.Fxy = self.Fxy * self.Pk  # Apply the power spectrum
    self.Txy = np.real(ft.ifft2(ft.fftshift(self.Fxy)))
    self.ymap = np.zeros([self.npix, self.npix])
def model(times):
    t_fold, t_fold_model = self.period_folding(times, available, m, m_err, out_dict)
    data = empty(0)
    rms = empty(0)
    for time in t_fold_model:
        # we're going to create a window around the desired time and sample
        # a gaussian distribution around that time
        period = 1. / f
        assert period < available.ptp() / 3, "period is greater than one third of the duration of available data"
        # window is 2% of the period
        passed = False
        for x in arange(0.01, 0.1, 0.01):
            t_min = time - x * period
            t_max = time + x * period
            # picks the available times that are within that window
            window = logical_and((t_fold < t_max), (t_fold > t_min))
            try:
                # there must be more than 5 points in the window for this to work:
                assert (window.sum() > 5), str(time)
            except AssertionError:
                continue
            else:
                passed = True
                break
        assert passed, "No adequate window found"
        m_window = m[window]
        mean_window = mean(m_window)
        std_window = std(m_window)
        # now we're ready to sample that distribution and create our point
        new = (random.normal(loc=mean_window, scale=std_window, size=1))[0]
        data = append(data, new)
        rms = append(rms, std_window)
    period_folded_model_file = open("period_folded_model.txt", "w")
    # model_file = open("model.txt", "w")
    for n in range(len(t_fold_model)):
        period_folded_model_file.write("%f\t%f\t%f\n" % (t_fold_model[n], data[n], rms[n]))
        # model_file.write("%f\t%f\t%f\n" % (available[n], data[n], rms[n]))
    # model_file.close()
    period_folded_model_file.close()
    return {'flux': data, 'rms': rms}
def step(self):
    """ integrate state using simple rectangle rule """
    thrust = float(self.action[0])
    rudder = float(self.action[1])
    h, hdot, v = self.sensors
    rnd = random.normal(0, 1.0, size=3)
    thrust = min(max(thrust, -1), +2)
    rudder = min(max(rudder, -90), +90)
    drag = 5 * h + (rudder**2 + rnd[0])
    force = 30.0 * thrust - 2.0 * v - 0.02 * v * drag + rnd[1] * 3.0
    v = v + self.dt * force / self.mass
    v = min(max(v, -10), +40)
    torque = -v * (rudder + h + 1.0 * hdot + rnd[2] * 10.)
    last_hdot = hdot
    hdot += torque / self.I
    hdot = min(max(hdot, -180), 180)
    h += (hdot + last_hdot) / 2.0
    if h > 180.:
        h -= 360.
    elif h < -180.:
        h += 360.
    self.sensors = (h, hdot, v)
def SmecticInitializer(gridShape, sigma=0.2, seed=None):
    oldgrid = copy.copy(gridShape)  # keep the requested shape; `oldgrid` was referenced below but never defined

    if seed is None:
        seed = 0
    random.seed(seed)

    state = SmecticState.SmecticState(gridShape)
    field = state.GetOrderParameterField()

    Ksq = FourierSpaceTools.FourierSpaceTools(gridShape).kSq.numpy_array()
    for component in field.components:
        temp = random.normal(scale=gridShape[0], size=gridShape)
        ktemp = fft.rfftn(temp) * (sqrt(pi) * sigma)**len(gridShape) * exp(-Ksq * sigma**2 / 4.)
        field[component] = numpy.real(fft.irfftn(ktemp))

    # To make seed consistent across grid sizes and convergence comparison
    gridShape = copy.copy(oldgrid)
    if gridShape[0] != 128:
        state = ResizeState(state, gridShape[0], Dim=len(gridShape))
    state = ReformatState(state)
    state.ktools = FourierSpaceTools.FourierSpaceTools(gridShape)

    return state
def update(self):
    self.state = [s + 0.1 * a for s, a in zip(self.state, self.action)]
    if self.noise:
        self.state += random.normal(0, self.noise, self.dim)
import numpy as np
from numpy import random, linspace

import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
from pyqtgraph import MultiPlotWidget
try:
    from pyqtgraph.metaarray import *
except ImportError:
    print("MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)")
    exit()

app = QtGui.QApplication([])
mw = QtGui.QMainWindow()
mw.resize(800, 800)
pw = MultiPlotWidget()
mw.setCentralWidget(pw)
mw.show()

data = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])
ma = MetaArray(data, info=[
    {'name': 'Signal', 'cols': [
        {'name': 'Col1', 'units': 'V'},
        {'name': 'Col2', 'units': 'A'},
        {'name': 'Col3'},
    ]},
    {'name': 'Time', 'values': linspace(0., 1., 1000), 'units': 's'}
])
pw.plot(ma)

## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
def newEpisode(self):
    """ Randomize the matrix values for exploration during one episode. """
    self.explmatrix = random.normal(0., expln(self.sigma), self.explmatrix.shape)
def make_brownian_path(mean, sigma, numpts):
    xn = sigma * sqrt(numpts) * random.normal()
    result = [0.0 for x in range(0, numpts)]
    result[len(result) - 1] = xn
    fill_region(0, len(result) - 1, sigma, result)
    return [result[i] + mean * i for i in range(0, numpts)]
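# --- Usage sketch (not from the original source) ---------------------------
# Driving the midpoint-displacement pair above (fill_region plus
# make_brownian_path), assuming sqrt and random come from NumPy. The drift
# and volatility values are arbitrary.
from numpy import sqrt, random

path = make_brownian_path(mean=0.05, sigma=1.0, numpts=256)
# path[0] is 0; the endpoint combines the drift term mean*(numpts-1) with the
# fixed N(0, sigma**2 * numpts) draw that fill_region bridges to.
print(path[0], path[-1])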
def _forwardImplementation(self, inbuf, outbuf):
    outbuf[:] = random.normal(inbuf, expln(self.sigma))
def genDifVect(self):
    # generates a difference vector with the given standard deviations
    self.deltas = random.normal(0.0, self.sigList)
def drawRandomWeights(self):
    self.module._setParameters(random.normal(0, expln(self.params), self.module.paramdim))
def run(self):
    self.households.reset_reading()
    for h in self.households:
        wtp = max(0.0, random.normal(7.348656, 9.407043))
        h.SetField("wtp_extreme_heat", wtp)
    self.households.finalise()
# Parameter definitions
NomFichier = 'MESURES1.CSV'

# ------------------------------------------------------------------------
# Start of the program
# ------------------------------------------------------------------------

# Build a list of measurements with random errors,
# constructed around the reference value of the voltage
U = 3.992  # reference voltage
NbMesures = 1000
ListeMesures = []
for i in range(NbMesures):
    ListeMesures.append(random.normal(U, 0.002))

# Compute the statistics of the list
NbMesures = len(ListeMesures)
MinListe = min(ListeMesures)
MaxListe = max(ListeMesures)
Moyenne = mean(ListeMesures)
EcartType = std(ListeMesures)

# Display the statistical parameters
print("Expression of the result: ", Moyenne, " +- ", EcartType)

# Define the histogram bins
NbClasses = 10
ClasseRange = 0.002
def _forwardImplementation(self, inbuf, outbuf):
    if not self.enabled:
        outbuf[:] = inbuf
    else:
        outbuf[:] = random.normal(inbuf, expln(self.params))
def sampler(self, probabilities):
    return random.normal(probabilities, self.visibleVariances)
def sample(self):
    """ Sample with mean mu and standard deviation sigma """
    return random.normal(self.mu, self.sigma, 1)
def main():
    """
    Today's agenda:
    i)   generate initial matrices
    ii)  simulate
    iii) learn gauss
    iv)  simulate
    v)   plot & compare
    """
    # i) generate initial matrices
    W = random.normal(0, sigma, [q, q])
    WI = random.uniform(-sigma, sigma, [q, 1])
    X = random.uniform(-1, 1, [q])
    a = ones([q])
    b = zeros([q])

    S = zeros([q, ITERATIONS])
    S2 = zeros([q, ITERATIONS])
    ahist = zeros([q, ITERATIONS])

    # ii) simulate
    U = random.uniform(-1, 1, [ITERATIONS])
    for it in range(ITERATIONS):
        net = dot(WI, U[it].reshape(1)) + dot(W, X)
        X = tanh(a * net + b)
        S[:, it] = X

    # iii) learn
    U = random.uniform(-1, 1, [ITERATIONS])
    for it in range(ITERATIONS):
        net = dot(WI, U[it].reshape(1)) + dot(W, X)
        Y = tanh(a * net + b)
        a, b = ipgauss(net, Y, a, b)
        ahist[:, it] = a
        X = Y

    # iv) simulate again
    U = random.uniform(-1, 1, [ITERATIONS])
    for it in range(ITERATIONS):
        net = dot(WI, U[it].reshape(1)) + dot(W, X)
        X = tanh(a * net + b)
        S2[:, it] = X

    # v) plot & compare: view histograms
    BINCNT = 10

    def show_histograms():
        for yplt in range(gridy):
            print("\r {}/{}".format(yplt, gridy), end="")
            for xplt in range(gridx):
                indx = yplt * gridx + xplt
                pyplot.subplot(gridy, gridx, indx)
                std1 = std(S[indx, :])
                std2 = std(S2[indx, :])
                pyplot.hist(S[indx, :], bins=BINCNT, normed=True, label="{}:bfr std1={:6.4f}".format(indx, std1))
                pyplot.hist(S2[indx, :], bins=BINCNT, normed=True, label="{}:atr std2={:6.4f}".format(indx, std2))
                pyplot.grid(True)
                pyplot.legend()
                print("std1={0}, std2={1}".format(std1, std2))
        # pyplot.legend()
        pyplot.show()

    show_histograms()

    for ciara in range(q):
        pyplot.plot(range(ITERATIONS), ahist[ciara, :], label="%d" % ciara)
    pyplot.legend()
    pyplot.show()

    print("a = %s" % a)
    print("b = %s" % b)
    print("done.")
        # mue learning
        sigmoA = self.sigmo(self.alpha)
        self.mue += self.alphaM * fakt * (x - self.mue) * sigmoA * norm

        # sigma learning
        if fakt > 0.0:
            self.sigma += self.alphaS * fakt * ((x - self.mue) ** 2 - self.sigma ** 2) / self.sigma * sigmoA * norm

    def sigmo(self, a):
        return 1.0 / (1.0 + exp(-1.0 * a))

    def invSigmo(self, a):
        return -log(1.0 / a - 1.0)

    def getSample(self):
        sampleX = self.drawSample()
        return sampleX


if __name__ == '__main__':
    m = MixtureOfGaussians()
    for i in range(10000):
        x = m.getSample()
        n = x / m.distRange * 10.0
        if abs(n) > 5.0:
            n = 0.0
        y = (20.0 + n ** 2 - 10.0 * cos(2.0 * 3.1416 * n)) / 55.0 + random.normal(0, 0.2)  # one-dimensional Rastrigin
        m.learn(x, y)
    print(m.alpha)
    print(m.mue)
    print(m.sigma)
band_specific_names = observed_apparent_mag_names[observed_apparent_mag_band_mapping == m]

fig = plt.figure(figsize=(12, 12))
ax1 = subplot(111)

# with some dust, poor color excess approximation
dust_corrected_abs_mags = (apparent_mags - 0.85 * prior_ebvs * (const_R_V * extinction_ccm_a[m] + extinction_ccm_b[m])) - prior_mus
dust_corrected_abs_mag_errs = sqrt(apparent_mag_errs**2 + (0.85 * prior_ebv_errs)**2 + prior_mu_errs**2)

linear_period = (10**period_terms) * P_0
ax1.errorbar(log10(linear_period), dust_corrected_abs_mags, dust_corrected_abs_mag_errs,
             linestyle="none", marker="s", color="blue")

polyfit_slope = []
polyfit_intercept = []
for iter in range(1000):
    fit_params = polyfit(period_terms, random.normal(dust_corrected_abs_mags, dust_corrected_abs_mag_errs), 1)
    polyfit_slope.append(fit_params[0])
    polyfit_intercept.append(fit_params[1])
polyfit_slope = array(polyfit_slope)
polyfit_intercept = array(polyfit_intercept)

best_fit_line = polyfit_intercept.mean() + polyfit_slope.mean() * logper_grid
squared_m_errs = (dust_corrected_abs_mags - (polyfit_intercept.mean() + polyfit_slope.mean() * period_terms))**2
sig_m = sqrt(squared_m_errs.mean())

ci_err_grid = []
for logper_val in logper_grid:
    ci_err_grid.append(sqrt(sig_m**2 + std(polyfit_intercept + polyfit_slope * logper_val)**2))
ci_err_grid = array(ci_err_grid)