def sample(self, density, deviation, add_source_point=True, seed=None):
    try:
        seed = int(seed)
    except:
        seed = None
    np_random.seed(seed)
    self.sample_seed = seed
    points = set()
    for k, node in self.K.items():
        if node.parent is None:
            continue
        parent = self.K[node.parent]
        avg_radius = (node.radius + parent.radius) / 2
        axis = node.pos - parent.pos
        surf = 2 * pi * avg_radius * linalg.norm(axis)
        n = int(round(density * surf))
        p1 = parent.pos
        p2 = node.pos
        p = np_random.randn(3)
        r = cross(p - p1, p2 - p1)
        r /= linalg.norm(r)
        s = cross(r, p2 - p1)
        s /= linalg.norm(s)
        for i in range(n):
            theta = np_random.uniform(0, radians(360))
            # relative distance of the point, on line between p1 and p2
            d = np_random.uniform()
            t = p1 + d * axis
            interp_radius = (node.radius - parent.radius) * d + parent.radius
            if deviation:
                interp_radius += np_random.exponential(deviation)
            q = harray([t[0] + interp_radius * cos(theta) * r[0] + interp_radius * sin(theta) * s[0],
                        t[1] + interp_radius * cos(theta) * r[1] + interp_radius * sin(theta) * s[1],
                        t[2] + interp_radius * cos(theta) * r[2] + interp_radius * sin(theta) * s[2]])
            points.add(q)
    if add_source_point:
        points.add((0, -0.01, 0))
    self.P = vstack(points)
def width_uniform(max_width, num_profiles, num_samples):
    """Uniform width distribution

    Generates halfwidths in U[0, max_width]
    """
    halfwidths = rand.uniform(0, max_width, num_profiles)
    return rand.uniform(-halfwidths, halfwidths, (num_samples, num_profiles)).T
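# --- Usage sketch (added for illustration, not from the original source).
# Assumes `rand` is bound to numpy.random at module level, as width_uniform
# expects; the output shape follows from the .T in the return statement.
import numpy.random as rand
profiles = width_uniform(max_width=2.0, num_profiles=4, num_samples=100)
assert profiles.shape == (4, 100)   # (num_profiles, num_samples)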
def expose(self, widget, event):
    cr = widget.window.cairo_create()
    environ["GKS_WSTYPE"] = "142"
    pc = PyCairoContext.from_address(id(cr))
    environ['GKSconid'] = "%lu" % pc.ctx

    cr.move_to(15, 15)
    cr.set_font_size(14)
    cr.show_text("Contour Plot using Gtk ...")

    seed(0)
    xd = uniform(-2, 2, 100)
    yd = uniform(-2, 2, 100)
    zd = xd * np.exp(-xd**2 - yd**2)

    gr.setviewport(0.15, 0.95, 0.1, 0.9)
    gr.setwindow(-2, 2, -2, 2)
    gr.setspace(-0.5, 0.5, 0, 90)
    gr.setmarkersize(1)
    gr.setmarkertype(gr.MARKERTYPE_SOLID_CIRCLE)
    gr.setcharheight(0.024)
    gr.settextalign(2, 0)
    gr.settextfontprec(3, 0)

    x, y, z = gr.gridit(xd, yd, zd, 200, 200)
    h = np.linspace(-0.5, 0.5, 20)
    gr.surface(x, y, z, 5)
    gr.contour(x, y, h, z, 0)
    gr.polymarker(xd, yd)
    gr.axes(0.25, 0.25, -2, -2, 2, 2, 0.01)

    gr.updatews()
def make_random_transform():
    transform_type = rand.randint(0, 3)

    if transform_type == 0:
        axis = 'freq'   # FIXME generalize later
        nbins = rand.randint(1, 5)
        nt_chunk = 8 * rand.randint(5, 11)
        epsilon = rand.uniform(3.0e-4, 1.0e-3)
        return rf_pipelines.spline_detrender(nt_chunk, axis, nbins, epsilon)

    elif transform_type == 1:
        # intensity_clipper
        axis = rand.randint(0, 2) if (rand.uniform() < 0.66) else None
        Df = 2**rand.randint(0, 4)
        Dt = 2**rand.randint(0, 4)
        sigma = rand.uniform(1.3, 1.7)
        niter = rand.randint(1, 5)
        iter_sigma = rand.uniform(1.8, 2.0)
        nt_chunk = Dt * 8 * rand.randint(1, 8)
        two_pass = True if rand.randint(0, 2) else False
        return rf_pipelines.intensity_clipper(nt_chunk, axis, sigma, niter,
                                              iter_sigma, Df, Dt, two_pass)

    else:
        # std_dev_clipper
        axis = rand.randint(0, 2)
        Df = 2**rand.randint(0, 4)
        Dt = 2**rand.randint(0, 4)
        sigma = rand.uniform(1.3, 1.7)
        nt_chunk = Dt * 8 * rand.randint(1, 8)
        two_pass = True if rand.randint(0, 2) else False
        return rf_pipelines.std_dev_clipper(nt_chunk, axis, sigma, Df, Dt, two_pass)
def PSO_global(self):
    _list = [rd.uniform(self.low_bound, self.up_bound, self.dimension)
             for x in range(0, self.particles_count, 1)]
    g_best = _list[0]
    position = _list
    vel = []
    for x in position:
        if self.function(x) < self.function(g_best):
            g_best = x
        vel.append(rd.uniform(-(abs(self.up_bound - self.low_bound)),
                              abs(self.up_bound - self.low_bound),
                              self.dimension))
    count = 0
    while count < self.stop_case:
        for x in range(len(_list)):
            for y in xrange(self.dimension):
                r_g, r_p = rd.random(1), rd.random(1)
                vel[x][y] = self.w * vel[x][y] \
                    + self.especial_param_p * r_p * (position[x][y] - _list[x][y]) \
                    + self.especial_param_g * r_g * (g_best[y] - _list[x][y])
            _list[x] += vel[x]
            if self.function(_list[x]) < self.function(position[x]):
                position[x] = _list[x]
                if self.function(position[x]) < self.function(g_best):
                    g_best = position[x]
        count += 1
    return g_best
def _create_plot_component():

    # Create a random scattering of XY pairs
    x = random.uniform(0.0, 10.0, 50)
    y = random.uniform(0.0, 5.0, 50)
    pd = ArrayPlotData(x=x, y=y)
    plot = Plot(pd, border_visible=True, overlay_border=True)

    scatter = plot.plot(("x", "y"), type="scatter", color="lightblue")[0]

    # Tweak some of the plot properties
    plot.set(title="Scatter Inspector Demo", padding=50)

    # Attach some tools to the plot
    plot.tools.append(PanTool(plot))
    plot.overlays.append(ZoomTool(plot))

    # Attach the inspector and its overlay
    scatter.tools.append(ScatterInspector(scatter))
    overlay = ScatterInspectorOverlay(
        scatter,
        hover_color="red",
        hover_marker_size=6,
        selection_marker_size=6,
        selection_color="yellow",
        selection_outline_color="purple",
        selection_line_width=3,
    )
    scatter.overlays.append(overlay)

    return plot
def _build(self):
    '''Create populations.'''
    if self._open_bc:
        ldict_s = {'elements': 'iaf_psc_alpha',
                   'positions': [[self._x_d, self._y_d]],
                   'extent': [self._L] * self._dimensions,
                   'edge_wrap': False}
        x = rnd.uniform(-self._L / 2., self._L / 2., self._N)
        y = rnd.uniform(-self._L / 2., self._L / 2., self._N)
        pos = list(zip(x, y))
        ldict_t = {'elements': 'iaf_psc_alpha',
                   'positions': pos,
                   'extent': [self._L] * self._dimensions,
                   'edge_wrap': False}
        self._ls = topo.CreateLayer(ldict_s)
        self._lt = topo.CreateLayer(ldict_t)
        self._driver = nest.GetLeaves(self._ls)[0]
    else:
        ldict_s = {'elements': 'iaf_psc_alpha',
                   'positions': [[0.] * self._dimensions],
                   'extent': [self._L] * self._dimensions,
                   'edge_wrap': True}
        x = rnd.uniform(-self._L / 2., self._L / 2., self._N)
        y = rnd.uniform(-self._L / 2., self._L / 2., self._N)
        if self._dimensions == 3:
            z = rnd.uniform(-self._L / 2., self._L / 2., self._N)
            pos = list(zip(x, y, z))
        else:
            pos = list(zip(x, y))
        ldict_t = {'elements': 'iaf_psc_alpha',
                   'positions': pos,
                   'extent': [self._L] * self._dimensions,
                   'edge_wrap': True}
        self._ls = topo.CreateLayer(ldict_s)
        self._lt = topo.CreateLayer(ldict_t)
        self._driver = topo.FindCenterElement(self._ls)
def generateHaarFeatures(number):
    prototype = np.array([0, 0, 1, 1, 0.25, 0.25, 0.5, 0.5])
    scale = random.uniform(0.15, 0.6, (number, 1))
    prototypes = np.tile(prototype, (number, 1)) * scale
    translation = random.uniform(0, 0.8, (number, 2))
    prototypes[:, 0:2] += translation
    prototypes[:, 4:6] += translation
    # If a generated feature lies outside of the range [0, 1], translate it
    # back inside
    for row in prototypes:
        if row[0] + row[2] > 1:
            move = row[0] + row[2] - 1
            row[0] -= move
            row[4] -= move
        if row[1] + row[3] > 1:
            move = row[1] + row[3] - 1
            row[1] -= move
            row[5] -= move
    prototypes[:, 2:4] = prototypes[:, 2:4] + prototypes[:, 0:2]
    prototypes[:, 6:8] = prototypes[:, 6:8] + prototypes[:, 4:6]
    for row in prototypes:
        for i in range(row.shape[0]):
            if row[i] > 1:
                row[i] = 1
    return prototypes.astype(np.float32)
def shift_vertices(vertices, s):
    for i, (x, y) in enumerate(vertices):
        x += random.uniform(-s / 2., s / 2.)
        y += random.uniform(-s / 2., s / 2.)
        vertices[i, 0] = x
        vertices[i, 1] = y
    return vertices
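# --- Usage sketch (my addition): jitter the corners of a unit square in
# place by up to +/- 0.05; assumes numpy's `random` module as above.
import numpy as np
from numpy import random
square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
square = shift_vertices(square, s=0.1)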
def randomize(self):
    '''Randomize with uniform distribution within bounds.'''
    # Iterate over self.pts
    for i, (lowerbound, upperbound) in enumerate(self.constraints):
        self.pts[i] = uniform(lowerbound, upperbound)
        absrange = abs(upperbound - lowerbound)
        self.spds[i] = uniform(-absrange, absrange)
def test_random_positions_lim_fraction(self):
    R = uniform(0, 1)
    N = uniform(0, 1000)
    x = random_positions(R, N)
    r = dot(x, x)
    assert_array_less(r, R**2)
def gen_traffic_list(time_serial_num, traffic_num, traffic_data_mean_size):
    traffic_list = [0] * time_serial_num
    # Generate the time-series envelope
    mu = int(random.uniform(0, time_serial_num))
    sigma = random.uniform(1, 6)
    # Generate a random number of traffic flows for each time slot
    i = 0
    while i < time_serial_num:
        tmp_time_serial = int(random.gauss(mu, sigma))
        if tmp_time_serial < time_serial_num and tmp_time_serial >= 0:
            traffic_list[tmp_time_serial] = traffic_list[tmp_time_serial] + 1
            i += 1
    plotLine(range(0, len(traffic_list)), traffic_list)
    # Generate a random data block for each traffic flow
    mu = traffic_data_mean_size
    sigma = traffic_data_mean_size * 0.1
    floor_data_size = mu - 3 * sigma
    ceil_data_size = mu + 3 * sigma
    # Build with a comprehension so the sublists are independent; a
    # [[]]*time_serial_num literal would alias one list at every index
    traffic_entity_list = [[] for _ in range(time_serial_num)]
    for index, k in enumerate(traffic_list):
        i = 0
        while i < k:
            data_size = int(random.gauss(mu, sigma))
            if data_size > floor_data_size and data_size < ceil_data_size:
                traffic_entity_list[index].append(data_size)
                i = i + 1
    return traffic_entity_list
def load_linear_dataset(n, d, noise_stdev, xmax):
    # Per-feature coefficients with random signs (elementwise product; a dot
    # product here would collapse B to a scalar)
    signs = rand.choice([-1, 1], size=(d,))
    B = signs * rand.uniform(size=(d,))
    # One noise term per sample
    errors = noise_stdev * rand.normal(size=(n,))
    X = xmax * rand.uniform(size=(d, n))
    y = np.dot(X.T, B) + errors
    return X, y, (B, errors)
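# --- Quick check (added here, not in the source): with noise_stdev=0 the
# targets should reproduce X.T @ B exactly.
import numpy as np
import numpy.random as rand
X, y, (B, errors) = load_linear_dataset(n=50, d=3, noise_stdev=0.0, xmax=1.0)
assert np.allclose(y, np.dot(X.T, B))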
def test_no_zenith(self):
    """Azimuths are irrelevant when pointing at the zenith"""
    azimuth1 = random.uniform(-pi, pi, 10000)
    azimuth2 = random.uniform(-pi, pi, 10000)
    angle = utils.angle_between(0, azimuth1, 0, azimuth2)
    self.assertTrue(all(angle == 0))
def test_single_values(self):
    """Other tests use arrays, check if single values also work"""
    zenith = random.uniform(0, pi / 2)
    azimuth = random.uniform(-pi, pi)
    angle = utils.angle_between(zenith, azimuth, zenith, azimuth)
    self.assertTrue(angle == 0)
def InitiateCataract(self):
    key = int((self.Attribute['Age'] - 50) / 5)
    cataractRisk = random.triangular(1.5, 2.7, 4.9)
    if self.medicalRecords['NumberTrabeculectomy'] == 0:
        if self.Attribute['Gender'] == 1:
            if key < 7:
                RateCataract = cataract_formation[key] / 1000
            else:
                RateCataract = cataract_formation[7] / 1000
        else:
            if key < 7:
                RateCataract = cataract_formation_female[key] / 1000
            else:
                RateCataract = cataract_formation_female[7] / 1000
    else:
        if self.Attribute['Gender'] == 1:
            if key < 7:
                RateCataract = (cataract_formation[key] / 1000) * cataractRisk
            else:
                RateCataract = (cataract_formation[7] / 1000) * cataractRisk
        else:
            if key < 7:
                RateCataract = (cataract_formation_female[key] / 1000) * cataractRisk
            else:
                RateCataract = (cataract_formation_female[7] / 1000) * cataractRisk
    if random.uniform(0, 1) < RateCataract:
        self.medicalRecords['Cataract'] = True
    if random.uniform(0, 1) < random.beta(123, 109) and self.medicalRecords['Cataract'] == True:
        self.medicalRecords['SurgeryCataract'] += 1
        self.medicalRecords['Cataract'] = False
def pcolorRandom():
    "Makes a pcolormesh plot of randomly generated data pts."
    # make up some randomly distributed data
    npts = 100
    x = uniform(-3, 3, npts)
    y = uniform(-3, 3, npts)
    z = x * N.exp(-x ** 2 - y ** 2)
    # define grid.
    xi = N.arange(-3.1, 3.1, 0.05)
    yi = N.arange(-3.1, 3.1, 0.05)
    # grid the data.
    zi = griddata(x, y, z, xi, yi)
    # contour the gridded data, plotting dots at the randomly spaced data points.
    plt.pcolormesh(xi, yi, zi)
    #CS = plt.contour(xi,yi,zi,15,linewidths=0.5,colors='k')
    #CS = plt.contourf(xi,yi,zi,15,cmap=plt.cm.jet)
    plt.colorbar()  # draw colorbar
    # plot data points.
    plt.scatter(x, y, marker='o', c='b', s=5)
    plt.xlim(-3, 3)
    plt.ylim(-3, 3)
    plt.title('griddata test (%d points)' % npts)
    plt.show()
def make_init_file(start_lon, start_lat, npts, position_init_file):
    npts = 100
    xyz = zeros((npts, 3))
    # x index: 100 points
    xyz[:, 0] = [random.uniform(start_lon - 0.5, start_lon + 0.5) for _ in xrange(npts)]
    # y index: 100 points
    xyz[:, 1] = [random.uniform(start_lat - 0.5, start_lat + 0.5) for _ in xrange(npts)]
    # at k=20 level; the z level will be overwritten if the target_density in
    # the namelist is larger than 0.
    xyz[:, 2] = 23
    # the saving sequence should be x[:], y[:], z[:], not [x1,y1,z1],[x2,y2,z2]...
    xyz.T.astype('>f8').tofile('particle_init.bin')
def slice_sampler(px, N=1, x=None):
    """
    Provides samples from a user-defined distribution.

    slice_sampler(px, N = 1, x = None)

    Inputs:
        px = A discrete probability distribution.
        N  = Number of samples to return, default is 1
        x  = Optional list/array of observation values to return, where prob(x) = px.

    Outputs:
        If x=None (default) or if len(x) != len(px), it will return an array of
        integers between 0 and len(px)-1. If x is supplied, it will return the
        samples from x according to the distribution px.
    """
    values = np.zeros(N, dtype=np.int)
    samples = np.arange(len(px))
    px = np.array(px) / (1. * sum(px))
    u = uniform(0, max(px))
    for n in xrange(N):
        included = px >= u
        choice = random.sample(range(np.sum(included)), 1)[0]
        values[n] = samples[included][choice]
        u = uniform(0, px[included][choice])
    if x:
        if len(x) == len(px):
            x = np.array(x)
            values = x[values]
        else:
            print "px and x are different lengths. Returning index locations for px."
    if N == 1:
        return values[0]
    return values
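# --- Usage sketch (added for illustration; assumes numpy's `uniform` and the
# stdlib `random` module are imported as the function above expects):
px = [0.1, 0.2, 0.7]
idx = slice_sampler(px, N=5)                      # five indices in {0, 1, 2}
vals = slice_sampler(px, N=5, x=['a', 'b', 'c'])  # five labelled samples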
def astep(self, q0, logp):
    """q0: current state
    logp: log probability function
    """
    # Draw from the normal prior by multiplying the Cholesky decomposition
    # of the covariance with draws from a standard normal
    chol = draw_values([self.prior_chol])
    nu = np.dot(chol, nr.randn(chol.shape[0]))
    y = logp(q0) - nr.standard_exponential()

    # Draw initial proposal and propose a candidate point
    theta = nr.uniform(0, 2 * np.pi)
    theta_max = theta
    theta_min = theta - 2 * np.pi
    q_new = q0 * np.cos(theta) + nu * np.sin(theta)

    while logp(q_new) <= y:
        # Shrink the bracket and propose a new point
        if theta < 0:
            theta_min = theta
        else:
            theta_max = theta
        theta = nr.uniform(theta_min, theta_max)
        q_new = q0 * np.cos(theta) + nu * np.sin(theta)

    return q_new
def MakeHexagons(self):
    print "Building %i Hexagons" % NumHexagons
    # get a list of colors for random colors
    wx.lib.colourdb.updateColourDB()
    self.colors = wx.lib.colourdb.getColourList()
    print "Max colors:", len(self.colors)
    Canvas = self.Canvas
    D = 1.0
    h = D * N.sqrt(3) / 2
    Hex = N.array(((D, 0),
                   (D / 2, -h),
                   (-D / 2, -h),
                   (-D, 0),
                   (-D / 2, h),
                   (D / 2, h),
                   ))
    Centers = uniform(-100, 100, (NumHexagons, 2))
    for center in Centers:
        # scale the hexagon
        Points = Hex * uniform(5, 20)
        #print Points
        # shift the hexagon
        Points = Points + center
        #print Points
        cf = random.randint(0, len(self.colors) - 1)
        #cf = 55
        H = Canvas.AddPolygon(Points, LineColor=None, FillColor=self.colors[cf])
        #print "BrushList is: %i long"%len(H.BrushList)
        H.Bind(FloatCanvas.EVT_FC_LEFT_DOWN, self.HexHit)
    print "BrushList is: %i long" % len(H.BrushList)
def evaluation_function(self, X):
    ################################################
    # write voltages and algorithm parameters into
    # the database so that working nodes can access
    # this data.
    ################################################
    write_algo_parameters_to_db(X, self.geom_iteration_counter,
                                self.geom_particle_counter)

    ################################################
    # wait for Rs to calculate target functions
    ################################################
    dont_have_target_function = True
    print 'waiting to receive target functions from R'
    t0 = time.time()
    while dont_have_target_function:
        target_function = get_target_functions_from_db(len(X))
        if len(target_function) == len(X):
            dont_have_target_function = False
            target_function.reverse()
            print target_function
            #raw_input()
            return target_function
        else:
            time.sleep(1)
    print target_function
    print random.uniform(0, 1)
def downRp(self, myslice, base=0, noise=1, gradient=0.2, noise_type='gauss'):
    """Replace Time series myslice with downward trend control chart pattern

    myslice is a tuple (i,j) of the start and end point.
    Uses Python syntax i <= x < j
    All values edited are those of x
    """
    if noise_type == 'gauss':
        series = normal(base, noise, myslice[1] - myslice[0])
    elif noise_type == 'uni':
        series = uniform(0 - noise, noise, myslice[1] - myslice[0]) + base
    elif noise_type == 'none':
        series = array([base] * (myslice[1] - myslice[0]))
    for i in range(len(series)):
        series[i] = series[i] - gradient * i
    # splice the trend segment back between the untouched head and tail
    # (extend() mutates in place and returns None, so build on `a`)
    a = self[:myslice[0]]
    b = self[myslice[1]:]
    a.extend(series)
    a.extend(b)
    return a
def pillbox_sunshape_directions(num_rays, ang_range):
    """
    Calculates directions for a ray bundle with ``num_rays`` rays, distributed
    as a pillbox sunshape shining toward the +Z axis, and deviating from it by
    at most ang_range, such that if all rays have the same energy, the flux
    distribution comes out right.

    Arguments:
    num_rays - number of rays to generate directions for.
    ang_range - in radians, the maximum deviation from +Z.

    Returns:
    A (3, num_rays) array whose each column is a unit direction vector for one
        ray, distributed to match a pillbox sunshape.
    """
    # Diffuse divergence from +Z:
    # development based on eq. 2.12 from [1]
    xi1 = random.uniform(high=2. * N.pi, size=num_rays)  # Phi
    xi2 = random.uniform(size=num_rays)  # Rtheta
    #theta = N.arcsin(N.sin(ang_range)*N.sqrt(xi2))
    #sin_th = N.sin(theta)
    #a = N.vstack((N.cos(xi1)*sin_th, N.sin(xi1)*sin_th , N.cos(theta)))
    sinsqrt = N.sin(ang_range) * N.sqrt(xi2)
    a = N.vstack((N.cos(xi1) * sinsqrt, N.sin(xi1) * sinsqrt, N.sqrt(1. - sinsqrt**2.)))

    return a
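# --- Sanity check sketch (my addition; assumes the module-level imports used
# above, numpy as N and numpy.random as random): every column is a unit
# vector, and no ray deviates from +Z by more than ang_range, since
# sin(theta) = sin(ang_range)*sqrt(xi2) <= sin(ang_range).
dirs = pillbox_sunshape_directions(1000, ang_range=0.01)
assert N.allclose(N.sum(dirs**2, axis=0), 1.0)
assert N.all(N.arccos(dirs[2]) <= 0.01 + 1e-9)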
def SimpleMC(f, nIter, nVar, upper, lower):
    """
    Description: Finds the Minimum of a function using the Simple Monte-Carlo
                 method (all non-improving moves are discarded)
    Inputs:
        f     : Function to be minimized
        nIter : Number of Global iterations to perform the search over
        nVar  : Number of variables involved in function f
        upper : Upper search limit
        lower : Lower search limit
    Outputs: Returns a list with 2 elements
        i)  first element  : The minimum value of f
        ii) second element : List of coordinates corresponding to the minimum
    Note: Values are obtained after max(100 x nIter) search attempts.
    """
    bestVal = []
    bestCoord = []
    for n in range(nIter):
        for i in range(100):
            if i == 0:
                step = [float(uniform(lower, upper, 1)) for m in range(nVar)]
                min_Coord = [m for m in step]
                minVal = E = f(min_Coord)
            else:
                if any(m > upper or m < lower for m in step):
                    break
                step = [m + float(uniform(-1, 1, 1)) for m in step]
                E_new = f(step)
                deltaE = E_new - E
                if deltaE < 0:
                    minVal = E = E_new
                    min_Coord = [m for m in step]
        bestVal.append(minVal)
        bestCoord.append(min_Coord)
    index = min(enumerate(bestVal), key=itemgetter(1))[0]
    return [bestVal[index], bestCoord[index]]
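# --- Usage sketch (not in the source): minimize a shifted sphere function in
# three variables, with the same imports SimpleMC relies on.
from operator import itemgetter
from numpy.random import uniform
sphere = lambda v: sum((vi - 1.0)**2 for vi in v)
best_val, best_coord = SimpleMC(sphere, nIter=20, nVar=3, upper=5.0, lower=-5.0)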
def set_interest(self, household, firm):
    # Set nominal interest rate as 0,1,2,...,i,i+1,... where i stands for
    # 0.1*i percent, and announce it to the public
    #current_coefs = [self.alp_i, self.alp_p]
    current_irate = self.irate
    Tremble = uniform() < self.TrblActn
    Inertia = uniform() < self.inertia
    satisficing_output = self.val_output >= self.sl_output
    satisficing_infltn = self.val_infltn >= self.sl_infltn

    if not(Inertia):
        if Tremble or not(satisficing_output) or not(satisficing_infltn):
            self.irate = uniform_range(max(self.min_irate, self.irate - self.delta),
                                       self.irate + self.delta)
        else:
            self.irate = uniform_range(max(self.min_irate, self.irate - self.delta),
                                       self.irate + self.delta)
    # self.irate = min(self.irate, int(self.max_irate/self.unit))

    if current_irate == self.irate:
        self.ActionChanged = False

    # if not(Inertia):
    #     if (Tremble or not(satisficing_output) or not(satisficing_infltn)):
    #         self.alp_i = randint(max(-2, self.alp_i - self.delta), min(5, self.alp_i + self.delta))
    #         self.alp_p = randint(max(-2, self.alp_p - self.delta), min(5, self.alp_p + self.delta))
    #         # Rule for random choice randint(max(0, self.irate - self.delta), self.irate + self.delta)
    #     self.irate = int(min(max(0, self.alp_i*household.c[irate_node]
    #                               + self.alp_p*self.inflation), self.max_irate/self.unit))
    # if current_coefs == [self.alp_i, self.alp_p]:
    #     self.ActionChanged = False

    Tremble = uniform() < self.TrbSatLv
    lamda = uniform()**self.gamma
    if not(Tremble):
        self.sl_output += lamda * self.LAMBDA * min(self.val_output - self.sl_output, 0)
        self.sl_infltn += lamda * self.LAMBDA * min(self.val_infltn - self.sl_infltn, 0)
    else:
        self.sl_output += lamda * (self.val_output - self.sl_output)
        self.sl_infltn += lamda * (self.val_infltn - self.sl_infltn)

    household.irate = firm.irate = self.irate
def edge_rays_bundle(num_rays, center, direction, radius, ang_range, flux=None, radius_in=0.):
    radius = float(radius)
    radius_in = float(radius_in)
    a = edge_rays_directions(num_rays, ang_range)

    # Rotate to a frame in which <direction> is Z:
    perp_rot = rotation_to_z(direction)
    directions = N.sum(perp_rot[..., None] * a[None, ...], axis=1)

    # Locations:
    # See [1]
    xi1 = random.uniform(size=num_rays)
    thetas = random.uniform(high=2. * N.pi, size=num_rays)
    rs = N.sqrt(radius_in**2. + xi1 * (radius**2. - radius_in**2.))
    xs = rs * N.cos(thetas)
    ys = rs * N.sin(thetas)

    # Rotate locations to the plane defined by <direction>:
    vertices_local = N.vstack((xs, ys, N.zeros(num_rays)))
    vertices_global = N.dot(perp_rot, vertices_local)

    rayb = RayBundle(vertices=vertices_global + center, directions=directions)
    if flux is not None:
        rayb.set_energy(N.pi * (radius**2. - radius_in**2.) / num_rays * flux * N.ones(num_rays))
    return rayb
def __init__(self, **kwargs):
    """
    Parameters:
        AdaggerA_X -- list of ne expressions for the coordinate dependent
            product of the dissipator
        apply_A -- list of the functions applying the operator A onto the
            wavefunction
        BdaggerB_P -- list of ne expressions for the coordinate dependent
            product of the dissipator
        apply_B -- list of the functions applying the operator B onto the
            wavefunction
    """
    # Extract and save the list of dissipators
    AdaggerA_X = kwargs.pop("AdaggerA_X", [])
    self.apply_A = kwargs.pop("apply_A", [])
    assert len(self.apply_A) == len(AdaggerA_X), \
        "Lengths of AdaggerA_X and apply_A must be equal"

    BdaggerB_P = kwargs.pop("BdaggerB_P", [])
    self.apply_B = kwargs.pop("apply_B", [])
    assert len(self.apply_B) == len(BdaggerB_P), \
        "Lengths of BdaggerB_P and apply_B must be equal"

    # Put the operators in brackets, just to be on the safe side for compilation
    AdaggerA_X = ["({})".format(_) for _ in AdaggerA_X]
    BdaggerB_P = ["({})".format(_) for _ in BdaggerB_P]

    # ne codes for calculating lambda_{A_k}(t) = < A_k^\dagger A_k (x) >
    self.code_lambda_A = ["sum({} * abs(wavefunction) ** 2)".format(_) for _ in AdaggerA_X]

    # ne codes for calculating lambda_{B_k}(t) = < B_k^\dagger B_k (p) >
    self.code_lambda_B = ["sum({} * density)".format(_) for _ in BdaggerB_P]

    # Set the arrays for P_A and P_B
    self.P_A = np.ones(len(AdaggerA_X), dtype=np.float)
    self.r_A = uniform(0., 1., len(AdaggerA_X))

    self.P_B = np.ones(len(BdaggerB_P), dtype=np.float)
    self.r_B = uniform(0., 1., len(BdaggerB_P))

    # Modify the potential energy with the contribution
    # from the coordinate dependent dissipators
    kwargs["V"] = "{} -0.5j * ({})".format(
        kwargs.pop("V"),
        ("+ ".join(AdaggerA_X) if AdaggerA_X else "0.")
    )

    # the same for the kinetic energy
    kwargs["K"] = "{} -0.5j * ({})".format(
        kwargs.pop("K"),
        ("+ ".join(BdaggerB_P) if BdaggerB_P else "0.")
    )

    # Call the parent constructor
    super().__init__(**kwargs)

    if BdaggerB_P:
        # Allocate a copy of the wavefunction for storing the wavefunction in
        # the momentum representation, if the momentum dependent dissipator is given
        self.wavefunction_p = np.zeros_like(self.wavefunction)

        # and also for the density
        self.density = np.zeros(self.wavefunction_p.shape, dtype=np.float)

    self.AdaggerA_X = AdaggerA_X
    self.BdaggerB_P = BdaggerB_P
def set_price(self, bank, household):
    # set price and announce it to the public
    irate_node = self.irate_node(self.irate)
    self.current_price = self.price[irate_node]
    Tremble = uniform() < self.TrblActn
    Inertia = uniform() < self.inertia
    Satisficing = self.Val[irate_node] >= self.SatLv[irate_node]
    # print 'price range:', self.price[irate_node]*(1-self.delta), self.price[irate_node]*(1+self.delta)
    if not(Inertia):
        if Tremble or not(Satisficing):
            self.price[irate_node] = uniform_range(self.price[irate_node] * (1 - self.delta),
                                                   self.price[irate_node] * (1 + self.delta))
    if self.current_price != self.price[irate_node]:
    # if self.ActionChanged or self.current_price != self.price[irate_node]:
        self.ActionChanged = True

    # current_markup = self.markup[irate_node]
    # Tremble = uniform() < self.TrblActn
    # Inertia = uniform() < self.inertia
    # Satisficing = self.Val[irate_node] >= self.SatLv[irate_node]
    # if not(Inertia):
    #     if Tremble or not(Satisficing):
    #         self.markup[irate_node] = uniform_range(max(0, self.markup[irate_node]*(1-self.delta)), self.markup[irate_node]*(1+self.delta))
    # if self.ActionChanged or current_markup != self.markup[irate_node]:
    #     self.ActionChanged = True
    # price = (1+self.markup[irate_node])*(self.irate*self.unit)*(self.capital**(1-self.capital_power))/(self.capital_power*self.tech)

    household.price = bank.price = max(0.01, self.price[irate_node])

    Tremble = uniform() < self.TrbSatLv
    lamda = uniform()**self.gamma
    if not(Tremble):
        self.SatLv[irate_node] += lamda * self.LAMBDA * min(self.Val[irate_node] - self.SatLv[irate_node], 0)
    else:
        self.SatLv[irate_node] += lamda * (self.Val[irate_node] - self.SatLv[irate_node])
def test_write_output(output_dir):
    """ render the basemap """
    r = Renderer(bna_star, output_dir, image_size=(600, 600),
                 draw_back_to_fore=True, formats=['png'])
    r.draw_background()

    BB = r.map_BB
    (min_lon, min_lat) = BB[0]
    (max_lon, max_lat) = BB[1]

    N = 100
    # create some random particle positions:
    lon = random.uniform(min_lon, max_lon, (N, ))
    lat = random.uniform(min_lat, max_lat, (N, ))

    # create a sc
    sc = sample_sc_release(num_elements=N)
    sc['positions'][:, 0] = lon
    sc['positions'][:, 1] = lat

    r.cache = FakeCache(sc)
    r.write_output(0)
    r.save_foreground(os.path.join(output_dir, 'map_and_elements.png'))

    r.draw_back_to_fore = False
    r.clear_foreground()
    r.write_output(1)
    r.save_foreground(os.path.join(output_dir, 'just_elements.png'))
def __call__(self, image):
    if random.randint(2):
        delta = random.uniform(-self.delta, self.delta)
        image += delta
    return image
def __call__(self, image, boxes=None, labels=None):
    height, width, _ = image.shape
    while True:
        # randomly choose a mode
        mode = random.choice(self.sample_options)
        if mode is None:
            return image, boxes, labels

        min_iou, max_iou, min_scale = mode
        if min_iou is None:
            min_iou = float('-inf')
        if max_iou is None:
            max_iou = float('inf')

        # max trials (50)
        for _ in range(50):
            current_image = image

            w = random.uniform(min_scale * width, width)
            h = random.uniform(min_scale * height, height)

            # aspect ratio constraint b/t .5 & 1.3
            if h / w < 0.5 or h / w > 1.3:
                continue

            left = random.uniform(width - w)
            top = random.uniform(height - h)

            # convert to integer rect x1,y1,x2,y2
            rect = np.array([int(left), int(top), int(left + w), int(top + h)])

            # calculate IoU (jaccard overlap) b/t the cropped and gt boxes
            overlap = jaccard_numpy(boxes, rect)

            # is min and max overlap constraint satisfied? if not try again
            if overlap.min() < min_iou or max_iou < overlap.max():  # TODO
                continue

            # cut the crop from the image
            current_image = current_image[rect[1]:rect[3], rect[0]:rect[2], :]

            # keep overlap with gt box IF center in sampled patch
            centers = (boxes[:, :2] + boxes[:, 2:]) / 2.0

            # mask in all gt boxes that are above and to the left of centers
            m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])

            # mask in all gt boxes that are under and to the right of centers
            m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])

            # mask in that both m1 and m2 are true
            mask = m1 * m2

            # have any valid boxes? try again if not
            if not mask.any():  # TODO LP is not work
                continue

            # take only matching gt boxes
            current_boxes = boxes[mask, :].copy()

            # take only matching gt labels
            current_labels = labels[mask]

            # should we use the box left and top corner or the crop's
            current_boxes[:, :2] = np.maximum(current_boxes[:, :2], rect[:2])
            # adjust to crop (by subtracting crop's left,top)
            current_boxes[:, :2] -= rect[:2]

            current_boxes[:, 2:] = np.minimum(current_boxes[:, 2:], rect[2:])
            # adjust to crop (by subtracting crop's left,top)
            current_boxes[:, 2:] -= rect[:2]

            return current_image, current_boxes, current_labels
def __call__(self, image, boxes=None, labels=None):
    if random.randint(2):
        delta = random.uniform(-self.delta, self.delta)
        image += delta
    return image, boxes, labels
def __call__(self, image, boxes=None, labels=None):
    if random.randint(2):
        alpha = random.uniform(self.lower, self.upper)
        image *= alpha
    return image, boxes, labels
def __call__(self, image, boxes=None, labels=None):
    if random.randint(2):
        image[:, :, 0] += random.uniform(-self.delta, self.delta)
        image[:, :, 0][image[:, :, 0] > 360.0] -= 360.0
        image[:, :, 0][image[:, :, 0] < 0.0] += 360.0
    return image, boxes, labels
def __call__(self, image, boxes=None, labels=None):
    if random.randint(2):
        image[:, :, 1] *= random.uniform(self.lower, self.upper)
    return image, boxes, labels
def __call__(self, image):
    if random.randint(2):
        image[:, :, 1] *= random.uniform(self.lower, self.upper)
    return image
def next(self):
    if self.num < self.num_samples:
        self.num = self.num + 1
        return random.uniform(0, 1, self.num_parameters)
    else:
        raise StopIteration()
        output.write(insert_template % ((current, ) + tuple(tuples[current][tix])))
        tix += 1

    # Calculate progress along the line normalized to the interval [0...1]
    progress = (dir_vector[0] / (clusters - history)
                - (centers[current][0] - base[0])) / (dir_vector[0] / (clusters - history))

    # Calculate probabilities for querying each cluster
    if workloadtype == "insert_delete":
        probs = mc.calc_prob(current, progress, max_prob, clusters, current - history)
    else:
        probs = mc.calc_prob(current, progress, max_prob, clusters, 0)

    # Pick target clusters for queries
    query_centers = random.choice(clusters, queries_per_step, p=probs)

    # Generate queries
    queries = []
    for i in query_centers:
        c = centers[i] + random.normal(0, sigma, (dimension))
        rng = random.uniform(0, 3 * sigma, dimension)
        low = c - rng
        high = c + rng
        output.write(query_template % tuple(mc.createBoundsList(low, high)))

output.close()
def __call__(self, image):
    if random.randint(2):
        alpha = random.uniform(self.lower, self.upper)
        image *= alpha
    return image
def getSamples():
    n = 2000
    alpha = 0.1
    global dim
    #x = [0.]*dim
    vec = []
    resl = []
    #vec.append(x)
    iter = 100
    #for j in range(iter):
    x = np.concatenate(
        (nprand.uniform(10.0, 30.0, [iter, 6]),
         nprand.uniform(-0.3, 0.3, [iter, 42]),
         nprand.uniform(0.0, 30.0, [iter, 1]),
         [[-3.5] * 4] * iter,
         nprand.uniform(-30.0, 30.0, [iter, 1]),
         nprand.uniform(0.0, 0.3, [iter, 6]),
         nprand.uniform(5.0, 20.0, [iter, 6])),
        axis=1)
    #x = nprand.uniform(low=0.0, high=10.0, size=[iter,dim])
    vec.extend(x)
    resl.extend(sdnorm(x))
    noise = np.concatenate(
        (nprand.uniform(-0.2, 0.2, [n, 6]),
         nprand.uniform(-0.006, 0.006, [n, 42]),
         nprand.uniform(-0.3, 0.3, [n, 1]),
         [[0.0] * 4] * n,
         nprand.uniform(-0.6, 0.6, [n, 1]),
         nprand.uniform(-0.003, 0.003, [n, 6]),
         nprand.uniform(-0.15, 0.15, [n, 6])),
        axis=1)
    for i in xrange(1, n):
        can = x + noise[i]  # candidate
        can[:, 0:6] = np.clip(can[:, 0:6], 10.0, 30.0)
        can[:, 6:48] = np.clip(can[:, 6:48], -0.3, 0.3)
        can[:, 48:49] = np.clip(can[:, 48:49], 0.0, 30.0)
        #can[49:53]=np.clip(can[49:53],-3.0,30.0)
        can[:, 53:54] = np.clip(can[:, 53:54], -30.0, -30.0)
        can[:, 54:60] = np.clip(can[:, 54:60], 0.0, 0.3)
        can[:, 60:66] = np.clip(can[:, 60:66], 5.0, 20.0)
        #can=[[max(min(u,10.0),0.0) for u in yy] for yy in can]
        #can=map(lambda x: map(lambda y: max(0.0,min(10.0,y)), x), can)
        #can=map(lambda x: map(lambda y: max(0.0,y), x), can)
        k = sdnorm(can)
        k2 = sdnorm(x)
        aprob = map(lambda a, b: min([1., a / b]), k2, k)  # acceptance probability
        u = nprand.uniform(0, 1, iter)
        for j in range(iter):
            if u[j] < aprob[j]:
                x[j] = can[j]
                vec.append(x[j])
                resl.append(k[j])
    return [vec, resl]
from plot_clone_tool import PlotCloneTool, MPPlotCloneTool
from data_source_button import ButtonController, DataSourceButton
from mp_move_tool import MPMoveTool
from mp_viewport_pan_tool import MPViewportPanTool
#from canvas_grid import CanvasGrid

# Multitouch imports
if MULTITOUCH:
    from mptools import MPPanTool, MPDragZoom, MPLegendTool, \
        MPPanZoom, MPRangeSelection
    #AxisTool = MPAxisTool
    PlotCloneTool = MPPlotCloneTool

NUMPOINTS = 250
DATA = {
    "GOOG": random.uniform(-2.0, 10.0, NUMPOINTS),
    "MSFT": random.uniform(-2.0, 10.0, NUMPOINTS),
    "AAPL": random.uniform(-2.0, 10.0, NUMPOINTS),
    "YHOO": random.uniform(-2.0, 10.0, NUMPOINTS),
    "CSCO": random.uniform(-2.0, 10.0, NUMPOINTS),
    "INTC": random.uniform(-2.0, 10.0, NUMPOINTS),
    "ORCL": random.uniform(-2.0, 10.0, NUMPOINTS),
    "HPQ": random.uniform(-2.0, 10.0, NUMPOINTS),
    "DELL": random.uniform(-2.0, 10.0, NUMPOINTS),
}

def add_basic_tools(plot):
    plot.tools.append(PanTool(plot))
    plot.tools.append(MoveTool(plot, drag_button="right"))
    zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
true_data = sample_Poisson(1, 100)

# initialize algorithm
theta1 = 6
theta2 = 0.5
current_model = 2

# MH algorithm parameters
num_of_iter = 1
total_iter = 5000

# number of times visit M1
visits = 0

while num_of_iter <= total_iter:
    u = nprand.uniform(low=0, high=1)
    if current_model == 1:
        visits += 1
        theta2 = nprand.uniform(low=0, high=1)
        numerator = like_M2(theta2, true_data) * eval_lognormal(theta1)
        denom = like_M1(theta1, true_data) * np.exp(-theta1)
        print('likeM1', like_M1(theta1, true_data))
        print('theta1', theta1)
        print('theta2', theta2)
        print('likeM2', like_M2(theta2, true_data))
        ratio = float(numerator / denom)
        alpha = min(1.0, ratio)
        if u < alpha:
            current_model = 2
    elif current_model == 2:
def get_uniform_mab_env(bounds: Sequence[Tuple[float, float]]) -> 'MabEnv':
    return MabEnv(
        [lambda c=c, d=d: uniform(c, d, 1)[0] for c, d in bounds])
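# Note on the `c=c, d=d` defaults above: binding each bound pair as a default
# argument freezes it at lambda-creation time. A plain `lambda: uniform(c, d, 1)[0]`
# would close over the loop variables, so every arm would sample from the
# *last* (c, d) in `bounds`. Hypothetical construction (assumes MabEnv and
# numpy.random.uniform are importable as in the snippet):
env = get_uniform_mab_env([(0.0, 1.0), (2.0, 5.0)])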
def uniform_distribution(x, dx):
    from numpy.random import uniform
    return uniform(x - dx, x + dx)
#def f(x):
#    global dim
#    a=[5.0]*dim
#    z=(x[:,0]-a[0])**2.0
#    for i in range(1,len(x[0])):
#        z=z+(x[:,i]-a[i])**2.0
#    return z

#def f(x):
#    return (x[:,0]-5.0)**2.0+(x[:,1]-5.0)**2.0+(x[:,2]-5.0)**2.0+(x[:,3]-5.0)**2.0+(x[:,4]-5.0)**2.0

#X=np.atleast_2d([[8.0,0.0,4.0,6.0,9.0,1.0,7.5,9.8,4.2,0.8],[1.0, 9.0, 2.0,7.0,3.0,6.0,2.3,8.2,3.9,7.0]])
dim = 66
#X=nprand.uniform(low=0.0, high=10.0, size=[2,dim])
X = np.concatenate(
    (nprand.uniform(10.0, 30.0, [2, 6]),
     nprand.uniform(-0.3, 0.3, [2, 42]),
     nprand.uniform(0.0, 30.0, [2, 1]),
     [[-3.5] * 4] * 2,
     nprand.uniform(-30.0, 30.0, [2, 1]),
     nprand.uniform(0.0, 0.3, [2, 6]),
     nprand.uniform(5.0, 20.0, [2, 6])),
    axis=1)
maxprev = X[0]
y = np.asarray([f1(X[0]), f1(X[1])])
#y=[odefunc(X[0]),odefunc(X[1])]
prevmax1 = y[0]
#prevmax1=f1(X[0])
#y = f(X).ravel()
#x1=[1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0]
x1 = np.linspace(0, 10, 100)
#v1,v2,v3,v4,v5= np.meshgrid(x1,x1,x1,x1,x1[0])
gp = gaussian_process.GaussianProcess(theta0=1e-2, thetaL=1e-4, thetaU=1e-1)
#n=10
def init_particle(x_range, y_range, N):
    particles = np.empty((N, 2))
    particles[:, 0] = uniform(x_range[0], x_range[1], size=N)
    particles[:, 1] = uniform(y_range[0], y_range[1], size=N)
    return particles
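# --- Usage sketch (my addition): scatter 500 particles over a 2-D box;
# assumes numpy and numpy.random.uniform are imported as the function expects.
particles = init_particle(x_range=(0.0, 10.0), y_range=(0.0, 5.0), N=500)
assert particles.shape == (500, 2)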
for i in range(N):
    word_entropy[i] = H_dir[i]
for k in sorted(word_entropy.keys(), key=lambda x: word_entropy[x]):
    print(ref_changes_pruned[k], changes_pruned[k][1], word_entropy[k])
"""

M = [[sum(sound_ind[i, x[0]:x[1]]) for x in s_breaks] for i in range(len(sound_ind))]

full_prior_ln = []
full_prior_ln_T = []
for t in range(T):
    alpha = uniform(0, 100)
    while alpha == 0:
        alpha = uniform(0, 100)
    theta_star = np.array(
        [np.random.dirichlet([alpha, alpha]) for l in range(L)])
    psi_star = np.array(
        [np.random.multivariate_normal([0] * S, Sigma * 10) for k in range(K)])
    phi_star = np.array([
        np.concatenate([
            (softmax([psi_star[k][s_breaks[x][0]:s_breaks[x][1]]])[0]**10) /
            np.sum(softmax([psi_star[k][s_breaks[x][0]:s_breaks[x][1]]])[0]**10)
            for x in range(X)
        ]) for k in range(K)
    ])
    p_z = np.exp(np.dot(lang_ind, log(theta_star)))
#!/usr/bin/env python
import SamplePdf
import numpy as np
import os
from numpy.random import uniform
import pickle as pkl

num_samples = 5000

uni_samples = np.zeros(num_samples)
# TODO: draw uniform samples
uni_samples = uniform(size=num_samples)

# create instance of our new PDF sampler
my_pdf_sampler = SamplePdf.SamplePdf(num_samples)

# feed the uniform samples and create our custom ones
# TODO this function needs to be implemented in SamplePdf.py
new_samples = my_pdf_sampler.sample_pdf(uni_samples)

my_pdf_sampler.plot_result()

# save the result in a struct
pkl.dump(new_samples, open("results.pkl", 'wb'))
# N uniformly distributed random numbers
from numpy import random

#print(random.__doc__)
#print(random.uniform.__doc__)

N = 10000
a = 0
b = 5

# seed for the pseudo-random number generator
#random.seed(1)

x = random.uniform(a, b, N)
#x = random.normal(a,b,N)

k = [0, 0, 0, 0, 0]
for i in range(N):
    if x[i] < 1:
        k[0] = k[0] + 1
    elif x[i] < 2:
        k[1] = k[1] + 1
    elif x[i] < 3:
        k[2] = k[2] + 1
    elif x[i] < 4:
        k[3] = k[3] + 1
    else:
        k[4] = k[4] + 1

print(k)
def perturbParticle(params, priors, kernel, kernel_type, special_cases):
    np = len(priors)
    prior_prob = 1

    if special_cases == 1:
        # this is the case where kernel is uniform and all priors are uniform
        ind = 0
        for n in kernel[0]:
            lflag = (params[n] + kernel[2][ind][0]) < priors[n][1]
            uflag = (params[n] + kernel[2][ind][1]) > priors[n][2]

            lower = kernel[2][ind][0]
            upper = kernel[2][ind][1]
            if lflag == True:
                lower = -(params[n] - priors[n][1])
            if uflag == True:
                upper = priors[n][2] - params[n]

            delta = 0
            positive = False
            if lflag == False and uflag == False:
                # proceed as normal
                delta = rnd.uniform(low=kernel[2][ind][0], high=kernel[2][ind][1])
            else:
                # decide if the particle is to be perturbed positively or negatively
                positive = rnd.uniform(0, 1) > abs(lower) / (abs(lower) + upper)
                if positive == True:
                    # theta = theta + U(0, min(prior,kernel) )
                    delta = rnd.uniform(low=0, high=upper)
                else:
                    # theta = theta + U( max(prior,kernel), 0 )
                    delta = rnd.uniform(low=lower, high=0)

            params[n] = params[n] + delta
            ind += 1

        # this is not the actual value of the pdf but we only require it to be non zero
        return 1.0

    else:
        if kernel_type == 1:
            ind = 0
            # n refers to the index of the parameter (integer between 0 and np-1)
            # ind is an integer between 0 and len(kernel[0])-1 which enables to
            # determine the kernel to use
            for n in kernel[0]:
                params[n] = params[n] + rnd.uniform(low=kernel[2][ind][0],
                                                    high=kernel[2][ind][1])
                ind += 1

        if kernel_type == 2:
            ind = 0
            for n in kernel[0]:
                params[n] = rnd.normal(params[n], numpy.sqrt(kernel[2][ind]))
                ind += 1

        if kernel_type == 3:
            mean = list()
            for n in kernel[0]:
                mean.append(params[n])
            tmp = statistics.mvnd_gen(mean, kernel[2])
            ind = 0
            for n in kernel[0]:
                params[n] = tmp[ind]
                ind = ind + 1

        if kernel_type == 4 or kernel_type == 5:
            mean = list()
            for n in kernel[0]:
                mean.append(params[n])
            D = kernel[2]
            tmp = statistics.mvnd_gen(mean, D[str(params)])
            ind = 0
            for n in kernel[0]:
                params[n] = tmp[ind]
                ind = ind + 1

        # compute the likelihood
        prior_prob = 1
        for n in range(np):
            x = 1.0
            #if priors[n][0]==1:
            #    x=statistics.getPdfGauss(priors[n][1], numpy.sqrt(priors[n][2]), params[n])
            # if we do not care about the value of prior_prob, then here: x=1.0
            if priors[n][0] == 2:
                x = statistics.getPdfUniform(priors[n][1], priors[n][2], params[n])
            #if priors[n][0]==3:
            #    x=statistics.getPdfLognormal(priors[n][1],priors[n][2],params[n])
            # if we do not care about the value of prior_prob, then here:
            # x=1.0 if params[n]>=0 and 0 otherwise
            prior_prob = prior_prob * x

        return prior_prob
        testSeq['ctpd'] = condsList[len(condsList) - i - 1]['ctpd_prime']
        testSeq['dtype'] = cond['dtype_test']
        testSeq['dsd'] = cond['dsd_test']
        testSeq['seq_type'] = 'probe'
        testSeq['set_size'] = cond['test_set_size']

        totalTrials += primeSeq['nextProbe'] + testSeq['nextProbe']
        conds.append(primeSeq)
        conds.append(testSeq)

for blockN, block in enumerate(conds):
    block['blockN'] = blockN
    block['blockRepN'] = blockRepN
    block['totBlockN'] = totBlockN
    totBlockN += 1

    if block['seq_type'] == 'prime':
        dmean = random.uniform(0, 360)
    else:
        # we use different bin sizes at different CTPDs so we have a bit more
        # precision closer to the mean, where we actually expect to see differences
        if abs(prev_ctpd) == 80:
            targetOri = prev_dmean + np.sign(prev_ctpd) * random.uniform(70, 90)
        elif abs(prev_ctpd) == 60:
            targetOri = prev_dmean + np.sign(prev_ctpd) * random.uniform(50, 70)
        elif abs(prev_ctpd) == 40:
            targetOri = prev_dmean + np.sign(prev_ctpd) * random.uniform(35, 50)
        else:
            targetOri = prev_dmean + prev_ctpd + random.uniform(-5, 5)

    if totBlockN > 1:
        block['prevDistrMean'] = prev_dmean
        block['prevDistrCTPD'] = prev_ctpd
        block['prevDistrType'] = prev_dtype

    prev_dmean = dmean
def onemove(self, x, u, xp, up):
    """One move of the twalk. This is basically the raw twalk kernel.
    It is useful if the twalk is needed inside a more complex MCMC.

    onemove(x, u, xp, up):
    x, xp: two points WITHIN the support,
    ***each entry of x0 and xp0 must be different***,
    u, up: the value of the objective at x and xp, u=U(x), up=U(xp).

    It returns: [y, yp, ke, A, u_prop, up_prop]
    y, yp: the proposed jump
    ke: The kernel used, 0=nothing, 1=Walk, 2=Traverse, 3=Blow, 4=Hop
    A: the M-H ratio
    u_prop, up_prop: The values for the objective func. at the proposed jumps
    """
    #### Make local references for less writing
    n = self.n
    U = self.U
    Supp = self.Supp
    Fw = self.Fw

    ker = uniform()  ### To choose the kernel to be used
    ke = 1
    A = 0

    ## Kernel nothing: exchange x with xp, not used
    if (0.0 <= ker) & (ker < Fw[0]):
        ke = 0
        y = xp.copy()
        up_prop = u
        yp = x.copy()
        u_prop = up
        ### A is the MH acceptance ratio
        A = 1.0  # always accepted

    ## The Walk move
    if (Fw[0] <= ker) & (ker < Fw[1]):
        ke = 1
        dir = uniform()
        if (0 <= dir) & (dir < 0.5):  ## x as pivot
            yp = self.SimWalk(xp, x)
            y = x.copy()
            u_prop = u
            if (Supp(yp)) & (all(abs(yp - y) > 0)):
                up_prop = U(yp)
                A = exp(up - up_prop)
            else:
                up_prop = None
                A = 0  ## out of support, not accepted
        else:  ## xp as pivot
            y = self.SimWalk(x, xp)
            yp = xp.copy()
            up_prop = up
            if (Supp(y)) & (all(abs(yp - y) > 0)):
                u_prop = U(y)
                A = exp(u - u_prop)
            else:
                u_prop = None
                A = 0  ## out of support, not accepted

    #### The Traverse move
    if (Fw[1] <= ker) & (ker < Fw[2]):
        ke = 2
        dir = uniform()
        if (0 <= dir) & (dir < 0.5):  ## x as pivot
            beta = self.Simbeta()
            yp = self.SimTraverse(xp, x, beta)
            y = x.copy()
            u_prop = u
            if Supp(yp):
                up_prop = U(yp)
                if self.nphi == 0:
                    A = 1  ### Nothing moved
                else:
                    A = exp((up - up_prop) + (self.nphi - 2) * log(beta))
            else:
                up_prop = None
                A = 0  ## out of support, not accepted
        else:  ## xp as pivot
            beta = self.Simbeta()
            y = self.SimTraverse(x, xp, beta)
            yp = xp.copy()
            up_prop = up
            if Supp(y):
                u_prop = U(y)
                if self.nphi == 0:
                    A = 1  ### Nothing moved
                else:
                    A = exp((u - u_prop) + (self.nphi - 2) * log(beta))
            else:
                u_prop = None
                A = 0  ## out of support, not accepted

    ### The Blow move
    if (Fw[2] <= ker) & (ker < Fw[3]):
        ke = 3
        dir = uniform()
        if (0 <= dir) & (dir < 0.5):  ## x as pivot
            yp = self.SimBlow(xp, x)
            y = x.copy()
            u_prop = u
            if (Supp(yp)) & all(yp != x):
                up_prop = U(yp)
                W1 = self.GBlowU(yp, xp, x)
                W2 = self.GBlowU(xp, yp, x)
                A = exp((up - up_prop) + (W1 - W2))
            else:
                up_prop = None
                A = 0  ## out of support, not accepted
        else:  ## xp as pivot
            y = self.SimBlow(x, xp)
            yp = xp.copy()
            up_prop = up
            if (Supp(y)) & all(y != xp):
                u_prop = U(y)
                W1 = self.GBlowU(y, x, xp)
                W2 = self.GBlowU(x, y, xp)
                A = exp((u - u_prop) + (W1 - W2))
            else:
                u_prop = None
                A = 0  ## out of support, not accepted

    ### The Hop move
    if (Fw[3] <= ker) & (ker < Fw[4]):
        ke = 4
        dir = uniform()
        if (0 <= dir) & (dir < 0.5):  ## x as pivot
            yp = self.SimHop(xp, x)
            y = x.copy()
            u_prop = u
            if (Supp(yp)) & all(yp != x):
                up_prop = U(yp)
                W1 = self.GHopU(yp, xp, x)
                W2 = self.GHopU(xp, yp, x)
                A = exp((up - up_prop) + (W1 - W2))
            else:
                up_prop = None
                A = 0  ## out of support, not accepted
        else:  ## xp as pivot
            y = self.SimHop(x, xp)
            yp = xp.copy()
            up_prop = up
            if (Supp(y)) & all(y != xp):
                u_prop = U(y)
                W1 = self.GHopU(y, x, xp)
                W2 = self.GHopU(x, y, xp)
                A = exp((u - u_prop) + (W1 - W2))
            else:
                u_prop = None
                A = 0  ## out of support, not accepted

    return [y, yp, ke, A, u_prop, up_prop]
def stablernd(alpha, size=1):
    # cf Devroye, 2009, Equation (2)
    U = npr.uniform(low=0.0, high=np.pi, size=size)
    E = npr.exponential(size=size)
    samples = (zolotarev(U, alpha) / E)**((1 - alpha) / alpha)
    return samples
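# --- Illustrative draw (added here; assumes the Zolotarev-function helper
# `zolotarev(U, alpha)` from Devroye (2009) is defined elsewhere in this
# module, alongside `import numpy.random as npr` and `import numpy as np`):
s = stablernd(0.5, size=10)   # 10 draws via Devroye's representation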
def generate_fake_fits_observation(event_list=None, filename=None,
                                   instr='FPMA', gti=None, tstart=None,
                                   tstop=None, mission='NUSTAR',
                                   mjdref=55197.00076601852,
                                   livetime=None, additional_columns={}):
    """Generate fake NuSTAR data.

    Takes an event list (as a list of floats). All inputs are None by default,
    and can be set during the call.

    Parameters
    ----------
    event_list : list-like
        :class:`stingray.events.Eventlist` object. If left None, 1000 random
        events will be generated, for a total length of 1025 s or the
        difference between tstop and tstart.
    filename : str
        Output file name

    Returns
    -------
    hdulist : FITS hdu list
        FITS hdu list of the output file

    Other Parameters
    ----------------
    mjdref : float
        Reference MJD. Default is 55197.00076601852 (NuSTAR)
    pi : list-like
        The PI channel of each event
    tstart : float
        Start of the observation (s from mjdref)
    tstop : float
        End of the observation (s from mjdref)
    instr : str
        Name of the instrument. Default is 'FPMA'
    livetime : float
        Total livetime. Default is tstop - tstart
    """
    from astropy.io import fits
    import numpy.random as ra

    if event_list is None:
        tstart = assign_value_if_none(tstart, 8e+7)
        tstop = assign_value_if_none(tstop, tstart + 1025)
        ev_list = sorted(ra.uniform(tstart, tstop, 1000))
    else:
        ev_list = event_list.time

    if hasattr(event_list, 'pi'):
        pi = event_list.pi
    else:
        pi = ra.randint(0, 1024, len(ev_list))

    tstart = assign_value_if_none(tstart, np.floor(ev_list[0]))
    tstop = assign_value_if_none(tstop, np.ceil(ev_list[-1]))
    gti = assign_value_if_none(gti, np.array([[tstart, tstop]]))
    filename = assign_value_if_none(filename, 'events.evt')
    livetime = assign_value_if_none(livetime, tstop - tstart)

    if livetime > tstop - tstart:
        raise ValueError('Livetime must be equal or smaller than '
                         'tstop - tstart')

    # Create primary header
    prihdr = fits.Header()
    prihdr['OBSERVER'] = 'Edwige Bubble'
    prihdr['TELESCOP'] = (mission, 'Telescope (mission) name')
    prihdr['INSTRUME'] = (instr, 'Instrument name')
    prihdu = fits.PrimaryHDU(header=prihdr)

    # Write events to table
    col1 = fits.Column(name='TIME', format='1D', array=ev_list)
    col2 = fits.Column(name='PI', format='1J', array=pi)

    allcols = [col1, col2]

    if mission.lower().strip() == 'xmm':
        ccdnr = np.zeros(len(ev_list)) + 1
        ccdnr[1] = 2  # Make it less trivial
        ccdnr[10] = 7
        allcols.append(fits.Column(name='CCDNR', format='1J', array=ccdnr))

    for c in additional_columns.keys():
        col = fits.Column(name=c, array=additional_columns[c]["data"],
                          format=additional_columns[c]["format"])
        allcols.append(col)

    cols = fits.ColDefs(allcols)

    tbhdu = fits.BinTableHDU.from_columns(cols)
    tbhdu.name = 'EVENTS'

    # ---- Fake lots of information ----
    tbheader = tbhdu.header
    tbheader['OBSERVER'] = 'Edwige Bubble'
    tbheader['COMMENT'] = ("FITS (Flexible Image Transport System) format is"
                           " defined in 'Astronomy and Astrophysics', volume"
                           " 376, page 359; bibcode: 2001A&A...376..359H")
    tbheader['TELESCOP'] = (mission, 'Telescope (mission) name')
    tbheader['INSTRUME'] = (instr, 'Instrument name')
    tbheader['OBS_ID'] = ('00000000001', 'Observation ID')
    tbheader['TARG_ID'] = (0, 'Target ID')
    tbheader['OBJECT'] = ('Fake X-1', 'Name of observed object')
    tbheader['RA_OBJ'] = (0.0, '[deg] R.A. Object')
    tbheader['DEC_OBJ'] = (0.0, '[deg] Dec Object')
    tbheader['RA_NOM'] = (0.0,
                          'Right Ascension used for barycenter corrections')
    tbheader['DEC_NOM'] = (0.0, 'Declination used for barycenter corrections')
    tbheader['RA_PNT'] = (0.0, '[deg] RA pointing')
    tbheader['DEC_PNT'] = (0.0, '[deg] Dec pointing')
    tbheader['PA_PNT'] = (0.0, '[deg] Position angle (roll)')
    tbheader['EQUINOX'] = (2.000E+03, 'Equinox of celestial coord system')
    tbheader['RADECSYS'] = ('FK5', 'Coordinate Reference System')
    tbheader['TASSIGN'] = ('SATELLITE', 'Time assigned by onboard clock')
    tbheader['TIMESYS'] = ('TDB', 'All times in this file are TDB')
    tbheader['MJDREFI'] = (int(mjdref),
                           'TDB time reference; Modified Julian Day (int)')
    tbheader['MJDREFF'] = (mjdref - int(mjdref),
                           'TDB time reference; Modified Julian Day (frac)')
    tbheader['TIMEREF'] = ('SOLARSYSTEM',
                           'Times are pathlength-corrected to barycenter')
    tbheader['CLOCKAPP'] = (False, 'TRUE if timestamps corrected by gnd sware')
    tbheader['COMMENT'] = ("MJDREFI+MJDREFF = epoch of Jan 1, 2010, in TT "
                           "time system.")
    tbheader['TIMEUNIT'] = ('s', 'unit for time keywords')
    tbheader['TSTART'] = (tstart,
                          'Elapsed seconds since MJDREF at start of file')
    tbheader['TSTOP'] = (tstop, 'Elapsed seconds since MJDREF at end of file')
    tbheader['LIVETIME'] = (livetime, 'On-source time')
    tbheader['TIMEZERO'] = (0.000000E+00, 'Time Zero')
    tbheader['COMMENT'] = (
        "Generated with HENDRICS by {0}".format(os.getenv('USER')))
    # ---- END Fake lots of information ----

    # Fake GTIs
    start = gti[:, 0]
    stop = gti[:, 1]
    col1 = fits.Column(name='START', format='1D', array=start)
    col2 = fits.Column(name='STOP', format='1D', array=stop)
    allcols = [col1, col2]
    cols = fits.ColDefs(allcols)

    gtinames = ['GTI']
    if mission.lower().strip() == 'xmm':
        gtinames = ['STDGTI01', 'STDGTI02', 'STDGTI07']

    all_new_hdus = [prihdu, tbhdu]
    for name in gtinames:
        gtihdu = fits.BinTableHDU.from_columns(cols)
        gtihdu.name = name
        all_new_hdus.append(gtihdu)

    thdulist = fits.HDUList(all_new_hdus)
    thdulist.writeto(filename, overwrite=True)

    return thdulist
def Simbeta(self):
    at = self.at
    if uniform() < (at - 1.0) / (2.0 * at):
        return exp(1.0 / (at + 1.0) * log(uniform()))
    else:
        return exp(1.0 / (1.0 - at) * log(uniform()))
def __call__(self, image, boxes=None, labels=None):
    height, width, _ = image.shape
    while True:
        # randomly choose a mode
        mode = random.choice(self.sample_options)

        boxes_rect = []
        for i in range(len(boxes)):
            boxes_rect.append([
                min(boxes[i, ::2]),
                min(boxes[i, 1::2]),
                max(boxes[i, ::2]),
                max(boxes[i, 1::2])
            ])
        boxes_rect = np.array(boxes_rect)

        if mode is None or len(boxes_rect) == 0:
            return image, boxes, labels

        min_boxes, max_boxes = mode
        if min_boxes is None:
            min_boxes = float('-inf')
        if max_boxes is None:
            max_boxes = float('inf')

        # max trials (50)
        for _ in range(50):
            current_image = image

            w = random.uniform(0.1 * width, width)
            h = random.uniform(0.1 * height, height)

            # aspect ratio constraint b/t .5 & 2
            if h / w < 0.5 or h / w > 2:
                continue

            left = random.uniform(width - w)
            top = random.uniform(height - h)

            # convert to integer rect x1,y1,x2,y2
            rect = np.array([int(left), int(top), int(left + w), int(top + h)])

            # calculate IoU (jaccard overlap) b/t the cropped and gt boxes
            overlap = modified_jaccard_numpy(boxes_rect, rect)
            if (overlap > 0.9).sum() <= min_boxes or (overlap > 0.9).sum() >= max_boxes:
                continue

            # cut the crop from the image
            current_image = current_image[rect[1]:rect[3], rect[0]:rect[2], :]

            # No mask
            current_boxes = boxes.copy()
            num_pt = int(current_boxes.shape[1] / 2)
            current_boxes[:, :2 * num_pt] -= rect[:2].tolist() * num_pt
            current_labels = labels

            return current_image, current_boxes, current_labels
def Run(self, T, x0, xp0):
    """Run the twalk.

    Run(T, x0, xp0):
    T = Number of iterations.
    x0, xp0: two initial points within the support,
    ***each entry of x0 and xp0 must be different***.
    """
    sec = time()
    print "pytwalk: Running the twalk with %d iterations." % (T,), \
        strftime("%a, %d %b %Y, %H:%M.", localtime(sec))

    ### Check x0 and xp0 are in the support
    [rt, u, up] = self._SetUpInitialValues(x0, xp0)
    if not rt:
        return 0

    ### Send an estimate of the duration of the sampling if evaluating the
    ### objective function twice (in self._SetUpInitialValues) takes more
    ### than one second
    sec2 = time()  # last time we sent a message
    print " " + Remain(T, 2, sec, sec2)

    x = x0    ### Use x and xp by reference, so we can retrieve the last values used
    xp = xp0

    ### Set the array to place the iterations and the U's ... we do not save up's
    self.Output = zeros((T + 1, self.n + 1))
    self.T = T + 1
    self.Acc = zeros(6)
    kercall = zeros(6)  ## Times each kernel is called

    #### Make local references for less writing
    n = self.n
    Output = self.Output
    U = self.U
    Supp = self.Supp
    Acc = self.Acc
    Fw = self.Fw

    Output[0, 0:n] = x.copy()
    Output[0, n] = u

    j1 = 1
    j = 0
    ### Sampling
    for it in range(T):
        y, yp, ke, A, u_prop, up_prop = self.onemove(x, u, xp, up)
        kercall[ke] += 1
        kercall[5] += 1
        if uniform() < A:
            x = y.copy()    ### Accept the proposal y
            u = u_prop
            xp = yp.copy()  ### Accept the proposal yp
            up = up_prop
            Acc[ke] += 1
            Acc[5] += 1

        ### To retrieve the current values
        self.x = x
        self.xp = xp
        self.u = u
        self.up = up

        Output[it + 1, 0:n] = x.copy()
        Output[it + 1, n] = u

        ### Estimate the remaining time, every 2**j1 iterations
        if (it % (1 << j1)) == 0:
            j1 += 1
            j1 = min(j1, 10)  # check the time at least every 2^10=1024 iterations
            ax = time()
            if (ax - sec2) > (1 << j) * self.WAIT:
                # Print an estimate every WAIT*2**j
                print "pytwalk: %10d iterations so far. " % (it,) + Remain(T, it, sec, ax)
                sec2 = ax
                j += 1
                j1 -= 1  # check the time as often

    if Acc[5] == 0:
        print "pytwalk: WARNING, all proposals were rejected!"
        print strftime("%a, %d %b %Y, %H:%M:%S.", localtime(time()))
        return 0
    else:
        print "pytwalk: finished, " + strftime("%a, %d %b %Y, %H:%M:%S.", localtime(time()))

    for i in range(6):
        if kercall[i] != 0:
            Acc[i] /= kercall[i]

    return 1
def __call__(self, results):
    img, boxes, labels = [
        results[k] for k in ('img', 'gt_bboxes', 'gt_labels')
    ]
    h, w, c = img.shape
    while True:
        mode = random.choice(self.sample_mode)
        if mode == 1:
            return results

        min_iou = mode
        for i in range(50):
            new_w = random.uniform(self.min_crop_size * w, w)
            new_h = random.uniform(self.min_crop_size * h, h)

            # h / w in [0.5, 2]
            if new_h / new_w < 0.5 or new_h / new_w > 2:
                continue

            left = random.uniform(w - new_w)
            top = random.uniform(h - new_h)

            patch = np.array(
                (int(left), int(top), int(left + new_w), int(top + new_h)))
            overlaps = bbox_overlaps(
                patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
            if overlaps.min() < min_iou:
                continue

            # center of boxes should inside the crop img
            center = (boxes[:, :2] + boxes[:, 2:]) / 2
            mask = ((center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) *
                    (center[:, 0] < patch[2]) * (center[:, 1] < patch[3]))
            if not mask.any():
                continue
            boxes = boxes[mask]
            labels = labels[mask]

            # adjust boxes
            img = img[patch[1]:patch[3], patch[0]:patch[2]]
            boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
            boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
            boxes -= np.tile(patch[:2], 2)

            results['img'] = img
            results['gt_bboxes'] = boxes
            results['gt_labels'] = labels

            if 'gt_masks' in results:
                valid_masks = [
                    results['gt_masks'][i] for i in range(len(mask))
                    if mask[i]
                ]
                results['gt_masks'] = np.stack([
                    gt_mask[patch[1]:patch[3], patch[0]:patch[2]]
                    for gt_mask in valid_masks
                ])

            # not tested
            if 'gt_semantic_seg' in results:
                results['gt_semantic_seg'] = results['gt_semantic_seg'][
                    patch[1]:patch[3], patch[0]:patch[2]]

            return results
# First do it with no incentive
expected_payoffs = []
for bid in bids:
    ep = (bid[0], utility(bid[1], activation_threshold, 0, 0))
    expected_payoffs.append(ep)

pos_expected_payoffs = [payoff for payoff in expected_payoffs if payoff[1] > 0]

count_A = 0
count_B = 0
proportion_A = [0]
count = 0

while len(pos_expected_payoffs) > 0:
    rand_pick = int(round(uniform(0, len(pos_expected_payoffs) - 1)))
    picked_tuple = pos_expected_payoffs[rand_pick]
    picked_agent = picked_tuple[0]
    picked_payoff = picked_tuple[1]
    picked_bids_list = [bid for bid in bids if bid[0] == picked_agent]
    picked_bid = picked_bids_list[0][1]
    if agents[picked_agent] == 'A':
        count_A += 1
    else:
        count_B += 1
    count = count + 1
    proportion_A.append(float(count_A) / float(count_A + count_B))