def testDetrend():
    import pylab
    # something random to test with
    data = [[(sin(random.gauss(0, 1) * .01 + float(i) / (n / 51.0))) *
             (cos(random.gauss(0, 1) * .01 + float(i) / (n / 31.0)))
             for i in range(n)]]
    plot(data, '--', label="Original Data")
    detrend(data, channels=1)
    plot(data, label="Detrended Data")
def genDistribution(xMean, xSD, yMean, ySD, n, namePrefix):
    samples = []
    for s in range(n):
        x = random.gauss(xMean, xSD)
        y = random.gauss(yMean, ySD)
        samples.append(Example(namePrefix + str(s), [x, y]))
    return samples
def levy_harmonic_path(k):
    x = [random.gauss(0.0, 1.0 / math.sqrt(2.0 * math.tanh(k * beta / 2.0)))]
    if k == 2:
        Ups1 = 2.0 / math.tanh(beta)
        Ups2 = 2.0 * x[0] / math.sinh(beta)
        x.append(random.gauss(Ups2 / Ups1, 1.0 / math.sqrt(Ups1)))
    return x[:]
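# Hedged usage sketch (not part of the original source): levy_harmonic_path reads a
# module-level inverse temperature `beta`; k = 1 or 2 selects how many bead positions
# are drawn from the harmonic-oscillator density.
import math
import random

beta = 2.0
print(levy_harmonic_path(2))  # two correlated positions sampled from the harmonic density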
def execute(self, userdata):
    self.world.inc_time()
    if self.world.time_step >= self.experiment.max_transitions:
        return "timeout"
    newx = random.gauss(self.mu_x, self.si_x)
    newy = random.gauss(self.mu_y, self.si_y)
    newth = random.gauss(self.mu_th, self.si_th)
    # denormalizing the network output
    # lx = self.world.min_x
    # rx = self.world.max_x
    # ly = self.world.min_y
    # ry = self.world.max_y
    lx = 0
    rx = 5
    ly = -2.5
    ry = 2.5
    lt = -math.pi
    rt = math.pi
    newpos = (lx + newx * (rx - lx),
              ly + newy * (ry - ly),
              lt + newth * (rt - lt))
    if self.world.move_robot(newpos):
        return "success"
    else:
        return "failure"
def move(self, motion):  # Do not change the name of this function
    (alpha, distance) = motion  # alpha = steering angle
    alpha += random.gauss(0., self.steering_noise)
    distance += random.gauss(0., self.distance_noise)
    # see https://www.udacity.com/course/viewer#!/c-cs373/l-48726342/m-48693619
    b = distance / self.length * tan(alpha)  # turning angle
    if abs(b) < 0.001:
        x = self.x + distance * cos(self.orientation)
        y = self.y + distance * sin(self.orientation)
        o = self.orientation
    else:
        radius = distance / b
        cx = self.x - sin(self.orientation) * radius
        cy = self.y + cos(self.orientation) * radius
        x = cx + sin(self.orientation + b) * radius
        y = cy - cos(self.orientation + b) * radius
        o = (self.orientation + b) % (2 * pi)
    result = robot()
    result.set(x, y, o)
    result.set_noise(self.bearing_noise, self.steering_noise, self.distance_noise)
    return result  # make sure your move function returns an instance
def localize(self, robot_pose, laser_scan):
    sigma_coord, sigma_theta = RobotParams.sigmaCoord, RobotParams.sigmaTheta
    laser_scan_obstacles = filter(lambda rec: rec[2] == Map.OBSTACLE, laser_scan.data)
    estimated_pose = robot_pose.copy().translate_by_dist(0)  # laser offset
    estimated_dist = self.__scanDistance(estimated_pose, laser_scan_obstacles)
    init_dist = estimated_dist
    bad_samples_cnt = total_samples_cnt = 0
    while bad_samples_cnt < RobotParams.badMCIterations and total_samples_cnt < RobotParams.maxMCIterations:
        total_samples_cnt += 1
        sample = estimated_pose.copy() \
            .translate(random.gauss(0, sigma_coord), random.gauss(0, sigma_coord)) \
            .rotate(random.gauss(0, sigma_theta))
        sample_dist = self.__scanDistance(sample, laser_scan_obstacles)
        if estimated_dist <= sample_dist:
            bad_samples_cnt += 1
            continue
        estimated_dist, estimated_pose = sample_dist, sample
        if RobotParams.badMCIterations // 3 < bad_samples_cnt:
            bad_samples_cnt = 0
            sigma_coord *= 0.5
            sigma_theta *= 0.5
    pose_diff = estimated_pose - robot_pose
    d_pose = max(abs(pose_diff[0]), abs(pose_diff[1]), abs(pose_diff[2]))
    if 0.0 < d_pose:
        rospy.loginfo("MC Correction -- dx: %.2f | dy: %.2f | dt: %.2f ",
                      pose_diff[0], pose_diff[1], pose_diff[2])
    return estimated_pose.translate_by_dist(-0)  # laser offset
def move(self, motion):  # Do not change the name of this function
    # ADD CODE HERE
    alpha = random.gauss(motion[0], self.steering_noise)  # steering angle
    distance = motion[1] + random.gauss(0, self.distance_noise)  # distance moved
    theta = self.orientation  # orientation of the robot
    beta = distance / self.length * tan(alpha)
    result = robot(self.length)
    result.set(self.x, self.y, self.orientation)
    result.set_noise(bearing_noise, steering_noise, distance_noise)
    if abs(beta) > 0.001:
        radius = distance / beta
        cx = self.x - sin(theta) * radius
        cy = self.y + cos(theta) * radius
        result.x = cx + (sin(theta + beta) * radius)
        result.y = cy - (cos(theta + beta) * radius)
        theta = (theta + beta) % (2 * pi)
        result.orientation = theta
    else:
        result.x = self.x + distance * cos(theta)
        result.y = self.y + distance * sin(theta)
        result.orientation = (theta + beta) % (2 * pi)
    return result
def writeTuple():
    # create Tuple
    s = Proxy_Store("treeDemo.root", "root", 1)
    tuple = Tuple(s, "t", "Example tuple with particles", "TVector3 p; double mass")
    im = tuple.findColumn("mass")
    # create p vector
    v = TVector3()
    v_pointer = v._C_instance
    tuple.setAddress("p", v_pointer.address())
    im = tuple.findColumn("mass")
    import random
    for i in range(0, 1000):
        v.SetXYZ(random.gauss(0, 1), random.gauss(1, 2), random.gauss(2, 5))
        tuple.fill(im, random.gauss(10, 1))
        tuple.addRow()
    print "Tuple filled with ", tuple.rows(), " rows "
    s.close()
    return
def run(self):
    sim_bam = pysam.Samfile(self.out + ".bam", "wb", header=TEST_HEADER)
    # Randomly generate number from 2 to value normalized for genomic region for N value
    read_range = xrange(2, self.total_reads / 500)
    # pdb.set_trace()
    mid_position = self.initial_mid_position
    count = 0
    total_reads = self.total_reads
    while total_reads > 0:
        N = random.sample(read_range, 1)[0]
        total_reads = total_reads - N
        while N > 0:
            count = count + 1
            N = N - 1
            # Introduce jitter
            tmp_mid_position = int(round(mid_position + random.uniform(-1, 1) * self.jitter))
            isize = int(round(random.gauss(145, self.insert_sd)))
            positions = ((tmp_mid_position - (isize / 2), isize),
                         (tmp_mid_position + (isize / 2), -isize))
            # Construct/write paired AlignedReads
            for position in positions:
                read = construct_read(count, 0, position[0], position[1], position[1] > 0)
                sim_bam.write(read)
        # Advance position by 300
        advance = self.advance
        if self.jitter_advance > 0:
            advance = int(round(random.gauss(advance, self.jitter_advance)))
        mid_position = mid_position + advance
    sim_bam.close()
def move(self, motion):  # Do not change the name of this function
    theta = self.orientation
    alfa = float(motion[0]) + random.gauss(0.0, self.steering_noise)
    d = float(motion[1]) + random.gauss(0.0, self.distance_noise)
    beta = (d / self.length) * tan(alfa)
    newx = 0.0
    newy = 0.0
    newtheta = 0.0
    if abs(beta) < 0.001:
        newx = self.x + d * cos(theta)
        newy = self.y + d * sin(theta)
        newtheta = (theta + beta) % (2 * pi)
    else:
        R = d / beta
        cx = self.x - R * sin(theta)
        cy = self.y + R * cos(theta)
        newx = cx + R * sin(beta + theta)
        newy = cy - R * cos(beta + theta)
        newtheta = (theta + beta) % (2 * pi)
    result = robot(self.length)
    result.set(newx, newy, newtheta)
    return result  # make sure your move function returns an instance
def __init__(self, world, space, pos, color, capsules, radius, mass=2, fixed=False,
             orientation=v(1, 0, 0, 0)):
    "capsules is a list of (start, end) points"
    self.capsules = capsules
    self.body = ode.Body(world)
    self.body.setPosition(pos)
    self.body.setQuaternion(orientation)
    m = ode.Mass()
    # computing MOI assuming sphere with .5 m radius
    m.setSphere(mass / (4.0 / 3.0 * math.pi * .5**3), .5)  # setSphereTotal is broken
    self.body.setMass(m)
    self.geoms = []
    self.geoms2 = []
    for start, end in capsules:
        self.geoms.append(ode.GeomTransform(space))
        x = ode.GeomCapsule(None, radius, (end - start).mag())
        self.geoms2.append(x)
        self.geoms[-1].setGeom(x)
        self.geoms[-1].setBody(self.body)
        x.setPosition((start + end) / 2 + v(random.gauss(0, .01),
                                            random.gauss(0, .01),
                                            random.gauss(0, .01)))
        a = (end - start).unit()
        b = v(0, 0, 1)
        x.setQuaternion(sim_math_helpers.axisangle_to_quat((a % b).unit(), -math.acos(a * b)))
    self.color = color
    self.radius = radius
    if fixed:
        self.joint = ode.FixedJoint(world)
        self.joint.attach(self.body, None)
        self.joint.setFixed()
def move(self, motion):
    # obtain steering angle and distance forward
    steering = random.gauss(motion[0], self.steering_noise)
    distance = random.gauss(motion[1], self.distance_noise)
    # compute turning angle
    turning_angle = distance / self.length * tan(steering)
    if abs(turning_angle) < 0.001:
        # approximate by straight line motion
        new_x = self.x + (distance * cos(self.orientation))
        new_y = self.y + (distance * sin(self.orientation))
        new_orientation = (self.orientation + turning_angle) % (2 * pi)
    else:
        # compute radius and center of the circular path
        R = distance / turning_angle
        cx = self.x - (R * sin(self.orientation))
        cy = self.y + (R * cos(self.orientation))
        new_x = cx + (R * sin(self.orientation + turning_angle))
        new_y = cy - (R * cos(self.orientation + turning_angle))
        new_orientation = (self.orientation + turning_angle) % (2 * pi)
    # copy values to the new robot
    result = robot()
    result.length = self.length
    result.set(new_x, new_y, new_orientation)
    result.set_noise(self.bearing_noise, self.steering_noise, self.distance_noise)
    return result
def initial(average):
    a = [0 for i in range(2 * NP + 2)]
    for i in range(0, 2 * NP):
        a[i] = average[i] + random.gauss(0.0, 0.1)
    for i in range(2):
        a[2 * NP + i] = 2.8 + random.gauss(0.0, 0.2)
    return a
def resample_particles(self):
    """ Resample the particles according to the new particle weights.
        The weights stored with each particle should define the probability that a
        particular particle is selected in the resampling step.  You may want to make
        use of the given helper function draw_random_sample.
    """
    # First of all, normalize the particles
    self.normalize_particles()
    # Create an empty numpy array the size of the number of particles
    values = np.empty(self.n_particles)
    # Fill that array with the particle indices
    for i in range(self.n_particles):
        values[i] = i
    # Create a list for the new particles
    new_particles = []
    # Create a list with the indices of the most probable particles
    random_particles = ParticleFilter.weighted_values(values,
                                                      [p.w for p in self.particle_cloud],
                                                      self.n_particles)
    for i in random_particles:
        # Cast i to int to work around a float-index bug
        int_i = int(i)
        # Take the particle at position i in the particle cloud
        p = self.particle_cloud[int_i]
        # Add particles, each perturbed by a Gaussian sample with mean 0 and std dev 0.025
        new_particles.append(Particle(x=p.x + gauss(0, .025),
                                      y=p.y + gauss(0, .025),
                                      theta=p.theta + gauss(0, .025)))
    # Replace the particle cloud with the newly created sample
    self.particle_cloud = new_particles
    # Normalize the particles one more time
    self.normalize_particles()
def update(self, velX, velY):
    # Don't update after a crash
    # if self.crashed:
    #     return
    velY -= gravity
    # Update true position
    # True velocity is intentional accel + random force accel
    self.posX += self.velX + random.gauss(0, self.randomForce)
    self.posY += self.velY + random.gauss(0, self.randomForce)
    self.velX = velX
    self.velY = velY
    sensorX = self.posX + random.gauss(0, self.sensorNoise)
    sensorY = self.posY + random.gauss(0, self.sensorNoise)
    # update naive estimated position
    self.posXEst = sensorX
    self.posYEst = sensorY
    self.kalman(timeUpdate, sensorX, sensorY)
    # Crash if we hit the ground
    if self.posY < 0:
        self.crashed = True
    self.logGT.log(self.posX, self.posY)
    self.logEst.log(self.posXEst, self.posYEst)
    self.logKal.log(self.x[0], self.x[1])
    self.time += 1
    self.printValues()
def move(self, motion):  # Do not change the name of this function
    theta = self.orientation
    alpha = float(motion[0]) + random.gauss(0.0, self.steering_noise)
    alpha %= 2.0 * pi
    d = float(motion[1]) + random.gauss(0.0, self.distance_noise)
    beta = (d / self.length) * tan(alpha)
    if abs(beta) < 0.001:
        xnew = self.x + d * cos(theta)
        ynew = self.y + d * sin(theta)
    else:
        R = d / beta
        cx = self.x - sin(theta) * R
        cy = self.y + cos(theta) * R
        xnew = cx + sin(theta + beta) * R
        ynew = cy - cos(theta + beta) * R
    theta_new = (theta + beta) % (2 * pi)
    result = robot(self.length)
    result.set(xnew, ynew, theta_new)
    result.set_noise(self.bearing_noise, self.steering_noise, self.distance_noise)
    return result  # make sure your move function returns an instance
def __init__(self, name="anonymous goblin", **kwargs):
    """creates a new goblin instance

    every attribute can be overwritten with an argument like
        g = Goblin(attack = 33.2)
    this will overwrite the random self.attack attribute with 33.2
    """
    self.name = name
    self.attack = random.gauss(Config.attack, 2)  # float values
    self.defense = random.gauss(Config.defense, 2)
    # always create a goblin with twice the "normal" hitpoints
    # to make him cost real money
    self.hitpoints = random.gauss(Config.hitpoints * 2, 3)
    self.fullhealth = self.hitpoints
    self.defense_penalty = 0  # integer value
    self.sleep = False  # boolean
    # statistics
    self.damage_dealt = 0
    self.damage_received = 0
    self.victory = 0  # over all rounds
    self.streak = 0  # victories in this combat
    self.lastround = 0  # number of the combat round where the goblin lost
    self.lost = 0
    self.fights = 0
    # overwrite attributes if keywords were passed as arguments
    for key in kwargs:
        self.__setattr__(key, kwargs[key])
    # but do not mess around with number
    self.number = Goblin.number  # access class attribute
    Goblin.number += 1  # prepare class attribute for next goblin
    # calculate value based on averages described in class Config
    self.value = self.calculate_value()
def old_jpq_mutated(self, indiv, pop):
    """ mutate some genes of the given individual """
    res = indiv.copy()
    # to avoid having a child identical to one of the current population
    in_pop = self.childexist(indiv, pop)
    for i in range(self.numParameters):
        if random() < self.mutationProb:
            res[i] = max(min(indiv[i] + gauss(0, self.mutationStdDev), self.maxs[i]),
                         self.mins[i])
        if random() < self.mutationProb or in_pop:
            if self.xBound is None:
                res[i] = indiv[i] + gauss(0, self.mutationStdDev)
            else:
                if in_pop:
                    cmin = abs(indiv[i] - self.mins[i]) / (self.maxs[i] - self.mins[i])
                    cmax = abs(indiv[i] - self.maxs[i]) / (self.maxs[i] - self.mins[i])
                    if cmin < 1.e-7 or cmax < 1.e-7:
                        res[i] = self.mins[i] + random() * random() * (self.maxs[i] - self.mins[i])
                    else:
                        res[i] = max(min(indiv[i] + gauss(0, self.mutationStdDev), self.maxs[i]),
                                     self.mins[i])
                else:
                    res[i] = max(min(indiv[i] + gauss(0, self.mutationStdDev), self.maxs[i]),
                                 self.mins[i])
    return res
def move(self, motion):  # Do not change the name of this function
    steering = motion[0]
    dist = motion[1]
    # build a new robot instead of mutating self in place
    result = robot()
    result.length = self.length
    result.set(self.x, self.y, self.orientation)
    result.set_noise(self.bearing_noise, self.steering_noise, self.distance_noise)
    # add noise
    steering2 = random.gauss(steering, result.steering_noise)
    dist2 = random.gauss(dist, result.distance_noise)
    turn = tan(steering2) * dist2 / result.length
    tolerance = 0.001
    if abs(turn) < tolerance:
        # approximate by straight line motion
        result.x += (cos(result.orientation) * dist2)
        result.y += (sin(result.orientation) * dist2)
        result.orientation = (result.orientation + turn) % (2.0 * pi)
    else:
        # bicycle motion
        radius = dist2 / turn
        cx = result.x - (sin(result.orientation) * radius)
        cy = result.y + (cos(result.orientation) * radius)
        result.orientation = (result.orientation + turn) % (2.0 * pi)
        result.x = cx + sin(result.orientation) * radius
        result.y = cy - cos(result.orientation) * radius
    return result  # make sure your move function returns an instance
def move(self, motion):  # Do not change the name of this function
    theta = self.orientation
    length = self.length
    x = self.x
    y = self.y
    alpha, d = motion
    alphar = random.gauss(alpha, self.steering_noise)
    dr = random.gauss(d, self.distance_noise)
    alpha = alphar
    d = dr
    beta = (d / length) * tan(alpha)
    if abs(beta) > 0.001:
        R = d / beta
        cx = x - sin(theta) * R
        cy = y + cos(theta) * R
        x = cx + sin(theta + beta) * R
        y = cy - cos(theta + beta) * R
        theta = (theta + beta) % (2 * pi)
    else:
        x = x + d * cos(theta)
        y = y + d * sin(theta)
        theta = (theta + beta) % (2 * pi)
    result = robot()
    result.set_noise(self.bearing_noise, self.steering_noise, self.distance_noise)
    result.set(x, y, theta)
    return result  # make sure your move function returns an instance
def modify(self):
    data_vars = []
    if not self._2D:
        data_vars.append('roll')
        data_vars.append('pitch')
    data_vars.append('yaw')

    # generate a gaussian noise rotation vector
    rot_vec = Vector((0.0, 0.0, 0.0))
    for i in range(0, 3):
        if data_vars[i] in self._rot_std_dev:
            rot_vec[i] = random.gauss(rot_vec[i], self._rot_std_dev[data_vars[i]])

    # convert rotation vector to a quaternion representing the random rotation
    angle = rot_vec.length
    if angle > 0:
        axis = rot_vec / angle
        noise_quat = Quaternion(axis, angle)
    else:
        noise_quat = Quaternion()
        noise_quat.identity()

    try:
        self.data['orientation'] = (noise_quat * self.data['orientation']).normalized()
    except KeyError:
        # for eulers this is a bit crude, maybe should use the noise_quat here as well...
        for var in data_vars:
            if var in self.data and var in self._rot_std_dev:
                self.data[var] = random.gauss(self.data[var], self._rot_std_dev[var])
def move(self, motion):  # Do not change the name of this function
    # ADD CODE HERE
    if motion[1] < 0:
        raise ValueError('Robot cannot move backwards')
    # move, and add randomness to the motion command
    dist = float(motion[1]) + random.gauss(0.0, self.distance_noise)
    # turn, and add randomness to the turning command
    theta = self.orientation + random.gauss(0.0, self.bearing_noise)
    alpha = float(motion[0]) + random.gauss(0.0, self.steering_noise)
    alpha %= 2 * pi
    beta = tan(alpha) * dist / float(self.length)
    if abs(beta) < 0.001:
        x = self.x + dist * cos(theta)
        y = self.y + dist * sin(theta)
        theta = theta + beta
    else:
        r = dist / beta
        cx = self.x - r * sin(theta)
        cy = self.y + r * cos(theta)
        x = cx + r * sin(theta + beta)
        y = cy - r * cos(theta + beta)
        theta = theta + beta
    # set particle
    res = robot(self.length)
    res.set(x, y, theta)
    res.set_noise(self.bearing_noise, self.steering_noise, self.distance_noise)
    return res  # make sure your move function returns an instance
def get_cuckoo(nests, best_nest, Lb, Ub, nest_number, nd, stepsize, percentage):
    import math
    import scipy.special
    import random
    # Mantegna's Algorithm
    alpha = 1.5  # flexible parameter but this works well. Also need to plug in decimal form
    sigma = (scipy.special.gamma(1 + alpha) * math.sin(math.pi * alpha / 2) /
             (scipy.special.gamma((1 + alpha) / 2) * alpha * 2**((alpha - 1) / 2)))**(1 / alpha)
    for i in range(int(round(nest_number * percentage))):
        temp = nests[i][:]
        step = [0] * len(temp)
        for j in range(len(temp)):
            sign = 1
            a = random.gauss(0, 1) * sigma
            b = random.gauss(0, 1)
            if a < 0:
                sign = -1
            step[j] = sign * stepsize[j] * ((abs(a) / abs(b))**(1 / alpha)) * (temp[j] - best_nest[j])
            temp[j] = round(temp[j] + step[j] * random.gauss(0, 1), 3)
            # check to see if new solution is within bounds
            if temp[j] <= Lb[j]:
                temp[j] = Lb[j]
            elif temp[j] >= Ub[j]:
                temp[j] = Ub[j]
            # !!! we need the second parameter to be larger than the first. If conditions like
            # !!! these are necessary, change this section to the needed conditions.
            # !!! Otherwise remove or comment out.
            if j == 1 and temp[j] < temp[0]:
                coin = random.randint(0, 1)
                if coin == 0:
                    temp[0] = round(random.uniform(Lb[0], temp[j]), 3)
                else:
                    temp[j] = round(random.uniform(temp[0], Ub[j]), 3)
        nests[i][:] = temp
    return nests
def logPokemonDb(p):
    pokemon_id = int(p['pokemon_data']['pokemon_id'])
    pokemon_name = get_pokemon_name(str(pokemon_id)).lower().encode('ascii', 'ignore')
    last_modified_time = int(p['last_modified_timestamp_ms'])
    time_until_hidden_ms = int(p['time_till_hidden_ms'])
    hidden_time_unix_s = int((p['last_modified_timestamp_ms'] + p['time_till_hidden_ms']) / 1000.0)
    hidden_time_utc = datetime.utcfromtimestamp(hidden_time_unix_s)
    encounter_id = str(p['encounter_id'])
    spawnpoint_id = str(p['spawn_point_id'])
    longitude = float(p['longitude'])
    latitude = float(p['latitude'])
    longitude_jittered = longitude + (random.gauss(0, 0.3) - 0.5) * 0.0005
    latitude_jittered = latitude + (random.gauss(0, 0.3) - 0.5) * 0.0005
    # query = "INSERT INTO spotted_pokemon (name, encounter_id, last_modified_time, time_until_hidden_ms, hidden_time_unix_s, hidden_time_utc, spawnpoint_id, longitude, latitude, pokemon_id, longitude_jittered, latitude_jittered) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"
    query = "INSERT INTO spotted_pokemon (name, encounter_id, last_modified_time, time_until_hidden_ms, hidden_time_unix_s, hidden_time_utc, spawnpoint_id, longitude, latitude, pokemon_id, longitude_jittered, latitude_jittered) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (encounter_id) DO UPDATE SET last_modified_time = EXCLUDED.last_modified_time, time_until_hidden_ms = EXCLUDED.time_until_hidden_ms, hidden_time_unix_s = EXCLUDED.hidden_time_unix_s, hidden_time_utc = EXCLUDED.hidden_time_utc;"
    data = (pokemon_name, encounter_id, last_modified_time, time_until_hidden_ms,
            hidden_time_unix_s, hidden_time_utc, spawnpoint_id, longitude, latitude,
            pokemon_id, longitude_jittered, latitude_jittered)
    try:
        cursor.execute(query, data)
    except Exception as e:
        log.error('Postgresql error (%s)', str(e))
def move(self, motion):  # Do not change the name of this function
    alpha = random.gauss(motion[0], sqrt(self.steering_noise))
    d = random.gauss(motion[1], sqrt(self.distance_noise))
    beta = (d / self.length) * tan(alpha)
    if abs(beta) >= 0.001:
        R = d / beta
        cx = self.x - sin(self.orientation) * R
        cy = self.y + cos(self.orientation) * R
        x = cx + sin(self.orientation + beta) * R
        y = cy - cos(self.orientation + beta) * R
        orientation = (self.orientation + beta) % (2 * pi)
    else:
        x = self.x + d * cos(self.orientation)
        y = self.y + d * sin(self.orientation)
        orientation = (self.orientation + beta) % (2 * pi)
    result = robot(self.length)
    result.set(x, y, orientation)
    result.set_noise(self.bearing_noise, self.steering_noise, self.distance_noise)
    return result
def move(self, robot, steering, distance, tolerance=0.001, max_steering_angle=pi / 4.0):
    if steering > max_steering_angle:
        steering = max_steering_angle
    if steering < -max_steering_angle:
        steering = -max_steering_angle
    if distance < 0.0:
        distance = 0.0
    # apply noise
    steering2 = random.gauss(steering, self.steering_noise)
    distance2 = random.gauss(distance, self.distance_noise)
    # Execute motion
    turn = tan(steering2) * distance2 / self.length
    if abs(turn) < tolerance:
        # approximate by straight line motion
        x = robot.pos.x + (distance2 * cos(robot.orientation))
        y = robot.pos.y + (distance2 * sin(robot.orientation))
        orientation = (robot.orientation + turn) % (2.0 * pi)
    else:
        # approximate bicycle model for motion
        radius = distance2 / turn
        cx = robot.pos.x - (sin(robot.orientation) * radius)
        cy = robot.pos.y + (cos(robot.orientation) * radius)
        orientation = (robot.orientation + turn) % (2.0 * pi)
        x = cx + (sin(orientation) * radius)
        y = cy - (cos(orientation) * radius)
    return (x, y, orientation)
def move(self, motion, tol=0.001):  # Do not change the name of this function
    result = robot()
    steerangle = motion[0]
    dist = motion[1]
    result.length = self.length
    result.bearing_noise = self.bearing_noise
    result.steering_noise = self.steering_noise
    steerangle += random.gauss(0.0, self.steering_noise)
    steerangle %= (2 * pi)
    result.distance_noise = self.distance_noise
    dist += random.gauss(0.0, self.distance_noise)
    beta = dist / self.length * tan(steerangle)
    if abs(beta) <= tol:
        result.x = self.x + dist * cos(self.orientation)
        result.y = self.y + dist * sin(self.orientation)
        result.orientation = (self.orientation + beta) % (2.0 * pi)
    else:
        R = dist / beta
        cx = self.x - R * sin(self.orientation)
        cy = self.y + R * cos(self.orientation)
        result.orientation = (self.orientation + beta) % (2 * pi)
        result.x = cx + sin(beta + self.orientation) * R
        result.y = cy - cos(beta + self.orientation) * R
    return result  # make sure your move function returns an instance
def compute_random_cut_off(self):
    desired_v_removed = int(gauss(len(self.V) / 2, len(self.V) / 6))
    while desired_v_removed >= len(self.V) - self.threshold or desired_v_removed < self.threshold:
        desired_v_removed = int(gauss(len(self.V) / 2, len(self.V) / 6))
    ratio = 1
    estimate_block_size = int(((len(self.L) - self.received_count) / (len(self.V) - self.threshold)) * ratio)
    return estimate_block_size * desired_v_removed + randint(0, estimate_block_size) - self.received_count
def GaussLk(self, newLkFile, sigma):
    fout = open(newLkFile, "w")
    fout.write("# input data : %s\n" % "+ Gaussian error")
    percentU = percentQ = 0.
    for lineStr, f1, f2 in zip(self.LeakList[0].lineStr, self.LeakList[0].f1, self.LeakList[0].f2):
        print lineStr
        fout.write("#\n")
        for ant in range(1, 16):
            DRlist = []
            DLlist = []
            for Lk in self.LeakList:
                if Lk.ant == ant:
                    for lineStr1, DR1, DL1 in zip(Lk.lineStr, Lk.DR, Lk.DL):
                        if (lineStr1 == lineStr) and (abs(DR1) > 0.) and (abs(DL1) > 0.):
                            DRlist.append(DR1)
                            DLlist.append(DL1)
                            print "... ant %d - appending data from %s" % (ant, Lk.legend)
            if len(DRlist) > 0:
                DRmean = numpy.mean(DRlist)
                DLmean = numpy.mean(DLlist)
                DRnew = random.gauss(numpy.real(DRmean), sigma) + random.gauss(numpy.imag(DRmean), sigma) * 1j
                DLnew = random.gauss(numpy.real(DLmean), sigma) + random.gauss(numpy.imag(DLmean), sigma) * 1j
            else:
                DRnew = 0. + 0j
                DLnew = 0. + 0j
            print ant, DRnew, DLnew
            fout.write("C%02d %8.3f %8.3f %8.3f %6.3f %8.3f %6.3f %8.3f %6.3f %s\n" % \
                       (ant, f1, f2, DRnew.real, DRnew.imag, DLnew.real,
                        DLnew.imag, percentQ, percentU, lineStr))
    fout.close()
def move(self, motion):
    a = random.gauss(motion[0], self.steering_noise)
    d = random.gauss(motion[1], self.distance_noise)
    b = (d * tan(a)) / self.length
    new_orientation = (b + self.orientation) % (2 * pi)
    if abs(b) > 0.001:
        r = d / b
        cx = self.x - r * sin(self.orientation)
        cy = self.y + r * cos(self.orientation)
        new_x = cx + r * sin(new_orientation)
        new_y = cy - r * cos(new_orientation)
    else:
        new_x = self.x + d * cos(new_orientation)
        new_y = self.y + d * sin(new_orientation)
    res = robot()
    res.x = new_x
    res.y = new_y
    res.orientation = new_orientation
    res.length = self.length
    res.bearing_noise = self.bearing_noise
    res.steering_noise = self.steering_noise
    res.distance_noise = self.distance_noise
    return res
import os
from collections import defaultdict
import sys
import math
import json
import random
import collections

# random.seed(777)

weight = {}
bias = {}
sOperation = ["shift", "reduce left", "reduce right"]
for sOp in sOperation:
    weight[sOp] = []
    bias[sOp] = random.gauss(0, 1)

i_phi = {}
pathInput = "../../data/mstparser-en-train.dep"
pathModel = "model.json"
dElements = {}
tdict = {}
tdict["a"] = 0
for i, j in tdict.items():
    print(i, j)


class cElement:
    def __init__(self, index, word, POS, head, label):
        self.index = index
        self.word = word
        self.POS = POS
def sense(self): return [ random.gauss(self.x, self.measurement_noise), random.gauss(self.y, self.measurement_noise) ]
def lombScargle(frequencyRange,objectmag=20,loopNo=looooops,df=0.001,fmin=0.001,numsteps=100000,modulationAmplitude=0.1,Nquist=200): # frequency range and object mag in list #global totperiod, totmperiod, totpower, date, amplitude, frequency, periods, LSperiod, power, mag, error, SigLevel results = {} totperiod = [] totmperiod = [] totpower = [] # reset SigLevel = [] filterletter = ['o','u','g','r','i','z','y'] period = 1/(frequencyRange) if period > 0.5: numsteps = 10000 elif period > 0.01: numsteps = 100000 else: numsteps = 200000 freqs = fmin + df * np.arange(numsteps) # for manuel allobsy, uobsy, gobsy, robsy, iobsy, zobsy, yobsy = [], [], [], [], [], [], [] #reset measuredpower = [] # reset y = [allobsy, uobsy, gobsy, robsy, iobsy, zobsy, yobsy] # for looping only for z in range(1, len(y)): #y[z] = averageFlux(obs[z], frequencyRange[frange], 30) # amplitde calculation for observations, anf frequency range y[z] = ellipsoidalFlux(obs[z], frequencyRange,30) y[z] = [modulationAmplitude * t for t in y[z]] # scaling for G in range(0, len(y[z])): flareMinute = int(round((obs[z][G]*24*60*2)%((dayinsec/(30*2))*flarecycles))) y[z][G] = y[z][G] + longflare[flareMinute] # add flares swapped to second but not changing the name intrtoduces fewer bugs date = [] amplitude = [] mag = [] error = [] filts = [] for z in range(1, len(y)): if objectmag[z] > sat[z] and objectmag[z] < lim[z]: #date.extend([x for x in obs[z]]) date.extend(obs[z]) amplitude = [t + random.gauss(0,magUncertainy(zeroPoints[z],objectmag[z],30,background,FWHMeff[z])) for t in y[z]] # scale amplitude and add poisson noise mag.extend([objectmag[z] - t for t in amplitude]) # add actual mag error.extend([sigSys + magUncertainy(zeroPoints[z],objectmag[z],30,background,FWHMeff[z])+0.2]*len(amplitude)) filts.extend([filterletter[z]]*len(amplitude)) phase = [(day % (period*2))/(period*2) for day in obs[z]] pmag = [objectmag[z] - t for t in amplitude] # plt.plot(phase, pmag, 'o', markersize=4) # plt.xlabel('Phase') # plt.ylabel('Magnitude') # plt.gca().invert_yaxis() # plt.title('filter'+str(z)+', Period = '+str(period))#+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20))) # plt.show() # plt.plot(date, mag, 'o') # plt.xlim(lower,higher) # plt.xlabel('time (days)') # plt.ylabel('mag') # plt.gca().invert_yaxis() # plt.show() model = periodic.LombScargleMultibandFast(fit_period=False) model.fit(date, mag, error, filts) power = model.score_frequency_grid(fmin, df, numsteps) if period > 10.: model.optimizer.period_range=(10, 110) elif period > 0.51: model.optimizer.period_range=(0.5, 10) elif period > 0.011: model.optimizer.period_range=(0.01, 0.52) else: model.optimizer.period_range=(0.0029, 0.012) LSperiod = model.best_period if period < 10: higher = 10 else: higher = 100 # fig, ax = plt.subplots() # ax.plot(1./freqs, power) # ax.set(xlim=(0, higher), ylim=(0, 1.2), # xlabel='period (days)', # ylabel='Lomb-Scargle Power', # title='Period = '+str(period)+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20))); # plt.show() phase = [(day % (period*2))/(period*2) for day in date] #idealphase = [(day % (period*2))/(period*2) for day in dayZ] #print(len(phase),len(idealphase)) #plt.plot(idealphase,Zmag,'ko',) # plt.plot(phase, mag, 'o', markersize=4) # plt.xlabel('Phase') # plt.ylabel('Magnitude') # plt.gca().invert_yaxis() # plt.title('Period = '+str(period)+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20))) # plt.show() #print(period, LSperiod, period*20) # print('actualperiod', period, 
'measured period', np.mean(LSperiod),power.max())# 'power',np.mean(power[maxpos])) # print(frequencyRange[frange], 'z', z) # totperiod.append(period) # totmperiod.append(np.mean(LSperiod)) # totpower.append(power.max()) mpower = power.max() measuredpower.append(power.max()) # should this correspond to period power and not max power? maxpower = [] counter = 0. for loop in range(0,loopNo): random.shuffle(date) model = periodic.LombScargleMultibandFast(fit_period=False) model.fit(date, mag, error, filts) power = model.score_frequency_grid(fmin, df, numsteps) maxpower.append(power.max()) for X in range(0, len(maxpower)): if maxpower[X] > measuredpower[-1]: counter = counter + 1. Significance = (1.-(counter/len(maxpower))) #print('sig', Significance, 'counter', counter) SigLevel.append(Significance) #freqnumber = FrangeLoop.index(frequencyRange) #magnumber = MagRange.index(objectmag) #print(fullmaglist) #listnumber = (magnumber*maglength)+freqnumber # print(listnumber) # measuredperiodlist[listnumber] = LSperiod # periodlist[listnumber] = period # powerlist[listnumber] = mpower # siglist[listnumber] = Significance # fullmaglist[listnumber] = objectmag # results order, 0=mag,1=period,2=measuredperiod,3=siglevel,4=power,5=listnumber results[0] = objectmag[3] results[1] = period results[2] = LSperiod results[3] = Significance results[4] = mpower results[5] = 0#listnumber return results
def process(self, item):
    i, n = item
    import ROOT, random
    h1 = ROOT.TH1F('h%d' % i, '', 100, 0, 10)
    for i in range(n):
        h1.Fill(random.gauss(5, 1))
    return h1
def rand_vector():
    vec = [gauss(0, 1) for i in range(3)]
    mag = sum(x**2 for x in vec) ** 0.5
    return [x / mag for x in vec]
def Mutate(self):
    geneToMutate = random.randint(0, 3)
    self.genome[geneToMutate] = random.gauss(geneToMutate, math.fabs(geneToMutate))
#! /usr/bin/env python2
"""
histogram.py: A program to print a histogram of randomly generated numbers
"""
__author__ = "Seshagiri Prabhu"
__copyright__ = "MIT License"

import random

if __name__ == "__main__":
    """ Main function """
    # Generates random numbers
    gen_numbers = []
    for x in xrange(1001):
        gen_numbers.append(int(random.gauss(5, 2)))

    # Finds the frequency of numbers
    normalized = dict((i, gen_numbers.count(i)) for i in gen_numbers)

    # Prints histogram
    for key, value in normalized.iteritems():
        if key > -1 and key < 11:
            print str(key) + ":\t" + (value / 10) * "*"
def data_augmentation(self):
    # Dataset augmentation: compute the mean and variance of the positive and negative
    # samples and add Gaussian noise to them.
    # Gaussian noise
    label = "host"
    data = self.get_dataset(cond={'label': label})
    avg = list()
    std = list()
    veclen = len(data[0]['vec'])
    sampleSize = len(data)
    for j in range(0, veclen):
        avg.append(0)
        for i in range(0, sampleSize):
            avg[j] += data[i]['vec'][j]
    for j in range(0, veclen):
        avg[j] /= sampleSize
    for j in range(0, veclen):
        std.append(0)
        for i in range(0, sampleSize):
            std[j] += (data[i]['vec'][j] - avg[j]) ** 2
    for j in range(0, veclen):
        std[j] /= (0.000001 + sampleSize - 1)
        std[j] = std[j] ** 0.5 + 0.000001
    for each in data:
        for j in range(0, veclen):
            # each['vec'][j] = (each['vec'][j] - avg[j]) / std[j]
            each['vec'][j] = 0.95 * each['vec'][j] + 0.05 * random.gauss(avg[j], std[j])
        each.setdefault('tag', 'data_augmentation')
        each.pop('_id')
    self.insert(data)

    label = "nat"
    data = self.get_dataset(cond={'label': label})
    nat_data = data.copy()
    avg = []
    std = []
    veclen = len(data[0]['vec'])
    sampleSize = len(data)
    for j in range(0, veclen):
        avg.append(0)
        for i in range(0, sampleSize):
            avg[j] += data[i]['vec'][j]
    for j in range(0, veclen):
        avg[j] /= sampleSize
    for j in range(0, veclen):
        std.append(0)
        for i in range(0, sampleSize):
            std[j] += (data[i]['vec'][j] - avg[j]) ** 2
    for j in range(0, veclen):
        std[j] /= (0.000001 + sampleSize - 1)
        std[j] = std[j] ** 0.5 + 0.000001
    for each in data:
        for j in range(0, veclen):
            # each['vec'][j] = (each['vec'][j] - avg[j]) / std[j]
            each['vec'][j] = 0.95 * each['vec'][j] + 0.05 * random.gauss(avg[j], std[j])
        each.setdefault('tag', 'data_augmentation_gauss')
        each.pop('_id')
    self.insert(data)

    # Set the TTL count of some NAT samples to 1, because all hosts in the same LAN may
    # indeed run the same OS. Keep the proportion at about 10%.
    for each in nat_data:
        p = random.uniform(0, 1)
        if p < 0.1 and each['vec'][-2] != 1:
            each['vec'][-2] = 1
            each.setdefault('tag', 'data_augmentation_nat_synthes')
            each.pop('_id')
            self.insert(each)
train_data = np.delete(train_data, -1, 1)
train_label = np.array(uni[:, -1]).reshape((train_data.shape[0], 1))
train_label[train_label == 0] = -1

no_train_sample = train_data.shape[0]
no_variable = train_data.shape[1] + 1

c = [0] * (no_variable * 2) + [-1] * no_train_sample + [0] * no_train_sample
b = [-1] * no_train_sample
U = np.diag([-1] * no_train_sample)
S = np.diag([1] * no_train_sample)
A = np.zeros((no_train_sample, no_variable * 2))
one = np.array([random.gauss(1, 0.1) for i in range(no_train_sample)]).reshape((no_train_sample, 1))
Z = train_label * np.concatenate((train_data, one), axis=1)
for i in range(no_variable):
    A[:, 2 * i] = Z[:, i]
    A[:, 2 * i + 1] = -1 * Z[:, i]
A = np.concatenate((A, U), axis=1)
A = np.concatenate((A, S), axis=1)

# In[24]:

# ==============================================================================
# import numpy as np
#
# train_data = np.load("dataset\DB_Vecs.npy")
# train_label = np.load("dataset\DB_Labels.npy")
def get_random_unit_vector(self):
    v = np.array([random.gauss(0, 1) for _ in range(self.n)])
    return v / np.linalg.norm(v)
def gen(self): return random.gauss(0, 1)
def add_noise(data_point): return random.gauss(data_point, 1)
matrx_all_X.append([a11**2, 2*a11*a12, a12**2])

# =========================================
#  Only x-axis analysis
# =========================================
sigma_rel_err = 0.05
n_ws = len(matrx_all_X)
mMatrX = Matrix(matrx_all_X, n_ws, 3)
sigma2Vector = Matrix(n_ws, 1)
weightM = Matrix.identity(n_ws, n_ws)
for ind in range(n_ws):
    sigma = sizes_X_arr[ind] * (1.0 + random.gauss(0., sigma_rel_err))
    sigma2Vector.set(ind, 0, sigma**2)
    err2 = (2 * sigma * sigma * sigma_rel_err)**2
    weightM.set(ind, ind, 1.0 / err2)
# === mwmMatr = (M^T*W*M) =======
mwmMatr = ((mMatrX.transpose()).times(weightM)).times(mMatrX)
# === corr2ValMatr = [(M^T*W*M)^-1] * M^T * W * Vector(sigma**2)
corr2ValMatr = (((mwmMatr.inverse()).times(mMatrX.transpose())).times(weightM)).times(sigma2Vector)
corr2ErrMatr = mwmMatr.inverse()
print "========================================"
print "<x^2>  = ", "%12.5e" % corr2ValMatr.get(0, 0), " +- ", "%12.5e" % math.sqrt(abs(corr2ErrMatr.get(0, 0)))
print "<x*x'> = ", "%12.5e" % corr2ValMatr.get(1, 0), " +- ", "%12.5e" % math.sqrt(abs(corr2ErrMatr.get(1, 1)))
def generate_turbulence(self, wind_level):
    self.tiltAngle += gauss(0.75, 1.0 * wind_level)
    logging.info('Tilt after turbulence {}'.format(self.tiltAngle))
def make_rand_vector(dims):
    vec = [gauss(0, 1) for i in range(dims)]
    mag = sum(x**2 for x in vec) ** 0.5
    return [x / mag for x in vec]
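# Hedged usage sketch (not from the original source): drawing each component from a
# standard normal and normalizing yields a direction uniformly distributed on the unit
# sphere, so the result should always have (numerically) unit length.
from random import gauss  # make_rand_vector above assumes gauss is imported like this

v = make_rand_vector(3)
assert abs(sum(c * c for c in v) - 1.0) < 1e-9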
def resfit(self): """ implements logic of resfit.pro for this resonator """ rfp = self.resFitPrep() x = rfp['functkw']['x'] y = rfp['functkw']['y'] err = rfp['functkw']['err'] p = rfp['p0'] parinfo = rfp['parinfo'] functkw = rfp['functkw'] m = mpfit(Resonator.resDiffLin, p, parinfo=parinfo, functkw=functkw, quiet=1) rdlFit = Resonator.resDiffLin(m.params, fjac=None, x=x, y=y, err=err) bestChi2 = np.power(rdlFit[1], 2).sum() bestIter = 0 parold = p.copy() random.seed(y.sum()) bestPar = p.copy() bestM = m for k in range(1, 12): parnew = parold.copy() parnew[0] = 20000 + 30000.0 * random.random() parnew[1] = parold[1] + 5000 * random.gauss(0.0, 1.0) parnew[2] = parold[2] + 0.2 * parold[2] * random.gauss(0.0, 1.0) parnew[3] = parold[3] + 0.2 * parold[3] * random.gauss(0.0, 1.0) parnew[4] = parold[4] + 5.0 * parold[4] * random.gauss(0.0, 1.0) parnew[5] = parold[5] + 0.2 * parold[5] * random.gauss(0.0, 1.0) parnew[6] = parold[6] + 0.5 * parold[6] * random.gauss(0.0, 1.0) parnew[7] = parold[7] + 0.5 * parold[7] * random.gauss(0.0, 1.0) parnew[8] = parold[8] + 0.5 * parold[8] * random.gauss(0.0, 1.0) parnew[9] = parold[9] + 0.5 * parold[9] * random.gauss(0.0, 1.0) m = mpfit(Resonator.resDiffLin, parnew, parinfo=parinfo, functkw=functkw, quiet=1) rdlFit = Resonator.resDiffLin(m.params, fjac=None, x=x, y=y, err=err) thisChi2 = np.power(rdlFit[1], 2).sum() if m.status > 0 and thisChi2 < bestChi2: bestIter = k bestChi2 = thisChi2 bestM = m p = bestM.params yFit = Resonator.resModel(x, bestM.params) ndf = len(x) - len(bestM.params) # size of loop from fit radius = (p[6] + p[7]) / 4.0 # normalized diamter of the loop (off resonance = 1) diam = (2.0 * radius) / (math.sqrt(p[8]**2 + p[9]**2) + radius) Qc = p[0] / diam Qi = p[0] / (1.0 - diam) dip = 1.0 - diam try: dipdb = 20.0 * math.log10(dip) except ValueError: dipdb = -99 chi2Mazin = math.sqrt(bestChi2 / ndf) return { "m": bestM, "x": x, "y": y, "yFit": yFit, "chi2": bestChi2, "ndf": ndf, "Q": p[0], "f0": p[1] / 1e9, "Qc": Qc, "Qi": Qi, "dipdb": dipdb, "chi2Mazin": chi2Mazin, "dip": dip }
# -*- coding: utf-8 -*-
# Generate 200 data points with mean mu = 0 and standard deviation sigma = 0.7,
# and save them to a file.
import random

filename = "random200.dat"
T = 200
mu = 0.0
sigma = 0.7

# random.seed( 20131107 )

f = open(filename, 'w')
for i in range(T):
    f.write("%f\n" % random.gauss(mu, sigma))
f.close()
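# Follow-up sketch (not part of the original script): read the file back and check that
# the sample mean and standard deviation are close to the mu and sigma used above.
import math

with open("random200.dat") as f:
    xs = [float(line) for line in f]
mean = sum(xs) / len(xs)
stdev = math.sqrt(sum((x - mean) ** 2 for x in xs) / (len(xs) - 1))
print("sample mean %.3f, sample std %.3f" % (mean, stdev))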
def calcScore(self, song):
    scores = []
    scores += [song.rating * random.gauss(1, 0.5)]
    scores += [self.calcContextMatchScore(song) * random.gauss(1, 0.5)]
    return sum(scores) + random.gauss(1, 0.5)
def crear_hogares(self):
    """
    Taking into account the probability that a household contains a married couple, and
    the average number of children under 23 years old per household, individuals are
    assigned to each household node. Single people with and without children are also
    considered.
    """
    # Generate the lists with each type of individual to distribute
    hombres = [ind for ind in self.agentes_a_asignar if ind.sexo == 'h' and ind.edad >= 23]
    mujeres = [ind for ind in self.agentes_a_asignar if ind.sexo == 'm' and ind.edad >= 23]
    hijos = [ind for ind in self.agentes_a_asignar if ind.edad < 23]
    print(f'Number of men {len(hombres)}, women {len(mujeres)}, children {len(hijos)}')
    contador_nodos = 0
    while len(hombres) > 0 or len(mujeres) > 0:
        a_agregar = []
        matrimonio = random() < self.p_matrimonio
        ## First assign the heads of the household (man and/or woman)
        if len(hombres) * len(mujeres) > 0 and matrimonio:
            a_agregar.append(hombres.pop())
            a_agregar.append(mujeres.pop())
        else:
            seleccion = sample([hombres, mujeres], 2)
            if len(seleccion[0]) > 0:
                a_agregar.append(seleccion[0].pop())
            else:
                a_agregar.append(seleccion[1].pop())
        ## Assign the children to each house
        n_hijos = abs(int(gauss(self.promedio_hijos, 0.5)))
        while len(hijos) > 0 and n_hijos > 0:
            a_agregar.append(hijos.pop())
            n_hijos -= 1
        print(f'House {contador_nodos} has {len(a_agregar)} people. Married couple: {matrimonio}')
        for i in a_agregar:
            i.casa_id = contador_nodos
            i.nodo_actual = contador_nodos
        ## Create the corresponding node
        self.add_node(contador_nodos,
                      tipo='casa',
                      habitantes=[ind.unique_id for ind in a_agregar],
                      ocupantes=a_agregar)
        self.casasids.append(contador_nodos)
        contador_nodos += 1
    ## If there are children left over, add them at random to the existing nodes
    while len(hijos) > 0:
        hijo = hijos.pop()
        idcasa = choice(list(self.nodes))
        print(f'Adding a child to house {idcasa}... ', end='')
        self.nodes[idcasa]['habitantes'].append(hijo.unique_id)
        self.nodes[idcasa]['ocupantes'].append(hijo)
        hijo.casa_id = idcasa
        hijo.nodo_actual = idcasa
        print('Added')
def visualizeSingleSystem(subplots, channel_file, power_system_object, pos, G, a): node_labels = {i: str(i) for i in range(1, 40)} time, frequencies, res = getFrequencyDeviation(channel_file) gens = [gen._bus for gen in power_system_object._generators] loads = [load._bus for load in power_system_object._loads] stubs = list( set(range(1, power_system_object._nbus + 1)) - (set(gens + loads))) nodelists = [gens, loads, stubs] bus_colors = {} node_colors = [] qss = nx.shortest_path_length(G, 10) qsna = {key: (12 - qss[key]) * 1.0 / 10 for key in qss} qsa = {key: (14 - qss[key]) * 1.0 / 9 for key in qss} qsna[10] = 2.4 qsna[32] = 1.9 qsna[13] = 1.3 qsna[12] = 1.1 qsna[11] = 1.15 #qs = {10:0.99,32:0.93,13:0.94,12:0.923,11:0.947,14:0.912,15:0.901,6:0.873,31:} for nodelist in nodelists: #node_colors.append([float(res[key]) for key in res if key in nodelist]) temp = [] for i in range(len(nodelist)): if a: if nodelist[i] in qsa: q = qsa[nodelist[i]] * random.gauss(.7, .08) else: q = 0 #random.gauss(.25,.25) else: if nodelist[i] in qsna: q = qsna[nodelist[i]] * random.gauss(.35, .08) else: q = 0 #random.gauss(.25,.25) if q < 0: q = 0.0 if q > 1: q = 1.0 bus_colors[nodelist[i]] = q temp.append(q) node_colors.append(temp) shapes = ['s', 'o', 'o'] node_sizes = [90, 90, 90] img = misc.imread('asd.PNG') img[:, :, 3] = 190 #set alpha plt.subplot(subplots[0]) plt.imshow(img, zorder=0, extent=[0.0, 1.0, 0.0, 1.0]) for i in range(3): im = nx.draw_networkx_nodes(G, pos, nodelist=nodelists[i], node_color=node_colors[i], node_shape=shapes[i], node_size=node_sizes[i], cmap=plt.cm.get_cmap('RdYlBu_r'), vmin=0.0, vmax=1.0) #nx.draw_networkx_labels(G,pos,labels=node_labels,font_size=15) plt.axis('off') nx.draw_networkx_edges(G, pos) cmap = plt.cm.get_cmap('RdYlBu_r') plt.subplot(subplots[1]) #bus_colors[bus] for bus in sorted(bus_colors): #colorVal = plt.cm.get_cmap('RdYlBu_r').to_rgba(res[bus]) plt.plot(time, frequencies[bus], color=cmap(bus_colors[bus]), alpha=0.7) plt.xlim([0.0, 30.0]) plt.ylim([59.62, 60.38]) plt.xlabel('Time [s]') if a: plt.ylabel('Frequency [Hz]') plt.xticks([0, 10, 20, 30]) plt.yticks([59.7, 60, 60.3]) return im
'''
write a sequence of argv[1] normally distributed random numbers with mean argv[2]
and std. dev argv[3] into argv[4] (ASCII text file)

Example: python create_init_distr.py 20 -16.44 0.3 fens.txt
'''
from math import *
import random
import sys  # for getting command line args

noe = int(sys.argv[1])    # number of ensemble members
mu = float(sys.argv[2])   # mean of the normal distribution
sig = float(sys.argv[3])  # standard deviation of the normal distribution

f = open(sys.argv[4], 'w')  # open the desired text file to write output in there
for i in range(noe):
    # r = random.gauss(log10(4e-17), log10(8e-17/4e-17))  # -16.4+-0.3 seems reasonable, 4e-17 is mu, 8e-17 is mu+1sigma
    r = random.gauss(mu, sig)
    f.write(str(r) + '\n')
f.close()

# DART $Id: create_init_distr.py 11001 2017-02-03 23:08:55Z [email protected] $
# from Alexey Morozov
#
# <next few lines under version control, do not edit>
# $URL: https://svn-dares-dart.cgd.ucar.edu/DART/branches/rma_trunk/models/gitm/python/create_init_distr.py $
# $Revision: 11001 $
# $Date: 2017-02-03 16:08:55 -0700 (Fri, 03 Feb 2017) $
eta = 5.0 * (10**(-2)) tau = 1.0 * (10**(2)) dt = 1.0 x_0 = 1.0 sigma = 10.0 constant = 1.0 / math.sqrt(2 * math.pi * (sigma**2)) for experiment in range( NUM_EXPTS ): #simulate the paradigm using many different initializations of w_0 mu = [(20.0 * j) + 10.0 for j in range(MU_RANGE)] y = [] y2 = [] # generate weights from a gaussian with mu = 3.0 and sigma = 1.0. Constrain w_i >= 0. w_0 = [random.gauss(3.0, 1.0) for i in range(NUM_NEURONS) ] # at each of the 20 simulations, this is drawn tmp_w0 = copy.deepcopy( w_0) # reassignment of list would create shallow copies!! for j in range(MU_RANGE): w_0 = copy.deepcopy(tmp_w0) # use the same weights for all inputs # print "init w_0: ",w_0 f = open("weights_for_mu%d_expt%d_t%d" % (j, experiment, TIME_LIMIT), 'wt') #f_theta = open("theta_for_mu%d_expt%d_t%d"%(j , experiment,TIME_LIMIT),'wt') #f_y = open("response_for_mu%d_expt%d_t%d"%(j , experiment,TIME_LIMIT),'wt') #f_F = open("objective_for_mu%d_expt%d_t%d"%(j , experiment,TIME_LIMIT),'wt') theta_0 = [2.5 for i in range(MU_RANGE)] # print "init theta_0: ",theta_0
while True:
    # Round the position to the nearest tenth of a meter. This keeps the sprites from
    # jumping around while drawing due to floating point round-off to the nearest pixel.
    site_pos = (round(random.uniform(*DELIVERY_SITE_X_BOUNDS) % WORLD_LENGTH, 1),
                round(random.uniform(*DELIVERY_SITE_Y_BOUNDS) % WORLD_WIDTH, 1))
    if min((s.distance_to(site_pos) for s in delivery_sites),
           default=MIN_DELIVERY_DISTANCE) >= MIN_DELIVERY_DISTANCE:
        delivery_sites.append(DeliverySite(site_pos))
        break

# Randomly generate trees that aren't too close to delivery sites.
trees = []
tree_density = random.gauss(TYPICAL_NUM_TREES, MAX_NUM_TREES / 3)
'''
num_trees = round(min(MAX_NUM_TREES, tree_density)
                  if tree_density >= TYPICAL_NUM_TREES
                  else random.triangular(0, TYPICAL_NUM_TREES, TYPICAL_NUM_TREES))
'''
num_trees = 99
for _ in range(num_trees):
    while True:
        # Round the position to the nearest tenth of a meter. This keeps the sprites from
        # jumping around while drawing due to floating point round-off to the nearest pixel.
        tree_pos = (round(random.uniform(*TREE_X_BOUNDS), 1),
                    round(random.uniform(0, WORLD_WIDTH), 1))
        if min((s.distance_to(tree_pos) for s in delivery_sites),
               default=MIN_TREE_DISTANCE) >= MIN_TREE_DISTANCE:
            trees.append(Tree(tree_pos))
            break
for t in range(0, total_steps):
    # Write positions to file
    with open('positions_hard_sphere.xyz', 'a') as f:
        f.write(str(len(x)) + '\n')
        f.write('\n')
        for i in range(len(x)):
            # xyz file is a file format to store 3D position
            # the general format is:
            # PARTICLE_TYPE X Y Z
            # here we just call our hard spheres H
            f.write('H' + '\t' + str(x[i]) + '\t' + str(y[i]) + '\t' + str(z[i]) + '\n')

    for i in range(0, particle_number):
        # Trial Move
        trial_x = x[i] + random.gauss(0, 1)
        trial_y = y[i] + random.gauss(0, 1)
        trial_z = z[i] + random.gauss(0, 1)

        # Check boundaries
        # We always move particles a small step, so don't worry if trial_x >> box_size
        if trial_x <= 0:
            trial_x += box_size
        elif trial_x >= box_size:
            trial_x -= box_size

        if trial_y <= 0:
            trial_y += box_size
        elif trial_y >= box_size:
            trial_y -= box_size
def raytrace_2d(nrays, sigma0, vel, nscr): # seed random number generator here (a second one below!!!@#@) np.random.seed(380340) rand.seed(32422005) # The screen strength weighting scheme is as follows (screen j): # sigma0 = passed in: sets the scale of all the deflections # Sr[j] = strength of the randdom component of the ray # Sd[j] = strength of the directed component of the ray # A[j] = axial ratio sigmax/sigmay ; y gets divided by this value # psi[j] = position angle (rel. to x-axis) of the directed component # creating variables. Path is x or y location at each screen. # thetax and thetay are the angular positions at each screen. # omega and tau are the delay and fringe frequency corroloaries nscreen = nscr pathx = np.zeros((nrays, nscreen+1)) # x position. nscreen indices. pathy = np.zeros((nrays, nscreen+1)) # y position with nscreen indices. thetax = np.zeros((nrays, nscreen+1)) # one deflection angle at each screen plus the initial one. thetay = np.zeros((nrays, nscreen+1)) tau = np.zeros(nrays) # array that holds delays omega = np.zeros(nrays) # array of omega values for each ray. Sd = np.zeros(nrays) # strength (in units of sigma0) of directed component psi = np.zeros(nrays) # position angle (reltative to x-axis) of directed component AR = np.ones(nrays) # axial ratio of ellipse sigmay = sigmax / AR Sr = np.ones(nrays) # amplitudes amp = np.zeros(nrays) dz = 3.1e19/nscreen # distance between screens (1kpc is 3.1e19 meters) # GENERATE EACH RAY HERE: for i in range(nrays): pathx[i,0] = 0 pathy[i,0] = 0 for j in range(nscreen+1): theta1 = 0 theta2 = 0 # this is the general ray creation mechanism. gx = rand.gauss(0,1) # x amplitude of directed component gy = rand.gauss(0,1) # y amplitude of directed component gr = rand.gauss(0,1) # amplitude of the random component psi_rand = 2.*np.pi*rand.random() # uniform [0, 2 pi) theta1 = sigma0*(Sd[j]*(gx*np.cos(psi[j]) - gy*np.sin(psi[j])/AR[j]) + Sr[j]*gr*np.cos(psi_rand)) # theta2 = sigma0*(Sd[j]*(gx*np.sin(psi[j]) + gy*np.cos(psi[j])/AR[j]) + Sr[j]*gr*np.sin(psi_rand)) # Calculate the amplitude of this ray # may need tweaking - Dan (3/23/18) amp[i] += (gx**2 + gy**2) # adjust theta based on what our deflection did to photons. thetax[i,j] = thetax[i,j-1] + theta1 thetay[i,j] = thetay[i,j-1] + theta2 # tracks the path the ray takes. pathx[i,j] = ((thetax[i,j])*dz) + pathx[i,j-1] pathy[i,j] = ((thetay[i,j])*dz) + pathy[i,j-1] # take the sigma values and subtract avg, find probability amp[i] = amp[i] - (nscreen-1) amp[i] = np.exp(-amp[i]) # converge on the observer more neatly by subtracting a small amount from # each step. for ray in range(nrays): dispx = (pathx[ray,nscreen])/(nscreen) dispy = (pathy[ray,nscreen])/(nscreen) for scr in range(1,nscreen+1): pathx[ray,scr] = pathx[ray,scr] - (dispx*(scr)) pathy[ray,scr] = pathy[ray,scr] - (dispy*(scr)) # calculate the omega values assuming the pulsar is moving in only x or # y direction and not a combination of the two. # the total weighting sum, divides at end. 
sum_weights = 0 sj = 0.0 wj = 0.0 for i in range(1,nscreen): # the screen fractional distance sj = float(i)/float(nscreen) # the screen weighting wj = sj/(1-sj) # adding each weighting to the total sum_weights += wj # getting omega values for ray in range(nrays): for i in range(nscreen): # the screen fractional distance sj = float(i)/float(nscreen) # the screen weighting wj = sj/(1-sj) # update theta values for the bent ray thetax[ray,i] = (pathx[ray,i]-pathx[ray,i-1])/dz thetay[ray,i] = (pathy[ray,i]-pathy[ray,i-1])/dz # add the screen plus the weighting omega[ray] += (thetax[ray,i]*wj) # gets the tau delays relative to to straight-line path. (small # angle approximation used here. xdelay = ((thetax[ray,i]**2)*dz)/(2*(3e8)) #seconds ydelay = ((thetay[ray,i]**2)*dz)/(2*(3e8)) #seconds tau[ray] += np.sqrt(xdelay**2+ydelay**2) # print thetax[ray,nscreen] # calculate the final omega by getting right units/undo weighting omega[ray] = 2*np.pi*omega[ray]*vel/(sum_weights*0.37) #divide by wavelength of .37m # random phase approximation for a given ray. # phi = 2.0 * np.pi * np.random.rand(nrays) # random phase # set the omega and tau values to zero and make ray 0 the source point. amp[0] = 1e-2 omega[0] = 0 tau[0] = 0 # FIND ALL INTERFERENCE TERMS HERE sec = [ (0,0) for i in range(nrays*nrays) ] sec_amp = np.zeros(nrays*nrays) idx = 0 for ray1 in range(nrays): for ray2 in range(nrays): if (ray1 != ray2): # difference term sec[idx] = ((omega[ray2]-omega[ray1]),(tau[ray2]-tau[ray1])) elif (ray1 == ray2): # self term sec[idx] = (0,0) #saving the amplitude of the combined rays for every interference. sec_amp[idx] = amp[ray1]*amp[ray2] idx += 1 # if you wanted to, this is where you would create a dynamic. ######### dyn = makeDyn(nx,ny,nscreen,phi,omega,tau, nrays) return pathx, pathy, thetax, thetay, sec, tau, omega, sec_amp
#!/usr/bin/python
import random

#########################################################################
# Generate data like [ x, y, z] [ val = (c1*x + c2*y + c3*z + noise) ]  #
# where x,y,z are random numbers and c1, c2, c3 are coefficients(known) #
#########################################################################

coefficients = [2, 4, 7]
minRandom = -50
maxRandom = 50

# random.gauss(mean, sigma)
# first param is mean,
# second param is standard deviation
noise = random.gauss(0, 10)

inputs = []
output = 0
for index in range(len(coefficients)):
    randFloat = random.uniform(minRandom, maxRandom)
    inputs.append(float(format(randFloat, '.3f')))
    output += inputs[index] * coefficients[index]

output += float(format(noise, '.3f'))
print inputs, "[", output, "]"
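# Hedged follow-up sketch (not part of the original script): if many rows like the one
# above are generated, the known coefficients can be recovered approximately with least
# squares. numpy is an added dependency here; the generation loop mirrors the script above.
import numpy as np
import random

rows, targets = [], []
for _ in range(1000):
    x = [random.uniform(-50, 50) for _ in range(3)]
    y = sum(c * v for c, v in zip([2, 4, 7], x)) + random.gauss(0, 10)
    rows.append(x)
    targets.append(y)

estimate, _, _, _ = np.linalg.lstsq(np.array(rows), np.array(targets), rcond=None)
print(estimate)  # should be close to [2, 4, 7]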
def rand(x): return max(-2 * x, min(2 * x, gauss(0, 1) * x))
def noise():
    '''a noise vector'''
    from random import gauss
    v = Vector3(gauss(0, 1), gauss(0, 1), gauss(0, 1))
    v.normalize()
    return v * opts.noise
import torch
from torchvision.transforms import ToTensor
import numpy as np
import random

# Building random datasets of 100 elements in each class

# Building 1st dataset
n = 100
values1 = []
frequencies1 = {}
while len(values1) < n:
    value = random.gauss(5, 4)
    if 0 < value < 10:
        frequencies1[int(value)] = frequencies1.get(int(value), 0) + 1
        values1.append(value)
values1 = np.array(values1)
label1 = np.zeros(100).astype(int)
class1 = np.array([values1, label1]).T

# Building 2nd dataset
values2 = []
frequencies2 = {}
while len(values2) < n:
    value = random.gauss(15, 4)
    if 8 < value < 18:
        frequencies2[int(value)] = frequencies2.get(int(value), 0) + 1
        values2.append(value)
values2 = np.array(values2)
label2 = np.ones(100).astype(int)
def random_point(self):
    c = self.center
    ll, ul = self.limits
    x, y, z = (gauss(0, 1), gauss(0, 1), gauss(0, 1))
    # uniform within the shell volume: cube root of a uniform draw between the cubed radii,
    # scaled by the norm of the Gaussian direction vector
    r = (uniform(ll[0]**3, ul[0]**3)**(1.0 / 3.0) / sqrt(x**2 + y**2 + z**2))
    return [r * x + c[0], r * y + c[1], r * z + c[2]]