def generateData3(size):
    classA = [(r.normalvariate(-1.0, .5), r.normalvariate(0.5, 1), 1.0)
              for i in range(int(size))]
    classB = [(r.normalvariate(0.0, 0.5), r.normalvariate(-0.5, 0.5), -1.0)
              for i in range(size)]
    data = classA + classB
    r.shuffle(data)
    return data
def generate_data(N):
    classA = [(random.normalvariate(-3, 0.5), random.normalvariate(-0.5, 0.5), 1.0) for i in range(N)] + \
             [(random.normalvariate(3, 0.5), random.normalvariate(-0.5, 0.5), 1.0) for i in range(N)]
    # classA = [(random.normalvariate(-3, 0.5), random.normalvariate(0.5, 1), 1.0) for i in range(N)] + \
    #          [(random.normalvariate(-3, 0.5), random.normalvariate(0.5, 1), 1.0) for i in range(N)]
    classB = [(random.normalvariate(0.0, 0.5), random.normalvariate(-0.5, 0.5), -1.0) for i in range(N)] + \
             [(random.normalvariate(-6, 0.5), random.normalvariate(-0.5, 0.5), -1.0) for i in range(N)]
    data = classA + classB
    random.shuffle(data)
    return data
def random_series_generator():
    l = [normalvariate(0, 1) for i in range(3)]
    while True:
        next_val = 0.7 * l[-1] - 0.5 * l[-2] - 0.2 * l[-3] + normalvariate(0, 1)
        l.append(next_val)
        # next_val = normalvariate(0, 1)
        yield 1 if next_val > 0 else -1
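# A minimal usage sketch (not part of the original source): consuming the
# AR(3)-driven sign generator above with itertools.islice. Assumes
# `from random import normalvariate` is in scope for random_series_generator.
import itertools

signs = list(itertools.islice(random_series_generator(), 20))
print(signs)  # e.g. [1, -1, -1, 1, ...] -- the sign of each AR(3) sample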
def generate_messy_data():
    data_1 = [(random.normalvariate(MESSY_MEAN_X_1, MESSY_STD_X_1),
               random.normalvariate(MESSY_MEAN_Y_1, MESSY_STD_Y_1)) for i in range(500)]
    data_2 = [(random.normalvariate(MESSY_MEAN_X_2, MESSY_STD_X_2),
               random.normalvariate(MESSY_MEAN_Y_2, MESSY_STD_Y_2)) for i in range(500)]
    return data_1, data_2, (data_1 + data_2)
def test_te_local_values():
    """Test local TE estimation."""
    n = 1000
    cov = 0.4
    source = [rn.normalvariate(0, 1) for r in range(n)]  # correlated src
    target = [0] + [
        sum(pair) for pair in zip(
            [cov * y for y in source[0:n - 1]],
            [(1 - cov) * y for y in [rn.normalvariate(0, 1) for r in range(n - 1)]],
        )
    ]
    analysis_opts = {
        "kraskov_k": 4,
        "normalise": "false",
        "theiler_t": 0,
        "noise_level": 1e-8,
        "local_values": True,
        "tau_target": 1,
        "tau_source": 1,
        "source_target_delay": 1,
        "history_target": 1,
        "history_source": 1,
    }
    te_est = Estimator_te("jidt_kraskov")
    te_res = te_est.estimate(np.array(source), np.array(target), analysis_opts)
    assert te_res.shape[0] == n, "Local TE estimator did not return an array."
def generateAtmosProfile(densityMeanList, uMeanList, vMeanList, wMeanList,
                         densitySDList, uSDList, vSDList, wSDList,
                         windMagMax=None, altitudeList=None):
    import random
    counter = 0
    if windMagMax is None:
        density = random.normalvariate(densityMeanList, densitySDList)
        u = random.gauss(uMeanList, uSDList)
        v = random.gauss(vMeanList, vSDList)
        w = random.gauss(wMeanList, wSDList)
    else:
        for index in range(1000):
            density = random.normalvariate(densityMeanList, densitySDList)
            u = random.gauss(uMeanList, uSDList)
            v = random.gauss(vMeanList, vSDList)
            w = random.gauss(wMeanList, wSDList)
            altitude = np.array(altitudeList)
            indexAlt = altitude < 20000.  # only altitudes below 20 km are considered
            Vmag = np.sqrt(np.array(u[indexAlt])**2 + np.array(v[indexAlt])**2 + np.array(w[indexAlt])**2)
            counter = counter + 1
            if np.max(Vmag) <= 30.0:  # 30 m/s as the wind magnitude limit
                break
            if counter == 999:
                print('Error in AtmosProfile.py: check the maximum wind velocity allowed in atmo profile generation')
                exit()
    return (density, u, v, w)
def test_003_test_message_with_noise(self):
    NUM_GENERATED_PASSES = 5
    N0 = 0.06
    # get strider decoder
    dec = rf.codes.strider.StriderFactory.createDecoder(1530)
    # get turbo encoder
    enc = rf.codes.strider.StriderFactory.createEncoder(1530)
    for i in range(2):
        # get random message
        message = numpy.random.bytes(6179)
        message += chr(numpy.random.randint(0, 4) & 0x3)
        # encode
        enc.setPacket(message)
        encoderOutput = rf.vector_csymbol()
        enc.encode(3840 * NUM_GENERATED_PASSES, encoderOutput)
        # Add noise (inner loop variable renamed from `i`, which shadowed the outer loop)
        sigma = math.sqrt(N0 / 2.0)
        for j in range(encoderOutput.size()):
            encoderOutput[j] += random.normalvariate(0, sigma)
            encoderOutput[j] += 1j * random.normalvariate(0, sigma)
        # attempt to decode
        dec.reset()
        dec.add(encoderOutput, N0)
        res = dec.decode()
        self.assertEqual(len(res.packet), len(message))
        self.assertEqual(res.packet, message)
def get_RM(N, t):
    """Produces a random matrix of size N with Gaussian entries and returns
    the eigenvalues of the scaled matrix."""
    dim = (N, N)
    var = 1.0 / np.sqrt(2.0)
    H = np.zeros(dim, dtype=complex)  # initialize the complex-type matrix
    for i in range(N):
        H[i, i] = random.normalvariate(0, 1)  # the diagonal is real, standard normal
        for j in range(N):
            if abs(j - i) <= np.floor(t * N) and j > i:  # within a distance t*N of the diagonal
                R = random.normalvariate(0, var)
                I = random.normalvariate(0, var)
                H[i, j] = complex(R, I)
                H[j, i] = H[i, j].conjugate()
            elif abs(j - i) > np.floor(t * N):  # the top-right and bottom-left corners
                H[i, j] = random.normalvariate(0, 1)
    scale = np.sqrt(N ** -1)
    W = scale * H
    lam = np.linalg.eigvals(W)
    return lam
def generate_hypothesis(x_mean, y_mean, th_mean, x_sdev, y_sdev, th_sdev):
    """Generate a Gaussian hypothesis."""
    return (random.normalvariate(x_mean, x_sdev),
            random.normalvariate(y_mean, y_sdev),
            random.normalvariate(th_mean, th_sdev))
def compare_poincare_baker():
    """Bread measurements by Poincare: 366 daily weights drawn from a fitted
    normal distribution with mean 950 g and standard deviation 50 g."""
    poincare_sample = [random.normalvariate(950, 50) for i in range(366)]
    poincare_sample_cdf = MakeCdfFromList(poincare_sample, 'poincare')
    baker_sample = []
    for i in range(366):
        baker_sample.append(max(random.normalvariate(950, 50) for i in range(4)))
    baker_cdf = MakeCdfFromList(baker_sample)
    print(poincare_sample_cdf.Mean(), baker_cdf.Mean())
    myplot.Clf()
    myplot.Cdfs([poincare_sample_cdf, baker_cdf])
    pyplot.xlim(600, 1120)
    pyplot.legend(loc=0)
    myplot.SaveFormat('../resources/plots/poincare_vs_baker', 'png')
    # t-test
    t_test = stats.ttest_rel(poincare_sample, baker_sample)
    print("t-test statistic is %s with a p-value %s" % t_test)
def makeMeasAccToPlan_lognorm(func, expplan: list, b: list, c: dict, Ve=None, n=1,
                              outfilename="", listOfOutvars=None):
    """
    :param func: vector-valued function
    :param expplan: experiment plan (list of values of the vector x)
    :param b: vector b
    :param c: vector c
    :param Ve: covariance matrix (np.array)
    :param n: sample size for y
    :param outfilename: name of the output file the plan is written to
    :param listOfOutvars: list of exported variables
    :return: list of experimental data as a list of dicts {'x': ..., 'y': ...}
    """
    res = list()
    for i in range(len(expplan)):
        y = func(expplan[i], b, c)
        if y is None:  # if the function returned garbage, do not record it in measdata!
            continue
        # Apply perturbations:
        if Ve is not None:
            if np.linalg.det(Ve) > 10e-15:
                ydisps = np.diag(Ve)
                for k in range(len(y)):
                    if y[k] < 0:
                        y[k] = -1 * math.exp(random.normalvariate(math.log(math.fabs(y[k])), math.sqrt(ydisps[k])))
                    else:
                        y[k] = math.exp(random.normalvariate(math.log(y[k]), math.sqrt(ydisps[k])))
        curdict = {'x': expplan[i], 'y': y}
        # res[i]["y"] = y
        res.append(curdict)
    return res
def test_compare_opencl_jidt_implementation():
    """Compare results from the OpenCL and JIDT implementations."""
    n = 4000
    cov = 0.4
    source_1 = [rn.normalvariate(0, 1) for r in range(n)]  # correlated src
    target = [sum(pair) for pair in zip(
        [cov * y for y in source_1],
        [(1 - cov) * y for y in [rn.normalvariate(0, 1) for r in range(n)]])]
    # Cast everything to numpy so the idtxl estimator understands it.
    source_1 = np.expand_dims(np.array(source_1), axis=1)
    target = np.expand_dims(np.array(target), axis=1)
    # Note that the calculation is a random variable (because the generated
    # data is a set of random variables) - the result will be of the order of
    # what we expect, but not exactly equal to it; in fact, there will be a
    # large variance around it.
    # opts = {'kraskov_k': 4, 'normalise': True, 'nchunkspergpu': 2}
    opts = {'kraskov_k': 4, 'normalise': True}
    calculator_name = 'jidt_kraskov'
    est = Estimator_cmi(calculator_name)
    res_jidt = est.estimate(var1=source_1, var2=target, conditional=None,
                            opts=opts)
    calculator_name = 'opencl_kraskov'
    n_chunks = 1
    est = Estimator_cmi(calculator_name)
    res_opencl = est.estimate(var1=source_1, var2=target, conditional=None,
                              n_chunks=n_chunks, opts=opts)
    print('result jidt: {0}, result opencl: {1}'.format(res_jidt, res_opencl))
def _determine_personality_feature(self, feature_type):
    """Determine a value for a Big Five personality trait."""
    config = self.person.game.config
    feature_will_get_inherited = (
        self.person.biological_mother and
        random.random() < config.big_five_heritability_chance[feature_type]
    )
    if feature_will_get_inherited:
        # Inherit this trait (with slight variance)
        takes_after = random.choice([self.person.biological_father, self.person.biological_mother])
        feature_value = random.normalvariate(
            self._get_a_persons_feature_of_type(person=takes_after, feature_type=feature_type),
            config.big_five_inheritance_sd[feature_type]
        )
    else:
        takes_after = None
        # Generate from the population mean
        feature_value = random.normalvariate(
            config.big_five_mean[feature_type], config.big_five_sd[feature_type]
        )
    if feature_value < config.big_five_floor:
        feature_value = config.big_five_floor
    elif feature_value > config.big_five_cap:
        feature_value = config.big_five_cap
    feature_object = Feature(value=feature_value, inherited_from=takes_after)
    return feature_object
def poison(self, amount_of_poison):
    """Applies poison to the population of cells in order to kill them.
    Cells which are resistant may not die."""
    for cell in self.cell_collection:
        # Vary the strength of the poison
        base_damage = 100 * normalvariate(1, 0.12)
        poison_strength_after_resistance = (
            (amount_of_poison * 1.0) - (cell.drug_resistance * normalvariate(1, 0.12)))
        # Determine the effectiveness of the poison
        if poison_strength_after_resistance > 0:
            effectiveness = max(poison_strength_after_resistance / 2.0, 0.0)
        else:
            effectiveness = 0
        # print('poison: bd:', base_damage, ' str:', poison_strength_after_resistance, ' effect:', effectiveness)
        # Apply the poison to the cell
        cell.life = cell.life - (base_damage * effectiveness)
def test_cmi_no_c_estimator_ocl():
    """Test CMI estimation without a conditional variable.

    The estimator should fall back to MI estimation and provide the correct
    result.
    """
    n = 4001  # this needs to be odd as we lose one sample when shifting signals
    cov = 0.4
    source_1 = [rn.normalvariate(0, 1) for r in range(n)]  # correlated src
    target = [sum(pair) for pair in zip(
        [cov * y for y in source_1],
        [(1 - cov) * y for y in [rn.normalvariate(0, 1) for r in range(n)]])]
    # Cast everything to numpy so the idtxl estimator understands it.
    source_1 = np.expand_dims(np.array(source_1), axis=1)
    target = np.expand_dims(np.array(target), axis=1)
    # Note that the calculation is a random variable (because the generated
    # data is a set of random variables) - the result will be of the order of
    # what we expect, but not exactly equal to it; in fact, there will be a
    # large variance around it.
    # opts = {'kraskov_k': 4, 'normalise': True, 'nchunkspergpu': 2}
    opts = {'kraskov_k': 4, 'normalise': True}
    n_chunks = 2
    calculator_name = 'opencl_kraskov'
    est = Estimator_cmi(calculator_name)
    res_1 = est.estimate(var1=source_1[1:], var2=target[1:], conditional=None,
                         n_chunks=n_chunks, opts=opts)
    expected_res = math.log(1 / (1 - math.pow(cov, 2)))
    print('Example 1: TE result for second chunk is {0:.4f} nats;'
          ' expected to be close to {1:.4f} nats for these correlated'
          ' Gaussians.'.format(res_1[0], expected_res))
    assert res_1[0] != res_1[1], ('CMI results for chunks 1 and 2 are '
                                  'identical; this is unlikely for random '
                                  'data.')
def write_data(file_name, label_list, link_list, size_list):
    """Write the repost-path txt data into an xls file; used by create_xls.

    :param file_name: xls file name
    :param label_list: list of nicknames
    :param link_list: list of links
    :param size_list: list of sizes
    :return:
    """
    file_rpt = xlwt.Workbook("utf-8")
    nodes = file_rpt.add_sheet("nodes")
    edges = file_rpt.add_sheet("edges")
    nodes.write(0, 0, "id")
    nodes.write(0, 1, "x")
    nodes.write(0, 2, "y")
    nodes.write(0, 3, "label")
    nodes.write(0, 4, "size")
    edges.write(0, 0, "sourceId")
    edges.write(0, 1, "targetId")
    print(len(label_list))
    print(len(link_list))
    print(len(size_list))
    for i in range(len(label_list)):
        nodes.write(i + 1, 0, i)
        nodes.write(i + 1, 1, random.normalvariate(1, 5))
        nodes.write(i + 1, 2, random.normalvariate(1, 5))
        nodes.write(i + 1, 3, label_list[i])
        nodes.write(i + 1, 4, size_list[i])
    for i in range(len(link_list)):
        edges.write(i + 1, 0, link_list[i][0])
        edges.write(i + 1, 1, link_list[i][1])
    file_rpt.save(file_name)
    return file_name
def run_ens(xi, yi, zi, ens_num, stepCnt, dt):
    x = []
    y = []
    z = []
    for t in range(ens_num):
        xs = np.empty((stepCnt + 1,))
        ys = np.empty((stepCnt + 1,))
        zs = np.empty((stepCnt + 1,))
        # Setting initial values with a small normal perturbation
        mean = 0
        sdev = .5
        xs[0], ys[0], zs[0] = (xi + random.normalvariate(mean, sdev),
                               yi + random.normalvariate(mean, sdev),
                               zi + random.normalvariate(mean, sdev))
        for i in range(stepCnt):
            x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i])
            xs[i + 1] = xs[i] + (x_dot * dt)
            ys[i + 1] = ys[i] + (y_dot * dt)
            zs[i + 1] = zs[i] + (z_dot * dt)
        x.append(xs)
        y.append(ys)
        z.append(zs)
    return x, y, z
def generateNextTarget(self):
    tgtID = self.count
    tgtIntelValue = random.triangular(1, 100, 60)
    tgtIntelPriority = tgtIntelValue
    if self.Tartype == 0:  # want all Pedestrians
        tgtType = 'Pedestrian'
        tgtStealth = random.triangular(0.1, 0.9, 0.5)
        tgtSpeed = random.normalvariate(1.44, 0.288)  # based on project 1 data
    elif self.Tartype == 1:  # want all Vehicles
        tgtType = 'Vehicle'
        tgtStealth = random.triangular(0.5, 0.95, 0.8)
        tgtSpeed = random.triangular(11.11, 19.44, 15.28)  # based on average urban speed for several countries
    else:
        r = random.random()
        if r < 0.5:
            tgtType = 'Vehicle'
            tgtStealth = random.triangular(0.5, 0.95, 0.8)
            tgtSpeed = random.triangular(11.11, 19.44, 15.28)  # based on average urban speed for several countries
        else:
            tgtType = 'Pedestrian'
            tgtStealth = random.triangular(0.1, 0.9, 0.5)
            tgtSpeed = random.normalvariate(1.44, 0.288)  # based on project 1 data
    tgtPredLoc = self.randNodes[self.count]
    tgtGoalTrackTime = random.triangular(60, 600, 300)
    tgtActualTrackTime = 0
    tgtTrackAttempts = 0
    self.currTargetTimestamp = self.currTargetTimestamp + random.triangular(1380, 4200, 2100)
    tgtData = [tgtID, tgtIntelValue, tgtIntelPriority, tgtType, tgtStealth, tgtSpeed,
               tgtPredLoc, tgtGoalTrackTime, tgtActualTrackTime, tgtTrackAttempts]
    self.targetTimestamps.append(self.currTargetTimestamp)
    self.targets[self.currTargetTimestamp] = tgtData
    self.count = self.count + 1
def illumina_errorprofile(read, platform):
    """Inputs are read, platform (this function is used for both SOLiD and
    Illumina profiles). Returns the modified read, quality, and pseudo-CIGAR
    format. Read qualities follow the Illumina error profile."""
    qual = ""  # Quality
    sub = 0
    match = 0
    for j in range(len(read)):
        y1 = 35.0 / (1 + math.exp(0.6 * (j - (len(read)))))  # a variant of the logistic function
        stdev = 0.5 + (j / 10.0)  # error bars for each position
        y2 = random.normalvariate(0, stdev)
        q = y1 + y2
        while q < 0 or q > 40:  # resample until q lies in [0, 40]; the original 'and' could never hold
            y2 = random.normalvariate(0, stdev)
            q = y1 + y2
        # Final quality
        prob = pc(q)  # calling probability calculator
        r = random.random()  # generate a random number between 0 and 1
        if r < prob:  # poor quality!
            if platform == 1:  # Illumina
                newbase = substitute(read[j], ["A", "G", "T", "C"])
            else:  # if platform == SOLiD
                newbase = substitute(read[j], ["0", "1", "2", "3"])
            read = read[0:j] + newbase + read[j + 1:]  # read modified
            sub = sub + 1
        else:
            match = match + 1
        qual = qual + chr(int(round(q)) + 33)  # offset of 33 + ASCII
    pseudo_cigar_format = str(sub) + "S" + "0IOD" + str(match) + "M"
    return [read, qual, pseudo_cigar_format]
def test_lagged_mi():
    """Test estimation of lagged MI."""
    n = 10000
    cov = 0.4
    source = [rn.normalvariate(0, 1) for r in range(n)]
    target = [0] + [sum(pair) for pair in zip(
        [cov * y for y in source[0:n - 1]],
        [(1 - cov) * y for y in [rn.normalvariate(0, 1) for r in range(n - 1)]])]
    source = np.array(source)
    target = np.array(target)
    settings = {
        'discretise_method': 'equal',
        'n_discrete_bins': 4,
        'history': 1,
        'history_target': 1,
        'lag_mi': 1,
        'source_target_delay': 1}

    est_te_k = JidtKraskovTE(settings)
    te_k = est_te_k.estimate(source, target)
    est_te_d = JidtDiscreteTE(settings)
    te_d = est_te_d.estimate(source, target)
    est_d = JidtDiscreteMI(settings)
    mi_d = est_d.estimate(source, target)
    est_k = JidtKraskovMI(settings)
    mi_k = est_k.estimate(source, target)
    est_g = JidtGaussianMI(settings)
    mi_g = est_g.estimate(source, target)
    _compare_result(mi_d, te_d, 'JidtDiscreteMI', 'JidtDiscreteTE',
                    'lagged MI', tol=0.05)
    _compare_result(mi_k, te_k, 'JidtKraskovMI', 'JidtKraskovTE',
                    'lagged MI', tol=0.05)
    _compare_result(mi_g, te_k, 'JidtGaussianMI', 'JidtKraskovTE',
                    'lagged MI', tol=0.05)
def PairingDancers():
    men_heights = [random.normalvariate(178, math.sqrt(59.4)) for i in range(101)]
    ladies_heights = [random.normalvariate(163, math.sqrt(52.8)) for i in range(101)]
    prob_of_taller_women = []
    count = 0.0
    for i in range(1001):
        number_of_taller_women = 0
        for j in range(1001):
            count += 1.0
            mens_picks = random.randint(0, 100)
            ladies_picks = random.randint(0, 100)
            man = men_heights[mens_picks]
            lady = ladies_heights[ladies_picks]
            if lady > man:
                number_of_taller_women += 1
        prob_of_taller_women.append(float(number_of_taller_women))
    total = 0.0
    for prob in prob_of_taller_women:
        total += prob
    print(total / count)
    print(scipy.stats.norm.sf(float(178 - 163) / math.sqrt(59.4 + 52.8)))
def accelModel(self):
    """Model of the accelerometer sensor on the MPU9150 IMU board.

    Produces a realistic accelerometer output accounting for noise and
    quantization. The accelerometer is assumed to be located on the wheel
    axle; hence no forces other than gravitational acceleration are measured
    when the robot rotates.

    :Sensor Parameters:
        accelRMS   Accelerometer RMS noise (g - rms)
        accelSens  Accelerometer sensitivity (LSB/g)
        accelRes   Accelerometer resolution (g)
    """
    accelRMS = 0.004
    accelSens = 16384
    accelRes = 1.0 / accelSens
    # Add noise to the accel reading according to datasheet specs
    phi = self.getPhi()
    accelGZ = math.cos(phi)   # accelerometer output in 'g' along Z (vertical) axis
    accelGY = -math.sin(phi)  # accelerometer output in 'g' along Y (horizontal) axis
    noisy_accelGZ = random.normalvariate(accelGZ, accelRMS)
    noisy_accelGY = random.normalvariate(accelGY, accelRMS)
    # Quantize the accel reading according to datasheet specs
    noisy_accelGZ = int(noisy_accelGZ / accelRes) * accelRes
    noisy_accelGY = int(noisy_accelGY / accelRes) * accelRes
    return [noisy_accelGZ, noisy_accelGY]
def main(argv=None):
    n = 1000000
    if argv is None:
        argv = sys.argv
    try:
        try:
            opts, args = getopt.getopt(argv[1:], "hn:", ["help", "npts="])
        except getopt.error as msg:
            raise Usage(msg)
        for o, a in opts:
            if o in ("-h", "--help"):
                print("Usage: data.py --npts=Npts")
                return 2
            if o in ("-n", "--npts"):
                n = int(a)
            else:
                raise Usage()
        # y_A ~ 2*x + 0.1 + N(0,0.5)
        # y_B ~ 4*x - 0.1 + N(0,0.5)
        # x ~ (-1,1)
        labels = ['A', 'B']
        bcoeffs = [2, 4]
        acoeffs = [+.1, -.1]
        x = -1 + 1. / n
        for i in range(n):
            which = random.randint(0, 1)
            noise = random.randint(0, 10000)
            if noise == 1:
                print(labels[which], "\t", x, "\t",
                      random.normalvariate(0, 0.5) + 1000. + bcoeffs[which] * x + acoeffs[which])
            else:
                print(labels[which], "\t", x, "\t",
                      random.normalvariate(0, 0.5) + bcoeffs[which] * x + acoeffs[which])
            x = x + 2. / n
    except Usage as err:
        # handler reconstructed: the original snippet was truncated and never
        # showed the except clause matching the outer try
        print(err, file=sys.stderr)
        return 2
def main():
    f = ROOT.TFile.Open('input.root', 'recreate')
    nom = ROOT.TH1F('nominal_histo', 'nominal_histo', 2, 0, 2)
    up = ROOT.TH1F('syst_up', 'syst_up', 2, 0, 2)
    dn = ROOT.TH1F('syst_down', 'syst_down', 2, 0, 2)
    nom.SetBinContent(1, 10)
    nom.SetBinContent(2, 20)
    nom.Sumw2(0)
    nom.Write()
    up.SetBinContent(1, 12)
    up.SetBinContent(2, 24)
    up.Sumw2(0)
    up.Write()
    dn.SetBinContent(1, 8)
    dn.SetBinContent(2, 16)
    dn.Sumw2(0)
    dn.Write()
    data1 = ROOT.TH1F('data1', 'data1', 2, 0, 2)
    data1.SetBinContent(1, 10.2)
    data1.SetBinContent(2, 19.7)
    data1.Sumw2(0)
    data1.Write()
    data2 = ROOT.TH2F('data2', 'data2', 6, -3, 3, 6, -3, 3)
    for i in range(10000):
        data2.Fill(random.normalvariate(0, 1), random.normalvariate(0, 1))
    data2.Sumw2(0)
    data2.Write()
    f.Close()
def testFuc():
    import random
    import time
    import pylab
    plot = True
    points = []
    # create random 2D gaussian clusters
    for i in range(8):
        x = random.random() * 3
        y = random.random() * 3
        c = [scipy.array((x + random.normalvariate(0, 0.1),
                          y + random.normalvariate(0, 0.1))) for j in range(100)]
        points += c
    if plot:
        pylab.scatter([x[0] for x in points], [x[1] for x in points])
    random.shuffle(points)
    n = len(points)
    start = time.time()
    # the value of N is generally quite forgiving, i.e. giving 6 will still
    # only find the 3 clusters; around 10 it will start finding more
    c = OnlineCluster(8)
    while len(points) > 0:
        c.onlineCluster(points.pop())
    clusters = c.clusters
    # print("I clustered %d points in %.2f seconds and found %d clusters." % (n, time.time() - start, len(clusters)))
    if plot:
        cx = [x.center[0] for x in clusters]
        cy = [y.center[1] for y in clusters]
        pylab.plot(cx, cy, "ro")
        pylab.draw()
        pylab.show()
def generate_candidate(self, mu, sigma):
    # randomly generate a candidate value from the proposal distribution
    if self.method == "independent":
        candidate = random.normalvariate(mu, sigma)  # proposed move
    elif self.method == "random_walk":
        # step from the latest state of the chain; the original referenced an
        # undefined index `i` here
        candidate = self.chain[-1] + random.normalvariate(mu, sigma)  # proposed move
    return candidate
def placement(self, mode):
    if mode == "manual":
        # requires 2 values: x and y
        if self.info is None or len(self.info) == 0:
            self.x = 0
            self.y = 0
        else:
            self.x = self.info[0]
            if len(self.info) >= 2:  # original tested >= 1, which would index past a 1-element list
                self.y = self.info[1]
            else:
                self.y = self.x
    if mode == "random":
        # requires 4 values: size_x, size_y, offset_x, offset_y
        # if self.info is None or len(self.info) == 0:
        #     size_x, size_y = 100, 100
        #     offset_x, offset_y = 0, 0
        # else:
        size_x = self.info[0]
        size_y = self.info[1]
        offset_x = self.info[2]
        offset_y = self.info[3]
        self.x = random.random() * size_x + offset_x
        self.y = random.random() * size_y + offset_y
    if mode == "cluster":
        # requires 4 values: center_x, center_y, spread_x, spread_y
        center_x = self.info[0]
        center_y = self.info[1]
        spread_x = self.info[2]
        spread_y = self.info[3]
        self.x = random.normalvariate(center_x, spread_x)
        self.y = random.normalvariate(center_y, spread_y)
def animate(self, graph):
    # If we're new, start in the middle
    if self.position is None:
        self.position = Numeric.array(graph.viewport.size, Numeric.Float) / 2
    # Slowly fade in
    if self.opacity < 1:
        self.opacity += 0.01
    if not self.isGrabbed:
        # Forces don't apply if we're being grabbed
        # Stay away from other nodes
        for other in graph.nodes:
            if other.position:
                repulsionForce(self, other, 80)
        # Stay away from the viewport edges
        edgeStrength = 200
        for axis in (0, 1):
            self.position[axis] += edgeStrength / max(self.position[axis], 1)
            self.position[axis] -= edgeStrength / max(graph.viewport.size[axis] - self.position[axis], 1)
        # Random wandering
        self.position += (random.normalvariate(0, graph.temperature),
                          random.normalvariate(0, graph.temperature))
def random_clusters(imagedir, category, make_faces=False):
    """Creates a test mockup of random clusters from a folder of images.

    Returns:
        clusters: a list of clusters that can be JSONified and passed to the
        html renderer
    """
    image_extensions = set(['jpg', 'png', 'jpeg', 'gif', 'ico'])
    local_images = [os.path.splitext(x)[0] for x in sorted(os.listdir(imagedir))
                    if os.path.splitext(x)[1][1:] in image_extensions]
    local_images = [make_image(h, category, make_faces) for h in local_images]
    clusters = []
    n_clusters = max(int(random.normalvariate(6, 2)), 2)
    # TODO add cluster children to simulate HAC
    for i in range(n_clusters):
        n_images = random.randrange(4, 7)
        n_size = random.randrange(40, 60)
        cluster = {'all_images': random.sample(local_images, n_size),
                   'sample_images': random.sample(local_images, n_images),
                   'std': random.normalvariate(10.0, 2.0),
                   'position': (random.random(), random.random()),
                   'size': n_size,
                   'children': []}
        clusters.append(cluster)
    return clusters
def _get_gauss_data(n=10000, covariance=0.4, expand=True):
    """Generate correlated and uncorrelated Gaussian variables.

    Generate two sets of random normal data, where one set has a given
    covariance and the second is uncorrelated.
    """
    corr_expected = covariance / (1 * np.sqrt(covariance**2 + (1 - covariance)**2))
    expected_mi = calculate_mi(corr_expected)
    src_corr = [rn.normalvariate(0, 1) for r in range(n)]    # correlated src
    src_uncorr = [rn.normalvariate(0, 1) for r in range(n)]  # uncorrelated src
    target = [sum(pair) for pair in zip(
        [covariance * y for y in src_corr[0:n]],
        [(1 - covariance) * y for y in [rn.normalvariate(0, 1) for r in range(n)]])]
    # Make everything numpy arrays so jpype understands it. Add an additional
    # axis if requested (MI/CMI estimators accept 2D arrays, TE/AIS only 1D).
    if expand:
        src_corr = np.expand_dims(np.array(src_corr), axis=1)
        src_uncorr = np.expand_dims(np.array(src_uncorr), axis=1)
        target = np.expand_dims(np.array(target), axis=1)
    else:
        src_corr = np.array(src_corr)
        src_uncorr = np.array(src_uncorr)
        target = np.array(target)
    return expected_mi, src_corr, src_uncorr, target
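# A hedged numeric illustration (not from the original source): the
# `calculate_mi` helper above is assumed to implement the closed form for two
# jointly Gaussian variables with correlation rho, I(X;Y) = -0.5 * ln(1 - rho**2)
# nats. A quick check of the construction used in _get_gauss_data:
import numpy as np

cov = 0.4
rho = cov / np.sqrt(cov**2 + (1 - cov)**2)  # corr of target = cov*src + (1-cov)*noise
mi_nats = -0.5 * np.log(1 - rho**2)
print(rho, mi_nats)  # ~0.5547 and ~0.184 nats for cov = 0.4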
def compute():
    for x in range(50):
        p = product()
        a = ['silver', 'gold', 'platinum']
        bids = random.randrange(0, 20)
        c = customer(random.choice(a), bids)
        pmd = random.randrange(0, 25)
        sales = random.randrange(500, 2000)
        q = plot(sales, pmd)
        # random.normalvariate(random.randrange(500, 1050, 100), random.randrange(50, 105, 10))
        p.inventory.cur = random.randrange(
            random.randrange(1 + (x % 20), 10 + (x % 20), 2),
            random.randrange(22 + (x % 20), 500 + (x % 20), 7),
            random.randrange(1, 3, 1))
        p.inventory.maxi = random.randrange(
            random.randrange(1 + (x % 20), 10 + (x % 20), 2),
            random.randrange(22 + (x % 20), 500 + (x % 20), 7),
            random.randrange(1, 3, 1))
        p.number_items_dp = abs(p.inventory.maxi - p.inventory.cur)
        p.revenue.dp = random.normalvariate(
            random.randrange(500 + (x % 20), 1550 + (x % 20), 30),
            random.randrange(50 + (x % 20), 105 + (x % 20), 10))
        p.revenue.cp = random.normalvariate(
            random.randrange(500 + (x % 20), 1550 + (x % 20), 30),
            random.randrange(50 + (x % 20), 105 + (x % 20), 10))
        p.price.cost = random.normalvariate(
            random.randrange(500 + (x % 20), 1550 + (x % 20), 30),
            random.randrange(50 + (x % 20), 105 + (x % 20), 10))
        p.price.selling = random.normalvariate(
            random.randrange(600 + (x % 20), 1650 + (x % 20), 30),
            random.randrange(50 + (x % 20), 105 + (x % 20), 10))
        # p.bids.cur = random.randrange(0, 5)  # call to database for total number of bids made
        # p.bids.tot = p.bids.cur + random.randrange(0, 5)
        # p.inventory.t0 = p.inventory.cur + random.randrange(random.randrange(1 + (x % 20), 10 + (x % 20), 2), random.randrange(22 + (x % 20), 500 + (x % 20), 7), random.randrange(1, 3, 1))
        p.inventory.t0 = p.inventory.cur
        # Inserting into the db
        print(d.insert(db_name, 'inventory', p.inventory.to_JSON()))
        print(d.insert(db_name, 'revenue', p.revenue.to_JSON()))
        print(d.insert(db_name, 'price', p.price.to_JSON()))
        print(d.insert(db_name, 'bids', p.bids.to_JSON()))
        print(d.insert(db_name, 'product', p.to_JSON()))
        print(d.insert(db_name, 'customer', c.to_JSON()))
        print(d.insert(db_name, 'plot', q.to_JSON()))
        print(d.insert(db_name, 'customer', c.to_JSON()))
        # print(d.update_with_date_random(db_name, 'revenue', p.revenue.to_JSON(), p.id - 1))
        # print(d.update_with_date_random(db_name, 'price', p.price.to_JSON(), p.id - 1))
        # print(d.update_with_date_random(db_name, 'bids', p.bids.to_JSON(), p.id - 1))
        # print(d.update_with_date_random(db_name, 'product', p.to_JSON(), p.id - 1))
    return
def start_task(i: int):
    lifespan = random.normalvariate(ave_lifespan, 10)
    return pool.apply_async(run_client, ('Player-{}'.format(i), lifespan, acts))
def generate_reward(self):
    if self.hit():
        return random.normalvariate(self.ave, self.std)
    else:
        return 0
def build_pitot_bootstrapper_condition_input_details_dictionary(cfg):
    """Function that builds a dictionary that tells the condition builder what
    tests to run. It will include a list called 'test_names' that will be
    cycled through by the main condition program.
    """
    import random

    test_condition_input_details = {}
    test_condition_input_details['test_names'] = []

    test_name = 0  # we will add to this as we go, first test_name will be 1

    print('-' * 60)
    print("Building the test condition details dictionary containing a dictionary for each simulation.")

    # for this we just need to randomly mess around with all of the input
    # variables until we have the amount of tests as the 'number_of_test_runs' variable
    for i in range(0, cfg['number_of_test_runs']):
        # change the test name
        test_name += 1
        # store that test name
        test_condition_input_details['test_names'].append(test_name)
        # now go through each variable in the variable list and assign values...
        # for now we just want to randomise nominal or above or below
        input_dictionary = {}
        for variable in cfg['variable_list']:
            if cfg[variable + '_distribution'] == 'uniform':
                # we will either have a random distribution between a range, or with deltas...
                if variable + '_delta' in cfg:
                    range_lower = cfg[variable] - cfg[variable + '_delta']
                    range_upper = cfg[variable] + cfg[variable + '_delta']
                elif variable + '_range' in cfg:
                    range_lower = cfg[variable + '_range'][0]
                    range_upper = cfg[variable + '_range'][1]
                # now our value will be a random number in between these bounds...
                input_dictionary[variable] = random.uniform(range_lower, range_upper)
            elif cfg[variable + '_distribution'] == 'normal':
                # our nominal value will be the mean and the input must include a standard deviation value
                mean_value = cfg[variable]
                std_dev_value = cfg[variable + '_std_dev']
                # now our value will be a random normal variate number using these...
                input_dictionary[variable] = random.normalvariate(mean_value, std_dev_value)
            elif cfg[variable + '_distribution'] == 'lognormal':
                # our nominal value will be the mean and the input must include a standard deviation value
                mean_value = cfg[variable]
                std_dev_value = cfg[variable + '_std_dev']
                # now our value will be a random lognormal variate number using these...
                input_dictionary[variable] = random.lognormvariate(mean_value, std_dev_value)
            elif cfg[variable + '_distribution'] == 'gaussian':
                # our nominal value will be the mean and the input must include a standard deviation value
                mean_value = cfg[variable]
                std_dev_value = cfg[variable + '_std_dev']
                # now our value will be a random gaussian number using these...
                input_dictionary[variable] = random.gauss(mean_value, std_dev_value)
        test_condition_input_details[test_name] = input_dictionary

    print("The test_names list for this simulation is:")
    print(test_condition_input_details['test_names'])

    return test_condition_input_details
for iteration in range(loop):
    print(str(traffic) + "-" + str(algo_type) + "-" + str(iteration))
    max_servers_set = []
    traffic_set = []
    # Edge: traffic, ratio, max_latency, capacity, max_servers, cost
    edge_set = []
    edge_filename = "testcase/" + args.edge_file
    edge_file = open(edge_filename, 'r')
    for i, line in enumerate(edge_file):
        if i % 6 == 0:
            # if algo_type == 0 and iteration == 0:
            for num in list(map(int, line.split())):
                traffic_set.append(random.normalvariate(traffic, traffic / 5))
            # traffic_set = list(map(int, line.split()))
        elif i % 6 == 1:
            ratio_rate_set = list(map(float, line.split()))
        elif i % 6 == 2:
            max_latency_set = list(map(float, line.split()))
        elif i % 6 == 3:
            capacity_set = list(map(int, line.split()))
        elif i % 6 == 4:
            cost_set = list(map(int, line.split()))
        else:
            for num in list(map(int, line.split())):
                max_servers_set.append(server_num)
            # max_servers_set = list(map(int, line.split()))
    for i in range(len(traffic_set)):
for val in sigma_set:
    # print(val)  # use to track what the current sigma is
    # Initialize empty lists
    pairs_dict[val] = list()     # list used for pair values
    pairs_to_bits[val] = list()  # list used for bits
    # Pair creation main loop
    # While the number of pairs in the library is < m, keep creating new pairs
    while len(pairs_dict[val]) < m:
        # (i) Selecting b_i
        b = random.randint(0, n - 1)
        # (ii) Selecting B_i
        B = random.normalvariate(b, val)
        # (iii) Dropping B_i that lie outside the interval [1, n-2]
        if B_more_than <= B <= B_less_than and B != ((n - 1) / 2):
            pair = (b, B)
            pairs_dict[val].append(pair)
            # Create bit string
            if pair[1] >= (n - 1) / 2:        # if B is greater than the midpoint,
                pairs_to_bits[val].append(1)  # add a 1 to the string
            else:
                pairs_to_bits[val].append(0)  # else add a 0
    # For the given sigma value, create a SatelliteString object, and supply
    # its string a copy of the respective bit string
    string_class_dict[val] = SatelliteString(copy.copy(pairs_to_bits[val]))
# For each SatelliteString, change random indices and store the changed locations
def val(self, f, s=0):
    return self.noise() + random.normalvariate(mu=self._mid, sigma=self._amp)
def main():
    import numpy as np

    # ndarray attributes
    array1 = np.random.randn(2, 3)  # normally distributed random numbers
    print(array1.shape)
    print(array1.dtype)  # the data type
    print(array1.ndim)

    # create ndarray
    data = [6, 7.5, 8, 1]
    array = np.array(data, dtype=np.float64)  # transform a sequence into an ndarray; syntax like set()
    array = np.zeros(4)
    array = np.zeros((4, 3))  # put a tuple for shape
    array = np.arange(10)  # array version of built-in range()
    array = array.astype(np.string_)  # change the dtype of array; creates a new copy of the data
    '''array = array.astype(array1.dtype)'''  # change the dtype to match another ndarray

    # array arithmetic
    array1 = np.array([[1, 2, 3], [4, 5, 6]])
    array2 = np.array([[0, 4, 1], [7, 2, 12]])
    print(array2 > array1)  # comparisons are carried out element-wise

    # index and slice
    name = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
    data = np.random.randn(7, 4)
    data[name == 'Bob', 2:]  # obtain indices/slices from the 'name' array; use them to select from the 'data' array
    data[~(name == 'Bob')]  # the '~' symbol negates the argument
    mask = (name == 'Bob') | (name == 'Will')  # use '|' and '&', instead of 'and', 'or'
    data[mask]  # boolean indexing always creates a new copy of the data, unlike what numpy normally does

    # fancy indexing: index using integer arrays; copies the data
    arr = np.empty((8, 4))
    for i in range(8):
        arr[i] = i
    arr[[4, 3, 0, 6]]  # select particular rows in the given order
    arr[[1, 5, 7, 2], [0, 3, 1, 2]]  # selects only the four elements at (1, 0), (5, 3), (7, 1), (2, 2)
    arr[[1, 5, 7, 2]][:, [0, 3, 1, 2]]  # selects a rectangular block of elements

    # transpose and swap
    arr = np.arange(15).reshape((3, 5))
    arr.T  # a view giving the traditional transpose
    np.dot(arr.T, arr)  # compute the inner matrix product
    arr = np.arange(16).reshape((2, 2, 4))
    arr.transpose((1, 0, 2))  # this will permute the axes
    arr.swapaxes(1, 2)  # this will swap two axes, changing the order of items

    # unary, binary functions
    arr = np.arange(10)
    np.sqrt(arr)
    np.exp(arr)
    x = np.random.randn(8)
    y = np.random.randn(8)
    np.maximum(x, y)  # output element-wise maximum
    x.argmax()  # fully scans and returns the index of the first maximum

    # function on grid
    import matplotlib.pyplot as plt
    points = np.arange(-5, 5, 0.01)
    x, y = np.meshgrid(points, points)  # cartesian output
    z = np.sqrt(x ** 2 + y ** 2)  # element-wise calculation (original passed two arguments to np.sqrt)
    plt.imshow(z, cmap=plt.cm.gray)
    plt.colorbar()
    plt.title(r'Image plot of $\sqrt{x^2 + y^2}$ for a grid of values')
    plt.show()

    # array methods
    arr = np.random.randn(4, 4)
    np.where(arr > 0, 2, -2)  # conditional logic to create a new array
    arr = np.random.randn(5, 4)
    arr.mean()
    np.mean(arr)  # the same as the above
    arr.mean(axis=1)  # compute along a particular axis
    arr.sum(axis=0)
    arr = np.arange(0, 9).reshape(3, 3)
    arr.cumsum(axis=0)  # cumulative sum
    arr.cumprod(axis=1)  # cumulative product
    bools = np.array([False, False, True, False])
    bools.any()
    bools.all()  # also treats non-zero values as True
    arr = np.random.randn(6).reshape(3, 2)
    arr.sort(1)  # sorts in place along the given axis; np.sort returns a sorted copy
    names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
    np.unique(names)  # the unique values in the array, like sorted(set(names))
    values = np.array([6, 0, 0, 3, 2, 5, 6])
    np.in1d(values, [2, 3, 6])  # booleans: whether each value is in the latter list

    '''np.save('some_array', arr)
    np.savez('some_array', a=array1, b=array2)  # store multiple arrays in one file
    np.savez_compressed('some_array', a=array1, b=array2)
    arch = np.load('some_array.npz')
    arch['a']  # index the arrays as in a dict.'''

    # Linalg
    x = np.array([[1, 2, 3], [4, 5, 6]])
    y = np.array([[6, 23], [-1, 7], [8, 9]])
    x.dot(y)
    np.dot(x, y)
    x @ y  # these three are the same

    # random
    from random import normalvariate
    samples = [normalvariate(0, 1) for _ in range(16)]  # built-in random
    samples = np.random.normal(loc=0, scale=0.25, size=(4, 4))  # numpy random
    np.random.seed(42)  # a global seed
    rng = np.random.RandomState(42)
    rng.randn(10)  # a private seed with operations on the reference
__author__ = 'student'

import random
import matplotlib.pyplot as plt

random.seed(0)
plt.subplot(221)
n = 100
values = [random.normalvariate(0, 1) for i in range(n)]
plt.hist(values, bins=100)

random.seed(0)
plt.subplot(222)
n = 1000
values = [random.normalvariate(0, 1) for i in range(n)]
plt.hist(values, bins=100)

random.seed(0)
plt.subplot(223)
n = 10000
values = [random.normalvariate(0, 1) for i in range(n)]
plt.hist(values, bins=100)

random.seed(0)
plt.subplot(224)
n = 100000
values = [random.normalvariate(0, 1) for i in range(n)]
plt.hist(values, bins=100)

plt.show()
from selenium import webdriver
from selenium.webdriver.support.select import Select
import time
import random
import pandas as pd
import json
import csv
import glob

normal_delay = random.normalvariate(3, 0.5)
normal_delay_2 = random.normalvariate(5, 0.5)

driver = webdriver.Chrome(executable_path='chromedriver.exe')
driver.get(
    'https://www.amazon.com/RockBirds-Flashlights-Bright-Aluminum-Flashlight/product-reviews/B00X61AJYM'
)
time.sleep(normal_delay_2)

sort = driver.find_element_by_css_selector('#sort-order-dropdown')
most_recent = Select(sort)
most_recent.select_by_visible_text('Most recent')
time.sleep(normal_delay)

filter = driver.find_element_by_css_selector('#reviewer-type-dropdown')  # note: shadows the built-in filter()
verified_purchase = Select(filter)
verified_purchase.select_by_visible_text('Verified purchase only')
time.sleep(normal_delay)

titles = []
dates = []
'''
# days to generate data for
duration = 100
# mean value
mean_inc = 0.2
# standard deviation
std_dev_inc = 1.2

# time series
x = range(duration)
y = []
price_today = 0

for i in x:
    next_delta = random.normalvariate(mean_inc, std_dev_inc)
    price_today += next_delta
    y.append(price_today)

pylab.plot(x, y)
pylab.xlabel('Time')
pylab.ylabel('Value')
pylab.show()
'''

# For more control, add various different distributions
import random
import matplotlib
import matplotlib.pyplot as plt
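# A minimal sketch (not from the original source) of what "various different
# distributions" could look like: side-by-side histograms of a few of the
# distributions the random module provides, using the plt import above.
fig, axes = plt.subplots(1, 3, figsize=(12, 3))
dists = [
    ('uniform(0, 1)', lambda: random.uniform(0, 1)),
    ('normalvariate(0, 1)', lambda: random.normalvariate(0, 1)),
    ('expovariate(1)', lambda: random.expovariate(1)),
]
for ax, (title, draw) in zip(axes, dists):
    ax.hist([draw() for _ in range(10000)], bins=50)
    ax.set_title(title)
plt.show()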
def generate_groundings():
    global global_count, var_mappings, dict_domain_vars, predicates, \
        formulas, det_clauses, global_count_formulas, formula_vars, \
        rev_var_mappings, dict_vars_no_evid
    infile = open(sys.argv[1], 'r')
    lines = infile.readlines()
    if lines[0] != "// domain declarations\n":
        print("Error in format")
        return  # bail out early; the original fell through with `i` undefined
    i = 1
    # ---------------- Parsing the input file ----------------
    while 1:
        if lines[i] == "// predicate declarations\n":
            break
        if lines[i] == "\n":
            i = i + 1
            continue
        temp_str = re.split('=', lines[i])
        list1 = temp_str[1].translate(str.maketrans('', '', '{}\n'))
        list2 = list1.split(',')
        temp_str[0] = temp_str[0].translate(str.maketrans('', '', ' '))
        i = i + 1
        dict_domain_vars[temp_str[0]] = list2
    i = i + 1
    while 1:
        if lines[i] == "// formulas\n":
            break
        if lines[i] == "\n":
            i = i + 1
            continue
        temp_str = lines[i].split('(')
        if len(temp_str) == 1:
            temp_str[0] = temp_str[0].translate(str.maketrans('', '', '\n'))
            SingletonPredicates[temp_str[0]] = 1
            i = i + 1
            continue
        list1 = temp_str[1].translate(str.maketrans('', '', ')\n '))
        list2 = list1.split(',')
        predicates[temp_str[0]] = list2  # a stray `list` token in the original was dropped here
        i = i + 1
    i = i + 1
    while 1:
        if i >= len(lines):
            break
        if lines[i] == "\n":
            i = i + 1
            continue
        temp_str = lines[i].strip('\n')
        if temp_str[len(temp_str) - 1] == '.':
            # hard (deterministic) clauses end with '.'
            temp_str = temp_str.strip('.')
            temp_str = temp_str.replace(" ", "")
            temp_str = temp_str.strip('\n')
            det_clauses.append(temp_str)
        else:
            # weighted clauses: first token is the weight
            weighted_clause = temp_str.split()
            weight = weighted_clause[0]
            rule = ' '.join(weighted_clause[1:len(weighted_clause)])
            formulas[rule] = weight
        i = i + 1
    num_vars = 0
    i = 0
    # ---------------- Generate variable mappings ----------------
    var_mappings = {}
    for key in predicates:
        var_types = predicates[key]
        temp_count = 1
        base_var = key + "("
        for j in range(len(var_types)):
            temp_count = temp_count * len(dict_domain_vars[var_types[j]])
        num_vars = num_vars + temp_count
        generate_ground_vars_rec(base_var, var_types)
    for key in SingletonPredicates:
        var_mappings[key] = global_count
        rev_var_mappings[global_count] = key
        num_vars = num_vars + 1
        global_count = global_count + 1
    # ---------------- Generate ground clauses ----------------
    for key in formulas:
        curr_formula = key
        # preds_curr_formula = re.split("\(|\)", curr_formula)
        preds_curr_formula = curr_formula.split()
        temp_dict = {}
        for j in range(0, len(preds_curr_formula), 2):
            present_clause = preds_curr_formula[j].split('(')
            var_type = present_clause[0]
            if var_type[0] == '!':
                var_type = var_type[1:len(var_type)]
            if len(present_clause) == 1:
                continue
            vars_current_pred = present_clause[1].strip(')').split(',')
            temp_list = predicates[var_type]
            for k in range(len(vars_current_pred)):
                temp_dict[vars_current_pred[k]] = temp_list[k]
        curr_list_vars = [None] * len(temp_dict)
        k = 0
        for var in temp_dict:  # renamed from `key`, which shadowed the outer loop variable
            curr_list_vars[k] = var
            k = k + 1
        formula_vars[curr_formula] = temp_dict
        original_list_vars = copy.deepcopy(curr_list_vars)
        generate_ground_formulas_rec(curr_formula, original_list_vars, curr_list_vars, 0)
    # ---------------- Dealing with deterministic variables ----------------
    # for j in range(len(det_clauses)):
    #     formulas[det_clauses[j]] = float("inf")
    #     det_clauses[j] = det_clauses[j].replace(" ", "")
    #     if det_clauses[j][0] == '!':
    #         if det_clauses[j][1:len(det_clauses[j])] in ground_formula_dict.keys():
    #             ground_formula_dict.pop(det_clauses[j][1:len(det_clauses[j])], None)
    #             global_count_formulas = global_count_formulas - 1
    #     else:
    #         if "!" + det_clauses[j] in ground_formula_dict.keys():
    #             ground_formula_dict.pop("!" + det_clauses[j], None)
    #             global_count_formulas = global_count_formulas - 1
    #     if not det_clauses[j] in ground_formula_dict:
    #         global_count_formulas = global_count_formulas + 1
    #         ground_formula_dict[det_clauses[j]] = det_clauses[j]
    # global_count_formulas = global_count_formulas + 1
    curr_count = 1
    for i in range(num_vars):
        if not (rev_var_mappings[i + 1] in det_clauses
                or "!" + rev_var_mappings[i + 1] in det_clauses):
            dict_vars_no_evid[i + 1] = curr_count
            var_mappings_updated[rev_var_mappings[i + 1]] = curr_count
            curr_count = curr_count + 1
    num_vars = curr_count - 1
    # ---------------- Writing the DIMACS file ----------------
    out_file1 = open("temp_dimacs.cnf", "w")
    out_file2 = open("temp_dimacs.cnf.saucy", "w")
    out_file1.write("p wcnf " + str(num_vars) + " " + str(global_count_formulas) + "\n")
    out_file2.write("p wcnf " + str(num_vars) + " " + str(global_count_formulas) + "\n")
    out_file1.write("c variable mappings:\n")
    sorted_vars = sorted(var_mappings_updated.items(), key=operator.itemgetter(1))
    for j in range(len(sorted_vars)):
        out_file1.write("c " + str(sorted_vars[j][1]) + " " + str(sorted_vars[j][0]) + "\n")
    out_file1.write("c clauses:\n")
    color_dict = {}
    color_formula_dict = {}
    color = 2
    omitted_lines = 0
    for key in ground_formula_dict:
        original_formula = ground_formula_dict[key]
        weight = formulas[original_formula]
        key_with_spaces = key.replace(")v", ") v ")
        clauses_curr_formula = key_with_spaces.split(' ')
        if str(weight) == '+w':
            weight = str(random.normalvariate(0, 0.001))
        if str(weight) == '+W':
            weight = str(0.2 + random.normalvariate(0, 0.001))
        formula_var_number_form = str(weight) + " "
        formula_color_form = " "
        var_dupl = {}
        omit_line = False
        omit_variable = False
        for j in range(0, len(clauses_curr_formula), 2):
            flag_positive = True
            omit_variable = False
            # Changed part: evidence and context now have the same behaviour
            if (clauses_curr_formula[j] in det_clauses
                    or "!" + clauses_curr_formula[j] in det_clauses):
                if len(clauses_curr_formula) > 2:
                    if omit_line == False:
                        omit_line = True
                        omitted_lines = omitted_lines + 1
                if clauses_curr_formula[j] in det_clauses:
                    if omit_line == False:
                        omit_line = True
                        omitted_lines = omitted_lines + 1
                if clauses_curr_formula[j][0] == '!':
                    check_clause = clauses_curr_formula[j][1:len(clauses_curr_formula[j])]
                else:
                    check_clause = "!" + clauses_curr_formula[j]
                if check_clause in det_clauses:
                    omit_variable = True
            if clauses_curr_formula[j][0] == '!':
                flag_positive = False
                clauses_curr_formula[j] = clauses_curr_formula[j][1:len(clauses_curr_formula[j])]
            var_number = var_mappings[clauses_curr_formula[j]]
            if flag_positive == False:
                var_number = var_number * -1
            # duplicate/tautology bookkeeping (indentation reconstructed from
            # the flattened original): a complementary literal omits the whole
            # line, a repeated literal omits just this variable
            if -1 * var_number in var_dupl.keys():
                get_sign = var_dupl[-1 * var_number]
                if get_sign == True:
                    if omit_line == False:
                        omit_line = True
                        omitted_lines = omitted_lines + 1
                    else:
                        omit_variable = True
                else:
                    var_dupl[-1 * var_number] = False
            else:
                if var_number in var_dupl.keys():
                    get_sign = var_dupl[var_number]
                    if get_sign == False:
                        if omit_line == False:
                            omit_line = True
                            omitted_lines = omitted_lines + 1
                    else:
                        omit_variable = True
                else:
                    var_dupl[var_number] = True
            if omit_variable == False and omit_line == False:
                if var_number < 0:
                    updated_var_number = -1 * dict_vars_no_evid[-1 * var_number]
                else:
                    updated_var_number = dict_vars_no_evid[var_number]
                formula_color_form = formula_color_form + str(updated_var_number) + " "
                formula_var_number_form = formula_var_number_form + str(updated_var_number) + " "
        formula_var_number_form = formula_var_number_form + "0\n"
        formula_color_form = formula_color_form + "0\n"
        split_formula_color_form = formula_color_form.split(" ")
        if len(split_formula_color_form) <= 2:
            # the clause became empty after omissions
            omit_line = True
            omitted_lines = omitted_lines + 1
        if omit_line == False:
            if weight in color_dict.keys():
                curr_color = color_dict[weight]
            else:
                color_dict[weight] = color
                curr_color = color
                color = color + 1
            formula_color_form = str(curr_color) + formula_color_form
            if formula_color_form in color_formula_dict.keys():
                omitted_lines = omitted_lines + 1
            color_formula_dict[formula_color_form] = curr_color
        if omit_line == False:
            out_file1.write("c " + str(weight) + " " + key_with_spaces + "\n")
            out_file1.write(formula_var_number_form)
    sorted_color_list = sorted(color_formula_dict.items(), key=operator.itemgetter(1))
    for j in range(len(sorted_color_list)):
        out_file2.write(sorted_color_list[j][0])
    out_file1.close()
    out_file2.close()
    # Rewrite both files with the clause count corrected for omitted lines
    infile1 = open("temp_dimacs.cnf", "r")
    out_file1 = open("dimacs.cnf", "w")
    lines = infile1.readlines()
    out_file1.write("p wcnf " + str(num_vars) + " " + str(global_count_formulas - omitted_lines) + "\n")
    for i in range(1, len(lines)):
        out_file1.write(lines[i])
    infile1.close()
    out_file1.close()
    infile2 = open("temp_dimacs.cnf.saucy", "r")
    out_file2 = open("dimacs.cnf.saucy", "w")
    lines = infile2.readlines()
    out_file2.write("p wcnf " + str(num_vars) + " " + str(global_count_formulas - omitted_lines) + "\n")
    for i in range(1, len(lines)):
        out_file2.write(lines[i])
    infile2.close()
    out_file2.close()
    return
def bwalk(min, max, std):
    """Generates a bounded random walk."""
    rng = max - min
    while True:
        max += normalvariate(0, std)
        yield abs((max % (rng * 2)) - rng) + min
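# A minimal usage sketch (not from the original source): take some steps of
# the bounded walk and check they stay inside [min, max]. Assumes
# `from random import normalvariate` is in scope for bwalk.
walk = bwalk(10.0, 20.0, 1.0)
steps = [next(walk) for _ in range(1000)]
assert all(10.0 <= s <= 20.0 for s in steps)  # the modulo/abs fold keeps values in bounds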
def __call__(self, *args, **kwargs):
    import random
    return random.normalvariate(self.Mu, self.Sigma)
# Write your code here :-)
import random

print(random.normalvariate(68.99802084636, 2.8022106094))  # 69.16852724981656
def mutate(self, x, y):
    # assumes `from math import sqrt` and `from random import normalvariate`
    return (abs(normalvariate(x, sqrt(sqrt(x)))),
            abs(normalvariate(y, sqrt(sqrt(y)))))
image_color = randomColor(image)
saveImage(image_color, directory + '/' + fname + '/thumbnail_color.jpg')

# pca jittering
# print(img)
img = np.array(image)
img = img / 255.0
img_size = img.size // 3
img1 = img.reshape(img_size, 3)
img1 = np.transpose(img1)
img_cov = np.cov([img1[0], img1[1], img1[2]])
lamda, p = np.linalg.eig(img_cov)
p = np.transpose(p)
alpha1 = random.normalvariate(0, 3)
alpha2 = random.normalvariate(0, 3)
alpha3 = random.normalvariate(0, 3)
v = np.transpose((alpha1 * lamda[0], alpha2 * lamda[1], alpha3 * lamda[2]))
add_num = np.dot(p, v)
img2 = np.array([
    img[:, :, 0] + add_num[0],
    img[:, :, 1] + add_num[1],
    img[:, :, 2] + add_num[2]
]) * 255
img2 = np.swapaxes(img2, 0, 2)
img2 = np.swapaxes(img2, 0, 1)
# print(img2)
# image_pca = Image.fromarray(np.uint8(img2))
misc.imsave(directory + '/' + fname + '/thumbnail_pca.jpg', img2)
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)

# loss
loss = tf.reduce_sum(tf.square(linear_model - y))  # sum of the squares

# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.001)
train = optimizer.minimize(loss)

# training data
K = random.randint(-10, 10)
Y0 = random.randint(-10, 10)
x_train = [i for i in range(10)]
y_train = [K * x + Y0 + random.normalvariate(mu=0, sigma=1) for x in x_train]

# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)  # reset values to wrong
for i in range(1000):
    sess.run(train, {x: x_train, y: y_train})

# evaluate training accuracy
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
print("K: %s Y0: %s" % (K, Y0))
print("W: %s b: %s loss: %s" % (curr_W, curr_b, curr_loss))
print(x_train)
print(y_train)
def gen(nsample):
    # Same mean and standard deviation as the ARMA residuals
    return [normalvariate(np.average(arma.resid), np.std(arma.resid))
            for i in range(nsample)]
def rand_seq(target):
    seq_len = int(random.normalvariate(len(target), len(target) / 10))
    return ''.join(random.choice('ACGT') for _ in range(seq_len))
def generate(target, std=5):
    return random.normalvariate(target, std)
def sample(self):
    return {self.value: random.normalvariate(self.mean, self.std)}
def get_signal(n: int = 1001) -> List[Tuple[float, float]]:
    fun = lambda x: 1 / (pow(x, 2) - 3 * x + 2)
    system = lambda f_x, delta: (-100 + delta if f_x < -100
                                 else f_x + delta if abs(f_x) <= 100
                                 else 100 + delta)
    signal = [(x, system(fun(x), delta))
              for x, delta in [(3 * k / 1000., normalvariate(0, 1)) for k in range(n)]]
    return signal
import random

a = random.random()
print(a)

a = random.uniform(1, 10)
print(a)

a = random.randint(1, 10)  # 10 included
print(a)

a = random.randrange(1, 10)  # 10 excluded
print(a)

a = random.normalvariate(0, 1)
print(a)

mylist = list("ABSCDEFG")
print(mylist)

a = random.choice(mylist)
print(a)

a = random.sample(mylist, 5)
print(a)

a = random.choices(mylist, k=5)
print(a)

a = random.shuffle(mylist)  # shuffles in place and returns None
print(mylist)
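# A small follow-on sketch (not from the original source): seeding the
# generator makes every call above reproducible, which is handy for tests.
random.seed(42)
first = [random.normalvariate(0, 1) for _ in range(3)]
random.seed(42)
second = [random.normalvariate(0, 1) for _ in range(3)]
assert first == second  # identical sequences after reseeding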
#!/usr/local/bin/python
"""
the "hello world" service

here just adds randomized numbers with normal distribution

in a real-world example, this could be e.g. a thermometer connected to the
provider's machine providing its inputs into the database or some other
piece of information from some external source that changes over time and
which can be expressed as a singular value

[ part of the VM image that's deployed by the runtime on the Provider's end. ]
"""
import os
from pathlib import Path
import random
import time

MU = 14
SIGMA = 3

SERVICE_PATH = Path(__file__).absolute().parent / "simple_service.py"

while True:
    v = random.normalvariate(MU, SIGMA)
    os.system(f"{SERVICE_PATH} --add {v}")
    time.sleep(1)
def get_points(num=80):
    # return zip([random.random() for x in range(num)], [random.random() for x in range(num)])
    return zip([random.normalvariate(0.5, 0.1) for x in range(num)],
               [random.normalvariate(0.5, 0.1) for x in range(num)])
def normal(self, sigma):
    return lambda: random.normalvariate(0, sigma)
# import the required modules.
from random import normalvariate
import vfl

# create a model.
mdl = vfl.model.VFR(
    alpha0=1000,
    beta0=2.5,
    nu=1e-3,
    data=vfl.Data(file='sinc.dat'),
    factors=[vfl.factor.Impulse(mu=0, tau=0.01) for i in range(10)])

# randomize the factor means.
for f in mdl:
    f.mu = normalvariate(0, 2.5)

# create an optimizer.
opt = vfl.optim.FullGradient(model=mdl, lipschitz_init=0.0001)

# optimize.
opt.execute()

# build gridded datasets for prediction.
G = [[-10, 1e-3, 10]]
mean = vfl.Data(grid=G)
var = vfl.Data(grid=G)

# compute the model prediction.
mdl.predict(mean=mean, var=var)

# write the prediction results.
def __Levyfly(self, step, Pbest, n, dimension):
    for i in range(n):
        stepsize = 0.2 * step * (self.__agents[i] - Pbest)
        self.__agents[i] += stepsize * np.array(
            [normalvariate(0, 1) for k in range(dimension)])
def generate_snowflakes(self):
    new_snows = int(random.normalvariate(self.fall_rate, self.fluctuation))
    if new_snows <= 0:
        return
    for _ in range(new_snows):
        self.snowflake_collection.add(Snowflake(self.snowflake_collection))