def run(self):
    self.cloud_name = self.group['cloud']
    if self.group['failure_rate'] == "None":
        # No failures, shut the simulator
        LOG.info("Failure-Simulator-%s: failure rate is set to None. Terminating simulator" % (self.cloud_name))
        self.stop_event.set()
        return
    self.failure_rate = float(self.group['failure_rate'])
    self.interval = random.expovariate(self.failure_rate)
    while not self.stop_event.is_set():
        LOG.info("Failure-Simulator-%s: sleeping for %d sec" % (self.cloud_name, self.interval))
        self.stop_event.wait(self.interval)
        list_of_vms = self.get_cloud_termination_list()
        # continue as normal
        count = len(list_of_vms)
        if count > 0:
            pick = random.randint(0, count - 1)
            instance = list_of_vms[pick]
            LOG.info("Failure-Simulator-%s: terminating an instance %s (%s)"
                     % (self.cloud_name, instance.id, instance.public_dns_name))
            filelog(self.config.failure_log, "%s,TERMINATED,%s,%s"
                    % (time.time(), self.cloud_name, instance.public_dns_name))
            worker = Worker(self.config, instance)
            worker.terminate()  # terminates condor daemon and shuts down instance
        # Reset the sleep time (interval before the next failure)
        self.interval = random.expovariate(self.failure_rate)
def perturbByMax(agg, act):
    # FIXME: Incomplete
    '''
    Pick a bunch of sum nodes that feed into act and create a new sum node
    that combines them
    '''
    neighbours = [arc[0] for arc in agg.v
                  if arc[1] == act and agg.f[arc[0]] == (pyagg.FN_TYPE_WEIGHTED_MAX, 0)]
    random.shuffle(neighbours)
    n1 = neighbours[0]
    success = False
    for n2 in neighbours[1:]:
        if match(n1, n2) == 2:  # FIXME This is rough!
            success = True
            break
    if not success:
        return False
    # Add the node
    w = [random.expovariate(0.5), random.expovariate(0.5)]
    fn = merge(n1, n2)
    makeMax(agg, act, [n1, n2], w, fn)
    if testCut(agg, act, n1) is not None and testCut(agg, act, n2) is not None:
        tryCutNode(agg, act, n1)
        tryCutNode(agg, act, n2)
        return True
    elif not escape:
        tryCutNode(agg, act, fn)
    return False
def geninput(self):
    # Return a list of flows
    flowlist = []
    # print 'Generating input'
    if not self.hasInput:
        for fid in range(self.MAXNUMFLOW):
            flow = Flow.Flow()
            flow.flowId = fid
            flow.SetFlowSize(round(random.expovariate(1 / self.FLOWSIZEMEAN)))
            # deadline is in number of RTTs
            ddlinsteps = int(round(random.expovariate(1 / self.FLOWDDLMEAN) / self.RTT))
            while ddlinsteps >= self.Steps - 1:
                ddlinsteps = int(round(random.expovariate(1 / self.FLOWDDLMEAN) / self.RTT))
            flow.SetFlowDeadline(ddlinsteps)
            # print 'flow {} time for choice: {}'.format(fid, (Steps - flow.deadline))
            flow.startTime = random.choice(range(self.Steps - flow.deadline))
            # initial window
            flow.bw = 10 * PKTSIZE / self.RTT
            flow.residualRate = 0
            flowlist.append(flow)
    else:
        with open(self.inputf) as inputf:
            tempflows = json.load(inputf)
        for i in range(len(tempflows)):
            flow = Flow.Flow()
            flow.flowId = i
            flow.SetFlowSize(tempflows['{}'.format(i)]['flowSize'])
            flow.SetFlowDeadline(tempflows['{}'.format(i)]['deadline'])
            flow.startTime = tempflows['{}'.format(i)]['startTime']
            flow.bw = 10 * PKTSIZE / self.RTT
            flow.residualRate = 0
            flowlist.append(flow)
    return flowlist
def fail_simulation(n, s):
    t = 0
    r = 0
    trep = 2**64 - 1  # officially INF
    t_list = []
    for _ in xrange(n):
        t_list.append(random.expovariate(1))
    t_list.sort()
    while True:
        # t1 < tmax
        t1 = t_list[0]
        if t1 < trep:
            t = t1
            r += 1
            if r == s + 1:
                return t
            elif r < s + 1:
                x = random.expovariate(1)
                t_list.pop(0)
                t_list.append(t + x)
                t_list.sort()
                if r == 1:
                    y = random.expovariate(8)
                    trep = t + y
        else:
            t = trep
            r -= 1
            if r > 0:
                y = random.expovariate(8)
                trep = t + y
            elif r == 0:
                trep = 2**64 - 1
def test_all(self):
    for test in ['', 'a', 'b', 'abc', 'abc'*50, 'hello world']:
        #print test
        #print sha256.sha256(test).hexdigest()
        #print hashlib.sha256(test).hexdigest()
        #print
        assert sha256.sha256(test).hexdigest() == hashlib.sha256(test).hexdigest()

    def random_str(l):
        return ''.join(chr(random.randrange(256)) for i in xrange(l))

    for length in xrange(150):
        test = random_str(length)
        a = sha256.sha256(test).hexdigest()
        b = hashlib.sha256(test).hexdigest()
        #print length, a, b
        if a != b:
            print 'ERROR!'
            raise ValueError()

    for i in xrange(100):
        # use a float rate: 1/100 would be integer division (0) in Python 2
        test = random_str(int(random.expovariate(1 / 100.0)))
        test2 = random_str(int(random.expovariate(1 / 100.0)))
        a = sha256.sha256(test)
        a = a.copy()
        a.update(test2)
        a = a.hexdigest()
        b = hashlib.sha256(test)
        b = b.copy()
        b.update(test2)
        b = b.hexdigest()
        #print a, b
        if a != b:
            print 'ERROR!'
            raise ValueError()
def get_flow_duration(flow_type):
    """
    Makes a choice on the flow duration, depending on the flow_type
    :param flow_type: 'm' (mice) or 'e' (elephant)
    :return: float representing the flow duration in seconds
    """
    # Flow duration ranges
    min_len_elephant = 20
    max_len_elephant = 50  # 500
    min_len_mice = 0.2
    max_len_mice = 6

    # flow is elephant
    if flow_type == 'e':
        # Draw flow duration
        return random.uniform(min_len_elephant, max_len_elephant)

    # flow is mice
    elif flow_type == 'm':
        # Draw flow duration
        # return random.uniform(min_len_mice, max_len_mice)
        mean = 4.0
        ep = random.expovariate(1 / mean)
        while ep > max_len_mice:
            ep = random.expovariate(1 / mean)
        return ep

    else:
        raise ValueError("Unknown flow type: {0}".format(flow_type))
def bankQueue(timeMax=600):  # t in minutes
    ta = 0        # time to next arrival in minutes
    ts = 0        # time to end of next service in minutes
    q = 1         # number of people in the queue
    c = 0         # time on the clock in minutes
    nserved = 0   # total number of people served
    cs = []
    ss = []
    qs = []
    while c < timeMax:
        if q == 0:
            c += ta
            q += 1
            ta = random.expovariate(1. / avgArrive)
        elif ta < ts:
            ts -= ta
            c += ta
            q += 1
            ta = random.expovariate(1. / avgArrive)
        else:
            ta -= ts
            c += ts
            q -= 1
            ts = random.expovariate(1. / avgService)
            nserved += 1
        cs.append(c / 60.)
        ss.append(nserved)
        qs.append(q)
    # end while
    p = biggles.FramedPlot()
    c1 = biggles.Curve(cs, ss, color='red')
    c2 = biggles.Curve(cs, qs, color='blue')
    p.add(c1, c2)
    p.show()
def get_precipitation_event_duration(self):
    """This method is the storm generator.

    This method has one argument: the mean_storm parameter.
    (In Eagleson (1978), this parameter was called Tr.)

    It finds a random storm_duration value drawn from an exponential
    distribution about the mean. This is accomplished using the expovariate
    function from the "random" standard library. Additionally, the value is
    rounded to two decimal places, for neatness.

    The while loop is very important here. Values of 0 can come out of the
    exponential distribution, but it does not make sense to have 0-duration
    storms, so new values are drawn until one greater than 0 is obtained.

    :returns: storm_duration as a float
    """
    storm = round(random.expovariate(1 / self.mean_storm), 2)
    while storm == 0:
        storm = round(random.expovariate(1 / self.mean_storm), 2)
    self.storm_duration = storm
    return self.storm_duration
def get_interstorm_event_duration(self):
    """This method is the interstorm duration generator.

    This method takes one argument, the mean_interstorm parameter.
    (In Eagleson (1978), this parameter was called Tb.)

    This method is modeled identically to get_precipitation_event_duration().
    It finds a random value for interstorm_duration drawn from an exponential
    distribution about the mean. This is accomplished using the expovariate
    function from the "random" standard library. Additionally, the value is
    rounded to two decimal places, for neatness.

    The while loop is very important here. Values of 0 can come out of the
    exponential distribution, but it does not make sense to have 0-hour
    interstorm durations. To avoid them, new values are drawn until one
    greater than 0 is obtained.

    :returns: interstorm_duration as a float
    """
    interstorm = round(random.expovariate(1 / self.mean_interstorm), 2)
    while interstorm == 0:
        interstorm = round(random.expovariate(1 / self.mean_interstorm), 2)
    self.interstorm_duration = interstorm
    return self.interstorm_duration
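# A minimal standalone sketch (not part of the generator class above) of the
# draw-and-reject pattern used by the two duration generators: sample from an
# exponential distribution with the given mean and resample whenever the rounded
# value is 0. The function name and the 2.0-hour mean are illustrative assumptions.
import random

def draw_positive_duration(mean_hours=2.0):
    duration = round(random.expovariate(1.0 / mean_hours), 2)
    while duration == 0:
        duration = round(random.expovariate(1.0 / mean_hours), 2)
    return duration

print([draw_positive_duration() for _ in range(5)])  # e.g. five storm-like durations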
def getSource(new_S, multiplier, lambd):
    # Return the index of the source that has to provide a value for a specific data item
    i = int(random.expovariate(lambd) * multiplier)
    while i >= len(new_S):
        i = int(random.expovariate(lambd) * multiplier)
    s = new_S[i]
    return int(s.replace('source', ''))
def getFalseValue(provided_false_values, bin, lambda_bin_index):
    '''Pick a false value from the set.

    The probability of selecting a false value that was already provided is
    higher than that of selecting a new one; provided_false_values is used
    for this reason.
    '''
    value = 'null'
    if (len(set(provided_false_values)) < max_false_domain_cardinality) and (random.random() < 0.4):
        # selection not among VALUES ALREADY PROVIDED
        bin_index = int(random.expovariate(lambda_bin_index))
        while bin_index < 0 or bin_index >= len(bin):
            bin_index = int(random.expovariate(lambda_bin_index))
        if len(bin[bin_index]) == 0:
            return None
        if len(bin[bin_index]) == 1:
            value_index = 0
        else:
            # random.randint(a, b) returns a random integer N such that a <= N <= b
            value_index = int(random.randint(0, len(bin[bin_index]) - 1))
        value = (bin[bin_index])[value_index]
    else:
        # selection among values already provided
        if len(provided_false_values) > 0:
            if len(provided_false_values) == 1:
                value_index = 0
            else:
                value_index = random.randint(0, len(provided_false_values) - 1)
            value = provided_false_values[value_index]
    provided_false_values.append(value)
    return [value, provided_false_values]
def infect(self, t, mu_input, sigma_e, dur_p, Dmax, D50, Dk, dur_d, rho,
           beta_p, betamax, beta50, betak, beta_d):
    self.Infected = True
    self.mu = mu_input
    self.timeInfection = t
    print("timeInfection: " + str(t))
    print("mu: " + str(self.mu))

    # def setViralLoad(self, sigma_e):
    self.ViralLoad = random.normalvariate(self.mu, sigma_e)
    print("ViralLoad: " + str(self.ViralLoad))

    # def setTimes(self, dur_p, Dmax, D50, Dk, dur_d, rho):
    self.timeAsymptomatic = nextStage(self.timeInfection, self.timeDeath, random.expovariate(1.0 / dur_p))
    print("timeAsymptomatic: " + str(self.timeAsymptomatic))
    expectedDurationAsymptomatic = Dmax * pow(D50, Dk) / (pow(10, self.ViralLoad * Dk) + pow(D50, Dk))
    print("expectedDurationAsymptomatic: " + str(expectedDurationAsymptomatic))
    actualDurationAsymptomatic = (expectedDurationAsymptomatic
                                  * pow(-math.log(random.uniform(0, 1)), 1.0 / rho)
                                  / math.gamma(1 + 1.0 / rho))
    self.timeDisease = nextStage(self.timeAsymptomatic, self.timeDeath, actualDurationAsymptomatic)
    print("timeDisease: " + str(self.timeDisease))
    self.timeDeathAIDS = nextStage(self.timeDisease, self.timeDeath, random.expovariate(1.0 / dur_d))
    if not (self.timeDeathAIDS is None) and self.timeDeathAIDS < self.timeDeath:
        self.timeDeath = self.timeDeathAIDS
    print("timeDeath: " + str(self.timeDeath))

    # def setInfectivity(self, beta_p, betamax, beta50, betak, beta_d):
    self.infectivityPrimary = beta_p
    self.infectivityAsymptomatic = (betamax * pow(10, self.ViralLoad * betak)
                                    / (pow(10, self.ViralLoad * betak) + pow(beta50, betak)))
    self.infectivityDisease = beta_d
def zombie_interval(self):
    if (datetime.now() - self.start_time).total_seconds() > 200:
        interval = random.expovariate(1 / 250)
    else:
        interval = 1000 - 50 * self.kills
        interval = random.expovariate(1 / max(interval, 500))
    self.timer.setInterval(interval)
def correlated_capacities(num_resources, min_fill, dev=.05, rem_cons=1.0,
                          correlated_items=False, minr=MIN_RES, maxr=MAX_RES):
    """
    Generates a bin.
    Bin capacities are correlated, and if correlated_items is True item
    requirements are correlated.
    Either Volume(items) > min_fill or a 0-weighted item was generated.
    base*dev is the standard deviation.
    Item capacities are generated in [0, rem_cons*b.remaining].
    """
    mf = min(min_fill, 1 - 1e-15)  # helps getting rid of numerical instabilities
    base = random.randint(minr, maxr)
    mean = base * dev
    lbd = 1.0 / mean
    bin_cap = [max(0, int(round(base + (random.expovariate(lbd) - mean)))) for x in xrange(num_resources)]
    b = Bin(bin_cap)
    item_vol = 0.0
    items = []
    while item_vol < mf:
        if correlated_items:
            for tr in xrange(MAX_TRY):
                base = random.randint(1, int(rem_cons * min(bin_cap)))
                mean = base * dev
                lbd = 1.0 / mean
                it_res = [max(0, int(round(base + (random.expovariate(lbd) - mean)))) for x in xrange(num_resources)]
                vl = update(items, it_res, b, item_vol)
                if vl:
                    break
            item_vol = vl
        else:
            it_res = [random.randint(0, int(rem_cons * x)) for x in b.remaining]
            item_vol = update(items, it_res, b, item_vol)
        if not item_vol:
            break
    return items, b
def generate_site_mutations(tree, position, mu, site_table, mutation_table, multiple_per_node=True):
    """
    Generates mutations for the site at the specified position on the
    specified tree. Mutations happen at rate mu along each branch. The site
    and mutation information are recorded in the specified tables. Note that
    this records more than one mutation per edge.
    """
    assert tree.interval[0] <= position < tree.interval[1]
    states = {"A", "C", "G", "T"}
    state = random.choice(list(states))
    site_table.add_row(position, state)
    site = site_table.num_rows - 1
    stack = [(tree.root, state, msprime.NULL_MUTATION)]
    while len(stack) != 0:
        u, state, parent = stack.pop()
        if u != tree.root:
            branch_length = tree.branch_length(u)
            x = random.expovariate(mu)
            new_state = state
            while x < branch_length:
                new_state = random.choice(list(states - set(state)))
                if multiple_per_node and (state != new_state):
                    mutation_table.add_row(site, u, new_state, parent)
                    parent = mutation_table.num_rows - 1
                    state = new_state
                x += random.expovariate(mu)
            else:
                if (not multiple_per_node) and (state != new_state):
                    mutation_table.add_row(site, u, new_state, parent)
                    parent = mutation_table.num_rows - 1
                    state = new_state
        stack.extend(reversed([(v, state, parent) for v in tree.children(u)]))
def execute(self, maxCompletions):
    while Task.completed < maxCompletions:
        self.debug(" starts thinking")
        thinktime = ran.expovariate(1.0 / MeanThinkTime)
        yield hold, self, thinktime
        self.debug(" request cpu")
        yield request, self, cpu
        self.debug(" got cpu")
        CPUtime = ran.expovariate(1.0 / MeanCPUTime)
        yield hold, self, CPUtime
        yield release, self, cpu
        self.debug(" finish cpu")
        while ran.random() < pDisk:
            self.debug(" request disk")
            yield request, self, disk
            self.debug(" got disk")
            disktime = ran.expovariate(1.0 / MeanDiskTime)
            yield hold, self, disktime
            self.debug(" finish disk")
            yield release, self, disk
            self.debug(" request cpu")
            yield request, self, cpu
            self.debug(" got cpu")
            CPUtime = ran.expovariate(1.0 / MeanCPUTime)
            yield hold, self, CPUtime
            yield release, self, cpu
        Task.completed += 1
        self.debug(" completed %d tasks" % (Task.completed,))
    Task.rate = Task.completed / float(now())
def similar_items(num_resources, min_fill, base_item, dev=.05, minr=MIN_RES, maxr=MAX_RES):
    """
    Generates a bin.
    Bin capacities are not correlated.
    All items are similar to the given base item.
    base*dev is the standard deviation.
    """
    mf = min(min_fill, 1 - 1e-15)  # helps getting rid of numerical instabilities
    base = random.randint(minr, maxr)
    mean = base * dev
    lbd = 1.0 / mean
    bin_cap = base_item.requirements[:]
    for i, v in enumerate(bin_cap):
        base = 5 * v
        mean = base * dev
        lbd = 1.0 / mean
        bin_cap[i] = max(0, int(round(base + (random.expovariate(lbd) - mean))))
    b = Bin(bin_cap)
    item_vol = 0.0
    items = []
    vl = 0
    while item_vol < mf:
        for tr in xrange(MAX_TRY):
            it_res = base_item.requirements[:]
            for i, base in enumerate(it_res):
                mean = base * dev
                lbd = 1.0 / mean
                it_res[i] = max(0, int(round(base + (random.expovariate(lbd) - mean))))
            vl = update(items, it_res, b, item_vol)
            if vl:
                break
        item_vol = vl
        if not item_vol:
            break
    return items, b
def scrape_save_season(start_year=2014):
    num_games = 30 * 41
    season_string = "{0}{1}".format(start_year, start_year + 1)
    make_dir('../data')
    make_dir('../data/html')
    make_dir('../data/html/%s' % season_string)
    for game in range(742, num_games + 1):
        print "Downloading game: %d" % game
        make_dir('../data/html/%s/%04d' % (season_string, game))
        play_by_play_html = urllib.urlopen("http://www.nhl.com/scores/htmlreports/%s/PL02%04d.HTM" % (season_string, game)).read()
        time.sleep(random.expovariate(1))
        home_toi_html = urllib.urlopen('http://www.nhl.com/scores/htmlreports/%s/TH02%04d.HTM' % (season_string, game)).read()
        time.sleep(random.expovariate(1))
        away_toi_html = urllib.urlopen('http://www.nhl.com/scores/htmlreports/%s/TV02%04d.HTM' % (season_string, game)).read()
        out_file = open('./../data/html/%s/%04d/play_by_play.html' % (season_string, game), 'w')
        out_file.write(play_by_play_html)
        out_file.close()
        out_file = open('./../data/html/%s/%04d/home_toi.html' % (season_string, game), 'w')
        out_file.write(home_toi_html)
        out_file.close()
        out_file = open('./../data/html/%s/%04d/away_toi.html' % (season_string, game), 'w')
        out_file.write(away_toi_html)
        out_file.close()
        wait = 10 + random.expovariate(0.1)
        print "Wait: ", wait
        time.sleep(wait)
def run_belt(self):  # For first belt
    time_new_cassette = self.params['time_new_cassette']
    cassette_size = self.params['cassette_size']
    cassette = yield self.input.input.get()  # receive first cassette
    wafer_counter = cassette_size
    mtbf_enable = self.mtbf_enable
    if mtbf_enable:
        mtbf = 1 / (3600 * self.params['mtbf'])
        mttr = 1 / (60 * self.params['mttr'])
    while True:
        if mtbf_enable and self.env.now >= self.next_failure:
            self.downtime_duration = random.expovariate(mttr)
            #print(str(self.env.now) + "- [" + self.params['type'] + "] MTBF set failure - maintenance needed for " + str(round(self.downtime_duration/60)) + " minutes")
            self.downtime_finished = self.env.event()
            self.maintenance_needed = True
            yield self.downtime_finished
            self.next_failure = self.env.now + random.expovariate(mtbf)
            #print(str(self.env.now) + "- [" + self.params['type'] + "] MTBF maintenance finished - next maintenance in " + str(round((self.next_failure - self.env.now)/3600)) + " hours")
        yield self.next_step
        if not wafer_counter:
            yield self.input.output.put(cassette)  # return empty cassette
            cassette = yield self.input.input.get()  # receive new cassette
            wafer_counter = cassette_size
            yield self.env.timeout(time_new_cassette)
        if not self.belts[0][0]:
            self.belts[0][0] = True
            wafer_counter -= 1
def __init__(self, space_width, space_height, animal_count, pred_count, genomes):
    self.pool = Pool()
    self.manager = Manager()
    self.width = space_width
    self.height = space_height
    self.genomes = genomes
    self.predators = [
        Predator(
            random.randrange(0, self.width, 1),
            random.randrange(0, self.height, 1)
        )
        for _ in range(pred_count)
    ]
    self.animals = [
        Animal(random.choice(genomes), PHEROMONES)
        for _ in range(animal_count)
    ]
    self.objects = [
        Food(
            random.randrange(0, self.width, 1),
            random.randrange(0, self.height, 1),
            10
        )
        for _ in range(animal_count)
    ]
    for a in self.animals:
        a.teleport(
            random.randrange(0, self.width, 1),
            random.randrange(0, self.height, 1),
            random.uniform(0, 6.28)
        )
    self.next_food = random.expovariate(1.0 / FOOD_PERIOD)
    self.next_poison = random.expovariate(1.0 / POISON_PERIOD)
    self.pheromones = self.manager.list([])
    self.next_breed = random.expovariate(1.0 / BREEDING_PERIOD)
    self.new_genomes = []
def execute(self):
    yield hold, self, random.expovariate(1.0 / G.MEAN_TIME_BETWEEN_REQUEST)
    while True:
        #print self.ID, 'CHECK csma len: ', len(Cont.CSMA.activeQ), 'chan len: ', len(Cont.Channel.activeQ)
        if (not len(Cont.CSMA.activeQ) == G.NUM_CHAN) and (not len(Cont.Channel.activeQ) == G.NUM_CHAN):
            #print self.ID, 'REQ csma len: ', len(Cont.CSMA.activeQ), 'chan len: ', len(Cont.Channel.activeQ)
            yield request, self, Cont.CSMA
            yield hold, self, 0.01
            if self.interrupted():
                G.LossCount += 1
                yield release, self, Cont.CSMA
                #print self.ID, 'was interrupted! -- Collision occured'
                #G.Count += 1
                yield hold, self, random.expovariate(1.0 / G.MEAN_BACKOFF)
            else:
                #print self.ID, 'no collision, through'
                #print G.ThroughCount
                G.ThroughCount += 1
                yield release, self, Cont.CSMA
                #print self.ID, 'csma len: ', len(Cont.CSMA.activeQ), 'REQ chan len: ', len(Cont.Channel.activeQ)
                yield request, self, Cont.Channel
                yield hold, self, random.expovariate(1.0 / G.MEAN_TIME_HOLD_CHANNEL)
                yield release, self, Cont.Channel
                yield hold, self, random.expovariate(1.0 / G.MEAN_TIME_BETWEEN_REQUEST)
        elif (len(Cont.CSMA.activeQ) == G.NUM_CHAN) and (not len(Cont.Channel.activeQ) == G.NUM_CHAN):
            for D in Cont.CSMA.activeQ:
                #print self.ID, 'is interrupting!'
                self.interrupt(D)
            yield hold, self, random.expovariate(1.0 / G.MEAN_BACKOFF)
def gen_restock_list(cat):
    np = NeoPage(base_url='https://items.jellyneo.net')
    path = '/search/'
    args = []
    args.append(f'cat[]={cat}')
    args.append('min_rarity=1')
    args.append('max_rarity=100')
    args.append('status=1')
    args.append('sort=5')
    args.append('sort_dir=desc')
    start = 0
    while True:
        np.get(path, *args, f'start={start}')
        time.sleep(min(60, random.expovariate(30)) + 60)
        last_page = not re.search(r'<li class="arrow"><a href=".*?">»</a>', np.content)
        referer = np.referer
        results = re_jn_item.findall(np.content)
        for item_id, name, price_updated, price in results:
            if price == 'Inflation Notice':
                np.set_referer(referer)
                np.get(f'/item/{item_id}')
                time.sleep(min(60, random.expovariate(30)) + 60)
                price = re.search('<div class="price-row">(.*?) NP', np.content)[1]
            try:
                price = lib.amt(price)
            except (ValueError, TypeError, IndexError):
                # safe assumption that unpriceable items are at least 10M NP?
                price = 10000001
            print(f'\'{name}\': {price},')
        start += 50
        if last_page:
            break
def generate(self, service):
    index = 0
    global buffer_size
    global dropped_packets
    buffer_size = 10
    print('Buffer Size is %d' % (buffer_size))
    while index < 6:
        for x in range(1, total_packets):
            arrivalTime = random.expovariate(lambda_list[index])
            packet = Packet(env, arrivalTime, x, service)
            yield env.timeout(arrivalTime)
        print('Lambda: %f : Packet Loss Probability: %f' % (lambda_list[index], dropped_packets * 1.0 / total_packets))
        dropped_packets = 0
        index = index + 1
    index = 0
    buffer_size = 50
    print('Buffer Size is %d' % (buffer_size))
    while index < 6:
        for x in range(1, total_packets):
            arrivalTime = random.expovariate(lambda_list[index])
            packet = Packet(env, arrivalTime, x, service)
            yield env.timeout(arrivalTime)
        print('Lambda: %f : Packet Loss Probability: %f' % (lambda_list[index], dropped_packets * 1.0 / total_packets))
        dropped_packets = 0
        index = index + 1
def main():
    # initialize defaults
    meanoff = 1.0   # default mean off is 1 second
    meanon = 20.0   # default mean on is 20 seconds

    opts, args = getopt.getopt(sys.argv[1:], 'h', ['mean-off=', 'mean-on='])
    name = None
    for o, a in opts:
        if o == '-h':
            print usage()
            sys.exit(0)
        elif o == '--mean-off':
            meanoff = float(a)
        elif o == '--mean-on':
            meanon = float(a)
        else:
            print "Error parsing command-line option: unknown/unrecognized option %s" % o
            #print >> sys.stderr, usage()
            sys.exit(1)

    if len(args):
        print "Error parsing command-line arguments: no arguments allowed"
        #print >> sys.stderr, usage()
        sys.exit(2)

    seed()
    while 1:
        print time()
        system("cnistnet -a 10.2.34.150 192.168.5.10 --drop 0%; cnistnet -a 192.168.5.10 10.2.34.150 --drop 0%")
        sleep(expovariate(1.0 / meanon))
        print time()
        system("cnistnet -a 10.2.34.150 192.168.5.10 --drop 100%; cnistnet -a 192.168.5.10 10.2.34.150 --drop 100%")
        sleep(expovariate(1.0 / meanoff))
def req_generator(topology, n_contents, alpha, seed, rate=12.0,
                  duration_warmup=9000, duration_real=36000):
    """This function generates events on the fly, i.e. instead of creating an
    event schedule to be kept in memory, it returns an iterator that generates
    events when needed.

    This is useful for running large schedules of events where RAM is limited,
    as its memory impact is considerably lower.
    """
    random.seed(seed)
    rate = 4.0
    warmup = 0
    duration = 25000000
    receivers = [v for v in topology.nodes_iter()
                 if topology.node[v]['stack'][0] == 'receiver']
    zipf = ZipfDistribution(alpha, n_contents)
    t_event = expovariate(rate)
    while t_event < warmup + duration:
        recv = choice(receivers)
        content = int(zipf.rand_val())
        log = (t_event > warmup)
        event = {'receiver': recv, 'content': content, 'log': log}
        yield (t_event, event)
        t_event += expovariate(rate)
    raise StopIteration()
def sample_rate_matrix(fs, Q_mut):
    nstates = len(Q_mut)
    # sample the selection parameters
    if fs.really_low_var:
        v = 0.04
    elif fs.low_var:
        v = 0.2
    elif fs.medium_var:
        v = 1
    elif fs.high_var:
        v = 5.0
    elif fs.really_high_var:
        v = 25.0
    s = math.sqrt(v)
    if fs.neg_skew:
        sels = [-random.expovariate(1 / s) for i in range(nstates)]
    elif fs.no_skew:
        sels = [random.gauss(0, s) for i in range(nstates)]
    elif fs.pos_skew:
        sels = [random.expovariate(1 / s) for i in range(nstates)]
    # define the mutation-selection rate matrix using Halpern-Bruno
    Q = np.zeros_like(Q_mut)
    for i in range(nstates):
        for j in range(nstates):
            if i != j:
                tau = math.exp(-(sels[j] - sels[i]))
                coeff = math.log(tau) / (1 - 1 / tau)
                Q[i, j] = Q_mut[i, j] * coeff
    for i in range(nstates):
        Q[i, i] = -np.sum(Q[i])
    return Q
def random_gen(self, filepath="random.replay", avgTransmit=30.0, avgWait=0, totalTime=1800.0):
    f = open(filepath, "w")
    replay = list()
    count = 0
    port = 1234
    for cl in self.clients:
        curTime = 0
        port = 1234
        while True:
            if avgWait == 0:
                startTime = 0
            else:
                startTime = random.expovariate(1 / avgWait)
            txTime = random.expovariate(1 / avgTransmit)
            # Generate traffic until total time
            if curTime + startTime + txTime > totalTime:
                break
            else:
                replay.append((curTime + startTime, cl.IP() + ":" + str(port), curTime + startTime + txTime))
                port += 1
                curTime += startTime + txTime
    # Write replay to file sorted by timestamp
    replay.sort()
    count = 0
    port = 1234
    for startTime, dstIP, endTime in replay:
        srcIP = self.servers[count % len(self.servers)].IP() + ":" + str(port + count)
        f.write("%f %s %s %f\n" % (startTime, srcIP, dstIP, endTime))
        count += 1
    f.close()
def simulate_once(self):
    """Simulate once.

    Returns
    -------
    The amount of operational loss simulated for the given time period.

    Example:
        r = OpRiskModel(stor4)
        lower, mu, upper = r.simulate_many()
        print "Bootstrap: ", lower, mu, upper

    Output:
        0.68% between 127760271.155 and 162467836.895
        0.8% between 122874286.419 and 167353821.63
        0.9% between 116569621.33 and 173658486.72
        0.95% between 111101264.604 and 179126843.445
        0.98% between 104743118.138 and 185484989.911
        0.99% between 100413671.581 and 189814436.469
        0.995% between 96401399.4999 and 193826708.549
        0.998% between 91486833.5654 and 198741274.484
        0.999% between 88010967.5982 and 202217140.451
        0.9999% between 77597567.1919 and 212630540.857
        0.99999% between 68459385.7079 and 221768722.341
        Bootstrap:  138714608.714 145114054.025 150873917.501
    """
    t = random.expovariate(self.params.lamb)
    loss = 0.0
    while t < self.params.days:
        t += random.expovariate(self.params.lamb)
        amount = self.params.xm * random.paretovariate(self.params.alpha)
        loss += amount
    return loss
def poisson_train(start, end, rate):
    newspike = start + rnd.expovariate(rate)
    strain = []
    while newspike < end:
        strain.append(newspike)
        newspike += rnd.expovariate(rate)
    return strain
def helper(size, mononomial):
    if size <= 1:
        if random.randint(0, 1):
            var = random.randrange(0, num_vars)
            return (OpTag.variable, var)
        k = random.expovariate(mean_constant)
        if random.randint(0, 1):
            k = int(k)
        return k
    tag = random_tag(weights, mononomial)
    if tag == OpTag.addition or tag == OpTag.product:
        a, b = random_split(size - 1)
        l = helper(a, mononomial)
        r = helper(b, mononomial)
        return (tag, l, r)
    if tag == OpTag.power:
        while True:
            k = random.expovariate(mean_exponent)
            if (not generalized) or random.randint(0, 1):
                k = int(k + 2)
            if k < 40:
                break
        x = helper(size - 1, mononomial)
        return (OpTag.power, x, k)
    if tag == OpTag.ratio:
        a, b = random_split(size - 1)
        l = helper(a, mononomial)
        r = helper(b, True)
        return (OpTag.ratio, l, r)
    assert(False)
def updateClimber(me):
    stufftodo = ['RockSchool2012', 'Seneca', 'V2']  # more later
    for stuff in stufftodo:
        time.sleep(random.expovariate(0.5))
        me.__setattr__(stuff, True)
def sample(self):
    return self.D + random.expovariate(self.mu)
def main(_):
    exp_name = "%s_scoring" % (args.name)
    local_files = Config.local_path
    log_path = local_files / exp_name
    save_path = local_files / exp_name
    data_path = os.path.join(args.data_path, args.name)
    save_path_train = f"{local_files}/{exp_name}/train"
    save_path_test = f"{local_files}/{exp_name}/test"

    if not os.path.exists(local_files):
        os.makedirs(local_files)
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if not os.path.exists(save_path_train):
        os.mkdir(save_path_train)
    if not os.path.exists(save_path_test):
        os.mkdir(save_path_test)

    train_models = [args.name]
    test_models = [args.name, 'human']

    config = Config()
    config = config_model_coco(config, args.model_architecture)
    config.max_epoch = args.epochs

    if config.random_search:
        # Parameters investigated using Random Search
        config.learning_rate = np.round(random.expovariate(100000), 10)
        config.num_layers = random.choice([1, 2, 3])
        config.dropout_prob = np.round(random.uniform(0.05, 0.5), 2)
        config.vocab_size = random.choice([3004, 5004, 10004])

    if args.name == "neuraltalk":
        load_features = True
    else:
        load_features = False

    [data_train, data_val, data_test, word_embedding] = data_loader(data_path, use_mc_samples=False,
                                                                    load_features=load_features)
    word_to_idx = data_train[f'word_to_idx']

    # if config.resize_data:
    #     data_train = resize_data(data_train, config.resize_samples)
    if not config.resize_data:
        config.resize_samples = len(data_train["file_names"])

    print("Model architecture:%s" % (args.model_architecture))

    with tf.Graph().as_default():
        with tf.name_scope("Train"):
            with tf.variable_scope("Discriminator", reuse=None):
                mtrain = Discriminator(word_embedding, word_to_idx, use_glove=True, config=config, is_training=True)
            tf.summary.scalar("Training Loss", mtrain._loss)
            tf.summary.scalar("Training Accuracy", mtrain._accuracy)
        with tf.name_scope("Val"):
            with tf.variable_scope("Discriminator", reuse=True):
                mval = Discriminator(word_embedding, word_to_idx, use_glove=True, config=config, is_training=False)
            tf.summary.scalar("Validation Loss", mval._loss)
            tf.summary.scalar("Validation Accuracy", mval._accuracy)

        config_sess = tf.ConfigProto(allow_soft_placement=True)
        config_sess.gpu_options.allow_growth = True
        with tf.Session(config=config_sess) as sess:
            tf.global_variables_initializer().run()
            summary_writer = tf.summary.FileWriter(log_path, graph=tf.get_default_graph())
            saver = tf.train.Saver()

            # model_architecture / num_layers / dropout_prob / batch_size / use_lstm
            output_filename = f"vocab{config.vocab_size}__model_{args.model_architecture}__lr{config.learning_rate}__" \
                              f"lay{config.num_layers}__dp{config.dropout_prob}__bs{config.batch_size}__lstm{config.use_lstm}" \
                              f"__ts{config.resize_samples}.txt"
            output_filename_train = f"vocab{config.vocab_size}__model_{args.model_architecture}__lr{config.learning_rate}__" \
                                    f"lay{config.num_layers}__dp{config.dropout_prob}__bs{config.batch_size}__lstm{config.use_lstm}" \
                                    f"__ts{config.resize_samples}_train.txt"
            output_filename_test = f"vocab{config.vocab_size}__model_{args.model_architecture}__lr{config.learning_rate}__" \
                                   f"lay{config.num_layers}__dp{config.dropout_prob}__bs{config.batch_size}__lstm{config.use_lstm}" \
                                   f"__ts{config.resize_samples}_testresults.txt"
            output_filepath = os.path.join(save_path, output_filename)
            output_filepath_train = os.path.join(save_path_train, output_filename_train)
            output_filepath_test = os.path.join(save_path_test, output_filename_test)
            f = open(output_filepath, 'w')
            f_train = open(output_filepath_train, 'w')

            # Column names:
            f.write(f"{test_models[0]} average score\tacc {test_models[0]}\t"
                    f"{test_models[1]} average score\tacc {test_models[1]}\n")
            f_train.write("Epoch\t Loss\t Accuracy\n")

            score_list = []
            idx_batch_list = []
            cat_list = []

            # Training
            for i in range(config.max_epoch):
                print(f"Epoch: {i + 1} out of {config.max_epoch}")
                train_loss, train_acc = train(sess, mtrain, data_train, gen_model=train_models, epoch=i, config=config)
                for k_item in range(len(train_loss)):
                    f_train.write(f"{i} \t")
                    f_train.write(f"{train_loss[k_item]} \t")
                    f_train.write(f"{train_acc[k_item]} \n")

                for test_model in test_models:
                    [acc, logits, scores, idx_batch] = inference(sess, mval, data_val, test_model, config=config)
                    s = np.mean(scores[:, :, 0])
                    f.write("%f\t" % s)
                    a = np.mean(acc)  # Average Score
                    f.write("%f\t" % a)
                    if i == config.max_epoch - 1:
                        if test_model == "human":
                            cat_list += list(np.ones(len(idx_batch)))
                        elif test_model == args.name:
                            cat_list += list(np.zeros(len(idx_batch)))
                        score_list += scores[:, :, 0].tolist()
                        idx_batch_list += idx_batch.tolist()
                f.write("\n")

            all_test = pd.DataFrame()
            all_test["idx_batch"] = idx_batch_list
            all_test["cat"] = cat_list
            all_test["score"] = score_list
            all_test.to_csv(output_filepath_test, sep="\t", header=True)

            f.close()
            f_train.close()

            if save_path:
                model_path = os.path.join(save_path, args.model_architecture)
                print("Saving model to %s." % model_path)
                saver.save(sess, model_path, global_step=i + 1)
                print("Model saved to %s." % model_path)
def sample_exp(self):
    mean = 1.0
    return random.expovariate(mean)
def _fix(self):
    stack = [None]
    index = []
    current = stack
    i = 0
    ln = len(self._regexp)
    interp = True
    while i < ln:
        c = self._regexp[i]
        i += 1
        if c == '(':
            current = [current]
            current[0].append(current)
        elif c == '|':
            p = current[0]
            ch = p[-1]
            if type(ch) is not tuple:
                ch = ("choice", [current])
                p[-1] = ch
            else:
                ch[1].append(current)
            current = [p]
        elif c == ')':
            ch = current[0][-1]
            if type(ch) is tuple:
                ch[1].append(current)
            index.append(current)
            current = current[0]
        elif c == '[' or c == '{':
            current = [current]
            current[0].append(current)
            interp = False
        elif c == ']':
            current = current[0]
            choice = RandRegExp.choice_expand("".join(current.pop()[1:]))
            current.append(RandChoice(*list(choice)))
            interp = True
        elif c == '}':
            current = current[0]
            num = "".join(current.pop()[1:])
            e = current.pop()
            if "," not in num:
                n = int(num)
                current.append([current] + [e] * n)
            else:
                num_min, num_max = num.split(",")
                if not num_min:
                    num_min = "0"
                if num_max:
                    n = RandNum(int(num_min), int(num_max))
                else:
                    n = RandNumExpo(self._lambda, base=int(num_min))
                current.append(("repeat", n))
                current.append(e)
            interp = True
        elif c == '\\':
            c = self._regexp[i]
            if c == "s":
                c = RandChoice(" ", "\t")
            elif c in "0123456789":
                c = ("cite", ord(c) - 0x30)
            current.append(c)
            i += 1
        elif not interp:
            current.append(c)
        elif c == '+':
            e = current.pop()
            current.append([current] + [e] * (int(random.expovariate(self._lambda)) + 1))
        elif c == '*':
            e = current.pop()
            current.append([current] + [e] * int(random.expovariate(self._lambda)))
        elif c == '?':
            if random.randint(0, 1):
                current.pop()
        elif c == '.':
            current.append(RandChoice(*[chr(x) for x in xrange(256)]))
        elif c == '$' or c == '^':
            pass
        else:
            current.append(c)
    return RandRegExp.stack_fix(stack[1:], index)
def _fix(self):
    return self.base + int(round(random.expovariate(self.lambd)))
def jobSizeGenerate():
    return int(random.expovariate(0.025))
def updatePythonista(me):
    stufftodo = ['Pycon2012', 'string', 'stdlib']  # more later
    for stuff in stufftodo:
        time.sleep(random.expovariate(0.25))
        me.__setattr__(stuff, True)
if __name__ == '__main__':
    mamma, baba = Parents(), Parents()
    me = SubhodeepMoitra(mamma, baba)
    lifeStart = time.time()
    gradSchoolStart = 21  # Life begins at gradschool ?
    # Don't want to live beyond 70
    lifeEnd = lifeStart + 70 - random.expovariate(lambd=1.0)
    life = Pool(processes=4)
    life.apply_async(updateProteinArchitect, me)
    life.apply_async(updatePythonista, me)
    life.apply_async(updateTriathlete, me)
    life.apply_async(updateClimber, me)
    while time.time() + gradSchoolStart < lifeEnd:
        time.sleep(1)  # Check annually if I am alive
    me.alive = False
    life.terminate()
    print("What a ride..!!")
def updateTriathlete(me):
    stufftodo = ['PittMarathon2012', 'IMTexas70_3', 'MS150']  # more later
    for stuff in stufftodo:
        time.sleep(random.expovariate(0.25))
        me.__setattr__(stuff, True)
def duration_period(self):
    # Duration: a random value drawn from an exponential distribution with parameter 1, rounded up.
    # Author: Program Creek * URL: https://www.programcreek.com/python/example/8889/random.expovariate
    exponential_distribution = random.expovariate(1)
    # math.ceil rounds the value up to the next integer greater than or equal to the draw.
    self.duration_time = math.ceil(exponential_distribution)
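# A small illustrative companion to duration_period() above (not taken from that code):
# math.ceil of an Exponential(1) draw gives an integer duration of at least 1, with the
# values 1, 2, 3, ... occurring with geometrically decreasing probability.
import math
import random

samples = [math.ceil(random.expovariate(1)) for _ in range(10000)]
print(min(samples), sum(samples) / len(samples))  # min is 1; mean is close to 1 / (1 - e**-1) ≈ 1.58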
def updateProteinArchitect(me):
    stufftodo = ['MSLTI', 'Proteins2012', 'PyMol', 'BioPython']  # more later
    for stuff in stufftodo:
        time.sleep(random.expovariate(0.5))
        me.__setattr__(stuff, True)
def fct_calcul_demand_entry_link_poisson_proc(param):
    return random.expovariate(param)
    # terminated
    yield ram.put(cantMemoria)
    print('%s regresa %d de cantidad de RAM' % (mens, cantMemoria))
    tTotal += (env.now - tRea)
    t.append(env.now - tRea)


mRam = 100
tTotal = 0.0
t = []
numProces = 50
velocidad = 3.0

env = simpy.Environment()
ram = simpy.Container(env, capacity=mRam, init=mRam)
cpu = simpy.Resource(env, capacity=2)
wait = simpy.Resource(env, capacity=2)
nInt = 1

random.seed(2411)
for i in range(numProces):
    tiempo = random.expovariate(1.0 / nInt)
    nIns = random.randint(1, 10)
    cantMemoria = random.randint(1, 10)
    env.process(computadora(env, tiempo, 'Proceso Numero %d' % i, ram, cantMemoria, nIns, velocidad))
env.run()
for repet in range(5000):
    # ---- simulate a Kingman Coalescent with 1000 individuals, with a rate of
    #      coalescence = 0.007 * nb of couples
    #      the code is extracted from the package Kingman.
    sample_size = 1000
    random.seed()
    time = [0 for j in range(2 * sample_size)]
    parent = [0 for j in range(2 * sample_size)]
    time[0] = -1
    parent[0] = -1
    ancestors = list(range(1, sample_size + 1))
    t = 0
    next_node = sample_size + 1
    for n in range(sample_size, 1, -1):
        t += random.expovariate(0.00765 * n * (n - 1))
        for _ in range(2):
            child = random.choice(ancestors)
            parent[child] = next_node
            ancestors.remove(child)
        ancestors.append(next_node)
        time[next_node] = t
        next_node += 1
    test = (parent, time)

    # ---- Measure of statistics on the tree: tMRCA, Cherries, extBranch
    tMRCA = max(test[1])
    cherries = 0
    extBranch = 0
def exponential(rate):
    return random.expovariate(rate)
from ArrivalGenerator import ArrivalGenerator
from Simulator import Simulator
import random

random.seed(1)
sim = Simulator()
t = random.expovariate(100)
sim.insert_ev(ArrivalGenerator(t))
sim.do_all_events()
I = 1000
i = 1

# Time to service a call, in hours
atendimento = 0.5

# Priority matrix
prioridades = [[1, 1, 3, 4, 3, 2, 2, 1, 1, 1],
               [4, 1, 3, 4, 3, 2, 2, 1, 1, 1],
               [1, 1, 3, 3, 3, 2, 2, 1, 1, 4],
               [1, 2, 3, 4, 2, 2, 2, 1, 1, 1],
               [1, 1, 1, 2, 1, 2, 2, 1, 1, 1],
               [1, 1, 3, 4, 3, 2, 2, 1, 2, 4],
               [1, 1, 3, 4, 4, 2, 2, 1, 2, 1],
               [1, 1, 3, 4, 3, 2, 2, 1, 1, 1],
               [1, 1, 1, 2, 1, 2, 2, 1, 1, 1],
               [1, 1, 3, 4, 3, 3, 1, 1, 1, 1]]
MP = np.array(prioridades)

# Matrix of times between maintenance events for each apartment's equipment, in hours
mtbm = []
for m0 in range(0, 10):
    auxmtbm = []
    for k0 in range(0, 10):
        auxmtbm.append(random.expovariate(1 / (15 * 24)))
    mtbm.append(auxmtbm)
AGE = np.array(mtbm)

# Matrix to receive the call arrival times per apartment
tpch = []
for m1 in range(0, 10):
    auxtpch = []
    for k1 in range(0, 10):
        auxtpch.append(random.expovariate(1))
    tpch.append(auxtpch)
chamados = np.array(tpch)

# Initialize the number of closed work orders (OS)
OS = 0
def run(self, N, lamb, mu):
    for i in range(N):
        a = Arrival(str(i))
        activate(a, a.run(mu))
        t = random.expovariate(lamb)
        yield hold, self, t
def arrival_rate(lmda):
    return random.expovariate(lmda)
def _draw():
    return random.expovariate(1.0 / mean)
import time
import random
import pandas as pd

random.seed()
dict_df = dict()
lymbd = 0.4
while lymbd <= 3:
    dict_df[str(lymbd)] = dict()
    for i in range(100):
        b = int(random.expovariate(lymbd))
        try:
            dict_df[str(lymbd)][b] += 1
        except KeyError:
            dict_df[str(lymbd)][b] = 1
    lymbd += 0.3

df = pd.DataFrame(dict_df).sort_index()
print(df)
speed = 3.0          # instructions per unit of time
total_memory = 100   # total (maximum) amount of RAM available
processes = 25       # total number of processes to run
times = []           # list that will hold all the times
interval = 1

env = simpy.Environment()
cpu = simpy.Resource(env, capacity=2)
ram = simpy.Container(env, init=total_memory, capacity=total_memory)
wait = simpy.Resource(env, capacity=2)

# Random
random.seed(2048)
for i in range(processes):
    time = random.expovariate(1.0 / interval)
    instructions = random.randint(1, 10)
    memory = random.randint(1, 10)
    env.process(proceso(env, 'Proceso %d' % (i + 1), time, ram, memory, instructions, speed))
env.run()

# Total time and standard deviation
sumatoria = 0
for i in times:
    sumatoria = sumatoria + i
prom = sumatoria / processes

sumdesv = 0
for x in times:
    sumdesv = sumdesv + (x - prom) ** 2
def draw(self):
    x = self.left
    y = self.top
    w = self.right - x
    h = self.bottom - y
    pygame.draw.rect(screen, self.colour, pygame.Rect(x, y, w, h), 2)

# Create 10,000 random items, some of which should overlap with the screen.
colours = [(0, 0, 255), (0, 255, 0), (0, 255, 255), (255, 0, 0),
           (255, 0, 255), (255, 255, 0), (255, 255, 255)]
items = []
for i in range(10000):
    x = random.uniform(-5000, 5000)
    y = random.uniform(-5000, 5000)
    w = 5 + random.expovariate(1.0 / 50)
    h = 5 + random.expovariate(1.0 / 50)
    colour = random.choice(colours)
    items.append(Item(x, y, x + w, y + h, colour))

# Put them into a quad-tree.
tree = QuadTree(items)

WIDTH = 640
HEIGHT = 480
screen = pygame.display.set_mode((WIDTH, HEIGHT), pygame.DOUBLEBUF)

quit = False
while not quit:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            quit = True
def syn(A, max_t_len, aa_path=opath_grid_traj, r_path=r_path, x_path=x_path, l_path=l_path,
        sd_path=sd_path, sd_final_path=sd_final_path, nSyn=14650):
    """basic description

    detailed description

    Args:

    Returns:

    """
    # Inputs: adaptive grid A, trip distribution matrix R, transition matrix X,
    # median trajectory length L, and the number of trajectories to synthesize, nSyn.
    with open(aa_path) as f:
        AA = list()
        for line in f.readlines():
            AA += eval(line)

    # Read the trip distribution matrix
    r_file = open(r_path, 'r')
    R = []
    for row in r_file.readlines():
        row = row.strip()
        R_ele = []
        for ele in row.split(' '):
            R_ele.append(float(ele))
        R.append(R_ele)

    # Read the Markov transition probability matrix
    x_file = open(x_path, 'r')
    X = []
    for row in x_file.readlines():
        row = row.strip()
        X_ele = []
        count = 0
        for ele in row.split(' '):
            X_ele.append(float(ele))
            count += float(ele)
        X.append(X_ele)
    X_np = np.array(X)
    X_copy = X_np.copy()
    X_array = [X_copy]
    # Raise the transition matrix to successive powers; after enough iterations it barely changes
    for i in range(max_t_len):
        X_array.append(X_array[i].dot(X_copy))
    X_array_len = len(X_array)

    # Read the trajectory length matrix
    l_file = open(l_path, 'r')
    L = []
    for row in l_file.readlines():
        row = row.strip()
        for ele in row.split(' '):
            L.append(float(ele))

    sd_file = open(sd_path, 'w')

    # Start synthesis
    # line 1: Initialize SD as empty set
    SD = []
    p1 = ProgressBar(nSyn, '生成网格化的脱敏数据')
    for i in range(nSyn):
        p1.update(i)
        # Pick a sample S = (Cstart, Cend) from R
        index_array = [int(j) for j in range(A * A)]
        R = np.array(R)
        R /= np.sum(R)
        # Sample from the trip distribution
        index = np.random.choice(index_array, p=R.ravel())
        start_point = int(index / A)          # trajectory start point
        end_point = index - start_point * A   # trajectory end point
        l_now = L[index]                      # trajectory length parameter
        r_length = random.expovariate(np.log(2) / l_now)  # draw the trajectory length from an exponential distribution
        r_length = int(np.round(r_length))    # round to an integer
        if r_length < 2:
            r_length = 2
        T = []
        prev_point = start_point
        T.append(prev_point)  # add the start point
        # line 7-10
        for j in range(1, r_length - 1):
            # Per the paper's formula, use the (r_length - j)-th power of X; if the index
            # exceeds the length of X_array, take the last element
            if r_length - 1 - j - 1 >= X_array_len:
                X_now = X_array[-1]
            else:
                X_now = X_array[r_length - 1 - j - 1]
            # Sample
            sample_prob = []
            for k in range(A):
                sample_prob.append(X_now[k][end_point] * X_np[prev_point][k])  # append the sampling probability
            sample_prob = np.array(sample_prob)
            if np.sum(sample_prob) == 0:
                continue
            sample_prob /= np.sum(sample_prob)  # normalize
            now_point = np.random.choice([int(m) for m in range(A)], p=sample_prob.ravel())  # sample
            prev_point = now_point  # update the previous point
            T.append(now_point)     # append to the trajectory
        T.append(end_point)  # add the end point
        SD.append(T)         # add the trajectory
    for sd in SD:
        sd_file.writelines(str(sd) + '\n')
    sd_file.close()
def ocurre_terremoto(self):
    l = 1 / randint(4, 10)
    p = expovariate(l)
import math
import random

j = 0
for i in range(1, 6):
    j = i
    name = 'A' + str(j) + '+0.2.txt'
    print name
    sum = 0
    num = 0
    while sum < 60:
        s = random.expovariate(0.2)
        sum = s + sum
        num = num + 1
        if sum < 60:
            #f = open('D'+%i'+0.2.txt', 'a')
            f = open(name, 'a')
            f.write("Acar%s.Start(Seconds(%f));\n" % (num, sum))
            f.close()
def poisson():
    while True:
        yield random.expovariate(alpha)
def send(self, n, block, delay=None):
    if delay is None:
        delay = NETWORK_DELAY + random.expovariate(10.0 / NETWORK_DELAY)
    yield self.env.timeout(delay)
    n.getBlock(block)
                   list(range(0, 3 * n, 3)))
    DTS = DT0[:, f.B, sort(f.A)]
    assert_equals(DTS, DT1)


@new
def test_bool8_small_descending():
    DT0 = dt.Frame([True, False, False, None, True, True, None])
    DT1 = dt.Frame([None, None, True, True, True, False, False])
    DTS = DT0[:, :, sort(-f.C0)]
    assert DT0.stype == dt.bool8
    assert isview(DTS)
    assert_equals(DTS, DT1)


@pytest.mark.parametrize("n", [int(random.expovariate(0.00001)) + 100])
@new
def test_bool8_large_descending(n):
    DT0 = dt.Frame([True, False, True, None, None, False, True] * n)
    DT1 = dt.Frame([None] * (2 * n) + [True] * (3 * n) + [False] * (2 * n))
    DTS = DT0[:, :, sort(-f[0])]
    assert_equals(DTS, DT1)


#-------------------------------------------------------------------------------
# Int16
#-------------------------------------------------------------------------------

@new
def test_int16_small():