def reduce_geoms(self,infile):
    """Fork ncores worker processes, each running self.jobs simulated-annealing
    reductions of the spectrum; writes the original spectrum first, then each
    child writes its reduced spectra and geometries.
    """
    # Build the reference intensity profile before forking.
    if self.notrans == True:
        self.normalize()
    else:
        self.trans2intensity()
    self.finish_spectrum()
    toprint = "Original spectrum sigma: "+str(self.sigma)
    toprint += "\nPrinting original spectra:"
    self.writeoutall(infile,toprint)
    # Flush before fork so buffered output is not duplicated in children.
    sys.stdout.flush()
    # Keep copies of the full-sample data; children work on subsets.
    self.origintensity = self.intensity[:]
    self.exc_orig = self.exc
    self.trans_orig = self.trans
    self.nsample = self.subset
    jobs = []
    for i in range(self.ncores):
        pid = os.fork()
        if pid == 0:
            # Child process: run self.jobs independent annealing runs.
            for j in range(self.jobs):
                self.pid = str(os.getpid())+"_"+str(j);
                # Reseed from OS entropy, then decorrelate streams per process
                # (jumpahead is Python 2 only).
                random.seed()
                random.jumpahead(os.getpid())
                d = self.SA()
                toprint = str(self.pid)+":\tFinal D-min = "+str(d)
                toprint += "\n\tReduced spectrum sigma: "+str(self.sigma)
                toprint += "\n\tPrinting reduced spectra:"
                self.writeoutall(infile,toprint)
                self.writegeoms(infile)
                sys.stdout.flush()
            # Child must exit without running parent cleanup handlers.
            os._exit(0)
        jobs.append(pid)
    # Parent: wait for all children to finish.
    for job in jobs:
        os.waitpid(job,0)
def setUp(self):
    """Create uniquely-named input/output Accumulo tables (pre-split from the
    'splits' file) and ingest TeraSort data before running the benchmark.
    """
    # Perturb the shared PRNG so concurrent test runs pick different suffixes
    # (jumpahead is Python 2 only).
    random.jumpahead(int(time.time()))
    num = random.randint(1, 100000)
    # Unique table names avoid collisions between test runs.
    self.input_table = self.input_table + "_" + str(num)
    self.output_table = self.output_table + "_" + str(num)
    #if (not os.getenv("HADOOP_CLASSPATH")):
    #    os.putenv("HADOOP_CLASSPATH", self.getjars(":"))
    dir = os.path.dirname(os.path.realpath(__file__))
    file = os.path.join(dir, 'splits')
    # code, out, err = cloudshell.run(self.username, self.password, 'table RowHashTestInput\n')
    # if out.find('no such table') == -1:
    #     code, out, err = cloudshell.run(self.username, self.password, 'deletetable RowHashTestInput\n')
    #     self.sleep(15)
    code, out, err = cloudshell.run(
        self.username, self.password,
        "createtable %s -sf %s\n" % (self.input_table, file))
    #code, out, err = cloudshell.run('table RowHashTest\n')
    #if out.find('no such table') == -1:
    #     code, out, err = cloudshell.run('user root\nsecret\ndeletetable RowHashTest\n')
    #     self.sleep(15)
    code, out, err = cloudshell.run(
        self.username, self.password,
        "createtable %s -sf %s\n" % (self.output_table, file))
    # Ingest random rows via the TeraSortIngest MapReduce job.
    command = self.buildcommand(
        'org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest',
        self.numrows(), self.keysizemin(), self.keysizemax(),
        self.minvaluesize(), self.maxvaluesize(), self.input_table,
        self.getInstance(), self.getZookeepers(), self.getUsername(),
        self.getPassword(), self.maxmaps)
    handle = runner.start(command, stdin=subprocess.PIPE)
    log.debug("Running: %r", command)
    # Empty stdin; communicate() blocks until the ingest job finishes.
    out, err = handle.communicate("")
    Benchmark.setUp(self)
def worker(lst):
    """Run one prediction task in a worker process.

    ``lst`` unpacks to (n, train_ds, test_ds, options, state): a task index,
    datasets, options, and a PRNG state.  The worker restores the PRNG state,
    jumps ahead by the task index so parallel workers decorrelate, then
    returns the prediction error.
    """
    import random
    task_index, train_set, test_set, opts, rng_state = lst
    random.setstate(rng_state)
    # Py2-only: scramble the restored state by the task index.
    random.jumpahead(task_index)
    _predictions, error = predict(train_set, test_set, opts, True)
    return error
def get_random_trackset(date, length):
    '''Pick a pseudo-random trackset for the given date.

    Deterministic per calendar day: seeds with 0 and jumps ahead by the
    number of days since 2014-12-25, so the same date always yields the
    same pick.  NOT SECURE, just simple.
    '''
    random.seed(0)
    # Days since the epoch date (86400 s/day); jumpahead is Python 2 only.
    random.jumpahead(
        int((date - datetime(2014, 12, 25)).total_seconds() / 86400))
    tracksets = json.load(open(JSON_FILE, 'r'))
    # Retry until a trackset whose audio file actually exists is found.
    while True:
        i = random.randint(0, len(tracksets) - 1)
        trackset = tracksets[i]
        musicfile = os.path.join(TEMP_DIR, trackset['basename'])
        if os.path.exists(musicfile):
            break
    musictext = '[Included Music]' + ' '.join(
        ['\n ' + t.strip() for t in trackset['nicename'].splitlines()])
    info = dict(
        musicfile=musicfile,
        musictext=musictext.encode('utf8'),  # this might be bad
        index=i,
        date=date,
        length=length)
    return info
def bootstrap(self, bootstrap_sample_size=1, seed=None):
    """ Use bootstrapping to calculate the variance of the difference of two EFROC studies.

    :param bootstrap_sample_size: Number of times to resample. Defaults to 1.
    :param seed: Seed to initially pass to the random number generator. Defaults to None.
    :return: an ND Array of the bootstrapped differences.
    """
    difference_list = []
    # Dedicated generator so resampling does not disturb global random state.
    gen = random.Random()
    gen.seed(seed)
    for count in xrange(bootstrap_sample_size):
        difference_list.append(self._resample_and_compare(gen))
        if seed is not None:
            # NOTE(review): this jumps the MODULE-level PRNG, not `gen`,
            # so it does not affect the resampling above — confirm intent.
            random.jumpahead(seed)
    difference_array = np.array(difference_list)
    self.variance = np.var(difference_array)
    # Histogram with ~sqrt(N) bins for a quick visual check.
    plt.figure()
    plt.hist(difference_array,
             np.ceil(np.sqrt(bootstrap_sample_size)),
             histtype='stepfilled')
    plt.title("Bootstrapped Estimation of $\delta A_{FE}$")
    plt.xlabel("$\delta A_{FE}$")
    plt.ylabel("Count")
    return difference_array
def setUp(self):
    """Create uniquely-suffixed input/output tables from the splits file and
    run a TeraSortIngest job to populate the input table for the benchmark.
    """
    # Py2-only jumpahead: decorrelate suffix choice across concurrent runs.
    random.jumpahead(int(time.time()))
    num = random.randint(1, 100000)
    self.input_table = self.input_table + "_" + str(num)
    self.output_table = self.output_table + "_" + str(num)
    #if (not os.getenv("HADOOP_CLASSPATH")):
    #    os.putenv("HADOOP_CLASSPATH", self.getjars(":"))
    dir = os.path.dirname(os.path.realpath(__file__))
    file = os.path.join( dir, 'splits' )
    # code, out, err = cloudshell.run(self.username, self.password, 'table RowHashTestInput\n')
    # if out.find('no such table') == -1:
    #     code, out, err = cloudshell.run(self.username, self.password, 'deletetable RowHashTestInput\n')
    #     self.sleep(15)
    code, out, err = cloudshell.run(self.username, self.password,
                                    "createtable %s -sf %s\n" % (self.input_table, file))
    #code, out, err = cloudshell.run('table RowHashTest\n')
    #if out.find('no such table') == -1:
    #     code, out, err = cloudshell.run('user root\nsecret\ndeletetable RowHashTest\n')
    #     self.sleep(15)
    code, out, err = cloudshell.run(self.username, self.password,
                                    "createtable %s -sf %s\n" % (self.output_table, file))
    command = self.buildcommand('org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest',
                                self.numrows(), self.keysizemin(), self.keysizemax(),
                                self.minvaluesize(), self.maxvaluesize(), self.input_table,
                                self.getInstance(), self.getZookeepers(), self.getUsername(),
                                self.getPassword(), self.maxmaps)
    handle = runner.start(command, stdin=subprocess.PIPE)
    log.debug("Running: %r", command)
    # Blocks until ingestion completes.
    out, err = handle.communicate("")
    Benchmark.setUp(self)
def reduce_geoms(self, infile):
    """Run parallel simulated-annealing spectrum reductions.

    Writes the full-sample spectrum, then forks self.ncores children, each
    performing self.jobs annealing runs and writing its own reduced spectra
    and geometry files.  Parent waits for all children.
    """
    # Prepare the reference intensity profile.
    if self.notrans == True:
        self.normalize()
    else:
        self.trans2intensity()
    self.finish_spectrum()
    toprint = "Original spectrum sigma: " + str(self.sigma)
    toprint += "\nPrinting original spectra:"
    self.writeoutall(infile, toprint)
    # Flush so fork() does not duplicate buffered output in children.
    sys.stdout.flush()
    # Snapshot full-sample data; children anneal over subsets.
    self.origintensity = self.intensity[:]
    self.exc_orig = self.exc
    self.trans_orig = self.trans
    self.nsample = self.subset
    jobs = []
    for i in range(self.ncores):
        pid = os.fork()
        if pid == 0:
            # Child: independent annealing runs with a per-process PRNG stream.
            for j in range(self.jobs):
                self.pid = str(os.getpid()) + "_" + str(j)
                random.seed()
                # Py2-only: further decorrelate by PID.
                random.jumpahead(os.getpid())
                d = self.SA()
                toprint = str(self.pid) + ":\tFinal D-min = " + str(d)
                toprint += "\n\tReduced spectrum sigma: " + str(self.sigma)
                toprint += "\n\tPrinting reduced spectra:"
                self.writeoutall(infile, toprint)
                self.writegeoms(infile)
                sys.stdout.flush()
            # Exit child without parent-side cleanup.
            os._exit(0)
        jobs.append(pid)
    for job in jobs:
        os.waitpid(job, 0)
def random_random(jobid):
    """Random number setting for parallel jobs
    - python
    - ROOT.gRandom
    - ROOT.RooRandom

    >>> jobid = ...
    >>> random_random ( jobid )
    """
    import time, random, ROOT, sys, os, socket
    ## seed Python's PRNG from OS entropy first
    random.seed()
    ## build a per-job/per-host/per-process discriminator
    jhid = os.getpid(), os.getppid(), socket.getfqdn(), jobid, os.uname(), time.time()
    jhid = hash(jhid)
    ## jumpahead exists only in Python 2; on Python 3 emulate by drawing
    ## a bounded number of samples to advance the state
    if sys.version_info.major < 3:
        random.jumpahead(jhid)
    else:
        njumps = jhid % 9967
        for j in range(njumps):
            random.uniform(0, 1)
    ## sleep a bit (up to one second) to desynchronise job start times
    time.sleep(random.uniform(0.1, 1.0))
    ## now initialize ROOT
    ROOT.gRandom.SetSeed()
    ## ... and Roofit
    ROOT.RooRandom.randomGenerator().SetSeed()
    return random.getstate(), ROOT.gRandom.GetSeed(), ROOT.RooRandom.randomGenerator().GetSeed()
def SSA_evolve(tau, tau_max, concentrations, CRS, random_seed, output_prefix=None, t_out=None):
    """Evolve the chemical system from time tau to tau_max with the SSA,
    delegating each update step to the C function SSA_update and writing
    concentration snapshots every t_out time units.

    Raises ValueError when only one of output_prefix / t_out is given.
    """
    if (output_prefix != None and t_out == None):
        raise ValueError('Output file prefix specified but no output frequency given, please provide an output time frequency')
    elif (output_prefix == None and type(t_out) == float):
        raise ValueError('Output frequency provided but output file prefix was not provided, please provide a file prefix name')
    import sys
    import random
    from ctypes import c_int, c_double, POINTER
    # Flatten the reaction system into C-compatible arrays and pointers.
    constants, propensity_ints, reaction_arr, catalyst_arr = Init.convert_CRS_to_npArrays(CRS)
    concentrations_ptr, constants_ptr, propensity_ints_ptr, reaction_arr_ptr, catalyst_arr_ptr = Init.get_c_pointers(concentrations, constants, propensity_ints, reaction_arr, catalyst_arr)
    freq_counter = 0.0
    random.seed(random_seed)
    while tau < tau_max:
        # Get seed for the C-side PRNG
        r_seed = random.randint(0, sys.maxint)
        # Update concentrations in place using C function
        c_tau = SSA_update(c_double(tau), c_double(freq_counter), r_seed, c_int(1), c_int(1), c_int(len(CRS.molecule_list)), c_int(len(constants)), concentrations_ptr, constants_ptr, propensity_ints_ptr, reaction_arr_ptr, catalyst_arr_ptr)
        # Update Time
        tau = c_tau
        # Update random seed.  NOTE(review): jumpahead is Py2-only and
        # tau-freq_counter is a float here — confirm this was intended.
        random.jumpahead(tau-freq_counter)
        print tau
        # Output data
        Out.output_concentrations(concentrations, 'tutorial_data', time=freq_counter)
        freq_counter += t_out
    Out.tidy_timeseries(CRS.molecule_list, 'tutorial_data', delete_dat=True)
    return concentrations
def getSpawnsInRegion(self, rx, rz):
    """Return (and cache) the spawn-point landmarks for region (rx, rz),
    generated deterministically from the world seed and region coordinates.
    """
    # Generate each spawn point and store in regionspawns, otherwise we just get the cached spawnpoints.
    if not (rx, rz) in self.worldspawns:
        # Seed the random number gen with all 64 bits of region coordinate data by using both seed and jumpahead
        random.seed( self.seed ^ ((rx & 0xFFFF0000) | (rz & 0x0000FFFF)) )
        random.jumpahead( ((rx & 0xFFFF0000) | (rz & 0x0000FFFF)) )
        # First number should be number of points in region
        numspawns = self.density
        rangetop = self.rangetop
        rangebottom = self.rangebottom
        self.worldspawns[ (rx,rz) ] = {}
        currentregion = self.worldspawns[ (rx,rz) ]
        for ix in xrange(numspawns):
            # Pick a block position uniformly within the region's world-space extent.
            blockx = random.randint( 0, CHUNK_WIDTH_IN_BLOCKS * REGION_WIDTH_IN_CHUNKS - 1 ) + rx * CHUNK_WIDTH_IN_BLOCKS * REGION_WIDTH_IN_CHUNKS
            blockz = random.randint( 0, CHUNK_WIDTH_IN_BLOCKS * REGION_WIDTH_IN_CHUNKS - 1 ) + rz * CHUNK_WIDTH_IN_BLOCKS * REGION_WIDTH_IN_CHUNKS
            blocky = random.randint( max(0, rangebottom), min(CHUNK_HEIGHT_IN_BLOCKS - 1, rangetop) )
            currchunkx = blockx / CHUNK_WIDTH_IN_BLOCKS
            currchunkz = blockz / CHUNK_WIDTH_IN_BLOCKS
            # We store the points for each chunk indexed by chunk
            if not (currchunkx, currchunkz) in currentregion:
                currentregion[ (currchunkx, currchunkz) ] = []
            # We make a landmark for each point
            lmtypeix = random.randint(0, len(self.landmarklist) - 1)
            lmtype = self.landmarklist[lmtypeix]
            #lm = lmtype(self.seed, self.terrainlayer, blockx, blockz, blocky)
            # Shallow-copy the prototype landmark rather than constructing anew.
            lm = copy.copy(lmtype)
            lm.setPos(blockx, blockz, blocky)
            # Lastly we append the landmark to the chunk
            currentregion[ (currchunkx, currchunkz) ].append( lm )
    return self.worldspawns[ (rx,rz) ]
def run(self):
    """Thread body: after a short random delay, enqueue self.count Flag
    objects stamped with this thread's start time.
    """
    foo = time.time()
    # Py2-only: decorrelate this thread's PRNG stream by its index.
    random.jumpahead(self.N)
    # Random stagger (< 1 s) so threads do not start in lockstep.
    time.sleep(random.random())
    for i in xrange(self.count):
        flag = Flag(0,i,0,foo)
        self.fc.enque(flag)
def run(self):
    """Thread body: stagger start by a sub-second random sleep, then push
    self.count Flag records (all stamped with the thread start time) onto
    the shared queue.
    """
    start_stamp = time.time()
    # Py2-only jumpahead: give each thread (indexed by self.N) its own stream.
    random.jumpahead(self.N)
    time.sleep(random.random())
    for sequence_no in xrange(self.count):
        self.fc.enque(Flag(0, sequence_no, 0, start_stamp))
def _make_salt(self, jump):
    """Deterministically derive a salt of uppercase letters and digits.

    Seeds the module PRNG from self._salt_seed(), advances it by ``jump``
    (Py2-only jumpahead), then draws self._salt_length() characters —
    so equal (seed, jump) pairs always produce the same salt.
    """
    length = self._salt_length()
    seed_value = self._salt_seed()
    random.seed(seed_value)
    random.jumpahead(jump)
    alphabet = string.ascii_uppercase + string.digits
    picks = [random.choice(alphabet) for _ in range(length)]
    return ''.join(picks)
def __attack(self, game):
    """Drop a Guava from the monkey's position once the attack timer expires,
    then schedule the next attack.
    """
    if (game.frame > self.nextAttackFrame):
        ## Spawn a new fruit at the monkey's location and let 'er fall!
        guava = Guava(game, self.rect)
        game.sprites.append(guava)
        ## Set the new time that the monkey should attack.
        ## Higher difficulty shrinks the interval (15-40 frame base window).
        self.nextAttackFrame = game.frame + (1.0 / difficultyMul()) * random.randrange(15, 40)
        # Py2-only: nudge the shared PRNG state after the draw.
        random.jumpahead(1)
def genWorkerID():
    """Return a pseudo-random worker id of the form 'worker-NN-NN-NN'.

    On first call in a process, perturbs the PRNG by the PID (Py2-only
    jumpahead) so forked workers do not all produce the same ids; the
    module-level _randomized flag ensures this happens only once.
    """
    global _randomized
    if not _randomized:
        random.jumpahead(os.getpid())
        _randomized = True
    parts = tuple(random.randint(0, 99) for _ in range(3))
    return "worker-%02d-%02d-%02d" % parts
def generateNextLine( self ):
    """Produce the next 7-character road row ('|' walls around 5 cells):
    blank rows past the finish, a '=' finish line at the target row, a
    rare full roadblock with one gap, or a normal row with sparse obstacles.
    """
    # Py2-only: scramble the PRNG by a random amount each row.
    random.jumpahead(random.randint(100,1000))
    line = []
    line.append( '|' )
    # This will be true for every row after the finish
    # line is generated.
    if self.rows > self.rowtarget:
        for i in range(0, 5):
            line.append(' ')
        line.append('|')
        return line
    # This will only be true when the target rows
    # have been hit
    if self.rows == self.rowtarget:
        for i in range(0, 5):
            line.append('=')
        line.append('|')
        return line
    # 1% chance to generate a roadblock
    if random.randint(0, 100) > 99:
        for i in range(0, 5):
            line.append( 'X' )
        line.append('|')
        # Needs at least one open space next to another one.
        # NOTE(review): x = randint(0, 5) can be 0, which overwrites the
        # left '|' wall rather than a roadblock cell — looks like it should
        # be randint(1, 5); confirm against the board layout.
        x = random.randint(0, 5)
        while self.state[2][x] != ' ':
            x = random.randint(0, 5)
        line[x] = ' '
        return line
    # Generate a normal line with 14% chance of an obstruction
    for i in range(0, 5):
        if random.randint(0, 100) > 86:
            type = random.randint(0, 5)
            if type == 0:
                line.append( 'r' )
            elif type == 1:
                line.append( 'c' )
            elif type == 2:
                line.append( 'T' )
            elif type == 3:
                line.append( 'P' )
            elif type == 4:
                line.append( '~' )
            else:
                line.append( 'Z' )
        else:
            line.append( ' ' )
    line.append( '|' )
    return line
def do_test(n, do_test_1=do_test_1):
    """Refcount regression check: build a fixed set of objects, run
    do_test_1 over them, then assert each non-cached object's refcount is
    exactly 4 (objects list + loop var + getrefcount arg + one more).
    """
    # Py2-only: derive a distinct PRNG stream per test index n.
    random.jumpahead(n*111222333444555666777L)
    # N/TAIL force fresh (non-interned, non-small-int-cached) objects.
    N = 1
    TAIL = 'lo'
    objects = [None, -1, 0, 1, 123455+N, -99-N, 'hel'+TAIL, [1,2], {(5,): do_test}, 5.43+0.01*N, xrange(5)]
    do_test_1(objects)
    # Skip the first four: None and small ints are interpreter-cached, so
    # their refcounts are unpredictable.
    for o in objects[4:]:
        #print '%5d -> %r' % (sys.getrefcount(o), o)
        assert sys.getrefcount(o) == 4
def calcpi(n):
    """Monte-Carlo estimate of pi using n samples in the unit square.

    Seeds deterministically (seed 1, then a Py2-only jumpahead by n) so the
    result is reproducible for a given n.  Returns the pair (n, estimate).
    """
    random.seed(1)
    random.jumpahead(n)
    hits = 0
    for _ in range(n):
        # Drawn in the same order as the original: uniform first, then random.
        x = random.uniform(0, 1)
        y = random.random()
        if x * x + y * y <= 1.0:
            hits += 1
    return n, 4.0 * float(hits) / float(n)
def randomizeFoodPos(self):
    """Move the food to a random free cell inside the play-field border,
    giving up after 500 collision-rejected attempts.
    """
    e = 0
    while True:
        # Coordinates in 1..blocks-2 stay inside the border cells.
        newFoodX = random.randint(1, self.blocks - 2)
        # Py2-only: stir the PRNG between the two coordinate draws.
        random.jumpahead(random.randint(1, self.blocks - 2))
        newFoodY = random.randint(1, self.blocks - 2)
        e += 1
        print("Randomizing " + str(e))
        # Accept a free cell, or bail out after 500 tries (possibly on an
        # occupied cell) to avoid an infinite loop on a crowded board.
        if self.checkCollision(newFoodX, newFoodY) == 0 or e > 500:
            break
    self.food.setPos(newFoodX, newFoodY)
def setUp(self):
    """Benchmark setup stub: perturbs the PRNG and defers to Benchmark.setUp.
    The table-renaming/deleting logic is currently commented out.
    """
    # Py2-only: decorrelate concurrent runs by wall-clock time.
    random.jumpahead(int(time.time()))
    num = random.randint(1, 100000)
    #self.tablename = self.tablename + "-" + str(num)
    # Find which hadoop version
    # code, out, err = cloudshell.run(self.username, self.password, 'table %s\n' % self.tablename)
    #if out.find('no such table') == -1:
    #    log.debug('Deleting table %s' % self.tablename)
    #    code, out, err = cloudshell.run(self.username, self.password, 'deletetable %s\n' % self.tablename)
    #    self.sleep(10)
    Benchmark.setUp(self)
def __init__(self, name):
    """Create a named client with its own socket, inbound message queue,
    and daemonized receive/action threads (threads are not started here).
    """
    self.name = name
    # Per-instance serial number taken from the class counter.
    self.instance = Client.instance
    Client.instance += 1
    self.sock = socket.socket()
    self.rx_msg_q = Queue.Queue()
    self.receive_thread = Thread( target=self.receive_loop )
    self.receive_thread.daemon = True
    self.action_thread = Thread( target=self.action_loop )
    self.action_thread.daemon = True
    self.print_msgs = True
    self.complete = False
    # Py2-only: give each client instance a distinct PRNG stream.
    random.jumpahead( self.instance+5000 )
def setUp(self):
    """Benchmark setup: append a random suffix to the table name so runs do
    not collide; table deletion logic is currently commented out.
    """
    # Py2-only: decorrelate concurrent runs by wall-clock time.
    random.jumpahead(int(time.time()))
    num = random.randint(1, 100000)
    self.tablename = self.tablename + "_" + str(num)
    # Need to generate a splits file for each speed
    #code, out, err = cloudshell.run(self.username, self.password, 'table %s\n' % self.tablename)
    #if out.find('no such table') == -1:
    #    log.debug('Deleting table %s' % self.tablename)
    #    code, out, err = cloudshell.run('user %s\n%s\ndeletetable %s\n' % (self.user,
    #                                                                      self.password,
    #                                                                      self.tablename))
    #    self.sleep(5)
    Benchmark.setUp(self)
def __init__(self, g, pos):
    """Monkey enemy sprite: initialized with the game's monkey image at
    ``pos``, a no-op hit handler, and a randomized first attack time.
    """
    # Collisions with this sprite are ignored.
    def hit_handler(g, s, a):
        pass
    Sprite.__init__(self, g.images['monkey_0_1'], pos)
    self.groups = g.string2groups('enemy')
    self.agroups = g.string2groups('player')
    # Start at a random animation frame so monkeys don't animate in sync.
    self.anim_frame = random.randrange(1, 5)
    self.hit = hit_handler
    ## How often the monkey should attack. Higher numbers = more infrequent
    self.nextAttackFrame = random.randrange(0, 10)
    # Py2-only: nudge the shared PRNG state.
    random.jumpahead(1)
def __init__(self, name):
    """Build a named client: a fresh socket, an inbound message queue, and
    two daemon threads (receive/action) that are created but not started.
    Each instance gets a serial number from the class-level counter and a
    distinct PRNG stream (Py2-only jumpahead).
    """
    self.name = name
    self.instance = Client.instance
    Client.instance += 1
    self.sock = socket.socket()
    self.rx_msg_q = Queue.Queue()
    receiver = Thread(target=self.receive_loop)
    receiver.daemon = True
    self.receive_thread = receiver
    actor = Thread(target=self.action_loop)
    actor.daemon = True
    self.action_thread = actor
    self.print_msgs = True
    self.complete = False
    random.jumpahead(self.instance + 5000)
def fuzz(fuzz_input=None, seed_val=None, jump_idx=None, ratio_min=0.0,
         ratio_max=1.0, range_list=None, fuzzable_chars=None):
    '''Twiddle bytes of input and return output.

    Mutates fuzz_input (a mutable byte sequence) in place: picks a random
    fuzz ratio in [ratio_min, ratio_max], then for each chunk replaces that
    fraction of eligible offsets with random bytes.  range_list restricts
    offsets via _fuzzable(); fuzzable_chars restricts by current byte value.
    seed_val/jump_idx make the mutation reproducible.
    '''
    # NOTE(review): the %d/%f formats assume seed_val/jump_idx are not None
    # here — logging raises if they are; confirm callers always pass them.
    logging.debug('fuzz params: %d %d %f %f %s', seed_val, jump_idx, ratio_min, ratio_max, range_list)
    if seed_val is not None:
        random.seed(seed_val)
    if jump_idx is not None:
        # Py2-only: advance the seeded stream per fuzz iteration.
        random.jumpahead(jump_idx)
    ratio = random.uniform(ratio_min, ratio_max)
    inputlen = len(fuzz_input)
    chunksize = 2**19  # 512k
    logger.debug('ratio=%f len=%d', ratio, inputlen)
    if range_list:
        # Range filtering uses absolute offsets, so process as one chunk.
        chunksize = inputlen
    for chunk_start in xrange(0, inputlen, chunksize):
        chunk_end = min(chunk_start + chunksize, inputlen)
        chunk_len = chunk_end - chunk_start
        if range_list:
            chooselist = [ x for x in xrange(inputlen) if _fuzzable(x, range_list) ]
        else:
            chooselist = xrange(chunk_len)
        if fuzzable_chars is not None:
            # Keep only offsets whose current byte is in the allowed set.
            chooselist = [ x for x in chooselist if fuzz_input[x + chunk_start] in fuzzable_chars ]
        nbytes_to_fuzz = int(round(ratio * len(chooselist)))
        bytes_to_fuzz = random.sample(chooselist, nbytes_to_fuzz)
        for idx in bytes_to_fuzz:
            offset = chunk_start + idx
            fuzz_input[offset] = random.getrandbits(8)
    return fuzz_input
def process_in_fork(function, display, processes, timeout):
    """make rules, in background processes

    Forks ``processes`` children, each running ``function`` once; the parent
    polls with waitpid(WNOHANG) while calling ``display`` until ``timeout``
    seconds elapse, then reaps stragglers and SIGKILLs any still alive.
    Returns the pids that exited with status 0.
    """
    children = []
    for i in range(processes):
        print "handling child %s" %i
        child = fork_safely()
        #without this jump all the processes will give the same answer
        random.jumpahead(11)
        if not child:
            #in child
            try:
                print "in child %s, to run %s" % (i, function)
                #os._exit(0)
                function()
                print "in child %s, after %s" % (i, function)
            except:
                print "in child %s, with exception" % (i)
                #exception will kill X window (and thus main process),
                #so catch everything
                traceback.print_exc()
                sys.stderr.flush()
            print "in child %s, wleaving" % (i,)
            # Children must never return into the parent's code path.
            os._exit(0)
        children.append(child)
    # now, twiddle thumbs
    timeout += time.time()
    results = []
    while children and time.time() < timeout:
        if display is not None:
            display()
        pid, status = os.waitpid(-1, os.WNOHANG)
        if pid in children:
            children.remove(pid)
            print "got pid %s, status %s" % (pid, status)
            if not status:
                results.append(pid)
    #final chance -- no display/delay
    for i in range(len(children)):
        pid, status = os.waitpid(-1, os.WNOHANG)
        children.remove(pid)
        print "got (late) pid %s, status %s" % (pid, status)
        if not status:
            results.append(pid)
    if children:
        print "getting violent with children %s" % children
        for pid in children:
            #kill slowcoaches, if any
            os.kill(pid, 9)
    return results
def random_source(output_signal, clock, reset, seed=None, edge_sensitivity='posedge'):
    '''Generate random signals on each clock edge - the specific clock edge
    to use is given by ``edge_sensitivity`` and can be either `posedge` for
    positive edge or `negedge` for negative edge.

    The seed to be used can be specified by ``seed``.

    Interfaces are supported and the output should be deterministic if
    seed is specified.
    '''
    if seed is not None:
        random.seed(seed)
    else:
        # Make sure we've moved the random state away from other calls to
        # this function.  (jumpahead is Python 2 only.)
        random.jumpahead(0)
    if isinstance(output_signal, myhdl._Signal._Signal):
        # Plain signal: a single random source suffices.
        return _signal_random_source(output_signal, clock, reset, edge_sensitivity)
    else:
        # Interface: attach one source per Signal attribute, advancing the
        # saved PRNG state between attributes so each gets a distinct stream.
        random_state = random.getstate()
        attribute_names = sorted(output_signal.__dict__)
        sources = []
        for attribute_name in attribute_names:
            attribute = getattr(output_signal, attribute_name)
            if isinstance(attribute, myhdl._Signal._Signal):
                # We only want to generate on the signals.
                random.setstate(random_state)
                random.jumpahead(0)
                random_state = random.getstate()
                # We've already set the random state to what we want, so
                # request that _signal_random_source leave it alone with
                # deterministic_output=True
                sources.append(
                    _signal_random_source(attribute, clock, reset,
                                          edge_sensitivity=edge_sensitivity))
        return sources
def test_jumpahead():
    """jumpahead will change the pseudo-number generator's internal state.

    Compares the PRNG state tuples before and after jumpahead(20); fails if
    more than half of the state elements are unchanged.
    """
    random.seed()
    state1 = random.getstate()
    random.jumpahead(20)
    state2 = random.getstate()
    rep = 0
    for ind in range(len(state1)):
        elem1 = state1[ind]
        elem2 = state2[ind]
        if (elem1 == elem2):
            rep += 1
    if (rep > len(state1) / 2):
        # BUG FIX: the original used `raise "..."` — string exceptions are
        # illegal (TypeError at raise time in Python >= 2.6), so the check
        # could never report its intended failure.
        raise AssertionError("state1 and state2 can't be the same")
def my_reduce(self, key, values):
    """Reducer: among all candidate values, keep the one whose greedy
    solution (wynik_zachlanny) has the smallest total last-column sum;
    yields (key[0], best).
    """
    # Py2-only: derive a PRNG stream from the key's second component.
    random.jumpahead(key[1])
    best = None
    for value in values:
        wyn = wynik_zachlanny(value)
        if best == None:
            # First candidate becomes the provisional best.
            best = value
            bestval = sum([x[-1] for x in wyn])
        else:
            actval = sum([x[-1] for x in wyn])
            if actval < bestval:
                bestval = actval
                best = value
    yield key[0], best
def _gen_random(size, frag_size, seed, jumps):
    """Random generator.

    :param size: Size
    :param frag_size: Fragment size
    :param seed: Random seed
    :param jumps: Number of jumps (Py2-only jumpahead offset)
    :return: a fragment of elements — frag_size rows of size uniform floats,
             deterministic for a given (seed, jumps) pair.
    """
    import random
    random.seed(seed)
    random.jumpahead(jumps)
    fragment = []
    for _ in range(frag_size):
        row = [random.random() for _ in range(size)]
        fragment.append(row)
    return fragment
def _gen_normal(size, frag_size, seed, jumps):
    """Normal generator.

    :param size: Size
    :param frag_size: Fragment size
    :param seed: Random seed
    :param jumps: Number of jumps (Py2-only jumpahead offset)
    :return: a fragment of elements — frag_size rows of size standard-normal
             samples, deterministic for a given (seed, jumps) pair.
    """
    import random
    random.seed(seed)
    random.jumpahead(jumps)
    fragment = []
    for _ in range(frag_size):
        row = [random.gauss(mu=0.0, sigma=1.0) for _ in range(size)]
        fragment.append(row)
    return fragment
def get_generator(gen_conf, size=12345):
    """Resolve a 'name:seed:seq' generator spec to a seeded PRNG.

    On any parse/seed failure, falls back to randomMT seeded from
    hash(size) and rewrites gen_conf to the equivalent default spec.
    Returns (generator, gen_conf).
    """
    try:
        key_parts = gen_conf.split(':')
        random_cls = PRNGS_MAP.get(key_parts[0], randomMT)
        seed = int(key_parts[1])
        seq = int(key_parts[2])
        random_cls.seed(seed)
        # Advance by the sequence index so streams with the same seed differ.
        random_cls.jumpahead(seq)
        ret = random_cls
    except Exception as e:
        # Fallback: deterministic default generator derived from `size`.
        randomMT.seed(hash(size))
        randomMT.jumpahead(size)
        ret = randomMT
        gen_conf = 'default:' + str(hash(size)) + ':' + str(size)
    return ret, gen_conf
def get_generator(gen_key, size=12345):
    """Resolve a 'name:seed:seq' generator key to a seeded PRNG.

    Malformed keys (or seeding errors) fall back to randomMT seeded from
    hash(size), and gen_key is rewritten to the matching default spec.
    Returns (generator, gen_key).
    """
    try:
        key_parts = gen_key.split(':')
        random_cls = PRNGS_MAP.get(key_parts[0], randomMT)
        seed = int(key_parts[1])
        seq = int(key_parts[2])
        random_cls.seed(seed)
        # Advance by the sequence index so same-seed streams differ.
        random_cls.jumpahead(seq)
        ret = random_cls
    except Exception as e:
        randomMT.seed(hash(size))
        randomMT.jumpahead(size)
        ret = randomMT
        gen_key = 'default:' + str(hash(size)) + ':' + str(size)
    return ret, gen_key
def __import_request_file(self, filename):
    """Parse an LDT request file into typed chunks and dispatch each chunk
    to its registered handler; returns False (after marking the current
    request partial/pending) on a missing handler or handler failure.
    """
    # Py2-only: perturb the PRNG from the current time-of-day digits.
    random.jumpahead(int(time.strftime('%S%M%H')))
    self.__ref_group_str = ''
    self.__request = None
    chunk = {}
    for line in fileinput.input(filename):
        tmp = line.replace('\r', '')
        tmp = tmp.replace('\n', '')
        # LDT line layout: bytes 3-6 are the record type, the rest is data.
        line_type = tmp[3:7]
        line_data = tmp[7:]
        if line_type in cLDTImporter._chunk_starters:
            # process any previous data
            if len(chunk) != 0:
                # get handler
                try:
                    handle_chunk = cLDTImporter.__chunk_handler[chunk_type]
                except KeyError:
                    fileinput.close()
                    if self.__request is not None:
                        self.__request['request_status'] = 'partial'
                        self.__request['is_pending'] = 'true'
                        self.__request.save_payload()
                    _log.Log(gmLog.lErr, 'kein Handler für Satztyp [%s] verfügbar' % chunk_type)
                    return False
                # handle chunk
                if not handle_chunk(self, chunk):
                    if self.__request is not None:
                        self.__request['request_status'] = 'partial'
                        self.__request['is_pending'] = 'true'
                        self.__request.save_payload()
                    fileinput.close()
                    _log.Log(gmLog.lErr, 'cannot handle [%s] chunk' % chunk_type)
                    return False
            # start new chunk
            chunk = {}
            chunk_type = line_type
        # FIXME: max line count
        # NOTE(review): if the file's first record is not a chunk starter,
        # chunk_type is referenced before assignment on the next starter.
        if not chunk.has_key(line_type):
            chunk[line_type] = []
        chunk[line_type].append(line_data)
    fileinput.close()
    return True
def generate(params, k, passlength=16):
    """Derive and print a printable password of ``passlength`` characters
    from a private key salted with hostname+user, XOR-mixed with the
    account passphrase, via repeated PRNG seeding.
    """
    s = ''
    ascii_min = 32  # space
    ascii_max = 126  # backtick? something visible before ^H
    h = params['hostname']
    u = params['user']
    p = params['pass']
    # salt pvt key with service hostname + username
    k['lines'].append(h)
    k['lines'].append(u)
    salted = ''
    for line in k['lines']:
        salted += line
    # hash the salted pvt key
    # keyspace for the future seed() is hella small now
    manager = hashlib.sha1()
    manager.update(salted)
    hashed = manager.hexdigest()
    # and now the passphrase
    index = 0
    passphrase = params['pass']
    hlen = hashed.__len__()
    plen = passphrase.__len__()
    # Stretch the shorter of (hash digest, passphrase) so they can be
    # XORed pairwise below (Py2 integer division in the repeat count).
    if hlen > plen:
        # NOTE(review): slicing [:plen] keeps the passphrase at its old
        # length instead of matching hlen — [:hlen] looks intended; the
        # later zip() silently truncates to the shorter sequence anyway.
        passphrase = (passphrase*((hlen/plen+1)))[:plen]
        print passphrase
    elif hlen < plen:
        hashed = (hashed*((plen/hlen+1)))[:hlen]
        print hashed
    hashed = [ord(a) ^ ord(b) for a,b in zip(hashed,passphrase)]
    # probably not remembering this piece off the top of my head
    for i in range(0,passlength):
        # Reseed per output char from the mixed bytes; jumpahead (Py2-only)
        # varies the stream position per index.
        index = i % hashed.__len__()
        random.seed(hashed[index])
        random.jumpahead((i+1)*(i+2))
        val = random.randint(ascii_min, ascii_max)
        s += chr(val)
    print '--->[' + s + ']<---'
def getregioncorner(self, coord ):
    """ Get the corner height of a region (well, four neighboring regions, anyway.)
    coord - the coordinates of the region corner, region-sized (512 blocks wide)

    Deterministic: the same (seed, coord) always yields the same height in
    [0, 1), because the PRNG is seeded from the world seed XOR packed
    coordinate bits and advanced by the complementary bit packing.
    """
    assert( type(coord[0]) == int )
    assert( type(coord[1]) == int )
    assert( type(self.seed) == int )
    regionsouth = coord[0]
    regionwest = coord[1]
    # Pack high bits of one axis with low bits of the other (and vice versa
    # for the jump) so both axes influence the generator state.
    random.seed( self.seed ^ ((regionsouth & 0xFFFF0000) | (regionwest & 0x0000FFFF)) )
    random.jumpahead( ((regionwest & 0xFFFF0000) | (regionsouth & 0x0000FFFF)) )
    corner = random.random()
    return corner
def editChunk(self, cornerblockx, cornerblockz, terrainchunk):
    """ Edit the input chunk and add ores.

    Lazily builds a sizex*sizez*sizey transparent stamp, fills it with ore
    at probability self.density using a position-seeded PRNG, then applies
    the stamp to the chunk at this deposit's offset.
    """
    if self.stamp == None:
        self.stamp = [[[MAT_TRANSPARENT for vert in xrange(self.sizey)] for col in xrange(self.sizez)] for row in xrange(self.sizex)]
    # Add shit to the stamp here!
    # Seed from world seed and packed x/z bits; jumpahead (Py2-only) mixes
    # in y so vertically stacked deposits differ.
    random.seed( self.seed ^ (( (self.x << 16) & 0xFFFF0000) | ( self.z & 0x0000FFFF)) )
    random.jumpahead( self.y )
    for row in self.stamp:
        for col in row:
            for ix in xrange(len(col)) :
                if random.random() < self.density:
                    col[ix] = self.ore
    # Stamp position relative to the chunk's corner block.
    offsetx = self.x - cornerblockx
    offsetz = self.z - cornerblockz
    offsety = self.y
    self.stampToChunk( self.stamp, terrainchunk.blocks, offsetx, offsetz, offsety )
def __import_request_file(self, filename):
    """Read an LDT file, grouping lines into chunks by record type and
    dispatching completed chunks to their handlers; marks the current
    request partial/pending and returns False on any handler problem.
    """
    # Py2-only: perturb the PRNG from the time-of-day digits.
    random.jumpahead(int(time.strftime('%S%M%H')))
    self.__ref_group_str = ''
    self.__request = None
    chunk = {}
    for line in fileinput.input(filename):
        tmp = line.replace('\r','')
        tmp = tmp.replace('\n','')
        # Bytes 3-6 carry the LDT record type; the remainder is payload.
        line_type = tmp[3:7]
        line_data = tmp[7:]
        if line_type in cLDTImporter._chunk_starters:
            # process any previous data
            if len(chunk) != 0:
                # get handler
                try:
                    handle_chunk = cLDTImporter.__chunk_handler[chunk_type]
                except KeyError:
                    fileinput.close()
                    if self.__request is not None:
                        self.__request['request_status'] = 'partial'
                        self.__request['is_pending'] = 'true'
                        self.__request.save_payload()
                    _log.Log(gmLog.lErr, 'kein Handler für Satztyp [%s] verfügbar' % chunk_type)
                    return False
                # handle chunk
                if not handle_chunk(self, chunk):
                    if self.__request is not None:
                        self.__request['request_status'] = 'partial'
                        self.__request['is_pending'] = 'true'
                        self.__request.save_payload()
                    fileinput.close()
                    _log.Log(gmLog.lErr, 'cannot handle [%s] chunk' % chunk_type)
                    return False
            # start new chunk
            chunk = {}
            chunk_type = line_type
        # FIXME: max line count
        # NOTE(review): chunk_type is unbound here if the first record is
        # not a chunk starter — confirm input files always start with one.
        if not chunk.has_key(line_type):
            chunk[line_type] = []
        chunk[line_type].append(line_data)
    fileinput.close()
    return True
def testIsolineLabelBackground(self):
    """Plot global 'tas' isolines with randomized label text/background
    colors and opacities, then compare against the reference image.
    """
    (latmin, latmax, lonmin, lonmax) = (-90, 90, -180, 180)
    dataset = cdms2.open(os.path.join(vcs.sample_data, "tas_cru_1979.nc"))
    data = dataset("tas", time=slice(0, 1), latitude=(latmin, latmax),
                   longitude=(lonmin, lonmax, 'co'), squeeze=1)
    dataset.close()
    self.x.backgroundcolor = [100, 105, 105]
    isoline = self.x.createisoline()
    isoline.label = "y"
    texts = []
    colors = []  # NOTE(review): populated below but never used afterwards
    bcolors = []
    bopacities = []
    for i in range(10):
        text = self.x.createtext()
        # Deterministic per-index colors via seed + jumpahead (Py2-only).
        random.seed(i * 200)
        text.color = random.randint(1, 255)
        text.height = 12
        random.jumpahead(i * 100)
        colors.append(random.randint(1, 255))
        random.jumpahead(i * 20)
        bcolors.append(random.randint(1, 255))
        bopacities.append(random.randint(0, 100))
        # Alternate passing text objects by name and by object to cover
        # both accepted forms of isoline.text.
        if i % 2 == 0:
            texts.append(text.name)
        else:
            texts.append(text)
    isoline.text = texts
    isoline.labelbackgroundcolors = bcolors
    isoline.labelbackgroundopacities = bopacities
    isoline.labelskipdistance = 15.0
    # First test using isoline.text[...].color
    self.x.plot(data, isoline, bg=self.bg)
    fnm = "test_vcs_isoline_labels_background.png"
    self.checkImage(fnm)
def getregioncorner(self, coord):
    """ Get the corner height of a region (well, four neighboring regions, anyway.)
    coord - the coordinates of the region corner, region-sized (512 blocks wide)

    The height in [0, 1) is a pure function of (self.seed, coord): the PRNG
    is seeded from the world seed XOR one bit-packing of the coordinates and
    advanced (Py2-only jumpahead) by the complementary packing.
    """
    assert (type(coord[0]) == int)
    assert (type(coord[1]) == int)
    assert (type(self.seed) == int)
    south = coord[0]
    west = coord[1]
    # High bits of one axis combined with low bits of the other, and the
    # reverse packing for the jump, so both axes shape the stream.
    seed_bits = (south & 0xFFFF0000) | (west & 0x0000FFFF)
    jump_bits = (west & 0xFFFF0000) | (south & 0x0000FFFF)
    random.seed(self.seed ^ seed_bits)
    random.jumpahead(jump_bits)
    return random.random()
def __init__(self, place=None, size=None):
    """Food item at ``place`` (default [0, 0]) with base ``size``
    (default [2, 2]) grown by a random food amount, and a random color.

    BUG FIX: the original defaults were mutable lists (place=[0, 0],
    size=[2, 2]); since self.size aliases ``size`` and is mutated below,
    the shared default list grew with every instance created without an
    explicit size.  None sentinels restore the intended per-instance
    defaults while keeping the call signature backward compatible
    (explicitly passed lists are still aliased and mutated, as before).
    """
    if place is None:
        place = [0, 0]
    if size is None:
        size = [2, 2]
    self.X = place[0]
    self.Y = place[1]
    self.size = size
    # because threading: decorrelate this thread's PRNG stream (Py2-only).
    random.jumpahead(1252157)
    self.foodamount = random.random()
    # Grow both dimensions proportionally to the food amount (0-5 extra).
    self.size[0] = self.size[0] + int(self.foodamount * 6)
    self.size[1] = self.size[1] + int(self.foodamount * 6)
    self.RED = (255, 0, 0)
    self.BLACK = (0, 0, 0)
    self.GREEN = (0, 255, 0)
    self.BLUE = (0, 0, 255)
    colors = [self.RED, self.BLACK, self.GREEN, self.BLUE]
    self.colornumber = random.randint(0, 3)
    self.color = colors[self.colornumber]
def getRandomVideo():
    """Pick a random enabled stream source from the JSON stream file and
    return (after printing) its assembled rtmp playback URI.
    """
    # Open JSON
    stream_file = open(stream,'r')
    stream_data = json.load(stream_file)
    rtmp_url = stream_data["provider"]["streamer"]["url"]
    swfUrl = stream_data["provider"]["streamer"]["swfUrl"]
    selection = []
    for source in stream_data["provider"]["streamer"]["streams"]["source"]:
        # select only enabled videos ("enabled" is the string 'true' in the feed)
        if source["enabled"] == 'true':
            selection.append(source)
    # return random video
    # Py2-only: nudge the shared PRNG before choosing.
    random.jumpahead(1)
    select = choice(selection)
    uri = rtmp_url + ' swfURL=' + swfUrl + ' playpath=' + select["playpath"] + ' live=true buffer=20000'
    print uri
    return uri
def _checkDependencies():
    """Skip the test module unless bridge-utils and the tc kernel modules
    are available: create a scratch bridge, try adding an ingress qdisc,
    then tear the bridge down.
    """
    # hack to avoid name collision in concurrently-running mock-based tests
    # (jumpahead is Python 2 only)
    random.jumpahead(os.getpid())
    dev = _Bridge()
    try:
        dev.addDevice()
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; narrowed to Exception in both probes.
    except Exception:
        raise SkipTest("'brctl' has failed. Do you have bridge-utils "
                       "installed?")
    # NOTE(review): `null` is opened but never passed to check_call —
    # presumably intended as an output redirect; left as-is.
    null = open("/dev/null", "a")
    try:
        check_call([EXT_TC, 'qdisc', 'add', 'dev', dev.devName, 'ingress'])
    except Exception:
        raise SkipTest("'tc' has failed. Do you have Traffic Control kernel "
                       "modules installed?")
    finally:
        null.close()
    dev.delDevice()
def fuzz(fuzz_input=None, seed_val=None, jump_idx=None, ratio_min=0.0,
         ratio_max=1.0, range_list=None, fuzzable_chars=None):
    '''Twiddle bytes of input and return output.

    In-place mutation of fuzz_input (mutable byte sequence): a random
    fraction (ratio in [ratio_min, ratio_max]) of eligible offsets per
    chunk is replaced with random bytes.  Eligibility is restricted by
    range_list (via _fuzzable) and/or fuzzable_chars.  seed_val and
    jump_idx make runs reproducible.
    '''
    # NOTE(review): %d/%f formats raise if seed_val/jump_idx are None —
    # confirm callers always supply both when debug logging is enabled.
    logging.debug('fuzz params: %d %d %f %f %s', seed_val, jump_idx, ratio_min, ratio_max, range_list)
    if seed_val is not None:
        random.seed(seed_val)
    if jump_idx is not None:
        # Py2-only: advance the seeded stream per fuzz iteration.
        random.jumpahead(jump_idx)
    ratio = random.uniform(ratio_min, ratio_max)
    inputlen = len(fuzz_input)
    chunksize = 2 ** 19  # 512k
    logger.debug('ratio=%f len=%d', ratio, inputlen)
    if range_list:
        # Ranges are absolute offsets, so fall back to a single chunk.
        chunksize = inputlen
    for chunk_start in xrange(0, inputlen, chunksize):
        chunk_end = min(chunk_start + chunksize, inputlen)
        chunk_len = chunk_end - chunk_start
        if range_list:
            chooselist = [x for x in xrange(inputlen) if _fuzzable(x, range_list)]
        else:
            chooselist = xrange(chunk_len)
        if fuzzable_chars is not None:
            # Keep only offsets whose current byte is in the allowed set.
            chooselist = [x for x in chooselist if fuzz_input[x + chunk_start] in fuzzable_chars]
        nbytes_to_fuzz = int(round(ratio * len(chooselist)))
        bytes_to_fuzz = random.sample(chooselist, nbytes_to_fuzz)
        for idx in bytes_to_fuzz:
            offset = chunk_start + idx
            fuzz_input[offset] = random.getrandbits(8)
    return fuzz_input
def editChunk(self, cornerblockx, cornerblockz, terrainchunk):
    """ Edit the input chunk and add ores.

    Builds the ore stamp lazily (all-transparent sizex*sizez*sizey grid),
    fills cells with ore at probability self.density from a deterministic
    position-derived PRNG, and stamps the result into the chunk.
    """
    if self.stamp == None:
        self.stamp = [[[MAT_TRANSPARENT for vert in xrange(self.sizey)] for col in xrange(self.sizez)] for row in xrange(self.sizex)]
    # Add shit to the stamp here!
    # Seed from world seed plus packed x/z bits; Py2-only jumpahead mixes
    # in y so deposits at different heights differ.
    random.seed(self.seed ^ (( (self.x << 16) & 0xFFFF0000) | (self.z & 0x0000FFFF)))
    random.jumpahead(self.y)
    for row in self.stamp:
        for col in row:
            for ix in xrange(len(col)):
                if random.random() < self.density:
                    col[ix] = self.ore
    # Stamp placement relative to the chunk corner.
    offsetx = self.x - cornerblockx
    offsetz = self.z - cornerblockz
    offsety = self.y
    self.stampToChunk(self.stamp, terrainchunk.blocks, offsetx, offsetz, offsety)
def get_random_trackset(date, length):
    '''Pick a pseudo-random trackset, deterministic per calendar day.

    Seeds with 0 and jumps ahead by whole days since 2014-12-25, so the
    same date always selects the same trackset.  NOT SECURE, just simple.
    '''
    random.seed(0)
    # Days elapsed since the epoch date (86400 s/day); jumpahead is Py2-only.
    random.jumpahead(int((date-datetime(2014,12,25)).total_seconds()/86400))
    tracksets = json.load(open(JSON_FILE,'r'))
    # Retry until a pick whose audio file exists on disk.
    while True:
        i = random.randint(0, len(tracksets)-1)
        trackset = tracksets[i]
        musicfile = os.path.join(TEMP_DIR, trackset['basename'])
        if os.path.exists(musicfile):
            break
    musictext = '[Included Music]' + ' '.join(['\n '+t.strip() for t in trackset['nicename'].splitlines()])
    info = dict(musicfile=musicfile,
                musictext=musictext.encode('utf8'),  # this might be bad
                index=i,
                date=date,
                length=length)
    return info
def __init__(self, authorization, starturi, resulturi):
    """Set up a new session record.

    authorization -- CCXML authentication token
    starturi      -- URI of the script used to start the session
    resulturi     -- URI where CCXML should send responses
    """
    # CCXML authentication token.
    self.authorization = authorization
    # Build a unique token string from a freshly reseeded PRNG draw.
    # In theory, we should probably check that it's not already in use.
    random.seed()
    random.jumpahead(100)
    rand_text = str(random.random())
    self.token = sha.new(rand_text).hexdigest()
    # Script URIs: the start script is also the one currently in use.
    self.startURI = starturi
    self.currentURI = self.startURI
    # No remote session established yet.
    self.sessionID = None
    # Destination for CCXML responses.
    self.resultURI = resulturi
def getSpawnsInRegion(self, rx, rz):
    """Return the spawn-point landmarks for region (rx, rz), keyed by chunk.

    Results are cached in self.worldspawns so each region is generated
    exactly once; repeat calls return the cached dict.
    """
    # Generate each spawn point and store in regionspawns, otherwise we just get the cached spawnpoints.
    if not (rx, rz) in self.worldspawns:
        # Seed the random number gen with all 64 bits of region coordinate data by using both seed and jumpahead.
        # NOTE(review): the masks keep only the high 16 bits of rx and low
        # 16 bits of rz, so this does NOT actually mix all 64 bits — but
        # changing it would change every generated world; confirm intent.
        random.seed(self.seed ^ ((rx & 0xFFFF0000) | (rz & 0x0000FFFF)))
        random.jumpahead(((rx & 0xFFFF0000) | (rz & 0x0000FFFF)))
        # First number should be number of points in region
        numspawns = self.density
        rangetop = self.rangetop
        rangebottom = self.rangebottom
        self.worldspawns[(rx, rz)] = {}
        currentregion = self.worldspawns[(rx, rz)]
        for ix in xrange(numspawns):
            # Uniform block position within the region, shifted into world
            # coordinates by the region offset.
            blockx = random.randint(
                0, CHUNK_WIDTH_IN_BLOCKS * REGION_WIDTH_IN_CHUNKS - 1) + rx * CHUNK_WIDTH_IN_BLOCKS * REGION_WIDTH_IN_CHUNKS
            blockz = random.randint(
                0, CHUNK_WIDTH_IN_BLOCKS * REGION_WIDTH_IN_CHUNKS - 1) + rz * CHUNK_WIDTH_IN_BLOCKS * REGION_WIDTH_IN_CHUNKS
            # Height clamped to the configured vertical range.
            blocky = random.randint(
                max(0, rangebottom), min(CHUNK_HEIGHT_IN_BLOCKS - 1, rangetop))
            # Python 2 integer division locates the containing chunk.
            currchunkx = blockx / CHUNK_WIDTH_IN_BLOCKS
            currchunkz = blockz / CHUNK_WIDTH_IN_BLOCKS
            # We store the points for each chunk indexed by chunk
            if not (currchunkx, currchunkz) in currentregion:
                currentregion[(currchunkx, currchunkz)] = []
            # We make a landmark for each point: pick a random prototype
            # from landmarklist and shallow-copy it.
            lmtypeix = random.randint(0, len(self.landmarklist) - 1)
            lmtype = self.landmarklist[lmtypeix]
            #lm = lmtype(self.seed, self.terrainlayer, blockx, blockz, blocky)
            lm = copy.copy(lmtype)
            lm.setPos(blockx, blockz, blocky)
            # Lastly we append the landmark to the chunk
            currentregion[(currchunkx, currchunkz)].append(lm)
    return self.worldspawns[(rx, rz)]
def main():
    """
    NAME
       scalc.py

    DESCRIPTION
       calculates Sb from VGP Long,VGP Lat,Directional kappa,Site latitude data

    SYNTAX
        scalc -h [command line options] [< standard input]

    INPUT
       takes space delimited files with PLong, PLat,[kappa, N_site, slat]

    OPTIONS
        -h prints help message and quits
        -f FILE: specify input file
        -c cutoff: specify VGP colatitude cutoff value
        -k cutoff: specify kappa cutoff
        -v : use the VanDammme criterion
        -a: use antipodes of reverse data: default is to use only normal
        -C: use all data without regard to polarity
        -b: do a bootstrap for confidence
        -p: do relative to principle axis

    NOTES
        if kappa, N_site, lat supplied, will consider within site scatter

    OUTPUT
        N Sb  Sb_lower Sb_upper Co-lat. Cutoff
    """
    # --- defaults for all command-line options ---
    coord, kappa, cutoff = "0", 0, 90.
    nb, anti, boot = 1000, 0, 0
    all = 0
    n = 0
    v = 0
    spin = 1
    coord_key = 'tilt_correction'
    # --- command-line parsing (Python 2 print statements throughout) ---
    if '-h' in sys.argv:
        print main.__doc__
        sys.exit()
    if '-f' in sys.argv:
        ind = sys.argv.index("-f")
        in_file = sys.argv[ind + 1]
        f = open(in_file, 'rU')
        lines = f.readlines()
    else:
        lines = sys.stdin.readlines()
    if '-c' in sys.argv:
        ind = sys.argv.index('-c')
        cutoff = float(sys.argv[ind + 1])
    if '-k' in sys.argv:
        ind = sys.argv.index('-k')
        kappa = float(sys.argv[ind + 1])
    if '-n' in sys.argv:
        ind = sys.argv.index('-n')
        n = int(sys.argv[ind + 1])
    if '-a' in sys.argv:
        anti = 1
    if '-C' in sys.argv:
        cutoff = 180.  # no cutoff
    if '-b' in sys.argv:
        boot = 1
    if '-v' in sys.argv:
        v = 1
    if '-p' in sys.argv:
        spin = 0
    #
    #
    # find desired vgp lat,lon, kappa,N_site data:
    #
    A, Vgps, slats, Pvgps = 180., [], [], []
    for line in lines:
        if '\t' in line:
            rec = line.replace('\n', '').split(
                '\t')  # split each line on space to get records
        else:
            rec = line.replace(
                '\n', '').split()  # split each line on space to get records
        vgp = {}
        vgp['vgp_lon'], vgp['vgp_lat'] = rec[0], rec[1]
        Pvgps.append([float(rec[0]), float(rec[1])])
        # flip reverse-polarity poles to their antipodes if -a was given
        if anti == 1:
            if float(vgp['vgp_lat']) < 0:
                vgp['vgp_lat'] = '%7.1f' % (-1 * float(vgp['vgp_lat']))
                vgp['vgp_lon'] = '%7.1f' % (float(vgp['vgp_lon']) - 180.)
        # optional within-site columns: kappa, N_site, site latitude
        if len(rec) == 5:
            vgp['average_k'], vgp['average_nn'], vgp['average_lat'] = rec[
                2], rec[3], rec[4]
            slats.append(float(rec[4]))
        else:
            vgp['average_k'], vgp['average_nn'], vgp[
                'average_lat'] = "0", "0", "0"
        # keep only poles within the colatitude cutoff that pass the
        # kappa and N_site thresholds
        if 90. - (float(vgp['vgp_lat'])) <= cutoff and float(
                vgp['average_k']) >= kappa and int(vgp['average_nn']) >= n:
            Vgps.append(vgp)
    if spin == 0:  # do transformation to pole
        # rotate all poles into the principal-axis frame (-p option)
        ppars = pmag.doprinc(Pvgps)
        for vgp in Vgps:
            vlon, vlat = pmag.dotilt(float(vgp['vgp_lon']),
                                     float(vgp['vgp_lat']),
                                     ppars['dec'] - 180., 90. - ppars['inc'])
            vgp['vgp_lon'] = vlon
            vgp['vgp_lat'] = vlat
            vgp['average_k'] = "0"
    S_B = pmag.get_Sb(Vgps)
    A = cutoff
    if v == 1:
        # Vandamme (1994) iterative cutoff: trim the farthest pole until
        # the maximum colatitude falls below A = 1.8*Sb + 5
        thetamax, A = 181., 180.
        vVgps, cnt = [], 0
        for vgp in Vgps:
            vVgps.append(vgp)  # make a copy of Vgps
        while thetamax > A:
            thetas = []
            A = 1.8 * S_B + 5
            cnt += 1
            for vgp in vVgps:
                thetas.append(90. - (float(vgp['vgp_lat'])))
            thetas.sort()
            thetamax = thetas[-1]
            if thetamax < A:
                break
            # drop the pole(s) at thetamax and recompute Sb
            nVgps = []
            for vgp in vVgps:
                if 90. - (float(vgp['vgp_lat'])) < thetamax:
                    nVgps.append(vgp)
            vVgps = []
            for vgp in nVgps:
                vVgps.append(vgp)
            S_B = pmag.get_Sb(vVgps)
        Vgps = []
        for vgp in vVgps:
            Vgps.append(vgp)  # make a new Vgp list
    SBs, Ns = [], []
    if boot == 1:
        # bootstrap resampling of Vgps to get a 95% confidence interval on Sb
        print 'please be patient...  bootstrapping'
        for i in range(nb):  # now do bootstrap
            BVgps = []
            for k in range(len(Vgps)):
                ind = random.randint(0, len(Vgps) - 1)
                # NOTE(review): jumpahead scaled by the drawn index —
                # presumably to decorrelate draws; confirm this is intended
                random.jumpahead(int(ind * 1000))
                BVgps.append(Vgps[ind])
            SBs.append(pmag.get_Sb(BVgps))
        SBs.sort()
        low = int(.025 * nb)
        high = int(.975 * nb)
        print len(Vgps), '%7.1f %7.1f %7.1f %7.1f ' % (S_B, SBs[low], SBs[high], A)
    else:
        print len(Vgps), '%7.1f %7.1f ' % (S_B, A)
    if len(slats) > 2:
        # summary statistics of the site latitudes, when supplied
        stats = pmag.gausspars(slats)
        print 'mean lat = ', '%7.1f' % (stats[0])
#!/usr/bin/env python
"""Plot a histogram of 500 uniform random draws in [10, 20]."""
import matplotlib
matplotlib.use("TkAgg")
import random, pylab, sys

# Sample count and range.  Renamed from min/max so the builtins
# min()/max() are not shadowed.
N, lo, hi = 500, 10, 20
bins = (hi - lo) * 2
Nums = []
for i in range(N):
    Nums.append(random.uniform(lo, hi))
    # Perturb the PRNG state between draws (Python 2 random.jumpahead).
    random.jumpahead(10)
pylab.hist(Nums, bins=bins, facecolor='orange')
pylab.title('Uniform distribution')
pylab.show()
def advanceRandomState(numadvances):
    """Advance the shared ``random`` module PRNG state.

    numadvances: int handed to ``random.jumpahead`` (Python 2 only;
    ``jumpahead`` was removed in Python 3).
    """
    random.jumpahead(numadvances)
def _fuzz(self):
    """Twiddle bits of input_file_path and write output to output_file_path.

    Deterministic per (rng_seed, iteration): the PRNG is seeded from the
    input file's seed and advanced by the iteration count, then a ratio
    of eligible bits is flipped by XOR-ing a mask into self.input.
    Result is left in self.fuzzed (same object as self.input).
    """
    # rng_seed is based on the input file; jumpahead makes each iteration
    # produce a distinct but reproducible bit pattern.
    seed(self.rng_seed)
    jumpahead(self.iteration)
    # select a ratio of bytes to fuzz
    self.range = self.sf.rangefinder.next_item()
    self.ratio = uniform(self.range.min, self.range.max)
    chooselist = []
    # only add bytes in range to the bytes we can fuzz
    range_list = self.options.get('range_list')
    if range_list:
        max_index = len(self.input) - 1
        for (start, end) in range_list:
            if start > end:
                logger.warning(
                    'Skipping range_list item %s-%s (start exceeds end)',
                    start, end)
                continue
            elif start > max_index:
                # we can't go past the end of the file
                logger.debug(
                    'Skipping range_list item %s-%s (start exceeds max)',
                    start, end)
                continue
            # figure out where the actual end of this range is
            last = min(end, max_index)
            if last != end:
                # fixed garbled log text: was 'from to %s to %s'
                logger.debug(
                    'Reset range end from %s to %s (file length exceeded)',
                    end, last)
            # seems legit...proceed
            chooselist.extend(xrange(start, last + 1))
    else:
        # they're all available to fuzz
        chooselist.extend(xrange(len(self.input)))
    # build the list of bits we're allowed to flip:
    # chooselist is the list of bytes we can fuzz, so protobitlist is the
    # base position of the first bit we may fuzz in each of those bytes
    protobitlist = [x * 8 for x in chooselist]
    bitlist = []
    for b in protobitlist:
        for i in xrange(0, 8):
            # fill in the actual bits we are allowed to fuzz:
            # this adds b, b+1, b+2...b+7
            bitlist.append(b + i)
    # calculate num of bits to flip
    bit_flip_count = int(round(self.ratio * len(bitlist)))
    indices_to_flip = sample(bitlist, bit_flip_count)
    # create mask to xor with input
    mask = bytearray(len(self.input))
    for i in indices_to_flip:
        (byte_index, bit_index) = divmod(i, 8)
        mask[byte_index] = mask[byte_index] | (1 << bit_index)
    # apply the mask to the input
    for idx, val in enumerate(self.input):
        self.input[idx] = mask[idx] ^ val
    self.fuzzed = self.input