def main(prng=None, display=False):
    if prng is None:
        prng = Random()
        prng.seed(time())
    items = [(80, 1), (0, 0), (80, 10)]
    problem = inspyred.benchmarks.Knapsack(100, items, duplicates=False)
    ea = inspyred.ec.EvolutionaryComputation(prng)
    ea.selector = inspyred.ec.selectors.tournament_selection
    ea.variator = [inspyred.ec.variators.uniform_crossover,
                   inspyred.ec.variators.gaussian_mutation]
    ea.replacer = inspyred.ec.replacers.steady_state_replacement
    ea.terminator = inspyred.ec.terminators.evaluation_termination
    final_pop = ea.evolve(generator=problem.generator,
                          evaluator=problem.evaluator,
                          bounder=problem.bounder,
                          maximize=problem.maximize,
                          pop_size=100,
                          max_evaluations=2500,
                          tournament_size=5,
                          num_selected=2)
    if display:
        best = max(ea.population)
        print('Best Solution: {0}: {1}'.format(str(best.candidate), best.fitness))
    return ea
def __init__(self):
    super(HTTPDigestAuth, self).__init__()
    self.random = SystemRandom()
    try:
        self.random.random()
    except NotImplementedError:
        self.random = Random()
def test_ordered_dictionaries_preserve_keys():
    r = Random()
    keys = list(range(100))
    r.shuffle(keys)
    x = fixed_dictionaries(
        OrderedDict([(k, booleans()) for k in keys])).example()
    assert list(x.keys()) == keys
def seeds(starting, n_steps):
    random = Random(starting)
    result = []
    for _ in hrange(n_steps):
        result.append(random.getrandbits(64))
    return result
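# Usage sketch for seeds() above (an added illustration, not from the original
# source): Random(starting) makes the stream reproducible, so identical
# starting seeds yield identical sequences. Assumes `hrange` behaves like the
# builtin range.
assert seeds(42, 5) == seeds(42, 5)   # same seed -> same 64-bit draws
assert seeds(42, 5) != seeds(43, 5)   # different seed -> different stream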
class HTTPDigestAuth(HTTPAuth):
    def __init__(self):
        super(HTTPDigestAuth, self).__init__()
        self.random = SystemRandom()
        try:
            self.random.random()
        except NotImplementedError:
            self.random = Random()

    def get_nonce(self):
        return md5(str(self.random.random()).encode('utf-8')).hexdigest()

    def authenticate_header(self):
        session["auth_nonce"] = self.get_nonce()
        session["auth_opaque"] = self.get_nonce()
        return 'Digest realm="' + self.realm + \
            '",nonce="' + session["auth_nonce"] + \
            '",opaque="' + session["auth_opaque"] + '"'

    def authenticate(self, auth, password):
        if not auth.username or not auth.realm or not auth.uri \
                or not auth.nonce or not auth.response or not password:
            return False
        if auth.nonce != session.get("auth_nonce") or \
                auth.opaque != session.get("auth_opaque"):
            return False
        a1 = auth.username + ":" + auth.realm + ":" + password
        ha1 = md5(a1.encode('utf-8')).hexdigest()
        a2 = request.method + ":" + auth.uri
        ha2 = md5(a2.encode('utf-8')).hexdigest()
        a3 = ha1 + ":" + auth.nonce + ":" + ha2
        response = md5(a3.encode('utf-8')).hexdigest()
        return response == auth.response
def __init__(self):
    self.valid_consts = [x for x in range(0, 10) if x != 0]
    rand = Random()
    self.a = rand.choice(self.valid_consts)
    self.b = rand.choice(self.valid_consts)
    self.c = rand.choice(self.valid_consts)
def generate(self, number, interval):
    rv = Random(self.SEED)
    for i in range(number):
        c = Customer(name='Customer%02d' % (i,))
        activate(c, c.visit(timeInBank=12.0))
        t = rv.expovariate(1.0 / interval)
        yield hold, self, t
def export_to_csv(session, filename, multiplier=5):
    # Order by id to keep a stable ordering.
    stmt = text('select lat, lon from mapstat '
                'order by id limit :l offset :o')

    # Set up a pseudo random generator with a fixed seed to prevent
    # datamap tiles from changing with every generation.
    pseudorandom = Random()
    pseudorandom.seed(42)
    random = pseudorandom.random

    offset = 0
    batch = 200000
    pattern = '%.6f,%.6f\n'
    result_rows = 0

    # export mapstat mysql table as csv to local file
    with open(filename, 'w') as fd:
        while True:
            result = session.execute(stmt.bindparams(o=offset, l=batch))
            rows = result.fetchall()
            result.close()
            if not rows:
                break
            lines = []
            append = lines.append
            for r in rows:
                for i in xrange(multiplier):
                    lat = (r[0] + random()) / 1000.0
                    lon = (r[1] + random()) / 1000.0
                    append(pattern % (lat, lon))
            fd.writelines(lines)
            result_rows += len(lines)
            offset += batch
    return result_rows
def __init__(self):
    self.valid_consts = range(1, 10)
    rand = Random()
    self.a = rand.choice(self.valid_consts)
    self.b = rand.choice(self.valid_consts)
    self.c = rand.choice(self.valid_consts)
def __init__(self, context, key=None, counter=None, seed=None):
    int32_info = np.iinfo(np.int32)
    from random import Random
    rng = Random(seed)

    if key is not None and counter is not None and seed is not None:
        raise TypeError("seed is unused and may not be specified "
                        "if both counter and key are given")

    if key is None:
        key = [rng.randrange(int(int32_info.min), int(int32_info.max) + 1)
               for i in range(self.key_length - 1)]
    if counter is None:
        counter = [rng.randrange(int(int32_info.min), int(int32_info.max) + 1)
                   for i in range(4)]

    self.context = context
    self.key = key
    self.counter = counter
    self.counter_max = int32_info.max
def main(prng=None, display=False):
    if prng is None:
        prng = Random()
        prng.seed(time())

    import logging
    logger = logging.getLogger('inspyred.ec')
    logger.setLevel(logging.DEBUG)
    file_handler = logging.FileHandler('inspyred.log', mode='w')
    file_handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    ea = inspyred.ec.DEA(prng)
    if display:
        ea.observer = inspyred.ec.observers.stats_observer
    ea.terminator = inspyred.ec.terminators.evaluation_termination
    final_pop = ea.evolve(generator=generate_rastrigin,
                          evaluator=inspyred.ec.evaluators.parallel_evaluation_pp,
                          pp_evaluator=evaluate_rastrigin,
                          pp_dependencies=(my_squaring_function,),
                          pp_modules=("math",),
                          pop_size=8,
                          bounder=inspyred.ec.Bounder(-5.12, 5.12),
                          maximize=False,
                          max_evaluations=256,
                          num_inputs=3)
    if display:
        best = max(final_pop)
        print('Best Solution: \n{0}'.format(str(best)))
    return ea
def dir_listing(request):
    """Generate view for 'dir listing' option menu."""
    # Generate random links and files
    m_types = [
        ('   ', 'unknown.gif'),
        ('TXT', 'text.gif'),
        ('DIR', 'folder.gif'),
        ('IMG', 'image2.gif'),
    ]
    m_urls = []
    m_urls_append = m_urls.append
    m_rand = Random()
    for x in xrange(m_rand.randint(2, 50)):
        l_type = m_rand.randint(0, len(m_types) - 1)
        # [IMG, ALT, URL, DATE, SIZE]
        l_date = datetime.datetime.fromtimestamp(
            m_rand.randint(10000000, 1350000000)).strftime("%Y-%m-%d %H:%M")
        l_url = generate_random_url("/home/links/", 1, False)[0]
        m_urls_append([
            m_types[l_type][1],
            m_types[l_type][0],
            l_url,
            l_date,
            '{:.2f}'.format(m_rand.random() * 10)
        ])
    ctx = {'urls': m_urls}
    return render_to_response_random_server('home/dir_listing.html', ctx,
                                            context_instance=RequestContext(request))
def init_seq_order(self, epoch=None, seq_list=None):
    assert seq_list is None, "seq_list not supported for %s" % self.__class__
    need_reinit = self.epoch is None or self.epoch != epoch
    super(CombinedDataset, self).init_seq_order(epoch=epoch, seq_list=seq_list)
    if not need_reinit:
        return False

    # We just select for which seq-idx we will use which dataset.
    # The ordering of the seqs in the datasets will not be set here
    # (do that in the config for the specific dataset).
    seqs_dataset_idx = self._canonical_seqs_dataset_idxs()
    if self.seq_ordering in ("default", "random"):
        # default is random. this is different from base class!
        from random import Random
        rnd = Random(self.epoch)
        rnd.shuffle(seqs_dataset_idx)
    elif self.seq_ordering == "in-order":
        pass  # keep as-is
    elif self.seq_ordering == "reversed":
        seqs_dataset_idx = reversed(seqs_dataset_idx)
    else:
        raise Exception("seq_ordering %s not supported" % self.seq_ordering)
    self.dataset_seq_idxs = self._dataset_seq_idxs(seqs_dataset_idx)
    assert self.num_seqs == len(self.dataset_seq_idxs)

    for dataset in self.datasets.values():
        dataset.init_seq_order(epoch=epoch)
    return True
def find_port(port):
    if 1024 <= port < (65536 - 8):
        return port
    from random import Random
    r = Random(os.getpid())
    return r.randint(1024, 65536 - 8)
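# Illustration of find_port() above (hypothetical values, added here): in-range
# ports pass through unchanged, while out-of-range ports map to a stable
# fallback because the generator is reseeded with os.getpid() on every call.
assert find_port(8080) == 8080                  # already in the allowed range
assert find_port(80) == find_port(22)           # same PID-seeded fallback
assert 1024 <= find_port(80) <= 65536 - 8       # fallback stays in range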
def random_sample(prob, seq, random_state=None):
    """ Return elements from a sequence with probability of prob

    Returns a lazy iterator of random items from seq.

    ``random_sample`` considers each item independently and without
    replacement. See below how the first time it returned 13 items and the
    next time it returned 6 items.

    >>> seq = list(range(100))
    >>> list(random_sample(0.1, seq)) # doctest: +SKIP
    [6, 9, 19, 35, 45, 50, 58, 62, 68, 72, 78, 86, 95]
    >>> list(random_sample(0.1, seq)) # doctest: +SKIP
    [6, 44, 54, 61, 69, 94]

    Providing an integer seed for ``random_state`` will result in
    deterministic sampling. Given the same seed it will return the same
    sample every time.

    >>> list(random_sample(0.1, seq, random_state=2016))
    [7, 9, 19, 25, 30, 32, 34, 48, 59, 60, 81, 98]
    >>> list(random_sample(0.1, seq, random_state=2016))
    [7, 9, 19, 25, 30, 32, 34, 48, 59, 60, 81, 98]

    ``random_state`` can also be any object with a method ``random`` that
    returns floats between 0.0 and 1.0 (exclusive).

    >>> from random import Random
    >>> randobj = Random(2016)
    >>> list(random_sample(0.1, seq, random_state=randobj))
    [7, 9, 19, 25, 30, 32, 34, 48, 59, 60, 81, 98]
    """
    if not hasattr(random_state, 'random'):
        random_state = Random(random_state)
    return filter(lambda _: random_state.random() < prob, seq)
def compress_string(s):
    # avg_block_size is actually the reciprocal of the average
    # intended interflush distance.
    rnd = Random(s)
    flushes_remaining = FLUSH_LIMIT
    if len(s) < AVERAGE_SPAN_BETWEEN_FLUSHES * APPROX_MIN_FLUSHES:
        avg_block_size = APPROX_MIN_FLUSHES / float(len(s) + 1)
    else:
        avg_block_size = 1.0 / AVERAGE_SPAN_BETWEEN_FLUSHES
    s = StringIO(s) if isinstance(s, six.text_type) else BytesIO(s)
    zbuf = BytesIO()
    zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
    chunk = s.read(MIN_INTERFLUSH_INTERVAL + int(rnd.expovariate(avg_block_size)))
    while chunk and flushes_remaining:
        zfile.write(chunk)
        zfile.flush()
        flushes_remaining -= 1
        chunk = s.read(MIN_INTERFLUSH_INTERVAL + int(rnd.expovariate(avg_block_size)))
    zfile.write(chunk)
    zfile.write(s.read())
    zfile.close()
    return zbuf.getvalue()
def get(self):
    newGame = Game()
    newGame.title = "test1"
    newGame.gameKey = "dsadfasfdsfaasd"
    newGame.put()
    profiles = [None, 'a123456', 'b123456', 'c123456', 'd123456']
    myrandom = Random()
    for i in range(500):
        points = myrandom.randint(999, 99999)
        days_before = myrandom.randint(0, 40)
        tags = []
        profile = profiles[myrandom.randint(0, len(profiles) - 1)]
        name = profile
        if name is None:
            name = "score-{0}".format(i)
        self.score(newGame, name, tags, points,
                   datetime.datetime.today() - datetime.timedelta(days=days_before),
                   profile)
    # self.score(newGame, "lastmonth-12341", ["hard"], 10000, datetime.datetime.today() - datetime.timedelta(days=40))
    # self.score(newGame, "lastweek-15341", ["hard"], 15000, datetime.datetime.today() - datetime.timedelta(days=8))
    # self.score(newGame, "yesterday-17341", [], 5000, datetime.datetime.today() - datetime.timedelta(days=2), "123123145")
    # self.score(newGame, "yesterday-17341", ["hard"], 7500, datetime.datetime.today() - datetime.timedelta(days=2), "123123145")
    # self.score(newGame, "yesterday-17341", [], 6500, datetime.datetime.today() - datetime.timedelta(days=1), "123123145")
    # self.score(newGame, "yesterday-17341", [], 5500, datetime.datetime.today() - datetime.timedelta(days=1), "123123145")
    # self.score(newGame, "yesterday-17341", [], 4500, datetime.datetime.today() - datetime.timedelta(days=1), "123123145")
    # self.score(newGame, "yesterday-17341", [], 2500, datetime.datetime.today() - datetime.timedelta(days=1), "123123145")
    # self.score(newGame, "today-17341", ["hard"], 3500, datetime.datetime.today())
    self.response.headers['Content-Type'] = 'text/plain'
    self.response.out.write("OK")
class UniformRandom(ParameterGenerator):
    """Generate uniformly-distributed random numbers."""
    @register_name
    def __init__(self, center, width, seed=None):
        """Create a new uniform random number generator.

        Will generate random numbers on the interval
        [center-width, center+width).

        Args:
            center: the centroid of the generated number cloud
            width: the half-width of the generated number cloud
            seed (optional): specify the seed for this random number
                generator.
        """
        self.center = center
        self.width = width
        self.gen = Random()
        if seed is not None:
            self.gen.seed(seed)

    def get(self):
        """Generate the next random value."""
        return self.gen.uniform(self.min_val, self.max_val)

    @property
    def min_val(self):
        return self.center - self.width

    @property
    def max_val(self):
        return self.center + self.width
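# A short sketch of UniformRandom above (illustrative values, added here;
# assumes ParameterGenerator and register_name need no extra setup): equal
# seeds give equal streams, and draws stay within [min_val, max_val]
# (Random.uniform may hit either endpoint due to rounding).
gen_a = UniformRandom(center=0.5, width=0.25, seed=1234)
gen_b = UniformRandom(center=0.5, width=0.25, seed=1234)
assert all(gen_a.get() == gen_b.get() for _ in range(100))
v = gen_b.get()
assert gen_b.min_val <= v <= gen_b.max_val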
class TestOrderedDict(unittest.TestCase):
    def setUp(self):
        self.rnd = Random(0xC0EDA55)
        self.seq = range(1000)
        self.rnd.shuffle(self.seq)
        self.od = OrderedDict()
        for k in self.seq:
            self.od[k] = self.rnd.random()

    def testorder(self):
        self.failUnlessEqual(list(self.od), self.seq)
        k, v = map(list, zip(*self.od.iteritems()))
        self.failUnlessEqual(k, self.seq)

    def testcp(self):
        od2 = OrderedDict(self.od)
        self.failUnlessEqual(list(od2), self.seq)
        self.failUnlessEqual(od2, self.od)

    def testdictcp(self):
        d = dict(self.od)
        self.failUnlessEqual(d, self.od)

    def testpickle(self):
        od2 = pickle.loads(pickle.dumps(self.od))
        self.failUnlessEqual(list(od2), self.seq)
        self.failUnlessEqual(od2, self.od)
def get(pos):
    p = int(pos[0]) | int(pos[1])
    t = time()
    rand = Random(int(t * 2) + p)
    n = rand.randint(0, 10)
    n += p
    return 'bard' + str(n % 4) + '.png'
def voting():
    """Called from login(), a convenience method."""
    if 'osm_token' not in session:
        return redirect(url_for('login'))
    if config.STAGE != 'voting':
        return redirect(url_for('login'))
    uid = session['osm_uid']
    isadmin = uid in config.ADMINS
    nominees_list = Nominee.select(Nominee, Vote.user.alias('voteuser')).where(Nominee.chosen).join(
        Vote, JOIN.LEFT_OUTER,
        on=((Vote.nominee == Nominee.id) & (Vote.user == uid) & (~Vote.preliminary))).naive()
    # Shuffle the nominees
    nominees = [n for n in nominees_list]
    rnd = Random()
    rnd.seed(uid)
    rnd.shuffle(nominees)
    # For admin, populate the dict of votes
    if isadmin:
        votesq = Nominee.select(Nominee.id, fn.COUNT(Vote.id).alias('num_votes')).where(Nominee.chosen).join(
            Vote, JOIN.LEFT_OUTER,
            on=((Vote.nominee == Nominee.id) & (~Vote.preliminary))).group_by(Nominee.id)
        votes = {}
        for v in votesq:
            votes[v.id] = v.num_votes
    else:
        votes = None
    # Count total number of voters
    total = Vote.select(fn.Distinct(Vote.user)).where(~Vote.preliminary).group_by(Vote.user).count()
    # Yay, done
    return render_template('voting.html', nominees=nominees,
                           year=date.today().year, isadmin=isadmin,
                           votes=votes, stage=config.STAGE, total=total,
                           nominations=config.NOMINATIONS, lang=g.lang)
def __init__(self, scheme=None, realm=None, use_ha1_pw=False):
    super(HTTPDigestAuth, self).__init__(scheme or 'Digest', realm)
    self.use_ha1_pw = use_ha1_pw
    self.random = SystemRandom()
    try:
        self.random.random()
    except NotImplementedError:  # pragma: no cover
        self.random = Random()

    self.generate_nonce_callback = None
    self.verify_nonce_callback = None
    self.generate_opaque_callback = None
    self.verify_opaque_callback = None

    def _generate_random():
        return md5(str(self.random.random()).encode('utf-8')).hexdigest()

    def default_generate_nonce():
        session["auth_nonce"] = _generate_random()
        return session["auth_nonce"]

    def default_verify_nonce(nonce):
        return nonce == session.get("auth_nonce")

    def default_generate_opaque():
        session["auth_opaque"] = _generate_random()
        return session["auth_opaque"]

    def default_verify_opaque(opaque):
        return opaque == session.get("auth_opaque")

    self.generate_nonce(default_generate_nonce)
    self.generate_opaque(default_generate_opaque)
    self.verify_nonce(default_verify_nonce)
    self.verify_opaque(default_verify_opaque)
def brachistochrone():
    initial = [(float(i) / num_intervals) for i in range(num_intervals)]
    initial.append(1.0)
    initial.reverse()
    rand = Random()
    rand.seed(int(time()))
    real = initial[:]
    k = find_optimal(initial[:], 1000, rand)
    print duration(k)
    tenk = find_optimal(k[:], 10000, rand)
    print duration(tenk)
    #hundk = find_optimal(tenk[:], 100000, rand)
    #print duration(hundk)
    initial.reverse()
    plt.plot(initial, k, '-', lw=2)
    plt.plot(initial, tenk, '-', lw=2)
    #plt.plot(initial, hundk, '-', lw=2)
    plt.plot(initial, initial, '-', lw=2)
    real = actual(2.0)
    initial.reverse()
    plt.plot(initial, real[1], '-', lw=2)
    print duration(real[1])
    print duration(initial)
    plt.title('Brachistochrone')
    plt.grid(True)
    plt.show()
def random(request):
    g = Random()
    p = Picture.objects.get(pk=g.randint(1, Picture.objects.count()))
    return HttpResponse(
        thumbnail_it(p.directory + "/" + p.filename),
        mimetype="image/jpeg"
    )
class Trolldoll(Icedoll):
    """ Trolldoll encryption algorithm based on Icedoll, which is based
        on Rijndael. Trolldoll adds an 'IV' and integrity checking to
        Icedoll. """

    def __init__(self, key=None, keySize=32, blockSize=32,
                 tapRound=6, extraRounds=6, micSize=16, ivSize=16):
        Icedoll.__init__(self, key=None, keySize=32, blockSize=32,
                         tapRound=6, extraRounds=6)
        self.name = 'TROLLDOLL'
        self.micSize = micSize
        self.ivSize = ivSize
        self.r = Random()                     # for IV generation
        import time
        newSeed = time.ctime() + str(self.r)  # seed with instance location
        self.r.seed(newSeed)                  # to make unique
        self.reset()

    def reset(self):
        Icedoll.reset(self)
        self.hasIV = None

    def _makeIV(self):
        return self.ivSize * 'a'

    def _makeIC(self):
        """ Make the integrity check """
        return self.micSize * chr(0x00)

    def _verifyIC(self, integrityCheck):
        """ Verify the integrity check """
        if self.micSize * chr(0x00) == integrityCheck:
            return 1    # matches
        else:
            return 0    # fails

    def encrypt(self, plainText, more=None):
        if not self.hasIV:      # on first call to encrypt, put in an IV
            plainText = self._makeIV() + plainText   # add the 'IV'
            self.hasIV = 1
        if more == None:        # on last call to encrypt, append integrity check
            plainText = plainText + self._makeIC()
        return Icedoll.encrypt(self, plainText, more=more)

    def decrypt(self, cipherText, more=None):
        """ Decrypt cipher text. Icedoll automatically removes prepended
            random bits used as IV. Note - typically the IV is used directly
            as the first cipher text block. Here the IV is prepended to the
            plaintext prior to encryption and removed on decryption. """
        plainText = Icedoll.decrypt(self, cipherText, more=more)
        if not self.hasIV:      # on first call to decrypt, remove IV
            plainText = plainText[self.ivSize:]      # remove the IV
            self.hasIV = 1
        if more == None:        # on last call to decrypt, verify integrity check
            if not self._verifyIC(plainText[-self.micSize:]):
                raise IntegrityCheckError, 'Trolldoll MIC Failure, bad key or modified data'
            plainText = plainText[:-self.micSize]    # trim off the integrity check
        return plainText
class Input:
    def __init__(self, dmgr, name):
        self.core = dmgr.get("core")
        self.name = name
        self.prng = Random()

    @kernel
    def gate_rising(self, duration):
        time.manager.event(("gate_rising", self.name, duration))
        delay(duration)

    @kernel
    def gate_falling(self, duration):
        time.manager.event(("gate_falling", self.name, duration))
        delay(duration)

    @kernel
    def gate_both(self, duration):
        time.manager.event(("gate_both", self.name, duration))
        delay(duration)

    @kernel
    def count(self):
        result = self.prng.randrange(0, 100)
        time.manager.event(("count", self.name, result))
        return result

    @kernel
    def timestamp_mu(self):
        result = time.manager.get_time_mu()
        result += self.prng.randrange(100, 1000)
        time.manager.event(("timestamp_mu", self.name, result))
        at_mu(result)
        return result
def code_img(code, size):
    r = Random()
    len_code = len(code)
    font = ImageFont.truetype("Essence_Sans.ttf", size)
    font_width, font_height = font.getsize(code)
    font_width += size / 2
    print font_width, font_height
    img = Image.new("RGBA", (font_width, font_height), (255,) * 4)
    draw = ImageDraw.ImageDraw(img)
    draw.text((size / 10, -size / 10), code, font=font, fill=(0, 0, 0))
    params = [1, 0, 0,
              0, 1 - float(r.randint(1, 10)) / 100, 0,
              0.001, float(r.randint(1, 2)) / 500]
    print params
    img = img.transform((font_width, font_height), Image.PERSPECTIVE, params)
    img = img.filter(ImageFilter.EDGE_ENHANCE_MORE)
    img.save("test.jpg")
class RulesetUniverseGenerator(object):
    def __init__(self, ruleset):
        self.ruleset = ruleset
        self.random = Random()

    def initialise(self, seed, system_min, system_max, planet_min, planet_max):
        self.random.seed(seed)
        self.system_min = system_min
        self.system_max = system_max
        self.planet_min = planet_min
        self.planet_max = planet_max

    @property
    def model(self):
        return self.ruleset.model

    @property
    def SIZE(self):
        return 10 ** 7

    @property
    def SPEED(self):
        return 3 * 10 ** 8

    @property
    def randint(self):
        return self.random.randint
def bestAgentFor(self, channel, server):
    allowed = self.getRootAgents()
    candidate = list()
    for agent in self.getRunnningAgents():
        print agent.server + "/" + server
        if agent.server == server:
            if agent.nbchan < 8:
                candidate.append(agent)
            for root_agent in allowed:
                #print "computer name: "+getComputername(root_agent)+"/"+getComputername(agent.name)+"y"+agent.name
                if getComputername(root_agent) == getComputername(agent.name):
                    allowed.remove(root_agent)
    # if no candidate
    log.msg("ALLOWED " + str(allowed))
    self.requestNewAgent(allowed, server)
    if len(candidate) == 0:
        return None
    from random import Random
    rnd = Random()
    return candidate[rnd.randint(0, len(candidate) - 1)]
def give_port():
    """
    Returns a random port and registers it.
    """
    global port_random

    context = get_deploy_context()

    # default behavior
    if context["config"] is None:
        return randint(1000, 65000)

    # during real deployment, let's register a port
    if port_random is None:
        port_random = Random(context["config"].DEPLOY_SECRET)

    if len(context["port_map"].items()) + len(context["config"].BANNED_PORTS) == 65536:
        raise Exception("All usable ports are taken. Cannot deploy any more instances.")

    while True:
        port = port_random.randint(0, 65535)
        if port not in context["config"].BANNED_PORTS:
            owner, instance = context["port_map"].get(port, (None, None))
            if owner is None or (owner == context["problem"] and instance == context["instance"]):
                context["port_map"][port] = (context["problem"], context["instance"])
                return port
def train(self, optimizer='pso'):
    '''
    The function trains the hyperparameters of the Kriging model.
    :param optimizer: Two optimizers are implemented, a Particle Swarm Optimizer and a GA
    '''
    # First make sure our data is up-to-date
    self.updateData()

    # Establish the bounds for optimization for theta and p values
    lowerBound = [self.thetamin] * self.k + [self.pmin] * self.k
    upperBound = [self.thetamax] * self.k + [self.pmax] * self.k

    # Create a random seed for our optimizer to use
    rand = Random()
    rand.seed(int(time()))

    # If the optimizer option is PSO, run the PSO algorithm
    # (note: the original compared strings with `is`, which is unreliable;
    # `==` is the correct comparison)
    if optimizer == 'pso':
        ea = inspyred.swarm.PSO(Random())
        ea.terminator = self.no_improvement_termination
        ea.topology = inspyred.swarm.topologies.ring_topology
        # ea.observer = inspyred.ec.observers.stats_observer
        final_pop = ea.evolve(generator=self.generate_population,
                              evaluator=self.fittingObjective,
                              pop_size=300,
                              maximize=False,
                              bounder=ec.Bounder(lowerBound, upperBound),
                              max_evaluations=30000,
                              neighborhood_size=20,
                              num_inputs=self.k)
        # Sort and print the best individual, who will be at index 0.
        final_pop.sort(reverse=True)

    # If not using a PSO search, run the GA
    elif optimizer == 'ga':
        ea = inspyred.ec.GA(Random())
        ea.terminator = self.no_improvement_termination
        final_pop = ea.evolve(generator=self.generate_population,
                              evaluator=self.fittingObjective,
                              pop_size=300,
                              maximize=False,
                              bounder=ec.Bounder(lowerBound, upperBound),
                              max_evaluations=30000,
                              num_elites=10,
                              mutation_rate=.05)

    # This code updates the model with the hyperparameters found in the global search
    for entry in final_pop:
        newValues = entry.candidate
        preLOP = copy.deepcopy(newValues)
        locOP_bounds = []
        for i in range(self.k):
            locOP_bounds.append([self.thetamin, self.thetamax])
        for i in range(self.k):
            locOP_bounds.append([self.pmin, self.pmax])

        # Let's quickly double check that we're at the optimal value by
        # running a quick local optimization
        lopResults = minimize(self.fittingObjective_local, newValues,
                              method='SLSQP', bounds=locOP_bounds,
                              options={'disp': False})
        newValues = lopResults['x']

        # Finally, set our new theta and pl values and update the model again
        for i in range(self.k):
            self.theta[i] = newValues[i]
        for i in range(self.k):
            self.pl[i] = newValues[i + self.k]
        try:
            self.updateModel()
        except:
            pass
        else:
            break
def consume_random(self):
    Random.normal(self.noise, 1., True)
    np.random.rand()
def easy_index_word(self, variants):
    print("easy")
    size = len(variants)
    random = Random()
    random_number = int(size * random.random())
    return min(random_number, size - 1)
def __init__(self, staging_dir, data_name, seed, min_flow, shave_rate,
             min_shave, id_mapping, weight_scale):
    """
    :param staging_dir:
    :param data_name:
    :param seed:
    :param min_flow:
    :param shave_rate:
    :param min_shave:
    :param id_mapping:
    :param weight_scale: see CLI
    """
    self.shave_rate = shave_rate
    self.min_shave = min_shave
    self.random = Random(seed)
    self.min_flow = min_flow
    self.weight_scale = weight_scale
    # loaded raw graph, loaded during first load call.
    # Only stores half of the matrix, removing duplicates.
    self.graph = list()
    # set of loaded original node ids and their flow
    self.node_flows = dict()
    # nodes with flow > min_flow
    self.dense_nodes = set()
    # weights of individual nodes if id mapping file is found with weights in it.
    # default is 1.0 for all points if id mapping file not passed. Also, weights
    # passed are normalized between 0 and 1 for all points based on the
    # weight_scale policy
    self.node_weights = dict()
    # 0-indexed sorted index mapping of points based on point order; makes ids
    # contiguous, e.g. 2 995 34 maps to 2->0, 34->1, 995->2
    self.sorted_node_mapping = dict()
    # Flow graph at any given point during shaving is stored here.
    self.flow_graph = dict()  # dict (node ID 1, node ID 2) -> flow
    # set of indices into self.graph representing unique nbrs/edges related to a given point/node
    self.nbrs = defaultdict(set)
    self.num_edges = None
    # edge shave threshold percentiles
    self.edge_shave_percentiles = None
    # clusters at each level, not re-labeled; these are converted into the HMA
    # hierarchy at the end. No relabeling needed as that happens in gene diver
    self.level_clusters = None

    # check params
    if not 0 <= min_flow:
        raise ValueError("min_flow must be between 0 and 1")
    if not 0 <= shave_rate <= 1:
        raise ValueError("shave_rate must be between 0 and 1")

    self.staging_dir = staging_dir
    # check paths
    if not os.path.isdir(staging_dir):
        raise GraphHDSException("Required staging dir does not exist: {}".format(staging_dir))

    # Parent directory of all output files
    self.output_dir = os.path.join(staging_dir, data_name)
    # create output experiment dir if not existing
    if not os.path.exists(self.output_dir):
        print("Creating output dir: {}".format(self.output_dir))
        os.makedirs(self.output_dir)

    # File containing graph input data
    self.graph_file = os.path.join(staging_dir, data_name + ".jsonl")
    if not os.path.isfile(self.graph_file):
        raise GraphHDSException("Could not find required graph input file: {}".format(self.graph_file))

    # load ID mapping
    if id_mapping:
        self.source_id_mappings = dict()
        # file with original node values as a single column of values
        graph_index_file = os.path.join(staging_dir, data_name + ".mapping.tsv")
        print("Getting point id mappings from {}".format(graph_index_file))
        with open(graph_index_file) as f:
            line_count = 0
            for line in f:
                cols = line.split("\t")
                if len(cols) != 3:
                    raise GraphHDSException(
                        "Expected format to be <node id> <int id> <raw weight>, "
                        "found: {} at line: {}".format(line, line_count))
                line_count += 1
                node_original_id = cols[1].strip()
                node_id = int(cols[0])
                node_weight = float(cols[2])
                self.source_id_mappings[node_id] = node_original_id
                self.node_weights[node_id] = node_weight
    else:
        self.source_id_mappings = None
        print("Skipping ID mapping of points since graph ID mapping flag was off")
def random(total_indices: int, rng: Random = Random(time())):
    # Note: the default argument is evaluated once, at definition time, so all
    # calls that omit `rng` share a single time-seeded generator.
    true_indices = [i for i in range(total_indices) if rng.randint(0, 1)]
    return Vec.SparseBool(true_indices, total_indices)
def test_can_reduce_poison_from_any_subtree(size, seed):
    """This test validates that we can minimize to any leaf node of a binary
    tree, regardless of where in the tree the leaf is."""
    random = Random(seed)

    # Initially we create the minimal tree of size n, regardless of whether
    # it is poisoned (which it won't be - the poison event essentially never
    # happens when drawing uniformly at random).

    # Choose p so that the expected size of the tree is equal to the desired
    # size.
    p = 1.0 / (2.0 - 1.0 / size)
    strat = PoisonedTree(p)

    def test_function(data):
        v = data.draw(strat)
        if len(v) >= size:
            data.mark_interesting()

    runner = ConjectureRunner(test_function, random=random,
                              settings=settings(TEST_SETTINGS, buffer_size=LOTS))

    while not runner.interesting_examples:
        runner.test_function(
            runner.new_conjecture_data(lambda data, n: uniform(random, n)))

    runner.shrink_interesting_examples()

    data, = runner.interesting_examples.values()

    assert len(ConjectureData.for_buffer(data.buffer).draw(strat)) == size

    starts = [b.start for b in data.blocks if b.length == 2]
    assert len(starts) % 2 == 0

    for i in hrange(0, len(starts), 2):
        # Now for each leaf position in the tree we try inserting a poison
        # value artificially. Additionally, we add a marker to the end that
        # must be preserved. The marker means that we are not allowed to rely
        # on discarding the end of the buffer to get the desired shrink.
        u = starts[i]
        marker = hbytes([1, 2, 3, 4])

        def test_function(data):
            v = data.draw(strat)
            m = data.draw_bytes(len(marker))
            if POISON in v and m == marker:
                data.mark_interesting()

        runner = ConjectureRunner(test_function, random=random,
                                  settings=TEST_SETTINGS)

        runner.cached_test_function(data.buffer[:u] + hbytes([255]) * 4 +
                                    data.buffer[u + 4:] + marker)

        assert runner.interesting_examples
        runner.shrink_interesting_examples()

        shrunk, = runner.interesting_examples.values()

        assert ConjectureData.for_buffer(shrunk.buffer).draw(strat) == (POISON,)
def test_inject_faker_locale(_session_faker, faker, faker_locale):
    random = Random(_CHANGED_SEED)
    assert faker != _session_faker
    assert faker.locales == faker_locale
    assert faker.random != random
    assert faker.random.getstate() == random.getstate()
def __init__(self, seed):
    Simulation.__init__(self)
    self.rv = Random(seed)
def test(request):
    request.session['username'] = Random().randint(1, 100000)
    return HttpResponse("OK")
def split_data(data: MoleculeDataset,
               split_type: str = 'random',
               sizes: Tuple[float, float, float] = (0.8, 0.1, 0.1),
               seed: int = 0,
               args: TrainArgs = None,
               logger: Logger = None
               ) -> Tuple[MoleculeDataset, MoleculeDataset, MoleculeDataset]:
    """
    Splits data into training, validation, and test splits.

    :param data: A MoleculeDataset.
    :param split_type: Split type.
    :param sizes: A length-3 tuple with the proportions of data in the train,
        validation, and test sets.
    :param seed: The random seed to use before shuffling data.
    :param args: Arguments.
    :param logger: A logger.
    :return: A tuple containing the train, validation, and test splits of the data.
    """
    if not (len(sizes) == 3 and sum(sizes) == 1):
        raise ValueError(
            'Valid split sizes must sum to 1 and must have three sizes: train, validation, and test.'
        )

    random = Random(seed)

    if args is not None:
        folds_file, val_fold_index, test_fold_index = \
            args.folds_file, args.val_fold_index, args.test_fold_index
    else:
        folds_file = val_fold_index = test_fold_index = None

    if split_type == 'crossval':
        index_set = args.crossval_index_sets[args.seed]
        data_split = []
        for split in range(3):
            split_indices = []
            for index in index_set[split]:
                with open(os.path.join(args.crossval_index_dir, f'{index}.pkl'), 'rb') as rf:
                    split_indices.extend(pickle.load(rf))
            data_split.append([data[i] for i in split_indices])
        train, val, test = tuple(data_split)
        return MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test)

    elif split_type == 'index_predetermined':
        split_indices = args.crossval_index_sets[args.seed]
        if len(split_indices) != 3:
            raise ValueError('Split indices must have three splits: train, validation, and test')
        data_split = []
        for split in range(3):
            data_split.append([data[i] for i in split_indices[split]])
        train, val, test = tuple(data_split)
        return MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test)

    elif split_type == 'predetermined':
        if not val_fold_index and sizes[2] != 0:
            raise ValueError(
                'Test size must be zero since test set is created separately '
                'and we want to put all other data in train and validation')
        assert folds_file is not None
        assert test_fold_index is not None

        try:
            with open(folds_file, 'rb') as f:
                all_fold_indices = pickle.load(f)
        except UnicodeDecodeError:
            with open(folds_file, 'rb') as f:
                # in case we're loading indices from python2
                all_fold_indices = pickle.load(f, encoding='latin1')

        log_scaffold_stats(data, all_fold_indices, logger=logger)

        folds = [[data[i] for i in fold_indices] for fold_indices in all_fold_indices]

        test = folds[test_fold_index]
        if val_fold_index is not None:
            val = folds[val_fold_index]

        train_val = []
        for i in range(len(folds)):
            if i != test_fold_index and (val_fold_index is None or i != val_fold_index):
                train_val.extend(folds[i])

        if val_fold_index is not None:
            train = train_val
        else:
            random.shuffle(train_val)
            train_size = int(sizes[0] * len(train_val))
            train = train_val[:train_size]
            val = train_val[train_size:]

        return MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test)

    elif split_type == 'scaffold_balanced':
        return scaffold_split(data, sizes=sizes, balanced=True, seed=seed, logger=logger)

    elif split_type == 'random':
        data.shuffle(seed=seed)
        train_size = int(sizes[0] * len(data))
        train_val_size = int((sizes[0] + sizes[1]) * len(data))
        train = data[:train_size]
        val = data[train_size:train_val_size]
        test = data[train_val_size:]
        return MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test)

    else:
        raise ValueError(f'split_type "{split_type}" not supported.')
def test_no_injection(_session_faker, faker):
    random = Random(_CHANGED_SEED)
    assert faker == _session_faker
    assert faker.locales == _MODULE_LOCALES
    assert faker.random != random
    assert faker.random.getstate() == random.getstate()
def __init__(self, the_seed=None):
    """ Initialize the general RNS with an optional seed.
        All further initialization is done in subclass. """
    self.r = Random(the_seed)
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#

from random import Random

from twisted.internet import reactor, threads

from uo.entity import *
from gemuo.error import *
from gemuo.entity import Position
from gemuo.engine import Engine
from gemuo.path import path_find, Unreachable

random = Random()

class WalkReject(Exception):
    def __init__(self, message='Walk reject'):
        Exception.__init__(self, message)

class Blocked(Exception):
    """The calculated path or the destination is blocked
    (temporary failure)."""
    def __init__(self, position):
        Exception.__init__(self)
        self.position = position
class Permuter:
    '''Represents a single source from which permutation candidates can be
    generated, and which keeps track of good scores achieved so far.'''
    def __init__(self, dir: str, compiler: Compiler, scorer: Scorer,
                 source_file: str, source: str,
                 force_rng_seed: Optional[int]) -> None:
        self.dir = dir
        self.random = Random()
        self.compiler = compiler
        self.scorer = scorer
        self.source_file = source_file

        fns = find_fns(source)
        if len(fns) == 0:
            raise Exception(f"{self.source_file} does not contain any function!")
        if len(fns) > 1:
            raise Exception(f"{self.source_file} must contain only one function. "
                            "(Use strip_other_fns.py.)")
        self.fn_name = fns[0]
        self.unique_name = self.fn_name

        self.parser = pycparser.CParser()
        self.permutations = perm_gen.perm_gen(source)

        self.force_rng_seed = force_rng_seed
        self.cur_seed: Optional[Tuple[int, int]] = None

        self.base, base_score, base_hash = self.create_and_score_base()
        self.hashes = {base_hash}
        self.cand: Optional[Candidate] = None
        self.base_score: int = base_score
        self.best_score: int = base_score

    def reseed_random(self) -> None:
        self.random = Random()

    def create_and_score_base(self) -> Tuple[Candidate, int, str]:
        base_source = perm_eval.perm_evaluate_one(self.permutations)
        base_cand = Candidate.from_source(base_source, self.parser, rng_seed=0)
        o_file = base_cand.compile(self.compiler, show_errors=True)
        if not o_file:
            raise Exception(f"Unable to compile {self.source_file}")
        base_score, base_hash = base_cand.score(self.scorer, o_file)
        return base_cand, base_score, base_hash

    def eval_candidate(self, seed: int) -> Tuple[Candidate, Profiler]:
        t0 = time.time()

        # Determine if we should keep the last candidate
        keep = ((self.permutations.is_random() and
                 self.random.uniform(0, 1) >= RANDOMIZER_KEEP_PROB) or
                self.force_rng_seed)

        # Create a new candidate if we didn't keep the last one (or if the
        # last one didn't exist).
        # N.B. if we decide to keep the previous candidate, we will skip over
        # the provided seed. This means we're not guaranteed to test all
        # seeds, but it doesn't really matter since we're randomizing anyway.
        if not self.cand or not keep:
            cand_c = self.permutations.evaluate(seed, EvalState())
            rng_seed = self.force_rng_seed or random.randrange(1, 10**20)
            self.cur_seed = (seed, rng_seed)
            self.cand = Candidate.from_source(cand_c, self.parser, rng_seed=rng_seed)

        # Randomize the candidate
        if self.permutations.is_random():
            self.cand.randomize_ast()

        t1 = time.time()

        o_file = self.cand.compile(self.compiler)

        t2 = time.time()

        self.cand.score(self.scorer, o_file)

        t3 = time.time()

        profiler: Profiler = Profiler()
        profiler.add_stat(Profiler.StatType.perm, t1 - t0)
        profiler.add_stat(Profiler.StatType.compile, t2 - t1)
        profiler.add_stat(Profiler.StatType.score, t3 - t2)

        return self.cand, profiler

    def try_eval_candidate(self, seed: int) -> EvalResult:
        try:
            cand, profiler = self.eval_candidate(seed)
            return cand, profiler
        except Exception:
            return EvalError(exc_str=traceback.format_exc(), seed=self.cur_seed)

    def base_source(self) -> str:
        return self.base.get_source()

    def diff(self, cand: Candidate) -> str:
        a = self.base_source().split('\n')
        b = cand.get_source().split('\n')
        return '\n'.join(difflib.unified_diff(a, b, fromfile='before',
                                              tofile='after', lineterm=''))
def GeneratePassword(self):
    passwdChars = string.letters + string.digits
    passwdLength = 8
    return ''.join(Random().sample(passwdChars, passwdLength))
def _fill_pool(self, pool: Pool):
    for size in self.TEST_SIZES:
        prng = Random()
        prng.seed(self.prng.getrandbits(128), version=2)
        self.results.append(pool.apply_async(TestCase(size, prng).run))
def reseed_random(self) -> None:
    self.random = Random()
def run_sm_perf_test(image_uri, num_nodes, region):
    """
    Run TF sagemaker training performance tests

    Additional context: Setup for this function is performed by
    'setup_sm_benchmark_tf_train_env' -- this installs some prerequisite
    packages, clones some repos, and creates a virtualenv called
    sm_benchmark_venv.

    TODO: Refactor the above setup function to be more obviously connected to
    this function, and install requirements via a requirements.txt file

    :param image_uri: ECR image URI
    :param num_nodes: Number of nodes to run on
    :param region: AWS region
    """
    framework_version = re.search(r"[1,2](\.\d+){2}", image_uri).group()
    if framework_version.startswith("1."):
        pytest.skip("Skipping benchmark test on TF 1.x images.")

    processor = "gpu" if "gpu" in image_uri else "cpu"

    ec2_instance_type = "p3.16xlarge" if processor == "gpu" else "c5.18xlarge"

    py_version = "py2" if "py2" in image_uri else "py37" if "py37" in image_uri else "py3"

    time_str = time.strftime("%Y-%m-%d-%H-%M-%S")
    commit_info = os.getenv("CODEBUILD_RESOLVED_SOURCE_VERSION")
    target_upload_location = os.path.join(
        BENCHMARK_RESULTS_S3_BUCKET, "tensorflow", framework_version,
        "sagemaker", "training", processor, py_version
    )
    training_job_name = (
        f"tf{framework_version[0]}-tr-bench-{processor}-{num_nodes}-node-{py_version}"
        f"-{commit_info[:7]}-{time_str}"
    )

    # Inserting random sleep because this test starts multiple training jobs
    # around the same time, resulting in a throttling error for SageMaker APIs.
    time.sleep(Random(x=training_job_name).random() * 60)

    test_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "resources")
    venv_dir = os.path.join(test_dir, "sm_benchmark_venv")

    ctx = Context()

    with ctx.cd(test_dir), ctx.prefix(f"source {venv_dir}/bin/activate"):
        log_file = (
            f"results-{commit_info}-{time_str}-{framework_version}-{processor}-"
            f"{py_version}-{num_nodes}-node.txt"
        )
        run_out = ctx.run(
            f"timeout 45m python tf_sm_benchmark.py "
            f"--framework-version {framework_version} "
            f"--image-uri {image_uri} "
            f"--instance-type ml.{ec2_instance_type} "
            f"--node-count {num_nodes} "
            f"--python {py_version} "
            f"--region {region} "
            f"--job-name {training_job_name} "  # trailing space keeps the redirect from fusing into the job name
            f"2>&1 | tee {log_file}",
            warn=True,
            echo=True,
        )

        if not (run_out.ok or run_out.return_code == 124):
            target_upload_location = os.path.join(target_upload_location, "failure_log")

        ctx.run(f"aws s3 cp {os.path.join(test_dir, log_file)} {os.path.join(target_upload_location, log_file)}")

        LOGGER.info(f"Test results can be found at {os.path.join(target_upload_location, log_file)}")

        result_statement, throughput = _print_results_of_test(os.path.join(test_dir, log_file), processor)
        throughput /= num_nodes

        assert run_out.ok, (
            f"Benchmark Test failed with return code {run_out.return_code}. "
            f"Test results can be found at {os.path.join(target_upload_location, log_file)}"
        )

        threshold = (
            (TENSORFLOW2_SM_TRAINING_CPU_1NODE_THRESHOLD if num_nodes == 1
             else TENSORFLOW2_SM_TRAINING_CPU_4NODE_THRESHOLD)
            if processor == "cpu"
            else TENSORFLOW2_SM_TRAINING_GPU_1NODE_THRESHOLD if num_nodes == 1
            else TENSORFLOW2_SM_TRAINING_GPU_4NODE_THRESHOLD
        )
        LOGGER.info(
            f"tensorflow {framework_version} sagemaker training {processor} {py_version} "
            f"imagenet {num_nodes} nodes Throughput: {throughput} images/sec, threshold: {threshold} images/sec"
        )
        assert throughput > threshold, (
            f"tensorflow {framework_version} sagemaker training {processor} {py_version} imagenet {num_nodes} nodes "
            f"Benchmark Result {throughput} does not reach the threshold {threshold}"
        )
def hash_fn(val):
    if val in mock:
        return mock[val]
    hasher = Random(val).randrange
    return hasher(1_000_000)
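# Sketch of hash_fn() above (added illustration; assumes `mock` is a plain
# dict defined alongside it): unmocked values hash deterministically because
# Random(val) seeds from the value itself, while mocked values short-circuit
# the hash entirely.
assert hash_fn("abc") == hash_fn("abc")   # Random("abc") is deterministic
mock["abc"] = 7                           # hypothetical override
assert hash_fn("abc") == 7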
#Character Game version 2.5
#Author(s) Chris Heisler

from InputHandler import getInput
from Pymon import Pymon
from time import sleep, clock
from random import Random

arenaName = "The Battle Frontier"
random = Random(clock())
rounds = 3

#Start Game
print("Welcome to " + arenaName + "!")

#Game Loop
#Character Select
char_sel = True
while char_sel:
    #Player 1
    #Scan Player 1's card and use the number to generate a character
    print("\nPlayer 1, choose your Character.")
    code = getInput()
    ch1 = Pymon(code)
    print("Preparing your character...")
    sleep(2)
    print(ch1.name + " has entered the arena.\n")
    sleep(2)
    #Player 2
    #Scan Player 2's card and use the number to generate a character
def setUp(self) -> None:
    # For test reproducibility only. Use SystemRandom ordinarily.
    self.prng = Random()
    self.prng.seed(0xACA6E99F3B7EE68594F51ED5DE7FD778, version=2)
    self.results = []
def __init__(self):
    super(Dictionary, self).__init__()
    self.random = Random()
from Swarm import Swarm
from random import Random

seed = 5113
my_prng = Random(seed)
dimensions = 2
swarm_size = 100

swarm = Swarm(swarm_size, my_prng, dimensions)
swarm.create_ring()

for i in range(10000):
    print("---- Round {} ----".format(i))
    swarm.roam(i)
    swarm.update_inertia()
    print(swarm.gbest_val)

print("Global best {}".format(swarm.gbest))
def test_overruns_if_not_enough_bytes_for_block():
    runner = ConjectureRunner(lambda data: data.draw_bytes(2),
                              settings=TEST_SETTINGS, random=Random(0))
    runner.cached_test_function(b"\0\0")
    assert runner.tree.rewrite(b"\0")[1] == Status.OVERRUN
class CoinSelector:
    def __init__(self, target: int, cost_of_change: int, seed: str = None) -> None:
        self.target = target
        self.cost_of_change = cost_of_change
        self.exact_match = False
        self.tries = 0
        self.random = Random(seed)
        if seed is not None:
            self.random.seed(seed, version=1)

    def select(self, txos: List[OutputEffectiveAmountEstimator],
               strategy_name: str = None) -> List[OutputEffectiveAmountEstimator]:
        if not txos:
            return []
        available = sum(c.effective_amount for c in txos)
        if self.target > available:
            return []
        return getattr(self, strategy_name or "standard")(txos, available)

    @strategy
    def prefer_confirmed(self, txos: List[OutputEffectiveAmountEstimator],
                         available: int) -> List[OutputEffectiveAmountEstimator]:
        return (self.only_confirmed(txos, available) or
                self.standard(txos, available))

    @strategy
    def only_confirmed(self, txos: List[OutputEffectiveAmountEstimator],
                       _) -> List[OutputEffectiveAmountEstimator]:
        confirmed = [t for t in txos if t.txo.tx_ref and t.txo.tx_ref.height > 0]
        if not confirmed:
            return []
        confirmed_available = sum(c.effective_amount for c in confirmed)
        if self.target > confirmed_available:
            return []
        return self.standard(confirmed, confirmed_available)

    @strategy
    def standard(self, txos: List[OutputEffectiveAmountEstimator],
                 available: int) -> List[OutputEffectiveAmountEstimator]:
        return (self.branch_and_bound(txos, available) or
                self.closest_match(txos, available) or
                self.random_draw(txos, available))

    @strategy
    def branch_and_bound(self, txos: List[OutputEffectiveAmountEstimator],
                         available: int) -> List[OutputEffectiveAmountEstimator]:
        # see bitcoin implementation for more info:
        # https://github.com/bitcoin/bitcoin/blob/master/src/wallet/coinselection.cpp
        txos.sort(reverse=True)

        current_value = 0
        current_available_value = available
        current_selection: List[bool] = []
        best_waste = self.cost_of_change
        best_selection: List[bool] = []

        while self.tries < MAXIMUM_TRIES:
            self.tries += 1
            backtrack = False
            if current_value + current_available_value < self.target or \
                    current_value > self.target + self.cost_of_change:
                backtrack = True
            elif current_value >= self.target:
                new_waste = current_value - self.target
                if new_waste <= best_waste:
                    best_waste = new_waste
                    best_selection = current_selection[:]
                backtrack = True

            if backtrack:
                while current_selection and not current_selection[-1]:
                    current_selection.pop()
                    current_available_value += txos[len(current_selection)].effective_amount
                if not current_selection:
                    break
                current_selection[-1] = False
                utxo = txos[len(current_selection) - 1]
                current_value -= utxo.effective_amount
            else:
                utxo = txos[len(current_selection)]
                current_available_value -= utxo.effective_amount
                previous_utxo = txos[len(current_selection) - 1] if current_selection else None
                if current_selection and not current_selection[-1] and previous_utxo and \
                        utxo.effective_amount == previous_utxo.effective_amount and \
                        utxo.fee == previous_utxo.fee:
                    current_selection.append(False)
                else:
                    current_selection.append(True)
                    current_value += utxo.effective_amount

        if best_selection:
            self.exact_match = True
            return [txos[i] for i, include in enumerate(best_selection) if include]

        return []

    @strategy
    def closest_match(self, txos: List[OutputEffectiveAmountEstimator],
                      _) -> List[OutputEffectiveAmountEstimator]:
        """ Pick one UTXO that is larger than the target but with the smallest change. """
        target = self.target + self.cost_of_change
        smallest_change = None
        best_match = None
        for txo in txos:
            if txo.effective_amount >= target:
                change = txo.effective_amount - target
                if smallest_change is None or change < smallest_change:
                    smallest_change, best_match = change, txo
        return [best_match] if best_match else []

    @strategy
    def random_draw(self, txos: List[OutputEffectiveAmountEstimator],
                    _) -> List[OutputEffectiveAmountEstimator]:
        """ Accumulate UTXOs at random until there is enough to cover the target. """
        target = self.target + self.cost_of_change
        self.random.shuffle(txos, self.random.random)
        selection = []
        amount = 0
        for coin in txos:
            selection.append(coin)
            amount += coin.effective_amount
            if amount >= target:
                return selection
        return []
def setUp(self):
    """Initialization of Vehicle object"""
    self.veh = Vehicle(0, 0)
    self.R = Random(seed)
class Dictionary():
    sz = 100
    pool_dictionary = dict()
    used_words = dict()

    def __init__(self):
        super(Dictionary, self).__init__()
        self.random = Random()

    def init_dictionary(self):
        self.db = connect(getcwd() + "/dictionary.db")

    def close_connection(self):
        self.db.close()

    def load_dictionary(self):
        pass

    def setup_connection(self, game_id):
        pool_size = len(self.pool_dictionary)
        self.pool_dictionary[game_id] = pool_size
        self.used_words[game_id] = set()

    def pin_first_word(self, game_id, word):
        if self.used_words.get(game_id) is not None:
            current_set = self.used_words.get(game_id)
            current_set.add(word)
            self.used_words[game_id] = current_set

    def get_first_word(self, width):
        # TODO Realize with decorators
        self.init_dictionary()
        first_word_list = list()
        cursor = self.db.cursor()
        cursor.execute(GET_WORDS_QUERY)
        for row in cursor:
            word = row[0]
            if len(word) == width:
                first_word_list.append(word)
        print(len(first_word_list))
        first_word = first_word_list[int(self.random.random() * len(first_word_list))]
        self.close_connection()
        return first_word

    def get_words(self):
        self.init_dictionary()
        cursor = self.db.cursor()
        cursor.execute(GET_WORDS_QUERY)
        return [row[0] for row in cursor]

    def get_used_words(self, game_id):
        return self.used_words.get(game_id)

    def is_word_correct_built(self, _x_list_, _y_list_, _changed_cell_):
        if len(_x_list_) == 0:
            return False
        _is_approved_ = True
        cnt_new = 0
        for i in range(1, len(_x_list_)):
            if abs(_x_list_[i] - _x_list_[i - 1]) + abs(_y_list_[i] - _y_list_[i - 1]) != 1:
                _is_approved_ = False
        for i in range(len(_x_list_)):
            if _x_list_[i] == _changed_cell_.x and _y_list_[i] == _changed_cell_.y:
                cnt_new += 1
        if cnt_new != 1:
            _is_approved_ = False
        for i in range(0, len(_x_list_)):
            for j in range(i + 1, len(_y_list_)):
                if _x_list_[i] == _x_list_[j] and _y_list_[i] == _y_list_[j]:
                    _is_approved_ = False
        return _is_approved_

    def check_word(self, number_id, x_list, y_list, changed_cell, word):
        self.init_dictionary()
        if number_id is None:
            return False
        if not self.is_word_correct_built(x_list, y_list, changed_cell):
            return False
        value = self.is_word_good(word, number_id)
        self.close_connection()
        return value

    def is_word_good(self, word, number_id):
        self.init_dictionary()
        cursor = self.db.cursor()
        cursor.execute(CHECK_WORD_QUERY, (word,))
        for row in cursor:
            current_set = self.used_words.get(number_id)
            if word not in current_set:
                current_set.add(word)
                self.used_words[number_id] = current_set
                print("WORD FOUND")
                self.close_connection()
                return True
            else:
                print("WORD NOT FOUND")
                self.close_connection()
                return False
        self.close_connection()
        print("WORD NOT FOUND")
        return False
def _discrete_log_pollard_rho(n, a, b, order=None, retries=10, rseed=None):
    """
    Pollard's Rho algorithm for computing the discrete logarithm of ``a`` to
    the base ``b`` modulo ``n``.

    It is a randomized algorithm with the same expected running time as
    ``_discrete_log_shanks_steps``, but requires a negligible amount of
    memory.

    References
    ==========

    .. [1] "Handbook of applied cryptography", Menezes, A. J., Van, O. P. C., &
        Vanstone, S. A. (1997).

    Examples
    ========

    >>> from sympy.ntheory.residue_ntheory import _discrete_log_pollard_rho
    >>> _discrete_log_pollard_rho(227, 3**7, 3)
    7

    See also
    ========

    discrete_log
    """
    a %= n
    b %= n

    if order is None:
        order = n_order(b, n)

    prng = Random()
    if rseed is not None:
        prng.seed(rseed)

    for i in range(retries):
        aa = prng.randint(1, order - 1)
        ba = prng.randint(1, order - 1)
        xa = pow(b, aa, n) * pow(a, ba, n) % n

        c = xa % 3
        if c == 0:
            xb = a * xa % n
            ab = aa
            bb = (ba + 1) % order
        elif c == 1:
            xb = xa * xa % n
            ab = (aa + aa) % order
            bb = (ba + ba) % order
        else:
            xb = b * xa % n
            ab = (aa + 1) % order
            bb = ba

        for j in range(order):
            # the "tortoise" xa moves one step per iteration ...
            c = xa % 3
            if c == 0:
                xa = a * xa % n
                ba = (ba + 1) % order
            elif c == 1:
                xa = xa * xa % n
                aa = (aa + aa) % order
                ba = (ba + ba) % order
            else:
                xa = b * xa % n
                aa = (aa + 1) % order

            # ... while the "hare" xb moves two steps
            c = xb % 3
            if c == 0:
                xb = a * xb % n
                bb = (bb + 1) % order
            elif c == 1:
                xb = xb * xb % n
                ab = (ab + ab) % order
                bb = (bb + bb) % order
            else:
                xb = b * xb % n
                ab = (ab + 1) % order

            c = xb % 3
            if c == 0:
                xb = a * xb % n
                bb = (bb + 1) % order
            elif c == 1:
                xb = xb * xb % n
                ab = (ab + ab) % order
                bb = (bb + bb) % order
            else:
                xb = b * xb % n
                ab = (ab + 1) % order

            if xa == xb:
                r = (ba - bb) % order
                if r != 0:
                    return mod_inverse(r, order) * (ab - aa) % order
                break
    raise ValueError("Pollard's Rho failed to find logarithm")
def setUp(self):
    """Initialization of Soldier object"""
    self.sold = Soldier(0, 0)
    self.R = Random(seed)