def test_update(self):
    """Verify LRU.update(): values change in place and updated keys become MRU."""
    lru = LRU(2)

    # keyword-style update replaces the value of an existing key
    lru['a'] = 1
    self.assertEqual(lru['a'], 1)
    lru.update(a=2)
    self.assertEqual(lru['a'], 2)

    lru['b'] = 2
    self.assertEqual(lru['b'], 2)
    lru.update(b=3)
    self.assertEqual(('b', 3), lru.peek_first_item())
    self.assertEqual(lru['a'], 2)
    self.assertEqual(lru['b'], 3)

    # mapping-style update touches both keys; 'b' ends up most recent
    lru.update({'a': 1, 'b': 2})
    self.assertEqual(('b', 2), lru.peek_first_item())
    self.assertEqual(lru['a'], 1)
    self.assertEqual(lru['b'], 2)

    # an empty update must leave the recency order untouched
    lru.update()
    self.assertEqual(('b', 2), lru.peek_first_item())

    # updating 'a' promotes it back to the MRU slot
    lru.update(a=2)
    self.assertEqual(('a', 2), lru.peek_first_item())
class StarboardEntries:
    """A way of managing starboard entries.

    Sort of like an ORM, but also not fully."""

    _pool: asyncpg.Pool = attr.ib()
    # note: entry cache isn't really a dict, but for typehinting purposes this works
    _entry_cache: typing.Dict[int, StarboardEntry] = attr.ib()
    _sql_loop_task: asyncio.Task = attr.ib()
    _sql_queries: cclass.SetUpdateAsyncQueue = attr.ib()

    def __init__(self, pool: asyncpg.Pool, cache_size: int = 200):
        self._pool = pool
        self._entry_cache = LRU(
            cache_size
        )  # the default of 200 should be raised as the bot grows bigger
        self._sql_queries = cclass.SetUpdateAsyncQueue()

        loop = asyncio.get_event_loop()
        self._sql_loop_task = loop.create_task(self._sql_loop())

    def stop(self):
        """Stops the SQL task loop."""
        self._sql_loop_task.cancel()

    async def _sql_loop(self):
        """Actually runs SQL updating, hopefully one after another.

        Saves speed on adding, deleting, and updating by offloading
        this step here."""
        try:
            while True:
                entry = await self._sql_queries.get()
                logging.getLogger("discord").debug(f"Running {entry.query}.")
                # fix: positional query args now come before the keyword —
                # `execute(query, timeout=60, *args)` parses, but iterable
                # unpacking after a keyword has a confusing evaluation order.
                await self._pool.execute(entry.query, *entry.args, timeout=60)
                self._sql_queries.task_done()
        except asyncio.CancelledError:
            pass

    def _get_required_from_entry(self, entry: StarboardEntry):
        """Transforms data into the form needed for databases."""
        return (
            entry.ori_mes_id,
            entry.ori_chan_id,
            entry.star_var_id,
            entry.starboard_id,
            entry.author_id,
            list(entry.ori_reactors),
            list(entry.var_reactors),
            entry.guild_id,
            entry.forced,
            entry.frozen,
            entry.trashed,
        )

    def _str_builder_to_insert(
        self, str_builder: typing.List[str], entry: StarboardEntry
    ):
        """Takes data from a string builder list and eventually puts
        the data needed into the _sql_queries variable."""
        query = "".join(str_builder)
        args = self._get_required_from_entry(entry)
        self._sql_queries.put_nowait(StarboardSQLEntry(query, args))

    def _handle_upsert(self, entry: StarboardEntry):
        """Upserts an entry by using an INSERT with an ON CONFLICT clause.

        This is a PostgreSQL-specific feature, so that's nice!"""
        str_builder = [
            "INSERT INTO starboard(ori_mes_id, ori_chan_id, star_var_id, ",
            "starboard_id, author_id, ori_reactors, var_reactors, ",
            "guild_id, forced, frozen, trashed) VALUES($1, $2, $3, $4, ",
            "$5, $6, $7, $8, $9, $10, $11) ON CONFLICT (ori_mes_id) DO UPDATE ",
            "SET ori_chan_id = $2, star_var_id = $3, starboard_id = $4, ",
            "author_id = $5, ori_reactors = $6, var_reactors = $7, guild_id = $8, ",
            "forced = $9, frozen = $10, trashed = $11",
        ]
        self._str_builder_to_insert(str_builder, entry)

    def upsert(self, entry: StarboardEntry):
        """Either adds or updates an entry in the collection of entries."""
        # fix: assign the keys directly instead of `update(**temp_dict)` —
        # the keys are ints, and non-string keyword names only happen to
        # work for C-implemented callables (a CPython implementation quirk).
        self._entry_cache[entry.ori_mes_id] = entry
        if entry.star_var_id:
            self._entry_cache[entry.star_var_id] = entry
        self._handle_upsert(entry)

    def delete(self, entry_id: int):
        """Removes an entry from the collection of entries."""
        self._entry_cache.pop(entry_id, None)
        self._sql_queries.put_nowait(
            StarboardSQLEntry("DELETE FROM starboard WHERE ori_mes_id = $1", [entry_id])
        )

    async def get(
        self, entry_id: int, check_for_var: bool = False
    ) -> typing.Optional[StarboardEntry]:
        """Gets an entry from the collection of entries.

        Checks the cache first (by message id, then by star-variant id),
        falling back to the database. If `check_for_var` is set, entries
        without a star_var_id are treated as missing."""
        entry = None

        if self._entry_cache.has_key(entry_id):  # type: ignore
            entry = self._entry_cache[entry_id]
        else:
            entry = discord.utils.find(
                lambda e: e and e.star_var_id == entry_id, self._entry_cache.values()
            )

        if not entry:
            async with self._pool.acquire() as conn:
                # fix: parameterized query instead of f-string interpolation —
                # avoids SQL injection and lets asyncpg handle value typing.
                data = await conn.fetchrow(
                    "SELECT * FROM starboard WHERE ori_mes_id = $1"
                    " OR star_var_id = $1",
                    entry_id,
                )
                if data:
                    entry = StarboardEntry.from_row(data)
                    self._entry_cache[entry_id] = entry

        if entry and check_for_var and not entry.star_var_id:
            return None

        return entry

    async def select_query(self, query: str):
        """Selects the starboard database directly for entries based on the query.

        NOTE(review): `query` is spliced into the SQL verbatim — never pass
        untrusted input here."""
        async with self._pool.acquire() as conn:
            data = await conn.fetch(f"SELECT * FROM starboard WHERE {query}")
            if not data:
                return None
            return tuple(StarboardEntry.from_row(row) for row in data)

    async def raw_query(self, query: str):
        """Runs the raw query against the pool, assuming the results
        are starboard entries.

        NOTE(review): `query` is executed verbatim — trusted input only."""
        async with self._pool.acquire() as conn:
            data = await conn.fetch(query)
            if not data:
                return None
            return tuple(StarboardEntry.from_row(row) for row in data)

    async def super_raw_query(self, query: str):
        """You want a raw query? You'll get one."""
        async with self._pool.acquire() as conn:
            return await conn.fetch(query)

    async def query_entries(
        self, seperator: str = "AND", **conditions: str
    ) -> typing.Optional[typing.Tuple[StarboardEntry, ...]]:
        """Queries entries based on conditions provided.

        For example, you could do `query_entries(guild_id=143425)` to get
        entries with that guild id.

        NOTE(review): condition values are interpolated verbatim into the
        SQL (no quoting) — only pass trusted, pre-formatted values."""
        sql_conditions: list[str] = [
            f"{key} = {value}" for key, value in conditions.items()
        ]
        combined_statements = f" {seperator} ".join(sql_conditions)

        async with self._pool.acquire() as conn:
            data = await conn.fetch(
                f"SELECT * FROM starboard WHERE {combined_statements}"
            )
            if not data:
                return None
            return tuple(StarboardEntry.from_row(row) for row in data)

    async def get_random(self, guild_id: int) -> typing.Optional[StarboardEntry]:
        """Gets a random entry from a guild."""
        # query adapted from
        # https://github.com/Rapptz/RoboDanny/blob/1fb95d76d1b7685e2e2ff950e11cddfc96efbfec/cogs/stars.py#L1082
        query = """SELECT * FROM starboard
            WHERE guild_id=$1 AND star_var_id IS NOT NULL
            OFFSET FLOOR(RANDOM() * (
                SELECT COUNT(*) FROM starboard
                WHERE guild_id=$1 AND star_var_id IS NOT NULL
            ))
            LIMIT 1
        """

        async with self._pool.acquire() as conn:
            data = await conn.fetchrow(query, guild_id)
            if not data:
                return None
            return StarboardEntry.from_row(data)
class Cache:
    """A fixed-capacity object cache with a pluggable replacement policy.

    Object sizes and hit/miss statistics live in this class; the eviction
    order is delegated to either an LRU dict or a FIFO queue."""

    # Replacement policies
    LRU = "LRU"
    FIFO = 'FIFO'

    def __init__(self, name, size, policy):
        self.name = name
        self.size = size                 # total capacity
        self.free_space = size           # capacity still available
        self.policy = policy             # Eviction policy
        self.hashmap = {}                # Mapping <objname, objsize>

        if self.policy == Cache.LRU:
            self.cache = LRU(self.size)
        elif self.policy == Cache.FIFO:
            self.cache = queue.Queue(maxsize=self.size)

        # Statistics
        self.hit_count = 0
        self.miss_count = 0

    def has_key(self, key):
        """Return True if `key` is currently tracked by the cache."""
        return key in self.hashmap

    def update(self, key, size):
        """Refresh an existing entry (counts as a cache hit)."""
        self.hashmap[key] = size
        self.hit_count += 1
        if self.policy == Cache.LRU:
            # bug fix: `self.cache.update(key=size)` stored the literal
            # string 'key' instead of touching the actual key.
            self.cache[key] = size
        elif self.policy == Cache.FIFO:
            self.cache.put(key)

    def insert(self, key, size, directory):
        """Insert a new entry, dispatching on the configured policy."""
        if self.policy == Cache.LRU:
            self.insertLRU(key, size, directory)
        elif self.policy == Cache.FIFO:
            self.insertFIFO(key, size, directory)

    def evictLRU(self, directory):
        """Evict the least-recently-used entry and reclaim its space."""
        oid = self.cache.peek_last_item()[0]
        directory.removeBlock(oid, self.name)
        # bug fix: reclaim the space *before* forgetting the entry — the
        # original deleted hashmap[oid] first and then read it (KeyError).
        self.free_space += int(self.hashmap[oid])
        del self.hashmap[oid]
        # bug fix: actually remove the entry from the LRU structure
        # (the original `del [oid]` deleted a throwaway list).
        del self.cache[oid]

    def evictFIFO(self, directory):
        """Evict the oldest inserted entry and reclaim its space."""
        oid = self.cache.get()
        directory.removeBlock(oid, self.name)
        self.free_space += int(self.hashmap[oid])
        del self.hashmap[oid]

    def insertLRU(self, key, size, directory):
        """Insert under LRU policy, evicting until the entry fits."""
        while int(size) >= self.free_space:
            self.evictLRU(directory)
        self.cache[key] = size
        self.hashmap[key] = size
        # bug fix: inserting consumes space (the original *added* it back,
        # so free_space grew and eviction never triggered).
        self.free_space -= int(size)
        self.miss_count += 1

    def insertFIFO(self, key, size, directory):
        """Insert under FIFO policy, evicting until the entry fits."""
        while int(size) >= self.free_space:
            self.evictFIFO(directory)
        self.cache.put(key)
        self.hashmap[key] = size
        # bug fix: same inverted sign as insertLRU.
        self.free_space -= int(size)
        self.miss_count += 1

    def put(self, key, size, directory):
        """Store `key`: refresh it on a hit, insert it on a miss."""
        if self.has_key(key):
            self.update(key, size)
        else:
            self.insert(key, size, directory)

    def print(self):
        """Dump the cache name, policy, size map and eviction structure."""
        if self.policy == Cache.LRU:
            print(self.name, "LRU", self.hashmap, self.cache.items())
        elif self.policy == Cache.FIFO:
            # bug fix: this branch was mislabeled "LRU".
            print(self.name, "FIFO", self.hashmap, list(self.cache.queue))

    def remove(self, key):
        """Forget `key` entirely (no directory notification)."""
        del self.hashmap[key]
        if self.policy == Cache.LRU:
            del self.cache[key]
        elif self.policy == Cache.FIFO:
            # queue.Queue has no random removal; the stale key simply
            # drains out of the queue on a later eviction.
            pass
class GP:
    """Genetic-programming search driver for the circuit minimization problem.

    Holds a fixed-size population of circuit string representations
    ("sreprs") plus a parallel record array of their (fitness, accuracy,
    numgates) scores, and evolves them over generations in `run()`.
    """

    # sentinel fitness for empty population slots (lowest possible score)
    LOW_FITNESS = -2**31

    def __init__(self, pop_size=5000, size_next_gen=300, lucky_per=0.10,
                 unique_pop=False, add_naive=False, **kwargs):
        """
        Create a genetic programming class that will help solve the circuit
        minimization problem
        :param pop_size: population max size
        :param size_next_gen: num of offspring in each generation
        :param lucky_per: fraction of parents chosen uniformly at random
            rather than by fitness
        :param unique_pop: if True, only offspring not already seen in the
            cache are added to the population
        :param add_naive: if True, seed the population with the naive srepr
        :param kwargs: passed on to Util
        """
        self.pop_size = pop_size
        self.size_next_gen = size_next_gen
        # parents are drawn in couples, so keep the count even
        # NOTE(review): this is float arithmetic; `% 2 == 1` on a float only
        # fires for exactly-odd values — confirm lucky_per always yields one
        self.size_best_parents = (1 - lucky_per) * self.size_next_gen
        if self.size_best_parents % 2 == 1:
            self.size_best_parents += 1
        self.size_lucky_parents = self.size_next_gen - self.size_best_parents
        self.unique_pop = unique_pop
        self.add_naive = add_naive
        self.util = Util(**kwargs)
        # memoizes srepr -> (fitness, accuracy, numgates); bounded at 10k entries
        self.cache = LRU(10000)

    def _init_population(self, init_pop_size=1000):
        """Build the initial population and score it via the worker pool."""
        # fitness and srepr structure array dtype
        pop_dtype = np.dtype([('fitness', '<f4'), ('accuracy', '<f4'),
                              ('numgates', '<i4')])
        # population of sreprs and their fitness. this changes throughout the run
        self.population = np.array([self.LOW_FITNESS] * self.pop_size,
                                   dtype=pop_dtype).view(np.recarray)
        self.sreprs = [None] * self.pop_size
        if self.add_naive:
            # reserve one slot for the naive solution
            init_pop_size -= 1
        # NOTE(review): uses the global `g.util` pool here but `self.util`
        # below for naive_srepr — confirm both refer to the same Util
        ret = g.util.pool.map(g.util.init_one, range(init_pop_size))
        for i, r in enumerate(ret):
            r_fitness, r_accuracy, r_numgates, r_srepr = r
            self.population[i] = r_fitness, r_accuracy, r_numgates
            self.cache[r_srepr] = r_fitness, r_accuracy, r_numgates
            self.sreprs[i] = r_srepr
        if self.add_naive:
            # NOTE(review): reuses loop variable `i` after the loop — this
            # raises NameError if init_pop_size ends up 0
            r_fitness, r_accuracy, r_numgates, r_srepr = self.util.naive_srepr(
            )
            self.population[i + 1] = r_fitness, r_accuracy, r_numgates
            self.cache[r_srepr] = r_fitness, r_accuracy, r_numgates
            self.sreprs[i + 1] = r_srepr

    def run(self, num_generations=10, init_pop_size=1000):
        """Run the evolutionary search for `num_generations` generations.

        Each generation: pick parent couples (fitness-weighted plus a lucky
        uniform share), breed offspring via the worker pool, score the
        not-yet-cached ones, and overwrite the worst population slots.
        Progress is mirrored to stdout and the module-global `logfile`.
        """
        global logfile
        print('init population...', end=' ', flush=True)
        print('init population...', end=' ', flush=True, file=logfile)
        self._init_population(init_pop_size=init_pop_size)
        print('done')
        print('done', file=logfile)
        print('starting search')
        print('starting search', file=logfile)
        for generation in range(num_generations):
            # print current status
            logstr = 'generation {0}/{1} best fitness: {2} best accuracy: {3}'.format(
                generation, num_generations,
                self.population[self.population.fitness.argmax()],
                self.population[self.population.accuracy.argmax()])
            print(logstr)
            print(logstr, file=logfile)
            # get parents probability distribution - fitter sreprs are better picks as parents
            fitness = self.population.fitness.copy()
            real_min = np.min(fitness[fitness > self.LOW_FITNESS])
            fitness[fitness == self.
                    LOW_FITNESS] = real_min  # so no chance they'll be taken
            # shift so empty slots get probability 0 after normalization
            nz_fitness = fitness - real_min
            # each parent couple generates 2 children
            best_parents = np.random.choice(
                np.arange(self.population.shape[0]),
                size=(int(self.size_best_parents / 2), 2),
                p=nz_fitness / nz_fitness.sum())
            lucky_parents = np.random.choice(
                np.arange(self.population.shape[0]),
                size=(int(self.size_lucky_parents / 2), 2))
            parents = np.concatenate((best_parents, lucky_parents))
            # drop couples where either slot is still empty (srepr is None)
            parents_sreprs = [
                (self.sreprs[x[0]], self.sreprs[x[1]]) for x in parents
                if None not in [self.sreprs[x[0]], self.sreprs[x[1]]]
            ]
            # generate offspring and replace the weak samples in the population
            worst_indices = np.argpartition(
                self.population.fitness,
                self.size_next_gen)[:self.size_next_gen]
            next_gen = g.util.pool.map(g.util.create_next_gen, parents_sreprs)
            flat_next_gen = [s for sc in next_gen for s in sc]
            # split offspring into already-scored (cached) and new sreprs
            next_gen_noncached_srepr = [
                srepr for srepr in flat_next_gen if srepr not in self.cache
            ]
            next_gen_cached_srepr = [
                srepr for srepr in flat_next_gen if srepr in self.cache
            ]
            # score only the new ones in parallel
            next_gen_noncached_fitness = g.util.pool.map(
                g.util.fitness, next_gen_noncached_srepr)
            to_update = dict(
                zip(next_gen_noncached_srepr, next_gen_noncached_fitness))
            # next_gen_sreprs = flat_next_gen if not self.unique_pop else next_gen_noncached_srepr
            next_gen_fitness = [(srepr, ) + to_update[srepr]
                                for srepr in next_gen_noncached_srepr]
            if not self.unique_pop:
                # re-admit cached offspring using their memoized scores
                next_gen_fitness += [(srepr, ) + self.cache[srepr]
                                     for srepr in next_gen_cached_srepr]
            self.cache.update(to_update)
            # overwrite the worst slots with the new generation
            for wi, children in zip(worst_indices, next_gen_fitness):
                # child0, c0_fitness, c0_accuracy, c0_numgates, child1, c1_fitness, c1_accuracy, c1_numgates = children
                child, c_fitness, c_accuracy, c_numgates = children
                self.population[wi] = (c_fitness, c_accuracy, c_numgates)
                self.sreprs[wi] = child
        print(file=logfile)
        print('Finished!', file=logfile)

    def print_best(self, file=sys.stdout):
        """Print the best-fitness and best-accuracy records and their sreprs."""
        best_fitness_index = self.population.fitness.argmax()
        best_accuracy_index = self.population.accuracy.argmax()
        print('best fitness: {0} best accuracy: {1}'.format(
            self.population[best_fitness_index],
            self.population[best_accuracy_index]),
              file=file)
        print(file=file)
        print(self.sreprs[best_fitness_index], file=file)
        print(file=file)
        print(self.sreprs[best_accuracy_index], file=file)
# Would print 5
l.set_size(3)
print(l.items())      # Would print [(3, '3'), (5, '5'), (2, '2')]
print(l.get_size())   # Would print 3
print(l.has_key(5))   # Would print True
print(2 in l)         # Would print True
l.get_stats()         # Would return (1, 0)

# bug fix: `l.update(5='0')` is a SyntaxError — keyword names must be
# identifiers, so a non-string key has to go through a mapping instead.
l.update({5: '0'})    # Update an item
print(l.items())      # Would print [(5, '0'), (3, '3'), (2, '2')]
l.clear()
print(l.items())      # Would print []


def evicted(key, value):
    """Callback invoked by the LRU when it discards an item."""
    # bug fix: converted the remaining Python-2 print statements to
    # print() calls so the file parses under Python 3.
    print("removing: %s, %s" % (key, value))


l = LRU(1, callback=evicted)
l[1] = '1'