def setup(cavern, is_part2=False, elves_power=3):
    global units
    global elves
    global goblins
    global elves_atk
    global part2
    global G

    units = SortedKeyList(key=lambda u: u[:2])
    elves = {}
    goblins = {}
    elves_atk = elves_power
    part2 = is_part2
    G = nx.Graph()

    for i, row in enumerate(cavern):
        for j, cell in enumerate(row):
            if cell == FREE:
                G.add_node((i, j))
            elif cell != WALL:
                u = [i, j, 200, False, cell]
                units.add(u)
                if cell == ELF:
                    elves[i, j] = u
                elif cell == GOBLIN:
                    goblins[i, j] = u

    for u in units:
        u[3] = is_near_enemies(u)

    for i, j in G.nodes:
        for ni, nj in adjacent(i, j):
            if (ni, nj) in G:  # assumes borders are all walls
                G.add_edge((i, j), (ni, nj))
class MaxStack:
    def __init__(self):
        self.by_time = SortedKeyList(key=lambda t: -t[1])
        self.by_val = SortedKeyList(key=lambda t: (-t[0], -t[1]))
        self.time = -1

    def push(self, x: int) -> None:
        self.time += 1
        rec = (x, self.time)
        self.by_time.add(rec)
        self.by_val.add(rec)

    def pop(self) -> int:
        rec = self.by_time.pop(0)
        self.by_val.remove(rec)
        return rec[0]

    def top(self) -> int:
        rec = self.by_time[0]
        return rec[0]

    def peekMax(self) -> int:
        rec = self.by_val[0]
        return rec[0]

    def popMax(self) -> int:
        rec = self.by_val.pop(0)
        self.by_time.remove(rec)
        return rec[0]
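# A minimal usage sketch for the MaxStack above, assuming
# `from sortedcontainers import SortedKeyList` is in scope.
s = MaxStack()
s.push(5)
s.push(1)
s.push(5)
print(s.top())      # 5 (most recently pushed value)
print(s.popMax())   # 5 (removes the most recently pushed 5)
print(s.top())      # 1
print(s.peekMax())  # 5 (the earlier 5 is still on the stack)
print(s.pop())      # 1
print(s.top())      # 5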
def solve(self, initial_state: State, heuristic: callable) -> State:
    def smart_heuristic(state: State):
        value = len(state.board_history) / 10 + heuristic(state)
        return value

    state_list = SortedKeyList(key=smart_heuristic)
    state_list.add(initial_state)
    visited = dict()

    while len(state_list) > 0:
        curr: State = state_list.pop(0)
        key = list_to_string(curr.current_board.content)
        visited[key] = True
        if curr.current_board.content == sorted(curr.current_board.content):
            return curr
        for child in curr.next_states():
            new_key = list_to_string(child.current_board.content)
            if not visited.get(new_key, False):
                while len(state_list) > 100:
                    state_list.pop(-1)
                state_list.add(child)
    return None
def A_star(board: Board, heuristic: callable, move_order=DEFAULT_MOVE_ORDER) -> Optional[Board]:
    def smart_heuristic(board):
        value = len(board.move_history) / 100 + heuristic(board)
        return value

    board_list = SortedKeyList(key=smart_heuristic)
    board_list.add(board)
    visited = dict()

    while len(board_list) > 0:
        considered_board = board_list.pop(0)
        key = array_to_string(considered_board.board)
        visited[key] = True
        if considered_board.is_solved():
            return considered_board
        for direction in move_order:
            if considered_board.is_move_possible(direction):
                new_board = np.copy(considered_board.board)
                new_history = considered_board.move_history[:]
                new_board = Board(new_board, new_history)
                new_board.move(direction)
                new_key = array_to_string(new_board.board)
                if not visited.get(new_key, False):
                    board_list.add(new_board)
    return None
def best_first_search(board: Board, heuristic: callable, move_order=DEFAULT_MOVE_ORDER) -> Optional[Board]:
    board_list = SortedKeyList(key=heuristic)
    board_list.add(board)
    visited = dict()

    while len(board_list) > 0:
        considered_board = board_list.pop(0)
        # print("Distance: {}".format(heuristic(considered_board)))
        key = array_to_string(considered_board.board)
        # if len(considered_board.move_history) > limit:
        #     continue
        visited[key] = True
        if considered_board.is_solved():
            return considered_board
        for direction in move_order:
            if considered_board.is_move_possible(direction):
                new_board = np.copy(considered_board.board)
                new_history = considered_board.move_history[:]
                new_board = Board(new_board, new_history)
                new_board.move(direction)
                new_key = array_to_string(new_board.board)
                if not visited.get(new_key, False):
                    board_list.add(new_board)
    return None
def test_getitem_slice():
    random.seed(0)
    slt = SortedKeyList(key=modulo)
    slt._reset(17)

    lst = list()
    for rpt in range(100):
        val = random.random()
        slt.add(val)
        lst.append(val)

    lst.sort(key=modulo)

    assert all(slt[start:] == lst[start:]
               for start in [-75, -25, 0, 25, 75])
    assert all(slt[:stop] == lst[:stop]
               for stop in [-75, -25, 0, 25, 75])
    assert all(slt[::step] == lst[::step]
               for step in [-5, -1, 1, 5])
    assert all(slt[start:stop] == lst[start:stop]
               for start in [-75, -25, 0, 25, 75]
               for stop in [-75, -25, 0, 25, 75])
    assert all(slt[:stop:step] == lst[:stop:step]
               for stop in [-75, -25, 0, 25, 75]
               for step in [-5, -1, 1, 5])
    assert all(slt[start::step] == lst[start::step]
               for start in [-75, -25, 0, 25, 75]
               for step in [-5, -1, 1, 5])
    assert all(slt[start:stop:step] == lst[start:stop:step]
               for start in [-75, -25, 0, 25, 75]
               for stop in [-75, -25, 0, 25, 75]
               for step in [-5, -1, 1, 5])
def test_copy():
    slt = SortedKeyList(range(100), key=modulo)
    slt._reset(7)
    two = slt.copy()
    slt.add(100)
    assert len(slt) == 101
    assert len(two) == 100
class ConstraintList:
    '''List of constraints for a single rom variant'''

    def __init__(self, constraints: List[Constraint], rom_variant: RomVariant) -> None:
        self.constraints = SortedKeyList(key=lambda x: x.addr)
        for constraint in constraints:
            if constraint.romA == rom_variant:
                self.constraints.add(RomConstraint(constraint.addressA, constraint))
            elif constraint.romB == rom_variant:
                self.constraints.add(RomConstraint(constraint.addressB, constraint))

    def get_constraints_at(self, local_address: int) -> List[Constraint]:
        constraints = []
        index = self.constraints.bisect_key_left(local_address)
        while index < len(self.constraints) and self.constraints[index].addr == local_address:
            constraints.append(self.constraints[index].constraint)
            index += 1
        return constraints
def test_copy_copy():
    import copy
    slt = SortedKeyList(range(100), key=negate)
    slt._reset(7)
    two = copy.copy(slt)
    slt.add(100)
    assert len(slt) == 101
    assert len(two) == 100
def _run_clustering(self):
    print("try to use clustering")
    # here we need to run clustering
    # first of all we need to choose features
    max_feature_count = int(self._max_freq * len(self._urls))
    min_feature_count = int(self._min_freq * len(self._urls))
    start_index = bisect.bisect_left(self._features_count_list,
                                     (min_feature_count, ''))
    end_index = bisect.bisect_right(self._features_count_list,
                                    (max_feature_count, 'ZZZ'))
    if start_index >= end_index:
        print("not enough features")
        return self._next_queue_fallback()

    chosen_features = SortedSet()
    for i in range(start_index, end_index):
        chosen_features.add(self._features_count_list[i][1])

    # then we need to build the feature matrix
    X = np.empty((len(self._urls), len(chosen_features)))
    for i in range(len(self._urls)):
        features = self._urls[self._urls_keys[i]][self._i_features]
        for j, fname in enumerate(chosen_features):
            if fname in features:
                X[i][j] = 1
            else:
                X[i][j] = 0

    # now we can run clustering
    y = self._clusterizer.fit_predict(X)

    # and we need to create a uniformly distributed queue
    def get_list_of_2_sets():
        # index 0 is for used urls,
        # index 1 is for unused urls,
        # index 2 is for the total count
        return [set(), set(), 0]

    url_in_cluster = defaultdict(get_list_of_2_sets)
    for i in range(len(y)):
        url = self._urls_keys[i]
        if self._urls[url][self._i_is_used]:
            url_in_cluster[y[i]][self._i_list_for_used].add(url)
        else:
            url_in_cluster[y[i]][self._i_list_for_unused].add(url)
        url_in_cluster[y[i]][self._i_list_total] += 1

    limit = self._subqueue_len
    cluster_keys = SortedKeyList(
        url_in_cluster.keys(),
        key=lambda x: -len(url_in_cluster[x][self._i_list_for_used]))
    while limit > 0:
        # Todo: optimize
        if len(cluster_keys) > 0:
            less_index = cluster_keys.pop()
            unused_urls = url_in_cluster[less_index][self._i_list_for_unused]
            if len(unused_urls) > 0:
                url = unused_urls.pop()
                self._subqueue.put(url)
                limit -= 1
                if len(unused_urls) > 0:
                    url_in_cluster[less_index][self._i_list_for_used].add(url)
                    cluster_keys.add(less_index)
        else:
            break
def inicia_LEP(red):
    LEP = SortedKeyList(key=lambda evento: evento.tiempo)
    for nodo in red.nodos:
        if nodo.tipo_n == "llegada":
            evento = Evento(
                random.expovariate(1 / nodo.t_llegadas), nodo, "llegada_e"
            )
            LEP.add(evento)
    return LEP
def test_update_order_consistency():
    setup = [10, 20, 30]
    slt1 = SortedKeyList(setup, key=modulo)
    slt2 = SortedKeyList(setup, key=modulo)
    addition = [40, 50, 60]

    for value in addition:
        slt1.add(value)

    slt2.update(addition)

    assert slt1 == slt2
def test_key2():
    class Incomparable:
        pass

    a = Incomparable()
    b = Incomparable()

    slt = SortedKeyList(key=lambda val: 1)
    slt.add(a)
    slt.add(b)

    assert slt == [a, b]
def top_lemmas(self, n=100):
    """
    Returns a list of (lemma, username, number of events) tuples for the n
    lemmas with the most events.
    """
    lemmas_lengths = SortedKeyList(key=lambda key_val: -key_val[1])
    for key, val in self.lemmas_to_logs.items():
        lemmas_lengths.add((key, len(val)))
    return list(lemmas_lengths)[0:n]
def inicia_LEP(red):  # method that reads the network of nodes
    LEP = SortedKeyList(key=lambda evento: evento.tiempo)  # keeps the event list sorted
    for nodo in red.nodos:  # walk the network of nodes
        if nodo.tipo_n == "llegada":
            evento = Evento(random.expovariate(1 / nodo.t_llegadas), nodo,
                            "llegada_e")
            LEP.add(evento)
    return LEP
def representative_trajectory(self, cluster):
    # TODO: Fix this :/
    rep_trajectory = []

    # Average direction vector:
    av_vector = np.array([0.0, 0.0])
    for line in cluster:
        av_vector += line.vector
    av_vector /= len(cluster)
    print(av_vector)

    unit_av = av_vector / np.linalg.norm(av_vector)
    print(unit_av)

    x = np.array([1.0, 0.0])
    theta = np.arccos(x.dot(unit_av))
    if unit_av[1] > 0.0:
        theta = -theta

    rotation_mat = np.array([[np.cos(theta), -np.sin(theta)],
                             [np.sin(theta), np.cos(theta)]])
    back_rotation_mat = np.array([[np.cos(-theta), np.sin(-theta)],
                                  [-np.sin(-theta), np.cos(-theta)]])

    rotated_points = []
    rotated_lines = []
    for line in cluster:
        rot_v = rotation_mat.dot(line.vector)
        rot_b = line.a + line.length * rot_v
        rotated_points.append({"end": False, "point": line.a})
        rotated_points.append({"end": True, "point": rot_b})
        rotated_lines.append(LineSegment(line.a, rot_b))

    rotated_points = sorted(rotated_points, key=lambda x: x["point"][0])

    # Sort lines by starting x value
    line_start_lookup = SortedKeyList(rotated_lines, key=lambda x: x.a[0])
    # Sort lines the sweep line crosses by ending x value
    intersecting_lines = SortedKeyList([], key=lambda x: x.b[0])

    last_x = 0.0
    for point_dict in rotated_points:
        if point_dict["end"]:
            try:
                intersecting_lines.pop(0)
            except Exception as e:
                print("Could not generate a representative trajectory. "
                      "Examine your clustering parameters")
                break
        else:
            intersecting_lines.add(line_start_lookup.pop(0))

        if len(intersecting_lines) >= self.min_lns:
            # diff = point_dict["point"][0] - last_x
            # if diff >= self.gamma:
            average_y = 0.0
            for line in intersecting_lines:
                slope = line.vector[1] / line.vector[0]
                average_y += (point_dict["point"][0] - line.a[0]) * slope
            average_y /= len(intersecting_lines)
            rep_trajectory.append(np.array([point_dict["point"][0], average_y]))

    return rep_trajectory
def abs_end(self) -> Timecode:
    subrefs = SortedKeyList(key=lambda x: x.abs_start)

    for segment in self.parent.parent.stream:
        for ref in segment.references:
            for subref in ref.subrefs:
                if subref.abs_start > self.abs_start:
                    subrefs.add(subref)

    if len(subrefs) > 0:
        return subrefs[0].abs_start
    else:
        return self.parent.parent.abs_end
def test_count():
    slt = SortedKeyList(key=negate)
    slt._reset(7)

    assert slt.count(0) == 0

    for iii in range(100):
        for jjj in range(iii):
            slt.add(iii)
        slt._check()

    for iii in range(100):
        assert slt.count(iii) == iii
class MemoryTimestampIndex(TimestampIndex):
    """ Index of transactions sorted by their timestamps. """

    _index: 'SortedKeyList[TransactionIndexElement]'

    def __init__(self) -> None:
        self.log = logger.new()
        self._index = SortedKeyList(key=lambda x: (x.timestamp, x.hash))

    def add_tx(self, tx: BaseTransaction) -> bool:
        assert tx.hash is not None
        # It is safe to use the in operator because it is O(log(n)).
        # http://www.grantjenks.com/docs/sortedcontainers/sortedlist.html#sortedcontainers.SortedList.__contains__
        element = TransactionIndexElement(tx.timestamp, tx.hash)
        if element in self._index:
            return False
        self._index.add(element)
        return True

    def del_tx(self, tx: BaseTransaction) -> None:
        idx = self._index.bisect_key_left((tx.timestamp, tx.hash))
        if idx < len(self._index) and self._index[idx].hash == tx.hash:
            self._index.pop(idx)

    def get_newest(self, count: int) -> Tuple[List[bytes], bool]:
        return get_newest_sorted_key_list(self._index, count)

    def get_older(self, timestamp: int, hash_bytes: bytes, count: int) -> Tuple[List[bytes], bool]:
        return get_older_sorted_key_list(self._index, timestamp, hash_bytes, count)

    def get_newer(self, timestamp: int, hash_bytes: bytes, count: int) -> Tuple[List[bytes], bool]:
        return get_newer_sorted_key_list(self._index, timestamp, hash_bytes, count)

    def get_hashes_and_next_idx(self, from_idx: RangeIdx, count: int) -> Tuple[List[bytes], Optional[RangeIdx]]:
        timestamp, offset = from_idx
        idx = self._index.bisect_key_left((timestamp, b''))
        txs = SortedKeyList(key=lambda x: (x.timestamp, x.hash))
        txs.update(self._index[idx:idx + offset + count])
        ret_txs = txs[offset:offset + count]
        hashes = [tx.hash for tx in ret_txs]
        if len(ret_txs) < count:
            return hashes, None
        else:
            next_offset = offset + count
            next_timestamp = ret_txs[-1].timestamp
            if next_timestamp != timestamp:
                next_idx = txs.bisect_key_left((next_timestamp, b''))
                next_offset -= next_idx
            return hashes, RangeIdx(next_timestamp, next_offset)
def pointQuery(api, stream, key, sort=False):
    txids = getTXids(api, full2short(stream), key)
    args = [[txid] for txid in txids]
    if stream[0] == PREFIX:
        stream = 'Timestamp'
    if sort:
        result = SortedKeyList(
            key=lambda x: x.split(DELIMITER)[ATTRIBUTE_INDEX[stream]])
        for r in api.batch('getrawtransaction', args):
            result.add(decoder(r["result"]))
        return result
    else:
        return [
            decoder(r["result"])
            for r in api.batch('getrawtransaction', args)
        ]
class Universe:
    def __init__(self, name):
        self.name = name.capitalize()
        self._kingdoms = SortedKeyList(key=lambda x: x.name)
        self._ruler = ''

    @property
    def kingdoms(self) -> SortedKeyList[Kingdom]:
        # deepcopy prevents client code from modifying internal state of universe
        return deepcopy(self._kingdoms)

    @property
    def ruler(self) -> str:
        return self._ruler

    @ruler.setter
    def ruler(self, new_ruler: str):
        new_ruler = new_ruler.capitalize()
        self._get_kingdom(new_ruler)
        self._ruler = new_ruler

    def add_kingdoms(self, kingdoms: Iterable[Kingdom]):
        for kingdom in kingdoms:
            if kingdom.allies_given or kingdom.allies_received:
                raise RuntimeError(
                    "Kingdom '{}' has already formed allegiances and thus "
                    "cannot be part of a universe".format(kingdom.name))
            if kingdom not in self._kingdoms:
                self._kingdoms.add(kingdom)

    def _get_kingdom(self, kingdom_name: str) -> Kingdom:
        kingdom_name = kingdom_name.capitalize()
        try:
            kingdom = utils.sorted_list_get_with_key(self._kingdoms, kingdom_name)
        except ValueError:
            raise ValueError(
                "Kingdom '{}' is not part of the '{}' universe".format(
                    kingdom_name, self.name))
        return kingdom

    def get_kingdom(self, kingdom_name: str) -> Kingdom:
        return deepcopy(self._get_kingdom(kingdom_name))

    def form_allegiance(self, sender: str, receiver: str, msg: str) -> bool:
        sender = self._get_kingdom(sender)
        receiver = self._get_kingdom(receiver)
        return sender.ask_allegiance(receiver, msg)
def test_count():
    slt = SortedKeyList(key=modulo)
    slt._reset(7)

    assert slt.count(0) == 0

    for iii in range(100):
        for jjj in range(iii):
            slt.add(iii)
        slt._check()

    for iii in range(100):
        assert slt.count(iii) == iii

    slt = SortedKeyList(range(8), key=modulo)
    assert slt.count(9) == 0
def test_getitem():
    random.seed(0)
    slt = SortedKeyList(key=modulo)
    slt._reset(17)

    slt.add(5)
    slt._build_index()
    slt._check()
    slt.clear()

    lst = list(random.random() for rpt in range(100))
    slt.update(lst)
    lst.sort(key=modulo)

    assert all(slt[idx] == lst[idx] for idx in range(100))
    assert all(slt[idx - 99] == lst[idx - 99] for idx in range(100))
class HeavyHitterList:
    def __init__(self, threshold):
        self.data = SortedKeyList(key=itemgetter(1))
        self.threshold = threshold

    def append(self, x):
        if len(self.data) < self.threshold:
            self.data.add(x)
        else:
            if x[1] > self.data[0][1]:
                self.data.remove(self.data[0])
                self.data.add(x)

    def get_data(self):
        return self.data

    def __str__(self):
        return self.data.__str__()
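# A minimal usage sketch for the HeavyHitterList above; it keeps only the
# `threshold` (item, count) pairs with the highest counts. Assumes
# `from operator import itemgetter` and
# `from sortedcontainers import SortedKeyList` are in scope.
hh = HeavyHitterList(threshold=3)
for pair in [("a", 5), ("b", 2), ("c", 9), ("d", 7), ("e", 1)]:
    hh.append(pair)
print(list(hh.get_data()))  # [('a', 5), ('d', 7), ('c', 9)] -- smallest counts evicted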
class PriorityQueue:
    def __init__(self, capacity=None, key=None):
        self._data = SortedKeyList(key=self._rank)
        self._capacity = inf if capacity is None else capacity
        self._key = key

    def _rank(self, item):
        if self._key:
            return self._key(*item)
        return item.rank

    def add(self, value, rank):
        self._data.add(Element(value, rank))
        self._shrink()

    def clear(self):
        return self._data.clear()

    def __repr__(self):
        return f"PriorityQueue([{', '.join(f'{v}: {r}' for (v, r) in self._data)}])"

    def _shrink(self):
        while len(self._data) > self._capacity:
            self._data.pop()

    def update(self, src):
        raise NotImplementedError

    def __contains__(self, value):
        raise NotImplementedError

    def __iter__(self):
        for value, rank in self._data:
            yield value

    def __getitem__(self, index):
        if isinstance(index, int):
            return self._data[index].value
        return list(self)[index]

    def size(self):
        return len(self._data)
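# A minimal usage sketch for the PriorityQueue above. `Element` is not shown in
# this snippet; a namedtuple with `value` and `rank` fields (assumed here)
# satisfies every access the class makes, and `inf` comes from the math module.
from collections import namedtuple
from math import inf
from sortedcontainers import SortedKeyList

Element = namedtuple('Element', ['value', 'rank'])

pq = PriorityQueue(capacity=2)
pq.add('low', rank=5)
pq.add('high', rank=1)
pq.add('mid', rank=3)   # capacity is 2, so the worst-ranked entry ('low') is dropped
print(list(pq))         # ['high', 'mid']
print(pq[0])            # 'high'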
def test_getitem():
    random.seed(0)
    slt = SortedKeyList(key=negate)
    slt._reset(17)

    slt.add(5)
    assert slt[0] == 5
    slt.clear()

    lst = list()
    for rpt in range(100):
        val = random.random()
        slt.add(val)
        lst.append(val)

    lst.sort(reverse=True)

    assert all(slt[idx] == lst[idx] for idx in range(100))
    assert all(slt[idx - 99] == lst[idx - 99] for idx in range(100))
def load_symbols_from_map(path: str) -> None:
    global symbols
    symbols = SortedKeyList([], key=lambda x: x.address)

    with open(path, 'r') as map_file:
        # ignore header
        line = map_file.readline()
        while not line.startswith('rom'):
            line = map_file.readline()

        line = map_file.readline()
        # The second line starting with 'rom' is the one we need
        while not line.startswith('rom'):
            line = map_file.readline()

        # Parse declarations
        prev_symbol = None
        current_file = 'UNKNOWN'
        for line in map_file:
            if line.startswith(' .'):
                # ignore this definition of filename
                continue
            elif line.startswith(' '):
                parts = line.split()
                if len(parts) == 2 and parts[1] != '':
                    # it is actually a symbol
                    addr = int(parts[0], 16) - ROM_OFFSET
                    if prev_symbol is not None:
                        prev_symbol.length = addr - prev_symbol.address
                    symbol = Symbol(addr, parts[1], current_file)
                    symbols.add(symbol)
                    prev_symbol = symbol
            elif not line.startswith(' *'):
                # this defines the name
                current_file = line.split('(')[0].strip()
class HalfSnap:
    def __init__(self, bids: bool):
        if bids:
            self.data = SortedKeyList(key=lambda val: -val[0])
        else:
            self.data = SortedKeyList(key=lambda val: val[0])
        self.is_bids = bids
        self.time = None

    def fill(self, source):
        self.data.clear()
        for item in source:
            self.add(item)

    def add(self, item):
        price = item[0]
        size = item[1]
        self.data.add([price, size])

    def update(self, price: float, size: float):
        key = -price if self.is_bids else price
        i = self.data.bisect_key_left(key)
        if 0 <= i < len(self.data):
            value = self.data[i]
        else:
            if size <= VERY_SMALL_NUMBER:
                return False
            self.data.add([price, size])
            return True

        if size <= VERY_SMALL_NUMBER:
            if value[0] == price:
                self.data.discard(value)
                return True
            else:
                return False

        if value[0] == price:
            self.data[i][1] = size
        else:
            self.data.add([price, size])
        return True

    def delete(self, price: float):
        return self.update(price, 0.0)
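# A minimal usage sketch for the HalfSnap above (one side of an order book).
# VERY_SMALL_NUMBER is an external constant in the original module; a tiny
# epsilon is assumed here only so the sketch runs.
VERY_SMALL_NUMBER = 1e-9

bids = HalfSnap(bids=True)
bids.fill([[100.0, 1.5], [101.0, 0.7], [99.5, 2.0]])
print(bids.data[0])      # [101.0, 0.7] -- best bid first (descending prices)
bids.update(101.0, 1.2)  # resize an existing price level
bids.update(101.0, 0.0)  # a size of ~0 removes the level
print(bids.data[0])      # [100.0, 1.5]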
def test_add():
    random.seed(0)
    slt = SortedKeyList(key=modulo)
    for val in range(1000):
        slt.add(val)
        slt._check()

    slt = SortedKeyList(key=modulo)
    for val in range(1000, 0, -1):
        slt.add(val)
        slt._check()

    slt = SortedKeyList(key=modulo)
    for val in range(1000):
        slt.add(random.random())
        slt._check()
class Stream:
    _key: str = attr.ib()
    _data: List[dict] = attr.ib()
    meta: Dict[str, Any] = attr.ib(factory=dict)
    streams: List['Stream'] = attr.ib(factory=list)  # for joined streams

    twitch: str = attr.ib(init=False)
    type: StreamType = attr.ib(init=False)
    games: List[Tuple['Game', SegmentReference]] = attr.ib(init=False)
    segments: List[Segment] = attr.ib(init=False)
    timecodes: Timecodes = attr.ib(init=False)

    @staticmethod
    def _segment_key(s) -> int:
        if hasattr(s, 'fallbacks') and 'offset' in s.fallbacks:
            offset = s.fallbacks['offset']
        else:
            offset = s.offset()
        return int(offset)

    def __attrs_post_init__(self):
        self.twitch = self._key

        if ',' in self.twitch:
            self.type = StreamType.JOINED
        elif self.twitch.startswith('00'):
            self.type = StreamType.NO_CHAT
        else:
            self.type = StreamType.DEFAULT

        self.games = []
        self.segments = SortedKeyList(key=self._segment_key)
        self.timecodes = Timecodes(timecodes.get(self.twitch) or {})

        for segment in self._data:
            Segment(stream=self, **segment)

    # Workaround for SortedKeyList.__init__
    def __new__(cls, *args, **kwargs):
        return object.__new__(cls)

    @property
    @cached('duration-twitch-{0[0].twitch}')
    def _duration(self) -> int:
        line = last_line(self.subtitles_path)
        if line is not None:
            return int(Timecode(line.split(' ')[2].split('.')[0]))

    @property
    def duration(self) -> Timecode:
        if self.type is StreamType.JOINED:
            return Timecode(sum(int(s.duration) for s in self.streams))
        elif self.type is StreamType.NO_CHAT:
            return Timecode(max(int(s.abs_end) for s in self))
        else:
            return Timecode(self._duration)

    @property
    def abs_start(self) -> Timecode:
        return Timecode(0)

    @property
    def abs_end(self) -> Timecode:
        return self.duration

    @property
    @cached('date-{0[0].twitch}')
    def _unix_time(self) -> str:
        args = ['--pretty=oneline', '--reverse', '-S', self.twitch]
        rev = repo.git.log(args).split(' ')[0]
        return repo.commit(rev).authored_date

    @property
    def date(self) -> datetime:
        if self.type is StreamType.JOINED:
            return self.streams[0].date
        elif self.type is StreamType.NO_CHAT:
            return datetime.strptime(self.twitch[2:8], '%y%m%d')
        else:
            return datetime.fromtimestamp(self._unix_time)

    @property
    def subtitles_prefix(self) -> str:
        """Returns public URL prefix of subtitles for this segment."""
        year = str(self.date.year)
        key = f'$PREFIX/chats/{year}'
        if key not in config['repos']['mounts']:
            raise Exception(f'Repository for year {year} is not configured')
        prefix = config['repos']['mounts'][key]['prefix']
        return prefix

    @property
    def subtitles(self) -> str:
        """Returns full public URL of subtitles for this stream."""
        if self.type is StreamType.NO_CHAT:
            return None
        return f'{self.subtitles_prefix}/v{self.twitch}.ass'

    @property
    def subtitles_path(self) -> str:
        """Returns relative path of subtitles in current environment."""
        return _(f'chats/{self.date.year}/v{self.twitch}.ass')

    @property
    def subtitles_style(self) -> SubtitlesStyle:
        style = SubtitlesStyle(tcd_config['ssa_style_format'],
                               tcd_config['ssa_style_default'])

        if self.meta.get('chromakey'):
            style['Alignment'] = '5'
        else:
            style['Alignment'] = '1'

        return style

    @cached_property
    def blacklist(self) -> BlacklistTimeline:
        bl = BlacklistTimeline()

        for segment in self:
            for subref in segment.all_subrefs:
                bl.add(subref.blacklist, subref.abs_start, subref.abs_end)

        return bl

    @property
    @cached('messages-{0[0].twitch}')
    def _messages(self) -> int:
        lines = count_lines(self.subtitles_path)
        return (lines - 10) if lines else None

    @property
    def messages(self) -> int:
        if self.type is StreamType.JOINED:
            return sum([s.messages for s in self.streams])
        else:
            return self._messages or 0

    def __getitem__(self, index: int) -> Segment:
        return self.segments[index]

    def __contains__(self, segment: Segment) -> bool:
        return segment in self.segments

    def __len__(self) -> int:
        return len(self.segments)

    def index(self, segment: Segment) -> int:
        return self.segments.index(segment)

    def add(self, segment: Segment):
        self.segments.add(segment)

    def remove(self, index: int):
        self.segments.remove(index)

    @join()
    def to_json(self) -> str:
        if len(self) > 1:
            yield '[\n'

            first = True
            for segment in self:
                if not first:
                    yield ',\n'
                else:
                    first = False

                yield indent(segment.to_json(), 2)

            yield '\n]'
        else:
            yield self[0].to_json()

    def __str__(self) -> str:
        return self.to_json()
def test_len():
    slt = SortedKeyList(key=modulo)

    for val in range(10000):
        slt.add(val)
        assert len(slt) == (val + 1)