def addNum(self, num):
    """
    Adds a num into the data structure.
    :type num: int
    :rtype: void
    """
    if len(self.left) > len(self.right):
        if num > self.median:
            heapq.heappush(self.right, num)
        else:
            heapq.heappush(self.right, -heapq.heappop(self.left))
            heapq.heappush(self.left, -num)
        self.median = (-self.left[0] + self.right[0]) / 2.0
    elif len(self.left) == len(self.right):
        if num > self.median:
            heapq.heappush(self.right, num)
            self.median = self.right[0]
        else:
            heapq.heappush(self.left, -num)
            self.median = -self.left[0]
    else:
        if num < self.median:
            heapq.heappush(self.left, -num)
        else:
            heapq.heappush(self.left, -heapq.heappop(self.right))
            heapq.heappush(self.right, num)
        self.median = (-self.left[0] + self.right[0]) / 2.0
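
# A minimal standalone sketch of the same two-heap median idea as addNum
# above (a hypothetical helper, not part of the original class): the lower
# half lives in a negated max-heap, the upper half in a min-heap.
import heapq

def running_medians(values):
    lower, upper = [], []  # max-heap via negation / min-heap
    for v in values:
        heapq.heappush(lower, -heapq.heappushpop(upper, v))
        if len(lower) > len(upper):
            heapq.heappush(upper, -heapq.heappop(lower))
        yield upper[0] if len(upper) > len(lower) else (upper[0] - lower[0]) / 2.0

# list(running_medians([2, 1, 5, 4])) == [2, 1.5, 2, 3.0]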
def _get_server(self):
    """
    Get server to use for request.
    Also process inactive server list, re-add them after given interval.
    """
    with self._lock:
        inactive_server_count = len(self._inactive_servers)
        for i in range(inactive_server_count):
            try:
                ts, server, message = heapq.heappop(self._inactive_servers)
            except IndexError:
                pass
            else:
                if (ts + self.retry_interval) > time():
                    # Not yet, put it back
                    heapq.heappush(self._inactive_servers,
                                   (ts, server, message))
                else:
                    self._active_servers.append(server)
                    logger.warn("Restored server %s into active pool", server)

        # if none is old enough, use oldest
        if not self._active_servers:
            ts, server, message = heapq.heappop(self._inactive_servers)
            self._active_servers.append(server)
            logger.info("Restored server %s into active pool", server)

        server = self._active_servers[0]
        self._roundrobin()

        return server
def _vsids_calculate(self):
    """
    VSIDS Heuristic Calculation

    Examples
    ========

    >>> from sympy.logic.algorithms.dpll2 import SATSolver
    >>> l = SATSolver([set([2, -3]), set([1]), set([3, -3]), set([2, -2]),
    ... set([3, -2])], set([1, 2, 3]), set([]))
    >>> l.lit_heap
    [(-2.0, -3), (-2.0, 2), (-2.0, -2), (0.0, 1), (-2.0, 3), (0.0, -1)]
    >>> l._vsids_calculate()
    -3
    >>> l.lit_heap
    [(-2.0, -2), (-2.0, 2), (0.0, -1), (0.0, 1), (-2.0, 3)]
    """
    if len(self.lit_heap) == 0:
        return 0

    # Clean out the front of the heap as long the variables are set
    while self.variable_set[abs(self.lit_heap[0][1])]:
        heappop(self.lit_heap)
        if len(self.lit_heap) == 0:
            return 0

    return heappop(self.lit_heap)[1]
from collections import defaultdict
from heapq import heapify, heappop, heappush
from bitstring import BitArray  # external dependency (assumed: the bitstring package)

def huff_encode(text):
    freq = defaultdict(int)
    for s in text:
        freq[s] += 1

    tree = [[f, [s, ""]] for s, f in freq.items()]
    heapify(tree)
    while len(tree) > 1:
        l = heappop(tree)
        h = heappop(tree)
        for n in l[1:]:
            n[1] = '0' + n[1]
        for n in h[1:]:
            n[1] = '1' + n[1]
        heappush(tree, [l[0] + h[0]] + l[1:] + h[1:])
    root = heappop(tree)[1:]
    codes = dict([(s, "0b" + c) for s, c in root])

    # Header
    enc = BitArray()
    for s, c in root:
        enc += BitArray(bytes=s)
        enc += BitArray(uint=len(c), length=8)
        enc += BitArray("0b" + c)
    enc.prepend(BitArray(uint=len(root), length=8))

    for s in text:
        enc += BitArray(codes[s])
    return enc
def create_binary_tree(self):
    """
    Create a binary Huffman tree using stored vocabulary word counts. Frequent
    words will have shorter binary codes. Called internally from `build_vocab()`.
    """
    logger.info("constructing a huffman tree from %i words" % len(self.vocab))

    # build the huffman tree
    heap = self.vocab.values()
    heapq.heapify(heap)
    for i in xrange(len(self.vocab) - 1):
        min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
        heapq.heappush(heap, Vocab(count=min1.count + min2.count,
                                   index=i + len(self.vocab),
                                   left=min1, right=min2))

    # recurse over the tree, assigning a binary code to each vocabulary word
    if heap:
        max_depth, stack = 0, [(heap[0], [], [])]
        while stack:
            node, codes, points = stack.pop()
            if node.index < len(self.vocab):
                # leaf node => store its path from the root
                node.code, node.point = codes, points
                max_depth = max(len(codes), max_depth)
            else:
                # inner node => continue recursion
                points = array(list(points) + [node.index - len(self.vocab)], dtype=uint32)
                stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
                stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))

    logger.info("built huffman tree with maximum node depth %i" % max_depth)
def skyline(buildings):
    if not buildings:
        return []
    points = []
    for l, r, h in buildings:
        points.append([l, -h])
        points.append([r, h])
    # sort the points to ensure that all points are sorted from left to right
    points.sort()
    result = []
    queue = [0]
    prev = 0
    for e in points:
        if e[1] < 0:
            # whenever we see start point of the building
            # we put the height of the building into the heapq
            heapq.heappush(queue, e[1])
        else:
            # if we arrive at the end point of the building
            # we remove the height of the building from the heapq
            if queue[0] == -e[1]:
                heapq.heappop(queue)
            else:
                queue.remove(-e[1])
                heapq.heapify(queue)
        curr = queue[0]
        if curr != prev:
            # if the tallest building in the heapq changes
            # then we need to put that into the final result
            result.append([e[0], -curr])
            prev = curr
    return result
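
# A quick check of skyline() above on the classic LeetCode 218 sample input
# of (left, right, height) triples:
#
#   skyline([[2, 9, 10], [3, 7, 15], [5, 12, 12], [15, 20, 10], [19, 24, 8]])
#   # -> [[2, 10], [3, 15], [7, 12], [12, 0], [15, 10], [20, 8], [24, 0]]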
def test_buscar_el_mayor_y_menor_valor_de_una_lista(self):
    import heapq
    nums = [1, 8, 2, 23, 7, -4, 18, 23, 42, 37, 2]
    tres_mayores = heapq.nlargest(3, nums)
    self.assertEqual(tres_mayores, [42, 37, 23])
    tres_menores = heapq.nsmallest(3, nums)
    self.assertEqual(tres_menores, [-4, 1, 2])
    mayor = max(nums)
    menor = min(nums)
    self.assertEqual(mayor, 42)
    self.assertEqual(menor, -4)
    heap = list(nums)
    # Puts the smallest element at position 0
    heapq.heapify(heap)
    self.assertEqual(heap, [-4, 2, 1, 23, 7, 2, 18, 23, 42, 37, 8])
    # heappop removes the first element and replaces it with the smallest
    # remaining one, so heappop always pops the smallest element
    menor = heapq.heappop(heap)
    self.assertEqual(menor, -4)
    menor = heapq.heappop(heap)
    self.assertEqual(menor, 1)
def _build_tree(self, a):
    heapq.heapify(a)
    while len(a) > 1:
        left = heapq.heappop(a)
        right = heapq.heappop(a)
        heapq.heappush(a, (left[0] + right[0], left, right))
    return a[0]
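
# A hedged usage sketch for _build_tree above, assuming the input list holds
# (frequency, symbol) tuples as leaves; internal Huffman nodes then come out
# as nested (total, left, right) tuples (self is unused, so None stands in):
#
#   leaves = [(5, 'a'), (2, 'b'), (1, 'c')]
#   _build_tree(None, leaves)
#   # -> (8, (3, (1, 'c'), (2, 'b')), (5, 'a'))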
def get_next_task(self):
    """get the next task if there's one that should be processed,
    and return how long it will be until the next one should be
    processed."""
    if _debug: TaskManager._debug("get_next_task")

    # get the time
    now = _time()

    task = None
    delta = None

    if self.tasks:
        # look at the first task
        when, nxttask = self.tasks[0]
        if when <= now:
            # pull it off the list and mark that it's no longer scheduled
            heappop(self.tasks)
            task = nxttask
            task.isScheduled = False

            if self.tasks:
                when, nxttask = self.tasks[0]
                # peek at the next task, return how long to wait
                delta = max(when - now, 0.0)
        else:
            delta = when - now

    # return the task to run and how long to wait for the next one
    return (task, delta)
def largestSumAfterKNegations(self, A: List[int], K: int) -> int:
    s = sneg = neg = 0
    absmin = math.inf
    negpq = []
    for a in A:
        absmin = min(absmin, abs(a))
        if a >= 0:
            s += a
        else:
            heapq.heappush(negpq, a)
            neg += 1
            sneg += a
    if K < neg:
        while K:
            s -= heapq.heappop(negpq)
            K -= 1
        while negpq:
            s += heapq.heappop(negpq)
    else:
        s -= sneg
        if (K - neg) & 1:
            s -= absmin * 2
    return s
def nextTime(self):
    while True:
        (t, k, o) = self._heap[0]
        if k in self._dict:
            break
        heapq.heappop(self._heap)
    return t
def getImminent(self, time):
    """
    Returns a list of all models that transition at the provided time,
    with a specified epsilon deviation allowed.

    :param time: timestamp to check for models

    .. warning:: For efficiency, this method only checks the **first**
       elements, so trying to invoke this function with a timestamp higher
       than the value provided with the *readFirst* method, will **always**
       return an empty set.
    """
    #assert debug("Asking all imminent models")
    imm_children = []
    t, age = time
    try:
        # Age must be exactly the same
        first = self.heap[0]
        while (abs(first[0][0] - t) < self.epsilon) and (first[0][1] == age):
            # Check if the found event is actually still active
            if(first[2]):
                # Active, so event is imminent
                imm_children.append(first[3])
                first[2] = False
            else:
                # Wasn't active, but we will have to pop this to get the next
                # So we can lower the number of invalids
                self.invalids -= 1
            # Advance the while loop
            heappop(self.heap)
            first = self.heap[0]
    except IndexError:
        pass
    return imm_children
def generateAckPacket(self, packet):
    '''
    Generate ACK packet
    1) If packet.pck_id == self.lastOrderedPacketID+1, the packet is the
       packet to be expected; else, push the pck_id to the out of order queue
    2) Generate ACK packet with the lastOrderedPacketID
    3) set the same timestamp for the ACK Packet as the original Data Packet

    Args:
        packet: the Data Packet received
    '''
    if packet.pck_id <= self.lastOrderedPacketID:
        pass
    else:
        if packet.pck_id == self.lastOrderedPacketID + 1:
            self.lastOrderedPacketID += 1
            while self.outOfOrderPackets and \
                    self.outOfOrderPackets[0] == self.lastOrderedPacketID + 1:
                heapq.heappop(self.outOfOrderPackets)
                self.lastOrderedPacketID += 1
        else:
            heapq.heappush(self.outOfOrderPackets, packet.pck_id)
    ack_packet = DataPacket(packet.destination, packet.source, self,
                            self.engine.getCurrentTime(), ACK_PACKET_SIZE,
                            True, self.lastOrderedPacketID + 1)
    ack_packet.setOriginalPacketTimestamp(packet.timestamp)
    return ack_packet
def __init__(self, search_strategy):
    if search_strategy == _DEPTH_FIRST:
        # use stack for OPEN set (last in---most recent successor
        # added---is first out)
        self.open = []
        self.insert = self.open.append
        self.extract = self.open.pop
    elif search_strategy == _BREADTH_FIRST:
        # use queue for OPEN (first in---earliest node not yet
        # expanded---is first out)
        self.open = deque()
        self.insert = self.open.append
        self.extract = self.open.popleft
    elif search_strategy == _BEST_FIRST:
        # use priority queue for OPEN (first out is node with
        # lowest hval)
        self.open = []
        # set node less than function to compare hvals only
        sNode.lt_type = _H
        self.insert = lambda node: heapq.heappush(self.open, node)
        self.extract = lambda: heapq.heappop(self.open)
    elif search_strategy == _ASTAR:
        # use priority queue for OPEN (first out is node with
        # lowest fval = gval+hval)
        self.open = []
        # set node less than function to compare sums of hval and gval
        sNode.lt_type = _SUM_HG
        self.insert = lambda node: heapq.heappush(self.open, node)
        self.extract = lambda: heapq.heappop(self.open)
def generate_huffman_table(frequencies):
    encoding_table = {}
    decoding_table = {}
    for position_id, histogram in frequencies.iteritems():
        encoding_table[position_id] = [None] * 256
        # create a mapping table for characters
        ordinal_freq = [(histogram["%02x" % ordinal], ordinal)
                        if histogram.has_key("%02x" % ordinal)
                        else (1, ordinal)
                        for ordinal in xrange(256)]
        heap = []
        for node in ordinal_freq:
            heapq.heappush(heap, node)
        while len(heap) > 1:
            left = heapq.heappop(heap)
            right = heapq.heappop(heap)
            new_node = (left[0] + right[0], (left[1], right[1]))
            heapq.heappush(heap, new_node)
        total, huffman = heap[0]
        fill_table(huffman, "", encoding_table[position_id])
        decoding_table[position_id] = huffman
    return encoding_table, decoding_table
def first_k_most_relevant(doc_scores):
    """If there are more than k documents containing terms in a query,
    return the k documents with the highest scores, tiebroken by least
    docID first. If there are fewer than k documents, return them, sorted
    by highest scores, and tiebroken by least docID first.

    O(n) + O(k lg n)

    :param doc_scores: A dictionary of docID to its corresponding
        document's score.
    :return: List of k docIDs of string type, which are the most relevant
        (i.e. have the highest scores)
    """
    scores = [(-score, docID) for docID, score in doc_scores.iteritems()]
    # invert the scores so that heappop gives us the smallest score
    heapq.heapify(scores)
    most_relevant_docs = []
    for _ in range(k):
        if not scores:
            break
        most_relevant_docs.append(heapq.heappop(scores))
    if not most_relevant_docs:
        return most_relevant_docs
    # deals with equal-score cases
    kth_score, kth_docID = most_relevant_docs[-1]
    while scores:
        next_score, next_docID = heapq.heappop(scores)
        if next_score == kth_score:
            most_relevant_docs.append((next_score, next_docID))
        else:
            break
    return sort_relevant_docs(most_relevant_docs)
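
# A minimal standalone demo of the negate-and-heapify top-k trick used
# above, with hypothetical data (`k` and `sort_relevant_docs` come from the
# surrounding module in the original):
import heapq

doc_scores = {'7': 1.5, '2': 3.0, '9': 3.0, '4': 0.5}
scores = [(-score, docID) for docID, score in doc_scores.items()]
heapq.heapify(scores)
top2 = [heapq.heappop(scores) for _ in range(2)]
# top2 == [(-3.0, '2'), (-3.0, '9')] -- ties broken by least docID first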
def streaming_median(stream):
    ''' Given an unlimited stream of numbers, compute the
    exact median of the values:

    >>> streaming_median(range(101))
    50

    :param stream: The stream of incoming numbers
    :returns: The median of the given stream
    '''
    max_heap, min_heap = [], []   # min_heap is a min-heap holding the large values;
    for el in stream:             # max_heap holds the small values, negated to act as a max-heap
        if min_heap and el > min_heap[0]:
            heapq.heappush(min_heap, el)
        else:
            heapq.heappush(max_heap, -el)
        if abs(len(min_heap) - len(max_heap)) > 1:
            if len(min_heap) > len(max_heap):
                heapq.heappush(max_heap, -heapq.heappop(min_heap))
            else:
                heapq.heappush(min_heap, -heapq.heappop(max_heap))
    if len(min_heap) > len(max_heap):
        return min_heap[0]
    elif len(max_heap) > len(min_heap):
        return -max_heap[0]
    else:
        return (min_heap[0] - max_heap[0]) / 2.0
def dijkstra(self, node_from):
    graph = type(self)(copy.deepcopy(self.nodes), copy.deepcopy(self.vertex_map))
    # Set the distance for the start node to zero
    graph.set_node_distance(node_from, 0)
    # Put tuple pair into the priority queue
    unvisited_queue = graph.get_nodes_unvisited()
    heapq.heapify(unvisited_queue)
    while len(unvisited_queue):
        distance, node_current = heapq.heappop(unvisited_queue)
        graph.set_node_visited(node_current)
        del distance
        for node_adj in graph.vertex_map.get(node_current, {}).keys():
            if graph.vertex_map[node_current][node_adj].get('visited', False):
                continue
            new_dist = (graph.get_node_distance(node_current) +
                        graph.vertex_map[node_current][node_adj]['distance'])
            if new_dist < graph.get_node_distance(node_adj):
                graph.set_node_distance(node_adj, new_dist)
                graph.set_node_previous(node_adj, node_current)
        # Rebuild heap
        # 1. Pop every item
        while len(unvisited_queue):
            heapq.heappop(unvisited_queue)
        # 2. Put all vertices not visited into the queue
        unvisited_queue = graph.get_nodes_unvisited()
        heapq.heapify(unvisited_queue)
    return graph
def _merge_file_iters(self, iters, key=lambda x: x):
    heap = []
    for iter, root in iters:
        try:
            heap.append((next(iter), iter, root))
        except StopIteration:
            pass
    heapq.heapify(heap)
    while heap:
        value, iter, root = heapq.heappop(heap)
        try:
            heapq.heappush(heap, (next(iter), iter, root))
        except StopIteration:
            pass
        items = [(value, root)]
        while heap:
            cand, iter, root = heapq.heappop(heap)
            if key(cand) == key(value):
                items.append((cand, root))
                try:
                    heapq.heappush(heap, (next(iter), iter, root))
                except StopIteration:
                    pass
            else:
                heapq.heappush(heap, (cand, iter, root))
                break
        yield items
def pop(self):
    return_item = heappop(self)[1]
    while return_item not in self._item_set:
        return_item = heappop(self)[1]
    self._item_set.remove(return_item)
    self.sweep()
    return return_item
def findMedian(self):
    """
    Returns the median of current data stream
    :rtype: float
    """
    # If odd total len, then the value on lower heap
    # If even total len, avg of both heaps.
    res = []
    if len(self.lower) > 0:
        res.append(-heapq.heappop(self.lower))
    if len(self.higher) > 0:
        res.append(heapq.heappop(self.higher))
    if len(res) > 0:
        heapq.heappush(self.lower, -res[0])
    if len(res) > 1:
        heapq.heappush(self.higher, res[1])
    if len(res) > 0:
        if len(self.lower) > len(self.higher):
            return float(res[0])
        else:
            return (res[0] + res[1]) / float(2)
    return -0.0

# Your MedianFinder object will be instantiated and called as such:
# mf = MedianFinder()
# mf.addNum(1)
# mf.findMedian()
def minMeetingRooms(self, intervals):
    """
    :type intervals: List[Interval]
    :rtype: int

    Use a min-heap keyed on end time, storing (interval.end, interval):
    while the heap is non-empty and the next interval.start >= the earliest
    end in the heap, no extra room is needed, so pop; otherwise a new room
    is required.
    """
    import heapq
    if not intervals:
        return 0
    intervals = sorted(intervals, key=lambda x: x.start)
    queue = []
    max_len = 1
    for i in xrange(len(intervals)):
        while queue and intervals[i].start >= queue[0][0]:
            heapq.heappop(queue)
        heapq.heappush(queue, (intervals[i].end, intervals[i]))  # the heap compares by end
        max_len = max(max_len, len(queue))
    return max_len
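
# A hedged usage sketch for minMeetingRooms above, assuming the LeetCode
# Interval object with .start/.end (a namedtuple stands in here; the
# original targets Python 2, hence xrange):
#
#   from collections import namedtuple
#   Interval = namedtuple('Interval', ['start', 'end'])
#   Solution().minMeetingRooms(
#       [Interval(0, 30), Interval(5, 10), Interval(15, 20)])  # -> 2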
def print_topK():
    print "size of priority q " + str(len(topKQ))
    # popping from the heap while iterating over it (as the original did)
    # skips entries; drain it with a while loop instead
    while topKQ:
        (val, key) = heapq.heappop(topKQ)
        print key, "st \t", int(val)
def run(self):
    headers = bam_headers(self.input)
    writer = Bam_writer(self.prefix + '.bam', headers)
    chrom = None
    endpoints = []
    total = 0
    discarded = 0
    for record in Bam_reader(self.input):
        if record.flag & FLAG_UNMAPPED:
            continue
        total += 1
        if chrom != record.rname:
            chrom = record.rname
            endpoints = []
        while endpoints and endpoints[0] <= record.pos:
            heapq.heappop(endpoints)
        if len(endpoints) >= self.depth:
            discarded += 1
            continue
        heapq.heappush(endpoints, record.pos + record.length)
        record.flag &= ~(FLAG_PAIRED | FLAG_PROPER | FLAG_MATE_UNMAPPED |
                         FLAG_MATE_REVERSE | FLAG_FIRST | FLAG_SECOND)
        record.mrnm = '*'
        record.mpos = 0
        writer.write(record)
    writer.close()
    self.log.log('Discarded %s alignments out of %s.\n' %
                 (grace.pretty_number(discarded), grace.pretty_number(total)))
def _query(self, p, k=1, eps=0, distance_upper_bound=np.inf):
    if not self.root:
        return []

    dist_to_ctr = self.distance(p, self.data[self.root.ctr_idx])
    min_distance = max(0.0, dist_to_ctr - self.root.radius)

    # priority queue for chasing nodes
    # entries are:
    #   minimum distance between the node area and the target
    #   distance between node center and target
    #   the node
    q = [(min_distance, dist_to_ctr, self.root)]

    # priority queue for the nearest neighbors
    # furthest known neighbor first
    # entries are (-distance, i)
    neighbors = []

    if eps == 0:
        epsfac = 1
    else:
        epsfac = 1 / (1 + eps)

    while q:
        min_distance, dist_to_ctr, node = heappop(q)
        if isinstance(node, CoverTree._LeafNode):
            # brute-force
            for i in node.idx:
                if i == node.ctr_idx:
                    d = dist_to_ctr
                else:
                    d = self.distance(p, self.data[i])
                if d <= distance_upper_bound:
                    if len(neighbors) == k:
                        heappop(neighbors)
                    heappush(neighbors, (-d, i))
                    if len(neighbors) == k:
                        distance_upper_bound = -neighbors[0][0]
        else:
            # we don't push nodes that are too far onto the queue at
            # all, but since the distance_upper_bound decreases, we
            # might get here even if the cell's too far
            if min_distance > distance_upper_bound * epsfac:
                # since this is the nearest node, we're done, bail out
                break
            for child in node.children:
                if child.ctr_idx == node.ctr_idx:
                    d = dist_to_ctr
                else:
                    d = self.distance(p, self.data[child.ctr_idx])
                min_distance = max(0.0, d - child.radius)
                # child might be too far, if so, don't bother pushing it
                if min_distance <= distance_upper_bound * epsfac:
                    heappush(q, (min_distance, d, child))

    return sorted([(-d, i) for (d, i) in neighbors])
def _merge_terms(self, iterlist):
    # Merge-sorts terms coming from a list of term iterators.

    # Create a map so we can look up each iterator by its id() value
    itermap = {}
    for it in iterlist:
        itermap[id(it)] = it

    # Fill in the list with the head term from each iterator.
    current = []
    for it in iterlist:
        term = next(it)
        current.append((term, id(it)))
    heapify(current)

    # Number of active iterators
    active = len(current)
    while active:
        # Peek at the first term in the sorted list
        term = current[0][0]

        # Re-iterate on all items in the list that have that term
        while active and current[0][0] == term:
            it = itermap[current[0][1]]
            try:
                nextterm = next(it)
                heapreplace(current, (nextterm, id(it)))
            except StopIteration:
                heappop(current)
                active -= 1

        # Yield the term
        yield term
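
# A small standalone demo of the same heapify/heapreplace/heappop merge
# pattern on plain sorted lists (heapq.merge does this out of the box; the
# manual version mirrors _merge_terms above and assumes non-empty inputs):
import heapq

def merge_sorted(*lists):
    iters = [iter(l) for l in lists]
    heap = [(next(it), i) for i, it in enumerate(iters)]  # head of each list
    heapq.heapify(heap)
    while heap:
        value, i = heap[0]
        yield value
        try:
            heapq.heapreplace(heap, (next(iters[i]), i))
        except StopIteration:
            heapq.heappop(heap)

# list(merge_sorted([1, 4, 7], [2, 5], [3, 6])) == [1, 2, 3, 4, 5, 6, 7]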
def isPossible(self, nums: List[int]) -> bool:
    """
    Heap of (end, length) intervals, shortest runs popped first among equal
    ends; a run may only be dropped once its length >= 3; split when a
    duplicate value arrives.
    """
    h = []
    for n in nums:
        while h and h[0].end + 1 < n:
            itvl = heapq.heappop(h)
            if itvl.length < 3:
                return False
        if not h:
            heapq.heappush(h, Interval(n, 1))
        elif h[0].end + 1 == n:
            itvl = heapq.heappop(h)
            heapq.heappush(h, Interval(n, itvl.length + 1))
        else:  # n == end
            heapq.heappush(h, Interval(n, 1))
    for itvl in h:
        if itvl.length < 3:
            return False
    return True
def pump(self, *arg, **kwarg):
    """
    Pumps all events that are applicable to be called.
    """
    # TODO: Reconfigure to use a number provided in pump method arguments
    while self.event_queue and self.event_queue[0][0] <= video.get_time():
        heapq.heappop(self.event_queue)[1](*arg, **kwarg)
def heap_merged(items_lists, combiner):
    heap = []

    def pushback(it):
        try:
            k, v = it.next()
            # put i before value, so do not compare the value
            heapq.heappush(heap, (k, i, v))
        except StopIteration:
            pass

    for i, it in enumerate(items_lists):
        if isinstance(it, list):
            items_lists[i] = it = (k for k in it)
        pushback(it)
    if not heap:
        return

    last_key, i, last_value = heapq.heappop(heap)
    pushback(items_lists[i])

    while heap:
        k, i, v = heapq.heappop(heap)
        if k != last_key:
            yield last_key, last_value
            last_key, last_value = k, v
        else:
            last_value = combiner(last_value, v)
        pushback(items_lists[i])

    yield last_key, last_value
def reorganizeString(self, S):
    """
    :type S: str
    :rtype: str
    """
    if len(S) <= 1:
        return S
    l, c = len(S), [[-item[1], item[0]] for item in Counter(S).items()] + [[1, '#']]
    heapq.heapify(c)
    rls = ''
    for i in range(l):
        t1 = heapq.heappop(c)
        t2 = heapq.heappop(c)
        if rls == '' or t1[1] != rls[-1]:
            rls += t1[1]
            t1[0] += 1
        else:
            if t2[1] == '#':
                return ''
            rls += t2[1]
            t2[0] += 1
        if t1[0] != 0:
            heapq.heappush(c, t1)
        if t2[0] != 0:
            heapq.heappush(c, t2)
    return rls
# https://www.acmicpc.net/problem/2211
# Name: Network Recovery
# Number: 2211
# Difficulty: Gold II
# Category: graph theory, Dijkstra
import sys
from heapq import heappush, heappop

input = sys.stdin.readline
INF = sys.maxsize

N, M = map(int, input().split())
graph = [[] for _ in range(N + 1)]
for _ in range(M):
    A, B, C = map(int, input().split())
    graph[A].append((C, B))
    graph[B].append((C, A))

ans = [''] * (N + 1)
dp = [INF] * (N + 1)
dp[1] = 0
heap = []
heappush(heap, [0, 1])
while heap:
    dist, nx = heappop(heap)
    for n_dist, x in graph[nx]:
        temp = dist + n_dist
        if dp[x] > temp:
            dp[x] = temp
            ans[x] = str(nx) + ' ' + str(x)
            heappush(heap, [temp, x])

print(N - 1)
print('\n'.join(ans[2:]))
n = int(input())
a = [-int(j) for j in input().split()]
a.sort()
import heapq
l = [a[0]]
heapq.heapify(l)
ans = 0
for i in a[1:]:
    heapq.heappush(l, i)
    heapq.heappush(l, i)
    p = heapq.heappop(l)
    ans -= p
print(ans)
queue[0]  # return 2 examine the first element

## Stack
stack = []
size = len(stack)
stack.append(1)
stack.append(2)
stack.pop()  # return 2

## Priority Queue - https://docs.python.org/2/library/heapq.html
import heapq
heap = []
heapq.heappush(heap, 1)  # heap [1]
heapq.heappush(heap, 3)  # heap [1,3]
heapq.heappush(heap, 2)  # heap [1,3,2]
heapq.heappop(heap)  # 1
heapq.heappop(heap)  # 2
heapq.heappop(heap)  # 3

## Deque
import collections
dq = collections.deque()
dq.append(1)  # [1]
dq.appendleft(2)  # [2,1]
dq.popleft()  # [1]

## Set
s = set({1,3,4,5,6,5})  # {1, 3, 4, 5, 6}

## List
def encode(file):
    """Encodes file into a smaller representation using huffman encoding.
    The encoded file is written to the filename "compressed.txt"

    Params:
        file - str
    """
    # open the file
    try:
        s = open(file).read()
    except IOError:
        print("Error opening file: {}".format(file))
        exit(1)

    # build dict of character: frequency
    freqs = make_dict(s)

    # build the Huffman Tree.
    # 1. Make leaf nodes, and store the letter and frequency.
    # 2. Build a tree by taking the 2 nodes with the smallest frequency, and
    #    creating a parent with the letters concatenated and the frequencies
    #    summed.
    # I used a minheap for log(N) access to the 2 nodes with the smallest
    # frequency each time I pop from the heap.
    minheap = []
    for k, v in freqs.items():
        heappush(minheap, TreeNode(k, v))

    # ASSUMPTION: since the bitmap is of size 100 x 100 it must be separated
    # by newlines \n, so there are at least 2 distinct characters in any
    # input file.
    parent_node = None  # save the current parent node
    # This is step 2 - building the tree by creating parent nodes and
    # keeping track of child pointers.
    while True:
        node1, node2 = heappop(minheap), heappop(minheap)
        parent_node = TreeNode(node1.letter + node2.letter,
                               node1.freq + node2.freq)
        parent_node.left = node1
        parent_node.right = node2
        if parent_node.freq == len(s):
            break
        else:
            heappush(minheap, parent_node)
    # We know the tree is built when a parent node's frequency equals the
    # length of the original string.

    # 3. Recursively iterate through the tree, creating encodings for all of
    #    the leaf nodes. Since there is a unique path from the root to any
    #    particular node, each encoding will be unique.
    leaves = []
    tree_dfs(parent_node, "", leaves)
    encoding_to_char = {}
    for leaf in leaves:
        assert len(leaf.letter) == 1
        assert leaf.encoding not in encoding_to_char  # because the encodings should have been unique.
        encoding_to_char[leaf.encoding] = leaf.letter

    # 4. Create the dictionary that maps characters in our original file to
    #    their encodings.
    char_to_encoding = {v: k for k, v in encoding_to_char.items()}

    # Representation of the encoded file. First we convert it into a binary
    # string and then hex to save space.
    int_str = "".join([char_to_encoding[char] for char in s])
    hex_str = hex(int(int_str, 2))
    # hack to fix in the future - This way of getting the hex form does not
    # preserve leading zeros, leading to occasional inaccuracy in converting
    # back and forth between binary and hex.
    num_zeros_to_add = 0
    bin_str = bin(int(hex_str, 16))[2:]
    while bin_str != int_str:
        bin_str = '0' + bin_str
        num_zeros_to_add += 1

    # 5. Write out the compressed file. We will need both the string and
    #    dictionary to decode at a later time.
    try:
        os.remove('compressed.txt')
    except OSError:
        pass
    with open('compressed.txt', 'wb') as file:
        pickle.dump((hex_str, char_to_encoding, num_zeros_to_add), file)

    # A sanity check to make sure that the mapping and strings were written
    # correctly.
    with open('compressed.txt', 'rb') as file:
        hs, mapping, add_zero = pickle.load(file)
    assert mapping == char_to_encoding, "houston we have a problem"
    assert bin_str == int_str, "{} {}".format(bin_str, int_str)
def run(self, run_to, node_state_grid=None, plot_each_transition=False,
        plotter=None):
    """Run the model forward for a specified period of time.

    Parameters
    ----------
    run_to : float
        Time to run to, starting from self.current_time
    node_state_grid : 1D array of ints (x number of nodes) (optional)
        Node states (if given, replaces model's current node state grid)
    plot_each_transition : bool (optional)
        Option to display the grid after each transition
    plotter : CAPlotter object (optional)
        Needed if caller wants to plot after every transition
    """
    if node_state_grid is not None:
        self.set_node_state_grid(node_state_grid)

    # Continue until we've run out of either time or events
    while self.current_time < run_to and self.event_queue:
        if _DEBUG:
            print('Current Time = ', self.current_time)

        # Is there an event scheduled to occur within this run?
        if self.event_queue[0].time <= run_to:
            # If so, pick the next transition event from the event queue
            ev = heappop(self.event_queue)
            if _DEBUG:
                print('Event:', ev.time, ev.link, ev.xn_to)

            # ... and execute the transition
            if _USE_CYTHON:
                do_transition(ev, self.next_update,
                              self.grid.node_at_link_tail,
                              self.grid.node_at_link_head,
                              self.node_state, self.link_state, self.san,
                              self.link_orientation, self.propid,
                              self.prop_data, self.n_xn, self.xn_to,
                              self.xn_rate, self.grid.links_at_node,
                              self.grid.active_link_dirs_at_node,
                              self.num_node_states, self.num_node_states_sq,
                              self.prop_reset_value, self.xn_propswap,
                              self.xn_prop_update_fn, self.bnd_lnk,
                              self.event_queue, self, plot_each_transition,
                              plotter)
            else:
                self.do_transition(ev, self.current_time,
                                   plot_each_transition, plotter)

            # Update current time
            self.current_time = ev.time

        # If there is no event scheduled for this span of time, simply
        # advance current_time to the end of the current run period.
        else:
            self.current_time = run_to
def pop(self):
    (priority, _, item) = heapq.heappop(self.heap)
    return (item, priority)
import heapq
import sys
from collections import defaultdict  # needed for dic below

input = sys.stdin.readline

T, M, K = map(int, input().split())
KKK = list(map(int, input().split()))
arr = [list(map(int, input().split())) for i in range(M)]
dic = defaultdict(list)
for i, j, cost in arr:
    dic[i].append((cost, j))
    dic[j].append((cost, i))
Q = []
for n in KKK:
    Q.extend(dic[n][:])
heapq.heapify(Q)
visit = [False] * (T + 1)
for n in KKK:
    visit[n] = True
res = 0
size = K
while Q:
    if size == T:
        break
    cost, next = heapq.heappop(Q)
    if visit[next] == False:
        visit[next] = True
        res += cost
        for i in dic[next]:
            heapq.heappush(Q, i)
        size += 1
print(res)
import heapq
import sys

N = int(input())
colors = list(map(int, input().strip().split()))
heapq.heapify(colors)
if len(colors) % 2 == 0:
    # heappush, not list.append: appending after heapify breaks the heap invariant
    heapq.heappush(colors, 0)
ans = 0
while len(colors) > 2:
    a = heapq.heappop(colors)
    b = heapq.heappop(colors)
    c = heapq.heappop(colors)
    heapq.heappush(colors, a + b + c)
    ans += a + b + c
print(ans)
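
# Worked check for the snippet above: with colors [1, 2, 3, 4] the even
# count triggers the extra 0, the merges are (0+1+2)=3 and (3+3+4)=10,
# and the printed total is 13.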
def crawl_next(inlink, frontier, time_record, url_crawled, outlink, url_response):
    next_frontier = set()
    frontier_size = len(frontier)
    url_crawled_this_wave = set()
    while frontier:
        try:
            (priority, url, parent) = heapq.heappop(frontier)
        except Exception as ex:
            continue
        # 1. Check if this is a good url, if not, skip
        if url in url_crawled:
            if url in url_crawled_this_wave:
                inlink[url].append(parent)
            continue
        if check_media(url):
            continue
        try:
            # response = request.urlopen(url, timeout=5)
            restriction = {"Accept-Language": "en-US,en;q=0.5"}
            response = requests.get(url, headers=restriction, timeout=5)
            url_component = parse.urlparse(url)
        except Exception as ex:
            continue
        domain = url_component.netloc.lower()
        scheme = url_component.scheme.lower()
        if domain.split('.')[1] == 'wikipedia' and domain.split('.')[0] != 'en':
            continue
        if len(domain.split('.')) > 2:
            if domain.split('.')[2] == 'gov':
                continue
        if not check_robot(scheme, domain, url):
            continue
        if not check_response(response):
            continue
        print("Crawled: " + str(len(url_crawled)) + " URL. Currently on: " + url)
        # 2. Get the next urls
        # next_urls = get_next_urls(url, response)
        if domain not in time_record:
            time_record[domain] = time.time()
            next_urls = get_next_urls(url, response)
        else:
            if (time.time() - time_record[domain]) < 1:
                # sleep for the *remaining* part of the 1-second politeness
                # window (the original slept for the elapsed time instead)
                wait = 1 - (time.time() - time_record[domain])
                print("Sleep for " + str(wait))
                time.sleep(wait)
                next_urls = get_next_urls(url, response)
            else:
                time_record[domain] = time.time()
                next_urls = get_next_urls(url, response)
        # 3. Saving information of the urls
        url_response[url] = response
        url_crawled.add(url)
        if parent != None:
            inlink[url].append(parent)
        else:
            if url in inlink:
                pass
            else:
                inlink[url] = []
        for next_url in next_urls:
            next_frontier.add((next_url, url))
            if next_url in url_crawled:
                if str(url) != str(next_url):
                    inlink[next_url].append(url)
        next_urls = list(next_urls)
        outlink[url] = next_urls
        # 4. Dump the web content when necessary.
        if (len(url_crawled) % 10) == 0:
            dump_inlink(inlink)
            dump_url_crawled(url_crawled)
            dump_frontier(frontier)
            dump_content(outlink, url_response)
        # Only process top 60% scored urls.
        if (len(frontier) / frontier_size) < 0.4 and len(frontier) > 6:
            break
        # 5. Stop and exit when we crawled enough urls.
        if len(url_crawled) >= 40300:
            dump_inlink(inlink)
            print("Finish crawling")
            exit()
    return next_frontier
import sys, heapq

sys.stdin = open('백준1937. 욕심쟁이 판다.txt', 'r')

d = [(0, 1), (0, -1), (1, 0), (-1, 0)]
n = int(input())
ground = [[0] * n for _ in range(n)]
matrix = [list(map(int, sys.stdin.readline().split())) for _ in range(n)]
heap = []
for r in range(n):
    for c in range(n):
        heapq.heappush(heap, (matrix[r][c], r, c))

answer = 0
while heap:
    bamboo, r, c = heapq.heappop(heap)
    for idx in range(4):
        nr = r + d[idx][0]
        nc = c + d[idx][1]
        if 0 <= nr < n and 0 <= nc < n:
            if matrix[r][c] > matrix[nr][nc]:
                ground[r][c] = max(ground[r][c], ground[nr][nc])
    ground[r][c] += 1
    answer = max(answer, ground[r][c])
print(answer)

# DFS
# import sys
# input = sys.stdin.readline
# sys.setrecursionlimit(10**6)
def find_path(vertex1, vertex2, external_path):
    # pdb.set_trace()
    priority_queue = []
    visited = set()
    v1 = PriorityQueueVertex(vertex1, 0, None)
    v1_key = Graph.compute_heuristic(vertex1, vertex2)
    heapq.heappush(priority_queue, (v1_key, v1))
    visited.add(vertex1)
    # start search
    path = []
    destination_vertex = vertex2
    occupied_edges = set()
    while priority_queue[0][1] != destination_vertex:
        # pdb.set_trace()
        current_v = priority_queue[0][1]
        current_cost = current_v.cost
        visited.add(current_v.vertex)
        # update edges that are currently occupied
        for e in occupied_edges:
            e.cost = Edge.UNOCCUPIED_COST
        # set occupied edges
        occupied_edges = set()
        for i in range(1, 3):
            try:
                occupied_vertex = external_path[current_v.depth + i]
            except IndexError:
                continue
            if i == 1:
                for e in occupied_vertex.edges:
                    if e is not None:
                        other_edge = occupied_vertex.get_opposite_edge(e)
                        if other_edge is not None:
                            occupied_edges.add(other_edge)
            if i == 2:
                # update wait edge
                e = occupied_vertex.edges[4]
                occupied_edges.add(e)
        for e in occupied_edges:
            e.cost = Edge.OCCUPIED_COST
        wait_flag = False
        for e in current_v.vertex.edges:
            if e is not None and e.cost == Edge.OCCUPIED_COST:
                wait_flag = True
                break
        for e in current_v.vertex.edges:
            if e is None:
                continue
            # calculate cost and heuristics, push onto PQ
            v = e.destination
            if wait_flag:
                if v in visited and v != current_v.vertex:
                    continue
            else:
                if v in visited:
                    continue
            v_cost = current_cost + e.cost
            v_key = (v_cost + Graph.compute_heuristic(v, destination_vertex))
            v_priority = PriorityQueueVertex(v, v_cost, current_v)
            heapq.heappush(priority_queue, (v_key, v_priority))
        if wait_flag:
            # there might be duplicate entries in the priority queue.
            # Only remove the smallest one
            holder = [i for i in priority_queue if i[1] == current_v]
            if len(holder) > 1:
                holder.sort()
                v_key = holder[0][0]
                priority_queue.remove((v_key, current_v))
            else:
                priority_queue = [i for i in priority_queue if i[1] != current_v]
            heapq.heapify(priority_queue)
        # pdb.set_trace()
    # Now get path
    v_top = heapq.heappop(priority_queue)
    v = v_top[1]
    path = []
    while True:
        path.append(v.vertex)
        v = v.previous_vertex
        if v is None:
            break
    path.reverse()
    return path
import heapq

for sadsf in range(int(input())):
    n, m = map(int, input().split())
    m -= 1
    h = []
    heapq.heappush(h, -n)
    d = {n: 1}
    j = n
    while m > 0:
        # print(d, h)
        if d[j] > m:
            break
        m -= d[j]
        if (j - 1) // 2 not in d:
            d[(j - 1) // 2] = 0
            heapq.heappush(h, -((j - 1) // 2))
        d[(j - 1) // 2] += d[j]
        if j // 2 not in d:
            d[j // 2] = 0
            heapq.heappush(h, -(j // 2))
        d[j // 2] += d[j]
        del d[j]
        heapq.heappop(h)
        j = -h[0]
    print('Case #{}: {} {}'.format(sadsf + 1, j // 2, (j - 1) // 2))
def main(_):
    # initial outer file
    WordIndex = Word_Index()
    EntTagIndex = Ent_Tag_Index()
    TriTagIndex = Tri_Tag_Index()
    PtagIndex = Ptag_Index()
    Index = [EntTagIndex, TriTagIndex]
    object_tag_Index = Index[FLAGS.object_id]
    FLAGS.num_word = len(WordIndex.word2idex)
    FLAGS.num_postag = len(PtagIndex.ptag2id)
    FLAGS.num_class = object_tag_Index.num_class
    df_test = pd.read_csv('datas/corpus/testdata_id.txt', sep='#',
                          skip_blank_lines=False, dtype={'len': np.int32})
    df_train = pd.read_csv('datas/corpus/traindata_id.txt', sep='#',
                           skip_blank_lines=False, dtype={'len': np.int32})
    # eval_size = int(len(df_train) * FLAGS.dev_rate)
    # df_eval = df_train.iloc[-eval_size:]
    # df_train = df_train.iloc[:-eval_size]
    print('trainsize' + str(len(df_train)))
    train_data_itor = DataItor(df_train, False)
    # eval_data_itor = DataItor(df_eval, False)
    test_data_itor = DataItor(df_test, False)
    FLAGS.check_every_point = int(train_data_itor.size / FLAGS.batch_size)
    if os.path.exists(FLAGS.pretrain_file) and FLAGS.use_pretrain_embedding:
        print('initial!!!!!!!!!!')
        # initial_embedding(WordIndex.word2idex)
        FLAGS.pretrain_emb = initial_embedding(WordIndex.word2idex)
    else:
        FLAGS.pretrain_emb = None
    myconfig = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=myconfig) as sess:
        model = ST_Model(FLAGS)
        sess.run(tf.global_variables_initializer())
        # xidx_eval, tidx_eval, lens_eval = eval_data_itor.next_all()
        xidx_test, tidx_test, lens_test = test_data_itor.next_all()
        # print('eval data size %f' % len(tidx_eval[0]))
        # _, id2label = helper.loadMap(FLAGS.label2id_path)
        saver = tf.train.Saver(max_to_keep=2)
        previous_best_valid_f1_score = 0
        previous_best_epoch = -1
        bad_count = 0
        heap_source, heap_trigger, heap_target = [], [], []
        while train_data_itor.epoch < FLAGS.num_epochs:
            x_train_batch, y_train_batch = train_data_itor.next_batch(FLAGS.batch_size)
            train_step, train_loss = model.train_model(
                sess, x_train_batch, y_train_batch[FLAGS.object_id])
            if train_data_itor.batch_time % FLAGS.check_every_point == 0:
                print("current batch_time: %d" % (train_data_itor.batch_time))
                # y_eval_pred, eval_loss = model.inference_for_single(sess, xidx_eval, tidx_eval[FLAGS.object_id])
                y_test_pred, test_loss = model.inference_for_single(
                    sess, xidx_test, tidx_test[FLAGS.object_id])
                if FLAGS.object_id == 0:
                    # precison, recall, f1_eval = helper.evaluate_ent(xidx_eval[0], tidx_eval[FLAGS.object_id],
                    #     y_eval_pred, id2word=WordIndex.id2word, id2label=EntTagIndex.id2tag, seq_lens=lens_eval)
                    # print('evalution on eval data, target_eval_loss:%.4f,precison:%.4f,recall:%.4f,fscore:%.4f' % (
                    #     eval_loss, precison, recall, f1_eval))
                    precison1, recall1, f1_test = helper.evaluate_ent(
                        xidx_test[0], tidx_test[FLAGS.object_id], y_test_pred,
                        id2word=WordIndex.id2word, id2label=EntTagIndex.id2tag,
                        seq_lens=lens_test)
                    print('evalution on test data, target_eval_loss:%.3f,precison:%.4f,recall:%.4f,fscore:%.4f'
                          % (test_loss, precison1, recall1, f1_test))
                else:
                    # precison, recall, f1_eval = helper.evaluate_tri(xidx_eval[0], tidx_eval[FLAGS.object_id],
                    #     y_eval_pred, id2word=WordIndex.id2word, seq_lens=lens_eval)
                    # print('evalution on eval data, target_eval_loss:%.3f,precison:%.4f,recall:%.4f,fscore:%.4f' % (
                    #     eval_loss, precison, recall, f1_eval))
                    precison1, recall1, f1_test = helper.evaluate_tri(
                        xidx_test[0], tidx_test[FLAGS.object_id], y_test_pred,
                        id2word=WordIndex.id2word, seq_lens=lens_test)
                    print('evalution on test data, target_eval_loss:%.3f,precison:%.4f,recall:%.4f,fscore:%.4f'
                          % (test_loss, precison1, recall1, f1_test))
                # early stop
                if len(heap_target) < 5:
                    heapq.heappush(heap_target, (f1_test, (precison1, recall1, f1_test)))
                else:
                    if f1_test > heap_target[0][0]:
                        heapq.heappop(heap_target)
                        heapq.heappush(heap_target, (f1_test, (precison1, recall1, f1_test)))
                if f1_test > previous_best_valid_f1_score:
                    helper.store_stl_result(
                        xidx_test[0], tidx_test[FLAGS.object_id], y_test_pred,
                        id2word=WordIndex.id2word,
                        id2label=Index[FLAGS.object_id].id2tag,
                        seq_lens=lens_test, type_id=FLAGS.object_id)
                    previous_best_valid_f1_score = f1_test
                    bad_count = 0
                else:
                    bad_count += 1
                if bad_count >= FLAGS.patients:
                    print('early stop!')
                    break
        print('Train Finished!!')
        show_result(heap_target)
    pass
def get(self):
    return heapq.heappop(self.elements)[1]
import heapq
from collections import defaultdict

# (data, V and start_point are assumed to come from earlier input parsing)
graph = defaultdict(list)
for u, v, w in data:
    graph[u].append([v, w])

# perform
INF = 3000000
result = [INF for _ in range(V)]
heap = []
heapq.heappush(heap, (0, start_point))
result[start_point - 1] = 0
while heap:
    cur_weight, cur_point = heapq.heappop(heap)
    if cur_weight > result[cur_point - 1]:
        print(cur_weight, result[cur_point - 1])
        continue
    for pos, wei in graph[cur_point]:
        tmp = cur_weight + wei
        if tmp < result[pos - 1]:
            result[pos - 1] = tmp
            heapq.heappush(heap, (tmp, pos))

# output
for r in result:
    if r == INF:
        print("INF")
    else:
        print(r)
def get(self):
    # Pop and return the smallest item from the heap
    return heapq.heappop(self.elements)[1]
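
# A hedged sketch of the matching put() for the getter above, assuming
# self.elements is a plain list of (priority, item) pairs (illustrative
# only; the rest of the class is not shown):

def put(self, item, priority):
    # Push the item tagged with its priority onto the heap
    heapq.heappush(self.elements, (priority, item))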
def pop(self):
    return heappop(self.q)
# 21.02.02
import sys
import heapq
input = sys.stdin.readline

N = int(input())
rooms, query = [], []
for _ in range(N):
    query.append(list(map(int, input().strip().split())))
query = sorted(query, key=lambda x: x[0])
for i in range(N):
    # print("i :", i, "rooms :", rooms, "Query :", query)
    if len(rooms) != 0 and rooms[0] <= query[i][0]:
        heapq.heappop(rooms)
    heapq.heappush(rooms, query[i][1])
print(len(rooms))
import heapq

n, k1, k2 = map(int, input().split())
a = list(map(int, input().split()))[:n]
b = list(map(int, input().split()))[:n]
total = k1 + k2
arr = []
for i in range(n):
    arr.append((-1) * abs(a[i] - b[i]))
heapq.heapify(arr)
while total > 0:
    item = (-1) * (heapq.heappop(arr))
    heapq.heappush(arr, (-1) * abs(item - 1))
    total -= 1
ans = 0
for i in range(len(arr)):
    ans += (arr[i]) ** 2
print(ans)
def pop_smallest(self):
    """ Remove and return the smallest item from the queue """
    smallest = heapq.heappop(self.heap)
    del self.set[smallest]
    return smallest
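
# A hedged sketch of the insertion side for pop_smallest above, assuming
# self.heap is the heapq list and self.set is the dict-like membership
# structure the original deletes from (names beyond those are guesses):

def push(self, item):
    """ Add an item to the queue, keeping heap and membership in sync """
    heapq.heappush(self.heap, item)
    self.set[item] = True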
import heapq
# heapq.heappush()
# heapq.heappop()
# Functions for using a plain list as a heap structure.

N = int(input())
heap = list()
result = list()

for _ in range(N):
    data = int(input())
    if data == 0:
        if heap:
            result.append(heapq.heappop(heap))
        else:
            result.append(0)
    else:
        heapq.heappush(heap, data)

for data in result:
    print(data)
def start(self):
    if not logging.getLogger().handlers:
        # The IOLoop catches and logs exceptions, so it's
        # important that log output be visible.  However, python's
        # default behavior for non-root loggers (prior to python
        # 3.2) is to print an unhelpful "no handlers could be
        # found" message rather than the actual log entry, so we
        # must explicitly configure logging if we've made it this
        # far without anything.
        logging.basicConfig()
    if self._stopped:
        self._stopped = False
        return
    old_current = getattr(IOLoop._current, "instance", None)
    IOLoop._current.instance = self
    self._thread_ident = thread.get_ident()
    self._running = True

    # signal.set_wakeup_fd closes a race condition in event loops:
    # a signal may arrive at the beginning of select/poll/etc
    # before it goes into its interruptible sleep, so the signal
    # will be consumed without waking the select.  The solution is
    # for the (C, synchronous) signal handler to write to a pipe,
    # which will then be seen by select.
    #
    # In python's signal handling semantics, this only matters on the
    # main thread (fortunately, set_wakeup_fd only works on the main
    # thread and will raise a ValueError otherwise).
    #
    # If someone has already set a wakeup fd, we don't want to
    # disturb it.  This is an issue for twisted, which does its
    # SIGCHILD processing in response to its own wakeup fd being
    # written to.  As long as the wakeup fd is registered on the IOLoop,
    # the loop will still wake up and everything should work.
    old_wakeup_fd = None
    if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
        # requires python 2.6+, unix.  set_wakeup_fd exists but crashes
        # the python process on windows.
        try:
            old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
            if old_wakeup_fd != -1:
                # Already set, restore previous value.  This is a little racy,
                # but there's no clean get_wakeup_fd and in real use the
                # IOLoop is just started once at the beginning.
                signal.set_wakeup_fd(old_wakeup_fd)
                old_wakeup_fd = None
        except ValueError:  # non-main thread
            pass

    while True:
        poll_timeout = _POLL_TIMEOUT

        # Prevent IO event starvation by delaying new callbacks
        # to the next iteration of the event loop.
        with self._callback_lock:
            callbacks = self._callbacks
            self._callbacks = []
        for callback in callbacks:
            self._run_callback(callback)
        # Closures may be holding on to a lot of memory, so allow
        # them to be freed before we go into our poll wait.
        callbacks = callback = None

        if self._timeouts:
            now = self.time()
            while self._timeouts:
                if self._timeouts[0].callback is None:
                    # the timeout was cancelled
                    heapq.heappop(self._timeouts)
                    self._cancellations -= 1
                elif self._timeouts[0].deadline <= now:
                    timeout = heapq.heappop(self._timeouts)
                    self._run_callback(timeout.callback)
                    del timeout
                else:
                    seconds = self._timeouts[0].deadline - now
                    poll_timeout = min(seconds, poll_timeout)
                    break
            if (self._cancellations > 512
                    and self._cancellations > (len(self._timeouts) >> 1)):
                # Clean up the timeout queue when it gets large and it's
                # more than half cancellations.
                self._cancellations = 0
                self._timeouts = [x for x in self._timeouts
                                  if x.callback is not None]
                heapq.heapify(self._timeouts)

        if self._callbacks:
            # If any callbacks or timeouts called add_callback,
            # we don't want to wait in poll() before we run them.
            poll_timeout = 0.0

        if not self._running:
            break

        if self._blocking_signal_threshold is not None:
            # clear alarm so it doesn't fire while poll is waiting for
            # events.
            signal.setitimer(signal.ITIMER_REAL, 0, 0)

        try:
            event_pairs = self._impl.poll(poll_timeout)
        except Exception as e:
            # Depending on python version and IOLoop implementation,
            # different exception types may be thrown and there are
            # two ways EINTR might be signaled:
            # * e.errno == errno.EINTR
            # * e.args is like (errno.EINTR, 'Interrupted system call')
            if (getattr(e, 'errno', None) == errno.EINTR or
                    (isinstance(getattr(e, 'args', None), tuple) and
                     len(e.args) == 2 and e.args[0] == errno.EINTR)):
                continue
            else:
                raise

        if self._blocking_signal_threshold is not None:
            signal.setitimer(signal.ITIMER_REAL,
                             self._blocking_signal_threshold, 0)

        # Pop one fd at a time from the set of pending fds and run
        # its handler. Since that handler may perform actions on
        # other file descriptors, there may be reentrant calls to
        # this IOLoop that update self._events
        self._events.update(event_pairs)
        while self._events:
            fd, events = self._events.popitem()
            try:
                if self._handlers.has_key(fd):
                    self._handlers[fd](fd, events)
            except (OSError, IOError) as e:
                if e.args[0] == errno.EPIPE:
                    # Happens when the client closes the connection
                    pass
                else:
                    self.handle_callback_exception(self._handlers.get(fd))
            except Exception:
                self.handle_callback_exception(self._handlers.get(fd))

    # reset the stopped flag so another start/stop pair can be issued
    self._stopped = False
    if self._blocking_signal_threshold is not None:
        signal.setitimer(signal.ITIMER_REAL, 0, 0)
    IOLoop._current.instance = old_current
    if old_wakeup_fd is not None:
        signal.set_wakeup_fd(old_wakeup_fd)
def irv_margin(election, winner=None, elim_order=None, ub=None, trace=False,
               timeout=1e75):
    '''Compute the exact IRV margin of the election.

    @type election: L{Election}
    @param election: The election.
    @type winner: number
    @param winner: The winner of the election. If C{None}, it will be
        computed.
    @type elim_order: list
    @param elim_order: The elimination order. If C{None}, it will be
        computed.
    @type ub: number
    @param ub: An upper bound on the margin. If C{None}, it will be computed
        via L{irv_ub}.
    @type trace: boolean
    @param trace: If C{True}, print a trace of the computation.
    @type timeout: number
    @param timeout: The amount of time to spend computing the margin.
    @rtype: number
    @return: Returns the IRV margin.
    '''
    if ilp is None:
        raise Exception('Cannot compute irv_margin() because no optimizer '
                        'library could be loaded.')
    then = time.time()
    if winner is None or elim_order is None:
        winner, _, elim_order = irv(election)
    if ub is None:
        ub = irv_ub(election, winner=winner, elim_order=elim_order)

    # We can eliminate candidates who _must_ be eliminated first. The
    # reasoning is that for them to not be eliminated, more votes would have
    # to be shifted than the upper bound on the margin.
    root = election.profile.deepcopy()
    while True:
        esets = []
        _elimination_set(root, all_sets=esets, rules=SF_RCV_RULES)
        max_eset = set()
        candidates = root.children()
        for eset in esets:
            if len(eset) <= len(max_eset):
                continue
            lb = min(root.get_child(c).value for c in candidates - eset) - \
                sum(root.get_child(c).value for c in eset)
            if lb > ub:
                max_eset = eset
        if not max_eset:
            break
        for c in max_eset:
            if trace:
                print 'Eliminating candidate %d' % c
            root.eliminate(c)

    # This is the algorithm from Magrino et al., more or less
    candidates = root.children()
    k = len(candidates)
    tertiary = []
    elims = set((winner,))
    j = len(elim_order) - 1
    for i in range(1, k + 1):
        if i >= len(elims):
            elims = elims.union(elim_order[j])
            j -= 1
        tertiary.append(elims)
    if trace:
        print tertiary
    ranks = election.ranks
    fringe = []
    for c in candidates:
        if c == winner:
            continue
        if trace:
            print '\t', '(%d)' % c, 0, -1, 0
        heapq.heappush(fringe, (0, -1, 0, [c]))
    while True:
        d, s, t, elim = heapq.heappop(fringe)
        if trace:
            print tuple(elim), d, s, t
        if len(elim) == k:
            return d
        now = time.time()
        timeout -= now - then
        if timeout <= 0.0:
            return -1
        then = now
        elim_set = set(elim)
        prefixes = candidates - elim_set
        for c in prefixes:
            reduced = root.deepcopy()
            for e in prefixes - set((c,)):
                reduced.eliminate(e)
            new_elim = [c] + elim
            if trace:
                print '\t', tuple(new_elim),
                sys.stdout.flush()
            d = ilp.distance_to(reduced, ranks, new_elim, timeout)
            if d == -1:
                return -1
            if d <= ub:
                s = -len(new_elim)
                t = len(set(new_elim) - tertiary[len(new_elim) - 1])
                if trace:
                    print d, s, t
                heapq.heappush(fringe, (d, s, t, new_elim))
            elif trace:
                print d
def pop_state(
        self, next_states: List[Tuple[int, int, PuzzleState]]) -> PuzzleState:
    return heapq.heappop(next_states)[2]
def best_first_search(R, zf, K, optimal_detection, N, complexity, constellation):
    # Best first search is similar to a BFS search, except that the queue is
    # replaced by a priority queue.
    # R is the R matrix from the QR decomposition of H.
    # zf holds the (not yet demapped) zero-forcing detection result; K is the
    # number of branches kept per node (the K constellation points closest to
    # the soft value).
    # optimal_detection receives the final detection result.
    # N is the length of the transmitted vector.
    # complexity has 3 elements, recording the number of visited nodes, of
    # additions, and of multiplications.
    # constellation is the constellation, i.e. the domain of the vector
    # elements.  (Note: `z` below comes from the enclosing scope.)
    priority_queue = []

    # The first tree level corresponds to the soft value zf[N - 1, 0];
    # find the constellation points closest to it.
    select = [0] * len(constellation)  # select[i] == 1 iff point i is chosen
    lt = [[0] * 2 for i in range(len(constellation))]
    for i in range(len(constellation)):
        lt[i][0] = i  # index of the constellation point (needed after sorting)
        lt[i][1] = abs(zf[N - 1, 0] - constellation[i])  # distance to the soft value
    lt.sort(key=lambda cust: cust[1])  # sort by distance to the soft value; closest first
    # e.g. lt[0][0] == 3 means point 3 is closest to the soft value,
    # lt[1][0] == 1 means point 1 is the second closest, and so on.
    # Keep only the K points closest to the soft value:
    count = 0
    for i in range(len(constellation)):
        select[lt[i][0]] = 1
        count += 1
        if count >= K:
            break

    # Push the first-level nodes close to zf[N-1, 0] onto the priority queue.
    for i in range(len(constellation)):
        if select[i] == 0:  # skip points far from zf[N - 1, 0]
            continue
        vector = np.matrix([0j] * (N)).transpose()
        vector[N - 1, 0] = constellation[i]
        heapq.heappush(priority_queue,
                       (abs(z[N - 1, 0] - R[N - 1, N - 1] * constellation[i]) ** 2,
                        vector, 1))
        # The smaller the metric abs(z[N-1,0] - R[N-1,N-1]*constellation[i])**2,
        # the higher the priority; vector stores the partial decision, and
        # the trailing 1 says this vector has one decided element so far.
        complexity[0] += 1  # one more node visited
        # Multiplications are counted as additions as follows: for 64QAM the
        # symbol values are -7, -5, -3, -1, 1, 3, 5, 7, so
        # x1 -> 0 additions; x3 -> shift left one bit (x2) plus one addition;
        # x5 -> shift left two bits (x4) plus one addition;
        # x7 -> shift left three bits (x8) plus one subtraction = one addition.
        if constellation[i] != 1 and constellation[i] != -1:
            complexity[1] += 1  # one addition
        complexity[1] += 1  # the subtraction counts as an addition
        complexity[2] += 1  # the final squared magnitude is one multiplication

    # Now run the best first search (like BFS, but with a priority queue).
    while True:
        # Pop the highest-priority element.
        first_element = heapq.heappop(priority_queue)
        # first_element[0] holds the accumulated metric of the previous level,
        # first_element[1] the partial vector, and first_element[2] the number
        # of decided elements in that vector.
        if first_element[2] == N:  # search finished
            break

        # Expand the children of this node.  This level corresponds to the
        # soft value zf[N - 1 - first_element[2], 0]; again keep only the K
        # constellation points closest to it.
        select = [0] * len(constellation)
        lt = [[0] * 2 for i in range(len(constellation))]
        for i in range(len(constellation)):
            lt[i][0] = i
            lt[i][1] = abs(zf[N - 1 - first_element[2], 0] - constellation[i])
        lt.sort(key=lambda cust: cust[1])
        count = 0
        for i in range(len(constellation)):
            select[lt[i][0]] = 1
            count += 1
            if count >= K:
                break

        for i in range(len(constellation)):
            if select[i] == 0:  # skip points far from the soft value
                continue
            vector = np.matrix(first_element[1])
            vector[N - 1 - first_element[2], 0] = constellation[i]
            complexity[0] += 1  # one more node visited
            # Compute the accumulated metric.
            accumulated_metric = z[N - 1 - first_element[2], 0]
            for j in range(N - 1, N - 2 - first_element[2], -1):
                accumulated_metric -= R[N - 1 - first_element[2], j] * vector[j, 0]
                # one subtraction and one multiplication per step; the
                # multiplication is counted as an addition as explained above
                if vector[j, 0] != 1 and vector[j, 0] != -1:
                    complexity[1] += 1  # one addition
                complexity[1] += 1  # the subtraction counts as an addition
            accumulated_metric = accumulated_metric ** 2
            complexity[2] += 1  # squaring is one multiplication
            heapq.heappush(priority_queue,
                           (first_element[0] + accumulated_metric, vector,
                            first_element[2] + 1))
            # first_element[0] + accumulated_metric is one more addition
            complexity[1] += 1

    # first_element[1] is the answer; copy it into optimal_detection.
    for i in range(len(optimal_detection)):
        optimal_detection[i, 0] = first_element[1][i, 0]
import heapq, sys

N = int(input())
arr = []
for i in range(N):
    a = -int(sys.stdin.readline())
    if a == 0:
        if arr:
            print(-heapq.heappop(arr))
        else:
            print(0)
    else:
        heapq.heappush(arr, a)
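
# The negation above turns heapq's min-heap into a max-heap; a tiny
# standalone illustration of the trick:
import heapq

h = []
for v in [3, 1, 4]:
    heapq.heappush(h, -v)
print(-heapq.heappop(h))  # -> 4, the maximum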
def heappop(self):
    return heapq.heappop(self)
def sphere_decoding_bfs(R, zf, K, optimal_detection, K1, N, complexity,
                        constellation):
    # R is the R matrix from the QR decomposition of H; zf holds the (not yet
    # demapped) zero-forcing detection result; K is the number of branches
    # kept per node (the K constellation points closest to the soft value).
    # optimal_detection receives the final detection result; K1 is the
    # maximum number of nodes allowed per level of the BFS.
    # N is the length of the transmitted vector.
    # complexity has 3 elements, recording the number of visited nodes, of
    # additions, and of multiplications.
    # constellation is the constellation, i.e. the domain of the vector
    # elements.  (Note: `z` below comes from the enclosing scope.)
    # This BFS is special: when expanding a level, each node keeps at most
    # the K children closest to the soft value, and the next level holds at
    # most K1 nodes in total, so a priority queue decides which K1 survive.
    queue = [[], []]
    # Two queues: one pops elements while the other receives pushes; once
    # the popping queue is drained, the roles swap.
    current = 0  # index of the queue currently being popped

    # The first tree level corresponds to the soft value zf[N - 1, 0];
    # find the constellation points closest to it.
    select = [0] * len(constellation)  # select[i] == 1 iff point i is chosen
    lt = [[0] * 2 for i in range(len(constellation))]
    for i in range(len(constellation)):
        lt[i][0] = i  # index of the constellation point (needed after sorting)
        lt[i][1] = abs(zf[N - 1, 0] - constellation[i])  # distance to the soft value
    lt.sort(key=lambda cust: cust[1])  # sort by distance to the soft value; closest first
    # Keep only the K closest points:
    count = 0
    for i in range(len(constellation)):
        select[lt[i][0]] = 1
        count += 1
        if count >= K:
            break

    # Push the first-level nodes close to zf[N-1, 0] onto queue[current].
    for i in range(len(constellation)):
        if select[i] == 0:  # skip points far from the soft value
            continue
        vector = np.matrix([0j] * (N)).transpose()
        vector[N - 1, 0] = constellation[i]
        heapq.heappush(queue[current],
                       (abs(z[N - 1, 0] - R[N - 1, N - 1] * constellation[i]) ** 2,
                        vector, 1))
        # Smaller metric -> higher priority; vector stores the partial
        # decision; the trailing 1 says one element is decided so far.
        complexity[0] += 1  # one more node visited
        # Multiplications are counted as additions as follows: for 64QAM the
        # symbol values are -7, -5, -3, -1, 1, 3, 5, 7, so
        # x1 -> 0 additions; x3 -> shift left one bit (x2) plus one addition;
        # x5 -> shift left two bits (x4) plus one addition;
        # x7 -> shift left three bits (x8) plus one subtraction = one addition.
        if constellation[i] != 1 and constellation[i] != -1:
            complexity[1] += 1  # one addition
        complexity[1] += 1  # the subtraction counts as an addition
        complexity[2] += 1  # the squared magnitude is one multiplication

    # Pop elements from queue[current] into queue[(current + 1) % 2].
    while True:
        count = 0  # number of elements popped from queue[current] so far
        while True:
            # Pop from queue[current] and, for each popped node, push the
            # selected children of the next tree level onto
            # queue[(current + 1) % 2].  At most K1 nodes are popped.
            if len(queue[current]) == 0:  # queue drained
                break
            elif count >= K1:
                # K1 elements already popped from queue[current] (i.e. K1
                # nodes selected on this level); empty the queue and break.
                while True:
                    heapq.heappop(queue[current])
                    if len(queue[current]) == 0:
                        break
                break

            # Pop the highest-priority element.
            first_element = heapq.heappop(queue[current])
            # first_element[0] holds the accumulated metric of the previous
            # level, first_element[1] the partial vector, and
            # first_element[2] its number of decided elements.
            count += 1  # queue[current] popped one element

            if first_element[2] == N:  # the BFS reached the bottom of the tree
                break

            # Expand the children of this node, keeping only the K
            # constellation points closest to zf[N - 1 - first_element[2], 0].
            select = [0] * len(constellation)
            lt = [[0] * 2 for i in range(len(constellation))]
            for i in range(len(constellation)):
                lt[i][0] = i
                lt[i][1] = abs(zf[N - 1 - first_element[2], 0] - constellation[i])
            lt.sort(key=lambda cust: cust[1])
            count1 = 0
            for i in range(len(constellation)):
                select[lt[i][0]] = 1
                count1 += 1
                if count1 >= K:
                    break

            for i in range(len(constellation)):
                if select[i] == 0:  # skip points far from this level's soft value
                    continue
                vector = np.matrix(first_element[1])
                vector[N - 1 - first_element[2], 0] = constellation[i]
                complexity[0] += 1  # one more node visited
                # Compute the accumulated metric.
                accumulated_metric = z[N - 1 - first_element[2], 0]
                for j in range(N - 1, N - 2 - first_element[2], -1):
                    accumulated_metric -= R[N - 1 - first_element[2], j] * vector[j, 0]
                    # one subtraction and one multiplication per step; the
                    # multiplication is counted as an addition (see above)
                    if vector[j, 0] != 1 and vector[j, 0] != -1:
                        complexity[1] += 1  # one addition
                    complexity[1] += 1  # the subtraction counts as an addition
                accumulated_metric = accumulated_metric ** 2
                complexity[2] += 1  # squaring is one multiplication
                heapq.heappush(queue[(current + 1) % 2],
                               (first_element[0] + accumulated_metric, vector,
                                first_element[2] + 1))
                # first_element[0] + accumulated_metric is one more addition
                complexity[1] += 1

        current = (current + 1) % 2
        # The queue that was popping now pushes, and vice versa.
        if first_element[2] == N:
            break

    # first_element[1] is the answer; copy it into optimal_detection.
    for i in range(len(optimal_detection)):
        optimal_detection[i, 0] = first_element[1][i, 0]
# (n, graph, result and heapq_list are assumed to be initialized earlier in
#  the original script)
visited = [False] * (n + 1)
edge = [[3, 6], [4, 3], [5, 2], [1, 3], [1, 5], [3, 5], [5, 6]]
for a, b in edge:
    graph[a].append((a, b))
    graph[b].append((b, a))

result[1] = 0
for i in range(len(graph[1])):
    heapq.heappush(heapq_list, graph[1][i])
visited[1] = True

while heapq_list:
    print(heapq_list)
    from_, to_ = heapq.heappop(heapq_list)
    if visited[to_] == True:
        continue
    if result[from_] + 1 < result[to_]:
        result[to_] = result[from_] + 1
    for i in range(len(graph[to_])):
        heapq.heappush(heapq_list, graph[to_][i])
    visited[to_] = True

max_num = max(result)
count = 0
for num in result:
    if num == max_num:
        count += 1
print(result)
print(count)
def heap_sort(arr):
    heapq.heapify(arr)  # O(n) time
    result = []
    while len(arr) > 0:
        result.append(heapq.heappop(arr))  # each pop is O(log n) for re-heapification
    return result
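
# Quick usage check for heap_sort above (note that it consumes `arr`
# in place via heappop):
import heapq

print(heap_sort([5, 1, 4, 2, 3]))  # -> [1, 2, 3, 4, 5]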