def _load_annots():
    """
    Obtains the beat annotations of the current record using the gqrs
    application with a lowered detection threshold, using some properties of
    the input system module.
    """
    global ANNOTS
    annotator = 'gqrs01'
    refann = sortedcontainers.SortedList(IN._ANNOTS)
    ANNOTS = sortedcontainers.SortedList()
    rec = IN.get_record_name()
    leads = get_leads(rec)
    annotators = []
    # gqrs writes the same output file for every lead, so a single path
    # (and a single removal at the end) is enough.
    annpath = rec + '.' + annotator
    for lead in leads:
        # '-o' is the WFDB gqrs flag for the output annotator name.
        command = ['gqrs', '-r', rec, '-o', annotator,
                   '-m', '0.1', '-s', lead]
        subprocess.check_call(command)
        annotators.append(read_annotations(annpath))
    for ann in _merge_annots(annotators):
        idx = refann.bisect_left(ann)
        # First we check that the annotation is not in the base evidence
        # (guarding idx == 0 so refann[idx - 1] cannot wrap around).
        if ((idx == 0
             or ann.time - refann[idx - 1].time > C.RDEFLECTION_MIN_DIST)
                and (idx >= len(refann)
                     or refann[idx].time - ann.time > C.RDEFLECTION_MIN_DIST)):
            # And now we select the most promising one between all leads.
            ANNOTS.add(ann)
    os.remove(annpath)
def recover_all(self):
    """
    Recovers all observations from the ancestor interpretations, in order
    to have the full interpretation from the beginning of the process.
    Hypotheses in the focus of attention are also included in the
    *observations* attribute.
    """
    allobs = set(self.observations)
    interp = self.parent
    while interp is not None:
        allobs |= set(interp.observations)
        interp = interp.parent
    allobs.update(o for o, p in self.focus._lst
                  if p is not None and o is p.hypothesis)
    allobs = sortedcontainers.SortedList(allobs)
    # Duplicate removal (set only prevents same references, not equality)
    i = 0
    while i < len(allobs) - 1:
        obs = allobs[i]
        while allobs[i + 1] == obs:
            allobs.pop(i + 1)
            if i == len(allobs) - 1:
                break
        i += 1
    self.observations = sortedcontainers.SortedList(allobs)
def classify_by_path_basic(self, query, guard_hops=100, callback=None):
    visitedSet = set()
    candidates = sortedcontainers.SortedList()
    tmpResult = sortedcontainers.SortedList()
    entry = random.randint(0, len(self.nodes) - 1)
    candidates.add((self.dist(query, self.nodes[entry].value), entry))
    tmpResult.add((self.dist(query, self.nodes[entry].value), entry))
    hops = 0
    # distance from the first (entry) candidate
    closest_dist_ever = candidates[0][0]
    class_ = self.nodes[entry]._class
    while hops < guard_hops:
        hops += 1
        if len(candidates) == 0:
            break
        closest_dist, closest_id = candidates.pop(0)
        # Stop when the best remaining candidate is farther than the
        # closest node seen so far.
        if closest_dist_ever < closest_dist:
            break
        closest_dist_ever = closest_dist
        class_ = self.nodes[closest_id]._class
        for e in self.nodes[closest_id].neighbourhood:
            if e not in visitedSet:
                d = self.dist(query, self.nodes[e].value)
                visitedSet.add(e)
                candidates.add((d, e))
                tmpResult.add((d, e))
        if callback is not None:
            callback(self.nodes[closest_id].value, tmpResult)
    return class_
def kthSmallest(self, mat: List[List[int]], k: int) -> int:
    n, m = len(mat), len(mat[0])
    # s holds the smallest sums achievable with the rows processed so far.
    s = sortedcontainers.SortedList([mat[0][i] for i in range(m)])
    for row in mat[1:]:
        temp = sortedcontainers.SortedList([
            row[i] + s[j] for i in range(len(row)) for j in range(len(s))
        ])
        # Only the k smallest partial sums can contribute to the answer.
        s = temp[:k]
    return s[k - 1]
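# A quick smoke test for kthSmallest; it assumes the method above sits in a
# LeetCode-style `Solution` class, which this listing does not show.
def _demo_kth_smallest():
    mat = [[1, 3, 11], [2, 4, 6]]
    # Choose-one-per-row sums, sorted: 3, 5, 5, 7, 7, 9, 13, 15, 17.
    assert Solution().kthSmallest(mat, 5) == 7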
def multi_search(self, query, attempts=1, top=5):
    '''Implementation of `K-NNSearch` where the visitedSet is shared among
    the search attempts instead of being reset between them.'''
    # share visitedSet among searches (paper, section 4.2, paragraph 2)
    visitedSet = set()
    candidates = sortedcontainers.SortedList()
    result = sortedcontainers.SortedList()
    for i in range(attempts):
        closest, hops = self.search_nsw_basic(
            query, visitedSet, candidates, result, top=top)
        result.update(closest)
        # Deduplicate while keeping the (distance, node) ordering.
        result = sortedcontainers.SortedList(set(result))
    return [v for k, v in result[:top]]
def find132pattern(self, nums: List[int]) -> bool:
    if len(nums) < 3:
        return False
    # left holds the prefix (its minimum plays the role of the "1"),
    # right holds the suffix candidates for the "2".
    left = sortedcontainers.SortedList(nums[:1])
    right = sortedcontainers.SortedList(nums[1:])
    i = 1
    while i < len(nums) - 1:
        # nums[i] plays the role of the "3".
        right.discard(nums[i])
        l = right.bisect_right(left[0])
        r = right.bisect_left(nums[i])
        # Any suffix value strictly between left[0] and nums[i] completes
        # the 132 pattern.
        if l < r:
            return True
        left.add(nums[i])
        i += 1
    return False
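# Smoke test for find132pattern, assuming a LeetCode-style `Solution`
# wrapper class (not shown in this listing).
def _demo_find132pattern():
    assert Solution().find132pattern([3, 1, 4, 2])      # 1, 4, 2 is a 132 pattern
    assert not Solution().find132pattern([1, 2, 3, 4])  # strictly increasing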
def maximumSegmentSum(self, nums: List[int],
                      removeQueries: List[int]) -> List[int]:
    prefix = [0]
    for num in nums:
        prefix.append(prefix[-1] + num)
    # Min-heap of negated segment sums; 0 is a sentinel for "no segments
    # left". [-total, 0] already satisfies the heap invariant.
    heap = [-prefix[-1], 0]
    # Lazy deletion: counts of segment sums that are no longer alive.
    pending_delete = collections.Counter()
    segments = sortedcontainers.SortedList([(0, len(nums))])
    res = []
    for query in removeQueries:
        # Locate the segment [start, end) containing the removed index.
        split_point = segments.bisect_right((query, float("inf"))) - 1
        start, end = segments.pop(split_point)
        pending_delete[prefix[end] - prefix[start]] += 1
        start1, end1, start2, end2 = start, query, query + 1, end
        if start1 != end1:
            segments.add((start1, end1))
            heapq.heappush(heap, prefix[start1] - prefix[end1])
        if start2 != end2:
            segments.add((start2, end2))
            heapq.heappush(heap, prefix[start2] - prefix[end2])
        # Pop stale sums before reading the maximum.
        while pending_delete[-heap[0]]:
            pending_delete[-heapq.heappop(heap)] -= 1
        res.append(-heap[0])
    return res
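# Smoke test for maximumSegmentSum, assuming a LeetCode-style `Solution`
# wrapper class (not shown in this listing).
def _demo_maximum_segment_sum():
    # Removing indices 0, 3, 2, 4, 1 from [1, 2, 5, 6, 1] leaves maximum
    # segment sums 14 (2+5+6+1), 7 (2+5), 2, 2, and finally 0.
    assert (Solution().maximumSegmentSum([1, 2, 5, 6, 1], [0, 3, 2, 4, 1])
            == [14, 7, 2, 2, 0])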
def __init__(self, n: int, entries: List[List[int]]):
    # (shop, movie) -> price
    self.t_price = dict()
    # key = movie, value = SortedList of (price, shop) pairs
    self.t_valid = defaultdict(sortedcontainers.SortedList)
    # currently rented copies, kept sorted (maintained by methods not
    # shown in this snippet)
    self.t_rent = sortedcontainers.SortedList()
    for shop, movie, price in entries:
        self.t_price[(shop, movie)] = price
        self.t_valid[movie].add((price, shop))
def find_outline_points(boxes):
    result = list()
    end_points = list()
    for box in boxes:
        # Each box is (left_x, right_x, height). The height of a starting
        # point is stored negative so that, for equal x, higher start
        # points are processed before lower ones.
        end_points.append((box[0], -box[2]))
        end_points.append((box[1], box[2]))
    end_points = sorted(end_points, key=lambda i: (i[0], i[1]))
    height_list = sortedcontainers.SortedList()
    height_list.add(0)
    for pt in end_points:
        x, ht = pt
        if ht < 0:  # start point
            if abs(ht) > height_list[-1]:
                # Current height exceeds the running maximum, so this point
                # belongs to the outline.
                result.append([x, abs(ht)])
            height_list.add(abs(ht))  # update the multiset of open heights
        else:  # end point
            height_list.remove(ht)  # this box no longer covers x
            if ht > height_list[-1]:
                # The maximum dropped (a dip in the structure); record the
                # new outline height.
                result.append([x, height_list[-1]])
    return result
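# Smoke test for find_outline_points with the classic skyline example;
# boxes are (left_x, right_x, height) triples.
def _demo_find_outline_points():
    boxes = [(2, 9, 10), (3, 7, 15), (5, 12, 12)]
    assert find_outline_points(boxes) == [[2, 10], [3, 15], [7, 12], [12, 0]]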
def fill_pits2(points, outletID):
    """Conditions a mesh, in place, by removing pits.

    Inputs:
      points   | A dictionary of the form {ID, Point()}
      outletID | ID of the outlet

    This is a refactored, single-pass algorithm that leverages a sorted
    list.
    """
    # create a list of (ID, point) pairs sorted by elevation, from
    # smallest to largest
    elev = sortedcontainers.SortedList(points.items(),
                                       key=lambda id_p: id_p[1].coords[2])
    waterway = set([outletID, ])

    # loop over the elevation list from small to large
    while len(elev) != 0:
        current, current_p = elev.pop(0)
        if current in waterway:
            # still in the waterway
            waterway.update(current_p.neighbors)
        else:
            # not in the waterway: fill the pit
            ww_neighbors = [n for n in current_p.neighbors if n in waterway]
            if len(ww_neighbors) != 0:
                current_p.coords[2] = min(points[n].coords[2]
                                          for n in ww_neighbors)
            else:
                current_p.coords[2] = min(points[n].coords[2]
                                          for n in current_p.neighbors)
            # push back into the elevation list with the new, higher
            # elevation
            elev.add((current, current_p))
    return
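# A minimal smoke test for fill_pits2 under assumed data structures: the
# snippet only requires that a Point has mutable `coords` (x, y, z) and a
# set of neighbor IDs, so a tiny stand-in class is sketched here.
class _DemoPoint:
    def __init__(self, coords, neighbors):
        self.coords = list(coords)   # [x, y, z]; z is the elevation
        self.neighbors = set(neighbors)

def _demo_fill_pits2():
    # Node 2 (z=1) is a pit behind node 1 (z=5); node 0 is the outlet.
    pts = {0: _DemoPoint([0.0, 0.0, 0.0], {1}),
           1: _DemoPoint([1.0, 0.0, 5.0], {0, 2}),
           2: _DemoPoint([2.0, 0.0, 1.0], {1})}
    fill_pits2(pts, outletID=0)
    assert pts[2].coords[2] == 5.0  # the pit is raised to its spill level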
def busiestServers(self, k: int, arrival: List[int],
                   load: List[int]) -> List[int]:
    taskFinished = [0] * k
    busyServers = []  # min-heap of (finish_time, server_id)
    availableServers = sortedcontainers.SortedList(range(k))

    # Return -1 if no server is available.
    def getServer(index, arrival):
        while len(busyServers) > 0 and busyServers[0][0] <= arrival:
            server = heapq.heappop(busyServers)
            availableServers.add(server[1])
        if len(availableServers) == 0:
            return -1
        # Pick the first free server at or after index % k, wrapping around.
        current = availableServers.bisect_left(index % k)
        return availableServers[current % len(availableServers)]

    for i in range(len(arrival)):
        server = getServer(i, arrival[i])
        if server == -1:
            continue
        heapq.heappush(busyServers, (arrival[i] + load[i], server))
        taskFinished[server] += 1
        availableServers.remove(server)
    maxTaskFinished = max(taskFinished)
    return [i for i, v in enumerate(taskFinished) if v == maxTaskFinished]
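# Smoke test for busiestServers, assuming a LeetCode-style `Solution`
# wrapper class (not shown in this listing).
def _demo_busiest_servers():
    # Server 1 handles two requests; servers 0 and 2 handle one each, and
    # the last request is dropped because all servers are busy.
    assert Solution().busiestServers(3, [1, 2, 3, 4, 5],
                                     [5, 2, 3, 3, 3]) == [1]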
def mergeKLists(self, lists: List[ListNode]) -> ListNode:
    # skip None nodes
    lists = [node for node in lists if node]
    # keep the current head of every list in a queue sorted by value
    queue = sortedcontainers.SortedList(lists, key=lambda x: x.val)
    # dummy head node
    head = current = ListNode(0)
    while queue:
        # take the node with the lowest value
        node = queue.pop(0)
        # append it to the merged list and advance
        current.next = node
        current = current.next
        if node.next:
            # push the successor of the node we just consumed
            queue.add(node.next)
    return head.next
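# Smoke test for mergeKLists, assuming the standard LeetCode `ListNode`
# definition (val, next) and a `Solution` wrapper, neither shown here.
def _demo_merge_k_lists():
    def build(values):
        head = None
        for v in reversed(values):
            head = ListNode(v, head)
        return head
    merged = Solution().mergeKLists(
        [build([1, 4, 5]), build([1, 3, 4]), build([2, 6])])
    out = []
    while merged:
        out.append(merged.val)
        merged = merged.next
    assert out == [1, 1, 2, 3, 4, 4, 5, 6]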
def giveMeFullData(clusters, refTable):
    clusterText = ""
    sortedClusterLines = sortedcontainers.SortedList()
    clusterFirst = {}
    clusterLines = []
    i = 1
    for cluster in clusters:
        clusterName = "haplotig_" + str(i)
        contigs = set()
        for SNP in cluster:
            contigs.add(SNP.split(":")[0])
        contigSeparated = {}
        for contig in contigs:
            contigSeparated[contig] = set()
        for SNP in cluster:
            SNPContig = SNP.split(":")[0]
            contigSeparated[SNPContig].add(SNP.split(":")[1].split("=")[0])
        for contig, positions in contigSeparated.items():
            for position in positions:
                sortedClusterLines.add(
                    [int(position) + refTable[contig]["startPosition"],
                     clusterName, contig])
        i += 1
    # Renumber the haplotigs in the order of their first genomic position.
    for line in sortedClusterLines:
        if line[1] not in clusterFirst:
            clusterFirst[line[1]] = "haplotig_" + str(len(clusterFirst) + 1)
    for line in sortedClusterLines:
        clusterLines.append([clusterFirst[line[1]], line[0], line[2],
                             clusterFirst[line[1]].split("_")[1]])
    for line in clusterLines:
        clusterText += "\t".join([str(x) for x in line]) + "\n"
    return clusterText
def generateSimilarityIndex(cache):
    similarityIndex = sortedcontainers.SortedList()
    for simCluster in cache.keys():
        for combination, score in cache[simCluster]["similarities"].items():
            similarPositions = (cache[simCluster]["positions"]
                                & cache[combination]["positions"])
            similarityIndex.add(
                [score, len(similarPositions), simCluster, combination])
    return similarityIndex
def _discretize(self, slices):
    """
    Discretize the interval according to the provided slices.

    :param slices: iterable of time points used as bin boundaries
    :return: dict mapping each bin start to the duration it covers
    """
    to_return = {}
    sorted_slices = sortedcontainers.SortedList(slices)
    for period in self.periods():
        bins = list(sorted_slices.irange(period[0], period[1],
                                         inclusive=(True, True)))
        # Skip periods that contain no slice boundary at all.
        if not bins:
            continue
        for i in range(len(bins) - 1):
            to_return[bins[i]] = bins[i + 1] - bins[i]
        if period[0] < bins[0]:
            i_bin_before = sorted_slices.bisect_left(period[0])
            bin_before = sorted_slices[i_bin_before - 1]
            to_return[bin_before] = bins[0] - period[0]
        if period[1] > bins[-1]:
            to_return[bins[-1]] = period[1] - bins[-1]
    return to_return
def __init__(self, bus: VelbusProtocol, address: int,
             module_info: ModuleInfo = None,
             update_state_cb: Callable = lambda ops: None):
    """
    Initialize module handling for the given address.

    Subclasses do not need to call this __init__ method if they don't need
    its functionality. You will probably need to overload the self.state
    property in that case.

    :param bus: bus to communicate over to initialize the module further.
                DO NOT STORE THIS VALUE; use the bus provided in dispatch()
                to log the actual client making the requests (instead of
                the client triggering the instantiation of this module)
    :param address: address of the module
    :param module_info: module info received in the ModuleType message
    :param update_state_cb: callback to call with updates to state on the
                            client. Call signature: cb(ops: JsonPatch)
    :raises ValueError: if this class is unwilling to handle the given
                        address/module_info
    """
    self.address = address

    self._state = JsonPatchDict()
    self._state.callback.add(update_state_cb)
    # state is synced via WebSockets to JavaScript clients.
    # You can use it as a nested dict, but be aware that JavaScript
    # requires strings as keys!
    #
    # It is advisable to keep the structure of `state` and the
    # URL structure as similar as possible.

    self._process_queue = sortedcontainers.SortedList()
    self._next_delayed_call: typing.Optional[asyncio.TimerHandle] = None
def _characterize_signal(beg, end):
    """
    Characterizes the available signal in a specific time interval.

    Parameters
    ----------
    beg:
        Starting time point of the interval.
    end:
        Last time point of the interval.

    Returns
    -------
    out:
        Sorted list with one entry per lead. Each entry is a 5-tuple with
        the lead, the signal samples, the relevant points to represent the
        samples, the baseline level estimation for the fragment, and the
        quality of the fragment in that lead.
    """
    siginfo = sortedcontainers.SortedList(key=lambda v: -v[4])
    for lead in sig_buf.get_available_leads():
        baseline, quality = characterize_baseline(lead, beg, end)
        sig = sig_buf.get_signal_fragment(beg, end, lead=lead)[0]
        if len(sig) == 0:
            return None
        # We build a signal simplification taking at most 9 points, and
        # with a minimum relevant deviation of 50 uV.
        points = DP.arrayRDP(sig, ph2dg(0.05), 9)
        siginfo.add((lead, sig, points, baseline, quality))
    return siginfo
def benchmark_del(start, limit, times):
    """Benchmark the sorted list delitem method.

    Start and limit are an inclusive range of magnitudes. The load of the
    sorted list is the cube root of its size. Measurements are made by
    sampling performance at each "moment" of a sorted list while items are
    deleted from it. See `init_sorted_list` for how "moment" is used.
    """
    for exponent in range(start, limit + 1):
        timings = []
        count = 10 ** exponent
        # `load` is the balancing parameter from sortedcontainers v1;
        # v2 exposes it through SortedList._reset() instead.
        sl = sc.SortedList(load=int(count ** (1.0 / 3)))
        for attempt in range(times):
            subtimings = []
            for moment in range(-5, 0):
                indices = randindices(count)
                init_sorted_list(sl, count, moment)
                gc.collect()
                subtiming = delitem(sl, indices)
                subtimings.append(subtiming)
            timing = sum(subtimings)
            timings.append(timing)
        display('del', timings, count)
def numberOfWeakCharacters(self, properties: List[List[int]]) -> int:
    if not properties:
        return 0
    props = [tuple(x) for x in properties]
    props.sort()
    import sortedcontainers
    # Multiset of all defense values; during the sweep it shrinks to the
    # defenses of characters with strictly greater attack.
    sl = sortedcontainers.SortedList()
    for x, y in props:
        sl.add(y)
    j = 0
    n = len(props)
    ans = 0
    for x, y in props:
        # Drop every character with the same attack value before testing,
        # so sl only holds defenses of strictly greater attacks.
        while j < n and props[j][0] == x:
            sl.remove(props[j][1])
            j += 1
        if j == n:
            continue
        if sl[-1] > y:
            ans += 1
    return ans
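# Smoke test for numberOfWeakCharacters, assuming a LeetCode-style
# `Solution` wrapper class (not shown in this listing).
def _demo_weak_characters():
    # Only [4, 3] is weak: [10, 4] has both higher attack and higher defense.
    assert Solution().numberOfWeakCharacters([[1, 5], [10, 4], [4, 3]]) == 1
    assert Solution().numberOfWeakCharacters([[5, 5], [6, 3], [3, 6]]) == 0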
def countSmaller(self, nums: List[int]) -> List[int]:
    sorted_arr = sortedcontainers.SortedList()
    rst = []
    # Sweep from the right; sorted_arr holds the elements to the right of
    # the current one. Overall O(n log n).
    for num in nums[::-1]:
        # Number of elements to the right that are strictly smaller.
        idx = sorted_arr.bisect_left(num)  # O(log n)
        rst.append(idx)
        sorted_arr.add(num)  # O(log n)
    return rst[::-1]
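# Smoke test for countSmaller, assuming a LeetCode-style `Solution`
# wrapper class (not shown in this listing).
def _demo_count_smaller():
    # Right of 5: {2, 1} -> 2; right of 2: {1} -> 1; right of 6: {1} -> 1.
    assert Solution().countSmaller([5, 2, 6, 1]) == [2, 1, 1, 0]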
def get_features(interpretation):
    """
    Obtains the relevant classification features for every QRS in the
    interpretation.
    """
    result = collections.OrderedDict()
    rhythms = interpretation.get_observations(o.Cardiac_Rhythm)
    beats = sortedcontainers.SortedList(
        interpretation.get_observations(o.QRS))
    rrs = np.diff([b.time.start for b in beats])
    beatiter = iter(beats)
    obs = interpretation.observations
    qrs = None
    for rh in rhythms:
        qidx0 = bidx = 0
        if qrs is None:
            i = 0
            qrs = next(beatiter)
        else:
            i = 1
        while qrs.time.start <= rh.lateend:
            info = BeatInfo(qrs)
            info.rh = rh
            bidx = beats.index(qrs)
            # Remember the index of the first beat of this rhythm.
            qidx0 = qidx0 or bidx
            if bidx > 0:
                info.rr = rrs[bidx - 1]
            idx = obs.index(qrs)
            # Look for a P wave immediately preceding the QRS.
            pw = None
            if idx > 0 and isinstance(obs[idx - 1], o.PWave):
                pw = obs[idx - 1]
            elif idx > 1 and isinstance(obs[idx - 2], o.PWave):
                pw = obs[idx - 2]
            info.pwave = pw.amplitude if pw is not None else {}
            # Position of the beat within its rhythm context.
            if isinstance(rh, (o.Sinus_Rhythm, o.Bradycardia, o.Tachycardia)):
                info.pos = REGULAR
            elif isinstance(rh, o.Extrasystole):
                info.pos = ADVANCED if i == 1 else REGULAR
            elif isinstance(rh, o.Couplet):
                info.pos = ADVANCED if i in (1, 2) else REGULAR
            elif isinstance(rh, (o.RhythmBlock, o.Asystole)):
                info.pos = DELAYED
            elif isinstance(rh, o.Atrial_Fibrillation):
                info.pos = AFIB
            elif isinstance(rh, o.Bigeminy):
                info.pos = ADVANCED if i % 2 == 1 else REGULAR
            elif isinstance(rh, o.Trigeminy):
                info.pos = ADVANCED if i % 3 == 1 else REGULAR
            elif isinstance(rh, o.Ventricular_Flutter):
                info.pos = REGULAR
            result[qrs] = info
            qrs = next(beatiter, None)
            if qrs is None:
                break
            i += 1
        meanrr = (np.mean(rrs[qidx0:bidx]) if qidx0 < bidx
                  else rrs[bidx - 1])
        rh.meas = o.CycleMeasurements((meanrr, 0), (0, 0), (0, 0))
    return result
def __init__(self):
    super(Mixer, self).__init__()
    self.logger = logging.getLogger('mixer')
    self.logger.setLevel(logging.DEBUG)
    self.expect()
    self.captor = Captor(Volume.destination(), '*.report.json')
    self.reports = sortedcontainers.SortedList()
    self.lut = {}
    self.start()
def all_primes_less_than(n):
    primes = sc.SortedList()
    gp = get_primes(2)
    while True:
        next_prime = next(gp)
        if next_prime < n:
            # SortedList has no append(); items are inserted with add().
            primes.add(next_prime)
        else:
            break
    return primes
def all_primes_between(a, n):
    primes = sc.SortedList()
    gp = get_primes(a)
    while True:
        next_prime = next(gp)
        if next_prime < n:
            primes.add(next_prime)  # SortedList uses add(), not append()
        else:
            break
    return primes
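# The two helpers above depend on a `get_primes` generator that is not part
# of this listing. A minimal sketch, assuming it yields the primes >= its
# argument in increasing order (trial division; fine for small inputs):
def get_primes(start):
    candidate = max(start, 2)
    while True:
        if all(candidate % d for d in range(2, int(candidate ** 0.5) + 1)):
            yield candidate
        candidate += 1

# e.g. list(all_primes_less_than(12)) == [2, 3, 5, 7, 11]
#      list(all_primes_between(4, 12)) == [5, 7, 11]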
def reversePairs(self, nums: List[int]) -> int:
    import sortedcontainers
    brr = sortedcontainers.SortedList(nums)
    count = 0
    # For each position i (left to right), remove nums[i] so that brr only
    # holds the elements to its right, then count how many of them are
    # strictly less than half of nums[i]. Overall O(n log n).
    for i in range(len(nums)):
        brr.discard(nums[i])
        # (nums[i] + 1) // 2 == ceil(nums[i] / 2), so bisect_left counts
        # the values v with v < nums[i] / 2, i.e. nums[i] > 2 * v.
        k = brr.bisect_left((nums[i] + 1) // 2)
        count += k
    return count
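# Smoke test for reversePairs, assuming a LeetCode-style `Solution`
# wrapper class (not shown in this listing).
def _demo_reverse_pairs():
    # The important reverse pairs (nums[i] > 2 * nums[j], i < j) are at
    # index pairs (1, 4) and (3, 4).
    assert Solution().reversePairs([1, 3, 2, 3, 1]) == 2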
def __init__(self, data=None):
    # child nodes a.k.a. children
    self.children = sortedcontainers.SortedList()
    # if True, this node is the last element of a set in the set-trie
    self.flag_last = False
    # the user data (a set element) stored at this node. Must be a
    # hashable (i.e. hash(data) should work) and comparable/orderable
    # (i.e. data1 < data2 should work; see
    # https://wiki.python.org/moin/HowTo/Sorting/) type.
    self.data = data
def solve(N, K):
    sl = sortedcontainers.SortedList([N])
    for i in range(K):
        # Each person takes the largest gap and splits it into two halves
        # (floor division preserves the original Python 2 integer math).
        max_space = sl.pop()
        Ls = max_space // 2
        Rs = (max_space - 1) // 2
        sl.add(Ls)
        sl.add(Rs)
    return max(Ls, Rs), min(Ls, Rs)
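# Smoke test for solve (Code Jam 2017 "Bathroom Stalls" style): with N = 4
# and K = 2, the first person splits 4 into 2 and 1, and the second person
# splits the 2 into gaps of 1 and 0.
def _demo_solve():
    assert solve(4, 2) == (1, 0)
    assert solve(5, 2) == (1, 0)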
def __init__(self, points: Iterable[Point], **kwargs):
    """Store points and sort them by angle.

    Args:
        points (Iterable[Point]): Points for the track.
        kwargs: Additional information for the track.
    """
    self._points = list(points)
    self.start_point = self._points[0]
    self._points = sortedcontainers.SortedList(self._points,
                                               key=_sort_order)
    self.desc = kwargs
def arrayRDP(arr, epsilon=0.0, n=None):
    """
    This is a slightly modified version of the _aRDP function, that accepts
    as arguments the tolerance in the distance and the maximum number of
    points the algorithm can select.
    **Note:** The results of this algorithm should be identical to those of
    the _aRDP function if the *n* parameter is not specified. In that case,
    the performance is slightly worse, although the asymptotic complexity
    is the same. For this reason, this function internally delegates the
    solution to that function if the *n* parameter is missing.

    Parameters
    ----------
    arr:
        Array of values of consecutive points.
    epsilon:
        Maximum difference allowed in the simplification process.
    n:
        Maximum number of points of the resulting simplified array.

    Returns
    -------
    out:
        Array of indices of the selected points.
    """
    if n is None:
        return _aRDP(arr, epsilon)
    if epsilon <= 0.0:
        raise ValueError('Epsilon must be > 0.0')
    n = n or len(arr)
    if n < 3:
        return arr
    fragments = sortedcontainers.SortedDict()
    # We store the distances as negative values due to the default
    # (ascending) order of the sorted dict.
    dist, idx = max_vdist(arr, 0, len(arr) - 1)
    fragments[(-dist, idx)] = (0, len(arr) - 1)
    while len(fragments) < n - 1:
        # Pop the fragment with the largest distance (smallest key).
        (dist, idx), (first, last) = fragments.popitem(0)
        if -dist <= epsilon:
            # We have to put the item back to prevent its loss.
            fragments[(dist, idx)] = (first, last)
            break
        else:
            # We have to break the fragment at the selected index.
            dist, newidx = max_vdist(arr, first, idx)
            fragments[(-dist, newidx)] = (first, idx)
            dist, newidx = max_vdist(arr, idx, last)
            fragments[(-dist, newidx)] = (idx, last)
    # Now we get, in order, all the indices stored in the fragment values.
    result = sortedcontainers.SortedList(i[0] for i in fragments.values())
    result.add(len(arr) - 1)
    return np.array(result)
def __init__(self, data=None, value=None):
    # child nodes a.k.a. children
    self.children = sortedcontainers.SortedList()
    # if True, this node stores the last element of a key set
    self.flag_last = False
    # a member element of the key set. Must be a hashable (i.e. hash(data)
    # should work) and comparable/orderable (i.e. data1 < data2 should
    # work; see https://wiki.python.org/moin/HowTo/Sorting/) type.
    self.data = data
    # the value associated with the key set if flag_last == True,
    # otherwise None
    self.value = value