def __init__(self, max_error=0, bin_dir=None, output_invocation_dir=None):
    """
    :param max_error: maximum pixelwise error allowed. Use 0 for lossless
      compression.
    :param bin_dir: path to the directory that contains the jpeg binary.
      If it is None, options.external_bin_base_dir is used instead.
      If that is None as well, the directory of this script is used by default.
    :param output_invocation_dir: directory where invocation records are
      stored (forwarded to icompression.WrapperCodec).
    """
    # Fallback chain: explicit argument -> global option -> script directory.
    bin_dir = bin_dir if bin_dir is not None else options.external_bin_base_dir
    bin_dir = bin_dir if bin_dir is not None else os.path.dirname(__file__)
    assert os.path.isdir(bin_dir), f"Invalid binary dir {bin_dir}."
    jpeg_bin_path = os.path.join(bin_dir, "jpeg")
    param_dict = sortedcontainers.SortedDict()
    # "ls" appears to select the JPEG-LS mode and "m" the max error
    # bound -- confirm against the jpeg binary's CLI help.
    param_dict["ls"] = 0
    max_error = int(max_error)
    assert max_error >= 0, f"Invalid max_error {max_error}"
    param_dict["m"] = max_error
    icompression.WrapperCodec.__init__(
        self, compressor_path=jpeg_bin_path, decompressor_path=jpeg_bin_path,
        param_dict=param_dict, output_invocation_dir=output_invocation_dir)
def __init__(self, name, timeout, rand=0):
    """Initialize a named, timeout-bounded cache with an optional jitter."""
    self.name = name
    self.timeout = timeout
    self.rand = rand
    self.db = {}
    # Entries keyed by insertion time, kept sorted for expiry scans.
    self.timestamps = sortedcontainers.SortedDict()
    self.base_redis_key = f"RSSIT:{self.name}:"
def cli_bw_summary_attributes(self):
    """
    Build the row of attributes for the CLI bandwidth summary table.

    :return: list of [system id, ingress bandwidth str, egress bandwidth str,
        neighbor traffic percentage str, interface names, interface
        bandwidth strs, interface percentage strs].
    """
    nbr_percentage = self._traffic_percentage
    if nbr_percentage is None:
        # Bug fix: this branch previously assigned intf_percentage_str,
        # leaving nbr_percentage_str unbound when the percentage is unknown.
        nbr_percentage_str = ""
    else:
        nbr_percentage_str = "{:.1f}".format(nbr_percentage) + " %"
    interface_infos = sortedcontainers.SortedDict()
    for intf_name, intf in self._interfaces.items():
        intf_bandwidth_str = utils.value_str(intf.bandwidth, "Mbps", "Mbps")
        intf_percentage = self.interface_percentage(intf_name)
        if intf_percentage is None:
            # Bug fix: assign the string variable; previously the stale value
            # from the prior iteration (or an unbound name on the first
            # iteration) was stored instead.
            intf_percentage_str = ""
        else:
            intf_percentage_str = "{:.1f}".format(intf_percentage) + " %"
        interface_infos[intf_name] = (intf_bandwidth_str, intf_percentage_str)
    return [
        self._system_id,
        utils.value_str(self._ingress_bandwidth, "Mbps", "Mbps"),
        utils.value_str(self._egress_bandwidth, "Mbps", "Mbps"),
        nbr_percentage_str,
        list(interface_infos.keys()),
        [info[0] for info in interface_infos.values()],
        [info[1] for info in interface_infos.values()]
    ]
def __init__(self, rep_file_name: str, verbose: bool, start_paused: bool):
    """
    Load the replay in *rep_file_name* and advance past its first action.

    :param rep_file_name: path to the replay file.
    :param verbose: forwarded to the underlying RepGame.
    :param start_paused: whether playback should begin in the paused state.
    """
    self._rep_file = rep_file_name
    # Checkpoint games indexed by round number; currently unpopulated
    # (see the commented-out snapshot code below).
    self._games = sortedcontainers.SortedDict()
    self._info_callback = lambda _: None
    self._verbose = verbose
    # NOTE(review): passes the name-mangled self.__info_callback while
    # self._info_callback above is a no-op lambda -- confirm both exist
    # and which one the RepGame is meant to use.
    self._active_game = RepGame(verbose=verbose, info_callback=self.__info_callback)
    # _load_queue fills the active game's action queue and returns the
    # number of rounds in the replay.
    self._rounds = _load_queue(rep_file_name, self._active_game.queue)
    '''
    dist = self._rounds // 64
    if dist == 0:
        dist = 1
    p_pos = 0
    buf_p = RepGame(verbose=verbose, info_callback=lambda _: None)
    buf_q = RepGame(verbose=verbose, info_callback=lambda _: None)
    copy_rep_game(self._active_game, buf_p)
    while p_pos < self._rounds:
        copy_rep_game(buf_p, buf_q)
        self._games[p_pos] = buf_q
        p_pos += dist
        buf_p.set_round(p_pos)
    '''
    # Consume and run the first queued action against the game logic.
    self._active_game.queue.popleft()[1].run(self._active_game._logic)
    self._rep_thread = None
    # Cross-thread signal queue between playback thread and UI.
    self.sig = queue.Queue()
    self._ui_running = lambda: False
    self._start_paused = start_paused
def resolve_containers(self, declaration, category=None):
    """
    Tries to find containers from given category which contains an element
    or field of type according to provided declaration.  Results are
    memoised per (declaration, category) in self._containers_cache;
    a missing category is cached under the 'default' key.

    :param declaration: Declaration of an element or a field.
    :param category: Category name string.
    :return: List with Container objects.
    """
    decl_key = str(declaration)
    per_decl = self._containers_cache.setdefault(
        decl_key, sortedcontainers.SortedDict())
    cache_key = category if category else 'default'
    if cache_key not in per_decl:
        per_decl[cache_key] = self.__resolve_containers(declaration, category)
    return per_decl[cache_key]
def test():
    """Smoke-test SG_container: overwrite a key and inspect the key view."""
    container = SG_container()
    container[1] = sortedcontainers.SortedDict()
    # Overwrite the nested dict with a plain value under the same key.
    container[1] = 1
    keys = container.keys()
    if 1 not in keys:
        print('False')
    print(keys)
def parse_fam_files_content(content, name):
    """
    Parse the lines of a .fam pedigree file into a SortedDict keyed by
    sample id.  The first listed sample is treated as the proband and
    must be affected.

    :param content: iterable of lines (str or bytes).
    :param name: file name, used in error messages.
    :return: SortedDict {sample_id: sample record dict}.
    """
    samples = sortedcontainers.SortedDict()
    for raw in content:
        line = raw.decode('utf-8') if isinstance(raw, bytes) else raw
        if line.startswith('#') or not line:
            continue
        try:
            family, sample_id, father, mother, sex, affected = line.split()
            sample = {
                'family': family,
                'id': sample_id,
                'father': father,
                'mother': mother,
                'sex': int(sex),
                # Affected status is encoded as 2 in fam files.
                'affected': int(affected) == 2,
            }
            if not samples:
                # First record is the proband and must be affected.
                if not sample['affected']:
                    raiseException("First sample in {} is expected to be proband but is unaffected".
                                   format(name))
                sample['proband'] = True
            else:
                sample['proband'] = False
            samples[sample_id] = sample
        except Exception as e:
            raiseException('Could not parse fam file line: {}. {}'
                           .format(line.strip(), e))
    return samples
def node_element_str(element):
    """
    Render a node TIE element as a list of human-readable lines:
    name/level, optional flags and capabilities, then one section per
    neighbor (sorted by system id) with its level, cost, bandwidth and
    sorted link-id pairs.

    :param element: node TIE element object.
    :return: list of formatted strings.
    """
    lines = []
    if element.name is not None:
        lines.append("Name: " + str(element.name))
    lines.append("Level: " + str(element.level))
    if element.flags is not None:
        lines.append("Flags:")
        if element.flags.overload is not None:
            lines.append(" Overload: " + str(element.flags.overload))
    if element.capabilities is not None:
        lines.append("Capabilities:")
        if element.capabilities.flood_reduction is not None:
            lines.append(" Flood reduction: " + str(element.capabilities.flood_reduction))
        if element.capabilities.hierarchy_indications is not None:
            lines.append(" Leaf indications: " + hierarchy_indications_str(
                element.capabilities.hierarchy_indications))
    # Sort neighbors by system id for a deterministic display order.
    sorted_neighbors = sortedcontainers.SortedDict(element.neighbors)
    for system_id, neighbor in sorted_neighbors.items():
        lines.append("Neighbor: " + utils.system_id_str(system_id))
        lines.append(" Level: " + str(neighbor.level))
        if neighbor.cost is not None:
            lines.append(" Cost: " + str(neighbor.cost))
        if neighbor.bandwidth is not None:
            lines.append(" Bandwidth: " + bandwidth_str(neighbor.bandwidth))
        if neighbor.link_ids is not None:
            sorted_link_ids = sorted(neighbor.link_ids)
            for link_id_pair in sorted_link_ids:
                lines.append(" Link: " + link_id_pair_str(link_id_pair))
    return lines
def decode(self, s, classname=""): # print classname," XXXX\n" ,s if classname == "": dec = json.JSONDecoder.decode(self, s) ret = KeyFrameList() ret._countID = dec["_countID"] ret.posdict = self.decode(dec["posdict"], "posdict") ret.items = self.decode(dec["items"], "items") return ret elif classname == "posdict": return sortedcontainers.SortedDict( (float(k), int(v)) for k, v in six.iteritems(s)) elif classname == "items": return dict( (int(k), KeyFrame(v["pos"], self.decode(v["transformData"], "transformData"))) for k, v in six.iteritems(s)) elif classname == "transformData": t = TransformData() t.__dict__.update(s) t.quatRot = Quaternion(*t.quatRot) t.bounds = np.array(t.bounds) t.translate = np.array(t.translate) return t
def p_declaration_specifiers_list(p):
    """ declaration_specifiers_list : prefix_specifiers_list type_specifier suffix_specifiers_list
                                    | prefix_specifiers_list type_specifier
                                    | type_specifier suffix_specifiers_list
                                    | type_specifier
    """
    # NOTE: the docstring above is the PLY grammar rule and must stay intact.
    values = p[1:]
    result = sortedcontainers.SortedDict()
    if len(values) == 1:
        # Bare type specifier, no prefix/suffix specifiers.
        type_specifier, = values
        specifiers = None
    elif len(values) == 2 and isinstance(values[0], list):
        # prefix_specifiers_list (a list) precedes the type specifier.
        specifiers, type_specifier = values
    elif len(values) == 2 and isinstance(values[0], dict):
        # type specifier (a dict) precedes the suffix_specifiers_list.
        type_specifier, specifiers = values
    else:
        prefix_specifiers_list, type_specifier, suffix_specifiers_list = values
        specifiers = prefix_specifiers_list + suffix_specifiers_list
    result['type specifier'] = type_specifier
    if specifiers:
        # Partition into qualifiers (const/volatile/...) vs other specifiers.
        plain = []
        qualifiers = []
        for specifier in specifiers:
            if keyword_lookup(specifier) == 'TYPE_QUALIFIER':
                qualifiers.append(specifier)
            else:
                plain.append(specifier)
        result['specifiers'] = plain
        result['qualifiers'] = qualifiers
    p[0] = result
def zero():
    """
    Returns:
        Pauli: a new Pauli object initialized with no strings.
    """
    empty_terms = sortedcontainers.SortedDict()
    return Pauli(empty_terms)
def __getitem__(self, qubit):
    """Return this operator restricted to *qubit*: identity stays identity,
    otherwise a single-qubit Pauli with coefficient 1.0."""
    if self.char == 'I':
        return Pauli.I()
    operator = PauliOperator(qubit=qubit, char=self.char)
    term = PauliString((operator, ))
    return Pauli(sortedcontainers.SortedDict([(term, 1.0)]))
def prefixes_str(label_str, prefixes):
    """
    Render a prefix TIE as human-readable lines: one line per prefix
    (sorted), followed by indented attribute lines (metric, tags,
    monotonic clock).

    :param label_str: label prepended to every prefix line.
    :param prefixes: object whose .prefixes maps prefix -> attributes.
    :return: list of formatted strings.
    """
    lines = []
    # Sort prefixes for a deterministic display order.
    sorted_prefixes = sortedcontainers.SortedDict(prefixes.prefixes)
    for prefix, attributes in sorted_prefixes.items():
        line = label_str + ' ' + ip_prefix_str(prefix)
        lines.append(line)
        if attributes:
            # NOTE(review): the truthiness tests below also skip metric 0
            # and zero-valued timestamp fields -- confirm that is intended.
            if attributes.metric:
                line = " Metric: " + str(attributes.metric)
                lines.append(line)
            if attributes.tags:
                for tag in attributes.tags:
                    line = " Tag: " + str(tag)
                    lines.append(line)
            if attributes.monotonic_clock:
                line = " Monotonic-clock:"
                lines.append(line)
                if attributes.monotonic_clock.timestamp:
                    line = " Timestamp: "
                    line += str(attributes.monotonic_clock.timestamp.AS_sec)
                    if attributes.monotonic_clock.timestamp.AS_nsec:
                        # Nanoseconds rendered as a zero-padded fraction.
                        nsec_str = "{:06d}".format(
                            attributes.monotonic_clock.timestamp.AS_nsec)
                        line += "." + nsec_str
                    lines.append(line)
                if attributes.monotonic_clock.transactionid:
                    line = " Transaction-ID: " + str(
                        attributes.monotonic_clock.transactionid)
                    lines.append(line)
    return lines
def collect_event_groups(events):
    """
    Group events into time-overlapping clusters.

    Returns a SortedDict mapping a sorted tuple of begin/end times to the
    group of events covering those times; keys therefore sort by the
    group's earliest begin time.
    """
    groups = sortedcontainers.SortedDict()
    for event in events:
        begin = event.begin_time
        end = event.end_time
        # Fast path: an identical time span already has a group.
        if (begin, end) in groups:
            groups[(begin, end)].add(event)
            continue
        for key in groups.keys():
            key_begin, *_, key_end = key
            key_covers_event = key_begin <= begin and key_end >= end
            event_covers_key = begin <= key_begin and end >= key_end
            if key_covers_event or event_covers_key:
                # Merge into the existing group and widen its time key.
                group = groups.pop(key)
                group.add(event)
                merged_times = sorted(set(key) | {begin, end})
                groups[tuple(merged_times)] = group
                break
        else:
            # No suitable group found; start a new one.
            groups[(begin, end)] = make_group(event)
    return groups
def __init__(self, tsv_calls_file: str, format_string: str, samples: Collection) -> None:
    """
    Load candidate calls from a TSV file.

    :param tsv_calls_file: path to the TSV file of calls; '#' lines are
        skipped.
    :param format_string: optional per-sample marker template (formatted
        with sample=<name>); when given, a call is kept only if at least
        one sample's marker occurs in the line, and the matching sample
        set is stored.  When empty/None, the trailing columns are stored
        instead.
    :param samples: collection of sample names.
    """
    super().__init__()
    self.samples = samples
    if format_string:
        # Marker substring to look for, per sample.
        patterns = dict()
        patterns.update(
            {s: format_string.format(sample=s) for s in self.samples})
    else:
        patterns = None
    # Keyed by Pos(chromosome, position), kept sorted.
    self.candidate_calls = sortedcontainers.SortedDict()
    with open(tsv_calls_file) as calls:
        for call in calls:
            if call.startswith('#'):
                continue
            # NOTE: this local deliberately shadows the *samples* parameter;
            # it holds the samples whose marker matched this line.
            samples = set()
            if patterns:
                for sample in patterns:
                    if patterns[sample] in call:
                        samples.add(sample)
                if not samples:
                    continue
            data = call.split()
            key = Pos(data[0], int(data[1]))
            if samples:
                self.candidate_calls[key] = samples
            else:
                # No patterns configured: keep the remaining columns.
                self.candidate_calls[key] = data[2:]
def model_comment(comment_type, text=None, other=None):
    """
    Print model comment in the form accepted by the Klever error trace
    parser from VRP.  The comment carries a short json payload, e.g.:

    /* EMG_ACTION {"action": "REGISTER", "type": "DISPATCH_BEGIN", "comment": "Register TTY callbacks."} */

    :param comment_type: Comment type string.
    :param text: Sentence string with a comment itself.
    :param other: An existing dictionary to which the comment and type
        should be added.
    :return: Final comment string (look at the example above).
    """
    base = other if other and isinstance(other, dict) else dict()
    # SortedDict gives a deterministic key order in the JSON output.
    comment = sortedcontainers.SortedDict(base)
    comment['type'] = comment_type.upper()
    if text:
        comment['comment'] = text
    return "/* EMG_ACTION {} */".format(json.dumps(comment))
def __init__(self, max_error=0, bin_dir=None, data_format=None, output_invocation_dir=None):
    """
    :param max_error: maximum pixelwise error allowed. Use 0 for lossless
      compression.
    :param bin_dir: path to the directory that contains the Mcalic_enc_nl
      and Mcalic_dec_nl binaries. If it is None,
      options.external_bin_base_dir is used instead. If that is None as
      well, the directory of this script is used by default.
    :param data_format: bsq/bil format of the expected data. If None, the
      default (BSQ) is used.
    :param output_invocation_dir: directory where invocation records are
      stored (forwarded to icompression.WrapperCodec).
    """
    # Fallback chain: explicit argument -> global option -> script directory.
    bin_dir = bin_dir if bin_dir is not None else options.external_bin_base_dir
    bin_dir = bin_dir if bin_dir is not None else os.path.dirname(__file__)
    assert os.path.isdir(bin_dir), f"Invalid binary dir {bin_dir}."
    param_dict = sortedcontainers.SortedDict()
    max_error = int(max_error)
    assert max_error >= 0, f"Invalid max_error {max_error}"
    param_dict["max_error"] = max_error
    data_format = data_format if data_format is not None else self.default_format
    assert data_format in [self.FORMAT_BSQ, self.FORMAT_BIL
                           ], f"Invalid data format {data_format}"
    param_dict["data_format"] = data_format
    icompression.WrapperCodec.__init__(
        self, compressor_path=os.path.join(bin_dir, "Mcalic_enc_nl"),
        decompressor_path=os.path.join(bin_dir, "Mcalic_dec_nl"),
        param_dict=param_dict, output_invocation_dir=output_invocation_dir)
def convert_action(action):
    """Serialise an action into a SortedDict of its JSON-ready attributes."""
    d = sortedcontainers.SortedDict()
    if action.comment:
        d['comment'] = action.comment
    if action.condition:
        d['condition'] = action.condition
    if action.trace_relevant:
        d['trace relevant'] = action.trace_relevant
    if isinstance(action, Subprocess):
        d['process'] = CollectionEncoder._serialize_fsa(action.action)
    elif isinstance(action, (Dispatch, Receive)):
        d['parameters'] = action.parameters
        if len(action.peers) > 0:
            # Deduplicated, sorted peer process names.
            d['peers'] = sorted({str(p['process']) for p in action.peers})
    elif isinstance(action, Block):
        if action.statements:
            d["statements"] = action.statements
    return d
def parse_fam_file(fam_file):
    """
    Parse a .fam pedigree file into a SortedDict of sample records keyed
    by sample id.  If a "samples*" mapping file is found next to the fam
    file, it is used to map internal sample ids to external names.

    :param fam_file: path to the .fam file.
    :return: SortedDict {sample_id: dict with name/family/id/father/
        mother/sex/affected}.
    :raises Exception: on missing/ambiguous sample mappings or an
        unparsable fam line.
    """
    samples = sortedcontainers.SortedDict()
    case_dir, file_name = os.path.split(fam_file)
    case = file_name.split('.')[0]
    map_file = None
    maps = glob.glob(os.path.join(case_dir, "samples*"))
    if len(maps) == 1:
        map_file = maps[0]
    elif len(maps) > 1:
        # Several candidate mapping files: prefer one mentioning the case.
        maps = [m for m in maps if case in m]
        if len(maps) > 0:
            map_file = maps[0]
    sample_map = dict()
    if map_file:
        # Renamed from "input"/"id" to avoid shadowing builtins.
        with open(map_file) as map_fh:
            for line in map_fh:
                tokens = line.split()
                internal_names = [t for t in tokens if case in t.strip()]
                external_names = [t for t in tokens if "CP" in t.strip()]
                if not external_names:
                    external_names = tokens[0:1]
                if len(internal_names) == 1 and len(external_names) == 1:
                    sample_map[internal_names[0]] = external_names[0]
                elif len(internal_names) == 0:
                    raise Exception(
                        "Line {}: missing mapping for sample: {}*".format(
                            line, case))
                elif len(external_names) == 0:
                    raise Exception(
                        "Line {}: missing mapping for sample: CP*".format(
                            line))
                else:
                    raise Exception(
                        "Ambiguous sample mapping: {}".format(line))
    with open(fam_file) as fam_fh:
        for line in fam_fh:
            if line.startswith('#') or not line:
                continue
            try:
                family, sample_id, father, mother, sex, affected = line.split()
                sample = dict()
                sample["name"] = sample_map.get(sample_id, sample_id)
                sample['family'] = family
                sample['id'] = sample_id
                sample['father'] = father
                sample['mother'] = mother
                sample['sex'] = int(sex)
                sample['affected'] = (int(affected) == 2)
                samples[sample_id] = sample
            except Exception as exc:
                # Bug fix: was a bare "except:", which also swallowed
                # SystemExit/KeyboardInterrupt and discarded the cause.
                raise Exception('Could not parse fam file line: {}'.format(
                    line.strip())) from exc
    return samples
def check_families(self, f_metadata: str, vcf_file: str, first_stage_calls: str, families_subset: List):
    """
    Build a LocalCaller for every trio (proband) of every family that is
    fully present in the VCF; results go into self.local_callers.

    :param f_metadata: path to the family metadata (fam) files.
    :param vcf_file: VCF with genotypes for all samples.
    :param first_stage_calls: optional TSV of first-stage calls; requires
        a Bayesian caller that calculates posterior probabilities.
    :param families_subset: optional list restricting the families
        processed.
    """
    self.local_callers = sortedcontainers.SortedDict()
    families = parse_all_fam_files(f_metadata)
    if families_subset:
        families = {f: families[f] for f in families_subset}
    vcf_reader = pyvcf.Reader(filename=vcf_file)
    patterns = get_bam_patterns()
    bam_pattern = None
    if first_stage_calls:
        if not self.bayesian or not self.calculates_pp:
            # Bug fix: corrected the error message typo "cam" -> "can".
            raiseException("Second stage can only be Bayesian")
        fmt = "{sample}:PASSED"
        self.first_stage_reader = create_tsv_reader(
            families=families_subset, metadata=families,
            tsv_calls_file=first_stage_calls, format_string=fmt)
    if self.bayesian and self.calculates_pp:
        bam_pattern = os.path.join(self.path_to_bams, patterns[0])
    samples = set(vcf_reader.samples)
    shared_detector = None
    if not self.calculates_pp:
        shared_detector = DenovoDetector(self.path_to_library)
    for name in families:
        family = families[name]
        # Skip families with members missing from the VCF.
        if not all(s in samples for s in family):
            continue
        trios = get_trios_for_family(family)
        for proband in trios:
            trio = trios[proband]
            if self.bayesian:
                if self.calculates_pp:
                    list_of_bam_files = [
                        bam_pattern.format(sample=sample) for sample in trio
                    ]
                    if not all(
                            os.path.exists(bam) for bam in list_of_bam_files):
                        print("Skipping family {} because not all "
                              "bams are present".format(name))
                        continue
                    detector = DenovoDetector(self.path_to_library,
                                              trio_list=list_of_bam_files)
                else:
                    detector = shared_detector
            else:
                detector = None
            ab_caller = ABDenovoCaller()
            ab_caller.set_shared_context(self.variant_context)
            ab_caller.init(family, samples)
            local_caller = LocalCaller(proband, ab_caller, detector)
            self.local_callers[proband] = local_caller
    print("Total trios: {:d}".format(len(self.local_callers)))
    return
def __init__(self, address_family, kernel, log, log_id):
    """FIB for one address family, programming routes into the kernel."""
    self.address_family = address_family
    self.kernel = kernel
    # Route objects keyed (and ordered) by prefix.  The Route class is
    # shared between RIB and FIB, although not every Route attribute is
    # relevant for the FIB.
    self.routes = sortedcontainers.SortedDict()
    self._log = log
    self._log_id = log_id
def __init__(self):
    """Event schedule keyed by virtual time.

    Uses a SortedDict (see http://www.grantjenks.com/docs/sortedcontainers/)
    for efficient search, insertion and next-key lookup.  Keys are virtual
    times; values are lists of Timestamped's.
    """
    self.elements = sortedcontainers.SortedDict()
    self.vt = LATEST_VT
    self.rollback = False
    self.annihilation = False
def __init__(self, name, output_dir):
    """Create a CSV result file <output_dir>/<name>.csv with a header row."""
    self.times = sortedcontainers.SortedDict()
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    self.output = os.path.join(output_dir, "{}.csv".format(name))
    with open(self.output, 'w') as f:
        f.write("No,url,elapsed time,status_code" + "\n")
def __init__(self, data=None, default=None):
    """Time-indexed series with pluggable interpolation getters."""
    self._d = sortedcontainers.SortedDict(data)
    self.default = default
    # Dispatch table: interpolation-mode name -> getter method.
    self.getter_functions = dict(
        previous=self._get_previous,
        linear=self._get_linear_interpolate,
    )
def set_onsets_and_durs(self, onsets, durs):
    """Store onsets and durations, rebuilding the sorted onset->dur map."""
    if onsets is None or durs is None:
        mapping = {}
    else:
        mapping = dict(zip(onsets, durs))
    self._data = sortedcontainers.SortedDict(mapping)
    self._onsets = onsets
    self._durs = durs
def __init__(self, address_family, fib, log, log_id):
    """RIB for one address family, feeding routes into the matching FIB."""
    assert fib.address_family == address_family
    self.address_family = address_family
    self.fib = fib
    # _Destination objects keyed (and ordered) by prefix.
    self.destinations = sortedcontainers.SortedDict()
    self._log = log
    self._log_id = log_id
def __init__(self):
    """Start an empty keyframe list and notify listeners of the change."""
    super(KeyFrameList, self).__init__()
    self._countID = 0
    # Sorted map of position -> keyframe id.
    self.posdict = sortedcontainers.SortedDict()
    # Map of keyframe id -> KeyFrame.
    self.items = dict()
    self._modelChanged.emit()
def __init__(self, data=None, default=EXTEND_BACK):
    """Time-indexed series with pluggable interpolation getters."""
    self._d = sortedcontainers.SortedDict(data)
    self.default = default
    # Dispatch table: interpolation-mode name -> getter method.
    self.getter_functions = dict(
        previous=self._get_previous,
        linear=self._get_linear_interpolate,
    )
def _get_neighbors_from_list(xs):
    """
    Map each value in *xs* to its [left, right] neighbors in sorted order.

    Boundary elements get None on the missing side; duplicate values keep
    the last computed neighbor pair (matching dict-comprehension
    semantics).

    :param xs: sequence of comparable values.
    :return: SortedDict {value: [left_neighbor_or_None,
        right_neighbor_or_None]}.
    """
    ordered = np.sort(xs)
    # Bug fix: the previous np.roll-based version raised IndexError on an
    # empty input (xs_left[0] = None on an empty list); slicing handles
    # the empty case naturally and is equivalent otherwise.
    lefts = [None] + ordered[:-1].tolist()
    rights = ordered[1:].tolist() + [None]
    neighbors = {x: [x_l, x_r] for x, x_l, x_r in zip(ordered, lefts, rights)}
    return sortedcontainers.SortedDict(neighbors)
def __init__(self, data=None):
    """
    Instantiate a new graph, with or without initial data.

    :param data: can be a dictionary {time step: graph} or a list of
        graphs, in which case time steps are integers starting at 0.
    :raises Exception: if data is neither a list nor a dictionary.
    """
    self._snapshots = sortedcontainers.SortedDict()
    # Bug fix: compare against None by identity, not "!= None", which
    # could invoke a custom __ne__ on the data object.
    if data is not None:
        if isinstance(data, dict):
            self._snapshots = sortedcontainers.SortedDict(data)
        elif isinstance(data, list):
            self._snapshots = sortedcontainers.SortedDict(
                {i: g for i, g in enumerate(data)})
        else:
            raise Exception("data should be a list or a dictionary")