def copy_table(self, table, needed_polies, context):
    """Copy a sign diagram restricted to needed_polies.

    Keyword arguments:
    table -- sign diagram to be copied (/reduced)
    needed_polies -- polynomials to keep as rows of the new table
    context -- context used to build the table (fallback for lookups)
    return -- sign diagram with needed_polies as rows

    Removed in review: two large blocks of commented-out fallback code
    (superseded by self.table_lookup) and trailing debug prints; the
    cleaned-up variant of this method elsewhere in the file has neither.
    """
    ret = {}
    if len(needed_polies) == 0:
        return ret
    # table columns; needs to be sorted
    t_cols = sortedset(table[table.keys()[0]].keys())
    z_cols = zip(t_cols, range(len(t_cols)))
    copy_cols = sortedset([0.0, 1.0])
    # get all the roots of needed polynomials and points between them
    # except for the first root of all (need to copy manually)
    for p in needed_polies:
        if table.has_key(p) == False:
            continue
        if table[p][0.0] == 0:
            continue
        for (col, ind) in z_cols[1:-1]:
            if table[p][col] == 0:
                copy_cols.add(col)
                copy_cols.add(z_cols[ind + 1][0])
    # copies all the roots and points immediately after them
    for p in needed_polies:
        ret[p] = {}
        for col in copy_cols:
            ret[p][col] = self.table_lookup(table, p, col, context)
        # manual copy of the point before first root;
        # this must exist in the table and is the same as for -infinity
        point_before_first = (copy_cols[1] + copy_cols[0]) / 2
        ret[p][point_before_first] = self.table_lookup(
            table, p, copy_cols[0], context)
    return ret
def getAlbums(plusArtist=True, ss=False):
    """Return album names, optionally prefixed with their artist.

    Keyword arguments:
    plusArtist -- when True, format entries as "<album> - <artist>"
    ss -- when True, return/derive the album list as a sortedset
    return -- list (or sortedset when ss and not plusArtist) of albums

    Fixed in review: dropped the unused local `sort = ss` and replaced the
    index-based accumulation loop with a comprehension.
    """
    if not plusArtist:
        return sortedset(getInfo('C')) if ss else getInfo('C')
    # map album -> artist, then decorate each album with its artist
    di = createDict(getAlbums(plusArtist=False),
                    truncateArtists(getArtists(), sortedSet=False))
    li = sortedset(getInfo('C')) if ss else getInfo('C')
    return ["%s - %s" % (album, di[album]) for album in li]
def copy_table(self, table, needed_polies, context):
    """Copies the diagram such that it includes only some of the
    polynomials, but still is a correct sign diagram (has exactly one
    column between roots that are not roots or infinity columns)

    Keyword arguments:
    table -- table to be copied (/reduced)
    needed_polies -- polynomials that are included in new table
    context -- context that was used to get the table
    return -- sign diagram with needed_polies as rows
    """
    ret = {}
    if len(needed_polies) == 0:
        return ret
    #needs to be sorted
    #table columns
    t_cols = sortedset(table[table.keys()[0]].keys())
    # pair each column with its positional index so a root's successor
    # column can be found in O(1)
    z_cols = zip(t_cols, range(len(t_cols)))
    # columns 0 and 1 are always kept (the -infinity-side sample points)
    copy_cols = sortedset([Fraction(0), Fraction(1)])
    #get all the roots of needed polynomials and points between them
    #except for the first root of all (need to copy manually)
    for p in needed_polies:
        # polynomial only present in the context, not the table
        if table.has_key(p) == False:
            continue
        # skip polynomials that are zero at the leftmost column
        if table[p][Fraction(0)] == 0:
            continue
        for (col, ind) in z_cols[1:-1]:
            if table[p][col] == 0:
                # keep the root column and the sample point right after it
                copy_cols.add(col)
                copy_cols.add(z_cols[ind + 1][0])
    #copies all the roots and points immediately after them
    for p in needed_polies:
        ret[p] = {}
        for col in copy_cols:
            ret[p][col] = self.table_lookup(table, p, col, context)
        # manual copy of the point before first root
        # this must exist in the table
        # this is the same as for -infinity
        point_before_first = (copy_cols[1] + copy_cols[0]) / 2
        ret[p][point_before_first] = self.table_lookup(
            table, p, copy_cols[0], context)
    return ret
def _test_view_indexing(self, key):
    # Exercise indexing, slicing, reversal and search on the
    # key/value/item views of a two-entry sorted dict; `key` is an
    # optional sort-key function for the dict's keys.
    expected_items = [(3, "first"), (7, "second")]
    if key is not None:
        u = self.type2test(key, expected_items)
        expected_items.sort(key=lambda item: key(item[0]))
    else:
        u = self.type2test(expected_items)
        expected_items.sort()
    expected_keys, expected_values = list(zip(*expected_items))
    # Python 2 exposes the lazy views under the view* method names
    if sys.version_info[0] < 3:
        keys = u.viewkeys()
        values = u.viewvalues()
        items = u.viewitems()
    else:
        keys = u.keys()
        values = u.values()
        items = u.items()
    # positional indexing into the views
    for i in range(len(expected_items)):
        self.assertEqual(keys[i], expected_keys[i])
        self.assertEqual(values[i], expected_values[i])
        self.assertEqual(items[i], expected_items[i])
    # slicing, including out-of-range/negative bounds
    for i in range(-1, len(expected_items) + 1):
        for j in range(-1, len(expected_items) + 1):
            self.assertEqual(keys[i:j],
                             blist.sortedset(expected_keys[i:j]))
            self.assertEqual(values[i:j], list(expected_values[i:j]))
            self.assertEqual(items[i:j],
                             blist.sortedset(expected_items[i:j]))
    self.assertEqual(list(reversed(keys)), list(reversed(expected_keys)))
    # search and bisection on the key view
    for i, key in enumerate(expected_keys):
        self.assertEqual(keys.index(key), expected_keys.index(key))
        self.assertEqual(keys.count(key), 1)
        self.assertEqual(keys.bisect_left(key), i)
        self.assertEqual(keys.bisect_right(key), i + 1)
    # a fresh object() can never be present
    self.assertEqual(keys.count(object()), 0)
    self.assertRaises(ValueError, keys.index, object())
    # search on the item view
    for item in expected_items:
        self.assertEqual(items.index(item), expected_items.index(item))
        self.assertEqual(items.count(item), 1)
    self.assertEqual(items.count((7, "foo")), 0)
    self.assertEqual(items.count((object(), object())), 0)
    self.assertRaises(ValueError, items.index, (7, "foo"))
    self.assertRaises(ValueError, items.index, (object(), object()))
def _blist(es):
    """Insert every element of *es* into a blist.sortedset and, when more
    than one distinct key results, compute the minimum gap between
    adjacent sorted keys (benchmark workload)."""
    tree = blist.sortedset()
    for elem in es:
        tree.add(elem)
    if len(tree) > 1:
        ordered = list(tree)
        c = min(abs(a - b) for (a, b) in zip_(ordered[1:], ordered[:-1]))
def __init__(self, code=None, code_str=None, work_dir=None, *args, **kwargs):
    """
    Initializes a request object and the 'action' must be a function
    name that exists in the NNTPConnection(), you can optionally specify
    it the args and kwargs too.
    """
    # Response information (defaults when not supplied)
    self.code = 0 if code is None else code
    self.code_str = '' if code_str is None else code_str
    # Track the time our response object was created; this is
    # useful for multi-threading since we know when our response
    # was generated
    self.created = datetime.now()
    # Our body contains non-decoded content
    self.body = NNTPAsciiContent(work_dir=work_dir)
    # Contains a list of decoded content
    self.decoded = sortedset(key=lambda x: x.key())
    # For iterating over decoded items
    self._iter = None
def algo(caseno, n, k, a, b, c, r):
    # Solve one case: the sequence m is a linear congruential recurrence;
    # the answer cycles with period k+1, so only the first window needs to
    # be simulated.
    m = [a]
    # NOTE(review): firstelem is never used, and m[0] = a is never added to
    # `counts` even when a <= k — confirm whether the first element should
    # participate in the window bookkeeping.
    firstelem = a
    #generate m
    counts = Counter()
    for i in range(1, k):
        # m[i] = (b * m[i-1] + c) mod r, reduced operand-wise
        mi = ((((b%r) * (m[i-1]%r))%r) + (c%r))%r
        if(mi <= k):
            counts[mi] += 1
        m.append(mi)
    #generate a tree of num 0 - k, which is NOT present in m
    #the they are in sorted order, so the first element would the min
    #elment not in the k window
    tree = sortedset([])
    for i in range(k+1):
        if i not in counts:
            tree.add(i)
    #find the k+1 length array which contains value from 0 - k+1 and thus
    #would obviously repeate itself
    replist = []
    for i in range(k):
        #add first element from the tree
        replist.append(tree.pop(0))
        #remove the front element and bring it to the not-available tree
        if (m[i] <= k):
            if (counts[m[i]] == 1):
                tree.add(m[i])
            else:
                counts[m[i]]-=1
    # NOTE(review): replist holds k elements but the index below ranges
    # over 0..k — confirm the problem constraints rule out index k.
    result = replist[(n-1 -k) % (k+1)]
    print ("Case #%d: %d"%(caseno, result))
def _refresh_ring(self):
    """ builds a view of the token ring """
    by_token = lambda n: n.token
    self.token_ring = sortedset(self.nodes.values(), key=by_token)
    # Only an initializing node keeps a snapshot of the previous ring.
    if not self.is_initializing:
        self._previous_ring = None
        return
    # if this is the only node, set it to normal
    # there are no nodes to stream data from
    if len(self.nodes) == 1:
        self.status = Cluster.Status.NORMAL
        self._previous_ring = None
        return
    peers = [n for n in self.nodes.values() if n.node_id != self.node_id]
    self._previous_ring = sortedset(peers, key=by_token)
def reindex(self):
    """Rebuild the index from catalog events in the configured state."""
    matching = self.catalog.query(review_state=self.state)
    # Start from a fresh, empty index before re-adding everything.
    self.index = sortedset()
    self.update(matching)
    self.generate_metadata()
def _blist(es):
    """Fill a blist.sortedset from *es*; with at least two keys, find the
    smallest difference between consecutive sorted keys (benchmark)."""
    sset = blist.sortedset()
    for item in es:
        sset.add(item)
    if len(sset) <= 1:
        return
    ks = list(sset)
    c = min(abs(p - q) for p, q in zip_(ks[1:], ks[:-1]))
def encode(self, encoders):
    """
    A wrapper to the encoding of content. The function returns None if
    a problem occurs, otherwise the function returns an NNTPArtice()
    object.

    The power of this function comes from the fact you can pass in
    multiple encoders to have them all fire after one another.
    """
    if not len(self):
        # Nothing to encode
        return None
    encoded = sortedset(key=lambda x: x.key())
    for content in self:
        result = content.encode(encoders)
        # Abort the whole encode on the first failure
        if result is None:
            return None
        encoded.add(result)
    # The entire article encoded: clone it and attach the new content
    article = self.copy(include_attachments=False)
    article.decoded = encoded
    return article
def run_tests(names, num_items, num_its, type_ = int):
    """Run the timing benchmark for each named container factory and
    return a dict mapping name -> measurement."""
    fns = {
        'btrees': lambda es: BTrees.OOBTree.OOSet(es),
        'blist': lambda es: blist.sortedset(es),
        'bintrees': lambda es: bintrees.FastRBTree([(e, None) for e in es]),
        'set': lambda es: set(es),
        'banyan_red_black_tree_rank_updator': lambda es: banyan.SortedSet(
            es, key_type = type_, alg = banyan.RED_BLACK_TREE,
            updator = banyan.RankUpdator),
        'banyan_red_black_tree_min_max_updator': lambda es: banyan.SortedSet(
            es, key_type = type_, alg = banyan.RED_BLACK_TREE,
            updator = banyan.MinMaxUpdator),
        'banyan_red_black_tree': lambda es: banyan.SortedSet(
            es, key_type = type_, alg = banyan.RED_BLACK_TREE),
        'banyan_splay_tree': lambda es: banyan.SortedSet(
            es, key_type = type_, alg = banyan.SPLAY_TREE),
        'banyan_sorted_list': lambda es: banyan.SortedSet(
            es, key_type = type_, alg = banyan.SORTED_LIST),
        'banyan_red_black_tree_gen': lambda es: banyan.SortedSet(
            es, alg = banyan.RED_BLACK_TREE),
        'banyan_splay_tree_gen': lambda es: banyan.SortedSet(
            es, alg = banyan.SPLAY_TREE),
        'banyan_sorted_list_gen': lambda es: banyan.SortedSet(
            es, alg = banyan.SORTED_LIST),
    }
    return dict((name, _run_test(fns[name], type_, num_items, num_its))
                for name in names)
def write_to(self, contracts, target):
    # Emit `contracts` (and everything they depend on) to `target` in
    # dependency order: a dependency-count topological sort where ties
    # are broken by self._sortkey via the sorted `ready` set.
    ready = sortedset()
    deps = dict()   # item -> number of not-yet-written dependencies
    fol = dict()    # item -> items that depend on it (followers)
    def _recfind(item):
        # Discover `item` and its transitive dependencies exactly once.
        if item in deps:
            return
        d = list(self._dependencies(item))
        deps[item] = len(d)
        fol[item] = []
        if deps[item] == 0:
            # no dependencies: ready to be written immediately
            ready.add((self._sortkey(item), item,))
        for dependency in d:
            _recfind(dependency)
            fol[dependency].append(item)
    for contract in contracts:
        _recfind(contract)
    while len(ready) > 0:
        # take the smallest-keyed ready item
        item = ready.pop(0)[1]
        self._write(item, target)
        # written item satisfies one dependency of each follower
        for f in fol[item]:
            deps[f] = deps[f] - 1
            if deps[f] == 0:
                ready.add((self._sortkey(f), f,))
def identify(self):
    """Run the full slide-identification pipeline and return the matches."""
    self.updateTask("Building base sequences...")
    sequences = self.get_sequences(self.segmentation, self.slides)
    self.updateTask("Getting base matches...")
    matches = self.get_base_matches(sequences)
    # Find extra slide candidates among the remnant sequences, each
    # matched to its best candidate.
    self.updateTask("Finding extra slide candidates...")
    extra_matches = blist.sortedset(
        Match(s, s.candidates[0], False)
        for s in sequences if s.is_remnant)
    self.updateTask("Merging off sequence slides...")
    self.merge_off_sequence_slides(matches, extra_matches)
    self.updateTask("Merging redundant sequences (pass 1/2)...")
    self.merge_redundant_sequences(matches)
    self.updateTask("Merging remnant sequences...")
    self.merge_remnant_sequences(matches, extra_matches)
    self.updateTask("Merging redundant sequences (pass 2/2)...")
    self.merge_redundant_sequences(matches)
    self.updateTask("Assigning orphan sequences...")
    self.assign_orphan_sequences(matches, self.slides)
    self.updateTask("Filtering trailing sequences...")
    self.filter_trailling_sequences(matches)
    return matches
def __decode_value(cls, attr_type, encoded_value):
    """Decode a JSON-encoded attribute value according to attr_type.

    Handles scalar values, maps (dict) and sets; raises ValidationError
    when the encoded value does not match the declared type.

    Fixed in review: removed the dead `decoded_value = None` /
    `if decoded_value is not None: return` pair (the early return could
    never fire), and added the missing space in the error message.
    """
    decoded_value = None
    collection_type = attr_type.collection_type
    if collection_type is None:
        # scalar attribute
        decoded_value = cls.__decode_single_value(attr_type.type,
                                                  encoded_value)
    elif collection_type == AttributeType.COLLECTION_TYPE_MAP:
        if isinstance(encoded_value, dict):
            res_dict = dict()
            key_type = attr_type.key_type
            value_type = attr_type.value_type
            for key, value in encoded_value.iteritems():
                res_dict[cls.__decode_single_value(
                    key_type, key)] = (cls.__decode_single_value(value_type,
                                                                 value))
            decoded_value = res_dict
    elif collection_type == AttributeType.COLLECTION_TYPE_SET:
        element_type = attr_type.element_type
        res = sortedset()
        for val in encoded_value:
            res.add(cls.__decode_single_value(element_type, val))
        decoded_value = res
    if decoded_value is None:
        raise ValidationError(_(
            "Can't recognize attribute value '%(value)s' "
            "of type %(type)s"),
            type=attr_type, value=json.dumps(encoded_value))
    return decoded_value
def __init__(self, port, phoneaddress, instrument, maxVol=1.):
    """Set up the OSC server and route all keyboard push buttons to the
    per-page keyboard handlers."""
    self.port = port
    self.OSCserver = liblo.Server(self.port)
    self.OSCserver.add_method(None, None, self.handleMsg)
    self.phoneaddress = phoneaddress
    self.instrument = instrument
    self.maxVol = maxVol
    self.pressed = sortedset()
    self.note_last = 0
    # push1..push12 on page '1' all go to handleKeyboard1, and on page
    # '2' to handleKeyboard2 — generate the 24 identical entries.
    self.bindings = {}
    for page, handler in (('1', self.handleKeyboard1),
                          ('2', self.handleKeyboard2)):
        for n in range(1, 13):
            self.bindings[(page, 'push%d' % n)] = handler
def __decode_value(cls, attr_type, encoded_value):
    """Decode a JSON-encoded attribute value according to attr_type.

    Handles scalar values, maps (dict) and sets; raises ValidationError
    when the encoded value does not match the declared type.

    Fixed in review: removed the dead `decoded_value = None` /
    `if decoded_value is not None: return` pair (the early return could
    never fire), and added the missing space in the error message.
    """
    decoded_value = None
    collection_type = attr_type.collection_type
    if collection_type is None:
        # scalar attribute
        decoded_value = cls.__decode_single_value(attr_type.type,
                                                  encoded_value)
    elif collection_type == AttributeType.COLLECTION_TYPE_MAP:
        if isinstance(encoded_value, dict):
            res_dict = dict()
            key_type = attr_type.key_type
            value_type = attr_type.value_type
            for key, value in encoded_value.iteritems():
                res_dict[cls.__decode_single_value(key_type, key)] = (
                    cls.__decode_single_value(value_type, value)
                )
            decoded_value = res_dict
    elif collection_type == AttributeType.COLLECTION_TYPE_SET:
        element_type = attr_type.element_type
        res = sortedset()
        for val in encoded_value:
            res.add(cls.__decode_single_value(element_type, val))
        decoded_value = res
    if decoded_value is None:
        raise ValidationError(
            _("Can't recognize attribute value '%(value)s' "
              "of type %(type)s"),
            type=attr_type, value=json.dumps(encoded_value)
        )
    return decoded_value
def write_to(self, contracts, target):
    # Write `contracts` plus all transitive dependencies to `target`,
    # dependencies first (dependency-count topological sort; the sorted
    # `ready` set makes emission order deterministic via self._sortkey).
    ready = sortedset()
    deps = dict()   # item -> count of unwritten dependencies
    fol = dict()    # item -> dependents waiting on it
    def _recfind(item):
        # Visit each item once, recording its dependency count and
        # registering it as a follower of each of its dependencies.
        if item in deps:
            return
        d = list(self._dependencies(item))
        deps[item] = len(d)
        fol[item] = []
        if deps[item] == 0:
            ready.add((
                self._sortkey(item),
                item,
            ))
        for dependency in d:
            _recfind(dependency)
            fol[dependency].append(item)
    for contract in contracts:
        _recfind(contract)
    while len(ready) > 0:
        # emit the smallest-keyed item whose dependencies are all written
        item = ready.pop(0)[1]
        self._write(item, target)
        for f in fol[item]:
            deps[f] = deps[f] - 1
            if deps[f] == 0:
                ready.add((
                    self._sortkey(f),
                    f,
                ))
def complex_partial(self, ps, assigner = c_assignment_interactive):
    """Partial Muchnik procedure over complexes

    Keyword arguments:
    ps -- input polynomials
    assigner -- assigner which determines to which terms are assigned.
    Either c_assignment_all_zero, c_assigment_all_one or
    c_assigment_interactive
    """
    # Muchnik sequence seeded with the zero polynomial, ordered by degree
    muchnik_seq = self.muchnik_sequence(
        sortedset([self._algebra.zero()], key = self._poly_deg_key),
        ps)
    # constant fragment of the polynomials
    # (constant with respect to variable to be eliminated):
    # quotient by the eliminated variable is zero
    constant_frag = filter(
        lambda p: self._algebra.div(p, self._algebra.e_var())[0] == 0,
        muchnik_seq
    )
    # non-constant fragment of the polynomials
    nonconst_frag = filter(
        lambda p: self._algebra.div(p, self._algebra.e_var())[0] != 0,
        muchnik_seq
    )
    # assign signs to the constant fragment, then extend the diagram to
    # the non-constant polynomials and display it
    root_diagram = assigner(constant_frag)
    diagram = self.complex_extend(root_diagram, nonconst_frag)
    self.pprint_diagram(diagram)
def _blist(es):
    # Benchmark: insert elements into a blist.sortedset, periodically
    # doing a full in-order traversal.
    t = blist.sortedset()
    for i, e in enumerate(es):
        t.add(e)
        # NOTE(review): `i % 1 == 0` is always true, so the traversal runs
        # after every single insertion — presumably the modulus was meant
        # to be a tunable stride; confirm intent.
        if i % 1 == 0:
            for ee in t:
                pass
def addedges(self, K, candidates = None):
    """Add K random edges to the graph, chosen among the candidates
    (an edgerange object or a set/list of edges)."""
    if candidates is None:
        # Sample K new edge codes from the whole edge space, skipping
        # codes already present in self.E. The sentinel self.mMax() is
        # temporarily added so the merge loop below always terminates.
        self.E.add(self.mMax())
        new = lsample( self.mMax() - self.M() + 1, K)
        i = j = 0
        while j < K:
            # shift each sampled rank past the i existing edges below it
            if self.E[i] > new[j]+i:
                new[j] += i
                j+=1
            else:
                i+=1
        self.E.remove(self.mMax())
        self.E |= new
    else:
        # dup: candidate edges that are already in the graph
        dup = sortedset([])
        for e in self:
            if e in candidates:
                dup.add(self.cod(e))
        # sample K ranks among the candidates not already present
        new = lsample( len(candidates) - len(dup), K)
        dup.add(self.mMax())  # sentinel so the merge loop terminates
        i = j = 0
        while j < K:
            # map each sampled rank to the actual candidate edge code,
            # skipping over the i duplicates below it
            if dup[i] > candidates[new[j]+i]:
                new[j] = candidates[new[j]+i]
                j+=1
            else:
                i+=1
        self.E |= new
def real_partial(self, ps, assigner = r_assignment_interactive):
    """Partial Muchnik procedure over reals

    Keyword arguments:
    ps -- input polynomials
    assigner -- assigner which determines to which terms are assigned.
    Either r_assignment_all_pos, r_assigment_all_zero,
    r_assignment_all_neg or r_assigment_interactive
    """
    # Muchnik sequence seeded with the zero polynomial, ordered by degree
    muchnik_seq = self.muchnik_sequence(
        sortedset([self._algebra.zero()], key = self._poly_deg_key),
        ps
    )
    # polynomials constant w.r.t. the eliminated variable
    # (quotient by the eliminated variable is zero)
    constant_frag = filter(
        lambda p: self._algebra.div(p, self._algebra.e_var())[0] == 0,
        muchnik_seq
    )
    # the remaining, non-constant polynomials
    nonconst_frag = filter(
        lambda p: self._algebra.div(p, self._algebra.e_var())[0] != 0,
        muchnik_seq
    )
    # root_diagram = self.r_assignment_all_pos(constant_frag)
    # root_diagram = self.r_assignment_interactive(constant_frag)
    # assign signs to the constant fragment, print it, then extend the
    # diagram to the non-constant polynomials
    root_diagram = assigner(constant_frag)
    self.pprint_diagram(root_diagram)
    diagram = self.real_extend(root_diagram, nonconst_frag)
def from_json(cls, json_object):
    """Create Workouts from a dict."""
    # Deserialize each workout entry, keeping them sorted.
    workouts = sortedset(
        Workout.from_json(w) for w in json_object.get('workouts', []))
    return cls(**{
        'filename': json_object.get('filename'),
        'workouts': workouts
    })
def remove(self, events):
    """Drop all index entries belonging to the given events."""
    assert events
    removed_ids = set(r.id for r in events)
    # entries whose identity belongs to one of the removed events
    stale = set(entry for entry in self.index
                if self.identity_id(entry) in removed_ids)
    self.index = sortedset(self.index - stale)
def __init__(self, sampler, loop=15):
    """Set up a 4-track step grid of `loop` steps driven by Euclidean
    rhythms, and wire up the controller handlers."""
    self.sampler = sampler
    self.step = 0
    self.loop = loop
    self.rhythms = Euclid_driver(self.loop)
    self.pressed = sortedset()
    self.fills = [0] * 4
    # 4 tracks, each a row of `loop` zeroed steps
    self.grid = [[0] * self.loop for _ in range(4)]
    # every control name maps to its same-named handle_* method
    self.handlers = dict(
        (name, getattr(self, 'handle_' + name))
        for name in ('LJLR', 'LJUD', 'RJLR', 'RJUD',
                     'L1', 'R1', 'L2', 'R2',
                     'B1', 'B2', 'B3', 'B4',
                     'DPLR', 'DPUD'))
def __init__(self, slots=None):
    """ Create the slots object """
    # Slots are kept ordered by start+end.
    self.slots = sortedset(key=lambda s: s.start + s.end)
    if slots is not None:
        for slot in slots:
            self.slots.add(slot)
    self.verbose = False
    self.slot_items = {}
    self.item_slots = {}
    # prevent duplicate slots and items by remembering their string rep
    self._str_slots = {}
    self._str_items = {}
    # All items, ordered by value.
    self.items = sortedset(key=lambda it: it.value)
def add_item(self, item):
    """ Slot the item

    Coerces raw values into Item, deduplicates by string representation,
    finds a slot via _find_slot, and records the item in all bookkeeping
    structures. Returns the (possibly pre-existing) Item, or False when
    no slot fits.
    """
    if not isinstance(item, Item):
        item = self.create_item(item)
    # duplicate item: return the one already stored
    if str(item) in self._str_items:
        return self._str_items[str(item)]
    slot = self._find_slot(item)
    if slot is None:
        if self.verbose:
            print 'ERROR: Did not find a slot - existing slots - %s' % self.dump(
            ).keys()
        return False
    # first item for this slot: create its ordered item set
    if slot not in self.slot_items:
        self.slot_items[slot] = sortedset(key=lambda x: x.value)
    # register the item everywhere: global set, slot's own set,
    # dedup map, slot<->item maps, and on the item itself
    self.items.add(item)
    slot.items.add(item)
    self._str_items[str(item)] = item
    self.slot_items[slot].add(item)
    self.item_slots[item] = slot
    item.slot = slot
    return item
def call(self, function_name, **kwargs): """ Executes the specified function while passing in the same parameters you feed it here. This function returns the called functions respose as it's own return. """ # Our response # We sort on index zero (0) which will be our priority responses = sortedset(key=lambda x: x['key']) # Acquire our object funcs = self.functions.get(function_name) if funcs is not None: for _func in funcs: # Acquire our information from our dictionary set func = _func['function'] priority = _func['priority'] # Acquire our entries module = getattr( func, self.module_id, '{module}.{function}'.format( module=self.name, function=func.__name__, ), ) try: # Execute our function and return it into our # tuple which provides us the priority (used to sort # our response), our function name (which may or may # not be the same as the function call type) and # our result responses.add({ # Store our priority and module path for unambiguity # This becomes our key 'key': '%.6d/%s' % (priority, module), # Store our priority 'priority': priority, # Store our module path: 'module': module, # Store our result 'result': func(**kwargs), }) except Exception as e: logger.warning( "Hook Exception {0} calling {1}." .format(str(e), module)) return responses
def _test_view_indexing(self, key):
    # Check indexing, slicing, reversal, search and bisection on the
    # key/value/item views of a two-entry sorted dict; `key` optionally
    # supplies a sort-key function for the dict keys.
    expected_items = [(3, "first"), (7, "second")]
    if key is not None:
        u = self.type2test(key, expected_items)
        expected_items.sort(key=lambda item: key(item[0]))
    else:
        u = self.type2test(expected_items)
        expected_items.sort()
    expected_keys, expected_values = list(zip(*expected_items))
    # Python 2 exposes lazy views via the view* methods
    if sys.version_info[0] < 3:
        keys = u.viewkeys()
        values = u.viewvalues()
        items = u.viewitems()
    else:
        keys = u.keys()
        values = u.values()
        items = u.items()
    # positional indexing
    for i in range(len(expected_items)):
        self.assertEqual(keys[i], expected_keys[i])
        self.assertEqual(values[i], expected_values[i])
        self.assertEqual(items[i], expected_items[i])
    # slicing, including negative and out-of-range bounds
    for i in range(-1, len(expected_items)+1):
        for j in range(-1, len(expected_items)+1):
            self.assertEqual(keys[i:j],
                             blist.sortedset(expected_keys[i:j]))
            self.assertEqual(values[i:j], list(expected_values[i:j]))
            self.assertEqual(items[i:j],
                             blist.sortedset(expected_items[i:j]))
    self.assertEqual(list(reversed(keys)), list(reversed(expected_keys)))
    # search/bisection on the key view
    for i, key in enumerate(expected_keys):
        self.assertEqual(keys.index(key), expected_keys.index(key))
        self.assertEqual(keys.count(key), 1)
        self.assertEqual(keys.bisect_left(key), i)
        self.assertEqual(keys.bisect_right(key), i+1)
    # a fresh object() can never be found
    self.assertEqual(keys.count(object()), 0)
    self.assertRaises(ValueError, keys.index, object())
    # search on the item view
    for item in expected_items:
        self.assertEqual(items.index(item), expected_items.index(item))
        self.assertEqual(items.count(item), 1)
    self.assertEqual(items.count((7, "foo")), 0)
    self.assertEqual(items.count((object(), object())), 0)
    self.assertRaises(ValueError, items.index, (7, "foo"))
    self.assertRaises(ValueError, items.index, (object(), object()))
def _direct_sum(self, *others):
    """Combine this sketch with *others*: X is the k smallest elements of
    the union of all kmin sets; n counts this sketch's kmin elements that
    survive into X and appear in every other sketch."""
    k = self._smallest_k(*others)
    X = sortedset(chain(self.kmin, *imap(attrgetter("kmin"), others)))[:k]
    n = sum(1 for item in self.kmin
            if item in X and all(item in other.kmin for other in others))
    return n, X
def set_add(self, key, values):
    """Add *values* to the set stored at *key*.

    Creates the set if the key does not exist. Returns True on success,
    or None when the key exists but does not hold a set (mirrors Redis
    SADD type-mismatch semantics).

    Fixed in review: the type check was inverted — the original returned
    None when the key *did* hold a set (making it impossible to add to an
    existing set) and fell through to call .add() on non-set values.
    """
    if not self.is_exists(key):
        self._data[key] = sortedset([])
    elif not self.is_set(key):
        # key holds a non-set value: refuse rather than clobber it
        return None
    for val in values:
        self._data[key].add(val)
    return True
def __init__(self, N=0, E=None, M=None, w=None, type=None):
    """Build an empty graph with N vertices and edge set E (if given).
    If `type` is given, build a graph of that kind instead; accepted
    values are cycle, path, tree, forest, clique, star, wheel. If `w` is
    given the graph is weighted, with weights produced by calling w().
    graph(G) with G an existing graph is also accepted (copy).

    Fixed in review: the `type is 'cycle'` comparisons relied on CPython
    string interning (an implementation detail); they now use `==`.
    """
    # copy-constructor form: graph(G)
    if isinstance(N, graph) and E is None:
        E = [self.cod(e) for e in N]
        N = N.V
    if not ((E is None) or (type is None)):
        raise StandardError("Incompatible parameters specified.")
    self.V = N
    self.w = w
    # edges given as [u, v] pairs: encode them
    if E and isinstance(E[0], list):
        E = sortedset([self.cod(e) for e in E])
    if E is None:
        E = sortedset([])
    if len(E) == 0 and N > 1:
        if type == 'cycle':
            for i in xrange(N):
                E.add(self.cod([i, (i+1) % N]))
        if type == 'path':
            for i in xrange(N-1):
                E.add(self.cod([i, (i+1) % N]))
        if type == 'tree':
            # attach each vertex to a random earlier vertex
            for i in xrange(1, N):
                E.add(self.cod([randint(i), i]))
        if type == 'forest':
            if not (0 <= M < N):
                raise StandardError("Parameter M out of bounds.")
            for i in lsample(N-1, M):
                E.add(self.cod([randint(i+1), i+1]))
        if type == 'clique':
            for i in xrange(N-1):
                for j in xrange(i+1, N):
                    E.add(self.cod([i, j]))
        if type == 'star':
            for i in xrange(1, N):
                E.add(self.cod([0, i]))
        if type == 'wheel':
            # star plus outer cycle
            for i in xrange(1, N):
                E.add(self.cod([0, i]))
                E.add(self.cod([i, (i+1) % N]))
    # possibly add: gear, caterpillar/lobster, BIPARTITE
    self.E = sortedset(E)
def __init__(self, genesis: Block):
    """Initialize the block tree with *genesis* as its root node."""
    self.latest_block_nodes = {}  # type: Dict[int, Node]
    self.blocks_at_height = {}  # type: Dict[int, Set[Node]]
    self.node_with_block = {}  # type: Dict[Block, Node]
    # negated key: iteration goes from largest height to smallest
    self.heights = sortedset(key=lambda h: -h)
    self.path_block_to_child_node = {}  # type: Dict[Block, Node]
    self.node_counter = 1
    self.root = self.add_tree_node(genesis, None, True)
def __init__(self):
    # Analysis state: tree root, pending work, and per-node bookkeeping.
    self.root = None
    self.worklist = None
    self.checked_nodes = defaultdict(set)
    self.labels = {}
    self.max = 0
    self.seen_comps = set()
    # cache ordered by element length (shortest first)
    self.cs_cache = sortedset(key=len)
    # HACK: graft a list-like `append` alias onto the sortedset instance
    # so callers written against a list API feed the cache via add().
    # NOTE(review): this assumes blist.sortedset instances accept
    # attribute assignment — confirm.
    self.cs_cache.append = lambda v: self.cs_cache.add(v)
def reindex(self):
    # Rebuild the index from catalog events in the configured state.
    events = self.catalog.query(review_state=self.state)
    # NOTE(review): this resets the index when it is non-empty *or* still
    # None, but keeps an existing empty sortedset as-is — confirm the
    # truthiness test is intended rather than `self.index is None` alone.
    if self.index or self.index is None:
        self.index = sortedset()
    self.update(events)
    self.generate_metadata()
def kn_columns(self, target_column, k, dist_func):
    """Gets k nearest columns to target_column by distance function
    provided by dist_func

    Returns a list of (distance, word) pairs for the k nearest tokens,
    skipping the closest entry (presumably target_column itself — TODO
    confirm) and the "*" wildcard token.

    Fixed in review: removed the unused local `n` and replaced the
    Python-2-only `<>` operator with `!=`.
    """
    coolset = sortedset()
    for word in self.token_set:
        if word != "*":
            coolset.add((dist_func(target_column, word), word))
    return list(coolset[1:k + 1])
def kn_columns(self, target_column, k, dist_func):
    """Gets k nearest columns to target_column by distance function
    provided by dist_func

    Returns a list of (distance, word) pairs for the k nearest tokens,
    skipping the closest entry (presumably target_column itself — TODO
    confirm) and the "*" wildcard token.

    Fixed in review: removed the unused local `n` and replaced the
    Python-2-only `<>` operator with `!=`.
    """
    coolset = sortedset()
    for word in self.token_set:
        if word != "*":
            coolset.add((dist_func(target_column, word), word))
    return list(coolset[1 : k + 1])
def _blist(es):
    # Benchmark: fill a blist.sortedset, then linearly scan for the last
    # inserted element, counting its rank in sorted order.
    t = blist.sortedset()
    for e in es:
        t.add(e)
    # NOTE(review): relies on the loop variable `e` leaking out of the
    # for loop (last element of es); raises NameError when es is empty.
    c = 0
    for i in t:
        if i == e:
            break
        c += 1
def __init__(self):
    """ Initialize our object """
    # Hooks live in a sorted set so they can be dispatched in priority
    # order (ordered by each hook's key()).
    self.hooks = sortedset(key=lambda hook: hook.key())
def test_index_document(self):
    """Test normal indexing document."""
    doc = {'email': '*****@*****.**'}
    # documents are identified by the md5 of their canonical
    # (sorted-keys) JSON serialization
    uid = hashlib.md5(json.dumps(doc, sort_keys=True)).hexdigest()
    self.search_index.index_document(table_name='user',
                                     index_name='email',
                                     token='*****@*****.**',
                                     document=doc)
    # NOTE(review): the fixture emails appear masked; the two dict keys
    # below are textually identical, so the first entry is overwritten by
    # the second — confirm against the unredacted fixture values.
    self.assertEqual(
        self.search_index.indices,
        {
            'user': {
                'email': {
                    '*****@*****.**':
                        sortedset(['35cc47af8dcdbc9cf700dbd7dfe6a5a1']),
                    '*****@*****.**': sortedset([uid])
                }
            }
        })
def set_sort(infile, outfile):
    """Read integers (one per line) from infile and write them to
    outfile sorted and deduplicated, one per line."""
    values = blist.sortedset()
    with open(infile, 'rb') as src:
        for line in src:
            values.add(int(line))
    with open(outfile, 'wb') as dst:
        for value in values:
            dst.write(str(value) + '\n')
def kn_cooccurences(self, target_column, k):
    """Gets k top columns having max cooccurence with target_column

    Returns a list of (count, word) pairs, highest cooccurrence first,
    skipping the "*" wildcard token.

    Fixed in review: removed the unused local `n` and replaced the
    Python-2-only `<>` operator with `!=`.
    """
    coolset = sortedset()
    for word in self.token_set:
        if word != "*":
            coolset.add((self.get(target_column, word), word))
    # take the k largest entries and present them in descending order
    top = list(coolset[len(coolset) - k:len(coolset)])
    top.reverse()
    return top
def pprint_diagram(self, diagram):
    """Pretty prints diagram

    Rows (polynomials) are printed in degree order, left-padded to the
    longest row label; each header column shows the row's sign entry or
    " *" when the row has no value for that column.
    """
    # width of the widest row label
    max_len = sortedset(map(lambda s: len(str(s)), diagram.keys()))[-1]
    # union of all column keys across rows
    header = sortedset()
    for row in diagram.keys():
        header.update(diagram[row].keys())
    for row in sortedset(diagram.keys(), key = self._poly_deg_key):
        # trailing comma: stay on the same line (Python 2 print)
        print str("%%%ds " %max_len) %row,
        # print "%s " %diagram[row],
        # for index in diagram[row].keys():
        #     print "%2s " %diagram[row][index][0],
        for col in header:
            if col in diagram[row]:
                print "%2s" %diagram[row][col][0],
            else:
                print " *",
        print ""
    print ""
def truncateArtists(ss, sortedSet=True):
    """Strip secondary-artist decorations from each name: cut at the
    first "," and at the first " feat", after trimming whitespace.
    Returns a sortedset when sortedSet is true, otherwise a list."""
    trimmed = []
    for raw in ss:
        name = raw.strip()
        # both cut points are located before any truncation happens
        comma = name.find(",")
        feat = name.find(" feat")
        if comma != -1:
            name = name[:comma]
        if feat != -1:
            name = name[:feat]
        trimmed.append(name)
    return sortedset(trimmed) if sortedSet else trimmed
def _refresh_ring(self):
    """ builds a view of the token ring """
    token_of = lambda node: node.token
    self.token_ring = sortedset(self.nodes.values(), key=token_of)
    self._previous_ring = None
    if self.is_initializing:
        if len(self.nodes) == 1:
            # only node in the cluster: nothing to stream data from,
            # so go straight to normal
            self.status = Cluster.Status.NORMAL
        else:
            peers = [n for n in self.nodes.values()
                     if n.node_id != self.node_id]
            self._previous_ring = sortedset(peers, key=token_of)
def fill(pos, initial, bitmap, bktRGB, bktA):
    # 4-connected flood fill: starting at `pos`, repaint every reachable
    # pixel whose color equals `initial` with the current bucket color.
    # Coordinates are clamped to a 600x600 bitmap (indices 0..599).
    log(INFO, lambda: "fill(%s, %s)" % (pos, initial))
    queue = blist.sortedset([pos])
    # Change all adjacent pixels of same initial color to new color
    queued = 1
    filled = 0
    iters = 0
    skipped = 0
    pixel = currentPixel(bktRGB, bktA)
    # seed pixel is painted up front so it can never be re-queued
    setPixelFaster(bitmap, pos, pixel)
    while queue:
        # periodic progress logging
        if iters % 40000 == 0:
            log(
                INFO,
                "fill() iters=%s skipped=%s len(queue)=%s q=%s pixel=%s" %
                (iters, skipped, len(queue), queue[0],
                 bitmap[queue[0][0]][queue[0][1]]))
        iters += 1
        (x, y) = queue.pop()
        filled += 1
        # Each neighbor still holding the initial color is painted
        # immediately and then queued; painting first guarantees a pixel
        # is enqueued at most once.
        if x > 0:
            next = (x - 1, y)
            if getPixel(bitmap, next) == initial:
                setPixelFaster(bitmap, next, pixel)
                queue.add(next)
                queued += 1
            else:
                skipped += 1
        if x < 599:
            next = (x + 1, y)
            if getPixel(bitmap, next) == initial:
                setPixelFaster(bitmap, next, pixel)
                queue.add(next)
                queued += 1
            else:
                skipped += 1
        if y > 0:
            next = (x, y - 1)
            if getPixel(bitmap, next) == initial:
                setPixelFaster(bitmap, next, pixel)
                queue.add(next)
                queued += 1
            else:
                skipped += 1
        if y < 599:
            next = (x, y + 1)
            if getPixel(bitmap, next) == initial:
                setPixelFaster(bitmap, next, pixel)
                queue.add(next)
                queued += 1
            else:
                skipped += 1
    log(
        INFO, "fill() queued %s and filled %s pixels in %s iterations" %
        (queued, filled, iters))
def findDupSongs(songs, artists):
    """Return display strings for (artist, song) pairs that occur more
    than once; one entry is emitted per repeat occurrence beyond the
    first, in input order.

    Keyword arguments:
    songs -- list of song titles
    artists -- list of artist names, parallel to songs

    Fixed in review: replaced the O(n^2) list.remove() scan with a
    seen-set (same output, linear time) and dropped the redundant
    s/a aliases.
    """
    dup = []
    seen = set()
    for artist, song in zip(artists, songs):
        entry = "Artist: %s Song: %s" % (artist, song)
        if entry in seen:
            dup.append(entry)
        else:
            seen.add(entry)
    return dup
def __init__(self):
    """Initialize the allocator state."""
    self.nb_bytes = 0
    # Initially every value is free.
    self.free = sortedset()
    # Mapping AS -> integer; a companion count records how many times
    # each AS appears in the mapping.
    self.mapping = {}
    self.blocked = False
    self.max_free = 500
def __init__(self, start, end, desc=None):
    """Create a slot spanning (start, end); rejects empty or inverted
    ranges. Items within the slot are kept ordered by value."""
    if start >= end:
        raise SlotterException('ERROR: Invalid start - %s, end - %s' %
                               (start, end))
    self.start = start
    self.end = end
    self.items = sortedset(key=lambda item: item.value)
    # default description is "<start>-<end>"
    if desc is None:
        self.desc = '%s-%s' % (str(self.start), str(self.end))
    else:
        self.desc = desc
def getSortedSet(sortS=False, songs=False):
    # Split each "<song><sep><artist>" entry from getArtistsv2() into its
    # artist part (default) or song part (songs=True).
    li = getArtistsv2()
    ss = []
    # NOTE(review): the separator is taken as the single character at
    # position 36 of the *first* entry — presumably the delimiter between
    # song and artist; confirm the fixed offset holds for all entries.
    s = li[0][36:37]
    if not songs:
        for i in li:
            # everything after the separator (+2 skips separator + space)
            r = i[i.find(s) + 2:]
            ss.append(r)
    else:
        for i in li:
            # everything before the separator
            r = i[:i.find(s)]
            ss.append(r.strip())
    return sortedset(ss) if sortS else ss
def test_search_empty_with_result(self):
    """Test when search with empty query and still have result."""
    query = ''
    doc = {'email': ''}
    # index a document whose token is the empty string
    self.search_index.index_document(table_name='user',
                                     index_name='email',
                                     token='',
                                     document=doc)
    # the empty token gets its own index bucket alongside the existing one
    self.assertEqual(
        self.search_index.indices,
        {
            'user': {
                'email': {
                    '': sortedset(['ee3161ffa6dad871caedeed6e2298654']),
                    '*****@*****.**':
                        sortedset(['35cc47af8dcdbc9cf700dbd7dfe6a5a1'])
                }
            }
        })
    # searching with the empty query must return the empty-token document
    result = self.search_index.search(table_name='user',
                                      index_name='email',
                                      token=query)
    self.assertEqual(result, [{'email': ''}])