import uuid
from collections import namedtuple


def get_nodes_edges(tree, root_node=None):
    ''' Return all nodes and edges in the tree. '''
    Node = namedtuple('Node', ['id', 'label'])
    Edge = namedtuple('Edge', ['start', 'end'])

    nodes, edges = [], []

    if type(tree) is not dict:
        return nodes, edges

    if root_node is None:
        label = '{}: {}'.format(tree['feat_idx'], tree['feat_val'])
        root_node = Node._make([uuid.uuid4(), label])
        nodes.append(root_node)

    for sub_tree in (tree['left'], tree['right']):
        if type(sub_tree) is dict:
            node_label = '{}: {}'.format(sub_tree['feat_idx'], sub_tree['feat_val'])
        else:
            node_label = '{:.2f}'.format(sub_tree)
        sub_node = Node._make([uuid.uuid4(), node_label])
        nodes.append(sub_node)

        edge = Edge._make([root_node, sub_node])
        edges.append(edge)

        sub_nodes, sub_edges = get_nodes_edges(sub_tree, root_node=sub_node)
        nodes.extend(sub_nodes)
        edges.extend(sub_edges)

    return nodes, edges
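# A minimal usage sketch for get_nodes_edges above, on a hand-built toy
# regression tree (the dict layout with feat_idx/feat_val/left/right keys
# follows the function's own expectations):
toy_tree = {'feat_idx': 0, 'feat_val': 2.5, 'left': 1.0, 'right': 2.0}
nodes, edges = get_nodes_edges(toy_tree)
assert len(nodes) == 3 and len(edges) == 2  # root plus two leaves, two edges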
def test_gross_net_calculator_answer_format(self):
    # Specifically test the output format of the answer.
    Gn_shell_type = namedtuple('Gross_Net_Shell', 'input_was_gross input_was_net')
    Gn_output_type = namedtuple(
        'Gross_Net_Output', 'dollars federal state fed_penalty summary')
    # input_value is what is passed to the function:
    # (dollars, fed, state, fed_penalty)
    input_value = (5555.56, 24, 4, 0)
    input_was_gross = Gn_output_type(
        Decimal(4000).quantize(Decimal('.01'), rounding=ROUND_HALF_DOWN),
        Decimal(1333.33).quantize(Decimal('.01'), rounding=ROUND_HALF_DOWN),
        Decimal(222.22).quantize(Decimal('.01'), rounding=ROUND_HALF_DOWN),
        Decimal(0).quantize(Decimal('.01'), rounding=ROUND_HALF_DOWN),
        '''Assuming the input dollar value (${dollars:.2f}) is the Gross amount,
the Net amount is ${net:.2f},
{fed_pct:.2f}% withheld for Federal taxes is ${fed_dollars:.2f},
{state_pct:.2f}% withheld for State taxes is ${state_dollars:.2f},
and a {penalty_pct:.2f}% Federal Penalty tax is ${penalty_dollars:.2f}.'''.format(
            dollars=5555.56,
            net=4000,
            fed_pct=24.00,
            fed_dollars=1333.33,
            state_pct=4.00,
            state_dollars=222.22,
            penalty_pct=0.00,
            penalty_dollars=0.00,
        ),
    )
    input_was_net = Gn_output_type(
        Decimal(7716.06).quantize(Decimal('.01'), rounding=ROUND_HALF_DOWN),
        Decimal(1851.8544).quantize(Decimal('.01'), rounding=ROUND_HALF_DOWN),
        Decimal(308.64).quantize(Decimal('.01'), rounding=ROUND_HALF_DOWN),
        Decimal(0).quantize(Decimal('.01'), rounding=ROUND_HALF_DOWN),
        '''Assuming the input dollar value (${dollars:.2f}) is the Net amount,
the Gross amount is ${gross:.2f},
{fed_pct:.2f}% withheld for Federal taxes is ${fed_dollars:.2f},
{state_pct:.2f}% withheld for State taxes is ${state_dollars:.2f},
and a {penalty_pct:.2f}% Federal Penalty tax is ${penalty_dollars:.2f}.'''.format(
            dollars=5555.56,
            gross=7716.06,
            fed_pct=24.00,
            fed_dollars=1851.85,
            state_pct=4.00,
            state_dollars=308.64,
            penalty_pct=0.00,
            penalty_dollars=0.00,
        ),
    )
    shell = Gn_shell_type(input_was_gross, input_was_net)
    self.assertEqual(gross_net_calculator(*input_value), shell)
def classroom_model(db):
    # `db` should be a fixture provided by the current dal testbench

    class Teacher(Document):
        name = fields.StrField(required=True)

        class Meta:
            register_document = False
            collection = db.teacher

    class Course(Document):
        name = fields.StrField(required=True)
        teacher = fields.ReferenceField(Teacher, required=True)

        class Meta:
            register_document = False
            collection = db.course

    class Student(Document):
        name = fields.StrField(required=True)
        birthday = fields.DateTimeField()
        courses = fields.ListField(fields.ReferenceField(Course))

        class Meta:
            register_document = False
            collection = db.student
            allow_inheritance = True

    return namedtuple('Mapping', ('Teacher', 'Course', 'Student'))(Teacher, Course, Student)
def load_ppi_data(root):
    DataType = namedtuple('Dataset', ['num_classes', 'g'])

    adj_full = sp.load_npz(os.path.join(root, 'ppi', 'adj_full.npz'))
    G = dgl.from_scipy(adj_full)
    nodes_num = G.num_nodes()

    role = json.load(open(os.path.join(root, 'ppi', 'role.json'), 'r'))
    tr = list(role['tr'])
    te = list(role['te'])
    va = list(role['va'])

    mask = np.zeros((nodes_num, ), dtype=bool)
    train_mask = mask.copy()
    train_mask[tr] = True
    val_mask = mask.copy()
    val_mask[va] = True
    test_mask = mask.copy()
    test_mask[te] = True

    G.ndata['train_mask'] = torch.tensor(train_mask, dtype=torch.bool)
    G.ndata['val_mask'] = torch.tensor(val_mask, dtype=torch.bool)
    G.ndata['test_mask'] = torch.tensor(test_mask, dtype=torch.bool)

    feats = np.load(os.path.join(root, 'ppi', 'feats.npy'))
    G.ndata['feat'] = torch.tensor(feats, dtype=torch.float)

    class_map = json.load(
        open(os.path.join(root, 'ppi', 'class_map.json'), 'r'))
    labels = np.array([class_map[str(i)] for i in range(nodes_num)])
    G.ndata['label'] = torch.tensor(labels, dtype=torch.float)

    data = DataType(g=G, num_classes=labels.shape[1])
    return data
def main():
    args = parse_args()

    # Immutable endpoint records for the local and remote ends.
    Local = namedtuple('local', ('host', 'port'))
    local = Local(args.local_host, args.local_port)
    Remote = namedtuple('remote', ('host', 'port'))
    remote = Remote(args.remote_host, args.remote_port)

    queue = Queue()
    listen_thread = Thread(target=listen_for_packets, args=(local, queue))
    listen_thread.start()
    send_packets(target=remote, queue=queue)
def get_info():
    item_xpath = '//*[@id="J_ShopSearchResult"]/div/div[@class="J_TItems"]'
    source = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.XPATH, item_xpath)))
    # Collect the name, price, sales volume, and review count.
    name = '//dd[@class="detail"]/a'  # name
    price = '//dd[@class="detail"]/div/div[1]/span[2]'  # price
    sale_area = '//dd[@class="detail"]/div/div[@class="sale-area"]/span'  # sales volume
    judge = '//dd[@class="rates"]/div/h4/a'  # reviews
    source1 = source.find_elements(By.XPATH, name)
    names = [name.text for name in source1]
    source2 = source.find_elements(By.XPATH, price)
    prices = [price.text for price in source2]
    source3 = source.find_elements(By.XPATH, sale_area)
    sale_areas = [sale_area.text for sale_area in source3]
    source4 = source.find_elements(By.XPATH, judge)
    judges = [judge.text for judge in source4]
    info = namedtuple('info', row)
    how_many = len(names) - 8
    # print(len(names), len(prices), len(sale_areas), len(judges))
    # The recommended items at the bottom of the page have no reviews,
    # hence the offset of 8 above.
    for i in range(how_many):
        oneinfo = info(names[i], prices[i], sale_areas[i], judges[i])
        infos.append(oneinfo)
    print('Download one page')
def test_get_grid_posititions_from_shape_exceptions(self):
    grid_str = """08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48"""
    grid = [s.split() for s in grid_str.splitlines()]
    Input = namedtuple('Input', ['shape', 'row', 'col', 'n'])
    known_exceptions = [
        (Input(self.SHAPE.VERTICAL, 19, 0, 4), IndexError),
        (Input(self.SHAPE.HORIZONTAL, 19, 19, 4), IndexError),
    ]
    for input_, output in known_exceptions:
        self.assertRaises(output, hf.get_grid_posititions_from_shape, grid,
                          input_.shape, input_.row, input_.col, input_.n)
def record(self, winner, chips, player_point, dealer_point):
    self.update(winner, chips, player_point, dealer_point)
    Row = namedtuple('Row', [
        'rounds', 'player_point', 'dealer_point', 'winner', 'remain_chips'
    ])
    row = Row(self.rounds, self.player_point, self.dealer_point, self.winner,
              self.remain_chips)
    self.data.append(row)
class Prompter:
    '''
    Read the queue and wait until the trigger fires;
    used by the follower task to subscribe its event handlers to line updates.

    Asynchronous iterator; may be called as a coroutine.
    There is no way to reach StopAsyncIteration at the moment.
    '''

    ### datatype to send event details back to handler
    Event = namedtuple('Event', ('ln', 'line', 'match'))

    ##############
    __slots__ = (
        'queue',
        '_trigger',
        '_file',
    )

    def __init__(
        self,
        queue: curio.Queue,
        trigger: Trigger,
        file: Path,
    ):
        self.queue = queue
        self._trigger = trigger
        self._file = file

    @property
    def trigger(self):
        return self._trigger

    @property
    def file(self):
        return self._file

    ##############
    async def __anext__(self):
        ''' Async-block until the queue produces a line that activates the
            trigger; never reaches the StopAsyncIteration below. '''
        while True:
            (line, ln) = await self.queue.get()
            match = self.trigger.check(line)
            if match is not None:
                return self.Event(ln, line, match)
        raise StopAsyncIteration

    def __aiter__(self):
        ''' Prompter object is an async iterator '''
        return self

    async def __call__(self):
        ''' Prompter object is a coroutine '''
        try:
            return await self.__anext__()
        except StopAsyncIteration:
            raise  # do what
from collections import namedtuple


class BoundedProperty(namedtuple("BoundedProperty", ["min", "max", "delta"])):
    def __new__(cls, minimum, maximum, delta=None):
        if delta is None:
            delta = abs(maximum - minimum)
        return super().__new__(cls, minimum, maximum, delta)

    def interpolate(self, val):
        result = (self.max - val) / self.delta
        return 1.0 - bound(0, result, 1)
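# A minimal usage sketch for BoundedProperty, assuming `bound(low, x, high)`
# clamps x into [low, high]; this stand-in mirrors the helper the surrounding
# module is expected to provide:
def bound(low, x, high):
    return max(low, min(x, high))

thickness = BoundedProperty(1.0, 5.0)
assert thickness.delta == 4.0
assert thickness.interpolate(2.0) == 0.25  # fraction of the way from min to max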
def test_hex_tile_generator_known_answer(self):
    Ans = namedtuple('Ans', ['input', 'ans'])
    known_answers = [
        Ans('builtin1', [2, 1, 4, 3, 5]),
        Ans('ordered5', [1, 2, 3, 4, 5]),
    ]
    for ans in known_answers:
        test_te = list(te.hex_tile_generator(ans.input))
        self.assertListEqual(ans.ans, test_te)
def load_data(args):
    '''Wraps dgl's load_data utility to handle the ppi special case.'''
    DataType = namedtuple('Dataset', ['num_classes', 'g'])
    if args.dataset == 'amazon2m':
        amazon_data = AmazonDataset()
        data = DataType(g=amazon_data[0], num_classes=amazon_data.num_classes)
        return data
    elif args.dataset == 'ppi':
        train_dataset = PPIDataset('train')
        train_graph = dgl.batch(
            [train_dataset[i] for i in range(len(train_dataset))],
            edge_attrs=None, node_attrs=None)
        val_dataset = PPIDataset('valid')
        val_graph = dgl.batch(
            [val_dataset[i] for i in range(len(val_dataset))],
            edge_attrs=None, node_attrs=None)
        test_dataset = PPIDataset('test')
        test_graph = dgl.batch(
            [test_dataset[i] for i in range(len(test_dataset))],
            edge_attrs=None, node_attrs=None)
        G = dgl.batch([train_graph, val_graph, test_graph],
                      edge_attrs=None, node_attrs=None)

        train_nodes_num = train_graph.number_of_nodes()
        test_nodes_num = test_graph.number_of_nodes()
        val_nodes_num = val_graph.number_of_nodes()
        nodes_num = G.number_of_nodes()
        assert nodes_num == (train_nodes_num + test_nodes_num + val_nodes_num)

        # construct masks: train nodes first, then validation, then test
        mask = np.zeros((nodes_num,), dtype=bool)
        train_mask = mask.copy()
        train_mask[:train_nodes_num] = True
        val_mask = mask.copy()
        val_mask[train_nodes_num:-test_nodes_num] = True
        test_mask = mask.copy()
        test_mask[-test_nodes_num:] = True

        G.ndata['train_mask'] = torch.tensor(train_mask, dtype=torch.bool)
        G.ndata['val_mask'] = torch.tensor(val_mask, dtype=torch.bool)
        G.ndata['test_mask'] = torch.tensor(test_mask, dtype=torch.bool)

        data = DataType(g=G, num_classes=train_dataset.num_labels)
        return data
    else:
        dataset = _load_data(args)
        data = DataType(g=dataset[0], num_classes=dataset.num_classes)
        return data
def load_data(args, multilabel):
    if not os.path.exists('graphsaintdata') and not os.path.exists('data'):
        raise ValueError("The directory graphsaintdata does not exist!")
    elif os.path.exists('graphsaintdata') and not os.path.exists('data'):
        os.rename('graphsaintdata', 'data')
    prefix = "data/{}".format(args.dataset)
    DataType = namedtuple('Dataset', ['num_classes', 'train_nid', 'g'])

    adj_full = scipy.sparse.load_npz(
        './{}/adj_full.npz'.format(prefix)).astype(bool)
    g = dgl.from_scipy(adj_full)
    num_nodes = g.num_nodes()

    adj_train = scipy.sparse.load_npz(
        './{}/adj_train.npz'.format(prefix)).astype(bool)
    train_nid = np.array(list(set(adj_train.nonzero()[0])))

    role = json.load(open('./{}/role.json'.format(prefix)))
    mask = np.zeros((num_nodes, ), dtype=bool)
    train_mask = mask.copy()
    train_mask[role['tr']] = True
    val_mask = mask.copy()
    val_mask[role['va']] = True
    test_mask = mask.copy()
    test_mask[role['te']] = True

    feats = np.load('./{}/feats.npy'.format(prefix))
    scaler = StandardScaler()
    scaler.fit(feats[train_nid])
    feats = scaler.transform(feats)

    class_map = json.load(open('./{}/class_map.json'.format(prefix)))
    class_map = {int(k): v for k, v in class_map.items()}
    if multilabel:
        # Multi-label binary classification
        num_classes = len(list(class_map.values())[0])
        class_arr = np.zeros((num_nodes, num_classes))
        for k, v in class_map.items():
            class_arr[k] = v
    else:
        num_classes = max(class_map.values()) - min(class_map.values()) + 1
        class_arr = np.zeros((num_nodes, ))
        for k, v in class_map.items():
            class_arr[k] = v

    g.ndata['feat'] = torch.tensor(feats, dtype=torch.float)
    g.ndata['label'] = torch.tensor(
        class_arr, dtype=torch.float if multilabel else torch.long)
    g.ndata['train_mask'] = torch.tensor(train_mask, dtype=torch.bool)
    g.ndata['val_mask'] = torch.tensor(val_mask, dtype=torch.bool)
    g.ndata['test_mask'] = torch.tensor(test_mask, dtype=torch.bool)

    data = DataType(g=g, num_classes=num_classes, train_nid=train_nid)
    return data
from collections import namedtuple


class Dep(
        namedtuple('dep', ['inputs', 'outputs', 'rule', 'params', 'use_existing'])):
    def __new__(cls, inputs, outputs, rule, params=None, use_existing=False):
        # Normalize scalar inputs/outputs to single-element lists.
        if not isinstance(inputs, list):
            inputs = [inputs]
        if not isinstance(outputs, list):
            outputs = [outputs]
        if params is None:
            params = {}
        return super().__new__(cls, inputs, outputs, rule, params, use_existing)
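# A minimal usage sketch for Dep; the file paths and rule name are hypothetical:
dep = Dep('src/a.c', 'build/a.o', rule='cc')
assert dep.inputs == ['src/a.c'] and dep.outputs == ['build/a.o']
assert dep.params == {} and dep.use_existing is False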
def record(self, winner, chips, player_point, dealer_point):
    self.update(winner, chips, player_point, dealer_point)
    Row = namedtuple("Row", [
        "rounds", "player_point", "dealer_point", "winner", "remain_chips"
    ])
    row = Row(
        self.rounds,
        self.player_point,
        self.dealer_point,
        self.winner,
        self.remain_chips,
    )
    self.data.append(row)
def final_function(ones):
    df = create_blocks(ones)
    sinking, sink = create_sinking_df(df)
    r_f_splits = [split_columns(df, sinking, col, sink) for col in sinking]

    sinking = [x['sinking'] for x in r_f_splits if 'sinking' in x.keys()]
    sinking = pd.DataFrame(list(chain.from_iterable(sinking)))
    sinking.name = 'sinking'

    rising = [x['rising'] for x in r_f_splits if 'rising' in x.keys()]
    rising = pd.DataFrame(list(chain.from_iterable(rising)))
    rising.name = 'rising'

    result = namedtuple('result', 'rising, sinking')
    return result(rising.T, sinking.T)
def load_data(args):
    '''Wraps dgl's load_data utility to handle the ppi special case.'''
    if args.dataset != 'ppi':
        return _load_data(args)
    train_dataset = PPIDataset('train')
    val_dataset = PPIDataset('valid')
    test_dataset = PPIDataset('test')
    PPIDataType = namedtuple('PPIDataset', [
        'train_mask', 'test_mask', 'val_mask', 'features', 'labels',
        'num_labels', 'graph'
    ])
    G = dgl.BatchedDGLGraph(
        [train_dataset.graph, val_dataset.graph, test_dataset.graph],
        edge_attrs=None, node_attrs=None)
    G = G.to_networkx()
    # hack to dodge the potential bugs of to_networkx
    for (n1, n2, d) in G.edges(data=True):
        d.clear()
    train_nodes_num = train_dataset.graph.number_of_nodes()
    test_nodes_num = test_dataset.graph.number_of_nodes()
    val_nodes_num = val_dataset.graph.number_of_nodes()
    nodes_num = G.number_of_nodes()
    assert nodes_num == (train_nodes_num + test_nodes_num + val_nodes_num)

    # construct masks: train nodes first, then validation, then test
    mask = np.zeros((nodes_num, ), dtype=bool)
    train_mask = mask.copy()
    train_mask[:train_nodes_num] = True
    val_mask = mask.copy()
    val_mask[train_nodes_num:-test_nodes_num] = True
    test_mask = mask.copy()
    test_mask[-test_nodes_num:] = True

    # construct features and labels in the same node order
    features = np.concatenate(
        [train_dataset.features, val_dataset.features, test_dataset.features],
        axis=0)
    labels = np.concatenate(
        [train_dataset.labels, val_dataset.labels, test_dataset.labels],
        axis=0)
    data = PPIDataType(graph=G,
                       train_mask=train_mask,
                       test_mask=test_mask,
                       val_mask=val_mask,
                       features=features,
                       labels=labels,
                       num_labels=121)
    return data
import uuid
from collections import namedtuple


def get_nodes_edges(tree, root_node=None):
    ''' Return all nodes and edges of the decision tree. '''
    # Index fields by name to avoid positional mistakes.
    Node = namedtuple('Node', ['id', 'label'])
    Edge = namedtuple('Edge', ['start', 'end'])

    nodes, edges = [], []

    if type(tree) is not dict:
        # Recursion base case: this subtree cannot be split any further.
        return nodes, edges

    if root_node is None:
        # No root yet, so start from a fresh node.
        # Is the tree's id actually this node's feature id?
        label = '{}: {}'.format(tree['feat_id'], tree['feat_val'])
        root_node = Node._make([uuid.uuid4(), label])  # new instance from an iterable
        nodes.append(root_node)

    for sub_tree in [tree['left'], tree['right']]:  # walk the left and right subtrees
        if type(sub_tree) is dict:  # internal (non-leaf) node
            node_label = '{}: {}'.format(
                sub_tree['feat_id'], sub_tree['feat_val'])
        else:
            node_label = '{}'.format(sub_tree)  # leaf node: its value
        sub_node = Node._make([uuid.uuid4(), node_label])  # create the child node
        nodes.append(sub_node)  # append the child node

        edge = Edge._make([root_node, sub_node])  # create the edge
        edges.append(edge)  # append the edge

        # Recurse into the subtree to collect its nodes and edges.
        sub_nodes, sub_edges = get_nodes_edges(sub_tree, root_node=sub_node)
        nodes.extend(sub_nodes)
        edges.extend(sub_edges)

    return nodes, edges
def __init__(self, *children, **options):
    self.content = None
    """Raw content of the node"""
    self.children = []
    """List of children of this node"""
    OptionsProxy = namedtuple("OptionsProxy", self.valid_options)
    self.options = OptionsProxy(**options)
    """Named tuple with the options for this node"""
    if self.is_terminal:
        if children:
            self.content = children[0]
    else:
        self.children = list(children)
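# A minimal sketch of a node class built around the __init__ above; the
# class name, option fields, and is_terminal flag are hypothetical:
from collections import namedtuple


class TextNode:
    valid_options = ('bold', 'italic')
    is_terminal = True

    def __init__(self, *children, **options):
        self.content = children[0] if children else None
        self.children = []
        OptionsProxy = namedtuple("OptionsProxy", self.valid_options)
        self.options = OptionsProxy(**options)  # rejects unknown or missing options


node = TextNode("hello", bold=True, italic=False)
assert node.content == "hello" and node.options.bold is True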
def main():
    global args
    args = parse_args()
    client_condition = not args.listen and args.port > 0
    listen_condition = args.listen and args.port > 0
    Target = namedtuple('target', ('host', 'port'))
    target = Target(args.host, args.port)
    if client_condition:
        print('Clienting')
        run_client_loop(target)
    elif listen_condition:
        print('Listening')
        run_server_loop(target)
def test_hex_tile_generator_random(self):
    RandBuiltins = namedtuple('RandBuiltins', ['builtin', 'n', 'tile_stack'])
    known_answers = [
        RandBuiltins('random', 29, None),
        RandBuiltins('random', 10, None),
        RandBuiltins('random', 13, None),
    ]
    for ans in known_answers:
        test_te = list(
            te.hex_tile_generator(builtin=ans.builtin,
                                  n=ans.n,
                                  tile_stack=ans.tile_stack))
        print('n={}, output={}'.format(ans.n, test_te))
        self.assertEqual(len(test_te), ans.n)
from collections import namedtuple


def gen_pythagorean_triplet(min_c=5, max_c=100):
    """
    Generator that yields Pythagorean triplets as tuples (a, b, c),
    where a**2 + b**2 == c**2 and a < b, for c up to max_c.
    """
    # Originally for problem 9
    # TDD
    Pythag = namedtuple('Pythag', 'a b c')
    for c in range(min_c, max_c):
        for a in range(1, c):
            for b in range(a + 1, c):  # b starts above a, so a < b always holds
                if a**2 + b**2 == c**2:
                    yield Pythag(a, b, c)
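# Minimal usage sketch: the first triplet produced is the classic (3, 4, 5).
triplets = gen_pythagorean_triplet(max_c=15)
assert next(triplets) == (3, 4, 5)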
from collections import defaultdict, namedtuple


class Dictionary(object):
    '''Build a trie dictionary out of words defined in vocab.'''

    Match = namedtuple('Match', 'prefix isword prefix_words')

    def __init__(self, vocab=None):
        defaultdict_maker = lambda: defaultdict(defaultdict_maker)
        self._prefix_tree = defaultdict(defaultdict_maker)
        self.extend(vocab or [])

    def add(self, word):
        it = self._prefix_tree
        for ch in word:
            it = it[ch]
        it[''] = True  # an empty-string key marks end of word

    def extend(self, words):
        for word in words:
            self.add(word)

    def partial_match(self, text):
        # Walk the trie; return None when text is not a prefix of any word.
        it = self._prefix_tree
        for ch in text:
            if ch not in it:
                return None
            it = it[ch]
        return self.Match(prefix=text, isword='' in it,
                          prefix_words=list(self.prefix_words(text)))

    def prefix_words(self, text):
        it = self._prefix_tree
        for i, ch in enumerate(text):
            it = it.get(ch, None)
            if it is None:
                break
            if '' in it:
                yield text[:i + 1]

    def isword(self, word):
        match = self.partial_match(word)
        return match is not None and match.isword

    def print(self):
        def print_dictionary(it=self._prefix_tree, i=0):
            for k, v in it.items():
                if v is True:
                    print('{}EOW'.format(' ' * i * 2))
                else:
                    print('{}{}:'.format(' ' * i * 2, k))
                    print_dictionary(v, i + 1)
        print_dictionary()
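# Minimal usage sketch for the trie dictionary:
d = Dictionary(['car', 'card', 'care'])
assert d.isword('car') and not d.isword('ca')
m = d.partial_match('card')
assert m.isword and m.prefix_words == ['car', 'card']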
def zipsource():
    Fo = namedtuple("Fo", ("filename", "is_dir"))
    zipsource = LocalTargetAndZipSource("", "")
    zipsource.zip = Mock()
    # is_dir holds a callable to mimic ZipInfo.is_dir()
    zipsource.zip.filelist = [
        Fo("src/", lambda: True),
        Fo("src/en/", lambda: True),
        Fo("src/en/to", lambda: False),
        Fo("src/en/tre", lambda: False),
    ]

    def namelist():
        for i in ("src/", "src/en/", "src/en/to", "src/en/tre"):
            yield i

    zipsource.zip.namelist = namelist
    zipsource.zip.read.return_value = b"data"
    return zipsource
def classroom_model(instance):
    @instance.register
    class Teacher(Document):
        name = fields.StrField(required=True)

    @instance.register
    class Course(Document):
        name = fields.StrField(required=True)
        teacher = fields.ReferenceField(Teacher, required=True, allow_none=True)

    @instance.register
    class Student(Document):
        name = fields.StrField(required=True)
        birthday = fields.DateTimeField()
        courses = fields.ListField(fields.ReferenceField(Course))

    return namedtuple('Mapping', ('Teacher', 'Course', 'Student'))(Teacher, Course, Student)
def test_bad_lazy_collection(self, dal_moke):
    # Bad `dal` attribute
    with pytest.raises(exceptions.NoCollectionDefinedError) as exc:
        class Doc7(Document):
            class Meta:
                lazy_collection = lambda: None

                class dal:
                    pass
    assert exc.value.args[0] == (
        "`dal` attribute must be a subclass of <class 'umongo.abstract.AbstractDal'>")

    # Invalid lazy_collection's dal
    LazyCollection = namedtuple('LazyCollection', ('dal', 'load'))

    class BadDal:
        pass

    def load_collection():
        pass

    with pytest.raises(exceptions.NoCollectionDefinedError) as exc:
        class Doc8(Document):
            class Meta:
                lazy_collection = LazyCollection(BadDal, load_collection)
    assert exc.value.args[0] == (
        "`dal` attribute must be a subclass of <class 'umongo.abstract.AbstractDal'>")

    # Invalid lazy_collection's load
    class GoodDal(AbstractDal):
        @staticmethod
        def io_validate_patch_schema(schema):
            pass

    class Doc9(Document):
        class Meta:
            lazy_collection = LazyCollection(GoodDal, load_collection)

    with pytest.raises(exceptions.NoCollectionDefinedError) as exc:
        Doc9.collection
    assert exc.value.args[0] == "lazy_collection didn't returned a collection"
from collections import namedtuple


def list_to_pagination(items, already_sliced=False, page=1, per_page=None,
                       total=None, **kwargs):
    """
    Convert the given list to a :class:`Pagination` object

    :param already_sliced: If true, don't slice the given items with the
        page/per_page arguments
    """
    if page < 1:
        raise ValueError('page must be > 0')
    total = total or len(items)
    per_page = per_page or total
    fields = ['items', 'page', 'per_page', 'total']
    if kwargs:
        fields += list(kwargs.keys())
    Pagination = namedtuple('Pagination', fields)
    if not already_sliced:
        items = items[(page - 1) * per_page: page * per_page]
    return Pagination(items, page, per_page, total, **kwargs)
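# Minimal usage sketch: page 2 with 3 items per page over 10 items.
pagination = list_to_pagination(list(range(10)), page=2, per_page=3)
assert pagination.items == [3, 4, 5]
assert (pagination.page, pagination.per_page, pagination.total) == (2, 3, 10)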
def classroom_model(instance):
    @instance.register
    class Teacher(Document):
        name = fields.StrField(required=True)

    @instance.register
    class Course(Document):
        name = fields.StrField(required=True)
        teacher = fields.ReferenceField(Teacher, required=True)

    @instance.register
    class Student(Document):
        name = fields.StrField(required=True)
        birthday = fields.DateTimeField()
        courses = fields.ListField(fields.ReferenceField(Course))

        class Meta:
            allow_inheritance = True

    return namedtuple('Mapping', ('Teacher', 'Course', 'Student'))(Teacher, Course, Student)
def another_test_user(app):
    TestUser = namedtuple(
        'TestUser', ['email', 'password', 'id', 'auth_headers']
    )
    with app.app_context():
        email = '*****@*****.**'
        password = '******'
        user = User(email=email,
                    password=guard.encrypt_password(password),
                    is_verified=True)
        db.session.add(user)
        db.session.commit()
        headers = {'Authorization': 'Bearer %s' % guard.encode_jwt_token(user)}
        return TestUser(email=email, password=password, id=user.id,
                        auth_headers=headers)
def _get_clusters_based_on_tree_level(lvl, link):
    LVL = namedtuple("Level", "members,lvl")
    # Each leaf starts as its own cluster at level 0.
    clusters = [LVL(members=[x], lvl=0) for x in range(link.shape[0] + 1)]
    for i in range(link.shape[0]):
        ix_1, ix_2 = int(link[i][0]), int(link[i][1])
        group = clusters[ix_1].members + clusters[ix_2].members
        max_group_number = max(clusters[ix_1].lvl, clusters[ix_2].lvl)
        clusters[ix_1] = LVL(members=clusters[ix_1].members, lvl=max_group_number)
        clusters[ix_2] = LVL(members=clusters[ix_2].members, lvl=max_group_number)
        new_lvl = max_group_number + 1
        clusters.append(LVL(members=sorted(group), lvl=new_lvl))
    # Re-index so the root sits at level 0 and the leaves at the deepest level.
    maximum_lvl = clusters[-1].lvl
    clusters = [LVL(x.members, maximum_lvl - x.lvl) for x in clusters]
    selection = [x for x in clusters if x.lvl == lvl]
    lvl -= 1
    while lvl > 0:
        candidates = [x for x in clusters if x.lvl == lvl]
        winner = [x for x in candidates
                  if not set(selection[0].members).issubset(set(x.members))]
        selection.append(winner[0])
        lvl -= 1
    return [s.members for s in selection]
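# A minimal usage sketch, assuming `link` is a scipy-style linkage matrix;
# the sample points are hypothetical and form two well-separated pairs:
import numpy as np
from scipy.cluster.hierarchy import linkage

points = np.array([[0.0], [0.1], [5.0], [5.1]])
link = linkage(points, method='single')
assert _get_clusters_based_on_tree_level(1, link) == [[0, 1], [2, 3]]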
def from_string(cls, format_str):
    """
    Create a buffer format from a string.
    Generated buffer formats are cached, so this function is not expensive to call.

    A format string is composed of N format tokens. A format token follows
    these rules:
        ({number}{format char})[{name}]
    Whitespace is ignored.

    Available format chars:
        f: float
        d: double
        b: byte
        B: unsigned byte
        s: short
        S: unsigned short
        i: int
        I: unsigned int

    Example:
        "(3i)[vertex](4f)[color]"
        "(4f)[foo] (4f)[bar] (4d)[yolo]"
    """
    format_str = format_str.replace(' ', '')
    format_str_2 = ""

    if len(format_str) == 0:
        raise BufferFormatError('Format must be present')

    # Create the tokens
    tokens, offset = [], 0
    for match in BufferFormat.pattern.finditer(format_str):
        groups = match.groups()
        _type, gl_type = BUFFER_FORMAT_TYPES_MAP.get(groups[1])
        size = int(groups[0])
        name = groups[2]

        name_match = pyvars.match(name)
        if name_match is None or name_match.span() != (0, len(name)):
            raise ValueError('"{}" is not a valid variable name'.format(name))

        token = BufferFormat.token(size=size, type=_type * size, name=name,
                                   gl_type=gl_type, offset=offset)
        tokens.append(token)
        offset += sizeof(token.type)
        format_str_2 += format_str[match.start():match.end()]

    if format_str_2 != format_str:
        raise BufferFormatError('Format string is not valid')

    bformat = super().__new__(cls)

    # Save the tokens
    bformat.tokens = tokens

    # Build the item
    bformat.item = namedtuple('V', [t.name for t in tokens])

    # Build the structure
    struct_fields = [(t.name, t.type) for t in tokens]
    bformat.struct = type('BufferStruct', (Structure,), {'_fields_': struct_fields})

    return bformat
# pylint: disable=redefined-builtin,redefined-outer-name,missing-docstring
# pylint: disable=too-few-public-methods,no-self-use,arguments-differ
# pylint: disable=dangerous-default-value,too-many-arguments
# pylint: disable=abstract-method

import os
import pickle
from collections import namedtuple
from functools import update_wrapper, wraps

import psutil
import redis

from . import config, constants

_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "currsize"])


def memory_full():
    """Check if the memory is too full for further caching."""
    current_process = psutil.Process(os.getpid())
    return (current_process.memory_percent() >
            config.MAXIMUM_CACHE_MEMORY_PERCENTAGE)


class _HashedSeq(list):
    """This class guarantees that ``hash()`` will be called no more than once
    per element. This is important because the ``lru_cache()`` will hash the
    key multiple times on a cache miss.
    """
from collections import namedtuple

from barracuda.components.ai_component import AIComponent
from barracuda.components.status_component import StatusComponent
# from barracuda.components.render_component import RenderComponent
from barracuda.tools import Vect2D


class PositionComponent(Vect2D):
    pass


RenderComponent = namedtuple('RenderComponent', ())
# AIComponent = namedtuple('AIComponent', ())

# __all__ = ('AIComponent', 'BehaviorComponent', 'BehaviorActions',
#            'StatusComponent', 'RenderComponent')

HealerComponent = namedtuple('HealerComponent', ('power', ))
AttackerComponent = namedtuple('AttackerComponent', ('power', ))
# AIComponent = namedtuple('AIComponent', ())
def __init__(self, page_number, total_pages, count):
    self.number = page_number
    Paginator = namedtuple('Paginator', ['num_pages', 'count'])
    self.paginator = Paginator(total_pages, count)
from collections import namedtuple

from rwlock import RWLock

Message = namedtuple("Message", ["text", "login", "time"])


class ChatMessageStore:
    def __init__(self):
        self.__id = 1
        self.__storage = []
        self.__rwlock = RWLock()

    def add_message(self, message):
        self.__rwlock.acquire_write()
        self.__storage.append((self.__id, message))
        self.__id += 1
        self.__rwlock.release()

    def get_messages(self, start_id=0):
        self.__rwlock.acquire_read()
        result = ([message for id_, message in self.__storage
                   if id_ >= start_id],
                  self.__id)
        self.__rwlock.release()
        return result
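# A minimal usage sketch for ChatMessageStore (the third-party rwlock module
# providing RWLock is assumed to be installed):
import time

store = ChatMessageStore()
store.add_message(Message("hello", "alice", time.time()))
messages, next_id = store.get_messages()
assert messages[0].text == "hello" and next_id == 2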
from collections import namedtuple

from ecs import Component

KeyBind = namedtuple('KeyBind', ('key', 'callback', 'type'))


class InputComponent(Component):
    def __init__(self, keymap=None):
        super().__init__()
        self.keymap = keymap or []

    def add_keybind(self, key, callback, type='default'):
        self.keymap.append(KeyBind(key, callback, type))
import csv
import threading
import time
from collections import namedtuple
from concurrent import futures

import requests

header = ["aid", "view", "danmaku", "reply", "favorite", "coin", "share"]
Video = namedtuple('Video', header)
headers = {
    'X-Requested-With': 'XMLHttpRequest',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/56.0.2924.87 Safari/537.36'
}
total = 1
result = []
lock = threading.Lock()


def run(url):
    """ Start the crawler. """
    global total
    req = requests.get(url, headers=headers, timeout=6).json()
    time.sleep(0.5)  # delay so the IP does not get banned for crawling too fast
    try:
        data = req['data']
        video = Video(
from collections import namedtuple

from blinker import signal

move_event = signal('move')
action_event = signal('action')

HealAction = namedtuple('HealAction', ('power', ))
AttackAction = namedtuple('AttackAction', ('power', ))

# MoveEvent = namedtuple('MoveEvent', ('entity', 'start_pos', 'end_pos'))
# ActionEvent = namedtuple('AttackEvent', ('entity_actor', 'entity_target', 'action'))
""" # file : bilibili.py # author : shao # date: 2017/11/3 0003 """ import csv import threading import time from concurrent import futures from functools import namedtuple import MySQLdb import requests header = ["aid", "view", "danmaku", "reply", "favorite", "coin", "share"] Video = namedtuple('Video', header) headers = { 'X-Requested-With': 'XMLHttpRequest', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/56.0.2924.87 Safari/537.36'} total = 1 result = set() lock = threading.Lock() def run(url): """ 启动爬虫 """ global total req = requests.get(url, headers=headers, timeout=6).json() time.sleep(0.6) # 延迟,避免太快 ip 被封
import re
from ctypes import byref, Structure, cast, POINTER, sizeof, c_void_p
from collections import namedtuple
from collections.abc import Sequence
from functools import lru_cache
from sys import modules

# Loaded extension names are added in here
LOADED_EXTENSIONS = []

BUFFER_FORMAT_TYPES_MAP = {
    'f': (GLfloat, GL_FLOAT),
    'd': (GLdouble, GL_DOUBLE),
    'b': (GLbyte, GL_BYTE),
    'B': (GLubyte, GL_UNSIGNED_BYTE),
    'i': (GLint, GL_INT),
    'I': (GLuint, GL_UNSIGNED_INT),
    's': (GLshort, GL_SHORT),
    'S': (GLushort, GL_UNSIGNED_SHORT)}

pyvars = re.compile(r'[_a-zA-Z][_\w]+')

map_info = namedtuple('MappingInformation', ['access', 'target', 'ptr', 'size'])


def ptr_array(arr):
    " Cast an array in a pointer "
    return cast(arr, POINTER(arr._type_))


def eval_index(index, length):
    if index < 0 and index >= (-length):
        return length + index
    elif index >= length:
        raise IndexError('Index "{}" out of bound, buffer has a length of "{}"'.format(index, length))
    elif index < (-length):
        raise IndexError('Index "{}" out of bound, buffer has a length of "{}"'.format(index, length))
    return index
def __init__(self, min, max, c_min=Color((1, 0, 0)), c_max=Color((0, 1, 0))):
    super().__init__()
    Thickness = namedtuple("Thickness", ["min", "max", "delta"])
    Col = namedtuple("Color", ["min", "max"])
    self.thickness = Thickness(min, max, max - min)
    self.color = Col(c_min, c_max)
from collections import namedtuple

# Expression AST nodes
EConst = namedtuple('EConst', 'val')
EVar = namedtuple('EVar', 'name')
EAbs = namedtuple('EAbs', ['arg', 'body'])
EApp = namedtuple('EApp', ['func', 'arg'])
ELet = namedtuple('ELet', ['var', 'val', 'body'])

# Type AST nodes
TBase = namedtuple('TBase', 'name')
TVar = namedtuple('TVar', 'name')
TFunc = namedtuple('TFunc', ['arg', 'result'])
TScheme = namedtuple('TScheme', ['bound_types', 'type'])

TInt = TBase('TInt')
TBool = TBase('TBool')


def substitute(substitution, type_):
    if isinstance(type_, TBase):
        return type_
    if isinstance(type_, TVar):
        if type_ in substitution:
            return substitution[type_]
        return type_
    if isinstance(type_, TFunc):
        return TFunc(substitute(substitution, type_.arg),
                     substitute(substitution, type_.result))
    if isinstance(type_, TScheme):
        # Bound type variables must not be substituted.
        new_subs = {find: replace for find, replace in substitution.items()
                    if find not in type_.bound_types}
        return TScheme(type_.bound_types, substitute(new_subs, type_.type))
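# Minimal usage sketch: substitute a type variable inside a function type.
a = TVar('a')
func_type = TFunc(a, TFunc(a, TBool))
assert substitute({a: TInt}, func_type) == TFunc(TInt, TFunc(TInt, TBool))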
__author__ = 'dm'

from collections import namedtuple

Point = namedtuple('Point', 'x y')
IJPoint = namedtuple('IJPoint', 'i j')
Defense+3   80     0       3
"""
rings = [Item(ring.split()) for ring in rings.strip().splitlines()]


def shopping():
    from itertools import combinations as comb
    for w in list(comb(weapons, 1)):
        for a in list(comb(armors, 0)) + list(comb(armors, 1)):
            for r in list(comb(rings, 0)) + list(comb(rings, 1)) + list(comb(rings, 2)):
                yield list(w) + list(a) + list(r)


from collections import namedtuple

Person = namedtuple('Person', ['hp', 'attack', 'defense'])


def defeat(p1: Person, p2: Person):
    # Simulate alternating attacks (p1 strikes first); each hit deals at
    # least 1 damage. Returns True when p1 defeats p2.
    while p1.hp > 0 and p2.hp > 0:
        hit = max(1, p1.attack - p2.defense)
        p2 = p2._replace(hp=p2.hp - hit)
        p1 = p1._replace(hp=p1.hp - max(1, p2.attack - p1.defense))
    return p2.hp <= 0


# Boss
# Hit Points: 103
# Damage: 9
# Armor: 2
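# Minimal usage sketch with the example fight from Advent of Code 2015,
# day 21: a player with 8 hp, 5 damage, 5 armor beats a boss with 12 hp,
# 7 damage, 2 armor.
player = Person(hp=8, attack=5, defense=5)
boss = Person(hp=12, attack=7, defense=2)
assert defeat(player, boss)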
Memory-aware LRU Cache function decorator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

A modification of the builtin ``functools.lru_cache`` decorator that takes an
additional keyword argument, ``use_memory_up_to``. The cache is considered
full if there are fewer than ``use_memory_up_to`` bytes of memory available.

If ``use_memory_up_to`` is set, then ``maxsize`` has no effect.

Uses the ``psutil`` module to get the available memory.
"""

import psutil

from collections import namedtuple
from functools import update_wrapper
from threading import RLock

_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])


class _HashedSeq(list):
    """ This class guarantees that hash() will be called no more than once
        per element. This is important because the lru_cache() will hash
        the key multiple times on a cache miss.
    """

    __slots__ = 'hashvalue'

    def __init__(self, tup, hash=hash):
        self[:] = tup
        self.hashvalue = hash(tup)

    def __hash__(self):
        return self.hashvalue
import subprocess
import shutil
import json
import wave
import os
import io
import logging
from collections import namedtuple

from synthesizer.sample import Sample

__all__ = ["AudiofileToWavStream", "StreamMixer", "VolumeFilter",
           "EndlessFramesFilter", "SampleStream"]

log = logging.getLogger("synthesizer.streaming")

AudioFormatProbe = namedtuple(
    "AudioFormatProbe", ["rate", "channels", "sampformat", "fileformat", "duration"])


class AudiofileToWavStream(io.RawIOBase):
    """
    Streams WAV PCM audio data from the given sound source file.
    If the file is not already a .wav, and/or you want to resample it,
    ffmpeg/ffprobe are used to convert it in the background.
    For HQ resampling, ffmpeg has to be built with libsoxr support.

    Input: audio file of any supported format
    Output: stream of audio data in WAV PCM format
    """
    ffmpeg_executable = "ffmpeg"
    ffprobe_executable = "ffprobe"
)
from _freestyle import (
    blendRamp,
    evaluateColorRamp,
    evaluateCurveMappingF,
)

import time

from mathutils import Vector
from math import pi, sin, cos, acos, radians
from itertools import cycle, tee
from collections import namedtuple

# named tuple primitives used for storing data.
Thickness = namedtuple("Thickness", ["min", "max", "delta"])
Range = namedtuple("Range", ["min", "max", "delta"])
Value = namedtuple("Value", ["min", "max", "delta"])


class ColorRampModifier(StrokeShader):
    """Primitive for the color modifiers."""

    def __init__(self, blend, influence, ramp):
        StrokeShader.__init__(self)
        self.blend = blend
        self.influence = influence
        self.ramp = ramp

    def evaluate(self, t):
        col = evaluateColorRamp(self.ramp, t)
        return col.xyz  # omit alpha
@lru_cache(maxsize=32)
def phase_to_direction(length):
    """
    Returns a list of tuples each containing:
    - the phase
    - a Vector with the values of the cosine and sine of 2pi * phase (the direction)
    """
    results = list()
    for i in range(length):
        phase = i / (length - 1)
        results.append((phase, Vector((cos(2 * pi * phase), sin(2 * pi * phase)))))
    return results


# A named tuple primitive used for storing data that has an upper and
# lower bound (e.g., thickness, range and certain values)
BoundedProperty = namedtuple("BoundedProperty", ["min", "max", "delta"])


class BoundingBox:
    """Object representing a bounding box consisting out of 2 2D vectors"""

    __slots__ = (
        "minimum",
        "maximum",
        "size",
        "corners",
    )

    def __init__(self, minimum: Vector, maximum: Vector):
        self.minimum = minimum
        self.maximum = maximum