def compute_up(expr, args, **kwargs):
    return odo(
        compute_up(
            expr,
            VarArgs(map(_pd_from_dshape, args, map(discover, expr.args))),
            **kwargs),
        list,
    )
def card_entry(info):
    create_card_node = lambda x: graph.create(
        Node('Card', slot=x[0], name=x[1]))[0]
    mark, result, olt = info
    with open(log_file, 'a') as logging:
        logging.write("{0}:{1}\n".format(olt, mark))
    if result and mark == 'success':
        ip = olt.split(',')[0]
        node = graph.find_one('Olt', property_key='ip', property_value=ip)
        card_nodes = map(create_card_node, result)
        list(map(lambda x: graph.create((node, 'HAS', x)), card_nodes))
def card_entry(info):
    create_card_node = lambda x: graph.create(
        Node('Card', slot=x[0], name=x[1]))[0]
    mark, cards, switch = info
    with open(log_file, 'a') as flog:
        flog.write("{0}:{1}\n".format(switch, mark))
    if cards and mark == 'success':
        ip = switch.split(',')[0]
        switch_node = graph.find_one(
            'Switch', property_key='ip', property_value=ip)
        card_nodes = map(create_card_node, cards)
        list(map(lambda x: graph.create((switch_node, 'HAS', x)), card_nodes))
def deepmap(func, data, n=1):
    """
    >>> inc = lambda x: x + 1
    >>> list(deepmap(inc, [1, 2], n=1))
    [2, 3]
    >>> list(deepmap(inc, [(1, 2), (3, 4)], n=2))
    [(2, 3), (4, 5)]
    """
    if n == 1:
        return map(func, data)
    else:
        return map(compose(tuple, partial(deepmap, func, n=n - 1)), data)
def get_input_example(okform_dir, malformed_dir, id_):
    cap_title_path = str(Path(malformed_dir) / Path(id_)) + ".auxil"
    doc_path = str(Path(okform_dir) / Path(id_))
    _, docs = separate_title_from_body(doc_path + ".auxil", doc_path + ".paf")

    with codecs.open(cap_title_path, "r", "utf8") as f:
        for l in f:
            pass  # after the loop, ``l`` holds the last line of the file

    titles = list(map(convert_sentence_auxil_to_request,
                      json.loads(l)["sents"]))
    doc_sents = list(map(convert_sentence_auxil_to_request, docs))
    return {"capitalizedSentences": titles, "otherSentences": doc_sents}
def compute_up(t, lhs, rhs, **kwargs):
    """ Join Operation for Python Streaming Backend

    Note that a pure streaming Join is challenging/impossible because any
    row in one sequence might connect to any row in the other, requiring
    simultaneous complete access.

    As a compromise, this approach fully realizes the LEFT sequence while
    allowing the RIGHT sequence to stream, so always put your bigger
    collection on the RIGHT side of the Join.
    """
    if lhs == rhs:
        lhs, rhs = itertools.tee(lhs, 2)

    on_left = [t.lhs.fields.index(col) for col in listpack(t.on_left)]
    on_right = [t.rhs.fields.index(col) for col in listpack(t.on_right)]

    left_default = (None if t.how in ('right', 'outer')
                    else toolz.itertoolz.no_default)
    right_default = (None if t.how in ('left', 'outer')
                     else toolz.itertoolz.no_default)

    pairs = toolz.join(on_left, lhs, on_right, rhs,
                       left_default=left_default,
                       right_default=right_default)

    assemble = pair_assemble(t, on_left, on_right)

    return map(assemble, pairs)
def sql_to_iterator(t, **kwargs):
    engine = t.bind
    with engine.connect() as conn:
        result = conn.execute(sa.sql.select([t]))
        result = map(tuple, result)  # Turn RowProxy into tuple
        for item in result:
            yield item
def translate(self, trans_text, src="auto", dest="en"):
    # Details on supported input types can be found here:
    # https://cloud.google.com/translate/docs/supported-formats
    location = "global"
    parent = f"projects/{self.project}/locations/{location}"

    send_list = trans_text
    if isinstance(trans_text, str):
        # wrap a single string so the batching code always receives a list
        send_list = [trans_text]

    request_preps = self.make_batches(send_list, src, dest, parent)
    responses = toolz.map(
        lambda req: self.tclient.translate_text(request=req), request_preps)

    result = []
    for response in responses:
        # Collect the translation for each input text provided
        for translation in response.translations:
            # print("Translated text: {}".format(translation.translated_text))
            result.append(TranslatedText(translation, src, dest))
    return result
def interface_check():
    clear_log()
    cmd = "match(s:Switch) where s.model='T64G' or s.model='S9306' or s.model='S9303' or s.model='S8905' return s.ip,s.model"
    # cmd = "match(s:Switch) where s.model='S9306' or s.model='s9303' return s.ip,s.model limit 2"
    nodes = graph.cypher.execute(cmd)
    switchs = [(x[0], x[1]) for x in nodes]
    list(map(compose(output_interface, get_interface), switchs))
def compute_up(t, lhs, rhs, **kwargs):
    """ Join Operation for Python Streaming Backend

    Note that a pure streaming Join is challenging/impossible because any
    row in one sequence might connect to any row in the other, requiring
    simultaneous complete access.

    As a compromise, this approach fully realizes the LEFT sequence while
    allowing the RIGHT sequence to stream, so always put your bigger
    collection on the RIGHT side of the Join.
    """
    if lhs == rhs:
        lhs, rhs = itertools.tee(lhs, 2)

    on_left = [t.lhs.fields.index(col) for col in listpack(t.on_left)]
    on_right = [t.rhs.fields.index(col) for col in listpack(t.on_right)]

    left_default = (None if t.how in ('right', 'outer')
                    else toolz.itertoolz.no_default)
    right_default = (None if t.how in ('left', 'outer')
                     else toolz.itertoolz.no_default)

    pairs = toolz.join(on_left, lhs, on_right, rhs,
                       left_default=left_default,
                       right_default=right_default)

    assemble = pair_assemble(t)

    return map(assemble, pairs)
def extract_and_capitalize_headlines_from_corpus(corpus_dir, docids):
    """
    Iterate through all the files in `corpus_dir`,
    extract the headlines, capitalize them and return them

    Parameters:
    ---------------
    corpus_dir: string

    docids: list of string
        the documents to be processed

    Returns:
    --------------
    generator of (docid, headlines): (str, list<list<str>>)
    """
    get_tokens = partial(map, partial(get_in, ["token"]))
    get_features = partial(get_in, ["features"])

    make_capitalized_title_new = lambda words: make_capitalized_title(
        title_words=words)

    for docid in docids:
        p = Path(corpus_dir) / Path(docid)
        auxil_p = p.with_suffix(".auxil")
        paf_p = p.with_suffix(".paf")
        if auxil_p.exists() and paf_p.exists():
            try:
                titles, _ = separate_title_from_body(str(auxil_p), str(paf_p))
            except Exception as e:
                yield (e, None)
                continue  # skip documents that fail to parse
            # pipeline:
            # -> get features
            # -> get tokens
            # -> capitalize headline
            yield (None, (p.name,
                          list(map(compose(make_capitalized_title_new,
                                           get_tokens,
                                           get_features),
                                   titles))))
def transition_from_buy(game_state, game_id):
    if _is_game_over(game_state):
        stock.handle_game_end(game_state)
        game_state.current_action_type = models.ActionType.GAME_OVER
        game_state.current_action_player = None
        game_state.current_turn_player = None
        game_state.current_action_details = list(
            toolz.map(
                toolz.first,
                sorted(game_state.money_by_player.items(),
                       key=toolz.second,
                       reverse=True)))
        return game_state

    global_tiles = persistance.get_global_tiles(game_id)
    new_tile = tiles.draw_tile(global_tiles)
    game_state.tiles_remaining = len(global_tiles)
    if new_tile:
        persistance.deal_tile_to_player(game_id,
                                        game_state.current_turn_player,
                                        new_tile)

    next_player = _get_next_player(game_state)
    game_state.current_turn_player = next_player
    game_state.current_action_player = next_player
    game_state.current_action_type = models.ActionType.PLACE_TILE
    return game_state
def clstrs_2_setfiles(clstr2docmap):
    """ Creates new SetFiles for each cluster.

    Accepts a dict with cluster number as the key and lists of docids as
    the values.
    """
    # Python 2 code: relies on the built-in apply() and dict.iteritems()
    return tlz.map(lambda t: apply(clstr_2_setfile, t),
                   clstr2docmap.iteritems())
def _insert_set_members_sql(artifact_set):
    pairs = [(artifact_set.id, id) for id in artifact_set.artifact_ids]
    return """
    INSERT INTO artifact_set_members (set_id, artifact_id)
    VALUES {}
    ON CONFLICT DO NOTHING
    """.strip().format(",\n".join(t.map(str, pairs)))
def stream_decompress(fmt, data):
    """ Decompress a block of compressed bytes into a stream of strings """
    if fmt == 'gz':
        return gzip.GzipFile(fileobj=BytesIO(data))
    if fmt == 'bz2':
        return bz2_stream(data)
    else:
        return map(bytes.decode, BytesIO(data))
def set_attributes(self):
    list(map(
        lambda property_name: setattr(
            type(self.mocked_object_label_a),
            property_name,
            mock.PropertyMock(name=property_name, return_value=1)
        ),
        self.attributes
    ))
def __new__(cls, artifact_ids, name=None, created_at=None, id=None):
    artifact_ids = t.map(_artifact_id, artifact_ids)
    ids = frozenset(artifact_ids)
    if id:
        set_id = id
    else:
        set_id = hash(ids)
    created_at = created_at if created_at else datetime.utcnow()
    return super(ArtifactSet, cls).__new__(cls, set_id, ids, created_at, name)
def format_sudoku(sudoku):
    def format_row(row):
        return '| ' + ' | '.join(map(lambda s: str(s) if s else ' ', row)) + ' |'

    row_delim = '+---+---+---+---+---+---+---+---+---+'
    formatted_rows = map(format_row, sudoku)
    print_lines = (row_delim, *interpose(row_delim, formatted_rows), row_delim)
    return '\n'.join(print_lines)
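# Usage sketch (not part of the original source): it assumes `interpose` is
# toolz.interpose and that the grid is a 9x9 list of lists where 0 (or None)
# marks an empty cell.
if __name__ == '__main__':
    grid = [[0] * 9 for _ in range(9)]
    grid[0][0], grid[4][4] = 5, 3
    print(format_sudoku(grid))  # digits for filled cells, blanks for empty ones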
def resource_json(uri, open=open):
    f = open(uri)
    try:
        data = json.load(f)
        f.close()
        return data
    except ValueError:
        f = open(uri)
        data = map(json.loads, f)
        return data
def resource_json(uri, open=open, **kwargs):
    f = open(uri)
    try:
        data = json.load(f)
        f.close()
        return data
    except ValueError:
        f = open(uri)
        data = map(json.loads, f)
        return data
def compute_up(t, seq, **kwargs):
    try:
        row = first(seq)
    except StopIteration:
        return ()
    seq = concat([[row], seq])  # re-add row to seq

    if isinstance(row, list):
        seq = map(tuple, seq)

    return unique(seq)
def svlan_entry(lock, info):
    cmd = "match (n:Olt{ip:{ip}}) create unique (n)-[:USE{port:{port}}]-(:Svlan{value:{value}})"
    mark, result, olt = info
    ip = olt.split(',')[0]
    with lock:
        with open(log_file, 'a') as logging:
            logging.write("{0}:{1}\n".format(olt, mark))
    if result and mark == 'success':
        with lock:
            list(map(lambda x: graph.cypher.execute(
                cmd, {"ip": ip, "port": x[0], "value": x[1]}), result))
def test_averages(self):
    self.set_attributes()
    list(
        map(
            lambda property_name: self.assertEqual(
                getattr(self.confusion_matrix, "average_" + property_name), 1
            ),
            self.attributes
        )
    )
def etl_directory(path, session):
    for filename in filter(lambda s: s.endswith('.stp'), listdir(path)):
        logging.info("Reading APC-AVL from %s.", filename)
        with open(os.path.join(path, filename)) as fp:
            archive = StpFile(fp)
            logging.info("Saving created objects...")
            for rows in partition_all(100000,
                                      map(to_model, archive.readlines())):
                session.execute(ApcAvlRecord.__table__.insert().values(rows))
            logging.info("Committing.")
            session.commit()
def stringify_list(rows, separator='\t'):
    """Transform input to tabular data ready for writing to output

    Args:
        rows (list): list of data values
        separator (str, optional): delimiter used to stringify rows

    Yields:
        str: stringified line of rows
    """
    return (separator.join(toolz.map(text_type, row)) for row in rows)
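# Usage sketch (not part of the original source): it assumes text_type is
# six.text_type (plain str on Python 3), so each row becomes one
# tab-separated string.
if __name__ == '__main__':
    for line in stringify_list([[1, 'a'], [2, 'b']]):
        print(line)  # "1\ta" then "2\tb"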
def select_to_iterator(sel, dshape=None, **kwargs):
    engine = sel.bind  # TODO: get engine from select
    with engine.connect() as conn:
        result = conn.execute(sel)
        if dshape and isscalar(dshape.measure):
            result = pluck(0, result)
        else:
            result = map(tuple, result)  # Turn RowProxy into tuple
        for item in result:
            yield item
def map(func, iterable, chunksize=1, processes=2):
    if processes == 1:
        import toolz
        return list(toolz.map(func, iterable))
    else:
        import pathos.multiprocessing as mp
        pool = mp.Pool(processes=processes)
        result = pool.map(func, iterable, chunksize=chunksize)
        pool.close()
        pool.join()
        return result
def deepmap(func, *data, **kwargs):
    """
    >>> inc = lambda x: x + 1
    >>> list(deepmap(inc, [1, 2], n=1))
    [2, 3]
    >>> list(deepmap(inc, [(1, 2), (3, 4)], n=2))
    [(2, 3), (4, 5)]

    Works on variadic args too

    >>> add = lambda x, y: x + y
    >>> list(deepmap(add, [1, 2], [10, 20], n=1))
    [11, 22]
    """
    n = kwargs.pop('n', 1)
    if n == 0:
        return func(*data)
    if n == 1:
        return map(func, *data)
    else:
        return map(compose(tuple, partial(deepmap, func, n=n - 1)), *data)
def capture_set(labels=None, initial_set=None):
    if initial_set:
        initial = set(t.map(_artifact_id, initial_set))
    else:
        initial = set()
    repo = get_default_repo()
    spy = RepoSpy(repo)
    with using_repo(spy):
        result = []
        yield result
        artifact_ids = spy.artifact_ids | initial
        result.append(ArtifactSet(artifact_ids, labels=labels).put(repo))
def Matrix(text, type='flex'):
    """Displays a table or flexbox style grid of values.

    For flexbox mode, make sure that there are an equal number of | in each
    row. Table mode is more flexible. Blank lines are ignored.
    """
    return t.pipe(
        text.split('\n'),
        tc.filter(lambda x: x.strip() != ''),
        tc.map(lambda x: re.split(
            ' ?' + Patterns.escape.value.format('\|') + ' ?', x)),
        tc.map(lambda x: ['div' if type == 'flex' else 'tr',
                          {'style': {'display': 'flex'}
                           if type == 'flex' else {}},
                          *t.map(lambda y: ['span' if type == 'flex' else 'td',
                                            {'style': {'flex': 1}}
                                            if type == 'flex' else {},
                                            y],
                                 x)]),
        tc.cons('div.matrix.matrix-flex'
                if type == 'flex' else 'table.matrix.matrix-table'),
        list)
def compute_up(t, seq, **kwargs):
    if t.on:
        raise NotImplementedError(
            "python backend cannot specify what columns to distinct on")
    try:
        row = toolz.first(seq)
    except StopIteration:
        return ()
    seq = concat([[row], seq])  # re-add row to seq
    if isinstance(row, list):
        seq = map(tuple, seq)
    return unique(seq)
def compute_down(expr, data, map=map, **kwargs):
    leaf = expr._leaves()[0]

    (chunk, chunk_expr), (agg, agg_expr) = split(leaf, expr)

    parts = list(map(curry(compute_chunk, chunk, chunk_expr), data))

    if isinstance(parts[0], np.ndarray):
        intermediate = np.concatenate(parts)
    elif isinstance(parts[0], pd.DataFrame):
        intermediate = pd.concat(parts)
    elif isinstance(parts[0], (Iterable, Iterator)):
        intermediate = concat(parts)

    return compute(agg_expr, {agg: intermediate})
def hostname_check():
    clear_log()
    nodes = graph.find('Olt')
    # nodes = graph.find('Olt', property_key='ip', property_value='172.18.0.46')
    olts = [(x['ip'], x['company']) for x in nodes]
    pool = Pool(16)
    lock = Manager().Lock()
    func = partial(hostname_entry, lock)
    list(pool.map(compose(func, get_hostname), olts))
    pool.close()
    pool.join()
    ip_hostname = (x.split(',') for x in open(result_file))
    cmd = "match (n:Olt) where n.ip={ip} set n.hostname={hostname}"
    list(map(lambda x: graph.cypher.execute(
        cmd, ip=x[0], hostname=x[1]), ip_hostname))
def traffic_check():
    # clear_log()
    # cmd = 'match(n:Olt)-[*]-(p:Port) where n.company="hw" return n.ip,n.company,collect(p.name)'
    # nodes = graph.cypher.execute(cmd)
    # olts = [(x[0], x[1], x[2]) for x in nodes]
    # pool = Pool(16)
    # lock = Manager().Lock()
    # func = partial(traffic_output, lock)
    # list(pool.map(compose(func, get_traffic), olts))
    # pool.close()
    # pool.join()
    records = (x.split(',') for x in open(result_file))
    cmd = "match (n:Olt)-[*]-(p:Port) where n.ip={ip} and p.name={name} set p.in_traffic={i},p.out_traffic={o}"
    list(map(lambda x: graph.cypher.execute(
        cmd, ip=x[0], name=x[1], i=x[2], o=x[3]), records))
def run(n, x, *goals):
    """ Run a logic program.  Obtain n solutions to satisfy goals.

    n     - number of desired solutions.  See ``take``
            0 for all
            None for a lazy sequence
    x     - Output variable
    goals - a sequence of goals.  All must be true

    >>> from logpy import run, var, eq
    >>> x = var()
    >>> run(1, x, eq(x, 1))
    (1,)
    """
    results = map(partial(reify, x), goaleval(lall(*goals))({}))
    return take(n, unique(results, key=multihash))
def merge_intervals(intervals):
    """Return the ends of a list of intervals.

    Args:
        intervals (list): list of intervals

    Returns:
        tuple of int: the beginning and end of the combined interval
    """
    if len(intervals) == 0:
        raise ValueError("'intervals' must contain at least one interval")
    overall_start = intervals[0].start
    overall_end = max(map(end_getter, intervals))
    return overall_start, overall_end
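# Usage sketch (not part of the original source): it assumes the intervals
# expose .start/.end attributes and that the module-level end_getter used
# above is something like operator.attrgetter('end').
if __name__ == '__main__':
    from collections import namedtuple
    from operator import attrgetter

    Interval = namedtuple('Interval', ['start', 'end'])
    end_getter = attrgetter('end')

    print(merge_intervals([Interval(1, 5), Interval(3, 12), Interval(6, 9)]))
    # -> (1, 12)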
def run(n, x, *goals):
    """ Run a logic program.  Obtain n solutions to satisfy goals.

    n     - number of desired solutions.  See ``take``
            0 for all
            None for a lazy sequence
    x     - Output variable
    goals - a sequence of goals.  All must be true

    >>> from kanren import run, var, eq
    >>> x = var()
    >>> run(1, x, eq(x, 1))
    (1,)
    """
    results = map(partial(reify, x), goaleval(lall(*goals))({}))
    return take(n, unique(results, key=multihash))
def list_files(paths: Union[Sequence[Union[str, Path]], str, Path]):
    if isinstance(paths, str) or isinstance(paths, Path):
        if os.path.isdir(paths):
            return list(
                tz.concat((os.path.join(dp, f) for f in fn)
                          for dp, dn, fn in os.walk(paths)))
        elif os.path.isfile(paths):
            return [paths]
        else:
            raise ValueError(
                f"Input argument {paths} is not a path or a directory")
    elif isinstance(paths, Sequence):
        sortd = sorted(paths, key=os.path.isdir)
        files, dirs = tuple(tz.partitionby(os.path.isdir, sortd))
        return list(tz.concatv(files, *tz.map(list_files, dirs)))
def zhongji_check():
    clear_log()
    nodes = graph.find('Olt')
    # nodes = graph.find('Olt', property_key='ip', property_value='172.18.0.46')
    olts = [(x['ip'], x['company']) for x in nodes]
    pool = Pool(16)
    lock = Manager().Lock()
    func = partial(zhongji_entry, lock)
    list(pool.map(compose(func, get_zhongji), olts))
    pool.close()
    pool.join()
    ports = (x.split(',') for x in open(result_file))
    cmd = """match(n: Olt) where n.ip = {ip}
    merge(n) - [:HAS]->(m: Etrunk{name: {sm}})
    merge(m) - [:Include]->(p: Port{name: {interface}})"""
    list(map(lambda x: graph.cypher.execute(
        cmd, ip=x[0], sm=x[1], interface=x[2]), ports))
def resource_json_gzip(uri, encoding='utf-8', **kwargs):
    f = gzip.open(uri)
    lines = (line.decode(encoding) for line in gzip.open(uri))
    try:
        one = json.loads(next(lines))
        two = json.loads(next(lines))
    except StopIteration:  # single json element
        f.close()
        return one
    except ValueError:  # single multi-line element
        f.close()
        f = gzip.open(uri)
        o = json.loads(f.read().decode(encoding))
        f.close()
        return o
    # JSON streaming case
    return concat([[one, two], map(json.loads, lines)])
def hw_gpon():
    clear_log()
    cmd = 'match(n:Olt{company:\'hw\'})--(c:Card) where c.name contains \'GPBD\' return distinct n.ip'
    nodes = graph.cypher.execute(cmd)
    olts = [x[0] for x in nodes]
    # nodes = graph.find('Olt')
    # nodes = graph.find('Olt', property_key='ip', property_value='172.18.0.46')
    command = 'xpon ont-interoperability-mode gpon tcont-pq-priority-reverse enable'
    doSomething = partial(Huawei.doSomething,
                          username=hw_olt_username,
                          password=hw_olt_password,
                          command=command)

    def output(info):
        mark, ip = info
        with open(log_file, 'a') as logging:
            logging.write("{ip}:{mark}\n".format(ip=ip, mark=mark))

    list(map(compose(output, doSomething), olts))
def coerce(dshape, item):
    blocks = partition_all(1024, item)
    return chain.from_iterable(map(partial(coerce, dshape), blocks))
def compute_one(t, seq, **kwargs):
    return map(rowfunc(t), seq)