def ccds_to_bed(ccds_stream):
    """Convert CCDS dump to Chanjo-style BED stream.

    Main entry point for the default Chanjo converter (ccds). It converts
    a sorted (start, chrom) CCDS database to the Chanjo BED-format.

    Args:
        ccds_stream (file): file handle to read CCDS lines from

    Yields:
        Interval: interval with merged block and superblock ids
    """
    return pipe(
        ccds_stream,
        filter(grep('Public')),                           # keep only Public tx
        map(text_type.rstrip),                            # strip \n and spaces
        map(split(sep='\t')),                             # split into list
        map(extract_intervals),                           # convert to Interval
        concat,                                           # flatten
        map(rename_sex_interval),                         # rename sex contigs
        partial(lazy_groupby, key=attrgetter('contig')),  # group by contig
        pluck(1),                                         # extract second item
        map(groupby(attrgetter('name'))),                 # non-lazy group by id
        map(valmap(merge_related_elements)),              # merge grouped intervals
        map(itervalues),                                  # extract values
        map(partial(sorted, key=attrgetter('start'))),    # sort by start pos
        concat                                            # flatten
    )

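# A minimal sketch of the group-and-merge idiom at the heart of the pipeline
# above, using plain toolz; `Iv` and `merge_spanning` are stand-ins for the
# real Interval and merge_related_elements helpers, not the Chanjo API:
from collections import namedtuple
from operator import attrgetter
from toolz import groupby, valmap

Iv = namedtuple('Iv', ['contig', 'name', 'start', 'end'])

def merge_spanning(ivs):
    # collapse intervals sharing a name into one spanning interval
    return Iv(ivs[0].contig, ivs[0].name,
              min(iv.start for iv in ivs), max(iv.end for iv in ivs))

ivs = [Iv('1', 'tx1', 10, 20), Iv('1', 'tx1', 15, 30), Iv('1', 'tx2', 5, 8)]
valmap(merge_spanning, groupby(attrgetter('name'), ivs))
# {'tx1': Iv(contig='1', name='tx1', start=10, end=30),
#  'tx2': Iv(contig='1', name='tx2', start=5, end=8)}
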
def _log_remove_from_clb(steps):
    lbs = groupby(lambda s: s.lb_id, steps)
    effs = [
        cf_msg('convergence-remove-clb-nodes',
               lb_id=lb,
               nodes=sorted(concat(s.node_ids for s in lbsteps)))
        for lb, lbsteps in sorted(lbs.iteritems())]
    return parallel(effs)

def optimize_steps(clb_steps):
    steps_by_lb = groupby(lambda s: s.lb_id, clb_steps)
    return [
        step_class(**{
            'lb_id': lb_id,
            attr_name: pset(concat(getattr(s, attr_name) for s in steps))})
        for lb_id, steps in steps_by_lb.iteritems()
    ]

def _log_bulk_rcv3(event, steps):
    by_lbs = groupby(lambda s: s[0], concat(s.lb_node_pairs for s in steps))
    effs = [
        cf_msg(event, lb_id=lb_id, servers=sorted(p[1] for p in pairs))
        for lb_id, pairs in sorted(by_lbs.iteritems())
    ]
    return parallel(effs)

def _(steps):
    by_cfg = groupby(lambda s: s.server_config, steps)
    effs = [
        cf_msg(
            'convergence-create-servers',
            num_servers=len(cfg_steps),
            server_config=dict(cfg))
        # We sort the items with `thaw` because PMap does not support
        # comparison
        for cfg, cfg_steps in sorted(by_cfg.iteritems(), key=thaw)]
    return parallel(effs)

def _log_set_metadata(steps):
    by_kv = groupby(lambda s: (s.key, s.value), steps)
    effs = [
        cf_msg(
            'convergence-set-server-metadata',
            servers=sorted(s.server_id for s in kvsteps),
            key=key,
            value=value)
        for (key, value), kvsteps in sorted(by_kv.iteritems())
    ]
    return parallel(effs)

def _log_change_clb_node(steps):
    lbs = groupby(lambda s: (s.lb_id, s.condition, s.weight, s.type), steps)
    effs = [
        cf_msg('convergence-change-clb-nodes',
               lb_id=lb,
               nodes=sorted([s.node_id for s in grouped_steps]),
               condition=condition.name,
               weight=weight,
               type=node_type.name)
        for (lb, condition, weight, node_type), grouped_steps
        in sorted(lbs.iteritems())
    ]
    return parallel(effs)

def limit_steps_by_count(steps, step_limits):
    """
    Limit step count by type.

    :param steps: An iterable of steps.
    :param step_limits: A dict mapping step classes to their maximum
        allowable count. Classes not present in this dict have no limit.
    :return: The input steps, limited per type.
    :rtype: pbag
    """
    return pbag(concat(typed_steps[:step_limits.get(cls)]
                       for (cls, typed_steps)
                       in groupby(type, steps).iteritems()))

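# The slicing above relies on `step_limits.get(cls)` returning None for
# unlimited classes: `seq[:None]` is the whole sequence. A small sketch with
# dummy step classes (for illustration only, not the real step types):
from toolz import groupby

class CreateServer(object): pass
class DeleteServer(object): pass

steps = [CreateServer(), CreateServer(), CreateServer(), DeleteServer()]
limits = {CreateServer: 2}  # DeleteServer has no limit
limited = [s for cls, ss in groupby(type, steps).items()
           for s in ss[:limits.get(cls)]]
# two CreateServer steps and the single DeleteServer step survive
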
def subsample(graphs, targets, subsample_size=100):
    """Subsample graphs evenly per class, up to subsample_size in total."""
    tg = zip(targets, graphs)
    num_classes = len(set(targets))
    class_graphs = groupby(lambda x: first(x), tg)
    subgraphs = []
    subtargets = []
    for y in class_graphs:
        # integer division so the slice index is an int
        class_subgraphs = class_graphs[y][:subsample_size // num_classes]
        class_subgraphs = [second(x) for x in class_subgraphs]
        subgraphs += class_subgraphs
        subtargets += [y] * len(class_subgraphs)
    subgraphs, subtargets = paired_shuffle(subgraphs, subtargets)
    return list(subgraphs), list(subtargets)

def broadcast_dimensions(argpairs, numblocks, sentinels=(1, (1,))):
    """ Find block dimensions from arguments

    Parameters
    ----------
    argpairs: iterable
        name, ijk index pairs
    numblocks: dict
        maps {name: number of blocks}
    sentinels: iterable (optional)
        values for singleton dimensions

    Examples
    --------
    >>> argpairs = [('x', 'ij'), ('y', 'ji')]
    >>> numblocks = {'x': (2, 3), 'y': (3, 2)}
    >>> broadcast_dimensions(argpairs, numblocks)
    {'i': 2, 'j': 3}

    Supports numpy broadcasting rules

    >>> argpairs = [('x', 'ij'), ('y', 'ij')]
    >>> numblocks = {'x': (2, 1), 'y': (1, 3)}
    >>> broadcast_dimensions(argpairs, numblocks)
    {'i': 2, 'j': 3}

    Works in other contexts too

    >>> argpairs = [('x', 'ij'), ('y', 'ij')]
    >>> d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
    >>> broadcast_dimensions(argpairs, d)
    {'i': 'Hello', 'j': (2, 3)}
    """
    # List like [('i', 2), ('j', 1), ('i', 1), ('j', 2)]
    L = concat([zip(inds, dims)
                for (x, inds), (x, dims)
                in join(first, argpairs, first, numblocks.items())])
    g = groupby(0, L)
    g = dict((k, set([d for i, d in v])) for k, v in g.items())
    g2 = dict((k, v - set(sentinels) if len(v) > 1 else v)
              for k, v in g.items())
    if g2 and not set(map(len, g2.values())) == set([1]):
        raise ValueError("Shapes do not align %s" % g)
    return valmap(first, g2)

def log_steps(steps):
    """
    Log some steps (to cloud feeds).

    In general this tries to reduce the number of Log calls to a reasonable
    minimum, based on how steps are usually used. For example, multiple
    :obj:`SetMetadataItemOnServer` steps that are setting the same key/value
    on a server will be merged into one Log call that shows all the servers
    being affected.
    """
    steps_by_type = groupby(type, steps)
    effs = []
    for step_type, typed_steps in steps_by_type.iteritems():
        if step_type in _loggers:
            effs.append(_loggers[step_type](typed_steps))
    return parallel(effs)

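# A minimal sketch of the dispatch-by-type grouping used above; the logger
# registry here is a stand-in for the module's `_loggers` mapping:
from toolz import groupby

_demo_loggers = {str: lambda xs: ('log-strings', sorted(xs)),
                 int: lambda xs: ('log-ints', sorted(xs))}
items = ['b', 1, 'a', 3, 2.5]  # the float has no logger and is skipped
effs = [_demo_loggers[t](xs)
        for t, xs in groupby(type, items).items()
        if t in _demo_loggers]
# [('log-strings', ['a', 'b']), ('log-ints', [1, 3])]
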
def balance(graphs, targets, estimator, ratio=2):
    """Balance classes by down-sampling the majority class to ratio x the minority count."""
    class_counts = Counter(targets)
    majority_class = None
    max_count = 0
    minority_class = None
    min_count = 1e6
    for class_key in class_counts:
        if max_count < class_counts[class_key]:
            majority_class = class_key
            max_count = class_counts[class_key]
        if min_count > class_counts[class_key]:
            minority_class = class_key
            min_count = class_counts[class_key]
    desired_size = int(min_count * ratio)
    tg = zip(targets, graphs)
    class_graphs = groupby(lambda x: first(x), tg)
    maj_graphs = [second(x) for x in class_graphs[majority_class]]
    min_graphs = [second(x) for x in class_graphs[minority_class]]
    if estimator:
        # select only the instances in the majority class that
        # have a small margin
        preds = estimator.decision_function(maj_graphs)
    else:
        # select at random
        preds = [random.random() for i in range(len(maj_graphs))]
    preds = [abs(pred) for pred in preds]
    pred_graphs = sorted(zip(preds, maj_graphs))[:desired_size]
    maj_graphs = [g for p, g in pred_graphs]
    bal_graphs = min_graphs + maj_graphs
    bal_pos = [minority_class] * len(min_graphs)
    bal_neg = [majority_class] * len(maj_graphs)
    bal_targets = bal_pos + bal_neg
    return paired_shuffle(bal_graphs, bal_targets)

def get_scaling_group_servers(tenant_id, authenticator, service_name, region,
                              server_predicate=None, clock=None):
    """
    Return tenant's servers that belong to a scaling group as
    {group_id: [server1, server2]} ``dict``. No specific ordering is
    guaranteed.

    :param server_predicate: `callable` taking a single server as arg and
        returning True if the server should be included, False otherwise
    """
    def has_group_id(s):
        return 'metadata' in s and 'rax:auto_scaling_group_id' in s['metadata']

    def group_id(s):
        return s['metadata']['rax:auto_scaling_group_id']

    server_predicate = (server_predicate if server_predicate is not None
                        else lambda s: s)
    servers_apply = compose(groupby(group_id),
                            filter(server_predicate),
                            filter(has_group_id))

    d = get_all_server_details(tenant_id, authenticator, service_name, region,
                               clock=clock)
    d.addCallback(servers_apply)
    return d

def optimize_steps(steps):
    """
    Optimize steps.

    Currently only optimizes per step type. See the :func:`_optimizer`
    decorator for more information on how to register an optimizer.

    :param pbag steps: Collection of steps.
    :return: a pbag of steps.
    """
    def grouping_fn(step):
        step_type = type(step)
        if step_type in _optimizers:
            return step_type
        else:
            return "unoptimizable"

    steps_by_type = groupby(grouping_fn, steps)
    unoptimizable = steps_by_type.pop("unoptimizable", [])
    omg_optimized = concat(_optimizers[step_type](steps)
                           for step_type, steps in steps_by_type.iteritems())
    return pbag(concatv(omg_optimized, unoptimizable))

def get_all_scaling_group_servers(changes_since=None,
                                  server_predicate=identity):
    """
    Return tenant's servers that belong to any scaling group as
    {group_id: [server1, server2]} ``dict``. No specific ordering is
    guaranteed.

    :param datetime changes_since: Get servers changed since this time.
        Must be UTC.
    :param server_predicate: function of server -> bool that determines
        whether the server should be included in the result.
    :return: dict mapping group IDs to lists of Nova servers.
    """
    def has_group_id(s):
        return 'metadata' in s and isinstance(s['metadata'], dict)

    def group_id(s):
        return group_id_from_metadata(s['metadata'])

    servers_apply = compose(keyfilter(lambda k: k is not None),
                            groupby(group_id),
                            filter(server_predicate),
                            filter(has_group_id))
    return get_all_server_details(changes_since).on(servers_apply)

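# A minimal sketch of the curried compose/filter/groupby pipeline used by
# get_scaling_group_servers and get_all_scaling_group_servers above, with
# plain dicts standing in for Nova servers:
from toolz.curried import compose, filter, groupby, keyfilter

servers = [
    {'id': 'a', 'metadata': {'group': 'g1'}},
    {'id': 'b', 'metadata': {'group': 'g1'}},
    {'id': 'c', 'metadata': {}},  # no group id -> grouped under None
    {'id': 'd'},                  # no metadata -> filtered out
]
servers_apply = compose(
    keyfilter(lambda k: k is not None),
    groupby(lambda s: s['metadata'].get('group')),
    filter(lambda s: 'metadata' in s))
servers_apply(servers)
# {'g1': [{'id': 'a', ...}, {'id': 'b', ...}]}
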
sys.exit(1) fname, output_dir = sys.argv[0], "zrm_phone_xhe_shape" if not Path(output_dir).exists(): os.makedirs(output_dir) char_to_shape = pipe(CharShapeTable.select(), map(lambda e: (e.char, e.shapes)), reduceby(lambda e: e[0], lambda e1, e2: e1), valmap(lambda e: e[1]), dict) print(f"total {len(char_to_shape)} char shapes") char_to_phones = pipe(CharPhoneTable.select(), map(lambda e: (e.char, e.zrm)), groupby(lambda e: e[0]), valmap(lambda phones: [e[1] for e in phones]), dict) print(f"total {len(char_to_phones)} char phones") one_hit_char_items = generate_one_hit_char(60000) top_single_chars_items = generate_topest_char(char_to_phones, 60000) sys_top_chars_data = f"{output_dir}/sys_top_chars_data.txt" with open(sys_top_chars_data, 'w', encoding='utf8') as fout: fout.write("---config@码表分类=主码-1\n") fout.write("---config@允许编辑=否\n") fout.write(f"---config@码表别名=简码单字\n") for item in one_hit_char_items.items(): fout.write(f"{item[0]}#序{item[1]}\n") for item in top_single_chars_items.items(): fout.write(f"{item[0]}#序{item[1]}\n")
sys.exit(1) fname, output_dir = sys.argv[0], "baidu_mobile_ini" if not Path(output_dir).exists(): os.makedirs(output_dir) char_to_shape = pipe(CharShapeTable.select(), map(lambda e: (e.char, e.shapes)), reduceby(lambda e: e[0], lambda e1, e2: e1), valmap(lambda e: e[1]), dict) print(f"total {len(char_to_shape)} char shapes") char_to_phones = pipe(CharPhoneTable.select(), map(lambda e: (e.char, e.xhe)), groupby(lambda e: e[0]), valmap(lambda phones: [e[1] for e in phones]), dict) print(f"total {len(char_to_phones)} char phones") all_items = [] #单字部分 all_items.extend( [tuple(e.split("\t")) for e in generate_one_hit_char(60000).keys()]) all_items.extend([ tuple(e.split("\t")) for e in generate_topest_char(char_to_phones, 60000) ]) #系统单字部分 all_items.extend( pipe(CharPhoneTable.select(),
            int]) -> Iterable[Tuple[Text, int]]:
        current, distance = current_and_distance
        if distance < radius:
            yield from map(lambda neighbor: (neighbor, distance + 1),
                           get_neighbors(current))

    return map(
        toolz.first,
        graph_traverse(source=(source, 0),
                       get_neighbors=get_neighbors_limiting_radius),
    )


edges_to_graph = toolz.compose(
    curried.valmap(toolz.compose(frozenset, curried.map(toolz.second))),
    curried.groupby(toolz.first),
)

graph_to_edges = toolz.compose_left(
    curried.keymap(lambda x: (x, )),
    dict.items,
    curried.mapcat(functional.star(itertools.product)),
)

reverse_graph = toolz.compose_left(
    graph_to_edges,
    curried.map(toolz.compose_left(reversed, tuple)),
    edges_to_graph)

cliques_to_graph = toolz.compose_left(
    curried.mapcat(lambda clique: itertools.permutations(clique, r=2)),
    edges_to_graph)

        ),
        curried.map(lambda t: (t[0][0], t[1][1])),
        sorted,
    ),
    [
        ("Alice", "NYC"),
        ("Alice", "Chicago"),
        ("Dan", "Syndey"),
        ("Edith", "Paris"),
        ("Edith", "Berlin"),
        ("Zhao", "Shanghai"),
    ],
),
"count_by": (curried.countby(lambda x: x % 2 == 0), range(20)),
"groupby": (
    chained(curried.groupby(lambda x: x % 2 == 0), curried.valmap(sorted)),
    range(20),
),
"keymap": (
    chained(dict, curried.keymap(lambda x: 2 * x)),
    dict.items({1: 2, 3: 4, 5: 6, 7: 8, 9: 10}),
),
"valmap": (
    chained(dict, curried.valmap(lambda x: 2 * x)),
    dict.items({
def list_orders(token, page="1", page_length="10", status=None):
    decoded_token = get_decoded_token(token)
    customer_id = frappe.db.exists(
        "Customer", {"le_firebase_uid": decoded_token["uid"]}
    )
    if not customer_id:
        frappe.throw(frappe._("Customer does not exist on backend"))

    get_conditions = compose(lambda x: " AND ".join(x), filter(None))
    conditions = get_conditions(
        [
            "docstatus = 1",
            "customer = %(customer)s",
            "status IN %(statuses)s" if status else None,
        ]
    )
    statuses = json.loads(status) if status else None

    get_count = compose(
        lambda x: x[0][0],
        lambda x: frappe.db.sql(
            """
                SELECT COUNT(name) FROM `tabSales Order`
                WHERE {conditions}
            """.format(conditions=conditions),
            values={"customer": x, "statuses": statuses},
        ),
    )

    orders = frappe.db.sql(
        """
            SELECT
                name, transaction_date, status, total,
                total_taxes_and_charges, grand_total,
                rounding_adjustment, rounded_total
            FROM `tabSales Order`
            WHERE {conditions}
            ORDER BY transaction_date DESC, creation DESC
            LIMIT %(start)s, %(page_length)s
        """.format(conditions=conditions),
        values={
            "customer": customer_id,
            "statuses": statuses,
            "start": (frappe.utils.cint(page) - 1) * frappe.utils.cint(page_length),
            "page_length": frappe.utils.cint(page_length),
        },
        as_dict=1,
    )
    items = (
        groupby(
            "parent",
            frappe.db.sql(
                """
                    SELECT
                        parent, name, item_code, item_name, item_group,
                        qty, rate, amount
                    FROM `tabSales Order Item`
                    WHERE parent IN %(parents)s
                """,
                values={"parents": [x.get("name") for x in orders]},
                as_dict=1,
            ),
        )
        if orders
        else {}
    )
    return {
        "count": get_count(customer_id),
        "items": [
            merge(x, {"items": items.get(x.get("name"), [])}) for x in orders
        ],
    }

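# Note that toolz's groupby also accepts a non-callable key, as used above:
# groupby("parent", rows) groups a list of dicts by row["parent"].
from toolz import groupby

rows = [{"parent": "SO-001", "item_code": "A"},
        {"parent": "SO-001", "item_code": "B"},
        {"parent": "SO-002", "item_code": "C"}]
groupby("parent", rows)
# {'SO-001': [{'parent': 'SO-001', 'item_code': 'A'},
#             {'parent': 'SO-001', 'item_code': 'B'}],
#  'SO-002': [{'parent': 'SO-002', 'item_code': 'C'}]}
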
def _get_data(filters):
    company = frappe.defaults.get_user_default("company")
    customer = frappe.get_cached_value("Booking Party",
                                       filters.booking_party, "customer")
    account = get_party_account("Customer", customer, company)
    _, rows = get_report(
        frappe._dict({
            "from_date": filters.get("from_date"),
            "to_date": filters.get("to_date"),
            "company": company,
            "account": account,
            "party_type": "Customer",
            "party": [customer],
            "group_by": frappe._("Group by Voucher (Consolidated)"),
        }))
    gl_entries = rows[1:-2]
    invoices = [
        x.get("voucher_no") for x in gl_entries
        if x.get("voucher_type") == "Sales Invoice"
    ]

    get_booking_orders = compose(valmap(first), groupby("sales_invoice"),
                                 frappe.db.sql)
    booking_orders = (get_booking_orders(
        """
            SELECT
                si.name AS sales_invoice,
                bo.name,
                bo.paper_receipt_no,
                bo.booking_datetime AS order_datetime
            FROM `tabSales Invoice` AS si
            LEFT JOIN `tabBooking Order` AS bo ON bo.name = si.gg_booking_order
            WHERE si.name IN %(invoices)s
        """,
        values={"invoices": invoices},
        as_dict=1,
    ) if invoices else {})
    orders = [v.get("name") for _, v in booking_orders.items()]

    get_sales_invoice_items = compose(groupby("sales_invoice"), frappe.db.sql)
    sales_invoice_items = (get_sales_invoice_items(
        """
            SELECT
                sii.parent AS sales_invoice,
                sii.description,
                sii.qty,
                sii.rate,
                bofd.based_on,
                IFNULL(sii.gg_bo_detail, '') != '' AS is_freight_item
            FROM `tabSales Invoice Item` AS sii
            LEFT JOIN `tabBooking Order Freight Detail` AS bofd
                ON bofd.name = sii.gg_bo_detail
            WHERE sii.parent IN %(invoices)s
        """,
        values={"invoices": invoices},
        as_dict=1,
    ) if invoices else {})

    get_delivery_dates = compose(groupby("booking_order"), frappe.db.sql)
    delivery_dates = (get_delivery_dates(
        """
            SELECT booking_order, posting_datetime
            FROM `tabBooking Log`
            WHERE activity = 'Collected' AND booking_order IN %(orders)s
        """,
        values={"orders": orders},
        as_dict=1,
    ) if orders else {})

    def make_message(item):
        rate = frappe.utils.fmt_money(
            item.get("rate"),
            currency=frappe.defaults.get_global_default("currency"))
        if item.get("is_freight_item"):
            if item.get("based_on") == "Weight":
                return "{} by weight @ {} - {}".format(
                    item.get("qty"), rate, item.get("description"))
            if item.get("based_on") == "Packages":
                return "{} packages @ {} - {}".format(
                    item.get("qty"), rate, item.get("description"))
        return "{} @ {}".format(item.get("description"), rate)

    def make_description(si):
        return "<br />".join([
            make_message(x) for x in sales_invoice_items.get(si, [])
            if x.get("qty") and x.get("rate")
        ])

    def make_delivery_date(bo):
        return ", ".join(
            set([
                frappe.format_value(x.get("posting_datetime"),
                                    {"fieldtype": "Date"})
                for x in delivery_dates.get(bo, [])
            ]))

    def make_row(row):
        booking_order = booking_orders.get(row.get("voucher_no"), {})
        bo_name = booking_order.get("name")
        order_date = booking_order.get("order_datetime")
        return merge(
            row,
            {
                "booking_order": bo_name,
                "paper_receipt_no": booking_order.get("paper_receipt_no"),
                "description": make_description(row.get("voucher_no"))
                if row.get("voucher_type") == "Sales Invoice"
                else row.get("remarks"),
                "order_date": frappe.format_value(order_date,
                                                  {"fieldtype": "Date"})
                if order_date else "",
                "delivery_dates": make_delivery_date(bo_name),
            },
        )

    def make_ag_row(row, label):
        return merge(row, {"voucher_type": label})

    return ([make_ag_row(rows[0], "Opening")] +
            [make_row(x) for x in gl_entries] +
            [make_ag_row(rows[-2], "Total"),
             make_ag_row(rows[-1], "Closing")])

def _get_valuation_rate(items):
    return compose(
        valmap(lambda x: x['valuation_rate']),
        valmap(first),
        groupby('name'),
        lambda: frappe.get_all('Item',
                               filters=[['name', 'in', items]],
                               fields=['name', 'valuation_rate']))()

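# The groupby('name') -> valmap(first) pairing above is a common idiom for
# turning a list of records into a unique-key lookup; a small sketch on
# plain data (the field names are made up):
from toolz.curried import compose, groupby, valmap

rows = [{'name': 'Pen', 'valuation_rate': 10.0},
        {'name': 'Ink', 'valuation_rate': 4.5}]
to_rate = compose(valmap(lambda x: x['valuation_rate']),
                  valmap(lambda xs: xs[0]),
                  groupby('name'))
to_rate(rows)  # {'Pen': 10.0, 'Ink': 4.5}
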
def group_by_op(tree):
    return t.pipe(tree,
                  flatten,
                  tc.filter(lambda e: isinstance(e, str)),
                  tc.groupby(lambda e: e.split('^')[0]),
                  dict)

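# A quick illustration of group_by_op, assuming `flatten` (defined elsewhere
# in this module) recursively flattens the nested tree, and with made-up
# labels of the form 'op^id':
group_by_op([['add^1', 'mul^2'], ['add^3', 42]])
# {'add': ['add^1', 'add^3'], 'mul': ['mul^2']}  # the 42 is filtered out
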
def average_precision(detections: List[BBox],
                      ground_truths: List[BBox],
                      iou_threshold=.5,
                      use_07_metric=True,
                      ignore_difficult=True):
    c2dts = groupby(lambda b: b.category_id, detections)
    c2gts = groupby(lambda b: b.category_id, ground_truths)
    dts = {}
    gts = {}
    for c, c_dts in c2dts.items():
        dts[c] = groupby(lambda b: b.image_id, c_dts)
        for i in dts[c]:
            dts[c][i].sort(key=lambda d: d.score, reverse=True)
    for c, c_gts in c2gts.items():
        gts[c] = groupby(lambda b: b.image_id, c_gts)

    img_ids = set(d.image_id for d in ground_truths)
    classes = set(d.category_id for d in ground_truths)

    ious = defaultdict(lambda: defaultdict())
    for c in classes:
        if c not in dts:
            continue
        for i in img_ids:
            if i not in dts[c] or i not in gts[c]:
                continue
            gt = gts[c][i]
            dt = dts[c][i]
            dt_bboxes = np.array([d.bbox for d in dt])
            gt_bboxes = np.array([d.bbox for d in gt])
            ious[c][i] = iou_mn(dt_bboxes, gt_bboxes)

    aps = {}
    for c in classes:
        if c not in dts:
            aps[c] = 0
            continue
        c_gts = gts[c]
        if ignore_difficult:
            n_positive = len([d for ds in c_gts.values()
                              for d in ds if not d.is_difficult])
        else:
            n_positive = len([d for ds in c_gts.values() for d in ds])
        # n_positive = len([d for ds in c_gts.values() for d in ds
        #                   if not (ignore_difficult and d.is_difficult)])
        c_dts = sorted([d for ds in dts[c].values() for d in ds],
                       key=lambda b: b.score, reverse=True)
        TP = np.zeros(len(c_dts), dtype=np.uint8)
        FP = np.zeros(len(c_dts), dtype=np.uint8)
        seen = {
            i: np.zeros(len(ds), dtype=np.uint8)
            for i, ds in c_gts.items()
        }
        rank = {i: 0 for i in c_gts}
        for di, dt in enumerate(c_dts):
            img_id = dt.image_id
            if img_id not in c_gts:
                FP[di] = 1
                continue
            iou = ious[c][img_id][rank[img_id]]
            rank[img_id] += 1
            j_max, iou_max = max(enumerate(iou), key=lambda x: x[1])
            if iou_max > iou_threshold:
                if not (ignore_difficult and c_gts[img_id][j_max].is_difficult):
                    if not seen[img_id][j_max]:
                        TP[di] = 1
                        seen[img_id][j_max] = 1
                    else:
                        FP[di] = 1
            else:
                FP[di] = 1
        acc_fp = np.cumsum(FP)
        acc_tp = np.cumsum(TP)
        recall = acc_tp / n_positive
        precision = acc_tp / (acc_fp + acc_tp + 1e-10)
        ap = average_precision_pr(precision, recall, use_07_metric)
        aps[c] = round(ap, 6)
    return aps

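# A tiny worked example of the cumulative TP/FP arithmetic above, with
# made-up values: detections are already sorted by score and there are
# n_positive = 2 ground-truth boxes.
import numpy as np

TP = np.array([1, 0, 1, 0], dtype=np.uint8)  # per-detection hits
FP = np.array([0, 1, 0, 1], dtype=np.uint8)  # per-detection misses
acc_tp, acc_fp = np.cumsum(TP), np.cumsum(FP)
recall = acc_tp / 2                     # [0.5, 0.5, 1.0, 1.0]
precision = acc_tp / (acc_tp + acc_fp)  # [1.0, 0.5, 0.667, 0.5]
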
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print(f"Usage: python3 {sys.argv[0]} sents.txt", file=sys.stderr)
        sys.exit(1)
    _, sents_path = sys.argv

    exist_words = pipe(WordPhoneTable.select(),
                       map(lambda e: e.word),
                       set)
    seg = Segger(exist_words, 5)
    with open(sents_path, 'r', encoding='utf8') as fin:
        word_freq = pipe(
            fin,
            map(lambda e: e.strip().replace(" ", "").replace("\t", "")),
            filter(lambda e: e != "" and not e.startswith("#")),
            map(lambda e: seg.cut(e)),
            concat,
            groupby(lambda e: e),
            valmap(lambda e: len(e)),
            dict)

    index = 0
    for item in WordPhoneTable.select():
        index += 1
        if index == 1000:
            print(item)
            index = 0
        word = item.word
        if word in word_freq:
            freq = word_freq[word]
        else:
            freq = 1
        if freq == item.priority:
            continue

def groupby(self, key):
    return self.apply(groupby(0)).apply(dict.items)

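# In curried toolz, groupby(0) is a partially applied groupby whose key is
# index 0, i.e. it groups a sequence of pairs by their first element:
from toolz.curried import groupby

groupby(0)([('a', 1), ('b', 2), ('a', 3)])
# {'a': [('a', 1), ('a', 3)], 'b': [('b', 2)]}
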
def get_items(page="1", field_filters=None, attribute_filters=None,
              search=None):
    other_fieldnames = ["item_group", "thumbnail", "has_variants"]
    price_list = frappe.db.get_single_value("Shopping Cart Settings",
                                            "price_list")
    products_per_page = frappe.db.get_single_value("Products Settings",
                                                   "products_per_page")

    get_item_groups = compose(
        list,
        unique,
        map(lambda x: x.get("name")),
        concat,
        map(lambda x: get_child_nodes("Item Group", x)
            if x and frappe.db.exists("Item Group", x, cache=True)
            else []),
    )
    get_other_fields = compose(
        valmap(excepts(StopIteration, first, lambda _: {})),
        groupby("name"),
        lambda item_codes: frappe.db.sql(
            """
                SELECT name, {other_fieldnames}
                FROM `tabItem`
                WHERE name IN %(item_codes)s
            """.format(other_fieldnames=", ".join(other_fieldnames)),
            values={"item_codes": item_codes},
            as_dict=1,
        ),
        lambda items: [x.get("name") for x in items],
    )
    get_page_count = compose(
        lambda x: frappe.utils.ceil(x[0][0] / products_per_page),
        lambda x: frappe.db.sql(
            """
                SELECT COUNT(name) FROM `tabItem`
                WHERE show_in_website = 1 AND item_group IN %(item_groups)s
            """,
            values={"item_groups": x},
        ),
    )

    field_dict = (frappe.parse_json(field_filters)
                  if isinstance(field_filters, str)
                  else field_filters) or {}
    item_groups = (get_item_groups(field_dict.get("item_group"))
                   if field_dict.get("item_group") else None)

    frappe.form_dict.start = (frappe.utils.cint(page) - 1) * products_per_page
    items = get_products_for_website(
        field_filters=merge(field_dict,
                            {"item_group": item_groups} if item_groups else {}),
        attribute_filters=frappe.parse_json(attribute_filters),
        search=search,
    )
    other_fields = get_other_fields(items) if items else {}
    item_prices = _get_item_prices(price_list, items) if items else {}
    get_rates = _rate_getter(price_list, item_prices)
    return {
        "page_count": get_page_count(item_groups) if item_groups else 0,
        "items": [
            merge(
                x,
                {
                    "route": transform_route(x),
                    "description": frappe.utils.strip_html_tags(
                        x.get("description") or ""),
                },
                get_rates(x.get("name")),
                {
                    k: other_fields.get(x.get("name"), {}).get(k)
                    for k in other_fieldnames
                },
            ) for x in items
        ],
    }

def get_full_to_bingji_transformer() -> Dict[str, str]:
    return pipe(FullToTwoTable().select(),
                map(lambda e: (e.full, e.bingji)),
                groupby(lambda e: e[0]),
                itemmap(lambda kv: (kv[0],
                                    list(map(lambda e: e[1], kv[1]))[0])),
                dict)

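# The itemmap above keeps only the first bingji value seen for each full
# spelling; the same effect can be had with valmap, shown on made-up data:
from toolz.curried import groupby, valmap

pairs = [('ang', 'h'), ('ang', 'x'), ('eng', 'g')]
valmap(lambda v: v[0][1], groupby(lambda e: e[0], pairs))
# {'ang': 'h', 'eng': 'g'}
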
    template_images = get_values(variant_of) if variant_of else {}

    def get_image(field):
        return images.get(field) or template_images.get(field)

    return {
        "thumbnail": get_image("thumbnail"),
        "image": get_image("image"),
        "website_image": get_image("website_image"),
        "slideshow": get_slideshows(get_image("slideshow")),
    }


_get_item_prices = compose(
    valmap(excepts(StopIteration, first, lambda _: {})),
    groupby("item_code"),
    lambda price_list, items: frappe.db.sql(
        """
            SELECT item_code, price_list_rate
            FROM `tabItem Price`
            WHERE price_list = %(price_list)s AND item_code IN %(item_codes)s
        """,
        values={
            "price_list": price_list,
            "item_codes": [x.get("name") for x in items],
        },
        as_dict=1,
    ) if price_list else {},
)

def group_valid(ohlc):
    groups = tz.groupby(is_valid_ohlc, ohlc)
    valid_ohlc = groups[True] if True in groups else []
    invalid_ohlc = groups[False] if False in groups else []
    return valid_ohlc, invalid_ohlc

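# Partitioning by a boolean predicate with groupby, as in group_valid above;
# dict.get with a default is an equivalent spelling of the membership checks:
import toolz as tz

groups = tz.groupby(lambda n: n > 0, [3, -1, 4, -2])
valid, invalid = groups.get(True, []), groups.get(False, [])
# valid == [3, 4], invalid == [-1, -2]
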
def get_notes(token, so_name):
    decoded_token = get_decoded_token(token)
    customer_id = frappe.db.exists(
        "Customer", {"le_firebase_uid": decoded_token["uid"]}
    )
    if not customer_id:
        frappe.throw(frappe._("Customer does not exist on backend"))
    if customer_id != frappe.db.get_value("Sales Order", so_name, "customer"):
        frappe.throw(frappe._("Not allowed to view this document"))

    get_dn_fields = compose(
        keyfilter(
            lambda x: x in [
                "name",
                "partner",
                "partner_name",
                "scheduled_datetime",
                "posting_datetime",
                "total",
                "total_taxes_and_charges",
                "grand_total",
                "rounding_adjustment",
                "rounded_total",
                "status",
            ]
        ),
        first,
    )
    get_item_fields = compose(
        list,
        map(
            keyfilter(
                lambda x: x in [
                    "name",
                    "item_code",
                    "item_name",
                    "item_group",
                    "rate",
                    "amount",
                    "so_detail",
                ]
            )
        ),
        map(lambda x: merge(x, {"name": x.get("child_name")})),
    )
    get_deliveries = compose(
        lambda x: x.values(),
        valmap(lambda x: merge(get_dn_fields(x),
                               {"items": get_item_fields(x)})),
        groupby("name"),
        lambda x: frappe.db.sql(
            """
                SELECT
                    dn.name,
                    dn.sales_partner AS partner,
                    sp.partner_name,
                    dn.le_scheduled_datetime AS scheduled_datetime,
                    TIMESTAMP(dn.posting_date, dn.posting_time) AS posting_datetime,
                    dn.total,
                    dn.total_taxes_and_charges,
                    dn.grand_total,
                    dn.rounding_adjustment,
                    dn.rounded_total,
                    dn.workflow_state AS status,
                    dni.name AS child_name,
                    dni.item_code,
                    dni.item_name,
                    dni.item_group,
                    dni.qty,
                    dni.rate,
                    dni.amount,
                    dni.so_detail
                FROM `tabDelivery Note Item` AS dni
                LEFT JOIN `tabDelivery Note` AS dn ON dn.name = dni.parent
                LEFT JOIN `tabSales Partner` AS sp ON sp.name = dn.sales_partner
                WHERE dn.status < 2
                    AND dn.workflow_state IN ('Pending', 'Completed')
                    AND dni.against_sales_order = %(against_sales_order)s
            """,
            values={"against_sales_order": x},
            as_dict=1,
        ),
    )
    return get_deliveries(so_name)