def _extend_data(filters, data, batch_idx, balance_idx):
    """Splice batch-expiry columns into report rows and drop zero-balance rows.

    For every distinct batch name found at ``row[batch_idx]``, fetch its
    ``expiry_date`` and the remaining days until expiry from `tabBatch`,
    then insert those two cells immediately after the batch column.

    :param filters: report filters; only ``hide_zero_stock`` is read
    :param data: list of report rows (indexable sequences)
    :param batch_idx: column index holding the batch name
    :param balance_idx: column index holding the stock balance
    :return: new list of rows, each widened by two cells
    """
    # Distinct, non-empty batch names referenced by the rows (the original
    # pipeline filtered out falsy values before querying).
    batch_names = {row[batch_idx] for row in data if row[batch_idx]}

    rows = (
        frappe.db.sql(
            """
                SELECT
                    name,
                    expiry_date,
                    IF(expiry_date, DATEDIFF(expiry_date, %(today)s), '')
                        AS expiry_in_days
                FROM `tabBatch`
                WHERE name IN %(batches)s
            """,
            values={"today": frappe.utils.today(), "batches": batch_names},
            as_dict=1,
        )
        if batch_names
        else []
    )

    # Index query results by batch name. `name` is the table's primary key so
    # each appears exactly once; a plain dict build replaces the original
    # reduceby whose "binary" op (`lambda x: x[0]`) accepted only one argument
    # and would have raised TypeError on any duplicate key.
    batches = {row["name"]: row for row in rows}

    insert_at = batch_idx + 1

    def get_cells(row):
        # Two new cells for this row; [None, None] when the batch is unknown.
        batch = batches.get(row[batch_idx])
        if batch:
            return [batch.get("expiry_date"), batch.get("expiry_in_days")]
        return [None, None]

    def will_show(row):
        # Zero-balance rows are hidden only when the filter asks for it.
        if not filters.hide_zero_stock:
            return True
        return row[balance_idx] != 0

    return [
        row[:insert_at] + get_cells(row) + row[insert_at:]
        for row in data
        if will_show(row)
    ]
if __name__ == "__main__":
    # Script takes no positional arguments: len(sys.argv) == 1 means only the
    # program name is present; anything else prints usage and exits.
    # NOTE(review): the usage string ends with a trailing space, and `fname`
    # below is assigned but never used in this chunk — confirm against the
    # full file whether an argument was originally intended.
    if len(sys.argv) != 1:
        print("USAGE: python3 generate_dd_txt.py ")
        sys.exit(1)
    fname, output_dir = sys.argv[0], "zrm_phone_xhe_shape"
    if not Path(output_dir).exists():
        os.makedirs(output_dir)
    # char -> shape code: keep only the FIRST row seen per char
    # (reduceby with `lambda e1, e2: e1` discards later duplicates),
    # then strip the key out of the (char, shapes) pair with valmap.
    char_to_shape = pipe(
        CharShapeTable.select(),
        map(lambda e: (e.char, e.shapes)),
        reduceby(lambda e: e[0], lambda e1, e2: e1),
        valmap(lambda e: e[1]),
        dict,
    )
    print(f"total {len(char_to_shape)} char shapes")
    # char -> list of zrm phone codes: unlike shapes, ALL readings per char
    # are kept (groupby collects every (char, zrm) pair for a char).
    char_to_phones = pipe(
        CharPhoneTable.select(),
        map(lambda e: (e.char, e.zrm)),
        groupby(lambda e: e[0]),
        valmap(lambda phones: [e[1] for e in phones]),
        dict,
    )
    print(f"total {len(char_to_phones)} char phones")
    # 60000 appears to be a size/weight cap passed to both generators —
    # TODO confirm its meaning against their definitions (not in view).
    one_hit_char_items = generate_one_hit_char(60000)
    top_single_chars_items = generate_topest_char(char_to_phones, 60000)
    sys_top_chars_data = f"{output_dir}/sys_top_chars_data.txt"
    # Emit the dictionary file header; the config lines are consumed by the
    # target IME verbatim, so they must stay byte-identical.
    # NOTE(review): this `with` block looks truncated here — more writes
    # presumably follow in the full file.
    with open(sys_top_chars_data, 'w', encoding='utf8') as fout:
        fout.write("---config@码表分类=主码-1\n")
        fout.write("---config@允许编辑=否\n")
if __name__ == "__main__":
    # Expect exactly one argument: a text file with one word per line.
    if len(sys.argv) != 2:
        print(f"USAGE: python3 {sys.argv[0]} words.txt", file=sys.stderr)
        sys.exit(1)
    _, words_path = sys.argv

    # Words already stored in the table; these are skipped on import.
    existing_words = {e.word for e in EngWordTable.select()}

    # Collect new rows: strip each line, drop blanks, drop words already in
    # the DB, and de-duplicate within the input keeping the first occurrence.
    to_add_words = []
    seen = set()
    with open(words_path, "r", encoding='utf8') as fin:
        for raw_line in fin:
            word = raw_line.strip()
            if not word or word in existing_words or word in seen:
                continue
            seen.add(word)
            to_add_words.append(
                EngWordTable(word=word, priority=1, updatedt=datetime.now())
            )

    # Insert everything in one transaction, chunked to keep statements small.
    with db.atomic():
        EngWordTable.bulk_create(to_add_words, batch_size=100)
    print('done')
chained(dict, curried.itemfilter(lambda i: i[0] % 2 == 0 and i[1] < 4)),
        # NOTE(review): this entry's opening (and the start of the `examples`
        # dict) is outside this chunk; the pair below is (callable, input).
        dict.items({1: 2, 2: 3, 3: 4, 4: 5}),
    ),
    # example taken from toolz docs
    "mapcat": (
        chained(curried.mapcat(lambda s: [c.upper() for c in s]), list),
        [["a", "b"], ["c", "d", "e"]],
    ),
    "reduce": (curried.reduce(op.add), range(20)),
    "reduceby": (curried.reduceby(lambda x: x % 2 == 0, op.add), range(20)),
    "topk": (chained(curried.topk(5), list), range(20)),
    # Both the curried and the plain toolz spellings of `unique` are covered.
    "curried.unique": (chained(curried.unique, sorted), [1, 1, 2, 3, 4, 4]),
    "unique": (chained(toolz.unique, sorted), [1, 1, 2, 3, 4, 4]),
}


def params(spec, m):
    """Build a deterministic `pytest.mark.parametrize` from a dict of cases.

    Values are ordered by their (sorted) keys and the same sorted keys are
    used as test ids, so test ordering is stable across runs.
    """
    return pytest.mark.parametrize(
        spec,
        [v for (_, v) in sorted(m.items())],
        ids=sorted(m.keys()),
    )


@params("func, arg", examples)
def test_input_outputs(func, arg):
    # Run each example function directly and through a 3-partition dask bag;
    # the comparison of the two results presumably follows past this chunk —
    # confirm in the full file.
    actual_no_dask = func(arg)
    actual_dask = apply(db.from_sequence(arg, npartitions=3), func).compute()