Example #1
    def transform_test(self, data):
        list_of_dicts = [
            self.get(image_fname, klass, 'test') for klass, image_fname in data
        ]
        images = list(pluck('images', list_of_dicts))
        labels = list(pluck('labels', list_of_dicts))

        return {'images': np.array(images), 'labels': np.array(labels)}
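A minimal standalone sketch of the pluck-based "list of dicts" to "dict of arrays" transpose used above, with invented sample records:

import numpy as np
from toolz import pluck

list_of_dicts = [{'images': [0.1, 0.2], 'labels': 1},
                 {'images': [0.3, 0.4], 'labels': 0}]
images = list(pluck('images', list_of_dicts))  # [[0.1, 0.2], [0.3, 0.4]]
labels = list(pluck('labels', list_of_dicts))  # [1, 0]
batch = {'images': np.array(images), 'labels': np.array(labels)}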
Example #2
    def transform_train(self, data):
        list_of_dicts = _pool.starmap(self.get,
                                      [(image_fname, klass, 'train')
                                       for klass, image_fname in data])
        images = list(pluck('images', list_of_dicts))
        labels = list(pluck('labels', list_of_dicts))

        return {'images': np.array(images), 'labels': np.array(labels)}
Example #3
def getid_counts(lines, kind='symbol'):
    if kind == 'symbol':
        kind = 0
    else:
        kind = 1
    ids = []
    res = tz.pipe(lines, spliteach(sep='\t'), tz.pluck(0), spliteach(sep='|'),
                  tz.pluck(kind), tee(ids.append), tz.frequencies)
    return ids, res
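This pipeline relies on two small project helpers; a hedged, self-contained sketch with plausible stand-ins (spliteach as a curried per-line splitter, tee as a pass-through side effect) behaves like this on made-up lines:

from toolz import curried as tz

def spliteach(sep=None):
    return tz.map(lambda s: s.split(sep))

def tee(func):
    def inner(seq):
        for item in seq:
            func(item)  # side effect on each element, then pass it through
            yield item
    return inner

lines = ['TP53|7157\t42', 'EGFR|1956\t7', 'TP53|7157\t3']
ids = []
res = tz.pipe(lines, spliteach(sep='\t'), tz.pluck(0),
              spliteach(sep='|'), tz.pluck(0), tee(ids.append),
              tz.frequencies)
# ids == ['TP53', 'EGFR', 'TP53']; res == {'TP53': 2, 'EGFR': 1}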
Example #4
def _get_versions_from_repology_repos(project: str) -> set:
    """Returns set of versions available in other repos of repology"""
    reply = requests.get(f"https://repology.org/api/v1/project/{project}")
    reply.raise_for_status()
    return set(
        pluck("version")(filter(lambda _: _["status"] == "newest", reply.json()))
    )
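For reference, the curried call pattern pluck("version")(...) works like this on a made-up slice of a Repology-style payload:

from toolz.curried import pluck

payload = [{"repo": "nixpkgs", "version": "1.2", "status": "newest"},
           {"repo": "alpine", "version": "1.1", "status": "outdated"}]
newest = set(pluck("version")(filter(lambda _: _["status"] == "newest", payload)))
# newest == {'1.2'}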
Example #5
    def _word_skip_grams(self, tokens, stop_words=None):
        # handle stop words
        if stop_words is not None:
            tokens = [w for w in tokens if w not in stop_words]

        return compose(cmap(' '.join), pluck([0, 2]),
                       sliding_window(3))(tokens)
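Assuming cmap is the curried map, the composition reads right to left: window the tokens into triples, keep positions 0 and 2, then join. A quick sketch:

from toolz import compose
from toolz.curried import map as cmap, pluck, sliding_window

tokens = ['the', 'quick', 'brown', 'fox']
grams = list(compose(cmap(' '.join), pluck([0, 2]),
                     sliding_window(3))(tokens))
# ['the brown', 'quick fox']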
Example #6
def gene_length_df(filename):
    """Grab Gene Symbol, Gene ID, and Gene Length from a GAF file.

    Parameters
    ----------
    filename : string
        Path to a Gene Annotation Format (GAF) file.

    Returns
    -------
    gene_lengths : pandas DataFrame
        A data frame with three columns: gene symbol, gene id, and gene
        length (in bases).
    """
    with open(filename) as fin:
        header = next(fin).rstrip().split('\t')
        geneid = header.index('FeatureID')
        genelen = header.index('FeatureCoordinates')
        feattype = header.index('FeatureType')
        output = tz.pipe(fin, spliteach,
                         tz.filter(lambda x: x[feattype] == 'gene'),
                         tz.pluck([geneid, genelen]), tz.map(range2len), list)
    df = pd.DataFrame(output, columns=['GeneSymbol', 'GeneID', 'GeneLength'])
    df = df.drop_duplicates('GeneSymbol').set_index('GeneSymbol')
    return df
Example #7
def ccds_to_bed(ccds_stream):
  """Convert CCDS dump to Chanjo-style BED stream.

  Main entry point for default Chanjo converter (ccds). It converts
  a sorted (start, chrom) CCDS database to the Chanjo BED-format.

  Args:
    ccds_stream (file): file handle to read CCDS lines from

  Yields:
    Interval: interval with merged block and superblock ids
  """
  return pipe(
    ccds_stream,
    filter(grep('Public')),                    # keep only 'Public' tx
    map(text_type.rstrip),                     # strip \n and spaces
    map(split(sep='\t')),                      # split into list
    map(extract_intervals),                    # convert to Interval
    concat,                                    # flatten
    map(rename_sex_interval),                  # rename sex contigs
    partial(lazy_groupby, key=attrgetter('contig')),  # group by contig
    pluck(1),                                  # drop contig keys, keep groups
    map(groupby(attrgetter('name'))),          # non-lazy group by id
    map(valmap(merge_related_elements)),       # group intervals
    map(itervalues),                           # extract values
    map(partial(sorted, key=attrgetter('start'))),  # sort by start pos
    concat                                     # flatten
  )
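The lazy_groupby-then-pluck(1) step keeps only the grouped intervals and discards the contig keys; in isolation the pattern looks like this (toy data):

from toolz import groupby, pluck

pairs = sorted(groupby(len, ['a', 'bb', 'cc', 'd']).items())
# [(1, ['a', 'd']), (2, ['bb', 'cc'])]
list(pluck(1, pairs))  # [['a', 'd'], ['bb', 'cc']]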
Example #8
def print_earnings():
    registry = Registry()
    registry.load_strategies()

    earnings = []
    for vault in registry.vaults:
        for strategy in vault.strategies:
            contract = strategy.strategy
            strategist = getattr(contract, "strategist", None)

            if strategist:
                config = vault.vault.strategies(contract).dict()
                if config.get("performanceFee"):
                    earnings.append({"strategist": strategist(), **config})

    if earnings:
        print("....All earnings:")
        pprint(earnings)
        data = valmap(
            compose(lambda fg: sum(gain / fee for fee, gain in fg),
                    pluck(["performanceFee", "totalGain"])),
            groupby("strategist", earnings))
        print("....Earnings grouped by strategist")
        pprint(data)
    else:
        print("No earnings found")
Example #9
def make_data(val):
    clients = compose(count, unique, pluck('customer'), lambda: val)
    animals = compose(valmap(count), groupby('species'), lambda: val)
    return {
        'total_val': reduce(lambda total, x: total + x.get('total_vat'),
                            val, 0.00),
        'animals': _get_dict_to_csv(animals()),
        'clients': clients(),
    }
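The two compositions above count distinct customers and tally rows per species; illustrated on made-up rows (assuming curried toolz, as the code implies):

from toolz import compose, count, unique
from toolz.curried import groupby, pluck, valmap

rows = [{'customer': 'ann', 'species': 'cat'},
        {'customer': 'bob', 'species': 'cat'},
        {'customer': 'ann', 'species': 'dog'}]
compose(count, unique, pluck('customer'))(rows)   # 2
compose(valmap(count), groupby('species'))(rows)  # {'cat': 2, 'dog': 1}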
Example #10
def compute_ci_for_metrics_collection(
        metrics: List[ClassificationMetrics]) -> Dict:
    attributes = get_object_attributes(metrics[0])
    metrics_with_ci_dict = {
        attribute: pass_args(
            confidence_interval(list(pluck(attribute, metrics))),
            lambda m, ci, std: ValueWithStatistics(m, std, ci),
        )
        for attribute in attributes
    }
    return metrics_with_ci_dict
Example #11
def example():
    data = [('cheese', 1), ('cheese', 2), ('cheese', 3), ('tiramisu', 10),
            ('tiramisu', 11), ('meat', 12)]

    c = Collection.create(data)
    c_train, c_test = c.random_split(0.5, seed=1)

    pipe = (Collection.pipeline("file_list").groupby(0).evolve(
        1, lambda x: list(pluck(1, x))).combinations(replacement=True).starmap(
            generate_matching_pairs).flatten().filter(random_drop(0.5)))

    print(pipe.pump({'file_list': c_train}).compute())
    print(pipe.pump({'file_list': c_test}).compute())
Example #12
def compare_and_format_results(
    y_true: Series,
    results_for_methods: Dict[str, List[ModelCVResult]],
    include: Tuple[str, ...] = ('balanced_accuracy', 'roc_auc', 'recall',
                                'fpr', 'f1', 'average_precision'),
) -> str:
    metrics_for_methods = valmap(
        lambda r: compute_classification_metrics_from_results_with_statistics(
            y_true, r), results_for_methods)

    def get_line(method: str, metrics: ClassificationMetricsWithStatistics):
        return [
            format_method(method),
            *[metrics[metric].mean for metric in include]
        ]

    lines = sorted(
        [
            get_line(method, metrics)
            for method, metrics in metrics_for_methods.items()
        ],
        key=get(1),
        reverse=True,
    )

    max_by_column = [
        None if index == 0 else max(pluck(index, lines))
        for index in range(len(include) + 1)
    ]

    lines_with_differences = [
        list(
            flatten([
                item if item_index == 0 else
                [item, item - max_by_column[item_index]]
                for item_index, item in enumerate(line)
            ])) for line in lines
    ]

    return tabulate_formatted(
        format_structure(
            format_decimal,
            [
                [
                    '', *flatten(
                        map(lambda metric: [format_metric_short(metric), ''],
                            include))
                ],
                *lines_with_differences,
            ],
        ))
Example #13
def test_compare_local_remote_versions(local_versions, remotes, result,
                                       monkeypatch):
    for remote in remotes:
        monkeypatch.setitem(
            vu.version_getter,
            remote[0].type,
            # bind the per-iteration value; a bare closure over the loop
            # variable would make every getter return the last remote's version
            lambda _, version=remote[1]: version,
        )

    assert (vu.compare_local_remote_versions(
        local_versions=local_versions,
        remotes=tuple(pipe(remotes, pluck(0))),
        worker_count=8,
    ) == result)
Example #14
def _create_account_dimensions():
    if frappe.db.exists("Accounting Dimension", {"document_type": "Branch"}):
        return

    doc = frappe.get_doc({
        "doctype": "Accounting Dimension",
        "document_type": "Branch",
        "label": "Branch",
    })
    for company in pluck("name", frappe.get_all("Company")):
        doc.append(
            "dimension_defaults",
            {"company": company},
        )

    doc.save(ignore_permissions=True)
Example #15
def _update_booking_orders(shipping_order):
    for bo in pluck(
            "name",
            frappe.get_all(
                "Booking Order",
                filters={
                    "docstatus": 1,
                    "status": ("in", ["Loaded", "In Transit"]),
                    "last_shipping_order": shipping_order.name,
                },
            ),
    ):
        doc = frappe.get_cached_doc("Booking Order", bo)
        doc.status = "In Transit"
        doc.current_station = shipping_order.current_station
        doc.save()
Example #16
def execute():
    if frappe.db.exists(
        "DocType", "POS Bahrain Settings Tax Exempt"
    ) and frappe.db.exists("DocType", "POS Bahrain Settings Tax Category"):
        settings = frappe.get_single("POS Bahrain Settings")
        for account in pluck(
            "account",
            frappe.get_all(
                "POS Bahrain Settings Tax Exempt",
                filters={
                    "parent": "POS Bahrain Settings",
                    "parentfield": "vat_exempt_account",
                },
                fields="account",
            ),
        ):
            settings.append(
                "vat_tax_categories", {"account": account, "category": "Exempt"}
            )

        settings.save(ignore_permissions=True)

    frappe.delete_doc_if_exists("DocType", "POS Bahrain Settings Tax Exempt")
Example #17
def test_toolz_pluck_default(executor):
    actual = executor(
        pluck(['a', 'b'], default=-1),
        [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}, {'a': 5, 'b': 6},
         {'a': 7, 'b': 8}, {'a': 9, 'c': 0}],
        npartitions=3,
    )

    assert list(actual) == [(1, 2), (3, 4), (5, 6), (7, 8), (9, -1)]
Example #18
def test_toolz_pluck(executor):
    actual = executor(
        pluck('a'),
        [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}, {'a': 5, 'b': 6},
         {'a': 7, 'b': 8}, {'a': 9, 'b': 0}],
        npartitions=3,
    )

    assert list(actual) == [1, 3, 5, 7, 9]
Example #19
def generate_counts_matrix(files):
    """Produce a counts matrix from a TCGA Level 3 archive.

    Parameters
    ----------
    files : list of string
        The input files to read from.

    Returns
    -------
    ids : list of string
        The row names (gene IDs).
    samples : list of string
        The column names (sample IDs).
    counts : array of float, shape (n_genes, n_samples)
        The expression counts.
    """
    ncols = len(files)
    with open(files[0]) as fin:
        header = next(fin)
        id_labels, id_counts = getid_counts(fin)
    # discard all non-unique ids
    nrows = len(id_labels)
    counts = np.zeros((nrows, ncols), dtype=float)  # np.float was removed in NumPy 1.24
    for col, filename in enumerate(files):
        with open(filename) as fin:
            dat = tz.pipe(fin, tz.drop(1), spliteach(sep='\t'), tz.pluck(1),
                          tz.map(float), arrayfromiter(count=nrows))
            counts[:, col] = dat
    samples = list(map(sampleid_from_filename, files))
    rows_to_keep = [
        i for i, gene in enumerate(id_labels) if id_counts[gene] == 1
    ]
    ids = [id_labels[i] for i in rows_to_keep]
    counts = counts[rows_to_keep, :]
    return ids, samples, counts
Example #20
def top(func, output, out_indices, *arrind_pairs, **kwargs):
    """ Tensor operation

    Applies a function, ``func``, across blocks from many different input
    dasks.  We arrange the pattern with which those blocks interact with sets
    of matching indices.  E.g.

        top(func, 'z', 'i', 'x', 'i', 'y', 'i')

    yields an embarrassingly parallel communication pattern and is read as

        z_i = func(x_i, y_i)

    More complex patterns may emerge, including multiple indices

        top(func, 'z', 'ij', 'x', 'ij', 'y', 'ji')

        $$ z_{ij} = func(x_{ij}, y_{ji}) $$

    Indices missing in the output but present in the inputs result in many
    inputs being sent to one function (see examples).

    Examples
    --------

    Simple embarrassing map operation

    >>> inc = lambda x: x + 1
    >>> top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)})  # doctest: +SKIP
    {('z', 0, 0): (inc, ('x', 0, 0)),
     ('z', 0, 1): (inc, ('x', 0, 1)),
     ('z', 1, 0): (inc, ('x', 1, 0)),
     ('z', 1, 1): (inc, ('x', 1, 1))}

    Simple operation on two datasets

    >>> add = lambda x, y: x + y
    >>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (2, 2),
    ...                                                      'y': (2, 2)})  # doctest: +SKIP
    {('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
     ('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
     ('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
     ('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}

    Operation that flips one of the datasets

    >>> addT = lambda x, y: x + y.T  # Transpose each chunk
    >>> #                                        z_ij ~ x_ij y_ji
    >>> #               ..         ..         .. notice swap
    >>> top(addT, 'z', 'ij', 'x', 'ij', 'y', 'ji', numblocks={'x': (2, 2),
    ...                                                       'y': (2, 2)})  # doctest: +SKIP
    {('z', 0, 0): (addT, ('x', 0, 0), ('y', 0, 0)),
     ('z', 0, 1): (addT, ('x', 0, 1), ('y', 1, 0)),
     ('z', 1, 0): (addT, ('x', 1, 0), ('y', 0, 1)),
     ('z', 1, 1): (addT, ('x', 1, 1), ('y', 1, 1))}

    Dot product with contraction over ``j`` index.  Yields list arguments

    >>> top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk', numblocks={'x': (2, 2),
    ...                                                          'y': (2, 2)})  # doctest: +SKIP
    {('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
                            [('y', 0, 0), ('y', 1, 0)]),
     ('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
                            [('y', 0, 1), ('y', 1, 1)]),
     ('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
                            [('y', 0, 0), ('y', 1, 0)]),
     ('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
                            [('y', 0, 1), ('y', 1, 1)])}

    Supports broadcasting rules

    >>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (1, 2),
    ...                                                      'y': (2, 2)})  # doctest: +SKIP
    {('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
     ('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
     ('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
     ('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 1))}
    """
    numblocks = kwargs['numblocks']
    argpairs = list(partition(2, arrind_pairs))

    assert set(numblocks) == set(pluck(0, argpairs))

    all_indices = pipe(argpairs, pluck(1), concat, set)
    dummy_indices = all_indices - set(out_indices)

    # Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
    dims = broadcast_dimensions(argpairs, numblocks)

    # (0, 0), (0, 1), (0, 2), (1, 0), ...
    keytups = list(product(*[range(dims[i]) for i in out_indices]))
    # {i: 0, j: 0}, {i: 0, j: 1}, ...
    keydicts = [dict(zip(out_indices, tup)) for tup in keytups]

    # {j: [1, 2, 3], ...}  For j a dummy index of dimension 3
    dummies = dict((i, list(range(dims[i]))) for i in dummy_indices)

    # Create argument lists
    valtups = []
    for kd in keydicts:
        args = []
        for arg, ind in argpairs:
            tups = lol_tuples((arg, ), ind, kd, dummies)
            tups2 = zero_broadcast_dimensions(tups, numblocks[arg])
            args.append(tups2)
        valtups.append(tuple(args))

    # Add heads to tuples
    keys = [(output, ) + kt for kt in keytups]
    vals = [(func, ) + vt for vt in valtups]

    return dict(zip(keys, vals))
Example #21
def select_to_iterator(sel, dshape=None, **kwargs):
    func = pluck(0) if dshape and isscalar(dshape.measure) else map(tuple)
    _, rows = batch(sel)
    return func(rows)
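The ternary picks between two curried iterators: pluck(0) for scalar measures, map(tuple) for records. On sample rows (assuming curried toolz, as the call pattern implies):

from toolz.curried import map, pluck

rows = [(1, 'a'), (2, 'b')]
list(pluck(0)(rows))    # [1, 2]  -- scalar measure
list(map(tuple)(rows))  # [(1, 'a'), (2, 'b')]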
Example #22
def select_to_iterator(sel, dshape=None, bind=None, **kwargs):
    func = pluck(0) if dshape and isscalar(dshape.measure) else map(tuple)
    _, rows = batch(sel, bind=bind)
    return func(rows)
Example #24
# IPython log file


import itertools as it
with open('pypi-deps.txt', 'r') as fin:
    lines = fin.readlines()
    edges = [line.rstrip().split() for line in lines]
    packages = set(list(it.chain(*edges)))
    
len(edges)
len(packages)
'skimage' in packages
import toolz as tz
from toolz import curried as c
dep_count = tz.pipe(edges, c.pluck(1), tz.frequencies)
dep_count['skimage']
import networkx as nx
deps = nx.DiGraph()
'scikit-image' in packages
'scikit-learn' in packages
for u, v in edges:
    u = u.replace('scikit-', 'sk')
    v = v.replace('scikit-', 'sk')
    deps.add_edge(u, v)
    
deps.number_of_edges()
deps.number_of_nodes()
deps.node['skimage']
deps.in_edges('skimage')
nodes = nx.katz_centrality(deps)
central = sorted(deps.nodes(), key=nodes.__getitem__, reverse=True)
Example #25
 "collections.Counter.elements": (
     chained(collections.Counter, op.methodcaller("elements"), sorted),
     [1, 2, 2, 3, 3, 3, 4, 4, 4, 4],
 ),
 "collections.Counter.most_common": (
     chained(collections.Counter, op.methodcaller("most_common", 2),
             sorted),
     [1, 2, 2, 3, 3, 3, 4, 4, 4, 4],
 ),
 "set": (chained(set, sorted), [1, 2, 2, 3, 3, 3, 4, 4, 4, 4]),
 "map": (chained(curried.map(lambda x: 2 * x), list), [1, 2, 3, 4, 5, 6]),
 "filter": (chained(curried.filter(lambda x: x % 2 == 0), list), range(10)),
 "remove": (chained(curried.remove(lambda x: x % 2 == 0), list), range(10)),
 # example from toolz docs
 "pluck-single": (
     chained(curried.pluck("name"), list),
     [{
         "id": 1,
         "name": "Cheese"
     }, {
         "id": 2,
         "name": "Pies"
     }],
 ),
 # example from toolz docs
 "pluck-mulitple": (chained(curried.pluck([0, 1]), list), [[1, 2, 3],
                                                           [4, 5, 7]]),
 # example from toolz docs
 "join": (
     chained(
         curried.join(
Example #26
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-d', '--debug', action='store_true', default=False, help='Display debug messages')
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Increase output verbosity')
    global args
    args = parser.parse_args()
    logging.basicConfig(
        level=logging.DEBUG if args.debug else (logging.INFO if args.verbose else logging.WARNING),
        stream=sys.stdout,
        )

    if not os.path.isdir(json_dir_path):
        os.mkdir(json_dir_path)
    if not os.path.isdir(ast_dir_path):
        os.mkdir(ast_dir_path)

    # Load variables definitions

    tgvh_infos = list(load_tgvH_file())

    # Write constants

    constant_by_name = pipe(
        tgvh_infos,
        filter(lambda val: val['type'] == 'variable_const'),
        map(lambda d: (d['name'], d['value'])),
        dict,
        )
    write_json_file(data=constant_by_name, file_name='constants.json')

    # Write variables dependencies

    regles_nodes = list(mapcat(load_regles_nodes, iter_json_file_names('chap-*.json', 'res-ser*.json')))
    dependencies_by_formula_name = dict(list(mapcat(dependencies_visitors.visit_node, regles_nodes)))
    write_json_file(data=dependencies_by_formula_name, file_name='formulas_dependencies.json')

    # Write variables definitions

    ast_infos_by_variable_name = {}
    for regle_node in regles_nodes:
        regle_infos = {
            'regle_applications': regle_node['applications'],
            'regle_linecol': regle_node['linecol'],
            'regle_name': regle_node['name'],
            'source_file_name': regle_node['source_file_name'],
            }
        regle_tags = list(pluck('value', regle_node.get('tags', [])))
        if regle_tags:
            regle_infos['regle_tags'] = regle_tags
        for formula_node in regle_node['formulas']:
            if formula_node['type'] == 'formula':
                ast_infos_by_variable_name[formula_node['name']] = assoc(
                    regle_infos, 'formula_linecol', formula_node['linecol'])
            elif formula_node['type'] == 'pour_formula':
                for unlooped_formula_node in unloop_helpers.iter_unlooped_nodes(
                        loop_variables_nodes=formula_node['loop_variables'],
                        node=formula_node['formula'],
                        unloop_keys=['name'],
                        ):
                    pour_formula_infos = merge(regle_infos, {
                        'pour_formula_linecol': formula_node['formula']['linecol'],
                        'pour_formula_name': formula_node['formula']['name'],
                        })
                    ast_infos_by_variable_name[unlooped_formula_node['name']] = pour_formula_infos
            else:
                assert False, 'Unhandled formula_node type: {}'.format(formula_node)

    def rename_key(d, key_name, key_new_name):
        return assoc(dissoc(d, key_name), key_new_name, d[key_name])

    tgvh_infos_by_variable_name = pipe(
        tgvh_infos,
        filter(lambda d: d['type'] in ('variable_calculee', 'variable_saisie')),
        map(lambda d: rename_key(d, 'linecol', 'tgvh_linecol')),
        map(lambda d: (d['name'], d)),  # Index by name
        dict,
        )

    definition_by_variable_name = merge_with(merge, ast_infos_by_variable_name, tgvh_infos_by_variable_name)

    write_json_file(data=definition_by_variable_name, file_name='variables_definitions.json')

    return 0
Example #27
def dict_to_struct_table_horizontal(dictionary: Mapping) -> List[List]:
    items = list(dictionary.items())
    return [
        list(pluck(0, items)),
        list(pluck(1, items)),
    ]
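Worked on a small mapping (invented), the two pluck calls transpose the items into a key row and a value row:

from toolz import pluck

items = list({'a': 1, 'b': 2}.items())
[list(pluck(0, items)), list(pluck(1, items))]  # [['a', 'b'], [1, 2]]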
Example #28
    def set_report_details(self):
        args = merge(
            keyfilter(
                lambda x: x in ["user", "pos_profile", "company"], self.as_dict()
            ),
            {
                "start_datetime": self.start_datetime or frappe.utils.now(),
                "end_datetime": self.end_datetime or frappe.utils.now(),
            },
        )

        sales, returns = _get_invoices(args)
        sales_payments, returns_payments = _get_si_payments(args)
        payin_payments, payout_payments = _get_pe_payments(args)

        def get_mop_amount(mode_of_payment=None, payments=[]):
            return compose(
                lambda x: x.get("amount"),
                excepts(StopIteration, first, lambda x: {"amount": 0}),
                filter(lambda x: x.get("mode_of_payment") == mode_of_payment),
            )(payments)

        get_sales_amount = partial(get_mop_amount, payments=sales_payments)
        get_returns_amount = partial(get_mop_amount, payments=returns_payments)
        get_payin_amount = partial(get_mop_amount, payments=payin_payments)
        get_payout_amount = partial(get_mop_amount, payments=payout_payments)

        def make_payment(mode_of_payment):
            sales_amount = get_sales_amount(mode_of_payment)
            returns_amount = get_returns_amount(mode_of_payment)
            payin_amount = get_payin_amount(mode_of_payment)
            payout_amount = get_payout_amount(mode_of_payment)
            return {
                "mode_of_payment": mode_of_payment,
                "sales_amount": sales_amount,
                "returns_amount": returns_amount,
                "payin_amount": payin_amount,
                "payout_amount": payout_amount,
                "total_amount": sales_amount
                + returns_amount
                + payin_amount
                + payout_amount,
            }

        sum_by_total = sumby("total")
        sum_by_net = sumby("net_total")
        sum_by_discount = compose(operator.neg, sumby("discount_amount"))
        sum_by_taxes = sumby("total_taxes_and_charges")
        sum_by_grand = sumby("grand_total")
        sum_by_rounded = sumby("rounded_total")

        get_cash = compose(
            sum,
            map(lambda x: x.get("amount")),
            filter(lambda x: x.get("type") == "Cash"),
        )

        self.cash_sales = get_cash(sales_payments)
        self.cash_returns = get_cash(returns_payments)
        self.cash_payins = get_cash(payin_payments)
        self.cash_payouts = get_cash(payout_payments)

        self.sales__total = sum_by_total(sales)
        self.sales__discount_amount = sum_by_discount(sales)
        self.returns__net_total = sum_by_net(returns)
        self.total__net_total = sum_by_net(sales + returns)
        self.total__total_taxes_and_charges = sum_by_taxes(sales + returns)
        self.total__grand_total = sum_by_grand(sales + returns)
        self.total__rounded_total = sum_by_rounded(sales + returns)

        make_invoice = keyfilter(
            lambda x: x
            in [
                "invoice",
                "total_taxes_and_charges",
                "rounded_total",
                "grand_total",
                "outstanding_amount",
            ]
        )
        mops = compose(unique, pluck("mode_of_payment"))

        self.sales = []
        for invoice in sales:
            self.append("sales", make_invoice(invoice))
        self.returns = []
        for invoice in returns:
            self.append("returns", make_invoice(invoice))
        self.payments = []
        for payment in mops(
            sales_payments + returns_payments + payin_payments + payout_payments
        ):
            self.append("payments", make_payment(payment))