Code Example #1
File: core.py  Project: kastnerkyle/dask
def modf(x):
    tmp = elemwise(np.modf, x)
    left = next(names)
    right = next(names)
    ldsk = dict(((left,) + key[1:], (getitem, key, 0))
                for key in core.flatten(tmp._keys()))
    rdsk = dict(((right,) + key[1:], (getitem, key, 1))
                for key in core.flatten(tmp._keys()))

    if x._dtype is not None:
        a = np.empty((1,), dtype=x._dtype)
        l, r = np.modf(a)
        ldt = l.dtype
        rdt = r.dtype
    else:
        ldt = None
        rdt = None

    L = Array(merge(tmp.dask, ldsk), left, blockdims=tmp.blockdims,
                dtype=ldt)

    R = Array(merge(tmp.dask, rdsk), right, blockdims=tmp.blockdims,
                dtype=rdt)

    return L, R
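A minimal, self-contained sketch of the graph-layering pattern used above (plain dicts with hypothetical keys, assuming only toolz.merge, operator.getitem and numpy): the left/right graphs just add tasks that index into the blocks of the temporary modf result.

import numpy as np
from operator import getitem
from toolz import merge

tmp_dask = {('tmp', 0): (np.modf, ('x', 0))}     # stand-in for tmp.dask
ldsk = {('left', 0): (getitem, ('tmp', 0), 0)}   # fractional parts per block
rdsk = {('right', 0): (getitem, ('tmp', 0), 1)}  # integral parts per block
graph_left = merge(tmp_dask, ldsk)               # graph backing L
graph_right = merge(tmp_dask, rdsk)              # graph backing R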
Code Example #2
def compare_streams(db_engine, date_range, stream_names, allowed_parts_of_speech, max_num_words):
    """Compare tokens from each stream in the stream_names list"""

    ## Create token count dictionaries for each stream name
    count_dicts_dict = {}
    for stream_name in stream_names:
        count_dicts_dict[stream_name] = tz.pipe(
            get_content(
                db_engine, 
                stream_name,
                date_range),
            parse_content_into_count(max_num_words, allowed_parts_of_speech))

    ## Create cross-stream count dictionary
    all_streams_count_dict = reduce(
        lambda x,y: tz.merge_with(sum, x, y),
        count_dicts_dict.values())

    ## Calculate posterior probabilities of the tokens
    posterior_probs = {}
    for stream_name in stream_names:
        posterior_probs[stream_name] = tz.pipe(
            get_posterior_probs_freq(
                500, # limited to the 500 most frequent words in this stream, at this time
                all_streams_count_dict, 
                count_dicts_dict[stream_name]),
            tz.map(lambda x: tz.merge({"stream":stream_name}, x)),
            tz.take(max_num_words),
            list,
        )
    return posterior_probs
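A quick, self-contained illustration (toy counts) of the cross-stream reduction above: tz.merge_with(sum, ...) adds token counts key-wise across the per-stream dictionaries.

from functools import reduce
import toolz as tz

count_dicts = {"stream_a": {"data": 3, "python": 1},
               "stream_b": {"data": 2, "dask": 5}}
all_streams = reduce(lambda x, y: tz.merge_with(sum, x, y), count_dicts.values())
# -> {'data': 5, 'python': 1, 'dask': 5}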
Code Example #3
File: core.py  Project: kastnerkyle/dask
    def __getitem__(self, index):
        # Field access, e.g. x['a'] or x[['a', 'b']]
        if (isinstance(index, (str, unicode)) or
            (    isinstance(index, list)
            and all(isinstance(i, (str, unicode)) for i in index))):
            if self._dtype is not None and isinstance(index, (str, unicode)):
                dt = self._dtype[index]
            elif self._dtype is not None and isinstance(index, list):
                dt = np.dtype([(name, self._dtype[name]) for name in index])
            else:
                dt = None
            return elemwise(getitem, self, index, dtype=dt)

        # Slicing
        out = next(names)
        if not isinstance(index, tuple):
            index = (index,)

        if all(isinstance(i, slice) and i == slice(None) for i in index):
            return self

        dsk, blockdims = slice_array(out, self.name, self.blockdims, index)

        return Array(merge(self.dask, dsk), out, blockdims=blockdims,
                     dtype=self._dtype)
Code Example #4
def compare_streams_across_time(db_engine, configuration):
    """Return distinct words for each considered stream at each time step in 
    the given date range."""
    def date_range_iterator(overall_date_range, time_step):
        """Returns an iterator of the time ranges being considered.
        time_step is assumed to be in minutes"""
        def get_time(overall_start, time_step, step):
            """Return the timestamp that is step time_step's beyond overall_start"""
            return (overall_start + (time_step*(step-1))).strftime("%Y-%m-%dT%H:%M:%SZ")
        overall_start = dt.datetime.strptime(overall_date_range[0], "%Y-%m-%dT%H:%M:%SZ")
        overall_end = dt.datetime.strptime(overall_date_range[1], "%Y-%m-%dT%H:%M:%SZ")
        time_step = dt.timedelta(minutes=time_step)
        return tz.pipe(
            # Number of steps to take
            (overall_end - overall_start).total_seconds() / time_step.total_seconds(), 
            int,
            # Build range
            lambda x: range(1,x+2), 
            # Convert to timestamps
            tz.map(lambda x: [
                get_time(overall_start, time_step, x-1), 
                get_time(overall_start, time_step, x)]))
    result = []
    for date_range in date_range_iterator(configuration['overall_date_range'], configuration['time_step']):
        result.append(
            tz.pipe( # Stream comparison for a particular time period
                compare_streams(
                    db_engine,
                    date_range,
                    configuration['stream_names'],
                    configuration['allowed_parts_of_speech'],
                    configuration['max_num_words']),
                lambda x: tz.merge(x, {'date_range': date_range}))) # add in date_range entry
    return result
Code Example #5
File: core.py  Project: kastnerkyle/dask
def store(sources, targets, **kwargs):
    """ Store dask arrays in array-like objects, overwrite data in target

    This stores dask arrays into objects that support numpy-style setitem
    indexing.  It stores values chunk by chunk so that it does not have to
    fill up memory.  For best performance you can align the block size of
    the storage target with the block size of your array.

    If your data fits in memory then you may prefer calling
    ``np.array(myarray)`` instead.

    Parameters
    ----------

    sources: Array or iterable of Arrays
    targets: array-like or iterable of array-likes
        These should support setitem syntax ``target[10:20] = ...``

    Examples
    --------

    >>> x = ...  # doctest: +SKIP

    >>> import h5py  # doctest: +SKIP
    >>> f = h5py.File('myfile.hdf5')  # doctest: +SKIP
    >>> dset = f.create_dataset('/data', shape=x.shape,
    ...                                  chunks=x.blockshape,
    ...                                  dtype='f8')  # doctest: +SKIP

    >>> store(x, dset)  # doctest: +SKIP

    Alternatively store many arrays at the same time

    >>> store([x, y, z], [dset1, dset2, dset3])  # doctest: +SKIP
    """
    single_output = True
    if not isinstance(sources, (list, tuple)):
        sources = [sources]
    if not isinstance(targets, (list, tuple)):
        targets = [targets]
        single_output = False

    if len(sources) != len(targets):
        raise ValueError("Different number of sources [%d] and targets [%d]"
                        % (len(sources), len(targets)))

    updates = [insert_to_ooc(tgt, src) for tgt, src in zip(targets, sources)]
    dsk = merge([src.dask for src in sources] + updates)
    keys = [key for u in updates for key in u]
    get(dsk, keys, **kwargs)

    if single_output:
        targets = targets[0]
    return targets
Code Example #6
File: tools.py  Project: wd15/extremefill2D
def get_treant_data(treant):
    """Extract UUID, tags and categories as a dict from Treant.

    Args:
      treant: the treant to extract data from

    Returns:
      a dict of treant data
    """
    return merge(
        dict(uuid=treant.uuid[:8], tags=list(treant.tags)),
        dict(treant.categories)
    )
Code Example #7
File: core.py  Project: kastnerkyle/dask
def from_array(x, blockdims=None, blockshape=None, name=None, **kwargs):
    """ Create dask array from something that looks like an array

    Input must have a ``.shape`` and support numpy-style slicing.

    Example
    -------

    >>> x = h5py.File('...')['/data/path']  # doctest: +SKIP
    >>> a = da.from_array(x, blockshape=(1000, 1000))  # doctest: +SKIP
    """
    if blockdims is None:
        blockdims = blockdims_from_blockshape(x.shape, blockshape)
    name = name or next(names)
    dask = merge({name: x}, getem(name, blockdims=blockdims))
    return Array(dask, name, blockdims=blockdims, dtype=x.dtype)
Code Example #8
File: tools.py  Project: wd15/extremefill2D
def render_j2(tpl_path, data, filters):
    """Render a Jinja template

    Args:
      tpl_path: path to the template
      data: the template data as a dictionary
      filters: any filter functions to apply as a dictionary

    Returns:
      rendered template as a string
    """
    path, filename = os.path.split(tpl_path)
    loader = jinja2.FileSystemLoader(path or './')
    env = jinja2.Environment(loader=loader)
    env.filters = merge(env.filters, filters)
    return env.get_template(filename).render(**data)
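A hedged usage sketch for render_j2 above (the template file and the `shout` filter are hypothetical, assuming jinja2 is installed): merge(env.filters, filters) keeps Jinja's built-in filters and layers the custom ones on top.

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    tpl = os.path.join(tmp, "hello.j2")
    with open(tpl, "w") as f:
        f.write("Hello {{ name | shout }}!")
    # `shout` is a made-up filter; built-ins such as `upper` remain available too
    print(render_j2(tpl, {"name": "world"}, {"shout": lambda s: s.upper()}))
    # -> Hello WORLD!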
Code Example #9
File: core.py  Project: kastnerkyle/dask
def map_blocks(x, func, blockshape=None, blockdims=None, dtype=None):
    """ Map a function across all blocks of a dask array

    You must also specify the blockdims/blockshape of the resulting array.  If
    you don't then we assume that the resulting array has the same block
    structure as the input.

    >>> import dask.array as da
    >>> x = da.ones((8,), blockshape=(4,))

    >>> np.array(x.map_blocks(lambda x: x + 1))
    array([ 2.,  2.,  2.,  2.,  2.,  2.,  2.,  2.])

    If function changes shape of the blocks provide a blockshape

    >>> y = x.map_blocks(lambda x: x[::2], blockshape=(2,))

    Or, if the result is ragged, provide a blockdims

    >>> y = x.map_blocks(lambda x: x[::2], blockdims=((2, 2),))

    Your block function can learn where in the array it is if it supports a
    block_id keyword argument.  This will receive entries like (2, 0, 1), the
    position of the block in the dask array.

    >>> def func(block, block_id=None):
    ...     pass
    """
    if blockshape is not None:
        blockdims = tuple([nb * (bs,)
                            for nb, bs in zip(x.numblocks, blockshape)])
    if blockdims is None:
        blockdims = x.blockdims

    name = next(names)

    try:
        spec = inspect.getargspec(func)
    except:
        spec = None
    if spec and 'block_id' in spec.args:
        dsk = dict(((name,) + k[1:], (partial(func, block_id=k[1:]), k))
                    for k in core.flatten(x._keys()))
    else:
        dsk = dict(((name,) + k[1:], (func, k)) for k in core.flatten(x._keys()))

    return Array(merge(dsk, x.dask), name, blockdims=blockdims, dtype=dtype)
Code Example #10
File: core.py  Project: kastnerkyle/dask
def coarsen(reduction, x, axes):
    if not all(bd % div == 0 for i, div in axes.items()
                             for bd in x.blockdims[i]):
        raise ValueError(
            "Coarsening factor does not align with block dimensions")

    if 'dask' in inspect.getfile(reduction):
        reduction = getattr(np, reduction.__name__)

    name = next(names)
    dsk = dict(((name,) + key[1:], (chunk.coarsen, reduction, key, axes))
                for key in core.flatten(x._keys()))
    blockdims = tuple(tuple(int(bd / axes.get(i, 1)) for bd in bds)
                      for i, bds in enumerate(x.blockdims))

    if x._dtype is not None:
        dt = reduction(np.empty((1,) * x.ndim, dtype=x.dtype)).dtype
    else:
        dt = None
    return Array(merge(x.dask, dsk), name, blockdims=blockdims, dtype=dt)
Code Example #11
File: core.py  Project: kastnerkyle/dask
def broadcast_to(x, shape):
    shape = tuple(shape)
    ndim_new = len(shape) - x.ndim
    if ndim_new < 0 or any(new != old
                           for new, old in zip(shape[ndim_new:], x.shape)
                           if old != 1):
        raise ValueError('cannot broadcast shape %s to shape %s'
                         % (x.shape, shape))

    name = next(names)
    blockdims = (tuple((s,) for s in shape[:ndim_new])
                 + tuple(bd if old > 1 else (new,)
                         for bd, old, new in zip(x.blockdims, x.shape,
                                                 shape[ndim_new:])))
    dsk = dict(((name,) + (0,) * ndim_new + key[1:],
                (chunk.broadcast_to, key,
                 shape[:ndim_new] +
                 tuple(bd[i] for i, bd in zip(key[1:], blockdims[ndim_new:]))))
               for key in core.flatten(x._keys()))
    return Array(merge(dsk, x.dask), name, blockdims=blockdims, dtype=x.dtype)
Code Example #12
File: core.py  Project: kastnerkyle/dask
def atop(func, out, out_ind, *args, **kwargs):
    """ Array object version of dask.array.top """
    dtype = kwargs.get('dtype', None)
    arginds = list(partition(2, args)) # [x, ij, y, jk] -> [(x, ij), (y, jk)]
    numblocks = dict([(a.name, a.numblocks) for a, ind in arginds])
    argindsstr = list(concat([(a.name, ind) for a, ind in arginds]))

    dsk = top(func, out, out_ind, *argindsstr, numblocks=numblocks)

    # Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
    shapes = dict((a.name, a.shape) for a, _ in arginds)
    nameinds = [(a.name, i) for a, i in arginds]
    dims = broadcast_dimensions(nameinds, shapes)
    shape = tuple(dims[i] for i in out_ind)

    blockdim_dict = dict((a.name, a.blockdims) for a, _ in arginds)
    blockdimss = broadcast_dimensions(nameinds, blockdim_dict)
    blockdims = tuple(blockdimss[i] for i in out_ind)

    dsks = [a.dask for a, _ in arginds]
    return Array(merge(dsk, *dsks), out, shape, blockdims=blockdims,
                dtype=dtype)
Code Example #13
File: core.py  Project: kastnerkyle/dask
def compute(*args, **kwargs):
    """ Evaluate several dask arrays at once

    The result of this function is always a tuple of numpy arrays. To evaluate
    a single dask array into a numpy array, use ``myarray.compute()`` or simply
    ``np.array(myarray)``.

    Example
    -------

    >>> import dask.array as da
    >>> d = da.ones((4, 4), blockshape=(2, 2))
    >>> a = d + 1  # two different dask arrays
    >>> b = d + 2
    >>> A, B = da.compute(a, b)  # Compute both simultaneously
    """
    dsk = merge(*[arg.dask for arg in args])
    keys = [arg._keys() for arg in args]
    results = get(dsk, keys, **kwargs)

    results2 = tuple(rec_concatenate(x) if arg.shape else unpack_singleton(x)
                     for x, arg in zip(results, args))
    return results2
Code Example #14
File: customer.py  Project: libermatic/leiteng
def _get_formatted_sales_order(doc):
    return merge(
        pick(
            [
                "name",
                "transaction_date",
                "delivery_date",
                "total",
                "grand_total",
                "rounding_adjustment",
                "rounded_total",
            ],
            doc.as_dict(),
        ),
        {
            "delivery_time": doc.le_delivery_time,
            "items": [
                pick(
                    [
                        "item_code",
                        "item_name",
                        "item_group",
                        "qty",
                        "rate",
                        "amount",
                        "net_amount",
                    ],
                    x.as_dict(),
                )
                for x in doc.items
            ],
            "taxes": [
                pick(["description", "tax_amount"], x.as_dict()) for x in doc.taxes
            ],
        },
    )
Code Example #15
File: customer.py  Project: libermatic/leiteng
def create_address(token, **kwargs):
    decoded_token = get_decoded_token(token)
    customer_id = frappe.db.exists(
        "Customer", {"le_firebase_uid": decoded_token["uid"]}
    )
    if not customer_id:
        frappe.throw(frappe._("Customer does not exist on backend"))

    session_user = frappe.session.user
    settings = frappe.get_single("Leiteng Website Settings")
    if not settings.user:
        frappe.throw(frappe._("Site setup not complete"))
    frappe.set_user(settings.user)

    fields = ["address_line1", "address_line2", "city", "state", "country", "pincode"]

    args = pick(fields, kwargs,)
    doc = frappe.get_doc(
        merge({"doctype": "Address", "address_type": "Billing"}, args,)
    )
    doc.append("links", {"link_doctype": "Customer", "link_name": customer_id})
    doc.insert()
    frappe.set_user(session_user)
    return pick(["name"] + fields, doc.as_dict())
Code Example #16
def generate_url_map(yaml_path=TOC_PATH) -> dict:
    """
    Generates mapping from each URL to its previous and next URLs in the
    textbook. The dictionary looks like:

    {
        'ch/10/some_page.html' : {
            'prev': 'ch/09/foo.html',
            'next': 'ch/10/bar.html',
        },
        ...
    }
    """
    with open(yaml_path) as f:
        data = yaml.load(f)

    pipeline = [
        t.remove(_not_internal_link),
        flatmap(_flatten_sections),
        t.map(t.get('url')), list, _sliding_three,
        t.map(_adj_pages),
        t.merge()
    ]
    return t.pipe(data, *pipeline)
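A self-contained sketch of the prev/next mapping built by the pipeline above, assuming the private helpers (_sliding_three, _adj_pages) pad and window the URL list roughly like this; the final merge collapses the per-page dicts into one mapping.

from toolz import merge, sliding_window

urls = ['ch/09/foo.html', 'ch/10/some_page.html', 'ch/10/bar.html']
url_map = merge(
    {cur: {'prev': prev, 'next': nxt}}
    for prev, cur, nxt in sliding_window(3, [None] + urls + [None])
)
# url_map['ch/10/some_page.html'] -> {'prev': 'ch/09/foo.html', 'next': 'ch/10/bar.html'}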
Code Example #17
def _set_freight_in_booking_orders():
    freight = frappe.db.sql(
        """
            SELECT
                boc.name,
                bo.creation,
                bo.modified,
                bo.modified_by,
                bo.owner,
                bo.docstatus,
                bo.name AS parent,
                'freight' AS parentfield,
                'Booking Order' AS parenttype,
                1 AS idx,
                'Packages' AS based_on,
                bo.item_description,
                bo.no_of_packages AS qty,
                boc.charge_amount / bo.no_of_packages AS rate,
                boc.charge_amount AS amount
            FROM `tabBooking Order Charge` AS boc
            LEFT JOIN `tabBooking Order` AS bo ON
                bo.name = boc.parent
            WHERE boc.charge_type = 'Freight'
        """,
        as_dict=1,
    )

    for row in freight:
        _insert_freight(
            merge(row, {
                "name":
                frappe.generate_hash("Booking Order Freight Detail", 10)
            }))

    for row in freight:
        _remove_freight_from_charges(row.get("name"))
Code Example #18
File: core.py  Project: kastnerkyle/dask
def compute(*args, **kwargs):
    """ Evaluate several dask arrays at once

    The result of this function is always a tuple of numpy arrays. To evaluate
    a single dask array into a numpy array, use ``myarray.compute()`` or simply
    ``np.array(myarray)``.

    Example
    -------

    >>> import dask.array as da
    >>> d = da.ones((4, 4), blockshape=(2, 2))
    >>> a = d + 1  # two different dask arrays
    >>> b = d + 2
    >>> A, B = da.compute(a, b)  # Compute both simultaneously
    """
    dsk = merge(*[arg.dask for arg in args])
    keys = [arg._keys() for arg in args]
    results = get(dsk, keys, **kwargs)

    results2 = tuple(
        rec_concatenate(x) if arg.shape else unpack_singleton(x)
        for x, arg in zip(results, args))
    return results2
Code Example #19
File: customer.py  Project: libermatic/leiteng
def get_notes(token, so_name):
    decoded_token = get_decoded_token(token)
    customer_id = frappe.db.exists(
        "Customer", {"le_firebase_uid": decoded_token["uid"]}
    )
    if not customer_id:
        frappe.throw(frappe._("Customer does not exist on backend"))

    if customer_id != frappe.db.get_value("Sales Order", so_name, "customer"):
        frappe.throw(frappe._("Not allowed to view this document"))

    get_dn_fields = compose(
        keyfilter(
            lambda x: x
            in [
                "name",
                "partner",
                "partner_name",
                "scheduled_datetime",
                "posting_datetime",
                "total",
                "total_taxes_and_charges",
                "grand_total",
                "rounding_adjustment",
                "rounded_total",
                "status",
            ]
        ),
        first,
    )
    get_item_fields = compose(
        list,
        map(
            keyfilter(
                lambda x: x
                in [
                    "name",
                    "item_code",
                    "item_name",
                    "item_group",
                    "rate",
                    "amount",
                    "so_detail",
                ]
            )
        ),
        map(lambda x: merge(x, {"name": x.get("child_name")})),
    )

    get_deliveries = compose(
        lambda x: x.values(),
        valmap(lambda x: merge(get_dn_fields(x), {"items": get_item_fields(x)})),
        groupby("name"),
        lambda x: frappe.db.sql(
            """
                SELECT
                    dn.name,
                    dn.sales_partner AS partner,
                    sp.partner_name,
                    dn.le_scheduled_datetime AS scheduled_datetime,
                    TIMESTAMP(dn.posting_date, dn.posting_time) AS posting_datetime,
                    dn.total,
                    dn.total_taxes_and_charges,
                    dn.grand_total,
                    dn.rounding_adjustment,
                    dn.rounded_total,
                    dn.workflow_state AS status,
                    dni.name AS child_name,
                    dni.item_code,
                    dni.item_name,
                    dni.item_group,
                    dni.qty,
                    dni.rate,
                    dni.amount,
                    dni.so_detail
                FROM `tabDelivery Note Item` AS dni
                LEFT JOIN `tabDelivery Note` AS dn ON dn.name = dni.parent
                LEFT JOIN `tabSales Partner` AS sp ON sp.name = dn.sales_partner
                WHERE
                    dn.status < 2 AND
                    dn.workflow_state IN ('Pending', 'Completed') AND
                    dni.against_sales_order = %(against_sales_order)s
            """,
            values={"against_sales_order": x},
            as_dict=1,
        ),
    )
    return get_deliveries(so_name)
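A compact, self-contained sketch (toy rows, made-up field values) of the groupby/valmap/merge shaping used above: flat SQL rows are grouped by delivery note name, then each group is collapsed into one header dict carrying an items list.

from toolz.curried import groupby, merge, valmap

rows = [
    {"name": "DN-001", "total": 100, "item_code": "SKU-1", "qty": 2},
    {"name": "DN-001", "total": 100, "item_code": "SKU-2", "qty": 1},
    {"name": "DN-002", "total": 40, "item_code": "SKU-3", "qty": 4},
]
deliveries = valmap(
    lambda grp: merge(
        {"name": grp[0]["name"], "total": grp[0]["total"]},
        {"items": [{"item_code": r["item_code"], "qty": r["qty"]} for r in grp]},
    ),
    groupby("name", rows),
)
list(deliveries.values())
# -> [{'name': 'DN-001', 'total': 100, 'items': [...]},
#     {'name': 'DN-002', 'total': 40, 'items': [...]}]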
Code Example #20
File: xz_report.py  Project: sunhoww/posx
    def set_report_details(self):
        args = merge(
            keyfilter(
                lambda x: x in ["user", "pos_profile", "company"], self.as_dict()
            ),
            {
                "start_datetime": self.start_datetime or frappe.utils.now(),
                "end_datetime": self.end_datetime or frappe.utils.now(),
            },
        )

        sales, returns = _get_invoices(args)
        sales_payments, returns_payments = _get_si_payments(args)
        payin_payments, payout_payments = _get_pe_payments(args)

        def get_mop_amount(mode_of_payment=None, payments=[]):
            return compose(
                lambda x: x.get("amount"),
                excepts(StopIteration, first, lambda x: {"amount": 0}),
                filter(lambda x: x.get("mode_of_payment") == mode_of_payment),
            )(payments)

        get_sales_amount = partial(get_mop_amount, payments=sales_payments)
        get_returns_amount = partial(get_mop_amount, payments=returns_payments)
        get_payin_amount = partial(get_mop_amount, payments=payin_payments)
        get_payout_amount = partial(get_mop_amount, payments=payout_payments)

        def make_payment(mode_of_payment):
            sales_amount = get_sales_amount(mode_of_payment)
            returns_amount = get_returns_amount(mode_of_payment)
            payin_amount = get_payin_amount(mode_of_payment)
            payout_amount = get_payout_amount(mode_of_payment)
            return {
                "mode_of_payment": mode_of_payment,
                "sales_amount": sales_amount,
                "returns_amount": returns_amount,
                "payin_amount": payin_amount,
                "payout_amount": payout_amount,
                "total_amount": sales_amount
                + returns_amount
                + payin_amount
                + payout_amount,
            }

        sum_by_total = sumby("total")
        sum_by_net = sumby("net_total")
        sum_by_discount = compose(operator.neg, sumby("discount_amount"))
        sum_by_taxes = sumby("total_taxes_and_charges")
        sum_by_grand = sumby("grand_total")
        sum_by_rounded = sumby("rounded_total")

        get_cash = compose(
            sum,
            map(lambda x: x.get("amount")),
            filter(lambda x: x.get("type") == "Cash"),
        )

        self.cash_sales = get_cash(sales_payments)
        self.cash_returns = get_cash(returns_payments)
        self.cash_payins = get_cash(payin_payments)
        self.cash_payouts = get_cash(payout_payments)

        self.sales__total = sum_by_total(sales)
        self.sales__discount_amount = sum_by_discount(sales)
        self.returns__net_total = sum_by_net(returns)
        self.total__net_total = sum_by_net(sales + returns)
        self.total__total_taxes_and_charges = sum_by_taxes(sales + returns)
        self.total__grand_total = sum_by_grand(sales + returns)
        self.total__rounded_total = sum_by_rounded(sales + returns)

        make_invoice = keyfilter(
            lambda x: x
            in [
                "invoice",
                "total_taxes_and_charges",
                "rounded_total",
                "grand_total",
                "outstanding_amount",
            ]
        )
        mops = compose(unique, pluck("mode_of_payment"))

        self.sales = []
        for invoice in sales:
            self.append("sales", make_invoice(invoice))
        self.returns = []
        for invoice in returns:
            self.append("returns", make_invoice(invoice))
        self.payments = []
        for payment in mops(
            sales_payments + returns_payments + payin_payments + payout_payments
        ):
            self.append("payments", make_payment(payment))
Code Example #21
File: core.py  Project: kastnerkyle/dask
def unique(x):
    name = next(names)
    dsk = dict(
        ((name, i), (np.unique, key)) for i, key in enumerate(x._keys()))
    parts = get(merge(dsk, x.dask), list(dsk.keys()))
    return np.unique(np.concatenate(parts))
Code Example #22
 def make_ag_row(row, label):
     return merge(row, {"voucher_type": label})
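For context, toolz.merge is right-biased, so the label passed in always wins over any existing "voucher_type" key in the row (toy values shown).

from toolz import merge

merge({"voucher_type": "GL Entry", "amount": 100}, {"voucher_type": "Journal Entry"})
# -> {'voucher_type': 'Journal Entry', 'amount': 100}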
Code Example #23
File: app.py  Project: dmyers87/provision-account
def prepare_output(world):
    valid_cfn = get_in(['valid_cfn'], world)
    metadata = callback_metadata(properties(world))
    message_type = 'account-link-provisioned' if request_type(world) in {
        'Create', 'Update'
    } else 'account-link-deprovisioned'
    visible_cloudtrail_arns_string = null_to_none(
        get_in(['Discovery', 'VisibleCloudTrailArns'], valid_cfn))
    visible_cloudtrail_arns = visible_cloudtrail_arns_string.split(
        ',') if visible_cloudtrail_arns_string else None
    master_payer_billing_bucket_name = (
        null_to_none(
            get_in(['Discovery', 'MasterPayerBillingBucketName'], valid_cfn))
        or null_to_none(
            get_in(['MasterPayerAccount', 'ReportS3Bucket'], valid_cfn)))
    master_payer_billing_bucket_path = (
        null_to_none(
            get_in(['Discovery', 'MasterPayerBillingBucketPath'], valid_cfn))
        or null_to_none(
            get_in(['MasterPayerAccount', 'ReportS3Prefix'], valid_cfn)))
    output = {
        **default_metadata, 'message_type': message_type,
        'data': {
            'metadata': {
                'cloud_region': metadata['Region'],
                'external_id': metadata['ExternalId'],
                'cloud_account_id': metadata['AccountId'],
                'cz_account_name': metadata['AccountName'],
                'reactor_id': metadata['ReactorId'],
                'reactor_callback_url': metadata['ReactorCallbackUrl'],
            },
            'links': {
                'audit': {
                    'role_arn':
                    null_to_none(get_in(['AuditAccount', 'RoleArn'],
                                        valid_cfn))
                },
                'cloudtrail_owner': {
                    'sqs_queue_arn':
                    null_to_none(
                        get_in(['CloudTrailOwnerAccount', 'SQSQueueArn'],
                               valid_cfn)),
                    'sqs_queue_policy_name':
                    null_to_none(
                        get_in(
                            ['CloudTrailOwnerAccount', 'SQSQueuePolicyName'],
                            valid_cfn)),
                },
                'master_payer': {
                    'role_arn':
                    null_to_none(
                        get_in(['MasterPayerAccount', 'RoleArn'], valid_cfn))
                },
                'resource_owner': {
                    'role_arn':
                    null_to_none(
                        get_in(['ResourceOwnerAccount', 'RoleArn'], valid_cfn))
                },
                'legacy': {
                    'role_arn':
                    null_to_none(
                        get_in(['LegacyAccount', 'RoleArn'], valid_cfn))
                },
            },
            'discovery': {
                'audit_cloudtrail_bucket_name':
                null_to_none(
                    get_in(['Discovery', 'AuditCloudTrailBucketName'],
                           valid_cfn)),
                'audit_cloudtrail_bucket_prefix':
                null_to_none(
                    get_in(['Discovery', 'AuditCloudTrailBucketPrefix'],
                           valid_cfn)),
                'cloudtrail_sns_topic_arn':
                null_to_none(
                    get_in(['Discovery', 'CloudTrailSNSTopicArn'], valid_cfn)),
                'cloudtrail_trail_arn':
                null_to_none(
                    get_in(['Discovery', 'CloudTrailTrailArn'], valid_cfn)),
                'is_audit_account':
                string_to_bool(
                    get_in(['Discovery', 'IsAuditAccount'], valid_cfn)),
                'is_cloudtrail_owner_account':
                string_to_bool(
                    get_in(['Discovery', 'IsCloudTrailOwnerAccount'],
                           valid_cfn)),
                'is_master_payer_account':
                string_to_bool(
                    get_in(['Discovery', 'IsMasterPayerAccount'], valid_cfn)),
                'is_organization_master_account':
                string_to_bool(
                    get_in(['Discovery', 'IsOrganizationMasterAccount'],
                           valid_cfn)),
                'is_organization_trail':
                string_to_bool(
                    get_in(['Discovery', 'IsOrganizationTrail'], valid_cfn)),
                'is_resource_owner_account':
                string_to_bool(
                    get_in(['Discovery', 'IsResourceOwnerAccount'],
                           valid_cfn)),
                'master_payer_billing_bucket_name':
                master_payer_billing_bucket_name,
                'master_payer_billing_bucket_path':
                master_payer_billing_bucket_path,
                'remote_cloudtrail_bucket':
                string_to_bool(
                    get_in(['Discovery', 'RemoteCloudTrailBucket'],
                           valid_cfn)),
                'visible_cloudtrail_arns':
                visible_cloudtrail_arns,
            }
        }
    }
    return update_in(world, ['output'], lambda x: merge(x or {}, output))
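The final line combines toolz.update_in with merge; a tiny, self-contained illustration of that combination on a toy world dict (keys are hypothetical):

from toolz import merge, update_in

world = {'output': {'existing': 1}}
update_in(world, ['output'],
          lambda x: merge(x or {}, {'message_type': 'account-link-provisioned'}))
# -> {'output': {'existing': 1, 'message_type': 'account-link-provisioned'}}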
Code Example #24
 def fun(self, dic):
     return merge(pick(['_url'], dic),
                  self.plet.parse(StringIO(dic['_body'])))
Code Example #25
def lookup_mac(mac):
    return _.pipe(
        requests.get(f'http://macvendors.co/api/{mac}'),
        __.maybe_json(default={}),
        _.get('result', default={}),
        lambda d: {'mac':mac, 'info': d},
    )

@_.curry
def mac_conv(split_char, mac): 
    return _.pipe(
        mac.split(split_char),
        _.map(lambda b: int(b, 16)),
        _.map(hex),
        _.map(lambda h: h[2:]),
        _.map(lambda h: h.zfill(2)),
        ':'.join,
        lookup_mac,
    )
        
win_mac_conv = mac_conv('-')
macos_mac_conv = mac_conv(':')

# ----------------------------------------------------------------------
# ARP
# ----------------------------------------------------------------------

arp_output_macos = _.partial(getoutput, 'arp -a')
arp_macos_re = re.compile(
    fr'^(?P<name>[?.\w-]*)\s+\((?P<ip>{ip_re})\) at (?P<mac>.*?) on .*$'
)

arp_output_win = _.partial(getoutput, 'arp -a')
arp_win_re = re.compile(
    fr'^\s+(?P<ip>{ip_re})\s+(?P<mac>.*?)\s+\w+\s*$'
)

def get_arp_data(arp_output, regex, mac_conv):
    return _.pipe(
        arp_output.splitlines(),
        _.map(regex.match),
        _.filter(None),
        _.map(__.call('groupdict')),
        larc.parallel.thread_map(
            lambda d: _.merge(d, mac_conv(d['mac'])),
            max_workers=5,
        ),
        tuple,
    )

def get_arp(arp_output_f, regex, mac_conv):
    def arp(*args):
        return _.pipe(
            arp_output_f(*args),
            lambda output: get_arp_data(output, regex, mac_conv),
        )
    return arp

get_arp_macos = get_arp(arp_output_macos, arp_macos_re, macos_mac_conv)
get_arp_win = get_arp(arp_output_win, arp_win_re, win_mac_conv)

# -----------------------------------------------------------------------
# ICMP ping
# -----------------------------------------------------------------------

@_.curry
def re_map(regex, map_d, content):
    match = regex.search(content)
    if match:
        d = match.groupdict()
        return _.merge(d, _.pipe(
            map_d,
            _.itemmap(__.vcall(lambda key, func: (
                key, func(d[key])
            ))),
        ))

    return {}

ping_re_macos = {
    'tick': re_map(re.compile(
        fr'\d+ bytes from (?P<ip>{ip_re}): icmp_seq=\d+ ttl=\d+'
        fr' time=(?P<ms>\d+(?:\.\d+)?) ms'
    ), {'ms': float}),
    'totals': re_map(re.compile(
        r'(?P<sent>\d+) packets transmitted,'
        r' (?P<received>\d+) packets received,'
        r' (?P<lost>\d+(?:\.\d+))% packet loss'
    ), {'sent': int, 'received': int, 'lost': float}),
    'stats': re_map(re.compile(
        fr'round-trip min/avg/max/stddev ='
        fr' (?P<min>{float_re})/'
        fr'(?P<avg>{float_re})/'
        fr'(?P<max>{float_re})/'
        fr'(?P<std>{float_re}) ms'
    ), {'min': float, 'avg': float, 'max': float, 'std': float}),
}
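A hedged, self-contained sketch of the re_map idea above: capture named groups, then coerce selected fields with a dict of converters. The sample ping line and the simplified IP pattern are hypothetical stand-ins for ip_re.

import re
from toolz import itemmap, merge

tick_re = re.compile(
    r'\d+ bytes from (?P<ip>\d+\.\d+\.\d+\.\d+): icmp_seq=\d+ ttl=\d+'
    r' time=(?P<ms>\d+(?:\.\d+)?) ms'
)

def re_map_simple(regex, conv, content):
    match = regex.search(content)
    if not match:
        return {}
    d = match.groupdict()
    # convert the listed fields, leave everything else as captured strings
    return merge(d, itemmap(lambda kv: (kv[0], kv[1](d[kv[0]])), conv))

re_map_simple(tick_re, {'ms': float},
              '64 bytes from 10.0.0.1: icmp_seq=0 ttl=64 time=1.234 ms')
# -> {'ip': '10.0.0.1', 'ms': 1.234}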
Code Example #26
def get_freight_summary_rows(shipping_order):
    def get_amount(row):
        rate = row.get("rate") or 0
        if row.get("based_on") == "Packages":
            return (row.get("cur_no_of_packages") or 0) * rate
        if row.get("based_on") == "Weight":
            return (row.get("cur_weight_actual") or 0) * rate
        return row.get("amount") or 0

    freight_rows = frappe.db.sql(
        """
            SELECT
                bo.name AS booking_order,
                bo.consignor_name,
                bo.consignee_name,
                bofd.item_description,
                SUM(lobo.no_of_packages) AS cur_no_of_packages,
                SUM(lobo.weight_actual) AS cur_weight_actual,
                bofd.based_on,
                bofd.rate
            FROM `tabLoading Operation Booking Order` AS lobo
            LEFT JOIN `tabLoading Operation` AS lo ON
                lo.name = lobo.parent
            LEFT JOIN `tabBooking Order` AS bo ON
                bo.name = lobo.booking_order
            LEFT JOIN `tabBooking Order Freight Detail` AS bofd ON
                bofd.name = lobo.bo_detail
            WHERE
                lo.docstatus = 1 AND
                lobo.parentfield = 'on_loads' AND
                lo.shipping_order = %(shipping_order)s
            GROUP BY lobo.name
            ORDER BY lo.name, lobo.idx
        """,
        values={"shipping_order": shipping_order},
        as_dict=1,
    )

    booking_orders = set([x.get("booking_order") for x in freight_rows])

    get_first_loaded_booking_orders = compose(
        list, map(lambda x: x.get("booking_order")), frappe.db.sql,
    )
    first_loaded_booking_orders = (
        get_first_loaded_booking_orders(
            """
                SELECT
                    lobo.booking_order,
                    lo.shipping_order
                FROM `tabLoading Operation Booking Order` AS lobo
                LEFT JOIN `tabLoading Operation` AS lo ON
                    lo.name = lobo.parent
                LEFT JOIN `tabBooking Order Charge` AS boc ON
                    boc.parent = lobo.booking_order
                WHERE
                    lo.docstatus = 1 AND
                    lobo.parentfield = 'on_loads' AND
                    lobo.booking_order IN %(booking_orders)s
                GROUP by lobo.booking_order
                HAVING lo.shipping_order = %(shipping_order)s
                ORDER BY lo.posting_datetime
            """,
            values={"booking_orders": booking_orders, "shipping_order": shipping_order},
            as_dict=1,
        )
        if booking_orders
        else []
    )

    charges_rows = (
        frappe.db.sql(
            """
                SELECT
                    bo.name AS booking_order,
                    bo.consignor_name,
                    bo.consignee_name,
                    GROUP_CONCAT(boc.charge_type SEPARATOR ', ') AS item_description,
                    0 AS cur_no_of_packages,
                    0 AS cur_weight_actual,
                    '' AS based_on,
                    0 AS rate,
                    SUM(boc.charge_amount) AS amount
                FROM `tabBooking Order` AS bo
                LEFT JOIN `tabBooking Order Charge` AS boc ON
                    boc.parent = bo.name
                WHERE
                    bo.name IN %(booking_orders)s AND
                    boc.charge_amount > 0
                GROUP BY bo.name
            """,
            values={"booking_orders": first_loaded_booking_orders},
            as_dict=1,
        )
        if first_loaded_booking_orders
        else []
    )

    return sorted(
        [merge(x, {"amount": get_amount(x)}) for x in freight_rows + charges_rows],
        key=lambda x: x.get("booking_order"),
    )
Code Example #27
File: label_printer.py  Project: sunhoww/posx
def get_item_details(item_code, batch_no=None, price_list=None):
    return merge(
        {"price": _get_price(item_code, batch_no, price_list)},
        _get_batch(batch_no),
        _get_barcode(item_code),
    )
Code Example #28
def shell_iter(command,
               *,
               echo: bool = True,
               echo_func: Callable[[Any], None] = cprint(file=sys.stderr,
                                                         end=''),
               timeout: int = None,
               **popen_kw):
    '''Execute a shell command, yield lines of output as they come
    possibly echoing command output to a given echo_func, and finally
    yields the status code of the process.

    This will run the shell command, yielding each line of output as
    it runs. When the process terminates, it will then yield the
    remainder of output, then finally the integer status code. It can
    also be terminated early via a timeout parameter. By default, the
    command will also echo to stderr.

    Args:

      command (str): Shell command to execute. Tilde (~) and shell
        variable completion provided

      echo (bool): Should the output be echoed to echo_func in
        addition to yielding lines of output?

      echo_func (Callable[[Any], None]): Function to use when echoing
        output. **Be warned**, this function is called __for each
        character__ of output. By default, this is `cprint(end='')`
        (i.e. print with end='')

      timeout (int): If set, the process will be killed after this
        many seconds (kill -9).

    Returns: generator of the form

        *output_lines, status_code = shell_iter(...)

      where output_lines is a sequence of strings of output and
      status_code is an integer status code

    Examples:

    >>> with tempfile.TemporaryDirectory() as tempdir:
    ...     root = Path(tempdir)
    ...     _ = Path(root, 'a.txt').write_text('')
    ...     _ = Path(root, 'b.txt').write_text('')
    ...     # FYI, this echos to stderr, which doctests won't capture
    ...     *lines, status = shell_iter(f'ls {root}')
    >>> lines
    ['a.txt', 'b.txt']
    >>> status
    0

    >>> with tempfile.TemporaryDirectory() as tempdir:
    ...     root = Path(tempdir)
    ...     _ = Path(root, 'c.txt').write_text('')
    ...     _ = Path(root, 'd.txt').write_text('')
    ...     *lines, _ = shell_iter(f'ls {root}', echo=False)
    >>> lines
    ['c.txt', 'd.txt']

    >>> *lines, status = shell_iter(
    ...     f'sleep 5', echo=False, timeout=0.01
    ... )
    >>> lines
    []
    >>> status
    -9

    '''
    popen_kw = merge({
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
    }, popen_kw)

    command_split = pipe(
        shlex.split(command),
        map(os.path.expanduser),
        map(os.path.expandvars),
        tuple,
    )

    process = subprocess.Popen(command_split, **popen_kw)

    timer = None
    if timeout:
        timer = start_timeout(command_split, process, timeout)

    def process_running():
        return process.poll() is None

    line = ''
    while process_running():
        char = process.stdout.read(1).decode('utf-8', errors='ignore')
        if char:
            echo_func(char) if echo else ''
            if char == '\n':
                yield line
                line = ''
            else:
                line += char

    if timer:
        timer.cancel()

    rest = process.stdout.read().decode('utf-8', errors='ignore')
    for char in rest:
        echo_func(char) if echo else ''
        if char == '\n':
            yield line
            line = ''
        else:
            line += char

    if line:
        echo_func(char) if echo else ''
        yield line

    yield process.poll()
Code Example #29
File: core.py  Project: kastnerkyle/dask
def stack(seq, axis=0):
    """
    Stack arrays along a new axis

    Given a sequence of dask Arrays form a new dask Array by stacking them
    along a new dimension (axis=0 by default)

    Example
    -------

    Create slices

    >>> import dask.array as da
    >>> import numpy as np

    >>> data = [from_array(np.ones((4, 4)), blockshape=(2, 2))
    ...          for i in range(3)]

    >>> x = da.stack(data, axis=0)
    >>> x.shape
    (3, 4, 4)

    >>> da.stack(data, axis=1).shape
    (4, 3, 4)

    >>> da.stack(data, axis=-1).shape
    (4, 4, 3)

    Result is a new dask Array

    See Also:
        concatenate
    """
    n = len(seq)
    ndim = len(seq[0].shape)
    if axis < 0:
        axis = ndim + axis + 1
    if axis > ndim:
        raise ValueError("Axis must not be greater than number of dimensions"
                         "\nData has %d dimensions, but got axis=%d" %
                         (ndim, axis))

    assert len(set(a.blockdims for a in seq)) == 1  # same blockshape
    shape = seq[0].shape[:axis] + (len(seq), ) + seq[0].shape[axis:]
    blockdims = (seq[0].blockdims[:axis] + ((1, ) * n, ) +
                 seq[0].blockdims[axis:])

    name = next(stacked_names)
    keys = list(product([name], *[range(len(bd)) for bd in blockdims]))

    names = [a.name for a in seq]
    inputs = [(names[key[axis + 1]], ) + key[1:axis + 1] + key[axis + 2:]
              for key in keys]
    values = [(getitem, inp, (slice(None, None, None), ) * axis + (None, ) +
               (slice(None, None, None), ) * (ndim - axis)) for inp in inputs]

    dsk = dict(zip(keys, values))
    dsk2 = merge(dsk, *[a.dask for a in seq])

    if all(a._dtype is not None for a in seq):
        dt = reduce(np.promote_types, [a._dtype for a in seq])
    else:
        dt = None

    return Array(dsk2, name, shape, blockdims=blockdims, dtype=dt)
Code Example #30
File: core.py  Project: kastnerkyle/dask
def stack(seq, axis=0):
    """
    Stack arrays along a new axis

    Given a sequence of dask Arrays form a new dask Array by stacking them
    along a new dimension (axis=0 by default)

    Example
    -------

    Create slices

    >>> import dask.array as da
    >>> import numpy as np

    >>> data = [from_array(np.ones((4, 4)), blockshape=(2, 2))
    ...          for i in range(3)]

    >>> x = da.stack(data, axis=0)
    >>> x.shape
    (3, 4, 4)

    >>> da.stack(data, axis=1).shape
    (4, 3, 4)

    >>> da.stack(data, axis=-1).shape
    (4, 4, 3)

    Result is a new dask Array

    See Also:
        concatenate
    """
    n = len(seq)
    ndim = len(seq[0].shape)
    if axis < 0:
        axis = ndim + axis + 1
    if axis > ndim:
        raise ValueError("Axis must not be greater than number of dimensions"
                "\nData has %d dimensions, but got axis=%d" % (ndim, axis))

    assert len(set(a.blockdims for a in seq)) == 1  # same blockshape
    shape = seq[0].shape[:axis] + (len(seq),) + seq[0].shape[axis:]
    blockdims = (  seq[0].blockdims[:axis]
                + ((1,) * n,)
                + seq[0].blockdims[axis:])

    name = next(stacked_names)
    keys = list(product([name], *[range(len(bd)) for bd in blockdims]))

    names = [a.name for a in seq]
    inputs = [(names[key[axis+1]],) + key[1:axis + 1] + key[axis + 2:]
                for key in keys]
    values = [(getitem, inp, (slice(None, None, None),) * axis
                           + (None,)
                           + (slice(None, None, None),) * (ndim - axis))
                for inp in inputs]

    dsk = dict(zip(keys, values))
    dsk2 = merge(dsk, *[a.dask for a in seq])

    if all(a._dtype is not None for a in seq):
        dt = reduce(np.promote_types, [a._dtype for a in seq])
    else:
        dt = None

    return Array(dsk2, name, shape, blockdims=blockdims, dtype=dt)
Code Example #31
File: core.py  Project: kastnerkyle/dask
def unique(x):
    name = next(names)
    dsk = dict(((name, i), (np.unique, key)) for i, key in enumerate(x._keys()))
    parts = get(merge(dsk, x.dask), list(dsk.keys()))
    return np.unique(np.concatenate(parts))
Code Example #32
File: core.py  Project: kastnerkyle/dask
def concatenate(seq, axis=0):
    """
    Concatenate arrays along an existing axis

    Given a sequence of dask Arrays form a new dask Array by stacking them
    along an existing dimension (axis=0 by default)

    Example
    -------

    Create slices

    >>> import dask.array as da
    >>> import numpy as np

    >>> data = [from_array(np.ones((4, 4)), blockshape=(2, 2))
    ...          for i in range(3)]

    >>> x = da.concatenate(data, axis=0)
    >>> x.shape
    (12, 4)

    >>> da.concatenate(data, axis=1).shape
    (4, 12)

    Result is a new dask Array

    See Also:
        stack
    """
    n = len(seq)
    ndim = len(seq[0].shape)
    if axis < 0:
        axis = ndim + axis
    if axis >= ndim:
        raise ValueError("Axis must be less than than number of dimensions"
                "\nData has %d dimensions, but got axis=%d" % (ndim, axis))

    bds = [a.blockdims for a in seq]

    if not all(len(set(bds[i][j] for i in range(n))) == 1
            for j in range(len(bds[0])) if j != axis):
        raise ValueError("Block shapes do not align")

    shape = (seq[0].shape[:axis]
            + (sum(a.shape[axis] for a in seq),)
            + seq[0].shape[axis + 1:])
    blockdims = (  seq[0].blockdims[:axis]
                + (sum([bd[axis] for bd in bds], ()),)
                + seq[0].blockdims[axis + 1:])

    name = next(concatenate_names)
    keys = list(product([name], *[range(len(bd)) for bd in blockdims]))

    cum_dims = [0] + list(accumulate(add, [len(a.blockdims[axis]) for a in seq]))
    names = [a.name for a in seq]
    values = [(names[bisect(cum_dims, key[axis + 1]) - 1],)
                + key[1:axis + 1]
                + (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis+1]) - 1],)
                + key[axis + 2:]
                for key in keys]

    dsk = dict(zip(keys, values))
    dsk2 = merge(dsk, *[a.dask for a in seq])

    if all(a._dtype is not None for a in seq):
        dt = reduce(np.promote_types, [a._dtype for a in seq])
    else:
        dt = None

    return Array(dsk2, name, shape, blockdims=blockdims, dtype=dt)
Code Example #33
from . import lib as lib
from . import subcomponent as subcomponent
from .popularity_contest import popularity_contest
from .split_paths import split_paths

from .lib import (
    # references_graph_to_igraph
    debug,
    pick_attrs)

funcs = tlz.merge(
    pick_attrs([
        "flatten", "over", "split_every", "limit_layers", "remove_paths",
        "reverse"
    ], lib), pick_attrs([
        "subcomponent_in",
        "subcomponent_out",
    ], subcomponent), {
        "split_paths": split_paths,
        "popularity_contest": popularity_contest,
        "map": tlz.map
    })


@curry
def nth_or_none(index, xs):
    try:
        return xs[index]
    except IndexError:
        return None

Code Example #34
def _extend_data(filters, data):
    account_currency = frappe.db.get_value("Account", filters.account,
                                           "account_currency")

    def make_row(row, reverse):
        return merge(
            row,
            {
                "payment_document": "GL Payment",
                get_direction(row.payment_type, reverse): row.total_amount,
                get_direction(row.payment_type, reverse=(not reverse)): 0,
                "account_currency": account_currency,
            },
        )

    gl_payments = [
        make_row(x, reverse=False) for x in frappe.db.sql(
            """
                SELECT
                    name AS payment_entry,
                    reference_no,
                    reference_date AS ref_date,
                    payment_type,
                    total_amount,
                    posting_date,
                    IFNULL(party, (
                        SELECT GROUP_CONCAT(gpi.account SEPARATOR ', ')
                        FROM `tabGL Payment Item` AS gpi WHERE gpi.parent = gp.name
                    )) AS against_account,
                    clearance_date
                FROM `tabGL Payment` AS gp
                WHERE
                    payment_account = %(account)s AND
                    docstatus = 1 AND
                    posting_date <= %(report_date)s AND
                    IFNULL(clearance_date, '4000-01-01') > %(report_date)s
            """,
            values=filters,
            as_dict=1,
        )
    ]
    gl_payment_items = [
        make_row(x, reverse=True) for x in frappe.db.sql(
            """
                SELECT
                    gp.name AS payment_entry,
                    gp.reference_no,
                    gp.reference_date AS ref_date,
                    gp.payment_type,
                    (gpi.net_amount + gpi.tax_amount) AS total_amount,
                    gp.posting_date,
                    IFNULL(gp.party, gp.payment_account) AS against_account,
                    gp.clearance_date
                FROM `tabGL Payment Item` AS gpi
                LEFT JOIN `tabGL Payment` AS gp ON gp.name = gpi.parent
                WHERE
                    gpi.account = %(account)s AND
                    gp.docstatus = 1 AND
                    gp.posting_date <= %(report_date)s AND
                    IFNULL(gp.clearance_date, '4000-01-01') > %(report_date)s
            """,
            values=filters,
            as_dict=1,
        )
    ]

    items = data[:-6]
    summary = data[-6:]
    balance_per_gl = summary[0]
    outstanding = summary[2]
    incorrect = summary[3]
    balance_calculated = summary[5]

    total_debit = sum([x.get("debit", 0) for x in gl_payments]) + sum(
        [x.get("debit", 0) for x in gl_payment_items])
    total_credit = sum([x.get("credit", 0) for x in gl_payments]) + sum(
        [x.get("credit", 0) for x in gl_payment_items])

    amounts_not_reflected_in_system = _get_invalid_gl_payments(filters)

    return sorted(
        items + gl_payments + gl_payment_items,
        key=lambda k: k["posting_date"] or frappe.utils.getdate(frappe.utils.
                                                                nowdate()),
    ) + [
        balance_per_gl,
        {},
        merge(
            outstanding,
            {
                "debit": outstanding.get("debit") + total_debit,
                "credit": outstanding.get("credit") + total_credit,
            },
        ),
        get_balance_row(
            incorrect.get("payment_entry"),
            incorrect.get("debit") - incorrect.get("credit") +
            amounts_not_reflected_in_system,
            incorrect.get("account_currency"),
        ),
        {},
        get_balance_row(
            balance_calculated.get("payment_entry"),
            balance_calculated.get("debit") - balance_calculated.get("credit")
            - total_debit + total_credit + amounts_not_reflected_in_system,
            balance_calculated.get("account_currency"),
        ),
    ]
Code Example #35
File: common.py  Project: lowlandresearch/larc
 def __init__(self, graph, selection, *function_spaces):
     self.graph = graph
     self.selection = selection
     self.function_space = _.merge(*function_spaces)
Code Example #36
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-d', '--debug', action='store_true', default=False, help='Display debug messages')
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Increase output verbosity')
    global args
    args = parser.parse_args()
    logging.basicConfig(
        level=logging.DEBUG if args.debug else (logging.INFO if args.verbose else logging.WARNING),
        stream=sys.stdout,
        )

    if not os.path.isdir(json_dir_path):
        os.mkdir(json_dir_path)
    if not os.path.isdir(ast_dir_path):
        os.mkdir(ast_dir_path)

    # Load variables definitions

    tgvh_infos = list(load_tgvH_file())

    # Write constants

    constant_by_name = pipe(
         tgvh_infos,
         filter(lambda val: val['type'] == 'variable_const'),
         map(lambda d: (d['name'], d['value'])),
         dict,
         )
    write_json_file(data=constant_by_name, file_name='constants.json')

    # Write variables dependencies

    regles_nodes = list(mapcat(load_regles_nodes, iter_json_file_names('chap-*.json', 'res-ser*.json')))
    dependencies_by_formula_name = dict(list(mapcat(dependencies_visitors.visit_node, regles_nodes)))
    write_json_file(data=dependencies_by_formula_name, file_name='formulas_dependencies.json')

    # Write variables definitions

    ast_infos_by_variable_name = {}
    for regle_node in regles_nodes:
        regle_infos = {
            'regle_applications': regle_node['applications'],
            'regle_linecol': regle_node['linecol'],
            'regle_name': regle_node['name'],
            'source_file_name': regle_node['source_file_name'],
            }
        regle_tags = list(pluck('value', regle_node.get('tags', [])))
        if regle_tags:
            regle_infos['regle_tags'] = regle_tags
        for formula_node in regle_node['formulas']:
            if formula_node['type'] == 'formula':
                ast_infos_by_variable_name[formula_node['name']] = assoc(
                    regle_infos, 'formula_linecol', formula_node['linecol'])
            elif formula_node['type'] == 'pour_formula':
                for unlooped_formula_node in unloop_helpers.iter_unlooped_nodes(
                        loop_variables_nodes=formula_node['loop_variables'],
                        node=formula_node['formula'],
                        unloop_keys=['name'],
                        ):
                    pour_formula_infos = merge(regle_infos, {
                        'pour_formula_linecol': formula_node['formula']['linecol'],
                        'pour_formula_name': formula_node['formula']['name'],
                        })
                    ast_infos_by_variable_name[unlooped_formula_node['name']] = pour_formula_infos
            else:
                assert False, 'Unhandled formula_node type: {}'.format(formula_node)

    def rename_key(d, key_name, key_new_name):
        return assoc(dissoc(d, key_name), key_new_name, d[key_name])

    tgvh_infos_by_variable_name = pipe(
        tgvh_infos,
        filter(lambda d: d['type'] in ('variable_calculee', 'variable_saisie')),
        map(lambda d: rename_key(d, 'linecol', 'tgvh_linecol')),
        map(lambda d: (d['name'], d)),  # Index by name
        dict,
        )

    definition_by_variable_name = merge_with(merge, ast_infos_by_variable_name, tgvh_infos_by_variable_name)

    write_json_file(data=definition_by_variable_name, file_name='variables_definitions.json')

    return 0
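A small, self-contained illustration (made-up variable names) of the merge_with(merge, ...) step above: definitions coming from the AST and from the tgvH file are combined key-wise, with the per-variable dicts merged.

from toolz import merge, merge_with

ast_infos = {"VAR1": {"regle_name": "r1"}}
tgvh_infos = {"VAR1": {"type": "variable_calculee"},
              "VAR2": {"type": "variable_saisie"}}
merge_with(merge, ast_infos, tgvh_infos)
# -> {'VAR1': {'regle_name': 'r1', 'type': 'variable_calculee'},
#     'VAR2': {'type': 'variable_saisie'}}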
Code Example #37
mem = Memory(cachedir='cache', verbose=0)

# import tsutils.stan_utility_betan as stan_utility
# from sub_utils import eda_utes as eu; reload(eu)
# import tsutils.mutes as mt; from tsutils import lrn_utes as lu; reload(lu)
import myutils as mu
reload(mu)
import pandas_utils as pu
import pandas_utils3 as p3
from faster_pandas import MyPandas as fp
ap = mu.dmap

import dask.dataframe as dd
import dask
dd.DataFrame.q = lambda self, q, local_dict={}, **kw: self.query(
    q, local_dict=z.merge(local_dict, kw))

import altair as A
from altair import Chart, expr as E, datum as D

vc = z.compose(Series, Counter)

from plotnine import ggplot, qplot, aes, theme, ggtitle, xlab, ylab
import plotnine as p9


def mk_geom(p9, pref='geom_'):
    geoms = [c for c in dir(p9) if c.startswith(pref)]
    geom = lambda: None
    geom.__dict__.update(
        {name[len(pref):]: getattr(p9, name)
         for name in geoms})  # completion assumed; the snippet is truncated here
    return geom
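Assuming the completion above, the helper would expose each plotnine geom under a short attribute name:

# Hypothetical usage of mk_geom (depends on the assumed completion above).
geom = mk_geom(p9)
# geom.point is p9.geom_point, geom.histogram is p9.geom_histogram, etc.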
Code Example #38
0
def parse_tables(lines):
    table_lines = pipe(
        enumerate(lines),
        vmap(lambda i, l: (i, TABLE_RE.match(l))),
        vfilter(lambda i, m: m),
        tuple,
    )

    # log.debug(table_lines)

    ntables = 0

    for start_index, match in table_lines:
        table = {'start_index': start_index}
        log.debug(f'start index: {start_index}')
        attr = attr_dict(match.groupdict()['attrib'])
        table['caption'] = attr.pop('caption', '')
        table['attr'] = attr

        for i, line in enumerate(lines[start_index + 1:], start_index + 1):
            th_match = TH_RE.match(line)
            tr_match = TR_RE.match(line)
            if th_match:
                match_dict = th_match.groupdict()
                table['header'] = {
                    'line': i,
                    'tr': pipe(
                        th_match.groupdict()['th'].split('|'),
                        map(lambda s: s.strip()),
                        map(lambda s: (ATTR_RE.search(s),
                                       ATTR_RE.sub('', s))),
                        vmap(lambda m, s:
                             (merge(
                                 {'scope': 'col'},
                                 attr_dict(m.groupdict()['a']) if m else {},
                             ), s)),
                        vmap(lambda attr, td: {'td': td, 'attr': attr}),
                        tuple,
                    ),
                    'attr': pipe(
                        ['ra1', 'ra2'],
                        map(lambda k: match_dict.get(k, '')),
                        filter(None),
                        map(attr_dict),
                        merge,
                    ),
                }

            elif tr_match:
                match_dict = tr_match.groupdict()
                row = {
                    'line': i,
                    'tr': pipe(
                        match_dict['tr'].split('|'),
                        map(lambda s: s.strip()),
                        map(lambda s: (ATTR_RE.search(s),
                                       ATTR_RE.sub('', s))),
                        vmap(lambda m, s: (
                            attr_dict(m.groupdict()['a']) if m else {}, s
                        )),
                        vmap(lambda attr, td: {'attr': attr, 'td': td}),
                        tuple,
                    ),
                    'attr': pipe(
                        ['ra1', 'ra2'],
                        map(lambda k: match_dict.get(k, '')),
                        filter(None),
                        map(attr_dict),
                        merge,
                    )
                }

                table.setdefault('rows', []).append(row)

            else:
                # This is the case where we have gotten to a non-table
                # line of content, thus we are done with the table
                table['end_index'] = i
                log.debug(f'table end: {i}')
                ntables += 1
                yield table
                break
        
    if ntables < len(table_lines):
        # This is the case where the last line of content is a table
        # line
        log.debug('finishing table')
        table['end_index'] = i + 1
        yield table
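parse_tables relies on vmap and vfilter, which are not standard toolz; a minimal sketch of what they are assumed to do (curried map/filter that unpack each tuple into positional arguments):

# Assumed helpers (not standard toolz): curried map/filter over unpacked tuples.
from toolz import curry

@curry
def vmap(func, seq):
    return map(lambda args: func(*args), seq)

@curry
def vfilter(pred, seq):
    return filter(lambda args: pred(*args), seq)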
Code Example #39
0
File: customer.py  Project: libermatic/leiteng
def list_orders(token, page="1", page_length="10", status=None):
    decoded_token = get_decoded_token(token)
    customer_id = frappe.db.exists(
        "Customer", {"le_firebase_uid": decoded_token["uid"]}
    )
    if not customer_id:
        frappe.throw(frappe._("Customer does not exist on backend"))

    get_conditions = compose(lambda x: " AND ".join(x), filter(None))
    conditions = get_conditions(
        [
            "docstatus = 1",
            "customer = %(customer)s",
            "status IN %(statuses)s" if status else None,
        ]
    )

    statuses = json.loads(status) if status else None

    get_count = compose(
        lambda x: x[0][0],
        lambda x: frappe.db.sql(
            """
                SELECT COUNT(name) FROM `tabSales Order`
                WHERE {conditions}
            """.format(
                conditions=conditions
            ),
            values={"customer": x, "statuses": statuses},
        ),
    )

    orders = frappe.db.sql(
        """
            SELECT
                name, transaction_date, status,
                total, total_taxes_and_charges, grand_total, rounding_adjustment, rounded_total
            FROM `tabSales Order`
            WHERE {conditions}
            ORDER BY transaction_date DESC, creation DESC
            LIMIT %(start)s, %(page_length)s
        """.format(
            conditions=conditions
        ),
        values={
            "customer": customer_id,
            "statuses": statuses,
            "start": (frappe.utils.cint(page) - 1) * frappe.utils.cint(page_length),
            "page_length": frappe.utils.cint(page_length),
        },
        as_dict=1,
    )
    items = (
        groupby(
            "parent",
            frappe.db.sql(
                """
                    SELECT parent, name, item_code, item_name, item_group, qty, rate, amount
                    FROM `tabSales Order Item`
                    WHERE parent IN %(parents)s
                """,
                values={"parents": [x.get("name") for x in orders]},
                as_dict=1,
            ),
        )
        if orders
        else {}
    )

    return {
        "count": get_count(customer_id),
        "items": [merge(x, {"items": items.get(x.get("name"), [])}) for x in orders],
    }
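The response above attaches child rows to each order by grouping on the parent column; a minimal sketch with hypothetical rows:

# Minimal sketch, assuming toolz; the orders and rows are hypothetical.
from toolz import merge
from toolz.curried import groupby

orders = [{"name": "SO-0001", "status": "To Deliver and Bill"}]
rows = [{"parent": "SO-0001", "item_code": "ITEM-A", "qty": 2}]

items = groupby("parent", rows)   # {"SO-0001": [{"parent": "SO-0001", ...}]}
result = [merge(x, {"items": items.get(x["name"], [])}) for x in orders]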
Code Example #40
0
def discover_connected_account(world):
    output = {
        'IsResourceOwnerAccount': True,
    }
    return update_in(world, ['output'], lambda x: merge(x or {}, output))
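update_in applies a function to a nested value (creating intermediate keys if needed) and returns a new mapping; a minimal sketch of the call above:

# Minimal sketch, assuming toolz; the `world` contents are hypothetical.
from toolz import merge, update_in

world = {'input': {}, 'output': None}
out = update_in(world, ['output'],
                lambda x: merge(x or {}, {'IsResourceOwnerAccount': True}))
# out['output'] == {'IsResourceOwnerAccount': True}; `world` is not mutated.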
Code Example #41
0
File: core.py  Project: kastnerkyle/dask
def concatenate(seq, axis=0):
    """
    Concatenate arrays along an existing axis

    Given a sequence of dask Arrays form a new dask Array by stacking them
    along an existing dimension (axis=0 by default)

    Example
    -------

    Create slices

    >>> import dask.array as da
    >>> import numpy as np

    >>> data = [from_array(np.ones((4, 4)), blockshape=(2, 2))
    ...          for i in range(3)]

    >>> x = da.concatenate(data, axis=0)
    >>> x.shape
    (12, 4)

    >>> da.concatenate(data, axis=1).shape
    (4, 12)

    Result is a new dask Array

    See Also:
        stack
    """
    n = len(seq)
    ndim = len(seq[0].shape)
    if axis < 0:
        axis = ndim + axis
    if axis >= ndim:
        raise ValueError("Axis must be less than than number of dimensions"
                         "\nData has %d dimensions, but got axis=%d" %
                         (ndim, axis))

    bds = [a.blockdims for a in seq]

    if not all(
            len(set(bds[i][j] for i in range(n))) == 1
            for j in range(len(bds[0])) if j != axis):
        raise ValueError("Block shapes do not align")

    shape = (seq[0].shape[:axis] + (sum(a.shape[axis] for a in seq), ) +
             seq[0].shape[axis + 1:])
    blockdims = (seq[0].blockdims[:axis] + (sum([bd[axis] for bd in bds],
                                                ()), ) +
                 seq[0].blockdims[axis + 1:])

    name = next(concatenate_names)
    keys = list(product([name], *[range(len(bd)) for bd in blockdims]))

    cum_dims = [0] + list(
        accumulate(add, [len(a.blockdims[axis]) for a in seq]))
    names = [a.name for a in seq]
    values = [
        (names[bisect(cum_dims, key[axis + 1]) - 1], ) + key[1:axis + 1] +
        (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1], ) +
        key[axis + 2:] for key in keys
    ]

    dsk = dict(zip(keys, values))
    dsk2 = merge(dsk, *[a.dask for a in seq])

    if all(a._dtype is not None for a in seq):
        dt = reduce(np.promote_types, [a._dtype for a in seq])
    else:
        dt = None

    return Array(dsk2, name, shape, blockdims=blockdims, dtype=dt)
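The key remapping above locates the source array for each result block with a bisect over the cumulative block counts; a small worked example:

# Worked example of the bisect lookup: three input arrays, 2 blocks each along axis.
from bisect import bisect

cum_dims = [0, 2, 4, 6]                 # [0] + running totals of block counts
global_index = 3                        # 4th block of the result along the axis
source = bisect(cum_dims, global_index) - 1     # -> 1: the second input array
local_index = global_index - cum_dims[source]   # -> 1: its second block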
Code Example #42
0
File: rest.py  Project: lowlandresearch/larc
    def __call__(self, *parts, **kw):
        return Endpoint(self, parts, **merge(self.method_kw, kw))
Code Example #43
0
File: item.py  Project: libermatic/leiteng
def get_items(page="1",
              field_filters=None,
              attribute_filters=None,
              search=None):
    other_fieldnames = ["item_group", "thumbnail", "has_variants"]
    price_list = frappe.db.get_single_value("Shopping Cart Settings",
                                            "price_list")
    products_per_page = frappe.db.get_single_value("Products Settings",
                                                   "products_per_page")
    get_item_groups = compose(
        list,
        unique,
        map(lambda x: x.get("name")),
        concat,
        map(lambda x: get_child_nodes("Item Group", x)
            if x and frappe.db.exists("Item Group", x, cache=True) else []),
    )
    get_other_fields = compose(
        valmap(excepts(StopIteration, first, lambda _: {})),
        groupby("name"),
        lambda item_codes: frappe.db.sql(
            """
                SELECT name, {other_fieldnames}
                FROM `tabItem`
                WHERE name IN %(item_codes)s
            """.format(other_fieldnames=", ".join(other_fieldnames)),
            values={"item_codes": item_codes},
            as_dict=1,
        ),
        lambda items: [x.get("name") for x in items],
    )

    get_page_count = compose(
        lambda x: frappe.utils.ceil(x[0][0] / products_per_page),
        lambda x: frappe.db.sql(
            """
                SELECT COUNT(name) FROM `tabItem` WHERE
                    show_in_website = 1 AND
                    item_group IN %(item_groups)s
            """,
            values={"item_groups": x},
        ),
    )

    field_dict = (frappe.parse_json(field_filters) if isinstance(
        field_filters, str) else field_filters) or {}
    item_groups = (get_item_groups(field_dict.get("item_group"))
                   if field_dict.get("item_group") else None)

    frappe.form_dict.start = (frappe.utils.cint(page) - 1) * products_per_page
    items = get_products_for_website(
        field_filters=merge(
            field_dict, {"item_group": item_groups} if item_groups else {}),
        attribute_filters=frappe.parse_json(attribute_filters),
        search=search,
    )
    other_fields = get_other_fields(items) if items else {}
    item_prices = _get_item_prices(price_list, items) if items else {}

    get_rates = _rate_getter(price_list, item_prices)

    return {
        "page_count":
        get_page_count(item_groups) if item_groups else 0,
        "items": [
            merge(
                x,
                {
                    "route":
                    transform_route(x),
                    "description":
                    frappe.utils.strip_html_tags(x.get("description") or ""),
                },
                get_rates(x.get("name")),
                {
                    k: other_fields.get(x.get("name"), {}).get(k)
                    for k in other_fieldnames
                },
            ) for x in items
        ],
    }
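The compose pipelines above read bottom-up: the last function listed runs first; a minimal sketch of that ordering, assuming curried toolz and hypothetical data:

# Minimal sketch of compose ordering; the data is hypothetical.
from toolz import compose
from toolz.curried import map, unique

get_names = compose(list, unique, map(lambda x: x.get("name")))
get_names([{"name": "A"}, {"name": "B"}, {"name": "A"}])  # -> ['A', 'B']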
Code Example #44
0
File: booking_order.py  Project: libermatic/gg_custom
    def set_qty(row):
        qty = get_qty(row)
        return merge(row, {"qty": qty, "available": qty})
Code Example #45
0
File: assignment.py  Project: dogwynn/coursework
def sync_assignment_from_path(course: IdResourceEndpoint, course_root: str,
                              path: Union[str, Path]):
    @curry
    def do_log(logger, msg):
        return logger('[sync_assignment_from_path] ' + msg)

    log_info = do_log(log.info)
    log_error = do_log(log.error)

    html = templates.common.render_markdown_path(course, course_root, path)

    assign_data = html.meta.copy()
    name = assign_data.get('name')

    if not name:
        log_error(f'The Canvas assignment at {path} does not have a '
                  '"name:" specified.')
        return False

    group_ep = maybe_pipe(
        assign_data.pop('assignment_group', None),
        find_assignment_group(course),
    )
    # rubric = assign_data.pop('rubric', None)
    # if rubric:
    #     if 'use_rubric_for_grading' not in assign_data:
    #         assign_data['use_rubric_for_grading'] = True

    assignment = find_assignment(course, name)
    if not assignment:
        # Need to create new
        log_info(f'Creating new assignment: "{name}" from {path}')
        assignment = new_assignment(
            course,
            merge(assign_data, {'description': str(html)}),
        )

    assign_md = get_metadata(assignment)

    path = resolve_path(course_root, path)
    content_hash = hash_from_content(path.read_text())
    extant_hash = assign_md.get('hash')

    if not content_hash == extant_hash:
        if group_ep:
            log_info(
                f"Found assignment group for {name}: {group_ep.data['name']}")
            assign_data['assignment_group_id'] = group_ep.data['id']

        # log_info(f'Hashes different:'
        #          f' {content_hash} {extant_hash}')
        log_info(f'Updating assignment "{name}" at {path}:\n'
                 f'{pprint.pformat(assign_data)}')
        assign_md['hash'] = content_hash
        set_metadata(assignment, assign_md)

        return update_assignment(
            assignment,
            merge(assign_data, {'description': str(html)}),
            # do_refresh=False,
        )
    return assignment
Code Example #46
0
def test_merge():
    assert merge(factory=lambda: defaultdict(int))({1: 1}) == {1: 1}
    assert merge({1: 1}) == {1: 1}
    assert merge({1: 1}, factory=lambda: defaultdict(int)) == {1: 1}
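The first assertion exercises the curried form of merge: called with only factory, it returns a partial that is then applied to the dicts. A minimal sketch, assuming toolz.curried:

# Minimal sketch, assuming toolz.curried.merge.
from collections import OrderedDict, defaultdict
from toolz.curried import merge

partial_merge = merge(factory=lambda: defaultdict(int))   # keyword only -> curried
assert partial_merge({1: 1}, {2: 2}) == {1: 1, 2: 2}
assert isinstance(merge({1: 1}, {2: 2}, factory=OrderedDict), OrderedDict)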
Code Example #47
0
def signature_to_dict(sig):
    return merge(sig._asdict(), {'python': dict(sig.python._asdict())})