Example #1
def _extend_data(filters, data, inv_idx, emp_idx):
    invoices = [x[inv_idx] for x in data]
    get_employee_map = compose(
        valmap(lambda x:
               [x.get("pb_sales_employee"),
                x.get("pb_sales_employee_name")]),
        valmap(first),
        groupby("name"),
        lambda: frappe.db.sql(
            """
            SELECT name, pb_sales_employee, pb_sales_employee_name FROM `tabSales Invoice`
            WHERE name IN %(invoices)s
        """,
            values={"invoices": invoices},
            as_dict=1,
        ),
    )
    employees = get_employee_map() if invoices else {}
    set_employee = compose(list, lambda x: concatv(x, employees[x[inv_idx]]))
    extended = [set_employee(x) for x in data]

    if not filters.sales_employee:
        return extended

    return [x for x in extended if x[emp_idx] == filters.sales_employee]
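The compose(valmap(first), groupby(...)) chain above is a common toolz recipe for turning a list of rows into a lookup keyed by one field. A minimal, frappe-free sketch of the same pattern (the row data is invented for illustration):

from toolz.curried import compose, first, groupby, valmap

rows = [
    {"name": "SINV-001", "employee": "EMP-01"},
    {"name": "SINV-002", "employee": "EMP-02"},
]

# groupby("name") -> {"SINV-001": [row, ...], ...}; valmap(first) keeps
# one row per key; the outer valmap projects the field we care about.
make_lookup = compose(
    valmap(lambda row: row["employee"]),
    valmap(first),
    groupby("name"),
)
assert make_lookup(rows) == {"SINV-001": "EMP-01", "SINV-002": "EMP-02"}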
Example #2
def _get_species(patients):
    species = compose(
        valmap(lambda x: x['vc_species']), valmap(first), groupby('name'),
        lambda: frappe.get_all('Patient',
                               filters=[['name', 'in', patients]],
                               fields=['name', 'vc_species']))
    return species()
Example #3
def filter_data(field, yaml_data):
    """Extract a field of data from the YAML files.

    Args:
      field: the name of the field to extract
      yaml_data: the benchmark YAML data

    Returns:
      the filtered data from the YAML data
    """
    return pipe(
        yaml_data,
        dict,
        valmap(lambda val: val["data"]),
        valmap(filter(lambda item: item["name"].lower() == field)),
        valmap(list),
        valmap(get(0, default=None)),
        valfilter(lambda x: x is not None),
        itemmap(lambda item: (item[0], update_dict(item[1], name=item[0]))),
        lambda dict_: sorted(list(dict_.values()), key=lambda item: item["name"]),
        map(
            update_in(
                keys=["transform"],
                func=lambda x: x + [dict(expr="datum.x > 0.01", type="filter")],
            )
        ),
    )
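filter_data relies on the curried variants from toolz.curried, which let filter, valmap and get be partially applied before any data arrives. A self-contained toy version of the same select-then-unwrap pipeline (the input dict is made up):

from toolz.curried import filter, get, pipe, valmap

yaml_data = {
    "sim1": {"data": [{"name": "Free_Energy", "values": [1.0]},
                      {"name": "run_time", "values": [2.0]}]},
}

result = pipe(
    yaml_data,
    valmap(lambda val: val["data"]),              # unwrap the "data" key
    valmap(filter(lambda item: item["name"].lower() == "free_energy")),
    valmap(list),                                 # realize the lazy filter
    valmap(get(0, default=None)),                 # first match, or None
)
assert result == {"sim1": {"name": "Free_Energy", "values": [1.0]}}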
Example #4
def filter_memory_data(yaml_data):
    """Filter the memory time data from the meta.yaml's

    Args:
      yaml_data: the benchmark YAML data

    Returns:
      memory versus time data
    """
    def time_ratio(data):
        """Calcuate the sim_time over wall_time ration
        """
        def not0(value):
            """Set to 1e-10 if 0
            """
            if value == 0:
                return 1e-10
            return value

        return pipe(
            data[-1],
            juxt(lambda x: x.get('sim_time', x.get('time')),
                 lambda x: x.get('wall_time', x.get('time'))),
            lambda x: float(x[0]) / not0(float(x[1])))

    def memory_usage(data):
        """Calculate the memory usage in KB
        """
        unit_map = dict(GB=1048576., KB=1., MB=1024., B=1. / 1024.)
        if isinstance(data, dict):
            data_ = data
        else:
            data_ = data[-1]
        key = next(k for k in data_.keys() if 'value' in k)
        return float(data_[key]) * unit_map[data_.get('unit', 'KB')]

    def make_datum(data):
        """Build an item in the data list for one simulation
        """
        return dict(
            name='efficiency',
            values=[
                dict(time_ratio=time_ratio(data['run_time']),
                     memory_usage=memory_usage(data['memory_usage']))
            ],
        )

    return pipe(
        yaml_data, dict, valmap(lambda x: x['data']),
        valmap(
            filter(lambda item: item['name'].lower() in
                   ('memory_usage', 'run_time'))),
        valmap(map(lambda x: (x['name'], x['values']))), valmap(dict),
        valmap(make_datum),
        itemmap(lambda item: (item[0], update_dict(item[1], name=item[0]))),
        lambda dict_: sorted(list(dict_.values()),
                             key=lambda item: item['name']))
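The juxt call inside time_ratio applies two accessor functions to the same record and returns both results as a tuple. The trick in isolation, with an invented record:

from toolz import juxt

record = {"sim_time": 10.0, "wall_time": 4.0}

# juxt(f, g)(x) == (f(x), g(x)): read both fields in one pass, falling
# back to "time" when the specific key is missing.
pair = juxt(lambda x: x.get("sim_time", x.get("time")),
            lambda x: x.get("wall_time", x.get("time")))(record)
assert pair == (10.0, 4.0)
assert pair[0] / pair[1] == 2.5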
Example #5
def get_end_to_end_metrics_table(y_true, results_for_methods_optimized,
                                 results_for_methods_default):
    metrics_for_methods_optimized = valmap(
        lambda r: compute_classification_metrics_from_results_with_statistics(
            y_true, r), results_for_methods_optimized)

    metrics_for_methods_default = valmap(
        lambda r: compute_classification_metrics_from_results_with_statistics(
            y_true, r), results_for_methods_default)

    return metrics_for_methods_optimized, metrics_for_methods_default
Example #6
def _get_balance_qty(items, filters):
    clauses = _get_clauses(filters)
    return compose(
        valmap(lambda x: x['qty']), valmap(first), groupby('item_code'),
        lambda: frappe.db.sql("""
				SELECT
					item_code,
					SUM(actual_qty) as qty
				FROM `tabBin`
				WHERE {clauses}
				GROUP BY item_code
			""".format(clauses=clauses),
                              values=merge(filters, {'items': items}),
                              as_dict=1))()
Example #7
def get_data(filter_func):
    """Read in the YAML data and group by benchmark id

    Args:
      filter_func: function to filter data

    Returns:
      a dictionary with benchmark ids as keys and lists of appropriate
      data for values
    """
    return pipe(
        get_yaml_data(),
        groupby(lambda item: "{0}.{1}".format(
            item[1]['benchmark']['id'], str(item[1]['benchmark']['version']))),
        valmap(filter_func), valmap(vega2to3))
Example #8
def transform(self, X, exposure=None):
    data = valmap(growd(2), valfilter(notnone, dict(X=X, exposure=exposure)))
    return np.concatenate(
        tuple(map(compose(growd(2), methodcaller('predict', **data)),
                  self.estimators)),
        axis=1)
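transform uses valfilter to drop unsupplied keyword arguments before valmap normalizes the rest. A standalone sketch of that drop-the-Nones idiom; growd and notnone are project helpers, so plain substitutes stand in for them here:

from toolz.curried import valfilter, valmap

def transform_kwargs(X, exposure=None):
    # Keep only the keyword arguments that were actually supplied...
    supplied = valfilter(lambda v: v is not None, dict(X=X, exposure=exposure))
    # ...then normalize every surviving value the same way.
    return valmap(lambda v: v if isinstance(v, list) else [v], supplied)

assert transform_kwargs(X=[1, 2]) == {"X": [1, 2]}
assert transform_kwargs(X=[1], exposure=3) == {"X": [1], "exposure": [3]}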
Example #9
def ccds_to_bed(ccds_stream):
  """Convert CCDS dump to Chanjo-style BED stream.

  Main entry point for default Chanjo converter (ccds). It converts
  a sorted (start, chrom) CCDS database to the Chanjo BED-format.

  Args:
    ccds_stream (file): file handle to read CCDS lines from

  Yields:
    Interval: interval with merged block and superblock ids
  """
  return pipe(
    ccds_stream,
    filter(grep('Public')),                    # keep only Public tx
    map(text_type.rstrip),                     # strip \n and spaces
    map(split(sep='\t')),                      # split into list
    map(extract_intervals),                    # convert to Interval
    concat,                                    # flatten
    map(rename_sex_interval),                  # rename sex contigs
    partial(lazy_groupby, key=attrgetter('contig')),  # group by contig
    pluck(1),                                  # extract second item
    map(groupby(attrgetter('name'))),          # non-lazy group by id
    map(valmap(merge_related_elements)),       # group intervals
    map(itervalues),                           # extract values
    map(partial(sorted, key=attrgetter('start'))),  # sort by start pos
    concat                                     # flatten
  )
Example #10
def _append_summary(data):
	def make_data(val):
		clients = compose(
			count,
			unique,
			pluck('customer'),
			lambda: val
		)
		animals = compose(
			valmap(count),
			groupby('species'),
			lambda: val
		)
		return {
			'total_val': reduce(lambda total, x: total + x.get('total_vat'), val, 0.00),
			'animals': _get_dict_to_csv(animals()),
			'clients': clients()
		}
	sales_persons = compose(
		valmap(make_data),
		groupby('sales_person_name'),
		lambda: data
	)()

	data.append({'invoice_no': "'-'"})  # for report html (break loop)
	for k, v in sales_persons.items():
		sales_person = k or 'Not specified'
		data.append({'invoice_no': "'Sales Person'", 'item': f"'{sales_person}'"})
		data.append({'invoice_no': "'Total Amt'", 'item': f"'{fmt_money(v.get('total_val'))}'"})
		data.append({'invoice_no': "'Clients'", 'item': f"'{v.get('clients')}'"})
		data.append({'invoice_no': "'Animals'", 'item': f"'{v.get('animals')}'"})
		data.append({})

	return data
Example #11
def get_batch_no(doctype, txt, searchfield, start, page_len, filters):
    from erpnext.controllers.queries import get_batch_no

    result = get_batch_no(doctype, txt, searchfield, start, page_len, filters)

    get_batch_prices = compose(
        valmap(lambda x: x[0].get("px_price_list_rate")),
        groupby("name"),
        lambda batches: frappe.db.sql(
            """
                SELECT name, px_price_list_rate FROM `tabBatch`
                WHERE name IN %(batches)s
            """,
            values={"batches": [x[0] for x in batches]},
            as_dict=1,
        )
        if batches
        else {},
    )

    batch_prices = get_batch_prices(result)

    def set_price(batch):
        price = batch_prices.get(batch[0])
        if not price:
            return batch
        return batch + tuple(["PRICE-{}".format(price)])

    return [set_price(x) for x in result]
Example #12
def _(dictionary):
    try:
        parameters = pipeline(
            dictionary,
            [
                keyfilter(lambda k: k != '__type'),
                valmap(json_deserialize_types),
            ],
        )
        Class = get_member_by_path(dictionary['__type'])
        instance = make_instance(Class)
        for key, value in parameters.items():
            setattr(instance, key, json_deserialize_types(value))
        return instance
    except KeyError:
        return valmap(json_deserialize_types, dictionary)
Example #13
def filter_data(yaml_data):
    """Extract the free_energy data from the YAML files.

    Args:
      yaml_data: the benchmark YAML data

    Returns:
      the free_energy data from the YAML data
    """
    return pipe(
        yaml_data, dict, valmap(lambda val: val['data']),
        valmap(filter(lambda item: item['name'].lower() == 'free_energy')),
        valmap(list), valmap(get(0)),
        itemmap(lambda item: (item[0], update_dict(item[1], name=item[0]))),
        lambda dict_: sorted(list(dict_.values()),
                             key=lambda item: item['name']))
Example #14
def get_char_to_xhe_shapes() -> Dict[str, List[str]]:
    char_to_shape = pipe(CharHeShapeTable.select(),
                         map(lambda e: (e.char, e.shapes)),
                         filter(lambda e: e[0] != '' and e[1] != ''),
                         groupby(lambda e: e[0]),
                         valmap(lambda e: [s[1] for s in e]), dict)
    return char_to_shape
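get_char_to_xhe_shapes builds a Dict[str, List[str]] from (key, value) pairs: filter out blanks, group by the first element, then project the second element out of each group. The same pipeline without the database, on invented pairs:

from toolz.curried import filter, groupby, pipe, valmap

pairs = [("a", "s1"), ("a", "s2"), ("b", "s3"), ("", "ignored")]

char_to_shape = pipe(
    pairs,
    filter(lambda e: e[0] != '' and e[1] != ''),   # drop blank entries
    groupby(lambda e: e[0]),                       # {"a": [("a", "s1"), ...]}
    valmap(lambda group: [v for _, v in group]),   # keep only the values
    dict)
assert char_to_shape == {"a": ["s1", "s2"], "b": ["s3"]}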
Example #15
def get_freight_rates():
    price_list = frappe.get_cached_value(
        "Selling Settings",
        None,
        "selling_price_list",
    )

    def get_rate(item):
        args = {"price_list": price_list, "uom": item.get("uom")}
        rate = get_item_price(args, item.get("item_code"), ignore_party=True)
        if rate:
            return rate[0][1]

        return 0

    get_freight_items = compose(
        valmap(first),
        groupby("based_on"),
        map(lambda x: merge(x, {"rate": get_rate(x)})),
        frappe.db.sql,
    )

    return get_freight_items(
        """
            SELECT name AS item_code, stock_uom AS uom, gg_freight_based_on AS based_on
            FROM `tabItem`
            WHERE gg_freight_based_on IN ('Packages', 'Weight')
        """,
        as_dict=1,
    )
Example #16
def test_returns_lookup(self):
    self.assertDictEqual(
        # "references" in the result are iterators so we need
        # to convert them to a list before asserting.
        tlz.valmap(
            over("references", list),
            make_lookup([
                {"path": "/nix/store/foo",
                 "references": ["/nix/store/foo", "/nix/store/bar",
                                "/nix/store/hello"]},
                {"path": "/nix/store/bar",
                 "references": ["/nix/store/bar", "/nix/store/tux"]},
                {"path": "/nix/store/hello",
                 "references": []},
            ])),
        {
            "/nix/store/foo": {
                "path": "/nix/store/foo",
                "references": ["/nix/store/bar", "/nix/store/hello"]
            },
            "/nix/store/bar": {
                "path": "/nix/store/bar",
                "references": ["/nix/store/tux"]
            },
            "/nix/store/hello": {
                "path": "/nix/store/hello",
                "references": []
            }
        })
Example #17
def print_earnings():
    registry = Registry()
    registry.load_strategies()

    earnings = []
    for vault in registry.vaults:
        for strategy in vault.strategies:
            contract = strategy.strategy
            strategist = getattr(contract, "strategist", None)

            if strategist:
                config = vault.vault.strategies(contract).dict()
                if config.get("performanceFee"):
                    earnings.append({"strategist": strategist(), **config})

    if earnings:
        print("....All earnings:")
        pprint(earnings)
        data = valmap(
            compose(lambda fg: sum(gain / fee for fee, gain in fg),
                    pluck(["performanceFee", "totalGain"])),
            groupby("strategist", earnings))
        print("....Earnings grouped by strategist")
        pprint(data)
    else:
        print("No earnings found")
Example #18
def get_char_to_lu_phones() -> Dict[str, List[str]]:
    char_to_phones = pipe(CharPhoneTable.select(),
                          map(lambda e: (e.char, e.lu)),
                          filter(lambda e: e[0] != '' and e[1] != ''),
                          groupby(lambda e: e[0]),
                          valmap(lambda phones: [e[1] for e in phones]), dict)
    return char_to_phones
Example #19
def filter_data(field, yaml_data):
    """Extract a field of data from the YAML files.

    Args:
      field: the name of the field to extract
      yaml_data: the benchmark YAML data

    Returns:
      the filtered data from the YAML data
    """
    return pipe(
        yaml_data, dict, valmap(lambda val: val['data']),
        valmap(filter(lambda item: item['name'].lower() == field)),
        valmap(list), valmap(get(0)),
        itemmap(lambda item: (item[0], update_dict(item[1], name=item[0]))),
        lambda dict_: sorted(list(dict_.values()),
                             key=lambda item: item['name']))
Example #20
def tom_jacobian_to_jacobian(panes):
    jac = defaultdict(dict)

    def flip_axes_and_squeeze(x):
        return x[::-1, ::-1].squeeze()

    q_scale = Lc / 1000
    t_scale = cp

    jac["q"]["q"] = panes["q"]["q"]
    jac["q"]["s"] = panes["q"]["T"] / (q_scale / t_scale)
    jac["s"]["q"] = panes["T"]["q"] / (t_scale / q_scale)
    jac["s"]["s"] = panes["T"]["T"]

    jac = valmap(valmap(flip_axes_and_squeeze), jac)

    return jac
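Because jac is a dict of dicts, valmap(valmap(f)) visits both levels. A minimal sketch with toy values:

from toolz.curried import valmap

grid = {"q": {"q": 1, "s": 2}, "s": {"q": 3, "s": 4}}

# The inner valmap(f) is curried, so it is itself a function that the
# outer valmap applies to each inner dict.
doubled = valmap(valmap(lambda x: 2 * x), grid)
assert doubled == {"q": {"q": 2, "s": 4}, "s": {"q": 6, "s": 8}}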
Example #21
def get_values(_type):
    fields = list(map(lambda x: "{}_{}".format(_type, x), params))
    _get = compose(
        valmap(lambda x: x or 0),
        keymap(lambda x: x.replace("{}_".format(_type), "")),
        keyfilter(lambda x: x in fields),
    )
    return _get(data)
Example #22
def tom_base_state_to_base_state(tom_base_state):
    tom_base_state = valmap(compose(np.copy, np.flip), tom_base_state)
    q, T, z, rho = get(["qv", "T", "z", "rho"], tom_base_state)
    return {
        "QT": q * 1000.0,
        "SLI": T + grav / cp * z,
        "height_center": z,
        "density": rho
    }
Example #23
def _compute_scaler_data_from_xarray(dataset):
    logger.info("Computing mean")
    mean = dataset.mean(['x', 'y', 'time'])
    logger.info("Computing std")
    scale = dataset.std(['x', 'y', 'time'])

    args = [valmap(torch.squeeze, _convert_dataset_to_torch_dict(arg))
            for arg in [mean, scale]]
    return args
Example #24
def groupby_count(func):
    """Group the simulation data based on a function.

    Args:
      func: function to group by

    Returns:
      grouped data
    """
    return pipe(get_yaml_data(), map(get(1)), groupby(func), valmap(count))
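groupby followed by valmap(count) is the toolz spelling of a frequency table. A self-contained sketch with made-up records:

from toolz.curried import count, groupby, pipe, valmap

records = [{"tool": "fipy"}, {"tool": "fenics"}, {"tool": "fipy"}]

counts = pipe(
    records,
    groupby(lambda r: r["tool"]),   # {"fipy": [r1, r3], "fenics": [r2]}
    valmap(count))                  # collapse each group to its size
assert counts == {"fipy": 2, "fenics": 1}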
Example #25
    @classmethod
    def from_file_path(cls,
                       file_path: FilePath,
                       sheet_name: str,
                       *,
                       row_limit: int = 100):
        """Help function to populate the columns of a sheet."""
        wb = get_wb(file_path)
        ws = wb[sheet_name]
        rows = tz.take(row_limit, ws.rows)
        header = next(rows)
        names = [c.value for c in header]
        letters = [c.column_letter for c in header]
        indices = [c.column for c in header]
        data_types = tz.pipe(
            rows
            # For each row, create a dict using names as keys
            ,
            tz.map(lambda row: dict(zip(names, row)))
            # Get the .xlsx data_type for each cell
            ,
            tz.map(tz.valmap(lambda cell: cell.data_type))
            # Combine cells into a list per column
            ,
            tz.merge_with(list)
            # Count the cells for each data type in the column
            ,
            tz.valmap(tz.frequencies)
            # Consolidate types
            ,
            tz.valmap(lambda freq: (
                # If at least 1 "d"
                "date" if "d" in freq else
                # If at least 1 "s"
                "text" if "s" in freq else
                # If at least 1 "n"
                "number" if "n" in freq else str(freq))),
            lambda d: [v for k, v in d.items()])

        cols = [
            Col(name=N, letter=L, index=I, data_type=D)
            for N, L, I, D in zip(names, letters, indices, data_types)
        ]
        return cls(name=sheet_name, cols=cols)
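The column-typing pipeline above pivots rows into columns with merge_with(list) and then tallies each column with frequencies. The same two steps on plain data:

from toolz.curried import frequencies, merge_with, pipe, valmap

rows = [{"age": "n", "name": "s"},
        {"age": "n", "name": "s"},
        {"age": "s", "name": "s"}]

column_types = pipe(
    rows,
    merge_with(list),     # {"age": ["n", "n", "s"], "name": ["s", "s", "s"]}
    valmap(frequencies))  # {"age": {"n": 2, "s": 1}, "name": {"s": 3}}
assert column_types == {"age": {"n": 2, "s": 1}, "name": {"s": 3}}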
Example #26
def extract_ctr_children_sum(doc: Tabs_doc) -> Dict[Tab_num, Disp_dur]:
    default = {'dispatchCount': np.nan, 'duration': np.nan}

    def reduce_children(chs: List[Disp_dur]):
        # TODO: extract host info?
        disp_durs = lmap(extract_dur_disp_tab, chs)
        return reduce_keys(op.add, disp_durs) or default

    tab_docs = z.valmap(z.compose(reduce_children, itg('children')), doc)
    return tab_docs
Example #27
def make_data(val):
    clients = compose(count, unique, pluck('customer'), lambda: val)
    animals = compose(valmap(count), groupby('species'), lambda: val)
    return {
        'total_val': reduce(lambda total, x: total + x.get('total_vat'), val, 0.00),
        'animals': _get_dict_to_csv(animals()),
        'clients': clients()
    }
Example #28
def update_dict(funcs, values):
    """Apply a dict of funcs to a dict of values

    >>> values = dict(a=1., b=2., c=4.)
    >>> funcs = dict(
    ...     a=lambda a, b, c: a + b + c,
    ...     b=lambda a, b, c: a * b * c,
    ...     c=lambda a, b, c: a - b - c
    ... )
    >>> assert update_dict(funcs, values) == dict(a=7, b=8, c=-5)
    """
    return valmap(lambda f: f(**values), funcs)
Example #29
def _prepare_kwargs(self, **kwargs):
    """Filter the keywords against the function arguments.  Call
    any value that is callable; no arguments are applied to
    these functions.
    """
    return valmap(
        self._call_lazy_function,
        merge(
            keyfilter(partial(operator.contains, self.arguments), self.keywords),
            kwargs,
        )
    )
Example #30
def from_tom_base_state(tom_base_state):
    """Return WaveEq from Tom's base state"""
    # TODO refactor this to an abstract factory (A)
    tom_base_state = valmap(compose(np.copy, np.flip), tom_base_state)
    q, T, z, rho = get(["qv", "T", "z", "rho"], tom_base_state)
    base_state = {
        "QT": q * 1000.0,
        "SLI": T + grav / cp * z,
        "height_center": z,
        "density": rho
    }
    return WaveEq(base_state)
Example #31
def save_as_html(distinct_words, file_name):
    """Generate and save an html display of the distinct words"""
    ## Wrangle data for presentation
    # Convert tokens into a single string
    def get_token_string(given_values):
        """Return a token string, if the given values are a list of dictionaries"""
        # check if it is a list of token-related information
        if (isinstance(given_values, list) and 
            len(given_values) > 0 and
            isinstance(given_values[0], dict)):
            return tz.pipe(
                given_values,
                tz.map(lambda x: x['token']),
                tz.map(wrap_in_highlight_link), # wrap in link to highlight words
                tz.reduce(lambda x,y: u"{}, {}".format(x, y)))
        # return empty string for empty lists
        elif isinstance(given_values, list) and len(given_values) == 0:
            return ''
        # check if it is a date range in need of formatting
        elif isinstance(given_values, list) and len(given_values) == 2:
            return format_date_range(given_values)
        else:
            return given_values
    def format_date_range(given_date_range):
        """Return a pretty version of the given date_range"""
        # materialize the parsed dates so they can be indexed
        # (map returns an iterator on Python 3)
        date_range = [dt.datetime.strptime(x, "%Y-%m-%dT%H:%M:%SZ")
                      for x in given_date_range]
        return "{} to {} UTC".format(
            date_range[0].strftime("%Y-%m-%d %H:%M"),
            date_range[1].strftime("%H:%M"))
    def wrap_in_highlight_link(given_string):
        """Return the given string wrapped in the html code that highlights
        other occurrences of that same word"""
        return u"""<a href="javascript:void($('.distinct_words').removeHighlight().highlight('{string}'));">{string}</a>""".format(string=given_string)
    formated_distinct_words = tz.pipe(
        distinct_words,
        tz.map(
            tz.valmap(get_token_string)),
        list)

    ## Send to Template For Display
    template_dir = 'templates'
    loader = jinja2.FileSystemLoader(template_dir)
    environment = jinja2.Environment(loader=loader)
    template = environment.get_template('distinct_words.html')
    # binary mode, since the rendered template is encoded to utf-8 bytes below
    with open(file_name, 'wb') as f:
        tz.pipe(
            template.render(distinct_words = formated_distinct_words),
            lambda x: x.encode('utf8'),
            lambda x: f.write(x))
Example #32
def broadcast_dimensions(argpairs, numblocks, sentinels=(1, (1,))):
    """ Find block dimensions from arguments

    Parameters
    ----------

    argpairs: iterable
        name, ijk index pairs
    numblocks: dict
        maps {name: number of blocks}
    sentinels: iterable (optional)
        values for singleton dimensions

    Examples
    --------

    >>> argpairs = [('x', 'ij'), ('y', 'ji')]
    >>> numblocks = {'x': (2, 3), 'y': (3, 2)}
    >>> broadcast_dimensions(argpairs, numblocks)
    {'i': 2, 'j': 3}

    Supports numpy broadcasting rules

    >>> argpairs = [('x', 'ij'), ('y', 'ij')]
    >>> numblocks = {'x': (2, 1), 'y': (1, 3)}
    >>> broadcast_dimensions(argpairs, numblocks)
    {'i': 2, 'j': 3}

    Works in other contexts too

    >>> argpairs = [('x', 'ij'), ('y', 'ij')]
    >>> d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
    >>> broadcast_dimensions(argpairs, d)
    {'i': 'Hello', 'j': (2, 3)}
    """
    # List like [('i', 2), ('j', 1), ('i', 1), ('j', 2)]
    L = concat([zip(inds, dims)
                    for (x, inds), (x, dims)
                    in join(first, argpairs, first, numblocks.items())])
    g = groupby(0, L)
    g = dict((k, set([d for i, d in v])) for k, v in g.items())

    g2 = dict((k, v - set(sentinels) if len(v) > 1 else v) for k, v in g.items())

    if g2 and not set(map(len, g2.values())) == set([1]):
        raise ValueError("Shapes do not align %s" % g)

    return valmap(first, g2)
Example #33
def vega_contours(treant, counter=0):
    """
    Get the contours as Vega data.

    Args:
      treant: a Treant object with data files
      counter: index appended to the generated contour data names

    Returns:
      contours formatted as Vega data
    """
    return pipe(
        treant,
        all_files('*.nc'),
        map(contours_from_datafile),
        concat,
        map(pandas.DataFrame),
        map(lambda x: x.rename(columns={0: 'x', 1: 'y'})),
        map(lambda x: x.to_dict(orient='records')),
        map(map(valmap(float))),
        map(list),
        enum(lambda i, x: dict(name='contour_data{0}_{1}'.format(i, counter),
                               values=x)),
    )