示例#1
0
def load(
    refresh,
    bq_read,
    sqlfn="../fis/data/hist_data_proto.sql",
    dest="/tmp/hists.pq",
):
    """Load the histogram dataset, optionally refreshing the parquet cache.

    Args:
        refresh: when truthy, re-run the SQL via ``bq_read`` and rewrite
            the parquet file at ``dest`` before loading.
        bq_read: callable taking a SQL string and returning a DataFrame.
        sqlfn: path of the SQL file executed on refresh.
        dest: parquet cache location; read on every call.

    Returns:
        DataFrame in which each histogram column holds dicts with int keys
        (None values dropped).
    """
    if refresh:
        raw = bq_read(read(sqlfn))
        raw_hist_cols = get_hist_cols_raw_dl(raw)
        # Parse each raw histogram column into dicts.  ``col=col`` binds the
        # loop variable as a default arg to dodge late-binding closures.
        parsed = raw.assign(**{
            col: lambda frame, col=col: frame[col].map(arr_of_str2dict)
            for col in raw_hist_cols
        })
        # Stringify dict keys before writing — presumably parquet cannot
        # store non-string map keys (TODO confirm).
        cached = parsed.assign(**{
            col: lambda frame, col=col: frame[col].map(z.keymap(str))
            for col in raw_hist_cols
        })
        cached.to_parquet(dest)

    ds = pd.read_parquet(dest)

    # Undo the string-key encoding: drop None values, restore int keys.
    decode = z.compose(typed_dict, z.keymap(int),
                       z.valfilter(lambda x: x is not None))
    return ds.assign(**{
        col: lambda frame, col=col: frame[col].map(decode)
        for col in get_dict_hist_cols(ds)
    })
示例#2
0
 def _switch_models():
     """Copy the generator encoder's parameters into the discriminator.

     Model switching (section 3.4): parameter names on both sides are
     normalized by dropping their first dotted component so the two dicts
     line up, then each ``G.enc`` tensor is copied into the matching ``D``
     tensor in place.
     """
     # Model switching (section 3.4)
     print("Switching model params...")

     def _strip_head(name):
         # "enc.layer.weight" -> "layer.weight": drop the leading scope.
         return ".".join(name.split(".")[1:])

     g_params = tz.keymap(_strip_head, dict(G.enc.named_parameters()))
     d_params = tz.keymap(_strip_head, dict(D.named_parameters()))
     for name, param in g_params.items():
         # In-place copy keeps D's parameter objects (and any optimizer
         # references to them) intact.
         d_params[name].data.copy_(param.data)
     print("Model params switched!")
示例#3
0
def vega2to3(data):
    """Transform a Vega data list from version 2 to 3.

    Renames the version-2 transform keys ``test`` -> ``expr`` and
    ``field`` -> ``as``; any other key passes through unchanged.

    Args:
      data: vega data list

    Returns:
      update vega data list
    """
    renames = {'test': 'expr', 'field': 'as'}

    def rename_key(key):
        """Map a single vega 2 transform key to its vega 3 name.

        Args:
          key: the key to map

        Returns:
          a new key (the input key when no rename applies)
        """
        return renames.get(key, key)

    fix_transforms = fcompose(map(keymap(rename_key)), list)
    return pipe(
        data,
        map(update_in(keys=['transform'], func=fix_transforms, default=[])),
        list)
示例#4
0
 def get_values(_type):
     """Extract the ``_type``-prefixed entries of ``data``.

     Keeps only keys of the form ``{_type}_{param}`` for each name in
     ``params``, strips the ``{_type}_`` prefix from each surviving key,
     and replaces falsy values with 0.
     """
     prefix = "{}_".format(_type)
     wanted = {prefix + p for p in params}
     extract = compose(
         valmap(lambda v: v or 0),
         keymap(lambda k: k.replace(prefix, "")),
         keyfilter(lambda k: k in wanted),
     )
     return extract(data)
示例#5
0
def annodize(name="__main__", annotations=None):
    """Rewrite a module's ``__annotations__`` using forward references.

    Imports the module ``name``, wraps the keys of ``annotations`` in
    ``Forward`` objects, evaluates them against the module's namespace,
    and widens matching ``__annotations__`` entries to a ``typing.Union``
    of the original value and its mapped type.  Mutates the module's
    ``__annotations__`` dict in place and returns None.

    Args:
      name: module name to operate on (defaults to ``"__main__"``).
      annotations: optional mapping whose keys become forward refs.
    """
    # NOTE(review): shadows the builtins `globals`/`locals`; both views
    # come from the same module namespace here.
    globals, locals = map(vars, map(importlib.import_module, [name] * 2))
    __annotations__ = globals.get("__annotations__", {})
    # Wrap every supplied key as a Forward reference...
    annotations = toolz.keymap(Forward, (annotations or {}))
    # ...then resolve each forward key to a concrete type in module scope.
    # NOTE(review): typing._eval_type is a private CPython API and may
    # change between Python versions.
    annotations = toolz.keymap(lambda x: typing._eval_type(x, globals, locals),
                               annotations)
    for key, value in __annotations__.items():
        if getattr(value, "__forward_coerce__", False) is True:
            # Record which attribute this forward ref coerces for —
            # presumably consumed elsewhere by the Forward machinery.
            value.__forward_coerce__ = key
        if value in annotations:
            # Copy so the shared mapped type is not mutated for other keys.
            new = copy.copy(annotations[value])

            if getattr(new, "__forward_coerce__", False) is True:
                new.__forward_coerce__ = key
            # Widen the annotation to accept either form.
            __annotations__[key] = typing.Union[value, new]

        # Force evaluation of the (possibly updated) annotation.
        typing._eval_type(__annotations__[key], globals, locals)
示例#6
0
def graph_descriptors_np(data, delta_x=1.0, periodic_boundary=True):
    """Numpy only version of graph_descriptors function

    Args:
      data (array): array of phases ``(n_samples, n_x, n_y)``, values must be 0 or 1
      delta_x (float): pixel size
      periodic_boundary (bool): whether the boundaries are periodic

    Returns:
      A Pandas data frame with samples along rows and descriptors
      along columns

    """
    # pylint: enable=line-too-long
    # Rename table from raw graspi descriptor codes to readable column
    # names.  Keys are encoded to bytes — presumably the per-sample output
    # labels its fields with byte strings; TODO confirm against
    # graph_descriptors_sample.
    columns = keymap(
        lambda x: x.encode("UTF-8"),
        dict(
            STAT_n="n_vertices",
            STAT_e="n_edges",
            STAT_n_D="n_phase0",
            STAT_n_A="n_phase1",
            STAT_CC_D="n_phase0_connect",
            STAT_CC_A="n_phase1_connect",
            STAT_CC_D_An="n_phase0_connect_top",
            STAT_CC_A_Ca="n_phase1_connect_bottom",
            ABS_wf_D="w_frac_phase0",
            ABS_f_D="frac_phase0",
            DISS_wf10_D="w_frac_phase0_10_dist",
            DISS_f10_D="fraction_phase0_10_dist",
            DISS_f2_D="fraction_phase0_2_dist",
            CT_f_conn_D="frac_useful",
            CT_f_e_conn="inter_frac_bottom_and_top",
            CT_f_conn_D_An="frac_phase0_top",
            CT_f_conn_A_Ca="frac_phase1_bottom",
            CT_e_conn="n_inter_paths",
            CT_e_D_An="n_phase0_inter_top",
            CT_e_A_Ca="n_phase1_inter_bottom",
            CT_f_D_tort1="frac_phase0_rising",
            CT_f_A_tort1="frac_phase1_rising",
            CT_n_D_adj_An="n_phase0_connect_anode",
            CT_n_A_adj_Ca="n_phase1_connect_cathode",
        ),
    )

    return pipe(
        data,
        # Compute descriptors sample-by-sample with the configured pixel
        # size and boundary condition.
        fmap(
            graph_descriptors_sample(delta_x=delta_x,
                                     periodic_boundary=periodic_boundary)),
        list,
        # debug('check fields in graspi'),
        pandas.DataFrame,
        lambda x: x.rename(columns=columns),
        # Count-like columns (renamed to "n_*") are rounded back to ints;
        # fraction columns are left as floats.
        lambda x: x.apply(lambda x: np.rint(x).astype(int)
                          if x.name[:2] == "n_" else x),
    )
示例#7
0
 def to_ascii_table(self):
     """Render this collection as an ASCII table.

     Builds one row per entry of ``self.dict``: the (presumably nested
     tuple) key is flattened into leading cells and the value sequence is
     joined with ", " into a trailing cell.  A header row is derived from
     ``self.levels``.

     Returns:
       An AsciiTable titled ``Collection: <self.name>``.
     """
     # Fully flatten each key, join each value with ", ", sort the entries,
     # then flatten one level so each row is key-parts + joined value.
     table_data = tuple(
         map(
             flatten(1),
             sorted(
                 valmap(', '.join, keymap(flatten(float('inf')),
                                          self.dict)).items())))
     # Header: string levels pass through, anything else becomes an empty
     # cell; one extra empty cell covers the value column.
     header = [
         [x if isinstance(x, string_types) else '' for x in self.levels] + [
             '',
         ],
     ]
     table = AsciiTable(header + list(map(list, table_data)),
                        'Collection: %s' % self.name)
     return table
示例#8
0
文件: graph.py 项目: 0xnurl/gamla
                           get_neighbors(current))

    return map(
        toolz.first,
        graph_traverse(source=(source, 0),
                       get_neighbors=get_neighbors_limiting_radius),
    )


# Build an adjacency mapping {source: frozenset(targets)} from an iterable
# of (source, target) edge pairs: group edges by their first element, then
# keep only the second element of each grouped pair.
edges_to_graph = toolz.compose(
    curried.valmap(toolz.compose(frozenset, curried.map(toolz.second))),
    curried.groupby(toolz.first),
)

# Inverse of edges_to_graph: yield (source, target) pairs from an adjacency
# dict.  Each key is wrapped in a 1-tuple so itertools.product pairs it with
# every one of its neighbors.
graph_to_edges = toolz.compose_left(
    curried.keymap(lambda x: (x, )),
    dict.items,
    curried.mapcat(functional.star(itertools.product)),
)

# Reverse every edge's direction: decompose the graph into edges, flip each
# (source, target) pair, and rebuild the adjacency mapping.
reverse_graph = toolz.compose_left(
    graph_to_edges, curried.map(toolz.compose_left(reversed, tuple)),
    edges_to_graph)

# Turn an iterable of cliques into a graph by connecting every ordered pair
# of distinct nodes within each clique (permutations gives both directions).
cliques_to_graph = toolz.compose_left(
    curried.mapcat(lambda clique: itertools.permutations(clique, r=2)),
    edges_to_graph)


def get_connectivity_components(graph: Dict) -> Iterable[FrozenSet]:
    """Graph is assumed to undirected, so each edge must appear both ways."""
示例#9
0
def sym_inverse_transform_label_encoder(estimator):
    """Build a symbolic inverse transform for a fitted label encoder.

    Maps each encoded class index (as a symbolic value) back to the
    corresponding class label, wrapped in a FiniteMap over the symbolic
    variable ``x``.
    """
    to_value = compose(as_value, np_to_py)
    index_to_class = dict(enumerate(map(to_value, estimator.classes_)))
    mapping = keymap(as_value, index_to_class)
    arg = StringVariable('x')
    return Function.from_expression(FiniteMap(mapping=mapping, arg=arg))
示例#10
0
from datetime import datetime
from tables import db, DelWordTable
from toolz.curried import map, filter, pipe, groupby, keymap

if __name__ == "__main__":
    # Bug fix: ``sys`` was used below (argv / exit) but never imported in
    # the import block above.
    import sys

    if len(sys.argv) != 2:
        print("USAGE: python3 add_del_words.py words.txt")
        sys.exit(1)

    _, words_path = sys.argv

    # Words already present in the table; used to skip duplicate inserts.
    exist_wordphones = pipe(DelWordTable.select(), map(lambda e: e.word), set)

    with open(words_path, "r", encoding='utf8') as fin:
        # Strip, drop blanks and known words, deduplicate via groupby on
        # the word itself, then wrap each unique word in an unsaved
        # DelWordTable row.  Iterating the resulting dict yields the
        # DelWordTable keys, which bulk_create consumes.
        to_add_words = pipe(
            fin,
            map(lambda e: e.strip()),
            filter(lambda e: e != ''),
            filter(lambda e: e not in exist_wordphones),
            groupby(lambda e: e),
            keymap(lambda e: DelWordTable(word=e, updatedt=datetime.now())),
        )

        with db.atomic():
            DelWordTable.bulk_create(to_add_words, batch_size=100)

    print('done')
示例#11
0
     [
         ("Alice", "NYC"),
         ("Alice", "Chicago"),
         ("Dan", "Syndey"),
         ("Edith", "Paris"),
         ("Edith", "Berlin"),
         ("Zhao", "Shanghai"),
     ],
 ),
 "count_by": (curried.countby(lambda x: x % 2 == 0), range(20)),
 "groupby": (
     chained(curried.groupby(lambda x: x % 2 == 0), curried.valmap(sorted)),
     range(20),
 ),
 "keymap": (
     chained(dict, curried.keymap(lambda x: 2 * x)),
     dict.items({
         1: 2,
         3: 4,
         5: 6,
         7: 8,
         9: 10
     }),
 ),
 "valmap": (
     chained(dict, curried.valmap(lambda x: 2 * x)),
     dict.items({
         1: 2,
         3: 4,
         5: 6,
         7: 8,