Example #1
def dict2pdfa(mapping, start: PA.State):
    """Convert nested dictionary into a PDFA.

    - mapping is a nested dictionary of the form:

       mapping = {
         <State>:  (<Label>, {
            <Action>: {
                <State>: <Probability>
            }
         })
       }
    """
    label_map = fn.walk_values(ig(0), mapping)
    transition_map = fn.walk_values(ig(1), mapping)

    outputs = set(bind(mapping).Values()[0].collect())
    inputs = set(bind(mapping).Values()[1].Keys().collect())

    return PA.pdfa(
        start=start,
        label=label_map.get,
        inputs=inputs,
        outputs=outputs,
        **_encode_two_player_game(transition_map)
    )
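A minimal usage sketch (not from the source, toy mapping) of the walk_values split above: itemgetter(0) pulls each state's label while itemgetter(1) pulls its transition table, leaving the keys untouched.

import funcy as fn
from operator import itemgetter as ig

# toy two-state mapping in the dict2pdfa format
mapping = {0: ('a', {'x': {1: 1.0}}), 1: ('b', {'x': {0: 1.0}})}
label_map = fn.walk_values(ig(0), mapping)       # {0: 'a', 1: 'b'}
transition_map = fn.walk_values(ig(1), mapping)  # {0: {'x': {1: 1.0}}, ...}
assert label_map == {0: 'a', 1: 'b'}
assert transition_map[1] == {'x': {0: 1.0}}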
Example #2
    def visit_aag(self, _, children):
        header, ios1, lgs1, ios2, lgs2, symbols, comments = children
        ios, lgs = ios1 + ios2, lgs1 + lgs2
        assert len(ios) == header.num_inputs + header.num_outputs
        inputs, outputs = ios[:header.num_inputs], ios[header.num_inputs:]
        assert len(lgs) == header.num_ands + header.num_latches

        latches, gates = lgs[:header.num_latches], lgs[header.num_latches:]

        # TODO: need to allow for inputs, outputs, latches not in
        # symbol table.
        inputs = {
            symbols.inputs.inv.get(idx, f'i{idx}'): i
            for idx, i in enumerate(inputs)
        }
        outputs = {
            symbols.outputs.inv.get(idx, f'o{idx}'): i
            for idx, i in enumerate(outputs)
        }

        latches = {
            symbols.latches.inv.get(idx, f'l{idx}'): tuple(i)
            for idx, i in enumerate(latches)
        }
        latches = fn.walk_values(lambda l: (l + (0, ))[:3], latches)

        if len(comments) > 0:
            assert comments[0].startswith('c\n')
            comments[0] = comments[0][2:]
        return AAG(inputs=inputs,
                   outputs=outputs,
                   latches=fn.walk_values(tuple, latches),
                   gates=fn.lmap(tuple, gates),
                   comments=tuple(comments))
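A quick illustration (an added sketch) of the latch-padding idiom above: (l + (0,))[:3] appends a default initial value of 0 and then truncates, so two-element latch tuples gain a third slot while three-element ones pass through unchanged.

import funcy as fn

latches = {'l0': (5, 7), 'l1': (3, 9, 1)}
padded = fn.walk_values(lambda l: (l + (0, ))[:3], latches)
assert padded == {'l0': (5, 7, 0), 'l1': (3, 9, 1)}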
Example #3
def mutex_coins(name2prob, input_name=None, keep_seperate=False):
    """Mutually exclusive coins.

    Encoded using the common denominator method.
    """
    name2prob = fn.walk_values(utils.to_frac, name2prob)
    assert sum(name2prob.values()) == 1

    bots = [p.denominator for p in name2prob.values()]
    lcm = reduce(utils.lcm, bots, 1)
    word_len = max(math.ceil(math.log2(lcm)), 1)
    max_val = 2**word_len

    name2weight = fn.walk_values(
        lambda p: p.numerator * (lcm // p.denominator), name2prob)

    bits = atom(word_len, input_name, signed=False)
    const_true = ~(bits @ 0)
    total, coins = 0, []
    for name, weight in name2weight.items():
        lb = const_true if total == 0 else (bits >= total)
        total += weight
        ub = const_true if total == max_val else (bits < total)
        expr = (lb & ub)
        output = dict(expr.aigbv.output_map)[expr.output][0]
        coins.append(expr.aig['o', {output: name}])

    is_valid = const_true if lcm == max_val else bits < lcm

    if keep_seperate:
        return coins, is_valid
    return reduce(lambda x, y: x | y, coins), is_valid
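A worked sketch of the common-denominator arithmetic above, with math.lcm (Python 3.9+) standing in for utils.lcm (an assumption): for probabilities 1/2, 1/3, 1/6 the LCM of denominators is 6, so a 3-bit word is drawn and the weights become 3, 2 and 1.

from fractions import Fraction
from functools import reduce
import math

name2prob = {'a': Fraction(1, 2), 'b': Fraction(1, 3), 'c': Fraction(1, 6)}
lcm = reduce(math.lcm, (p.denominator for p in name2prob.values()), 1)
word_len = max(math.ceil(math.log2(lcm)), 1)
name2weight = {k: p.numerator * (lcm // p.denominator) for k, p in name2prob.items()}
assert (lcm, word_len, name2weight) == (6, 3, {'a': 3, 'b': 2, 'c': 1})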
Example #4
    def get_balances(self):
        available = {
            'STEEM': Amount(self['balance']).amount,
            'SBD': Amount(self['sbd_balance']).amount,
            'VESTS': Amount(self['vesting_shares']).amount,
        }

        savings = {
            'STEEM': Amount(self['savings_balance']).amount,
            'SBD': Amount(self['savings_sbd_balance']).amount,
        }

        rewards = {
            'STEEM': Amount(self['reward_steem_balance']).amount,
            'SBD': Amount(self['reward_sbd_balance']).amount,
            'VESTS': Amount(self['reward_vesting_balance']).amount,
        }

        totals = {
            'STEEM':
            sum([available['STEEM'], savings['STEEM'], rewards['STEEM']]),
            'SBD': sum([available['SBD'], savings['SBD'], rewards['SBD']]),
            'VESTS': sum([available['VESTS'], rewards['VESTS']]),
        }

        total = walk_values(rpartial(round, 3), totals)

        return {
            'available': available,
            'savings': savings,
            'rewards': rewards,
            'total': total,
        }
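A small demo (an added sketch) of the rounding step above: rpartial(round, 3) fixes the trailing ndigits argument, and walk_values maps it over every total while keeping the currency keys.

from funcy import walk_values, rpartial

totals = {'STEEM': 12.3456789, 'SBD': 0.00049, 'VESTS': 1000.5}
assert walk_values(rpartial(round, 3), totals) == {'STEEM': 12.346, 'SBD': 0.0, 'VESTS': 1000.5}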
Example #5
    def get_balances(self):
        available = {
            'GOLOS': Amount(self['balance']).amount,
            'GBG': Amount(self['sbd_balance']).amount,
            'GESTS': Amount(self['vesting_shares']).amount,
        }

        savings = {
            'GOLOS': Amount(self['savings_balance']).amount,
            'GBG': Amount(self['savings_sbd_balance']).amount,
        }

        totals = {
            'GOLOS': sum([available['GOLOS'], savings['GOLOS']]),
            'GBG': sum([available['GBG'], savings['GBG']]),
            'GESTS': sum([available['GESTS']]),
        }

        total = walk_values(rpartial(round, 3), totals)

        return {
            'available': available,
            'savings': savings,
            'total': total,
        }
Example #6
def simplify_fragment(obj):
    """ Simplify and flatten individual fragment."""

    # clean up the mess
    def replace_values(val):
        if type(val) == dict:
            return walk_values(replace_values, val)
        if val == "?" or val == 'None':
            return 0
        return val

    obj = walk_values(replace_values, obj)

    result = None
    with suppress(Exception):
        result = {
            'symbol': obj['symbol'],
            'category': obj['category'],
            'supply': obj['availableSupply'],
            'change_7d': round(float(obj['change7d']), 2),
            'change_1d': round(float(obj['change24h']), 2),
            'change_1h': round(float(obj['change1h']), 2),
            'position': int(obj['position']),
            'cap_usd': round(float(obj['marketCap']['usd'])),
            'cap_btc': round(float(obj['marketCap']['btc'])),
            'volume_btc': round(float(obj['volume24']['btc'])),
            'price_usd': float(obj['price']['usd']),
            'price_btc': float(obj['price']['btc']),
            'timestamp': dt.datetime.fromtimestamp(obj['timestamp'])
        }
    return result
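A self-contained sketch of the recursive cleanup idiom above: walk_values applies replace_values one level at a time, and replace_values recurses whenever it meets a nested dict.

from funcy import walk_values

def replace_values(val):
    if isinstance(val, dict):
        return walk_values(replace_values, val)
    if val == "?" or val == 'None':
        return 0
    return val

obj = {'price': {'usd': '?'}, 'supply': 'None', 'symbol': 'BTC'}
assert walk_values(replace_values, obj) == {'price': {'usd': 0}, 'supply': 0, 'symbol': 'BTC'}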
Example #7
    def evaluate(
        self,
        trace: Trace,
        condition: Optional[Node] = None,
        *,
        dt=1.0,
        time: Any = False,
        quantitative=False,
        logic: _ConnectivesDef = default
    ) -> Mapping[Node, Optional[bool]]:
        """Evaluate the truth values of the monitor conditions on the specified trace."""
        evaluated_conditions: Iterable[Node] = (
            self.conditions if condition is None else {condition}
        )

        results: MutableMapping[Node, Optional[bool]] = dict()
        for phi in evaluated_conditions:
            signals = {
                k.id: v for k, v in trace.project(self.atoms(phi), logic).items()
            }
            # FIXME: mtl requires a default signal even when a formula has no atoms (TOP/BOT)
            signals[None] = [(0, logic.const_false)]
            if all(a.id in signals for a in self.atoms(phi)):
                r = phi(signals, dt=dt, time=time, logic=logic)
                if not quantitative:
                    if time is None:
                        r = funcy.walk_values(lambda v: v >= logic.const_true, r)
                    else:
                        r = r >= logic.const_true
                results[phi] = r
            else:
                results[phi] = None
        return results if condition is None else funcy.first(results.values())
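A reduced sketch of the quantitative-to-boolean step above, detached from mtl's actual types (so treat it as an assumption): with time=None the result maps timestamps to robustness values, and each one is thresholded against the logic's true constant.

import funcy

const_true = 1.0  # stand-in for logic.const_true
robustness = {0.0: 1.0, 1.0: 0.3, 2.0: -0.2}
truth = funcy.walk_values(lambda v: v >= const_true, robustness)
assert truth == {0.0: True, 1.0: False, 2.0: False}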
Example #8
def preprocess(tiles: Dict[int, np.ndarray]) -> Dict[int, HashTile]:
    def _create_hash_tile(tile: np.ndarray) -> HashTile:
        # just store the hashed borders and hash the plugs of each tile
        edges_ = lmap(hashed, edges(tile))
        plugs_ = hashed(plugs(tile))
        return HashTile(tile=tile, edges=edges_, plugs=plugs_, neighbours=[])

    def _compute_neighbours(no: int, hash_tiles: Dict[int, HashTile]) -> None:
        # the hash tile numbers, except this one
        other_nos = [i for i in hash_tiles.keys() if i != no]

        # identify how many matching tiles each edge has in this tile
        for e1, e2 in hash_tiles[no].edges:
            neighbours = []

            # check if this edge matches up with any of the other tile plugs
            for other in other_nos:
                other_plugs = hash_tiles[other].plugs
                if e1 in other_plugs or e2 in other_plugs:
                    neighbours.append(other)

            # fortunately, each edge always has at most one matching tile
            assert len(neighbours) <= 1
            candidate = neighbours[0] if len(neighbours) > 0 else None

            # the number of matching tiles for this border index
            hash_tiles[no].neighbours.append(candidate)

    # first create the hash tiles, then compute the edge match list
    result = walk_values(_create_hash_tile, tiles)
    for key in result.keys():
        _compute_neighbours(key, result)

    return result
Example #9
def extract_map(name_map, names):
    lookup_root = fn.merge(*({v: k
                              for v in vals} for k, vals in name_map))
    mapping = fn.group_by(lambda x: lookup_root[x.split('##time_')[0]],
                          names)
    mapping = fn.walk_values(tuple, mapping)  # Make hashable.
    return frozenset(mapping.items())
Example #10
    def handle(self, *args, **options):
        group = group_values(
            [convert_date(attr),
             attr.get('pubmed_id', '').split('|\n|')]
            for attr in Series.objects.values_list('attrs', flat=True))

        uniq_pmids = set([])

        def count_uniq_pmids(pmids):
            uniq_pmids.update(set(flatten(pmids)))
            return len(uniq_pmids)

        pmids = dict(walk_values(count_uniq_pmids, sorted(group.items())))

        delta = CURRENT_DATE - START_DATE
        keys = sorted(
            set(
                ceil_date(START_DATE + timedelta(days=index * 20))
                for index in range(delta.days // 20 + 1)))

        for index, date in enumerate(keys):
            hc = HistoricalCounter.objects.filter(created_on=date).first()
            if not hc:
                continue
            hc.counters['PMID'] = get_value(keys, index)(pmids)
            hc.save()
Example #11
    def multi(coll):
        def make_apply(el):
            return lambda f: f(el) if callable(f) else f

        if is_mapping(coll):
            return lambda el: walk_values(make_apply(el), coll)
        else:
            return lambda el: lmap(make_apply(el), coll)
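A usage sketch for the multi helper above, repeated here so it runs standalone: given a mapping of field name to extractor, it returns a function that applies each extractor to one element, passing non-callables through unchanged.

from funcy import walk_values, lmap, is_mapping

def multi(coll):
    def make_apply(el):
        return lambda f: f(el) if callable(f) else f

    if is_mapping(coll):
        return lambda el: walk_values(make_apply(el), coll)
    else:
        return lambda el: lmap(make_apply(el), coll)

extract = multi({'first': lambda s: s[0], 'upper': str.upper, 'tag': 'const'})
assert extract('abc') == {'first': 'a', 'upper': 'ABC', 'tag': 'const'}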
Example #12
def r(stats):
    """Reduce collection of results"""
    sorted_stats = reversed(sorted(walk_values(sum, stats).items(),
                                   key=lambda s: s[1]))

    return {
        'words': len(stats),
        'top': list(sorted_stats),
    }
Example #13
def get_vertical_cages(rows):
    transpose_coordinates = lambda t: (t[1], t[0])
    res = get_horizontal_cages(transpose(rows))
    res = walk_keys(transpose_coordinates, res)
    transpose_coordinates_lst = compose(list,
                                        partial(map, transpose_coordinates))
    res = walk_values(transpose_coordinates_lst, res)
    return res
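A compact sketch (toy data, not from the source) of the coordinate transpose above: walk_keys remaps each cage's anchor cell while walk_values remaps the cells inside each cage.

from funcy import walk_keys, walk_values

transpose_coordinates = lambda t: (t[1], t[0])
cages = {(0, 1): [(0, 1), (0, 2)]}
cages = walk_keys(transpose_coordinates, cages)
cages = walk_values(lambda cells: [transpose_coordinates(c) for c in cells], cages)
assert cages == {(1, 0): [(1, 0), (2, 0)]}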
Example #14
def cookiecutter(*args, **kwargs) -> str:
    """Call cookiecutter.main.cookiecutter after stringifying paths

    Return:
        project directory path
    """
    args = fy.walk(_stringify_path, args)
    kwargs = fy.walk_values(_stringify_path, kwargs)
    return _cookiecutter(*args, **kwargs)
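A quick contrast (an added sketch, with a guessed _stringify_path helper) of fy.walk versus fy.walk_values as used above: walk maps over the positional tuple and returns a tuple, while walk_values maps only over a dict's values.

import funcy as fy
from pathlib import Path

_stringify_path = lambda p: str(p) if isinstance(p, Path) else p
args = fy.walk(_stringify_path, (Path('out'), 'x'))
kwargs = fy.walk_values(_stringify_path, {'output_dir': Path('out')})
assert args == ('out', 'x') and kwargs == {'output_dir': 'out'}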
Example #15
    def _call_info(self, func, args, kwargs):
        serialized_args = map(serialize, args)
        serialized_kwargs = walk_values(serialize, kwargs)

        parts = []
        parts.extend(smart_str(a) for a in args)
        parts.extend('%s=%s' % (k, smart_str(v)) for k, v in sorted(kwargs.items()))
        parts.append(hash_args(serialized_args, serialized_kwargs))
        dirname = '%s/%s' % (func.__name__, '.'.join(parts))

        return dirname, serialized_args, serialized_kwargs
Example #16
def adj_list(concept_class, parallel=True):
    if parallel:
        from pathos.multiprocessing import ProcessingPool
        pool = ProcessingPool()
        mapper = pool.map
    else:
        mapper = map

    edge_generator = fn.cat(mapper(get_edges, possible_edges(concept_class)))
    edge_lists = fn.walk_values(set, fn.group_values(edge_generator))
    return defaultdict(set, edge_lists)
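A toy version (an added sketch) of the edge-list construction above: group_values turns (node, neighbour) pairs into lists, walk_values(set) dedupes each list, and defaultdict(set) makes missing nodes come back empty.

from collections import defaultdict
import funcy as fn

edge_generator = [('a', 'b'), ('a', 'b'), ('b', 'c')]
adj = defaultdict(set, fn.walk_values(set, fn.group_values(edge_generator)))
assert adj['a'] == {'b'} and adj['z'] == set()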
Example #17
    def _load_call_info(self, dirname):
        path = os.path.join(self._path, dirname)
        files = os.listdir(path)

        # funcy's filter accepts a regex as a predicate: keep files whose names start with 'a'
        arg_files = sorted(filter(r'^a', files))
        args = tuple(map(self._read_data, (os.path.join(path, f) for f in arg_files)))

        kwarg_files = filter(r'^k', files)
        kwarg_files = {filename[1:]: os.path.join(path, filename) for filename in kwarg_files}
        kwargs = walk_values(self._read_data, kwarg_files)

        return args, kwargs
Example #18
    def export(self):
        """ This method returns a dictionary that is type-safe to store as JSON or in a database.
        """
        # Remove Steem instance object
        safe_dict = remove_from_dict(self, ['steem'])

        # Convert Amount class objects into pure dictionaries
        def decompose_amounts(item):
            if type(item) == Amount:
                return item.__dict__
            return item
        return walk_values(decompose_amounts, safe_dict)
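A self-contained sketch of the export pattern above, with a stand-in Amount class (an assumption; the real one comes from the steem library): walk_values decomposes Amount values into plain dicts and leaves everything else alone.

from funcy import walk_values

class Amount:  # stand-in for steem's Amount
    def __init__(self, amount, asset):
        self.amount, self.asset = amount, asset

safe_dict = {'balance': Amount(3.0, 'STEEM'), 'name': 'alice'}
exported = walk_values(lambda v: v.__dict__ if isinstance(v, Amount) else v, safe_dict)
assert exported == {'balance': {'amount': 3.0, 'asset': 'STEEM'}, 'name': 'alice'}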
Example #19
def setup_slug(sender, instance, **kwargs):
    # NOTE: self and name are captured from the enclosing scope in the
    # original source; this function is a closure used as a signal handler.
    data = walk_values(compose(slugify, unicode),
                       instance.__dict__)
    related = {item: data['_{0}_cache'.format(item)]
               for item in map(lambda u: u[0:-3],
                               filter(lambda u: u.endswith('_id'),
                                      data.keys()))}
    data = merge(data, related)
    slug = self.populate_from.format(
        **data)[:self.max_length]
    if slug != getattr(instance, name):
        setattr(instance, name, slug)
        instance.save()
Example #20
    def export(self):
        """ This method returns a dictionary that is type-safe to store as JSON or in a database.
        """
        self.refresh()

        # Remove Steem instance object
        safe_dict = remove_from_dict(self, ['steem'])

        # Convert Amount class objects into pure dictionaries
        def decompose_amounts(item):
            if type(item) == Amount:
                return item.__dict__
            return item
        return walk_values(decompose_amounts, safe_dict)
Example #21
async def get_task(request):
    app = request.app

    raw_task_id = request.match_info['task_id']
    if not _valid_task_id(raw_task_id):
        return json_response({'errors': ['Parameter task_id is not valid']})

    task_id = ObjectId(raw_task_id)

    task = await store.get_task(app['db'], task_id)
    if task is None:
        return json_response(
            {'errors': ['Task with provided id is not found']})

    return json_response(walk_values(str, task))
Example #22
def compute_greedy_policy(
    values: ValueFunction,
    policy: Policy,
    rewards: Rewards,
    transition_probabilities: TransitionProbabilities,
    grid_size: int,
    terminal_states: Set[GridState],
    gamma: float,
) -> Policy:
    """Compute a greedy policy wrt a given value function & current policy."""
    updated_policy: Policy = defaultdict(dict)
    for state in get_states(grid_size):
        # don't touch terminal states
        if state in terminal_states:
            updated_policy[state] = {
                action: 0.0
                for action in policy[state].keys()
            }
            continue

        action_to_action_value = {
            action: calculate_action_value(
                state,
                values,
                action,
                rewards,
                transition_probabilities,
                grid_size,
                gamma,
            )
            for action in policy[state].keys()
        }

        max_action_value = max(action_to_action_value.values())

        # if multiple actions achieve the max value, give each argmax action probability 1/n
        updated_policy_for_state = {
            action: 1.0 if action_value == max_action_value else 0.0
            for action, action_value in action_to_action_value.items()
        }
        n_argmax_actions = sum(updated_policy_for_state.values())
        updated_policy_for_state = funcy.walk_values(
            lambda is_action_argmax: is_action_argmax / n_argmax_actions,
            updated_policy_for_state,
        )
        updated_policy[state] = updated_policy_for_state

    return updated_policy
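A mini-example (an added sketch) of the normalization step above: a tie between two argmax actions splits the probability mass evenly via walk_values.

import funcy

updated_policy_for_state = {'up': 1.0, 'down': 0.0, 'left': 1.0, 'right': 0.0}
n_argmax_actions = sum(updated_policy_for_state.values())  # 2 argmax actions
normalized = funcy.walk_values(lambda v: v / n_argmax_actions, updated_policy_for_state)
assert normalized == {'up': 0.5, 'down': 0.0, 'left': 0.5, 'right': 0.0}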
Example #23
def values(self, *fields, **expressions):
    """
    Extended version supporting renames:
        .values('id', 'name', author__name='author')
    """
    renames = select_values(isa(six.string_types), expressions)
    if not renames:
        return base.values(self, *fields, **expressions)
    elif django.VERSION >= (1, 11):
        rename_expressions = walk_values(F, renames)
        expressions.update(rename_expressions)
        return base.values(self, *fields, **expressions)
    else:
        f_to_name = flip(renames)
        rename = lambda d: {f_to_name.get(k, k): v for k, v in d.items()}
        return base.values(self, *chain(fields, f_to_name)).map(rename)
Example #24
def mygene_fetch(platform, probes, scopes):
    """Queries mygene.info for current entrezid and sym, given an identifier."""
    if scopes == "dna":
        probes = get_dna_probes(platform, probes)
        scopes = "accession"

    def extract_queries(lines):
        lines = remove(r'^(IMAGE:\d+|--[\w>-]+)$', lines)
        queries = cat(re_iter(r'[\w+.-]+', l) for l in lines)
        queries = remove(r'_at$|^\d+-\d+$', queries)  # No such thing
        # A unicode-cleaning step for mygene
        # (http://stackoverflow.com/questions/15321138/removing-unicode-u2026-like-characters)
        # used to follow here, but it was unreachable dead code after the return.
        return queries

    _by_probe = group_values(probes.items())
    queries_by_probe = walk_values(extract_queries, _by_probe)

    # Collect all possible queries to make a single request to mygene
    queries = set(cat(queries_by_probe.values()))

    if not queries:
        return []
    mygenes = _mygene_fetch(queries, scopes, platform.specie)

    # Form results into rows
    results = []
    dups = 0
    for probe, queries in queries_by_probe.items():
        matches = ldistinct(keep(mygenes.get, queries))
        # Skip dups
        if len(matches) > 1:
            dups += 1
        elif matches:
            entrez, sym = matches[0]
            results.append({
                'probe': probe,
                'mygene_sym': sym,
                'mygene_entrez': entrez
            })
    if dups:
        cprint('-> Produced %d dups' % dups, 'red')
    return results
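A reduced sketch (toy table, not from the source) of the match lookup above: keep drops queries that miss the mygene table, and ldistinct collapses duplicate hits so genuine ambiguities can be counted as dups.

from funcy import keep, ldistinct

mygenes = {'NM_1': (111, 'GENE1'), 'NM_2': (111, 'GENE1')}
queries = ['NM_1', 'NM_2', 'NM_9']
matches = ldistinct(keep(mygenes.get, queries))
assert matches == [(111, 'GENE1')]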
Example #25
def ByUrl(mapping):
    schemas = walk_values(Schema, mapping)

    def validate(data):
        if "url" not in data:
            raise Invalid("expected 'url'")

        parsed = urlparse(data["url"])
        # Windows absolute paths should really have scheme == "" (local)
        if os.name == "nt" and len(parsed.scheme) == 1 and parsed.netloc == "":
            return schemas[""](data)
        if parsed.scheme not in schemas:
            raise Invalid(f"Unsupported URL type {parsed.scheme}://")

        return schemas[parsed.scheme](data)

    return validate
Example #26
def copy_output_files(app):
    """
    Creates a list of source file paths and destination directory names. Copies each
    the source file to destination directory.
    """
    path = lambda x: os.path.abspath(fs.get_task_file_path(app, x))

    output_files = {'container_log': path('meta/log.txt')}

    if fs.biobox_yaml_exists(app):
        tmp_files = funcy.walk_values(lambda x: path("tmp/" + x),
                                      image_type(app).output_file_paths(app))
        output_files = funcy.merge(output_files, tmp_files)
    else:
        msg = "No biobox.yaml file created, cannot find paths of any container generated files"
        app['logger'].warn(msg)

    fs.copy_container_output_files(app, output_files)
Example #27
def typify(value):
    """
    typify takes a blockchain operation or dict/list/value,
    and then it parses and converts string types into native data types where appropriate.
    """
    if type(value) == dict:
        return walk_values(typify, value)

    if type(value) in [list, set]:
        return list(map(typify, value))

    if type(value) == str:
        if re.match(r'^\d+\.\d+ (STEEM|SBD|VESTS)$', value):
            return keep_in_dict(Amount(value).__dict__, ['amount', 'asset'])

        if re.match(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$', value):
            return parse_time(value)

    return value
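A reduced, self-contained sketch of typify's recursion (the Amount and parse_time branches are replaced by strptime, an assumption): timestamp-shaped strings become datetimes at any nesting depth.

import re
from datetime import datetime
from funcy import walk_values

def typify(value):
    if isinstance(value, dict):
        return walk_values(typify, value)
    if isinstance(value, (list, set)):
        return list(map(typify, value))
    if isinstance(value, str) and re.match(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$', value):
        return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S')
    return value

op = {'created': '2016-08-01T12:00:00', 'nested': {'expires': '2016-08-02T00:00:00'}}
assert typify(op)['nested']['expires'].day == 2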
Example #28
    def processText(self, text):
        (counters, llens, lines) = zip(*map(self._mapper, text.split("\n")))

        # Reduce list of counters to one count
        WC = self._reducer(counters)

        # Total number of words and words per segment
        num_words = sum(llens)
        wps = num_words / 1.0  #TODO: consider segments

        # Convert to percentages
        WC = walk_values(lambda x: "{:.2f}".format(100 * x / float(num_words)),
                         dict(WC))

        #
        WC['num_words'] = num_words
        WC['wps'] = wps

        return WC
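A tiny demo (an added sketch) of the percentage formatting above: each raw count is rescaled against the total word count and rendered with two decimals.

from funcy import walk_values

WC = {'the': 4, 'cat': 1}
num_words = sum(WC.values())
pct = walk_values(lambda x: "{:.2f}".format(100 * x / float(num_words)), WC)
assert pct == {'the': '80.00', 'cat': '20.00'}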
Example #29
def get_mbta_station_info(cfg):
    route_info = query_mbta_id("routes", cfg['route'])
    stop_info = query_mbta_id("stops", cfg['stop'])

    params = (('filter[stop]', cfg['stop']), ('filter[route]', cfg['route']),
              ('page[limit]', '10'))
    arrivals = query_mbta('predictions', params)
    by_direction = f.walk_values(
        vectorize(f.compose(relative_ts, op.itemgetter('arrival_time'))),
        f.group_by(op.itemgetter('direction_id'), arrivals))

    return [
        f.merge(
            {
                "station": stop_info['name'],
                "route": cfg['route'],
                "direction": route_info['direction_destinations'][k],
            }, dict(zip(range(5), pad(v, 5))))
        for k, v in by_direction.items()
    ]
Example #30
    def cutlatches(self, latches=None, renamer=None):
        if renamer is None:
            @fn.memoize
            def renamer(_):
                return common._fresh()

        def renamer_bv(name):
            root, idx = unpack_name(name)
            return f"{renamer(root)}[{idx}]"

        aig, lmap = self.aig.cutlatches(latches, renamer=renamer_bv)
        circ = rebundle_aig(aig)
        lmap = self.lmap.unblast(lmap)

        def unblast_vals(vals):
            name, _ = vals[0]
            name = unpack_name(name)[0]
            bdl = Bundle(size=len(vals), name=name)
            return (name, bdl.unblast(dict(vals)))

        lmap = fn.walk_values(unblast_vals, lmap)
        return circ, lmap
Example #31
def collection_health(mongo):
    last_items = {
        'Posts':
        find_latest_item(mongo, 'Posts', 'created'),
        'Comments':
        find_latest_item(mongo, 'Comments', 'created'),
        'Operations':
        find_latest_item(mongo, 'Operations', 'timestamp'),
        'AccountOperations':
        find_latest_item(mongo, 'AccountOperations', 'timestamp'),
    }

    def time_delta(item_time):
        delta = dt.datetime.utcnow().replace(tzinfo=None) - item_time.replace(
            tzinfo=None)
        return delta.seconds

    timings = walk_values(time_delta, last_items)
    return {
        **timings, 'status':
        'impaired' if any(x > 60 * 10 for x in timings.values()) else 'ok'
    }
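A small sketch (an added example) of the freshness check above: walk_values turns each latest-item timestamp into an age in seconds, and the status flips to impaired once any collection is more than ten minutes stale.

import datetime as dt
from funcy import walk_values

last_items = {'Posts': dt.datetime.utcnow() - dt.timedelta(seconds=30)}
timings = walk_values(lambda t: (dt.datetime.utcnow() - t).seconds, last_items)
status = 'impaired' if any(x > 60 * 10 for x in timings.values()) else 'ok'
assert status == 'ok'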
Example #32
def get_series_tag_history():
    series_tag_history = {
        'created': defaultdict(int),
        'validated': defaultdict(int),
        'invalidated': defaultdict(int)
    }
    qs = SeriesTag.objects.filter(
        is_active=True).prefetch_related('validations')

    for tag in tqdm(qs, total=qs.count(), desc='series tag history'):
        validations = list(tag.validations.all())
        series_tag_history['created'][ceil_date(tag.created_on)] += 1
        validated = silent(min)(v.created_on for v in validations
                                if v.annotation_kappa == 1)
        if validated:
            series_tag_history['validated'][ceil_date(validated)] += 1
            invalidated = silent(min)(v.created_on for v in validations
                                      if v.agrees_with is not None)
            if invalidated:
                series_tag_history['invalidated'][ceil_date(invalidated)] += 1

    return walk_values(accumulate, series_tag_history)
Example #33
    def run(self: 'ReviewProject') -> None:
        "Runs the setup task `'review'`."
        reports = {
            'pyflakes': ReviewProject.lint(),
            'pycodestyle': ReviewProject.style(),
            'mypy': ReviewProject.types(),
        }

        for tool in reports:
            code, warn, err = reports[tool]
            if code != 0:
                ReviewProject.separator_line()
                print(f'{tool}: code {code}')
                print('WARN')
                print(warn)
                print('ERROR')
                print(err)

        issues = sum(funcy.walk_values(lambda x: x[0], reports).values())
        if issues > 0:
            ReviewProject.separator_line()
            print()
            print(f'{issues} issues found.')
Example #34
    def get_balances(self):
        available = {
            "GOLOS": Amount(self["balance"]).amount,
            "GBG": Amount(self["sbd_balance"]).amount,
            "GESTS": Amount(self["vesting_shares"]).amount,
        }

        savings = {
            "GOLOS": Amount(self["savings_balance"]).amount,
            "GBG": Amount(self["savings_sbd_balance"]).amount,
        }

        accumulative = {"GOLOS": Amount(self["accumulative_balance"]).amount}

        tip = {"GOLOS": Amount(self["tip_balance"]).amount}

        totals = {
            "GOLOS":
            sum([
                available["GOLOS"], savings["GOLOS"], accumulative["GOLOS"],
                tip["GOLOS"]
            ]),
            "GBG":
            sum([available["GBG"], savings["GBG"]]),
            "GESTS":
            sum([available["GESTS"]]),
        }

        total = walk_values(rpartial(round, 3), totals)

        return {
            "available": available,
            "savings": savings,
            "accumulative": accumulative,
            "tip": tip,
            "total": total,
        }
Example #35
def rebundle_names(names):
    grouped_names = fn.group_values(map(unpack_name, names))
    return BundleMap(pmap(fn.walk_values(to_size, grouped_names)))
Example #36
def time_shift(t, xs):
    return fn.walk_values(_time_shift(t), xs)
Example #37
def get_state(t, xs):
    return fn.walk_values(lens[0].get(), xs)