Example #1
def replace_last_number(substr: str, number: int) -> str:
    # `re_single_number` is assumed to be a compiled pattern matching one integer,
    # e.g. re.compile(r"\d+"); last() picks the final match, if any.
    if found := last(re_single_number.finditer(substr), None):
        value = int(found.group())
        left = substr[0:found.start()]
        substr = re_single_number.sub(f"{value + number}",
                                      substr[found.start():], 1)
        substr = f"{left}{substr}"
    return substr
Example #2
def viterbi_path_dp(hmm: HMM, O: pd.Series):
    """
    Find one of the most likely sequences of hidden states
    for the sequence of observations O using dynamic programming.
    """

    norm = (lambda s: s / s.sum())

    d = pd.DataFrame(index=hmm.Q, columns=O.index)
    m = pd.DataFrame(index=hmm.Q, columns=O.index)
    d[first(O.index)] = hmm.b[first(O)] * hmm.e
    for ((s, __), (t, ot)) in pairwise(O.items()):
        x: pd.DataFrame
        x = hmm.a * np.outer(d[s], hmm.b[ot])
        m[t] = x.idxmax(axis=0)
        d[t] = norm(x.max(axis=0))

    # Inferred sequence of hidden states
    qq = pd.Series(index=O.index, dtype=object)

    q = d[last(d)].idxmax()
    qq[last(d)] = q
    for (s, t) in reversed(list(pairwise(O.index))):
        q = m[t][q]
        qq[s] = q

    return qq
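A minimal usage sketch for viterbi_path_dp, assuming the HMM container exposes Q (states), S (symbols), a (transition matrix, rows = from-state), b (emission matrix, columns = symbols) and e (initial distribution), with first/last/pairwise from more_itertools; the toy model and its numbers are hypothetical, not part of the original code.

from collections import namedtuple
import numpy as np
import pandas as pd
from more_itertools import first, last, pairwise

HMM = namedtuple('HMM', ['Q', 'S', 'a', 'b', 'e'])  # hypothetical container

Q = ['Rainy', 'Sunny']
S = ['walk', 'shop', 'clean']
hmm_toy = HMM(
    Q=Q, S=S,
    a=pd.DataFrame(index=Q, columns=Q, data=[[0.7, 0.3], [0.4, 0.6]]),
    b=pd.DataFrame(index=Q, columns=S, data=[[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]]),
    e=pd.Series(index=Q, data=[0.6, 0.4]),
)
O = pd.Series(['walk', 'shop', 'clean'])
print(viterbi_path_dp(hmm_toy, O))  # one most-likely hidden state per observation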
Example #3
def learn_baum_welch(hmm: HMM, O: pd.Series, niter=5, delay=0.9):
    for i in range(niter):
        # Forward variable
        a = pd.DataFrame(index=hmm.Q, columns=O.index)
        a[first(a)] = hmm.b[O[first(a)]] * hmm.e
        for (s, t) in pairwise(a):
            a[t] = hmm.b[O[t]] * (a[s] @ hmm.a)

        # Baum-Welch score Pr(O|M), based on remark in [1, p.179]:
        prom = sum(a[last(a.columns)])
        assert (prom > 0)

        print(F"Model likelihood before training step #{i + 1}: {prom}")

        # Backward variable (includes the hmm.b factor)
        b = pd.DataFrame(index=hmm.Q, columns=O.index)
        b[last(b)] = hmm.b[O[last(b)]] * 1
        for (s, t) in reversed(list(pairwise(b))):
            b[s] = hmm.b[O[s]] * (hmm.a @ b[t])

        # Remark [1, p.182]:
        if not np.isclose(prom, sum(b[first(b.columns)] * hmm.e), atol=0, rtol=1e-3):
            print("ERROR:", prom, "should equal", sum(b[first(b.columns)] * hmm.e))
            exit()

        # Expected number of transitions state i -> state j [1, Claim 5.12 and p.183]:
        n = pd.Series(
            data={
                s: hmm.a * np.outer(a[s], b[t]) / prom
                for (s, t) in pairwise(O.index)
            },
        )

        # From [1, Claim 5.9 on p.181]:
        # g = a * b / prom  # Not correct with the redefinition of b
        # Use [1, Claim 5.12 and Note on p.183]:
        # pandas removed Series/DataFrame .append in 2.0; build g via pd.concat instead
        g = pd.concat([
            pd.DataFrame({s: x.sum(axis=1) for (s, x) in n.items()}).T,
            (a[last(a.columns)] / prom).to_frame().T,
        ], verify_integrity=True).T
        assert all(np.isclose(g.sum(axis=0), 1))
        assert all(np.isclose(n.sum().sum(axis=1), g[n.index].sum(axis=1)))
        assert all(np.isclose(g.groupby(O, axis=1).sum().sum(axis=1), g.sum(axis=1)))

        norm_rows = (lambda df: df.apply(lambda s: s / s.sum(), axis=1))
        hmm.e = delay * hmm.e + (1 - delay) * np.sum(first(n), axis=1)
        hmm.a = delay * hmm.a + (1 - delay) * norm_rows(n.sum())
        hmm.b = delay * hmm.b + (1 - delay) * norm_rows(pd.DataFrame(columns=hmm.S, data=g.groupby(O, axis=1).sum()).fillna(0))
        assert np.isclose(hmm.e.sum(), 1)
        assert all(np.isclose(hmm.a.sum(axis=1), 1))
        assert all(np.isclose(hmm.b.sum(axis=1), 1))
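A hedged training sketch, reusing the hypothetical toy model from the sketch after Example #2; a mutable container is needed here because learn_baum_welch rebinds hmm.e, hmm.a and hmm.b.

from types import SimpleNamespace

hmm_mut = SimpleNamespace(Q=hmm_toy.Q, S=hmm_toy.S, a=hmm_toy.a.copy(),
                          b=hmm_toy.b.copy(), e=hmm_toy.e.copy())
learn_baum_welch(hmm_mut, O, niter=3)  # prints the model likelihood before each step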
Example #4
    def is_matched_with(self, block_device: BlockDevice) -> bool:
        if block_device.has_root():
            root_location = self.root_location

            if root_location is not None:
                root_partition = none_throws(block_device.root)
                filesystem = none_throws(root_partition.filesystem)
                normalized_root_location = last(
                    strip_quotes(root_location).split(
                        constants.PARAMETERIZED_OPTION_SEPARATOR))
                root_location_comparers = [
                    root_partition.label,
                    root_partition.uuid,
                    filesystem.label,
                    filesystem.uuid,
                ]

                if (normalized_root_location in root_location_comparers
                        or block_device.is_matched_with(
                            normalized_root_location)):
                    root_mount_options = self.root_mount_options
                    subvolume = none_throws(filesystem.subvolume)

                    return (root_mount_options.is_matched_with(subvolume)
                            if root_mount_options is not None else False)

        return False
Example #5
 def __getitem__(self, index):
     try:
         if isinstance(index, slice):
             molecules = []
             for info in islice_extended(self._molecules_ordered_all_gen(),
                                         index.start, index.stop,
                                         index.step):
                 itp_index, gro_start, gro_end = info
                 residues = self.system_gro[gro_start:gro_end]
                 mol = self.different_molecules[itp_index].copy(residues)
                 molecules.append(mol)
             return molecules
         elif isinstance(index, int):
             if index == -1:
                 info = last(self._molecules_ordered_all_gen())
             else:
                 info = next(
                     islice_extended(self._molecules_ordered_all_gen(),
                                     index, index + 1))
             itp_index, gro_start, gro_end = info
             residues = self.system_gro[gro_start:gro_end]
             mol = self.different_molecules[itp_index].copy(residues)
             return mol
         else:
             raise TypeError(('System indices must be integers or slices,'
                              f' not {type(index)}.'))
     except StopIteration:
         raise IndexError('Molecule index out of range')
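A hedged usage sketch of the indexing contract above (how the System instance is constructed is not shown in this snippet; `system` is hypothetical):

mol = system[0]           # single molecule for an int index
last_mol = system[-1]     # resolved via last() over the ordered generator
window = system[2:10:2]   # extended slices are delegated to islice_extended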
Example #6
def dotted_name(name: str, ctx: expr_context) -> expr:
    # Fold "a.b.c" left-to-right: `res` threads through the generator as an
    # accumulator, so last() yields the fully nested Attribute chain.
    res = last(res for res in [None] for p in name.split('.') for res in [
        Name(id=p, ctx=Load()
             ) if res is None else Attribute(value=res, attr=p, ctx=Load())
    ])
    res.ctx = ctx
    return res
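For illustration, a round-trip of the fold above through ast.dump (Name, Attribute and Load as imported from the ast module):

import ast
print(ast.dump(dotted_name('a.b.c', ast.Load())))
# Attribute(value=Attribute(value=Name(id='a', ctx=Load()), attr='b', ctx=Load()), attr='c', ctx=Load())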
Example #7
 def wrapped_func(st_x: StX, st_it: StIt) -> None:
     freq = last(freq
                 for start, (_,
                             freq) in zip(intervals_starts, intervals)
                 if st_it["num_iters_done"] >= start)
     if st_it["num_iters_done"] % freq == 0:
         func(st_x, st_it)
Example #8
    def proximity1(self, traj1: pd.DataFrame, traj2: pd.DataFrame):
        (a1, b1) = [first(traj1.node), last(traj1.node)]
        (a2, b2) = [first(traj2.node), last(traj2.node)]

        len1 = self.pathdist.dist_only((a1, b1))
        len2 = self.pathdist.dist_only((a2, b2))
        len3 = min(
            sum(
                self.pathdist.dist_only(uv) for uv in pairwise(inout_sequence))
            for inout_sequence in [
                (a1, a2, b2, b1),
                (a1, a2, b1, b2),
            ])
        savings = (len1 + len2) - len3
        savings = savings if (savings > 0) else np.nan
        return savings
Example #9
def test_compound_rows():
    pane = Pane.parse("_ Tag\t${lemma}\n")
    row = one(pane.rows)

    multiple_forms = ("form", "longer-form")
    filled_row = row.fill({"${lemma}": multiple_forms})
    assert isinstance(filled_row, CompoundRow)

    # Ensure all generated forms are in there:
    for form in multiple_forms:
        assert filled_row.contains_wordform(form)

    assert (filled_row.num_subrows == ilen(filled_row.subrows) ==
            len(multiple_forms)), "expected as many subrows as there are forms"

    first_row_cells = tuple(first(filled_row.subrows).cells)
    assert first(row.cells).fst_tags == first_row_cells[0].fst_tags
    assert first_row_cells[0].row_span == len(multiple_forms)

    first_form = first_row_cells[-1]
    assert isinstance(first_form, WordformCell)
    assert first_form.inflection == multiple_forms[0]

    last_row_cells = tuple(last(filled_row.subrows).cells)
    assert isinstance(last_row_cells[0], SuppressOutputCell)

    last_form = last_row_cells[-1]
    assert isinstance(last_form, WordformCell)
    assert last_form.inflection == multiple_forms[-1]
Example #10
    def _update_params_def_target_config(self, param_def):
        """
        calculates parameter.target_config based on extra config
        at `parameter_name__target value
        user might want to change the target_config for specific param using configuration/cli
        """

        # used for target_format update
        # change param_def definition based on config state
        target_def_key = "%s__target" % param_def.name
        target_config = self.config.get_multisection_config_value(
            self.config_sections, key=target_def_key)
        if not target_config:
            return param_def

        # the last value comes from a higher config level
        target_config = last(target_config)
        try:
            target_config = parse_target_config(target_config.value)
        except Exception as ex:
            raise param_def.parameter_exception(
                "Calculate target config for %s : target_config='%s'" %
                (target_def_key, target_config.value),
                ex,
            )

        param_def = param_def.modify(target_config=target_config)
        return param_def
Example #11
 def __getitem__(self, index):
     try:
         if isinstance(index, slice):
             molecules = []
             for _, start, len_mol in islice_extended(
                     self._molecules_ordered_all_gen(), index.start,
                     index.stop, index.step):
                 self._open_fgro.seek_atom(start)
                 molecules.append(
                     Residue([
                         AtomGro(next(self._open_fgro))
                         for _ in range(len_mol)
                     ]))
             return molecules
         if isinstance(index, int):
             if index == -1:
                 info = last(self._molecules_ordered_all_gen())
             else:
                 info = next(
                     islice_extended(self._molecules_ordered_all_gen(),
                                     index, index + 1))
             _, start, len_mol = info
             self._open_fgro.seek_atom(start)
             return Residue(
                 [AtomGro(next(self._open_fgro)) for _ in range(len_mol)])
         else:
             raise TypeError(
                 ('SystemGro indices must be integers or slices,'
                  f' not {type(index)}.'))
     except StopIteration:
         raise IndexError('Residue index out of range')
Example #12
    def extract_definition_line_comment(
            cls, lines: List[str], node: UniversalAssign) -> Optional[str]:
        def valid_comment_or_none(comment):
            if comment.startswith('#:'):
                return comment[2:].strip()
            return None

        # will fetch all tokens until the closing bracket of the appropriate type occurs;
        #  recursively calls itself when a new opening bracket is detected
        matching_brackets = {'{': '}', '[': ']', '(': ')'}

        def consume_between_bracers(iterable, bracket_type: str):
            closing_bracket = matching_brackets[bracket_type]
            for op in iterable:
                if op.string == closing_bracket:
                    return
                if op.string in matching_brackets:
                    return consume_between_bracers(iterable, op.string)
            # should never occur because these lines were already parsed and validated
            raise ValueError(
                f'no closing bracket for bracket of type "{bracket_type}"')

        # find last node
        if node.value is None:
            if not isinstance(node, ast.AnnAssign) or node.annotation is None:
                return None
            last_node = node.annotation
        else:
            if (isinstance(node.value, ast.Tuple)
                    and lines[node.value.lineno - 1][node.value.col_offset - 1]
                    != '('):
                last_node = node.value.elts[-1]
            else:
                last_node = node.value

        tokens_iter = cls._tokens_peekable_iter(lines)

        # skip tokens until the first token of the last node occurs
        tokens_iter.prepend(
            mitertools.last(cls._take_until_node(tokens_iter, last_node)))

        # skip everything except the NEWLINE token (for \-escaped newlines no NEWLINE
        #  token is emitted) and the comment token itself
        for tok in tokens_iter:
            if tok.type in (tokenize.COMMENT, tokenize.NEWLINE):
                tokens_iter.prepend(tok)
                break
            if tok.type == tokenize.OP and tok.string in matching_brackets:
                consume_between_bracers(tokens_iter, tok.string)

        try:
            maybe_comment = next(tokens_iter)
        except StopIteration:
            return None

        if maybe_comment.type == tokenize.COMMENT:
            return valid_comment_or_none(maybe_comment.string)

        return None
Example #13
 def last_job(self) -> str:
     """Return the last successful job or first failed job for this job"""
     if not self.jobs:
         return
     if not self.is_ok:
         failed = [job for job in self.jobs if not job.is_ok]
         return first(failed, None)
     return last(self.jobs, None)
Example #14
    def as_line(self):
        for block in self._blocks:
            internal_lines = self.internal(block)

            yield from chain(
                pairwise(internal_lines),
                zip(repeat(last(internal_lines)),
                    self.external(block.jump_offsets)),
            )
Example #15
def part1_debug():
    m = Map('15.test5')
    t = m.tick()
    tmp = rnd_num, remaining = last(t)
    print(tmp)
    return rnd_num * remaining
    # NOTE: the loop below is unreachable; move it above the return to step
    # through rounds manually.
    for r in range(1000):
        print(f'after {r} rounds')
        m.display()
        next(t)
Example #16
def getFirstLastDaysOfMonth(month, dataframe):
    # Rows of dataframe['Date'] whose month matches `month`.
    in_month = dataframe['Date'][dataframe['Date'].apply(lambda x: x.month) == month]

    first = mit.first(in_month)
    last = mit.last(in_month)

    return str(first.date()), str(last.date())
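A small self-contained check with a throwaway DataFrame (assuming `import more_itertools as mit`, as the function body implies):

import pandas as pd
df = pd.DataFrame({'Date': pd.to_datetime(['2021-03-01', '2021-03-15', '2021-04-02'])})
print(getFirstLastDaysOfMonth(3, df))  # -> ('2021-03-01', '2021-03-15')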
Example #17
    def file_name(self) -> str:
        if self.can_be_used_for_bootable_snapshot():
            normalized_volume = self.normalized_volume
            dir_separator_pattern = re.compile(constants.DIR_SEPARATOR_PATTERN)
            split_loader_path = dir_separator_pattern.split(
                none_throws(self.loader_path))
            loader = last(split_loader_path)
            extension = constants.CONFIG_FILE_EXTENSION

            return f"{normalized_volume}_{loader}{extension}".lower()

        return constants.EMPTY_STR
Example #18
def part1(fname=None):
    m = Map(fname)
    t = m.tick()
    with localtimer():
        rnd_num, remaining_hp = last(t)
    # hack for off by 1 error on big input...
    if not fname:
        rnd_num -= 1

    m.display()
    print(rnd_num, remaining_hp)
    return rnd_num * remaining_hp
Example #19
def parse_property_value(body: dict) -> str:
    """
        ref: <reference val>
        or
        type: <string/integer/number/bool>
    """
    if '$ref' in body:
        return last(body['$ref'].split('/'))
    elif 'type' in body:
        return parse_to_python_type(body['type'])
    else:
        raise Exception(f"{body} not expected here")
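For example (parse_to_python_type is not shown in this snippet):

parse_property_value({'$ref': '#/components/schemas/Pet'})  # -> 'Pet'
parse_property_value({'type': 'integer'})                   # delegated to parse_to_python_type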
Example #20
def interpret_left_to_right(expression: str) -> int:
    expression = expression.replace(" ", "")
    outermost_right = find_outermost_right_operator(expression)
    if outermost_right is None:
        if first(expression) == "(" and last(expression) == ")":
            return interpret_left_to_right(expression[1:-1])
        return int(expression)
    index, operator = outermost_right
    return OPERATORS[operator](
        interpret_left_to_right(expression[:index]),
        interpret_left_to_right(expression[index + 1 :]),
    )
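Assuming OPERATORS maps operator characters to binary functions and find_outermost_right_operator returns the rightmost top-level operator (neither is shown here), evaluation runs strictly left to right, ignoring precedence:

interpret_left_to_right("1 + 2 * 3")    # (1 + 2) * 3 == 9, not 7
interpret_left_to_right("1 + (2 * 3)")  # parentheses still bind: 1 + 6 == 7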
Example #21
def part2():
    # 50380 is too low
    # m = Map(elf_attack_power=12)  # superseded by the line below
    m = Map('15.e3', elf_attack_power=13)
    t = m.tick()
    m.display()

    try:
        rnd_num, remaining_hp = last(t)
    except Exception as e:
        print(e.args)

    m.display()
    print(rnd_num, remaining_hp)
    return rnd_num * remaining_hp
Example #22
def play_round():
    sleep(0.001)
    global board_cards
    board_cards[0].append(player1.play_card())
    board_cards[1].append(player2.play_card())

    if mit.last(board_cards[0]).val > mit.last(board_cards[1]).val:
        cards_qty = len(board_cards[0]) + len(board_cards[1])
        print(f'{player1.name} wins this round and takes {cards_qty} cards')
        append_cards(player1, board_cards)
        board_cards = [[], []]

    elif mit.last(board_cards[0]).val < mit.last(board_cards[1]).val:
        cards_qty = len(board_cards[0]) + len(board_cards[1])
        print(f'{player2.name} wins this round and takes {cards_qty} cards')
        append_cards(player2, board_cards)
        board_cards = [[], []]
    else:
        print(
            'The round ended in a tie, each player plays 2 more cards:\n'
            'one facing up and one facing down'
        )
        board_cards[0].append(player1.play_card())
        board_cards[1].append(player2.play_card())
        play_round()
Example #23
    def test_set_external_resource_urls(self, mock_channel_tracker):
        @task()
        def task_with_set_external_resource_urls():
            set_external_resource_urls(
                {
                    "my_resource": "http://some_resource_name.com/path/to/resource/123456789"
                }
            )
            task_run = try_get_current_task_run()
            return task_run.task_run_attempt_uid

        task_run_attempt_uid = task_with_set_external_resource_urls()
        save_external_links_call = last(get_save_external_links(mock_channel_tracker))
        assert save_external_links_call["external_links_dict"] == {
            "my_resource": "http://some_resource_name.com/path/to/resource/123456789"
        }
        assert save_external_links_call["task_run_attempt_uid"] == task_run_attempt_uid
Example #24
async def validator():
    cache_data = validator_TTCache.get(VALIDATOR_CACHE_KEY)
    if cache_data:
        resp: ValidatorsResponse = cache_data
    else:
        async with lock:
            cache_data = validator_TTCache.get(VALIDATOR_CACHE_KEY)
            if cache_data:
                # return the cached response in the same (dict) form as below
                return cache_data.dict()
            else:
                latest_block_number_tasks = []
                for validator in setting.validator_list:
                    latest_block_number_tasks.append(get_latest_block(validator))
                latest_infos = await asyncio.gather(*latest_block_number_tasks, return_exceptions=True)
                # gather(return_exceptions=True) can hand back Exception objects; drop them first
                latest_infos = [i for i in latest_infos if not isinstance(i, Exception)]
                latest_infos_no_exception = list(filter(lambda x: x.block_number != NO_LATEST_BLOCK, latest_infos))
                latest_num_dict: Dict[str, LatestInfo] = {i.validator.host: i for i in latest_infos}
                # if getting the latest block from every validator failed, pick the `nextToPropose` at random
                if len(latest_infos_no_exception) == 0:
                    best = random.choice(setting.validator_list)
                    max_block_numbers = NO_LATEST_BLOCK
                else:
                    max_block_numbers = max([i.block_number for i in latest_infos_no_exception])
                    latest = first_true(latest_infos_no_exception, lambda x: x.block_number == max_block_numbers)
                    index = one(locate(setting.validator_list, lambda x: x.pub_key == latest.sender))

                    # Why +2?
                    # The validator at `index` should be the one that proposed most recently,
                    # but it is possible that at this moment the next validator is already trying
                    # to propose a new block, so choosing the +2 validator is more reliable.
                    best = nth(ncycles(setting.validator_list, 2), index + 2)
                split_validators = list(split_before(setting.validator_list, lambda x: x.host == best.host))
                if len(split_validators) == 1:
                    sorted_validators = one(split_validators)
                else:
                    sorted_validators = last(split_validators) + first(split_validators)

                validators = list(map(lambda x: Validator(host=x.host, grpc_port=x.grpc_port, http_port=x.http_port,
                                                          latestBlockNumber=latest_num_dict.get(x.host).block_number,
                                                          timestamp=latest_num_dict.get(x.host).timestamp),
                                      sorted_validators))

                nextToPropose = NextToPropose(host=best.host, grpcPort=best.grpc_port, httpPort=best.http_port,
                                              latestBlockNumber=max_block_numbers)
                resp = ValidatorsResponse(nextToPropose=nextToPropose, validators=validators)
                validator_TTCache[VALIDATOR_CACHE_KEY] = resp
    return resp.dict()
Example #25
    def __init__(self, val: Optional[Union[str, int, Iterable[int], Sequence[Sequence[int]]]] = None) -> None:
        # The idea is to bring all acceptable values to one canonical intermediate format: the `Sequence[Sequence[
        # int]]`. Where the inner sequence is either a one or two element sequence. The one element sequence
        # represents a single VLAN, the two element sequence represents a VLAN range.
        #
        # An example of this intermediate format is::
        #
        #     vlans = [[5], [10, 12]]
        #
        # That example represents 4 VLANs, namely: 5, 10, 11, 12. The latter three VLANs are encoded as a range.
        #
        # This intermediate format happens to be the format as accepted by :func:`expand_ranges`. This function has
        # the advantage of deduplicating overlapping ranges or VLANs specified more than once. In addition its return
        # value can be used as input to the :func:`to_ranges` function.
        vlans: Sequence[Sequence[int]] = []
        if val is None:
            self._vlan_ranges = ()
            return
        elif isinstance(val, str):
            if val.strip() != "":
                # This might look complex, but it does handle strings such as `"  3, 4, 6-9, 4, 8 - 10"`
                try:
                    vlans = list(map(lambda s: list(map(int, s.strip().split("-"))), val.split(",")))
                except ValueError:
                    raise ValueError(f"{val} could not be converted to a {self.__class__.__name__} object.")
        elif isinstance(val, int):
            vlans = [[val]]
        elif isinstance(val, abc.Sequence):
            if len(val) > 0:
                if isinstance(first(val), int):
                    vlans = list(map(lambda x: [x], val))
                elif isinstance(first(val), abc.Sequence):
                    vlans = cast(Sequence[Sequence[int]], val)
                else:
                    raise ValueError(f"{val} could not be converted to a {self.__class__.__name__} object.")
        elif isinstance(val, abc.Iterable):
            vlans = list(map(lambda x: [x], val))  # type: ignore
        else:
            raise ValueError(f"{val} could not be converted to a {self.__class__.__name__} object.")

        er = expand_ranges(vlans, inclusive=True)
        if er and not (first(er) >= 0 and last(er) <= 4096):
            raise ValueError(f"{val} is out of range (0-4096).")

        self._vlan_ranges = tuple(to_ranges(er))
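Equivalent constructions under the canonical intermediate format described above (the class name VlanRanges is an assumption; it does not appear in this snippet):

VlanRanges("5, 10-12")       # string with a range
VlanRanges([5, 10, 11, 12])  # iterable of ints
VlanRanges([[5], [10, 12]])  # the intermediate format itself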
Example #26
async def run_bus(send_channel,
                  bus_id,
                  route,
                  start_offset=0,
                  refresh_timeout=0):
    while True:
        for bus_point in route['coordinates'][start_offset:]:
            await send_channel.send(
                json.dumps(
                    {
                        'busId': bus_id,
                        'lat': first(bus_point, 0),
                        'lng': last(bus_point, 0),
                        'route': route['name']
                    },
                    ensure_ascii=False))
            await trio.sleep(refresh_timeout)
        start_offset = 0
Example #27
def _get_bad_argument(ctx, param):
    content = ctx.message.content
    view = ctx.view
    if param.kind == param.KEYWORD_ONLY and not ctx.command.rest_is_raw:
        # Keyword only arguments are interpreted as "consume rest", unless
        # rest_is_raw is True. Internally, this means that view.read_rest() is
        # called as opposed to commands.view.quoted_word(view). This means we
        # can just grab the whole string via view.previous.
        return content[view.previous:], view.previous

    # It's a non-consume-rest arg which means we need to figure out where
    # the bad argument started. Believe it or not this is extremely hard as
    # commands.view.quoted_word(view) calls view.get() repeatedly, which
    # corrupts view.previous as now it's only the previous character rather
    # than the whole word. What makes it worse are the quotes, as that means
    # we can't just find the "last word" in that string.
    #
    # An alternative way would be to monkey patch quoted_word to set something
    # like view.quoted_word_previous = view.index before calling the real
    # one, but such a thing would be ridiculously horrid, and would probably
    # be more fragile, as it assumes that StringView isn't slotted and
    # quoted_word would stay "public".

    # Anything past view.index can't (or shouldn't) be checked for validity,
    # so we can safely discard it.
    content = ctx.message.content[:ctx.view.index]
    bad_quote = content[-1]
    bad_open_quote = _reverse_quotes.get(bad_quote)
    if not bad_open_quote or content[-2:-1] in ['\\', '']:
        # If there was no quote, or if it was escaped, then we can just
        # chomp off the whitespace up to that point.
        #
        # Use rsplit instead of rpartition as the former can take any
        # whitespace. (ext can take any sort of whitespace)
        bad_content = content.rsplit(None, 1)[-1]

        return bad_content, view.index - len(bad_content)

    # We need to look for the last "quoted" word.
    quote_pattern = rf'{bad_open_quote}((?:[^{bad_quote}\\]|\\.)*){bad_quote}'
    last_match = last(re.finditer(quote_pattern, content))
    # I swear if last_match is None...
    assert last_match, f'last_match is None with content {content}'
    return last_match[1], last_match.start()
Example #28
def visualize_graph(g: nx.DiGraph) -> Plox:
    with Plox() as px:
        nodes_0 = ["___"]
        nodes_1 = [
            n for (n, k) in g.nodes(data='kind')
            if (k != "aa") and (n.count('_') == 2)
        ]
        nodes_2 = [
            n for (n, k) in g.nodes(data='kind')
            if (k != "aa") and (n.count('_') == 1)
        ]
        nodes_3 = [
            n for (n, k) in g.nodes(data='kind')
            if (k != "aa") and (n.count('_') == 0)
        ]
        nodes_aa = [n for (n, k) in g.nodes(data='kind') if (k == "aa")]

        pos = graphviz_layout(g, prog="twopi", root='___')
        # pos = nx.spring_layout(g, pos=pos)
        # pos = nx.shell_layout(g, nlist=[nodes_0, nodes_1, nodes_2, nodes_3, nodes_aa])
        # pos = nx.planar_layout(g)
        # pos = nx.kamada_kawai_layout(g)
        # pos = nx.spring_layout(g, pos=pos, k=10, iterations=10, threshold=1e-8)

        nx.draw_networkx_edges(g, pos=pos)

        nx.draw_networkx_nodes(g,
                               pos=pos,
                               nodelist=(nodes_0 + nodes_1 + nodes_2 +
                                         nodes_3))
        nx.draw_networkx_nodes(g, pos=pos, nodelist=nodes_aa, node_color='r')

        labels = {
            n: {
                'aa': n,
                'origin': "-",
                None: last(n.strip('_'), None)
            }[k]
            for (n, k) in g.nodes(data='kind')
        }
        nx.draw_networkx_labels(g, pos=pos, labels=labels)

        yield px
Example #29
    def __init__(
        self,
        logger_factory: BaseLoggerFactory,
        model: Model,
        states: States,
    ):
        self._logger = logger_factory.logger(__name__)

        if not has_items(states) or is_singleton(states):
            raise ValueError(
                "The 'states' collection must be initialized and contain at least two items!"
            )

        initial = checked_cast(State, first(states))
        expected_initial_name = StateNames.INITIAL.value

        if initial.name != expected_initial_name:
            raise ValueError("The first item of the 'states' collection must "
                             f"be a state named '{expected_initial_name}'!")

        final = checked_cast(State, last(states))
        expected_final_name = StateNames.FINAL.value

        if final.name != expected_final_name:
            raise ValueError("The last item of the 'states' collection must "
                             f"be a state named '{expected_final_name}'!")

        conditions = model.conditions

        super().__init__(
            model=model,
            states=list(states),
            initial=initial,
            auto_transitions=False,
            name=__name__,
        )
        self.add_ordered_transitions(
            loop=False,
            conditions=conditions,
        )

        self._initial_state = initial
Example #30
def viterbi_path_nx(hmm: HMM, O: pd.Series):
    """
    Find one of the most likely sequences of hidden states
    for the sequence of observations O.
    Use the networkx library.
    """

    import networkx as nx
    g = nx.DiGraph()

    from collections import namedtuple
    HiddenState = namedtuple('HiddenState', ['time', 'state'])

    (Alpha, Omega) = ("+", "-")

    # Negative log-likelihood
    nll = (lambda x: np.inf if (x <= 0) else -10 * np.log10(x))

    # Graph source
    for q in hmm.Q:
        # Likelihood = P[Observe the first o | Hidden state q at time 0] x P[Hidden state q at time 0]
        g.add_edge(Alpha, HiddenState(first(O.index), q), nll=nll(hmm.b[first(O)][q] * hmm.e[q]))

    # Pairwise observations
    for ((s, os), (t, ot)) in pairwise(O.items()):
        # Hidden transitions
        for (qs, qt) in product(hmm.Q, hmm.Q):
            # Likelihood = P[Observe ot | Hidden state qt at time t] x P[qt at time t | qs at time s]
            g.add_edge(HiddenState(s, qs), HiddenState(t, qt), nll=nll(hmm.b[ot][qt] * hmm.a[qt][qs]))

    # Graph sink
    for q in hmm.Q:
        g.add_edge(HiddenState(last(O.index), q), Omega, nll=0)

    # Inferred sequence of hidden states
    qq = pd.concat([
        pd.Series(index=[hs.time], data=[hs.state], dtype=object)
        for hs in nx.shortest_path(g, source=Alpha, target=Omega, weight='nll')[1:-1]
    ])

    return qq
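With the hypothetical toy model from the sketch after Example #2, the graph-based and DP implementations should agree up to ties between equally likely paths:

print(viterbi_path_nx(hmm_toy, O))
print(viterbi_path_dp(hmm_toy, O))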