Example #1
def routh_row(i_n_minus_2: Iterable[Basic],
              i_n_minus_1: Iterable[Basic]) -> \
        Iterable[Basic]:
    "Computes the next row for a Routh matrix"
    pp_iter, pp_counter = counter_wrap(i_n_minus_2)
    p_iter, p_counter = counter_wrap(i_n_minus_1)
    a02, pp_iter = spy(pp_iter, 2)
    a1, p_iter = spy(p_iter, 1)

    for (a0, a2), (a1, a3) in zip(pairwise(pp_iter), pairwise(p_iter)):
        yield (a1 * a2 - a0 * a3) / a1
    consume(map(consume, (pp_iter, p_iter)))
    if pp_counter() == 2 and p_counter() == 1:
        yield a02[1]
        return
    if not 0 <= pp_counter() - p_counter() <= 1 \
       or p_counter() < 1:
        raise ValueError("pp row should be at most one item "
                         "larger than p row and at least equal in size")


#def routh_matrix(coeffs: Iterable[Basic]) ->
#        Iterable[List[Basic]]:
#    coeffs, coeffs_n = counter_wrap(coeffs)
#    i0, i1 = map(list, unzip(grouper(coeffs, 2, 0)))
#    i2: List[Basic]
#    for _ in range(coeffs_n() - 2):

#def routh_recursive(coeffs: )
Example #2
File: hmm1.py Project: numpde/cbb
def viterbi_path_dp(hmm: HMM, O: pd.Series):
    """
    Find one of the most likely sequence of hidden states
    for the sequence of observations O using dynamic programming.
    """

    norm = (lambda s: s / s.sum())

    d = pd.DataFrame(index=hmm.Q, columns=O.index)
    m = pd.DataFrame(index=hmm.Q, columns=O.index)
    d[first(O.index)] = hmm.b[first(O)] * hmm.e
    for ((s, __), (t, ot)) in pairwise(O.items()):
        x: pd.DataFrame
        x = hmm.a * np.outer(d[s], hmm.b[ot])
        m[t] = x.idxmax(axis=0)
        d[t] = norm(x.max(axis=0))

    # Inferred sequence of hidden states
    qq = pd.Series(index=O.index, dtype=object)

    q = d[last(d)].idxmax()
    qq[last(d)] = q
    for (s, t) in reversed(list(pairwise(O.index))):
        q = m[t][q]
        qq[s] = q

    return qq
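A hypothetical usage sketch for viterbi_path_dp (the HMM container is assumed to expose Q, e, a and b as pandas-compatible objects, which is what the code above implies; SimpleNamespace stands in for the real class, and first/last/pairwise come from more_itertools):

import numpy as np
import pandas as pd
from types import SimpleNamespace

Q = ["Rain", "Sun"]            # hidden states
S = ["walk", "shop", "clean"]  # observation symbols
hmm = SimpleNamespace(
    Q=Q,
    e=pd.Series([0.6, 0.4], index=Q),                              # initial distribution
    a=pd.DataFrame([[0.7, 0.3], [0.4, 0.6]], index=Q, columns=Q),  # transitions
    b=pd.DataFrame([[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]],
                   index=Q, columns=S),                            # emissions
)
O = pd.Series(["walk", "shop", "clean"])
print(viterbi_path_dp(hmm, O))  # one most likely state per observation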
Example #3
def _common_ancestor(types):
    g = nx.DiGraph()
    for t in types:
        g.add_edges_from(pairwise(reversed(inspect.getmro(t))))
    while len(types) != 1:
        types, pairs = [], pairwise(types)
        for pair in pairs:
            types.append(nx.lowest_common_ancestor(g, *pair))
    return types[0]
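A quick usage check, assuming networkx, inspect and more_itertools.pairwise are in scope as above. The MRO of bool is (bool, int, object), so the closest shared ancestor of bool and int is int:

print(_common_ancestor([bool, int]))         # <class 'int'>
print(_common_ancestor([bool, int, float]))  # <class 'object'>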
Example #4
def _transact(graph: nx.MultiGraph, src: int, tgt: int, amount: int, new_node: int, info: dict):
    from itertools import chain
    from more_itertools import pairwise

    path, weights, fees = find_route(graph, src, tgt, amount)

    # no path found
    if path is None:
        info['no_path'] += 1
        return

    for (node, neighbour) in pairwise(path):
        channel_trans_amt = weights[neighbour]
        channel_policy = graph.get_policy(node, neighbour)

        # channel cannot handle transaction
        if channel_policy.balance < channel_trans_amt:
            info['channel_imbalance'] += 1
            if node == new_node or neighbour == new_node:
                info['failure'][node, neighbour] += 1
                raise Exception('Adversary channel imbalance')
            return

    # record successful transaction
    info['total_success'] += 1

    total_amt = 0
    # move amount between nodes in path
    # Pad with a trailing None so the final (node, None) pair reaches the
    # settlement branch below.
    for (node, neighbour) in pairwise(chain(path, [None])):
        if neighbour is None:
            # remove total amount from src
            graph.nodes[path[0]]['data'].capacity -= total_amt
            # add amount to tgt
            graph.nodes[node]['data'].capacity += weights[node]
            break

        channel_trans_amt = weights[neighbour]
        channel_policy = graph.get_policy(node, neighbour)

        # pay channel fees to node
        graph.nodes[node]['data'].capacity += fees[node]
        total_amt += fees[node]

        # shift channel balance
        channel_policy.balance -= channel_trans_amt
        graph.get_policy(neighbour, node).balance += channel_trans_amt

        # record profit for adversary
        if node == new_node or neighbour == new_node:
            info['success'][node, neighbour] += 1
            info['profit'][node, neighbour] += fees[node]
Example #5
File: hmm1.py Project: numpde/cbb
def learn_baum_welch(hmm: HMM, O: pd.Series, niter=5, delay=0.9):
    for i in range(niter):
        # Forward variable
        a = pd.DataFrame(index=hmm.Q, columns=O.index)
        a[first(a)] = hmm.b[O[first(a)]] * hmm.e
        for (s, t) in pairwise(a):
            a[t] = hmm.b[O[t]] * (a[s] @ hmm.a)

        # Baum-Welch score Pr(O|M), based on remark in [1, p.179]:
        prom = sum(a[last(a.columns)])
        assert (prom > 0)

        print(F"Model likelihood before training step #{i + 1}: {prom}")

        # Backward variable (includes the hmm.b factor)
        b = pd.DataFrame(index=hmm.Q, columns=O.index)
        b[last(b)] = hmm.b[O[last(b)]] * 1
        for (s, t) in reversed(list(pairwise(b))):
            b[s] = hmm.b[O[s]] * (hmm.a @ b[t])

        # Remark [1, p.182]:
        if not np.isclose(prom, sum(b[first(b.columns)] * hmm.e), atol=0, rtol=1e-3):
            print("ERROR:", prom, "should equal", sum(b[first(b.columns)] * hmm.e))
            exit()

        # Expected number of transitions state i -> state j [1, Claim 5.12 and p.183]:
        n = pd.Series(
            data={
                s: hmm.a * np.outer(a[s], b[t]) / prom
                for (s, t) in pairwise(O.index)
            },
        )

        # From [1, Claim 5.9 on p.181]:
        # g = a * b / prom  # Not correct with the redefinition of b
        # Use [1, Claim 5.12 and Note on p.183]:
        g = n.apply(lambda x: x.sum(axis=1)).append(a[last(a.columns)] * 1 / prom, verify_integrity=True).T
        assert all(np.isclose(g.sum(axis=0), 1))
        assert all(np.isclose(n.sum().sum(axis=1), g[n.index].sum(axis=1)))
        assert all(np.isclose(g.groupby(O, axis=1).sum().sum(axis=1), g.sum(axis=1)))

        norm_rows = (lambda df: df.apply(lambda s: s / s.sum(), axis=1))
        hmm.e = delay * hmm.e + (1 - delay) * np.sum(first(n), axis=1)
        hmm.a = delay * hmm.a + (1 - delay) * norm_rows(n.sum())
        hmm.b = delay * hmm.b + (1 - delay) * norm_rows(pd.DataFrame(columns=hmm.S, data=g.groupby(O, axis=1).sum()).fillna(0))
        assert np.isclose(hmm.e.sum(), 1)
        assert all(np.isclose(hmm.a.sum(axis=1), 1))
        assert all(np.isclose(hmm.b.sum(axis=1), 1))
Example #6
    def check(self, checker):
        assert len(self.instances) >= 2

        def cc(b1, b2, c='x'):  # Create coordinate constraint
            if self.abut:
                return getattr(b1, f'ur{c}') == getattr(b2, f'll{c}')
            else:
                return getattr(b1, f'ur{c}') <= getattr(b2, f'll{c}')

        super().check(checker)
        bvars = checker.iter_bbox_vars(self.instances)
        for b1, b2 in itertools.pairwise(bvars):
            if self.direction == 'left_to_right':
                checker.append(cc(b1, b2, 'x'))
            elif self.direction == 'right_to_left':
                checker.append(cc(b2, b1, 'x'))
            elif self.direction == 'bottom_to_top':
                checker.append(cc(b1, b2, 'y'))
            elif self.direction == 'top_to_bottom':
                checker.append(cc(b2, b1, 'y'))
            elif self.direction == 'horizontal':
                checker.append(checker.Or(cc(b1, b2, 'x'), cc(b2, b1, 'x')))
            elif self.direction == 'vertical':
                checker.append(checker.Or(cc(b1, b2, 'y'), cc(b2, b1, 'y')))
            else:
                checker.append(
                    checker.Or(cc(b1, b2, 'x'), cc(b2, b1, 'x'),
                               cc(b1, b2, 'y'), cc(b2, b1, 'y')))
Example #7
def get_path_effect(graph: BELGraph, path, relationship_dict) -> Effect:
    """Calculate the final effect of the root node to the sink node in the path.

    :param graph: A BEL graph
    :param list path: Path from root to sink node
    :param dict relationship_dict: dictionary with relationship effects
    """
    causal_effect = []

    for predecessor, successor in pairwise(path):
        if pair_has_contradiction(graph, predecessor, successor):
            return Effect.ambiguous

        edges = graph.get_edge_data(predecessor, successor)

        edge_key, edge_relation, _ = rank_edges(edges)

        relation = graph[predecessor][successor][edge_key][RELATION]

        # Returns Effect.no_effect if there is a non causal edge in path
        if relation not in relationship_dict or relationship_dict[
                relation] == 0:
            return Effect.no_effect

        causal_effect.append(relationship_dict[relation])

    final_effect = reduce(lambda x, y: x * y, causal_effect)

    return Effect.activation if final_effect == 1 else Effect.inhibition
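The final reduce is a sign product over the causal edges: an even number of inhibitory (-1) relations multiplies out to +1 (activation), an odd number to -1 (inhibition). A minimal check with functools.reduce, which the snippet assumes is imported:

from functools import reduce
reduce(lambda x, y: x * y, [1, -1, -1])  # 1 -> Effect.activation
reduce(lambda x, y: x * y, [1, -1, 1])   # -1 -> Effect.inhibition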
Example #8
def tracefile(dirname):
    '''
    ## time_checker ver1.0

    __USAGE__
    Arguments:
        dirname : path of a directory filled with files whose names are datetimes (str)
    Returns:
        timepair : tuple holding two datetimes (tuple)

    __INTRODUCTION__
    Yields the time difference between one file name and the creation time
    of the next file.

    __ACTION__
    Executed when filefiller.filecheck(), a subprocess of SAtraceGraph.py,
    raises an error.

    __UPDATE1.0__
    First commit

    __TODO__
    None

    __TEST__

    >>> [i for i in tracefile(param['out']+'/160817/rawdata/trace/')]
    (datetime.datetime(1900, 1, 1, 8, 50, 29), datetime.datetime(1900, 1, 1, 8, 59, 12))

    '''
    line = glob.glob1(dirname, '*')
    ttlist = [datetime.datetime.strptime(i[:-4], '%Y%m%d_%H%M%S') for i in line]
    for timepair in pairwise(ttlist):
        sub = timepair[1] - timepair[0]
        if not datetime.timedelta(minutes=4) < sub < datetime.timedelta(minutes=6):
            yield timepair  # where the time gap occurred
Example #9
def katabasis():
    p = Problem()

    num_notes = 12
    lower_bound = 22
    upper_bound = 108

    notes = [p.add_constant(22)]
    for i in range(num_notes - 1):
        var = p.add_variable(lower_bound, upper_bound, f"n{i}")
        notes.append(var)

    intervals = []
    for i in range(num_notes - 1):
        var = p.add_variable_from_domain([1, 2, 3, 4, 5, 7], f"i{i}")
        intervals.append(var)

    p.add_all_different_constraint(notes)

    # variables should ascend
    for i, (current, next_) in enumerate(pairwise(notes)):
        p.add_constraint(current + intervals[i] == next_)

    p.add_constraint(intervals[0] == intervals[10])
    p.add_constraint(intervals[1] == intervals[9])
    p.add_constraint(intervals[2] == intervals[8])
    p.add_constraint(intervals[3] == intervals[7])
    p.add_constraint(intervals[4] == intervals[6])

    p.add_filter(unique_pitch_classes)

    solutions = p.solve(for_variables=notes)
    return solutions
Example #10
def gaps(args):
    """
    %prog gaps OM.bed fastafile

    Create patches around OM gaps.
    """
    from jcvi.formats.bed import uniq

    p = OptionParser(gaps.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    ombed, fastafile = args
    ombed = uniq([ombed])
    bed = Bed(ombed)

    for a, b in pairwise(bed):
        om_a = (a.seqid, a.start, a.end, "+")
        om_b = (b.seqid, b.start, b.end, "+")
        ch_a = range_parse(a.accn)
        ch_b = range_parse(b.accn)
        ch_a = (ch_a.seqid, ch_a.start, ch_a.end, "+")
        ch_b = (ch_b.seqid, ch_b.start, ch_b.end, "+")

        om_dist, x = range_distance(om_a, om_b, distmode="ee")
        ch_dist, x = range_distance(ch_a, ch_b, distmode="ee")

        if om_dist <= 0 and ch_dist <= 0:
            continue

        print(a)
        print(b)
        print(om_dist, ch_dist)
Example #11
def average_compass_bearing_line(line: LineString) -> float:
    """
    Calculates the average weighted bearing for a LineString by
    deconstructing it into segment where bearing is the segment oriented angle and
    weight is the length of the segment.

    :Parameters:
      - `line: LineString in meter projection
    :Returns:
      The bearing in degrees
    :Returns Type:
      float
    """

    coords = list(line.coords)
    bearings = []
    amounts = []
    for u, v in pairwise(coords):
        bearing = compass_bearing(u, v)
        bearings.append(bearing)
        amount = LineString([u, v]).length
        amounts.append(amount)

    if len(bearings) == 1:
        return bearings[0]
    else:
        return np.degrees(circmean(np.radians(bearings),
                                   weights=amounts)) % 360
Example #12
def feature_metrics(feature_names, x, y, buckets, enabled_rules):
    x_t = x.T  # [[...feature0 values across all pages...], [...feature1 values...], ...].
    for name, values in zip(feature_names, x_t):
        if name not in enabled_rules:
            continue
        is_boolean = is_boolean_feature(values)
        _, boundaries = numpy.histogram(values.numpy(),
                                        bins=2 if is_boolean else buckets)
        highest_boundary = boundaries[-1]
        bars = []
        for boundary, (low_bound, high_bound) in zip(boundaries,
                                                     pairwise(boundaries)):
            is_last_time = high_bound == highest_boundary

            # Whether each feature value is a member of this bucket. Last
            # interval is inclusive on the right.
            x_is_for_this_bar = ((values >= low_bound) &
                                 ((values <= high_bound) if is_last_time else
                                  (values < high_bound)))

            y_for_this_bar = y.T[0].masked_select(x_is_for_this_bar)
            positives = (y_for_this_bar.numpy() == 1).sum()
            negatives = len(y_for_this_bar) - positives
            label = str(ceil(boundary)) if is_boolean else f'{boundary:.1f}'
            bars.append((label, positives, negatives))
        yield name, bars
Example #13
def get_tracks(G,
               th=0.1,
               th_re=0.8,
               feature_name='solution',
               with_fit=True,
               min_hits=3):
    """
    Don't use nx.MultiGraphs
    """
    used_nodes = []
    sub_graphs = []
    next_hit_fn = partial(find_next_hits,
                          th=th,
                          th_re=th_re,
                          feature_name=feature_name)
    for node in G.nodes():
        if node in used_nodes:
            continue
        road = build_roads(G, node, next_hit_fn, used_nodes)
        diff = fit_road(G, road) if with_fit else [0.] * len(road)
        a_road = chose_a_road(road, diff)

        if len(a_road) < min_hits:
            used_nodes.append(node)
            sub_graphs.append(G.subgraph([node]))
            continue

        a_track = list(pairwise(a_road[:-1]))
        sub = G.edge_subgraph(a_track)
        sub_graphs.append(sub)
        used_nodes += list(sub.nodes())

    return sub_graphs
Example #14
    def test_scale_by_detector(self):

        scale = [1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0]
        self.math = scale_by_detector(self.data[0], scale)
        num_dec = 5

        points = self.data[0].points_per_detector

        cumul = [sum(points[:i]) for i in range(1, len(points)+1)]
        cumul.insert(0, 0)

        indices = more_itertools.pairwise(cumul)

        self.assertTrue(np.array_equal(self.data[0].x, self.math.x))

        for i, s in zip(indices, scale):
            self.assertTrue(np.array_equal(np.around(self.math.y[i[0]:i[1]], num_dec),
                                           np.around(self.data[0].y[i[0]:i[1]]*s, num_dec)))
            self.assertTrue(np.array_equal(self.math.y_err[i[0]:i[1]],
                                           self.data[0].y_err[i[0]:i[1]]*s))

        self.assertEqual(self.math.title, '')
        self.assertEqual(self.math.description, '')

        self.assertListEqual(self.data[0].components, self.math.components)

        self.assertRaises(ValueError, scale_by_detector, self.data[0],
                          [1, 2, 3])
Example #15
    def realign_history(self, history):
        """"Realigns history so as to be compatible with auditors.

    Since the true applicants groups, unmanipulated test scores and
    true_eligible
    are generated before the agent's action, they are in the previous state, so
    we
    push them one step ahead in history and ignore the first step.

    Args:
      history: A list of tuples of state, action pairs.

    Returns:
      A realigned history with changed state, action pairs.
    """
        realign_variables = [
            'test_scores_x', 'applicant_groups', 'true_eligible', 'params'
        ]
        realigned_history = []
        for (state, _), (next_state,
                         next_action) in more_itertools.pairwise(history):
            new_history_point = core.HistoryItem(
                state=copy.deepcopy(next_state),
                action=copy.deepcopy(next_action))
            for variable in realign_variables:
                setattr(new_history_point.state, variable,
                        getattr(state, variable))
            realigned_history.append(new_history_point)
        return realigned_history
Example #16
def blend_pyramid(a, b, mask, num_layers=None, weights=None):
    """TODO: Docstring for function.

    :weights: dictionary with {scale: weight}
    :returns: TODO

    """
    if weights is None:
        weights = [1] * num_layers
    num_layers = len(weights)
    gauss_pyr_a = list(pyramid_gaussian(a, num_layers))
    gauss_pyr_b = list(pyramid_gaussian(b, num_layers))
    gauss_pyr_mask = list(pyramid_gaussian(mask, num_layers))
    lap_pyr_a = pyramid_laplace(gauss_pyr_a) + gauss_pyr_a[-1:]
    lap_pyr_b = pyramid_laplace(gauss_pyr_b) + gauss_pyr_b[-1:]

    blend_pyr = []
    for gauss_mask, lap_a, lap_b in zip(gauss_pyr_mask, lap_pyr_a, lap_pyr_b):
        blend = lap_a*gauss_mask + lap_b*(1-gauss_mask)
        blend_pyr.append(blend)

    img = None
    for weight, (low, high) in zip([0] + weights,
                                   pairwise(reversed(blend_pyr))):
        if img is None:
            img = low
        img = upsample(img) + weight*high
    return img
Example #17
def import_opus(
    ad_reader=None,
    import_all: bool = False,
    import_last=False,
    opus_id=None,
    rundb_write: bool = True,
) -> None:
    """Import one or all files from opus even if no previous files have been imported"""
    settings = load_settings()
    filter_ids = settings.get("integrations.opus.units.filter_ids", [])
    skip_employees = settings.get("integrations.opus.skip_employees", False)
    dumps = opus_helpers.read_available_dumps()

    all_dates = dumps.keys()
    # Default is read first file only
    export_dates = [min(all_dates)]
    if import_last:
        export_dates = [max(all_dates)]
    elif import_all:
        export_dates = sorted(all_dates)

    export_dates = prepend(None, export_dates)
    date_pairs = pairwise(export_dates)
    for date1, date2 in date_pairs:
        import_one(
            ad_reader,
            date2,
            date1,
            dumps,
            filter_ids,
            opus_id=opus_id,
            rundb_write=rundb_write,
        )
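The prepend(None, ...) plus pairwise(...) idiom above pairs every export date with its predecessor, the first one getting None, so import_one always receives a (previous, current) pair:

from more_itertools import pairwise, prepend
list(pairwise(prepend(None, ["2020-01-01", "2020-02-01"])))
# [(None, '2020-01-01'), ('2020-01-01', '2020-02-01')]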
Example #18
    def move(self, move):
        """Takes a move and apply it to the game."""

        if move not in self.legal_moves():
            raise ValueError(f'illegal move: {move!r}')

        board = self._board
        squares = [_xy_to_i(xy) for xy in sliced(move, 2)]
        end = squares[-1]

        piece = board[squares[0]]
        if end >> 3 == 7 * (not self.turn) and not _is_king(piece):
            # New king
            piece = piece.upper()

        for before, after in pairwise(squares):
            difference = abs(before - after)
            if difference not in {18, 14}:
                continue

            # A two step rather than a one step means a capture.
            square_between = min(before, after) + difference // 2
            board[square_between] = ' '

        board[squares[0]] = ' '
        board[end] = piece
        self._last_move = move
        self._half_moves += 1
        self.turn = not self.turn
Example #19
    def on_select(self):
        info(f"selecting piece: {self.token}")
        self.selected_sprite.visible = True
        self.unselected_sprite.visible = False

        self.move_lines = []

        for move in self.token.find_possible_moves():
            xyw_waypoints = [self.token.xyw, *move.xyw_path]
            xygs = [
                self.actor.xyg_from_xyw(v)
                for v in flatten(pairwise(xyw_waypoints))
            ]

            n = len(xygs)
            v2f = sum((x.tuple for x in xygs), ())
            c3B = n * [0, 32, 73]  # navy

            line = self.actor.gui.batch.add(
                n,
                GL_LINES,
                None,
                ('v2f', v2f),
                ('c3B', c3B),
            )
            self.move_lines.append(line)
Example #20
def silicosoma(args):
    """
    %prog silicosoma in.silico > out.soma

    Convert .silico to .soma file.

    Format of .silico
        A text file containing in-silico digested contigs. This file contains pairs
    of lines. The first line in each pair contains an identifier, the contig
    length in bp, and the number of restriction sites, separated by white space.
    The second line contains a white space delimited list of the restriction
    site positions.

    Format of .soma
        Each line of the text file contains two decimal numbers: The size of the
    fragment and the standard deviation (both in kb), separated by white space.
    The standard deviation is ignored.
    """
    p = OptionParser(silicosoma.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    (silicofile,) = args
    fp = must_open(silicofile)
    fw = must_open(opts.outfile, "w")
    next(fp)
    positions = [int(x) for x in next(fp).split()]
    for a, b in pairwise(positions):
        assert a <= b
        fragsize = int(round((b - a) / 1000.0))  # kb
        if fragsize:
            print(fragsize, 0, file=fw)
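For instance, a hypothetical .silico entry

    ctg1 12000 3
    2000 5000 11000

has adjacent-site differences of 3000 bp and 6000 bp, so the loop above writes

    3 0
    6 0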
Example #21
    def count_and_sum_text_traces(self):
        """Computes the number of traces and sums of these traces for all values
        of each text byte.

        Returns:
            A tuple ``(cnts, sums)``, where
                - ``cnts`` is a (16, 256, 1) array where ``cnts[i, j, 0]`` gives the
                  number of traces where text byte i is j, and
                - ``sums`` is a (16, 256, NUM_SAMPLES) array where ``sums[i, j, :]``
                  gives the sum of traces where text byte i is j.
        """
        sums = np.zeros((16, 256, self.num_samples))
        # Need to specify the last dimension for broadcasting to work during
        # aggregation.
        cnts = np.zeros((16, 256, 1))
        for byte_pos in range(16):
            # While a little bit more complex, below code is more efficient than
            # a naive implementation that searches for all possible byte values
            # in ``self.texts``.
            sorted_indices = self.texts[:, byte_pos].argsort()
            sorted_bytes = self.texts[sorted_indices, byte_pos]
            # Find the indices where byte values change.
            val_changes = np.where(np.roll(sorted_bytes, 1) != sorted_bytes)[0]
            # Append the number of rows to be able to use ``pairwise``.
            val_indices = list(val_changes) + [sorted_bytes.shape[0]]
            for (start, end) in more_itertools.pairwise(val_indices):
                byte_val = sorted_bytes[start]
                cnts[byte_pos, byte_val] = end - start
                act_indices = sorted_indices[start:end]
                sums[byte_pos, byte_val] = self.traces[act_indices].sum(axis=0)
        return cnts, sums
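The sort/roll/pairwise bucketing used above is easiest to see on a toy array; a minimal sketch:

import numpy as np
from more_itertools import pairwise

sorted_bytes = np.array([0, 0, 1, 1, 1, 4])
# Indices where the sorted values change (each entry compared to its predecessor).
val_changes = np.where(np.roll(sorted_bytes, 1) != sorted_bytes)[0]  # [0, 2, 5]
for start, end in pairwise(list(val_changes) + [len(sorted_bytes)]):
    print(sorted_bytes[start], end - start)  # value/count pairs: 0/2, 1/3, 4/1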
Example #22
def DijkstraPath(
    dfed,
    source,
    target,
    from_label="node1",
    to_label="node2",
    weight_label="weight",
    **kwargs,
):
    """
    最短路問題
    入力
        dfed: 辺のDataFrameもしくはCSVファイル名
        source: 開始点
        target: 終了点
        from_label: 元点の属性文字
        to_label: 先点の属性文字
        weight_label: 辺の重みの属性文字
    出力
        最大カットと片方の集合の点のDataFrame
    """
    g, _, dfed = graph_from_table(None,
                                  dfed,
                                  from_label=from_label,
                                  to_label=to_label,
                                  from_to="FrTo_",
                                  **kwargs)
    rt = nx.dijkstra_path(g, source, target, weight=weight_label)
    return pd.concat([
        dfed[dfed.FrTo_ == f"{min(i,j)}-{max(i,j)}"] for i, j in pairwise(rt)
    ]).drop("FrTo_", 1)
Example #23
def cells_from_nodes(nodes_array):
    """
    Find the cell vertices in a regular grid defined by an array of nodes
    
    """
    c = 0
    cells = []

    for ri, row in enumerate(nodes_array):
        count = 0
        if ri != len(nodes_array) - 1:
            cells.append([])
        for item1, item2 in pairwise(row):
            if ri == 0:
                cells[ri].append([item1, item2])
            elif ri == len(nodes_array) - 1:
                cells[ri - 1][count].append(item1)
                cells[ri - 1][count].append(item2)

            else:
                cells[ri].append([item1, item2])
                cells[ri - 1][count].append(item1)
                cells[ri - 1][count].append(item2)
            count += 1

    return np.array(cells)
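A sketch of the output for a 3x3 node grid (with pairwise from more_itertools in scope): each of the four cells lists its corners as [top-left, top-right, bottom-left, bottom-right].

import numpy as np
grid = np.arange(9).reshape(3, 3)
print(cells_from_nodes(grid))
# [[[0 1 3 4]
#   [1 2 4 5]]
#  [[3 4 6 7]
#   [4 5 7 8]]]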
Example #24
def main() -> None:
    numbers = aoc.get_integers(9)

    stack: List[int] = []
    for index, (appending_number, number) in enumerate(pairwise(numbers)):
        stack.append(appending_number)
        if len(stack) < 25:
            continue
        if len(stack) > 25:
            stack.pop(0)

        if any(a + b == number for a, b in combinations(stack, 2)):
            continue

        print(f"No combination found for {number} (#{index}).")

        target = number

        for length in range(2, len(numbers)):
            for start_index in range(len(numbers) - length + 1):
                segment = numbers[start_index:start_index + length]
                if sum(segment) == target:
                    print(f"Found range {segment} "
                          f"at [{start_index} : {start_index + length}) "
                          f"length ({length}).")
                    weakness = min(segment) + max(segment)
                    print(f"The weakness is {weakness}.")
Example #25
 def check(self):
     constraints = super().check()
     assert len(self.blocks) >= 2
     bvars = self._get_bbox_vars(self.blocks)
     for b1, b2 in itertools.pairwise(bvars):
         constraints.append(b1.urx <= b2.llx)
     return constraints
Example #26
def same_file(*filepaths: Union[Path, str],
              not_exists_ok: bool = True) -> bool:
    """Return True if given files are the same, False if not.

    Args:
        *filepaths (iter of pathlib.Path or str): Collection of filepaths of files to
            compare.
        not_exists_ok (bool): True if a path for a nonexistent file should be treated as
            a file and as "different" than any actual files.
    """
    filepaths = {Path(filepath) for filepath in filepaths}
    if any(not filepath.is_file() for filepath in filepaths):
        if not_exists_ok:
            same = False
        else:
            raise FileNotFoundError(
                "One or more nonexistant files (not_exists_ok=False)")

    elif len(filepaths) <= 1:
        same = True
    else:
        same = all(
            filecmp.cmp(filepath, cmp_filepath)
            for filepath, cmp_filepath in pairwise(filepaths))
    return same
Example #27
def coordinate_distance(*coordinates):
    """Return total distance between coordinates.

    Args:
        *coordinates: Collection of coordinates to compare. Coordinates can be `x,y` or
            `x,y,z`.

    Returns:
        float: Euclidean distance between coordinates.
    """
    distance = 0.0
    for coord1, coord2 in pairwise(coordinates):
        coord = {
            1: dict(zip(["x", "y", "z"], coord1)),
            2: dict(zip(["x", "y", "z"], coord2)),
        }
        coord[1].setdefault("z", 0)
        coord[2].setdefault("z", 0)
        distance += sqrt(
            (coord[2]["x"] - coord[1]["x"])**2
            + (coord[2]["y"] - coord[1]["y"])**2
            + (coord[2]["z"] - coord[1]["z"])**2
        )
    return distance
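A quick sanity check (with sqrt and pairwise imported, as the snippet assumes):

print(coordinate_distance((0, 0), (3, 4)))          # 5.0
print(coordinate_distance((0, 0), (3, 4), (3, 4)))  # 5.0, zero-length last leg
print(coordinate_distance((0, 0, 0), (1, 2, 2)))    # 3.0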
Example #28
 def check(self, checker):
     super().check(checker)
     assert len(self.instances) >= 2
     bvars = checker.iter_bbox_vars(self.instances)
     for b1, b2 in itertools.pairwise(bvars):
         if self.line == 'h_top':
             checker.append(b1.ury == b2.ury)
         elif self.line == 'h_bottom':
             checker.append(b1.lly == b2.lly)
         elif self.line == 'h_center':
             checker.append((b1.lly + b1.ury) / 2 == (b2.lly + b2.ury) / 2)
         elif self.line == 'h_any':
             checker.append(
                 checker.Or(  # We don't know which bbox is higher yet
                     checker.And(b1.lly >= b2.lly, b1.ury <= b2.ury),
                     checker.And(b2.lly >= b1.lly, b2.ury <= b1.ury)))
         elif self.line == 'v_left':
             checker.append(b1.llx == b2.llx)
         elif self.line == 'v_right':
             checker.append(b1.urx == b2.urx)
         elif self.line == 'v_center':
             checker.append((b1.llx + b1.urx) / 2 == (b2.llx + b2.urx) / 2)
         elif self.line == 'v_any':
             checker.append(
                 checker.Or(  # We don't know which bbox is wider yet
                     checker.And(b1.urx <= b2.urx, b1.llx >= b2.llx),
                     checker.And(b2.urx <= b1.urx, b2.llx >= b1.llx)))
         else:
             checker.append(
                 checker.Or(  # h_any OR v_any
                     checker.And(b1.urx <= b2.urx, b1.llx >= b2.llx),
                     checker.And(b2.urx <= b1.urx, b2.llx >= b1.llx),
                     checker.And(b1.lly >= b2.lly, b1.ury <= b2.ury),
                     checker.And(b2.lly >= b1.lly, b2.ury <= b1.ury)))
Example #29
def multipoint_shortest_path(
    graph: nx.DiGraph,
    nodes: List[str],
    weight_key: str,
    cyclic=False,
    cyclic_sort_key=None,
):
    """Return shortest path through nodes. If cyclic, will return the cycle
    sorted with the 'lowest' node at index 0. Self cycles are not supported.

    :param graph: the graph
    :param nodes: list of nodes to find path
    :param weight_key: weight key
    :param cyclic: whether the path is cyclic
    :param cyclic_sort_key: the key function to use to sort the cycle (if cyclic)
    :return:
    """
    if cyclic_sort_key and not cyclic:
        raise ValueError(
            "cyclic_sort_key was provided but 'cyclic' was False.")
    full_path = []
    if cyclic:
        nodes = nodes + nodes[:1]
    for n1, n2 in pairwise(nodes):
        path = nx.shortest_path(graph, n1, n2, weight=weight_key)
        full_path += path[:-1]
    if not cyclic:
        full_path.append(nodes[-1])
    if cyclic:
        return sort_cycle(full_path, cyclic_sort_key)
    else:
        return full_path
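Stitching the legs with full_path += path[:-1] drops each leg's endpoint so shared waypoints are not duplicated; a small sketch on a toy graph (node names hypothetical, sort_cycle not needed in the acyclic case):

import networkx as nx
g = nx.DiGraph()
g.add_weighted_edges_from([("A", "X", 1), ("X", "B", 1), ("B", "Y", 1), ("Y", "C", 1)])
print(multipoint_shortest_path(g, ["A", "B", "C"], weight_key="weight"))
# ['A', 'X', 'B', 'Y', 'C']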
Example #30
    def __init__(self, joints, edges):
        if isinstance(joints, dict):
            self.ids = joints
        elif isinstance(joints, (list, tuple)):
            self.ids = JointInfo.make_id_map(joints)
        elif isinstance(joints, str):
            self.ids = JointInfo.make_id_map(joints.split(','))
        else:
            raise Exception

        self.names = list(sorted(self.ids.keys(), key=self.ids.get))
        self.n_joints = len(self.ids)

        if isinstance(edges, str):
            self.stick_figure_edges = []
            for path_str in edges.split(','):
                joint_names = path_str.split('-')
                for joint_name1, joint_name2 in more_itertools.pairwise(
                        joint_names):
                    if joint_name1 in self.ids and joint_name2 in self.ids:
                        edge = (self.ids[joint_name1], self.ids[joint_name2])
                        self.stick_figure_edges.append(edge)
        else:
            self.stick_figure_edges = edges

        # the index of the joint on the opposite side (e.g. maps index of left wrist to index
        # of right wrist)
        self.mirror_mapping = [
            self.ids[JointInfo.other_side_joint_name(name)]
            for name in self.names
        ]
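The edge-string format expands each '-'-separated path into consecutive joint pairs; a standalone sketch of that expansion with hypothetical joint names:

from more_itertools import pairwise

ids = {"head": 0, "neck": 1, "pelv": 2}
edges = []
for path_str in "head-neck-pelv".split(','):
    names = path_str.split('-')
    edges += [(ids[a], ids[b]) for a, b in pairwise(names)
              if a in ids and b in ids]
print(edges)  # [(0, 1), (1, 2)]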
Example #31
def sympy_multipoint_shortest_path(
    graph: nx.DiGraph,
    nodes: List[str],
    f: str,
    accumulators: dict,
    init=None,
    cutoff=None,
    cyclic=False,
    cyclic_sort_key=None,
):
    if cyclic_sort_key and not cyclic:
        raise ValueError(
            "cyclic_sort_key was provided but 'cyclic' was False.")
    full_path = []
    full_path_length = 0.0
    if cyclic:
        nodes = nodes + nodes[:1]
    for n1, n2 in pairwise(nodes):
        path_length, path = sympy_dijkstras(
            graph,
            f=f,
            source=n1,
            target=n2,
            accumulators=accumulators,
            init=init,
            cutoff=cutoff,
        )
        full_path_length += path_length
        full_path += path[:-1]
    if not cyclic:
        full_path.append(nodes[-1])
    if cyclic:
        return full_path_length, sort_cycle(full_path, cyclic_sort_key)
    else:
        return full_path_length, full_path
Example #32
 def test_monotonicity_in_rank_type(self):
     """Test monotonicity for different rank-types."""
     self.instance: RankBasedMetricResults
     metric_names, targets = [
         set(map(itemgetter(i), self.instance.data.keys())) for i in (0, 1)
     ]
     for metric_name in metric_names:
         if metric_name in {
                 "variance", "standard_deviation",
                 "median_absolute_deviation"
         }:
             continue
         norm_metric_name = metric_name
         if metric_name.startswith("hits_at_"):
             norm_metric_name = "hits_at_"
         increasing = rank_based_metric_resolver.lookup(
             norm_metric_name).increasing
         exp_sort_indices = [0, 1, 2] if increasing else [2, 1, 0]
         for target in targets:
             values = numpy.asarray([
                 self.instance.data[metric_name, target, rank_type]
                 for rank_type in (RANK_PESSIMISTIC, RANK_REALISTIC,
                                   RANK_OPTIMISTIC)
             ])
             for i, j in pairwise(exp_sort_indices):
                 assert values[i] <= values[j], metric_name
Example #33
    def __init__(
        self,
        space: gym.spaces.Dict,
        names: Iterable[str],
        *,
        embedding_size: int,
        layers: List[int],
    ):
        super().__init__()
        self.space = space

        num_embeddings = max(
            space['grid'].high.max() + 1,
            space['item'].high.max() + 1,
        )
        self.embedding = EmbeddingRepresentation(num_embeddings,
                                                 embedding_size)
        gv_models = [self._make_gv_model(name) for name in names]
        self.cat_representation = CatRepresentation(gv_models)
        self.fc_model: nn.Module

        if len(layers) > 0:
            dims = [self.cat_representation.dim] + layers
            linear_modules = [
                make_module('linear', 'relu', in_dim, out_dim)
                for in_dim, out_dim in mitt.pairwise(dims)
            ]
            relu_modules = [nn.ReLU() for _ in linear_modules]
            modules = mitt.interleave(linear_modules, relu_modules)
            self.fc_model = nn.Sequential(*modules)
            self._dim = dims[-1]

        else:
            self.fc_model = nn.Identity()
            self._dim = self.cat_representation.dim
Example #34
 def introns(self):
     introns = []
     for a, b in pairwise(self.exons):
         if self.strand == '-':
             i = Intron(b.five_prime + 1, a.three_prime - 1, self)
         else:
             i = Intron(a.three_prime + 1, b.five_prime - 1, self)
         introns.append(i)
     return introns
Example #35
 def get_rates(cls, svc_type, svc_name, path):
     """
     :return: the derivative of mgr.get_counter()
     :rtype: list[tuple[int, float]]"""
     data = mgr.get_counter(svc_type, svc_name, path)[path]
     if not data:
         return [(0, 0)]
     elif len(data) == 1:
         return [(data[0][0], 0)]
     return [(data2[0], differentiate(data1, data2)) for data1, data2 in pairwise(data)]
Example #36
def use_pairs(paths, bins):
    is_first = True

    for path_a, path_b in pairwise(paths):
        if is_first:
            is_first = False
            yield io.imread(path_a)

        else:
            ref = ref_curve(path_a, bins)
            img_b = io.imread(path_b)
            yield match(img_b, ref, bins)
Example #37
    def get_output(self, train=False):
        tag_mean = get_output(self.tag_mean, train,
                              self.layer_cache)
        black_mean = tag_mean[:, 0]
        white_mean = K.abs(tag_mean[:, 1]) + black_mean + \
            self.min_black_white_distance

        nb_pyramid_layers = 3
        input = self.get_input(train)
        image = input[:, :1]
        grid_idx = input[:, 1:]
        selection_mask = binary_mask(grid_idx, ignore=0, black=0.8, white=0.8)

        pattern = (0, 'x', 'x', 'x')
        tag = adaptive_mask(grid_idx, ignore=0,
                            black=black_mean.dimshuffle(*pattern),
                            white=white_mean.dimshuffle(*pattern))

        gauss_pyr_tag = list(pyramid_gaussian(tag, nb_pyramid_layers))
        gauss_pyr_image = list(pyramid_gaussian(image, nb_pyramid_layers))
        gauss_pyr_mask = list(pyramid_gaussian(selection_mask,
                                               nb_pyramid_layers))
        pyr_masks = [0]*(len(gauss_pyr_mask) - 1) + gauss_pyr_mask[-1:]

        lap_pyr_tag = pyramid_laplace(gauss_pyr_tag) + gauss_pyr_tag[-1:]
        lap_pyr_image = pyramid_laplace(gauss_pyr_image) + gauss_pyr_image[-1:]

        blend_pyr = []
        for mask, lap_tag, lap_image in zip(pyr_masks, lap_pyr_tag,
                                            lap_pyr_image):
            blend = lap_tag*mask + lap_image*(1 - mask)
            blend_pyr.append(blend)

        img = None
        for low, high in pairwise(reversed(blend_pyr)):
            if img is None:
                img = low
            img = upsample(img) + high
        return img
Example #38
 def test_short_case(self):
     """ensure an empty iterator if there's not enough values to pair"""
     p = mi.pairwise("a")
     self.assertRaises(StopIteration, lambda: next(p))
Example #39
    def call(self, inputs, mask=None):
        def collect_variable_weights_inputs(weights, start_idx):
            i = start_idx
            collect_weights = []
            for weight in weights:
                if weight == 'variable':
                    collect_weights.append(
                        inputs[i].dimshuffle(0, 1, 'x', 'x'))
                    i += 1
                else:
                    collect_weights.append(weight)
            return collect_weights

        def fill(lst, value=None):
            return [value] * (self.max_pyramid_layers - len(lst)) + lst

        nb_fix_inputs = 3
        offset, mask, selection = inputs[:nb_fix_inputs]
        mask = 2*mask - 1

        idx = nb_fix_inputs
        nb_variable_offsets = len([w for w in self.offset_weights
                                   if w == 'variable'])
        offset_weights = collect_variable_weights_inputs(
            self.offset_weights, idx)
        mask_weights = collect_variable_weights_inputs(
            self.mask_weights, idx + nb_variable_offsets)
        offset_weights = fill(offset_weights, value=0)
        mask_weights = fill(mask_weights, value=0)
        gauss_pyr_in = list(pyramid_gaussian(
            offset, self.offset_pyramid_layers))
        gauss_pyr_mask = list(pyramid_gaussian(mask, self.mask_pyramid_layers))
        gauss_pyr_select = list(pyramid_gaussian(
            selection, self.mask_pyramid_layers))

        pyr_select = fill(gauss_pyr_select, value=1)

        lap_pyr_in = fill(pyramid_laplace(gauss_pyr_in) + gauss_pyr_in[-1:])
        lap_pyr_mask = fill(pyramid_laplace(gauss_pyr_mask) +
                            gauss_pyr_mask[-1:])

        blend_pyr = []
        pyramids = [pyr_select, self.use_selection, lap_pyr_in, lap_pyr_mask,
                    offset_weights, mask_weights]
        assert len({len(p) for p in pyramids}) == 1, \
            "Different pyramid heights"

        for selection, use_selection, lap_in, lap_mask, offset_weight, \
                mask_weight in zip(*pyramids):

            if lap_in is None:
                lap_in = K.zeros_like(lap_mask)
            elif lap_mask is None:
                lap_mask = K.zeros_like(lap_in)
            if use_selection:
                blend = lap_in*selection*offset_weight + \
                    lap_mask*(1 - selection)*mask_weight
            else:
                blend = lap_in*offset_weight + lap_mask*mask_weight
            blend_pyr.append(blend)

        img = None
        for i, (low, high) in enumerate(pairwise(reversed(blend_pyr))):
            if img is None:
                img = low
            img = upsample(img) + high
        return img
Example #40
 def test_base_case(self):
     """ensure an iterable will return pairwise"""
     p = mi.pairwise([1, 2, 3])
     self.assertEqual([(1, 2), (2, 3)], list(p))
Example #41
def get_hsv_values(path):
    img = io.imread(path)
    hsv = color.rgb2hsv(img)
    values = hsv[:, :, 2].flatten()
    return values

def open_grayscale(path):
    return io.imread(path, as_grey=True)

def mean_hist_delta(path_a, path_b, bins):
    values_a = get_hsv_values(path_a)
    values_b = get_hsv_values(path_b)
    # values_a = open_grayscale(path_a).flatten()
    # values_b = open_grayscale(path_b).flatten()

    hist_a, _ = numpy.histogram(values_a, bins, density=True)
    hist_b, _ = numpy.histogram(values_b, bins, density=True)

    delta = numpy.absolute(hist_b - hist_a)
    return numpy.mean(delta)


if __name__ == '__main__':
    args = parser.parse_args()
    paths = list(args.images)
    bins = 256

    for path_a, path_b in pairwise(paths):
        m = mean_hist_delta(path_a, path_b, bins)
        if m > 0.6:
            print(os.path.basename(path_a), os.path.basename(path_b), m)
Example #42
def pyramid_laplace(gauss_pyr):
    return [high - upsample(low)
            for high, low in pairwise(gauss_pyr)]