def bar(x: tc.all(tc.re("abcdef"), tc.re("defghi"), tc.re("^abc"))):
    pass

def foo_all(arg: tc.all(tc.any(bytes, bytearray), complete_blocks)):
    pass

foo_all(b"x" * 512)              # OK

def foo(x: tc.all()):
    pass
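These annotated signatures only enforce anything once the function is wrapped by the library's checking decorator. Below is a minimal sketch of how the composed predicates behave at call time; it assumes the typecheck-decorator package is importable as tc, that a failed argument check raises tc.InputParameterError, and it substitutes a hypothetical complete_blocks predicate consistent with the foo_all(b"x" * 512) call above.

import typecheck as tc

def complete_blocks(arg):
    # hypothetical predicate: the payload must be a whole number of 512-byte blocks
    return len(arg) % 512 == 0

@tc.typecheck
def foo_all(arg: tc.all(tc.any(bytes, bytearray), complete_blocks)):
    pass

foo_all(b"x" * 512)                  # OK: bytes, and exactly one complete block
foo_all(bytearray(b"x" * 1024))      # OK: bytearray also satisfies tc.any(bytes, bytearray)
try:
    foo_all(b"x" * 100)              # rejected: fails the complete_blocks predicate
except tc.InputParameterError:
    print("not a whole number of blocks")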
Example 4
import numpy as np
import phat


def rips_filtration(max_dim: tc.all(int, gte_zero),
                    max_scale: tc.all(tc.any(int, float), gt_zero),
                    dist_mat: array_like_2d):
    """
    Builds a boundary matrix for the Vietoris-Rips filtration up to dimension `max_dim`.

    Also builds the corresponding list of bigrades. The construction closely
    follows the "incremental algorithm" from Zomorodian's paper on fast
    Vietoris-Rips computation, with some modifications to store the boundary
    matrix and filtration information. That algorithm is in turn based on a
    version of the Bron-Kerbosch algorithm.

    Parameters
    ----------

    max_dim: int >= 0
        the highest dimension to compute
    max_scale: float
        the highest scale (distance) to consider
    dist_mat: 2D array
        an n x n distance matrix, which may be lower-triangular.

    Returns
    -------

    pairs: list of (birth, death, dimension) tuples
        The barcodes up to dimension `max_dim` for the truncated Vietoris-Rips
        filtration, including only simplices whose scale of appearance is
        <= `max_scale`. Each tuple represents one interval in the barcode and
        has the form (birth, death, dimension).
    """
    sorted_simplices = _rips_simplices(max_dim, max_scale, dist_mat)
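    # The coboundary matrix built below lists simplices in reverse filtration
    # order, so index j in that order corresponds to index (len - 1 - j) in
    # sorted_simplices; len_minus_one is kept around for that conversion.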
    len_minus_one = len(sorted_simplices) - 1
    cobdy_matrix_pre = _create_coboundary_matrix(sorted_simplices, max_dim)

    cobdy_matrix = phat.boundary_matrix(
        representation=phat.representations.bit_tree_pivot_column)
    cobdy_matrix.columns = cobdy_matrix_pre

    # call Bryn's PHAT wrapper for the persistence computation
    pairs = cobdy_matrix.compute_persistence_pairs()

    # Next, rescale the pairs to their original filtration values, eliminating
    # pairs with the same birth and death time. In keeping with our chosen
    # output format, we also add the dimension to each pair.
    scaled_pairs = []
    for i in range(len(pairs)):
        cobirth = sorted_simplices[len_minus_one - pairs[i][0]][1]
        codeath = sorted_simplices[len_minus_one - pairs[i][1]][1]
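            # The pairs come from the coboundary (cohomology) computation, so
            # after mapping back to filtration scales the smaller value is the
            # birth; pairs with equal birth and death carry no interval.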
        if codeath < cobirth:
            dimension = len(
                sorted_simplices[len_minus_one - pairs[i][1]][0]) - 1
            scaled_pairs.append((codeath, cobirth, dimension))

    # Add the intervals with endpoint inf.
    # To do this, we first construct an array paired_indices such that
    # paired_indices[j] = 1 if the j-th simplex (in the coboundary order)
    # appears in a pair, and paired_indices[j] = 0 otherwise.

    paired_indices = np.zeros(len(sorted_simplices))
    for i in range(len(pairs)):
        paired_indices[pairs[i][0]] = 1
        paired_indices[pairs[i][1]] = 1
    for i in range(len(paired_indices)):
        if paired_indices[i] == 0:
            birth = sorted_simplices[len_minus_one - i][1]
            dimension = len(sorted_simplices[len_minus_one - i][0]) - 1
            scaled_pairs.append((birth, float("inf"), dimension))
    return scaled_pairs
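For orientation, here is a hedged usage sketch of the function above. The 3x3 distance matrix is made up for illustration, and the sketch assumes numpy, Bryn's PHAT bindings, and the module's own helpers (tc, the gte_zero/gt_zero/array_like_2d predicates, _rips_simplices and _create_coboundary_matrix) are all available.

import numpy as np

# Illustrative 3-point metric space (symmetric distance matrix).
dist_mat = np.array([[0.0, 1.0, 2.0],
                     [1.0, 0.0, 1.5],
                     [2.0, 1.5, 0.0]])

# Compute barcodes up to dimension 1, truncating the filtration at scale 2.0.
intervals = rips_filtration(max_dim=1, max_scale=2.0, dist_mat=dist_mat)
for birth, death, dimension in intervals:
    print("dim %d: [%s, %s)" % (dimension, birth, death))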
Example 5
def foo_all(arg: tc.all(tc.any(bytes, bytearray), complete_blocks)):
    pass
Example 6
def bar(x: tc.all(tc.re("abcdef"), tc.re("defghi"), tc.re("^abc"))):
    pass
Example 7
def foo(x: tc.all()):
    pass