Example #1
def vietoris_rips(X, skeleton=5, max=10000):
    """ Generate the Vietoris-Rips complex on the given set of points in 2D.
    Only simplices up to dimension skeleton are computed.
    The max parameter denotes the distance cut-off value.
    """
    distances = PairwiseDistances(X.tolist())
    rips = Rips(distances)
    simplices = Filtration()
    rips.generate(skeleton, max, simplices.append)
    # Record each simplex's appearance scale as its filtration value.
    for sx in simplices:
        sx.data = rips.eval(sx)
    return simplices
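A minimal usage sketch, not part of the original snippet: it assumes Dionysus 1's Python bindings (which provide PairwiseDistances, Rips, and Filtration) are importable as dionysus, and uses an arbitrary random point set.

import numpy as np
from dionysus import PairwiseDistances, Rips, Filtration

# Hypothetical input: 40 random points in the plane.
X = np.random.rand(40, 2)
filtration = vietoris_rips(X, skeleton=2, max=0.5)
# Each simplex now carries the scale at which it appears in .data.
scales = [sx.data for sx in filtration]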
Example #2
def main(filename, skeleton, max, prime=11):
    points = [p for p in points_file(filename)]
    print '#', time.asctime(), "Points read"
    distances = PairwiseDistances(points)
    # ExplicitDistances speeds up generation of the Rips complex at the expense of memory usage
    distances = ExplicitDistances(distances)
    rips = Rips(distances)
    print '#', time.asctime(), "Rips initialized"

    simplices = []
    rips.generate(skeleton, max, simplices.append)
    print '#', time.asctime(), "Generated complex: %d simplices" % len(simplices)

    # While this step is unnecessary (Filtration below can be passed rips.cmp),
    # it greatly speeds up the running times
    for s in simplices:
        s.data = rips.eval(s)
    print '#', time.asctime(), simplices[0], '...', simplices[-1]

    simplices.sort(data_dim_cmp)
    print '#', time.asctime(), "Simplices sorted"

    ch = CohomologyPersistence(prime)
    complex = {}

    for s in simplices:
        i, d = ch.add([complex[sb] for sb in s.boundary],
                      (s.dimension(), s.data),
                      store=(s.dimension() < skeleton))
        complex[s] = i
        if d:
            dimension, birth = d
            print dimension, birth, s.data
        # otherwise the simplex gave birth to a new cocycle; classes that
        # never die are reported in the loop over ch below

    for ccl in ch:
        dimension, birth = ccl.birth
        if dimension >= skeleton: continue
        print dimension, birth, 'inf'  # unpaired class: born at this value, never dies
        print "# Cocycle at (dim=%d, birth=%f)" % ccl.birth
        for e in ccl:
            print "#  ", e.si.order, normalized(e.coefficient, prime)
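The cocycle loop above uses a normalized helper that the snippet does not define. A plausible sketch, offered only as an assumption (it folds mod-p coefficients into a symmetric range around zero so that, e.g., prime - 1 is reported as -1):

def normalized(coefficient, prime):
    # Assumed helper: map coefficients from [0, prime) into
    # roughly [-prime/2, prime/2] for readability.
    if coefficient > prime // 2:
        return coefficient - prime
    return coefficient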
Example #3
def main(filename, skeleton, max):
    points = np.random.rand(120, 3)  #[p for p in points_file(filename)]
    print(points)
    distances = PairwiseDistances(points)
    # distances = ExplicitDistances(distances)           # speeds up generation of the Rips complex at the expense of memory usage
    rips = Rips(distances)
    print time.asctime(), "Rips initialized"

    simplices = Filtration()
    rips.generate(skeleton, max, simplices.append)
    print time.asctime(), "Generated complex: %d simplices" % len(simplices)

    # While this step is unnecessary (Filtration below can be passed rips.cmp),
    # it greatly speeds up the running times
    for s in simplices:
        s.data = rips.eval(s)
    print time.asctime(), simplices[0], '...', simplices[-1]

    simplices.sort(data_dim_cmp)  # could be rips.cmp if s.data for s in simplices is not set
    print time.asctime(), "Set up filtration"

    p = StaticPersistence(simplices)
    print time.asctime(), "Initialized StaticPersistence"

    p.pair_simplices()
    print time.asctime(), "Simplices paired"

    print "Outputting persistence diagram"
    smap = p.make_simplex_map(simplices)
    for i in p:
        if i.sign():
            b = smap[i]

            if b.dimension() >= skeleton: continue

            if i.unpaired():
                print b.dimension(), b.data, "inf"
                continue

            d = smap[i.pair()]
            print b.dimension(), b.data, d.data
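If the persistence pairs are needed programmatically rather than printed, the pairing loop above can be wrapped in a small helper; a sketch under the same Dionysus 1 API (the function name is an assumption):

def collect_pairs(p, smap, skeleton):
    # Gather (dimension, birth, death) triples from a paired StaticPersistence.
    pairs = []
    for i in p:
        if not i.sign():
            continue
        b = smap[i]
        if b.dimension() >= skeleton:
            continue
        death = float('inf') if i.unpaired() else smap[i.pair()].data
        pairs.append((b.dimension(), b.data, death))
    return pairs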
Example #4
def bench_cluster(X, y, pca_n_comp):
    n = len(np.unique(y))
    pca = PCA(pca_n_comp)
    X_ = pca.fit_transform(X)
    sc = SpectralClustering(n)
    km = KMeans(n)
    sc_pred = sc.fit_predict(X_)
    km_pred = km.fit_predict(X_)
    distances = PairwiseDistances(X_.tolist())
    distances = ExplicitDistances(distances)
    singlel_pred = fcluster(linkage(ssd.squareform(distances.distances)),
                            n,
                            criterion='maxclust')
    print "single-linkage clustering prediction:", singlel_pred
    print "single-linkage clustering score:", adjusted_rand_score(y, singlel_pred), mutual_info_score(y, singlel_pred)
    print "spectral clustering prediction:", sc_pred
    print "spectral clustering score:", adjusted_rand_score(y, sc_pred), mutual_info_score(y, sc_pred)
    print "kmeans clustering prediction:", km_pred
    print "kmeans clustering score:", adjusted_rand_score(y, km_pred), mutual_info_score(y, km_pred)
    print "ground truth labels:", y
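A hypothetical invocation on synthetic labelled data (make_blobs is scikit-learn's blob generator; the sizes and the number of PCA components are arbitrary):

from sklearn.datasets import make_blobs

X, y = make_blobs(n_samples=200, centers=4, n_features=10, random_state=0)
bench_cluster(X, y, pca_n_comp=2)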
Example #5
def compute_rips_diagram(points, max_hom, max_death):
    """Workflow to construct a Persistence Diagram object from the
    Vietoris-Rips filtration of the given point set.

    Arguments
        1. points: list of points, each a sequence of coordinates
        2. max_hom: maximum homology dimension to compute
        3. max_death: distance cut-off for the Rips complex

    Returns
        1. dg.PersistenceDiagram built from the resulting birth/death pairs

    Raises
        None

    """

    # get pairwise distances
    distances = PairwiseDistances(points)

    rips = Rips(distances)
    simplices = Filtration()
    rips.generate(int(max_hom + 1), float(max_death), simplices.append)

    # step to speed up computation
    for s in simplices:
        s.data = rips.eval(s)

    # compute persistence
    simplices.sort(data_dim_cmp)
    p = StaticPersistence(simplices)
    p.pair_simplices()

    # construct persistence diagram
    smap = p.make_simplex_map(simplices)
    pd = _persistence_diagram_rips(smap, p, max_hom, max_death)

    return dg.PersistenceDiagram(PD=pd)
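A hypothetical call, assuming dg and _persistence_diagram_rips are available from the same module as the function above:

import numpy as np

points = np.random.rand(100, 3).tolist()
# Homology up to dimension 1, edges no longer than 0.75 (both values arbitrary).
diagram = compute_rips_diagram(points, max_hom=1, max_death=0.75)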
Example #6
def get_max_dist(X):
    # Maximum pairwise distance (diameter) of the point set X.
    distances = PairwiseDistances(X.tolist())
    distances = ExplicitDistances(distances)
    return max(flatten(distances.distances))
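A hypothetical use: the returned diameter is a natural upper bound for the distance cut-off passed to Rips.generate (or to vietoris_rips from Example #1):

import numpy as np

X = np.random.rand(30, 2)
cutoff = get_max_dist(X)  # maximum pairwise distance in X
filtration = vietoris_rips(X, skeleton=2, max=cutoff)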
Example #7
def _pairwise_distances(self, X_list):
    # Store the pairwise-distance object for the given point list on the instance.
    self.distances = PairwiseDistances(X_list)
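A minimal context in which such a method could live; the class and the second method are assumptions, added only to make the snippet self-contained:

from dionysus import PairwiseDistances, Rips, Filtration

class RipsBuilder(object):
    def _pairwise_distances(self, X_list):
        # Store the pairwise-distance object for the given point list.
        self.distances = PairwiseDistances(X_list)

    def build(self, X_list, skeleton=2, cutoff=1.0):
        # Assumed companion method: generate the Rips filtration up to the
        # given skeleton dimension and distance cut-off.
        self._pairwise_distances(X_list)
        rips = Rips(self.distances)
        filtration = Filtration()
        rips.generate(skeleton, cutoff, filtration.append)
        return filtration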