def persistence_image_features(X_train, X_test, pixels=[20, 20], spread=1):
    start = time.time()
    # One PersImage transformer suffices for both splits.
    pim = PersImage(pixels=pixels, spread=spread)
    imgs_train = pim.transform(X_train)
    X_train_features = np.array([img.flatten() for img in imgs_train])
    imgs_test = pim.transform(X_test)
    X_test_features = np.array([img.flatten() for img in imgs_test])
    print("Total Time (Persistence Images): ", time.time() - start)
    return X_train_features, X_test_features
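# A minimal call sketch with hypothetical inputs: each element of X_train and
# X_test is assumed to be a persistence diagram, i.e. an (n, 2) array of
# (birth, death) pairs, e.g. as produced by ripser.
import numpy as np

X_train = [np.array([[0.0, 1.0], [0.5, 2.0]]) for _ in range(5)]
X_test = [np.array([[0.1, 0.9]]) for _ in range(2)]
train_feats, test_feats = persistence_image_features(X_train, X_test)
print(train_feats.shape)  # (5, 400): one flattened 20x20 image per diagram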
Example No. 2

def generate_PI(PD_list, emb_dim, tau, pixels, spread):
    # emb_dim and tau are accepted for interface compatibility but unused here.
    pim = PersImage(pixels=[pixels, pixels], spread=spread)
    PI_list = pim.transform(PD_list)
    return np.array([img.flatten() for img in PI_list])
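# Hypothetical usage: two small diagrams vectorized on a 15x15 grid (emb_dim
# and tau only feed into bookkeeping by the caller).
import numpy as np

PDs = [np.array([[0.0, 1.0], [0.2, 0.7]]), np.array([[0.1, 0.5]])]
PIs = generate_PI(PDs, emb_dim=3, tau=2, pixels=15, spread=1)
print(PIs.shape)  # (2, 225)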
Example No. 3
def persistence_image_features(X_train, pixels=[20, 20], spread=1):
    # Benchmarking variant: the features are computed only so that the
    # transform can be timed; the elapsed time is what gets returned.
    start = time.time()
    pim = PersImage(pixels=pixels, spread=spread)
    imgs_train = pim.transform(X_train)
    X_train_features = np.array([img.flatten() for img in imgs_train])
    timing = time.time() - start
    return timing
Example No. 4
def subj_to_pims(sbj, sdict, px, sd, nobs):
    """
    generate persistence images for all gestures for a given subject
    INPUTS
    sbj - subject number
    sdict - dict of gestures performed by a subject
    px - pixel dimension/ resolution; e.g. px=20 gives 20x20 persistence image
    sd - persistence image concentration parameter (gaussian)
    nobs - number of observations per subj (total num of gests they performed)

    OUTPUTS
    array of gestures made by subject
    """
    # instantiate persistence image generator & vietoris-rips complex generators
    rips = Rips(maxdim=1, verbose=False)  # 1-D homology rips complex
    pim = PersImage(pixels=[px, px], spread=sd)
    # each row has px**2 persistence-image pixels + 2 cols for subj & gest labels
    res_mat = np.zeros(px**2 * nobs + 2 * nobs).reshape(nobs, -1)

    for v, (gnum, garray) in enumerate(sdict.items()):
        # generate rips complex on points; slice out time col and gesture label
        dgms = rips.fit_transform(garray[:, 1:-1])
        img = pim.transform(dgms[1])  # persistence image of H1 (1-cycles)
        obs_vec = np.r_[img.flatten(), int(gnum[0]), int(sbj)]
        res_mat[v, :] = obs_vec

    return res_mat
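# Hypothetical call: keys of sdict are gesture ids whose first character is the
# gesture number (e.g. "3_1"); each array's first column is time and its last
# column is the gesture label, matching the slicing above.
import numpy as np

sdict = {"3_1": np.hstack([np.arange(60.0)[:, None],   # time
                           np.random.rand(60, 8),      # sensor channels
                           np.full((60, 1), 3.0)])}    # gesture label
mat = subj_to_pims(sbj=1, sdict=sdict, px=20, sd=0.05, nobs=len(sdict))
print(mat.shape)  # (1, 20*20 + 2)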
Example No. 5
def generate_persistence_images(directory, threshold, pixel, spreadval,
                                rangemin, rangemax):
    for file in os.listdir(directory):
        dgm0 = np.squeeze(sio.loadmat(os.path.join(directory, file))['PD'])
        # Compute lifetimes for thresholding the persistence diagram
        lifetime = np.ones(len(dgm0))

        for h in range(len(dgm0)):
            lifetime[h] = dgm0[h][1] - dgm0[h][0]

        # Keep only points whose lifetime exceeds the threshold
        isvalid = np.greater(lifetime, threshold)
        reduced_pd = dgm0[isvalid]

        print("Beginning Persistence Image Generation for " + file)
        print("Length of PD: " + str(len(dgm0)) + " points")
        print("Length of reduced PD: " + str(len(reduced_pd)) + " points")

        pim = PersImage(pixels=[pixel, pixel],
                        spread=spreadval,
                        specs={
                            "minBD": rangemin,
                            "maxBD": rangemax
                        })
        imgs = pim.transform(reduced_pd)
        imgs = imgs / np.max(imgs)  # normalize to [0, 1]
        imgs = imgs.reshape((-1))

        # Note: the ./Persistence_Images/ directory must already exist.
        filename = "./Persistence_Images/" + file
        sio.savemat(filename, dict([('PersImg', imgs)]))
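# Sketch of the expected on-disk layout (assumed): each .mat file in the input
# directory holds a 'PD' array of (birth, death) pairs. All paths here are
# hypothetical.
import os
import numpy as np
import scipy.io as sio

os.makedirs("./pds", exist_ok=True)
os.makedirs("./Persistence_Images", exist_ok=True)
sio.savemat("./pds/sample.mat", {"PD": np.array([[0.0, 0.4], [0.1, 2.0]])})
generate_persistence_images("./pds/", threshold=0.5, pixel=20,
                            spreadval=0.05, rangemin=0, rangemax=2)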
Example No. 6
    def test_empty_diagram_list(self):
        dgm1 = [np.array([[2, 3]]), np.zeros((0, 2))]
        pim1 = PersImage(pixels=(10, 10))
        res1 = pim1.transform(dgm1)
        assert np.all(res1[1] == np.zeros((10, 10)))

        dgm2 = [np.zeros((0, 2)), np.array([[2, 3]])]
        pim2 = PersImage(pixels=(10, 10))
        res2 = pim2.transform(dgm2)
        assert np.all(res2[0] == np.zeros((10, 10)))

        dgm3 = [np.zeros((0, 2)), np.zeros((0, 2))]
        pim3 = PersImage(pixels=(10, 10))
        res3 = pim3.transform(dgm3)
        assert np.all(res3[0] == np.zeros((10, 10)))
        assert np.all(res3[1] == np.zeros((10, 10)))
Example No. 7
def generate_persistence_image(dgm0, threshold, pixel, spreadval, rangemin,
                               rangemax):

    # Compute lifetimes for thresholding the persistence diagram
    lifetime = np.ones(len(dgm0))

    for h in range(len(dgm0)):
        lifetime[h] = dgm0[h][1] - dgm0[h][0]

    # Keep only points whose lifetime exceeds the threshold
    isvalid = np.greater(lifetime, threshold)
    reduced_pd = dgm0[isvalid]

    print("Length of PD: " + str(len(dgm0)) + " points")
    print("Length of reduced PD: " + str(len(reduced_pd)) + " points")

    pim = PersImage(pixels=[pixel, pixel],
                    spread=spreadval,
                    specs={
                        "minBD": rangemin,
                        "maxBD": rangemax
                    })
    imgs = pim.transform(reduced_pd)
    imgs = imgs / np.max(imgs)  # normalize to [0, 1]
    # imgs = imgs.reshape((-1))

    return imgs
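# In-memory variant of Example No. 5, on a hypothetical diagram:
import numpy as np

dgm = np.array([[0.0, 0.4], [0.1, 2.0], [0.3, 0.35]])
img = generate_persistence_image(dgm, threshold=0.5, pixel=20,
                                 spreadval=0.05, rangemin=0, rangemax=2)
print(img.shape)  # (20, 20)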
Example No. 8
def test_integer_diagrams():
    """ This test is inspired by gh issue #3 by gh user muszyna25.

    Integer diagrams return nan values.

    This does not work: dgm = [[0, 2], [0, 6], [0, 8]];

    This one works fine: dgm = [[0.0, 2.0], [0.0, 6.0], [0.0, 8.0]];

    """

    dgm = [[0, 2], [0, 6], [0, 8]]
    dgm2 = [[0.0, 2.0], [0.0, 6.0], [0.0, 8.0]]
    pim = PersImage()
    res = pim.transform(dgm2)
    res2 = pim.transform(dgm)
    np.testing.assert_array_equal(res, res2)
Example No. 9
    def test_multiple_diagrams(self):
        pim = PersImage(pixels=(3, 3))

        diagram1 = np.array([[0, 1], [1, 1], [3, 5]])
        diagram2 = np.array([[0, 1], [1, 1], [3, 6]])
        imgs = pim.transform([diagram1, diagram2])

        assert len(imgs) == 2
        assert imgs[0].shape == imgs[1].shape
Example No. 10

def generatePI(allPDs, segmentLength, embDim, tau, PCA_n_components, pixels,
               spread, norm, savePI):

    pim = PersImage(pixels=[pixels, pixels], spread=spread)
    imgs = pim.transform(allPDs)
    imgs_array = np.array([img.flatten() for img in imgs])
    if savePI:
        np.save(
            'PI_Len%s_Dim%s_Tau%s_PCA%s_p%s_s%s_%s.npy' %
            (segmentLength, embDim, tau, PCA_n_components, pixels, spread,
             norm), imgs_array)
    return imgs_array
Example No. 11
    def persistence(self, k_max=1, type="standard", representation="surface", shape=(16, 16)):
        # `diagrams` is assumed to be computed earlier in the enclosing class
        # (e.g. via a Rips filtration up to dimension k_max).
        if representation == "surface":
            pi = PersImage(spread=0.025, pixels=shape, verbose=False)
            result = np.array(pi.transform(diagrams[1]))
        elif representation == "landscape":
            raise NotImplementedError()  # TODO
        elif representation == "diagram":
            result = diagrams
        else:
            raise ValueError("Representation should be 'surface', 'landscape', or 'diagram'")

        return result
Example No. 12
def calculatePI(val=None):
    # Slider callback: recompute and redraw the H1 persistence image whenever
    # the resolution or spread slider changes (matplotlib passes the new value
    # as `val`, which is unused since both sliders are read directly).
    global dgms, first
    if not first:
        r = int(sld_resolution.val)
        v = sld_spread.val
    else:  # first call: fall back to default resolution and spread
        r = 10
        v = 1
    pim = PersImage(spread=v, pixels=[r, r], verbose=False)
    img = pim.transform(dgms[1])
    ax_1 = plt.subplot(233)
    plt.title("PI for $H_1$\nspread = " + str(v)[0:4] + "\n" + str(r) + "x" + str(r))
    pim.show(img, ax_1)
    first = False
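# How the sliders referenced above might be wired up (a sketch; the widget
# names sld_resolution and sld_spread are taken from the callback, everything
# else here is assumed):
from matplotlib.widgets import Slider

ax_res = plt.axes([0.2, 0.05, 0.6, 0.03])
ax_spr = plt.axes([0.2, 0.01, 0.6, 0.03])
sld_resolution = Slider(ax_res, "resolution", 5, 50, valinit=10, valstep=1)
sld_spread = Slider(ax_spr, "spread", 0.1, 2.0, valinit=1.0)
sld_resolution.on_changed(calculatePI)
sld_spread.on_changed(calculatePI)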
Example No. 13

def compute_single_diagram_statistics(dgm, threshold=None, dim=0):
    """
    Computes statistics of a single persistence diagram dgm, given as an
    array with 2 columns (birth, death).
    """

    if dgm is None:  # Return list of stat names
        basenames = compute_stats(None)
        stat_names = []
        # Birth stats
        stat_names.append(['b' + name for name in basenames])
        # Death stats
        stat_names.append(['d' + name for name in basenames])
        # Persistence stats
        stat_names.append(['p' + name for name in basenames])

        # Miscellaneous stats
        stat_names.append([
            'bdcor', 'bdcencor', 'totalper', 'bmaxper', 'dmaxper', 'bminper',
            'dminper', 'Nfeatures'
        ])
        # Betti curve
        stat_names.append(['betticurve'])
        # Persistence image
        stat_names.append(['PI'])
        return stat_names

    # Else:

    Nbins = 100  # Number of betti curve bins
    stats = []

    dgm = clean_births_deaths(dgm, threshold, dim)
    b = dgm[:, 0]
    d = dgm[:, 1]

    if b.size > 0:

        p = d - b  # persistence
        stats.append(compute_stats(b))
        stats.append(compute_stats(d))
        stats.append(compute_stats(p))

        bmean = stats[0][3]
        dmean = stats[1][3]

        maxfeatind = np.argmax(p)
        minfeatind = np.argmin(p)
        stats.append([
            np.sum(b * d),
            np.sum((b - bmean) * (d - dmean)),
            np.sum(p), b[maxfeatind], d[maxfeatind], b[minfeatind],
            d[minfeatind], b.size
        ])

        betticurve = compute_betti_curve(dgm)

        # Integrate the Betti curve over each of the Nbins bins
        bettiints, bin_edges = integrate_binned_betti_curve(
            betticurve, Nbins, threshold)

        stats.append([bettiints])

        pim = PersImage(spread=0.05, pixels=[10, 10], verbose=False)
        img = pim.transform(dgm)
        stats.append([img])

    else:
        stats = None

    return stats, threshold / Nbins
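# Hypothetical call, assuming the helpers referenced above (clean_births_deaths,
# compute_stats, compute_betti_curve, integrate_binned_betti_curve) are in scope:
import numpy as np

dgm = np.array([[0.0, 0.8], [0.2, 1.5], [0.1, 0.3]])
stats, bin_width = compute_single_diagram_statistics(dgm, threshold=2.0, dim=0)
stat_names = compute_single_diagram_statistics(None)  # names only, same layout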
Example No. 14
def do_full_run(data,
                quality=50,
                spread=0.05,
                kernel="gaussian",
                weighting="linear"):
    """
    Does the full PI analysis and some training
    saves the results in the results folder
    :param quality: resulution (int)
    :param spread:  variance in smoothing (float)
    :param kernel:  cdf to be used for smoothing (string: gaussian, laplace, lognorm, gamma)
    :param weighting: weighting to be used (string: linear, pm_linear, logistic)
    :return: reports for H0 and H1
    """
    pixels = quality * quality
    PI_vectors_H0 = np.zeros((6 * m, pixels))
    PI_vectors_H1 = np.zeros((6 * m, pixels))

    target = np.zeros(6 * m)
    labels = {i: shape for i, shape in enumerate(data)}
    index = 0
    print("Creating the PI images for the 6 figures")
    for i in tqdm(range(6)):
        shape = labels[i]
        shape_data = data[shape]
        for j in range(m):
            pim_0 = PersImage(spread=spread,
                              pixels=[quality, quality],
                              kernel_type=kernel,
                              weighting_type=weighting,
                              verbose=False,
                              specs={
                                  "maxBD": 2,
                                  "minBD": 0,
                              })
            pim_1 = PersImage(spread=spread,
                              pixels=[quality, quality],
                              kernel_type=kernel,
                              weighting_type=weighting,
                              verbose=False,
                              specs={
                                  "maxBD": 2,
                                  "minBD": 0,
                              })

            PI_data = shape_data[j, :, :]
            rips = Rips(verbose=False)
            dgms = rips.fit_transform(PI_data)

            PI_data_H0 = pim_0.transform(dgms[0])
            PI_data_H1 = pim_1.transform(dgms[1])

            PI_data_H1 = PI_data_H1.reshape(pixels)
            PI_data_H0 = PI_data_H0.reshape(pixels)

            target[index] = int(i)

            PI_vectors_H0[index, :] = PI_data_H0
            PI_vectors_H1[index, :] = PI_data_H1
            index += 1

    PI_vectors_H0_train, PI_vectors_H0_test, y_train_H0, y_test_H0 = train_test_split(
        PI_vectors_H0, target, test_size=0.33)
    PI_vectors_H1_train, PI_vectors_H1_test, y_train_H1, y_test_H1 = train_test_split(
        PI_vectors_H1, target, test_size=0.33)

    y_H0_hat_km, y_H0_hat_lr = do_training(PI_vectors_H0_train,
                                           PI_vectors_H0_test, y_train_H0)
    y_H1_hat_km, y_H1_hat_lr = do_training(PI_vectors_H1_train,
                                           PI_vectors_H1_test, y_train_H1)

    reports_H0_gnb, reports_H0_lr = do_analysis(y_test_H0, y_H0_hat_km,
                                                y_H0_hat_lr, labels)
    reports_H1_gnb, reports_H1_lr = do_analysis(y_test_H1, y_H1_hat_km,
                                                y_H1_hat_lr, labels)

    reports_H0_gnb.to_csv("results/{}_{}_{}_{}_H0_gnb.csv".format(
        quality, spread, kernel, weighting))
    reports_H0_lr.to_csv("results/{}_{}_{}_{}_H0_lr.csv".format(
        quality, spread, kernel, weighting))
    reports_H1_gnb.to_csv("results/{}_{}_{}_{}_H1_gnb.csv".format(
        quality, spread, kernel, weighting))
    reports_H1_lr.to_csv("results/{}_{}_{}_{}_H1_lr.csv".format(
        quality, spread, kernel, weighting))

    print(reports_H0_gnb)
    print(reports_H0_lr)
    print(reports_H1_gnb)
    print(reports_H1_lr)
    return reports_H0_gnb, reports_H0_lr, reports_H1_gnb, reports_H1_lr
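# Hypothetical driver: `data` is assumed to map six shape names to arrays of
# point clouds of shape (m, n_points, dim); m, do_training, and do_analysis
# come from the surrounding script.
reports = do_full_run(data, quality=20, spread=0.1,
                      kernel="gaussian", weighting="linear")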
Example No. 15

from persim import PersImage

s = (80, 100)
Persims = np.zeros(s)

import glob
#by changing Dim0 to Dim1 you can read the other Dim data
txt_files = glob.glob("Dim1-resized/*.txt")
print(len(txt_files))
pim = PersImage(pixels=(10, 10))
for i in range(len(txt_files)):
    print(txt_files[i])
    D = np.genfromtxt(txt_files[i], skip_header=1)
    D = np.array(D)
    D = D[:, [1, 2]]  # keep the (birth, death) columns
    img = pim.transform(D)
    Persims[i][:] = img.flatten()

print(Persims)

# for 8 classes:
# c = np.array([1, 2, 3, 4, 5, 6, 7, 8])
# ccol = np.repeat(c, 10)

# for 4 classes:
c = np.array([1, 2, 3, 4])
ccol = np.repeat(c, 20)

cname = np.array(
    ["Apple", "Bell", "Bird", "Bottle", "Brick", "Children", "Key", "Rat"])
ccolnames = np.repeat(cname, 10)
Example No. 16
    # generate SSMs for each gesture
    raw_ssm_lst = [np.zeros(shape=(a.shape[0], a.shape[0])) for a in arrays]
    for n, a in enumerate(arrays):
        for i in range(a.shape[0]):
            for j in range(a.shape[0]):
                raw_ssm_lst[n][i, j] = cumulated_ts_2(a[i, :], a[j, :])

    # smooth SSM images
    for r, s in enumerate(raw_ssm_lst):
        raw_ssm_lst[r] = gaussian_filter(s, sigma=1)

    # generate persistence images
    for n, s in enumerate(raw_ssm_lst):
        pd = lower_star_img(s)
        img = pim.transform(pd[:-1, :])  # drop the infinite-persistence point
        raw_pim_mat[n, :-2] = img.reshape(1, -1)
        raw_pim_mat[n, -2] = subj_lab[n]
        raw_pim_mat[n, -1] = gest_lab[n]

    # save matrix as DataFrame
    raw_pim_df = DataFrame(raw_pim_mat)
    cnames = ["px"+str(i) for i in raw_pim_df.columns]
    cnames[-2:] = ["subj", "gest"]
    raw_pim_df.columns = cnames
    raw_pim_df.to_csv("./Data/raw_pim_vectors.csv", index=False)
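    # Downstream, the saved vectors can be reloaded with, e.g. (sketch):
    #   raw_pim_df = read_csv("./Data/raw_pim_vectors.csv")
    #   X = raw_pim_df.drop(columns=["subj", "gest"]).values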

    ############################################################################
    ################################ ISOMAP SSMs ###############################
    ############################################################################
Example No. 17
# Fragment from an enclosing parameter sweep over segmentLength, embDim, tau,
# PCA_n_components, pixels, and spread:
#     spread = spreadL[s]
#     print('processing segmentLength', segmentLength, 'embDim', embDim,
#           'tau', tau, 'PCA_n_components', PCA_n_components,
#           'pixels', pixels, 'spread', spread, '\n\n')
#     PI0 = generatePI(allPDs, segmentLength, embDim, tau, PCA_n_components, pixels, spread, savePI, norm)
#     allPIs = np.load('PI_Len%s_Dim%s_Tau%s_PCA%s_p%s_s%s_%s.npy' % (segmentLength, embDim, tau, PCA_n_components, pixels, spread, norm))

x = allSegments[:4]
allTimeDelayedSeg = timeDelayPCA(x, segmentLength, embDim, tau, PCA_n_components)

from persim import PersImage, plot_diagrams
from ripser import Rips

rips = Rips(maxdim=1, coeff=2)
diagrams_h1 = [rips.fit_transform(data)[1] for data in allTimeDelayedSeg]
pim = PersImage(pixels=[15, 15], spread=1)
imgs = pim.transform(diagrams_h1)


fig = plt.figure(figsize=(5,5))
ax = fig.subplots()
ax.imshow(imgs[0], cmap=plt.get_cmap("viridis"))
ax.axis("off")

#pim.show(imgs[0], ax=ax)
#plot_diagrams(diagrams_h1[3], ax=ax, legend=False)

Example No. 18
def do_full_run(data, quality=50, spread=0.05, kernel="gaussian", weighting="linear"):
    """
    Does the full PI analysis and some training
    saves the results in the results folder
    :param quality: resulution (int)
    :param spread:  variance in smoothing (float)
    :param kernel:  cdf to be used for smoothing (string: gaussian, laplace, lognorm, gamma)
    :param weighting: weighting to be used (string: linear, pm_linear, logistic)
    :return: reports for H0 and H1
    """
    pixels = quality * quality
    PI_vectors_H0 = np.zeros((5 * m, pixels))
    PI_vectors_H1 = np.zeros((5 * m, pixels))
    PI_vectors_con = np.zeros((5 * m, 2 * pixels))

    target = np.zeros(5 * m)
    labels = [2, 3.5, 4.0, 4.1, 4.3]
    index = 0
    print("Creating the PI images for the different r's")
    for i in range(5):
        shape_data = data[i]
        r = labels[i]
        print("training for r:{}".format(r))
        for j in tqdm(range(m)):
            pim_0 = PersImage(spread=spread,
                              pixels=[quality, quality],
                              kernel_type=kernel,
                              weighting_type=weighting,
                              verbose=False,
                              specs={
                                  "maxBD": 2,
                                  "minBD": 0,
                              })
            pim_1 = PersImage(spread=spread,
                              pixels=[quality, quality],
                              kernel_type=kernel,
                              weighting_type=weighting,
                              verbose=False,
                              specs={
                                  "maxBD": 2,
                                  "minBD": 0,
                              })

            PI_data = shape_data[j, :, :]
            rips = Rips(verbose=False)
            dgms = rips.fit_transform(PI_data)

            PI_data_H0 = pim_0.transform(dgms[0])
            PI_data_H1 = pim_1.transform(dgms[1])

            PI_data_H1 = PI_data_H1.reshape(pixels)
            PI_data_H0 = PI_data_H0.reshape(pixels)

            target[index] = int(i)

            PI_vectors_H0[index, :] = PI_data_H0
            PI_vectors_H1[index, :] = PI_data_H1
            PI_vectors_con[index, :] = np.concatenate((PI_data_H0, PI_data_H1))
            index += 1

    PI_vectors_H0_train, PI_vectors_H0_test, y_train_H0, y_test_H0 = train_test_split(PI_vectors_H0,
                                                                                      target,
                                                                                      test_size=0.33)
    PI_vectors_H1_train, PI_vectors_H1_test, y_train_H1, y_test_H1 = train_test_split(PI_vectors_H1,
                                                                                      target,
                                                                                      test_size=0.33)
    PI_vectors_con_train, PI_vectors_con_test, y_train_con, y_test_con = train_test_split(PI_vectors_con,
                                                                                          target,
                                                                                          test_size=0.33)

    y_H0_hat_ada, y_H0_hat_grad = do_training(PI_vectors_H0_train, PI_vectors_H0_test, y_train_H0)
    y_H1_hat_ada, y_H1_hat_grad = do_training(PI_vectors_H1_train, PI_vectors_H1_test, y_train_H1)
    y_con_hat_ada, y_con_1_hat_grad = do_training(PI_vectors_con_train, PI_vectors_con_test, y_train_con)


    reports_H0_ada, reports_H0_grad = do_analysis(y_test_H0, y_H0_hat_ada, y_H0_hat_grad, labels)
    reports_H1_ada, reports_H1_grad = do_analysis(y_test_H1, y_H1_hat_ada, y_H1_hat_grad, labels)
    reports_con_ada, reports_con_grad = do_analysis(y_test_con, y_con_hat_ada, y_con_1_hat_grad, labels)

    reports_H0_ada.to_csv("results_dynamic/{}_{}_{}_{}_H0_ada.csv".format(quality, spread, kernel, weighting))
    reports_H0_grad.to_csv("results_dynamic/{}_{}_{}_{}_H0_grad.csv".format(quality, spread, kernel, weighting))
    reports_H1_ada.to_csv("results_dynamic/{}_{}_{}_{}_H1_ada.csv".format(quality, spread, kernel, weighting))
    reports_H1_grad.to_csv("results_dynamic/{}_{}_{}_{}_H1_grad.csv".format(quality, spread, kernel, weighting))
    reports_con_ada.to_csv("results_dynamic/{}_{}_{}_{}_con_ada.csv".format(quality, spread, kernel, weighting))
    reports_con_grad.to_csv("results_dynamic/{}_{}_{}_{}_con_grad.csv".format(quality, spread, kernel, weighting))

    return reports_H0_ada, reports_H0_grad, reports_H1_ada, reports_H1_grad, reports_con_ada, reports_con_grad
Example No. 19
                      kernel_type="gaussian",
                      weighting_type="logistic",
                      verbose=False,
                      specs={
                          "maxBD": 2,
                          "minBD": 0,
                      })

    rips = Rips(verbose=False)
    dgms_bin = rips.fit_transform(sampled_bin_vec)
    dgms_hf = rips.fit_transform(sampled_hf_vec)
    dgms_df = rips.fit_transform(sampled_df_vec)
    dgms_rf = rips.fit_transform(sampled_rf_vec)


    sampled_bin_vec_H0 = pim.transform(dgms_bin[0]).reshape(100)
    sampled_bin_vec_H1 = pim.transform(dgms_bin[1]).reshape(100)

    sampled_hf_vec_H0 = pim.transform(dgms_hf[0]).reshape(100)
    sampled_hf_vec_H1 = pim.transform(dgms_hf[1]).reshape(100)

    sampled_df_vec_H0 = pim.transform(dgms_df[0]).reshape(100)
    sampled_df_vec_H1 = pim.transform(dgms_df[1]).reshape(100)

    sampled_rf_vec_H0 = pim.transform(dgms_rf[0]).reshape(100)
    sampled_rf_vec_H1 = pim.transform(dgms_rf[1]).reshape(100)

    con = [
        sampled_bin_vec_H0,
        sampled_bin_vec_H1,
        sampled_hf_vec_H0,
Example No. 20
    def test_n_pixels(self):
        pim = PersImage(pixels=(3, 3))
        diagram = np.array([[0, 1], [1, 1], [3, 5]])
        img = pim.transform(diagram)

        assert img.shape == (3, 3)
Example No. 21
    def test_lists_of_lists(self):
        pim = PersImage(pixels=(3, 3))
        diagram = [[0, 1], [1, 1], [3, 5]]
        img = pim.transform(diagram)

        assert img.shape == (3, 3)
Example No. 22
fig = plt.figure()
fig.set_figwidth(9)
fig.set_figheight(6)

index = 1
for i, kern in enumerate(kernels):
    for j, weighting in enumerate(weightings):
        pim = PersImage(spread=spread,
                        pixels=pixels,
                        verbose=False,
                        kernel_type=kern,
                        weighting_type=weighting)
        rips = Rips(verbose=False)
        dgms = rips.fit_transform(PI_data)

        img = pim.transform(dgms[1])
        mini, maxi = np.amin(img), min(np.amax(img), 0.05)
        print(mini, maxi)
        X = np.arange(0, 150)
        Y = np.arange(0, 150)
        X, Y = np.meshgrid(X, Y)
        Z = img[::-1]

        ax = fig.add_subplot(2, 2, index, projection='3d')
        surf = ax.plot_surface(X, Y, Z, rstride=15, cstride=15, cmap="inferno")
        ax.set_zlim(mini, maxi)
        ax.set_xlim(1, 150)
        ax.set_ylim(1, 150)

        plt.yticks([], [])
        index += 1  # advance to the next 2x2 subplot slot

Example No. 23

PI_null_images = []
PI_one_images = []

# `d` is assumed to be the dionysus module (import dionysus as d), and
# homology_persistent_diagrams a helper from the surrounding script.
pim = PersImage(pixels=[20, 20], spread=1)
for i in range(images.shape[0]):
    print(i)
    f_lower_star = d.fill_freudenthal(images[i, :, :, 0].astype(float))
    f_upper_star = d.fill_freudenthal(images[i, :, :, 0].astype(float),
                                      reverse=True)
    p = d.homology_persistence(f_lower_star)
    dgms_temp = d.init_diagrams(p, f_lower_star)
    h0_temp = homology_persistent_diagrams(dgms_temp)[0]
    h1_temp = homology_persistent_diagrams(dgms_temp)[1]
    PI_0_temp = pim.transform(h0_temp[1:, :])  # drop the infinite H0 bar
    PI_1_temp = pim.transform(h1_temp)
    PI_null_images.append(PI_0_temp)
    PI_one_images.append(PI_1_temp)

PI_null_images = np.array(PI_null_images, dtype=np.float32)
PI_null_images = PI_null_images.reshape((-1, 20, 20, 1))  # reshape
PI_null_images = PI_null_images / (PI_null_images.max() / 255.0)  # normalize

PI_one_images = np.array(PI_one_images, dtype=np.float32)
PI_one_images = PI_one_images.reshape((-1, 20, 20, 1))  # reshape
PI_one_images = PI_one_images / (PI_one_images.max() / 255.0)  # normalize

np.save('PI_null_images', PI_null_images)
np.save('PI_one_images', PI_one_images)
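# The saved arrays can later be reloaded for model training (sketch):
import numpy as np

PI_null = np.load('PI_null_images.npy')
PI_one = np.load('PI_one_images.npy')
print(PI_null.shape, PI_one.shape)  # (N, 20, 20, 1) each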
Example No. 24

import numpy as np
import matplotlib.pyplot as plt
import scipy
from persim import plot_diagrams, PersImage
from ripser import ripser, Rips
from train_feats import get_data

sample_size = 5000
num_points = 300
homology_dim = 2

data = get_data(num=sample_size)
dgm = Rips(maxdim=homology_dim, n_perm=num_points).fit_transform(data)

img_generator = PersImage()

im = img_generator.transform(dgm)

fig, ax = plt.subplots()

img_generator.show(im, ax=ax)

fig.savefig("pim.png")
Example No. 25
    def test_empty_diagram(self):
        dgm = np.zeros((0, 2))
        pim = PersImage(pixels=(10, 10))
        res = pim.transform(dgm)
        assert np.all(res == np.zeros((10, 10)))