Example #1
def persistence_image_features(X_train, pixels=[20, 20], spread=1):
    start = time.time()
    pim = PersImage(pixels=pixels, spread=spread)
    imgs_train = pim.transform(X_train)
    # vectorize each persistence image into one flat feature row per diagram
    X_train_features = np.array([img.flatten() for img in imgs_train])
    timing = time.time() - start
    return X_train_features, timing
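A minimal usage sketch for the function above, assuming `ripser` is installed; the point clouds and diagram list here are illustrative, not from the original:

import time
import numpy as np
from ripser import ripser
from persim import PersImage

# Build a tiny illustrative training set: one H1 diagram per random point cloud.
clouds = [np.random.random((100, 2)) for _ in range(3)]
X_train = [ripser(c)["dgms"][1] for c in clouds]

X_train_features, timing = persistence_image_features(X_train)
print(X_train_features.shape)  # (3, 400): one flattened 20x20 image per diagram
print("elapsed:", timing)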
Example #2
def subj_to_pims(sbj, sdict, px, sd, nobs):
    """
    generate persistence images for all gestures for a given subject
    INPUTS
    sbj - subject number
    sdict - dict of gestures performed by a subject
    px - pixel dimension/ resolution; e.g. px=20 gives 20x20 persistence image
    sd - persistence image concentration parameter (gaussian)
    nobs - number of observations per subj (total num of gests they performed)

    OUTPUTS
    array of gestures made by subject
    """
    # instantiate Vietoris-Rips complex generator & persistence image generator
    rips = Rips(maxdim=1, verbose=False)  # rips complex up to 1-D homology
    pim = PersImage(pixels=[px, px], spread=sd)
    # each row has px**2 persistence-image pixels + 2 cols for subj & gest labels
    res_mat = np.zeros((nobs, px**2 + 2))

    for v, (gnum, garray) in enumerate(sdict.items()):
        # generate rips complex on points; slice out time col and gesture label
        dgms = rips.fit_transform(garray[:, 1:-1])
        img = pim.transform(dgms[1])  # persistence image of H1 cycles
        res_mat[v, :] = np.r_[img.flatten(), int(gnum[0]), int(sbj)]

    return res_mat
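A hedged usage sketch for `subj_to_pims`; the gesture dictionary, its key format, and the array shapes below are illustrative assumptions, not from the original:

import numpy as np
from ripser import Rips
from persim import PersImage

# Illustrative gesture dict: keys whose first character is the gesture number,
# values of shape (n_points, cols) with a leading time column and a trailing
# gesture-label column.
sdict = {
    "1_0": np.random.random((50, 5)),
    "2_0": np.random.random((50, 5)),
}
res = subj_to_pims(sbj=1, sdict=sdict, px=20, sd=1e-5, nobs=len(sdict))
print(res.shape)  # (2, 20*20 + 2)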
Example #3
def generate_persistence_images(directory, threshold, pixel, spreadval,
                                rangemin, rangemax):
    for file in os.listdir(directory):
        dgm0 = np.squeeze(sio.loadmat(os.path.join(directory, file))['PD'])
        # Compute lifetimes for thresholding of the persistence diagram
        lifetime = dgm0[:, 1] - dgm0[:, 0]

        # Keep only points whose lifetime exceeds the threshold value
        isvalid = np.greater(lifetime, threshold)
        reduced_pd = dgm0[isvalid]

        print("Beginning Persistence Image Generation for " + file)
        print("Length of PD: " + str(len(dgm0)) + " points")
        print("Length Reduced PD: " + str(len(reduced_pd)) + " points")

        pim = PersImage(pixels=[pixel, pixel],
                        spread=spreadval,
                        specs={
                            "minBD": rangemin,
                            "maxBD": rangemax
                        })
        imgs = pim.transform(reduced_pd)
        imgs = imgs / np.max(imgs)
        imgs = imgs.reshape((-1))

        filename = "./Persistence_Images/" + file
        sio.savemat(filename, dict([('PersImg', imgs)]))
Example #4
def generate_PI(PD_list, emb_dim, tau, pixels, spread):
    # emb_dim and tau are unused here; kept for a uniform call signature
    pim = PersImage(pixels=[pixels, pixels], spread=spread)
    PI_list = pim.transform(PD_list)
    return np.array([img.flatten() for img in PI_list])
Example #5
    def test_kernel_mean(self):
        pim = PersImage()
        kf = pim.kernel(2)

        assert kf(np.array([[0, 0]]), [0, 0]) >= kf(np.array([[1, 1]]), [0, 0]), "decreasing away"
        assert kf(np.array([[0, 0]]), [1, 1]) == kf(np.array([[1, 1]]), [0, 0]), "symmetric"
Example #6
def generate_persistence_image(dgm0, threshold, pixel, spreadval, rangemin,
                               rangemax):

    # Compute lifetimes for thresholding of the persistence diagram
    lifetime = dgm0[:, 1] - dgm0[:, 0]

    # Keep only points whose lifetime exceeds the threshold value
    isvalid = np.greater(lifetime, threshold)
    reduced_pd = dgm0[isvalid]

    print("Length of PD: " + str(len(dgm0)) + " points")
    print("Length Reduced PD: " + str(len(reduced_pd)) + " points")

    pim = PersImage(pixels=[pixel, pixel],
                    spread=spreadval,
                    specs={
                        "minBD": rangemin,
                        "maxBD": rangemax
                    })
    imgs = pim.transform(reduced_pd)
    imgs = imgs / np.max(imgs)
    #imgs = imgs.reshape((-1))

    return imgs
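A small usage sketch for the function above with a synthetic H0 diagram; all values are made up for illustration:

import numpy as np
from persim import PersImage

dgm0 = np.array([[0.0, 0.2], [0.0, 1.5], [0.1, 2.8]])  # synthetic (birth, death) pairs
img = generate_persistence_image(dgm0, threshold=0.5, pixel=10, spreadval=0.1,
                                 rangemin=0, rangemax=3)
print(img.shape)  # (10, 10)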
Example #7
    def test_zero_on_xaxis(self):
        pim = PersImage()

        wf = pim.weighting()

        assert wf([1, 0]) == 0
        assert wf([100, 0]) == 0
        assert wf([99, 1.4]) == 1.4
Example #8
    def test_scales(self):
        pim = PersImage()

        wf = pim.weighting(np.array([[0, 1], [1, 2], [3, 4]]))

        assert wf([1, 0]) == 0
        assert wf([1, 4]) == 1
        assert wf([1, 2]) == .5
Example #9
    def test_multiple_diagrams(self):
        pim = PersImage(pixels=(3, 3))

        diagram1 = np.array([[0, 1], [1, 1], [3, 5]])
        diagram2 = np.array([[0, 1], [1, 1], [3, 6]])
        imgs = pim.transform([diagram1, diagram2])

        assert len(imgs) == 2
        assert imgs[0].shape == imgs[1].shape
Example #10
def generatePI(allPDs, segmentLength, embDim, tau, PCA_n_components, pixels,
               spread, norm, savePI):

    pim = PersImage(pixels=[pixels, pixels], spread=spread)
    imgs = pim.transform(allPDs)
    imgs_array = np.array([img.flatten() for img in imgs])
    if savePI:
        np.save(
            'PI_Len%s_Dim%s_Tau%s_PCA%s_p%s_s%s_%s.npy' %
            (segmentLength, embDim, tau, PCA_n_components, pixels, spread,
             norm), imgs_array)
    return imgs_array
Example #11
    def persistence(self, k_max=1, type="standard", representation="surface", shape=(16, 16)):

        if representation == "surface":
            pi = PersImage(spread=0.025, pixels=shape, verbose=False)
            result = np.array(pi.transform(self.diagrams[1]))  # assumes diagrams are stored on the instance
        elif representation == "landscape":
            raise NotImplementedError()  # TODO
        elif representation == "diagram":
            result = self.diagrams
        else:
            raise ValueError("Representation should be 'surface', 'landscape', or 'diagram'")

        return result
Example #12
def calculatePI(val=None):
    global dgms, first
    if not first:
        r = int(sld_resolution.val)
        v = sld_spread.val
    else:
        r = 10
        v = 1
    pim = PersImage(spread=v, pixels=[r, r], verbose=False)
    img = pim.transform(dgms[1])
    ax_1 = plt.subplot(233)
    plt.title("PI for $H_1$\nspread = " + str(v)[0:4] + "\n" + str(r) + "x" + str(r))
    pim.show(img, ax_1)
    first = False
Example #13
def test_integer_diagrams():
    """ This test is inspired by gh issue #3 by gh user muszyna25.

    Integer diagrams return nan values.

    This does not work: dgm = [[0, 2], [0, 6], [0, 8]];

    This one works fine: dgm = [[0.0, 2.0], [0.0, 6.0], [0.0, 8.0]];

    """

    dgm = [[0, 2], [0, 6], [0, 8]]
    dgm2 = [[0.0, 2.0], [0.0, 6.0], [0.0, 8.0]]
    pim = PersImage()
    res = pim.transform(dgm2)
    res2 = pim.transform(dgm)
    np.testing.assert_array_equal(res, res2)
Example #14
    def test_empty_diagram_list(self):
        dgm1 = [np.array([[2, 3]]), np.zeros((0, 2))]
        pim1 = PersImage(pixels=(10, 10))
        res1 = pim1.transform(dgm1)
        assert np.all(res1[1] == np.zeros((10, 10)))

        dgm2 = [np.zeros((0, 2)), np.array([[2, 3]])]
        pim2 = PersImage(pixels=(10, 10))
        res2 = pim2.transform(dgm2)
        assert np.all(res2[0] == np.zeros((10, 10)))

        dgm3 = [np.zeros((0, 2)), np.zeros((0, 2))]
        pim3 = PersImage(pixels=(10, 10))
        res3 = pim3.transform(dgm3)
        assert np.all(res3[0] == np.zeros((10, 10)))
        assert np.all(res3[1] == np.zeros((10, 10)))
Example #15
def persistence_image_features(X_train, X_test, pixels=[20, 20], spread=1):
    start = time.time()
    pim = PersImage(pixels=pixels, spread=spread)
    imgs_train = pim.transform(X_train)
    X_train_features = np.array([img.flatten() for img in imgs_train])
    # reuse the same PersImage so train and test images share one grid
    imgs_test = pim.transform(X_test)
    X_test_features = np.array([img.flatten() for img in imgs_test])
    print("Total Time (Persistence Images): ", time.time() - start)
    return X_train_features, X_test_features
Example #16
    def fit_transform(self, data):
        '''
        Compute the persistence images 
        Params:
            data: list of 1D numpy arrays, representing the surface profiles
        Returns a list of persistence images
        '''
        Filt = [self.filtration(d) for d in data]
        Diags = [
            ripser(f, maxdim=0, thresh=self.thresh,
                   distance_matrix=True)['dgms'][0][:-1] for f in Filt
        ]
        Imgs = PersImage(pixels=self.pixels, verbose=False).transform(Diags)

        return Imgs


## Use example (see Ref)
# S2I = SurfacesProfilesToImages(thresh=20,pixels=(20,20))
# s = np.array([11, 14, 9, 7, 9, 7, 8, 10, 9])
# data = [s]
# im = S2I.fit_transform(data)
# plt.imshow(np.flip(im[0],axis=1))
Example #17
    def test_n_pixels(self):
        pim = PersImage(pixels=(3, 3))
        diagram = np.array([[0, 1], [1, 1], [3, 5]])
        img = pim.transform(diagram)

        assert img.shape == (3, 3)
Example #18
import pickle

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from persim import PersImage
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

pim_df = pd.read_csv("./pim_vectors_mp20_sbst.csv")
pim_df.gest = pim_df.gest.astype("category")

pims = pim_df.values[:, :-2]  # predictor vectors: persistence images (864 x px**2)
gests = pim_df.values[:, -2].astype("int64")  # data labels: gesture numbers

pimsd = 1e-5
px = 20
pim = PersImage(pixels=[px, px], spread=pimsd)

# code to load model
with open("./saved_models/log_reg_skl.sav", "rb") as fh:
    log_reg = pickle.load(fh)

######## train/ test split ########
np.random.seed(1)
pims_train, pims_test, gests_train, gests_test = train_test_split(
    pims, gests, test_size=0.2, random_state=1)

oos_acc = log_reg.score(pims_test, gests_test)
print(f"Accuracy: {oos_acc * 100}%")

inverse_image = np.copy(log_reg.coef_).reshape(-1, px)
# (assumed completion) display the learned coefficient image for each of the 4 gesture classes
for i in range(4):
    pim.show(inverse_image[i * px:(i + 1) * px])
    plt.show()
Example #19
def compute_single_diagram_statistics(dgm, threshold=None, dim=0):
    """
    Computes stats of a single persistence diagram dgm, input as an
    array with 2 columns.
    """

    if dgm is None:  # Return list of stat names
        basenames = compute_stats(None)
        stat_names = []
        # Birth stats
        stat_names.append(['b' + name for name in basenames])
        # Death stats
        stat_names.append(['d' + name for name in basenames])
        # Persistence stats
        stat_names.append(['p' + name for name in basenames])

    # Miscellaneous stats
        stat_names.append([
            'bdcor', 'bdcencor', 'totalper', 'bmaxper', 'dmaxper', 'bminper',
            'dminper', 'Nfeatures'
        ])
        # Betti curve
        stat_names.append(['betticurve'])
        # Persistence image
        stat_names.append(['PI'])
        return stat_names

    # Else:

    Nbins = 100  # Number of betti curve bins
    stats = []

    dgm = clean_births_deaths(dgm, threshold, dim)
    b = dgm[:, 0]
    d = dgm[:, 1]

    if b.size > 0:

        p = d - b  # persistence
        stats.append(compute_stats(b))
        stats.append(compute_stats(d))
        stats.append(compute_stats(p))
        #stats[binds0:binds1] = compute_stats(b)
        #stats[dinds0:dinds1] = compute_stats(d)
        #stats[pinds0:pinds1] = compute_stats(p)

        bmean = stats[0][3]
        dmean = stats[1][3]
        #bmean = stats[binds0 + 3]
        #dmean = stats[dinds0 + 3]

        maxfeatind = np.argmax(p)
        minfeatind = np.argmin(p)
        stats.append([
            np.sum(b * d),
            np.sum((b - bmean) * (d - dmean)),
            np.sum(p), b[maxfeatind], d[maxfeatind], b[minfeatind],
            d[minfeatind], b.size
        ])
        #stats[minds0:minds1] = [np.sum(b*d),
        #                        np.sum((b-bmean)*(d-dmean)),
        #                        np.sum(p),
        #                        b[maxfeatind],
        #                        d[maxfeatind],
        #                        b[minfeatind],
        #                        d[minfeatind],
        #                        b.size]

        betticurve = compute_betti_curve(dgm)

        ## Compute integral of Betti curve over bin
        bettiints, bin_edges = integrate_binned_betti_curve(
            betticurve, Nbins, threshold)

        stats.append([bettiints])
        #stats[betti0:betti1] = bettiints

        pim = PersImage(spread=0.05, pixels=[10, 10], verbose=False)
        img = pim.transform(dgm)
        stats.append([img])

    else:
        stats = None

    return stats, threshold / Nbins
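A hedged usage sketch; `clean_births_deaths`, `compute_stats`, `compute_betti_curve`, and `integrate_binned_betti_curve` are helpers from the surrounding project, and the diagram below is synthetic:

import numpy as np

# Calling with None returns only the nested list of stat names.
stat_names = compute_single_diagram_statistics(None)
dgm = np.array([[0.0, 0.4], [0.1, 1.2], [0.3, 2.0]])  # synthetic (birth, death) pairs
stats, bin_width = compute_single_diagram_statistics(dgm, threshold=2.0, dim=0)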
Example #20
def inverse(allFeatures,
            classes,
            nonanIndFeature=None,
            cv=False,
            printijScores=False):
    # use only uniqueClassName with larger than 10 samples
    MINCOUNT = 10
    cvFolds = 10
    MINCOUNTTRAINING = 5

    if nonanIndFeature is not None:
        classes = classes[nonanIndFeature]
        allFeatures = allFeatures[nonanIndFeature]

    uniqueClassName, uniqueCNameCount = np.unique(
        classes, return_counts=True
    )  # uniqueClassName to be discriminated should be same as lr.classes_
    goodClassNameInd = np.array([n >= MINCOUNT for n in uniqueCNameCount])
    goodSampleInd = np.array(
        [b in uniqueClassName[goodClassNameInd] for b in classes])
    goodClassNameInd[np.where(uniqueClassName == 'Unknown00F')[0]] = False

    # take good indices and encode the class labels as numbers
    goodClasses = classes[goodSampleInd]
    goodFeatures = allFeatures[goodSampleInd]

    le = preprocessing.LabelEncoder()
    le.fit(goodClasses)
    goodClassLabels = le.transform(goodClasses)
    nGoodClass = uniqueClassName[goodClassNameInd].size
    nPair = int(nGoodClass * (nGoodClass - 1) / 2)
    scores = np.zeros(nPair)
    pValues = np.zeros(nPair)

    ncounter = 0
    nhighScores = 0
    nSigP = 0

    lr = LogisticRegression(C=1,
                            class_weight=None,
                            dual=False,
                            fit_intercept=True,
                            intercept_scaling=1,
                            max_iter=100,
                            multi_class='ovr',
                            n_jobs=1,
                            penalty='l2',
                            random_state=0,
                            solver='liblinear',
                            tol=0.0001,
                            verbose=0,
                            warm_start=False)
    #lt = [108, 110, 164,163,134,244,100,97,123,13,101,23,208,37]

    for i in range(nGoodClass):
        for j in range(nGoodClass):
            if i < j:
                #if (i ==0) & (j ==14):
                featureij = np.vstack(
                    (goodFeatures[np.where(goodClassLabels == i)[0]],
                     goodFeatures[np.where(goodClassLabels == j)[0]]))
                classij = np.hstack(
                    (goodClassLabels[np.where(goodClassLabels == i)[0]],
                     goodClassLabels[np.where(goodClassLabels == j)[0]]))
                if cv:
                    cvCount = 0
                    lrYes = 0
                    skf = StratifiedKFold(n_splits=cvFolds)
                    skfList = skf.split(featureij, classij)
                    for train, test in skfList:
                        # Enforce the MINCOUNT in each class for Training
                        trainClasses, trainCount = np.unique(
                            classij[train], return_counts=True)
                        goodIndClasses = np.array(
                            [n >= MINCOUNTTRAINING for n in trainCount])
                        goodIndTrain = np.array([
                            b in trainClasses[goodIndClasses]
                            for b in classij[train]
                        ])
                        # Specify the training data set, the number of groups and priors
                        yTrain = classij[train[goodIndTrain]]
                        XrTrain = featureij[train[goodIndTrain]]
                        trainClasses, trainCount = np.unique(
                            yTrain, return_counts=True)
                        ntrainClasses = trainClasses.size
                        # Skip this cross-validation fold because of insufficient data
                        if ntrainClasses < 2:
                            continue
                        goodTrainInd = np.array(
                            [b in trainClasses for b in classij[test]])
                        if (goodTrainInd.size == 0):
                            continue
                        lr.fit(XrTrain, yTrain)
                        print(lr.coef_)

                        inverse_image = np.copy(lr.coef_).reshape((15, 15))
                        #np.save('inverseimage_0_14_p20_s0.01.npy', inverse_image)
                        pim = PersImage(pixels=[15, 15], spread=1)
                        pim.show(inverse_image)
                        plt.show()
                        lrYes += np.around(
                            (lr.score(featureij[test[goodTrainInd]],
                                      classij[test[goodTrainInd]])) *
                            goodTrainInd.size)
                        cvCount += goodTrainInd.size
                    lrYesInt = int(lrYes)
                    p = 1.0 / 2
                    lrP = 0
                    for k in range(lrYesInt, cvCount + 1):
                        lrP += binom.pmf(k, cvCount, p)

                    print("LR: %.2f %% (%d/%d p=%.4f)" %
                          (100.0 * lrYes / cvCount, lrYes, cvCount, lrP))
                    #if ncounter in lt:
                    #	print(ncounter, i, j)
                    scores[ncounter] = 100.0 * lrYes / cvCount
                    pValues[ncounter] = lrP
                    if scores[ncounter] >= 90.0 and pValues[ncounter] <= 0.05:
                        nhighScores += 1
                    if pValues[ncounter] <= 0.05:
                        nSigP += 1

                else:
                    X_train, X_test, y_train, y_test = train_test_split(
                        featureij, classij, test_size=0.10, random_state=42)
                    lr.fit(X_train, y_train)
                    scores[ncounter] = lr.score(X_test, y_test)
                    if printijScores:
                        print('classes:', i, j, 'scores', scores[ncounter])

                    if scores[ncounter] >= .95:
                        nhighScores += 1

                ncounter += 1
    print('mean scores: ', np.mean(scores),
          'nhighScores/ntotal: %s/%s' % (nhighScores, ncounter),
          'nSigP/ntotal: %s/%s' % (nSigP, nPair))
    return scores, pValues, nhighScores, nPair
Example #21
import numpy as np
from persim import PersImage

s = (80, 100)
Persims = np.zeros(s)

import glob
#by changing Dim0 to Dim1 you can read the other Dim data
txt_files = glob.glob("Dim1-resized/*.txt")
print(len(txt_files))
for i in range(len(txt_files)):
    print(txt_files[i])
    D = np.genfromtxt(txt_files[i], skip_header=1)
    D = np.array(D)
    D = D[:, [1, 2]]
    pim = PersImage(pixels=(10, 10))
    img = pim.transform(D)
    Persims[i, :] = img.flatten()

print(Persims)

#for 8classes
#c=np.array([1,2,3,4,5,6,7,8])
#ccol=np.repeat(c,10)

#for 4 classes
c = np.array([1, 2, 3, 4])
ccol = np.repeat(c, 20)

cname = np.array(
    ["Apple", "Bell", "Bird", "Bottle", "Brick", "Children", "Key", "Rat"])
Example #22
								pixels = pixelsL[p]
								spread = spreadL[s]
								#if 'MidPD_Len%s_Dim%s_Tau%s_PCA%s.npy' %(segmentLength, embDim, tau, PCA_n_components)
								print('processing segmentLength', segmentLength, 'embDim ', embDim, ' tau ', tau, ' PCA_n_components ', PCA_n_components, ' pixels ', pixels, ' spread ', spread, '\n\n') 
#								#PI0 = generatePI(allPDs, segmentLength, embDim, tau, PCA_n_components, pixels, spread, savePI, norm)
#allPIs = np.load('PI_Len%s_Dim%s_Tau%s_PCA%s_p%s_s%s_%s.npy' %(segmentLength, embDim, tau, PCA_n_components, pixels, spread, norm))

x = allSegments[:4]
allTimeDelayedSeg = timeDelayPCA(x, segmentLength, embDim, tau, PCA_n_components)

from persim import PersImage, plot_diagrams
from ripser import Rips

rips = Rips(maxdim=1, coeff=2)
diagrams_h1 = [rips.fit_transform(data)[1] for data in allTimeDelayedSeg]
pim = PersImage(pixels=[15,15], spread=1)
imgs = pim.transform(diagrams_h1)


fig = plt.figure(figsize=(5,5))
ax = fig.subplots()
ax.imshow(imgs[0], cmap=plt.get_cmap("viridis"))
ax.axis("off")

#pim.show(imgs[0], ax=ax)
#plot_diagrams(diagrams_h1[3], ax=ax, legend=False)

#ax.xaxis.set_major_locator(MultipleLocator(5000))
#ax.yaxis.set_major_locator(MultipleLocator(5000))
##MI = MI_lcmin(x, order=1, PLOT=True)
##print(MI)
Example #23
import numpy as np
from sklearn import datasets

from ripser import ripser
from persim import PersImage, plot_diagrams
import scipy.misc

#generate circles
print("Generating circles")
data = None
pim = PersImage(spread=1, pixels=[10, 10], verbose=False)

for i in range(500):
    if i % 10 != 0:
        print(str(i), end=" ")
    else:
        print(str(i))

    data = np.concatenate([
        150 * np.random.random((150, 2)),
        np.random.randint(10, 100) +
        20 * datasets.make_circles(n_samples=150, factor=0.99)[0]
    ])

    dgms = ripser(data)["dgms"]

    img = pim.transform(dgms[1])

    pImg = np.zeros((10, 10), dtype=np.uint8)

    for idxR, r in enumerate(img):
        # (assumed completion) scale pixels into 0-255 and store as uint8
        for idxC, c in enumerate(r):
            pImg[idxR, idxC] = np.uint8(255 * c / max(np.max(img), 1e-12))
Example #24
def plot_coeff(fig, coeff, location, title):
    pim = PersImage(pixels=[10, 10], spread=1)
    ax = fig.add_subplot(location)
    inverse_image = np.copy(coeff).reshape((10, 10))
    ax.set_title(title)
    pim.show(inverse_image, ax)
Example #25
def do_full_run(data, quality=50, spread=0.05, kernel="gaussian", weighting="linear"):
    """
    Does the full PI analysis and some training
    saves the results in the results folder
    :param quality: resulution (int)
    :param spread:  variance in smoothing (float)
    :param kernel:  cdf to be used for smoothing (string: gaussian, laplace, lognorm, gamma)
    :param weighting: weighting to be used (string: linear, pm_linear, logistic)
    :return: reports for H0 and H1
    """
    pixels = quality * quality
    PI_vectors_H0 = np.zeros((5 * m, pixels))
    PI_vectors_H1 = np.zeros((5 * m, pixels))
    PI_vectors_con = np.zeros((5 * m, 2 * pixels))

    target = np.zeros(5 * m)
    labels = [2, 3.5, 4.0, 4.1, 4.3]
    index = 0
    print("Creating the PI images for the different r's")
    for i in range(5):
        shape_data = data[i]
        r = labels[i]
        print("training for r:{}".format(r))
        for j in tqdm(range(m)):
            pim_0 = PersImage(spread=spread,
                              pixels=[quality, quality],
                              kernel_type=kernel,
                              weighting_type=weighting,
                              verbose=False,
                              specs={
                                  "maxBD": 2,
                                  "minBD": 0,
                              })
            pim_1 = PersImage(spread=spread,
                              pixels=[quality, quality],
                              kernel_type=kernel,
                              weighting_type=weighting,
                              verbose=False,
                              specs={
                                  "maxBD": 2,
                                  "minBD": 0,
                              })

            PI_data = shape_data[j, :, :]
            rips = Rips(verbose=False)
            dgms = rips.fit_transform(PI_data)

            PI_data_H0 = pim_0.transform(dgms[0])
            PI_data_H1 = pim_1.transform(dgms[1])

            PI_data_H1 = PI_data_H1.reshape(pixels)
            PI_data_H0 = PI_data_H0.reshape(pixels)

            # PI_data_H0 = PI_data_H0[:, None]
            # PI_data_H1 = PI_data_H1[:, None]
            #
            target[index] = int(i)

            PI_vectors_H0[index, :] = PI_data_H0
            PI_vectors_H1[index, :] = PI_data_H1
            PI_vectors_con[index, :] = np.concatenate((PI_data_H0, PI_data_H1))
            index += 1

    PI_vectors_H0_train, PI_vectors_H0_test, y_train_H0, y_test_H0 = train_test_split(PI_vectors_H0,
                                                                                      target,
                                                                                      test_size=0.33)
    PI_vectors_H1_train, PI_vectors_H1_test, y_train_H1, y_test_H1 = train_test_split(PI_vectors_H1,
                                                                                      target,
                                                                                      test_size=0.33)
    PI_vectors_con_train, PI_vectors_con_test, y_train_con, y_test_con = train_test_split(PI_vectors_con,
                                                                                          target,
                                                                                          test_size=0.33)

    y_H0_hat_ada, y_H0_hat_grad = do_training(PI_vectors_H0_train, PI_vectors_H0_test, y_train_H0)
    y_H1_hat_ada, y_H1_hat_grad = do_training(PI_vectors_H1_train, PI_vectors_H1_test, y_train_H1)
    y_con_hat_ada, y_con_1_hat_grad = do_training(PI_vectors_con_train, PI_vectors_con_test, y_train_con)


    reports_H0_ada, reports_H0_grad = do_analysis(y_test_H0, y_H0_hat_ada, y_H0_hat_grad, labels)
    reports_H1_ada, reports_H1_grad = do_analysis(y_test_H1, y_H1_hat_ada, y_H1_hat_grad, labels)
    reports_con_ada, reports_con_grad = do_analysis(y_test_con, y_con_hat_ada, y_con_1_hat_grad, labels)

    reports_H0_ada.to_csv("results_dynamic/{}_{}_{}_{}_H0_ada.csv".format(quality, spread, kernel, weighting))
    reports_H0_grad.to_csv("results_dynamic/{}_{}_{}_{}_H0_grad.csv".format(quality, spread, kernel, weighting))
    reports_H1_ada.to_csv("results_dynamic/{}_{}_{}_{}_H1_ada.csv".format(quality, spread, kernel, weighting))
    reports_H1_grad.to_csv("results_dynamic/{}_{}_{}_{}_H1_grad.csv".format(quality, spread, kernel, weighting))
    reports_con_ada.to_csv("results_dynamic/{}_{}_{}_{}_con_ada.csv".format(quality, spread, kernel, weighting))
    reports_con_grad.to_csv("results_dynamic/{}_{}_{}_{}_con_grad.csv".format(quality, spread, kernel, weighting))

    return reports_H0_ada, reports_H0_grad, reports_H1_ada, reports_H1_grad, reports_con_ada, reports_con_grad
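A hedged driver sketch for `do_full_run`; `m` (observations per label) is a module-level global in the original snippet, `do_training`/`do_analysis` and the imports must come from the same module, and the data shapes below are assumptions:

import numpy as np

m = 10  # observations per r label (global in the original snippet)
data = np.random.random((5, m, 100, 2))  # 5 r-values, m clouds of 100 2-D points each
reports = do_full_run(data, quality=20, spread=0.05,
                      kernel="gaussian", weighting="linear")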
Example #26
    return [h0, h1]


PI_null_images = []
PI_one_images = []

for i in range(images.shape[0]):
    print(i)
    f_lower_star = d.fill_freudenthal(images[i, :, :, 0].astype(float))
    f_upper_star = d.fill_freudenthal(images[i, :, :, 0].astype(float),
                                      reverse=True)
    p = d.homology_persistence(f_lower_star)
    dgms_temp = d.init_diagrams(p, f_lower_star)
    h0_temp = homology_persistent_diagrams(dgms_temp)[0]
    h1_temp = homology_persistent_diagrams(dgms_temp)[1]
    pim = PersImage(pixels=[20, 20], spread=1)
    PI_0_temp = pim.transform(h0_temp[1:, :])
    PI_1_temp = pim.transform(h1_temp)
    PI_null_images.append(PI_0_temp)
    PI_one_images.append(PI_1_temp)

PI_null_images = np.array(PI_null_images, dtype=np.float32)
PI_null_images = PI_null_images.reshape((-1, 20, 20, 1))  # reshape
PI_null_images = PI_null_images / (PI_null_images.max() / 255.0)  # normalize

PI_one_images = np.array(PI_one_images, dtype=np.float32)
PI_one_images = PI_one_images.reshape((-1, 20, 20, 1))  # reshape
PI_one_images = PI_one_images / (PI_one_images.max() / 255.0)  # normalize

np.save('PI_null_images', PI_null_images)
np.save('PI_one_images', PI_one_images)
Example #27
    def test_lists_of_lists(self):
        pim = PersImage(pixels=(3, 3))
        diagram = [[0, 1], [1, 1], [3, 5]]
        img = pim.transform(diagram)

        assert img.shape == (3, 3)
Example #28
def test_landscape():
    bds = np.array([[1, 1], [1, 2]])

    ldsp = PersImage.to_landscape(bds)

    np.testing.assert_array_equal(ldsp, [[1, 0], [1, 1]])
Example #29
weightings = ["linear", "logistic"]

spread = 0.05
quality = 150
pixels = [quality, quality]

fig = plt.figure()
fig.set_figwidth(9)
fig.set_figheight(6)

index = 1
for i, kern in enumerate(kernels):
    for j, weighting in enumerate(weightings):
        pim = PersImage(spread=spread,
                        pixels=pixels,
                        verbose=False,
                        kernel_type=kern,
                        weighting_type=weighting)
        rips = Rips(verbose=False)
        dgms = rips.fit_transform(PI_data)

        img = pim.transform(dgms[1])
        mini, maxi = np.amin(img), min(np.amax(img), 0.05)
        print(mini, maxi)
        X = np.arange(0, 150)
        Y = np.arange(0, 150)
        X, Y = np.meshgrid(X, Y)
        Z = img[::-1]

        ax = fig.add_subplot(2, 2, index, projection='3d')
        #ax = fig.gca(projection='3d')
Example #30
    def test_empty_diagram(self):
        dgm = np.zeros((0, 2))
        pim = PersImage(pixels=(10, 10))
        res = pim.transform(dgm)
        assert np.all(res == np.zeros((10, 10)))