Example #1
def network_atlas(C, K, alpha, T):
    datap = []
    for i in range(len(C)):
        datap.append(C[i][:][:])
    affinity_networks = snf.make_affinity(datap, K=K, mu=alpha)  # The first step in SNF is converting these data arrays into similarity (or "affinity") networks.
    fused_network_atlas_C = snf.snf(affinity_networks, K=K, alpha=alpha, t=T)  # Once we have our similarity networks we can fuse them together.
    return fused_network_atlas_C
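A minimal usage sketch (not part of the original example), assuming the snfpy package is importable as snf and that each view is a subjects-by-features array:

# Hedged usage sketch on synthetic data.
import numpy as np
import snf

rng = np.random.default_rng(0)
views = [rng.random((50, 40)) for _ in range(3)]    # 3 views, 50 subjects x 40 features each
fused = network_atlas(views, K=20, alpha=0.5, T=20)
print(fused.shape)                                  # (50, 50) fused similarity matrix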
Example #2
def snf_cal(D):

    view_num = len(D)
    k_neighs = 20
    mu = 0.4
    n_iter = 20

    aff_mat_ls = snf.make_affinity(D, K=k_neighs, mu=mu)

    if view_num != len(aff_mat_ls):
        raise ValueError("the number of views check fails")

    # denoise each affinity network with network enhancement (ne)
    order = 2
    alpha = 0.7
    ne_aff_mat_ls = [
        ne(aff_mat_ls[i], order, k_neighs, alpha) for i in range(view_num)
    ]

    # fuse the networks
    fused_network = snf.snf(ne_aff_mat_ls[0:3], K=k_neighs, t=n_iter)
    ne_fused_network = ne(fused_network, order, k_neighs, alpha)

    # graph Laplacian of the fused, enhanced network
    deg_mat = np.diag(ne_fused_network.sum(axis=1))
    L = deg_mat - ne_fused_network

    return L
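A hedged usage sketch, assuming the `ne` network-enhancement helper used above is defined elsewhere in the module and that D is a list of subjects-by-features arrays; the returned graph Laplacian can then feed a spectral embedding:

# Hedged usage sketch on synthetic data.
import numpy as np
import snf

rng = np.random.default_rng(0)
D = [rng.random((60, 30)) for _ in range(3)]
L = snf_cal(D)

# The eigenvectors of the Laplacian with the smallest eigenvalues give a
# spectral embedding that can be clustered with k-means.
eigvals, eigvecs = np.linalg.eigh(L)
embedding = eigvecs[:, :5]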
Example #3
def random20_test():
    r = []

    h = randint(5, 5)
    w = randint(5, 5)
    contents = []
    for i in range(h * w):
        contents.append(ZI(randint(-10, 10), randint(-10, 10)))
    A = Matrix(h, w, contents)
    s, j, t = snf(A)
    assert s * A * t == j
    print(A)
    print()
    print(j)
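For comparison, a hedged illustration of the same Smith-normal-form property using SymPy over the plain integers (an assumption; it does not use the custom Matrix/ZI classes of this example):

# Hedged illustration with SymPy instead of the custom Matrix/ZI classes above.
from sympy import Matrix, ZZ
from sympy.matrices.normalforms import smith_normal_form

A = Matrix([[2, 4, 4], [-6, 6, 12], [10, 4, 16]])
J = smith_normal_form(A, domain=ZZ)
print(J)  # diagonal form in which each entry divides the next, the property asserted in Example #6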
Example #4
def __snf_based_merge(link_1, link_2):
    """
    snf network merge based on Wang, Bo, et al. Nature methods 11.3 (2014): 333.
    """
    warnings.simplefilter("ignore")
    node_list = list(set(link_1['source']) & set(link_1['target']) & set(link_2['source']) & set(link_2['target']))

    adjlinks = list()
    adjlinks.append(__linkage_to_adjlink(link_1, node_list))
    adjlinks.append(__linkage_to_adjlink(link_2, node_list))
    affinity_matrix = snf.make_affinity(adjlinks)
    fused_network = snf.snf(affinity_matrix)
    Graph = nx.from_pandas_adjacency(pd.DataFrame(fused_network, index=node_list, columns=node_list))
    return pd.DataFrame(Graph.edges, columns=['source', 'target'])
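A hedged usage sketch with toy edge lists; __linkage_to_adjlink is assumed (as in the original module) to convert an edge list into an adjacency matrix ordered by node_list:

# Hedged usage sketch with toy edge lists.
import pandas as pd

link_1 = pd.DataFrame({"source": ["a", "b", "c"], "target": ["b", "c", "a"]})
link_2 = pd.DataFrame({"source": ["a", "c", "b"], "target": ["c", "b", "a"]})
merged = __snf_based_merge(link_1, link_2)   # DataFrame with 'source' / 'target' columns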
Example #5
    def __init__(self, normal_sum, cancer_sum, tumor):
        self.tumor = tumor
        self.normal = normal_sum
        self.cancer = cancer_sum
        self.both = [self.normal, self.cancer]
        self.fused = pd.DataFrame(snf.snf(self.both, K=20))
        self.fused_edgelist = adj_to_list(self.fused)
        self.fused_edgelist.columns = ['src', 'trg', 'weight']
        self.fused_edgelist.to_csv('./Networks/fused_edgelist_' +
                                   str(self.tumor) + '.csv',
                                   sep='\t')

        self.fused_thr = self.threshold_snf()
        np.save('./Networks/fused_' + str(self.tumor) + '.npy', self.fused_thr)
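A hedged instantiation sketch; the enclosing class name is unknown here, so FusedNetworks below is a placeholder, and adj_to_list / threshold_snf are assumed to be defined in the original project:

# Hedged sketch: FusedNetworks is a placeholder class name; normal_sum and
# cancer_sum are assumed to be precomputed square affinity matrices over the
# same samples.
import numpy as np

rng = np.random.default_rng(0)
normal_aff = rng.random((30, 30))
normal_aff = (normal_aff + normal_aff.T) / 2
cancer_aff = rng.random((30, 30))
cancer_aff = (cancer_aff + cancer_aff.T) / 2
# networks = FusedNetworks(normal_aff, cancer_aff, tumor="BRCA")  # writes ./Networks/ outputs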
Example #6
def random20_test():
    for run in range(100):
        sys.stderr.write(str(run)+'\n')
        r = []

        h = randint(2,6)
        w = randint(2,6)
        contents = []
        for i in range(h*w):
            contents.append(ZI(randint(-10,10), randint(-10,10)))
        A = Matrix(h,w,contents)
        s,j,t = snf(A)
        print(s, j, t)
        assert s*A*t==j
        assert s.determinant().isUnit()
        assert t.determinant().isUnit()
        for i in range(min(j.h, j.w)-1):
            assert (j.get(i+1,i+1) % j.get(i,i)) == j.get(i,i).getZero()
Example #7
def NAGFS(train_data, train_Labels, Nf, displayResults):

    XC1 = np.empty((0, train_data.shape[2], train_data.shape[2]), int)
    XC2 = np.empty((0, train_data.shape[2], train_data.shape[2]), int)

    # * * (5.1) In this part, the training samples chosen in the previous step are separated into class-1 and class-2 samples.
    for i in range(len(train_Labels)):

        if (train_Labels[i] == 1):
            XC1 = np.append(XC1, [train_data[i, :, :]], axis=0)
        else:
            XC2 = np.append(XC2, [train_data[i, :, :]], axis=0)

# * *

# * * (5.2) SIMLR functions need matrices of shape 1x(N*N), so all training matrices are converted to this shape.

#For C1 group
    k = np.empty((0, XC1.shape[1] * XC1.shape[1]), int)
    for i in range(XC1.shape[0]):
        k1 = np.concatenate(XC1[i])  # vectorize each NxN matrix
        k = np.append(k, [k1.reshape(XC1.shape[1] * XC1.shape[1])], axis=0)

# For C2 group
    kk = np.empty((0, XC2.shape[1] * XC2.shape[1]), int)
    for i in range(XC2.shape[0]):
        kk1 = np.concatenate(XC2[i])
        kk = np.append(kk, [kk1.reshape(XC2.shape[1] * XC2.shape[1])], axis=0)

# * *

# * * (5.3) SIMLR (Single-cell Interpretation via Multi-kernel Learning) is used to cluster the samples of each of the 2 classes into 3 clusters.

#For C1 group
#[t1, S2, F2, ydata2,alpha2] = SIMLR(kk,3,2);
    simlr = SIMLR.SIMLR_LARGE(
        3, 4, 0
    )  # Initialize a SIMLR object: the first argument is the number of clusters (rank), the second the number of neighbors, and the third a binary flag for memory-saving mode (useful when the number of cells is extremely large, at the cost of efficiency).
    S1, F1, val1, ind1 = simlr.fit(k)
    y_pred_X1 = simlr.fast_minibatch_kmeans(
        F1, 3
    )  # Predict which cluster (0, 1 or 2) each vectorized training sample belongs to.

    # For C2 group
    simlr = SIMLR.SIMLR_LARGE(3, 4, 0)
    S2, F2, val2, ind2 = simlr.fit(kk)
    y_pred_X2 = simlr.fast_minibatch_kmeans(F2, 3)

    # * *

    # * * (5.4) Training samples are placed into their predicted clusters for Class-1 and Class-2 samples.
    #For XC1, +1 k
    Ca1 = np.empty((0, XC1.shape[2], XC1.shape[2]), int)
    Ca2 = np.empty((0, XC1.shape[2], XC1.shape[2]), int)
    Ca3 = np.empty((0, XC1.shape[2], XC1.shape[2]), int)

    for i in range(len(y_pred_X1)):
        if y_pred_X1[i] == 0:
            Ca1 = np.append(Ca1, [XC1[i, :, :]], axis=0)
            Ca1 = np.abs(Ca1)
        elif y_pred_X1[i] == 1:
            Ca2 = np.append(Ca2, [XC1[i, :, :]], axis=0)
            Ca2 = np.abs(Ca2)
        elif y_pred_X1[i] == 2:
            Ca3 = np.append(Ca3, [XC1[i, :, :]], axis=0)
            Ca3 = np.abs(Ca3)

#For XC2, -1 kk
    Cn1 = np.empty((0, XC2.shape[2], XC2.shape[2]), int)
    Cn2 = np.empty((0, XC2.shape[2], XC2.shape[2]), int)
    Cn3 = np.empty((0, XC2.shape[2], XC2.shape[2]), int)

    for i in range(len(y_pred_X2)):
        if y_pred_X2[i] == 0:
            Cn1 = np.append(Cn1, [XC2[i, :, :]], axis=0)
            Cn1 = np.abs(Cn1)
        elif y_pred_X2[i] == 1:
            Cn2 = np.append(Cn2, [XC2[i, :, :]], axis=0)
            Cn2 = np.abs(Cn2)
        elif y_pred_X2[i] == 2:
            Cn3 = np.append(Cn3, [XC2[i, :, :]], axis=0)
            Cn3 = np.abs(Cn3)

# * *

#SNF PROCESS
# * * (5.5) SNF (Similarity Network Fusion) is used to create a local centered network atlas, i.e. the matrix that best represents a group of similar matrices.
# For every class there are 3 clusters, so SNF creates 3 local representative matrices per class and then fuses them into 1 general representative matrix,
# giving 2 general representative matrices in total.

#Ca1
    class1 = []
    if Ca1.shape[0] > 1:
        for i in range(Ca1.shape[0]):
            class1.append(Ca1[i, :, :])
        affinity_networks = snf.make_affinity(class1,
                                              metric='euclidean',
                                              K=20,
                                              mu=0.5)
        AC11 = snf.snf(affinity_networks,
                       K=20)  #First local network atlas for C1 group
        class1 = []
    else:
        AC11 = Ca1[0]

    #Ca2
    class1 = []
    if Ca2.shape[0] > 1:
        for i in range(Ca2.shape[0]):
            class1.append(Ca2[i, :, :])
        affinity_networks = snf.make_affinity(class1,
                                              metric='euclidean',
                                              K=20,
                                              mu=0.5)
        AC12 = snf.snf(affinity_networks,
                       K=20)  #Second local network atlas for C1 group
        class1 = []
    else:
        AC12 = Ca2[0]

    #Ca3
    class1 = []
    if Ca3.shape[0] > 1:
        for i in range(Ca3.shape[0]):
            class1.append(Ca3[i, :, :])
        affinity_networks = snf.make_affinity(class1,
                                              metric='euclidean',
                                              K=20,
                                              mu=0.5)
        AC13 = snf.snf(affinity_networks,
                       K=20)  #Third local network atlas for C1 group
        class1 = []
    else:
        AC13 = Ca3[0]

    #Cn1
    if Cn1.shape[0] > 1:
        class1 = []
        for i in range(Cn1.shape[0]):
            class1.append(Cn1[i, :, :])
        affinity_networks = snf.make_affinity(class1,
                                              metric='euclidean',
                                              K=20,
                                              mu=0.5)
        AC21 = snf.snf(affinity_networks,
                       K=20)  #First local network atlas for C2 group
        class1 = []
    else:
        AC21 = Cn1[0]

    #Cn2
    class1 = []
    if Cn2.shape[0] > 1:
        for i in range(Cn2.shape[0]):
            class1.append(Cn2[i, :, :])
        affinity_networks = snf.make_affinity(class1,
                                              metric='euclidean',
                                              K=20,
                                              mu=0.5)
        AC22 = snf.snf(affinity_networks,
                       K=20)  #Second local network atlas for C2 group
        class1 = []
    else:
        AC22 = Cn2[0]

    #Cn3
    class1 = []
    if Cn3.shape[0] > 1:
        for i in range(Cn3.shape[0]):
            class1.append(Cn3[i, :, :])
        affinity_networks = snf.make_affinity(class1,
                                              metric='euclidean',
                                              K=20,
                                              mu=0.5)
        AC23 = snf.snf(affinity_networks,
                       K=20)  #Third local network atlas for C2 group
        class1 = []
    else:
        AC23 = Cn3[0]

    #A1
    AC1 = snf.snf([AC11, AC12, AC13], K=20)  #Global network atlas for C1 group

    #A2
    AC2 = snf.snf([AC21, AC22, AC23], K=20)  #Global network atlas for C2 group

    # * *

    # * * (5.6) In this part, the Nf most discriminative connectivities are determined and their indices are saved in the ind array.

    D0 = np.abs(AC1 - AC2)  #find differences between AC1 and AC2
    D = np.triu(D0)  #Upper triangular part of matrix
    D1 = D[np.triu_indices(AC1.shape[0], 1)]  # Vector of strictly upper-triangular entries
    D1 = D1.transpose()
    D2 = np.sort(D1)  #Ranking features
    D2 = D2[::-1]
    Dif = D2[0:Nf]  # Extract the Nf most discriminative connectivities
    D3 = []
    for i in D1:
        D3.append(i)
    ind = []
    for i in range(len(Dif)):
        ind.append(D3.index(Dif[i]))
# * *

# * * (5.7) Coordinates of the Nf most discriminative features are determined for plotting (when displayResults == 1).

    coord = []
    for i in range(len(Dif)):
        for j in range(D0.shape[0]):
            for k in range(D0.shape[1]):
                if Dif[i] == D0[j][k]:
                    coord.append([j, k])

    topFeatures = np.zeros((D0.shape[0], D0.shape[1]))
    s = 0
    ss = 0
    for i in range(len(Dif) * 2):
        topFeatures[coord[i][0]][coord[i][1]] = Dif[s]
        ss += 1
        if ss == 2:
            s += 1
            ss = 0
    if displayResults == 1:
        plt.imshow(topFeatures)
        plt.colorbar()
        plt.show()
# * *

    return AC1, AC2, ind
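A hedged end-to-end sketch on synthetic connectomes; it assumes numpy, snf, SIMLR and matplotlib are importable exactly as used inside NAGFS, and the data sizes are arbitrary:

# Hedged usage sketch with synthetic symmetric connectivity matrices.
import numpy as np

rng = np.random.default_rng(0)
train_data = np.abs(rng.standard_normal((40, 35, 35)))          # 40 subjects, 35x35 connectomes
train_data = (train_data + train_data.transpose(0, 2, 1)) / 2   # symmetrize each matrix
train_Labels = np.array([1] * 20 + [-1] * 20)

AC1, AC2, ind = NAGFS(train_data, train_Labels, Nf=5, displayResults=0)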
Example #8
def atlas(train_data, train_labels):
# Disentangling the heterogeneous distribution of the input networks using the SIMLR clustering method
    z = np.zeros((1,1))
    k = np.zeros((len(train_labels), len(train_data[1])*len(train_data[1])))

    for i in range(0, len(train_labels)):

        k1 = train_data[i][:][:]
        k2 = np.zeros((0, 1))
        #vectorizing k1 into 1D vector k2.
        for ii in range(0, len(train_data[0])):
            for jj in range(0, len(train_data[0])):
                z[0,0] = k1[jj,ii]
                k2 = np.append(k2, z, axis=0)

        k2 = np.transpose(k2)

        for h in range(0, len(train_data[1])*len(train_data[1])):
            k[i][h] = k2[0][h]

    K = 4 # number of neighbors
    simlr = SIMLR.SIMLR_LARGE(2,K,0) # Initialize a SIMLR object: the first argument is the number of clusters (rank), the second the number of neighbors, and the third a binary flag for memory-saving mode (useful when the number of cells is extremely large, at the cost of efficiency).
    S1, F1, val1, ind1 = simlr.fit(k)
    y_pred_X1 = simlr.fast_minibatch_kmeans(F1,2)

    # After using SIMLR, we extract each cluster independently
    C1 = np.zeros((0, len(train_data[1]), len(train_data[1]))) # initialize cluster1
    C2 = np.zeros((0, len(train_data[1]), len(train_data[1]))) # initialize cluster2
    for y in range(0, len(train_labels)):
        if y_pred_X1[y] == 0:
            C1 = np.append(C1, np.abs([train_data[y][:][:]]), axis=0)
        elif y_pred_X1[y] == 1:
            C2 = np.append(C2, np.abs([train_data[y][:][:]]), axis=0)

    # For each cluster, we non-linearly diffuse and fuse all networks into a local cluster-specific CBT using SNF

    # Setting all the parameters.
    alpha = 0.5 #hyperparameter, usually (0.3~0.8)
    T = 20 #Number of Iterations, usually (10~20)

    def network_atlas(C, K, alpha, T):
        datap = []
        for i in range(len(C)):
            datap.append(C[i][:][:])
        affinity_networks = snf.make_affinity(datap, K=K, mu=alpha) #The first step in SNF is converting these data arrays into similarity (or "affinity") networks.
        fused_network_atlas_C = snf.snf(affinity_networks, K=K, alpha=alpha, t=T) # Once we have our similarity networks we can fuse them together.
        return fused_network_atlas_C

    if C1.shape[0] > 1:
        atlas_c1 = network_atlas(C1, K, alpha, T) # First cluster-specific CBT
    else:
        atlas_c1 = C1[0][:][:]

    if C2.shape[0] > 1:
        atlas_c2 = network_atlas(C2, K, alpha, T) # Second cluster-specific CBT
    else:
        atlas_c2 = C2[0][:][:]

    # SNF

    CBT = snf.snf([atlas_c1, atlas_c2], K=K, t=T) # Global connectional brain template
    return CBT
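A hedged usage sketch; it assumes numpy, snf and SIMLR are importable as used inside atlas, with arbitrary synthetic sizes:

# Hedged usage sketch with synthetic symmetric connectivity matrices.
import numpy as np

rng = np.random.default_rng(0)
train_data = np.abs(rng.standard_normal((30, 35, 35)))          # 30 subjects, 35x35 connectomes
train_data = (train_data + train_data.transpose(0, 2, 1)) / 2
train_labels = np.array([1] * 15 + [-1] * 15)

CBT = atlas(train_data, train_labels)   # (35, 35) connectional brain template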
Example #9
wall_label = wall_label[~wall_label.index.duplicated(keep="first")]


"""
    Step1 : Apply the original SNF on the common samples, and the score will be a reference point
"""
w1_com = w1.filter(regex="^common_", axis=0)
w2_com = w2.filter(regex="^common_", axis=0)

# need to make sure the order of the common samples is the same for all views before fusing
dist1_com = dist2(w1_com.values, w1_com.values)
dist2_com = dist2(w2_com.values, w2_com.values)
S1_com = snf.compute.affinity_matrix(dist1_com, K=args.neighbor_size, mu=args.mu)
S2_com = snf.compute.affinity_matrix(dist2_com, K=args.neighbor_size, mu=args.mu)

fused_network = snf.snf([S1_com, S2_com], t=10, K=20)
labels_com = spectral_clustering(fused_network, n_clusters=10)
score_com = v_measure_score(wcom_label["label"].tolist(), labels_com)
print("Original SNF for clustering intersecting 832 samples NMI score: ", score_com)

# Do SNF2 diffusion
(
    dicts_common,
    dicts_commonIndex,
    dict_sampleToIndexs,
    dicts_unique,
    original_order,
) = data_indexing([w1_com, w2_com])
S1_df = pd.DataFrame(data=S1_com, index=original_order[0], columns=original_order[0])
S2_df = pd.DataFrame(data=S2_com, index=original_order[1], columns=original_order[1])
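For reference, a hedged sketch of the same common-sample score computed with the snfpy convenience wrapper instead of the explicit dist2/affinity_matrix calls (assuming w1_com and w2_com keep an identical row order):

# Hedged alternative computation of the reference fusion above.
aff_common = snf.make_affinity([w1_com.values, w2_com.values],
                               K=args.neighbor_size, mu=args.mu)
fused_ref = snf.snf(aff_common, K=20, t=10)
labels_ref = spectral_clustering(fused_ref, n_clusters=10)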
Example #10
def netNorm(v, nbr_of_sub, nbr_of_regions):
    nbr_of_feat = int((np.square(nbr_of_regions) - nbr_of_regions) / 2)
    nbr_of_views = len(v)  # number of views; used throughout the nested helpers below

    def minmax_sc(x):
        min_max_scaler = preprocessing.MinMaxScaler()
        x = min_max_scaler.fit_transform(x)
        return x

    def upper_triangular():
        All_subj = np.zeros((nbr_of_sub, len(v), nbr_of_feat))
        for i in range(len(v)):
            for j in range(nbr_of_sub):

                subj_x = v[i, j, :, :]
                subj_x = np.reshape(subj_x, (nbr_of_regions, nbr_of_regions))
                subj_x = minmax_sc(subj_x)
                subj_x = subj_x[np.triu_indices(nbr_of_regions, k=1)]
                subj_x = np.reshape(subj_x, (1, 1, nbr_of_feat))
                All_subj[j, i, :] = subj_x

        return All_subj

    def distances_inter(All_subj):
        theta = 0
        distance_vector = np.zeros(1)
        distance_vector_final = np.zeros(1)
        x = All_subj
        for i in range(nbr_of_feat):  # with respect to the number of ROIs
            ROI_i = x[:, :, i]
            ROI_i = np.reshape(ROI_i, (nbr_of_sub, nbr_of_views))  #1,3
            for j in range(nbr_of_sub):
                subj_j = ROI_i[j:j + 1, :]
                subj_j = np.reshape(subj_j, (1, nbr_of_views))
                for k in range(nbr_of_sub):
                    if k != j:
                        subj_k = ROI_i[k:k + 1, :]
                        subj_k = np.reshape(subj_k, (1, nbr_of_views))

                        for l in range(nbr_of_views):
                            if l == 0:
                                distance_euclidienne_sub_j_sub_k = np.square(
                                    subj_k[:, l:l + 1] - subj_j[:, l:l + 1])
                            else:
                                distance_euclidienne_sub_j_sub_k = distance_euclidienne_sub_j_sub_k + np.square(
                                    subj_k[:, l:l + 1] - subj_j[:, l:l + 1])

                            theta += 1
                if j == 0:
                    distance_vector = np.sqrt(distance_euclidienne_sub_j_sub_k)
                else:
                    distance_vector = np.concatenate(
                        (distance_vector,
                         np.sqrt(distance_euclidienne_sub_j_sub_k)),
                        axis=0)

            if i == 0:
                distance_vector_final = distance_vector
            else:
                distance_vector_final = np.concatenate(
                    (distance_vector_final, distance_vector), axis=1)

        print(theta)
        return distance_vector_final

    def minimum_distances(distance_vector_final):
        x = distance_vector_final

        for i in range(nbr_of_feat):
            for j in range(nbr_of_sub):
                minimum_sub = x[j:j + 1, i:i + 1]
                minimum_sub = float(minimum_sub)
                for k in range(nbr_of_sub):
                    if k != j:
                        local_sub = x[k:k + 1, i:i + 1]
                        local_sub = float(local_sub)
                        if local_sub < minimum_sub:
                            general_minimum = k
                            general_minimum = np.array(general_minimum)
            if i == 0:
                final_general_minimum = np.array(general_minimum)
            else:
                final_general_minimum = np.vstack(
                    (final_general_minimum, general_minimum))

        final_general_minimum = np.transpose(final_general_minimum)

        return final_general_minimum

    def new_tensor(final_general_minimum, All_subj):
        y = All_subj
        x = final_general_minimum
        for i in range(nbr_of_feat):
            optimal_subj = x[:, i:i + 1]
            optimal_subj = np.reshape(optimal_subj, (1))
            optimal_subj = int(optimal_subj)
            if i == 0:
                final_new_tensor = y[optimal_subj:optimal_subj + 1, :, i:i + 1]
            else:
                final_new_tensor = np.concatenate(
                    (final_new_tensor, y[optimal_subj:optimal_subj + 1, :,
                                         i:i + 1]),
                    axis=2)

        return final_new_tensor

    def make_sym_matrix(nbr_of_regions, feature_vector):

        my_matrix = np.zeros([nbr_of_regions, nbr_of_regions], dtype=np.double)

        my_matrix[np.triu_indices(nbr_of_regions, k=1)] = feature_vector
        my_matrix = my_matrix + my_matrix.T
        my_matrix[np.diag_indices(nbr_of_regions)] = 0

        return my_matrix

    def re_make_tensor(final_new_tensor, nbr_of_regions):
        x = final_new_tensor
        x = np.reshape(x, (nbr_of_views, nbr_of_feat))
        for i in range(nbr_of_views):
            view_x = x[i, :]
            view_x = np.reshape(view_x, (1, nbr_of_feat))
            view_x = make_sym_matrix(nbr_of_regions, view_x)
            view_x = np.reshape(view_x, (1, nbr_of_regions, nbr_of_regions))
            if i == 0:
                tensor_for_snf = view_x
            else:
                tensor_for_snf = np.concatenate((tensor_for_snf, view_x),
                                                axis=0)
        return tensor_for_snf

    def create_list(tensor_for_snf):
        x = tensor_for_snf
        for i in range(nbr_of_views):
            view = x[i, :, :]
            view = np.reshape(view, (nbr_of_regions, nbr_of_regions))
            if i == 0:
                list_final = [view]
            else:
                list_final = list_final + [view]
        return list_final

    def cross_subjects_cbt(fused_network, nbr_of_exemples):
        final_cbt = np.zeros((nbr_of_exemples, nbr_of_feat))
        x = fused_network
        x = x[np.triu_indices(nbr_of_regions, k=1)]
        x = np.reshape(x, (1, nbr_of_feat))
        for i in range(nbr_of_exemples):
            final_cbt[i, :] = x

        return final_cbt

    Upp_trig = upper_triangular()
    Dis_int = distances_inter(Upp_trig)
    Min_dis = minimum_distances(Dis_int)
    New_ten = new_tensor(Min_dis, Upp_trig)
    Re_ten = re_make_tensor(New_ten, nbr_of_regions)
    Cre_lis = create_list(Re_ten)
    fused_network = snf.snf((Cre_lis), K=20)
    fused_network = minmax_sc(fused_network)
    np.fill_diagonal(fused_network, 0)
    fused_network = np.array(fused_network)
    return fused_network
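A hedged usage sketch with a synthetic multi-view tensor; it assumes numpy, sklearn.preprocessing and snf are importable as used inside netNorm, and the sizes are arbitrary:

# Hedged usage sketch: 3 views, 10 subjects, 35 ROIs.
import numpy as np

rng = np.random.default_rng(0)
v = np.abs(rng.standard_normal((3, 10, 35, 35)))
v = (v + v.transpose(0, 1, 3, 2)) / 2               # symmetrize each connectome
cbt = netNorm(v, nbr_of_sub=10, nbr_of_regions=35)  # (35, 35) fused network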
Example #11
def main(multiMatPath, useGenePath, outPath):
    """
    In short, we first separately calculate euclidean mat for dimension reduced gene expression mat, APA mat and spliced mat.
    then use SNF method fusion these matrices, the resulting mat can be load to clustering algorithms, such as leiden.
    This method is inspired by scLAPA (https://github.com/BMILAB/scLAPA)

    we have not verified this method and it is a pre-release version.

    multiMatPath:
        multilayer mat path
    useGenePath:
        gene used for calculating correlation matrix. if not provided, all gene will be used. NO HEADER, NO INDEX
        e.g:
            AT1G01010
            AT1G01020
            AT1G01030
    outPath:
        prefix of output file containing fused connectivities matrix and leiden clustering result.
        matrix: fused eulidean mat npy format, could be loaded by numpy.load function
    """
    def __calSimMat(adata):
        sc.pp.scale(adata, max_value=10)
        sc.tl.pca(adata, svd_solver="arpack", n_comps=50)
        sc.pp.neighbors(adata, n_pcs=30)

    adata = updateOldMultiAd(sc.read_10x_mtx(multiMatPath))

    if useGenePath:
        useGeneLs = pd.read_table(useGenePath, header=None,
                                  names=["gene"])["gene"]
        adata = adata[:, useGeneLs]

    spliceAd = getMatFromObsm(adata,
                              "Spliced",
                              adata.var.index,
                              ignoreN=True,
                              clear=True)

    apaAd = getMatFromObsm(adata,
                           "APA",
                           adata.var.index,
                           ignoreN=True,
                           clear=True)

    abunAd = getMatFromObsm(adata,
                            "Abundance",
                            adata.var.index,
                            ignoreN=True,
                            clear=True)

    [__calSimMat(x) for x in [spliceAd, apaAd, abunAd]]

    similarityMat = [
        x.obsp["connectivities"].A for x in [abunAd, apaAd, spliceAd]
    ]

    fusedMat = snf.snf(similarityMat, K=20, alpha=0.5, t=10)
    np.save(f"{outPath}_fusedMat.npy", fusedMat)

    sc.tl.leiden(adata, adjacency=fusedMat, key_added="leiden_fused")

    clusterDf = adata.obs[["leiden_fused"]]
    clusterDf.to_csv(f"{outPath}_leiden_resolution_1.0.tsv", sep="\t")
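A hedged sketch of the core fusion step in isolation, on toy AnnData layers (the toy data and parameter values are assumptions, not from the original pipeline):

# Hedged sketch: build per-layer kNN connectivity graphs with scanpy, then fuse them with SNF.
import numpy as np
import scanpy as sc
import snf

rng = np.random.default_rng(0)
layers = []
for _ in range(3):
    ad = sc.AnnData(rng.random((100, 50)))    # toy layer: 100 cells x 50 features
    sc.pp.scale(ad, max_value=10)
    sc.tl.pca(ad, svd_solver="arpack", n_comps=20)
    sc.pp.neighbors(ad, n_pcs=20)
    layers.append(ad)

connectivities = [ad.obsp["connectivities"].A for ad in layers]
fusedMat = snf.snf(connectivities, K=20, alpha=0.5, t=10)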
Example #12
def BGSR(train_data, train_labels, HR_features, kn, K1, K2):

    # These reproduce the closeness, degrees and isDirected functions of aeolianine, since I couldn't find compatible functions in Python.
    def isDirected(adj):

        adj_transpose = np.transpose(adj)
        for i in range(len(adj)):
            for j in range(len(adj[0])):
                if adj[j][i] != adj_transpose[j][i]:
                    return True
        return False

    def degrees(adj):

        indeg = np.sum(adj, axis=1)
        outdeg = np.sum(np.transpose(adj), axis=0)
        if isDirected(adj):
            deg = indeg + outdeg  #total degree
        else:  #undirected graph: indeg=outdeg
            deg = indeg + np.transpose(
                np.diag(adj))  #add self-loops twice, if any

        return deg

    def closeness(G, adj):

        c = np.zeros((len(adj), 1))
        all_sum = np.zeros((len(adj[1]), 1))
        spl = nx.all_pairs_dijkstra_path_length(G, weight="weight")
        spl_list = list(spl)
        for i in range(len(adj[1])):
            spl_dict = spl_list[i][1]
            c[i] = 1 / sum(spl_dict.values())
        return c

    sz1, sz2, sz3 = train_data.shape

    # (1) Estimation of a connectional brain template (CBT)
    CBT = atlas(train_data, train_labels)

    # (2) Proposed CBT-guided graph super-resolution
    # Initialization
    c_degree = np.zeros((sz1, sz2))
    c_closeness = np.zeros((sz1, sz2))
    c_betweenness = np.zeros((sz1, sz2))
    residual = np.zeros(
        (len(train_data), len(train_data[1]), len(train_data[1])))

    for i in range(sz1):

        residual[i][:][:] = np.abs(train_data[i][:][:] -
                                   CBT)  #Residual brain graph
        G = nx.from_numpy_matrix(np.array(residual[i][:][:]))
        for j in range(0, sz2):
            c_degree[i][j] = degrees(residual[i][:][:])[j]  #Degree matrix
            c_closeness[i][j] = closeness(
                G, residual[i][:][:])[j]  #Closeness matrix
            c_betweenness[i][j] = nx.betweenness_centrality(
                G, weight="weight")[j]  #Betweenness matrix

    # Degree similarity matrix
    simlr1 = SIMLR.SIMLR_LARGE(
        1, K1, 0
    )  # The first argument is the number of clusters (rank), the second the number of neighbors, and the third a binary flag for memory-saving mode (useful when the number of cells is extremely large, at the cost of efficiency).
    S1, F1, val1, ind1 = simlr1.fit(c_degree)
    y_pred_X1 = simlr1.fast_minibatch_kmeans(F1, 1)

    # Closeness similarity matrix
    simlr2 = SIMLR.SIMLR_LARGE(1, K1, 0)
    S2, F2, val2, ind2 = simlr2.fit(c_closeness)
    y_pred_X2 = simlr2.fast_minibatch_kmeans(F2, 1)

    # Betweenness similarity matrix
    if not np.count_nonzero(c_betweenness):
        S3 = np.zeros((len(c_betweenness), len(c_betweenness)))
    else:
        simlr3 = SIMLR.SIMLR_LARGE(1, K1, 0)
        S3, F3, val3, ind3 = simlr3.fit(c_betweenness)
        y_pred_X3 = simlr3.fast_minibatch_kmeans(F3, 1)

    alpha = 0.5  # hyperparameter, usually (0.3~0.8)
    T = 20  # Number of Iterations, usually (10~20)

    wp1 = snf.make_affinity(S1.toarray(), K=K2, mu=alpha)
    wp2 = snf.make_affinity(S2.toarray(), K=K2, mu=alpha)
    wp3 = snf.make_affinity(S3, K=K2, mu=alpha)
    FSM = snf.snf([wp1, wp2, wp3], K=K2, alpha=alpha,
                  t=T)  #Fused similarity matrix
    FSM_sorted = np.sort(FSM, axis=0)

    ind = np.zeros((kn, 1))
    HR_ind = np.zeros((kn, len(HR_features[1])))
    for i in range(1, kn + 1):
        a, b, pos = np.intersect1d(FSM_sorted[len(FSM_sorted) - i][0],
                                   FSM,
                                   return_indices=True)
        ind[i - 1] = pos
        for j in range(len(HR_features[1])):
            HR_ind[i - 1][j] = HR_features[int(ind[i - 1][0])][j]

    pHR = np.mean(HR_ind, axis=0)  # Predicted features of the testing subject
    return pHR
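A hedged end-to-end sketch; it assumes the atlas function from Example #8, plus numpy, networkx, snf and SIMLR, are importable as used inside BGSR, and the feature dimensions are arbitrary:

# Hedged usage sketch with synthetic low-resolution connectomes and high-resolution features.
import numpy as np

rng = np.random.default_rng(0)
train_data = np.abs(rng.standard_normal((30, 35, 35)))          # 30 subjects, 35x35 LR connectomes
train_data = (train_data + train_data.transpose(0, 2, 1)) / 2
train_labels = np.array([1] * 15 + [-1] * 15)
HR_features = rng.random((30, 595))                             # HR feature vectors (arbitrary dimension)

pHR = BGSR(train_data, train_labels, HR_features, kn=5, K1=10, K2=10)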