Example #1
import numpy as np
import torch
import SIMLR


def learn_adj(x):
    """Learn a dense similarity/adjacency matrix for a batch of samples with SIMLR."""
    # Convert the batch of torch tensors into a single numpy array.
    y = []
    for t in x:
        b = t.numpy()
        y.append(b)

    x = np.array(y)
    batchsize = x.shape[0]
    # SIMLR_LARGE(number of ranks/clusters, number of neighbors, memory-saving flag).
    simlr = SIMLR.SIMLR_LARGE(1, batchsize // 3, 0)
    adj, _, _, _ = simlr.fit(x)
    array = adj.toarray()
    tensor = torch.Tensor(array)

    return tensor
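A minimal usage sketch for learn_adj follows; the batch shape, the random input, and the printed shape are illustrative assumptions rather than values from the original project.

# Illustrative smoke test only: random features stand in for real vectorized graphs.
import torch

x = torch.rand(100, 595)   # assumed batch: 100 samples with 595 features each
adj = learn_adj(x)         # SIMLR-learned similarity/adjacency matrix
print(adj.shape)           # expected: torch.Size([100, 100])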
Example #2
    def fit_predict(self, matrix):
        X = matrix

        # Reduce wide inputs to self.pca_components features with PCA.
        if X.shape[1] > 500:
            # fast_pca assumes the number of cells is > 500, hence the try/except.
            try:
                X = SIMLR.helper.fast_pca(X, self.pca_components)
            except Exception:
                pass
        # Run SIMLR and return the learned latent embedding F.
        simlr = SIMLR.SIMLR_LARGE(num_of_rank=self.n_components,
                                  num_of_neighbor=self.n_neighbours,
                                  max_iter=self.max_iter)
        S, F, val, ind = simlr.fit(X)
        return F
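fit_predict above is written as a method, so it assumes a host class that stores the SIMLR hyper-parameters. A minimal hypothetical host class is sketched below, with attribute names inferred from the method body and purely illustrative defaults.

class SimlrEmbedder:
    """Hypothetical host class; only the attributes read by fit_predict are defined."""

    def __init__(self, n_components=2, n_neighbours=30, max_iter=10, pca_components=500):
        self.n_components = n_components      # SIMLR rank / number of clusters
        self.n_neighbours = n_neighbours      # neighborhood size used by SIMLR
        self.max_iter = max_iter              # maximum SIMLR iterations
        self.pca_components = pca_components  # target dimensionality for fast_pca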
Example #3
import numpy as np
import matplotlib.pyplot as plt
import SIMLR
import snf


def NAGFS(train_data, train_Labels, Nf, displayResults):

    XC1 = np.empty((0, train_data.shape[2], train_data.shape[2]), int)
    XC2 = np.empty((0, train_data.shape[2], train_data.shape[2]), int)

    # * * (5.1) In this part, the training samples chosen in the previous step are separated into class-1 and class-2 samples.
    for i in range(len(train_Labels)):

        if (train_Labels[i] == 1):
            XC1 = np.append(XC1, [train_data[i, :, :]], axis=0)
        else:
            XC2 = np.append(XC2, [train_data[i, :, :]], axis=0)

    # * *

    # * * (5.2) SIMLR expects each sample as a 1x(N*N) row vector, so all training matrices are flattened to this shape.

    # For the C1 group
    k = np.empty((0, XC1.shape[1] * XC1.shape[1]), int)
    for i in range(XC1.shape[0]):
        k1 = np.concatenate(XC1[i])  # vectorize the NxN matrix
        k = np.append(k, [k1.reshape(XC1.shape[1] * XC1.shape[1])], axis=0)

    # For the C2 group
    kk = np.empty((0, XC2.shape[1] * XC2.shape[1]), int)
    for i in range(XC2.shape[0]):
        kk1 = np.concatenate(XC2[i])
        kk = np.append(kk, [kk1.reshape(XC2.shape[1] * XC2.shape[1])], axis=0)

    # * *

    # * * (5.3) SIMLR (Single-Cell Interpretation via Multi-Kernel Learning) is used to cluster the samples of each class into 3 clusters.

    # For the C1 group
    # [t1, S2, F2, ydata2, alpha2] = SIMLR(kk, 3, 2);
    # SIMLR_LARGE(number of ranks/clusters, number of neighbors, memory-saving flag); the memory-saving
    # mode trades efficiency for lower memory use when the number of samples is extremely large.
    simlr = SIMLR.SIMLR_LARGE(3, 4, 0)
    S1, F1, val1, ind1 = simlr.fit(k)
    # fast_minibatch_kmeans assigns each training 1x(N*N) sample to cluster 0, 1 or 2.
    y_pred_X1 = simlr.fast_minibatch_kmeans(F1, 3)

    # For C2 group
    simlr = SIMLR.SIMLR_LARGE(3, 4, 0)
    S2, F2, val2, ind2 = simlr.fit(kk)
    y_pred_X2 = simlr.fast_minibatch_kmeans(F2, 3)

    # * *

    # * * (5.4) Training samples are placed into their predicted clusters for Class-1 and Class-2 samples.
    # For XC1 (class +1, vectorized as k)
    Ca1 = np.empty((0, XC1.shape[2], XC1.shape[2]), int)
    Ca2 = np.empty((0, XC1.shape[2], XC1.shape[2]), int)
    Ca3 = np.empty((0, XC1.shape[2], XC1.shape[2]), int)

    for i in range(len(y_pred_X1)):
        if y_pred_X1[i] == 0:
            Ca1 = np.append(Ca1, [XC1[i, :, :]], axis=0)
            Ca1 = np.abs(Ca1)
        elif y_pred_X1[i] == 1:
            Ca2 = np.append(Ca2, [XC1[i, :, :]], axis=0)
            Ca2 = np.abs(Ca2)
        elif y_pred_X1[i] == 2:
            Ca3 = np.append(Ca3, [XC1[i, :, :]], axis=0)
            Ca3 = np.abs(Ca3)

    # For XC2 (class -1, vectorized as kk)
    Cn1 = np.empty((0, XC2.shape[2], XC2.shape[2]), int)
    Cn2 = np.empty((0, XC2.shape[2], XC2.shape[2]), int)
    Cn3 = np.empty((0, XC2.shape[2], XC2.shape[2]), int)

    for i in range(len(y_pred_X2)):
        if y_pred_X2[i] == 0:
            Cn1 = np.append(Cn1, [XC2[i, :, :]], axis=0)
            Cn1 = np.abs(Cn1)
        elif y_pred_X2[i] == 1:
            Cn2 = np.append(Cn2, [XC2[i, :, :]], axis=0)
            Cn2 = np.abs(Cn2)
        elif y_pred_X2[i] == 2:
            Cn3 = np.append(Cn3, [XC2[i, :, :]], axis=0)
            Cn3 = np.abs(Cn3)

    # * *

    # SNF PROCESS
    # * * (5.5) SNF (Similarity Network Fusion) is used to create a local centered network atlas, i.e. the matrix that
    # best represents a set of similar matrices. For each class there are 3 clusters, so SNF first creates 3 cluster-level
    # representative matrices per class, and then fuses them into 1 global representative matrix per class.
    # So finally there are 2 global representative matrices.

    # Ca1
    class1 = []
    if Ca1.shape[0] > 1:
        for i in range(Ca1.shape[0]):
            class1.append(Ca1[i, :, :])
        affinity_networks = snf.make_affinity(class1,
                                              metric='euclidean',
                                              K=20,
                                              mu=0.5)
        AC11 = snf.snf(affinity_networks,
                       K=20)  #First local network atlas for C1 group
        class1 = []
    else:
        AC11 = Ca1[0]

    #Ca2
    class1 = []
    if Ca2.shape[0] > 1:
        for i in range(Ca2.shape[0]):
            class1.append(Ca2[i, :, :])
        affinity_networks = snf.make_affinity(class1,
                                              metric='euclidean',
                                              K=20,
                                              mu=0.5)
        AC12 = snf.snf(affinity_networks,
                       K=20)  #Second local network atlas for C1 group
        class1 = []
    else:
        AC12 = Ca2[0]

    #Ca3
    class1 = []
    if Ca3.shape[0] > 1:
        for i in range(Ca3.shape[0]):
            class1.append(Ca3[i, :, :])
        affinity_networks = snf.make_affinity(class1,
                                              metric='euclidean',
                                              K=20,
                                              mu=0.5)
        AC13 = snf.snf(affinity_networks,
                       K=20)  #Third local network atlas for C1 group
        class1 = []
    else:
        AC13 = Ca3[0]

    #Cn1
    if Cn1.shape[0] > 1:
        class1 = []
        for i in range(Cn1.shape[0]):
            class1.append(Cn1[i, :, :])
        affinity_networks = snf.make_affinity(class1,
                                              metric='euclidean',
                                              K=20,
                                              mu=0.5)
        AC21 = snf.snf(affinity_networks,
                       K=20)  #First local network atlas for C2 group
        class1 = []
    else:
        AC21 = Cn1[0]

    #Cn2
    class1 = []
    if Cn2.shape[0] > 1:
        for i in range(Cn2.shape[0]):
            class1.append(Cn2[i, :, :])
        affinity_networks = snf.make_affinity(class1,
                                              metric='euclidean',
                                              K=20,
                                              mu=0.5)
        AC22 = snf.snf(affinity_networks,
                       K=20)  #Second local network atlas for C2 group
        class1 = []
    else:
        AC22 = Cn2[0]

    #Cn3
    class1 = []
    if Cn3.shape[0] > 1:
        for i in range(Cn3.shape[0]):
            class1.append(Cn3[i, :, :])
        affinity_networks = snf.make_affinity(class1,
                                              metric='euclidean',
                                              K=20,
                                              mu=0.5)
        AC23 = snf.snf(affinity_networks,
                       K=20)  #Third local network atlas for C2 group
        class1 = []
    else:
        AC23 = Cn3[0]

    #A1
    AC1 = snf.snf([AC11, AC12, AC13], K=20)  #Global network atlas for C1 group

    #A2
    AC2 = snf.snf([AC21, AC22, AC23], K=20)  #Global network atlas for C2 group

    # * *

    # * * (5.6) In this part, the Nf most discriminative connectivities are determined and their indices are saved in the ind array.

    D0 = np.abs(AC1 - AC2)  # absolute difference between AC1 and AC2
    D = np.triu(D0)  # upper triangular part of the matrix
    D1 = D[np.triu_indices(AC1.shape[0], 1)]  # strictly upper triangular entries as a vector
    D1 = D1.transpose()
    D2 = np.sort(D1)  # rank the features
    D2 = D2[::-1]  # descending order
    Dif = D2[0:Nf]  # keep the Nf most discriminative connectivities
    D3 = []
    for i in D1:
        D3.append(i)
    ind = []
    for i in range(len(Dif)):
        ind.append(D3.index(Dif[i]))
    # * *

    # * * (5.7) Coordinates of the Nf most discriminative features are determined and plotted for each iteration if displayResults == 1.

    coord = []
    for i in range(len(Dif)):
        for j in range(D0.shape[0]):
            for k in range(D0.shape[1]):
                if Dif[i] == D0[j][k]:
                    coord.append([j, k])

    topFeatures = np.zeros((D0.shape[0], D0.shape[1]))
    s = 0
    ss = 0
    for i in range(len(Dif) * 2):
        topFeatures[coord[i][0]][coord[i][1]] = Dif[s]
        ss += 1
        if ss == 2:
            s += 1
            ss = 0
    if displayResults == 1:
        plt.imshow(topFeatures)
        plt.colorbar()
        plt.show()
    # * *

    return AC1, AC2, ind
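A hedged smoke-test sketch for NAGFS follows; the random matrices, the number of subjects, and the region count are illustrative assumptions, and the run assumes every SIMLR cluster ends up non-empty so that the SNF step has matrices to fuse.

import numpy as np

# Illustrative only: random symmetric matrices stand in for real connectomes.
n_subjects, n_rois = 40, 35
train_data = np.random.rand(n_subjects, n_rois, n_rois)
train_data = (train_data + train_data.transpose(0, 2, 1)) / 2  # symmetrize each matrix
train_Labels = np.array([1, -1] * (n_subjects // 2))           # class +1 / class -1 labels
AC1, AC2, ind = NAGFS(train_data, train_Labels, Nf=5, displayResults=0)
print(AC1.shape, AC2.shape, ind)  # two N x N atlases and the Nf selected indices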
Example #4
import time

import numpy as np
import pandas as pd
import SIMLR
from SIMLR import helper
from sklearn import metrics
from sklearn.metrics.cluster import adjusted_rand_score as ari
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi

X = pd.read_csv('yan.csv', header=None)
X = np.array(X)
X = X.transpose()

label = pd.read_csv('yan_label.csv')
y = np.array(label)
label = y.ravel()

c = label.max()  # number of clusters
### If the number of genes is larger than 500, we recommend performing PCA first.
start_main = time.time()
if X.shape[1] > 500:
    X = helper.fast_pca(X, 500)
else:
    X = X.todense()  # only needed if X were a scipy sparse matrix; the CSV loaded above is already dense
### SIMLR_LARGE(number of ranks/clusters, number of neighbors, memory-saving flag); the memory-saving
### mode trades efficiency for lower memory use when the number of cells is extremely large.
simlr = SIMLR.SIMLR_LARGE(c, 30, 0)
S, F, val, ind = simlr.fit(X)
julei = simlr.fast_minibatch_kmeans(F, c)
print('NMI value is %f \n' % nmi(julei.flatten(), label.flatten()))
print('ARI value is %f \n' % ari(julei.flatten(), label.flatten()))
print('HOM value is %f \n' % metrics.homogeneity_score(label.flatten(), julei.flatten()))
print("AMI: %0.3f" % metrics.adjusted_mutual_info_score(label, julei))
Example #5
                tsne = TSNE(args.n_component)
            else:
                tsne = TSNE()

            code = tsne.fit_transform(transform_data)
        elif args.use_SIMLR:
            print('not implemented for experiment')
            X = transform_data
            n_dim = min([X.shape[0], X.shape[1]])
            if X.shape[1] > 500:
                #X = SIMLR.helper.fast_pca(X,max([X.shape[0],X.shape[1]]))
                X = SIMLR.helper.fast_pca(X, n_dim)
            else:
                X = X.todense()
            c = len(set(labeled_label))
            ### SIMLR_LARGE(number of ranks/clusters, number of neighbors, memory-saving flag); the
            ### memory-saving mode trades efficiency for lower memory use on very large inputs.
            simlr = SIMLR.SIMLR_LARGE(c, min(30, X.shape[0] // 2), 0)
            S, F, val, ind = simlr.fit(X)
            print(F.shape)
            print(val.shape)
            print(S.shape)
            print(ind.shape)
            code = val
        else:
            if args.n_component:
                pca = PCA(args.n_component)
            else:
                pca = PCA()
            pca.fit(fit_data)
            code = pca.transform(transform_data)
Example #6
    def train(self):
        """
        Train MultiGraphGAN
        """
        nb_clusters = self.nb_clusters

        # Fixed data for evaluation: used to generate samples.
        src_iter = iter(self.src_loader)

        x_src_fixed = next(src_iter)
        x_src_fixed = x_src_fixed[0].to(self.device)
        d = next(iter(self.src_loader))

        tgt_iters = []
        for loader in self.tgt_loaders:
            tgt_iters.append(iter(loader))

        # label
        label_pos = torch.FloatTensor([1] * d[0].shape[0]).to(self.device)
        label_neg = torch.FloatTensor([0] * d[0].shape[0]).to(self.device)

        # Start training from scratch or resume training.
        start_iters = 0
        if self.opts.resume_iters:
            start_iters = self.opts.resume_iters
            self.restore_model(self.opts.resume_iters)

        # Start training.
        print('Start training MultiGraphGAN...')
        start_time = time.time()

        for i in range(start_iters, self.opts.num_iters):
            print("iteration", i)
            # =================================================================================== #
            #                             1. Preprocess input data                                #
            # =================================================================================== #
            try:
                x_src = next(src_iter)
            except StopIteration:
                src_iter = iter(self.src_loader)
                x_src = next(src_iter)

            x_src = x_src[0].to(self.device)

            x_tgts = []
            for tgt_idx in range(len(tgt_iters)):
                try:
                    x_tgt_i = next(tgt_iters[tgt_idx])
                    x_tgts.append(x_tgt_i)
                except StopIteration:
                    tgt_iters[tgt_idx] = iter(self.tgt_loaders[tgt_idx])
                    x_tgt_i = next(tgt_iters[tgt_idx])
                    x_tgts.append(x_tgt_i)

            for tgt_idx in range(len(x_tgts)):
                x_tgts[tgt_idx] = x_tgts[tgt_idx][0].to(self.device)
                print("x_tgts", x_tgts[tgt_idx].shape)

            # =================================================================================== #
            #                             2. Train the discriminator                              #
            # =================================================================================== #

            embedding = self.E(x_src, learn_adj(x_src)).detach()
            ## Cluster the source graph embeddings using SIMLR
            simlr = SIMLR.SIMLR_LARGE(nb_clusters, embedding.shape[0] // 2, 0)
            S, ff, val, ind = simlr.fit(embedding)
            y_pred = simlr.fast_minibatch_kmeans(ff, nb_clusters)
            y_pred = y_pred.tolist()
            get_indexes = lambda x, xs: [
                i for i, y in enumerate(xs) if x == y
            ]

            x_fake_list = []
            x_src_list = []
            d_loss_cls = 0
            d_loss_fake = 0
            d_loss = 0

            print("Train the discriminator")
            for par in range(nb_clusters):
                print("================")
                print("cluster", par)
                print("================")
                cluster_index_list = get_indexes(par, y_pred)
                print(cluster_index_list)
                for idx in range(len(self.Gs)):
                    x_fake_i = self.Gs[idx][par](
                        embedding[cluster_index_list],
                        learn_adj(x_tgts[idx][cluster_index_list])).detach()
                    x_fake_list.append(x_fake_i)
                    x_src_list.append(x_src[cluster_index_list])

                    out_fake_i, out_cls_fake_i = self.D(
                        x_fake_i, learn_adj(x_fake_i))
                    _, out_cls_real_i = self.D(
                        x_tgts[idx][cluster_index_list],
                        learn_adj(x_tgts[idx][cluster_index_list]))

                    ### Graph domain classification loss
                    d_loss_cls_i = self.classification_loss(out_cls_real_i, label_pos[cluster_index_list], type=self.opts.cls_loss) \
                                   + self.classification_loss(out_cls_fake_i, label_neg[cluster_index_list], type=self.opts.cls_loss)
                    d_loss_cls += d_loss_cls_i

                    # Part of adversarial loss
                    d_loss_fake += torch.mean(out_fake_i)

                out_src, out_cls_src = self.D(
                    x_src[cluster_index_list],
                    learn_adj(x_src[cluster_index_list]))
                ### Adversarial loss
                d_loss_adv = torch.mean(
                    out_src) - d_loss_fake / (self.opts.num_domains - 1)

                ### Gradient penalty loss
                x_fake_cat = torch.cat(x_fake_list)
                x_src_cat = torch.cat(x_src_list)

                alpha = torch.rand(x_src_cat.size(0), 1).to(self.device)
                x_hat = (alpha * x_src_cat.data +
                         (1 - alpha) * x_fake_cat.data).requires_grad_(True)

                out_hat, _ = self.D(x_hat, learn_adj(x_hat.detach()))
                d_loss_reg = self.gradient_penalty(out_hat, x_hat,
                                                   self.opts.Lf)

                # Cluster-based loss to update the discriminator
                d_loss_cluster = -1 * d_loss_adv + self.opts.lambda_cls * d_loss_cls + self.opts.lambda_reg * d_loss_reg

                ### Discriminator loss
                d_loss += d_loss_cluster

            print("d_loss", d_loss)
            self.reset_grad()
            d_loss.backward()
            self.d_optimizer.step()

            # Logging.
            loss = {}
            loss['D/loss_adv'] = d_loss_adv.item()
            loss['D/loss_cls'] = d_loss_cls.item()
            loss['D/loss_reg'] = d_loss_reg.item()

            # =================================================================================== #
            #                       3. Train the cluster-specific generators                      #
            # =================================================================================== #
            print("Train the generators")
            if (i + 1) % self.opts.n_critic == 0:
                g_loss_info = 0
                g_loss_adv = 0
                g_loss_idt = 0
                g_loss_topo = 0
                g_loss_rec = 0
                g_loss = 0

                for par in range(nb_clusters):
                    print("cluster", par)
                    cluster_index_list = get_indexes(par, y_pred)
                    for idx in range(len(self.Gs)):
                        # ========================= #
                        # =====source-to-target==== #
                        # ========================= #
                        x_fake_i = self.Gs[idx][par](
                            embedding[cluster_index_list],
                            learn_adj(x_tgts[idx][cluster_index_list]))

                        # Global topology loss
                        global_topology = self.criterionIdt(
                            x_fake_i, x_tgts[idx][cluster_index_list])

                        # Local topology loss
                        real_topology = topological_measures(
                            x_tgts[idx][cluster_index_list])
                        fake_topology = topological_measures(x_fake_i.detach())
                        # 0: closeness centrality, 1: betweenness centrality, 2: eigenvector centrality
                        local_topology = mean_absolute_error(
                            fake_topology[0], real_topology[0])

                        ### Topology loss
                        g_loss_topo += (local_topology + global_topology)

                        if self.opts.lambda_idt > 0:
                            x_fake_i_idt = self.Gs[idx][par](
                                self.E(
                                    x_tgts[idx][cluster_index_list],
                                    learn_adj(
                                        x_tgts[idx][cluster_index_list])),
                                learn_adj(x_tgts[idx][cluster_index_list]))
                            g_loss_idt += self.criterionIdt(
                                x_fake_i_idt, x_tgts[idx][cluster_index_list])

                        out_fake_i, out_cls_fake_i = self.D(
                            x_fake_i, learn_adj(x_fake_i.detach()))

                        ### Information maximization loss
                        g_loss_info_i = F.binary_cross_entropy_with_logits(
                            out_cls_fake_i, label_pos[cluster_index_list])
                        g_loss_info += g_loss_info_i

                        ### Adversarial loss
                        g_loss_adv -= torch.mean(out_fake_i)  # opposed sign

                        # ========================= #
                        # =====target-to-source==== #
                        # ========================= #
                        x_reconst = self.Gs[idx][par](
                            self.E(x_fake_i, learn_adj(x_fake_i.detach())),
                            learn_adj(x_fake_i.detach()))

                        # Reconstructed global topology loss
                        reconstructed_global_topology = self.criterionIdt(
                            x_src[cluster_index_list], x_reconst)

                        # Reconstructed local topology loss
                        real_topology = topological_measures(
                            x_src[cluster_index_list])
                        fake_topology = topological_measures(
                            x_reconst.detach())
                        # 0: closeness centrality, 1: betweenness centrality, 2: eigenvector centrality
                        reconstructed_local_topology = mean_absolute_error(
                            fake_topology[0], real_topology[0])

                        ### Graph reconstruction loss
                        g_loss_rec += (reconstructed_local_topology +
                                       reconstructed_global_topology)

                    # Cluster-based loss to update the generators
                    g_loss_cluster = g_loss_adv / (
                        self.opts.num_domains - 1
                    ) + self.opts.lambda_info * g_loss_info + self.opts.lambda_idt * g_loss_idt + self.opts.lambda_topology * g_loss_topo + self.opts.lambda_rec * g_loss_rec

                    ### Generator loss
                    g_loss += g_loss_cluster

                print("g_loss", g_loss)
                self.reset_grad()
                g_loss.backward()
                self.g_optimizer.step()

                # Logging.
                loss['G/loss_adv'] = g_loss_adv.item()
                loss['G/loss_rec'] = g_loss_rec.item()
                loss['G/loss_cls'] = g_loss_info.item()
                if self.opts.lambda_idt > 0:
                    loss['G/loss_idt'] = g_loss_idt.item()

            # =================================================================================== #
            #                                 4. Miscellaneous                                    #
            # =================================================================================== #
            # print out training information.
            if (i + 1) % self.opts.log_step == 0:
                et = time.time() - start_time
                et = str(datetime.timedelta(seconds=et))[:-7]
                log = "Elapsed [{}], Iteration [{}/{}]".format(
                    et, i + 1, self.opts.num_iters)
                for tag, value in loss.items():
                    log += ", {}: {:.4f}".format(tag, value)
                print(log)

            # save model checkpoints.
            if (i + 1) % self.opts.model_save_step == 0:
                E_path = os.path.join(self.opts.checkpoint_dir,
                                      '{}-E.ckpt'.format(i + 1))
                torch.save(self.E.state_dict(), E_path)

                D_path = os.path.join(self.opts.checkpoint_dir,
                                      '{}-D.ckpt'.format(i + 1))
                torch.save(self.D.state_dict(), D_path)

                for par in range(nb_clusters):
                    for idx in range(len(self.Gs)):
                        G_i_path = os.path.join(
                            self.opts.checkpoint_dir,
                            '{}-G{}-{}.ckpt'.format(i + 1, idx + 1, par))
                        print(G_i_path)
                        torch.save(self.Gs[idx][par].state_dict(), G_i_path)

                print('Saved model checkpoints into {}...'.format(
                    self.opts.checkpoint_dir))

                print('=============================')
                print("End of Training")
                print('=============================')
Example #7
    def __init__(self, n_classes, **params):
        super(SimlrKm, self).__init__(n_classes, **params)
        # SIMLR_LARGE(number of clusters, number of neighbors, memory-saving flag)
        self.simlr = SIMLR.SIMLR_LARGE(8, 30, 0)
        self.name = 'km'
Example #8
    def __init__(self, **params):
        self.output_names = ['SIMLR']
        # TODO: make params tunable... what do these numbers even mean???
        # Note (assumption): judging by the SIMLR_LARGE calls in the other examples, the three
        # arguments are likely the number of clusters, the number of neighbors, and a 0/1
        # memory-saving flag; this assumes SIMLR.SIMLR mirrors the SIMLR_LARGE signature.
        self.simlr = SIMLR.SIMLR(params['k'], 30, 0)
        super(Simlr, self).__init__(**params)
Example #9
from math import exp

import numpy as np
import SIMLR
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import LeaveOneOut

# Encoder is assumed to be the project's ARGA-based graph encoder (see the Domain Alignment
# comment below); its import is project-specific and therefore omitted here.


def HADA(sourceGraph, targetGraph, labels, settings):

    # initialisation
    subject = 150
    overallResult_PCC = np.zeros((subject, 32))
    overallResult_TSW = np.zeros((subject, 32))
    allSV = np.empty((0, sourceGraph.shape[1]), int)
    allTV = np.empty((0, targetGraph.shape[1]), int)
    allpredTV = np.empty((0, targetGraph.shape[1]), int)
    testlabel = []
    # Create training and testing sets
    loo = LeaveOneOut()
    loo.get_n_splits(sourceGraph)
    for train_index, test_index in loo.split(sourceGraph):
        rearrangedPredictorView = np.concatenate((np.transpose(
            sourceGraph[train_index]), np.transpose(sourceGraph[test_index])),
                                                 axis=1)
        rearrangedTargetView = np.concatenate((np.transpose(
            targetGraph[train_index]), np.transpose(targetGraph[test_index])),
                                              axis=1)

        ## Domain Alignment (DA) using ARGA and Similarity matrix learning using SIMLR
        simlr = SIMLR.SIMLR_LARGE(1, 50, 0)
        enc = Encoder(settings)

        ## STEP 1: Hierarchical Domain Alignment for training samples
        print("Hierarchical Domain Alignment for training samples")
        print("level 1")
        Simlarity2, _, _, _ = simlr.fit(targetGraph[train_index])
        encode_S_T = enc.erun(Simlarity2, sourceGraph[train_index])

        # H denotes the number of hierarchical levels
        H = 2
        temporary = encode_S_T
        for number in range(1, H):
            print("level ", H)
            encode_train__TV_A = enc.erun(Simlarity2, temporary)
            temporary = encode_train__TV_A

        ## STEP 2: Target Graph Prediction
        ## STEP 2.1: Source graph embedding of training and testing subjects
        test__train__SV = np.vstack(
            (sourceGraph[train_index], sourceGraph[test_index]))
        print("Source graph embedding of training and testing subjects...")
        Simlarity1, _, _, _ = simlr.fit(test__train__SV)
        encode_test__train__SV = enc.erun(Simlarity1, test__train__SV)

        ## STEP 2.2: Connectomic Manifold Learning using SIMLR
        print("SIMLR...")
        SALL, FALL, val, ind = simlr.fit(encode_test__train__SV)
        SY, FY, val, ind = simlr.fit(encode_train__TV_A)
        # number of neighbors for trust score
        TS_bestNb = 5
        # get the best neighbors in the learned manifold of the regularized source graph embeddings
        sall = SALL.todense()
        Index_ALL = np.argsort(-sall, axis=0)
        des = np.sort(-sall, axis=0)
        Bvalue_ALL = -des
        # get the best neighbors in the learned manifold of the hierarchically aligned source and target graph embeddings
        sy = SY.todense()
        Index_Y = np.argsort(-sy, axis=0)
        desy = np.sort(-sy, axis=0)
        Bvalue_Y = -desy

        # make prediction for each testing subject
        for testingSubject in range(1, 2):
            print "testing subject:", test_index
            # get this testing subject's rearranged index and original index
            tSubjectIndex = (sourceGraph[train_index].shape[0] -
                             2) + testingSubject
            tSubjectOriginalIndex = test_index
            # compute Tscore for each neighbor
            trustScore = np.ones((TS_bestNb, TS_bestNb))
            newWeight_TSW = np.ones(TS_bestNb)

            for neighbor in range(0, TS_bestNb):
                neighborIndex = Index_ALL[tSubjectIndex, neighbor]
                temp_counter = 0
                while (neighborIndex > sourceGraph[train_index].shape[0]):
                    # best neighbor is a testing data
                    temp_counter = temp_counter + 1
                    neighborIndex = Index_ALL[tSubjectIndex,
                                              (TS_bestNb + temp_counter)]

                if (temp_counter != 0):
                    neighborSequence = TS_bestNb + temp_counter
                else:
                    neighborSequence = neighbor

                    #print(neighborIndex)
                    # get top nb neighbors in mappedX
                    neighborListX = Index_ALL[neighborIndex, 0:TS_bestNb]
                    # get top nb neighbors in mappedY
                    neighborListY = Index_Y[neighborIndex, 0:TS_bestNb]
                    # calculate trust score
                    trustScore[TS_bestNb - 1, neighbor] = len(
                        np.intersect1d(np.array(neighborListX),
                                       np.array(neighborListY)))
                    # calculate new weight (TS * Similarity)
                    newWeight_TSW[neighbor] = exp(
                        trustScore[TS_bestNb - 1, neighbor] / TS_bestNb *
                        Bvalue_ALL[tSubjectIndex, neighborSequence])

            #reconstruct with Tscore and similarity weight
            innerPredict_TSW = np.zeros(
                sourceGraph[train_index].shape[1])[np.newaxis]
            #summing up the best neighbors
            for j1 in range(0, TS_bestNb):
                tr = (rearrangedTargetView[:, Index_ALL[tSubjectIndex,
                                                        j1]])[np.newaxis]
                if j1 == 0:
                    innerPredict_TSW = innerPredict_TSW.T + tr.T * newWeight_TSW[
                        j1]
                else:
                    innerPredict_TSW = innerPredict_TSW + tr.T * newWeight_TSW[
                        j1]

            # scale weight to 1
            Scale_TSW = sum(newWeight_TSW)
            innerPredict_TSW = np.divide(innerPredict_TSW, Scale_TSW)

            # calculate result (MAE)
            tr2 = (rearrangedTargetView[:, tSubjectIndex])[np.newaxis]
            resulttsw = abs(tr2.T - innerPredict_TSW)
            iMAE_TSW = mean_absolute_error(tr2.T, innerPredict_TSW)
            overallResult_TSW[tSubjectOriginalIndex,
                              TS_bestNb] = overallResult_TSW[
                                  tSubjectOriginalIndex, TS_bestNb] + iMAE_TSW

            allSV = np.append(allSV, sourceGraph[test_index], axis=0)
            testlabel.append(labels[test_index])
            allpredTV = np.append(allpredTV, innerPredict_TSW.T, axis=0)

            print(test_index)

    dataset_source_and_predicted_target = np.concatenate((allSV, allpredTV),
                                                         axis=1)

    print('END')

    mae = np.mean(overallResult_TSW, axis=0)
    print("Mean Absolute Error: ")
    print(mae[np.nonzero(mae)])

    return mae, dataset_source_and_predicted_target, testlabel