Example #1
    def test(self, views, TrainShare=None, verbose=True):
        tic = time.time()
        assert len(np.shape(
            views)) == 3, "Data shape must be 3D, i.e. sub x time x voxel"

        self.TestFeatures = None

        NumSub, NumTime, NumVoxel = np.shape(views)
        NumFea = self.net_shape[-1]
        if NumFea is None:
            NumFea = np.min((NumTime, NumVoxel))
            # Record the inferred size even when verbose is off, since the
            # network shape below reads it back from self.net_shape
            self.net_shape[-1] = NumFea
            if verbose:
                print(
                    "Number of features is automatically assigned, Features: ",
                    NumFea)

        if TrainShare is not None:
            Share = TrainShare
            self.Share = TrainShare
        elif self.Share is not None:
            Share = self.Share
        else:
            raise Exception(
                "No shared space found! Pass TrainShare or run train() first.")

        if self.loss_type == 'mse':
            criterion = torch.nn.MSELoss()
        elif self.loss_type == 'soft':
            criterion = torch.nn.MultiLabelSoftMarginLoss()
        elif self.loss_type == 'mean':
            criterion = torch.mean
        elif self.loss_type == 'norm':
            criterion = torch.norm
        else:
            raise Exception(
                "Loss function type is wrong! Options: 'mse', 'soft', 'mean', or 'norm'")

        self.ha_loss_test_vec = list()
        self.ha_loss_test = None

        NewViews = list()
        G = torch.Tensor(Share)

        for s in range(NumSub):
            net_shape = np.concatenate(([NumVoxel], self.net_shape))
            net = MLP(model=net_shape,
                      activation=self.activation,
                      gpu_enable=self.gpu_enable)

            if self.optim == "adam":
                optimizer = optim.Adam(net.parameters(), lr=self.learning_rate)
            elif self.optim == "sgd":
                optimizer = optim.SGD(net.parameters(), lr=self.learning_rate)
            else:
                raise Exception(
                    "Optimization algorithm is wrong! Options: 'adam' or 'sgd'")

            X = torch.Tensor(views[s])
            net.train()

            for j in range(self.iteration):
                for epoch in range(self.epoch):
                    perm = torch.randperm(NumTime)
                    sum_loss = 0

                    for i in range(0, NumTime, self.batch_size):
                        x = X[perm[i:i + self.batch_size]]
                        g = G[perm[i:i + self.batch_size]]

                        # Send data to GPU
                        if self.gpu_enable:
                            x = x.cuda()
                            g = g.cuda()

                        optimizer.zero_grad()
                        fx = net(x)

                        if self.loss_type == 'mse' or self.loss_type == 'soft':
                            # keep (input, target) order, matching train()
                            loss = criterion(fx, g)
                        else:
                            loss = criterion(fx - g)

                        loss.backward()
                        optimizer.step()
                        sum_loss += loss.data.cpu().numpy()

                        # Stop the epoch early once the configured fraction
                        # of the time points has been visited
                        if (i / NumTime) >= self.epoch_internal_iteration:
                            break

                    if verbose:
                        print(
                            "TEST, UPDATE NETWORK: Iteration {:6d}, Subject {:6d}, Epoch {:6d}, loss error: {}"
                            .format(j + 1, s + 1, epoch + 1, sum_loss))

            if self.gpu_enable:
                X = X.cuda()

            NewViews.append(net(X).data.cpu().numpy())

        ha_model = GPUHA(Dim=NumFea, regularization=self.regularization)
        ha_model.test(views=NewViews, G=Share, verbose=verbose)
        self.TestFeatures = ha_model.Xtest
        self.TestRuntime = time.time() - tic
        return self.TestFeatures
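
A minimal, self-contained sketch of the per-subject update used above: fit a small network so that its output matches a fixed shared response G, with the same randperm/mini-batch pattern. The toy sizes and the throwaway two-layer net are illustrative assumptions, not taken from the source.

import torch
import torch.nn as nn
import torch.optim as optim

NumTime, NumVoxel, NumFea = 100, 50, 10
X = torch.randn(NumTime, NumVoxel)    # one subject's view (time x voxel)
G = torch.randn(NumTime, NumFea)      # shared response (time x feature)

net = nn.Sequential(nn.Linear(NumVoxel, 20), nn.Tanh(), nn.Linear(20, NumFea))
optimizer = optim.Adam(net.parameters(), lr=1e-3)
criterion = nn.MSELoss()

batch_size = 25
for epoch in range(5):
    perm = torch.randperm(NumTime)    # reshuffle time points every epoch
    for i in range(0, NumTime, batch_size):
        idx = perm[i:i + batch_size]
        optimizer.zero_grad()
        loss = criterion(net(X[idx]), G[idx])
        loss.backward()
        optimizer.step()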
Example #2
    def train(self, views, verbose=True):
        tic = time.time()
        assert len(np.shape(
            views)) == 3, "Data shape must be 3D, i.e. sub x time x voxel"

        self.Share = None
        self.TrainFeatures = None

        NumSub, NumTime, NumVoxel = np.shape(views)
        NumFea = self.net_shape[-1]
        if NumFea is None:
            NumFea = np.min((NumTime, NumVoxel))
            # Record the inferred size even when verbose is off, since the
            # network shape below reads it back from self.net_shape
            self.net_shape[-1] = NumFea
            if verbose:
                print(
                    "Number of features is automatically assigned, Features: ",
                    NumFea)

        Share = np.random.randn(NumTime, NumFea)

        if self.loss_type == 'mse':
            criterion = torch.nn.MSELoss()
        elif self.loss_type == 'soft':
            criterion = torch.nn.MultiLabelSoftMarginLoss()
        elif self.loss_type == 'mean':
            criterion = torch.mean
        elif self.loss_type == 'norm':
            criterion = torch.norm
        else:
            raise Exception(
                "Loss function type is wrong! Options: 'mse', 'soft', 'mean', or 'norm'")

        self.ha_loss_vec = list()

        self.ha_loss = None

        for j in range(self.iteration):

            NewViews = list()
            G = torch.Tensor(Share)

            for s in range(NumSub):
                net_shape = np.concatenate(([NumVoxel], self.net_shape))
                net = MLP(model=net_shape,
                          activation=self.activation,
                          gpu_enable=self.gpu_enable)

                if self.optim == "adam":
                    optimizer = optim.Adam(net.parameters(),
                                           lr=self.learning_rate)
                elif self.optim == "sgd":
                    optimizer = optim.SGD(net.parameters(),
                                          lr=self.learning_rate)
                else:
                    raise Exception(
                        "Optimization algorithm is wrong! Options: 'adam' or 'sgd'")

                X = torch.Tensor(views[s])
                net.train()

                for epoch in range(self.epoch):
                    perm = torch.randperm(NumTime)
                    sum_loss = 0

                    for i in range(0, NumTime, self.batch_size):
                        x = X[perm[i:i + self.batch_size]]
                        g = G[perm[i:i + self.batch_size]]

                        # Send data to GPU
                        if self.gpu_enable:
                            x = x.cuda()
                            g = g.cuda()

                        optimizer.zero_grad()
                        fx = net(x)

                        if self.loss_type == 'mse' or self.loss_type == 'soft':
                            loss = criterion(fx, g) / NumTime
                        else:
                            loss = criterion(fx - g) / NumTime

                        if self.norm1_enable or self.norm2_enable:
                            for weight in net.get_weights():
                                if self.norm1_enable:
                                    loss += self.alpha * torch.mean(torch.abs(weight[1]))
                                if self.norm2_enable:
                                    loss += self.alpha * torch.mean(weight[1] ** 2)

                        loss.backward()
                        optimizer.step()
                        sum_loss += loss.data.cpu().numpy()

                        # Stop the epoch early once the configured fraction
                        # of the time points has been visited
                        if (i / NumTime) >= self.epoch_internal_iteration:
                            break

                    if verbose:
                        print(
                            "TRAIN, UPDATE NETWORK: Iteration {:5d}, Subject {:6d}, Epoch {:6d}, loss error: {}"
                            .format(j + 1, s + 1, epoch + 1, sum_loss))

                if self.gpu_enable:
                    X = X.cuda()

                NewViews.append(net(X).data.cpu().numpy())

            ha_model = GPUHA(Dim=NumFea, regularization=self.regularization)

            if NumFea >= NumTime:
                ha_model.train(views=NewViews,
                               verbose=verbose,
                               gpu=self.gpu_enable)
            else:
                ha_model.train(views=NewViews, verbose=verbose, gpu=False)

            Share = ha_model.G
            out_features = ha_model.Xtrain
            error = np.mean(ha_model.Etrain)

            if error == 0:
                assert self.Share is not None, "All extracted features are zero, i.e. number of features is not enough for creating a shared space"
                self.TrainRuntime = time.time() - tic
                return self.TrainFeatures, self.Share

            if self.best_result_enable:
                # keep the best (lowest-error) result seen so far
                if self.ha_loss is None or error <= self.ha_loss:
                    self.Share = Share
                    self.TrainFeatures = out_features
                    self.ha_loss = error
            else:
                self.Share = Share
                self.TrainFeatures = out_features
                self.ha_loss = error

            if verbose:
                print("Hyperalignment error: {}".format(error))

            self.ha_loss_vec.append(error)

        self.TrainRuntime = time.time() - tic
        return self.TrainFeatures, self.Share
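
The train loop above alternates two steps: refit one network per subject so its mapped view approaches the current shared space G, then recompute G from the mapped views. A toy sketch of that control flow, with the GPUHA template update replaced by a plain mean over mapped views (a deliberate simplification; the real update solves a hyperalignment problem):

import torch
import torch.nn as nn
import torch.optim as optim

NumSub, NumTime, NumVoxel, NumFea = 3, 100, 50, 10
views = [torch.randn(NumTime, NumVoxel) for _ in range(NumSub)]
G = torch.randn(NumTime, NumFea)            # initial shared space

for iteration in range(5):
    mapped = []
    for X in views:
        net = nn.Linear(NumVoxel, NumFea)   # fresh per-subject mapping
        opt = optim.Adam(net.parameters(), lr=1e-2)
        for _ in range(20):                 # step 1: pull this view toward G
            opt.zero_grad()
            loss = nn.functional.mse_loss(net(X), G)
            loss.backward()
            opt.step()
        mapped.append(net(X).detach())
    G = torch.stack(mapped).mean(dim=0)     # step 2: simplified shared-space update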
Example #3
    def fit(self, data_vals, design_vals):
        tic = time.time()

        SampleSize, FeatureSize = np.shape(data_vals)
        Sam, RegressorSize = np.shape(design_vals)
        assert SampleSize == Sam, "Data and Design Matrix must have the same size samples, data shape: " + \
                                  str(np.shape(data_vals)) + ", design shape: " + str(np.shape(design_vals))
        del Sam
        A = torch.Tensor(design_vals)
        B = torch.Tensor(data_vals)
        if self.normalization:
            # standardize: the parentheses matter; A - A.mean() / A.std()
            # would divide only the mean by the std
            A = (A - A.mean()) / A.std()
            B = (B - B.mean()) / B.std()

        model = MLP([RegressorSize, FeatureSize], [None],
                    gpu_enable=self.gpu_enable)

        if self.optim == "adam":
            optimizer = optim.Adam(model.parameters(), lr=self.learning_rate)
        elif self.optim == "sgd":
            optimizer = optim.SGD(model.parameters(), lr=self.learning_rate)
        else:
            raise Exception(
                "Optimization algorithm is wrong! Options: 'adam' or 'sgd'")

        if self.loss_type == 'mse':
            criterion = torch.nn.MSELoss()
        elif self.loss_type == 'soft':
            criterion = torch.nn.MultiLabelSoftMarginLoss()
        elif self.loss_type == 'mean':
            criterion = torch.mean
        elif self.loss_type == 'norm':
            criterion = torch.norm
        else:
            raise Exception(
                "Loss function type is wrong! Options: 'mse', 'soft', 'mean', or 'norm'")

        if self.method not in ('linear', 'lasso', 'elastic', 'ridge', 'ln1',
                               'ln2', 'ln12'):
            raise Exception(
                "Method option is wrong! Options: 'linear', 'lasso', 'elastic', 'ridge', 'ln1', 'ln2', 'ln12'")

        model.train()

        self.loss_vec = list()

        for epoch in range(self.epoch):
            perm = torch.randperm(SampleSize)
            sum_loss = 0

            for i in range(0, SampleSize, self.batch_size):
                a = A[perm[i:i + self.batch_size]]
                b = B[perm[i:i + self.batch_size]]

                # Send data to GPU
                if self.gpu_enable:
                    a = a.cuda()
                    b = b.cuda()

                optimizer.zero_grad()

                output = model(a)
                W = model.get_weights()[0][1]

                if self.method == 'linear':
                    # ||y - Xw||
                    if self.loss_type == 'mse' or self.loss_type == 'soft':
                        loss = criterion(output, b)
                    else:
                        loss = criterion(output - b)

                elif self.method == 'lasso':
                    # (1 / (2 * n_samples)) * ||y - Xw|| + alpha * ||w||_1
                    if self.loss_type == 'mse' or self.loss_type == 'soft':
                        loss = criterion(output, b) / (SampleSize * 2) \
                               + self.lasso_alpha * torch.norm(W, p=1)
                    else:
                        loss = criterion(output - b) / (SampleSize * 2) \
                               + self.lasso_alpha * torch.norm(W, p=1)

                elif self.method == 'elastic':
                    # 1 / (2 * n_samples) * ||y - Xw|| +
                    # alpha * l1_ratio * ||w||_1 +
                    # 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
                    if self.loss_type == 'mse' or self.loss_type == 'soft':
                        loss = criterion(output, b) / (SampleSize * 2) + \
                               self.elstnet_alpha * self.elstnet_l1_ratio * torch.norm(W, p=1) +\
                               0.5 * self.elstnet_alpha * (1 - self.elstnet_l1_ratio) * torch.norm(W, p=2) ** 2
                    else:
                        loss = criterion(output - b) / (SampleSize * 2) + \
                               self.elstnet_alpha * self.elstnet_l1_ratio * torch.norm(W, p=1) +\
                               0.5 * self.elstnet_alpha * (1 - self.elstnet_l1_ratio) * torch.norm(W, p=2) ** 2

                elif self.method == 'ridge':
                    # ||y - Xw|| + ||w||^2_2
                    if self.loss_type == 'mse' or self.loss_type == 'soft':
                        loss = criterion(output, b) \
                               + self.ridge_param * torch.norm(W, p=2) ** 2
                    else:
                        loss = criterion(output - b) \
                               + self.ridge_param * torch.norm(W, p=2) ** 2

                elif self.method == 'ln1':
                    # ||w||_1
                    loss = torch.norm(W, p=1)
                elif self.method == 'ln2':
                    # ||w||_2^2
                    loss = torch.norm(W, p=2)**2
                elif self.method == 'ln12':
                    # ||w||_2^2 + ||w||_1
                    loss = torch.norm(W, p=2)**2 + torch.norm(W, p=1)
                else:
                    raise Exception(
                        "Method option is wrong! Options: 'linear', 'lasso', 'elastic', 'ridge', 'ln1', 'ln2', 'ln12'")

                loss.backward()
                optimizer.step()
                sum_loss += loss.data.cpu().numpy()
                self.loss_vec.append(loss.data.cpu().numpy())

            if self.verbose:
                if (epoch + 1) % self.report_step == 0:
                    print("Epoch: {:4d}  Error: {}".format(
                        epoch + 1, sum_loss))

        self.Beta = np.transpose(model.get_weights()[0][1].data.cpu().numpy())
        self.Eps = model.get_bias()[0][1].data.cpu().numpy()

        if self.gpu_enable:
            A = A.cuda()
            B = B.cuda()

        Performance = torch.mean((B - model(A))**2) / SampleSize
        Performance = Performance.data.cpu().numpy()
        MSE = mean_squared_error(data_vals, np.dot(design_vals, self.Beta))
        return (self.Beta, self.Eps, self.loss_vec, MSE, Performance,
                time.time() - tic)
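
The 'lasso', 'elastic', and 'ridge' branches implement the scikit-learn-style objectives quoted in the comments. A standalone check of the elastic-net objective on random data (loss and gradient only, no training loop; sizes are arbitrary):

import torch

n, p, k = 32, 8, 4
X, Y = torch.randn(n, p), torch.randn(n, k)
W = torch.randn(p, k, requires_grad=True)
alpha, l1_ratio = 0.1, 0.5

# 1/(2n) * ||Y - XW||^2 + alpha*l1_ratio*||W||_1 + 0.5*alpha*(1-l1_ratio)*||W||_2^2
loss = torch.sum((Y - X @ W) ** 2) / (2 * n) \
       + alpha * l1_ratio * torch.norm(W, p=1) \
       + 0.5 * alpha * (1 - l1_ratio) * torch.norm(W, p=2) ** 2
loss.backward()   # gradients flow through the data term and both penalties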
Example #4
    def fit(self, data_vals, design_vals, sess=None):
        import tensorflow as tf
        import numpy as np
        import os
        from Network.MLP import MLP
        # RSA Parameters
        F = tf.placeholder("float", [None, np.shape(data_vals)[1]])
        D = tf.placeholder(shape=[None, np.shape(design_vals)[1]],
                           dtype=tf.float32)
        Beta = tf.Variable(
            tf.random_normal(
                shape=[np.shape(design_vals)[1], self.Layers[-1]]))
        #Eps  = tf.Variable(tf.random_normal(shape=[1, self.Layers[-1]]))
        oldBeta = tf.placeholder(
            shape=[np.shape(design_vals)[1], self.Layers[-1]],
            dtype=tf.float32)
        # Kernel Optimization
        MappedF = tf.placeholder("float", [None, self.Layers[-1]])
        mlp = MLP()
        kernelmapping = mlp.multilayer_perceptron(F,
                                                  LayerShape=self.Layers,
                                                  Activation=self.activation)
        kernel_loss = tf.reduce_mean(
            tf.square(kernelmapping - tf.matmul(D, oldBeta)))
        kernel_train = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate).minimize(kernel_loss)
        # RSA optimization
        l1_term = tf.multiply(tf.constant(self.alpha, dtype=tf.float32),
                              tf.reduce_mean(tf.abs(Beta)))
        l2_term = tf.multiply(tf.constant(-10 * self.alpha, dtype=tf.float32),
                              tf.reduce_mean(tf.square(Beta)))
        if self.loss_type == 'norm':
            rsa_loss = tf.add(
                tf.add(
                    tf.square(
                        tf.norm(tf.subtract(MappedF, tf.matmul(D, Beta)),
                                ord=self.loss_norm)), l1_term), l2_term)
        else:
            rsa_loss = tf.add(
                tf.add(
                    tf.reduce_mean(
                        tf.square(tf.subtract(MappedF, tf.matmul(D, Beta)))),
                    l1_term), l2_term)
        rsa_train = tf.train.GradientDescentOptimizer(
            learning_rate=self.learning_rate).minimize(rsa_loss)
        # Performance Estimation mean((F - D * Beta)**2) / n
        perf = tf.divide(
            tf.reduce_mean(tf.square(MappedF - tf.matmul(D, Beta))),
            tf.constant(np.shape(data_vals)[0], dtype=tf.float32))
        if self.CPU:
            os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
            print("Switch to CPU env ...")

        if sess is None:
            sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        if self.verbose:
            print("Before Mapping, Performance:   {:20.10f}".format(
                sess.run(
                    perf, {
                        D: design_vals,
                        MappedF: sess.run(kernelmapping, {F: data_vals})
                    })))
        # Training loop
        self.loss_vec = list()
        for i in range(self.n_iter):
            rand_index = np.random.choice(len(design_vals),
                                          size=self.batch_size)
            rand_design = design_vals[rand_index]
            rand_data = sess.run(kernelmapping, {F: data_vals[rand_index]})
            sess.run(rsa_train, {D: rand_design, MappedF: rand_data})
            temp_loss = sess.run(rsa_loss,
                                 feed_dict={
                                     D: rand_design,
                                     MappedF: rand_data
                                 })
            self.loss_vec.append(temp_loss)
            if self.verbose:
                if (i == 0) or ((i + 1) % self.report_step
                                == 0) or (i == self.n_iter - 1):
                    print('It: {:9d} of {:9d} \t Loss: {:20.10f}'.format(
                        i + 1, self.n_iter, temp_loss))
            oBeta = sess.run(Beta)
            sess.run([kernel_train, kernel_loss, kernelmapping], {
                D: rand_design,
                F: data_vals[rand_index],
                oldBeta: oBeta
            })
        Fhat = sess.run(kernelmapping, {F: data_vals})
        Performance = sess.run(perf, {D: design_vals, MappedF: Fhat})
        if self.verbose:
            print(
                "After Mapping, Performance:    {:20.10f}".format(Performance))
        # Final Results
        self.Beta = sess.run(Beta)
        self.Weights, self.Biases = mlp.return_values(sess)
        sess.close()
        MSE = mean_squared_error(Fhat, np.dot(design_vals, self.Beta))
        return self.Beta, self.Weights, self.Biases, self.loss_vec, MSE, Performance
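
This example targets the TensorFlow 1.x graph API (tf.placeholder, tf.Session, tf.train.AdamOptimizer) and will not run as written under TensorFlow 2.x. A commonly used compatibility shim, shown here as a suggestion rather than something from the source:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
# After these two lines, the placeholders, sessions, variable initializers,
# and v1 optimizers used above behave as they did in TensorFlow 1.x.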
Example #5
    def fit(self, data_vals, design_vals, sess=None):
        import tensorflow as tf
        import numpy as np
        from Network.MLP import MLP
        import os
        # RSA Parameters
        F = tf.placeholder("float", [None, self.NVoxel])
        D = tf.placeholder(shape=[None, self.NCat], dtype=tf.float32)
        Beta = tf.Variable(tf.random_normal(shape=[self.NCat, self.Layers[-1]]))
        Eps  = tf.Variable(tf.random_normal(shape=[1, self.Layers[-1]]))
        # Kernel Optimization
        MappedF = tf.placeholder("float", [None, self.Layers[-1]])
        mlp = MLP()
        kernelmapping = mlp.multilayer_perceptron(F, LayerShape=self.Layers, Activation=self.activation)
        kernel_loss = tf.reduce_mean(tf.square(kernelmapping - tf.matmul(D, Beta)))
        kernel_train = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(kernel_loss)
        # RSA optimization
        rsa_loss  = tf.square(tf.norm(MappedF - tf.add(tf.matmul(D, Beta), Eps), ord=self.loss_norm))
        rsa_train = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(rsa_loss)
        # Performance Estimation MSE( F - D * Beta + Eps )
        perf  = tf.divide(tf.reduce_mean(tf.square(MappedF - tf.matmul(D, Beta))), tf.constant(np.shape(data_vals)[0],dtype=tf.float32))
        if self.CPU:
            os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
            print("Switch to CPU env ...")

        if sess is None:
            sess = tf.Session()

        sess.run(tf.global_variables_initializer())
        # Tuning Kernel Parameters
        for i in range(self.kernel_iter):
            data_index  = np.random.choice(len(data_vals), size=1)[0]
            data        = data_vals[data_index]
            if np.shape(data)[0] == 1:
                data = data[0]
            design      = design_vals[data_index]
            if np.shape(design)[0] == 1:
                design = design[0]
            rand_index  = np.random.choice(len(data), size=self.batch_size)
            rand_design = design[rand_index]
            rand_data = sess.run(kernelmapping, {F:data[rand_index]})
            sess.run(rsa_train, {D: rand_design, MappedF: rand_data})
            sess.run([kernel_train, kernel_loss, kernelmapping], {D: rand_design, F: data[rand_index]})
            if self.verbose:
                if (i == 0) or ((i + 1)%self.report_step == 0) or (i == self.kernel_iter - 1):
                    print('Tuning Kernel Parameters: \t It {:9d} of {:9d}'.format(i + 1, self.kernel_iter))

        # Estimating Betas
        self.Beta        = list()
        self.Eps         = list()
        self.loss_mat    = list()
        self.AMSE        = list()
        for data_index, data in enumerate(data_vals):
            loss_vec = list()
            for i in range(self.rsa_iter):
                design      = design_vals[data_index]
                rand_index  = np.random.choice(len(data), size=self.batch_size)
                rand_design = design[rand_index]
                rand_data = sess.run(kernelmapping, {F:data[rand_index]})
                sess.run(rsa_train, {D: rand_design, MappedF: rand_data})
                loss_temp = sess.run(rsa_loss, feed_dict={D: rand_design, MappedF: rand_data})
                loss_vec.append(loss_temp)
                if self.verbose:
                    if (i == 0) or ((i + 1)%self.report_step == 0) or (i == self.rsa_iter - 1):
                        print('Estimating RSA: View {:d} of {:d} \t It {:9d} of {:9d} \t Loss: {:20.10f}'.format(data_index + 1, len(data_vals), i + 1, self.rsa_iter, loss_temp))
            MSE = sess.run(perf, {D: design_vals[data_index], MappedF: sess.run(kernelmapping, {F: data_vals[data_index]})})
            self.AMSE.append(MSE)
            if self.verbose:
                print("View {:d} Performance: {:20.10f}".format(data_index + 1, MSE))
            self.loss_mat.append(loss_vec)
            self.Beta.append(sess.run(Beta))
            self.Eps.append(sess.run(Eps))

        self.Weights, self.Biases   = mlp.return_values(sess)
        sess.close()
        return self.Beta, self.Eps, self.Weights, self.Biases, np.mean(self.AMSE), self.loss_mat
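
Note one difference from the PyTorch examples above: batches here are drawn with np.random.choice, which samples indices with replacement, whereas torch.randperm yields distinct indices. A standalone numpy illustration of the two schemes:

import numpy as np

n, batch_size = 10, 4
idx_choice = np.random.choice(n, size=batch_size)     # may repeat indices
idx_perm = np.random.permutation(n)[:batch_size]      # distinct indices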