Example #1
def procrustes2d(origin):
    f = np.copy(origin)
    numberOfShapes = np.shape(f)[0]
    
    diff = np.inf
    timesToConverge = 0        
    while(timesToConverge < 2):
        print(timesToConverge)
        if(timesToConverge == 0):
            pivotIndex = np.random.randint(0, numberOfShapes)
            for i in range(0, numberOfShapes):
                if(i == pivotIndex):
                    continue
                mtx1, mtx2, disparity = procrustes(f[pivotIndex], f[i])
                f[pivotIndex] = mtx1
                f[i] = mtx2
        else:
            for i in range(0, numberOfShapes):
                mtx1, mtx2, disparity = procrustes(newMean[0], f[i])
                f[i] = mtx2
                
        newMean = np.sum(f, axis = 0, keepdims=True)/numberOfShapes
        diff = np.sqrt(np.sum(np.square(f-newMean)))
        print(diff)
        timesToConverge += 1
    
    print(diff)
    return f, newMean
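A hypothetical driver for the procrustes2d() helper above (assuming it, numpy as np, and scipy.spatial.procrustes are already in scope): build five noisy copies of a triangle and align them.

rng = np.random.default_rng(0)
base = np.array([[0.0, 0.0], [1.0, 0.0], [0.5, 1.0]])              # reference triangle
shapes = np.stack([base + rng.normal(scale=0.05, size=base.shape)  # five jittered copies
                   for _ in range(5)])
aligned, mean_shape = procrustes2d(shapes)
print(aligned.shape, mean_shape.shape)                             # (5, 3, 2) (1, 3, 2)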
Example #2
    def test_procrustes2(self):
        # procrustes disparity should not depend on order of matrices
        m1, m3, disp13 = procrustes(self.data1, self.data3)
        m3_2, m1_2, disp31 = procrustes(self.data3, self.data1)
        assert_almost_equal(disp13, disp31)

        # try with 3d, 8 pts per
        rand1 = np.array([[2.61955202, 0.30522265, 0.55515826],
                         [0.41124708, -0.03966978, -0.31854548],
                         [0.91910318, 1.39451809, -0.15295084],
                         [2.00452023, 0.50150048, 0.29485268],
                         [0.09453595, 0.67528885, 0.03283872],
                         [0.07015232, 2.18892599, -1.67266852],
                         [0.65029688, 1.60551637, 0.80013549],
                         [-0.6607528, 0.53644208, 0.17033891]])

        rand3 = np.array([[0.0809969, 0.09731461, -0.173442],
                         [-1.84888465, -0.92589646, -1.29335743],
                         [0.67031855, -1.35957463, 0.41938621],
                         [0.73967209, -0.20230757, 0.52418027],
                         [0.17752796, 0.09065607, 0.29827466],
                         [0.47999368, -0.88455717, -0.57547934],
                         [-0.11486344, -0.12608506, -0.3395779],
                         [-0.86106154, -0.28687488, 0.9644429]])
        res1, res3, disp13 = procrustes(rand1, rand3)
        res3_2, res1_2, disp31 = procrustes(rand3, rand1)
        assert_almost_equal(disp13, disp31)
Example #3
    def test_eigsh(self):
        np.random.seed(123)
        X = np.vstack([
            np.repeat([[0.2, 0.2, 0.2]], 50, axis=0),
            np.repeat([[0.5, 0.5, 0.5]], 50, axis=0),
        ])
        P = X @ X.T
        A = np.random.binomial(1, P).astype(float)
        A = symmetrize(A, method="triu")
        n_components = 3

        # Full SVD
        U_full, D_full, V_full = select_svd(A,
                                            n_components=n_components,
                                            algorithm="full")
        X_full = U_full @ np.diag(np.sqrt(D_full))
        _, _, norm_full = procrustes(X, X_full)

        # eigsh SVD
        U_square, D_square, V_square = select_svd(A,
                                                  n_components=n_components,
                                                  algorithm="eigsh",
                                                  n_iter=10)
        X_square = U_square @ np.diag(np.sqrt(D_square))
        _, _, norm_square = procrustes(X, X_square)

        rtol = 1e-4
        atol = 1e-4
        np.testing.assert_allclose(norm_full, norm_square, rtol, atol)
Example #4
def test_outputs():
    np.random.seed(123)
    X = np.vstack([
        np.repeat([[0.2, 0.2, 0.2]], 50, axis=0),
        np.repeat([[0.5, 0.5, 0.5]], 50, axis=0),
    ])
    P = X @ X.T
    A = np.random.binomial(1, P).astype(float)

    n_components = 3

    U_full, D_full, V_full = selectSVD(A,
                                       n_components=n_components,
                                       algorithm="full")
    X_full = U_full @ np.diag(np.sqrt(D_full))
    _, _, norm_full = procrustes(X, X_full)

    U_trunc, D_trunc, V_trunc = selectSVD(A,
                                          n_components=n_components,
                                          algorithm="truncated")
    X_trunc = U_trunc @ np.diag(np.sqrt(D_trunc))
    _, _, norm_trunc = procrustes(X, X_trunc)

    U_rand, D_rand, V_rand = selectSVD(A,
                                       n_components=n_components,
                                       algorithm="randomized",
                                       n_iter=10)
    X_rand = U_rand @ np.diag(np.sqrt(D_rand))
    _, _, norm_rand = procrustes(X, X_rand)

    rtol = 1e-4
    atol = 1e-4
    assert_allclose(norm_full, norm_trunc, rtol, atol)
    assert_allclose(norm_full, norm_rand, rtol, atol)
Example #6
    def align(self, standardfigure=None):
        '''Uses a Procrustes transformation to align to a standard figure.
        If the data contain NaNs, alignment uses the subset of keypoints that are not NaN.

        Args:
            standardfigure: DataFrame in Pose2D format to act as the standard. Defaults to standardfig.csv.
        Returns:
            df: Pose2D DataFrame with aligned coordinates.
        '''

        aligned_df = self._obj.copy()
        if standardfigure is None:
            standardfigure = pd.read_csv(os.path.join(get_resource_path(),
                                                      'standardfig.csv'),
                                         index_col=['frame'])
        xy_standard = _grab_coordinates(standardfigure)
        for rowix, row in self._obj.iterrows():
            # check if nans exist
            coords = _grab_coordinates(row.to_frame().T)
            coordbool = np.any(~np.isnan(coords), axis=1)
            if np.any(~coordbool):
                mtx1, mtx2, _ = procrustes(xy_standard[coordbool],
                                           coords[coordbool])
                coords[coordbool] = mtx2
                aligned_df.loc[rowix, [
                    col for col in self._obj.columns
                    if 'x_' in col or 'y_' in col
                ]] = coords.flatten()
            else:
                mtx1, mtx2, _ = procrustes(xy_standard, coords)
                aligned_df.loc[rowix, [
                    col for col in self._obj.columns
                    if 'x_' in col or 'y_' in col
                ]] = mtx2.flatten()
        return aligned_df
Example #7
def protest(a, b, n=999):
    """a: pd.DataFrame of Ordination coordinates for dataset a
     b: pd.DataFrame of Ordination coordinates for dataset b
     n: Integer, number of randomizations
     - - - - - - - - - - - - - - - - - - - - 
     returns:
     disparity, pval, disparities"""
    mtx1, mtx2, disparity = procrustes(a, b)

    #Randomization test time
    rows, cols = a.shape

    #disparities will be the list containing M^2 values for each test
    disparities = []
    for x in range(n):

        #start by randomly sampling 100% of the ordination coordinates
        a_rand = a.sample(frac=1, axis=0).reset_index(drop=True)
        b_rand = b.sample(frac=1, axis=0).reset_index(drop=True)

        #run the procrustes on the two randomly sampled coordinates
        mtx1_rand, mtx2_rand, disparity_rand = procrustes(a_rand, b_rand)

        #add the result to disparities
        disparities.append(disparity_rand)

    #set pval = proportion of randomized samples where the random disparity is smaller than
    #our observed disparity
    pval = (sum([disparity > d for d in disparities]) + 1) / (n + 1)

    return disparity, pval, disparities
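A hypothetical call to the protest() function above (assuming it, numpy as np, pandas as pd, and scipy.spatial.procrustes are in scope), with dataset b built as a noisy copy of dataset a:

rng = np.random.default_rng(0)
a = pd.DataFrame(rng.normal(size=(30, 3)))                        # ordination A: 30 samples x 3 axes
b = pd.DataFrame(a.values + rng.normal(scale=0.1, size=(30, 3)))  # noisy copy of A
disparity, pval, disparities = protest(a, b, n=99)
print(disparity, pval)                                            # small M^2 and small p expected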
Example #8
    def compareBeforeAndAfterDataReduction(self, current_row):
        ## get current settings
        current_u_level = self.threshold_setting_summary['u'].iloc[current_row]
        current_l_level = self.threshold_setting_summary['l'].iloc[current_row]
        current_t_level = self.threshold_setting_summary['t'].iloc[current_row]

        ## iteration
        iteratesThroughGroupSet_return_dict = self.iteratesThroughGroupSet(current_u_level = current_u_level, current_l_level = current_l_level, current_t_level = current_t_level)
        info_rich_at_current_threshold_level = iteratesThroughGroupSet_return_dict['info_rich_at_current_threshold_level']
        merged_data = iteratesThroughGroupSet_return_dict['merged_data']

        ## when there are information-rich feature(s)
        if len(info_rich_at_current_threshold_level) > 0:
            # subset data
            non_info = [value for value in merged_data.columns.values if value not in info_rich_at_current_threshold_level]
            current_non_info = merged_data[non_info]
            self.current_non_info = current_non_info # pass x on to self.x for unittest; use x in computation; TODO: update unittest code

            current_info_rich = merged_data[info_rich_at_current_threshold_level]
            self.current_info_rich = current_info_rich # pass x on to self.x for unittest; use x in computation; TODO: update unittest code

            # project samples onto PCA space
            current_data_info_projection = Projection(merged_dataframe = merged_data, normalize = self.normalize).projection_df
            current_non_info_projection = Projection(merged_dataframe = current_non_info, normalize = self.normalize).projection_df
            current_info_rich_projection = Projection(merged_dataframe = current_info_rich, normalize = self.normalize).projection_df

            # procrustes test
            if current_info_rich_projection.shape[1] > 1:
                if current_info_rich_projection.shape[1] > 2:
                    d = 3
                else:
                    d = 2

                data_projection = numpy.array(current_data_info_projection.iloc[:, 0:d])
                non_info_projection = numpy.array(current_non_info_projection.iloc[:, 0:d])
                info_projection = numpy.array(current_info_rich_projection.iloc[:, 0:d])

                current_non_info_projection_disparity = procrustes(data_projection, non_info_projection)[2]
                current_info_projection_disparity = procrustes(data_projection, info_projection)[2]

            else:
                current_non_info_projection_disparity = numpy.nan
                current_info_projection_disparity = numpy.nan

        ## when there is no information-rich feature
        else:
            current_non_info_projection_disparity = 0
            current_info_projection_disparity = numpy.nan

        # ['current_u_level', 'current_l_level', 'current_t_level', 'num_info_rich', 'current_info_projection_disparity', 'current_non_info_projection_disparity']
        compareBeforeAndAfterDataReduction_return_list = [current_u_level, current_l_level, current_t_level, info_rich_at_current_threshold_level, current_info_projection_disparity, current_non_info_projection_disparity]
        return(compareBeforeAndAfterDataReduction_return_list)
Example #9
    def test_summarize_pcoas(self):
        """summarize_pcoas works
        """
        master_pcoa = [['1', '2', '3'], \
            array([[-1.0, 0.0, 1.0], [2.0, 4.0, -4.0]]), \
            array([.76, .24])]
        jn1 = [['1', '2', '3'], \
            array([[1.2, 0.1, -1.2],[-2.5, -4.0, 4.5]]), \
            array([0.80, .20])]
        jn2 = [['1', '2', '3'], \
            array([[-1.4, 0.05, 1.3],[2.6, 4.1, -4.7]]), \
            array([0.76, .24])]
        jn3 = [['1', '2', '3'], \
            array([[-1.5, 0.05, 1.6],[2.4, 4.0, -4.8]]), \
            array([0.84, .16])]
        jn4 = [['1', '2', '3'], \
            array([[-1.5, 0.05, 1.6],[2.4, 4.0, -4.8]]), \
            array([0.84, .16])]
        support_pcoas = [jn1, jn2, jn3, jn4]
        #test with the ideal_fourths option
        matrix_average, matrix_low, matrix_high, eigval_average, m_names = \
            summarize_pcoas(master_pcoa, support_pcoas, 'ideal_fourths',
                            apply_procrustes=False)
        self.assertEqual(m_names, ['1', '2', '3'])
        assert_almost_equal(matrix_average[(0, 0)], -1.4)
        assert_almost_equal(matrix_average[(0, 1)], 0.0125)
        assert_almost_equal(matrix_low[(0, 0)], -1.5)
        assert_almost_equal(matrix_high[(0, 0)], -1.28333333)
        assert_almost_equal(matrix_low[(0, 1)], -0.0375)
        assert_almost_equal(matrix_high[(0, 1)], 0.05)
        assert_almost_equal(eigval_average[0], 0.81)
        assert_almost_equal(eigval_average[1], 0.19)
        #test with the IQR option
        matrix_average, matrix_low, matrix_high, eigval_average, m_names = \
            summarize_pcoas(master_pcoa, support_pcoas, method='IQR',
                            apply_procrustes=False)
        assert_almost_equal(matrix_low[(0, 0)], -1.5)
        assert_almost_equal(matrix_high[(0, 0)], -1.3)

        #test with procrustes option followed by sdev
        m, m1, msq = procrustes(master_pcoa[1], jn1[1])
        m, m2, msq = procrustes(master_pcoa[1], jn2[1])
        m, m3, msq = procrustes(master_pcoa[1], jn3[1])
        m, m4, msq = procrustes(master_pcoa[1], jn4[1])
        matrix_average, matrix_low, matrix_high, eigval_average, m_names = \
            summarize_pcoas(master_pcoa, support_pcoas, method='sdev',
                            apply_procrustes=True)

        x = array([m1[0, 0], m2[0, 0], m3[0, 0], m4[0, 0]])
        self.assertEqual(x.mean(), matrix_average[0, 0])
        self.assertEqual(-x.std(ddof=1) / 2, matrix_low[0, 0])
        self.assertEqual(x.std(ddof=1) / 2, matrix_high[0, 0])
Example #10
def task_2_3():

    knn1 = KNeighborsClassifier()
    knn1.fit(XTrain, XTrainL)
    knn1Predictions = knn1.predict(XTest)
    PrintAccuracy(XTestL, knn1Predictions)

    train2 = procrustes(XTrain[0], XTrain)
    test2 = procrustes(XTrain[0], XTest)

    knn2 = KNeighborsClassifier()
    knn2.fit(train2, XTrainL)
    knn2Predictions = knn2.predict(test2)
    PrintAccuracy(XTestL, knn2Predictions)
Example #11
def normLmarks(lmarks):
    norm_list = []
    idx = -1
    max_openness = 0.2
    mouthParams = np.zeros((1, 100))
    mouthParams[:, 1] = -0.06
    tmp = deepcopy(MSK)
    tmp[:, 48 * 2:] += np.dot(mouthParams, SK)[0, :, 48 * 2:]
    open_mouth_params = np.reshape(np.dot(S, tmp[0, :] - MSK[0, :]), (1, 100))
    if len(lmarks.shape) == 2:
        lmarks = lmarks.reshape(1, 68, 2)
    for i in range(lmarks.shape[0]):
        mtx1, mtx2, disparity = procrustes(ms_img, lmarks[i, :, :])
        mtx1 = np.reshape(mtx1, [1, 136])
        mtx2 = np.reshape(mtx2, [1, 136])
        norm_list.append(mtx2[0, :])
    pred_seq = []
    init_params = np.reshape(np.dot(S, norm_list[idx] - mtx1[0, :]), (1, 100))
    for i in range(lmarks.shape[0]):
        params = np.reshape(np.dot(S, norm_list[i] - mtx1[0, :]), (1, 100)) \
        - init_params - open_mouth_params

        predicted = np.dot(params, SK)[0, :, :] + MSK
        pred_seq.append(predicted[0, :])
    return np.array(pred_seq), np.array(norm_list), 1
Example #12
def errorCalculation(robots, logFile):
    """
    Error calculation of modelling, computes different errors and writes to file

    Input arguments:
    robots = instance of the robots
    logFile = where to save the output
    """
    
    #TODO: use nrmse next time or fnorm
    for robot in robots:
        rmse = np.sqrt(np.square(robot.mappingGroundTruth - robot.expectedMeasurement).mean())
        logFile.writeError(robot.ID,rmse,robot.currentTime, 'RMSE', endTime=True)

        # nrmse = 100 * rmse/(np.max(robot.mappingGroundTruth)-np.min(robot.mappingGroundTruth))
        # logFile.writeError(robot.ID,nrmse,robot.currentTime, 'NRMSE', endTime=True)
        
        # rmse = np.sqrt(np.sum(np.square(robot.mappingGroundTruth - robot.expectedMeasurement)))
        # fnorm = rmse/(np.sqrt(np.sum(np.square(robot.mappingGroundTruth))))
        # logFile.writeError(robot.ID,fnorm,robot.currentTime, 'FNORM', endTime=True)

        similarity = ssim(robot.mappingGroundTruth,robot.expectedMeasurement, gaussian_weights=False)
        logFile.writeError(robot.ID,similarity,robot.currentTime, 'SSIM', endTime=True)

        _, _, procru = procrustes(robot.mappingGroundTruth,robot.expectedMeasurement)
        logFile.writeError(robot.ID,procru,robot.currentTime, 'Dissim')
Example #13
    def fit(self, corpus):
        self.vocab = corpus.vocab
        self.inv_vocab = {val: key for key, val in self.vocab.items()}
        self.embeddings = [static_w2v(M, self.rank) for M in corpus.SPPMI]
        for t in range(1, corpus.times):
            self.embeddings[t - 1], self.embeddings[t], disp = procrustes(
                self.embeddings[t - 1], self.embeddings[t])
Example #14
    def _calculate_error(
        self, data, data_prev=None, weights=None, subsample_genes=None
    ):
        """Calculates difference before and after diffusion

        Parameters
        ----------
        data : array-like
            current data matrix
        data_prev : array-like, optional (default: None)
            previous data matrix. If None, `data` is simply prepared for
            comparison and no error is returned
        weights : list-like, optional (default: None)
            weightings for dimensions of data. If None, dimensions are equally
            weighted
        subsample_genes : list-like, optional (default: None)
            genes to select in subsampling. If None, no subsampling is
            performed

        Returns
        -------
        error : float
            Procrustes disparity value
        data_curr : array-like
            transformed data to use for the next comparison
        """
        if subsample_genes is not None:
            data = data[:, subsample_genes]
        if weights is None:
            weights = np.ones(data.shape[1]) / data.shape[1]
        if data_prev is not None:
            _, _, error = spatial.procrustes(data_prev, data)
        else:
            error = None
        return error, data
Example #15
def get_circle_positions(topic_similarity_matrix):
    #Get new circle positions from the proposed topic similarity matrix
    new_circle_positions = dict()
    for lambda_ in range(0, 101):
        lambda_ = lambda_ / 100
        matrix_cosine_distance = 1 - topic_similarity_matrix[lambda_]
        np.fill_diagonal(matrix_cosine_distance, 0)
        new_circle_positions[lambda_] = _pcoa(matrix_cosine_distance,
                                              n_components=2).tolist()
    #Apply procrustes
    lambdas = list(new_circle_positions.keys())
    standardized_matrix = dict()
    disparity_values = dict()
    original_a = new_circle_positions[0.0]
    for i in range(len(lambdas) - 1):
        #print(lambdas[i], lambdas[i+1])
        original_b = new_circle_positions[lambdas[i + 1]]
        mtx1, mtx2, disparity = procrustes(original_a, original_b)
        disparity_values[lambdas[i]] = disparity
        standardized_matrix[lambdas[i]] = mtx1.tolist()
        original_a = mtx2
    standardized_matrix[lambdas[len(lambdas) - 1]] = mtx2.tolist()
    disparity_values[lambdas[len(lambdas) - 1]] = disparity

    new_circle_positions = standardized_matrix
    new_circle_positions = json.dumps(new_circle_positions)
    return new_circle_positions
Example #16
def procrustes_analysis(data):
    # accumulate results locally so the function is self-contained
    disp_vals = []
    pd_maps = []
    for d in data:
        mtx1, mtx2, disparity = procrustes(data[0], d)
        # disparity is the sum of the square errors
        # mtx2 is the optimally transformed version of d
        disp_vals.append(disparity.round(3))
        pd_maps.append(mtx2)
    return disp_vals, pd_maps
Example #17
def GPA(specTooth, disp):

    #specTooth is a matrix containing the shapes of the 14 teeth in a specific position, 40x2x1x14 matrix
    #keeping the third dimension, more practical to handle

    #choose initial reference shape randomly
    initShapePos = random.randint(0, 13)

    #initialize point matrices
    ComparedShape = specTooth[:, :, initShapePos]
    sumShape = np.zeros((40, 2), dtype=np.double)
    normalizedShapes = np.zeros((40, 2, 14), dtype=np.double)

    #initialize variables for checking the disparity reduction
    prev_meanDisp = 0
    dispReduct = 1
    cyc = 1

    #cycles through until the difference between the mean disparity of cycle i and the mean disparity of cycle i-1
    #gets smaller than the selected threshold
    thresh = 1e-06

    while abs(dispReduct) > thresh:

        sumShape = np.zeros((40, 2), dtype=np.double)
        sumDisp = 0

        for inst in range(14):  # align all 14 shapes to the current reference
            [stdMat1, orientation,
             disparity] = spsptl.procrustes(ComparedShape, specTooth[:, :,
                                                                     inst])
            #normalizedShapes[:,:,inst] = stdMat1
            sumShape = sumShape + orientation
            sumDisp = sumDisp + disparity
            if disp:
                plt.axis([-0.3, 0.3, -0.3, 0.3])
                plt.plot(orientation[:, 0], orientation[:, 1], 'ro')
                plt.pause(0.05)
                plt.show()

        meanShape = sumShape / 14
        meanDisp = sumDisp / 14
        dispReduct = meanDisp - prev_meanDisp

        print('disparity reduction, cycle ', cyc, ': ', dispReduct)
        print('mean disparity, cycle ', cyc, ': ', meanDisp)

        if disp:
            plt.plot(stdMat1[:, 0], stdMat1[:, 1], 'bo')
            plt.plot(meanShape[:, 0], meanShape[:, 1], 'yo')
            plt.pause(0.05)
            plt.show()
            plt.figure()
            plt.axis([-0.3, 0.3, -0.3, 0.3])

        ComparedShape = meanShape
        prev_meanDisp = meanDisp
        cyc += 1

    return [meanShape, meanDisp]
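A hypothetical call to the GPA() routine above on synthetic data (assuming it and its imports, numpy as np, random, and spsptl for scipy.spatial, are in scope); plotting is disabled so matplotlib is not exercised.

base = np.random.rand(40, 2)
specTooth = np.stack([base + 0.01 * np.random.randn(40, 2) for _ in range(14)], axis=2)  # 40x2x14
meanShape, meanDisp = GPA(specTooth, disp=False)
print(meanShape.shape, meanDisp)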
Example #18
    def procrustes_word2vec(self, row):
        try:
            if row['id'] % 10000 == 0:
                elapsed = time.time() - start_time
                print("Processed {:10.0f} questions in {:10.0f} s ".format(
                    row['id'], elapsed))
        except KeyError:
            if row['test_id'] % 10000 == 0:
                elapsed = time.time() - start_time
                print("Processed {:10.0f} questions in {:10.0f} s ".format(
                    row['test_id'], elapsed))

        stops = set(stopwords.words("english"))
        q1 = self.getWordVecs(row['question1'])
        q2 = self.getWordVecs(row['question2'])

        q1 = list(set(word for word in q1 if word not in stops))
        q2 = list(set(word for word in q2 if word not in stops))

        trim_length = min(4, min(len(q1), len(q2)))
        q1 = q1[:trim_length]
        q2 = q2[:trim_length]

        if len(q1) == 0 or len(q2) == 0 or len(q1) == 1 or len(q2) == 1:
            return 0

        q1_vecs = tuple(self.wordvecs[word].reshape((1, 300)) for word in q1)
        q2_vecs = tuple(self.wordvecs[word].reshape((1, 300)) for word in q2)

        q1_vecs = np.concatenate(q1_vecs, axis=0)
        q2_vecs = np.concatenate(q2_vecs, axis=0)

        score = procrustes(q1_vecs, q2_vecs)[2]
        return score
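A self-contained sketch of the comparison at the heart of procrustes_word2vec() above, using synthetic 300-dimensional vectors in place of real word embeddings: when one stack of vectors is a rotated, slightly perturbed copy of the other, the disparity is close to zero.

import numpy as np
from scipy.spatial import procrustes

rng = np.random.default_rng(0)
q1_vecs = rng.normal(size=(4, 300))                            # four "word" vectors
Q, _ = np.linalg.qr(rng.normal(size=(300, 300)))               # random orthogonal matrix
q2_vecs = q1_vecs @ Q + rng.normal(scale=0.01, size=(4, 300))  # rotated copy plus small noise
print(procrustes(q1_vecs, q2_vecs)[2])                         # disparity close to 0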
Example #19
def summarize_pcoas(master_pcoa, support_pcoas, method='IQR', apply_procrustes=True):
    """returns the average PCoA vector values for the support pcoas

    Also returns the ranges as calculated with the specified method.
    The choices are:
        IQR: the Interquartile Range
        ideal fourths: Ideal fourths method as implemented in scipy
    """
    if apply_procrustes:
        # perform procrustes before averaging
        support_pcoas = [list(sp) for sp in support_pcoas]
        master_pcoa = list(master_pcoa)
        for i, pcoa in enumerate(support_pcoas):
            master_std, pcoa_std, m_squared = procrustes(master_pcoa[1],pcoa[1])
            support_pcoas[i][1] = pcoa_std
        master_pcoa[1] = master_std

    m_matrix = master_pcoa[1]
    m_eigvals = master_pcoa[2]
    m_names = master_pcoa[0]
    jn_flipped_matrices = []
    all_eigvals = []
    for rep in support_pcoas:
        matrix = rep[1]
        eigvals = rep[2]
        all_eigvals.append(eigvals)
        jn_flipped_matrices.append(_flip_vectors(matrix, m_matrix))
    matrix_average, matrix_low, matrix_high = _compute_jn_pcoa_avg_ranges(\
            jn_flipped_matrices, method)
    #compute average eigvals
    all_eigvals_stack = vstack(all_eigvals)
    eigval_sum = numpy_sum(all_eigvals_stack, axis=0)
    eigval_average = eigval_sum / float(len(all_eigvals))
    return matrix_average, matrix_low, matrix_high, eigval_average, m_names
Example #20
def get_circle_positions_from_old_matrix(old_circle_positions,
                                         topic_similarity_matrix):
    data_keys = list(old_circle_positions.keys())

    #get new positions from new matrix
    new_circle_positions = dict()
    for lambda_ in range(0, 101):
        lambda_ = lambda_ / 100
        matrix_cosine_distance = 1 - topic_similarity_matrix[lambda_]
        np.fill_diagonal(matrix_cosine_distance, 0)
        new_circle_positions[str(lambda_)] = _pcoa(matrix_cosine_distance,
                                                   n_components=2).tolist()

    #Apply procrustes. Compare old positions with new positions
    standardized_matrix = dict()
    disparity_values = dict()

    for i in range(len(data_keys)):

        current_omega = data_keys[i]
        original_a = np.array(old_circle_positions[current_omega])
        original_b = np.array(new_circle_positions[current_omega])
        mtx1, mtx2, disparity = procrustes(original_a, original_b)
        disparity_values[current_omega] = disparity
        standardized_matrix[current_omega] = mtx2.tolist()

    new_circle_positions = standardized_matrix
    new_circle_positions = json.dumps(new_circle_positions)
    return new_circle_positions
Example #21
def procrustes_plot(a, b, a_name, b_name):
    """a: ordination coordinates for dataset a
     b: ordination coordinates for dataset b
     a_name: name for dataset a
     b_name: name for dataset b
     - - - - - - - - - - - - - - - - - - - - 
     returns:
     ax: axes of plot
     disp: disparity score of procrustes
    """

    # do procrustes
    mtx1, mtx2, disparity = procrustes(a, b)

    # Make our plotting df
    proplot = pd.concat([pd.DataFrame(mtx1), pd.DataFrame(mtx2)])
    proplot.columns = ["PCo1", "PCo2", "PCo3"]
    proplot["Dataset"] = [a_name] * a.shape[0] + [b_name] * b.shape[0]

    # plot
    ax = sns.scatterplot(x="PCo1",
                         y="PCo2",
                         style="Dataset",
                         hue="Dataset",
                         data=proplot,
                         markers=["v", "o"],
                         s=150)
    # Add lines
    for i in range(len(mtx1)):
        plt.plot([mtx1[i, 0], mtx2[i, 0]], [mtx1[i, 1], mtx2[i, 1]],
                 c="black",
                 linewidth=0.75)

    return ax, disparity
Example #22
def Proc_PDB(inputPtr, comparePtr):
    xyzIn = Read_PDB(inputPtr)
    xyzComp = Read_PDB(comparePtr)
    standXYZComp, newXYZ, dis = procrustes(xyzComp, xyzIn)
    print(xyzIn)
    print(xyzComp)
    print(newXYZ)
    return standXYZComp, newXYZ
Example #23
File: rsa.py Project: qihongl/qmvpa
def inter_procrustes(matrix_array):
    # input: matrix_array, n_subj x n_units x n_examples
    n_nets = np.shape(matrix_array)[0]
    D = np.zeros((n_nets, n_nets))
    for i in range(n_nets):
        for j in np.arange(0, i):
            _, _, D[i, j] = procrustes(matrix_array[i], matrix_array[j])
    return D
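A hypothetical call to inter_procrustes() above (assuming it, numpy as np, and scipy.spatial.procrustes are in scope), using three synthetic subjects:

rng = np.random.default_rng(1)
matrix_array = rng.normal(size=(3, 10, 20))   # 3 subjects, each 10 units x 20 examples
D = inter_procrustes(matrix_array)
D = D + D.T                                   # mirror the lower triangle; the diagonal stays 0
print(D)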
Example #24
    def test_procrustes(self):
        # tests procrustes' ability to match two matrices.
        #
        # the second matrix is a rotated, shifted, scaled, and mirrored version
        # of the first, in two dimensions only
        #
        # can shift, mirror, and scale an 'L'?
        a, b, disparity = procrustes(self.data1, self.data2)
        assert_allclose(b, a)
        assert_almost_equal(disparity, 0.)

        # if first mtx is standardized, leaves first mtx unchanged?
        m4, m5, disp45 = procrustes(self.data4, self.data5)
        assert_equal(m4, self.data4)

        # at worst, data3 is an 'L' with one point off by .5
        m1, m3, disp13 = procrustes(self.data1, self.data3)
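A self-contained sketch of the property this test exercises; the 'L'-shaped points below are an assumption, not the class fixtures. When data2 is a rotated, shifted, scaled, and mirrored copy of data1, the disparity is numerically zero.

import numpy as np
from scipy.spatial import procrustes

data1 = np.array([[0.0, 0.0], [0.0, 1.0], [0.0, 2.0], [1.0, 0.0]])  # an 'L'
theta = np.pi / 3
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])                     # rotation
mirror = np.array([[1.0, 0.0], [0.0, -1.0]])                        # reflection about the x-axis
data2 = 2.5 * (data1 @ R.T @ mirror) + np.array([4.0, -7.0])        # similarity transform of data1
mtx1, mtx2, disparity = procrustes(data1, data2)
print(disparity)                                                     # ~0 up to floating point error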
Example #26
    def rescaledProcrustes(self, mtx1, mtx2):
        #Move all data to origin
        newMtx1, newMtx2, disparity1 = procrustes(mtx1, mtx2)

        #Rescale by multiplying the normals
        newMtx1 *= np.linalg.norm(mtx1)
        newMtx2 *= np.linalg.norm(mtx2)

        return newMtx1, newMtx2
Example #27
def procrustes_analysis(
    reference: OrdinationResults,
    other: OrdinationResults,
    dimensions: int = 5,
    permutations: int = 999
) -> (OrdinationResults, OrdinationResults, pd.DataFrame):

    if reference.samples.shape != other.samples.shape:
        raise ValueError('The matrices cannot be fitted unless they have the '
                         'same dimensions')

    if reference.samples.shape[1] < dimensions:
        raise ValueError('Cannot fit fewer dimensions than available')

    # fail if there are any elements in the symmetric difference
    diff = reference.samples.index.symmetric_difference(other.samples.index)
    if not diff.empty:
        raise ValueError('The ordinations represent two different sets of '
                         'samples')

    # make the matrices be comparable
    other.samples = other.samples.reindex(index=reference.samples.index)
    mtx1, mtx2, m2 = procrustes(reference.samples.values[:, :dimensions],
                                other.samples.values[:, :dimensions])

    axes = reference.samples.columns[:dimensions]
    samples1 = pd.DataFrame(data=mtx1,
                            index=reference.samples.index.copy(),
                            columns=axes.copy())
    samples2 = pd.DataFrame(data=mtx2,
                            index=reference.samples.index.copy(),
                            columns=axes.copy())

    info = _procrustes_monte_carlo(reference.samples.values[:, :dimensions],
                                   other.samples.values[:, :dimensions], m2,
                                   permutations)

    out1 = OrdinationResults(short_method_name=reference.short_method_name,
                             long_method_name=reference.long_method_name,
                             eigvals=reference.eigvals[:dimensions].copy(),
                             samples=samples1,
                             features=reference.features,
                             biplot_scores=reference.biplot_scores,
                             sample_constraints=reference.sample_constraints,
                             proportion_explained=reference.
                             proportion_explained[:dimensions].copy())
    out2 = OrdinationResults(
        short_method_name=other.short_method_name,
        long_method_name=other.long_method_name,
        eigvals=other.eigvals[:dimensions].copy(),
        samples=samples2,
        features=other.features,
        biplot_scores=other.biplot_scores,
        sample_constraints=other.sample_constraints,
        proportion_explained=other.proportion_explained[:dimensions].copy())
    return out1, out2, info
Example #28
            def proc(to_fit):

                # Reshape to_fit
                to_fit = to_fit.reshape(self.template_dims)

                # Call procrustes
                _, pr_fit, _ = procrustes(self.average_shape, to_fit)

                # Return the flattened version
                return pr_fit.flatten()
Example #29
def procrustes_analysis(landmarks):

    mean = np.mean(landmarks, 0)
    landmarks_std = np.empty_like(landmarks)

    for i, landmark in enumerate(landmarks):

        mean_std, landmark_std, disp = procrustes(mean, landmark)
        landmarks_std[i] = landmark_std

    return landmarks_std
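A hypothetical call to procrustes_analysis() above (assuming it, numpy as np, and scipy.spatial.procrustes are in scope): align four noisy copies of a five-point landmark set to their raw mean.

rng = np.random.default_rng(0)
base = rng.normal(size=(5, 2))                    # five landmarks in 2-D
landmarks = np.stack([base + rng.normal(scale=0.05, size=base.shape) for _ in range(4)])
landmarks_std = procrustes_analysis(landmarks)
print(landmarks_std.shape)                        # (4, 5, 2)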
Example #30
def extract_procrustes(ref_coord, result_MDS, ismean=True):
    if ismean:
        disp_layer, score_procrustes, tform_procrustes = algos.procrustes(ref_coord, result_MDS[:, :3])
        return disp_layer, tform_procrustes
    else:
        DISPLAY, TFORM = list(), list()
        for i in range(len(result_MDS)):
            disp_layer, score_procrustes, tform_procrustes = algos.procrustes(ref_coord, result_MDS[i][:, :3])
            _, m2, disp_layer = procrustes(ref_coord, result_MDS[i][:, :3])
            DISPLAY.append(100 * (1 - disp_layer))
            TFORM.append(tform_procrustes)
        return DISPLAY, TFORM
Example #31
def _calculate_error(data, data_prev=None, weights=None, subsample_genes=None):

    if subsample_genes is not None:
        data = data[:, subsample_genes]
    if weights is None:
        weights = np.ones(data.shape[1]) / data.shape[1]
    if data_prev is not None:
        _, _, error = spatial.procrustes(data_prev, data)
    else:
        error = 99999
    return error, data
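A hypothetical two-step use of the free-function _calculate_error() above (assuming it, numpy as np, and scipy's spatial module are in scope): the first call only prepares the data, the second compares against the previous matrix.

rng = np.random.default_rng(0)
data_t0 = rng.normal(size=(20, 6))
data_t1 = data_t0 + rng.normal(scale=0.01, size=data_t0.shape)
_, prev = _calculate_error(data_t0)                      # no previous matrix: sentinel error of 99999
error, prev = _calculate_error(data_t1, data_prev=prev)  # Procrustes disparity between the two steps
print(error)                                             # small value for a near-identical matrix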
Example #32
def generalized_procrustes_analysis(G_cen):
    # Initialize data arrays based on input training set
    G_cen_aligned = np.zeros(np.shape(G_cen))
    shapes = np.zeros((np.shape(G_cen)[0], int(np.shape(G_cen)[1] / 3), 3))
    shapes_aligned = np.zeros((np.shape(G_cen)[0], int(np.shape(G_cen)[1] / 3), 3))

    # Convert training set for Procrustes analysis
    for i in range(0, np.shape(G_cen)[0]):
        shapes[i, :, :] = np.reshape(G_cen[i, :], (-1, 3))

    # initialize Procrustes distance
    current_distance = 0

    # Initialize a mean shape (first element), with zero mean and Frobenius norm
    mean_shape = shapes[0, :, :]
    mean_shape -= np.mean(mean_shape, 0)
    norm1 = np.linalg.norm(mean_shape)
    mean_shape /= norm1
    num_shapes = len(shapes)

    while True:

        shapes_aligned[0, :, :] = mean_shape

        for sh in range(1, num_shapes):
            shapes_aligned[0, :, :], shapes_aligned[sh, :, :], _ = procrustes(mean_shape, shapes[sh, :, :])

        new_mean = np.mean(shapes_aligned, axis=0)

        new_distance = procrustes_distance(new_mean, mean_shape)

        if new_distance == current_distance:
            break

        _, new_mean, _ = procrustes(mean_shape, new_mean)
        mean_shape = new_mean
        current_distance = new_distance

    for i in range(0, np.shape(G_cen)[0]):
        G_cen_aligned[i, :] = shapes_aligned[i, :, :].flatten()
    return shapes_aligned, G_cen_aligned
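The routine above calls a procrustes_distance() helper that is not shown. A minimal sketch of one plausible definition, the square root of the scipy disparity (i.e. the Frobenius distance between the two shapes after superimposition), is:

import numpy as np
from scipy.spatial import procrustes

def procrustes_distance(shape1, shape2):
    # disparity is the sum of squared point-wise differences after alignment
    _, _, disparity = procrustes(shape1, shape2)
    return np.sqrt(disparity)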
Example #33
def procrustes_analysis(reference: OrdinationResults, other: OrdinationResults,
                        dimensions: int=5) -> (OrdinationResults,
                                               OrdinationResults):

    if reference.samples.shape != other.samples.shape:
        raise ValueError('The matrices cannot be fitted unless they have the '
                         'same dimensions')

    if reference.samples.shape[1] < dimensions:
        raise ValueError('Cannot fit fewer dimensions than available')

    # fail if there are any elements in the symmetric difference
    if not reference.samples.index.symmetric_difference(other.samples.index).empty:
        raise ValueError('The ordinations represent two different sets of '
                         'samples')

    # make the matrices be comparable
    other.samples = other.samples.reindex(index=reference.samples.index)

    mtx1, mtx2, _ = procrustes(reference.samples.values[:, :dimensions],
                               other.samples.values[:, :dimensions])

    axes = reference.samples.columns[:dimensions]
    samples1 = pd.DataFrame(data=mtx1,
                            index=reference.samples.index.copy(),
                            columns=axes.copy())
    samples2 = pd.DataFrame(data=mtx2,
                            index=reference.samples.index.copy(),
                            columns=axes.copy())

    out1 = OrdinationResults(
            short_method_name=reference.short_method_name,
            long_method_name=reference.long_method_name,
            eigvals=reference.eigvals[:dimensions].copy(),
            samples=samples1,
            features=reference.features,
            biplot_scores=reference.biplot_scores,
            sample_constraints=reference.sample_constraints,
            proportion_explained=reference.proportion_explained[:dimensions]
            .copy())
    out2 = OrdinationResults(
            short_method_name=other.short_method_name,
            long_method_name=other.long_method_name,
            eigvals=other.eigvals[:dimensions].copy(),
            samples=samples2,
            features=other.features,
            biplot_scores=other.biplot_scores,
            sample_constraints=other.sample_constraints,
            proportion_explained=other.proportion_explained[:dimensions]
            .copy())
    return out1, out2