Example No. 1
    def project_param_to_shape(self, params_vec):
        """
        Back-projects the given PCA parameters to restore the corresponding shape vector.

        :param params_vec: vector of ASM parameters
        :type params_vec: numpy.ndarray
        :return: vector of points (2Nx1 array)
        :rtype: numpy.ndarray
        """
        return cv.PCABackProject(params_vec, self.pca_shape, self.eigenvectors)
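As a quick sanity check on what this helper does: back-projecting an all-zero parameter vector simply returns the PCA mean shape. A minimal standalone sketch (the array names below are illustrative, not part of the class):

import cv2
import numpy as np

# illustrative training data: 20 shapes, each flattened into a 2N-element row
shapes = np.random.rand(20, 16).astype(np.float64)
pca_mean, eigenvectors = cv2.PCACompute(shapes, mean=None)

params = np.zeros((1, eigenvectors.shape[0]), dtype=np.float64)   # all-zero ASM parameters
restored = cv2.PCABackProject(params, pca_mean, eigenvectors)
print(np.allclose(restored, pca_mean))   # True: zero parameters give back the mean shape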
Example No. 2
def demo_reconstruction(mean, eigenvectors, test_images, test_vecs):

    instructions = [
        'Reconstruction from PCA',
        '',
        'Hit [ or ] for prev/next image',
        'Hit R for random image',
        'Hit ESC when done',
        '',
        'Hit any key to begin',
    ]

    show_text_screen(instructions)

    test_recons = cv2.PCABackProject(test_vecs, mean, eigenvectors)

    idx = 0
    n = len(test_images)

    while True:

        img_orig = test_images[idx]
        img_recons = test_recons[idx]

        orig = row2img(img_orig, TARGET_DISPLAY_SIZE, f'Test image {idx:04d}')

        recons = row2img(img_recons, TARGET_DISPLAY_SIZE,
                         f'Reconstructed from {len(eigenvectors)} PCs')

        err = row2img(img_orig - img_recons,
                      TARGET_DISPLAY_SIZE,
                      'Error image',
                      img_type='error')

        display = np.hstack((orig, recons, err))

        cv2.imshow(WINDOW_NAME, display)

        k = cv2.waitKey(5)

        if k == ord(' ') or k == ord(']'):
            idx = (idx + 1) % n
        elif k == ord('['):
            idx = (idx + n - 1) % n
        elif k == ord('r') or k == ord('R'):
            idx = np.random.randint(n)
        elif k == 27:
            return
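For context, a hedged sketch of how the inputs to demo_reconstruction could be prepared; the array names and sizes here are illustrative only (each row of test_rows stands for one flattened greyscale test image):

import cv2
import numpy as np

test_rows = np.random.rand(50, 64 * 64).astype(np.float32)   # stand-in for flattened images
mean, eigenvectors = cv2.PCACompute(test_rows, mean=None, maxComponents=16)
test_vecs = cv2.PCAProject(test_rows, mean, eigenvectors)     # (50, 16) PCA coefficients
# demo_reconstruction(mean, eigenvectors, test_images, test_vecs)  # with real images loaded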
Example No. 3
def pcaDistanceSegmentation(p: QPoint, model: HyperspectralImgModel, threshold,
                            maxComponents) -> QImage:
    # reshape the cube to (image height * width) x number of bands
    imgarr = model.imgArr.reshape(
        (model.imgArr.shape[0] * model.imgArr.shape[1], model.imgArr.shape[2]))
    # PCA
    mean, eigenv = cv2.PCACompute(imgarr, mean=None)
    # keep the first maxComponents principal components (rows of eigenv),
    # then project and back-project to reconstruct the spectra
    projected = cv2.PCAProject(imgarr, mean, eigenv[:maxComponents])
    reconstructed = cv2.PCABackProject(projected, mean, eigenv[:maxComponents])
    # reshape the matrix back to its original size
    reconstructed = reconstructed.reshape(model.img.shape[0],
                                          model.img.shape[1],
                                          model.imgArr.shape[2])
    # distance-based segmentation on the reconstructed cube
    return distanceSegmentation(
        p, HyperspectralImgModel(model.img, model.sceneImg, reconstructed),
        threshold)
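The same truncation can also be requested up front through PCACompute's maxComponents argument instead of slicing the eigenvector matrix; a small sketch on an illustrative (pixels x bands) array:

import cv2
import numpy as np

imgarr = np.random.rand(1000, 32).astype(np.float32)   # stand-in hyperspectral matrix
mean, eigenv = cv2.PCACompute(imgarr, mean=None, maxComponents=8)
reconstructed = cv2.PCABackProject(cv2.PCAProject(imgarr, mean, eigenv), mean, eigenv)
print(reconstructed.shape)   # (1000, 32): full band count, rank-8 approximation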
Example No. 4
import cv2
import numpy as np
from math import exp


def get_curve(data, E=0.001, s=500):

    # project the points onto the principal axes and sort them along the first one
    mean, px = cv2.PCACompute(data=data, mean=np.array([data.mean(axis=0)]))
    new_data = cv2.PCAProject(data, mean, px)
    new_data = np.array(sorted(new_data, key=lambda x: x[0]))

    def dist(t1, t2, sigma):
        # Gaussian kernel weight based on the separation along the first principal axis
        return exp(-(t2[0] - t1[0])**2 / (2 * sigma))

    left_index = 0
    right_index = 0
    index = 0
    all_px = new_data.shape[0]
    curve = []

    while index < all_px:
        # grow/shrink the window of points whose kernel weight exceeds E
        while (right_index < all_px
               and dist(new_data[index], new_data[right_index], s) > E):
            right_index += 1
        while (left_index < index
               and dist(new_data[index], new_data[left_index], s) < E):
            left_index += 1

        # kernel-weighted average of the points inside the window
        window = new_data[left_index:right_index]
        weights = np.array([dist(x, new_data[index], s) for x in window])
        curve.append(weights @ window / weights.sum())
        index += 1

    # map the smoothed curve back to the original coordinate system
    return cv2.PCABackProject(np.array(curve), mean, px)
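A hedged usage sketch with synthetic data: get_curve takes a noisy 2-D point cloud and returns kernel-smoothed points expressed back in the original coordinates (the data below is purely illustrative and sized so the default kernel width s=500 acts locally):

import numpy as np

t = np.linspace(0, 400, 400)
y = 50 * np.sin(t / 60) + np.random.normal(0, 5, t.size)
points = np.column_stack([t, y]).astype(np.float64)

smoothed = get_curve(points)   # (400, 2) smoothed points in the original x-y frame
print(smoothed.shape)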
Example No. 5
import cv2
import numpy as np
from math import exp


def get_curve(data, E=0.001, s=200):

    # project the points onto the principal axes and sort them along the first one
    mean, px = cv2.PCACompute(data=data, mean=np.array([data.mean(axis=0)]))
    new_data = cv2.PCAProject(data, mean, px)
    new_data = np.array(sorted(new_data, key=lambda x: x[0]))

    def dist(t1, t2, sigma):
        # Gaussian kernel weight between two positions on the first principal axis
        return exp(-(t2 - t1)**2 / (2 * sigma))

    left_index = 0
    right_index = 0
    all_px = new_data.shape[0]
    curve = []
    is_ = 0
    min_, max_ = int(min(new_data[:, 0])) - 1, int(max(new_data[:, 0])) + 1

    # sample the smoothed curve every 10 units along the first principal axis
    for index in range(min_, max_, 10):
        is_ += 1
        # grow/shrink the window of points whose kernel weight exceeds E
        while (right_index < all_px
               and dist(index, new_data[right_index][0], s) > E):
            right_index += 1
        while (left_index < is_
               and dist(index, new_data[left_index][0], s) < E):
            left_index += 1

        window = new_data[left_index:right_index]
        if window.size == 0:
            continue  # no data points close enough to this sample location
        # kernel-weighted average of the points inside the window
        weights = np.array([dist(x[0], index, s) for x in window])
        curve.append(weights @ window / weights.sum())

    # map the smoothed curve back to the original coordinate system
    return cv2.PCABackProject(np.array(curve), mean, px)
Example No. 6
def back_projecao_PCA(projecao, media, auto_vetores):
    # back-project PCA coefficients (projecao) onto the original space using the
    # mean (media) and eigenvectors (auto_vetores)
    back_projecao = cv.PCABackProject(projecao, media, auto_vetores)
    return back_projecao
Example No. 7
    def find_params_for_shape(self, vec, vec_old, fit_result_old, level):
        """
        Finds the b parameters of the model for a given shape

        :param vec: fitted shape
        :type vec: ShapeVector
        :param vec_old: prior shape
        :type vec_old: ShapeVector
        :param fit_result_old: result object of the fitting
        :type fit_result_old: ASMFitResult
        :param level: level of the pyramid
        :type level: int
        :return: fitting result with found parameters set
        :rtype: ASMFitResult
        """
        c = np.array([0.0005, 0.0005, 0.0005])
        vec_t = ShapeVector()
        vec_t.set_from_vector(vec_old.vector)
        vec_t.subtract_vector(vec)
        rho2 = c[level] * vec_t.vector.dot(vec_t.vector)
        x = ShapeVector()
        x_from_params = ShapeVector()
        vec_repr = ShapeVector()

        cur_trans = fit_result_old.similarity_trans
        cur_params = np.zeros([1, self.eigenvalues_pyr[level].shape[0]])
        for i in range(self.eigenvalues_pyr[level].shape[0]):
            if i < fit_result_old.params.shape[1]:
                cur_params[0, i] = fit_result_old.params[0, i]
            else:
                cur_params[0, i] = 0

        ii = 0
        while True:
            s = cur_trans.get_scale()
            last_params = cur_params.copy()

            vec_r = cur_trans.inverted_transform(vec)
            p = self.sigma2_pyr[level] / (self.sigma2_pyr[level] + rho2 /
                                          (s * s))
            delta2 = 1 / (1 / self.sigma2_pyr[level] + (s * s) / rho2)
            x_from_params.set_from_vector(
                cv.PCABackProject(cur_params, self.pca_shape_pyr[level],
                                  self.eigenvectors_pyr[level])[0])
            tmp = vec_r.vector.reshape([1, -1])
            # PCAProject/PCABackProject take (data, mean, eigenvectors); the
            # eigenvalues are not inputs to the projection itself
            tmp_full_params = cv.PCAProject(
                tmp, self.pca_full_shape['mean'],
                self.pca_full_shape['eigenvectors'])
            vec_repr.set_from_vector(
                cv.PCABackProject(tmp_full_params,
                                  self.pca_full_shape['mean'],
                                  self.pca_full_shape['eigenvectors'])[0])
            x.set_from_vector(p * vec_repr.vector +
                              (1 - p) * x_from_params.vector)
            x2 = x.vector.dot(x.vector) + (x.vector.shape[0] - 4) * delta2

            tmp = x.vector.reshape([1, -1])
            cur_params = cv.PCAProject(tmp, self.pca_shape_pyr[level],
                                       self.eigenvectors_pyr[level])
            # shrink each parameter by lambda / (lambda + sigma^2)
            for i in range(self.eigenvalues_pyr[level].shape[0]):
                cur_params[0, i] *= (
                    self.eigenvalues[i, 0] /
                    (self.eigenvalues[i, 0] + self.sigma2_pyr[level]))

            n_p = x.n_points
            cur_trans.a = vec.vector.dot(x.vector) / x2
            cur_trans.b = 0
            for i in range(n_p):
                cur_trans.b += x.get_point(i)[0] * vec.get_point(
                    i)[1] - x.get_point(i)[1] * vec.get_point(i)[0]
            cur_trans.b /= x2
            cur_trans.x_t = vec.get_x_mean()
            cur_trans.y_t = vec.get_y_mean()

            ii += 1
            if ii == 20 or np.linalg.norm(last_params - cur_params) <= 1e-4:
                break

        fit_result = ASMFitResult(cur_params, cur_trans, self)
        return fit_result
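The core update in the loop above shrinks each PCA coefficient by lambda / (lambda + sigma^2) before back-projecting. A minimal standalone sketch of that pattern, with illustrative names that are not part of the model class:

import cv2
import numpy as np

def regularised_reconstruction(shape_row, mean, eigenvectors, eigenvalues, sigma2):
    # project the shape, damp each coefficient by lambda / (lambda + sigma2),
    # then back-project to obtain a noise-suppressed reconstruction
    coeffs = cv2.PCAProject(shape_row.reshape(1, -1), mean, eigenvectors)
    coeffs *= eigenvalues.ravel() / (eigenvalues.ravel() + sigma2)
    return cv2.PCABackProject(coeffs, mean, eigenvectors)[0]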
Example No. 8
    def fit(self, img, verbose):
        """
        fits the model to given image

        :param img: target image
        :type img: numpy.ndarray
        :param verbose: if the progress should be displayed
        :type verbose: bool
        :return: result of fitting
        :rtype: ASMFitResult
        """
        # make sure image is in greyscale
        grey_img = img.copy()
        if len(grey_img.shape) == 3:
            grey_img = cv.cvtColor(grey_img, cv.COLOR_BGR2GRAY)

        # scale down the image
        ratio = math.sqrt(40000 / (grey_img.shape[0] * grey_img.shape[1]))
        new_w = int(grey_img.shape[1] * ratio)
        new_h = int(grey_img.shape[0] * ratio)
        resized_img = cv.resize(grey_img, (new_w, new_h))

        # create temporary search model image
        cur_search = ModelImage()
        cur_search.shape_info = self.shape_info
        cur_search.image = resized_img

        # create fit result object for search
        params = np.zeros([1, self.eigenvalues.shape[0]])
        sv = cur_search.shape_vector
        sv.set_from_vector(self.project_param_to_shape(params)[0])
        st = sv.get_shape_transform_fitting_size(resized_img.shape)
        fit_result = ASMFitResult(params, st, self)
        cur_search.build_from_shape_vector(st)

        total_offset: int  # sum of offsets of current iteration
        shape_old = ShapeVector()
        self.feature_extractor.load_image(resized_img)

        if verbose:
            cur_search.show(True, "resized image")

        for level in range(self.pyramid_level - 1, -1, -1):
            if verbose:
                print(f"Pyramid level: {level}\n")
            for run in range(10):
                shape_old.set_from_points_array(
                    cur_search.points)  # store old shape
                total_offset = 0
                best_ep = np.zeros((self.n_landmarks, 2), int)

                # find best fit for every point
                for i in range(self.n_landmarks):
                    if verbose:
                        print(f"Fitting point {i}\n")

                    cur_best = -1
                    best_i = 0
                    candidate_points, features = \
                        self.feature_extractor.get_candidates_with_feature(cur_search.points, i, level)

                    # select best candidate point with Mahalanobis distance
                    for j in range(len(candidate_points)):
                        x = np.zeros(len(features[j]))
                        for f, feature in enumerate(features[j]):
                            x[f] = feature

                        mean = self.mean_vec_pyr[level][i]
                        inv_cov = self.cov_mat_pyr_inv[level][i]
                        # ct = distance.mahalanobis(x, mean, inv_cov)
                        ct = cv.Mahalanobis(x, mean, inv_cov)
                        # if verbose:
                        #     print(f"Mahalanobis distance: {ct}")

                        if ct < cur_best or cur_best < 0:
                            cur_best = ct
                            best_i = j
                            best_ep[i, 0] = candidate_points[j][0]
                            best_ep[i, 1] = candidate_points[j][1]

                    total_offset += abs(best_i)
                    # if :
                    #     cur_search.show(True)

                # modify results with factor of current pyramid level
                for i in range(self.n_landmarks):
                    cur_search.points[i] = best_ep[i]
                    x = int(cur_search.points[i, 0])
                    cur_search.points[i, 0] = x << level
                    y = int(cur_search.points[i, 1])
                    cur_search.points[i, 1] = y << level
                    if level > 0:
                        cur_search.points[i, 0] += (1 << (level - 1))
                        cur_search.points[i, 1] += (1 << (level - 1))

                cur_search.shape_vector.set_from_points_array(
                    cur_search.points)

                # if verbose:
                #     cur_search.show(True)

                # project found shape to PCA model and back to get parameters
                fit_result = self.find_params_for_shape(
                    cur_search.shape_vector, shape_old, fit_result, level)

                # apply limits to found params
                for i, param in enumerate(fit_result.params[0]):
                    if param < self.params_limits[i, 0]:
                        fit_result.params[0, i] = self.params_limits[i, 0]
                    if param > self.params_limits[i, 1]:
                        fit_result.params[0, i] = self.params_limits[i, 1]

                cur_search.shape_vector.set_from_vector(
                    cv.PCABackProject(fit_result.params,
                                      self.pca_shape_pyr[level],
                                      self.eigenvectors_pyr[level])[0])
                cur_search.build_from_shape_vector(fit_result.similarity_trans)

                avg_mov = total_offset / self.n_landmarks
                if verbose:
                    print(f"Iteration: {run + 1}, Average offset: {avg_mov}\n")
                    cur_search.show(True, f"iteration {run + 1}")

                if avg_mov < 1.3:
                    break

            if verbose:
                print(
                    f"Finished fitting after {run + 1} iterations, Average offset of last iteration: {avg_mov}\n"
                )
                cur_search.show(True, f"level {level} final fit")

        st_ = SimilarityTransformation()
        st_.a = 1 / ratio
        fit_result.similarity_trans = st_.multiply(fit_result.similarity_trans)

        return fit_result
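The clamp-then-back-project step above uses self.params_limits computed elsewhere in the model. A minimal standalone sketch of the same idea using the classic plus/minus 3*sqrt(lambda) bounds (an assumption here, not necessarily the limits this model stores):

import cv2
import numpy as np

def clamp_and_restore(params, mean, eigenvectors, eigenvalues, k=3.0):
    # assumed bounds: +/- k * sqrt(eigenvalue) per parameter (classic ASM limit)
    limit = k * np.sqrt(eigenvalues.ravel())
    clipped = np.clip(params, -limit, limit)
    # back-project the clipped parameters into a 2N-element shape vector
    return cv2.PCABackProject(clipped, mean, eigenvectors)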
Example No. 9
# 0425.py
import cv2
import numpy as np

X = np.array([[0, 0, 0, 100, 100, 150, -100, -150],
              [0, 50, -50, 0, 30, 100, -20, -100]],
             dtype=np.float64)
X = X.transpose()  # X = X.T

##mean = cv2.reduce(X, 0, cv2.REDUCE_AVG)
##print('mean = ', mean)

mean, eVects = cv2.PCACompute(X, mean=None)
print('mean = ', mean)
print('eVects = ', eVects)

Y = cv2.PCAProject(X, mean, eVects)
print('Y = ', Y)

X2 = cv2.PCABackProject(Y, mean, eVects)
print('X2 = ', X2)
print(np.allclose(X, X2))
cv2.waitKey()
cv2.destroyAllWindows()
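As a follow-up sketch reusing the X defined above: if PCACompute is asked for fewer components than the data needs, the back-projection is only an approximation.

mean1, eVects1 = cv2.PCACompute(X, mean=None, maxComponents=1)
Y1 = cv2.PCAProject(X, mean1, eVects1)        # (8, 1) coefficients
X3 = cv2.PCABackProject(Y1, mean1, eVects1)   # rank-1 approximation of X
print(np.allclose(X, X3))                     # False: the reconstruction is lossy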
Example No. 10
def pca_reconstruct(terms, mean, eigenvecs):
    # eigenvecs is assumed to hold one component per column, so it is transposed
    # into OpenCV's one-eigenvector-per-row layout before back-projecting
    return cv2.PCABackProject(terms, mean, eigenvecs.T)
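A hedged usage sketch for the transpose convention: assuming eigenvecs stores one component per column (as np.linalg.eigh on a covariance matrix returns them), the round trip recovers the data.

import cv2
import numpy as np

data = np.random.rand(100, 5).astype(np.float64)
mean = data.mean(axis=0, keepdims=True)                     # (1, 5) row-vector mean
_, eigenvecs = np.linalg.eigh(np.cov(data, rowvar=False))   # columns are eigenvectors
terms = (data - mean) @ eigenvecs                           # PCA coefficients, (100, 5)
restored = pca_reconstruct(terms, mean, eigenvecs)
print(np.allclose(restored, data))                          # True up to rounding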