Example #1
def active_shape(img_edge, init_shape, pca, img, centroid, length=10):
    '''
    img_edge, pca: from the preparation function.
    init_shape: initial tooth position.
    '''

    new_points, error = fi.fit_measure(init_shape.matrix.T, length, img_edge)
    new_point11 = Shape(new_points)

    b, pose_param = fi.match_model_points(new_points, pca)

    x = fi.generate_model_point(b, pca)
    x = Shape(x)
    x = aligner.transform(x, pose_param)

    # centroid of the model shape (30 landmark points per shape)
    meanShapeCentroid = (np.sum(x.x) / 30, np.sum(x.y) / 30)
    # centroid= tran.set_clicked_center(np.uint8(testimage))
    res = tran.initalizeShape(centroid, meanShapeCentroid, x.matrix.T)
    res = Shape(res.T)

    plt.imshow(img)
    plt.plot(res.x, res.y, 'r.')
    plt.show()

    y = aligner.invert_transform(x, pose_param)
    y = aligner.transform(y, pose_param)

    #   meanShapeCentroid = (np.sum(y.x)/30,np.sum(y.y)/30)
    # # centroid= tran.set_clicked_center(np.uint8(testimage))
    #   res1 = tran.initalizeShape(centroid, meanShapeCentroid, y.matrix.T)
    #   y= Shape(res1.T)

    plt.imshow(img)
    plt.plot(y.x, y.y, 'r.')
    plt.show()

    return x, res.matrix, new_point11
Example #2
def match_model_points(Y, pca):
    b = np.zeros(len(pca.eigenvalues))

    max_conv_iter = 20
    best_b = b
    best_pose_param = (0, 0, 0, 0)
    best_MSE = np.inf
    convergence_iter = max_conv_iter

    while True:

        x = generate_model_point(b, pca)
        x = Shape(x)

        plt.plot(x.x, x.y, '.')
        plt.show()

        pose_param = getparameter(Y, x.matrix.T)

        global pose_param2
        pose_param2 = pose_param

        pose_param = (pose_param2['rotation'][0, 0],
                      pose_param2['rotation'][0, 1],
                      pose_param2['translation'][0],
                      pose_param2['translation'][1])

        Y_pred = inv_transform(x.matrix.T, pose_param)
        #print(Y_pred.shape)
        # plt.plot(Y_)

        #MSE = sk.mean_squared_error(Y.matrix.T, Y_pred)
        MSE = sk.mean_squared_error(Y, Y_pred)

        if MSE < best_MSE:
            best_b = b
            best_pose_param = pose_param
            best_MSE = MSE
            convergence_iter = max_conv_iter

        convergence_iter -= 1
        if convergence_iter == 0 or best_MSE < 1:
            # print(convergence_iter, best_MSE)
            break

        #y = transform(Y.matrix.T,pose_param)
        y = transform(Y, pose_param)
        y = project_to_tangent_plane(y, pca)

        # x,y = np.split(np.hstack(y), 2)
        # y = np.vstack((x, y))

        # print (y.shape)
        # y = np.vstack((y[0:29], y[30:59])).transpose()
        y = Shape(y)
        b = update_model_param(y.vector, pca)
        #  print(b)

        b = constraint_model_param(b, pca)

    return best_b, best_pose_param
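
constraint_model_param() is called above but not shown in these examples. In the standard ASM formulation (Cootes), each shape parameter is limited to a few standard deviations of its mode, typically |b_i| <= 3 * sqrt(lambda_i). A minimal sketch of that convention, assuming only a vector of eigenvalues; it is illustrative, not necessarily this repository's exact implementation:

import numpy as np

def constrain_shape_parameters(b, eigenvalues, limit=3.0):
    '''Clip each b_i to +/- limit * sqrt(lambda_i), the usual ASM plausibility bound.'''
    bounds = limit * np.sqrt(eigenvalues)
    return np.clip(b, -bounds, bounds)

# illustrative values only: clips to 3.0 and -0.1
print(constrain_shape_parameters(np.array([4.0, -0.1]), np.array([1.0, 0.25])))
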
    def scale_and_rotate(self, subject, s, theta, inverse=False):
        '''Rotate over theta and scale by s'''
        rotation_matrix = np.array(
            [[s * math.cos(theta), -s * math.sin(theta)],
             [s * math.sin(theta), s * math.cos(theta)]])
        if inverse:
            return Shape(np.dot(rotation_matrix.T, subject.matrix))
        else:
            return Shape(np.dot(rotation_matrix, subject.matrix))
    def normalize(self, shape):
        '''
        Perform isotropic scaling in order to normalize shapes
        See Amy Ross on GPA p.5

        in: target Shape
        out: scaled Shape object
        '''
        return Shape([shape.vector / np.linalg.norm(shape.vector)])
Example #5
    def __init__(self, eigenvalues, eigenvectors, mean):
        self.dimension = eigenvalues.size
        self.eigenvalues = eigenvalues
        self.eigenvectors = eigenvectors
        self.mean = Shape(mean)

        # create a set of scaled eigenvectors
        self.scaled_eigenvectors = np.dot(self.eigenvectors,
                                          np.diag(self.eigenvalues))
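
The np.dot with a diagonal matrix above scales column i of eigenvectors by eigenvalue i. A quick illustrative check (the arrays are made up) that plain NumPy broadcasting gives the same result:

import numpy as np

eigenvectors = np.array([[1.0, 0.0],
                         [0.0, 1.0],
                         [1.0, 1.0]])
eigenvalues = np.array([2.0, 0.5])

# column i scaled by eigenvalue i, with and without the explicit diagonal matrix
print(np.allclose(eigenvectors.dot(np.diag(eigenvalues)),
                  eigenvectors * eigenvalues))  # -> True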
Example #6
    def deform(self, shape_param):
        '''
        Reconstruct a shape based on principal components and a set of
        parameters that define a deformable model (see Cootes p. 6 eq. 2)

        in: Tx1 vector deformable model b
        out: 1xC deformed shape
        '''
        return Shape(self.mean.vector +
                     self.scaled_eigenvectors.dot(shape_param))
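
The reconstruction above is Cootes' x = mean + P_s * b_s (p. 6 eq. 2), where P_s holds the eigenvectors scaled by their eigenvalues. A self-contained NumPy sketch of the same operation on made-up numbers (all names below are illustrative only):

import numpy as np

# toy 2-point model: mean vector (x1, x2, y1, y2) and two scaled modes of variation
mean_vector = np.array([0.0, 1.0, 0.0, 1.0])
scaled_eigenvectors = np.array([[0.5, 0.0],
                                [0.0, 0.3],
                                [0.5, 0.0],
                                [0.0, -0.3]])
shape_param = np.array([1.0, -0.5])          # the Tx1 parameter vector b

# same computation as deform(): mean + P_scaled . b
deformed = mean_vector + scaled_eigenvectors.dot(shape_param)
print(deformed)                              # -> values 0.5, 0.85, 0.5, 1.15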
Example #7
    def translate_to_origin(self, shape):
        '''
        Move all shapes to a common center, most likely the origin (0,0)

        In: Shape
        Out: translated Shape
        '''
        # compute centroid
        centr_x, centr_y = shape.centroid()
        # translate w.r.t. centroid
        return Shape([shape.x - centr_x, shape.y - centr_y])
Example #8
    def align(self, shape):
        '''
        Align shape with the mean shape and return the aligned shape.
        All arrays in form (x1, ..., xC, ..., y1, ..., yC)
        In: 1xC array shape
        Out: 1xC aligned shape
        '''
        # perform aligning
        translated = self.translate_to_origin(Shape(shape))
        scaled = self.normalize(translated)
        aligned = self.rotate_to_target(scaled, self.mean_shape)
        return aligned.vector
Example #9
def render_shape_to_image(img, shape, color=(255, 255, 0), title='Image'):
    '''
    Draw shape over image
    '''
    if not isinstance(shape, Shape):
        shape = Shape(shape)

    for i in range(shape.length - 1):
        cv2.line(img,
                 (int(shape.x[i]), int(shape.y[i])),
                 (int(shape.x[i + 1]), int(shape.y[i + 1])),
                 color, 5)

    render(img)
Example #10
    def rotate_to_target(self, subject, target):
        '''
        Rotate shape such that it aligns with the target shape

        in: Shapes
        out: rotated Rx2 matrix of subject
        '''
        # perform singular value decomposition (svd) to get U, S, V'
        u, s, v = np.linalg.svd(target.matrix.dot(np.transpose(
            subject.matrix)))
        # multiply VU' with subject to get the rotated matrix
        vu = np.transpose(v).dot(np.transpose(u))
        return Shape(vu.dot(subject.matrix))
Example #11
    def fit(self, prev_shape, new_shape, pyramid_level=0, n=None):
        '''
        Algorithm that finds the best shape parameters that match identified
        image points.

        In: PointDistributionModel instance pdm,
            array of new image points (x1, x2, ..., xN, y1, y2,..., yN)
        Out: the pose params (Tx, Ty, s, theta) and shape parameter (c) to
            fit the model to the image
        '''
        if not isinstance(new_shape, Shape):
            new_shape = Shape(new_shape)
        if not isinstance(prev_shape, Shape):
            prev_shape = Shape(prev_shape)
        if not self.start_pose:
            raise ValueError('No initial pose parameters found.')

        # find pose parameters to align with new image points
        Tx, Ty, s, theta = self.start_pose
        dx, dy, ds, dTheta = self.aligner.get_pose_parameters(prev_shape, new_shape)
        changed_pose = (Tx + dx, Ty + dy, s*(1+ds), theta+dTheta)

        # align image with model
        y = self.aligner.invert_transform(new_shape, changed_pose)

        # SVD on scaled eigenvectors of the model
        u, w, v = np.linalg.svd(self.pdmodel.scaled_eigenvectors, full_matrices=False)
        W = np.zeros_like(w)

        # define weight vector n
        if n is None:
            last_eigenvalue = self.pdmodel.eigenvalues[-1]
            n = last_eigenvalue**2 if last_eigenvalue**2 >= 0 else 0

        # calculate the shape vector
        W = np.diag(w/((w**2) + n))
        c = (v.T).dot(W).dot(u.T).dot(y.vector)

        return changed_pose, c
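
The weight matrix W = diag(w / (w**2 + n)) built above turns the SVD of the scaled eigenvectors into a regularised pseudo-inverse: c = V W U^T y is the ridge-style solution of min ||P c - y||^2 + n ||c||^2. A small NumPy check on made-up values (none of these arrays come from the model):

import numpy as np

P = np.array([[1.0, 0.2],      # stand-in for pdmodel.scaled_eigenvectors
              [0.0, 1.0],
              [0.5, 0.3],
              [0.2, 0.0]])
y = np.array([1.0, 0.5, 0.2, 0.1])
n = 0.1

u, w, vh = np.linalg.svd(P, full_matrices=False)
c_svd = vh.T.dot(np.diag(w / (w**2 + n))).dot(u.T).dot(y)   # expression used in fit()

# equivalent regularised normal equations: (P^T P + n I) c = P^T y
c_ridge = np.linalg.solve(P.T.dot(P) + n * np.eye(P.shape[1]), P.T.dot(y))
print(np.allclose(c_svd, c_ridge))                          # -> True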
Example #12
def run(img, centroid):

    # --------------- TESTING ----------------- #

    edge_canny = prep.calc_external_img_active_contour(img)
    edge_sobel = prep.sobel(img)
    testimage, testlandmarks = test_set
    test = Shape(testlandmarks)

    pose_para = aligner.get_pose_parameters(pca.mean, test)
    lst = list(pose_para)
    lst[0] = 0
    lst[1] = 0
    lst[2] = pose_para[2]
    lst[3] = 0

    t = tuple(lst)

    points = aligner.transform(pca.mean, t)

    plt.imshow(img)
    plt.plot(points.x, points.y, "r.")

    meanShapeCentroid = (np.sum(points.x) / 30, np.sum(points.y) / 30)
    #perform manual centroid
    # centroid= tran.set_clicked_center(np.uint8(testimage))
    matches1 = tran.initalizeShape(centroid, meanShapeCentroid,
                                   points.matrix.T)
    if not isinstance(matches1, Shape):
        matches1 = Shape(matches1.T)

    # meanShapeCentroid = (np.sum(testlandmarks.x)/30,np.sum(testlandmarks.y)/30)
    plt.imshow(img)
    plt.plot(matches1.x, matches1.y, '.')
    plt.show()

    x, y, new_p = active_shape(edge_sobel, matches1, pca, img, centroid)

    return y
Example #13
    def multiresolution_search(self,
                               image,
                               region,
                               t=0,
                               max_level=0,
                               max_iter=5,
                               n=None):
        '''
        Perform Multi-resolution Search ASM algorithm.

        in: np array of training image
            np array region; array of coordinates that gives a rough
                estimation of the target in form (x1, ..., xN, y1, ..., yN)
            int t; amount of pixels to be examined on each side of the
                normal of each point during an iteration (t>k)
            int max_levels; max amount of levels to be searched
            int max_iter; amount to stop iterations at each level
            int n; fitting parameter
        out: Shape region; approximation of the target
        '''
        if not isinstance(region, Shape):
            region = Shape(region)

        # create Gaussian pyramid of input image
        image_pyramid = gaussian_pyramid(image,
                                         levels=len(self.glmodel_pyramid))

        # allow examiner to render the largest image (for testing)
        self.examiner.bigImage = image_pyramid[0]

        level = max_level
        # from here on max_level is reused as a flag: True only at the coarsest level
        max_level = True

        while level >= 0:
            # get image at level resolution
            image = image_pyramid[level]
            # search in the image
            region = self.search(image,
                                 region,
                                 t=t,
                                 level=level,
                                 max_level=max_level,
                                 max_iter=max_iter,
                                 n=n)
            # descend the pyramid
            level -= 1
            max_level = False

        return region
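
gaussian_pyramid() is a helper that is not included in these examples. A minimal sketch of what such a helper commonly looks like with OpenCV (an assumption, not this repository's code), keeping index 0 as the full-resolution image as the code above expects:

import cv2

def gaussian_pyramid_sketch(image, levels=3):
    '''Return [full resolution, 1/2, 1/4, ...]: repeated Gaussian blur + downsample.'''
    pyramid = [image]
    for _ in range(levels - 1):
        pyramid.append(cv2.pyrDown(pyramid[-1]))
    return pyramid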
Example #14
    def examine(self, model_points, t=0, pyramid_level=0):
        '''
        Examines points normal to model points and compare its grey-levels
        with the grey-level model.

        in: matrix of pixels image
            array of model points (x1, x2, ..., xN, y1, ..., yN)
            int t amount of pixels examined either side of the normal (t > k)
        out: Shape with adjustments (dx, dy) to better approximate target
        '''
        if not isinstance(model_points, Shape):
            model_points = Shape(model_points)

        new_points = model_points.matrix
        # keep track of large movements
        movement = np.zeros((model_points.length, 1))
        # get greylevel model for requested pyramid level
        glmodel = self.glmodel_pyramid[pyramid_level]
        # determine reduction based on pyramid level
        reduction = 2**pyramid_level

        i = 0
        for m in range(model_points.length):
            i += 1
            # set model index (for mean/cov)
            glmodel.set_evaluation_index(m - 1)
            # choose model points according to pyramid level
            prev, curr, nex = m - 2, m - 1, m
            reduced_points = Shape(model_points.vector / reduction)
            # get current, previous and next
            points = np.array([
                reduced_points.get(prev),
                reduced_points.get(curr),
                reduced_points.get(nex)
            ])
            # get point that best matches gray levels
            new_points[:, curr], movement[curr] = self.get_best_match(glmodel,
                                                                      points,
                                                                      t=t)
        print('Number of points examined:', str(i))

        return Shape(new_points) * reduction, movement
Example #15
def project_to_tangent_plane(y, pca):
    xm = pca.mean.vector
    y = Shape(y)
    #y = y.reshape(-1)
    #print ("project function ", y.vector.shape)
    return y.vector / np.dot(y.vector, xm)
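
The division above is the tangent-plane projection from Cootes: y is scaled by 1 / (y . mean) so that the projected vector satisfies y' . mean = 1. A two-line illustration with made-up vectors:

import numpy as np

mean_vector = np.array([0.6, 0.8])          # illustrative unit-norm mean shape
y = np.array([1.0, 2.0])                    # candidate shape vector

y_tangent = y / np.dot(y, mean_vector)      # same scaling as project_to_tangent_plane()
print(np.dot(y_tangent, mean_vector))       # -> 1.0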
Example #16
    def translate(self, shape, Tx, Ty):
        '''
        Translate a shape according to translation parameters
        '''
        return Shape([shape.x + Tx, shape.y + Ty])
Example #17
def run(imageslice, centroid):
    '''
    Main method of the package.
    '''

    # ------------- LOAD DATA -------------- #
    loader = DataLoader()
    training_set, test_set = loader.leave_one_out(test_index=1)

    # --------------- TRAINING ---------------- #
    trainlandmarks = training_set[1]

    # build and train an Active Shape Model
    asm = ASMTraining(training_set, k=3, levels=3)
    pca = asm.activeshape.pdmodel

    t = 0
    for i in range(len(pca.eigenvalues)):
        if sum(pca.eigenvalues[:i]) / sum(pca.eigenvalues) < 0.99:
            t = t + 1
        else:
            break

    print("Constructed model with {0} modes of variation".format(t))

    # --------------- TESTING ----------------- #
    aligner = Aligner()
    testimage, testlandmarks = test_set
    test = Shape(testlandmarks)
    # remove some noise from the test image
    testimage = remove_noise(testimage)
    plt.imshow(testimage)
    plt.plot(test.x, test.y, 'r.')
    plt.show()

    plt.imshow(testimage)
    plt.plot(test.x, test.y, "r.")

    pose_para = aligner.get_pose_parameters(pca.mean, test)
    lst = list(pose_para)
    lst[0] = 0
    lst[1] = 0
    lst[2] = pose_para[2]
    lst[3] = 0
    t = tuple(lst)

    points = aligner.transform(pca.mean, t)

    meanShapeCentroid = (np.sum(points.x) / 30, np.sum(points.y) / 30)
    # centroid= tran.set_clicked_center(np.uint8(testimage))
    matches1 = tran.initalizeShape(centroid, meanShapeCentroid,
                                   points.matrix.T)
    if not isinstance(matches1, Shape):
        matches1 = Shape(matches1.T)

    plt.imshow(testimage)
    plt.plot(matches1.x, matches1.y, '.')
    plt.show()

    new_fit = asm.activeshape.multiresolution_search(testimage,
                                                     matches1,
                                                     t=10,
                                                     max_level=0,
                                                     max_iter=20,
                                                     n=0.1)
    # Find the target that the new fit represents in order
    # to compute the error. This is done by taking the smallest
    # MSE of all targets.
    mse = mean_squared_error(testlandmarks, new_fit)
    # implement maximally tolerable error
    if int(mse) < MSE_THRESHOLD:
        print('MSE:', mse)
        # plot.render_shape_to_image(np.uint8(testimage), trainlandmarks[best_fit_index], color=(0, 0, 0))
    else:
        print('Bad fit. Needs to restart.')
Example #18
    def set_mean_shape(self, shape):
        self.mean_shape = Shape(shape)
Example #19
def update_model_param(y, pca):
    y = Shape(y)
    xm = pca.mean
    PT = pca.eigenvectors.T
    return np.dot(PT, y.vector - xm.vector)  # update eigenvalue
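
update_model_param() is the projection b = P^T (y - mean) from Cootes; with orthonormal eigenvector columns it recovers the shape parameters of an aligned target. A small illustrative check (arrays are made up):

import numpy as np

mean_vector = np.array([0.0, 0.0, 1.0, 1.0])
P = np.array([[1.0, 0.0],                   # columns are orthonormal eigenvectors
              [0.0, 1.0],
              [0.0, 0.0],
              [0.0, 0.0]])
y = np.array([0.4, -0.2, 1.0, 1.0])

b = P.T.dot(y - mean_vector)                # same computation as update_model_param()
print(b)                                    # -> values 0.4, -0.2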
Example #20
def render_shape(shape):
    if not isinstance(shape, Shape):
        shape = Shape(shape)
    plt.plot(shape.x, shape.y, marker='o')
    plt.show()