Example #1
def homografia(real_width, real_height, width, height):
    global points, teclado
    if len(points) != 4:
        raise Exception('a point is missing')
 
    p = cv.CreateMat(2,4,cv.CV_64FC1)
    h = cv.CreateMat(2,4,cv.CV_64FC1)
    p2h = cv.CreateMat(3,3,cv.CV_64FC1)
 
    cv.Zero(p)
    cv.Zero(h)
    cv.Zero(p2h)
 
    for i in range(4):
        (x,y) = points[i]
        p[0,i] = (float(real_width)/float(width)) * x
        p[1,i] = (float(real_height)/float(height)) * y
 
    h[0,0] = 0
    h[1,0] = real_height
 
    h[0,1] = real_width
    h[1,1] = real_height
 
    h[0,2] = real_width
    h[1,2] = 0
 
    h[0,3] = 0
    h[1,3] = 0
 
    cv.FindHomography(p,h,p2h)
    return p2h
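For comparison, here is a minimal sketch of the same four-point mapping with the modern cv2 API (homography_cv2 is a hypothetical name; points is assumed to hold the four clicked corners in the order used above):

import cv2
import numpy as np

def homography_cv2(points, real_width, real_height, width, height):
    # Scale clicked pixel coordinates into real-world units, as the loop above does.
    src = np.float32([[(real_width / float(width)) * x,
                       (real_height / float(height)) * y] for (x, y) in points])
    # Target rectangle corners, in the same order as the columns of h above.
    dst = np.float32([[0, real_height],
                      [real_width, real_height],
                      [real_width, 0],
                      [0, 0]])
    H, _mask = cv2.findHomography(src, dst)  # returns the 3x3 matrix and an inlier mask
    return H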
Example #2
def PerspectiveFromPoints(source,
                          dest,
                          new_size,
                          method=0,
                          ransacReprojThreshold=1.5):
    '''
    Calls the OpenCV function: cvFindHomography. This method has
    additional options to use the CV_RANSAC or CV_LMEDS methods to
    find a robust homography. Method=0 appears to be similar to
    PerspectiveFromPoints.
    '''
    assert len(source) == len(dest)

    n_points = len(source)

    s = cv.CreateMat(n_points, 2, cv.CV_32F)
    d = cv.CreateMat(n_points, 2, cv.CV_32F)
    p = cv.CreateMat(3, 3, cv.CV_32F)

    for i in range(n_points):
        s[i, 0] = source[i].X()
        s[i, 1] = source[i].Y()

        d[i, 0] = dest[i].X()
        d[i, 1] = dest[i].Y()

    results = cv.FindHomography(s, d, p, method, ransacReprojThreshold)

    matrix = pv.OpenCVToNumpy(p)

    return PerspectiveTransform(matrix, new_size)
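A hedged cv2 equivalent: cv2.findHomography takes cv2.RANSAC or cv2.LMEDS directly and returns both the matrix and the inlier mask (here source and dest are assumed to be plain sequences of (x, y) tuples rather than pv.Point objects):

import cv2
import numpy as np

def perspective_from_points_cv2(source, dest, method=cv2.RANSAC,
                                ransac_reproj_threshold=1.5):
    assert len(source) == len(dest)
    s = np.float32(source)  # shape (N, 2)
    d = np.float32(dest)
    # mask[i] == 1 marks an inlier when a robust method is used.
    H, mask = cv2.findHomography(s, d, method, ransac_reproj_threshold)
    return H, mask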
Example #3
    def __init__(self, worldPositions, screenPositions):

        assert len(worldPositions) == 4
        assert len(screenPositions) == 4

        worldPosMtx = np.matrix(worldPositions, dtype=np.float32)
        screenPosMtx = np.matrix(screenPositions, dtype=np.float32)

        self.worldToScreenHomography = np.matrix(
            np.ndarray(shape=(3, 3), dtype=np.float32))
        self.screenToWorldHomography = np.matrix(
            np.ndarray(shape=(3, 3), dtype=np.float32))

        cv.FindHomography(worldPosMtx, screenPosMtx,
                          self.worldToScreenHomography)
        cv.Invert(self.worldToScreenHomography, self.screenToWorldHomography)
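The same two-way mapping is only a few lines with cv2 and numpy; inverting the 3x3 matrix gives the screen-to-world direction (a sketch assuming four (x, y) pairs per argument, as the asserts above require):

import cv2
import numpy as np

def make_homographies(world_positions, screen_positions):
    world = np.float32(world_positions)
    screen = np.float32(screen_positions)
    world_to_screen, _ = cv2.findHomography(world, screen)
    screen_to_world = np.linalg.inv(world_to_screen)
    return world_to_screen, screen_to_world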
Example #4
def correct_horizontal_motion(image, prev_trackpts, curr_trackpts, boundary_pts):
    image_boundaries = (1, 1, image.width-2, image.height-2)
    motion_matrix=cv.CreateMat(3, 3, cv.CV_32FC1)
    cv.FindHomography(prev_trackpts, curr_trackpts,
                      motion_matrix,
                      cv.CV_RANSAC, 10)
    boundary_pts = boundary_pts.reshape((1, -1, 2)).astype(np.float32)
    warped_boundary_pts = np.zeros_like(boundary_pts)
    cv.PerspectiveTransform(boundary_pts, warped_boundary_pts, motion_matrix)
    pt11, pt12 = cvutils.line_rectangle_intersection(
        tuple(warped_boundary_pts[0, 0,:]), tuple(warped_boundary_pts[0, 3,:]),
        image_boundaries)
    pt21, pt22 = cvutils.line_rectangle_intersection(
        tuple(warped_boundary_pts[0, 1,:]), tuple(warped_boundary_pts[0, 2,:]),
        image_boundaries)
    
    boundary_pts = cvutils.reorder_boundary_points(np.array((pt11, pt12, pt21, pt22)))
    return boundary_pts.reshape((-1, 2)).astype(np.int), motion_matrix
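In cv2 the same boundary warp looks like the sketch below; cv2.perspectiveTransform expects points shaped (N, 1, 2) (prev_pts and curr_pts are assumed to be (N, 2) float32 arrays of matched track points):

import cv2
import numpy as np

def warp_boundary(prev_pts, curr_pts, boundary_pts):
    # Robust motion estimate with a 10-pixel reprojection threshold,
    # mirroring the cv.CV_RANSAC, 10 call above.
    H, _mask = cv2.findHomography(prev_pts, curr_pts, cv2.RANSAC, 10)
    pts = boundary_pts.reshape(-1, 1, 2).astype(np.float32)
    warped = cv2.perspectiveTransform(pts, H)
    return warped.reshape(-1, 2), H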
Example #5
    def validateCalibrationData(self, all_object_points, all_corners):
        """Builds up linear equations using method of Zhang 1999 "Flexible Camera Calibration By Viewing a Plane From Unknown Orientations". The condition number of the linear equations can tell us how good the data is. The object data all needs to lie on the Z=0 plane (actually any plane would probably do?)
        """
        cvH = cv.fromarray(eye(3))
        B = zeros((6, 1))
        B[1, 0] = 1  # skewness is 0 constraint
        # for every observation, solve for the homography and build up a matrix to solve for the intrinsic parameters
        for corners, object_points in izip(all_corners, all_object_points):
            if not all(object_points[:, 2] == 0):
                raise ValueError(
                    'The object data all needs to lie on the Z=0 plane.')

            cv.FindHomography(cv.fromarray(object_points[:, 0:2]),
                              cv.fromarray(corners), cvH, 0)
            H = array(cvH)
            v = []
            for i, j in [(0, 1), (0, 0), (1, 1)]:
                v.append([
                    H[0, i] * H[0, j], H[0, i] * H[1, j] + H[1, i] * H[0, j],
                    H[1, i] * H[1, j], H[2, i] * H[0, j] + H[0, i] * H[2, j],
                    H[2, i] * H[1, j] + H[1, i] * H[2, j], H[2, i] * H[2, j]
                ])
            B = c_[B, array(v[0]), array(v[1]) - array(v[2])]
        U, s, Vh = linalg.svd(dot(B, transpose(B)))
        b = U[:, -1]
        A = array([[b[0], b[1], b[3]], [b[1], b[2], b[4]], [b[3], b[4], b[5]]])
        # A has to be positive definite
        Aeigs = linalg.eigvals(A)
        if not all(sign(Aeigs) > 0) and not all(sign(Aeigs) < 0):
            return None
        #print 'eigenvalues: ',sqrt(s)
        print 'condition is: ', sqrt(s[-2] / s[-1])
        if False:
            # compute the intrinsic parameters K = [alpha c u0; 0 beta v0; 0 0 1]
            b = U[:, -1]
            v0 = (b[1] * b[3] - b[0] * b[4]) / (b[0] * b[2] - b[1] * b[1])
            lm = b[5] - (b[3] * b[3] + v0 * (b[1] * b[3] - b[0] * b[4])) / b[0]
            alpha = sqrt(lm / b[0])
            beta = sqrt(lm * b[0] / (b[0] * b[2] - b[1] * b[1]))
            c = -b[1] * alpha * alpha * beta / lm
            u0 = c * v0 / alpha - b[3] * alpha * alpha / lm
            print alpha, beta, u0, v0
        return sqrt(s) / len(all_object_points)
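Each v list above is one row of Zhang's constraint v_ij^T b = 0 on the image of the absolute conic B = K^{-T} K^{-1}; the same construction as a small numpy helper (v_ij is a hypothetical name):

import numpy as np

def v_ij(H, i, j):
    # One constraint row built from columns i and j of the homography H.
    return np.array([
        H[0, i] * H[0, j],
        H[0, i] * H[1, j] + H[1, i] * H[0, j],
        H[1, i] * H[1, j],
        H[2, i] * H[0, j] + H[0, i] * H[2, j],
        H[2, i] * H[1, j] + H[1, i] * H[2, j],
        H[2, i] * H[2, j],
    ])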
Example #6
def find_homography(corners1,
                    corners2,
                    method=METHOD,
                    threshold=THRESHOLD,
                    status=STATUS):
    array1 = np.mat(corners1)
    array2 = np.mat(corners2)
    homography = cv.CreateMat(3, 3, cv.CV_64F)
    cv.FindHomography(cv.fromarray(array1), cv.fromarray(array2), homography,
                      method, threshold)
    if VERBOSE:
        # TODO: find a less obnoxious way to do this
        print "Homography matrix:"
        for i in range(3):
            print homography[i, 0],
            print homography[i, 1],
            print homography[i, 2]
    #find_svd(homography)
    return homography
Example #7
def calc(real_width, real_height, width, height):
    """ calculates the homography """
    global points
    if len(points) != 4:
        raise Exception('need exactly four points')

    order_points(width, height)

    p = cv.CreateMat(2, 4, cv.CV_64FC1)
    h = cv.CreateMat(2, 4, cv.CV_64FC1)
    p2h = cv.CreateMat(3, 3, cv.CV_64FC1)

    cv.Zero(p)
    cv.Zero(h)
    cv.Zero(p2h)

    for i in range(4):
        (x, y) = points[i]
        p[0, i] = (float(real_width) / float(width)) * x
        p[1, i] = (float(real_height) / float(height)) * y

    h[0, 0] = 0
    h[1, 0] = real_height

    h[0, 1] = real_width
    h[1, 1] = real_height

    h[0, 2] = real_width
    h[1, 2] = 0

    h[0, 3] = 0
    h[1, 3] = 0

    cv.FindHomography(p, h, p2h)
    #cv.ReleaseMat(p)
    #cv.ReleaseMat(h)
    return p2h
Example #8
    def capture(self):
        blank = self.get_picture_of_projection(self.blank_projection)
        positive = self.get_picture_of_projection(
            self.positive_chessboard_projection)
        negative = self.get_picture_of_projection(
            self.negative_chessboard_projection)

        difference = cv.CreateMat(self.camera_info.height,
                                  self.camera_info.width, cv.CV_8UC1)
        cv.Sub(positive, negative, difference)

        cv.NamedWindow("blank", flags=0)
        cv.ShowImage("blank", blank)
        cv.WaitKey(300)

        cv.NamedWindow("difference", flags=0)
        cv.ShowImage("difference", difference)
        cv.WaitKey(300)

        camera_image_points, camera_object_points = detect_chessboard(
            blank, self.printed_chessboard_corners_x,
            self.printed_chessboard_corners_y, self.printed_chessboard_spacing,
            self.printed_chessboard_spacing)
        if camera_image_points is None:
            return False
        cv.UndistortPoints(camera_image_points, camera_image_points,
                           self.camera_model.intrinsicMatrix(),
                           self.camera_model.distortionCoeffs())
        homography = cv.CreateMat(3, 3, cv.CV_32FC1)
        cv.FindHomography(camera_image_points, camera_object_points,
                          homography)
        object_points, dummy = detect_chessboard(
            difference, self.projected_chessboard_corners_x,
            self.projected_chessboard_corners_y, None, None)
        if object_points is None:
            return False
        cv.UndistortPoints(object_points, object_points,
                           self.camera_model.intrinsicMatrix(),
                           self.camera_model.distortionCoeffs())

        cv.PerspectiveTransform(object_points, object_points, homography)

        object_points_3d = cv.CreateMat(
            1, self.projected_chessboard_corners_x *
            self.projected_chessboard_corners_y, cv.CV_32FC3)

        x = cv.CreateMat(
            1, self.projected_chessboard_corners_x *
            self.projected_chessboard_corners_y, cv.CV_32FC1)
        y = cv.CreateMat(
            1, self.projected_chessboard_corners_x *
            self.projected_chessboard_corners_y, cv.CV_32FC1)
        cv.Split(object_points, x, y, None, None)
        z = cv.CreateMat(
            1, self.projected_chessboard_corners_x *
            self.projected_chessboard_corners_y, cv.CV_32FC1)
        cv.SetZero(z)

        cv.Merge(x, y, z, None, object_points_3d)

        self.object_points.append(object_points_3d)
        self.image_points.append(self.get_scene_image_points())
        self.number_of_scenes += 1

        return True
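The key step above is undistorting detected corners and pushing them through the board homography; a hedged cv2 sketch of that step (K and dist stand in for the camera intrinsics and distortion coefficients; pts is an (N, 1, 2) float32 array):

import cv2
import numpy as np

def image_points_to_board_plane(pts, K, dist, H):
    # Undistort to normalized image coordinates, as cv.UndistortPoints does above.
    undistorted = cv2.undistortPoints(pts, K, dist)
    # Map onto the printed-chessboard plane with the homography.
    return cv2.perspectiveTransform(undistorted, H)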
Example #9
def annotate_image(cv_im, mech_info_dict, dir):
    #cv_im = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
    size = cv.GetSize(cv_im)

    print 'Image size:', size[0], size[1]  # col, row
    wnd = 'Image Annotate'
    cv.NamedWindow(wnd, cv.CV_WINDOW_AUTOSIZE)
    disp_im = cv.CloneImage(cv_im)
    new_size = (size[0] / 2, size[1] / 2)
    scale_factor = 1

    checker_origin_height = mech_info_dict['height']
    # checkerboard corners mat
    cb_dims = (5, 8)  # checkerboard dims. (x, y)
    sq_sz = 19  # size of checkerboard in real units.
    cb_offset = 500, 500
    cb_coords = cv.CreateMat(2, cb_dims[0] * cb_dims[1], cv.CV_64FC1)
    n = 0
    for r in range(cb_dims[1]):
        for c in range(cb_dims[0]):
            cb_coords[0, n] = c * sq_sz + cb_offset[0]  # x coord
            cb_coords[1, n] = r * sq_sz + cb_offset[1]  # y coord
            n += 1

    clicked_list = []
    recreate_image = False
    mechanism_calc_state = 0
    mechanism_calc_dict = {}
    while True:
        for p in clicked_list:
            x, y = p[0] * scale_factor, p[1] * scale_factor
            cv.Circle(disp_im, (x, y), 3, (255, 0, 0))
        cv.ShowImage(wnd, disp_im)
        k = cv.WaitKey(10)
        k = k % 256

        if k == 27:
            # ESC
            break
        elif k == ord('='):
            # + key without the shift
            scale_factor = scale_factor * 1.2
            recreate_image = True
        elif k == ord('-'):
            # - key
            scale_factor = scale_factor / 1.2
            recreate_image = True
        elif k == ord('c'):
            # find chessboard corners.
            succ, corners = cv.FindChessboardCorners(disp_im, cb_dims)
            if succ == 0:
                print 'Chessboard detection FAILED.'
            else:
                # chessboard detection was successful
                cv.DrawChessboardCorners(disp_im, cb_dims, corners, succ)
                cb_im = cv.CreateMat(2, cb_dims[0] * cb_dims[1], cv.CV_64FC1)
                corners_mat = np.array(corners).T
                n = 0
                for r in range(cb_dims[1]):
                    for c in range(cb_dims[0]):
                        cb_im[0, n] = corners_mat[0, n]  # x coord
                        cb_im[1, n] = corners_mat[1, n]  # y coord
                        n += 1
                H = cv.CreateMat(3, 3, cv.CV_64FC1)
                H1 = cv.FindHomography(cb_im, cb_coords, H)
                Hnp = np.reshape(np.fromstring(H1.tostring()), (3, 3))
                print 'Homography:'
                print Hnp

                d = cv.CloneImage(disp_im)
                cv.WarpPerspective(d, disp_im, H1, cv.CV_WARP_FILL_OUTLIERS)
                cv_im = cv.CloneImage(disp_im)
        elif k == ord('1'):
            # calculate width of the mechanism
            del clicked_list[:]
            cv.SetMouseCallback(wnd, on_mouse,
                                (disp_im, clicked_list, scale_factor))
            recreate_image = True
            print 'Click on two endpoints to mark the width of the mechanism'
            mechanism_calc_state = 1
        elif k == ord('2'):
            # distance of handle from the hinge
            del clicked_list[:]
            cv.SetMouseCallback(wnd, on_mouse,
                                (disp_im, clicked_list, scale_factor))
            recreate_image = True
            print 'Click on handle and hinge to compute distance of handle from hinge.'
            mechanism_calc_state = 2
        elif k == ord('3'):
            # height of the handle above the ground
            del clicked_list[:]
            cv.SetMouseCallback(wnd, on_mouse,
                                (disp_im, clicked_list, scale_factor))
            recreate_image = True
            print 'Click on handle top and bottom to compute height of handle above the ground.'
            mechanism_calc_state = 3
        elif k == ord('4'):
            # top and bottom edge of the mechanism
            del clicked_list[:]
            cv.SetMouseCallback(wnd, on_mouse,
                                (disp_im, clicked_list, scale_factor))
            recreate_image = True
            print 'Click on top and bottom edges of the mechanism.'
            mechanism_calc_state = 4
        elif k == ord('d'):
            # display the calculated values
            print 'mechanism_calc_dict:', mechanism_calc_dict
            print 'mech_info_dict:', mech_info_dict
        elif k == ord('s'):
            # save the pkl
            ut.save_pickle(mechanism_calc_dict,
                           dir + '/mechanism_calc_dict.pkl')
            print 'Saved the pickle'
        #elif k != -1:
        elif k != 255:
            print 'k:', k

        if recreate_image:
            new_size = (int(size[0] * scale_factor),
                        int(size[1] * scale_factor))
            disp_im = cv.CreateImage(new_size, cv_im.depth, cv_im.nChannels)
            cv.Resize(cv_im, disp_im)
            cv.SetMouseCallback(wnd, on_mouse,
                                (disp_im, clicked_list, scale_factor))
            recreate_image = False

        if len(clicked_list) == 2:
            if mechanism_calc_state == 1:
                w = abs(clicked_list[0][0] - clicked_list[1][0])
                print 'Width in mm:', w
                mechanism_calc_dict['mech_width'] = w / 1000.
            if mechanism_calc_state == 2:
                w = abs(clicked_list[0][0] - clicked_list[1][0])
                print 'Width in mm:', w
                mechanism_calc_dict['handle_hinge_dist'] = w / 1000.
            if mechanism_calc_state == 3:
                p1, p2 = clicked_list[0], clicked_list[1]
                h1 = (cb_offset[1] - p1[1]) / 1000. + checker_origin_height
                h2 = (cb_offset[1] - p2[1]) / 1000. + checker_origin_height
                mechanism_calc_dict['handle_top'] = max(h1, h2)
                mechanism_calc_dict['handle_bottom'] = min(h1, h2)
            if mechanism_calc_state == 4:
                p1, p2 = clicked_list[0], clicked_list[1]
                h1 = (cb_offset[1] - p1[1]) / 1000. + checker_origin_height
                h2 = (cb_offset[1] - p2[1]) / 1000. + checker_origin_height
                mechanism_calc_dict['mechanism_top'] = max(h1, h2)
                mechanism_calc_dict['mechanism_bottom'] = min(h1, h2)

            mechanism_calc_state = 0
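The 'c' branch rectifies the view by mapping detected chessboard corners onto their metric grid and warping; a cv2 sketch of just that step (corners as returned by cv2.findChessboardCorners, grid the (N, 2) metric coordinates built like cb_coords above):

import cv2
import numpy as np

def rectify_with_chessboard(image, corners, grid):
    src = np.float32(corners).reshape(-1, 2)
    H, _mask = cv2.findHomography(src, np.float32(grid))
    h, w = image.shape[:2]
    return cv2.warpPerspective(image, H, (w, h)), H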
Example #10
    def update(self,frame):
        '''
        This tracks the points_b to the next frame using the LK tracker,
        then adds more good points_b to track.

        @param frame: update optical flow for the frame.
        @type frame: pv.Image
        '''
        if self.frame_size is None:
            self.frame_size = frame.size

        if self.prev_frame is None:
            #Initialize the tracker

            # Pick a frame size
            if self.tile_size == "AUTO":
                # Rescale the image so that the largest dimension is between 256 and 512
                w,h = frame.size
                n = max(w,h)
                while n > 512:
                    n = n/2
                
                scale = n/float(max(w,h))
                
                w = int(round(scale*w))
                h = int(round(scale*h))
                
                self.tile_size = (w,h)
                
                
            if self.tile_size == "ORIG":
                self.tile_size = frame.size

            # allocate memory
            w,h = self.tile_size
            self.frame = cv.CreateImage((w,h), 8, 1)
            self.prev_frame = cv.CreateImage((w,h), 8, 1) 
            
            # Resize the frame
            cvim = frame.asOpenCVBW()
            cv.Resize(cvim,self.frame)
            
            # Find good features to track
            self._selectTrackingPoints(self.frame)
            
            # Init transform
            
            
        else:
            # Resize the frame
            cvim = frame.asOpenCVBW()
            cv.Resize(cvim,self.frame)
        
        
            # Track the points_b (optical flow)
            new_tracks = self._opticalFlow()
        
            
            # Compute the transform
            assert len(new_tracks) == len(self.tracks)
            n = len(new_tracks)
            src_points = cv.CreateMat(n,2,cv.CV_32F)
            dst_points = cv.CreateMat(n,2,cv.CV_32F)
    
            # Copy data into matrices
            for i in range(len(new_tracks)):
                src_points[i,0] = self.tracks[i].X()
                src_points[i,1] = self.tracks[i].Y()

                dst_points[i,0] = new_tracks[i].X()
                dst_points[i,1] = new_tracks[i].Y()
    
            # Create homography matrix
            self.homography = cv.CreateMat(3,3,cv.CV_32F)
            self.homography_rev = cv.CreateMat(3,3,cv.CV_32F)
            mask = cv.CreateMat(1,n,cv.CV_8U)
            cv.FindHomography(dst_points,src_points,self.homography_rev,cv.CV_LMEDS,1.5,mask)
            cv.FindHomography(src_points,dst_points,self.homography,cv.CV_LMEDS,1.5,mask)
            
            # Drop bad points_b
            self.tracks = []
            self.bad_points = []
            for i in range(n):
                if mask[0,i]:
                    self.tracks.append(new_tracks[i])
                else:
                    self.bad_points.append(new_tracks[i])
                    
            self.n = len(self.tracks)
            
            # Add new points_b
            self._selectTrackingPoints(self.frame)
            
            self.prev_input.to_next = self.asHomography(forward = False)
            frame.to_prev = self.asHomography(forward = True)
            
        self.prev_input = frame

        cv.Copy(self.frame,self.prev_frame)
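A hedged cv2 sketch of the same forward/reverse estimation, keeping the LMEDS inlier mask so bad tracks can be dropped (src and dst assumed to be (N, 2) float32 arrays):

import cv2
import numpy as np

def forward_reverse_homography(src, dst):
    H_rev, _ = cv2.findHomography(dst, src, cv2.LMEDS, 1.5)
    H_fwd, mask = cv2.findHomography(src, dst, cv2.LMEDS, 1.5)
    good = mask.ravel().astype(bool)  # True where the point survived LMEDS
    return H_fwd, H_rev, good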
Example #11
 def featureimagecb(self, featuremsg):
     # if not in GUI mode and no one is subscribing, don't do anything
     if self.cvwindow is None and self.pub_objdet.get_num_connections(
     ) == 0:
         rospy.logdebug(
             'PointPoseExtraction.py no connections, so ignoring image')
         return
     self.featuremsg = featuremsg
     with self.extractionlck:
         try:
             cv_image = self.bridge.imgmsg_to_cv(featuremsg.image, "bgr8")
         except CvBridgeError as e:
             print(e)
             return
         # decompose P=KK*Tcamera
         P = reshape(array(featuremsg.info.P), (3, 4))
         cvKK = cv.fromarray(eye(3))
         cvRcamera = cv.fromarray(eye(3))
         cvTcamera = cv.fromarray(zeros((4, 1)))
         cv.DecomposeProjectionMatrix(cv.fromarray(P), cvKK, cvRcamera,
                                      cvTcamera)
         KK = array(cvKK)
         Tcamera = eye(4)
         Tcamera[0:3, :] = c_[array(cvRcamera),
                              -dot(array(cvRcamera),
                                   array(cvTcamera)[0:3] / cvTcamera[3, 0])]
         print("Tcamera: ", Tcamera)
         if self.pe is None:
             if len(self.imagepoints) >= 4:
                 xaxis = self.imagepoints[1] - self.imagepoints[0]
                 yaxis = self.imagepoints[2] - self.imagepoints[1]
                 if xaxis[0] * yaxis[1] - xaxis[1] * yaxis[0] < 0:
                     print(
                         'point order is not correct! need to specify points in clockwise order'
                     )
                     self.imagepoints = []
                     return
                 # find the homography, warp the image, and get new features
                 width = int(
                     sqrt(
                         max(
                             sum((self.imagepoints[1] -
                                  self.imagepoints[0])**2),
                             sum((self.imagepoints[3] -
                                  self.imagepoints[2])**2))))
                 height = int(
                     sqrt(
                         max(
                             sum((self.imagepoints[2] -
                                  self.imagepoints[1])**2),
                             sum((self.imagepoints[3] -
                                  self.imagepoints[0])**2))))
                 cvimagepoints = cv.fromarray(array(self.imagepoints))
                 cvtexturepoints = cv.fromarray(
                     array(
                         ((0, 0), (width, 0), (width, height), (0, height)),
                         float))
                 cv_texture = cv.CreateMat(height, width, cv_image.type)
                 cv_texture2 = cv.CreateMat(height / 2, width / 2,
                                            cv_image.type)
                 cvH = cv.fromarray(eye(3))
                 cv.FindHomography(cvimagepoints, cvtexturepoints, cvH, 0)
                 cv.WarpPerspective(cv_image, cv_texture, cvH)
                 try:
                     res = rospy.ServiceProxy(
                         'Feature0DDetect',
                         posedetection_msgs.srv.Feature0DDetect)(
                             image=self.bridge.cv_to_imgmsg(cv_texture))
                     N = len(res.features.positions) / 2
                     positions = reshape(res.features.positions, (N, 2))
                     descriptors = reshape(res.features.descriptors,
                                           (N, res.features.descriptor_dim))
                     texturedims_s = raw_input(
                         'creating template ' + self.templateppfilename +
                         ' enter the texture dimensions (2 values): ')
                     texturedims = [float(f) for f in texturedims_s.split()]
                     points3d = c_[positions[:, 0] * texturedims[0] / width,
                                   positions[:, 1] * texturedims[1] /
                                   height,
                                   zeros(len(positions))]
                     self.Itemplate = array(cv_texture)
                     self.Itemplate[:, :, (0, 2)] = self.Itemplate[:, :,
                                                                   (2, 0)]
                     self.boundingbox = transformPoints(
                         linalg.inv(self.Tright),
                         array(((0, 0, 0), (texturedims[0], texturedims[1],
                                            0))))
                     template = {
                         'type': self.type,
                         'points3d': points3d,
                         'descriptors': descriptors,
                         'Itemplate': self.Itemplate,
                         'boundingbox': self.boundingbox,
                         'Tright': self.Tright,
                         'featuretype': featuremsg.features.type
                     }
                     pickle.dump(template, open(self.templateppfilename,
                                                'w'))
                     scipy.misc.pilutil.imshow(self.Itemplate)
                     self.pe = PointPoseExtractor(type=self.type,
                                                  points3d=points3d,
                                                  descriptors=descriptors)
                     self.imagepoints = []
                 except rospy.service.ServiceException as e:
                     print(e)
                 return
         else:
             success = False
             projerror = -1.0
             try:
                 if self.featuretype is not None and self.featuretype != featuremsg.features.type:
                     rospy.logwarn(
                         'feature types do not match: %s!=%s' %
                         (self.featuretype, featuremsg.features.type))
                     raise detection_error()
                 starttime = time.time()
                 N = len(featuremsg.features.positions) / 2
                 positions = reshape(featuremsg.features.positions, (N, 2))
                 descriptors = reshape(
                     featuremsg.features.descriptors,
                     (N, featuremsg.features.descriptor_dim))
                 Tlocal, projerror = self.pe.extractpose(
                     KK,
                     positions,
                     descriptors,
                     featuremsg.features.confidences,
                     cv_image.width,
                     verboselevel=self.verboselevel,
                     neighthresh=self.neighthresh,
                     thresh=self.thresh,
                     dminexpected=self.dminexpected,
                     ransaciters=self.ransaciters)
                 success = projerror < self.errorthresh
                 if success:  # publish if error is low enough
                     Tlocalobject = dot(Tlocal, self.Tright)
                     Tglobal = dot(linalg.inv(Tcamera), Tlocalobject)
                     poseglobal = poseFromMatrix(Tglobal)
                     pose = posedetection_msgs.msg.Object6DPose()
                     pose.type = self.type
                     pose.pose.orientation.w = poseglobal[0]
                     pose.pose.orientation.x = poseglobal[1]
                     pose.pose.orientation.y = poseglobal[2]
                     pose.pose.orientation.z = poseglobal[3]
                     pose.pose.position.x = poseglobal[4]
                     pose.pose.position.y = poseglobal[5]
                     pose.pose.position.z = poseglobal[6]
                     objdetmsg = posedetection_msgs.msg.ObjectDetection()
                     objdetmsg.objects = [pose]
                     objdetmsg.header = featuremsg.image.header
                     print('local texture: ', repr(Tlocal))
                     self.pub_objdet.publish(objdetmsg)
                     self.drawpart(cv_image, Tlocalobject, KK)
                     self.drawcoordsys(cv_image, Tlocalobject, KK)
             except detection_error as e:
                 pass
             if self.verboselevel > 0:
                 rospy.loginfo(
                     '%s: %s, detection time: %fs, projerror %f < %f' %
                     (self.type, 'success' if success else 'failure',
                      time.time() - starttime, projerror, self.errorthresh))
         if self.cvwindow is not None:
             if not self.cvwindowstarted:
                 cv.NamedWindow(self.cvwindow, cv.CV_WINDOW_AUTOSIZE)
                 cv.SetMouseCallback(self.cvwindow, self.cvmousecb)
                 self.cvwindowstarted = True
             for x, y in self.imagepoints:
                 cv.Circle(cv_image, (int(x), int(y)), 2, (0, 0, 255), 2)
             cv.ShowImage(self.cvwindow, cv_image)
             char = cv.WaitKey(20)
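The template-building branch maps the four clicked image points to a width x height rectangle before warping; in cv2 that step is essentially the following (make_texture is a hypothetical name):

import cv2
import numpy as np

def make_texture(cv_image, image_points, width, height):
    dst = np.float32([[0, 0], [width, 0], [width, height], [0, height]])
    H, _mask = cv2.findHomography(np.float32(image_points), dst)
    return cv2.warpPerspective(cv_image, H, (width, height))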
Example #12
def getHomography(histogram, capturePoints, canonicalPoints):    
    
    NH = 0
    
    orderIdx = np.argsort(histogram)
    
    bestMarker = orderIdx[len(orderIdx)-1]
    secondBestMarker = orderIdx[len(orderIdx)-2]
    

    
    H1 = None
    H2 = None
    bestRatio = None
    
    mat_canonical_points1 = None
    mat_capture_points1 = None
    mat_canonical_points2 = None
    mat_capture_points2 = None
    
    #Get RANSAC homography for the best marker
    if histogram[bestMarker] >= 4:
        H1 = cv.CreateMat(3, 3, cv.CV_32FC1)
        
        mat_canonical_points1 = cv.CreateMat(len(canonicalPoints[bestMarker]), 3, cv.CV_32FC1)
        mat_capture_points1 = cv.CreateMat(len(canonicalPoints[bestMarker]), 3, cv.CV_32FC1)

       
              
        for i in range(0,len(canonicalPoints[bestMarker])):
            mat_canonical_points1[i,0] = canonicalPoints[bestMarker][i][0]
            mat_canonical_points1[i,1] = canonicalPoints[bestMarker][i][1]
            mat_canonical_points1[i,2] = 1
            
            mat_capture_points1[i,0] = capturePoints[bestMarker][i][0]
            mat_capture_points1[i,1] = capturePoints[bestMarker][i][1]
            mat_capture_points1[i,2] = 1
        
        
        
        
        cv.FindHomography(mat_canonical_points1, mat_capture_points1, H1, cv.CV_RANSAC, 10)
        
        NH+=1


        
    #Get RANSAC homography for the second best marker
    if histogram[secondBestMarker] >= 4:
        H2 = cv.CreateMat(3, 3, cv.CV_32FC1)
        mat_canonical_points2 = cv.CreateMat(len(canonicalPoints[secondBestMarker]), 3, cv.CV_32FC1)
        mat_capture_points2 = cv.CreateMat(len(canonicalPoints[secondBestMarker]), 3, cv.CV_32FC1)
        
        for i in range(0,len(canonicalPoints[secondBestMarker])):
            mat_canonical_points2[i,0] = canonicalPoints[secondBestMarker][i][0]
            mat_canonical_points2[i,1] = canonicalPoints[secondBestMarker][i][1]
            mat_canonical_points2[i,2] = 1
            
            mat_capture_points2[i,0] = capturePoints[secondBestMarker][i][0]
            mat_capture_points2[i,1] = capturePoints[secondBestMarker][i][1]
            mat_capture_points2[i,2] = 1
        
        
        
        
        cv.FindHomography(mat_canonical_points2, mat_capture_points2, H2, cv.CV_RANSAC, 10)
        
        NH+=1
    
    THRESHOLD = 10

    bestH = None
    bestImage = None 
    
    
    if H1 is not None:
        ##Calculate outliers and inliers for H1
        i1 = 0.
        o1 = 0.
        
        H1np = np.asarray(H1)
        
        
        
        for i in range(0,len(canonicalPoints[bestMarker])):
            
            
            tempPoint = np.dot(H1np,np.asmatrix(mat_canonical_points1[i]).T)
            
            dist = euclidDist([tempPoint[0]/tempPoint[2],tempPoint[1]/tempPoint[2]], capturePoints[bestMarker][i])
            
            if dist < THRESHOLD:
                i1+=1.
            else:
                o1+=1.           
    
    
    if H2 is not None:
        ##Calculate outliers and inliers for H2
        i2 = 0.
        o2 = 0.
        
        H2np = np.asarray(H2)
        
        for i in range(0,len(canonicalPoints[secondBestMarker])):
           
            tempPoint = np.dot(H2np,np.asmatrix(mat_canonical_points2[i]).T)
            
            dist = euclidDist([tempPoint[0]/tempPoint[2],tempPoint[1]/tempPoint[2]], capturePoints[secondBestMarker][i])
            
            if dist < THRESHOLD:
                i2+=1.
            else:
                o2+=1. 
        
        
        #print "I/0: "+str(i1)+", "+str(o1)+", "+str(i2)+", "+str(o2)
        
        #Compare ratios of inliers and outliers
        if(i1/(i1+o1) < i2/(i2+o2)):
            bestH = H2
            bestImage = secondBestMarker
            bestRatio = i1/(i1+o1)
        else:
            bestH = H1
            bestImage = bestMarker
            bestRatio = i2/(i2+o2)
        
        
        
    
    if H1 !=None and H2 == None:
        bestH = H1
        bestImage = bestMarker
        bestRatio = i1/(i1+o1)
        

    
    return bestH, bestImage, NH, bestRatio
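The inlier/outlier loop above is a reprojection test; the same ratio in a few numpy lines (H is the 3x3 homography, src and dst are (N, 2) point arrays, threshold in pixels):

import numpy as np

def inlier_ratio(H, src, dst, threshold=10.0):
    ones = np.ones((len(src), 1))
    projected = np.dot(np.hstack([src, ones]), H.T)   # homogeneous transform
    projected = projected[:, :2] / projected[:, 2:3]  # dehomogenize
    dist = np.linalg.norm(projected - dst, axis=1)
    return float(np.mean(dist < threshold))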