Example #1
    def drawAxisOnLane(self, perspectiveImage):
        # Only the first destination road corner is needed as the axis origin;
        # the original snippet repeated the same point three times.
        be_corner = np.float32(
            [[self.curDstRoadCorners[0][0],
              self.curDstRoadCorners[0][1], 0]]).reshape(-1, 3)

        be_axis = np.float32(
            [[self.curDstRoadCorners[0][0] + 64,
              self.curDstRoadCorners[0][1], 0],
             [self.curDstRoadCorners[0][0],
              self.curDstRoadCorners[0][1] + 128, 0],
             [self.curDstRoadCorners[0][0],
              self.curDstRoadCorners[0][1], -64]]).reshape(-1, 3)

        corner, jac = cv2.projectPoints(
            be_corner, self.rvecs, self.tvecs, self.mtx, self.dist)
        axis, jac = cv2.projectPoints(
            be_axis, self.rvecs, self.tvecs, self.mtx, self.dist)
        corner = tuple(corner[0].ravel())

        cv2.line(perspectiveImage, corner, tuple(
            axis[0].ravel()), (255, 0, 0), 5)
        cv2.line(perspectiveImage, corner, tuple(
            axis[1].ravel()), (0, 255, 0), 5)
        cv2.line(perspectiveImage, corner, tuple(
            axis[2].ravel()), (0, 0, 255), 5)
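
A note that applies to this and many of the later examples: cv2.projectPoints returns image points with shape (N, 1, 2) as floats, and OpenCV 4.x drawing functions such as cv2.line reject float coordinates (older builds were more tolerant and rounded them). A minimal hedged helper for the cast, not part of the original snippet:

def to_pixel(projected_point):
    # Flatten one (1, 2) projected point into an integer (x, y) tuple for drawing.
    return tuple(np.rint(projected_point.ravel()).astype(int))

With it, a line above could be drawn as cv2.line(perspectiveImage, to_pixel(corner_pt), to_pixel(axis[0]), (255, 0, 0), 5), where corner_pt stands for the un-tupled projected corner (a hypothetical name).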
Example #2
def draw_things(rvec, tvec, cam_matrix, dist_coefs):
    # NOTE: 'frame' is the image being drawn on, defined in the enclosing
    # scope of the original source.
    COLOR_FRAME  = [0, 255, 0]
    COLOR_MARKER = [0, 255, 255]
    side_w = 0.5
    dx, dy, dz = -(side_w / 2.0), -(side_w / 2.0), -(side_w / 2.0)

    shift_v = lambda v: [v[0] + dx, v[1] + dy, v[2] + dz]

    # Build the six faces of the cube by inserting a fixed coordinate c at
    # each axis position i of the 2D base square.
    sides = []
    base = [[0, 0], [side_w, 0], [side_w, side_w], [0.0, side_w]]
    for i in range(3):
        for c in [0, side_w]:
            sides.append([shift_v(v[:i] + [c] + v[i:]) for v in base])

    for side in sides:
        proj, _ = cv2.projectPoints(np.float32(side), rvec, tvec, cam_matrix, dist_coefs)
        cv2.polylines(frame, [np.int32(proj.reshape(-1, 2))], True, COLOR_FRAME)

    # draw marker on front side of cube
    front_side_marker = [shift_v(v) for v in
                         [[0.0, side_w / 2.0, side_w], [side_w, side_w / 2.0, side_w],
                          [side_w / 2.0, 0.0, side_w], [side_w / 2.0, side_w, side_w]]]
    proj, _ = cv2.projectPoints(np.float32(front_side_marker), rvec, tvec, cam_matrix, dist_coefs)
    cv2.polylines(frame, [np.int32(proj.reshape(-1, 2))], True, COLOR_MARKER)
Example #3
def imagePubCB(event):
    
    camPos = pose[0:3]
    camOrient = pose[3:7]
    targetPos = pose[7:10]
    targetOrient = pose[10:]
    
    # Transform
    br.sendTransform(targetPos,targetOrient,rospy.Time.now(),"ugv0","world")
    
    # Inverse camera pose
    coiInv = qInv(camOrient)
    cpiInv = rotateVec(-camPos,coiInv)
    (rvec,jac) = cv2.Rodrigues(q2m(coiInv)[0:3,0:3])
    
    # Image Projections
    (imagePoint,jac) = cv2.projectPoints(np.array([[targetPos]]),rvec,cpiInv,camMat,distCoeffs)
    imagePoint = np.squeeze(imagePoint)
    (bearing,jac) = cv2.projectPoints(np.array([[targetPos]]),rvec,cpiInv,np.eye(3),np.zeros(5))
    bearing = np.squeeze(bearing)
    
    # Publish image point
    centMsg = Center()
    centMsg.header.stamp = rospy.Time.now()
    centMsg.header.frame_id = str(markerID)
    centMsg.x = imagePoint[0]
    centMsg.y = imagePoint[1]
    centMsg.x1 = bearing[0]
    centMsg.x2 = bearing[1]
    centerPub.publish(centMsg)
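
Worth noting in this example: the second cv2.projectPoints call passes np.eye(3) and zero distortion, so "bearing" is the target in normalized image coordinates (x/z, y/z in the camera frame), a bearing direction independent of the camera intrinsics. A minimal illustration with made-up values:

pt_cam = np.array([[[0.4, -0.2, 2.0]]])  # a point already in the camera frame
norm_pt, _ = cv2.projectPoints(pt_cam, np.zeros(3), np.zeros(3), np.eye(3), np.zeros(5))
assert np.allclose(norm_pt.squeeze(), [0.4 / 2.0, -0.2 / 2.0])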
Example #4
    def render(self, CSH, hammer, other, image, axisList):

        # load calibration data
        with np.load('../params/webcam_calibration_ouput.npz') as X:
            mtx, dist, _, _ = [X[i] for i in ('mtx', 'dist', 'rvecs', 'tvecs')]

        # set up criteria, object points and axis
        criteria = (cv2.TERM_CRITERIA_EPS +
                    cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

        objp = np.zeros((6 * 7, 3), np.float32)
        objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)

        # find grid corners in image
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # pattern size must match the 7x6 grid defined in objp
        ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)

        if ret == True:
            cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            # OpenCV 2.x API: returns (rvec, tvec, inliers); OpenCV 3+ prepends retval
            rvecs, tvecs, _ = cv2.solvePnPRansac(objp, corners, mtx, dist)
            if CSH == True:
                # project 3D points to image plane
                for axis in axisList:
                    imgpts, _ = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
                    self._draw_cube(image, imgpts)
                return image
            elif hammer == True:
                imgptsOther, _ = cv2.projectPoints(
                    other, rvecs, tvecs, mtx, dist)
                self._draw_cube(image, imgptsOther)
                return image
Example #5
 def test_projectPoints(self):
     objpt = np.float64([[1,2,3]])
     imgpt0, jac0 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([]))
     imgpt1, jac1 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None)
     self.assertEqual(imgpt0.shape, (objpt.shape[0], 1, 2))
     self.assertEqual(imgpt1.shape, imgpt0.shape)
     self.assertEqual(jac0.shape, jac1.shape)
     self.assertEqual(jac0.shape[0], 2*objpt.shape[0])
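
The test above pins down the cv2.projectPoints output contract: image points come back with shape (N, 1, 2), and the Jacobian has 2N rows (an x-row and a y-row per point), with columns for the derivatives with respect to the rotation vector, translation vector, focal lengths, principal point, and distortion coefficients. A short sketch of the same contract for two points (standard OpenCV behavior, not part of the test suite):

objpt = np.float64([[1, 2, 3], [4, 5, 6]])  # two 3D points
imgpt, jac = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None)
assert imgpt.shape == (2, 1, 2)   # (N, 1, 2) image points
assert jac.shape[0] == 4          # two Jacobian rows (x, y) per point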
Example #6
    def DrawGrid(self, img, rvec, tvec):
        if self.camerainfo is not None:
            hLinspace = (N.array(range(self.nRows)) - (self.nCols-1)/2) * self.checker_size
            hZeros = N.zeros(hLinspace.shape)
            hOnes = N.ones(hLinspace.shape) * self.checker_size * (self.nCols - 1)/2

            vLinspace = (N.array(range(self.nCols)) - (self.nRows-1)/2) * self.checker_size
            vZeros = N.zeros(vLinspace.shape)
            vOnes = N.ones(vLinspace.shape) * self.checker_size * (self.nRows - 1)/2

            hStart = N.array([-hOnes, hLinspace, hZeros]).astype('float32')
            hEnd   = N.array([ hOnes, hLinspace, hZeros]).astype('float32')
            vStart = N.array([vLinspace, -vOnes, vZeros]).astype('float32')
            vEnd   = N.array([vLinspace,  vOnes, vZeros]).astype('float32')
            
            if self.bRotateGrid:
                hStart = self.from_homo(N.dot(self.T_stage_arena, self.to_homo(hStart)))
                hEnd   = self.from_homo(N.dot(self.T_stage_arena, self.to_homo(hEnd)))
                vStart = self.from_homo(N.dot(self.T_stage_arena, self.to_homo(vStart)))
                vEnd   = self.from_homo(N.dot(self.T_stage_arena, self.to_homo(vEnd)))
                self.bRotateGrid = False
            
            widthGridline = 1
            
            (hStartProjected,jacobian) = cv2.projectPoints(hStart.transpose(),
                                              rvec,
                                              tvec,
                                              self.K,
                                              self.D)
            (hEndProjected,jacobian) = cv2.projectPoints(hEnd.transpose(),
                                              rvec,
                                              tvec,
                                              self.K,
                                              self.D)
            (vStartProjected,jacobian) = cv2.projectPoints(vStart.transpose(),
                                              rvec,
                                              tvec,
                                              self.K,
                                              self.D)
            (vEndProjected,jacobian) = cv2.projectPoints(vEnd.transpose(),
                                              rvec,
                                              tvec,
                                              self.K,
                                              self.D)

            for iLine in range(self.nRows):
                cv2.line(img,
                         tuple(hStartProjected[iLine,:][0].astype('int32')),
                         tuple(hEndProjected[iLine,:][0].astype('int32')),
                         cv.CV_RGB(self.colorMax, 0, 0), widthGridline)
            for iLine in range(self.nCols):
                cv2.line(img,
                         tuple(vStartProjected[iLine,:][0].astype('int32')),
                         tuple(vEndProjected[iLine,:][0].astype('int32')),
                         cv.CV_RGB(self.colorMax, 0, 0), widthGridline)
Example #7
def cube(img):
    #img_in = cv2.imread("Picture 27.jpg")
    #img = cv2.resize(img_in,None,fx=0.5, fy=0.5, interpolation = cv2.INTER_CUBIC)
    #cv2.imshow('img',img)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    #cv2.imshow('gray',gray)
    ret, corners = cv2.findChessboardCorners(gray, (8,7),None)
    # print ret,corners

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    objp = np.zeros((7*8,3), np.float32)
    objp[:,:2] = np.mgrid[0:8,0:7].T.reshape(-1,2)

    #axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
    axis = np.float32([[0,0,0], [0,3,0], [3,3,0], [3,0,0],
                       [0,0,-3],[0,3,-3],[3,3,-3],[3,0,-3] ])

    if ret == True:
        cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)

        # Find the rotation and translation vectors (OpenCV 2.x solvePnPRansac API).
        rvecs, tvecs, inliers = cv2.solvePnPRansac(objp, corners, mtx, dist)

        # project 3D points to image plane
        imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
        #print imgpts
        img = draw2(img,corners,imgpts)
        
    return img
Example #8
 def update(self, time, ims):
     """Update calibration (transforms, markers, and times)."""
     if time in self.times: return
     N = len(self.fridge_config.cameras.items())
     if len(ims) != N: raise Exception("ExtrinsicCalibration update() expects %d images." % N)
     transforms, markers = [], []
     for im in ims:
         all_detected = self.md.detect(im)
         calib = [(m, m_el) for m in all_detected for (m_id, m_el) in self.fridge_config.markers.items()\
                  if int(m_id)==m.id]
         noncalib = [v for v in all_detected if v.id not in map(int, self.fridge_config.markers.keys())]
         cur_transform, cur_markers = None, noncalib
         for combo in [w for v in range(len(calib),2,-1) for w in itc(calib,v)]:
             combo_markers, marker_els = zip(*combo)
             combo_corners = np.array([v.corners for v in combo_markers]).reshape(-1,2)
             pts = np.array([v.T_self_in_world.dot(v.points_in_self)[:3, :].T \
                             for v in marker_els]).reshape(-1,3)
             r, rv, tv = cv2.solvePnP(pts, combo_corners, CAMERA_CMAT, CAMERA_DCOEFFS)
             ip = cv2.projectPoints(pts, rv, tv, CAMERA_CMAT, CAMERA_DCOEFFS)[0].reshape(-1,2)
             perror = np.linalg.norm((ip-combo_corners)) / ip.shape[0]
             if perror < 5:
                 R = cv2.Rodrigues(rv)[0]
                 T_world_in_camera = getTransformFromRt(R, tv) # since marker positions in world coords
                 cur_markers = list(combo_markers) + noncalib
                 cur_transform = T_world_in_camera
                 break
         transforms.append(cur_transform)
         markers.append(cur_markers)
     self.transforms.append(transforms)
     self.markers.append(markers)
     self.times.append(time)
Example #9
    def carve(self, camera, camera_pose, image_mask):

        camera_inv = camera_pose.Inverse()
        # A single 3D point at the object origin; each voxel's position enters
        # the projection through the per-voxel pose (cRvec, cTvec) below.
        out_points = np.zeros((1, 3, 1), dtype="float32")
        cam = np.array(camera.camera_matrix)
        dist = np.array(camera.distortion_coefficients)
        for ix in range(0, self.side):
            for iy in range(0, self.side):
                for iz in range(0, self.side):
                    p = self.getCoordinates(np.array([ix, iy, iz]))
                    point = PyKDL.Frame(PyKDL.Vector(p[0], p[1], p[2]))
                    object_to_camera = camera_inv * point
                    cRvec, cTvec = transformations.KDLToCv(object_to_camera)

                    point2d, _ = cv2.projectPoints(
                        out_points, cRvec, cTvec,
                        cam,
                        dist
                    )
                    point2d = point2d.reshape((2, 1))
                    i = int(point2d[1])
                    j = int(point2d[0])
                    if i < image_mask.shape[0] and j < image_mask.shape[1] and i >= 0 and j >= 0:
                        if image_mask[i, j] < 1:
                            self._voxels[
                                ix, iy, iz] = self._voxels[ix, iy, iz] + 1

        import scipy.io
        scipy.io.savemat("provola.mat", mdict={
                         'out': self._voxels}, oned_as='row')
Example #10
def assign_points_thermal_data(dataset_name, cam_index, thermal_cameras, K_thermal,d_thermal, cameras_top, points_3d):
    pic_index, rvec, tvec = thermal_cameras[cam_index]
    visible_point_indices = np.array(cameras_top[cam_index][2])
    print(visible_point_indices.shape)
    visible_points = np.reshape(points_3d[visible_point_indices,:3],(-1,1,3))
#     print visible_point_indices
    img_points, _ = cv2.projectPoints(visible_points, rvec, tvec, K_thermal, d_thermal)
    img_points = np.squeeze(img_points)
    img_points = np.array([[int(u),int(v)] for u,v in img_points])
    
#     print img_points

    valid = np.nonzero(np.array([u>=0 and v>=0 and u<640 and v<480 for u,v in img_points]))[0].astype(int)
    print(valid.shape)
    thermal_img = cv2.imread(config.DATASET_PATH + dataset_name +
                              '/data/thermal'+str(pic_index)+'.jpg')
    
#     thermal_img_vis = thermal_img[:]
#     for u,v, in img_points:
#         thermal_img_vis[v,u] = [255,0,0]
#     cv2.imshow(str(pic_index),thermal_img_vis)
#     cv2.waitKey()

    points_thermal = np.array([thermal_img[v,u] for u,v in img_points if u>=0 and v>=0 and u<640 and v<480])
#     points_thermal = np.array([[255, 255, 255] for u,v in img_points if u>=0 and v>=0 and u<640 and v<480])    
    return (visible_point_indices[valid], points_thermal)
Example #11
def reprojection_error_ext(objp, imgp, cameraMatrix, distCoeffs, rvecs, tvecs):
    """
    Returns the mean absolute error, and the RMS error of the reprojection
    of 3D points "objp" on the images from a camera
    with intrinsics ("cameraMatrix", "distCoeffs") and poses ("rvecs", "tvecs").
    The original 2D points should be given by "imgp".
    
    See OpenCV's doc about the format of "cameraMatrix", "distCoeffs", "rvec" and "tvec".
    """
    
    mean_error = np.zeros((1, 2))
    square_error = np.zeros((1, 2))
    n_images = len(imgp)

    for i in range(n_images):
        imgp_reproj, jacob = cv2.projectPoints(
                objp[i], rvecs[i], tvecs[i], cameraMatrix, distCoeffs )
        error = imgp_reproj.reshape(-1, 2) - imgp[i]
        mean_error += abs(error).sum(axis=0) / len(imgp[i])
        square_error += (error**2).sum(axis=0) / len(imgp[i])

    mean_error = cv2.norm(mean_error / n_images)
    square_error = np.sqrt(square_error.sum() / n_images)
    
    return mean_error, square_error
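
A hedged usage sketch for reprojection_error_ext (variable names are hypothetical): feed it the correspondence lists and poses produced by cv2.calibrateCamera, with each corner array flattened to (N, 2) so the subtraction inside broadcasts per point:

imgpoints_flat = [c.reshape(-1, 2) for c in imgpoints]
ret, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, gray.shape[::-1], None, None)
mae, rmse = reprojection_error_ext(objpoints, imgpoints_flat, cameraMatrix,
                                   distCoeffs, rvecs, tvecs)
print("mean abs error: %.4f px, RMS: %.4f px" % (mae, rmse))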
Example #12
def drawAxis(img, corners, rvecs, tvecs, mtx, dist, scale=1):
    axis = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, -1]])

    axis = axis * scale

    imgpts, _jac = cv2.projectPoints(axis, np.array(rvecs), np.array(tvecs), mtx, dist)
    img = draw3DAxisLines(img, corners[0], imgpts)
    return img
Example #13
    def get_view_points(self, angle, height):
        width = 0.3
        z_offset = -0.2

        view_points = [
            (616, 425, 1),
            (469, 350, 1),
            (169, 356, 1),
            (23,  430, 1),
        ]
        points = []
        for pt in view_points:
            pt = np.matrix(pt).T  # column vector (u, v, 1)

            back_projected = (np.matrix(self.c920_cam).I * pt).A1
            print(' one point', back_projected)
            points.append(back_projected)

        objectPoints = [np.array([p], np.float32) for p in points]

        rvec = angle * np.array((1.0, 0.0, 0.0), np.float32)
        tvec = np.array((0.0, 0.0, 0.0), np.float32)


        distCoeffs = np.array([0.169985, -0.401052, 0.005058, -0.00463, 0.0], np.float32)
        # distCoeffs = np.array([0.0, 0.0, 0.0, 0.0, 0.0])

        view_coordinates = []
        for i in range(4):
            img_point, _ = cv2.projectPoints(objectPoints[i], rvec, tvec, self.c920_cam, distCoeffs)
            view_coordinates.append(img_point[0][0])
        return np.float32(view_coordinates)
Example #14
    def createExtrinsicsMatrix(self, chessboardImage, camera_matrix):
        found, corners = cv2.findChessboardCorners(chessboardImage,self.patternSize)
        if not found:
            return

        objectPoints = np.array([self.objectPoint])
        imagePoints = np.array([corners])
        # calibrateCamera expects the image size as (width, height)
        imageSize = chessboardImage.shape[:2][::-1]
        _, _, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(
            objectPoints, imagePoints, imageSize, None, None)

        objectPoints = np.asarray(self.objectPoint, np.float64)
        imagePoints = np.asarray(corners, np.float64)
        camera_matrix = np.asarray(camera_matrix, np.float64)
        dist_coefs = np.array([0, 0, 0, 0], np.float64)

        found, rvecs_new, tvecs_new = cv2.solvePnP(objectPoints, imagePoints, camera_matrix, dist_coefs)
        rotationMatrix = cv2.Rodrigues(rvecs_new)[0]

        projected = cv2.projectPoints(np.array([np.array([0,0,0], np.float64)]), rvecs_new, tvecs_new, camera_matrix, dist_coefs)
        print(projected)

        extrinsicMatrix = np.zeros((3,4))
        extrinsicMatrix[:,:-1] = rotationMatrix
        extrinsicMatrix[:,-1:] = tvecs_new

        return extrinsicMatrix
Example #15
    def _map_monocular(self,p):
        if '3d' not in p['method']:
            return None

        gaze_point =  np.array(p['circle_3d']['normal'] ) * self.gaze_distance  + np.array( p['sphere']['center'] )

        image_point, _  =  cv2.projectPoints( np.array([gaze_point]) , self.rotation_vector, self.translation_vector , self.camera_matrix , self.dist_coefs )
        image_point = image_point.reshape(-1,2)
        image_point = normalize( image_point[0], self.world_frame_size , flip_y = True)

        eye_center = self.toWorld(p['sphere']['center'])
        gaze_3d = self.toWorld(gaze_point)
        normal_3d = np.dot( self.rotation_matrix, np.array( p['circle_3d']['normal'] ) )

        g = {   'norm_pos':image_point,
                'eye_center_3d':eye_center.tolist(),
                'gaze_normal_3d':normal_3d.tolist(),
                'gaze_point_3d':gaze_3d.tolist(),
                'confidence':p['confidence'],
                'timestamp':p['timestamp'],
                'base_data':[p]}

        if self.visualizer.window:
            self.gaze_pts_debug.append( gaze_3d )
            self.sphere['center'] = eye_center #eye camera coordinates
            self.sphere['radius'] = p['sphere']['radius']
        return g
Example #16
def drawAxisSystem(img, cameraMatrix, distCoeffs, rvec, tvec, scale=4.):
    """
    Draws an axis-system on image "img" of which the camera has
    the intrinsics ("cameraMatrix", "distCoeffs") and pose ("rvec", "tvec").
    The scale of the axis-system is set by "scale".
    
    See OpenCV's doc about the format of "cameraMatrix", "distCoeffs", "rvec" and "tvec".
    """
    
    # Define world object-points
    axis_system_objp = np.array([ [0., 0., 0.],      # Origin (black)
                                  [1., 0., 0.],      # X-axis (red)
                                  [0., 1., 0.],      # Y-axis (green)
                                  [0., 0., 1.] ])    # Z-axis (blue)
    axis_system_objp *= scale
    
    # Project the object-points on the camera
    imgp_reproj, jacob = cv2.projectPoints(
            axis_system_objp, rvec, tvec, cameraMatrix, distCoeffs )
    origin, xAxis, yAxis, zAxis = np.rint(imgp_reproj.reshape(-1, 2)).astype(np.int32)    # round to nearest int
    
    # If projected origin lays out of the image, don't draw axis-system
    if not (0 <= origin[0] < img.shape[1] and 0 <= origin[1] < img.shape[0]):
        return img
    
    # Draw the axis-system
    line(img, origin, xAxis, rgb(255,0,0), thickness=2, lineType=cv2.CV_AA)
    line(img, origin, yAxis, rgb(0,255,0), thickness=2, lineType=cv2.CV_AA)
    line(img, origin, zAxis, rgb(0,0,255), thickness=2, lineType=cv2.CV_AA)
    circle(img, origin, 4, rgb(0,0,0), thickness=-1)    # filled circle, radius 4
    circle(img, origin, 5, rgb(255,255,255), thickness=2)    # white 'O', radius 5
    
    return img
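
A hedged usage sketch (hypothetical variable names): recover the board pose with cv2.solvePnP, then overlay the axis-system. Note that cv2.CV_AA used above is the OpenCV 2.x constant; OpenCV 3+ renamed it cv2.LINE_AA.

ok, rvec, tvec = cv2.solvePnP(objp, corners, cameraMatrix, distCoeffs)
if ok:
    img = drawAxisSystem(img, cameraMatrix, distCoeffs, rvec, tvec, scale=4.)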
Example #17
def undistort(mtx, dist, objpoints, imgpoints, rvecs, tvecs, img):
    # read in an image of choice
    # useful for reading in and segmenting the board to make Hough lines easier
    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))

    # undistort
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)

    # crop the image
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]
    cv2.imwrite('calibresult.png', dst)

    # undistort
    mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w, h), 5)
    dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)

    # crop the image
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]
    cv2.imwrite('calibresult.png', dst)

    mean_error = 0
    for i in range(len(objpoints)):
        imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
        error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
        mean_error += error

    print("total error: ", mean_error / len(objpoints))
Example #18
def getProjectedPoints(points_3D, Rvect, Tvect, camera, distVect):
    resultPoints = cv2.projectPoints(points_3D, Rvect, Tvect, camera, distVect)
    resultPoints2 = [[resultPoints[0][0][0][0], resultPoints[0][0][0][1]],
                 [resultPoints[0][1][0][0], resultPoints[0][1][0][1]],
                 [resultPoints[0][2][0][0], resultPoints[0][2][0][1]],
                 [resultPoints[0][3][0][0], resultPoints[0][3][0][1]]]
    return resultPoints2
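
Since the first return value of cv2.projectPoints is already an (N, 1, 2) array, the element-by-element unpacking above can be collapsed into a reshape. An equivalent, more compact sketch:

def getProjectedPoints(points_3D, Rvect, Tvect, camera, distVect):
    projected, _ = cv2.projectPoints(points_3D, Rvect, Tvect, camera, distVect)
    # Same four [x, y] pairs as the manual indexing above.
    return projected.reshape(-1, 2)[:4].tolist()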
Example #19
def getCameraCalibration(image, objp):
    global criteria, mtx, dist

    axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
    #height, width, depth = imgorg.shape
    gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (7,7), None)

    h,w =image.shape[:2]

    # If found, add object points, image points (after refining them)
    if ret == True:
        #objpoints.append(objp)

        cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
        #imgpoints.append(corners)

        rvecs,tvecs,inliers = cv2.solvePnPRansac(objp, corners, mtx, dist)

        imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
        img = draw(image, corners, imgpts)
        
        return img

    return image
Example #20
def projectPoints(radar_data, args):
    """ Projects radar points into the camera's frame
        Args: radar_data, the output from loadRDR
              args, the output from parse_args
        Returns: A new numpy array with columns:
                    [dist, lat_dist, z(guess), l, w, rcs, rel_spd, id, x, y]
    """
    params = args["params"]
    cam_num = args["cam_num"]
    cam = params["cam"][cam_num - 1]

    radar_data[:, :3] = calibrateRadarPts(radar_data[:, :3], params["radar"])

    pts = radar_data[:, :3]
    pts_wrt_cam = pts + cam["displacement_from_l_to_c_in_lidar_frame"]
    pts_wrt_cam = np.dot(R_to_c_from_l(cam), pts_wrt_cam.transpose())

    (pix, J) = cv2.projectPoints(
        pts_wrt_cam.transpose(), np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0]), cam["KK"], cam["distort"]
    )
    pix = pix.transpose()
    pix = np.around(pix[:, 0, :])
    pix = pix.astype(np.int32)

    # Filter the points to remove points that exist outside the video frame
    # This is not a problem for the radar because the cameras see everything
    # the radar does
    # dist_sqr = np.sum(pts_wrt_cam[0:3, :] ** 2, axis = 0)
    # mask = (pix[0,:] > 0) & (pix[1,:] > 0) & \
    #     (pix[0,:] < 1279) & (pix[1,:] < 959) & \
    #     (pts_wrt_cam[2,:] > 0) & (dist_sqr > 3)

    # Outputs [dist, lat_dist, z(guess), l, w, rcs, rel_spd, id, x, y]
    radar_data_projected = np.hstack((radar_data, pix.transpose()))
    return radar_data_projected
Example #21
    def DrawOriginAxes(self, cvimage, rvec, tvec):
        if self.camerainfo is not None:
            self.pointsAxes       = N.zeros([4, 3], dtype=N.float32) #cv.CreateMat(4, 3, cv.CV_32FC1)
            self.pointsAxes[0][:] = 0.0               # (0,0,0)T origin point,    pt[0] 
            self.pointsAxes[1][0] = self.checker_size # (1,0,0)T point on x-axis, pt[1]
            self.pointsAxes[2][1] = self.checker_size # (0,1,0)T point on y-axis, pt[2]
            self.pointsAxes[3][2] = self.checker_size # (0,0,1)T point on z-axis, pt[3]
            widthAxisLine = 3
            
            (self.pointsAxesProjected,jacobian) = cv2.projectPoints(self.pointsAxes,
                                                                    rvec,
                                                                    tvec,
                                                                    self.K,
                                                                    self.D
                                                                    )
                
            # origin point
            pt1 = tuple(self.pointsAxesProjected[0][0])

            # draw x-axis
            pt2 = tuple(self.pointsAxesProjected[1][0])
            cv2.line(cvimage, pt1, pt2, cv.CV_RGB(self.colorMax,0,0), widthAxisLine)

            # draw y-axis
            pt2 = tuple(self.pointsAxesProjected[2][0])
            cv2.line(cvimage, pt1, pt2, cv.CV_RGB(0,self.colorMax,0), widthAxisLine)
            
            # draw z-axis
            pt2 = tuple(self.pointsAxesProjected[3][0])
            cv2.line(cvimage, pt1, pt2, cv.CV_RGB(0,0,self.colorMax), widthAxisLine)
Example #22
def projectPoints(mbly_data, args, T, R):
    """ Projects mobileye points into the camera's frame
        Args: mbly_data, the output from loadMblyWindow
              args, the output from parse_args
    """
    params = args['params']
    cam_num = args['cam_num']
    cam = params['cam'][cam_num]

    # Move points to the lidar FoR
    pts_wrt_lidar = T_from_mbly_to_lidar(mbly_data, T, R)

    # Move the points to the cam FoR
    pts_wrt_cam = pts_wrt_lidar +\
      cam['displacement_from_l_to_c_in_lidar_frame']
    pts_wrt_cam = np.dot(R_to_c_from_l(cam), pts_wrt_cam.transpose())

    # Project the points into the camera space
    (pix, J) = cv2.projectPoints(pts_wrt_cam.transpose(),
        np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0]),
        cam['KK'], cam['distort'])
    pix = pix.transpose()
    pix = np.around(pix[:, 0, :])
    pix = pix.astype(np.int32)

    mbly_data_projected = np.hstack((mbly_data, pix.transpose()))
    return mbly_data_projected
Example #23
    def draw3dCoordAxis(self, img=None, thickness=8):
        '''
        draw the 3d coordinate axes into the given image
        if img is False:
            create an empty image
        '''
        if img is None:
            img = self.img
        elif img is False:
            img = np.zeros(shape=self.img.shape, dtype=self.img.dtype)
        else:
            img = imread(img)
        # project 3D points to image plane:
        # self.opts['obj_width_mm'], self.opts['obj_height_mm']
        w, h = self.opts['new_size']
        axis = np.float32([[0.5 * w, 0.5 * h, 0],
                           [w, 0.5 * h, 0],
                           [0.5 * w, h, 0],
                           [0.5 * w, 0.5 * h, -0.5 * w]])
        t, r = self.pose()
        imgpts = cv2.projectPoints(axis, r, t,
                                   self.opts['cameraMatrix'],
                                   self.opts['distCoeffs'])[0]

        mx = int(img.max())
        origin = tuple(imgpts[0].ravel())
        cv2.line(img, origin, tuple(imgpts[1].ravel()), (0, 0, mx), thickness)
        cv2.line(img, origin, tuple(imgpts[2].ravel()), (0, mx, 0), thickness)
        cv2.line(
            img, origin, tuple(imgpts[3].ravel()), (mx, 0, 0), thickness * 2)
        return img
Example #24
    def render_cube(self, image, points):
        # load calibration data
        with np.load('webcam_calibration_ouput.npz') as X:
            mtx, dist, _, _ = [X[i] for i in ('mtx', 'dist', 'rvecs', 'tvecs')]

        # set up criteria, image, points and axis
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        imgp = np.array(points, dtype="float32")

        objp = np.array([[0., 0., 0.], [1., 0., 0.],
                         [1., 1., 0.], [0., 1., 0.]], dtype="float32")

        axis = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0],
                           [0, 0, -1], [0, 1, -1], [1, 1, -1], [1, 0, -1]])

        # project 3D points to image plane
        cv2.cornerSubPix(gray, imgp, (11, 11), (-1, -1), criteria)
        rvecs, tvecs, _ = cv2.solvePnPRansac(objp, imgp, mtx, dist)
        imgpts, _ = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)

        # draw cube
        self._draw_cube(image, imgpts)

        return image
Example #25
def PointsMask(pos_wrt_camera, Camera):
    if False:  # 'distort' in Camera:
      (vpix, J) = cv2.projectPoints(pos_wrt_camera.transpose(), np.array([0.0,0.0,0.0]), np.array([0.0,0.0,0.0]), Camera['KK'], Camera['distort'])
    else:
      vpix = dot(Camera['KK'], divide(pos_wrt_camera, pos_wrt_camera[2,:]))
    vpix = around(vpix).astype(np.int32)
    return vpix
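
The else branch above is the plain pinhole model: with rvec = tvec = 0, cv2.projectPoints reduces to K applied to (X/Z, Y/Z, 1) plus lens distortion, so with zero distortion the two paths agree. A small self-contained check of that equivalence (illustrative values only):

K = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
pts = np.array([[0.5, -0.2, 2.0], [1.0, 1.0, 4.0]])  # points already in the camera frame
proj, _ = cv2.projectPoints(pts, np.zeros(3), np.zeros(3), K, np.zeros(5))
pinhole = K.dot((pts / pts[:, 2:3]).T)[:2].T          # divide by depth, apply K
assert np.allclose(proj.reshape(-1, 2), pinhole)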
Example #26
def plot_circle_centers(board_directories, save_path, world_coords, image_coords, rvec, tvec, cam_mat, dist_coeffs):
    
    start = 0
    end = 44
    for directory in board_directories:
        img = cv2.imread(os.path.join(directory, 'cam1_frame_1.bmp'), 1)
        dist_world_coords = world_coords[start:end, :]
        
        if start == 0:
            dist = 50.0
        elif start == 44:
            dist = 100.0
        elif start == 88:
            dist = 450.0

        screen_edges = np.array([[-121/2.0, 68/2.0, dist],
                                 [121/2.0, 68/2.0, dist],
                                 [-121/2.0, -68/2.0, dist],
                                 [121/2.0, -68/2.0, dist]])
        
        dist_world_coords = np.concatenate((dist_world_coords, screen_edges))
        camera_coords, jac = cv2.projectPoints(objectPoints=dist_world_coords,rvec=rvec,tvec=tvec,cameraMatrix=cam_mat,distCoeffs=dist_coeffs)
        camera_coords = camera_coords.squeeze()
        for coords in camera_coords:
            cv2.circle(img, tuple(coords), 3, (0,0,255), -1)
        for coords in image_coords[start:end]:
            cv2.circle(img, tuple(coords), 5, (180,255,0), 2)
        cv2.imwrite(os.path.join(save_path, 'reprojection_'+str(int(dist))+'.png'), img)
        
        start += 44
        end += 44
Example #27
def construct_test_image( az_rot, pitch_rot, pos_x, pos_y, pos_z ):
	projected = {}
	rectangles = []
	y_axis = np.array([0,1,0])
	az_rot_matrix = rotation_matrix(y_axis,az_rot)
	x_axis = np.array([1,0,0])
	el_rot_matrix = rotation_matrix(x_axis,pitch_rot)

	sum_rot_matrix = np.dot(el_rot_matrix,az_rot_matrix)
	
	for k, a in test_locs.items():
		p = cv2.projectPoints(np.array([a + [-pos_x,-pos_y,-pos_z]]), sum_rot_matrix, np.float64([0,0,0]), cameraMatrix, distCoeff)[0][0][0]
		if ( 0 <= p[0] < 319 ) and ( 0 <= p[1] < 239 ):
			projected[k] = p

	if ('ml-ul' in projected) and ('ml-ll' in projected) and ('ml-ur' in projected) and ('ml-lr' in projected):
		rectangles.append( get_sides( projected['ml-ul'], projected['ml-ll'], projected['ml-ur'], projected['ml-lr'] ) )

	if ('mr-ul' in projected) and ('mr-ll' in projected) and ('mr-ur' in projected) and ('mr-lr' in projected):
		rectangles.append( get_sides( projected['mr-ul'], projected['mr-ll'], projected['mr-ur'], projected['mr-lr'] ) )

	if ('bt-ul' in projected) and ('bt-ll' in projected) and ('bt-ur' in projected) and ('bt-lr' in projected):
		rectangles.append( get_sides( projected['bt-ul'], projected['bt-ll'], projected['bt-ur'], projected['bt-lr'] ) )

	if ('tp-ul' in projected) and ('tp-ll' in projected) and ('tp-ur' in projected) and ('tp-lr' in projected):
		rectangles.append( get_sides( projected['tp-ul'], projected['tp-ll'], projected['tp-ur'], projected['tp-lr'] ) )

	return rectangles
Example #28
    def update(self,frame,events):

        gaze_pts = []
        for p in events['pupil_positions']:
            if p['method'] == '3d c++' and p['confidence'] > self.g_pool.pupil_confidence_threshold:

                gaze_point =  np.array(p['circle_3d']['normal'] ) * self.gaze_distance  + np.array( p['sphere']['center'] )

                image_point, _  =  cv2.projectPoints( np.array([gaze_point]) , self.rotation_vector, self.translation_vector , self.camera_matrix , self.dist_coefs )
                image_point = image_point.reshape(-1,2)
                image_point = normalize( image_point[0], (frame.width, frame.height) , flip_y = True)

                eye_center = self.toWorld(p['sphere']['center'])
                gaze_3d = self.toWorld(gaze_point)
                normal_3d = np.dot( self.rotation_matrix, np.array( p['circle_3d']['normal'] ) )

                gaze_pts.append({   'norm_pos':image_point,
                                    'eye_center_3d':eye_center.tolist(),
                                    'gaze_normal_3d':normal_3d.tolist(),
                                    'gaze_point_3d':gaze_3d.tolist(),
                                    'confidence':p['confidence'],
                                    'timestamp':p['timestamp'],
                                    'base':[p]})

                if self.visualizer.window:
                    self.gaze_pts_debug.append( gaze_3d )
                    self.sphere['center'] = eye_center #eye camera coordinates
                    self.sphere['radius'] = p['sphere']['radius']

        events['gaze_positions'] = gaze_pts
Example #29
def project_xy(xy_coords, pvec):

    # get cubic polynomial coefficients given
    #
    #  f(0) = 0, f'(0) = alpha
    #  f(1) = 0, f'(1) = beta

    alpha, beta = tuple(pvec[CUBIC_IDX])

    poly = np.array([
        alpha + beta,
        -2*alpha - beta,
        alpha,
        0])

    xy_coords = xy_coords.reshape((-1, 2))
    z_coords = np.polyval(poly, xy_coords[:, 0])

    objpoints = np.hstack((xy_coords, z_coords.reshape((-1, 1))))

    image_points, _ = cv2.projectPoints(objpoints,
                                        pvec[RVEC_IDX],
                                        pvec[TVEC_IDX],
                                        K, np.zeros(5))

    return image_points
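
The coefficient vector built above is the unique cubic satisfying those four constraints: writing f(x) = ax^3 + bx^2 + cx + d, the conditions f(0) = 0 and f'(0) = alpha give d = 0 and c = alpha, and f(1) = 0, f'(1) = beta then solve to a = alpha + beta and b = -2*alpha - beta. A quick numeric check (illustrative values only):

alpha, beta = 0.3, -0.7
poly = np.array([alpha + beta, -2 * alpha - beta, alpha, 0])
dpoly = np.polyder(poly)
assert np.isclose(np.polyval(poly, 0), 0) and np.isclose(np.polyval(dpoly, 0), alpha)
assert np.isclose(np.polyval(poly, 1), 0) and np.isclose(np.polyval(dpoly, 1), beta)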
Example #30
    def on_frame(self, vis):
        match = self.match_frames()
        if match is None:
            return
        w, h = getsize(self.frame)
        p0, p1, H = match
        for (x0, y0), (x1, y1) in zip(np.int32(p0), np.int32(p1)):
            cv2.line(vis, (x0+w, y0), (x1, y1), (0, 255, 0))
        x0, y0, x1, y1 = self.ref_rect
        corners0 = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
        img_corners = cv2.perspectiveTransform(corners0.reshape(1, -1, 2), H)
        cv2.polylines(vis, [np.int32(img_corners)], True, (255, 255, 255), 2)

        corners3d = np.hstack([corners0, np.zeros((4, 1), np.float32)])
        fx = 0.9
        K = np.float64([[fx*w, 0,    0.5*(w-1)],
                        [0,    fx*w, 0.5*(h-1)],
                        [0.0,  0.0,  1.0]])
        dist_coef = np.zeros(4)
        ret, rvec, tvec = cv2.solvePnP(corners3d, img_corners, K, dist_coef)
        verts = ar_verts * [(x1-x0), (y1-y0), -(x1-x0)*0.3] + (x0, y0, 0)
        verts = cv2.projectPoints(verts, rvec, tvec, K, dist_coef)[0].reshape(-1, 2)
        for i, j in ar_edges:
            (x0, y0), (x1, y1) = verts[i], verts[j]
            cv2.line(vis, (int(x0), int(y0)), (int(x1), int(y1)), (255, 255, 0), 2)
Example #31
undistorted = undistorted[y:y + h, x:x + w]
cv2.imwrite('./generated/resultl.jpg', undistorted)

imgr = cv2.imread('./images/chessboard-5r.jpg')
h, w = imgr.shape[:2]
newcameramtxr, roir = cv2.getOptimalNewCameraMatrix(mtxr, distr, (w, h), 1,
                                                    (w, h))

undistorted = cv2.undistort(imgr, mtxr, distr, None, newcameramtxr)
x, y, w, h = roir
undistorted = undistorted[y:y + h, x:x + w]
cv2.imwrite('./generated/resultr.jpg', undistorted)

mean_error = 0
for i in range(len(objpoints)):
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecsl[i], tvecsl[i], mtxl,
                                      distl)
    error = cv2.norm(imgpointsl[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
    mean_error += error
print("total error(left): {}".format(mean_error / len(objpoints)))

mean_error = 0
for i in range(len(objpoints)):
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecsr[i], tvecsr[i], mtxr,
                                      distr)
    error = cv2.norm(imgpointsr[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
    mean_error += error
print("total error(right): {}".format(mean_error / len(objpoints)))

# Stereo stuff
error, mtxl, distl, mtxr, distr, R, T, E, F = cv2.stereoCalibrate(
    objpoints,
Example #32
def get3D(c1, c2, mask1, mask2, glass, _img1=None, _img2=None, drawCentroid=False, drawDimensions=False):
	
	img1 = copy.deepcopy(_img1)
	img2 = copy.deepcopy(_img2)

	centr1 = getCentroid(c1, mask1)
	centr2 = getCentroid(c2, mask2)
	if centr1 is not None and centr2 is not None:
		glass.centroid = triangulate(c1, c2, centr1, centr2)[:-1].reshape(-1,3)

		# Draw centroid
		if drawCentroid:
			
			# Draw 2D centroid of tracking mask
			#cv2.circle(img1, tuple(centr1.astype(int)), 10, (0,128,0), -1)
			#cv2.circle(img2, tuple(centr2.astype(int)), 10, (0,128,0), -1)

			# Draw 3D centroid projected to image
			point1, _ = cv2.projectPoints(glass.centroid, c1.extrinsic['rgb']['rvec'], c1.extrinsic['rgb']['tvec'], c1.intrinsic['rgb'], c1.distCoeffs)
			point2, _ = cv2.projectPoints(glass.centroid, c2.extrinsic['rgb']['rvec'], c2.extrinsic['rgb']['tvec'], c2.intrinsic['rgb'], c2.distCoeffs)

			point1 = point1.squeeze().astype(int)
			point2 = point2.squeeze().astype(int)
			
			cv2.circle(img1, tuple(point1), 6, (128,0,0), -1)
			cv2.circle(img2, tuple(point2), 6, (128,0,0), -1)

		# Draw height and width lines
		if drawDimensions:

			# Get top/bottom points
			top = copy.deepcopy(glass.centroid)
			bottom = copy.deepcopy(glass.centroid)
			top[0,2] += glass.h/2	
			bottom[0,2] -= glass.h/2
			topC1, _ 	= cv2.projectPoints(top, c1.extrinsic['rgb']['rvec'], c1.extrinsic['rgb']['tvec'], c1.intrinsic['rgb'], c1.distCoeffs)
			bottomC1, _ = cv2.projectPoints(bottom, c1.extrinsic['rgb']['rvec'], c1.extrinsic['rgb']['tvec'], c1.intrinsic['rgb'], c1.distCoeffs)
			topC2, _ 	= cv2.projectPoints(top, c2.extrinsic['rgb']['rvec'], c2.extrinsic['rgb']['tvec'], c2.intrinsic['rgb'], c2.distCoeffs)
			bottomC2, _ = cv2.projectPoints(bottom, c2.extrinsic['rgb']['rvec'], c2.extrinsic['rgb']['tvec'], c2.intrinsic['rgb'], c2.distCoeffs)
			topC1 = topC1.squeeze().astype(int)
			bottomC1 = bottomC1.squeeze().astype(int)
			topC2 = topC2.squeeze().astype(int)
			bottomC2 = bottomC2.squeeze().astype(int)

			# Get right/left points
			right = copy.deepcopy(glass.centroid)
			left = copy.deepcopy(glass.centroid)
			right[0,0] += glass.w/2
			left[0,0] -= glass.w/2
			rightC1, _ = cv2.projectPoints(right, c1.extrinsic['rgb']['rvec'], c1.extrinsic['rgb']['tvec'], c1.intrinsic['rgb'], c1.distCoeffs)
			leftC1, _ = cv2.projectPoints(left, c1.extrinsic['rgb']['rvec'], c1.extrinsic['rgb']['tvec'], c1.intrinsic['rgb'], c1.distCoeffs)
			rightC2, _ = cv2.projectPoints(right, c2.extrinsic['rgb']['rvec'], c2.extrinsic['rgb']['tvec'], c2.intrinsic['rgb'], c2.distCoeffs)
			leftC2, _ = cv2.projectPoints(left, c2.extrinsic['rgb']['rvec'], c2.extrinsic['rgb']['tvec'], c2.intrinsic['rgb'], c2.distCoeffs)
			rightC1 = rightC1.squeeze().astype(int)
			leftC1 = leftC1.squeeze().astype(int)
			rightC2 = rightC2.squeeze().astype(int)
			leftC2 = leftC2.squeeze().astype(int)

			cv2.line(img1, tuple(topC1), tuple(bottomC1), (128,0,0), 2)
			cv2.line(img1, tuple(rightC1), tuple(leftC1), (128,0,0), 2)
			cv2.line(img2, tuple(topC2), tuple(bottomC2), (128,0,0), 2)
			cv2.line(img2, tuple(rightC2), tuple(leftC2), (128,0,0), 2)


	return glass, img1, img2
Example #33
 def distort(points):
     points = points.reshape((-1, 2))
     points_3d = numpy.zeros((len(points), 3))
     points_3d[:, 0:2] = points
     return cv2.projectPoints(points_3d, (0, 0, 0), (0, 0, 0), cameraMatrix,
                              distCoeffs)[0]
Example #34
    def process(self, curr_frame):
        """
        Track features from the previous frame to the current frame; once the accumulated parallax exceeds
        a threshold, determine the change in pose between the previous keyframe and the current frame and
        update the state accordingly. Then triangulate the tracked features in the current frame and the
        previous keyframe to obtain 3D observations of the tracked features.
        """

        # Preprocess curr frame
        curr_frame = self.preprocess(curr_frame)

        # First iteration
        if self.prev_frame is None:
            # Detect first features and create first keyframe
            curr_points = self.detect_features(curr_frame)
            self.keyframe_points = curr_points
        else:
            # Track features detected in first image to second image
            curr_points, status,_ = cv.calcOpticalFlowPyrLK(self.prev_frame, curr_frame, self.prev_points, None, **self.lk_params)

            if (np.count_nonzero(status) < self.points_thresh):
                # Detect new features (or add them on to existing ones??)
                print('Detecting New Features, Ran Out', np.count_nonzero(status))
                curr_points = self.detect_features(curr_frame)
                self.keyframe_points = curr_points
            else: 
                # Only keep the points in the images tracked succesfully from the first to the second image
                # Store in seperate variable to preserve shapes of prev, curr, keyframe points i.e  (n, 1, 2) instead of (n,2)
                points_prev = self.prev_points[status == 1]
                points_curr = curr_points[status == 1]
                points_key = self.keyframe_points[status == 1] 

                # Update prev, curr, and keyframe points, reshaped
                self.prev_points = points_prev.reshape((points_prev.shape[0], 1, 2))
                curr_points = points_curr.reshape((points_curr.shape[0], 1, 2))
                self.keyframe_points = points_key.reshape((points_key.shape[0], 1, 2))   

                # Calculate the mean distance between all corresponding points in each image 
                # This is a rough estimation of how much motion has been captured by the frame sequence
                self.parallax = self.parallax + np.mean(np.linalg.norm(points_prev-points_curr, axis=1))         

                #If we have approximately moved enough for an accurate pose estimate
                if (self.parallax > self.parallax_thresh):

                    # Reset self.parallax
                    self.parallax = 0.0    

                    # Recover change in pose between last keyframe and curr points
                    E,_ = cv.findEssentialMat(points_key, points_curr, self.K)
                    _, R, t,_ = cv.recoverPose(E, points_key, points_curr, self.K)

                    if self.R_net is None and self.t_net is None:
                        self.R_net = np.eye(3)
                        self.t_net = np.zeros((3,1))
                    
                    # Store values of self.state before updating it (to calculate reprojection of 3D points)
                    R_net_prev = self.R_net.copy()
                    t_net_prev = self.t_net.copy()

                    # Projection matrix from origin to last keyframe
                    P_key = np.matmul(self.K, np.hstack((self.R_net, self.t_net)))

                    # Update self.state using only pose estimation from essential matrix
                    self.t_net = self.t_net + np.dot(self.R_net, t)
                    self.R_net = np.matmul(R, self.R_net)
                    self.state = self.t_net.flatten()
                    self.state_data.append(self.state.copy())

                    # Projection matrix from last keyframe to curr frame
                    P = np.matmul(self.K, np.hstack((self.R_net, self.t_net)))

                    # Triangulate from keyframe points to curr points
                    points_hom = cv.triangulatePoints(P_key, P, points_key.T.astype(float), points_curr.T.astype(float))
                    
                    # Convert homogenous coordinates to 3D coordinates
                    points_hom[0] = points_hom[0]/points_hom[3]
                    points_hom[1] = points_hom[1]/points_hom[3]
                    points_hom[2] = points_hom[2]/points_hom[3]
                    points_3d = points_hom[:3].T

                    # Check reprojection error of 3D points onto keyframe
                    R_vec_prev,_ = cv.Rodrigues(R_net_prev)
                    points_key_reproj,_ = cv.projectPoints(points_3d, R_vec_prev, t_net_prev, self.K, np.zeros((4,)) )
                    points_key_reproj = points_key_reproj.reshape((points_key_reproj.shape[0], 2))
                    print("Reprojection error of triangulated 3D points onto last keyframe: ", np.linalg.norm(points_key-points_key_reproj))

                    # Plot 3D points at every keyframe creation
                    # ax.scatter(points_3d.T[0], points_3d.T[1], points_3d.T[2])
                    # plt.pause(10)

                    # Solve for R, t between keyframe and curr frame using PNP
                    # _,R_vec, t, inliers = cv.solvePnPRansac(points_3d, points_curr, self.K, np.zeros((4,)))
                    
                    # Make keyframe newly detected points
                    curr_points = self.detect_features(curr_frame)
                    self.keyframe_points = curr_points

        # Draw features on orig image
        for p in curr_points:
            x,y = p.ravel()
            cv.circle(curr_frame, (x,y), 3,  255, -1)

        # Set prev values to curr values
        self.prev_frame = curr_frame
        self.prev_points = curr_points

        return curr_frame
Example #35
def main():
    files = glob.glob('./Calibration_Imgs/*.jpg')
    # given, reference block size is 21.5 units
    # note XY refer to horizontal and vertical of image (not to the axis marked on the reference image)
    wCordXY = np.array([[21.5, 21.5],
                        [21.5,21.5*9],
                        [21.5*6, 21.5*9],
                        [21.5*6,21.5]], dtype= np.float32)

    allImgs= []
    allImgOrg = []
    for File in files:
        img = cv2.imread(File)
        imgOrg =img.copy()
        allImgOrg.append(imgOrg)
        allImgs.append(img)

    allH, all_corn, allcr = findHomo(allImgs, wCordXY)
    vmat, b = VMat(allH)
    A = find_K(b)
    # allRT = getRT(allH, A)
    K = [0,0]
    print('Initial estimate for Camera Intrinsic Matrix -')
    print(A)
    print('')
    param = np.array([A[0,0],0.0,A[0,2],A[1,1],A[1,2],0.0,0.0])
    allW_xy = []
    for i in range(6):
        for j in range(9):
            allW_xy.append(np.array([21.5*(i+1), 21.5*(j+1),0,1]))
    allW_xy = np.array(allW_xy, dtype =np.float64).reshape(54,4)

    res = least_squares(fun, x0=param, method='lm', args=(all_corn, allW_xy, allH))
    print('Minimization convergence status =', res.success)
    print('')
    A_opt = np.zeros([3,3], dtype=np.float64)
    A_opt[0,0] = res.x[0]
    A_opt[0,1] = res.x[1]
    A_opt[0,2] = res.x[2]
    A_opt[1,1] = res.x[3]
    A_opt[1,2] = res.x[4]
    A_opt[2,2] = 1
    k_opt = [res.x[5], res.x[6]]
    print('Optimised Distortion Coefficients -')
    print(k_opt)
    print('')
    print('Optimized Camera Intrinsic Matrix -')
    print(A_opt)
    print('')
    dist_opt = np.array([k_opt[0],k_opt[1],0,0,0])
    allRT_opt = getRT(allH, A_opt)
    err= []
    allW_xy1 = np.array(allW_xy[:,:3], dtype =np.float32)

    for i in range(len(allH)):
        R = allRT_opt[i][:,0:3]
        rvec,_ = cv2.Rodrigues(R)
        t = allRT_opt[i][:,3]
        imgpoints2, _ = cv2.projectPoints(allW_xy1, rvec, t, A_opt,dist_opt)
        diff = allcr[i] - imgpoints2
        error = np.sum(np.linalg.norm(diff,axis=1))/len(imgpoints2)
        err.append(error)
    err =np.array(err)
    rejErr = np.mean(err)
    print('Re-projection error -')
    print(rejErr)
    print('')
    print('Generating undistorted images')
    count =1
    for image in allImgOrg:
        undistImg = cv2.undistort(image,A_opt,dist_opt)
        cv2.imwrite('./undist_output/'+str(count)+'_undistort.png',undistImg)
        count+=1
Example #36
def draw(img, corners, imgpts):
    corner = tuple(corners[0].ravel())
    img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255, 0, 0), 5)
    img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 5)
    img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0, 0, 255), 5)
    return img


criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((6 * 7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)

axis = np.float32([[3, 0, 0], [0, 3, 0], [0, 0, -3]]).reshape(-1, 3)

for fname in glob.glob('calib_images/left03.jpg'):
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)

    if ret == True:
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                    criteria)

        # Find the rotation and translation vectors.
        _, rvecs, tvecs, inliers = cv2.solvePnPRansac(objp, corners2, mtx,
                                                      dist)
        # project 3D points to image plane
        imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)

        img = draw(img, corners2, imgpts)
        cv2.imshow('img', img)
        k = cv2.waitKey(0) & 0xff

cv2.destroyAllWindows()
Example #37
    def triangulate(self, cal, det1, det2):
        """
        Assuming, det1 matches det2, we determine 3d-coordinates of each
        detection and measure the reprojection error.

        References:
            http://answers.opencv.org/question/117141
            https://gist.github.com/royshil/7087bc2560c581d443bc
            https://stackoverflow.com/a/29820184/887074

        Doctest:
            >>> # Rows are detections in img1, cols are detections in img2
            >>> from viame.processes.camtrawl.algos import *
            >>> from viame.processes.camtrawl.demo import *
            >>> detections1, detections2, cal = demodata_detections(target_step='triangulate', target_frame_num=6)
            >>> det1, det2 = detections1[0], detections2[0]
            >>> self = FishStereoMeasurments()
            >>> assignment, assign_data, cand_errors = self.triangulate(cal, det1, det2)
        """
        logger.debug('triangulate')
        _debug = 0
        if _debug:
            # Use 4 corners and center to ensure matrix math is good
            # (hard to debug when ndims == npts, so make npts >> ndims)
            pts1 = np.vstack([det1.box_points(), det1.oriented_bbox().center])
            pts2 = np.vstack([det2.box_points(), det2.oriented_bbox().center])
        else:
            # Use only the corners of the bbox
            pts1 = det1.box_points()[[0, 2]]
            pts2 = det2.box_points()[[0, 2]]

        # Move into opencv point format (num x 1 x dim)
        pts1_cv = pts1[:, None, :]
        pts2_cv = pts2[:, None, :]

        # Grab camera parameters
        K1, K2 = cal.intrinsic_matrices()
        kc1, kc2 = cal.distortions()
        rvec1, tvec1, rvec2, tvec2 = cal.extrinsic_vecs()

        # Make extrincic matrices
        R1 = cv2.Rodrigues(rvec1)[0]
        R2 = cv2.Rodrigues(rvec2)[0]
        T1 = tvec1[:, None]
        T2 = tvec2[:, None]
        RT1 = np.hstack([R1, T1])
        RT2 = np.hstack([R2, T2])

        # Undistort points
        # This puts points in "normalized camera coordinates" making them
        # independent of the intrinsic parameters. Moving to world coordinates
        # can now be done using only the RT transform.
        unpts1_cv = cv2.undistortPoints(pts1_cv, K1, kc1)
        unpts2_cv = cv2.undistortPoints(pts2_cv, K2, kc2)

        # note: triangulatePoints docs say that it wants a 3x4 projection
        # matrix (ie K.dot(RT)), but we only need to use the RT extrinsic
        # matrix because the undistorted points already account for the K
        # intrinsic matrix.
        world_pts_homog = cv2.triangulatePoints(RT1, RT2, unpts1_cv, unpts2_cv)
        world_pts = from_homog(world_pts_homog)

        # Compute distance between 3D bounding box points
        if _debug:
            corner1, corner2 = world_pts.T[[0, 2]]
        else:
            corner1, corner2 = world_pts.T

        # Length is in milimeters
        fishlen = np.linalg.norm(corner1 - corner2)

        # Reproject points
        world_pts_cv = world_pts.T[:, None, :]
        proj_pts1_cv = cv2.projectPoints(world_pts_cv, rvec1, tvec1, K1,
                                         kc1)[0]
        proj_pts2_cv = cv2.projectPoints(world_pts_cv, rvec2, tvec2, K2,
                                         kc2)[0]

        # Check error
        err1 = ((proj_pts1_cv - pts1_cv)[:, 0, :]**2).sum(axis=1)
        err2 = ((proj_pts2_cv - pts2_cv)[:, 0, :]**2).sum(axis=1)
        errors = np.hstack([err1, err2])

        # Get 3d points in each camera's reference frame
        # Note RT1 is the identity and RT are 3x4, so no need for `from_homog`
        # Return points in with shape (N,3)
        pts1_3d = RT1.dot(to_homog(world_pts)).T
        pts2_3d = RT2.dot(to_homog(world_pts)).T
        return pts1_3d, pts2_3d, errors, fishlen
Example #38
    def __init__(self):
        # Variable Initialization
        self.grid_size = 5  # grid_size and grid_center are related
        self.grid_center = 2
        self.bridge = CvBridge()  # ROS/Gazebo to cv2 converter
        self.object_points_index = np.zeros(
            (self.grid_size * self.grid_size, 3),
            np.float32)  # Form object points
        self.object_points_index[:, :2] = np.mgrid[0:self.grid_size,
                                                   0:self.grid_size].T.reshape(
                                                       -1,
                                                       2)  # FOrm object points
        self.number_of_cal_images = 0  # Track number of calibration images taken
        self.object_points = []  # Array for storing object points
        self.image_points = []  # Array for storing image points
        self.axis = np.float32(
            [[3, 0, 0], [0, 3, 0], [0, 0, -3],
             [self.grid_center, self.grid_center,
              0]]).reshape(-1, 3)  # Axis for determining point location
        self.transform = np.zeros((4, 4))
        self.center_from_vehicle = np.zeros((4, 1))
        self.center_in_image = np.zeros((4, 1))
        self.command_timer_start = rospy.get_time()

        # Configuration Parameters
        self.Hertz = 20  # frequency of while loop
        criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001
                    )  # Termination Criteria
        self.cal_images_needed = 10  # Number of Calibration images needed
        self.gain = 1
        PID_x = [
            0.02, 0.5, 3
        ]  # PID Controller Tuning Values (latitude) TODO Tune Controller
        PID_y = [
            0.02, 0.5, 3
        ]  # PID Controller Tuning Values (longitude) TODO Tune Controller
        self.decent_rate = -0.4

        # Subscribers
        rospy.Subscriber("/down_cam/camera/image",
                         Image,
                         self.callbackCamera,
                         queue_size=1)  # Downward Facing Camera Subscriber

        # Publishers
        vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)

        # Messages
        vel_msg = Twist()
        vel_msg.linear.z = self.decent_rate
        vel_msg.angular.x = 0
        vel_msg.angular.y = 0
        vel_msg.angular.z = 0

        # Loop Timing
        rate = rospy.Rate(self.Hertz)
        time.sleep(2)  # allows the callback functions to populate variables

        print("Commencing Camera Calibration")
        while not rospy.is_shutdown():
            # Process Image
            gray_image = cv.cvtColor(self.image, cv.COLOR_BGR2GRAY)

            # Check for corners in the image
            ret, corners = cv.findChessboardCorners(
                gray_image, (self.grid_size, self.grid_size), None)
            # If the corners are seen the image can be processed
            if ret:
                corners2 = cv.cornerSubPix(gray_image, corners, (11, 11),
                                           (-1, -1), criteria)
                if self.number_of_cal_images <= self.cal_images_needed:
                    self.object_points.append(self.object_points_index)
                    self.image_points.append(corners)

                    # Update Calibration Tracker
                    self.number_of_cal_images = self.number_of_cal_images + 1
                    print(
                        "Need {} more images to complete calibration.".format(
                            self.cal_images_needed -
                            self.number_of_cal_images))

                    # Draw Calibration Image
                    cv.drawChessboardCorners(self.image,
                                             (self.grid_size, self.grid_size),
                                             corners2, ret)
                    ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(
                        self.object_points, self.image_points,
                        gray_image.shape[::-1], None, None)

                # If the calibration has been done at least once
                if self.number_of_cal_images > 0:
                    ret, rvecs, tvecs = cv.solvePnP(self.object_points_index,
                                                    corners2, mtx, dist)
                    # rvecs and tvecs are 3x1 arrays
                    # Determine Image Points
                    image_points, jacobian = cv.projectPoints(
                        self.axis, rvecs, tvecs, mtx, dist)
                    annotated_image = self.draw(self.image, corners2,
                                                image_points)
                    # Transform
                    rotation = cv.Rodrigues(
                        rvecs)  # returns (3x3 rotation matrix, 9x3 Jacobian)
                    tvecs = np.transpose(tvecs)
                    self.transform[0:3, 0:3] = rotation[0]
                    self.transform[0:3, 3] = tvecs
                    self.transform = np.linalg.pinv(self.transform)
                    formatted_center = np.transpose(self.axis[3, :])
                    self.center_in_image[0:3, 0] = formatted_center
                    self.center_in_image[3, 0] = 1
                    self.center_from_vehicle = self.transform.dot(
                        self.center_in_image)

                if (self.number_of_cal_images > self.cal_images_needed):
                    x_pid = PID(PID_x[0],
                                PID_x[1],
                                PID_x[2],
                                setpoint=-self.center_from_vehicle[0],
                                sample_time=1 / self.Hertz,
                                output_limits=(-2, 2))
                    y_pid = PID(PID_y[0],
                                PID_y[1],
                                PID_y[2],
                                setpoint=self.center_from_vehicle[1],
                                sample_time=1 / self.Hertz,
                                output_limits=(-2, 2))
                    vel_msg.linear.x = x_pid(self.grid_center)
                    vel_msg.linear.y = y_pid(self.grid_center)
                    vel_pub.publish(vel_msg)

            elif self.number_of_cal_images > self.cal_images_needed:
                vel_msg.linear.x = 0
                vel_msg.linear.y = 0
                vel_pub.publish(vel_msg)

            cv.imshow("Processed Image", self.image)
            cv.waitKey(3)

            rate.sleep()

        cv.destroyAllWindows()
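The self.draw() helper called above is not included in this snippet. A hypothetical sketch, modeled on the axis-drawing helper from the OpenCV pose-estimation tutorial (names and colors are assumptions, not the original method):

    def draw(self, img, corners, imgpts):
        # Hypothetical sketch: draw the three projected axis end-points
        # from the first detected chessboard corner.
        corner = tuple(np.int32(corners[0].ravel()))
        img = cv.line(img, corner, tuple(np.int32(imgpts[0].ravel())), (255, 0, 0), 5)
        img = cv.line(img, corner, tuple(np.int32(imgpts[1].ravel())), (0, 255, 0), 5)
        img = cv.line(img, corner, tuple(np.int32(imgpts[2].ravel())), (0, 0, 255), 5)
        return img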
Example No. 39
# random.shuffle(tmp)
# objpoints_L, imgpoints_L, objpoints_R, imgpoints_R = zip(*tmp)
# objpoints_L = objpoints_L[:valid_num]
# imgpoints_L = imgpoints_L[:valid_num]
# objpoints_R = objpoints_R[:valid_num]
# imgpoints_R = imgpoints_R[:valid_num]

ret, mtxL, distcoeffL, rvecsL, tvecsL = cv2.calibrateCamera(
    objpoints_L, imgpoints_L, size, None, None)
ret, mtxR, distcoeffR, rvecsR, tvecsR = cv2.calibrateCamera(
    objpoints_R, imgpoints_R, size, None, None)
print("Valid img count:" + str(count))

error_L = []
for i in range(len(objpoints_L)):
    imgpoints2, _ = cv2.projectPoints(objpoints_L[i], rvecsL[i], tvecsL[i],
                                      mtxL, distcoeffL)
    error = cv2.norm(imgpoints_L[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
    error_L.append(error)
print(error_L)
error_R = []
for i in range(len(objpoints_R)):
    imgpoints2, _ = cv2.projectPoints(objpoints_R[i], rvecsR[i], tvecsR[i],
                                      mtxR, distcoeffR)
    error = cv2.norm(imgpoints_R[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
    error_R.append(error)
print(error_R)

# stereoCalibrate
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
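The stereoCalibrate call above is cut off at the snippet boundary. For reference, a typical completion under the names already defined here might look like the following (the flag choice and the reuse of objpoints_L as the shared object points are assumptions, not taken from the original):

retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = \
    cv2.stereoCalibrate(objpoints_L, imgpoints_L, imgpoints_R,
                        mtxL, distcoeffL, mtxR, distcoeffR, size,
                        flags=cv2.CALIB_FIX_INTRINSIC)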
Example No. 40
        h, w = img1.shape
        pts = np.float32([[0, 0], [w / 2, 0], [w - 1, 0], [0, h / 2],
                          [w - 1, h / 2], [0, h - 1], [w / 2, h - 1],
                          [w - 1, h - 1]]).reshape(-1, 1, 2)
        #print(M)
        if M is not None and M.shape[0] is not None:
            dst = cv2.perspectiveTransform(pts, M)
            img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3,
                                 cv2.LINE_AA)
        else:
            dst = []
        #print(dst)
        try:
            pnp = cv2.solvePnPRansac(objectPoints, dst, cameraMatrix, None)
            #print(pnp)
            dst, j = cv2.projectPoints(objectPoints, pnp[1], pnp[2],
                                       cameraMatrix, None)
            print(dst)
            print(pnp[2])
            img2 = cv2.circle(img2, (int(dst[0][0][0]), int(dst[0][0][1])), 30,
                              (0, 255, 0), -1)  # Top Left
            img2 = cv2.circle(img2, (int(dst[2][0][0]), int(dst[2][0][1])), 30,
                              (0, 255, 255), -1)  # Bottom Left
            img2 = cv2.circle(img2, (int(dst[5][0][0]), int(dst[5][0][1])), 30,
                              (0, 0, 255), -1)  # Bottom Right
            img2 = cv2.circle(img2, (int(dst[7][0][0]), int(dst[7][0][1])), 30,
                              (255, 255, 0), -1)  # Top Right
            print("c")
            print(roundArray(rotationMatrixToEulerAngles(pnp[1])))
            #eulerAngles = rotationMatrixToEulerAngles(cv2.Rodrigues(pnp[1]))
            #print(eulerAngles)
Example No. 41
 def project(self, p3d):
     img_pts = np.zeros((len(self.cameras), p3d.shape[0], 2))
     for i, c in enumerate(self.cameras):
         proj, _ = cv2.projectPoints(p3d, c.rvec, c.tvec, c.K, c.dist)
         img_pts[i, :, :] = proj[:, 0, :]
     return img_pts
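The loop above simply calls cv2.projectPoints once per camera. A minimal runnable sketch of that per-camera call, with a synthetic identity-pose camera (all values made up for illustration):

import numpy as np
import cv2
from types import SimpleNamespace

# One synthetic camera with the attributes project() expects.
cam = SimpleNamespace(rvec=np.zeros(3), tvec=np.zeros(3),
                      K=np.array([[500.0, 0, 320], [0, 500.0, 240], [0, 0, 1]]),
                      dist=np.zeros(5))
p3d = np.array([[0.0, 0.0, 5.0], [0.1, -0.2, 4.0]])
proj, _ = cv2.projectPoints(p3d, cam.rvec, cam.tvec, cam.K, cam.dist)
print(proj[:, 0, :])  # (N, 2) pixel coordinates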
Example No. 42
def getPoints(im, detector, predictor):
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 1)
    size = im.shape
    for (i, rect) in enumerate(rects):
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
      
        count = 0
        for (x, y) in shape:
            # cv2.circle(im, (x, y), 1, (0, 0, 255), -1)
            if count == 34:
                nose_tip = (x, y)
            elif count == 9:
                chin = (x, y)
            elif count == 46:
                leyelcorner = (x, y)
            elif count == 18:
                reyercorner = (x, y)
            elif count == 55:
                lmouthcorner = (x, y)
            elif count == 49:
                rmouthcorner = (x, y)
            count += 1

    #2D image points. If you change the image, you need to change vector
    image_points = np.array([
                                nose_tip,     # Nose tip
                                chin,     # Chin
                                leyelcorner,     # Left eye left corner
                                reyercorner,     # Right eye right corner
                                lmouthcorner,     # Left Mouth corner
                                rmouthcorner      # Right mouth corner
                            ], dtype="double")
     
    # 3D model points.
    model_points = np.array([
                                (0.0, 0.0, 0.0),             # Nose tip
                                (0.0, -330.0, -65.0),        # Chin
                                (-225.0, 170.0, -135.0),     # Left eye left corner
                                (225.0, 170.0, -135.0),      # Right eye right corner
                                (-150.0, -150.0, -125.0),    # Left Mouth corner
                                (150.0, -150.0, -125.0)      # Right mouth corner
                             
                            ])

    # Camera internals
     
    focal_length = size[1]
    center = (size[1]/2, size[0]/2)
    camera_matrix = np.array(
                             [[focal_length, 0, center[0]],
                             [0, focal_length, center[1]],
                             [0, 0, 1]], dtype = "double"
                             )

    dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
    (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)

    # Project a 3D point (0, 0, 1000.0) onto the image plane.
    # We use this to draw a line sticking out of the nose
     
     
    (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs) 
     
    p1 = ( int(image_points[0][0]), int(image_points[0][1]))
    p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
     
    # cv2.line(im, p1, p2, (255,0,0), 2)
    # cv2.imshow("line", im)
    # cv2.waitKey(0)
    sign = 0

    if p2[0] >= 0 :
        sign = -1
    else : 
        sign = 1

    return (p1, p2, sign)
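A hypothetical usage sketch for getPoints(): it expects a dlib face detector and a 68-landmark shape predictor (the model and image paths below are placeholders):

import cv2
import dlib

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")  # placeholder path
im = cv2.imread("face.jpg")  # placeholder image
p1, p2, sign = getPoints(im, detector, predictor)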
Example No. 43
    point2d = [d["cross2d"] for d in data]
    #print point3d
    p = Program(point3d, point2d, x0, x0_int)
    cam = p.run()
    
    # show reprojection
    fx, fy = x0_int[0], x0_int[1]
    cx, cy = x0_int[2], x0_int[3]
    distCoeffs = (0,0,0,0,0)
    tvec = cam[0:3]  # x,y,z
    rvec = cam[3:6]  # rodrigues

    # project
    cameraMatrix = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

    point2Ds_p, jacobian = cv2.projectPoints(
        npa(point3d, dtype=np.float64), rvec, tvec, cameraMatrix, distCoeffs)
    point2Ds_p_nd, jacobian = cv2.projectPoints(
        npa(point3d, dtype=np.float64), rvec, tvec, cameraMatrix,
        (0., 0., 0., 0., 0.))

    for i, d in enumerate(data):
        image_viz = cv2.imread(save_dir + d['pic_path'])
        
        
        pt_int = tuple([int(round(p)) for p in d['cross2d']])
        cv2.line(image_viz, (pt_int[0]-2, pt_int[1]), (pt_int[0]+2, pt_int[1]), color1)
        cv2.line(image_viz, (pt_int[0], pt_int[1]-2), (pt_int[0], pt_int[1]+2), color1)
        
        pt_int = tuple([int(round(p)) for p in point2Ds_p_nd[i][0]])
        cv2.line(image_viz, (pt_int[0]-2, pt_int[1]), (pt_int[0]+2, pt_int[1]), color3)
        cv2.line(image_viz, (pt_int[0], pt_int[1]-2), (pt_int[0], pt_int[1]+2), color3)
        
        pt_int = tuple([int(round(p)) for p in point2Ds_p[i][0]])
Example No. 44
 def draw_quads(self, img, quads, color=(0, 255, 0)):
     img_quads = cv.projectPoints(quads.reshape(-1, 3), self.rvec,
                                  self.tvec, self.K, self.dist_coef)[0]
     img_quads.shape = quads.shape[:2] + (2, )
     for q in img_quads:
         cv.fillConvexPoly(img, np.int32(q * 4), color, cv.LINE_AA, shift=2)
def camera_intrinsic_calibration(req, image_data):
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    criteria_cal = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10,
                    0.001)
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((6 * 7, 3), np.float32)
    objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane

    for img in image_data:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)

        # If found, add object points, image points (after refining them)
        if ret:
            objpoints.append(objp)
            # Once we find the corners, we can increase their accuracy
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                        criteria)
            imgpoints.append(corners2)

    # index list for imgpoints
    list1 = np.arange(0, len(imgpoints), 1)
    # camera intrinsic matrix
    mtx = np.zeros((3, 3))
    if len(req.params) > 3:
        mtx[0, 0] = req.params[0]
        mtx[1, 1] = req.params[1]
        mtx[0, 2] = req.params[2]
        mtx[1, 2] = req.params[3]
    mtx[2, 2] = 1

    # optimize data step by step based on sampled imgs, get best one
    min_error = 1000
    best_mtx = mtx
    for i in range(10):
        cur_data = list1
        if len(imgpoints) > 20:
            # randomly select 20 keypoints to do calibration
            cur_data = sample(list1, 20)
        cur_obj = list(objpoints[i] for i in cur_data)
        cur_img = list(imgpoints[i] for i in cur_data)
        try:
            ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
                cur_obj,
                cur_img,
                gray.shape[::-1],
                cameraMatrix=mtx,
                distCoeffs=None,
                rvecs=None,
                tvecs=None,
                flags=0,
                criteria=criteria_cal)
        except Exception:
            gray = cv2.cvtColor(image_data[0], cv2.COLOR_BGR2GRAY)
            ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
                cur_obj,
                cur_img,
                gray.shape[::-1],
                cameraMatrix=mtx,
                distCoeffs=None,
                rvecs=None,
                tvecs=None,
                flags=0,
                criteria=criteria_cal)

        # evaluate reprojection error
        tot_error = 0
        for j in range(len(cur_obj)):
            imgpoints2, _ = cv2.projectPoints(cur_obj[j], rvecs[j], tvecs[j],
                                              mtx, dist)
            error = cv2.norm(cur_img[j], imgpoints2,
                             cv2.NORM_L2) / len(imgpoints2)
            tot_error += error
        mean_error = tot_error / len(cur_obj)
        if mean_error < min_error:
            min_error = mean_error
            best_mtx = mtx

        rospy.loginfo(rospy.get_caller_id() + 'I get corners %s',
                      len(imgpoints))
        rospy.loginfo(rospy.get_caller_id() + 'I get parameters %s',
                      best_mtx[0, 0])
    return imgpoints, best_mtx
Example No. 46
    for i, j in zip(range(4), range(4, 8)):
        img = cv.line(img, tuple(imgpts[i]), tuple(imgpts[j]), (255), 3)
    # draw top layer in red color
    img = cv.drawContours(img, [imgpts[4:]], -1, (0, 0, 255), 3)
    return img
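The beginning of this draw() helper is cut off. In the OpenCV pose-estimation tutorial this snippet appears to follow, the function starts by casting the projected points to integers and filling the ground floor in green, roughly:

def draw(img, corners, imgpts):
    # Hypothetical reconstruction of the missing start of draw().
    imgpts = np.int32(imgpts).reshape(-1, 2)
    # draw ground floor in green
    img = cv.drawContours(img, [imgpts[:4]], -1, (0, 255, 0), -3)
    # ...the pillar loop and red top layer shown above follow here.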


criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((6 * 7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)
#axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
axis = np.float32([[0, 0, 0], [0, 3, 0], [3, 3, 0], [3, 0, 0], [0, 0, -3],
                   [0, 3, -3], [3, 3, -3], [3, 0, -3]])

for fname in glob.glob('images/*.jpg'):
    img = cv.imread(fname)
    img = cv.resize(img, None, fx=0.4, fy=0.4)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    ret, corners = cv.findChessboardCorners(gray, (7, 6), None)
    if ret:
        corners2 = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        # Find the rotation and translation vectors.
        ret, rvecs, tvecs, _ = cv.solvePnPRansac(objp, corners2, K, d)
        # project 3D points to image plane
        imgpts, jac = cv.projectPoints(axis, rvecs, tvecs, K, d)
        img = draw(img, corners2, imgpts)
        cv.imshow('img', img)
        k = cv.waitKey(1000)
        if k == ord('s'):
            cv.imwrite(fname[:6] + '.png', img)
cv.destroyAllWindows()
Example No. 47
 def project_many(self, points):
     """Project 3D points in camera coordinates to the image plane."""
     distortion = np.array([self.k1, self.k2, self.p1, self.p2, self.k3])
     K, R, t = self.get_K(), np.zeros(3), np.zeros(3)
     pixels, _ = cv2.projectPoints(points, R, t, K, distortion)
     return pixels.reshape((-1, 2))
def rms_calc(objpoints, rvecs, tvecs, mtx, dist):
    # Note: despite its name, this helper only projects the first set of
    # object points; it does not compute an RMS error itself.
    imgpoints2, jacob = cv2.projectPoints(objpoints[0], rvecs, tvecs, mtx,
                                          dist)
    return imgpoints2
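If an actual RMS-style reprojection error is wanted, a fuller helper might look like the sketch below, assuming the usual cv2.calibrateCamera output shapes (one rvec/tvec pair per calibration view):

import cv2

def reprojection_error(objpoints, imgpoints, rvecs, tvecs, mtx, dist):
    # Sketch: mean per-view reprojection error over all calibration views.
    total = 0.0
    for objp, imgp, rvec, tvec in zip(objpoints, imgpoints, rvecs, tvecs):
        proj, _ = cv2.projectPoints(objp, rvec, tvec, mtx, dist)
        total += cv2.norm(imgp, proj, cv2.NORM_L2) / len(proj)
    return total / len(objpoints)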
Example No. 49
def direct(fiducialPoints, rVec, tVec, linearCoeffs, distCoeffs):
    projected, _ = projectPoints(fiducialPoints, rVec, tVec, linearCoeffs,
                                 distCoeffs)

    return projected
Example No. 50
    def Stereo_Paras(self):

        # criteria_stereo = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

        # Detect the patterns from the calibration board
        objpoints, imgpointsL, imgpointsR, ChessImaL, ChessImaR = self.Stereo_Calib(
        )

        # Get some images for the size of the image
        gray_left = cv2.cvtColor(ChessImaL, cv2.COLOR_BGR2GRAY)
        gray_right = cv2.cvtColor(ChessImaR, cv2.COLOR_BGR2GRAY)

        # Determine the new values for different parameters
        # Calibration_Right_Logitech camera calibration
        retR, mtxR, distR, rvecsR, tvecsR = cv2.calibrateCamera(
            objpoints, imgpointsR, gray_right.shape[::-1], None, None)

        hR, wR = ChessImaR.shape[:2]  # The size of the image
        OmtxR, roiR = cv2.getOptimalNewCameraMatrix(mtxR, distR, (wR, hR), 1,
                                                    (wR, hR))

        # ROI used to crop the image
        # Calibration_Left_Logitech camera calibration
        retL, mtxL, distL, rvecsL, tvecsL = cv2.calibrateCamera(
            objpoints, imgpointsL, gray_left.shape[::-1], None, None)
        hL, wL = ChessImaL.shape[:2]
        OmtxL, roiL = cv2.getOptimalNewCameraMatrix(mtxL, distL, (wL, hL), 1,
                                                    (wL, hL))

        # Check the reprojection errors
        mean_error_L = 0
        tot_error_L = 0
        mean_error_R = 0
        tot_error_R = 0

        for i in range(len(objpoints)):
            imgpoints2_L, _ = cv2.projectPoints(objpoints[i], rvecsL[i],
                                                tvecsL[i], mtxL, distL)
            error = cv2.norm(imgpointsL[i], imgpoints2_L,
                             cv2.NORM_L2) / len(imgpoints2_L)
            tot_error_L += error

        for i in range(len(objpoints)):
            imgpoints2_R, _ = cv2.projectPoints(objpoints[i], rvecsR[i],
                                                tvecsR[i], mtxR, distR)
            error = cv2.norm(imgpointsR[i], imgpoints2_R,
                             cv2.NORM_L2) / len(imgpoints2_R)
            tot_error_R += error

        print("Calibration_Left_Logitech total error: ", tot_error_L)
        print("The total number of left points ", len(objpoints))
        print("Calibration_Left_Logitech mean error: ",
              tot_error_L / len(objpoints))
        print("Calibration_Right_Logitech total error: ", tot_error_R)
        print("The total number of right points ", len(objpoints))
        print("Calibration_Right_Logitech mean error: ",
              tot_error_R / len(objpoints))

        # Stereo calibrate function
        flags = 0
        flags |= cv2.CALIB_FIX_INTRINSIC

        retS = None
        MLS = None
        dLS = None
        dRS = None
        R = None
        T = None
        E = None
        F = None

        retS, MLS, dLS, MRS, dRS, R, T, E, F = cv2.stereoCalibrate(
            objpoints,
            imgpointsL,
            imgpointsR,
            mtxL,
            distL,
            mtxR,
            distR,
            gray_right.shape[::-1],
            flags=cv2.CALIB_FIX_INTRINSIC)
        # criteria_stereo,
        # flags)

        print("The translation between the first and second camera is ", T)
        '''
        Stereo Rectification Process
        '''
        # stereoRectify: the last parameter is alpha (0 = cropped, 1 = not cropped)
        rectify_scale = 0  # 0 = image cropped, 1 = image not cropped
        RL, RR, PL, PR, Q, roiL, roiR = cv2.stereoRectify(
            MLS, dLS, MRS, dRS, gray_right.shape[::-1], R, T, rectify_scale,
            (0, 0))

        # print("The Q matrix is", Q)

        # initUndistortRectifyMap -- build the maps used to undistort and rectify the images
        # cv2.CV_16SC2: this map format lets remap run faster
        Left_Stereo_Map = cv2.initUndistortRectifyMap(MLS, dLS, RL, PL,
                                                      gray_left.shape[::-1],
                                                      cv2.CV_16SC2)
        Right_Stereo_Map = cv2.initUndistortRectifyMap(MRS, dRS, RR, PR,
                                                       gray_right.shape[::-1],
                                                       cv2.CV_16SC2)

        return Left_Stereo_Map, Right_Stereo_Map, Q
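A sketch of how the returned maps are typically consumed (the instance name and input frames are placeholders, not part of the original class):

# Hypothetical usage: rectify a stereo pair with the maps from Stereo_Paras().
Left_Stereo_Map, Right_Stereo_Map, Q = stereo_rig.Stereo_Paras()
left_rect = cv2.remap(frameL, Left_Stereo_Map[0], Left_Stereo_Map[1],
                      cv2.INTER_LINEAR)
right_rect = cv2.remap(frameR, Right_Stereo_Map[0], Right_Stereo_Map[1],
                       cv2.INTER_LINEAR)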
def chessboard_pose(img_dir,
                    img_filename,
                    cam_mtx,
                    cam_dist,
                    objp,
                    pattern=(7, 6)):
    """
    Find the chessboard pose with OpenCV.

    @type  img_dir: string
    @param img_dir: directory of the image
    @type  img_filename: string
    @param img_filename: filename of the image
    @type  cam_mtx: numpy.ndarray
    @param cam_mtx: intrinsic matrix of the camera
    @type  cam_dist: numpy.ndarray
    @param cam_dist: distortion coefficient of the camera
    @type  objp: numpy.ndarray
    @param objp: 3D positions of the points on the chessboard
    @type  pattern: tuple
    @param pattern: pattern of the chessboard
    """
    img_filepath = os.path.join(img_dir, img_filename)
    img_name = img_filename.split('.')[0]
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,
                0.0005)
    chessboard_size_tuple = pattern

    # IR and RGB both read as RGB and then turned to gray
    img = cv2.imread(img_filepath)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    ret, corners = cv2.findChessboardCorners(gray, chessboard_size_tuple, None)

    if ret:
        # Increase corner accuracy
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                    criteria)

        cv2.drawChessboardCorners(img, chessboard_size_tuple, corners, ret)
        cv2.imwrite(os.path.join(img_dir, img_name + "_corner.png"), img)

        # print ("Camera Info")
        # print (cam_mtx)
        # print (cam_dist)

        _, rvecs, tvecs, inlier = cv2.solvePnPRansac(objp, corners2, cam_mtx,
                                                     cam_dist)

        # print ("aa")
        # print (rvecs)
        # print ("t")
        # print (tvecs)

        R, _ = cv2.Rodrigues(rvecs)
        # print ("rotm")
        # print (R)

        imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, cam_mtx, cam_dist)

        imgpts = np.int32(imgpts).reshape(-1, 2)
        # print ("imgpts")
        # print (imgpts)

        img = draw(img, imgpts)
        cv2.imwrite(os.path.join(img_dir, img_name + '_axis.png'), img)

        print("Finish processing: {}".format(img_filename))

        return R, tvecs
    else:
        print("Cannot find chessboard in {}".format(img_filename))
        return None, None
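A hypothetical follow-up: the returned 3x3 rotation and 3x1 translation can be packed into a single homogeneous board-to-camera transform (paths, intrinsics, and objp below are placeholders):

import numpy as np

R, tvecs = chessboard_pose("imgs", "board.png", cam_mtx, cam_dist, objp)
if R is not None:
    T_board_to_cam = np.eye(4)
    T_board_to_cam[:3, :3] = R
    T_board_to_cam[:3, 3] = tvecs.ravel()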
Example No. 52
def main():
    tracker = FaceTracker()
    capturing = False
    captured_eye_data = []
    captured_head_data = []
    captured_mouse_data = []

    while True:
        img = np.ones((1920, 1080, 3), np.uint8)
        cv2.imshow('trainer', img)
        cv2.setWindowProperty('trainer', cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cv2.moveWindow('trainer', 0, 0)
        frame = tracker.tick()
        if DEBUG:
            text_x1 = tracker.face_x1
            text_y1 = tracker.face_y1 - 3
            if text_y1 < 0: text_y1 = 0
            cv2.putText(frame, "FACE", (text_x1, text_y1),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            cv2.rectangle(frame, (tracker.face_x1, tracker.face_y1),
                          (tracker.face_x2, tracker.face_y2), (0, 255, 0), 2)
        if tracker.face_type > 0:
            if DEBUG:
                for point in tracker.landmarks_2D:
                    cv2.circle(frame, (point[0], point[1]), 2, (0, 0, 255), -1)
            (mx, my) = mouse_pos()
            axis = np.float32([[50, 0, 0], [0, 50, 0], [0, 0, 50]])
            imgpts, jac = cv2.projectPoints(axis, tracker.rvec, tracker.tvec,
                                            tracker.camera_matrix,
                                            tracker.camera_distortion)
            sellion_xy = (tracker.landmarks_2D[7][0],
                          tracker.landmarks_2D[7][1])
            cv2.line(frame, sellion_xy, tuple(imgpts[1].ravel()), (0, 255, 0),
                     3)  #GREEN
            cv2.line(frame, sellion_xy, tuple(imgpts[2].ravel()), (255, 0, 0),
                     3)  #BLUE
            cv2.line(frame, sellion_xy, tuple(imgpts[0].ravel()), (0, 0, 255),
                     3)  #RED
            if tracker.left_eye is not None and tracker.right_eye is not None:
                cv2.imshow('left eye', tracker.left_eye)
                cv2.imshow('right eye', tracker.right_eye)
                if capturing:
                    captured_eye_data.append(
                        (tracker.left_eye, tracker.right_eye))
                    captured_head_data.append((tracker.rvec, tracker.tvec))
                    captured_mouse_data.append((mx, my))
                    cv2.circle(frame, (10, 10), 5, (0, 0, 255), -1)
        if DEBUG:
            text_x1 = tracker.roi_x1
            text_y1 = tracker.roi_y1 - 3
            if text_y1 < 0: text_y1 = 0
            cv2.putText(frame, "ROI", (text_x1, text_y1),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
            cv2.rectangle(frame, (tracker.roi_x1, tracker.roi_y1),
                          (tracker.roi_x2, tracker.roi_y2), (0, 255, 255), 2)

        cv2.imshow('Video', frame)
        key = cv2.waitKey(1) & 0xFF
        capturing = key == ord('c')
        if key == ord('w'):
            num_existing_pairs = len(os.listdir('./data/imgs/')) / 2
            for idx, (left, right) in enumerate(captured_eye_data):
                (left_gauss, right_gauss) = add_gaussian_noise([left, right])
                cv2.imwrite(
                    'data/imgs/l/' +
                    str(int((idx * 2 + 1) + num_existing_pairs)) + '.png',
                    left_gauss)
                cv2.imwrite(
                    'data/imgs/r/' +
                    str(int((idx * 2 + 1) + num_existing_pairs)) + '.png',
                    right_gauss)
                cv2.imwrite(
                    'data/imgs/l/' + str(int((idx * 2) + num_existing_pairs)) +
                    '.png', left)
                cv2.imwrite(
                    'data/imgs/r/' + str(int((idx * 2) + num_existing_pairs)) +
                    '.png', right)
            with open('data/data.csv', 'a') as file:
                for idx, (mx, my) in enumerate(captured_mouse_data):
                    (rvec, tvec) = captured_head_data[idx]
                    file.write(
                        str(rvec[0][0]) + ',' + str(rvec[1][0]) + ',' +
                        str(rvec[2][0]) + ',' + str(tvec[0][0]) + ',' +
                        str(tvec[1][0]) + ',' + str(tvec[2][0]) + ',' +
                        str(mx) + ',' + str(my))
                    file.write('\n')
                    file.write(
                        str(rvec[0][0]) + ',' + str(rvec[1][0]) + ',' +
                        str(rvec[2][0]) + ',' + str(tvec[0][0]) + ',' +
                        str(tvec[1][0]) + ',' + str(tvec[2][0]) + ',' +
                        str(mx) + ',' +
                        str(my))  # write twice due to gauss augmentation
                    file.write('\n')
            print("Wrote " + str(len(captured_eye_data) * 2) +
                  " data points to disk.")
            captured_eye_data.clear()
            captured_mouse_data.clear()
            captured_head_data.clear()
        if key == ord('q'): break
    def camera_lidar_read(self, image_pub=None):
        global CAMERA_MODEL, FIRST_TIME, PAUSE, TF_BUFFER, TF_LISTENER, CAMERA_MODEL_INITED, PROJECT_MODE

        # # Projection/display mode
        # if CAMERA_MODEL_INITED and PROJECT_MODE:
        #     rospy.loginfo("Entering project_point_cloud")
        #     self.project_point_cloud(pointcloud_msg, image_msg, image_pub)
        #     PROJECT_MODE = False

        # Calibration mode
        # elif PAUSE:
        i = 1
        while (i < len(self.pcd_files) + 1):

            # image_filename = self.image_files[i]
            image_filename = os.path.join(self.path, self.image_filename % i)
            # pcd_filename = self.pcd_files[i]
            pcd_filename = os.path.join(self.path, self.pcd_filename % i)
            rospy.loginfo("Frame %d. image file: %s. pcd file: %s" %
                          (i, os.path.basename(image_filename),
                           os.path.basename(pcd_filename)))

            if CAMERA_MODEL_INITED and PROJECT_MODE:
                rospy.loginfo("Entering project_point_cloud")
                self.project_point_cloud(pcd_filename, image_filename,
                                         image_pub)
                PROJECT_MODE = False

            self.board_points_num = len(self.board_points)
            self.pixel_points_num = len(self.pixel_points)
            self.lidar_points_num = len(self.lidar_points)

            # Create GUI processes
            now = rospy.get_rostime()
            # proc_pool = multiprocessing.Pool(processes=2)
            # proc_results = []
            self.proc_results = multiprocessing.Queue()
            # proc_results.append(proc_pool.apply_async(self.extract_points_3D, args=(pointcloud_msg, self.lidar_points,)))
            pcl_proc = multiprocessing.Process(
                target=self.extract_points_3D,
                args=[pcd_filename, self.proc_results])
            self.procs.append(pcl_proc)
            # proc_results.append(proc_pool.apply_async(self.extract_points_2D, args=(image_msg, self.board_points, self.pixel_points,)))
            img_proc = multiprocessing.Process(
                target=self.extract_points_2D,
                args=[image_filename, self.proc_results])
            self.procs.append(img_proc)
            # pool.close()  # close the pool and stop accepting new tasks
            # pool.join()   # block the main process until the children exit
            # rospy.loginfo("Starting sub threads.")
            img_proc.start()
            pcl_proc.start()
            img_proc.join()
            pcl_proc.join()
            # rospy.loginfo("All sub threads ended.")
            results = []
            results_valid = True
            if (self.proc_results.empty()):
                results_valid = False
            else:
                try:
                    for proc in self.procs:
                        results.append(self.proc_results.get_nowait())
                except Exception:
                    rospy.logwarn("Get results error. Pass this frame.")
                    results_valid = False
            if (results_valid and len(results) > 1):
                if (len(results[0]) == 1 and len(results[1]) == 2):
                    self.lidar_points.append(results[0][0])
                    self.board_points.append(results[1][0])
                    self.pixel_points.append(results[1][1])
                elif (len(results[1]) == 1 and len(results[0]) == 2):
                    self.lidar_points.append(results[1][0])
                    self.board_points.append(results[0][0])
                    self.pixel_points.append(results[0][1])
                self.frame_count += 1

            print(
                "current number of intermediate variables: board_points %d, pixel_points %d, lidar_points %d"
                % (len(self.board_points), len(
                    self.pixel_points), len(self.lidar_points)))
            del self.procs
            self.procs = []

            # Calibrate for existing corresponding points
            if (self.frame_count >= self.calibrate_min_frames
                    and CAMERA_MODEL_INITED
                    and (self.board_points_num < len(self.board_points))):
                ret, self.camera_matrix, self.dist_coeffs, self.chessboard_to_camera_rvecs, self.chessboard_to_camera_tvecs = cv2.calibrateCamera(
                    self.board_points, self.pixel_points, self.image_shape,
                    None, None)
                # reprojection error
                total_error = 0
                for i in range(len(self.board_points)):
                    imgpoints2, _ = cv2.projectPoints(
                        self.board_points[i],
                        self.chessboard_to_camera_rvecs[i],
                        self.chessboard_to_camera_tvecs[i], self.camera_matrix,
                        self.dist_coeffs)
                    error = cv2.norm(np.array(self.pixel_points[i]),
                                     np.squeeze(imgpoints2),
                                     cv2.NORM_L2) / len(imgpoints2)
                    total_error += error
                print("reprojection error for current camera calibration: ",
                      total_error / len(self.board_points))
                print("self.dist_coeffs:", self.dist_coeffs)
                # save current camera calibration to file
                self.R = np.eye(3, dtype=np.float64)
                self.P = np.zeros((3, 4), dtype=np.float64)
                ncm, _ = cv2.getOptimalNewCameraMatrix(self.camera_matrix,
                                                       self.dist_coeffs,
                                                       self.image_shape, 0)
                for j in range(3):
                    for i in range(3):
                        self.P[j, i] = ncm[j, i]
                calibration_string = cameraCalibrationYamlBuf(
                    self.dist_coeffs,
                    self.camera_matrix,
                    self.R,
                    self.P,
                    self.image_shape,
                    name=self.camera_name)
                if (not os.path.exists(os.path.join(PKG_PATH, CALIB_PATH))):
                    os.mkdir(os.path.join(PKG_PATH, CALIB_PATH))
                with open(
                        os.path.join(
                            PKG_PATH, CALIB_PATH,
                            "intrinsics_" + self.camera_name + ".yaml"),
                        'a') as f:
                    f.write(calibration_string)

                CAMERA_MODEL_INITED = True
                CAMERA_MODEL.K = mkmat(3, 3, self.camera_matrix)
                if (self.dist_coeffs is not None):
                    CAMERA_MODEL.D = mkmat(len(self.dist_coeffs.squeeze()), 1,
                                           self.dist_coeffs)
                else:
                    CAMERA_MODEL.D = None
                CAMERA_MODEL.R = mkmat(3, 3, self.R)
                CAMERA_MODEL.P = mkmat(3, 4, self.P)
                CAMERA_MODEL.full_K = mkmat(3, 3, self.camera_matrix)
                CAMERA_MODEL.full_P = mkmat(3, 4, self.P)
                CAMERA_MODEL.width = self.image_shape[1]
                CAMERA_MODEL.height = self.image_shape[0]
                CAMERA_MODEL.resolution = (CAMERA_MODEL.width,
                                           CAMERA_MODEL.height)
                CAMERA_MODEL.tf_frame = self.camera_name

                self.chessboard_to_camera_R = [
                    cv2.Rodrigues(x)[0]
                    for x in self.chessboard_to_camera_rvecs
                ]
                rospy.loginfo("Entering calibration")
                self.calibrate()
            if (self.success):
                self.calibration_finished = True
                PROJECT_MODE = True

            key = six.moves.input(
                'Press [ENTER] to continue or [q] to quit or [r] to process current frame again: '
            )
            if (key == 'q'):
                print("Exit.")
                sys.exit()
            elif (key != 'r'):
                i += 1
                print("[ENTER] pressed. Process next frame.")
            else:
                print("Process frame %d again" % i)
Example No. 54
    def calibrate(self, video_source="./", dim_x=7, dim_y=7):
        """
        This will call a reference to a video taken, based on ?camera information?
        and will calibrate the camera and save the results in self.calibrated
        Probably calibrate to default video link

        With the calibration we get the distortion coefficients.
        NOTE We use a 7x7 grid
        
        Returns
        -------
            Camera matrix, distortion coefficients, rotation and translation vectors

        Distortion Coefficients = (k1, k2, p1, p2, k3)

        Camera Matrix:
            focal length (fx,fy), optical centers (cx, cy)
            [fx, 0 , cx]
            [0 , fy, cy]
            [0 , 0 , 1 ]
        
        """
        cap = cv2.VideoCapture(video_source)
        if not cap.isOpened():
            raise ValueError("Unable to open video source", video_source)
        print("Calibrating Camera...")

        while True:
            corners_found = 0
            # termination criteria
            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

            # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
            objp = np.zeros((dim_x*dim_y, 3), np.float32)
            objp[:, :2] = np.mgrid[0:dim_x, 0:dim_y].T.reshape(-1, 2)

            # Arrays to store object points and image points from all the images.
            objpoints = [] # 3d point in real world space
            framepoints = [] # 2d points in image plane.
            ret, frame = cap.read()
            if ret:
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                if cv2.waitKey(30) & 0xFF == ord('q'):
                    break

                # print("Finding Corners... ", end ="")
                # Find the chess board corners
                ret, corners = cv2.findChessboardCorners(gray, (dim_x, dim_y), None)
                # print(ret)
                # If found, add object points, image points (after refining them)
                if ret:
                    corners_found += 1
                    objpoints.append(objp)

                    corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
                    framepoints.append(corners2)

                    # Draw and display the corners
                    frame = cv2.drawChessboardCorners(frame, (dim_x, dim_y), corners2, ret)

                    cv2.imshow('frame', frame)

                    ret, cam_mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, framepoints, gray.shape[::-1], None,None)
        
                    w = np.size(frame, 1)
                    h = np.size(frame, 0)
                    newcameramtx, roi=cv2.getOptimalNewCameraMatrix(cam_mtx, dist, (w, h), 1, (w, h))

                    # undistort
                    dst = cv2.undistort(frame, cam_mtx, dist, None, newcameramtx)
                    # crop the image
                    x,y,w,h = roi
                    dst = dst[y:y+h, x:x+w]
                    if x+w == 0:
                        print(cam_mtx)
                        print("Bad export. Trying next frame")
                        continue

                    cv2.imwrite('calibresult.png', dst)

                    tot_error = 0
                    for i in range(len(objpoints)):
                        framepoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], cam_mtx, dist)
                        error = cv2.norm(framepoints[i], framepoints2, cv2.NORM_L2)/len(framepoints2)
                        tot_error += error

                    print("Mean Error: " + str(tot_error/len(objpoints)))
                    
                    # we found a valid frame, so we can use it and stop capturing
                    break
                else:
                    print("Cannot find corners")
            else:
                break
        cap.release()
        cv2.destroyAllWindows()
        return cam_mtx, newcameramtx
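A hypothetical usage sketch (instance name and video path are placeholders). Note the method returns only the two camera matrices; the distortion coefficients would also need to be kept if later frames are to be undistorted:

cam_mtx, newcameramtx = tracker.calibrate(video_source="calibration.mp4",
                                          dim_x=7, dim_y=7)
print(cam_mtx)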
                (R, T) = util.camera_pose_from_homography(mtx_inv, H)
                rvec_, _ = cv2.Rodrigues(R.T)
                r = rot.from_rotvec(rvec_.T).as_euler('xyz', degrees=True)
                cv2.putText(
                    img,
                    'Rotation(Euler angles): X: {:0.2f} Y: {:0.2f} Z: {:0.2f}'.
                    format(r[0][0], r[0][1],
                           r[0][2]), (20, int(frame_height) - 20),
                    cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255))
                cv2.putText(
                    img,
                    'Translation(mm): X: {:0.2f} Y: {:0.2f} Z: {:0.2f}'.format(
                        T[0], T[1], T[2]), (20, int(frame_height) - 60),
                    cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255))
                # Project the boundary of the planar target to the image.
                pts = np.float32([[0, 0, 0], [0, PHY_HEIGHT - 1, 0],
                                  [PHY_WIDTH - 1, PHY_HEIGHT - 1, 0],
                                  [PHY_WIDTH - 1, 0, 0]]).reshape(-1, 1, 3)
                dst, jac = cv2.projectPoints(pts, rvec_, T, mtx, dist)
                img = cv2.polylines(img, [np.int32(dst)], True,
                                    (100, 230, 240), 3, cv2.LINE_AA)
                # Project 3D axes points to the image.
                img_axes_pts, jac = cv2.projectPoints(axes, rvec_, T, mtx,
                                                      dist)
                img = util.draw_axes(img, np.int32(img_axes_pts))
    cv2.imshow('img', img)
    key = cv2.waitKey(delay=1) & 0xFF
    if key == ord("q"):
        break
cv2.destroyAllWindows()
Example No. 56
    def image_jacobian_gen(self, result, corners, ids, CamParam, board_ground,board_panel,id_start_ground, id_board_ground_size, tag_ground_size, loaded_object_points_ground_in_panel_system, display_window):

        rospy.loginfo(str(id_start_ground))
        rospy.loginfo(str(id_board_ground_size))
        rospy.loginfo(str(tag_ground_size))
        rospy.loginfo(str(corners)) #float32
        rospy.loginfo(str(board_ground))  
        rospy.loginfo(str(board_panel)) 

        idx_max = 180
        UV = np.zeros([idx_max,8])
        P = np.zeros([idx_max,3])
        id_valid = np.zeros([idx_max,1])
        
        f_hat_u = CamParam.camMatrix[0][0]
        f_hat_v = CamParam.camMatrix[1][1]
        f_0_u = CamParam.camMatrix[0][2]
        f_0_v = CamParam.camMatrix[1][2]
        
        imgPoints_ground, rvec_ground, tvec_ground, Rca_ground, b_ground = self.get_pose(board_ground, corners, ids, CamParam)
        imgPoints_panel, rvec_panel, tvec_panel, Rca_panel, b_panel = self.get_pose(board_panel, corners, ids, CamParam)
        
        corners_ground = []
        corners_panel = []
        for i_ids,i_corners in zip(ids,corners):
            if i_ids<=(id_start_ground+id_board_ground_size):
                corners_ground.append(i_corners)
            else:
                corners_panel.append(i_corners)
        #rospy.loginfo(str(id_start_ground))
        
        rvec_all_markers_ground, tvec_all_markers_ground, _ = aruco.estimatePoseSingleMarkers(corners_ground, tag_ground_size, CamParam.camMatrix, CamParam.distCoeff)
        rvec_all_markers_panel, tvec_all_markers_panel, _ = aruco.estimatePoseSingleMarkers(corners_panel, 0.025, CamParam.camMatrix, CamParam.distCoeff)
        #rospy.loginfo(str(tvec_all_markers_ground))
        #rospy.loginfo(str(tvec_all_markers_panel))
        tvec_all=np.concatenate((tvec_all_markers_ground,tvec_all_markers_panel),axis=0)
        
        
        for i_ids,i_corners,i_tvec in zip(ids,corners,tvec_all):
            if i_ids<idx_max:
                #print 'i_corners',i_corners,i_corners.reshape([1,8])
                UV[i_ids,:] = i_corners.reshape([1,8]) #np.average(i_corners, axis=1) 
                P[i_ids,:] = i_tvec
                id_valid[i_ids] = 1
    
        
        id_select = range(id_start_ground,(id_start_ground+id_board_ground_size))
        #used to find the height of the tags and the delta change of height, z height at desired position
        Z = P[id_select,2] #- [0.68184539, 0.68560932, 0.68966803, 0.69619578])
        id_valid = id_valid[id_select]
    
        dutmp = []
        dvtmp = []
        
        #Pixel estimates of the ideal ground tag location
        reprojected_imagePoints_ground_2, jacobian2 = cv2.projectPoints(
            loaded_object_points_ground_in_panel_system.transpose(),
            rvec_panel, tvec_panel, CamParam.camMatrix, CamParam.distCoeff)
        reprojected_imagePoints_ground_2 = reprojected_imagePoints_ground_2.reshape([reprojected_imagePoints_ground_2.shape[0],2])
       
        if(display_window):
            frame_with_markers_and_axis = cv2.cvtColor(result, cv2.COLOR_GRAY2BGR)
            frame_with_markers_and_axis = cv2.aruco.drawAxis(
                frame_with_markers_and_axis, CamParam.camMatrix,
                CamParam.distCoeff, Rca_ground, tvec_ground, 0.2)
            frame_with_markers_and_axis = cv2.aruco.drawAxis(
                frame_with_markers_and_axis, CamParam.camMatrix,
                CamParam.distCoeff, Rca_panel, tvec_panel, 0.2)
        
            #plot image points for ground tag from corner detection and from re-projections
            for point1,point2 in zip(imgPoints_ground,np.float32(reprojected_imagePoints_ground_2)):
                cv2.circle(frame_with_markers_and_axis,tuple(point1),5,(0,0,255),3)
                cv2.circle(frame_with_markers_and_axis,tuple(point2),5,(255,0,0),3) 
                
            height, width, channels = frame_with_markers_and_axis.shape
            cv2.imshow(display_window,cv2.resize(frame_with_markers_and_axis, (width/4, height/4)))
            cv2.waitKey(1)
            #Save
            #filename_image = "/home/rpi-cats/Desktop/DJ/Code/Images/Panel2_Acquisition_"+str(t1)+"_"+str(iteration)+".jpg"
            #scipy.misc.imsave(filename_image, frame_with_markers_and_axis)
        
        reprojected_imagePoints_ground_2 = np.reshape(reprojected_imagePoints_ground_2,(id_board_ground_size,8))
        #Go through a particular point in all tags to build the complete Jacobian
        for ic in range(4):
            # uses the ground-tag set; pixel coordinates are re-centered on the
            # principal point (f_0_u, f_0_v) from the camera parameters
            UV_target = np.vstack([reprojected_imagePoints_ground_2[:,2*ic]-f_0_u, reprojected_imagePoints_ground_2[:,2*ic+1]-f_0_v]).T
            uc = UV_target[:,0]
            vc = UV_target[:,1]
    
            UV_current = np.vstack([UV[id_select,2*ic]-f_0_u, UV[id_select,2*ic+1]-f_0_v]).T
            #find difference between current and desired tag difference
            delta_UV = UV_target-UV_current
    
            delete_idx = []
            J_cam_tmp = np.array([])
            for tag_i in range(id_board_ground_size):
                if id_valid[tag_i] == 1:
                    tmp = 1.0*np.array([[uc[tag_i]*vc[tag_i]/f_hat_u, -1.0*(uc[tag_i]*uc[tag_i]/f_hat_u + f_hat_u), vc[tag_i],-f_hat_u/Z[tag_i], 0.0, uc[tag_i]/Z[tag_i]],
                                               [ vc[tag_i]*vc[tag_i]/f_hat_v+f_hat_v, -1.0*uc[tag_i]*vc[tag_i]/f_hat_v, -uc[tag_i],0.0, -f_hat_v/Z[tag_i], vc[tag_i]/Z[tag_i]]])
                    if not (J_cam_tmp).any():
                        J_cam_tmp = tmp
                    else:
                        J_cam_tmp= np.concatenate((J_cam_tmp, tmp), axis=0)
                else:
                    delete_idx.append(tag_i)
                    
            delta_UV = np.delete(delta_UV, delete_idx, 0)
            dutmp.append(np.mean(delta_UV[:,0]))
            dvtmp.append(np.mean(delta_UV[:,1]))
            #camera jacobian
            if ic ==0:
                J_cam = J_cam_tmp
                delta_UV_all = delta_UV.reshape(np.shape(delta_UV)[0]*np.shape(delta_UV)[1],1)
                UV_target_all = UV_target.reshape(np.shape(UV_target)[0]*np.shape(UV_target)[1],1)
            else:
                J_cam = np.vstack([J_cam,J_cam_tmp])
                delta_UV_all = np.vstack([delta_UV_all,delta_UV.reshape(np.shape(delta_UV)[0]*np.shape(delta_UV)[1],1)]) 
                UV_target_all = np.vstack([UV_target_all,UV_target.reshape(np.shape(UV_target)[0]*np.shape(UV_target)[1],1)])
    
        du = np.mean(np.absolute(dutmp))
        dv = np.mean(np.absolute(dvtmp))
        print 'Average du of all points:',du
        print 'Average dv of all points:',dv
        
        return du, dv, J_cam, delta_UV_all
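For reference, the 2x6 blocks assembled above match the standard image-based visual-servoing interaction matrix, written here with centered pixel coordinates (u, v), focal lengths f_u, f_v, and tag depth Z; note that this snippet orders the three angular-velocity columns before the three linear-velocity columns:

L = \begin{bmatrix}
\frac{uv}{f_u} & -\left(\frac{u^2}{f_u} + f_u\right) & v & -\frac{f_u}{Z} & 0 & \frac{u}{Z} \\
\frac{v^2}{f_v} + f_v & -\frac{uv}{f_v} & -u & 0 & -\frac{f_v}{Z} & \frac{v}{Z}
\end{bmatrix}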
Example No. 57
                                                      None)
    img = cv.imread('/home/farman/Downloads/object1.jpg')
    h, w = img.shape[:2]
    newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1,
                                                     (w, h))
    mapx, mapy = cv.initUndistortRectifyMap(mtx, dist, None, newcameramtx,
                                            (w, h), 5)
    dst = cv.remap(img, mapx, mapy, cv.INTER_LINEAR)

    # crop the image
    x, y, w, h = roi
    print(roi)
    dst = dst[y:y + h, x:x + w]
    cv.imwrite('calibresult.png', dst)
    print(ret)
    print('calibration matrix')
    print(mtx)
    print('distortion coefficients')
    print(dist)
    total_error = 0
    for i in range(len(objpoints)):
        imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx,
                                         dist)
        error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2) / len(imgpoints2)
        total_error += error
    print("mean error: {}".format(total_error / len(objpoints)))

print('rotation vectors:', rvecs)
print('???????????????????????????????????????????????')
print('translational vectors', tvecs)
Example No. 58
    dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
    (success, rotation_vector, translation_vector) = cv2.solvePnP(
        model_points,
        image_points,
        camera_matrix,
        dist_coeffs,
        flags=cv2.SOLVEPNP_ITERATIVE)  #flags=cv2.CV_ITERATIVE)

    print "Rotation Vector:\n {0}".format(rotation_vector)
    print "Translation Vector:\n {0}".format(translation_vector)

    # Project a 3D point (0, 0 , 1000.0) onto the image plane
    # We use this to draw a line sticking out of the nose_end_point2D
    (nose_end_point2D,
     jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]),
                                   rotation_vector, translation_vector,
                                   camera_matrix, dist_coeffs)

    for p in image_points:
        cv2.circle(frame, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

    p1 = (int(image_points[0][0]), int(image_points[0][1]))
    p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))

    cv2.line(frame, p1, p2, (255, 0, 0), 2)
    # show the frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
Example No. 59
def prod_trans_point(p3d, rotation_vector, trans_vector, camera_matrix,
                     dist_coeffs):
    plane_point, _ = cv2.projectPoints(np.array([p3d]), rotation_vector,
                                       trans_vector, camera_matrix,
                                       dist_coeffs)
    return (int(plane_point[0][0][0]), int(plane_point[0][0][1]))
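A minimal runnable check of prod_trans_point() with an identity pose (intrinsics made up for illustration): projecting the point (0, 0, 1000) should land on the principal point.

import numpy as np

K = np.array([[800.0, 0, 320], [0, 800.0, 240], [0, 0, 1]])
pt = prod_trans_point((0.0, 0.0, 1000.0), np.zeros(3), np.zeros(3), K, np.zeros(5))
print(pt)  # -> (320, 240)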
Example No. 60
    def _map_binocular(self, p0, p1):

        if '3d' not in p0['method'] or '3d' not in p1['method']:
            return None

        # find the nearest intersection point of the two gaze lines;
        # a line is defined by two points
        s0_center = self.eye0_to_World(np.array(p0['sphere']['center']))
        s1_center = self.eye1_to_World(np.array(p1['sphere']['center']))

        s0_normal = np.dot(self.rotation_matricies[0],
                           np.array(p0['circle_3d']['normal']))
        s1_normal = np.dot(self.rotation_matricies[1],
                           np.array(p1['circle_3d']['normal']))

        gaze_line0 = [s0_center, s0_center + s0_normal]
        gaze_line1 = [s1_center, s1_center + s1_normal]

        nearest_intersection_point, intersection_distance = math_helper.nearest_intersection(
            gaze_line0, gaze_line1)

        if nearest_intersection_point is not None:
            self.last_gaze_distance = np.sqrt(
                nearest_intersection_point.dot(nearest_intersection_point))
            image_point, _ = cv2.projectPoints(
                np.array([nearest_intersection_point]),
                np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0]),
                self.camera_matrix, self.dist_coefs)
            image_point = image_point.reshape(-1, 2)
            image_point = normalize(image_point[0],
                                    self.world_frame_size,
                                    flip_y=True)

        if self.visualizer.window:
            gaze0_3d = s0_normal * self.last_gaze_distance + s0_center
            gaze1_3d = s1_normal * self.last_gaze_distance + s1_center
            self.gaze_pts_debug0.append(gaze0_3d)
            self.gaze_pts_debug1.append(gaze1_3d)
            if nearest_intersection_point is not None:
                self.intersection_points_debug.append(
                    nearest_intersection_point)

            self.sphere0['center'] = s0_center  # sphere center in world coordinates
            self.sphere0['radius'] = p0['sphere']['radius']
            self.sphere1['center'] = s1_center  # sphere center in world coordinates
            self.sphere1['radius'] = p1['sphere']['radius']

        if nearest_intersection_point is None:
            return None

        confidence = (p0['confidence'] + p1['confidence']) / 2.
        ts = (p0['timestamp'] + p1['timestamp']) / 2.
        g = {
            'norm_pos': image_point,
            'eye_centers_3d': {
                0: s0_center.tolist(),
                1: s1_center.tolist()
            },
            'gaze_normals_3d': {
                0: s0_normal.tolist(),
                1: s1_normal.tolist()
            },
            'gaze_point_3d': nearest_intersection_point.tolist(),
            'confidence': confidence,
            'timestamp': ts,
            'base': [p0, p1]
        }
        return g