def estimateTransformToCamera(self, corners_3d, camera_matrix, dist_coeff, rmat, tvec):
    # cv2.solvePnP returns the pose as a Rodrigues rotation vector plus a translation
    # vector; capture the return values instead of passing in empty output arrays
    _, rot_vec, tvec = cv2.solvePnP(corners_3d, self.m_corners, camera_matrix, dist_coeff)
    # convert the rotation vector into a 3x3 rotation matrix
    rmat, _ = cv2.Rodrigues(rot_vec)
    return rmat
Example #2
	def solvePnp(self, image, objpoints, cameraMatrix, distortionVector, patternColumns, patternRows):
		gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
		# the fast-check flag significantly reduces the computation time when the pattern is out of sight
		retval, corners = cv2.findChessboardCorners(gray, (patternColumns,patternRows), flags=cv2.CALIB_CB_FAST_CHECK)

		if retval:
			cv2.cornerSubPix(gray, corners, winSize=(11,11), zeroZone=(-1,-1), criteria=self.criteria)
			if self.useDistortion:
				ret, rvecs, tvecs = cv2.solvePnP(objpoints, corners, cameraMatrix, distortionVector)
			else:
				ret, rvecs, tvecs = cv2.solvePnP(objpoints, corners, cameraMatrix, None)
			return (ret, cv2.Rodrigues(rvecs)[0], tvecs, corners)
Example #3
    def solvePnP(
        self,
        uv3d,
        xy,
        flags=cv2.SOLVEPNP_ITERATIVE,
        useExtrinsicGuess=False,
        rvec=None,
        tvec=None,
    ):
        try:
            uv3d = np.reshape(uv3d, (1, -1, 3))
            xy = np.reshape(xy, (1, -1, 2))
        except ValueError:
            raise

        if uv3d.shape[1] != xy.shape[1] or uv3d.shape[1] == 0 or xy.shape[1] == 0:
            return False, None, None

        res = cv2.solvePnP(
            uv3d,
            xy,
            self.K,
            self.D,
            flags=flags,
            useExtrinsicGuess=useExtrinsicGuess,
            rvec=rvec,
            tvec=tvec,
        )
        return res
    def on_frame(self, vis):
        match = self.match_frames()
        if match is None:
            return
        w, h = getsize(self.frame)
        p0, p1, H = match
        for (x0, y0), (x1, y1) in zip(np.int32(p0), np.int32(p1)):
            cv2.line(vis, (x0+w, y0), (x1, y1), (0, 255, 0))
        x0, y0, x1, y1 = self.ref_rect
        corners0 = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
        img_corners = cv2.perspectiveTransform(corners0.reshape(1, -1, 2), H)
        cv2.polylines(vis, [np.int32(img_corners)], True, (255, 255, 255), 2)

        corners3d = np.hstack([corners0, np.zeros((4, 1), np.float32)])
        fx = 0.9
        K = np.float64([[fx*w, 0, 0.5*(w-1)],
                        [0, fx*w, 0.5*(h-1)],
                        [0.0,0.0,      1.0]])
        dist_coef = np.zeros(4)
        ret, rvec, tvec = cv2.solvePnP(corners3d, img_corners, K, dist_coef)
        verts = ar_verts * [(x1-x0), (y1-y0), -(x1-x0)*0.3] + (x0, y0, 0)
        verts = cv2.projectPoints(verts, rvec, tvec, K, dist_coef)[0].reshape(-1, 2)
        for i, j in ar_edges:
            (x0, y0), (x1, y1) = verts[i], verts[j]
            cv2.line(vis, (int(x0), int(y0)), (int(x1), int(y1)), (255, 255, 0), 2)
Example #5
def find(img, mtx, dist):

    try:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        id7 = cv2.imread('images/fiducial/id7.png')
        gray7 = cv2.cvtColor(id7, cv2.COLOR_BGR2GRAY)

        # start using SIFT
        sift = cv2.SIFT()

        kp, des = sift.detectAndCompute(gray,None)
        id7_kp, id7_des = sift.detectAndCompute(gray7, None)

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        search_params = dict(checks = 50)

        # debug
        if des is None or id7_des is None:
            return (False, None)

        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(id7_des, des, k=2)

        # store all the good matches as per Lowe's ratio test
        good = []
        for m,n in matches:
            if m.distance < 0.7*n.distance:
                good.append(m)

        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([ id7_kp[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([ kp[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()

            h, w = gray7.shape
            
            # draw a square box around the detected fiducial
            pts = np.float32([ [0,0], [0,h-1], [w-1,h-1], [w-1,0] ]).reshape(-1, 1, 2)
            dst = cv2.perspectiveTransform(pts, M)
            cv2.polylines( gray, [np.int32(dst)], True, 255, 3, cv2.CV_AA)

            # Required to allow the solvePnP to work
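            # (objp ends up as the unit square (0,0,0), (0,1,0), (1,1,0), (1,0,0),
            #  matching the corner order of pts / dst above)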
            objp = np.zeros( (2 * 2, 3), np.float32 )
            objp[1,1:2] = 1
            objp[2,0:2] = 1
            objp[3,0:1] = 1

            ret, rvec, tvec = cv2.solvePnP( objp, dst, mtx, dist)
            return get_transform(ret, tvec, rvec)

        else:
            matchesMask = None

            return (False, None)
    except cv2.error as e:
        return (False,None)
Example #6
def projtest():
    numpts = 10
    tro = Utils.getTransform(0, 0, 0, 330, 0, 0, True)
    trt = Utils.getTransform(180, 0, 180, 300, 0, 500, True)
    ttc = Utils.getTransform(0, 0, 0, 0, 0, 20, True)
    tco = np.linalg.inv(ttc).dot(np.linalg.inv(trt).dot(tro))
    # print tro
    # print np.dot(trt, ttc.dot(tco))
    objpts = np.zeros((numpts, 4))
    objpts[:,:2] = np.random.rand(numpts, 2) * 10
    objpts[:,3] = np.ones((numpts,))
    objpts = objpts.T
    print objpts
    imgpts = Utils.camMtx.dot(tco[:3,:].dot(objpts))
    for i in range(numpts):
        imgpts[:,i] /= imgpts[2,i]
    print imgpts
    objpts = objpts[:3, :].T.reshape(-1, 1, 3)
    imgpts = imgpts[:2, :].T.reshape(-1, 1, 2)

    imgpts_ = imgpts + (np.random.rand(numpts, 1, 2) * 2 - np.ones_like(imgpts)) * 0
    print imgpts - imgpts_

    _, rvec, tvec = cv2.solvePnP(np.array(objpts), np.array(imgpts_), Utils.camMtx, None)
    print "--"

    tco_est = np.eye(4)
    tco_est[:3,:3] = cv2.Rodrigues(rvec)[0]
    tco_est[:3,3] = tvec.reshape((3, ))

    tro_est = trt.dot(ttc.dot(tco_est))
    print tro-tro_est
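
# Note: Utils.getTransform and Utils.camMtx come from an external module that is not shown here.
# The sketch below is one plausible implementation of getTransform, assuming the first three
# arguments are Euler angles (in degrees when the last flag is True) applied in Z*Y*X order,
# followed by a translation; the real Utils module may differ.
import numpy as np

def getTransform(rx, ry, rz, tx, ty, tz, degrees=True):
    if degrees:
        rx, ry, rz = np.radians([rx, ry, rz])
    Rx = np.array([[1.0, 0.0, 0.0],
                   [0.0, np.cos(rx), -np.sin(rx)],
                   [0.0, np.sin(rx), np.cos(rx)]])
    Ry = np.array([[np.cos(ry), 0.0, np.sin(ry)],
                   [0.0, 1.0, 0.0],
                   [-np.sin(ry), 0.0, np.cos(ry)]])
    Rz = np.array([[np.cos(rz), -np.sin(rz), 0.0],
                   [np.sin(rz), np.cos(rz), 0.0],
                   [0.0, 0.0, 1.0]])
    T = np.eye(4)
    T[:3, :3] = Rz.dot(Ry).dot(Rx)   # assumed rotation order
    T[:3, 3] = [tx, ty, tz]
    return T
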
Example #7
def get_default_params(corners, ycoords, xcoords):

    # page width and height
    page_width = np.linalg.norm(corners[1] - corners[0])
    page_height = np.linalg.norm(corners[-1] - corners[0])
    rough_dims = (page_width, page_height)

    # our initial guess for the cubic has no slope
    cubic_slopes = [0.0, 0.0]

    # object points of flat page in 3D coordinates
    corners_object3d = np.array([
        [0, 0, 0],
        [page_width, 0, 0],
        [page_width, page_height, 0],
        [0, page_height, 0]])

    # estimate rotation and translation from four 2D-to-3D point
    # correspondences
    _, rvec, tvec = cv2.solvePnP(corners_object3d,
                                 corners, K, np.zeros(5))

    span_counts = [len(xc) for xc in xcoords]

    params = np.hstack((np.array(rvec).flatten(),
                        np.array(tvec).flatten(),
                        np.array(cubic_slopes).flatten(),
                        ycoords.flatten()) +
                       tuple(xcoords))

    return rough_dims, span_counts, params
    def _poseFromQuad(self, quad=None):
        '''
        estimate the pose of the object plane using quad
            setting:
        self.rvec -> rotation vector
        self.tvec -> translation vector
        '''
        if quad is None:
            quad = self.quad
        if quad.ndim == 3:
            quad = quad[0]
        # http://answers.opencv.org/question/1073/what-format-does-cv2solvepnp-use-for-points-in/
        # Find the rotation and translation vectors.
        img_pn = np.ascontiguousarray(quad[:, :2],
                                      dtype=np.float32).reshape((4, 1, 2))

        obj_pn = self.obj_points - self.obj_points.mean(axis=0)
        retval, rvec, tvec = cv2.solvePnP(
            obj_pn,
            img_pn,
            self.opts['cameraMatrix'],
            self.opts['distCoeffs'],
            flags=cv2.SOLVEPNP_P3P  # because exactly four points are given
        )
        if not retval:
            print("Couldn't estimate pose")
        return tvec, rvec
Example #9
    def update_pos_stats(self, cam_matrix, distortion_coefficients, object_points):
        """
        Takes the square's corners found in the image, corresponding 3d
        coordinates, and intrinsic camera information. Sets fields related to
        extrinsic camera information: cam_rot, normal, cam_pos, location.
        Note that the square might be bogus until you check its score, and
        camera extrinsics are unaligned until align squares is called.
        :param cam_matrix:
        :param distortion_coefficients:
        :param object_points:
        :return: nothing
        """

        temp_corners = self.corners.reshape(4, 2, 1).astype(float)

        # Gets the pose of the square in the camera frame (the first return value is a success flag)
        retval, self.rvec, self.tvec = cv2.solvePnP(object_points, temp_corners,
                                                    cam_matrix, distortion_coefficients)
        rot_matrix = cv2.Rodrigues(self.rvec)
        cam_pos = np.multiply(cv2.transpose(rot_matrix[0]), -1).dot(self.tvec)
        cam_to_grid_transform = np.concatenate((cv2.transpose(rot_matrix[0]), cam_pos), axis=1)
        grid_to_cam_transform = np.linalg.inv(np.concatenate((cam_to_grid_transform, np.array([[0, 0, 0, 1]])), axis=0))

        self.cam_rot = list(cam_to_grid_transform.dot(np.array([0, 0, 1, 0])))
        self.normal = grid_to_cam_transform.dot(np.array([0, 0, 1, 0]))
        self.cam_pos = cam_pos
        self.location = [cam_pos[0][0],cam_pos[1][0],cam_pos[2][0]]
 def update(self, time, ims):
     """Update calibration (transforms, markers, and times)."""
     if time in self.times: return
     N = len(self.fridge_config.cameras.items())
     if len(ims) != N: raise Exception("ExtrinsicCalibration update() expects %d images." % N)
     transforms, markers = [], []
     for im in ims:
         all_detected = self.md.detect(im)
         calib = [(m, m_el) for m in all_detected for (m_id, m_el) in self.fridge_config.markers.items()\
                  if int(m_id)==m.id]
         noncalib = [v for v in all_detected if v.id not in map(int, self.fridge_config.markers.keys())]
         cur_transform, cur_markers = None, noncalib
         for combo in [w for v in xrange(len(calib),2,-1) for w in itc(calib,v)]:
             combo_markers, marker_els = zip(*combo)
             combo_corners = np.array([v.corners for v in combo_markers]).reshape(-1,2)
             pts = np.array([v.T_self_in_world.dot(v.points_in_self)[:3, :].T \
                             for v in marker_els]).reshape(-1,3)
             r, rv, tv = cv2.solvePnP(pts, combo_corners, CAMERA_CMAT, CAMERA_DCOEFFS)
             ip = cv2.projectPoints(pts, rv, tv, CAMERA_CMAT, CAMERA_DCOEFFS)[0].reshape(-1,2)
             perror = np.linalg.norm((ip-combo_corners)) / ip.shape[0]
             if perror < 5:
                 R = cv2.Rodrigues(rv)[0]
                 T_world_in_camera = getTransformFromRt(R, tv) # since marker positions in world coords
                 cur_markers = list(combo_markers) + noncalib
                 cur_transform = T_world_in_camera
                 break
         transforms.append(cur_transform)
         markers.append(cur_markers)
     self.transforms.append(transforms)
     self.markers.append(markers)
     self.times.append(time)
def stuff():
    # Get calibration board pose relative to image sensor, expressed in image frame. p_image = rvec*p_board + tvec
    (poseFound,rvec,tvec) = cv2.solvePnP(localObjPts,corners,K,D)
    rvec = np.squeeze(rvec)
    tvec = np.squeeze(tvec)
    q = rvec2quat(rvec)
    
    # Calculate image pose relative to calibration board, expressed in board frame. p_board = qIm2Board*p_image + tIm2Board
    qIm2Board = qConj(q)
    tIm2Board = -1*rotateVec(tvec,qIm2Board)
    
    # Get calibration board pose w.r.t. world, expressed in world frame. p_world = qBoard*p_board + tBoard
    tfl.waitForTransform("/world","/board",imageTimeStamp,rospy.Duration(0.5))
    (tBoard,qBoard) = tfl.lookupTransform("/world","/board",imageTimeStamp)
    
    # Calculate image pose w.r.t. world, expressed in world frame. p_world = qIm2World*p_image + tIm2World
    tIm2World = tBoard + rotateVec(tIm2Board,qBoard)
    qIm2World = qMult(qBoard,qIm2Board)
    tfbr.sendTransform(tIm2World,qIm2World,imageTimeStamp,"image","world")
    
    # Get camera pose w.r.t. world, expressed in world frame. p_world = qCam*p_cam + tCam
    tfl.waitForTransform("/world",cameraTF,imageTimeStamp,rospy.Duration(0.5))
    (tCam,qCam) = tfl.lookupTransform("/world",cameraTF,imageTimeStamp)
    
    # Calculate image pose w.r.t. camera, expressed in camera frame. p_cam = qIm2Cam*p_image + tIm2Cam
    tIm2Cam = rotateVec(tIm2World-tCam,qConj(qCam))
    qIm2Cam = qMult(qConj(qCam),qIm2World)
    
    print tIm2Cam
    print qIm2Cam
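
# Note: rvec2quat, qConj, qMult and rotateVec are helpers that live outside this snippet.
# The sketch below shows one plausible implementation, assuming the ROS-style (x, y, z, w)
# quaternion ordering used by tf; treat it as illustrative rather than the original code.
import numpy as np

def rvec2quat(rvec):
    """Rodrigues rotation vector -> unit quaternion (x, y, z, w)."""
    angle = np.linalg.norm(rvec)
    if angle < 1e-12:
        return np.array([0.0, 0.0, 0.0, 1.0])
    axis = rvec / angle
    return np.append(axis * np.sin(angle / 2.0), np.cos(angle / 2.0))

def qConj(q):
    """Quaternion conjugate (the inverse for unit quaternions)."""
    return np.array([-q[0], -q[1], -q[2], q[3]])

def qMult(q1, q2):
    """Hamilton product of two (x, y, z, w) quaternions."""
    x1, y1, z1, w1 = q1
    x2, y2, z2, w2 = q2
    return np.array([w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
                     w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
                     w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
                     w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2])

def rotateVec(v, q):
    """Rotate a 3-vector v by unit quaternion q: v' = q * (v, 0) * q^-1."""
    vq = np.append(np.asarray(v, dtype=float), 0.0)
    return qMult(qMult(q, vq), qConj(q))[:3]
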
Example #12
def getParams(filename):
    points = detectMarker(filename)
    for p in points:
        if p is None:
            return

    imgPt = np.array([list(p) for p in points], np.float32)

    flag = cv2.CV_ITERATIVE
    retval, rvec, tvec = cv2.solvePnP(objPtMarker.T, imgPt, camMtx, None, flags=flag)

    if retval:
        print(retval, rvec, tvec)
        rmat, jac = cv2.Rodrigues(rvec)
        print "-- rpy --"
        print rpy(rmat)
        tmat = np.zeros((3,4))
        tmat[:3,:3] = rmat
        tmat[:3,3] = tvec.T
        print("-- tmat --")
        print(tmat)
        print "-- projmtx --"
        print np.dot(camMtx, tmat)
        print "-- reproj test --"
        test_reproj(imgPt, objPtMarker.T, tmat, camMtx)
        return tmat, rvec, tvec
    return None, None, None
Example #13
    def createExtrinsicsMatrix(self, chessboardImage, camera_matrix):
        found, corners = cv2.findChessboardCorners(chessboardImage,self.patternSize)
        if not found:
            return

        objectPoints = np.array([self.objectPoint])
        imagePoints = np.array([corners])
        imageShape = chessboardImage.shape[:2]
        _, _, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(objectPoints, imagePoints, imageShape)

        objectPoints = np.asarray(self.objectPoint, np.float64)
        imagePoints = np.asarray(corners, np.float64)
        camera_matrix = np.asarray(camera_matrix, np.float64)
        dist_coefs = np.array([0, 0, 0, 0], np.float64)

        found, rvecs_new, tvecs_new = cv2.solvePnP(objectPoints, imagePoints, camera_matrix, dist_coefs)
        rotationMatrix = cv2.Rodrigues(rvecs_new)[0]

        img = cv2.projectPoints(np.array([np.array([0,0,0], np.float64)]), rvecs_new, tvecs_new, camera_matrix, dist_coefs)
        print img

        extrinsicMatrix = np.zeros((3,4))
        extrinsicMatrix[:,:-1] = rotationMatrix
        extrinsicMatrix[:,-1:] = tvecs_new

        return extrinsicMatrix
def detect_paper_sheet(src_img):
    """
        Detect the paper_sheet on the picture and return its coordinates in pxls
        Input:
            src_img - BGR image
        Output:
            is_detected - is any paper_sheet detected
            paper_sheet_crds - numpy array of 2D coordinates of paper_sheet's
                vertexes on picture in pxls.
            cmr_rvec - the rotation of the paper sheet coordinate system
                as seen in the camera coordinate system
            cmr_tvec - the translation from the camera coordinate system
                origin to the paper sheet coordinate system origin
    """

    gray_img = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)
    vertexes = cv2.Canny(gray_img, 50, 150, apertureSize=3)
    all_contours, hierarchy = cv2.findContours(vertexes, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    is_detected, paper_sheet_crds = find_paper_sheet_in_contours(all_contours)

    # If a paper_sheet was found, refine the vertex coordinates to subpixel
    # precision and draw the found contour on the image
    if is_detected:
        cv2.cornerSubPix(gray_img, paper_sheet_crds, (11, 11), (-1, -1), cornerSubPix_criteria)
        ret_val, cmr_rvec, cmr_tvec = cv2.solvePnP(paper_sheet_vertexes,
                                                   paper_sheet_crds, cam_intr_mtx, cam_dist)
    else:
        cmr_rvec = None
        cmr_tvec = None

    return is_detected, paper_sheet_crds, cmr_rvec, cmr_tvec
Example #15
def contours(img):
	#img = blurred(img)
	#img = cv2.erode(hsvthreshold(img), cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
	img = hsvthreshold(img)
	ret = cv2.merge([img, img, img])
	cons, hier = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
	targets = []

	#cv2.drawContours(ret, np.array(cons), -1, np.array([255.0, 255.0, 0.0]), -1)
	for i in range(len(cons)):
		if cv2.contourArea(cons[i]) >= 1000:
			#print len(cons[i])
			#print len(cv2.convexHull(cons[i]))
			#cons[i] = cv2.convexHull(cons[i])
			cv2.drawContours(ret, np.array(cons), i, np.array([255.0, 0.0, 0.0]), 1)
			pts = []
			for pt in cons[i]:
				pts.append(pt[0])
				#ret[pt[0][1]][pt[0][0]] = np.array([255.0, 0.0, 0.0])
			
			corn = corners(pts, ret)
			if len(corn) == 4:
				retval, rvec, tvec = cv2.solvePnP(target,np.array(corn[:]),cam,dis)
				print np.linalg.norm(tvec)
				#target = pts[:,1].astype(np.double).sum() / len(pts)
				for x, y in corn:
					cv2.circle(ret, (int(x), int(y)), 5, np.array([0.0, 0.0, 255.0]), -1)
	
	return ret
 def calculate_object_pose(self, image_raw, camera_matrix, dist_coeffs, is_grayscale):
     '''
     Calculate 3D pose of calibration object in image given the camera's
     camera matrix and distortion coefficients. 
     
     Returns rotation matrix and translation vector.
     
     @param image_raw: input image, not distortion corrected
     @type  image_raw: cv2 compatible numpy image
     
     @param camera_matrix: camera matrix of camera
     @type  camera_matrix: numpy matrix
     
     @param dist_coeffs: distortion coefficients of camera
     @type  dist_coeffs: numpy matrix
     
     @param is_grayscale: set to true if image is grayscale
     @type  is_grayscale: bool
     '''
     # get image and object points
     image_points = self.detect_image_points(image_raw, is_grayscale)
     object_points = self.calibration_object.get_pattern_points()
     
     # get object pose in raw image (not yet distortion corrected)
     (retval, rvec, tvec) = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs)
     
     # convert rvec to rotation matrix
     rmat = cv2.Rodrigues(rvec)[0]
     return (rmat, tvec)
def get_checkerboard_transform (rgb, chessboard_size=0.024):
    """
    Assuming only one checkerboard is found.
    """

    rtn, corners = get_corners_rgb(rgb, method='pycb')
    if rtn <= 0:
        print "Failed", rtn
        return None
    
    print "Checking",rtn
    if rtn == 1:
        objPoints = get_3d_chessboard_points(corners.shape[0],corners.shape[1],chessboard_size)
        imgPoints = corners.reshape(corners.shape[0]*corners.shape[1],2,order='F')
        
        
        ret, rvec, tvec = cv2.solvePnP(objPoints, imgPoints, cam_mat, dist_coeffs)
        print ret
        if ret:
            ang = np.linalg.norm(rvec)
            dir = rvec/ang
            
            tfm = transformations.rotation_matrix(ang,dir)
            tfm[0:3,3] = np.squeeze(tvec)
            return tfm
        else:
            return None
Example #18
 def match3DModel(self, camera_matrix, camera_dist, res=640, min_nb_of_codes=2):
     if self._hamcodes is None:
         raise NotImplementedError("The current 3D matching algorithm uses Hamming codes and can't work without them")
     res_diff = res/320.  # Because the calibration was made using 320x240 images
     object_points = []
     image_points = []
     nb_of_codes = 0
     for hamcode in self._hamcodes:
         hole_id = int(round(int(hamcode.id)/1000))-1
         if 0 <= hole_id <= 6:  # If the code has been read correctly and is one of the Connect 4 Hamming codes
             nb_of_codes += 1
             object_points.extend(self._model.getHamcode(hole_id))
             # image_points.append(np.uint16(geom.sort_rectangle_corners(hamcode.contours)/res_diff))
             image_points.extend(hamcode.contours)
             # if self._ham_id_to_rect_centres.get(hamcode.id) is not None:
             #     object_points.extend(self._model.getUpperHole(hole_id))
             #     image_points.extend(self._boxes[self._centres_to_indices[self._ham_id_to_rect_centres[hamcode.id]]])
     if nb_of_codes < min_nb_of_codes:
         raise NotEnoughLandmarksException(
             "The model needs at least " + str(min_nb_of_codes) + " detected codes")
     for i in range(len(image_points)):
         image_points[i] = tuple(image_points[i])
         object_points[i] = tuple(object_points[i])
     object_points = np.array(object_points)
     image_points = np.array(image_points) / res_diff
     retval, rvec, tvec = cv2.solvePnP(object_points, image_points, camera_matrix, camera_dist)
     if not retval:
         print "ERR: SolvePnP failed"
     return rvec, tvec
Example #19
    def handle_monocular(self, msg):

        (image, camera) = msg
        gray = self.mkgray(image)
        C = self.image_corners(gray)
        if C:
            linearity_rms = self.mc.linear_error(C, self.board)

            # Add in reprojection check
            image_points = C
            object_points = self.mc.mk_object_points([self.board], use_board_size=True)[0]
            dist_coeffs = numpy.zeros((4, 1))
            camera_matrix = numpy.array( [ [ camera.P[0], camera.P[1], camera.P[2]  ],
                                           [ camera.P[4], camera.P[5], camera.P[6]  ],
                                           [ camera.P[8], camera.P[9], camera.P[10] ] ] )
            ok, rot, trans = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs)
            # Convert rotation into a 3x3 Rotation Matrix
            rot3x3, _ = cv2.Rodrigues(rot)
            # Reproject model points into image
            object_points_world = numpy.asmatrix(rot3x3) * (numpy.asmatrix(object_points.squeeze().T) + numpy.asmatrix(trans))
            reprojected_h = camera_matrix * object_points_world
            reprojected   = (reprojected_h[0:2, :] / reprojected_h[2, :])
            reprojection_errors = image_points.squeeze().T - reprojected.T
            reprojection_rms = numpy.sqrt(numpy.sum(numpy.array(reprojection_errors) ** 2) / numpy.product(reprojection_errors.shape))

            # Print the results
            print("Linearity RMS Error: %.3f Pixels      Reprojection RMS Error: %.3f Pixels" % (linearity_rms, reprojection_rms))
        else:
            print('no chessboard')
Example #20
def calculateProjectionMatrix(world, camera):

    global projectionMatrix
    projectionMatrix = np.array([], dtype=np.float32).reshape(0,3,4)
    for i in range(len(cameras_index)):
        points = camera[i, 0:(worldCoordinates.shape)[0], 0:2]
        point = np.float32(points)
        myworld = np.float32(world)
        print "Camera " + str(i) + " points : "
        print point
        print "World : "
        print myworld
        print "dist_coef : "
        print camera.distortion_coefficient
        print "camera_matrix : "
        print camera.camera_matrix

        ret, rvec, tvec = cv2.solvePnP(myworld, point, camera.camera_matrix, camera.distortion_coefficient)
        rotM_cam = cv2.Rodrigues(rvec)[0]

        # calculate camera position (= translation), in mm from 0,0,0 point
        cameraPosition = -np.matrix(rotM_cam).T * np.matrix(tvec)
        print "Camera " + str(i) + " position : "
        print cameraPosition

        camMatrix = np.append(cv2.Rodrigues(rvec)[0], tvec, 1)
        projMat = np.dot(camera.camera_matrix, camMatrix)

        projectionMatrix = np.append(projectionMatrix, [projMat], axis=0)
    #TODO finish
    def get_pose_3D(self, corners, intrinsics=None, dist_coeffs=None, cam=None, rectified=False):
        '''
        Uses the model of the object, the corresponding pixels in the image, and camera
        intrinsics to estimate a 3D pose of the object.

        @param corners 4x2 numpy array from get_corners representing the 4 sorted corners in the image

        One of the two must be set:
        @param cam PinholeCameraModel object from ImageGeometry package
        @param rectified if cam is set, set True if corners were found in an
               already rectified image (image_rect_color topic)

        Or:
        @param intrinsics camera intrinsic matrix as numpy array
        @param dist_coeffs camera distortion coefficients as numpy array

        @return tuple (tvec, rvec) representing the translation and rotation vector
        between the camera and the object in meters/radians. Use cv2.Rodrigues to convert
        rvec to a 3x3 rotation matrix.
        '''
        corners = np.array(corners, dtype=np.float)
        # Use camera intrinsics and knowledge of the marker's real dimensions to
        # get a pose estimate in camera frame
        if cam is not None:
            intrinsics = cam.intrinsicMatrix()
            if rectified:
                dist_coeffs = np.zeros((5, 1))
            else:
                dist_coeffs = cam.distortionCoeffs()
        assert intrinsics is not None
        assert dist_coeffs is not None
        _, rvec, tvec = cv2.solvePnP(
            self.model_3D, corners, intrinsics, dist_coeffs)
        return (tvec, rvec)
 def detect_pose(self, image, flags=None):
     corners = self._detect_chessboard(image, flags)
     if corners is not None:
         ret, rvecs, tvecs = cv2.solvePnP(
             self.object_pattern_points, corners,
             self.config.calibration.camera_matrix, self.config.calibration.distortion_vector)
         if ret:
             return (cv2.Rodrigues(rvecs)[0], tvecs, corners)
def H_from_Xx(LPA, cam, X, x ):
    '''Get a ladybug translation from a camera's view of the world points.'''
    if X.shape[0] == 4:
        X /= X[3]
    r,t = cv2.solvePnP(X[:3].T, x[:2].T, eye(3), empty(0))[1:3]
    r,t = r.flatten(), t.flatten()
    H = LPA.dot( LPA(cam), decode6(r_[r,t]) )
    return H
Example #24
 def PoseEstimationMethod2(self, corners, patternPoints, cameraMatrix, distCoeffs):
     """This function uses the chessboard pattern for finding the extrinsic parameters (R|T) of the camera in the current view."""
     retval, rvec, tvec = cv2.solvePnP(patternPoints, corners, cameraMatrix, distCoeffs)
     # Convert the rotation vector to a 3 x 3 rotation matrix
     R = cv2.Rodrigues(np.array(rvec))[0]
     # Stack rotationVector and translationVectors so we get a single array
     stacked = np.hstack((R, np.array(tvec)))
     # Now we can get P by getting the dot product of K and the rotation/translation elements
     return np.dot(cameraMatrix, stacked)
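
# A minimal sanity check (not part of the original class): projecting a point with the 3x4
# matrix P = K.[R|t] built above should match cv2.projectPoints for the same pose.
# All values below are made up for illustration only.
import numpy as np
import cv2

K = np.array([[800., 0., 320.], [0., 800., 240.], [0., 0., 1.]])
rvec = np.array([0.1, -0.2, 0.05])
tvec = np.array([0.02, -0.01, 1.5])
R = cv2.Rodrigues(rvec)[0]
P = K.dot(np.hstack((R, tvec.reshape(3, 1))))   # same construction as PoseEstimationMethod2

X = np.array([0.1, 0.2, 0.3])                   # a 3D point in the pattern frame
x_h = P.dot(np.append(X, 1.0))                  # homogeneous image coordinates
uv_from_P = x_h[:2] / x_h[2]
uv_from_cv = cv2.projectPoints(X.reshape(1, 1, 3), rvec, tvec, K, np.zeros(4))[0].ravel()
# uv_from_P and uv_from_cv agree up to floating-point error, since no distortion is modelled
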
def main():
    # file names
    imreadName = "mov0.jpg"              # input image file name
    imwriteName = "image_correction.jpg"    # output file for the undistorted image
    jsonName = "calibrateParameter.json"    # JSON file with the calibration parameters
    
    # chessboard pattern
    square_size = 23.0      # side length of one pattern square [mm]
    pattern_size = (10, 7)  # number of pattern columns and rows
    
    # object points (world coordinate system) and image points (image coordinate system)
    pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
    pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
    pattern_points *= square_size
    object_points = []      # 3D points in real world space
    image_points = []       # 2D points in the image plane
    
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    
    # load the intrinsic parameters
    inputfile = open(jsonName, "r")
    data = json.load(inputfile)
    inputfile.close()
    
    cameraMatrix = np.array(data["CameraMatrix"])
    distCoeffs =np.array(data["DistortCoeffs"])
     
    # load the image in grayscale
    image = cv2.imread(imreadName, 0)
    # detect the chessboard corners
    found, corners = cv2.findChessboardCorners(image, pattern_size)
    # corners were found: refine them to subpixel accuracy
    if found:
        print "Success : chessboard found"
        cv2.cornerSubPix(image, corners, (11, 11), (-1, -1), criteria)
    # corners were not found
    if not found:
        print "Fail : chessboard not found"
        return
    image_points.append(corners.reshape(-1, 2))
    object_points.append(pattern_points)
    imagePoints = np.array(image_points)
    objectPoints = np.array(object_points)
    
    # solvePnP
    retval, rvec, tvec = cv2.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs)
    print "retval: ", retval
    print "rvec: ", rvec
    print "tvec:", tvec
    
    # undistort the image (the imageSize argument expects (width, height), hence the reversed shape)
    newcamera, roi = cv2.getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, image.shape[::-1], 0)
    image_correction = cv2.undistort(image, cameraMatrix, distCoeffs, None, newcamera)
    
    # save the undistorted image
    cv2.imwrite(imwriteName, image_correction)
def calibrate_relative_poses_interactive(image_sets, cameraMatrixs, distCoeffss, imageSizes, boardSizes, board_scales, board_rvecs, board_tvecs):
    """
    Make an estimate of the relative poses (as 4x4 projection matrices) between many cameras.
    Base these relative poses to the first camera.
    
    Each camera should be looking at its own chessboard,
    use chessboards of different sizes if a camera can see chessboards that are not associated with it.
    'board_scales' scales the chessboard-units to world-units.
    'board_rvecs' and 'board_tvecs' transform the rescaled local chessboard-coordinates to world-coordinates.
    
    The inverse of the reprojection error is used for weighting.
    """
    num_cams = len(image_sets)
    num_images = len(image_sets[0])
    reproj_error_max = 0
    
    # Preprocess object-points of the different boards
    board_objps = []
    for boardSize, board_scale, board_rvec, board_tvec in zip(
            boardSizes, board_scales, board_rvecs, board_tvecs ):
        objp = np.ones((np.prod(boardSize), 4))
        objp[:, 0:3] = calibration_tools.grid_objp(boardSize) * board_scale
        objp = objp.dot(trfm.P_from_R_and_t(cvh.Rodrigues(board_rvec), np.array(board_tvec).reshape(3, 1))[0:3, :].T)
        board_objps.append(objp)
    
    # Calculate all absolute poses
    Ps = np.zeros((num_images, num_cams, 4, 4))
    weights = np.zeros((num_images, 1, 1, 1))
    for i, images in enumerate(zip(*image_sets)):
        reproj_error = 0
        for c, (image, cameraMatrix, distCoeffs, imageSize, boardSize, board_objp) in enumerate(zip(
                images, cameraMatrixs, distCoeffss, imageSizes, boardSizes, board_objps )):
            img = cv2.imread(image)
            ret, corners = cvh.extractChessboardFeatures(img, boardSize)
            if not ret:
                print ("Error: Image '%s' didn't contain a chessboard of size %s." % (image, boardSize))
                return False, None
            
            # Draw and display the corners
            cv2.drawChessboardCorners(
                    img, boardSize, corners, ret )
            cv2.imshow("img", img)
            cv2.waitKey(100)
            
            ret, rvec, tvec = cv2.solvePnP(board_objp, corners, cameraMatrix, distCoeffs)
            Ps[i, c, :, :] = trfm.P_from_R_and_t(cvh.Rodrigues(rvec), tvec)
            reproj_error = max(reprojection_error(    # max: worst case
                    [board_objp], [corners], cameraMatrix, distCoeffs, [rvec], [tvec] )[1], reproj_error)
        reproj_error_max = max(reproj_error, reproj_error_max)
        weights[i] = 1. / reproj_error
    
    # Apply weighting on Ps, and rebase against first camera
    Ps *= weights / weights.sum()
    Ps = Ps.sum(axis=0)
    Pref_inv = trfm.P_inv(Ps[0, :, :])    # use first cam as reference
    return True, [P.dot(Pref_inv) for P in Ps], reproj_error_max
 def camera_poses(self,box):
     rows = box.shape[0]        
     poses = np.zeros((3,0))
     for a in range(rows):           
         rtVec, rVec, tVec = cv2.solvePnP(self.qr_points, box[a], self.intrinsic, self.dist_coeff)
         rot = cv2.Rodrigues(rVec)[0]
         rot_mat = np.transpose(rot)
         camera_pose = np.dot(-rot_mat, tVec)
         poses = np.append(poses,camera_pose, axis = 1)
     return poses
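
# Note (not part of the original snippet): the line "camera_pose = np.dot(-rot_mat, tVec)"
# uses the identity C = -R^T t, i.e. the camera centre expressed in the QR/object frame,
# because solvePnP returns the pose of the object in the camera frame (p_cam = R p_obj + t).
# A minimal numeric check with made-up values:
import numpy as np
import cv2

rvec = np.array([0.2, -0.1, 0.3])
tvec = np.array([[0.5], [0.1], [2.0]])
R = cv2.Rodrigues(rvec)[0]
C = -R.T.dot(tvec)                 # camera centre in object coordinates
origin_in_cam = R.dot(C) + tvec    # mapping C back to the camera frame recovers ~[0, 0, 0]
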
Example #28
    def sort_closest_qr(self, qr_list):
        """Returns the list sorted with the closest qr code first."""

        for code in qr_list:
            _, code.rvec, code.tvec = cv2.solvePnP(code.verts
                                               , code.points
                                               , self.cam_matrix
                                               , self.dist_coeffs)
            
        # Finds qr with smallest displacement 
        qr_list.sort(key=lambda qr: qr.displacement, reverse=False)
        return qr_list 
 def get_pose(self, object_points, image_points, ransac=False):
     ip = np.array(image_points, np.float32)
     op = np.array(object_points, np.float32)
     #        op_3d = np.array([o + [0.0] for o in object_points], np.float32)
     #        flag = cv2.CV_P3P
     flag = cv2.CV_ITERATIVE
     if ransac:
         rvec, tvec, inliers = cv2.solvePnPRansac(op, ip, self.cm, self.dc, flags=flag)
         return rvec, tvec
     else:
         ret, rvec, tvec = cv2.solvePnP(op, ip, self.cm, self.dc, flags=flag)
         return rvec, tvec
def calib_camera(model3D, fidu_XY):
    # compute pose using reference 3D points + query 2D points
    ret, rvecs, tvec = cv2.solvePnP(model3D.model_TD, fidu_XY, model3D.out_A, None, None, None, False)
    rmat, jacobian = cv2.Rodrigues(rvecs, None)

    inside = calc_inside(model3D.out_A, rmat, tvec, model3D.size_U[0], model3D.size_U[1], model3D.model_TD)
    if(inside == 0):
        tvec = -tvec
        t = np.pi
        RRz180 = np.asmatrix([np.cos(t), -np.sin(t), 0, np.sin(t), np.cos(t), 0, 0, 0, 1]).reshape((3, 3))
        rmat = RRz180*rmat
    return rmat, tvec
Example #31
world_points = world_points_raw - origin

# load camera matrix defined as:
# [fx 0 cx; 0 fy cy; 0 0 1]
camera_matrix = np.loadtxt(os.path.join(args.input_dir, "camera_matrix.txt"),
                           dtype=float)

dist_coeffs = np.zeros(
    (4, 1)
)  # Assuming no lens distortion - the image points were obtained from a rectified image

# solving the perspective n point projection problem - using the iterative solver
(success, rotation_vector,
 translation_vector) = cv2.solvePnP(world_points,
                                    image_points,
                                    camera_matrix,
                                    dist_coeffs,
                                    flags=cv2.SOLVEPNP_ITERATIVE)

# converting the rotation vector to a rotation matrix
rot_mat, jacobian = cv2.Rodrigues(rotation_vector)

# printing all the parameters
print "Camera Matrix :\n {0}".format(camera_matrix)
print "Rotation Vector:\n {0}".format(rotation_vector)
print "Translation Vector:\n {0}".format(translation_vector)
print "Rotation Matrix: \n {0}".format(rot_mat)

print "saved world_points, rotation matrix and translation matrix text files \n "

np.savetxt(os.path.join(args.output_dir, "rotation_matrix.txt"), rot_mat)
Example #32
data_points = open('Data Points.txt')

for line in data_points:  # deal with data line by line
    data = [float(x) for x in line.split()
            ]  # separate every factor by detecting block space
    if len(data) == 2:  # two factors for image points
        imgpoints.append(data)
    elif len(data) == 3:  # three factors for object points
        objpoints.append(data)
    else:
        exit()

# Change to array of float32
imgpoints = np.asarray(imgpoints, dtype=np.float32)
objpoints = np.asarray(objpoints, dtype=np.float32)

# Find rotation and translation vectors.
rvecs, tvecs = cv2.solvePnP(objpoints, imgpoints, mtx, dist)[1:3]

print('rvecs = ', rvecs)
print('tvecs = ', tvecs)

# Find the rotation matrix (the translation vector is already a vector and
# needs no Rodrigues conversion).
rmat = cv2.Rodrigues(rvecs)[0]

print('rmat = ', rmat)

cv2.destroyAllWindows()
Example #33
    def calculate_head_pose(self):
        """Calculates the head pose angles when a head frame is passed in."""
        self.frame = resize_image(self.frame, width=400)
        gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)

        (h, w) = self.frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(self.frame, (300, 300)), 1.0,
                                     (300, 300), (104.0, 177.0, 123.0))

        self.face_detector.setInput(blob)
        detections = self.face_detector.forward()

        # ONLY IN DEBUG MODE: Draw number of faces on frame
        # self._draw_face_num(faces=faces)

        pitch, yaw, roll = 0.0, 0.0, 0.0

        # loop over the face detections
        for i in range(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with the
            # prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the `confidence` is
            # greater than the minimum confidence
            if confidence < 0.5:
                continue

            # compute the (x, y)-coordinates of the bounding box for the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (start_x, start_y, end_x, end_y) = box.astype("int")

            face = dlib.rectangle(left=start_x,
                                  top=start_y,
                                  right=end_x,
                                  bottom=end_y)

            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy array
            shape = self.landmark_detector(gray, face)

            # 2D model points
            image_points = np.float32([
                (shape.part(7).x, shape.part(7).y),  # 33 - nose bottom
                (shape.part(34).x, shape.part(34).y),  # 8 - chin
                (shape.part(11).x, shape.part(11).y),  # 54 - lip right
                (shape.part(18).x, shape.part(18).y),  # 48 - lip left
                (shape.part(4).x, shape.part(4).y),  # 3 - ear right
                (shape.part(3).x, shape.part(3).y),  # 13 - ear left
            ])

            # 3D model points
            model_points = np.float32([
                (5.0, 0.0, -52.0),  # 33 - nose bottom
                (0.0, -330.0, -65.0),  # 8 - chin
                (150.0, -150.0, -125.0),  # 54 - lip right
                (-150.0, -150.0, -125.0),  # 48 - lip left
                (250.0, -20.0, 40.0),  # 3 - ear right
                (-250.0, -20.0, 40.0),  # 13 - ear left
            ])

            # image properties. channels is not needed so _ is to drop the value
            height, width, _ = self.frame.shape

            # Camera internals (the focal length is approximated by the image width)
            focal_length = width
            center = np.float32([width / 2, height / 2])
            camera_matrix = np.float32([[focal_length, 0.0, center[0]],
                                        [0.0, focal_length, center[1]],
                                        [0.0, 0.0, 1.0]])
            dist_coeffs = np.zeros(
                (4, 1), dtype="float32")  # Assuming no lens distortion

            retval, rvec, tvec = cv2.solvePnP(model_points, image_points,
                                              camera_matrix, dist_coeffs)

            nose_end_point3D = np.float32([[50, 0, 0], [0, 50, 0], [0, 0, 50]])

            nose_end_point2D, jacobian = cv2.projectPoints(
                nose_end_point3D, rvec, tvec, camera_matrix, dist_coeffs)

            camera_rot_matrix, _ = cv2.Rodrigues(rvec)

            pitch, yaw, roll = calculate_euler_angles(rmat=camera_rot_matrix)

            # ONLY IN DEBUG MODE:
            if self.debug:
                # Draw landmarks used head pose estimation
                self._draw_face_landmarks(shape=shape)

                # Draw a green rectangle (and text) around the face.
                self._draw_face_rect(start_x=start_x,
                                     start_y=start_y,
                                     end_x=end_x,
                                     end_y=end_y)

                # Draw points used head pose estimation
                # self._draw_face_points(points=image_points)

                # Write the euler angles on the frame
                self._draw_angles(pitch=pitch,
                                  yaw=yaw,
                                  roll=roll,
                                  left=start_x,
                                  top=start_y)

            # only need one face
            break

        return self.frame, pitch, yaw, roll
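
# Note: calculate_euler_angles() is defined elsewhere in this project. A common way to
# recover pitch/yaw/roll from a 3x3 rotation matrix is sketched below; the axis-to-angle
# mapping and the use of degrees are assumptions, not necessarily the original helper.
import numpy as np

def calculate_euler_angles(rmat):
    sy = np.sqrt(rmat[0, 0] ** 2 + rmat[1, 0] ** 2)
    if sy > 1e-6:
        pitch = np.degrees(np.arctan2(rmat[2, 1], rmat[2, 2]))   # rotation about x
        yaw = np.degrees(np.arctan2(-rmat[2, 0], sy))            # rotation about y
        roll = np.degrees(np.arctan2(rmat[1, 0], rmat[0, 0]))    # rotation about z
    else:
        # near gimbal lock: roll is not observable, so set it to zero
        pitch = np.degrees(np.arctan2(-rmat[1, 2], rmat[1, 1]))
        yaw = np.degrees(np.arctan2(-rmat[2, 0], sy))
        roll = 0.0
    return pitch, yaw, roll
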
Example #34
def calibrate_extrinsic(intr_cam_calib_path, extr_img_path, extr_pts_path):

    img = cv2.imread(extr_img_path, cv2.IMREAD_UNCHANGED)
    camera_matrix, dist_coeffs, w, h = smocap.camera.load_intrinsics(
        intr_cam_calib_path)

    new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
        camera_matrix, dist_coeffs, (w, h), 1, (w, h))
    img_undistorted = cv2.undistort(img, camera_matrix, dist_coeffs, None,
                                    new_camera_matrix)
    cv2.imwrite('/tmp/foo.png', img_undistorted)

    pts_name, pts_img, pts_world = trrvu.read_point(extr_pts_path)
    #pdb.set_trace()
    pts_img_undistorted = cv2.undistortPoints(pts_img.reshape(
        (-1, 1, 2)), camera_matrix, dist_coeffs, None,
                                              new_camera_matrix).squeeze()

    (success, rotation_vector,
     translation_vector) = cv2.solvePnP(pts_world,
                                        pts_img.reshape(-1, 1, 2),
                                        camera_matrix,
                                        dist_coeffs,
                                        flags=cv2.SOLVEPNP_ITERATIVE)
    LOG.info("PnP {} {} {}".format(success, rotation_vector.squeeze(),
                                   translation_vector.squeeze()))

    rep_pts_img = cv2.projectPoints(pts_world, rotation_vector,
                                    translation_vector, camera_matrix,
                                    dist_coeffs)[0].squeeze()
    rep_err = np.mean(np.linalg.norm(pts_img - rep_pts_img, axis=1))
    LOG.info('reprojection error {} px'.format(rep_err))

    world_to_camo_T = smocap.utils.T_of_t_r(translation_vector.squeeze(),
                                            rotation_vector)
    world_to_camo_t, world_to_camo_q = smocap.utils.tq_of_T(world_to_camo_T)
    print(' world_to_camo_t {} world_to_camo_q {}'.format(
        world_to_camo_t, world_to_camo_q))

    camo_to_world_T = np.linalg.inv(world_to_camo_T)
    camo_to_world_t, camo_to_world_q = smocap.utils.tq_of_T(camo_to_world_T)
    print(' cam_to_world_t {} cam_to_world_q {}'.format(
        camo_to_world_t, camo_to_world_q))

    T_c2co = np.array([[0., -1., 0., 0], [0., 0., -1., 0], [1., 0., 0., 0],
                       [0., 0., 0., 1.]])
    T_co2c = np.linalg.inv(T_c2co)
    caml_to_ref_T = np.dot(camo_to_world_T, T_c2co)
    caml_to_ref_t, caml_to_ref_q = smocap.utils.tq_of_T(caml_to_ref_T)
    print(' caml_to_ref_t {} caml_to_ref_q {}'.format(caml_to_ref_t,
                                                      caml_to_ref_q))
    caml_to_ref_rpy = np.asarray(
        tf.transformations.euler_from_matrix(caml_to_ref_T, 'sxyz'))
    print(' caml_to_ref_rpy {}'.format(caml_to_ref_rpy))

    apertureWidth, apertureHeight = 6.784, 5.427
    fovx, fovy, focalLength, principalPoint, aspectRatio = cv2.calibrationMatrixValues(
        camera_matrix, (h, w), apertureWidth, apertureHeight)
    print(fovx, fovy, focalLength, principalPoint, aspectRatio)

    def draw(cam_img, pts_id, keypoints_img, rep_keypoints_img, pts_world):
        for i, p in enumerate(keypoints_img.astype(int)):
            cv2.circle(cam_img, tuple(p), 1, (0, 255, 0), -1)
            cv2.putText(cam_img, '{}'.format(pts_id[i][:1]), tuple(p),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 255, 0), 2)
            cv2.putText(cam_img, '{}'.format(pts_world[i][:2]),
                        tuple(p + [0, 25]), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (0, 255, 0), 1)
        for i, p in enumerate(rep_keypoints_img.astype(int)):
            cv2.circle(cam_img, tuple(p), 1, (0, 0, 255), -1)

    draw(img, pts_name, pts_img, rep_pts_img, pts_world)
    cv2.imshow('original image', img)

    for i, p in enumerate(pts_img_undistorted.astype(int)):
        cv2.circle(img_undistorted, tuple(p), 1, (0, 255, 0), -1)

    cv2.imshow('undistorted image', img_undistorted)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #35
    def laserPointExtraction(self,
                             camera_matrix,
                             dist_coeffs,
                             path_img1,
                             path_img2,
                             cbrow=5,
                             cbcol=9,
                             cbsize=20,
                             accuracy=0.001):
        img = cv2.imread(path_img1)
        img_laser = cv2.imread(path_img2)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # cv2.imshow("distort",gray)

        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, cbsize,
                    accuracy)

        # -------------------------------------------------------------
        # get the rotation matrix and translation vector of the target
        # -------------------------------------------------------------

        # # get corner point from images
        ret, corners = cv2.findChessboardCorners(gray, (cbrow, cbcol), None)
        if ret:
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                        criteria)
            # draw the corner point on the chessboard
            # img_mtx = cv2.drawChessboardCorners(gray, (cbrow, cbcol), corners2, ret)
            # print(corners2)
            # cv2.imshow("get matrices", img_mtx)

        # # use the PnP algorithm to get the rotation matrix and translation vector
        model_points = np.zeros((cbcol * cbrow, 3), np.float32)
        model_points[:, :2] = np.mgrid[0:cbrow, 0:cbcol].T.reshape(-1, 2)
        model_points = model_points * cbsize
        # print(model_points)
        image_points = corners2
        retval, rvecs, tvecs = cv2.solvePnP(
            model_points, image_points, camera_matrix,
            dist_coeffs)  # directly using the image got from camera
        # print(corners2)
        # print(f"rvec = {rvecs}")
        # print(f"tvec = {tvecs*cbsize}")
        rmat = cv2.Rodrigues(rvecs)[0]
        # rtmatrix = np.vstack((np.hstack((rmat, tvecs * cbsize)), [0, 0, 0, 1]))
        rmat_inv = np.linalg.pinv(rmat)

        # # set the variable for later use
        # plane vector parameter - extract
        r31 = rmat_inv[2, 0]
        r32 = rmat_inv[2, 1]
        r33 = rmat_inv[2, 2]
        tz = (tvecs[-1])
        tx = (tvecs[0])
        ty = (tvecs[1])
        # print(tvecs)

        fx = camera_matrix[0, 0]
        fy = camera_matrix[1, 1]
        u0 = camera_matrix[0, 2]
        v0 = camera_matrix[1, 2]

        # -------------------------------------------------------------
        # get the 2D coordinates of laser points
        # -------------------------------------------------------------

        # # get the corner point

        img = cv2.undistort(img, camera_matrix, dist_coeffs)
        gray_undistorted = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray_undistorted,
                                                 (cbrow, cbcol), None)
        if ret:
            corners2_new = cv2.cornerSubPix(gray_undistorted, corners,
                                            (11, 11), (-1, -1), criteria)
            # draw the corner point on the chessboard
            img = cv2.drawChessboardCorners(img, (cbrow, cbcol), corners2_new,
                                            ret)
        # print(corners2_new)

        # # get the laser point

        img_laser = cv2.undistort(img_laser, camera_matrix, dist_coeffs)
        # cv2.imshow("laser", img_laser)
        fit_laser = get_laser(img_laser, method='skeleton')
        # # mark the sampled laser-line positions on the image
        for i in fit_laser:
            cv2.circle(img_laser, (int(i[0]), int(i[1])), 1, (0, 0, 0))

        # # laser line fitting (the laser line should be straight, so it can be fitted to reduce noise)

        fit_laser_nom = np.polyfit(fit_laser[:, 0], fit_laser[:, 1],
                                   1)  # fittinjg laser line
        fit_laser_nom_figure = np.polyfit(
            fit_laser[:, 1], fit_laser[:, 0],
            1)  # reverse fitting laser line # the position of x,y are changed

        # # draw the white fitting line for laser extraction
        y1 = 0
        x1 = int(np.polyval(fit_laser_nom_figure, y1))
        y2 = 1000
        x2 = int(np.polyval(fit_laser_nom_figure, y2))
        cv2.line(img_laser, (x1, y1), (x2, y2), (255, 255, 255), 1,
                 4)  # draw the white fitting line for laser extraction

        # # fitting the grid of chessboard

        fit_corners = corners2_new.reshape((cbcol, cbrow, 2))
        # print(fit_corners) # print all corner points
        cross_list = []
        for ir in range(cbrow):
            # # # print(fit_corners[:, ir, :])  # print all x of corner points
            fit_corners_nom = np.polyfit(fit_corners[:, ir, 0],
                                         fit_corners[:, ir, 1], 1)
            x1 = 100
            y1 = int(np.polyval(fit_corners_nom, 100))
            x2 = 1500
            y2 = int(np.polyval(fit_corners_nom, 1500))
            cv2.line(img_laser, (x1, y1), (x2, y2), (0, 255, 0), 2, 4)

            # # # get the cross points of laser line and grid line
            x_cross = -(fit_laser_nom[1] - fit_corners_nom[1]) / (
                fit_laser_nom[0] - fit_corners_nom[0])
            y_cross = np.polyval(fit_corners_nom, x_cross)
            # print(x_cross,y_cross) # show the cross point between chess board and laser

            # -------------------------------------------------------------
            # conversion of the 2D coordinates  to 3D coordinates of laser points
            # -------------------------------------------------------------
            # pixel coordinates of the intersection (needed by both solving methods)
            u = x_cross
            v = y_cross
            if self.solving_method == "cross-ratio":
                # cross-ratio invariance method
                # print(u,v)
                a = fit_corners[0, ir, :]
                b = fit_corners[1, ir, :]
                d = fit_corners[-1, ir, :]
                ab = np.linalg.norm(a - b)
                ad = np.linalg.norm(a - d)
                ac = np.linalg.norm(a - [x_cross, y_cross])
                # print(a, b, d)
                # print(ab, ad, ac)
                cr = (ac / (ac - ab)) / (ad / (ad - ab))
                AD = cbsize * (cbcol - 1)
                AB = cbsize
                r = AD / (AD - AB)
                AC = (cr * r) / (cr * r - 1) * AB
                # print(AD,AB,AC)
                Yt = AC
                Xt = ir * cbsize
                # print(Xt,Yt,0)
                [Xc, Yc, Zc] = rmat @ [[Xt], [Yt], [0]] + tvecs

            if self.solving_method == "spatial geometry":
                # spatial geometry method
                Xc = (fy * (u - u0) * (r31 * tx + r32 * ty + r33 * tz)) / (
                    fx * fy * r33 + fy * r31 * u - fy * r31 * u0 +
                    fx * r32 * v - fx * r32 * v0)
                Yc = (fx * (v - v0) * (r31 * tx + r32 * ty + r33 * tz)) / (
                    fx * fy * r33 + fy * r31 * u - fy * r31 * u0 +
                    fx * r32 * v - fx * r32 * v0)
                Zc = (fx * fy * (r31 * tx + r32 * ty + r33 * tz)) / (
                    fx * fy * r33 + fy * r31 * u - fy * r31 * u0 +
                    fx * r32 * v - fx * r32 * v0)

            cross_list.append([Xc[0], Yc[0], Zc[0]])

            # print(Xc, Yc, Zc)

            #  # the following method is only valid when camera is vertical to the board

            #     whole_length = np.linalg.norm(ifit[0] - ifit[-1])
            #     cross_length = np.linalg.norm(ifit[0] - [x_cross, y_cross])
            #     x_cross_target = cross_length / whole_length * (cbrow - 1) * cbsize
            #     y_cross_target = nfit * cbsize
            #     cross_list.append([x_cross_target, y_cross_target])

            # whole_length = np.linalg.norm(fit_corners[0, ir] - fit_corners[-1, ir])
            # cross_length = np.linalg.norm(fit_corners[0, ir] - [x_cross, y_cross])
            # x_cross_target = ir * cbsize
            # y_cross_target = cross_length / whole_length * (cbcol - 1) * cbsize
            # print(cross_length,whole_length)
            # print(x_cross_target,y_cross_target)

            # # # draw the  intersection on the scream

            cv2.circle(img_laser, (int(x_cross), int(y_cross)), 5,
                       (255, 255, 255), 2)

        cv2.imshow("img", img_laser)
        cv2.waitKey(0)
        return np.array(cross_list)
    chin_x = pts[2] + (pts[4] - pts[2]) + (pts[3] - pts[2])
    chin_y = pts[7] + (pts[9] - pts[7]) + (pts[8] - pts[7])
    image_points = np.array(
        [
            (pts[2] * w + x, pts[7] * h + y),
            # (chin_x*w+x, chin_y*h+y),
            (pts[0] * w + x, pts[5] * h + y),
            (pts[1] * w + x, pts[6] * h + y),
            (pts[3] * w + x, pts[8] * h + y),
            (pts[4] * w + x, pts[9] * h + y)
        ],
        dtype="double")

    if not last_frame_valid:
        (success, rotation_vector,
         translation_vector) = cv2.solvePnP(model_points, image_points,
                                            camera_matrix, dist_coeffs)
    else:
        (success, rotation_vector, translation_vector) = cv2.solvePnP(
            model_points, image_points, camera_matrix, dist_coeffs,
            last_rotation_vector, last_translation_vector, True)

    # print(rotation_vector, translation_vector)

    if success:
        rotation_matrix, _ = cv2.Rodrigues(rotation_vector)
        direction = np.dot(rotation_matrix, np.array([[1], [0], [0]]))
        if direction[0][0] >= 0:
            print("rotation vector", rotation_vector)
            norm = np.linalg.norm(rotation_vector)
            rotation_vector_normalized = rotation_vector / norm
            print("norm", norm)
Example #37
def PnP(imgpts, objpts, K=None, shape=None, ransac=False):

    # Finds R, T of an object w.r.t the camera given:
    #     objpts: the coordinates of keypoints in the object's frame
    #     imgpts: the pixel coordinates of those keypoints in the image
    # Input shapes -- imgpts: nx2, objpts: nx3, K: 3x3
    # If K or shape is None, then imgpts is in camera coordinates (meters not pixels). No x/y flipping or K matrix is applied.

    if type(imgpts) == list or type(imgpts) == tuple:
        imgpts = np.stack(imgpts, axis=0)
    if type(objpts) == list or type(objpts) == tuple:
        objpts = np.stack(objpts, axis=0)

    imgpts = imgpts.astype(np.float64)
    objpts = objpts.astype(np.float64)

    # opencv coordinate frame w.r.t. my coordinate frame
    Rcoord = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])

    if K is None or shape is None:
        K = np.eye(3)
        imgpts = np.stack((imgpts[:, 0], imgpts[:, 1]),
                          axis=1)  # if imgpts is nx3, make it nx2
    else:
        K = swapK(K)  # convert to [horizontal, vertical] for opencv
        K[1, 2] = shape[0] - K[1, 2]  # opencv imgcenter is relative to top left instead of bottom left
        imgpts = np.stack(
            (imgpts[:, 1], imgpts[:, 0]),
            axis=1)  # convert to [horizontal, vertical] for opencv
        objpts = (Rcoord.T @ objpts.T).T

    imgpts = np.expand_dims(imgpts, axis=1)  # add extra dimension so solvePnP won't error out (now nx1x2)

    if ransac:
        _, rvec, T, inliers = cv2.solvePnPRansac(objpts,
                                                 imgpts,
                                                 K,
                                                 np.array([[0, 0, 0, 0]]),
                                                 flags=cv2.SOLVEPNP_EPNP)
        print(inliers)
    else:
        _, rvec, T = cv2.solvePnP(objpts,
                                  imgpts,
                                  K,
                                  np.array([[0, 0, 0, 0]]),
                                  flags=cv2.SOLVEPNP_EPNP)

    R = cv2.Rodrigues(rvec)[0]  # axis angle to rotation matrix

    if not (K is None or shape is None):
        T = Rcoord @ T  # transform back to unrotated coordinate frame (pre multiply for local transform)

    if ransac:
        return (R, T, inliers)
    else:
        return (R, T)
Example #38
def extract_features_from_openface(csv_file,
                                   frame_begin,
                                   frame_end,
                                   im,
                                   mode='global'):
    # print(img_size)
    size = im.shape
    focal_length = size[1]
    center = (size[1] / 2, size[0] / 2)
    camera_matrix = np.array([[focal_length, 0, center[0]],
                              [0, focal_length, center[1]], [0, 0, 1]],
                             dtype="double")

    image_coord = []
    world_coord = []
    if mode == 'global':
        select_idx = [i for i in range(1, 37)] + [49, 55]
    else:
        select_idx = [i for i in range(18, 37)] + [49, 55]
    nose_center_idx = select_idx.index(33)  # position of the nose-centre landmark (id 33)

    for idx in select_idx:
        featx = np.array(
            csv_file.iloc[frame_begin:frame_end][' ' + 'x_{}'.format(idx)])
        featy = np.array(
            csv_file.iloc[frame_begin:frame_end][' ' + 'y_{}'.format(idx)])

        image_coord.append(np.stack([featx, featy], axis=0))

        featX = np.array(
            csv_file.iloc[frame_begin:frame_end][' ' + 'X_{}'.format(idx)])
        featY = np.array(
            csv_file.iloc[frame_begin:frame_end][' ' + 'Y_{}'.format(idx)])
        featZ = np.array(
            csv_file.iloc[frame_begin:frame_end][' ' + 'Z_{}'.format(idx)])
        world_coord.append(np.stack([featX, featY, featZ], axis=0))

    image_coord = np.stack(image_coord, axis=1).reshape(1, 1, 2, -1).transpose(
        (0, 3, 1, 2)).astype(np.float32).squeeze(0).squeeze(1)

    world_coord = np.stack(world_coord, axis=1).reshape(1, 3, -1).transpose(
        (0, 2, 1)).astype(np.float32).squeeze(0)

    dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
    (success, rvec,
     tvec) = cv2.solvePnP(world_coord, image_coord, camera_matrix,
                          dist_coeffs)  # , flags=cv2.SOLVEPNP_ITERATIVE)

    (nose_end_point2D,
     jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rvec, tvec,
                                   camera_matrix, dist_coeffs)
    for p in image_coord:
        cv2.circle(im, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

    p1 = (int(image_coord[nose_center_idx][0]),
          int(image_coord[nose_center_idx][1]))
    p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
    if mode == 'global':
        cv2.line(im, p1, p2, (255, 0, 0), 2)  # blue
    else:
        cv2.line(im, p1, p2, (0, 0, 255), 2)  # red

    cv2.imshow("Output", im)
    cv2.waitKey(0)
Exemple #39
0
sample = np.random.uniform(1, 10, size=(5, 3))
sampling3D1 = np.random.uniform(0, 20, size=(100, 3))
sampling2D1 = np.random.uniform(0, 5, size=(100, 2))
objIdx1 = np.random.randint(100, size=(hyp_num, 4))
objIdx1 = np.cast['int32'](objIdx1)
shuffleIdx1 = np.zeros((8, 100), dtype=np.int32)  # np.int was removed in recent NumPy
for i in range(shuffleIdx1.shape[0]):
    shuffleIdx1[i] = np.arange(100)
    np.random.shuffle(shuffleIdx1[i])
shuffleIdx1 = np.cast['int32'](shuffleIdx1)
objPts1 = copy.deepcopy(sampling3D1[objIdx1])
imgPts1 = copy.deepcopy(sampling2D1[objIdx1])
in_hyps = np.zeros((hyp_num, 6))
for h in range(hyp_num):
    done0, rot0, tran0 = cv2.solvePnP(objPts1[h],
                                      imgPts1[h],
                                      np_cmat,
                                      distCoeffs=None)
    in_hyps[h] = np.append(rot0, tran0)
diffMaps1 = np.zeros((hyp_num, 100))
#for i in range(1):
#	diffMaps1[i] = getDiffMap(in_hyps[i], sampling3D1, sampling2D1, np_cmat, D)

tf_objIdx = tf.constant(objIdx1)
tf_diffMaps = tf.constant(diffMaps1)
tf_sample3D = tf.constant(sampling3D1)
tf_sample2D = tf.constant(sampling2D1)
tf_hyp = tf.constant(in_hyps)
tf_objPts = tf.constant(objPts1)
tf_imgPts = tf.constant(imgPts1)
tf_shuffleIdx = tf.constant(shuffleIdx1)
out = refine([
Exemple #40
0
def computeMarker(img, flagFound, bbox, camera_matrix):
    #bbox = (0,0,0,0)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

    candidates = []
    candidates_contours = []
    candidates_len = []

    threshImage = cv.adaptiveThreshold(gray, 255,
                                       cv.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv.THRESH_BINARY, 5, 3)

    contours, hierarchy = cv.findContours(threshImage, cv.RETR_LIST,
                                          cv.CHAIN_APPROX_NONE)
    drawing = np.zeros((threshImage.shape[0], threshImage.shape[1], 3),
                       dtype=np.uint8)
    for cnt in contours:
        cnt_len = cv.arcLength(cnt, True)
        cnt_orig = cnt
        cnt = cv.approxPolyDP(cnt,
                              params.polygonalApproxAccuracyRate * cnt_len,
                              True)
        if len(cnt) == 4 and cv.contourArea(cnt) > 80 and cv.contourArea(
                cnt) < 0.2 * width * height and cv.isContourConvex(cnt):
            cv.drawContours(drawing, contours, 1, (0, 255, 0), 1, cv.LINE_8,
                            hierarchy, 0)

            cnt = cnt.reshape(-1, 2)
            candidatesAux = []
            for i in range(4):
                candidatesAux.append([cnt[i][0], cnt[i][1]])
            candidates.append(candidatesAux)
            candidates_contours.append(cnt_orig)
            candidates_len.append(cnt_len)

    for i in range(len(candidates)):
        candidates[i] = order_points(candidates[i])

    candGroup = []
    candGroup = [-1 for i in range(len(candidates))]
    groupedCandidates = []

    for i in range(len(candidates)):
        for j in range(i + 1, len(candidates)):  # compare each pair of candidates once
            minimum_perimeter = min(len(candidates_contours[i]),
                                    len(candidates_contours[j]))
            for fc in range(4):
                distsq = 0
                for c in range(4):
                    modC = (c + fc) % 4
                    distsq = distsq + (
                        candidates[i][modC][0] - candidates[j][c][0])**2 + (
                            candidates[i][modC][1] - candidates[j][c][1])**2
                distsq = distsq / 4.0
                minMarkerDistancePixels = float(
                    minimum_perimeter) * params.minMarkerDistanceRate
                if (distsq < minMarkerDistancePixels**2):
                    if (candGroup[i] < 0 and candGroup[j] < 0):
                        candGroup[i] = candGroup[j] = (int)(
                            len(groupedCandidates))
                        grouped = []
                        grouped.append(i)
                        grouped.append(j)
                        groupedCandidates.append(grouped)
                    elif (candGroup[i] > -1 and candGroup[j] == -1):
                        group = candGroup[i]
                        candGroup[j] = group
                        groupedCandidates[group].append(j)
                    elif (candGroup[j] > -1 and candGroup[i] == -1):
                        group = candGroup[j]
                        candGroup[i] = group
                        groupedCandidates[group].append(i)

    biggerCandidates = []
    biggerContours = []
    for i in range(len(groupedCandidates)):
        smallerIdx = groupedCandidates[i][0]
        biggerIdx = -1
        for j in range(1, len(groupedCandidates[i])):
            currPerim = candidates_len[groupedCandidates[i][j]]
            if biggerIdx < 0:
                biggerIdx = groupedCandidates[i][j]
            elif (currPerim >= len(candidates_contours[biggerIdx])):
                biggerIdx = groupedCandidates[i][j]

            if (currPerim < len(candidates_contours[smallerIdx])):
                smallerIdx = groupedCandidates[i][j]

        if (biggerIdx > -1):
            cnt = candidates[biggerIdx].reshape(-1, 2)
            issquare = compare_distances(cnt)
            if not (issquare):
                continue
            biggerCandidates.append(candidates[biggerIdx])
            biggerContours.append(candidates_contours[biggerIdx])

    squares = []

    for i in range(len(biggerCandidates)):
        x, y, w, h = cv.boundingRect(biggerCandidates[i])

        cv.rectangle(drawing, (x, y), (x + w, y + h), (255, 0, 0), 3)

        warped = four_point_transform(gray, biggerCandidates[i])

        _, warped = cv.threshold(warped, 125, 255,
                                 cv.THRESH_BINARY | cv.THRESH_OTSU)
        #_, warped = cv.threshold(warped, 125, 255, cv.THRESH_BINARY)

        aux = (x, y, w, h, warped, biggerCandidates[i])
        squares.append(aux)

    markers = match_warped(squares, gray)

    foundMarker = False
    for i in range(len(markers)):
        x1 = markers[i][0]
        y1 = markers[i][1]
        w = markers[i][2]
        h = markers[i][3]
        corners = markers[i][5]
        cv.rectangle(img, (x1, y1), (x1 + w, y1 + h), (0, 255, 0), 10)
        foundMarker = True

        #try:
        _, rvec, tvec = cv.solvePnP(object_points,
                                    corners,
                                    camera_matrix,
                                    camera_distortion,
                                    flags=cv.SOLVEPNP_IPPE_SQUARE)
        marker_distance = np.linalg.norm(tvec)
        R_ct = np.matrix(
            cv.Rodrigues(rvec)[0])  # rotation of the marker frame wrt the camera (marker -> camera)

        R_tc = R_ct.T  # rotation of the camera frame wrt the marker (camera -> marker)
        roll_marker, pitch_marker, yaw_marker = rotationMatrixToEulerAngles(
            R_Flip * R_tc)
        str_position = "Marker Position: x = %4.0f y = %4.0f z = %4.0f" % (
            tvec[0], tvec[1], tvec[2])
        cv.putText(img, str_position, (0, 100), font, 1.5, (255, 0, 0), 2,
                   cv.LINE_AA)

        str_distance = "Marker Distance: d = %4.0f" % (marker_distance)
        cv.putText(img, str_distance, (0, 150), font, 1.5, (255, 0, 0), 2,
                   cv.LINE_AA)

        str_attitude = "Marker Attitude r=%4.0f p=%4.0f y=%4.0f" % (
            math.degrees(roll_marker), math.degrees(pitch_marker),
            math.degrees(yaw_marker))
        cv.putText(img, str_attitude, (0, 200), font, 1.5, (255, 0, 0), 2,
                   cv.LINE_AA)

        str_flagFound = "Flag Found = " + str(flagFound)
        cv.putText(img, str_flagFound, (0, 250), font, 1.5, (255, 0, 0), 2,
                   cv.LINE_AA)

        data.append(marker_distance)

        if flagFound == 0:
            flagFound = flagFound + 5
        else:
            flagFound = flagFound + 1
        bbox = (x1, y1, w, h)
        #except:
        #marker_distance = 0
        #print("error")
    if foundMarker == False:
        if flagFound > 1:
            flagFound = flagFound - 1
        else:
            flagFound = 0
    return img, drawing, flagFound, bbox
    # convert to grayscale (fast to process)
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # detect the ArUco markers
    corners, ids, rejectedImgPoints = cv.aruco.detectMarkers(gray, aruco_dict, cameraMatrix=newcameramtx)

    # if a marker was detected
    if corners:
        # draw the detected markers
        cv.aruco.drawDetectedMarkers(frame, corners, ids)
        # split out each marker's corners
        corner = np.array(corners).reshape(4, 2)
        corner = np.squeeze(np.array(corner))
        # the detected corner order is top-left, top-right, bottom-right, bottom-left, so reorder
        corner_pnp = np.array([corner[0], corner[1], corner[3], corner[2]])
        # solvePnP gives the rotation and translation
        retval, rvec, tvec = cv.solvePnP(objp, corner_pnp, newcameramtx, None)
        cv.aruco.drawAxis(frame, newcameramtx, np.zeros((1, 5)), rvec, tvec, 45.4)
        rm, _ = cv.Rodrigues(rvec)
        hvec = np.concatenate((np.concatenate((rm, tvec), axis=1), [[0, 0, 0, 1]]), axis=0)

    cv.imshow('img', frame)

    key = cv.waitKey(1)
    if key & 0xFF == ord('q'):
        break
    elif key & 0xFF == ord('a'):
        out = tripodheads.get_aimming_arc(hvec)
        deltatheta = [fmod(out[0], 2 * np.pi) / np.pi * 180, fmod(out[1], 2 * np.pi) / np.pi * 180]
        tripodheads.servo_run([0, 1], deltatheta)
        print('hvec:', hvec)
        print('result', deltatheta)
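
# The examples above call a rotationMatrixToEulerAngles() helper that is not
# shown; a common implementation (x-y-z / roll-pitch-yaw convention, angles in
# radians) is sketched below as a reasonable stand-in.
import math

import numpy as np


def rotationMatrixToEulerAngles(R):
    # Guard against the gimbal-lock singularity before decomposing.
    sy = math.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
    if sy > 1e-6:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:  # singular: pitch close to +/- 90 degrees
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0.0
    return np.array([x, y, z])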
Exemple #42
0
     [
         math.tan(lighthouse_sweep_angles[2, 0]),
         math.tan(lighthouse_sweep_angles[2, 1])
     ],
     [
         math.tan(lighthouse_sweep_angles[3, 0]),
         math.tan(lighthouse_sweep_angles[3, 1])
     ]])

K = np.float64([[1, 0, 0], [0, 1, 0], [0.0, 0.0, 1.0]])

dist_coef = np.zeros(4)

_ret, rvec, tvec = cv.solvePnP(lighthouse_3d,
                               lighthouse_image_projection,
                               K,
                               dist_coef,
                               flags=cv.SOLVEPNP_ITERATIVE)

print(tvec)
print(rvec)

Rmatrix, _ = cv.Rodrigues(rvec)
print(Rmatrix)

vector = np.matmul(np.linalg.inv(Rmatrix), tvec)
print(vector)
#vector = np.matmul(Rmatrix,tvec)
#print(vector)

#Rt = np.append(Rmatrix,tvec,axis=1)
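
# For reference: solvePnP returns (rvec, tvec) such that
#   X_cam = R @ X_world + tvec,  with  R = cv.Rodrigues(rvec)[0],
# so the camera centre expressed in the world (lighthouse) frame is
#   C = -R.T @ tvec
# (note the sign relative to the 'vector' printed above).
def camera_position(rvec, tvec):
    R, _ = cv.Rodrigues(rvec)
    return -R.T @ tvec
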
def head_pose_estimate(frame, shape):

    size = frame.shape
    #2D image points. If you change the image, you need to change vector
    image_points = np.array(
        [
            (shape[30][0], shape[30][1]),  # Nose tip
            (shape[8][0], shape[8][1]),  # Chin
            (shape[36][0], shape[36][1]),  # Left eye left corner
            (shape[45][0], shape[45][1]),  # Right eye right corner
            (shape[48][0], shape[48][1]),  # Left Mouth corner
            (shape[54][0], shape[54][1])  # Right mouth corner
        ],
        dtype="double")
    # 3D model points.
    model_points = np.array([
        (0.0, 0.0, 0.0),  # Nose tip
        (0.0, -330.0, -65.0),  # Chin
        (-225.0, 170.0, -135.0),  # Left eye left corner
        (225.0, 170.0, -135.0),  # Right eye right corner
        (-150.0, -150.0, -125.0),  # Left Mouth corner
        (150.0, -150.0, -125.0)  # Right mouth corner
    ])

    focal_length = size[1]
    center = (size[1] / 2, size[0] / 2)
    # camera_matrix = np.array(
    #     [[focal_length, 0, center[0]],
    #      [0, focal_length, center[1]],
    #      [0, 0, 1]], dtype="double"
    # )

    camera_matrix = np.array(
        [[1065.998192050825, 0.0, 650.5364868504282],
         [0.0, 1068.49376227235, 333.59792728394547], [0.0, 0.0, 1.0]],
        dtype="double")

    params = {}
    # print("Camera Matrix: \n {0}".format(camera_matrix))

    dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
    #dist_coeffs = np.array(
    #[0.05168885345466659, 0.08869302343380323, -0.011352749105937471, 0.0010267347279299176, -0.3245685548675939])
    (success, rotation_vector,
     translation_vector) = cv2.solvePnP(model_points,
                                        image_points,
                                        camera_matrix,
                                        dist_coeffs,
                                        flags=cv2.SOLVEPNP_ITERATIVE)

    print("Rotation Vector: \n {0}".format(rotation_vector))
    params["rotation_vector"] = np.concatenate(rotation_vector,
                                               axis=0).tolist()

    params["euler_angles"] = {}

    print("Rotation Euler angles (Radians): \n {0}".format(
        rmu.rotationMatrixToEulerAngles(cv2.Rodrigues(rotation_vector)[0])))
    params["euler_angles"]["radians"] = rmu.rotationMatrixToEulerAngles(
        cv2.Rodrigues(rotation_vector)[0]).tolist()

    print("Rotation Euler angles (Degrees): \n {0}".format(
        rmu.rotationMatrixToEulerAngles(cv2.Rodrigues(rotation_vector)[0]) *
        (180 / PI)))
    params["euler_angles"]["degrees"] = (
        rmu.rotationMatrixToEulerAngles(cv2.Rodrigues(rotation_vector)[0]) *
        (180 / PI)).tolist()

    print("Translation Vector: \n {0}".format(translation_vector))
    params["translation_vector"] = np.concatenate(rotation_vector,
                                                  axis=0).tolist()

    params["camera_position"] = np.matrix(
        cv2.Rodrigues(rotation_vector)[0]).T * np.matrix(translation_vector)
    print("Camera Position: \n {0}".format(params["camera_position"]))

    # find tilt agle
    eyeXdis = shape[45][0] - shape[36][0]
    eyeYdis = shape[45][1] - shape[36][1]
    angle = math.atan(eyeYdis / eyeXdis)
    degree = angle * 180 / PI
    # print("degree: \n {0}".format(degree))

    # Project a 3D point (0, 0, 1000.0) onto the image plane.
    # We use this to draw a line sticking out of the nose
    (nose_end_point2D,
     jacobian) = cv2.projectPoints(np.array([
         (0.0, 0.0, 1000.0)
     ]), rotation_vector, translation_vector, camera_matrix, dist_coeffs, None,
                                   None, cv2.CALIB_FIX_ASPECT_RATIO)
    p1 = (int(image_points[0][0]), int(image_points[0][1]))
    p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))

    return [p1, p2, params]
Exemple #44
0
    def solve_pnp(self, cuboid2d_points, pnp_algorithm=None):
        """
        Detects the rotation and traslation 
        of a cuboid object from its vertexes' 
        2D location in the image
        """

        # Fall back to the default PnP algorithm based on the OpenCV version
        if pnp_algorithm is None:
            if CuboidPNPSolver.cv2majorversion == 2:
                pnp_algorithm = cv2.CV_ITERATIVE
            elif CuboidPNPSolver.cv2majorversion == 3:
                pnp_algorithm = cv2.SOLVEPNP_ITERATIVE
            elif CuboidPNPSolver.cv2majorversion == 4:
                pnp_algorithm = cv2.SOLVEPNP_ITERATIVE
                # Alternative algorithms:
                # pnp_algorithm = cv2.SOLVEPNP_P3P
                # pnp_algorithm = cv2.SOLVEPNP_EPNP

        location = None
        quaternion = None
        projected_points = cuboid2d_points

        cuboid3d_points = np.array(self._cuboid3d.get_vertices())
        obj_2d_points = []
        obj_3d_points = []

        for i in range(CuboidVertexType.TotalVertexCount):
            check_point_2d = cuboid2d_points[i]
            # Ignore invalid points
            if (check_point_2d is None):
                continue
            obj_2d_points.append(check_point_2d)
            obj_3d_points.append(cuboid3d_points[i])

        obj_2d_points = np.array(obj_2d_points, dtype=float)
        obj_3d_points = np.array(obj_3d_points, dtype=float)

        valid_point_count = len(obj_2d_points)

        # Can only do PNP if we have more than 3 valid points
        is_points_valid = valid_point_count >= 4

        if is_points_valid:

            ret, rvec, tvec = cv2.solvePnP(obj_3d_points,
                                           obj_2d_points,
                                           self._camera_intrinsic_matrix,
                                           self._dist_coeffs,
                                           flags=pnp_algorithm)

            if ret:
                location = list(x[0] for x in tvec)
                quaternion = self.convert_rvec_to_quaternion(rvec)

                projected_points, _ = cv2.projectPoints(
                    cuboid3d_points, rvec, tvec, self._camera_intrinsic_matrix,
                    self._dist_coeffs)
                projected_points = np.squeeze(projected_points)

                # If the location.Z is negative or object is behind the camera then flip both location and rotation
                x, y, z = location
                if z < 0:
                    # Get the opposite location
                    location = [-x, -y, -z]

                    # Change the rotation by 180 degree
                    rotate_angle = np.pi
                    rotate_quaternion = Quaternion.from_axis_rotation(
                        location, rotate_angle)
                    quaternion = rotate_quaternion.cross(quaternion)

        return location, quaternion, projected_points
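
    # convert_rvec_to_quaternion() is not shown in this excerpt; a plausible
    # stand-alone equivalent (axis-angle Rodrigues vector -> x, y, z, w
    # quaternion) is sketched below for reference, using the same np alias as
    # the code above.
    @staticmethod
    def rvec_to_quaternion_sketch(rvec):
        rvec = np.asarray(rvec, dtype=float).reshape(3)
        theta = np.linalg.norm(rvec)          # rotation angle
        if theta < 1e-12:
            return np.array([0.0, 0.0, 0.0, 1.0])
        axis = rvec / theta                   # unit rotation axis
        return np.append(axis * np.sin(theta / 2.0), np.cos(theta / 2.0))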
    def calib(self, imgPath, cameraCalibPath):
        # imgFileList = readFileList(imgFolder)

        data = np.load(cameraCalibPath)
        camMtx = data["mtx"]
        dist = data["dist"]

        objP_pixel = np.ceil(self.readCheckerObjPoint(self.checker_file))
        objP_pixel[:, 2] = 0
        objP = np.array(objP_pixel)
        for i in range(self.checker_pattern[1]):
            for j in range(math.floor(self.checker_pattern[0] / 2)):
                tmp = objP[self.checker_pattern[0] * i + j, 0]
                objP[self.checker_pattern[0] * i + j,
                     0] = objP[self.checker_pattern[0] * i +
                               self.checker_pattern[0] - j - 1, 0]
                objP[self.checker_pattern[0] * i + self.checker_pattern[0] -
                     j - 1, 0] = tmp
        objP[:, 0] -= (self.display_width / 2 - 1)
        objP[:, 1] -= (self.display_height / 2 - 1)

        objP *= self.checker_size

        rtA = []
        rB = []
        tB = []
        rC2Ss = []
        tC2Ss = []

        # define valid image
        validImg = -1
        # for i in trange(len(imgFileList), desc="Images"):
        for i in range(1):

            img = cv2.imread(imgPath, cv2.IMREAD_UNCHANGED)

            # Yunhao
            # img = (img/65535*255).astype(np.uint8)

            # Aruco marker for Mirror position
            cornersAruco, ids = self.detectAruco(img, debug=False)
            if cornersAruco is None or ids is None or len(cornersAruco) <= 3:
                continue

            # Checker for Display
            ret, cornersChecker = self.detectChecker(img, debug=False)
            if not ret:
                print("no Checker!!!")
                continue

            # for a valid image, aruco and checker must be both detected
            validImg += 1

            # Calibrate Mirror Pose with Aruco

            rvecMirror, tvecMirror = self.postEst(cornersAruco, ids, camMtx,
                                                  dist)
            img_axis = aruco.drawAxis(img, camMtx, dist, rvecMirror,
                                      tvecMirror, self.aruco_size)
            cv2.namedWindow('Img_axis', cv2.WINDOW_NORMAL)
            cv2.imshow('Img_axis', img_axis)
            cv2.waitKey(0)  # any key
            cv2.destroyWindow('Img_axis')

            ## Reproject camera extrinsics
            self.reProjAruco(img, camMtx, dist, rvecMirror, tvecMirror,
                             cornersAruco)

            rMatMirror, _ = cv2.Rodrigues(
                rvecMirror)  # rotation vector to rotation matrix
            normalMirror = rMatMirror[:, 2]

            rC2W, tC2W = self.invTransformation(rMatMirror, tvecMirror)
            dW2C = abs(np.dot(normalMirror, tvecMirror))

            # Householder transformation
            p1, p2, p3 = self.householderTransform(normalMirror, dW2C)

            # Calibrate virtual to Camera with Checker
            rpe, rvecVirtual, tvecVirtual = cv2.solvePnP(
                objP, cornersChecker, camMtx, dist, flags=cv2.SOLVEPNP_IPPE
            )  # cv2.SOLVEPNP_IPPE for 4 point solution #cv2.SOLVEPNP_ITERATIVE
            # iterationsCount=200, reprojectionError=8.0,

            rvecVirtual, tvecVirtual = cv2.solvePnPRefineLM(
                objP, cornersChecker, camMtx, dist, rvecVirtual, tvecVirtual)

            proj, jac = cv2.projectPoints(objP, rvecVirtual, tvecVirtual,
                                          camMtx, dist)
            img_rep = img

            cv2.drawChessboardCorners(img_rep,
                                      (self.checker_pattern[0], self.checker_pattern[1]),
                                      proj, True)  # patternSize must be (cols, rows)
            width = 960
            height = int(img_rep.shape[0] * 960 / img_rep.shape[1])
            smallimg = cv2.resize(img_rep, (width, height))
            cv2.imshow("img_rep", smallimg)
            cv2.waitKey(0)  # any key
            cv2.destroyWindow("img_rep")

            rMatVirtual, _ = cv2.Rodrigues(
                rvecVirtual)  # rotation vector to rotation matrix

            print(tvecVirtual)
            if validImg == 0:
                rtA = p1
                rB = np.matmul(rMatVirtual, p2)
                tB = np.squeeze(tvecVirtual) + p3
            else:
                rtA = np.concatenate((rtA, p1))
                rB = np.concatenate((rB, np.matmul(rMatVirtual, p2)))
                tB = np.concatenate((tB, np.squeeze(tvecVirtual) + p3))

            rS2C = p1 @ rMatVirtual
            tS2C = p1 @ np.squeeze(tvecVirtual) + p3

            rC2S, tC2S = self.invTransformation(rS2C, tS2C)
            print("rC2S:", rC2S)
            print("tC2S:", tC2S)
            rC2Ss.append(rC2S)
            tC2Ss.append(tC2S)

        # rC2Ss = np.array(rC2Ss)
        # tC2Ss = np.array(tC2Ss)
        # fout = os.path.join(imgFolder, "Cam2Screen.npz")
        # np.savez(fout, rC2S=rC2Ss, tC2S=tC2Ss)
        return rC2Ss, tC2Ss
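
    # householderTransform() is not shown in this excerpt. In mirror-based
    # calibration the reflection about a mirror plane with unit normal n at
    # distance d from the camera is usually built from the Householder matrix
    # H = I - 2 n n^T plus the offset 2 d n; a sketch of that building block
    # (not necessarily the exact p1/p2/p3 split used above):
    @staticmethod
    def mirror_reflection_sketch(n, d):
        n = np.asarray(n, dtype=float).reshape(3)
        H = np.eye(3) - 2.0 * np.outer(n, n)  # reflects directions across the plane
        offset = 2.0 * d * n                  # translation part: x' = H @ x + offset
        return H, offset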
Exemple #46
0
    ret, img = cap.read()
    tv = sd.getNumber('tv', 0)

    if (ret == True) and (tv == 1):
        tcornx = sd.getNumberArray('tcornx', [0, 0])
        tcorny = sd.getNumberArray('tcorny', [0, 0])

        if (len(tcornx) == NUM_OF_POINTS) and (len(tcorny) == NUM_OF_POINTS):
            for x in range(NUM_OF_POINTS):
                corners[x][0] = tcornx[x]
                corners[x][1] = tcorny[x]

            x, y, midPoint, rotation = getPrincipalAxes(corners)
            corners = sortImgPts(corners, x, y, midPoint, rotation)

            retval, rvecs, tvecs = cv2.solvePnP(objp, corners, mtx, dist)
            rotMat, jacobian = cv2.Rodrigues(rvecs)
            angleToTarget = getAngleToTarget(rotMat)
            print(np.degrees(angleToTarget), tvecs)

            imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)

            img = draw(img, imgpts)
            # blue, green, red, white, light blue, pink, yellow, gray
            colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 255),
                      (255, 255, 0), (255, 0, 255), (0, 255, 255),
                      (100, 100, 100)]
            for x in range(8):
                cv2.circle(img, (corners[x][0], corners[x][1]), 4, colors[x],
                           -2)
            cv2.imshow('stream', img)
Exemple #47
0
    (150.0, -150.0, -125.0)  # Right mouth corner
])

# Camera internals

focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
    [[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]],
    dtype="double")

print("Camera Matrix :\n {0}".format(camera_matrix))

dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
(success, rotation_vector,
 translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix,
                                    dist_coeffs)

print("Rotation Vector:\n {0}".format(rotation_vector))
print("Translation Vector:\n {0}".format(translation_vector))

# Project a 3D point (0, 0, 1000.0) onto the image plane.
# We use this to draw a line sticking out of the nose

(nose_end_point2D,
 jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector,
                               translation_vector, camera_matrix, dist_coeffs)
print('Nose', nose_end_point2D.shape)
# projectPoints returns shape (N, 1, 2): one projected 2D point per input 3D point.

for p in image_points:
    cv2.circle(im, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)
Exemple #48
0
def main():

    #Check if some arguments have been passed
    #pass the path of a video
    if (len(sys.argv) > 2):
        file_path = sys.argv[1]
        if (os.path.isfile(file_path) == False):
            print(
                "ex_pnp_head_pose_estimation: the file specified does not exist."
            )
            return
        else:
            #Open the video file
            video_capture = cv2.VideoCapture(file_path)
            if (video_capture.isOpened() == True):
                print(
                    "ex_pnp_head_pose_estimation: the video source has been opened correctly..."
                )
            # Define the codec and create VideoWriter object
            #fourcc = cv2.VideoWriter_fourcc(*'XVID')
            output_path = sys.argv[2]
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            out = cv2.VideoWriter(output_path, fourcc, 20.0, (1280, 720))
    else:
        print(
            "You have to pass as argument the path to a video file and the path to the output file to produce, for example: \n python ex_pnp_pose_estimation_video.py /home/video.mpg ./output.avi"
        )
        return

    #Create the main window and move it
    cv2.namedWindow('Video')
    cv2.moveWindow('Video', 20, 20)

    #Obtaining the CAM dimension
    cam_w = int(video_capture.get(3))
    cam_h = int(video_capture.get(4))

    #Defining the camera matrix.
    #To get better results it is necessary to find the focal
    # length of the camera. fx/fy are the focal lengths (in pixels)
    # and cx/cy are the optical centres. These values can be obtained
    # roughly by approximation, for example in a 640x480 camera:
    # cx = 640/2 = 320
    # cy = 480/2 = 240
    # fx = fy = cx/tan(60/2 * pi / 180) = 554.26
    c_x = cam_w / 2
    c_y = cam_h / 2
    f_x = c_x / numpy.tan(60 / 2 * numpy.pi / 180)
    f_y = f_x

    #Estimated camera matrix values.
    camera_matrix = numpy.float32([[f_x, 0.0, c_x], [0.0, f_y, c_y],
                                   [0.0, 0.0, 1.0]])

    print("Estimated camera matrix: \n" + str(camera_matrix) + "\n")

    #These are the camera matrix values estimated on my webcam with
    # the calibration code (see: src/calibration):
    #camera_matrix = numpy.float32([[602.10618226,          0.0, 320.27333589],
    #[         0.0, 603.55869786,  229.7537026],
    #[         0.0,          0.0,          1.0] ])

    #Distortion coefficients
    camera_distortion = numpy.float32([0.0, 0.0, 0.0, 0.0, 0.0])

    #Distortion coefficients estimated by calibration
    #camera_distortion = numpy.float32([ 0.06232237, -0.41559805,  0.00125389, -0.00402566,  0.04879263])

    #This matrix contains the 3D points of the
    # 15 landmarks we want to find. It has been
    # obtained from anthropometric measurements
    # on the human head.
    landmarks_3D = numpy.float32([
        P3D_RIGHT_SIDE, P3D_GONION_RIGHT, P3D_MENTON, P3D_GONION_LEFT,
        P3D_LEFT_SIDE, P3D_FRONTAL_BREADTH_RIGHT, P3D_FRONTAL_BREADTH_LEFT,
        P3D_SELLION, P3D_NOSE, P3D_SUB_NOSE, P3D_RIGHT_EYE, P3D_RIGHT_TEAR,
        P3D_LEFT_TEAR, P3D_LEFT_EYE, P3D_STOMION
    ])

    #Declaring the two classifiers
    #my_cascade = haarCascade("./etc/haarcascade_frontalface_alt.xml", "./etc/haarcascade_profileface.xml")
    my_detector = faceLandmarkDetection(
        './etc/shape_predictor_68_face_landmarks.dat')
    my_face_detector = dlib.get_frontal_face_detector()

    while (True):

        # Capture frame-by-frame
        ret, frame = video_capture.read()
        #gray = cv2.cvtColor(frame[roi_y1:roi_y2, roi_x1:roi_x2], cv2.COLOR_BGR2GRAY)

        faces_array = my_face_detector(frame, 1)
        print("Total Faces: " + str(len(faces_array)))
        for i, pos in enumerate(faces_array):

            face_x1 = pos.left()
            face_y1 = pos.top()
            face_x2 = pos.right()
            face_y2 = pos.bottom()
            text_x1 = face_x1
            text_y1 = face_y1 - 3

            cv2.putText(frame, "FACE " + str(i + 1), (text_x1, text_y1),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            cv2.rectangle(frame, (face_x1, face_y1), (face_x2, face_y2),
                          (0, 255, 0), 2)

            landmarks_2D = my_detector.returnLandmarks(
                frame,
                face_x1,
                face_y1,
                face_x2,
                face_y2,
                points_to_return=TRACKED_POINTS)

            for point in landmarks_2D:
                cv2.circle(frame, (point[0], point[1]), 2, (0, 0, 255), -1)

            #Applying the PnP solver to find the 3D pose
            # of the head from the 2D position of the
            # landmarks.
            #retval - bool
            #rvec - Output rotation vector that, together with tvec, brings
            # points from the model coordinate system to the camera coordinate system.
            #tvec - Output translation vector.
            retval, rvec, tvec = cv2.solvePnP(landmarks_3D, landmarks_2D,
                                              camera_matrix, camera_distortion)

            #Now we project the 3D points into the image plane
            #Creating a 3-axis to be used as reference in the image.
            axis = numpy.float32([[50, 0, 0], [0, 50, 0], [0, 0, 50]])
            imgpts, jac = cv2.projectPoints(axis, rvec, tvec, camera_matrix,
                                            camera_distortion)

            #Drawing the three axis on the image frame.
            #The opencv colors are defined as BGR colors such as:
            # (a, b, c) >> Blue = a, Green = b and Red = c
            #Our axis/color convention is X=R, Y=G, Z=B
            sellion_xy = (landmarks_2D[7][0], landmarks_2D[7][1])
            cv2.line(frame, sellion_xy, tuple(imgpts[1].ravel()), (0, 255, 0),
                     3)  #GREEN
            cv2.line(frame, sellion_xy, tuple(imgpts[2].ravel()), (255, 0, 0),
                     3)  #BLUE
            cv2.line(frame, sellion_xy, tuple(imgpts[0].ravel()), (0, 0, 255),
                     3)  #RED

        #Writing in the output file
        out.write(frame)

        #Showing the frame and waiting
        # for the exit command
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'): break

    #Release the camera
    video_capture.release()
    print("Bye...")
Exemple #49
0

# Camera internals

focal_length = size[1]
center = (size[1]/2, size[0]/2)
camera_matrix = np.array(
                         [[focal_length, 0, center[0]],
                         [0, focal_length, center[1]],
                         [0, 0, 1]], dtype = "double"
                         )

print "Camera Matrix :\n {name}".format(name="camera_matrix");
print("headpose")
dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)

print "Rotation Vector:\n {rotate}".format(rotate="rotation_vector")
print "Translation Vector:\n {tran}".format(trans="translation_vector")


# Project a 3D point (0, 0, 1000.0) onto the image plane.
# We use this to draw a line sticking out of the nose


(nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)

for p in image_points:
    cv2.circle(im, (int(p[0]), int(p[1])), 3, (0,0,255), -1)

def aruco_processing(mtx, dist, img, save=False):
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
    arucoParameters = aruco.DetectorParameters_create()

    arucoParameters.adaptiveThreshWinSizeMin = 3
    arucoParameters.adaptiveThreshWinSizeStep = 2
    arucoParameters.adaptiveThreshWinSizeMax = 50
    arucoParameters.minMarkerPerimeterRate = 0.01
    arucoParameters.maxMarkerPerimeterRate = 4.0
    arucoParameters.markerBorderBits = 1

    img = cv2.detailEnhance(img)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    marker_corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, aruco_dict, parameters=arucoParameters)

    # marker_corners += rejectedImgPoints
    img = aruco.drawDetectedMarkers(img, marker_corners)

    # cv2.imwrite("Object With Marker" + str(random.randint(0, 9000)) + ".jpg", img)

    if not marker_corners:
        # Do something next
        #print("No marker found")
        return (False, None, None, None, None, None)

    chosen_marker = None
    max_area = 0
    for (i, marker) in enumerate(marker_corners):
        area = cv2.contourArea(marker[0])
        if (area > max_area):
            max_area = area
            chosen_marker = marker

    corners_convert = sort_corner(chosen_marker)

    # Calculate the middle point

    # Left-top  [corner[corner_arr[3]][0], corner[corner_arr[3]][1]]] ;    [[corner[corner_arr[0]][0], corner[corner_arr[0]][1]], # Right Bottom
    left_top_x = corners_convert[3][0]
    left_top_y = corners_convert[3][1]

    right_bottom_x = corners_convert[0][0]
    right_bottom_y = corners_convert[0][1]

    middle_x = (left_top_x + right_bottom_x) / 2
    middle_y = (left_top_y + right_bottom_y) / 2

    # Calculate the pose
    axis = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, -1]]).reshape(-1, 3)
    objp = np.zeros((2 * 2, 3), np.float32)
    objp[:, :2] = np.mgrid[0:2, 0:2].T.reshape(-1, 2)
    ret, rvecs, tvecs = cv2.solvePnP(objp, corners_convert, mtx, dist)
    # project 3D points to image plane
    imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)

    # Rendering model on video frame
    corners_convert = corners_convert.astype(int)
    imgpts = imgpts.astype(int)

    img = draw(img, corners_convert, imgpts)

    #img = aruco.drawAxis(img, mtx, dist, rvecs, tvecs, 20)

    # Get Degree Up
    rmat = cv2.Rodrigues(rvecs)[0]
    proj_matrix = np.hstack((rmat, tvecs))
    _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(proj_matrix)
    pitch, yaw, roll = [math.radians(_) for _ in euler_angles]
    #print("P/R/Y : ", pitch * 57.2957795, roll * 57.2957795, yaw * 57.2957795)
    pitch, yaw = yaw * 57.2957795, pitch * 57.2957795  # Switch and convert Pitch and Yaw
    # cv2.putText(img, "Yaw:  %.2f" % yaw, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 255, 0), 3)
    # cv2.putText(img, "Pitch: %.2f" % pitch, (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 255, 0), 3)
    # cv2.putText(img, "Roll: 180", (10, 150), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 255, 0), 3)

    #print("Real: ", yaw, pitch, yaw)
    randomNumber = random.randint(1, 1000)

    cv2.imwrite("Ill" + str(randomNumber) + ".jpg", img)
    threshold = 9
    # Handle PYR robot inverse vector from the object normal vector
    if abs(pitch) < threshold:  # if below the threshold, treat it as a straight-ahead angle
        pitch = 0
    if abs(yaw) < threshold:  # if below the threshold, treat it as a straight-ahead angle
        yaw = 0

    if pitch < 0:  # Could be reverse >0 and <0 - Needs Testing With Robot
        pitch = 180 - abs(pitch)  # Object Negative then robot Positive
    else:
        pitch = (180 - abs(pitch)) * -1  # Object Positive then robot Negative
    if yaw < 0:  # Could be reverse >0 and <0 - Needs Testing With Robot
        yaw = abs(yaw)
    else:
        yaw = abs(yaw) * -1
    roll = 180
    # pose = [pitch, yaw, roll]
    return (True, middle_x, middle_y, pitch, yaw, roll)
    img_before = cv2.imread(fp_img_before)
    img_after = cv2.imread(fp_img_after)
    intrinsic = np.loadtxt(fp_ins)
    distcoefs = np.loadtxt(fp_dis)
    img_points_before = np.loadtxt(fp_img_points_before)
    img_points_after = np.loadtxt(fp_img_points_after)
    object_points_before = np.loadtxt(fp_object_points_before)
    object_points_after = np.loadtxt(fp_object_points_after)

    object_points_before = np.array(
        [x.tolist() + [0.0] for x in object_points_before])
    alpha, delta_h, h = 43.4, 300, 1005
    object_points_after = np.array(
        [x.tolist() + [delta_h] for x in object_points_after])

    _, rvec, tvec = cv2.solvePnP(object_points_before, img_points_before,
                                 intrinsic, distcoefs)
    rmat = cv2.Rodrigues(rvec)[0]

    zcs = []
    for i in range(object_points_before.shape[0]):
        zcs.append((intrinsic @ (rmat @ object_points_before[i, :].reshape(
            (-1, 1)) + tvec))[2, 0])

    loss = []
    img_points_before_cal = np.array(
        [x.tolist() + [1.0] for x in img_points_before])
    for i in range(img_points_before.shape[0]):
        loss.append(intrinsic @ (rmat @ object_points_before[i, :].reshape(
            (-1, 1)) + tvec) / zcs[i] -
                    img_points_before_cal[i, :].reshape((-1, 1)))
    loss = np.squeeze(np.array(loss))
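
    # The same reprojection residual can be obtained directly with
    # cv2.projectPoints (note that, unlike the hand-rolled loop above, it also
    # applies the distortion coefficients loaded from fp_dis):
    proj, _ = cv2.projectPoints(object_points_before, rvec, tvec, intrinsic,
                                distcoefs)
    residuals = proj.reshape(-1, 2) - img_points_before
    rmse = np.sqrt((residuals ** 2).mean())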
Exemple #52
0
            # mark_detector.draw_marks(img, marks, color=(0, 255, 0))
            image_points = np.array(
                [
                    marks[30],  # Nose tip
                    marks[8],  # Chin
                    marks[36],  # Left eye left corner
                    marks[45],  # Right eye right corner
                    marks[48],  # Left Mouth corner
                    marks[54]  # Right mouth corner
                ],
                dtype="double")
            dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
            (success, rotation_vector,
             translation_vector) = cv2.solvePnP(model_points,
                                                image_points,
                                                camera_matrix,
                                                dist_coeffs,
                                                flags=cv2.SOLVEPNP_UPNP)

            # Project a 3D point (0, 0, 1000.0) onto the image plane.
            # We use this to draw a line sticking out of the nose

            (nose_end_point2D,
             jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]),
                                           rotation_vector, translation_vector,
                                           camera_matrix, dist_coeffs)

            for p in image_points:
                cv2.circle(img, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

            p1 = (int(image_points[0][0]), int(image_points[0][1]))
Exemple #53
0
    def track(self, frame1_grayscale_mat, frame2_grayscale_mat,
              pos1_rotation_mat, pos1_translation):
        print(pos1_translation)
        print(pos1_rotation_mat)
        control_points, control_points_pair = self.control_points(
            self.object_points,
            RapidTrackerVadimFarutin.EDGE_CONTROL_POINTS_NUMBER)
        control_points = [
            control_points[i *
                           RapidTrackerVadimFarutin.EDGE_CONTROL_POINTS_NUMBER:
                           (i + 1) *
                           RapidTrackerVadimFarutin.EDGE_CONTROL_POINTS_NUMBER]
            for i in range(RapidTrackerVadimFarutin.EDGE_NUMBER)
        ]
        control_points_pair = [
            control_points_pair[
                i *
                RapidTrackerVadimFarutin.EDGE_CONTROL_POINTS_NUMBER:(i + 1) *
                RapidTrackerVadimFarutin.EDGE_CONTROL_POINTS_NUMBER]
            for i in range(RapidTrackerVadimFarutin.EDGE_NUMBER)
        ]

        image_points = []
        image_points_idx = []
        edge_lines = []
        rejected_points = []

        pos1_rotation = rodrigues(pos1_rotation_mat)
        # shift = self.frame_size // 2
        frame2_gradient_map = cv2.Laplacian(frame2_grayscale_mat, cv2.CV_64F)

        for i in range(RapidTrackerVadimFarutin.EDGE_NUMBER):
            # noinspection PyPep8Naming
            R = control_points[i]
            # noinspection PyPep8Naming
            S = control_points_pair[i]
            r = project_points_int(R, pos1_rotation, pos1_translation,
                                   self.camera_mat)
            s = project_points_int(S, pos1_rotation, pos1_translation,
                                   self.camera_mat)
            # r = change_coordinate_system(r, shift)
            # s = change_coordinate_system(s, shift)

            found_points, found_points_idx = self.search_edge(
                r, s, frame2_gradient_map, i)
            # found_points = change_coordinate_system(found_points, -shift)

            if len(found_points) == 0:
                continue

            corners = RapidTrackerVadimFarutin.linear_regression(found_points)
            edge_lines.append(corners)
            error = RapidTrackerVadimFarutin.find_edge_error(
                found_points, corners)

            if error > RapidTrackerVadimFarutin.EDGE_ERROR_THRESHOLD:
                continue

            accepted, accepted_idx, rejected = RapidTrackerVadimFarutin.filter_edge_points(
                found_points, found_points_idx, corners)
            image_points.extend(accepted)
            image_points_idx.extend(accepted_idx)
            rejected_points.extend(rejected)

        image_points = np.array(image_points, np.float32)
        all_control_points = np.array([
            control_points[i //
                           RapidTrackerVadimFarutin.EDGE_CONTROL_POINTS_NUMBER]
            [i % RapidTrackerVadimFarutin.EDGE_CONTROL_POINTS_NUMBER]
            for i in image_points_idx
        ])

        last_r_vec = np.copy(pos1_rotation)
        last_t_vec = np.copy(pos1_translation)

        if len(image_points) < RapidTrackerVadimFarutin.MIN_PNP_POINTS_ALLOWED:
            rvec = np.copy(pos1_rotation)
            tvec = np.copy(pos1_translation)
        else:
            _, rvec, tvec = cv2.solvePnP(all_control_points,
                                         image_points,
                                         self.camera_mat,
                                         None,
                                         pos1_rotation,
                                         pos1_translation,
                                         useExtrinsicGuess=True)

        # retval, rvec, tvec, _ = cv2.solvePnPRansac(
        #     all_control_points, image_points, cameraMatrix, None)

        diff_r_vec = rvec - last_r_vec
        diff_t_vec = tvec - last_t_vec

        rvec = last_r_vec + RapidTrackerVadimFarutin.ALPHA * diff_r_vec \
               + self.vec_speed[0:3]
        tvec = last_t_vec + RapidTrackerVadimFarutin.ALPHA * diff_t_vec \
               + self.vec_speed[3:6]
        self.vec_speed += RapidTrackerVadimFarutin.BETA \
                          * np.append(rvec - last_r_vec, tvec - last_t_vec, axis=0)

        rmat = rodrigues(rvec)
        return rmat, tvec
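
    # The ALPHA/BETA update above is an alpha-beta filter: the measured pose
    # change is blended in with gain ALPHA while a velocity term, accumulated
    # with gain BETA, predicts the motion expected on the next frame. A
    # stripped-down sketch of the same update for a generic state vector:
    @staticmethod
    def alpha_beta_update(last_state, measured_state, velocity, alpha, beta):
        new_state = last_state + alpha * (measured_state - last_state) + velocity
        velocity = velocity + beta * (new_state - last_state)
        return new_state, velocity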
Exemple #54
0
        kp_frame[m.trainIdx].pt for m in good_match_arr[:, 0]
    ]).reshape(-1, 1, 2)

    # ===== find the points that obey the homography
    H, masked = cv2.findHomography(good_kp_ref, good_kp_frame, cv2.RANSAC, 5.0)
    if not isinstance(H, np.ndarray):
        print("skip frame " + str(frame_num))
        continue

    best_kp_ref_XY = np.array(good_kp_ref_XY)[
        np.array(masked).flatten() > 0, :]
    best_kp_r = np.array(good_kp_frame.reshape(
        good_kp_frame.shape[0], 2))[np.array(masked).flatten() > 0, :]

    # ========= solve PnP to get cam pos
    res, rvec, tvec = cv2.solvePnP(best_kp_ref_XY[:, :, np.newaxis],
                                   best_kp_r[:, :, np.newaxis], K, dist_coeffs)
    # res_R,_ = cv2.Rodrigues(rvec)
    # x = np.array([[0,0,0]]).T
    # x_cam2_world = K@(res_R@x+tvec)
    # x2_norm = x_cam2_world[:2,:]/x_cam2_world[2,:]
    # plt.figure()
    # plt.imshow(frame_rgb)
    # plt.plot(x2_norm[0, 0], x2_norm[1, 0], '*w')
    # plt.show()

    # ======== draw proj res
    drawn_image = render.draw(frame_rgb, rvec, tvec)
    # plt.imshow(drawn_image)
    # plt.show()

    # =========== output
    def return_roll_pitch_yaw(self, image, radians=False):
        """ Return the the roll pitch and yaw angles associated with the input image.

         @param image It is a colour image. It must be >= 64 pixel.
         @param radians When True it returns the angle in radians, otherwise in degrees.
         """

        #The dlib shape predictor returns 68 points, we are interested only in a few of those
        TRACKED_POINTS = (0, 4, 8, 12, 16, 17, 26, 27, 30, 33, 36, 39, 42, 45,
                          62)

        #Anthropometric constant values of the human head.
        #Check the wikipedia EN page and:
        #"Head-and-Face Anthropometric Survey of U.S. Respirator Users"
        #
        #X-Y-Z with X pointing forward and Y on the left and Z up.
        #The X-Y-Z coordinates used are like the standard
        # coordinates of ROS (robotic operative system)
        #OpenCV uses the reference usually used in computer vision:
        #X points to the right, Y down, Z to the front
        #
        #The Male mean interpupillary distance is 64.7 mm (https://en.wikipedia.org/wiki/Interpupillary_distance)
        #
        P3D_RIGHT_SIDE = np.float32([-100.0, -77.5, -5.0])  #0
        P3D_GONION_RIGHT = np.float32([-110.0, -77.5, -85.0])  #4
        P3D_MENTON = np.float32([0.0, 0.0, -122.7])  #8
        P3D_GONION_LEFT = np.float32([-110.0, 77.5, -85.0])  #12
        P3D_LEFT_SIDE = np.float32([-100.0, 77.5, -5.0])  #16
        P3D_FRONTAL_BREADTH_RIGHT = np.float32([-20.0, -56.1, 10.0])  #17
        P3D_FRONTAL_BREADTH_LEFT = np.float32([-20.0, 56.1, 10.0])  #26
        P3D_SELLION = np.float32([0.0, 0.0, 0.0])  #27 This is the world origin
        P3D_NOSE = np.float32([21.1, 0.0, -48.0])  #30
        P3D_SUB_NOSE = np.float32([5.0, 0.0, -52.0])  #33
        P3D_RIGHT_EYE = np.float32([-20.0, -32.35, -5.0])  #36
        P3D_RIGHT_TEAR = np.float32([-10.0, -20.25, -5.0])  #39
        P3D_LEFT_TEAR = np.float32([-10.0, 20.25, -5.0])  #42
        P3D_LEFT_EYE = np.float32([-20.0, 32.35, -5.0])  #45
        #P3D_LIP_RIGHT = np.float32([-20.0, 65.5,-5.0]) #48
        #P3D_LIP_LEFT = np.float32([-20.0, 65.5,-5.0]) #54
        P3D_STOMION = np.float32([10.0, 0.0, -75.0])  #62

        #This matrix contains the 3D points of the
        # 15 landmarks we want to find. It has been
        # obtained from anthropometric measurements
        # of the human head.
        landmarks_3D = np.float32([
            P3D_RIGHT_SIDE, P3D_GONION_RIGHT, P3D_MENTON, P3D_GONION_LEFT,
            P3D_LEFT_SIDE, P3D_FRONTAL_BREADTH_RIGHT, P3D_FRONTAL_BREADTH_LEFT,
            P3D_SELLION, P3D_NOSE, P3D_SUB_NOSE, P3D_RIGHT_EYE, P3D_RIGHT_TEAR,
            P3D_LEFT_TEAR, P3D_LEFT_EYE, P3D_STOMION
        ])

        #Return the 2D position of our landmarks
        img_h, img_w, img_d = image.shape
        landmarks_2D = self._return_landmarks(inputImg=image,
                                              roiX=0,
                                              roiY=img_w,
                                              roiW=img_w,
                                              roiH=img_h,
                                              points_to_return=TRACKED_POINTS)

        #Print some red dots on the image
        #for point in landmarks_2D:
        #cv2.circle(frame,( point[0], point[1] ), 2, (0,0,255), -1)

        #Applying the PnP solver to find the 3D pose
        #of the head from the 2D position of the
        #landmarks.
        #retval - bool
        #rvec - Output rotation vector that, together with tvec, brings
        #points from the world coordinate system to the camera coordinate system.
        #tvec - Output translation vector. It is the position of the world origin (SELLION) in camera co-ords
        retval, rvec, tvec = cv2.solvePnP(landmarks_3D, landmarks_2D,
                                          self.camera_matrix,
                                          self.camera_distortion)

        #Get as input the rotational vector
        #Return a rotational matrix
        rmat, _ = cv2.Rodrigues(rvec)

        #euler_angles contain (pitch, yaw, roll)
        #euler_angles = cv.DecomposeProjectionMatrix(projMatrix=rmat, cameraMatrix=camera_matrix, rotMatrix, transVect, rotMatrX=None, rotMatrY=None, rotMatrZ=None)

        head_pose = [
            rmat[0, 0], rmat[0, 1], rmat[0, 2], tvec[0], rmat[1, 0],
            rmat[1, 1], rmat[1, 2], tvec[1], rmat[2, 0], rmat[2, 1],
            rmat[2, 2], tvec[2], 0.0, 0.0, 0.0, 1.0
        ]
        #print(head_pose) #TODO remove this line
        return self.rotationMatrixToEulerAngles(rmat)
Exemple #56
0
    scanner = zbar.ImageScanner()

    # configure the reader
    scanner.parse_config('enable')

    # Find the corners of the pattern image
    scale_corners = get_corner_locs("pattern.jpg")

    # shift the corners so that in 3D space we consider our squares centered at
    # 0, 0
    length = scale_corners[1][1] - scale_corners[1][0]
    scale_corners -= scale_corners[0]
    scale_corners -= length / 2
    scale_corners /= (length / QR_LENGTH)

    # make 3D space 3 Dimensional by assuming Z component is 0
    objp = np.zeros((4, 3))
    objp[:, :-1] = scale_corners

    for img in images:
        # get the location of the corners
        locs = get_corner_locs(img)
        if locs is None:
            print "QR code could not be located in img " + img
        else:
            success, rvec, tvec = cv2.solvePnP(objp, locs, cam_matrix, None)
            if success:
                generate_visualization(img, rvec, tvec)
            else:
                print "No visualization able to be generated for img: " + img
Exemple #57
0
def calibration(undistort=False, selfCoor=False):
    """
    主要完成以下功能:
        1. 分别计算单目的投影矩阵并持久化。为计算3d坐标做准备
        2. 计算双目的投影矩阵及R1 R2 Q 矩阵。为行对准做准备
    """
    global imgpoints_r_selfcoor, imgpoints_selfcoor, objpoints_selfcoor
    global cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((7 * 9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:7].T.reshape(-1, 2) * 25
    #print(objp.shape)
    #objp[:, -1] = 0
    #print(objp)
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d point in real world space
    imgpoints = []  # 2d points in image plane.

    objpoints_r = []
    imgpoints_r = []

    images = glob.glob('stereo512/left.bmp')
    images_r = glob.glob('stereo512/right.bmp')
    images.sort()
    images_r.sort()

    for fname, fname_r in zip(images, images_r):
        img = cv2.imread(fname)
        img_r = cv2.imread(fname_r)

        if undistort:
            img = undistortImage(img, cameraMatrix1, distCoeffs1)
            img_r = undistortImage(img_r, cameraMatrix2, distCoeffs2)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (9, 7), None)
        #print('corners',corners)
        ret_r, corners_r = cv2.findChessboardCorners(gray_r, (9, 7), None)

        # If found, add object points, image points (after refining them)
        if ret == True and ret_r == True:
            objpoints.append(objp)
            objpoints_r.append(objp)

            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                        criteria)
            corners2_r = cv2.cornerSubPix(gray_r, corners_r, (11, 11),
                                          (-1, -1), criteria)
            imgpoints.append(corners2)
            imgpoints_r.append(corners2_r)

    # Compute each camera's projection matrix and persist it.
    if not selfCoor:
        objpoints_selfcoor = objpoints[0]
        imgpoints_selfcoor = imgpoints[0]
        imgpoints_r_selfcoor = imgpoints_r[0]
    else:
        if objpoints_selfcoor is None or imgpoints_selfcoor is None or imgpoints_r_selfcoor is None:
            print("Initial the self-defined coordinate first")
            return
    ret, rotation, translation = cv2.solvePnP(objpoints_selfcoor,
                                              imgpoints_selfcoor,
                                              cameraMatrix1, distCoeffs1)

    ret, rotation_r, translation_r = cv2.solvePnP(objpoints_selfcoor,
                                                  imgpoints_r_selfcoor,
                                                  cameraMatrix2, distCoeffs2)

    rt1 = getrtMtx(rotation, translation)
    rt2 = getrtMtx(rotation_r, translation_r)

    P1_own = np.dot(cameraMatrix1, rt1)
    P2_own = np.dot(cameraMatrix2, rt2)
    save_camera_matrix_own_proj(P1_own)
    save_camera_matrix_own_proj_r(P2_own)

    # Stereo calibration: compute R1, R2, P1, P2, Q and persist them.
    retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = \
    cv2.stereoCalibrate(objpoints, imgpoints, imgpoints_r, cameraMatrix1,
                        distCoeffs1, cameraMatrix2, distCoeffs2, gray.shape[::-1], flags = cv2.CALIB_FIX_INTRINSIC )
    #print("OPENCV R-T\n",R, "\n", T)
    R1, R2, P1_stereo, P2_stereo, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(
        cameraMatrix1,
        distCoeffs1,
        cameraMatrix2,
        distCoeffs2,
        gray.shape[::-1],
        R,
        T,
        flags=0)
    #print("P1,P2\n",P1_stereo,"\n", P2_stereo)
    save_camera_matrix_rot(R1)
    save_camera_matrix_rot_r(R2)
    save_camera_matrix_stereo_proj(P1_stereo)
    save_camera_matrix_stereo_proj_r(P2_stereo)
    save_camera_matrix_q(Q)
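
# getrtMtx() used above is not shown; a plausible reconstruction that builds
# the 3x4 extrinsic matrix [R|t] from the solvePnP output (so that the
# projection matrix is P = cameraMatrix @ [R|t]) would be:
def getrtMtx(rvec, tvec):
    R, _ = cv2.Rodrigues(rvec)
    return np.hstack((R, np.asarray(tvec, dtype=float).reshape(3, 1)))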

def draw(img, corners, imgpts):
    corner = tuple(corners[0].ravel())
    img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255, 0, 0), 5)
    img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 5)
    img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0, 0, 255), 5)
    return img


criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((6 * 7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)
axis = np.float32([[3, 0, 0], [0, 3, 0], [0, 0, -3]]).reshape(-1, 3)

img = cv2.imread('left08.jpg')
cv2.imshow('img', img)
cv2.waitKey(0)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)
print(ret)
if ret == True:
    corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
    ret, rvecs, tvecs = cv2.solvePnP(objp, corners2, mtx, dist)
    imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
    img = draw(img, corners2, imgpts)
    cv2.imshow('img', img)
    k = cv2.waitKey(0) & 0xFF
    if k == ord('s'):
        cv2.imwrite('left08.png', img)
cv2.destroyAllWindows()
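The pose-drawing script above uses mtx and dist without defining them; a minimal sketch of how they could be obtained, assuming the intrinsics were saved earlier with np.savez after a cv2.calibrateCamera run (the file name 'calibration.npz' and the array keys are hypothetical):

import numpy as np

# Hypothetical file produced by a prior calibration run.
with np.load('calibration.npz') as data:
    mtx, dist = data['mtx'], data['dist']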
Exemple #59
0
    # detect faces in the grayscale frame
    rects = detector(gray, 0)

    # loop over the face detections
    for rect in rects:

        x1 = rect.left()  # left point
        y1 = rect.top()  # top point
        x2 = rect.right()  # right point
        y2 = rect.bottom()  # bottom point
        # Draw a rectangle
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 3)
        landmarks = predictor(image=gray, box=rect)

        # calculate rotation and translation vector using solvePnP
        success, rotationVector, translationVector = cv2.solvePnP(
            face3Dmodel, refImgPts, cameraMatrix, mdists)
        noseEndPoints3D = np.array([[0, 0, 1000.0]], dtype=np.float64)
        noseEndPoint2D, jacobian = cv2.projectPoints(noseEndPoints3D,
                                                     rotationVector,
                                                     translationVector,
                                                     cameraMatrix, mdists)

        #  draw nose line
        p1 = (int(refImgPts[0, 0]), int(refImgPts[0, 1]))
        p2 = (int(noseEndPoint2D[0, 0, 0]), int(noseEndPoint2D[0, 0, 1]))
        cv2.line(frame,
                 p1,
                 p2, (110, 220, 0),
                 thickness=2,
                 lineType=cv2.LINE_AA)
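Exemple #59 relies on face3Dmodel, refImgPts, cameraMatrix and mdists that are set up outside the shown fragment; a minimal sketch of a common way to build them from the dlib landmarks computed above (the 68-point landmark indices, the 3D model values and the helper name are assumptions, not the author's code):

# Sketch only: typical setup for the variables used in the fragment above.
# Assumes the 68-point dlib landmark model; all values are illustrative.
face3Dmodel = np.array([
    (0.0, 0.0, 0.0),           # nose tip
    (0.0, -330.0, -65.0),      # chin
    (-225.0, 170.0, -135.0),   # left eye, left corner
    (225.0, 170.0, -135.0),    # right eye, right corner
    (-150.0, -150.0, -125.0),  # left mouth corner
    (150.0, -150.0, -125.0),   # right mouth corner
], dtype=np.float64)

def to_ref_points(landmarks):
    # Map dlib's 68-point indices to the six model points above (hypothetical helper)
    idx = [30, 8, 36, 45, 48, 54]
    return np.array([(landmarks.part(i).x, landmarks.part(i).y) for i in idx],
                    dtype=np.float64)

refImgPts = to_ref_points(landmarks)

h, w = frame.shape[:2]
cameraMatrix = np.array([[w, 0, w / 2],
                         [0, w, h / 2],
                         [0, 0, 1]], dtype=np.float64)  # focal length approximated by image width
mdists = np.zeros((4, 1), dtype=np.float64)  # assume negligible lens distortion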
Exemple #60
0
def get_pose_direction(student, img):
    """Estimates the student's attention (head-pose) vector from facial landmarks.

    Args:
       student: student object carrying the detected facial landmark points
       img: current video frame

    """
    img_size = img.shape
    focal_length = img_size[1]
    center = (img_size[1] / 2, img_size[0] / 2)
    camera_matrix = np.array([[focal_length, 0, center[0]],
                              [0, focal_length, center[1]], [0, 0, 1]],
                             dtype="double")
    model_points = np.array([
        (0.0, 0.0, 0.0),  # Nose tip
        (-225.0, 170.0, -135.0),  # Left eye left corner
        (225.0, 170.0, -135.0),  # Right eye right corner
        (-150.0, -150.0, -125.0),  # Left Mouth corner
        (150.0, -150.0, -125.0)  # Right mouth corner
    ])
    dist_coeffs = np.zeros((4, 1))
    image_points = np.array([
        student.face_points['nose'], student.face_points['left_eye'],
        student.face_points['right_eye'], student.face_points['mouth_left'],
        student.face_points['mouth_right']
    ],
                            dtype="double")
    (success, rotation_vector,
     translation_vector) = cv2.solvePnP(model_points,
                                        image_points,
                                        camera_matrix,
                                        dist_coeffs,
                                        flags=cv2.SOLVEPNP_UPNP)

    (nose_end_point2D,
     jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]),
                                   rotation_vector, translation_vector,
                                   camera_matrix, dist_coeffs)

    student.attention_points = ((int(image_points[0][0]),
                                 int(image_points[0][1])),
                                (int(nose_end_point2D[0][0][0]),
                                 int(nose_end_point2D[0][0][1])))

    # make a reference line
    vect = make_vect(student.attention_points[0], student.attention_points[1])

    d = get_magnitude(vect)

    student.reference_points = ((int(image_points[0][0]),
                                 int(image_points[0][1])),
                                (int(image_points[0][0] + d),
                                 int(image_points[0][1])))

    global debug
    if debug:
        global pinocchio
        global debug_dir
        global delimiter

        cv2.line(img, student.attention_points[0], student.attention_points[1],
                 (255, 0, 0), 2)
        cv2.line(img, student.reference_points[0], student.reference_points[1],
                 (0, 255, 0), 2)
        cv2.circle(img, student.face_points['nose'], 2, (0, 0, 255), 2)
        cv2.circle(img, student.face_points['left_eye'], 2, (0, 0, 255), 2)
        cv2.circle(img, student.face_points['right_eye'], 2, (0, 0, 255), 2)
        cv2.circle(img, student.face_points['mouth_left'], 2, (0, 0, 255), 2)
        cv2.circle(img, student.face_points['mouth_right'], 2, (0, 0, 255), 2)

        cv2.imwrite(
            debug_dir + delimiter + 'pinocchio-' + str(pinocchio) + '.jpg',
            img)
        pinocchio += 1
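make_vect and get_magnitude are used above but not defined in this excerpt; a minimal sketch of what such helpers could look like (the names come from the snippet, the bodies are assumptions):

import math

def make_vect(p0, p1):
    # Vector from point p0 to point p1, both given as (x, y) tuples
    return (p1[0] - p0[0], p1[1] - p0[1])

def get_magnitude(vect):
    # Euclidean length of a 2D vector
    return math.hypot(vect[0], vect[1])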