def rectifyPoint(self, uv_raw):
        """
        :param uv_raw:    pixel coordinates
        :type uv_raw:     (u, v)

        Applies the rectification specified by camera parameters
        :math:`K` and :math:`D` to point (u, v) and returns the
        pixel coordinates of the rectified point.
        """

        src = mkmat(1, 2, list(uv_raw))
        src.resize((1, 1, 2))
        dst = cv2.undistortPoints(src, self.K, self.D, R=self.R, P=self.P)
        return dst[0, 0]
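A minimal standalone sketch of the call pattern used above, with made-up intrinsics (fx = fy = 500, principal point at (320, 240)) and zero distortion; passing P returns pixel coordinates, omitting it returns normalized image coordinates:

import numpy as np
import cv2

K = np.array([[500., 0., 320.],
              [0., 500., 240.],
              [0., 0., 1.]])
D = np.zeros(5)
pt = np.array([[[420., 240.]]], dtype=np.float32)  # shape (1, 1, 2)

print(cv2.undistortPoints(pt, K, D, P=K))  # ~[[[420., 240.]]]  pixel coordinates
print(cv2.undistortPoints(pt, K, D))       # ~[[[0.2, 0.]]]     normalized coordinates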
Example #2
    def check_calibration(self, calibration):
        """
        Check calibration quality by computing average reprojection error.

        First, undistort detected points and compute epilines for each side.
        Then compute the error between the computed epipolar lines and the
        position of the points detected on the other side for each point and
        return the average error.
        """
        sides = "left", "right"
        which_image = {sides[0]: 1, sides[1]: 2}
        undistorted, lines = {}, {}
        for side in sides:
            undistorted[side] = cv2.undistortPoints(
                np.concatenate(self.image_points[side]).reshape(-1, 1, 2),
                calibration.cam_mats[side],
                calibration.dist_coefs[side],
                P=calibration.cam_mats[side])
            lines[side] = cv2.computeCorrespondEpilines(undistorted[side],
                                                        which_image[side],
                                                        calibration.f_mat)
        total_error = 0
        this_side, other_side = sides
        for side in sides:
            for i in range(len(undistorted[side])):
                total_error += abs(undistorted[this_side][i][0][0] *
                                   lines[other_side][i][0][0] +
                                   undistorted[this_side][i][0][1] *
                                   lines[other_side][i][0][1] +
                                   lines[other_side][i][0][2])
            other_side, this_side = sides
        total_points = self.image_count * len(self.object_points)
        return total_error / total_points
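For reference, the error term summed above is a point-to-line distance: cv2.computeCorrespondEpilines returns each line (a, b, c) normalized so that a^2 + b^2 = 1, so |a*u + b*v + c| is the perpendicular distance of the point (u, v) from its epipolar line. A tiny self-contained sketch:

def point_to_epiline_error(pt, line):
    # line = (a, b, c) with a*a + b*b == 1, as returned by
    # cv2.computeCorrespondEpilines; the absolute value below is the
    # perpendicular distance from the point to the epipolar line.
    u, v = pt
    a, b, c = line
    return abs(a * u + b * v + c)

print(point_to_epiline_error((10.0, 20.0), (0.6, 0.8, -22.0)))  # 0.0 -> point lies on the line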
Example #3
File: methods.py Project: IroneP/pupil
def undistort_unproject_pts(pts_uv, camera_matrix, dist_coefs):
    """
    This function converts a set of 2D image coordinates to the spherical coordinate system.
    Hereby the intrinsics of the camera are taken into account.
    The 2d point set gets undistorted, converted to cartesian vertices and then converted to spherical coordinates.

    @return: ndarray with shape=(n, 3)

    """
    pts_uv = np.array(pts_uv)
    camera_matrix_inv = np.linalg.inv(camera_matrix)
    num_pts = pts_uv.size // 2

    pts_uv.shape = (num_pts, 1, 2)
    pts_uv = cv2.undistortPoints(pts_uv, camera_matrix, dist_coefs, P=camera_matrix)
    # return pts_uv
    # P = camera_matrix enables denormalization as follows:
    # ```
    # pts_uv *= np.array([camera_matrix[0,0], camera_matrix[1,1]]) # [fx, fy]
    # pts_uv += np.array([camera_matrix[0,2], camera_matrix[1,2]]) # [cx, cy]
    # ```

    pts_h = cv2.convertPointsToHomogeneous(np.float32(pts_uv))
    pts_h.shape = (num_pts,3)

    xyz = np.zeros((num_pts, 3), dtype=np.float32)
    for i in range(num_pts):
        xyz[i]   = camera_matrix_inv.dot(pts_h[i])
    return xyz
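A hedged usage sketch for the function above, assuming it is in scope and using made-up pinhole intrinsics (fx = fy = 500, principal point (320, 240), no distortion):

import numpy as np

K = np.array([[500., 0., 320.],
              [0., 500., 240.],
              [0., 0., 1.]])
D = np.zeros(5)

rays = undistort_unproject_pts([[320., 240.], [820., 240.]], K, D)
print(rays)
# ~[[0., 0., 1.],   the principal point maps onto the optical axis
#   [1., 0., 1.]]   a point one focal length off-centre in x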
Example #4
 def rectifyPoint(self, uv_raw):
     src = mkmat(1, 2, list(uv_raw))
     src.resize((1, 1, 2))
     dst = src.copy()
     res = cv2.undistortPoints(src, self.K, self.D, dst, self.R, self.P)
     print(res)
     return [float(res[0,0,0]), float(res[0,0,1])]
Example #5
File: types.py Project: htmlfusion/OpenSfM
 def pixel_bearing(self, pixel):
     """Unit vector pointing to the pixel viewing direction."""
     point = np.asarray(pixel).reshape((1, 1, 2))
     distortion = np.array([self.k1, self.k2, 0., 0.])
     x, y = cv2.undistortPoints(point, self.get_K(), distortion).flat
     l = np.sqrt(x * x + y * y + 1.0)
     return np.array([x / l, y / l, 1.0 / l])
Example #6
def cam_to_world_point(K,dist,M,u,v,z=0):
	p = np.zeros((1,1,2), dtype=np.float32)
	p[0,0,0] = v
	p[0,0,1] = u
	p = cv2.undistortPoints(p, K, dist, P=K)
	v = p[0,0,0]
	u = p[0,0,1]

	m00 = M[0,0]
	m01 = M[0,1]
	m02 = M[0,2]
	m03 = M[0,3]
	m10 = M[1,0]
	m11 = M[1,1]
	m12 = M[1,2]
	m13 = M[1,3]
	m20 = M[2,0]
	m21 = M[2,1]
	m22 = M[2,2]
	m23 = M[2,3]
	x =  (m01*m13 - m03*m11 + m11*m23*u - m13*m21*u - m01*m23*v + m03*m21*v + m01*m12*m23*z - m01*m13*m22*z - m02*m11*m23*z + m02*m13*m21*z + m03*m11*m22*z - m03*m12*m21*z)
	y = -(m00*m13 - m03*m10 + m10*m23*u - m13*m20*u - m00*m23*v + m03*m20*v + m00*m12*m23*z - m00*m13*m22*z - m02*m10*m23*z + m02*m13*m20*z + m03*m10*m22*z - m03*m12*m20*z)
	w =  (m00*m11 - m01*m10 + m10*m21*u - m11*m20*u - m00*m21*v + m01*m20*v - m00*m11*m22*z + m00*m12*m21*z + m01*m10*m22*z - m01*m12*m20*z - m02*m10*m21*z + m02*m11*m20*z)
	# return (x/w, y/w)
	return (0.1933 - x/w, -0.6107 + y/w)
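The long closed-form return above was clearly generated symbolically. For the common z = 0 case, an equivalent and shorter route (a sketch, assuming M is a 3x4 projection matrix K[R|t] and a conventional (x, y) pixel ordering rather than the swapped (v, u) used above) is to invert the homography that M induces on the plane:

import numpy as np
import cv2

def pixel_to_plane_z0(K, dist, M, x, y):
    # Undistort the pixel first, then invert the plane-to-image homography
    # formed by columns 0, 1 and 3 of M (valid for points with z == 0).
    p = cv2.undistortPoints(np.array([[[x, y]]], np.float32), K, dist, P=K)
    H = M[:, [0, 1, 3]]                       # 3x3 homography, plane -> image
    X = np.linalg.inv(H) @ np.array([p[0, 0, 0], p[0, 0, 1], 1.0])
    return X[0] / X[2], X[1] / X[2]           # world (x, y) on the z = 0 plane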
Example #7
def undistort(img, corners, profile_name):
    profile = load_calibration_profile(profile_name)
    img, corners = _uncrop(img, corners)
    if len(img.shape) == 3:
        # XXX Hack. Fix _uncrop!
        img = img[:, :, 1]

    if img.shape[:2] not in profile:
        raise ValueError('No calibration settings for input image size.')

    settings = profile[img.shape[:2]]
    height, width = img.shape[:2]

    # Undistort corners. OpenCV expects (x, y) and a 3D array.
    corners = np.array([np.fliplr(corners)])
    undist_corners = cv2.undistortPoints(corners, settings.camera_matrix,
                                         settings.distort_coeffs,
                                         P=settings.camera_matrix)
    undist_corners = np.fliplr(undist_corners[0])

    undist_img = cv2.undistort(img, settings.camera_matrix,
                               settings.distort_coeffs)

    log.debug('Undistorted image, shape={}, calibration_profile={}'.format(
              img.shape, profile_name))

    return undist_img, undist_corners
Example #8
File: camera.py Project: smidm/camera.py
    def undistort(self, distorted_image_coords, Kundistortion=None):
        """
        Remove distortion from image coordinates.

        :param distorted_image_coords: real image coordinates
        :type distorted_image_coords: numpy.ndarray, shape=(2, n)
        :param Kundistortion: camera matrix for undistorted view, None for self.K
        :type Kundistortion: array-like, shape=(3, 3)
        :return: linear image coordinates
        :rtype: numpy.ndarray, shape=(2, n)
        """
        assert distorted_image_coords.shape[0] == 2
        assert distorted_image_coords.ndim == 2
        if Kundistortion is None:
            Kundistortion = self.K
        if self.calibration_type == 'division':
            A = self.get_A(Kundistortion)
            Ainv = np.linalg.inv(A)
            undistorted_image_coords = p2e(A.dot(e2p(self._undistort_division(p2e(Ainv.dot(e2p(distorted_image_coords)))))))
        elif self.calibration_type == 'opencv':
            undistorted_image_coords = cv2.undistortPoints(distorted_image_coords.T.reshape((1, -1, 2)),
                                                           self.K, self.opencv_dist_coeff,
                                                           P=Kundistortion).reshape(-1, 2).T
        elif self.calibration_type == 'opencv_fisheye':
            undistorted_image_coords = cv2.fisheye.undistortPoints(distorted_image_coords.T.reshape((1, -1, 2)),
                                                                   self.K, self.opencv_dist_coeff,
                                                                   P=Kundistortion).reshape(-1, 2).T
        else:
            warn('undistortion not implemented')
            undistorted_image_coords = distorted_image_coords
        assert undistorted_image_coords.shape[0] == 2
        assert undistorted_image_coords.ndim == 2
        return undistorted_image_coords
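The reshape round-trip used in the 'opencv' branch, written out standalone with made-up intrinsics (points go in and come out as a 2xn array):

import numpy as np
import cv2

K = np.array([[500., 0., 320.],
              [0., 500., 240.],
              [0., 0., 1.]])
dist = np.zeros(5)
pts = np.array([[100.0, 200.0, 300.0],   # x row
                [ 50.0, 150.0, 250.0]])  # y row, shape (2, n)

# 2xn -> 1xnx2 for cv2.undistortPoints, then back to 2xn.
out = cv2.undistortPoints(pts.T.reshape((1, -1, 2)), K, dist, P=K).reshape(-1, 2).T
print(out.shape)  # (2, 3)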
Example #9
	def ray_from_pixel(self, pixel, cam_extrin):
		"""
		Returns the light ray pixel is capturing on camera.

		Args:
			pixel: (x,y), first col then row
			cam_extrin: Extrinsics
		Returns:
			pt3d: 3x1 numpy array, the 3D location of camera center
			ray_vec: 3x1 numpy array, a unit vector pointing in the direction of 
					 the ray
			(pt3d + a * ray_vec, where a is a scalar, gives a point on the ray)
		"""
		# Map pixel to undistorted pixel location
		dist_loc = np.zeros((1,1,2), dtype=np.float32)
		dist_loc[0,0,0] = pixel[0]
		dist_loc[0,0,1] = pixel[1]
		nodist_loc = cv2.undistortPoints(dist_loc, self.intrinsics.intri_mat, \
										self.get_opencv_dist_coeffs()) #1x1x2 np array
		nodist_loc[0,0,0] = nodist_loc[0,0,0] * self.intrinsics.fx() \
							 + self.intrinsics.cx()
		nodist_loc[0,0,1] = nodist_loc[0,0,1] * self.intrinsics.fy() \
							 + self.intrinsics.cy()

		# Camera center is at -Rt from extrinsics
		pt3d = cam_extrin.get_inv_location()

		# Calculate ray from the undistorted pixel using intrinsics
		a = (nodist_loc[0,0,0] - self.intrinsics.cx()) / self.intrinsics.fx()
		b = (nodist_loc[0,0,1] - self.intrinsics.cy()) / self.intrinsics.fy()
		ray_pt = cam_extrin.rot_mat.T.dot(np.array([a,b,1]).reshape(3,1)-cam_extrin.trans_vec)
		ray_vec = ray_pt - pt3d
		return pt3d, ray_vec
Example #10
    def unprojectPoints(self, pts_2d, use_distortion=True, normalize=False):
        """
        Undistorts points according to the camera model.
        :param pts_2d, shape: Nx2
        :return: Array of unprojected 3d points, shape: Nx3
        """
        pts_2d = np.array(pts_2d, dtype=np.float32)

        # Delete any possibly wrong 3rd dimension
        if pts_2d.ndim == 1 or pts_2d.ndim == 3:
            pts_2d = pts_2d.reshape((-1, 2))

        # Add third dimension the way cv2 wants it
        if pts_2d.ndim == 2:
            pts_2d = pts_2d.reshape((-1, 1, 2))

        if use_distortion:
            _D = self.D
        else:
            _D = np.asarray([[0.0, 0.0, 0.0, 0.0, 0.0]])

        pts_2d_undist = cv2.undistortPoints(pts_2d, self.K, _D)

        pts_3d = cv2.convertPointsToHomogeneous(pts_2d_undist)
        pts_3d.shape = -1, 3

        if normalize:
            pts_3d /= np.linalg.norm(pts_3d, axis=1)[:, np.newaxis]

        return pts_3d
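The same pipeline as unprojectPoints above, written out flat with made-up intrinsics to show what normalize=True yields (unit-length bearing vectors):

import numpy as np
import cv2

K = np.array([[500., 0., 320.],
              [0., 500., 240.],
              [0., 0., 1.]])
D = np.zeros((1, 5))
pts = np.array([[320., 240.], [820., 540.]], dtype=np.float32).reshape(-1, 1, 2)

pts_3d = cv2.convertPointsToHomogeneous(cv2.undistortPoints(pts, K, D)).reshape(-1, 3)
pts_3d /= np.linalg.norm(pts_3d, axis=1)[:, np.newaxis]  # the normalize=True branch
print(np.linalg.norm(pts_3d, axis=1))                     # ~[1., 1.]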
Example #11
    def build_correspondance(self, visible_markers,camera_calibration,min_marker_perimeter):
        """
        - use all visible markers
        - fit a convex quadrangle around it
        - use quadrangle verts to establish perspective transform
        - map all markers into surface space
        - build up list of found markers and their uv coords
        """

        all_verts = [m['verts'] for m in visible_markers if m['perimeter']>=min_marker_perimeter]
        if not all_verts:
            return
        all_verts = np.array(all_verts)
        all_verts.shape = (-1,1,2) # [vert,vert,vert,vert,vert...] with vert = [[r,c]]
        # all_verts_undistorted_normalized centered in img center flipped in y and range [-1,1]
        all_verts_undistorted_normalized = cv2.undistortPoints(all_verts, camera_calibration['camera_matrix'],camera_calibration['dist_coefs'])
        hull = cv2.convexHull(all_verts_undistorted_normalized,clockwise=False)

        #simplify until we have exactly 4 verts
        if hull.shape[0]>4:
            new_hull = cv2.approxPolyDP(hull,epsilon=1,closed=True)
            if new_hull.shape[0]>=4:
                hull = new_hull
        if hull.shape[0]>4:
            curvature = abs(GetAnglesPolyline(hull,closed=True))
            most_acute_4_threshold = sorted(curvature)[3]
            hull = hull[curvature<=most_acute_4_threshold]


        # all_verts_undistorted_normalized space is flipped in y.
        # we need to change the order of the hull vertices
        hull = hull[[1,0,3,2],:,:]

        # now we need to roll the hull verts until we have the right orientation:
        # all_verts_undistorted_normalized space has its origin at the image center.
        # adding 1 to the coordinates puts the origin at the top left.
        distance_to_top_left = np.sqrt((hull[:,:,0]+1)**2+(hull[:,:,1]+1)**2)
        bot_left_idx = np.argmin(distance_to_top_left)+1
        hull = np.roll(hull,-bot_left_idx,axis=0)

        #based on these 4 verts we calculate the transformations into a 0,0 1,1 square space
        m_from_undistored_norm_space = m_verts_from_screen(hull)
        self.detected = True
        # map the markers vertices in to the surface space (one can think of these as texture coordinates u,v)
        marker_uv_coords =  cv2.perspectiveTransform(all_verts_undistorted_normalized,m_from_undistored_norm_space)
        marker_uv_coords.shape = (-1,4,1,2) #[marker,marker...] marker = [ [[r,c]],[[r,c]] ]

        # build up a dict of discovered markers. Each with a history of uv coordinates
        for m,uv in zip (visible_markers,marker_uv_coords):
            try:
                self.markers[m['id']].add_uv_coords(uv)
            except KeyError:
                self.markers[m['id']] = Support_Marker(m['id'])
                self.markers[m['id']].add_uv_coords(uv)

        #average collection of uv correspondences across detected markers
        self.build_up_status = sum([len(m.collected_uv_coords) for m in self.markers.values()])/float(len(self.markers))

        if self.build_up_status >= self.required_build_up:
            self.finalize_correnspondance()
Example #12
    def undistortPoints(self, points):
        s = self.img.shape
        cam = self.coeffs['cameraMatrix']
        d = self.coeffs['distortionCoeffs']
 
        (newCameraMatrix, _) = cv2.getOptimalNewCameraMatrix(cam, 
                                    d, (s[1], s[0]), 1, 
                                    (s[1], s[0]))
        return cv2.undistortPoints(points,cam, d, P=newCameraMatrix)
Example #13
    def undistort_points(self, src):
        """
        :param src: N source pixel points (u,v) as an Nx2 matrix
        :type src: :class:`cvMat`

        Apply the post-calibration undistortion to the source points
        """

        return cv2.undistortPoints(src, self.intrinsics, self.distortion, R = self.R, P = self.P)
Example #14
    def find_main_marker(self, visible_markers):
        marker = None
        current_score = 0
        # the larger dist_coeff is, the less distorted main_marker will be
        dist_coeff = 5

        for m in visible_markers:
            verts = m['verts']
            size = (distance(verts[0], verts[1]) + distance(verts[1], verts[2]) + distance(verts[2], verts[3]) + distance(verts[3], verts[0])) / 4.

            vertical_distord = min(distance(verts[0], verts[1])/float(distance(verts[3], verts[2])),distance(verts[3], verts[2])/float(distance(verts[0], verts[1])))
            vertical_distord = vertical_distord * vertical_distord

            horizontal_distord = min(distance(verts[0], verts[3])/float(distance(verts[1], verts[2])),distance(verts[1], verts[2])/float(distance(verts[0], verts[3])))
            horizontal_distord = horizontal_distord * horizontal_distord
            distord_res = float(vertical_distord + horizontal_distord)/2
            distord_res = np.power(distord_res, dist_coeff)

            score = size * distord_res
            if score > current_score:
                current_score = score
                marker = m

        if marker is not None:
            objp = gen_square_pattern_grid(marker['height'])
            # Find the rotation and translation vectors.

            if not self.show_undistord:
                verts = cv2.undistortPoints( marker['verts'], self.camera_intrinsics[0], self.camera_intrinsics[1], P=self.camera_intrinsics[0] )
            else:
                verts = np.zeros((4,1,2), dtype = np.float64)
                for i in range(4):
                    verts[i][0][0] = marker['verts'][i][0][0] + float(self.roi[0])
                    verts[i][0][1] = marker['verts'][i][0][1] + float(self.roi[1])

                verts = distortPoints(verts, self.camera_intrinsics[0], self.camera_intrinsics[1], new_cm=self.camera_intrinsics[4])
                verts = cv2.undistortPoints( verts, self.camera_intrinsics[0], self.camera_intrinsics[1], P=self.camera_intrinsics[0] )

            #deduce coordinate of the camera
            _, rvecs, tvecs = cv2.solvePnP(objp, verts, self.camera_intrinsics[0], None) # Already undistorted, no need to pass dist coeffs
            self.camera_coord = self.get_camera_coordinate(rvecs, tvecs)


        return marker
Example #15
File: types.py Project: htmlfusion/OpenSfM
 def pixel_bearings(self, pixels):
     """Unit vector pointing to the pixel viewing directions."""
     points = pixels.reshape((-1, 1, 2)).astype(np.float64)
     distortion = np.array([self.k1, self.k2, 0., 0.])
     up = cv2.undistortPoints(points, self.get_K(), distortion)
     up = up.reshape((-1, 2))
     x = up[:, 0]
     y = up[:, 1]
     l = np.sqrt(x * x + y * y + 1.0)
     return np.column_stack((x / l, y / l, 1.0 / l))
Example #16
 def normalized_points(self):
     """
     Return the normalized (noisy) image points of this camera.
     """
     if not self.dist_coeffs[0]:  # if no distortion, take a shortcut
         u = np.array(self.points_2D)
         u[:, 0] -= self.c[0]
         u[:, 1] -= self.c[1]
         return u / self.f
     return cv2.undistortPoints(np.array([self.points_2D]), self.K, self.dist_coeffs)[0]
Example #17
    def undistort(self, line):
        ## resize array to get into correct shape for cv2.undistortPoints
        line = line.copy()
        nb = line.shape[0]
        line.resize(nb, 1, 2)

        ## undistort and normalize
        ud = cv2.undistortPoints(line, self._cm, self._dc)
        ud.shape = (nb, 2)
        ud = np.hstack((ud, np.ones((nb, 1))))
        return ud
Example #18
def undistortData(points, K, d):

    points = np.array(points, dtype='float32').reshape((-1, 1, 2))
    points = cv2.undistortPoints(
        src=points, cameraMatrix=K, distCoeffs=d, P=K).tolist()

    points_ = []
    for p in points:
        points_.append(p[0])

    return points_
Example #19
 def undistort_points(self, scanline):
     length = len(scanline)
     # Create temp array
     temp = np.zeros((length,1,2), dtype=np.float32)
     # Copy scanline into temp array
     for i in range(length):
         temp[i][0][0] = i
         temp[i][0][1] = scanline[i]
     # Undistort and reproject points to pixel values by setting
     # P = camera_matrix
     ud = cv2.undistortPoints(temp, self.cm, self.dc, P=self.cm)
     return ud
Example #20
 def add_marker(self,marker,visible_markers,camera_calibration,min_marker_perimeter):
     '''
     add marker to surface.
     '''
     res = self._get_location(visible_markers,camera_calibration,min_marker_perimeter,locate_3d=False)
     if res['detected']:
         support_marker = Support_Marker(marker['id'])
         marker_verts = np.array(marker['verts'])
         marker_verts.shape = (-1,1,2)
         marker_verts_undistorted_normalized = cv2.undistortPoints(marker_verts, camera_calibration['camera_matrix'],camera_calibration['dist_coefs'])
         marker_uv_coords =  cv2.perspectiveTransform(marker_verts_undistorted_normalized,res['m_from_undistored_norm_space'])
         support_marker.load_uv_coords(marker_uv_coords)
         self.markers[marker['id']] = support_marker
Example #21
def angle(xpix,ypix, camera_matrix, dist_coeff):
	horizontalangleofview=84.0
	resolutionx=1280
	resolutiony=800
	degperpixel=float(horizontalangleofview/resolutionx)
	test2 = np.zeros((1,1,2), dtype=np.float32)
	test2[0]=[xpix,ypix]
	newpoint2 = cv2.undistortPoints(test2, camera_matrix, dist_coeff, P=camera_matrix)
	x = newpoint2[0][0][0]
	y = newpoint2[0][0][1]
	distance=x-640
	angle=distance*degperpixel
	return angle
Example #22
def _apply_geometric_correction(params, points):
    # Undistort
    camera_matrix = numpy.array([[params['fx'], 0, params['cx']],
                                 [0, params['fy'], params['cy']],
                                 [0, 0, 1]])
    dist_coeffs = numpy.array(
        [params['k1'], params['k2'], params['p1'], params['p2'], params['k3']])
    points = cv2.undistortPoints(points, camera_matrix, dist_coeffs)

    # Apply perspective transformation:
    mat = numpy.array([[params['ihm11'], params['ihm21'], params['ihm31']],
                       [params['ihm12'], params['ihm22'], params['ihm32']],
                       [params['ihm13'], params['ihm23'], params['ihm33']]])
    return cv2.perspectiveTransform(points, numpy.linalg.inv(mat))[:, 0, :]
Example #23
def pixel_position_matt(xpos, ypos, height, pitch, roll, yaw, C):
    '''
    find the offset on the ground in meters of a pixel in a ground image
    given height above the ground in meters, and pitch/roll/yaw in degrees, the
    lens and image parameters

    The xpos,ypos is from the top-left of the image
    The height is in meters
    
    The yaw is from grid north. Positive yaw is clockwise
    The roll is from horizontal. Positive roll is down on the right
    The pitch is from horizontal. Positive pitch is up in the front
    
    return result is a tuple, with meters east and north of current GPS position

    '''
    from numpy import array,eye, zeros, uint64
    from cuav.uav.uav import uavxfer
    from math import pi
    import math
    import numpy
  
    xfer = uavxfer()
    xfer.setCameraMatrix(C.K)
    xfer.setCameraOrientation( 0.0, 0.0, pi/2 )
    xfer.setFlatEarth(0);
    xfer.setPlatformPose(0, 0, -height, math.radians(roll), math.radians(pitch), math.radians(yaw))

    # compute the undistorted points for the ideal camera matrix
    #src = numpy.zeros((1,1,2), numpy.uint64) #cv.CreateMat(1, 1, cv.CV_64FC2)
    src = numpy.zeros((1,1, 2), numpy.float32)
    src[0,0] = (xpos, ypos)
    #dst = cv.CreateMat(1, 1, cv.CV_64FC2)
    R = eye(3)
    K = C.K
    D = C.D
    dst = cv2.undistortPoints(src, K, D, R=R, P=K)
    x = dst[0,0][0]
    y = dst[0,0][1]
    #print '(', xpos,',', ypos,') -> (', x, ',', y, ')'
    # negative scale means camera pointing above horizon
    # large scale means a long way away also unreliable
    (joe_w, scale) = xfer.imageToWorld(x, y)
    if (scale < 0 or scale > 500):
      return None

    #(te, tn) = pixel_position_tridge(xpos, ypos, height, pitch, roll, yaw, lens, sensorwidth, xresolution, yresolution)
    #diff = (te-joe_w[1], tn-joe_w[0])
    #print 'diff: ', diff

    # east and north
    return (joe_w[1], joe_w[0])
Example #24
def calculate_calibration_params(frame):
    """
    Given an image of a chessboard (as generated by VIDEO) calculates the
    camera parameters to flatten the image.  Returns a dictionary of string to
    float with the keys:

    * fx, fy, cx, cy - The parameters of the camera matrix
    * k1, k2, p1, p2, k3 - The distortion parameters
    * ihm11, ihm12, ihm13, ihm21, ihm22, ihm23, ihm31, ihm32, ihm33 - The
      inverse homography projection matrix.

    If a chessboard cannot be found in frame this raises `NoChessboardError`.
    """
    ideal, corners = _find_chessboard(frame)
    resolution = (frame.shape[1], frame.shape[0])

    out = {}

    # Calculate distortion:
    ideal_3d = numpy.array([[[x, y, 0]] for x, y in ideal],
                           dtype=numpy.float32)
    _, cameraMatrix, distCoeffs, _, _ = cv2.calibrateCamera(
        [ideal_3d], [corners], resolution, None, None)

    out.update({
        'fx': cameraMatrix[0, 0],
        'fy': cameraMatrix[1, 1],
        'cx': cameraMatrix[0, 2],
        'cy': cameraMatrix[1, 2],

        'k1': distCoeffs[0, 0],
        'k2': distCoeffs[0, 1],
        'p1': distCoeffs[0, 2],
        'p2': distCoeffs[0, 3],
        'k3': distCoeffs[0, 4],
    })

    # Calculate perspective:
    undistorted_corners = cv2.undistortPoints(corners, cameraMatrix, distCoeffs)
    ideal_2d = numpy.array([[[x, y]] for x, y in ideal],
                           dtype=numpy.float32)
    mat, _ = cv2.findHomography(undistorted_corners, ideal_2d)
    ihm = numpy.linalg.inv(mat)

    for col in range(3):
        for row in range(3):
            out['ihm%i%i' % (col + 1, row + 1)] = ihm[row][col]

    return out
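A sketch of how the flattened keys fold back into matrices, e.g. for use with _apply_geometric_correction in Example #22 (assumes `params` is the dict returned above):

import numpy

camera_matrix = numpy.array([[params['fx'], 0, params['cx']],
                             [0, params['fy'], params['cy']],
                             [0, 0, 1]])
dist_coeffs = numpy.array([params['k1'], params['k2'],
                           params['p1'], params['p2'], params['k3']])
# 'ihm%i%i' is keyed column-first above, so swap the indices to rebuild ihm.
ihm = numpy.array([[params['ihm%i%i' % (col + 1, row + 1)] for col in range(3)]
                   for row in range(3)])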
Example #25
def pixel_bearings(p, camera):
    if camera.get('projection_type', 'perspective') in ['equirectangular', 'spherical']:
        lon = p[:, 0] * 2 * np.pi
        lat = -p[:, 1] * 2 * np.pi
        x = np.cos(lat) * np.sin(lon)
        y = -np.sin(lat)
        z = np.cos(lat) * np.cos(lon)
        return np.column_stack([x, y, z]).astype(float)
    else:
        points = p.reshape((-1, 1, 2))
        K = K_from_camera(camera)
        distortion = np.array([camera['k1'], camera['k2'], 0., 0.])
        up = cv2.undistortPoints(points, K, distortion).reshape((-1, 2))
        b = homogeneous(up)
        return b / np.linalg.norm(b, axis=1)[:, np.newaxis]
Example #26
def findCameraCoords(point):  # M_K, depth_map, point):
    # extract point depth from map
    # transform the point from distorted to undistorted x,y coordinate
    #
    path = "../Git/METR4202_A2/"
    dist = np.load(path + "CalibrationImages/Caliboutput/dist.npy")
    M_K = np.load(path + "CalibrationImages/Caliboutput/mtx1.npy")
    print(M_K)
    print(dist)
    print(point)
    a = cv2.undistortPoints(point, M_K, dist)
    # cv2.undistortPoints(
    print(a)

    return (x, y, d)
Example #27
def pixel_bearing(p, camera):
    if camera.get('projection_type', 'perspective') in ['equirectangular', 'spherical']:
        lon = p[0] * 2 * np.pi
        lat = -p[1] * 2 * np.pi
        x = np.cos(lat) * np.sin(lon)
        y = -np.sin(lat)
        z = np.cos(lat) * np.cos(lon)
        return np.array([x, y, z])
    else:
        points = np.asarray(p).reshape((1, 1, 2))
        K = K_from_camera(camera)
        distortion = np.array([camera['k1'], camera['k2'], 0., 0.])
        x, y = cv2.undistortPoints(points, K, distortion).flat
        l = np.sqrt(x * x + y * y + 1)
        return np.array([x / l, y / l, 1.0 / l])
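A quick sketch of the equirectangular branch above (assuming pixel_bearing is in scope): the bearing comes straight from longitude/latitude, so it is already unit length.

import numpy as np

cam = {'projection_type': 'equirectangular'}
b = pixel_bearing(np.array([0.25, 0.0]), cam)
print(b, np.linalg.norm(b))  # ~[1., 0., 0.]  1.0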
Example #28
def undistort_chessboard_corners(corners, intrinsics):
    '''
    Conducts undistortion of the observed coordinates of the chessboard 
    corners based on provided camera's intrinsic parameters
    
    Arguments:
    corners -- a list of (found, corners) tuples as returned by the
               cv2.findChessboardCorners function
    intrinsics -- a tuple (camera_matrix, dist_coefs) containing the
                  intrinsic parameters of the camera: the camera matrix
                  and the distortion coefficients
    '''
    cm, dc = intrinsics
    res = [(found, cv2.undistortPoints(matrix, cm, dc, P=cm)) for found, matrix in corners]
    return res
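A usage sketch, assuming `gray`, `camera_matrix` and `dist_coefs` already exist and the chessboard is actually found (the corner arrays are passed through exactly as cv2.findChessboardCorners returns them):

found, pts = cv2.findChessboardCorners(gray, (9, 6))
if found:
    (found, undistorted_pts), = undistort_chessboard_corners(
        [(found, pts)], (camera_matrix, dist_coefs))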
Example #29
 def project_centroid(self,centroid):
   # project image coordinates into ray from camera, intersect with ground plane
   point = np.zeros((1,1,2))
   point[0,0,0] = centroid[0]
   point[0,0,1] = centroid[1]
   rospy.logerr(np.asarray(self.K).reshape((3,3)))
   rect_point = cv2.undistortPoints(point,np.asarray(self.K).reshape((3,3)),np.asarray(self.D))
   rospy.logerr(rect_point)
   x = rect_point[0,0,0]
   y = rect_point[0,0,1]
   r = np.sqrt(x**2 + y**2)
   theta = np.arctan(r)
   phi = np.arctan2(y,x)
   #self.tf_listener.lookupTransform('/base_link',self.frame_id)
   self.tf_listener.transform('/base_link',self.frame_id)
Example #30
 def undistort_points(self, scanline):
     length = len(scanline)
     # Create temp array
     temp = np.zeros((length, 1, 2), dtype=np.float32)
     # Copy scanline into temp array
     for i in range(length):
         temp[i][0][0] = i
         temp[i][0][1] = scanline[i]
     # Undistort and reproject points to pixel values by setting
     # P = camera_matrix
     ud = cv2.undistortPoints(temp, self.cm, self.dc, P=self.cm)
     # Resize array to shape (-1, 2)
     ud.resize((length, 2))
     # Split array columnwise
     x, y = np.hsplit(ud, 2)
     return x, y
Example #31
def undistortPoint(x, y, camera_matrix, dist_coefs, newcameramtx):
    src = np.zeros(shape=(1, 1, 2))
    src[0][0][0] = x
    src[0][0][1] = y
    undistort_point = cv2.undistortPoints(src, camera_matrix, dist_coefs, P=newcameramtx)
    return undistort_point[0][0][0], undistort_point[0][0][1]
Example #32
    def _get_location(self,
                      visible_markers,
                      camera_calibration,
                      min_marker_perimeter,
                      min_id_confidence,
                      locate_3d=False):

        filtered_markers = [
            m for m in visible_markers
            if m['perimeter'] >= min_marker_perimeter
            and m['id_confidence'] > min_id_confidence
        ]
        marker_by_id = {}
        #if an id shows twice we use the bigger marker (usually this is a screen camera echo artifact.)
        for m in filtered_markers:
            if m["id"] in marker_by_id and m["perimeter"] < marker_by_id[
                    m['id']]['perimeter']:
                pass
            else:
                marker_by_id[m["id"]] = m

        visible_ids = set(marker_by_id.keys())
        requested_ids = set(self.markers.keys())
        overlap = visible_ids & requested_ids
        # need at least two markers per surface when the surface has more than 1 marker.
        if overlap and len(overlap) >= min(2, len(requested_ids)):
            detected = True
            xy = np.array([marker_by_id[i]['verts'] for i in overlap])
            uv = np.array([self.markers[i].uv_coords for i in overlap])
            uv.shape = (-1, 1, 2)

            # our camera lens creates distortions; we want a good 2d estimate despite that, so we:
            # compute the homography transform from marker into the undistorted normalized image space
            # (the line below is the same as what you find in methods.undistort_unproject_pts, except that we omit the z coord as it is always one.)
            xy_undistorted_normalized = cv2.undistortPoints(
                xy.reshape(-1, 1, 2), camera_calibration['camera_matrix'],
                camera_calibration['dist_coefs'] * self.use_distortion)
            m_to_undistored_norm_space, mask = cv2.findHomography(
                uv,
                xy_undistorted_normalized,
                method=cv2.RANSAC,
                ransacReprojThreshold=0.1)
            if not mask.all():
                detected = False
            m_from_undistored_norm_space, mask = cv2.findHomography(
                xy_undistorted_normalized, uv)
            # project the corners of the surface to undistorted space
            corners_undistored_space = cv2.perspectiveTransform(
                marker_corners_norm.reshape(-1, 1, 2),
                m_to_undistored_norm_space)
            # project and distort these points  and normalize them
            corners_redistorted, corners_redistorted_jacobian = cv2.projectPoints(
                cv2.convertPointsToHomogeneous(corners_undistored_space),
                np.array([0, 0, 0], dtype=np.float32),
                np.array([0, 0, 0], dtype=np.float32),
                camera_calibration['camera_matrix'],
                camera_calibration['dist_coefs'] * self.use_distortion)
            corners_nulldistorted, corners_nulldistorted_jacobian = cv2.projectPoints(
                cv2.convertPointsToHomogeneous(corners_undistored_space),
                np.array([0, 0, 0], dtype=np.float32),
                np.array([0, 0, 0], dtype=np.float32),
                camera_calibration['camera_matrix'],
                camera_calibration['dist_coefs'] * 0)

            #normalize to pupil norm space
            corners_redistorted.shape = -1, 2
            corners_redistorted /= camera_calibration['resolution']
            corners_redistorted[:, -1] = 1 - corners_redistorted[:, -1]

            #normalize to pupil norm space
            corners_nulldistorted.shape = -1, 2
            corners_nulldistorted /= camera_calibration['resolution']
            corners_nulldistorted[:, -1] = 1 - corners_nulldistorted[:, -1]

            # maps for extreme lens distortions will behave erratically beyond the image bounds
            # since our surfaces often extend beyond the screen we need to interpolate
            # between a distorted projection and an undistorted one.

            # def ratio(val):
            #     centered_val = abs(.5 - val)
            #     # signed distance to img cennter .5 is imag bound
            #     # we look to interpolate between .7 and .9
            #     inter = max()

            corners_robust = []
            for nulldist, redist in zip(corners_nulldistorted,
                                        corners_redistorted):
                if -.4 < nulldist[0] < 1.4 and -.4 < nulldist[1] < 1.4:
                    corners_robust.append(redist)
                else:
                    corners_robust.append(nulldist)

            corners_robust = np.array(corners_robust)

            if self.old_corners_robust is not None and np.mean(
                    np.abs(corners_robust - self.old_corners_robust)) < 0.02:
                smooth_corners_robust = self.old_corners_robust
                smooth_corners_robust += .5 * (corners_robust -
                                               self.old_corners_robust)

                corners_robust = smooth_corners_robust
                self.old_corners_robust = smooth_corners_robust
            else:
                self.old_corners_robust = corners_robust

            # compute a perspective transform from the marker norm space to the apparent image.
            # The surface corners will be at the right points.
            # However, the space between the corners may be distorted due to lens distortion.
            m_to_screen = m_verts_to_screen(corners_robust)
            m_from_screen = m_verts_from_screen(corners_robust)

            camera_pose_3d = None
            if locate_3d:
                dist_coef, = camera_calibration['dist_coefs']
                img_size = camera_calibration['resolution']
                K = camera_calibration['camera_matrix']

                # 3d marker support pose estimation:
                # scale normalized object points to world space units (think m,cm,mm)
                uv.shape = -1, 2
                uv *= [self.real_world_size['x'], self.real_world_size['y']]
                # convert object points to lie on z==0 plane in 3d space
                uv3d = np.zeros((uv.shape[0], uv.shape[1] + 1))
                uv3d[:, :-1] = uv
                xy.shape = -1, 1, 2
                # compute pose of object relative to camera center
                is3dPoseAvailable, rot3d_cam_to_object, translate3d_cam_to_object = cv2.solvePnP(
                    uv3d, xy, K, dist_coef, flags=cv2.SOLVEPNP_EPNP)

                if is3dPoseAvailable:

                    # not verified, potentially useful info: http://stackoverflow.com/questions/17423302/opencv-solvepnp-tvec-units-and-axes-directions

                    ### marker pose estimation from virtually projected points.
                    # object_pts = np.array([[[0,0],[0,1],[1,1],[1,0]]],dtype=np.float32)
                    # projected_pts = cv2.perspectiveTransform(object_pts,self.m_to_screen)
                    # projected_pts.shape = -1,2
                    # projected_pts *= img_size
                    # projected_pts.shape = -1, 1, 2
                    # # scale object points to world space units (think m,cm,mm)
                    # object_pts.shape = -1,2
                    # object_pts *= self.real_world_size
                    # # convert object points to lie on z==0 plane in 3d space
                    # object_pts_3d = np.zeros((4,3))
                    # object_pts_3d[:,:-1] = object_pts
                    # self.is3dPoseAvailable, rot3d_cam_to_object, translate3d_cam_to_object = cv2.solvePnP(object_pts_3d, projected_pts, K, dist_coef,flags=cv2.CV_EPNP)

                    # transformation from Camera Optical Center:
                    #   first: translate from Camera center to object origin.
                    #   second: rotate x,y,z
                    #   coordinate system is x,y,z where z goes out from the camera into the viewed volume.
                    # print rot3d_cam_to_object[0],rot3d_cam_to_object[1],rot3d_cam_to_object[2], translate3d_cam_to_object[0],translate3d_cam_to_object[1],translate3d_cam_to_object[2]

                    # turn the rotation vector into a 3x3 rotation matrix.
                    rot3d_cam_to_object_mat, _ = cv2.Rodrigues(
                        rot3d_cam_to_object)

                    #to get the transformation from object to camera we need to reverse rotation and translation
                    translate3d_object_to_cam = -translate3d_cam_to_object
                    # rotation matrix inverse == transpose
                    rot3d_object_to_cam_mat = rot3d_cam_to_object_mat.T

                    # we assume that the volume of the object grows out of the marker surface and not into it. We thus have to flip the z-Axis:
                    flip_z_axix_hm = np.eye(4, dtype=np.float32)
                    flip_z_axix_hm[2, 2] = -1
                    # create a homogeneous transformation matrix from the rotation mat
                    rot3d_object_to_cam_hm = np.eye(4, dtype=np.float32)
                    rot3d_object_to_cam_hm[:-1, :-1] = rot3d_object_to_cam_mat
                    # create a homogeneous transformation matrix from the translation vector
                    translate3d_object_to_cam_hm = np.eye(4, dtype=np.float32)
                    translate3d_object_to_cam_hm[:-1,
                                                 -1] = translate3d_object_to_cam.reshape(
                                                     3)

                    # combine all transformations into a transformation matrix that describes the move from object origin and orientation to camera origin and orientation
                    tranform3d_object_to_cam = np.matrix(
                        flip_z_axix_hm) * np.matrix(
                            rot3d_object_to_cam_hm) * np.matrix(
                                translate3d_object_to_cam_hm)
                    camera_pose_3d = tranform3d_object_to_cam
            if not detected:
                camera_pose_3d = None
                m_from_screen = None
                m_to_screen = None
                m_from_undistored_norm_space = None
                m_to_undistored_norm_space = None

        else:
            detected = False
            camera_pose_3d = None
            m_from_screen = None
            m_to_screen = None
            m_from_undistored_norm_space = None
            m_to_undistored_norm_space = None

        return {
            'detected': detected,
            'detected_markers': len(overlap),
            'm_from_undistored_norm_space': m_from_undistored_norm_space,
            'm_to_undistored_norm_space': m_to_undistored_norm_space,
            'm_from_screen': m_from_screen,
            'm_to_screen': m_to_screen,
            'camera_pose_3d': camera_pose_3d
        }
Example #33
    for i in range(len(im1_pts)):
        im1_pts[i, 0] = correspondences[0][i][0]
        im1_pts[i, 1] = correspondences[0][i][1]
        im2_pts[i, 0] = correspondences[1][i][0]
        im2_pts[i, 1] = correspondences[1][i][1]

    im = np.array(np.hstack((im1, im2)))

    # add an extra dimension to the front of the points array to be
    # compatible with the OpenCV API
    im1_pts_augmented = np.zeros((1, im1_pts.shape[0], im1_pts.shape[1]))
    im1_pts_augmented[0, :, :] = im1_pts
    im2_pts_augmented = np.zeros((1, im2_pts.shape[0], im2_pts.shape[1]))
    im2_pts_augmented[0, :, :] = im2_pts

    im1_pts_ud = cv2.undistortPoints(im1_pts_augmented, K, D)
    im2_pts_ud = cv2.undistortPoints(im2_pts_augmented, K, D)
    print(im1_pts_ud)

    # find the essential matrix using OpenCV
    # Note: since we are using undistorted points this gives us E rather than F (as the function name would imply)
    E, mask = cv2.findFundamentalMat(im1_pts_ud, im2_pts_ud, cv2.FM_RANSAC)
    F = np.linalg.inv(K.T).dot(E).dot(np.linalg.inv(K))
    print(F)
    cv2.imshow("MYWIN", im)
    cv2.setMouseCallback("MYWIN", mouse_event, im)
    while not rospy.is_shutdown():
        cv2.imshow("MYWIN", im)
        cv2.waitKey(50)
    cv2.destroyAllWindows()
Example #34
    def match(self, points):
        if len(points) != len(self._pattern):
            raise MatchError(
                "Matching pattern with incorrect number of points")

        # undistort points coordinates for better linear matching
        undist_points = cv2.undistortPoints(np.array(
            [points]), self._camera_mtx, self._camera_dist)[0] * 1000.0

        # indices of the convex hull points
        hull_i = cv2.convexHull(undist_points,
                                returnPoints=False,
                                clockwise=False)
        hull_i = [x[0] for x in hull_i]  # change array packing
        if len(hull_i) != len(self._hull_i):
            raise MatchError(
                "Cannot find convex hull with desired number of points")

        # find center point
        center_mass = utils.center_of_mass(
            np.float32([[undist_points[i]] for i in hull_i]))
        center_i = utils.find_in_prox(undist_points, center_mass,
                                      self._max_prox)
        if center_i is None:
            raise MatchError("Cannot find center of mass")

        # find rest of the points
        rest_i = list(
            set(range(len(self._pattern))) - set([center_i]) - set(hull_i))
        if len(rest_i) != len(self._pattern) - 1 - len(self._hull_i):
            raise MatchError("Incorrect number of rest points")

        # match points 10-13, 4-15, 1-14
        pairs_i = []
        for ri in rest_i:
            prediction = undist_points[center_i] + [2.0, 2.0] * (
                undist_points[ri] - undist_points[center_i])
            result = utils.find_in_prox(undist_points, prediction,
                                        self._max_prox)
            if result is None or result not in hull_i:
                raise MatchError("Cannot find relationship to rest points")
            pairs_i.append(result)

        # try to find the point #7 on the hull
        lonely_i = None
        for i in range(3):
            li = hull_i[(hull_i.index(pairs_i[i]) - 3) % len(hull_i)]
            if li not in pairs_i:
                lonely_i = li
                break
        if lonely_i is None:
            raise MatchError("Cannot locate lonely point on the hull")

        # rotate hull array knowing which is the lonely point
        rot_i = (hull_i.index(lonely_i) + len(hull_i) // 2) % len(hull_i)
        rot_hull_i = hull_i[rot_i:] + hull_i[:rot_i]

        # sort rest points having hull already sorted
        sort_rest_i = [None, None, None]
        sort_rest_i[0] = rest_i[pairs_i.index(rot_hull_i[9])]
        sort_rest_i[1] = rest_i[pairs_i.index(rot_hull_i[0])]
        sort_rest_i[2] = rest_i[pairs_i.index(rot_hull_i[3])]

        # merge final map
        match_map = [center_i] + rot_hull_i + sort_rest_i

        # check if projection matches pattern
        homography, mask = cv2.findHomography(
            np.float32([points[i] for i in match_map]), self._pattern,
            cv2.RANSAC, self._max_prox)
        if 0 in mask:
            raise MatchError("Reprojection error exceeds specified maximum")

        return match_map, homography
Example #35
    def build_correspondance(self, visible_markers, camera_calibration,
                             min_marker_perimeter, min_id_confidence):
        """
        - use all visible markers
        - fit a convex quadrangle around it
        - use quadrangle verts to establish perspective transform
        - map all markers into surface space
        - build up list of found markers and their uv coords
        """

        all_verts = [
            m['verts'] for m in visible_markers
            if m['perimeter'] >= min_marker_perimeter
        ]
        if not all_verts:
            return
        all_verts = np.array(all_verts, dtype=np.float32)
        all_verts.shape = (
            -1, 1, 2)  # [vert,vert,vert,vert,vert...] with vert = [[r,c]]
        # all_verts_undistorted_normalized centered in img center flipped in y and range [-1,1]
        all_verts_undistorted_normalized = cv2.undistortPoints(
            all_verts, camera_calibration['camera_matrix'],
            camera_calibration['dist_coefs'] * self.use_distortion)
        hull = cv2.convexHull(all_verts_undistorted_normalized,
                              clockwise=False)

        #simplify until we have exactly 4 verts
        if hull.shape[0] > 4:
            new_hull = cv2.approxPolyDP(hull, epsilon=1, closed=True)
            if new_hull.shape[0] >= 4:
                hull = new_hull
        if hull.shape[0] > 4:
            curvature = abs(GetAnglesPolyline(hull, closed=True))
            most_acute_4_threshold = sorted(curvature)[3]
            hull = hull[curvature <= most_acute_4_threshold]

        # all_verts_undistorted_normalized space is flipped in y.
        # we need to change the order of the hull vertices
        hull = hull[[1, 0, 3, 2], :, :]

        # now we need to roll the hull verts until we have the right orientation:
        # all_verts_undistorted_normalized space has its origin at the image center.
        # adding 1 to the coordinates puts the origin at the top left.
        distance_to_top_left = np.sqrt((hull[:, :, 0] + 1)**2 +
                                       (hull[:, :, 1] + 1)**2)
        bot_left_idx = np.argmin(distance_to_top_left) + 1
        hull = np.roll(hull, -bot_left_idx, axis=0)

        #based on these 4 verts we calculate the transformations into a 0,0 1,1 square space
        m_from_undistored_norm_space = m_verts_from_screen(hull)
        self.detected = True
        # map the markers vertices into the surface space (one can think of these as texture coordinates u,v)
        marker_uv_coords = cv2.perspectiveTransform(
            all_verts_undistorted_normalized, m_from_undistored_norm_space)
        marker_uv_coords.shape = (
            -1, 4, 1, 2)  #[marker,marker...] marker = [ [[r,c]],[[r,c]] ]

        # build up a dict of discovered markers. Each with a history of uv coordinates
        for m, uv in zip(visible_markers, marker_uv_coords):
            try:
                self.markers[m['id']].add_uv_coords(uv)
            except KeyError:
                self.markers[m['id']] = Support_Marker(m['id'])
                self.markers[m['id']].add_uv_coords(uv)

        #average collection of uv correspondences across detected markers
        self.build_up_status = sum(
            [len(m.collected_uv_coords)
             for m in self.markers.values()]) / float(len(self.markers))

        if self.build_up_status >= self.required_build_up:
            self.finalize_correnspondance()
Example #36
def odabirTocaka(event,x,y,flags,param):
    global tockaSlovo
    global slika
    global korekcijaUkljucena
    if event == cv2.EVENT_LBUTTONDOWN:
        slikaSiva = cv2.cvtColor(slikaOriginal, cv2.COLOR_BGR2GRAY)
        if pomocOdabir or korekcijaUkljucena:
            if korekcijaUkljucena and (listaKoordinata):     
                for i in range(len(listaKoordinata)):
                    xk,yk = listaKoordinata[i]
                    if x in range(int(round(xk))-10,int(round(xk))+10) and y in range(int(round(yk))-10,int(round(yk))+10):
                        x = int(round(xk))
                        y = int(round(yk))
                        indexKorekcije = i
                        slika = slikaOriginal.copy()
                        cv2.circle(slika,(int(round(x)),int(round(y))),1,[0,255,0],1)
                        break
            koordinate = prikaziUvecano([x,y])

            
            if koordinate:
                if korekcijaUkljucena:
                    listaKoordinata[indexKorekcije] = koordinate
                    tockaSlovo = 65
                    slika = slikaOriginal.copy()
                    for i in range(len(listaKoordinata)):
                        x,y = listaKoordinata[i]
                        cv2.circle(slika,(int(round(x)),int(round(y))),1,[0,255,0],1)
                        tockaSlovo += 1
                    korekcijaUkljucena = 0

                else:
                    listaKoordinata.append(koordinate)
                    cv2.circle(slika,(int(round(koordinate[0])),int(round(koordinate[1]))),1,(0,255,0),1)

                    # FOR THE IMAGE
                    tockaMatrica = np.array([[[x,y]]],dtype = np.float32)
                    matricaKamere = pickle.load(open( folder+"matricaKamere.dat", "rb" ))
                    koeficijentiIzoblicenja = pickle.load(open( folder+"koeficijentiIzoblicenja.dat", "rb" ))
                    tockaPopravljeneMatrica = cv2.undistortPoints(tockaMatrica,matricaKamere,koeficijentiIzoblicenja,P=matricaKamere)
                    koorBez = int(round(float( tockaPopravljeneMatrica[0][0][0]))),int(round(float(tockaPopravljeneMatrica[0][0][1])))
                    cv2.circle(slika,koorBez,1,(0,255,255),1)
                    # FOR THE IMAGE
                    
                    #cv2.circle(slika,(int(round(koordinate[0])),int(round(koordinate[1]))),5,(0,255,0),1)
                    #cv2.line(slika,(0,int(round(koordinate[1]))),(slika.shape[1],int(round(koordinate[1]))),(0,255,100),1)
                    #cv2.putText(slika,chr(tockaSlovo),(int(round(koordinate[0]))+5,int(round(koordinate[1]))+5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0),5,cv2.CV_AA)
                    #cv2.putText(slika,chr(tockaSlovo),(int(round(koordinate[0]))+4,int(round(koordinate[1]))+4),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255),1)
                    tockaSlovo += 1
 
        else:
            listaKoordinata.append([x,y])
            cv2.circle(slika,(x,y),1,(0,255,0),1)
            cv2.circle(slika,(x,y),5,(0,255,0),4)

            # FOR THE IMAGE
            tockaMatrica = np.array([[[x,y]]],dtype = np.float32)
            matricaKamere = pickle.load(open( folder+"matricaKamere.dat", "rb" ))
            koeficijentiIzoblicenja = pickle.load(open( folder+"koeficijentiIzoblicenja.dat", "rb" ))
            tockaPopravljeneMatrica = cv2.undistortPoints(tockaMatrica,matricaKamere,koeficijentiIzoblicenja,P=matricaKamere)
            koorBez = int(round(float( tockaPopravljeneMatrica[0][0][0]))),int(round(float(tockaPopravljeneMatrica[0][0][1])))
            if koorBez[0]>0 and koorBez[1]>0:
                #print 'Sa:',(x,y),'Bez:',koorBez
                cv2.circle(slika,koorBez,1,(0,255,255),1)
                cv2.circle(slika,koorBez,5,(0,255,255),4)
            else:
                pass
                #print 'Sa:',(x,y),'Bez:',koorBez
            # FOR THE IMAGE
            
            #cv2.line(slika,(0,y),(slika.shape[1],y),(0,255,100),1)
            #cv2.putText(slika,chr(tockaSlovo),(x+1,y+1),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0),5,cv2.CV_AA)
            #cv2.putText(slika,chr(tockaSlovo),(x,y),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255),1)
            tockaSlovo += 1
Example #37
def distortionAdjust(x, y):
    src = np.array([[[x, y]]], dtype=np.float32)
    p = cv2.undistortPoints(src, mtx, dist_coeffs)[0][0]
    center = p
    return center
Example #38
 def calibrate_points(self, points):
     return cv2.undistortPoints(np.array([points]),
                                self.camera_matrix,
                                self.dist_coeffs,
                                P=self.new_camera_matrix)[0]
Example #39
def solve_pnp(ps, Pws, K, dist, method='xyz'):
    """
    Parameters:
    ------
    ps: numpy.array (shape: [n, 2])
        2D positions of markers in pixel
    Pws: 
        3D positions of markers in world frame
    K:
        calibration matrix
    dist:
        distortion parameters
    method:
        one of ['xyzyaw', 'xyz', 'xy', 'x', 'y', 'zx']
    """
    R_test = np.array([
        [0.0, 1.0, 0.0],
        [1.0, 0.0, 0.0],
        [0.0, 0.0, 1.0]
    ])
    R = np.zeros((3, 3))
    T = np.zeros((3, 1))
    ps_undistort = cv2.undistortPoints(np.array([ps]).astype('float32'), K, dist, P=K)[0]
    Kinv = np.linalg.inv(K)
    if method=='xyzyaw':
        u1 = np.hstack([ps_undistort[0], 1]).reshape([-1, 1])
        u2 = np.hstack([ps_undistort[1], 1]).reshape([-1, 1])
        d_theta = np.array([
            [0, -1, 0], 
            [1, 0, 0],
            [0, 0, 0]
        ])
        A = np.block([
            [np.eye(3), np.dot(d_theta, Pws[0].reshape([-1, 1])), -np.dot(Kinv, u1), np.zeros((3, 1))], 
            [np.eye(3), np.dot(d_theta, Pws[1].reshape([-1, 1])), np.zeros((3, 1)), -np.dot(Kinv, u2)]
        ])
        b = - Pws.reshape([-1, 1])
        ans = np.dot(np.linalg.pinv(A), b)
        R = np.array([
            [np.cos(ans[3]), -np.sin(ans[3]), 0],
            [np.sin(ans[3]), np.cos(ans[3]), 0],
            [0, 0, 1]
        ])
        print("A:", A)
        print("b:", b)
        print("ans:", ans)
        T = ans[:3]
        return R, T, ans
    elif method=='xyz':
        u1 = np.hstack([ps_undistort[0], 1]).reshape([-1, 1])
        u2 = np.hstack([ps_undistort[1], 1]).reshape([-1, 1])
        A = np.block([
            [np.eye(3), -np.dot(Kinv, u1), np.zeros((3, 1))], 
            [np.eye(3), np.zeros((3, 1)), -np.dot(Kinv, u2)]
        ])
        b = - Pws.reshape([-1, 1])
        ans = np.dot(np.linalg.pinv(A), b)
        return np.eye(3), ans[:3], ans
    elif method=='xy':
        u = np.hstack([ps_undistort[0], 1]).reshape([-1, 1])
        A = np.block([np.array([[1, 0], [0, 1], [0, 0]]), -np.dot(Kinv, u)])
        b = - Pws[0].reshape([-1, 1])
                
        ans = np.dot(np.linalg.inv(A), b)[:3]
        T[0, 0] = ans[0, 0]
        T[1, 0] = ans[1, 0]
        return np.eye(3), T
    elif method=='x':
        u = np.hstack([ps_undistort[0], 1]).reshape([-1, 1])
        A = np.block([np.array([1, 0, 0]).reshape((-1, 1)), -np.dot(Kinv, u)])
        b = - Pws[0].reshape([-1, 1])
        ans = np.dot(np.linalg.pinv(A), b)[:3]
        T[0, 0] = ans[0, 0]
        return np.eye(3), T
    elif method=='y':
        u = np.hstack([ps_undistort[0], 1]).reshape([-1, 1])
        A = np.block([np.array([0, 1, 0]).reshape((-1, 1)), -np.dot(Kinv, u)])
        b = - Pws[0].reshape([-1, 1])
        ans = np.dot(np.linalg.pinv(A), b)[:3]
        T[1, 0] = ans[0, 0]
        return np.eye(3), T
    elif method=='zx':
        u = np.hstack([ps_undistort[0], 1]).reshape([-1, 1])
        A = np.block([np.array([[0, 1], [0, 0], [1, 0]]), -np.dot(Kinv, u)])
        b = - Pws[0].reshape([-1, 1])        
        ans = np.dot(np.linalg.inv(A), b)[:3]
        T[2, 0] = ans[0, 0]
        T[0, 0] = ans[1, 0]
        return np.eye(3), T
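A sketch exercising the 'xyz' branch with synthetic data (two markers one unit apart, ideal pinhole intrinsics, no distortion); all values are made up purely for illustration:

import numpy as np

K = np.array([[500., 0., 320.],
              [0., 500., 240.],
              [0., 0., 1.]])
dist = np.zeros(5)
Pws = np.array([[0., 0., 5.],
                [1., 0., 5.]])    # marker positions in the world frame
ps = np.array([[320., 240.],
               [420., 240.]])     # their pixel projections under K
R, T, ans = solve_pnp(ps, Pws, K, dist, method='xyz')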
Example #40
def unproject(K, p2d):
    rays = cv2.undistortPoints(p2d[:, None], K, None)
    return cv2.convertPointsToHomogeneous(rays).reshape(-1,
                                                        3).astype(np.float32)
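With no distortion the returned rays are just the normalized image coordinates made homogeneous; a sketch with made-up intrinsics:

import numpy as np

K = np.array([[500., 0., 320.],
              [0., 500., 240.],
              [0., 0., 1.]])
print(unproject(K, np.array([[320., 240.], [820., 240.]], np.float32)))
# ~[[0., 0., 1.],
#   [1., 0., 1.]]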
Example #41
objpoints = []
objpoints.append(objp)
imgpoints = []
imgpoints.append(np.array(corners2))
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, gray.shape[::-1], None, None)

# Find crop parameters

h, w = img.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
x, y, w, h = roi

# Find homography parameters

corners3 = cv2.undistortPoints(corners2, mtx, dist, P=newcameramtx)

pts_src = [[corners3[0][0][0] - x, corners3[0][0][1] - y],
           [corners3[CHK_SIZE[1] - 1][0][0] - x,
               corners3[CHK_SIZE[1] - 1][0][1] - y],
           [corners3[-CHK_SIZE[1]][0][0] - x, corners3[-CHK_SIZE[1]][0][1] - y],
           [corners3[-1][0][0] - x, corners3[-1][0][1] - y]]

xD = (CHK_SIZE[0]-1) * TILE_SIZE * ZOOM
yD = (CHK_SIZE[1]-1) * TILE_SIZE * ZOOM
pts_dest = [[0, 0],
            [yD, 0],
            [0, xD],
            [yD, xD]]

h2, status = cv2.findHomography(np.array(pts_src), np.array(pts_dest))
Example #42
 def undistortPts(self, src):
     dst = cv2.undistortPoints(src, self.mtx, self.dist, P=self.mtx)
     return dst
Example #43
    def click_and_pub_pixel_data(self, event, x, y, flags, param):
        N_targets = 9.0  #CHANGE THIS TO CORRECT NUMBER OF TARGETS (must be float)
        if event == cv2.EVENT_LBUTTONDOWN:

            if 80 <= x <= 560 and 80 <= y <= 400:
                src = np.array(
                    [[[x, y]]],
                    dtype=np.float64)  #src is input pixel coordinates

                #undistortPoints() returns a 3D array of the projection of the pixel, to the image sensor
                undistortedPixel = cv2.undistortPoints(src, self.camMatrix,
                                                       self.distCoeff)

                #multiply the projection by the focal length and then add the offset to convert back to pixels
                undistortedPixel1 = undistortedPixel[0][0][
                    0] * self.fx + self.ox
                undistortedPixel2 = undistortedPixel[0][0][
                    1] * self.fy + self.oy

                #the new undistorted pixel values
                x_new = undistortedPixel1
                y_new = undistortedPixel2

                self.pixPt = [x_new, y_new]

                #still annoyingly necessary
                phi = self.camera.phi
                theta = self.camera.theta
                psi = self.camera.psi

                R_b_i = np.array([
                    [np.cos(theta)*np.cos(psi),
                     np.cos(theta)*np.sin(psi),
                     -np.sin(theta)],
                    [np.sin(phi)*np.sin(theta)*np.cos(psi) - np.cos(phi)*np.sin(psi),
                     np.sin(phi)*np.sin(theta)*np.sin(psi) + np.cos(phi)*np.cos(psi),
                     np.sin(phi)*np.cos(theta)],
                    [np.cos(phi)*np.sin(theta)*np.cos(psi) + np.sin(phi)*np.sin(psi),
                     np.cos(phi)*np.sin(theta)*np.sin(psi) - np.sin(phi)*np.cos(psi),
                     np.cos(phi)*np.cos(theta)]
                ]).T

                size = self.frame.shape
                image_size = [size[0], size[1]]

                refPt = self.camera.getNED(self.pixPt, image_size, R_b_i)
                refPt.append(self.target_counter)
                self.refPt.data = refPt

                self.pub.publish(self.refPt)

            else:
                pass

        elif event == cv2.EVENT_RBUTTONDOWN:

            self.target_counter += 1.0
            if self.target_counter == N_targets + 1.0:  #assumes 9 targets
                self.target_counter = 1.0

        elif event == cv2.EVENT_MBUTTONDOWN:
            self.target_counter -= 1.0
            if self.target_counter == 0.0:
                self.target_counter = N_targets

        else:
            pass
Example #44
def sgbm_depth(left_image,right_image,seqeunce_name='dhc',visualize=False):

    img1_out='./sgbm/'+seqeunce_name+'/cam1/'
    img2_out = './sgbm/'+seqeunce_name+'/cam2/'
    if not os.path.exists(img1_out):
        os.makedirs(img1_out)
    if not os.path.exists(img2_out):
        os.makedirs(img2_out)

    # SGM parameters
    minDisparity=0
    numDisparities=256
    SADWindowSize=5
    cn = 3
    P1_sg=8 * cn*SADWindowSize*SADWindowSize
    P2_sg=32 * cn*SADWindowSize*SADWindowSize
    preFilterCap=63
    uniquenessRatio=10
    speckleWindowSize=100
    speckleRange=32
    disp12MaxDiff=1

    #set parameter for stereo vision
    M1 = np.array([7600.84983839602, -2.99902176361271, 1363.98027137587,
                    0.000000, 7606.59010258463, 725.727691214881, 0.000000,
                    0.000000, 1.000000])
    M1=M1.reshape(3,3)
    D1 =np.array([-0.0550340193281919, -0.201838612399988, -0.00487395972599783,	0.00412750068646054])

    M2 = np.array([ 7604.52194213316, -5.65932956048017, 1396.85380783994,
                    0.000000, 7610.60877362907, 705.423206525307, 0.000000,
                    0.000000, 1.000000])
    M2=M2.reshape(3,3)
    D2 = np.array([-0.0320233169370680, -0.0383792527839777, -0.00644739641691392, 0.00447193518679759])
    R = np.array([ 0.999997140595277, 0.00225991554062962,
                   0.000782037735025578, -0.00224644495126731,
                   0.999856056670654,-0.0168172359230492,
                   -0.000819930698723269, 0.0168154310310438,
                   0.999858274453379])
    R=R.reshape(3,3).transpose() 
    T = np.array( [-500.588562682978, 4.46368597454194,	3.59227301902774])

    tic = time.time()

    img1 = left_image
    img2 = right_image
    #save image size

    img_size=img1.shape
    img_size=(img_size[1],img_size[0])

    #R1 is the rectified rotation matrix of camera 1
    #R2 is the rectified rotation matrix of camera 2
    #P1 is the rectified projection matrix of camera1
    #P2 is the rectified projection matrix of camera2
    #Q is the rectified reprojection matrix of camera1 and camera2

    #undistortion and rectified image

    R1, R2, P1, P2, Q,_,_=cv2.stereoRectify(cameraMatrix1=M1, distCoeffs1=D1,cameraMatrix2= M2,distCoeffs2=D2,imageSize= img_size, R=R, T=T, flags=cv2.CALIB_ZERO_DISPARITY,alpha=-1,newImageSize=img_size)
    map11,map12=cv2.initUndistortRectifyMap(cameraMatrix=M1, distCoeffs=D1, R=R1, newCameraMatrix=P1, size=img_size, m1type=cv2.CV_16SC2)
    map21,map22=cv2.initUndistortRectifyMap(cameraMatrix=M2, distCoeffs=D2, R=R2, newCameraMatrix=P2, size=img_size, m1type=cv2.CV_16SC2)

    #rectified image from original image
    #img1r is the rectified image from camera 1
    #img2r is the rectified image from camera 2
    print(R1)
    print(P1)

    img1r=cv2.remap(src=img1, map1=map11, map2=map12, interpolation=cv2.INTER_LINEAR)
    img2r=cv2.remap(src=img2, map1=map21, map2=map22, interpolation=cv2.INTER_LINEAR)


    x = np.zeros((1,1,2), dtype=np.float32)
    x[0,0,0]=400
    x[0,0,1]=1000
    x=cv2.undistortPoints(x,M1,D1,R=R1,P=P1)
    print(x)

    #overwrite img1r to img1
    #overwrite img2r to img2
    img1=img1r
    img2=img2r
    tmp=np.hstack((img1,img2))
    # tmp1=cv2.cvtColor(tmp,cv2.COLOR_BGR2RGB)

    sgbm=cv2.StereoSGBM_create(minDisparity=minDisparity, numDisparities=numDisparities, blockSize=SADWindowSize, P1=P1_sg, P2=P2_sg, disp12MaxDiff=disp12MaxDiff, preFilterCap=preFilterCap, uniquenessRatio=uniquenessRatio, speckleWindowSize=speckleWindowSize, speckleRange=speckleRange)
    #sgbm = cv2.StereoBM(cv2.STEREO_BM_BASIC_PRESET,ndisparities=16, SADWindowSize=15)
    #compute disparity map
    disp=sgbm.compute(img1,img2)
    print(disp.shape)
    #reproject disparity to 3D
    xyz=cv2.reprojectImageTo3D(disparity=disp,Q=Q,handleMissingValues=True)
    xyz=xyz*16
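    # StereoSGBM returns fixed-point disparities scaled by 16, which makes the
    # reprojected coordinates 16x too small; the multiplication above compensates.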

    print(xyz[895][524])
    print('execution time for computing distance is ' + str(time.time() - tic))

    #For visualization SGBM depth image purpose only
    if(visualize==True):
        # visualization of alignment
        plt.figure()
        plt.imshow(tmp)
        #plt.plot([600, 1000], [3500, 500])
        #plt.show()

        #plt.imshow(disp)
        #plt.plot([647, 1055], [3500, 500])
        plt.show()
Example #45
cv2.waitKey(0)
cv2.destroyAllWindows()
pts1 = np.float32(pts1)
pts2 = np.float32(pts2)

pts_l = np.array(pts1)
pts_r = np.array(pts2)
K_l = np.array([[2760, 0, 1520], [0, 2760, 1006], [0, 0, 1]])
K_r = np.array([[2760, 0, 1520], [0, 2760, 1006], [0, 0, 1]])

print("Camera Matrix:")
print(K_l)
'''https://stackoverflow.com/questions/33906111/how-do-i-estimate-positions-of-two-cameras-in-opencv
'''
pts_l_norm = cv2.undistortPoints(np.expand_dims(pts_l, axis=1),
                                 cameraMatrix=K_l,
                                 distCoeffs=None)
pts_r_norm = cv2.undistortPoints(np.expand_dims(pts_r, axis=1),
                                 cameraMatrix=K_r,
                                 distCoeffs=None)

E, mask = cv2.findEssentialMat(pts_l_norm,
                               pts_r_norm,
                               focal=1.0,
                               pp=(0., 0.),
                               method=cv2.RANSAC,
                               prob=0.999,
                               threshold=3.0)
print("Essential Matrix:")
print(E)
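Because pts_l_norm and pts_r_norm are already normalized, focal=1.0 and pp=(0, 0) are consistent settings here (with normalized coordinates the RANSAC threshold is also in normalized units, so values much smaller than 3.0 are more typical). A hedged continuation recovering the relative pose from E:

# Sketch only: decompose E into a rotation and a unit-norm translation.
n_good, R, t, pose_mask = cv2.recoverPose(E, pts_l_norm, pts_r_norm,
                                          focal=1.0, pp=(0., 0.), mask=mask)
print("Relative rotation:")
print(R)
print("Relative translation (unit norm):")
print(t)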
Example #46
def process_trig_errors(config,
                        fname_dict,
                        cam_intrinsics,
                        extrinsics,
                        skip=20):
    minlen = np.inf
    caps = dict()
    for cam_name, fname in fname_dict.items():
        cap = cv2.VideoCapture(fname)
        caps[cam_name] = cap
        length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        minlen = min(length, minlen)

    cam_names = sorted(fname_dict.keys())

    board = get_calibration_board(config)

    cam_mats = []
    cam_mats_dist = []
    for cname in cam_names:
        mat = np.array(extrinsics[cname])
        left = np.array(cam_intrinsics[cname]["camera_mat"])
        cam_mats.append(mat)
        cam_mats_dist.append(left)

    cam_mats = np.array(cam_mats)
    cam_mats_dist = np.array(cam_mats_dist)

    go = skip
    all_points = []
    framenums = []
    all_rvecs = []
    all_tvecs = []
    for framenum in trange(minlen, desc="detecting", ncols=70):
        row = []
        rvecs = []
        tvecs = []

        for cam_name in cam_names:
            intrinsics = cam_intrinsics[cam_name]
            cap = caps[cam_name]
            ret, frame = cap.read()

            if framenum % skip != 0 and go <= 0:
                continue

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # corners, ids = detect_aruco(gray, intrinsics)
            detected, stuff = estimate_pose(gray, intrinsics, board)
            if detected:
                corners, ids, rvec, tvec = stuff
                rvec = rvec.flatten()
                tvec = tvec.flatten()
            else:
                corners = ids = None
                rvec = np.zeros(3) * np.nan
                tvec = np.zeros(3) * np.nan

            points = fill_points(corners, ids, board)
            points_flat = points.reshape(-1, 1, 2)
            points_new = cv2.undistortPoints(
                points_flat,
                np.array(intrinsics["camera_mat"]),
                np.array(intrinsics["dist_coeff"]),
            )
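            # No P (new camera matrix) is passed, so the points come back as
            # normalized camera coordinates; the triangulation below can then
            # work with extrinsics-only camera matrices.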

            row.append(points_new.reshape(points.shape))
            rvecs.append(rvec)
            tvecs.append(tvec)

        if ~np.all(np.isnan(row)):
            all_points.append(row)
            all_tvecs.append(tvecs)
            all_rvecs.append(rvecs)
            framenums.append(framenum)
            go = skip

        go = max(0, go - 1)

    all_points_raw = np.array(all_points)
    all_rvecs = np.array(all_rvecs)
    all_tvecs = np.array(all_tvecs)
    framenums = np.array(framenums)

    shape = all_points_raw.shape

    all_points_3d = np.zeros((shape[0], shape[2], 3))
    all_points_3d.fill(np.nan)

    num_cams = np.zeros((shape[0], shape[2]))
    num_cams.fill(np.nan)

    errors = np.zeros((shape[0], shape[2]))
    errors.fill(np.nan)

    for i in trange(all_points_raw.shape[0], desc="triangulating", ncols=70):
        for j in range(all_points_raw.shape[2]):
            pts = all_points_raw[i, :, j, :]
            good = ~np.isnan(pts[:, 0])
            if np.sum(good) >= 2:
                # p3d = triangulate_optim(pts, cam_mats)
                p3d = triangulate_simple(pts[good], cam_mats[good])
                all_points_3d[i, j] = p3d[:3]
                errors[i, j] = reprojection_error_und(p3d, pts[good],
                                                      cam_mats[good],
                                                      cam_mats_dist[good])
                num_cams[i, j] = np.sum(good)

    ## all_tvecs
    # framenum, camera num, axis

    dout = pd.DataFrame()
    for bp_num in range(shape[2]):
        bp = "corner_{}".format(bp_num)
        for ax_num, axis in enumerate(["x", "y", "z"]):
            dout[bp + "_" + axis] = all_points_3d[:, bp_num, ax_num]
        dout[bp + "_error"] = errors[:, bp_num]
        dout[bp + "_ncams"] = num_cams[:, bp_num]

    for cam_num in range(shape[1]):
        cname = cam_names[cam_num]
        for ax_num, axis in enumerate(["x", "y", "z"]):
            key = "cam_{}_r{}".format(cname, axis)
            dout[key] = all_rvecs[:, cam_num, ax_num]
            key = "cam_{}_t{}".format(cname, axis)
            dout[key] = all_tvecs[:, cam_num, ax_num]

    dout["fnum"] = framenums

    return dout
Example #47
# calculate camera 2 and 3 in camera 1's frame
T_1_0 = T_inv(T_0_1)
T_1_2 = T_1_0 @ T_0_2
T_1_3 = T_1_0 @ T_0_3

T_2_0 = T_inv(T_0_2)
T_2_3 = T_2_0 @ T_0_3

fig, ax = initialize_3d_plot(
    number=2,
    title='Chess board points triangulated using camera positions \n Camera 1 at origin',
    view=(-70, -90))

corners1_ud = cv2.undistortPoints(Cs[0], K, D)
corners2_ud = cv2.undistortPoints(Cs[1], K, D)
corners3_ud = cv2.undistortPoints(Cs[2], K, D)

# Plot camera 1, camera 2 and triangulated chess board corners between 1 and 2 in green
corners_12, _ = triangulate(T_1_1, T_1_2, corners1_ud, corners2_ud)
graph = plot_3d_points(ax, corners_12, linestyle="", marker=".", color='g')
plot_pose3_on_axes(ax, T_1_1, axis_length=1.0)
plot_pose3_on_axes(ax, T_1_2, axis_length=1.0)

# Plot camera 3 and triangulated chess board corners between 1 and 3 in red
corners_13, _ = triangulate(T_1_1, T_1_3, corners1_ud, corners3_ud)
graph = plot_3d_points(ax, corners_13, linestyle="", marker=".", color='r')

# Plot triangulated chess board corners between 2 and 3 in cyan
plot_pose3_on_axes(ax, T_1_3, axis_length=1.0)
Example #48
def undistortPoints(x, y, cameraMatrix, distCoeffs):
    p = np.float32([[[x, y]]])
    dst = cv2.undistortPoints(p, cameraMatrix, distCoeffs, None, cameraMatrix)
    return dst[0][0][0], dst[0][0][1]
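An illustrative call; the intrinsics and distortion coefficients below are placeholders, not values from any calibration in this document. The trailing cameraMatrix argument makes the wrapper return undistorted pixel coordinates rather than normalized ones.

import numpy as np

K_demo = np.array([[600.0, 0.0, 320.0],
                   [0.0, 600.0, 240.0],
                   [0.0, 0.0, 1.0]])
dist_demo = np.array([-0.25, 0.07, 0.0, 0.0, 0.0])
u, v = undistortPoints(100.0, 150.0, K_demo, dist_demo)  # undistorted pixel coordinates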
Example #49
def run():
    cam = cv2.VideoCapture('video/VIDEO0122.mp4')
    frame_idx = 0
    T = np.eye(4)
    T[:3,:3] = cameraMatrix
    match_stack_man_cur = match_stack_man
    tracking = False
    R_cur, t_cur = np.eye(3), np.zeros((3,1))
    while(cam.isOpened()):
        # Capturing each frame of our video stream
        ret, QueryImg = cam.read()
        frame_idx += 1
        if ret == True and frame_idx % 1 == 0:

            # grayscale image
            QueryImg = cv2.resize(QueryImg, (0,0), fx=0.2, fy=0.2)
            # cv2.imwrite("frames/%06d.jpg" % frame_idx, QueryImg)
            
            gray = cv2.cvtColor(QueryImg, cv2.COLOR_BGR2GRAY)
        
            # Detect Aruco markers
            corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)
    
            # Outline all of the markers detected in our image
            # QueryImg = aruco.drawDetectedMarkers(QueryImg, corners, borderColor=(0, 0, 255))
            # Require 15 markers before drawing axis
            if ids is not None and len(ids) > 3:
                # Display our image
                idx = np.where(ids == 98)
                if len(idx[0]) > 0:
                    idx = idx[0][0]
                    corners = np.array(corners).reshape((-1,2))
                    corners_sub = cv2.cornerSubPix(gray,corners,(5,5),(-1,-1),criteria)
                    corners_sub_undistort = cv2.undistortPoints(corners_sub, cameraMatrix, distCoeffs, P=cameraMatrix).reshape((-1,4,2))
                    rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers([corners_sub.reshape((-1,4,2))[idx]], 1, cameraMatrix, distCoeffs) 
                    brvec, btvec = rvecs[0], tvecs[0]
                    if not tracking or frame_idx % 10 == 0:
                        rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers([corners_sub.reshape((-1,4,2))[idx]], 1, cameraMatrix, distCoeffs) 
                        rvec, tvec = rvecs[0], tvecs[0]
                        R_cur, _ = cv2.Rodrigues(rvec)
                        t_cur = tvec.reshape((3,1))
                        tracking = True
                    else:
                        found_ids1 = []
                        found_corners1 = []
                        found_ids2 = []
                        found_corners2 = []
                        for i,id1 in enumerate(prev_ids):
                            idx = np.where(ids == id1[0])
                            if len(idx[0]) > 0:
                                idx = idx[0][0]
                                found_ids1.append(prev_ids[i])
                                found_corners1.append(prev_corners_sub[i])
                                found_ids2.append(ids[idx])
                                found_corners2.append(corners_sub_undistort[idx])
                        found_corners1 = np.array(found_corners1).reshape((-1,2))
                        found_corners2 = np.array(found_corners2).reshape((-1,2))
                        # print('----------------------')
                        # print(found_corners1)
                        # print(found_corners2)
                        # print('----------------------')
                        flag = False

                        if found_corners1.shape[0] >= 4:
                            H, _ = cv2.findHomography(found_corners1, found_corners2, method=cv2.RANSAC, ransacReprojThreshold=1)
                            # H, _ = cv2.findHomography(prev_corners_sub, corners_sub_undistort)
                            _, Rs, ts, ns = cv2.decomposeHomographyMat(H, cameraMatrix)
                            # print(Rs)
                            # print(ts)
                            # print('--------')
                            for R, t in zip(Rs, ts):
                                R_tmp = R @ R_cur
                                t_tmp = R @ t_cur + t.reshape((3,1))
                                z = cameraMatrix @ (R_tmp @ Z_axis + t_tmp)
                                z2d = z[:2] / z[2]
                                print(R_tmp, t_tmp)
                                if np.sum(z > 0) == 3 and 0 <= z2d[0] < gray.shape[1] and 0 <= z2d[1] < gray.shape[0] and t_tmp[2, 0] > 0:
                                    # print(R, t)
                                    # print(R_tmp, t_tmp)
                                    R_cur = R_tmp
                                    t_cur = t_tmp
                                    rvec, _ = cv2.Rodrigues(R_cur)
                                    tvec = t_cur.reshape(-1)
                                    tracking = True
                                    flag = True
                                    break
                        # print(R_cur, t_cur)
                        bR_cur, _ = cv2.Rodrigues(brvec)
                        bt_cur = btvec.reshape((3,1))
                        print(bR_cur, bt_cur)
                        print("--------------")
                        if not flag:
                            rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers([corners_sub], 1, cameraMatrix, distCoeffs) 
                            rvec, tvec = rvecs[0], tvecs[0]
                            R_cur, _ = cv2.Rodrigues(rvec)
                            t_cur = tvec.reshape((3,1))
                            tracking = True

                    # print(frame_idx, tracking)

                    # QueryImg = aruco.drawAxis(QueryImg, cameraMatrix, distCoeffs, rvec, tvec, 1)
                    # axis_cur = (rotating_matrix @ axis_cur.T).T
                    imgpts, jac = cv2.projectPoints(axis, rvec, tvec, cameraMatrix, distCoeffs)
                    draw(QueryImg, corners, imgpts)
                    # match_stack_man_cur = (rotating_matrix @ match_stack_man_cur.T).T
                    # imgpts, jac = cv2.projectPoints(match_stack_man_cur, rvec, tvec, cameraMatrix, distCoeffs)
                    # drawMatchStickMan(QueryImg, imgpts)
                    prev_corners_sub = corners_sub_undistort
                    prev_ids = ids
            

            cv2.imshow('QueryImage', QueryImg)
    
        # Exit at the end of the video on the 'q' keypress
        if cv2.waitKey(100) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
Example #50
def undistort_points(config, dataframe, camera_pair, destfolder):
    cfg_3d = auxiliaryfunctions.read_config(config)
    img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(
        cfg_3d)
    ''' 
    path_undistort = destfolder
    filename_cam1 = Path(dataframe[0]).stem
    filename_cam2 = Path(dataframe[1]).stem

    #currently no interm. saving of this due to high speed.
    # check if the undistorted files are already present
    if os.path.exists(os.path.join(path_undistort,filename_cam1 + '_undistort.h5')) and os.path.exists(os.path.join(path_undistort,filename_cam2 + '_undistort.h5')):
        print("The undistorted files are already present at %s" % os.path.join(path_undistort,filename_cam1))
        dataFrame_cam1_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam1 + '_undistort.h5'))
        dataFrame_cam2_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam2 + '_undistort.h5'))
    else:
    '''
    if True:
        # Create an empty dataFrame to store the undistorted 2d coordinates and likelihood
        dataframe_cam1 = pd.read_hdf(dataframe[0])
        dataframe_cam2 = pd.read_hdf(dataframe[1])
        scorer_cam1 = dataframe_cam1.columns.get_level_values(0)[0]
        scorer_cam2 = dataframe_cam2.columns.get_level_values(0)[0]
        path_stereo_file = os.path.join(path_camera_matrix,
                                        'stereo_params.pickle')
        stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)
        mtx_l = stereo_file[camera_pair]['cameraMatrix1']
        dist_l = stereo_file[camera_pair]['distCoeffs1']

        mtx_r = stereo_file[camera_pair]['cameraMatrix2']
        dist_r = stereo_file[camera_pair]['distCoeffs2']

        R1 = stereo_file[camera_pair]['R1']
        P1 = stereo_file[camera_pair]['P1']

        R2 = stereo_file[camera_pair]['R2']
        P2 = stereo_file[camera_pair]['P2']

        # Create an empty dataFrame to store the undistorted 2d coordinates and likelihood
        dataFrame_cam1_undistort, scorer_cam1, bodyparts = auxiliaryfunctions_3d.create_empty_df(
            dataframe_cam1, scorer_cam1, flag='2d')
        dataFrame_cam2_undistort, scorer_cam2, bodyparts = auxiliaryfunctions_3d.create_empty_df(
            dataframe_cam2, scorer_cam2, flag='2d')

        for bpindex, bp in tqdm(enumerate(bodyparts)):
            # Undistorting the points from cam1 camera
            points_cam1 = np.array([
                dataframe_cam1[scorer_cam1][bp]['x'].values[:],
                dataframe_cam1[scorer_cam1][bp]['y'].values[:]
            ])
            points_cam1 = points_cam1.T
            points_cam1 = np.expand_dims(points_cam1, axis=1)
            points_cam1_remapped = cv2.undistortPoints(src=points_cam1,
                                                       cameraMatrix=mtx_l,
                                                       distCoeffs=dist_l,
                                                       P=P1,
                                                       R=R1)
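            # With R=R1 and P=P1 the undistorted points are expressed in the
            # rectified camera, matching the stereo projection matrices used
            # later for triangulation.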

            dataFrame_cam1_undistort.iloc[:][scorer_cam1, bp,
                                             'x'] = points_cam1_remapped[:, 0,
                                                                         0]
            dataFrame_cam1_undistort.iloc[:][scorer_cam1, bp,
                                             'y'] = points_cam1_remapped[:, 0,
                                                                         1]
            dataFrame_cam1_undistort.iloc[:][
                scorer_cam1, bp, 'likelihood'] = dataframe_cam1[scorer_cam1][
                    bp]['likelihood'].values[:]

            # Undistorting the points from cam2 camera
            points_cam2 = np.array([
                dataframe_cam2[scorer_cam2][bp]['x'].values[:],
                dataframe_cam2[scorer_cam2][bp]['y'].values[:]
            ])
            points_cam2 = points_cam2.T
            points_cam2 = np.expand_dims(points_cam2, axis=1)
            points_cam2_remapped = cv2.undistortPoints(src=points_cam2,
                                                       cameraMatrix=mtx_r,
                                                       distCoeffs=dist_r,
                                                       P=P2,
                                                       R=R2)

            dataFrame_cam2_undistort.iloc[:][scorer_cam2, bp,
                                             'x'] = points_cam2_remapped[:, 0,
                                                                         0]
            dataFrame_cam2_undistort.iloc[:][scorer_cam2, bp,
                                             'y'] = points_cam2_remapped[:, 0,
                                                                         1]
            dataFrame_cam2_undistort.iloc[:][
                scorer_cam2, bp, 'likelihood'] = dataframe_cam2[scorer_cam2][
                    bp]['likelihood'].values[:]

        # Save the undistorted files
        dataFrame_cam1_undistort.sort_index(inplace=True)
        dataFrame_cam2_undistort.sort_index(inplace=True)

    return (dataFrame_cam1_undistort, dataFrame_cam2_undistort,
            stereo_file[camera_pair], path_stereo_file)
rotate = np.identity(3)
proj1 = np.dot(intrinsic_matrix_left,np.concatenate((rotate,translate),axis=1))
R = np.asarray(R)
T = np.asanyarray(T)
#Ess = np.concatenate((R,T),axis=1)
#print(T)
# print(Ess.shape)
# print(T)

origin2 = np.dot(R,T)
#print(origin2)
proj2 = np.dot(intrinsic_matrix_right, np.concatenate((R,T),axis=1))

# print(image_points_left)

# pass P=<intrinsics> so the undistorted points stay in pixel coordinates,
# consistent with proj1/proj2, which already include the intrinsic matrices
undistorted_left = cv2.undistortPoints(np.reshape(image_points_left,(54,1,2)), intrinsic_matrix_left, distortion_left, P=intrinsic_matrix_left)
undistorted_right = cv2.undistortPoints(np.reshape(image_points_right,(54,1,2)), intrinsic_matrix_right, distortion_right, P=intrinsic_matrix_right)
# print(np.shape(undistorted_left))

triangulate = cv2.triangulatePoints(proj1,proj2,undistorted_left,undistorted_right)
#print(undistorted_left.shape)
# print(triangulate)
#tasks 5-7

rotation1, rotation2, projection1, projection2, Q, roi1, roi2 = cv2.stereoRectify(cameraMatrix1=intrinsic_matrix_left,
                      distCoeffs1=distortion_left,
                      cameraMatrix2=intrinsic_matrix_right,
                      distCoeffs2=distortion_right,
                      imageSize=shape,
                      R=R,
                      T=T,
Example #52
def reconstruct_from_binary_patterns():

    scale_factor = 1.0
    ref_white = cv2.resize(
        cv2.imread("images/aligned000.jpg", cv2.IMREAD_GRAYSCALE) / 255.0,
        (0, 0),
        fx=scale_factor,
        fy=scale_factor)
    ref_black = cv2.resize(
        cv2.imread("images/aligned001.jpg", cv2.IMREAD_GRAYSCALE) / 255.0,
        (0, 0),
        fx=scale_factor,
        fy=scale_factor)
    original_image = cv2.resize(cv2.imread("images/aligned001.jpg"), (0, 0),
                                fx=scale_factor,
                                fy=scale_factor)
    ref_avg = (ref_white + ref_black) / 2.0
    ref_on = ref_avg + 0.05  # a threshold for ON pixels
    ref_off = ref_avg - 0.05  # add a small buffer region

    h, w = ref_white.shape

    # mask of pixels where there is projection
    proj_mask = (ref_white > (ref_black + 0.05))
    #cv2.imshow("", np.array(proj_mask * 255, dtype=np.uint8))
    #cv2.waitKey()

    scan_bits = np.zeros((h, w), dtype=np.uint16)

    # analyze the binary patterns from the camera
    for i in range(0, 15):
        # read the file
        patt = cv2.resize(cv2.imread("images/aligned%03d.jpg" %
                                     (i + 2), cv2.IMREAD_GRAYSCALE) / 255.0,
                          (0, 0),
                          fx=scale_factor,
                          fy=scale_factor)

        # mask where the pixels are ON
        on_mask = (patt > ref_on) & proj_mask

        # this code corresponds with the binary pattern code
        bit_code = np.uint16(1 << i)

        scan_bits[on_mask] += bit_code

        # TODO: populate scan_bits by putting the bit_code according to on_mask

    print("load codebook")
    # the codebook translates from <binary code> to (x,y) in projector screen space
    with open("binary_codes_ids_codebook.pckl", "r") as f:
        binary_codes_ids_codebook = pickle.load(f)

    colors = []
    camera_points = []
    projector_points = []

    correspending_image = np.zeros((scan_bits.shape[0], scan_bits.shape[1], 3))
    for x in range(w):
        for y in range(h):
            if not proj_mask[y, x]:
                continue  # no projection here
            if scan_bits[y, x] not in binary_codes_ids_codebook:
                continue  # bad binary code
            x_p, y_p = binary_codes_ids_codebook[scan_bits[y, x]]
            if x_p >= 1279 or y_p >= 799:  # filter
                continue
            # TODO: use binary_codes_ids_codebook[...] and scan_bits[y,x] to
            # TODO: find for the camera (x,y) the projector (p_x, p_y).
            # TODO: store your points in camera_points and projector_points

            # IMPORTANT!!! : due to differences in calibration and acquisition - divide the camera points by 2
            camera_points.append([x / 2., y / 2.])
            projector_points.append([x_p, y_p])

            correspending_image[y, x] = np.array(
                [0, projector_points[-1][1], projector_points[-1][0]])
            colors.append(original_image[y, x][::-1])
    colors = np.array(colors)
    correspending_image[:, :, 1] = correspending_image[:, :, 1] / 800. * 255
    correspending_image[:, :, 2] = correspending_image[:, :, 2] / 1280. * 255
    #cv2.normalize(correspending_image,  correspending_image, 0, 255, cv2.NORM_MINMAX)
    #cv2.imshow("", np.array(correspending_image, dtype=np.uint8))
    #cv2.waitKey()
    correspondence_name = sys.argv[1] + "correspondence.jpg"
    cv2.imwrite(correspondence_name, correspending_image)
    # now that we have 2D-2D correspondences, we can triangulate 3D points!

    # load the prepared stereo calibration between projector and camera
    with open("stereo_calibration.pckl", "r") as f:
        d = pickle.load(f)
        camera_K = d['camera_K']
        camera_d = d['camera_d']
        projector_K = d['projector_K']
        projector_d = d['projector_d']
        projector_R = d['projector_R']
        projector_t = d['projector_t']

    camera_points = np.array(camera_points, dtype=np.float32)
    projector_points = np.array(projector_points, dtype=np.float32)

    camera_points = cv2.undistortPoints(
        camera_points.reshape((camera_points.shape[0], 1, 2)), camera_K,
        camera_d)
    projector_points = cv2.undistortPoints(
        projector_points.reshape((projector_points.shape[0], 1, 2)),
        projector_K, projector_d)

    camera_points = camera_points.reshape(camera_points.shape[0], 2)
    projector_points = projector_points.reshape(projector_points.shape[0], 2)

    camera_P = np.eye(4)[:3]
    projector_P = np.hstack((projector_R, projector_t))
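    # Both point sets were normalized by undistortPoints (no P given), so the
    # projection matrices carry no intrinsics: identity for the camera and
    # [R|t] for the projector.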

    points_3d = cv2.triangulatePoints(camera_P, projector_P,
                                      camera_points.transpose(),
                                      projector_points.transpose())
    points_3d = cv2.convertPointsFromHomogeneous(points_3d.transpose())
    mask = (points_3d[:, :, 2] > 200) & (points_3d[:, :, 2] < 1400)
    mask = mask.reshape((mask.shape[0], ))
    camera_points = camera_points[mask]
    projector_points = projector_points[mask]
    points_3d = points_3d[mask]
    colors = colors[mask]
    assert (points_3d.shape[0] == colors.shape[0])
    return points_3d, colors
Example #53
                                                  inliers, gradients)

                    # essential matrix from fundamental matrix (for evaluation)
                    E = K2[b].transpose(0, 1).mm(F.mm(K1[b]))

                    pts1 = correspondences[b, 0:2].numpy()
                    pts2 = correspondences[b, 2:4].numpy()

                    # compute fundamental matrix metrics if they are used as training signal
                    if opt.loss != 'pose':
                        valid, F1, incount, epi_error = util.f_error(
                            pts1, pts2, F.numpy(), gt_F[b].numpy(),
                            opt.threshold)

                    # normalize correspondences using the calibration parameters for the calculation of pose errors
                    pts1 = cv2.undistortPoints(pts1.transpose(2, 1, 0),
                                               K1[b].numpy(), None)
                    pts2 = cv2.undistortPoints(pts2.transpose(2, 1, 0),
                                               K2[b].numpy(), None)

                else:

                    # === CASE ESSENTIAL MATRIX =========================================

                    # run NG-RANSAC
                    E = torch.zeros((3, 3)).float()
                    incount = ngransac.find_essential_mat(
                        correspondences[b], probs[b], rand_seed, opt.hyps,
                        opt.threshold, E, inliers, gradients)
                    incount /= correspondences.size(2)

                    pts1 = correspondences[b, 0:2].squeeze().numpy().T
Example #54
def reconstruct_from_binary_patterns():
    scale_factor = 1.0
    ref_white = cv2.resize(cv2.imread("images/pattern000.jpg", cv2.IMREAD_GRAYSCALE) / 255.0, (0,0), fx=scale_factor,fy=scale_factor)
    ref_black = cv2.resize(cv2.imread("images/pattern001.jpg", cv2.IMREAD_GRAYSCALE) / 255.0, (0,0), fx=scale_factor,fy=scale_factor)
    ref_color = cv2.resize(cv2.imread("images/pattern001.jpg", cv2.IMREAD_COLOR), (0,0), fx=scale_factor,fy=scale_factor)
    ref_avg   = (ref_white + ref_black) / 2.0
    ref_on    = ref_avg + 0.05 # a threshold for ON pixels
    ref_off   = ref_avg - 0.05 # add a small buffer region
    h,w = ref_white.shape
    # mask of pixels where there is projection
    proj_mask = (ref_white > (ref_black + 0.05))

    scan_bits = np.zeros((h,w), dtype=np.uint16)

    # analyze the binary patterns from the camera
    for i in range(0,15):
        # read the file
        patt_gray = cv2.resize(cv2.imread("images/pattern%03d.jpg"%(i+2),cv2.IMREAD_GRAYSCALE)/255.0, (0,0), fx=scale_factor,fy=scale_factor)
        # mask where the pixels are ON
        on_mask = (patt_gray > ref_on) & proj_mask

        # this code corresponds with the binary pattern code
        bit_code = np.uint16(1 << i)
        tmp = bit_code*on_mask
        scan_bits = scan_bits|tmp
        # TODO: populate scan_bits by putting the bit_code according to on_mask

    # the codebook translates from <binary code> to (x,y) in projector screen space
    with open("binary_codes_ids_codebook.pckl","r") as f:
        binary_codes_ids_codebook = pickle.load(f)
    camera_points = []
    projector_points = []
    corr_img = np.zeros((h,w,3), dtype=np.uint8)
    for x in range(w):
        for y in range(h):
            if not proj_mask[y,x]:
                continue # no projection here
            if scan_bits[y,x] not in binary_codes_ids_codebook:
                continue # bad binary code
            x_p, y_p = binary_codes_ids_codebook.get(scan_bits[y][x])
            if x_p >= 1279 or y_p >= 799: # filter
                continue
            ref_color[y][x]
            camera_points.append((x/2.0, y/2.0))
            projector_points.append((x_p, y_p))
            corr_img[y,x,:] = np.array([0, y_p*255/800.0, x_p*255/1280.0])
            # TODO: use binary_codes_ids_codebook[...] and scan_bits[y,x] to
            # TODO: find for the camera (x,y) the projector (p_x, p_y).
            # TODO: store your points in camera_points and projector_points

            # IMPORTANT!!! : due to differences in calibration and acquisition - divide the camera points by 2
    output_name = sys.argv[1] + "correspondence.jpg"
    cv2.imwrite(output_name, corr_img)
    # load the prepared stereo calibration between projector and camera
    with open("stereo_calibration.pckl","r") as f:
        d = pickle.load(f)
        camera_K    = d['camera_K']
        camera_d    = d['camera_d']
        projector_K = d['projector_K']
        projector_d = d['projector_d']
        projector_R = d['projector_R']
        projector_t = d['projector_t']

    cam_pts = np.array(camera_points, np.float32)
    cam_pts = cam_pts.reshape(-1, 1, 2)
    proj_pts = np.array(projector_points, np.float32)
    proj_pts = proj_pts.reshape(-1, 1, 2)
    camera_pointsOut = cv2.undistortPoints(cam_pts, camera_K, camera_d)
    proj_pointsOut = cv2.undistortPoints(proj_pts, projector_K, projector_d)
    P_rt = np.append(projector_R, projector_t, axis=1)
    P_rt = np.vstack([P_rt, [0.,0.,0.,1.]])
    I = np.identity(4)
    P1 = I[:3]
    P2 = P_rt[:3]
    P1 = np.float32(P1)
    P2 = np.float32(P2)
    proj_pointsOut = np.float32(proj_pointsOut)
    camera_pointsOut = np.float32(camera_pointsOut)

    out_homo = cv2.triangulatePoints(P1, P2, camera_pointsOut, proj_pointsOut)
    out_homo=np.transpose(out_homo)
    points_3d = cv2.convertPointsFromHomogeneous(out_homo)
    # TODO: use cv2.undistortPoints to get normalized points for camera, use camera_K and camera_d
    # TODO: use cv2.undistortPoints to get normalized points for projector, use projector_K and projector_d
    # TODO: use cv2.triangulatePoints to triangulate the normalized points
    # TODO: use cv2.convertPointsFromHomogeneous to get real 3D points
    # TODO: name the resulted 3D points as "points_3d"
    mask = (points_3d[:,:,2] > 200) & (points_3d[:,:,2] < 1400)
    points_3d = points_3d[mask]
    # keep the camera points in step with the depth filter so the colors line up
    camera_points = np.array(camera_points)[mask.reshape(-1)]
    pts_clrd = []
    for i in range(points_3d.shape[0]):
        # camera points were halved earlier, so scale back up to index the image
        x = int(camera_points[i][0] * 2)
        y = int(camera_points[i][1] * 2)
        pts_clrd.append(ref_color[y][x])
    pts_3dClrd = np.append(points_3d, pts_clrd, axis = 1)

    # print(points_3d.shape)
    output_name = sys.argv[1] + "output_rgb.xyz"
    with open(output_name,"w") as f:
        for p in pts_3dClrd:
            f.write("%d %d %d %d %d %d\n"%(p[0],p[1],p[2],p[3],p[4],p[5]))
    return points_3d
Example #55
    def __init__(self,
                 mapp,
                 img,
                 K,
                 Kinv,
                 DistCoef,
                 pose=np.eye(4),
                 tid=None,
                 des=None):
        self.H, self.W = img.shape[0:2]
        self.K = np.array(K)
        # K = [[fx, 0,cx],
        #      [ 0,fy,cy],
        #      [ 0, 0, 1]]
        self.fx = K[0][0]
        self.fy = K[1][1]
        self.cx = K[0][2]
        self.cy = K[1][2]
        self.Kinv = np.array(Kinv)
        self.D = np.array(DistCoef)

        self.pose = np.array(pose)  # Tcw
        self.Ow = np.zeros((3, 1))
        self.Rwc = np.eye(3)

        # self.kps       keypoints
        # self.kpsu      [u]ndistorted keypoints
        # self.octaves   keypoint octaves
        # self.des       keypoint descriptors

        # self.points    map points
        # self.outliers  outliers flags for map points

        if img is not None:
            self.h, self.w = img.shape[0:2]
            if des is None:
                # convert to gray image
                if img.ndim > 2:
                    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
                self.kps, self.des = Frame.tracker.detect(img)
                # convert from a list of keypoints to an array of points and octaves
                kps_data = np.array([[x.pt[0], x.pt[1], x.octave]
                                     for x in self.kps],
                                    dtype=np.float32)
                self.octaves = np.uint32(kps_data[:, 2].copy())
                #print('octaves: ', self.octaves)
                self.kps = kps_data[:, :2].copy()
                self.kpsu = cv2.undistortPoints(
                    np.expand_dims(self.kps, axis=1), self.K, self.D, None,
                    self.K)
                self.kpsu = self.kpsu.ravel().reshape(self.kps.shape[0], 2)
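                # The trailing self.K argument re-applies the intrinsics, so kpsu
                # stays in (undistorted) pixel coordinates; kpsn below holds the
                # corresponding normalized coordinates.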
                #print('kpsu diff: ', self.kpsu-self.kps)
            else:
                #assert len(des) < 256
                self.kpsu, self.des = des, np.array(
                    list(range(len(des))) * 32,
                    np.uint8).reshape(32, len(des)).T
                self.octaves = np.full(self.kpsu.shape[0], 1, dtype=np.uint8)
            self.kpsn = normalize(self.Kinv, self.kpsu)
            self.points = [None] * len(self.kpsu)  # init points
            self.outliers = np.full(self.kpsu.shape[0], False, dtype=bool)
        else:
            # fill in later
            self.h, self.w = 0, 0
            self.kpsu, self.des, self.points = None, None, None
        self.id = Frame.next_id  #tid if tid is not None else mapp.add_frame(self)
        Frame.next_id += 1
Example #56
def reconstruct_from_binary_patterns():
    scale_factor = 1.0
    ref_white = cv2.resize(cv2.imread("images/pattern000.jpg", cv2.IMREAD_GRAYSCALE) / 255.0, (0,0), fx=scale_factor,fy=scale_factor)
    ref_black = cv2.resize(cv2.imread("images/pattern001.jpg", cv2.IMREAD_GRAYSCALE) / 255.0, (0,0), fx=scale_factor,fy=scale_factor)
    ref_color = cv2.resize(cv2.imread("images/pattern001.jpg", cv2.IMREAD_COLOR) , (0,0), fx=scale_factor,fy=scale_factor)
    ref_avg   = (ref_white + ref_black) / 2.0
    ref_on    = ref_avg + 0.05 # a threshold for ON pixels
    ref_off   = ref_avg - 0.05 # add a small buffer region

    h,w = ref_white.shape

    # mask of pixels where there is projection
    proj_mask = (ref_white > (ref_black + 0.05))
    #print proj_mask
    scan_bits = np.zeros((h,w), dtype=np.uint16)

    # analyze the binary patterns from the camera
    for i in range(0,15):
        # read the file
        patt = cv2.resize(cv2.imread("images/pattern%03d.jpg"%(i+2)), (0,0), fx=scale_factor,fy=scale_factor)
        #patt_gray = cv2.cvtColor(patt, cv2.COLOR_BGR2GRAY)/255.0
        patt_gray = cv2.resize(cv2.imread("images/pattern%03d.jpg"%(i+2), cv2.IMREAD_GRAYSCALE) / 255.0, (0,0), fx=scale_factor,fy=scale_factor)
        # mask where the pixels are ON
        on_mask = (patt_gray > ref_on) & proj_mask
        #print on_mask

        # this code corresponds with the binary pattern code
        bit_code = np.uint16(1 << i)
        
        # populate scan_bits by OR-ing in the bit_code wherever on_mask is set
        scan_bits[on_mask] |= bit_code
    print("load codebook")
    # the codebook translates from <binary code> to (x,y) in projector screen space
    with open("binary_codes_ids_codebook.pckl","r") as f:
        binary_codes_ids_codebook = pickle.load(f)

    camera_points = []
    projector_points = []
    data = np.zeros((h,w,3), dtype=np.float64)
    color_points = []

    for x in range(w):
        for y in range(h):
            if not proj_mask[y,x]:
                continue # no projection here
                #projector_points[y/2][x/2] = proj_mask
            if scan_bits[y,x] not in binary_codes_ids_codebook:
                continue # bad binary code
            p_x, p_y = binary_codes_ids_codebook[scan_bits[y][x]]
            if p_x >= 1279 or p_y >= 799: # filter
                continue

            data[y][x] = [0,p_y*255/800.0,p_x*255/1280.0]
            # due to differences in calibration and acquisition - divide the camera points by 2
            # Store the points in camera_points and projector_points
            # store as (x, y) pixel order, which is what undistortPoints expects
            projector_points.append([[p_x, p_y]])
            camera_points.append([[x/2.0, y/2.0]])
            color_points.append(ref_color[y][x])

    cv2.imwrite("correspondance.jpg", data)
    # now that we have 2D-2D correspondences, we can triangulate 3D points!

    # load the prepared stereo calibration between projector and camera
    with open("stereo_calibration.pckl","r") as f:
        d = pickle.load(f)
        camera_K    = d['camera_K']
        camera_d    = d['camera_d']
        projector_K = d['projector_K']
        projector_d = d['projector_d']
        projector_R = d['projector_R']
        projector_t = d['projector_t']

        # Computes the ideal point coordinates from the observed point coordinates
    #ideal_camPoints = cv2.undistortPoints(np.reshape(np.array(camera_points, dtype = np.float32),(len(camera_points),1,2)),camera_K,camera_d)
    #ideal_projPoints = cv2.undistortPoints(np.reshape(np.array(projector_points, dtype = np.float32),(len(projector_points),1,2)),projector_K,projector_d)

    ideal_camPoints = cv2.undistortPoints(np.array(camera_points, dtype = np.float32),camera_K,camera_d)
    ideal_projPoints = cv2.undistortPoints(np.array(projector_points, dtype = np.float32),projector_K,projector_d)
    #Projection matrix
    # identity matrix
    P1 = np.eye(3,4)
    #Rotation_Translation matrix
    ProjectRT = np.column_stack((projector_R, projector_t))

    # camera points pair with P1 (identity), projector points with ProjectRT
    tri_output = cv2.triangulatePoints(P1, ProjectRT, ideal_camPoints, ideal_projPoints)
    points_3d = cv2.convertPointsFromHomogeneous(tri_output.transpose())

    #   Filter on z component
    mask = (points_3d[:,:,2] > 200) & (points_3d[:,:,2] < 1400)
    ideal_color_points = np.array(color_points)
    
    FilterPoints_3d = []
    Fcolor_points = []
    for i in range(len(mask)):
        if not mask[i]:
            FilterPoints_3d.append(i)
        if points_3d[i][0][2] > 200 and points_3d[i][0][2] < 1400:
            Fcolor_points.append(ideal_color_points[i])
    points_3d = np.delete(points_3d, FilterPoints_3d, 0)
    #print Fcolor_points
    return points_3d
Example #57
def check_undistortion(config, cbrow=8, cbcol=6, plot=True):
    """
    This function undistorts the calibration images based on the camera matrices and stores them in the project folder (defined in the config file)
    so that the camera matrices can be checked visually.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    cbrow : int
        Int specifying the number of rows in the calibration image.
    
    cbcol : int
        Int specifying the number of columns in the calibration image.

    plot : bool
        If this is set to True, the results of undistortion are saved as plots. The default is ``True``; if provided it must be either ``True`` or ``False``.

    Example
    --------
    Linux/MacOs/Windows
    >>> deeplabcutcore.check_undistortion(config, cbrow = 8,cbcol = 6)

    """

    # Read the config file
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    cfg_3d = auxiliaryfunctions.read_config(config)
    img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(
        cfg_3d
    )

    # colormap = plt.get_cmap(cfg_3d['colormap'])
    markerSize = cfg_3d["dotsize"]
    alphaValue = cfg_3d["alphaValue"]
    markerType = cfg_3d["markerType"]
    markerColor = cfg_3d["markerColor"]
    cam_names = cfg_3d["camera_names"]

    images = glob.glob(os.path.join(img_path, "*.jpg"))

    # Sort the images
    images.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
    """
    for fname in images:
        for cam in cam_names:
            if cam in fname:
                filename = Path(fname).stem
                ext = Path(fname).suffix
                img = cv2.imread(fname)
                gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    """
    camera_pair = [[cam_names[0], cam_names[1]]]
    stereo_params = auxiliaryfunctions.read_pickle(
        os.path.join(path_camera_matrix, "stereo_params.pickle")
    )

    for pair in camera_pair:
        map1_x, map1_y = cv2.initUndistortRectifyMap(
            stereo_params[pair[0] + "-" + pair[1]]["cameraMatrix1"],
            stereo_params[pair[0] + "-" + pair[1]]["distCoeffs1"],
            stereo_params[pair[0] + "-" + pair[1]]["R1"],
            stereo_params[pair[0] + "-" + pair[1]]["P1"],
            (stereo_params[pair[0] + "-" + pair[1]]["image_shape"][0]),
            cv2.CV_16SC2,
        )
        map2_x, map2_y = cv2.initUndistortRectifyMap(
            stereo_params[pair[0] + "-" + pair[1]]["cameraMatrix2"],
            stereo_params[pair[0] + "-" + pair[1]]["distCoeffs2"],
            stereo_params[pair[0] + "-" + pair[1]]["R2"],
            stereo_params[pair[0] + "-" + pair[1]]["P2"],
            (stereo_params[pair[0] + "-" + pair[1]]["image_shape"][1]),
            cv2.CV_16SC2,
        )
        cam1_undistort = []
        cam2_undistort = []

        for fname in images:
            if pair[0] in fname:
                filename = Path(fname).stem
                img1 = cv2.imread(fname)
                gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
                h, w = img1.shape[:2]
                _, corners1 = cv2.findChessboardCorners(gray1, (cbcol, cbrow), None)
                corners_origin1 = cv2.cornerSubPix(
                    gray1, corners1, (11, 11), (-1, -1), criteria
                )

                # Remapping dataFrame_camera1_undistort
                im_remapped1 = cv2.remap(img1, map1_x, map1_y, cv2.INTER_LANCZOS4)
                imgpoints_proj_undistort = cv2.undistortPoints(
                    src=corners_origin1,
                    cameraMatrix=stereo_params[pair[0] + "-" + pair[1]][
                        "cameraMatrix1"
                    ],
                    distCoeffs=stereo_params[pair[0] + "-" + pair[1]]["distCoeffs1"],
                    P=stereo_params[pair[0] + "-" + pair[1]]["P1"],
                    R=stereo_params[pair[0] + "-" + pair[1]]["R1"],
                )
                cam1_undistort.append(imgpoints_proj_undistort)
                cv2.imwrite(
                    os.path.join(str(path_undistort), filename + "_undistort.jpg"),
                    im_remapped1,
                )
                imgpoints_proj_undistort = []

            elif pair[1] in fname:
                filename = Path(fname).stem
                img2 = cv2.imread(fname)
                gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
                h, w = img2.shape[:2]
                _, corners2 = cv2.findChessboardCorners(gray2, (cbcol, cbrow), None)
                corners_origin2 = cv2.cornerSubPix(
                    gray2, corners2, (11, 11), (-1, -1), criteria
                )

                # Remapping
                im_remapped2 = cv2.remap(img2, map2_x, map2_y, cv2.INTER_LANCZOS4)
                imgpoints_proj_undistort2 = cv2.undistortPoints(
                    src=corners_origin2,
                    cameraMatrix=stereo_params[pair[0] + "-" + pair[1]][
                        "cameraMatrix2"
                    ],
                    distCoeffs=stereo_params[pair[0] + "-" + pair[1]]["distCoeffs2"],
                    P=stereo_params[pair[0] + "-" + pair[1]]["P2"],
                    R=stereo_params[pair[0] + "-" + pair[1]]["R2"],
                )
                cam2_undistort.append(imgpoints_proj_undistort2)
                cv2.imwrite(
                    os.path.join(str(path_undistort), filename + "_undistort.jpg"),
                    im_remapped2,
                )
                imgpoints_proj_undistort2 = []

        cam1_undistort = np.array(cam1_undistort)
        cam2_undistort = np.array(cam2_undistort)
        print("All images are undistorted and stored in %s" % str(path_undistort))
        print(
            "Use the function ``triangulate`` to undistort the dataframes and compute the triangulation"
        )

        if plot == True:
            f1, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
            f1.suptitle(
                str("Original Image: Views from " + pair[0] + " and " + pair[1]),
                fontsize=25,
            )

            # Display images in RGB
            ax1.imshow(cv2.cvtColor(img1, cv2.COLOR_BGR2RGB))
            ax2.imshow(cv2.cvtColor(img2, cv2.COLOR_BGR2RGB))

            norm = mcolors.Normalize(vmin=0.0, vmax=cam1_undistort.shape[1])
            plt.savefig(os.path.join(str(path_undistort), "Original_Image.png"))

            # Plot the undistorted corner points
            f2, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
            f2.suptitle(
                "Undistorted corner points on camera-1 and camera-2", fontsize=25
            )
            ax1.imshow(cv2.cvtColor(im_remapped1, cv2.COLOR_BGR2RGB))
            ax2.imshow(cv2.cvtColor(im_remapped2, cv2.COLOR_BGR2RGB))
            for i in range(0, cam1_undistort.shape[1]):
                ax1.scatter(
                    [cam1_undistort[-1][i, 0, 0]],
                    [cam1_undistort[-1][i, 0, 1]],
                    marker=markerType,
                    s=markerSize,
                    color=markerColor,
                    alpha=alphaValue,
                )
                ax2.scatter(
                    [cam2_undistort[-1][i, 0, 0]],
                    [cam2_undistort[-1][i, 0, 1]],
                    marker=markerType,
                    s=markerSize,
                    color=markerColor,
                    alpha=alphaValue,
                )
            plt.savefig(os.path.join(str(path_undistort), "undistorted_points.png"))

            # Triangulate
            triangulate = auxiliaryfunctions_3d.compute_triangulation_calibration_images(
                stereo_params[pair[0] + "-" + pair[1]],
                cam1_undistort,
                cam2_undistort,
                path_undistort,
                cfg_3d,
                plot=True,
            )
            auxiliaryfunctions.write_pickle("triangulate.pickle", triangulate)
Example #58
 def image_to_camera(self, points, depth=1):
     points = np.expand_dims(np.asarray(points, np.float32), 0)
     new_image_points = cv2.undistortPoints(
         points, self.intrinsic_matrix, self.distortion_coeffs, None, None, None)
     return cv2.convertPointsToHomogeneous(new_image_points)[:, 0, :] * depth
Example #59
File: scenario.py  Project: lzb863/dr1ven
    def stereo_rectify(
        self,
        left,
        right,
        pts1,
        pts2,
    ):
        E, mask = cv2.findEssentialMat(
            pts1,
            pts2,
            self._A,
            method=cv2.RANSAC,
        )

        fpts1 = pts1[mask.ravel() == 1]
        fpts2 = pts2[mask.ravel() == 1]

        _, r, t, mask = cv2.recoverPose(E, fpts1, fpts2, self._A)
        # r1, r2, t = cv2.decomposeEssentialMat(E)

        R1, R2, P1, P2, Q, _, _ = cv2.stereoRectify(
            self._A,
            np.zeros(4),
            self._A,
            np.zeros(4),
            (left.shape[1], left.shape[0]),
            r,
            t,
            # newImageSize=(left.shape[1], left.shape[0]),
            flags=cv2.CALIB_ZERO_DISPARITY,
        )

        # import pdb; pdb.set_trace()

        map1x, map1y = cv2.initUndistortRectifyMap(
            self._A,
            np.zeros(4),
            R1,
            P1,
            (left.shape[1], left.shape[0]),
            cv2.CV_32FC1,
        )
        map2x, map2y = cv2.initUndistortRectifyMap(
            self._A,
            np.zeros(4),
            R2,
            P2,
            (left.shape[1], left.shape[0]),
            cv2.CV_32FC1,
        )

        # import pdb; pdb.set_trace()

        left = cv2.remap(left, map1x, map1y, cv2.INTER_LINEAR)
        right = cv2.remap(right, map2x, map2y, cv2.INTER_LINEAR)

        rpts1 = cv2.undistortPoints(
            np.float64(np.expand_dims(fpts1, 1)),
            self._A,
            np.zeros(4),
            R=R1,
            P=P1,
        ).squeeze(1)
        rpts2 = cv2.undistortPoints(
            np.float64(np.expand_dims(fpts2, 1)),
            self._A,
            np.zeros(4),
            R=R2,
            P=P2,
        ).squeeze(1)
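        # With the R/P pairs from stereoRectify, rpts1 and rpts2 are rectified
        # pixel coordinates, so matching points should lie on (nearly) the same
        # image row.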

        return left, right, fpts1, fpts2, rpts1, rpts2, Q
    def __getitem__(self, idx):
        if cfg.DATALOADER.BENCHMARK: self.timer0.tic()
        db_rec = copy.deepcopy(self.db[idx])
        if cfg.DATASETS.TASK not in [
                'lifting', 'lifting_direct', 'lifting_rot'
        ]:
            if cfg.VIS.H36M:
                #seq = (db_rec['subject'], db_rec['action'], db_rec['subaction'])
                #if not seq in self.checked:
                #    print(seq)
                #    print(self.isdamaged(db_rec))
                #    self.checked.append(seq)
                #else:
                #    return np.ones(2)

                print(db_rec['image'])
            # print(db_rec['image'])

            if self.data_format == 'undistoredzip':
                image_dir = 'undistoredimages.zip@'
            elif self.data_format == 'zip':
                image_dir = 'images.zip@'
            else:
                image_dir = ''
            image_file = osp.join(self.root, db_rec['source'], image_dir,
                                  'images', db_rec['image'])
            if 'zip' in self.data_format:
                from utils import zipreader
                data_numpy = zipreader.imread(
                    image_file,
                    cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
            else:
                data_numpy = cv2.imread(
                    image_file,
                    cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
            # crop image from 1002 x 1000 to 1000 x 1000
            data_numpy = data_numpy[:1000]
            assert data_numpy.shape == (1000, 1000, 3), data_numpy.shape

        joints = db_rec['joints_2d'].copy()
        joints_3d = db_rec['joints_3d'].copy()
        joints_3d_camera = db_rec['joints_3d_camera'].copy()

        joints_3d_camera_normed = joints_3d_camera - joints_3d_camera[0]
        keypoint_scale = np.linalg.norm(joints_3d_camera_normed[8] -
                                        joints_3d_camera_normed[0])
        joints_3d_camera_normed /= keypoint_scale

        if cfg.DATALOADER.BENCHMARK:
            assert joints.shape[0] == cfg.KEYPOINT.NUM_PTS, joints.shape[0]
            #assert db_rec['joints_3d'].shape[0] == cfg.KEYPOINT.NUM_PTS,db_rec['joints_3d'].shape[0]
        center = np.array(db_rec['center']).copy()
        joints_vis = db_rec['joints_vis'].copy()
        scale = np.array(db_rec['scale']).copy()
        # camera parameters used for undistortion and projection below
        camera = db_rec['camera']
        R = camera['R'].copy()
        rotation = 0
        K = np.array([
            [float(camera['fx']), 0,
             float(camera['cx'])],
            [0, float(camera['fy']),
             float(camera['cy'])],
            [0, 0, 1.],
        ])
        T = camera['T'].copy()
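        # Recover world-space joints (X_world = R^T X_cam + T) and build the
        # extrinsic matrix Rt = [R | -R T] mapping world to camera coordinates.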
        world3d = (R.T @ joints_3d_camera.T + T).T
        Rt = np.zeros((3, 4))
        Rt[:, :3] = R
        Rt[:, 3] = -R @ T.squeeze()
        # Rt[:, :3] = R.T
        # Rt[:, 3] = T.squeeze()

        if cfg.DATASETS.TASK not in [
                'lifting', 'lifting_direct', 'lifting_rot'
        ]:
            if cfg.VIS.H36M:
                if not np.isclose(world3d, joints_3d).all():
                    print('world3d difference')
                    print(world3d)
                    print('joints_3d')
                    print(joints_3d)
                from IPython import embed
                import matplotlib.pyplot as plt
                import matplotlib.patches as patches
                fig = plt.figure(1)
                ax1 = fig.add_subplot(231)
                ax2 = fig.add_subplot(232)
                ax3 = fig.add_subplot(233)
                ax4 = fig.add_subplot(234)
                ax5 = fig.add_subplot(235)
                ax6 = fig.add_subplot(236)
                ax1.imshow(data_numpy[..., ::-1])
                ax1.set_title('raw')

        #0.058 s
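        # OpenCV distortion vector in (k1, k2, p1, p2, k3) order.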
        distCoeffs = np.array([
            float(i) for i in [
                camera['k'][0], camera['k'][1], camera['p'][0], camera['p'][1],
                camera['k'][2]
            ]
        ])

        if cfg.DATASETS.TASK not in [
                'lifting', 'lifting_direct', 'lifting_rot'
        ]:
            if self.data_format != 'undistoredzip':
                data_numpy = cv2.undistort(data_numpy, K, distCoeffs)

        #0.30 s
        if cfg.DATALOADER.BENCHMARK: print('timer0', self.timer0.toc())
        if cfg.DATALOADER.BENCHMARK: self.timer.tic()

        if cfg.VIS.H36M:
            ax1.scatter(joints[:, 0], joints[:, 1], color='green')
            imagePoints, _ = cv2.projectPoints(joints_3d[:,
                                                         None, :], (0, 0, 0),
                                               (0, 0, 0), K, distCoeffs)
            imagePoints = imagePoints.squeeze()
            ax1.scatter(imagePoints[:, 0], imagePoints[:, 1], color='yellow')
            from vision.multiview import project_point_radial
            camera = db_rec['camera']
            f = (K[0, 0] + K[1, 1]) / 2.
            c = K[:2, 2].reshape((2, 1))
            iccv19Points = project_point_radial(joints_3d_camera, f, c,
                                                camera['k'], camera['p'])
            ax1.scatter(iccv19Points[:, 0], iccv19Points[:, 1], color='blue')
            # trans1 = get_affine_transform(center, scale, rotation, self.image_size)
            # box1 = affine_transform(np.array([[0, 0], [999, 999]]), trans1)
            # print(box1)
            # rect1 = patches.Rectangle(box1[0],box1[1][0] - box1[0][0],box1[1][1] - box1[0][1],linewidth=1,edgecolor='r',facecolor='none')
            # ax1.add_patch(rect1)
            # print(joints, joints.shape, center.shape)
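        # Undistort the annotated 2D joints and the crop center so they match the
        # undistorted image; P=K keeps the output in pixel coordinates.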
        joints = cv2.undistortPoints(joints[:, None, :], K, distCoeffs,
                                     P=K).squeeze()
        center = cv2.undistortPoints(np.array(center)[None, None, :],
                                     K,
                                     distCoeffs,
                                     P=K).squeeze()
        #data_numpy  = self.compute_distorted_meshgrid(data_numpy ,
        #        float(camera['fx']),
        #        float(camera['fy']),
        #        float(camera['cx']),
        #        float(camera['cy']),
        #        np.array([float(i) for i in camera['k']]),
        #        np.array([float(i) for i in camera['p']]))
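        # Random scale and rotation augmentation (train time only).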
        if self.is_train:
            sf = self.scale_factor
            rf = self.rotation_factor
            scale = scale * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
            rotation = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) \
                if random.random() <= 0.6 else 0

        if cfg.DATASETS.TASK not in [
                'lifting', 'lifting_direct', 'lifting_rot'
        ]:
            if cfg.VIS.H36M:
                # print(joints.shape, center.shape)
                # print(trans)
                ax2.imshow(data_numpy[..., ::-1])
                projected2d = K.dot(joints_3d_camera.T)
                projected2d[:2] = projected2d[:2] / projected2d[-1]
                ax1.scatter(projected2d[0], projected2d[1], color='red')
                ax2.scatter(joints[:, 0], joints[:, 1], color='green')
                ax2.scatter(projected2d[0], projected2d[1], color='red')
                # box1 = affine_transform(np.array([[0, 0], [999, 999]]), trans)
                # rect1 = patches.Rectangle(box1[0],box1[1][0] - box1[0][0],box1[1][1] - box1[0][1],linewidth=1,edgecolor='r',facecolor='none')
                # ax2.add_patch(rect1)
                ax2.set_title('undistort')

        #input = data_numpy
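        # Affine crop to the network input size; composing the crop with K gives
        # the intrinsics of the cropped view, and cropK @ Rt the full projection.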
        trans = get_affine_transform(center, scale, rotation, self.image_size)
        cropK = np.concatenate((trans, np.array([[0., 0., 1.]])), 0).dot(K)
        KRT = cropK.dot(Rt)

        if cfg.DATASETS.TASK not in [
                'lifting', 'lifting_direct', 'lifting_rot'
        ]:
            input = cv2.warpAffine(
                data_numpy,
                trans, (int(self.image_size[0]), int(self.image_size[1])),
                flags=cv2.INTER_LINEAR)

        # 0.31 s

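        # Map joints into the cropped image with the same affine transform;
        # joints that fall outside the crop are marked invisible.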
        for i in range(self.num_joints):
            if joints_vis[i, 0] > 0.0:
                joints[i, 0:2] = affine_transform(joints[i, 0:2], trans)
                if (np.min(joints[i, :2]) < 0
                        or joints[i, 0] >= self.image_size[0]
                        or joints[i, 1] >= self.image_size[1]):
                    joints_vis[i, :] = 0

        if cfg.DATASETS.TASK not in [
                'lifting', 'lifting_direct', 'lifting_rot'
        ]:
            if cfg.VIS.H36M:
                ax3.imshow(input[..., ::-1])
                # ax3.scatter(joints[:, 0], joints[:, 1])
                # projected2d = KRT.dot(np.concatenate((db_rec['joints_3d'], np.ones( (len(db_rec['joints_3d']), 1))), 1).T)
                ax3.scatter(joints[:, 0], joints[:, 1])
                ax3.set_title('cropped')
                ax4.imshow(input[..., ::-1])
                # ax4.scatter(joints[:, 0], joints[:, 1])
                # projected2d = KRT.dot(np.concatenate((db_rec['joints_3d'], np.ones( (len(db_rec['joints_3d']), 1))), 1).T)
                projected2d = cropK.dot(joints_3d_camera.T)
                projected2d[:2] = projected2d[:2] / projected2d[-1]
                #ax4.scatter(joints[:, 0], joints[:, 1], color='green')
                #ax4.scatter(projected2d[0], projected2d[1], color='red')
                ax4.scatter(joints[-2:, 0], joints[-2:, 1], color='green')
                ax4.scatter(projected2d[0, -2:],
                            projected2d[1, -2:],
                            color='red')
                ax4.set_title('cropped, project 3d to 2d')

            if self.transform:
                input = self.transform(input)

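        # Per-joint heatmap targets; joint visibility serves as the loss weight.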
        target = self.heatmapcreator.get(joints)
        target = target.reshape((-1, target.shape[1], target.shape[2]))
        target_weight = joints_vis[:, 0, None]
        # alternative, less accurate heatmap generation (kept for reference)
        #target, target_weight = self.generate_target(joints, joints_vis)
        # target = torch.from_numpy(target).float()
        # target_weight = torch.from_numpy(target_weight)

        if cfg.VIS.H36M:
            #ax5.imshow(target.max(0)[0])
            #ax5.scatter(coord2pix(joints[:, 0], 4), coord2pix(joints[:, 1], 4), color='green')
            from modeling.backbones.basic_batch import find_tensor_peak_batch
            # pred_joints, _ = find_tensor_peak_batch(target, self.sigma, cfg.BACKBONE.DOWNSAMPLE)
            # ax5.scatter(coord2pix(pred_joints[:, 0], 4), coord2pix(pred_joints[:, 1], 4), color='blue')
            # ax6.scatter(coord2pix(pred_joints[:, 0], 4), coord2pix(pred_joints[:, 1], 4), color='blue')

            heatmap_by_creator = self.heatmapcreator.get(joints)
            heatmap_by_creator = heatmap_by_creator.reshape(
                (-1, heatmap_by_creator.shape[1], heatmap_by_creator.shape[2]))
            ax6.imshow(heatmap_by_creator.max(0))
            ax6.scatter(coord2pix(joints[:, 0], 4),
                        coord2pix(joints[:, 1], 4),
                        color='green')
            # pred_joints, _ = find_tensor_peak_batch(torch.from_numpy(heatmap_by_creator).float(), self.sigma, cfg.BACKBONE.DOWNSAMPLE)
            # print('creator found', pred_joints)
            # ax5.scatter(coord2pix(pred_joints[:, 0], 4), coord2pix(pred_joints[:, 1], 4), color='red')
            # ax6.scatter(coord2pix(pred_joints[:, 0], 4), coord2pix(pred_joints[:, 1], 4), color='red')
            plt.show()
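        # Assemble the sample dictionary returned to the data loader.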
        ret = {
            'heatmap': target,
            'visibility': target_weight,
            'KRT': KRT,
            'points-2d': joints,
            'points-3d': (world3d.astype(np.double)
                          if 'lifting' not in cfg.DATASETS.TASK else world3d),
            'camera-points-3d': joints_3d_camera,
            'normed-points-3d': joints_3d_camera_normed,
            'scale': keypoint_scale,
            'action': torch.tensor([db_rec['action']]),
            'img-path': db_rec['image'],
        }
        if cfg.DATASETS.TASK not in [
                'lifting', 'lifting_direct', 'lifting_rot'
        ]:
            ret['img'] = input
        ret['K'] = cropK
        ret['RT'] = Rt
        if cfg.VIS.MULTIVIEWH36M:
            ret['T'] = T
            ret['R'] = R
            ret['original_image'] = data_numpy
        if cfg.KEYPOINT.TRIANGULATION == 'rpsm' and not self.is_train:
            ret['origK'] = K
            ret['crop_center'] = center
            ret['crop_scale'] = scale

        if cfg.DATALOADER.BENCHMARK: print('timer1', self.timer.toc())
        return ret