Code Example #1
File: colorchecker.py Project: cedricjeanty/macbeth
def findBestGridCells(image, squares, grids, canvas):
    """ Find best grid cells that has colors matching ColorChecker samples
    """

    h, w = MACBETH_LAB.shape[:2]
    boxPoints = np.array([0, 0, w, 0, w, h, 0,
                          h]).reshape(4, 2).astype(np.float32)
    boxCells = np.array(list(np.ndindex(h, w)),
                        dtype=np.float32)[:, ::-1] + 0.5

    BestGrid = namedtuple("BestGrid", "grid cells transform cost")
    best = BestGrid(None, None, None, np.inf)

    for grid in grids:
        corners = restoreGridContour(squares, grid, canvas).astype(np.float32)
        assert len(corners) == 4

        # find the most appropriate orientation

        for i in range(4):

            # find perspective transform matrix
            M = cv2.getPerspectiveTransform(boxPoints, corners)

            # find color cells centers
            cells = cv2.transform(boxCells.reshape(-1, 1, 2), M)
            cells = cv2.convertPointsFromHomogeneous(cells).astype(
                int).reshape(-1, 2)

            # sum the distances from the sampled cell colors to the expected colors
            bgr = image[cells[:, 1], cells[:, 0]].reshape(-1, 1, 3)
            lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB).astype(int).reshape(
                MACBETH_LAB.shape)
            dist = np.sqrt(np.sum((MACBETH_LAB.astype(int) - lab)**2, axis=2))
            cost = np.sum(dist)

            if cost < best.cost:
                best = BestGrid(grid, cells, M, cost)

            corners = np.roll(corners, 1, axis=0)

    grid, cells, transform, _ = best
    assert grid is not None

    # find corresponding points between transformed and source grid cell centers
    srcPoints, dstPoints = [], []
    for i in grid:
        center = squares[i].rect.center
        dist = np.sqrt(np.sum((cells - center)**2, axis=1))
        srcPoints.append(cells[np.argmin(dist)])
        dstPoints.append(center)

    # find homography matrix to minimize cells positioning error
    srcPoints, dstPoints = np.array(srcPoints, dtype=np.float32), np.array(
        dstPoints, dtype=np.float32)
    M, _ = cv2.findHomography(srcPoints, dstPoints)
    cells = cv2.transform(boxCells.reshape(-1, 1, 2), M.dot(transform))
    cells = cv2.convertPointsFromHomogeneous(cells).astype(int).reshape(-1, 2)

    return cells
Code Example #2
def GetTriangulatedPts(img1pts,
                       img2pts,
                       K,
                       R,
                       t,
                       triangulateFunc,
                       Rbase=None,
                       tbase=None):

    if Rbase is None:
        Rbase = np.eye(3)
    if tbase is None:
        tbase = np.zeros((3, 1))

    img1ptsHom = cv2.convertPointsToHomogeneous(img1pts)[:, 0, :]
    img2ptsHom = cv2.convertPointsToHomogeneous(img2pts)[:, 0, :]

    img1ptsNorm = (np.linalg.inv(K).dot(img1ptsHom.T)).T
    img2ptsNorm = (np.linalg.inv(K).dot(img2ptsHom.T)).T

    img1ptsNorm = cv2.convertPointsFromHomogeneous(img1ptsNorm)[:, 0, :]
    img2ptsNorm = cv2.convertPointsFromHomogeneous(img2ptsNorm)[:, 0, :]

    print(Rbase.shape, tbase.shape, R.shape, t.shape)
    pts4d = triangulateFunc(np.hstack((Rbase, tbase)), np.hstack((R, t)),
                            img1ptsNorm.T, img2ptsNorm.T)
    pts3d = cv2.convertPointsFromHomogeneous(pts4d.T)[:, 0, :]

    return pts3d
Code Example #3
File: triangulation.py Project: zhevnerchuk/SFM
def triangulate_points(files, tracks, dir_proj):

    successful_triangulations = {}

    for i, track in enumerate(tracks):

        if len(track[-1]) < 3:
            continue

        image_coords = np.array([kp.pt for kp in tracks[i][-1]])
        image_indices = np.arange(tracks[i][1],
                                  tracks[i][1] + len(tracks[i][-1]))

        triangulations = []

        for j in range(len(image_indices) - 1):

            P = np.loadtxt(dir_proj + files[image_indices[j]].split('.')[0] +
                           '.txt',
                           skiprows=1)
            P_dash = np.loadtxt(
                dir_proj + files[image_indices[j + 1]].split('.')[0] + '.txt',
                skiprows=1)

            world_point = cv.triangulatePoints(
                P, P_dash, image_coords[j].reshape(2, -1),
                image_coords[j + 1].reshape(2, -1))

            world_point = cv.convertPointsFromHomogeneous(
                world_point.reshape(-1, 4))
            triangulations.append(world_point)

        triangulation = np.array(triangulations).mean(axis=0)
        world_point = cv.convertPointsToHomogeneous(triangulation).reshape(-1)

        for j in range(len(image_indices)):

            P = np.loadtxt(dir_proj + files[image_indices[j]].split('.')[0] +
                           '.txt',
                           skiprows=1)

            rep = cv.convertPointsFromHomogeneous(
                (P @ world_point).reshape(-1, 3)).reshape(-1)

            if np.linalg.norm(rep - image_coords[j]) > 10:
                break
        else:
            successful_triangulations[i] = triangulation.reshape(-1)

    return successful_triangulations
Code Example #4
File: sfm.py Project: andsfonseca/example-python-sfm
def main(opts):
    #Loading 5th and 6th image data only (hardcoded for now)..
    with open('../data/fountain-P11/images/keypoints_descriptors/0005.pkl',
              'rb') as fileobj:
        data1 = pkl.load(fileobj)
        kp1, desc1 = data1
        kp1 = DeserializeKeypoints(kp1)

    with open('../data/fountain-P11/images/keypoints_descriptors/0006.pkl',
              'rb') as fileobj:
        data2 = pkl.load(fileobj)
        kp2, desc2 = data2
        kp2 = DeserializeKeypoints(kp2)

    with open('../data/fountain-P11/images/matches/matches.pkl',
              'rb') as fileobj:
        matches = pkl.load(fileobj)
        matches = matches[('0005.pkl', '0006.pkl')]
        matches = DeserializeMatchesDict(matches)

    #2/4. FUNDAMENTAL MATRIX ESTIMATION
    img1pts, img2pts = GetAlignedMatches(kp1, desc1, kp2, desc2, matches)
    F, mask = cv2.findFundamentalMat(img1pts,
                                     img2pts,
                                     method=cv2.FM_RANSAC,
                                     ransacReprojThreshold=opts.outlierThres,
                                     confidence=opts.fundProb)
    mask = mask.ravel().astype(bool)

    #3/4. CAMERA POSE ESTIMATION
    K = np.array([[2759.48, 0, 1520.69], [0, 2764.16, 1006.81],
                  [0, 0, 1]])  #hardcoded for now, have to generalize..
    E = K.T.dot(F.dot(K))
    retval, R, t, mask2 = cv2.recoverPose(E, img1pts[mask], img2pts[mask], K)

    #4/4. TRIANGULATION.
    img1ptsHom = cv2.convertPointsToHomogeneous(img1pts[mask])[:, 0, :]
    img2ptsHom = cv2.convertPointsToHomogeneous(img2pts[mask])[:, 0, :]

    img1ptsNorm = (np.linalg.inv(K).dot(img1ptsHom.T)).T
    img2ptsNorm = (np.linalg.inv(K).dot(img2ptsHom.T)).T

    img1ptsNorm = cv2.convertPointsFromHomogeneous(img1ptsNorm)[:, 0, :]
    img2ptsNorm = cv2.convertPointsFromHomogeneous(img2ptsNorm)[:, 0, :]

    pts4d = cv2.triangulatePoints(np.eye(3, 4), np.hstack((R, t)),
                                  img1ptsNorm.T, img2ptsNorm.T)
    pts3d = cv2.convertPointsFromHomogeneous(pts4d.T)[:, 0, :]

    #Finally, saving 3d points in .ply format to view in meshlab software
    pts2ply(pts3d)
Code Example #5
    def convert_from_homogeneous(self, kpts):
        # Convert homogeneous points to euclidean points
        # @param kpts: List of homogeneous points
        # @return pnh: list of euclidean points

        # Remember that every function in OpenCV needs us to specify the data
        # type. In addition, convertPointsFromHomogeneous needs the shape of the
        # arrays to be correct. The function takes a vector of points in C++
        # (i.e. a list of several points), so in numpy we need a multidimensional
        # array: a x b x c, where a is the number of points, b=1, and c=2 to
        # represent 1x2 point data.

        if len(kpts[0]) == 3:

            for i in range(len(kpts)):

                kpts[i] = np.array(kpts[i], np.float32).reshape(-1, 1, 3)

            pnh = [cv2.convertPointsFromHomogeneous(x) for x in kpts]

            for i in range(len(pnh)):

                pnh[i] = np.array(pnh[i], np.float32).reshape(1, 2, 1)

        elif len(kpts[0]) == 4:

            for i in range(len(kpts)):

                kpts[i] = np.array(kpts[i], np.float32).reshape(-1, 1, 4)

            pnh = [cv2.convertPointsFromHomogeneous(x) for x in kpts]

            for i in range(len(pnh)):

                pnh[i] = np.array(pnh[i], np.float32).reshape(1, 3, 1)

        elif len(kpts) == 3:

            pnh = np.zeros((2, len(kpts[0])))

            for i in range(len(kpts[0])):

                pnh[:, i] = kpts[:2, i]

        return pnh
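The comment above describes the shape contract in prose. As a minimal illustrative sketch (assuming only numpy and cv2; the variable names here are ours, not the project's):

import cv2
import numpy as np

# One homogeneous 2D point (x, y, w) as an (N, 1, 3) float array.
pts_h = np.array([[2.0, 4.0, 2.0]], dtype=np.float32).reshape(-1, 1, 3)
pts_e = cv2.convertPointsFromHomogeneous(pts_h)  # shape (N, 1, 2)
print(pts_e.reshape(-1, 2))  # [[1. 2.]] -- each point is divided by its w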
Code Example #6
File: utils.py Project: tanajikamble13/SFMbasics
def GetTriangulatedPts(img1pts, img2pts, K, R, t):
    img1ptsHom = cv2.convertPointsToHomogeneous(img1pts)[:, 0, :]
    img2ptsHom = cv2.convertPointsToHomogeneous(img2pts)[:, 0, :]

    img1ptsNorm = (np.linalg.inv(K).dot(img1ptsHom.T)).T
    img2ptsNorm = (np.linalg.inv(K).dot(img2ptsHom.T)).T

    img1ptsNorm = cv2.convertPointsFromHomogeneous(img1ptsNorm)[:, 0, :]
    img2ptsNorm = cv2.convertPointsFromHomogeneous(img2ptsNorm)[:, 0, :]

    pts4d = cv2.triangulatePoints(np.eye(3, 4), np.hstack((R, t)),
                                  img1ptsNorm.T, img2ptsNorm.T)
    pts3d = cv2.convertPointsFromHomogeneous(pts4d.T)[:, 0, :]

    return pts3d
Code Example #7
File: utils.py Project: pupil-labs/pupil
def convert_marker_extrinsics_to_points_3d(marker_extrinsics):
    mat = convert_extrinsic_to_matrix(marker_extrinsics)
    marker_transformed_h = np.matmul(mat, get_marker_points_4d_origin().T)
    marker_points_3d = cv2.convertPointsFromHomogeneous(marker_transformed_h.T)
    marker_points_3d.shape = 4, 3

    return marker_points_3d
Code Example #8
File: utils.py Project: YifanLiChina/pupil-1
def convert_marker_extrinsics_to_points_3d(marker_extrinsics):
    mat = convert_extrinsic_to_matrix(marker_extrinsics)
    marker_transformed_h = np.matmul(mat, get_marker_points_4d_origin().T)
    marker_points_3d = cv2.convertPointsFromHomogeneous(marker_transformed_h.T)
    marker_points_3d.shape = 4, 3

    return marker_points_3d
Code Example #9
File: frame.py Project: bwharton97/IIB_project
 def triangulate_points(self, points):
     """Triangulate 3D location from points in two images. Would get more complicated if expanded to more views"""
     points4D = cv2.triangulatePoints(self.frames[0].corresponding_pi.P,
                                      self.frames[1].corresponding_pi.P,
                                      points[0].T, points[1].T).T
     points3D = cv2.convertPointsFromHomogeneous(points4D)
     return points3D
Code Example #10
def calculateCameraCenterAndPrincipalAxis(P):
    # Determine the camera center and orientation from the camera matrix
    camera_center = cv.convertPointsFromHomogeneous(np.transpose(
        null_space(P))).squeeze()
    principal_axis = P[2, 0:3]

    return camera_center, principal_axis
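A hedged usage sketch for the function above, assuming the same imports the example itself relies on (numpy as np, cv2 as cv, scipy.linalg.null_space); the projection matrix is illustrative:

P = np.hstack([np.eye(3), np.array([[-1.0], [-2.0], [-3.0]])])  # K = I, center at (1, 2, 3)
center, axis = calculateCameraCenterAndPrincipalAxis(P)
print(center)  # [1. 2. 3.] -- the dehomogenized null space of P
print(axis)    # [0. 0. 1.] -- third row of the rotation part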
Code Example #11
def triangulate_point(pts_2d, views):
    '''Triangulate a point from multiple views, using cv2.triangulatePoints for two views or a DLT solve for more.

    See https://github.com/opencv/opencv_contrib/blob/master/modules/sfm/src/triangulation.cpp for reference.
    '''

    assert len(pts_2d) == len(
        views), 'Number of 2d points and views did not match'
    n_views = len(views)
    if n_views > 2:
        design = np.zeros((3 * n_views, 4 + n_views))
        for i in range(n_views):
            for jj in range(3):
                for ii in range(4):
                    design[3 * i + jj, ii] = -views[i][jj, ii]
            design[3 * i + 0, 4 + i] = pts_2d[i][0]
            design[3 * i + 1, 4 + i] = pts_2d[i][1]
            design[3 * i + 2, 4 + i] = 1
        u, s, vh = svd(design, full_matrices=False)
        pt_4d = vh[-1, :4]
    else:
        pt_4d = cv2.triangulatePoints(views[0], views[1],
                                      np.array(pts_2d[0]).reshape(2, 1),
                                      np.array(pts_2d[1]).reshape(2, 1))
    pt_3d = cv2.convertPointsFromHomogeneous(pt_4d.reshape(1, 4)).ravel()
    return pt_3d
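A hedged two-view usage sketch (the projection matrices are illustrative; assumes numpy as np and cv2 are imported as in the example):

P1 = np.hstack([np.eye(3), np.zeros((3, 1))])                  # camera at the origin
P2 = np.hstack([np.eye(3), np.array([[-1.0], [0.0], [0.0]])])  # shifted 1 unit along x
# A world point at (0, 0, 5) projects to (0, 0) in view 1 and (-0.2, 0) in view 2:
print(triangulate_point([(0.0, 0.0), (-0.2, 0.0)], [P1, P2]))  # roughly [0. 0. 5.]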
Code Example #12
def cvTriangulate(pts1, pts2, k, r, t):
    '''
    Takes pts1, pts2, k, r, t
    Returns triangulated points using the OpenCV implementation
    '''
    proj1 = np.array([[1, 0, 0, 0],
                      [0, 1, 0, 0],
                      [0, 0, 1, 0]])
    proj2 = np.append(r, t, 1)
    pts1 = np.array(pts1).transpose()
    pts2 = np.array(pts2).transpose()

    homogeneous_4d_coords = cv2.triangulatePoints(proj1, proj2, pts1, pts2)
    # return triangulatePoints(proj1, proj2, pts1, pts2)

    threeD_coords = cv2.convertPointsFromHomogeneous(homogeneous_4d_coords.transpose())

    output_points = []

    # print threeD_coords
    for point in threeD_coords:
        output_points.append((point[0][0], point[0][1], point[0][2]))
        # output_points.append(point[0])
    # for coord in homogeneous_4d_coords:

    return output_points
Code Example #13
    def projectTo3D(self):
        P1 = self.camera_obj['camera_prop']['P1']
        P2 = self.camera_obj['camera_prop']['P2']

        ptsL = np.zeros((2, self.ptsL.shape[0]))
        ptsR = np.zeros((2, self.ptsR.shape[0]))
        for i, (ptL, ptR) in enumerate(zip(self.ptsL, self.ptsR)):
            ptsL[0, i] = ptL[0]
            ptsL[1, i] = ptL[1]
            ptsR[0, i] = ptR[0]
            ptsR[1, i] = ptR[1]

        self.pts3d = cv2.triangulatePoints(P1, P2, ptsL, ptsR)
        pts3d = []
        for i in range(0, self.pts3d.shape[1]):
            pts3d.append([
                self.pts3d[0, i], self.pts3d[1, i], self.pts3d[2, i],
                self.pts3d[3, i]
            ])
        self.pts3d = np.array(pts3d)
        self.pts3d = cv2.convertPointsFromHomogeneous(self.pts3d)
        self.pts3d = np.reshape(self.pts3d, (self.pts3d.shape[0], 3))
        self.depth = self.pts3d[:, 2]

        # Filter out negative depth
        posDepth = self.depth > 0
        self.ptsL = self.ptsL[posDepth]
        self.ptsR = self.ptsR[posDepth]
        self.pts3d = self.pts3d[posDepth]
        self.depth = self.depth[posDepth]
        self.distance = self.distance[posDepth]
Code Example #14
def reconstruction(img_pairs):
    for paths in img_pairs:
        filename_w_ext1 = os.path.basename(paths[0])
        filename_w_ext2 = os.path.basename(paths[1])
        filename1 = os.path.splitext(filename_w_ext1)[0]
        filename2 = os.path.splitext(filename_w_ext2)[0]
        F = np.loadtxt(filename1 + "FMatrix.txt", dtype=float, delimiter=',')
        pts1 = np.loadtxt(filename1 + "Points1.txt", dtype=int, delimiter=',')
        pts2 = np.loadtxt(filename1 + "Points2.txt", dtype=int, delimiter=',')
        fx = fy = 721.5
        cx = 690.5
        cy = 172.8
        K = np.array([[fx, 0., cx], [0., fy, cy], [0., 0., 1.]])
        Rt = np.hstack((np.eye(3), np.zeros((3, 1))))
        P0 = K.dot(Rt)
        E = K.T * np.mat(F) * K
        R1, R2, t = cv2.decomposeEssentialMat(E)
        P1 = verify_camerapose(P0, K, R1, R2, t, pts1.reshape(-1, 1, 2),
                               pts2.reshape(-1, 1, 2))
        pointcloud = cv2.triangulatePoints(
            P0, P1, np.array(pts1.reshape(-1, 1, 2), dtype=float),
            np.array(pts2.reshape(-1, 1, 2), dtype=float))
        # convert the 4D homogeneous points to 3D
        pointcloud = cv2.convertPointsFromHomogeneous(pointcloud.T)
        write_ply(filename1 + 'punktwolke.ply', pointcloud)
Code Example #15
def triangulate_correspondences(correspondences: Correspondences,
                                view_mat_1: np.ndarray, view_mat_2: np.ndarray,
                                intrinsic_mat: np.ndarray,
                                parameters: TriangulationParameters) \
        -> Tuple[np.ndarray, np.ndarray, float]:
    points2d_1 = correspondences.points_1
    points2d_2 = correspondences.points_2

    normalized_points2d_1 = cv2.undistortPoints(points2d_1.reshape(-1, 1, 2),
                                                intrinsic_mat,
                                                np.array([])).reshape(-1, 2)
    normalized_points2d_2 = cv2.undistortPoints(points2d_2.reshape(-1, 1, 2),
                                                intrinsic_mat,
                                                np.array([])).reshape(-1, 2)

    points3d = cv2.triangulatePoints(view_mat_1, view_mat_2,
                                     normalized_points2d_1.T,
                                     normalized_points2d_2.T)
    points3d = cv2.convertPointsFromHomogeneous(points3d.T).reshape(-1, 3)

    reprojection_error_mask = _calc_reprojection_error_mask(
        points3d, points2d_1, points2d_2, view_mat_1, view_mat_2,
        intrinsic_mat, parameters.max_reprojection_error)
    z_mask = np.logical_and(
        _calc_z_mask(points3d, view_mat_1, parameters.min_depth),
        _calc_z_mask(points3d, view_mat_2, parameters.min_depth))
    angle_mask, median_cos = _calc_triangulation_angle_mask(
        view_mat_1, view_mat_2, points3d,
        parameters.min_triangulation_angle_deg)
    common_mask = reprojection_error_mask & z_mask & angle_mask

    return points3d[common_mask], correspondences.ids[common_mask], median_cos
Code Example #16
 def triangulate(cls, p1, p2, camera1, camera2):
     if p1.size == 0 or p2.size == 0:
         return np.zeros((0, 3))
     object_points_homogeneous = cv2.triangulatePoints(camera1.projection, camera2.projection, p1.T, p2.T)
     object_points = cv2.convertPointsFromHomogeneous(object_points_homogeneous.T)
     object_points = object_points.reshape(-1, 3)
     return object_points
Code Example #17
    def rectify_fusiello(self, d1=np.zeros(2), d2=np.zeros(2)):
        """
        Translation of Andre's IDL function rectify_fusiello.
        """
        try:
            K1, R1, C1, _, _, _, _ = cv2.decomposeProjectionMatrix(
                self.view1.P)
            K2, R2, C2, _, _, _, _ = cv2.decomposeProjectionMatrix(
                self.view2.P)
        except:
            return

        C1 = cv2.convertPointsFromHomogeneous(C1.T).reshape(3, 1)
        C2 = cv2.convertPointsFromHomogeneous(C2.T).reshape(3, 1)

        oc1 = mdot(-R1.T, np.linalg.inv(K1), self.view1.P[:, 3])
        oc2 = mdot(-R2.T, np.linalg.inv(K2), self.view2.P[:, 3])

        v1 = (oc2 - oc1).T
        v2 = np.cross(R1[2, :], v1)
        v3 = np.cross(v1, v2)

        R = np.array([
            v1 / np.linalg.norm(v1), v2 / np.linalg.norm(v2),
            v3 / np.linalg.norm(v3)
        ]).reshape(3, 3)

        Kn1 = np.copy(K2)
        Kn1[0, 1] = 0
        Kn2 = np.copy(K2)
        Kn2[0, 1] = 0

        Kn1[0, 2] = Kn1[0, 2] + d1[0]
        Kn1[1, 2] = Kn1[1, 2] + d1[1]
        Kn2[0, 2] = Kn2[0, 2] + d2[0]
        Kn2[1, 2] = Kn2[1, 2] + d2[1]

        t1 = np.matmul(-R, C1)
        t2 = np.matmul(-R, C2)
        Rt1 = np.concatenate((R, t1), 1)
        Rt2 = np.concatenate((R, t2), 1)
        Prec1 = np.dot(Kn1, Rt1)
        Prec2 = np.dot(Kn2, Rt2)

        Tr1 = np.dot(Prec1[:3, :3], np.linalg.inv(self.view1.P[:3, :3]))
        Tr2 = np.dot(Prec2[:3, :3], np.linalg.inv(self.view2.P[:3, :3]))
        return Prec1, Prec2, Tr1, Tr2
Code Example #18
File: nodes.py Project: openalea/openalea-opencv
def convertPointsFromHomogeneous(points):
    """The function is part of the camera calibration library of OpenCV and
    converts points from homogeneous space to Euclidean space.

    Conversion is done by perspective projection as described in the OpenCV
    documentation.
    """
    return cv2.convertPointsFromHomogeneous(points)
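A hedged usage sketch for this node wrapper (the input values are illustrative; assumes numpy as np):

pts4d = np.array([[1.0, 2.0, 3.0, 0.5]], dtype=np.float32)  # one homogeneous 3D point
print(convertPointsFromHomogeneous(pts4d).reshape(-1, 3))   # [[2. 4. 6.]]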
Code Example #19
def calculate_reprojection_error(point_3D, point_2D, K, R, t):
    """Calculates the reprojection error for a 3D point by projecting it back into the image plane"""

    reprojected_point = K.dot(R.dot(point_3D) + t)
    reprojected_point = cv2.convertPointsFromHomogeneous(
        reprojected_point.T)[:, 0, :].T
    error = np.linalg.norm(point_2D.reshape((2, 1)) - reprojected_point)
    return error
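A hedged usage sketch with an identity pose (the intrinsics are illustrative; assumes numpy as np). Note that point_3D must be a (3, 1) column so that adding t broadcasts correctly:

K = np.array([[100.0, 0.0, 320.0], [0.0, 100.0, 240.0], [0.0, 0.0, 1.0]])
X = np.array([[0.0], [0.0], [4.0]])  # a point on the optical axis
err = calculate_reprojection_error(X, np.array([320.0, 240.0]), K,
                                   np.eye(3), np.zeros((3, 1)))
print(err)  # 0.0 -- the point reprojects exactly onto the principal point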
Code Example #20
def triangulate_points_and_reproject(R_l, t_l, R_r, t_r, K, points3d_with_views, img_idx1, img_idx2, kpts_i, kpts_j, kpts_i_idxs, kpts_j_idxs, reproject=True):
    """
    Triangulate points given 2D correspondences as well as camera parameters.

    :param R_l: Rotation matrix for left image
    :param t_l: Translation vector for left image
    :param R_r: Rotation matrix for right image
    :param t_r: Translation vector for right image
    :param K: Intrinsics matrix
    :param points3d_with_views: List of Point3D_with_views objects. Will have new points appended to it
    :param img_idx1: Index of left image
    :param img_idx2: Index of right image
    :param kpts_i: List of index-aligned keypoint coordinates in left image
    :param kpts_j: List of index-aligned keypoint coordinates in right image
    :param kpts_i_idxs: Indexes of keypoints for left image
    :param kpts_j_idxs: Indexes of keypoints for right image
    :param reproject: Boolean. True if reprojection errors desired
    """

    print(f"Triangulating: {len(kpts_i)} points.")
    P_l = np.dot(K, np.hstack((R_l, t_l)))
    P_r = np.dot(K, np.hstack((R_r, t_r)))

    kpts_i = np.squeeze(kpts_i)
    kpts_i = kpts_i.transpose()
    kpts_i = kpts_i.reshape(2,-1)
    kpts_j = np.squeeze(kpts_j)
    kpts_j = kpts_j.transpose()
    kpts_j = kpts_j.reshape(2,-1)

    point_4d_hom = cv2.triangulatePoints(P_l, P_r, kpts_i, kpts_j)
    points_3D = cv2.convertPointsFromHomogeneous(point_4d_hom.transpose())
    for i in range(kpts_i.shape[1]):
        source_2dpt_idxs = {img_idx1:kpts_i_idxs[i], img_idx2:kpts_j_idxs[i]}
        pt = Point3D_with_views(points_3D[i], source_2dpt_idxs)
        points3d_with_views.append(pt)

    if reproject:
        kpts_i = kpts_i.transpose()
        kpts_j = kpts_j.transpose()
        rvec_l, _ = cv2.Rodrigues(R_l)
        rvec_r, _ = cv2.Rodrigues(R_r)
        projPoints_l, _ = cv2.projectPoints(points_3D, rvec_l, t_l, K, distCoeffs=np.array([]))
        projPoints_r, _ = cv2.projectPoints(points_3D, rvec_r, t_r, K, distCoeffs=np.array([]))
        delta_l , delta_r = [], []
        for i in range(len(projPoints_l)):
            delta_l.append(abs(projPoints_l[i][0][0] - kpts_i[i][0]))
            delta_l.append(abs(projPoints_l[i][0][1] - kpts_i[i][1]))
            delta_r.append(abs(projPoints_r[i][0][0] - kpts_j[i][0]))
            delta_r.append(abs(projPoints_r[i][0][1] - kpts_j[i][1]))
        avg_error_l = sum(delta_l)/len(delta_l)
        avg_error_r = sum(delta_r)/len(delta_r)
        print(f"Average reprojection error for just-triangulated points on image {img_idx1} is:", avg_error_l, "pixels.")
        print(f"Average reprojection error for just-triangulated points on image {img_idx2} is:", avg_error_r, "pixels.")
        errors = list(zip(delta_l, delta_r))
        return points3d_with_views, errors, avg_error_l, avg_error_r

    return points3d_with_views
Code Example #21
        def __TriangulateTwoViews(img1pts, img2pts, R1, t1, R2, t2):
            img1ptsHom = cv2.convertPointsToHomogeneous(img1pts)[:, 0, :]
            img2ptsHom = cv2.convertPointsToHomogeneous(img2pts)[:, 0, :]

            img1ptsNorm = (np.linalg.inv(self.K).dot(img1ptsHom.T)).T
            img2ptsNorm = (np.linalg.inv(self.K).dot(img2ptsHom.T)).T

            img1ptsNorm = cv2.convertPointsFromHomogeneous(img1ptsNorm)[:,
                                                                        0, :]
            img2ptsNorm = cv2.convertPointsFromHomogeneous(img2ptsNorm)[:,
                                                                        0, :]

            pts4d = cv2.triangulatePoints(np.hstack((R1, t1)),
                                          np.hstack((R2, t2)), img1ptsNorm.T,
                                          img2ptsNorm.T)
            pts3d = cv2.convertPointsFromHomogeneous(pts4d.T)[:, 0, :]

            return pts3d
Code Example #22
def triangulate(p1, p2, projection_matrix1, projection_matrix2):
    if p1.size == 0 or p2.size == 0:
        return np.zeros((0, 3))
    object_points_homogeneous = cv2.triangulatePoints(projection_matrix1,
                                                      projection_matrix2, p1.T,
                                                      p2.T)
    object_points = cv2.convertPointsFromHomogeneous(
        object_points_homogeneous.T)
    object_points = object_points.reshape(-1, 3)
    return object_points
Code Example #23
    def triangulatePoints(self, P1, P2, points1, points2):

        pts1_norm = cv.undistortPoints(np.expand_dims(points1, axis=1),
                cameraMatrix=self.K, distCoeffs=self.distCoeffs)
        pts2_norm = cv.undistortPoints(np.expand_dims(points2, axis=1),
                cameraMatrix=self.K, distCoeffs=self.distCoeffs)

        points_4d_hom = cv.triangulatePoints(P1, P2, pts1_norm, pts2_norm)
        points_3d = cv.convertPointsFromHomogeneous(points_4d_hom.T).reshape(-1,3)

        return P2, points_3d
Code Example #24
def ComputeReprojections(X, R, t, K):
    """
    X: (n,3) 3D triangulated points in world coordinate system
    R: (3,3) Rotation Matrix to convert from world to camera coordinate system
    t: (3,1) Translation vector (from camera's origin to world's origin)
    K: (3,3) Camera calibration matrix
    
    out: (n,2) Projected points into image plane"""
    outh = K.dot(R.dot(X.T) + t)
    out = cv2.convertPointsFromHomogeneous(outh.T)[:, 0, :]
    return out
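A hedged usage sketch with an identity pose (illustrative intrinsics; assumes numpy and cv2 as above):

K = np.array([[100.0, 0.0, 320.0], [0.0, 100.0, 240.0], [0.0, 0.0, 1.0]])
X = np.array([[0.0, 0.0, 4.0]])  # one (n, 3) world point on the optical axis
print(ComputeReprojections(X, np.eye(3), np.zeros((3, 1)), K))  # [[320. 240.]]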
Code Example #25
def map_points(homography, coords_src):
    """Map points from coords_src to coords_dst using homography matrix

    Args:
        homography: (3, 3) cv2 homography matrix (see get_homography)
        coords_src: (N, 2) array of coordinates in source frame
    Returns:
        coords_dst (N, 2) array of coordinates in destination frame
    """
    coords_src = cv2.convertPointsToHomogeneous(coords_src).squeeze()
    coords_dst = np.dot(homography, coords_src.T).T
    return cv2.convertPointsFromHomogeneous(coords_dst).squeeze()
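A hedged usage sketch with a pure-translation homography (illustrative values; assumes numpy and cv2 as above):

H = np.array([[1.0, 0.0, 10.0], [0.0, 1.0, 5.0], [0.0, 0.0, 1.0]])
src = np.array([[0.0, 0.0], [2.0, 3.0]])
print(map_points(H, src))  # [[10. 5.] [12. 8.]] -- each point shifted by (10, 5)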
Code Example #26
File: fundamental.py Project: s-low/squawkFly
def eightPointNormalisation(pts):
    print "> 8POINT NORMALISATION"

    cx = 0
    cy = 0
    pts_ = []

    for p in pts:
        cx += p[0]
        cy += p[1]

    cx = cx / len(pts)
    cy = cy / len(pts)

    # translation to (cx,cy) = (0,0)
    T = np.mat([[1, 0, -cx],
                [0, 1, -cy],
                [0, 0, 1]])

    print "Translate by:", -cx, -cy

    # now scale to rms_d = sqrt(2)
    total_d = 0
    for p in pts:
        d = math.hypot(p[0] - cx, p[1] - cy)
        total_d += (d * d)

    # square root of the mean of the squares
    rms_d = math.sqrt((total_d / len(pts)))

    scale_factor = math.sqrt(2) / rms_d
    print "Scale by:", scale_factor

    T = scale_factor * T
    T[2, 2] = 1
    print "T:\n", T

    # apply the transformation
    hom = cv2.convertPointsToHomogeneous(pts)
    for h in hom:
        h_ = T * h.T
        pts_.append(h_)

    pts_ = cv2.convertPointsFromHomogeneous(np.array(pts_, dtype='float32'))
    check8PointNormalisation(pts_)

    # make sure the normalised points are in the same format as original
    pts_r = []
    for p in pts_:
        pts_r.append(p[0])
    pts_r = np.array(pts_r, dtype='float32')

    return pts_r, T
Code Example #27
    def find_cal_pattern_in_3space(self, stereo_cal, pair_image_path):
        """
        stereo_cal:  Projection matrices for cv2.triangulatePoints, stored in a dictionary like this:
        {
            'leftProjMat':leftProjMat ,
            'rightProjMat':rightProjMat,
        }
        pair_image_path: a 2-tuple of the form ("/path/to/one/left/image", "/path/to/one/right/image"),
        
        returns: a list of [x,y,z] coordinates in real-world space, in the form:
            np.array([[   7549.84  , -184252.69  ,   40687.215 ],
                   [   7626.0737, -185671.55  ,   41133.258 ],
                   [   7643.9023, -186005.36  ,   41351.223 ]])
        """

        # Find individual dots in all the images
        logger.info("Finding points in left images from pairs")
        all_points_in_3space, all_points_in_left_images = self._find_point_vectors(
            [pair_image_path[0]])
        logger.info("Finding points in right images from pairs")
        all_points_in_3space, all_points_in_right_images = self._find_point_vectors(
            [pair_image_path[1]])

        # for image_path,points in zip(pair_image_path, [all_points_in_left_images[0], all_points_in_right_images[0]]):
        # img = cv2.imread(image_path)
        # self._draw_points_on_image(img, points)
        # cv2.imshow(image_path, img)
        all_points_in_left_images = all_points_in_left_images[0]
        # logger.debug("Shape: %s"%repr(all_points_in_left_images.shape))
        # all_points_in_left_images = all_points_in_left_images[:,0,:]
        # logger.debug("Shape: %s"%repr(all_points_in_left_images.shape))
        all_points_in_right_images = all_points_in_right_images[0]
        # all_points_in_right_images = all_points_in_right_images[:,0,:]
        # Switch from x,y to row,col
        # all_points_in_left_images = all_points_in_left_images[:,[1,0]]
        # all_points_in_right_images = all_points_in_right_images[:,[1,0]]
        all_points_in_left_images = all_points_in_left_images.transpose()
        # logger.debug("Shape: %s"%repr(all_points_in_left_images.shape))
        all_points_in_right_images = all_points_in_right_images.transpose()

        # logger.debug('all_points_in_left_images: ' + repr(all_points_in_left_images))

        points4d = cv2.triangulatePoints(stereo_cal['leftProjMat'],
                                         stereo_cal['rightProjMat'],
                                         all_points_in_left_images,
                                         all_points_in_right_images)
        points3d = cv2.convertPointsFromHomogeneous(points4d.transpose())

        # logger.debug('points4d: ' + repr(points4d))
        logger.debug('points3d: ' + repr(points3d))
        return points3d[:, 0, :]
Code Example #28
File: PointPairsClass.py Project: aktgpt/LiverVision
 def getCorrespodingPoints(self, leftCurves, rightCurves):
     if leftCurves.size and rightCurves.size:
         leftCurves = cv2.convertPointsToHomogeneous(leftCurves)
         rightCurves = cv2.convertPointsToHomogeneous(rightCurves)
         # distMat = np.empty([len(leftCurves),len(rightCurves)])
         distMatCV = np.empty([len(leftCurves),len(rightCurves)])
         # minDistMat = np.empty([len(leftCurves), 2])
         for i in range(0, len(leftCurves)):
             # lineRight = np.matmul(self.fundamentalMatrix, leftCurves[i])
             lineRightCV = cv2.computeCorrespondEpilines(leftCurves[i], 1, self.fundamentalMatrix)
             for j in range(0, len(rightCurves)):
                 # distMat[i, j] = np.abs(np.matmul(rightCurves[j], lineRight))
                 distMatCV[i, j] = np.abs(np.matmul(rightCurves[j], lineRightCV[0,0,:]))
         leftIdx = []
         rightIdx = []
         for i in range(len(leftCurves)):
             potentialRightIdx = np.argmin(distMatCV[i, :])
             potentialLeftIdx = np.argmin(distMatCV[:, potentialRightIdx])
             if potentialLeftIdx == i and distMatCV[i, potentialRightIdx] < 0.5:
                 leftIdx.append(potentialLeftIdx)
                 rightIdx.append(potentialRightIdx)
         if leftIdx and rightIdx:
             matchedLeftPoints = cv2.convertPointsFromHomogeneous(np.array([leftCurves[leftIdx[i]] for i in
                                                                            range(0, len(leftIdx))])).reshape([1, len(leftIdx), 2])
             matchedRightPoints = cv2.convertPointsFromHomogeneous(np.array([rightCurves[rightIdx[i]] for i in
                                                                             range(0, len(rightIdx))])).reshape([1, len(leftIdx), 2])
             matchedLeftPointsCV, matchedRightPointsCV = cv2.correctMatches(self.fundamentalMatrix, matchedLeftPoints
                                                                        , matchedRightPoints)
             matchedLeftPointsCV = matchedLeftPointsCV.reshape([len(leftIdx), 2])
             matchedRightPointsCV = matchedRightPointsCV.reshape(len(leftIdx), 2)
         else:
             matchedLeftPointsCV = []
             matchedRightPointsCV = []
     else:
         matchedLeftPointsCV = []
         matchedRightPointsCV = []
     return matchedLeftPointsCV, matchedRightPointsCV
Code Example #29
def linear_triangulation(K, R1, T1, R2, T2, uv1, uv2):
    RT1 = np.concatenate((R1, T1), axis=1)
    RT2 = np.concatenate((R2, T2), axis=1)
    proj1 = np.dot(K, RT1)
    proj2 = np.dot(K, RT2)
    proj1 = np.float32(proj1)
    proj2 = np.float32(proj2)
    uv1 = np.float32(uv1.T)
    uv2 = np.float32(uv2.T)
    s = cv2.triangulatePoints(proj1, proj2, uv1, uv2)
    X = s / s[3]
    uv1_recover = np.dot(proj1[:3], X)
    uv1_recover /= uv1_recover[2]
    point = cv2.convertPointsFromHomogeneous(s.T)
    return np.array(point)
Code Example #30
File: Aufgabe_02.py Project: MarkusClln/CVIS
    def create_pointcloud(self):
        I = np.eye(3)
        O = np.zeros((3, 1))

        P0 = K.dot(np.hstack((I, O)))
        P1 = K.dot(np.hstack((self.R1, self.t)))
        P2 = K.dot(np.hstack((self.R1, -self.t)))
        P3 = K.dot(np.hstack((self.R2, self.t)))
        P4 = K.dot(np.hstack((self.R2, -self.t)))

        X1 = cv2.triangulatePoints(P0, P1, self.pts1.T, self.pts2.T)
        X2 = cv2.triangulatePoints(P0, P2, self.pts1.T, self.pts2.T)
        X3 = cv2.triangulatePoints(P0, P3, self.pts1.T, self.pts2.T)
        X4 = cv2.triangulatePoints(P0, P4, self.pts1.T, self.pts2.T)

        X1 = cv2.convertPointsFromHomogeneous(X1.T)
        X2 = cv2.convertPointsFromHomogeneous(X2.T)
        X3 = cv2.convertPointsFromHomogeneous(X3.T)
        X4 = cv2.convertPointsFromHomogeneous(X4.T)

        x1_valid = self.validate_points(X1.T)
        x2_valid = self.validate_points(X2.T)
        x3_valid = self.validate_points(X3.T)
        x4_valid = self.validate_points(X4.T)


        list_valids = [x1_valid, x2_valid, x3_valid, x4_valid]
        min_value = min(list_valids)
        if min_value == x1_valid:
            return X1
        elif min_value == x2_valid:
            return X2
        elif min_value == x3_valid:
            return X3
        elif min_value == x4_valid:
            return X4
Code Example #31
File: _camtrack.py Project: Denzed/CV-8th-semester
def triangulate_correspondences(correspondences: Correspondences,
                                view_mat_1: np.ndarray, view_mat_2: np.ndarray,
                                intrinsic_mat: np.ndarray,
                                parameters: TriangulationParameters,
                                mask: np.ndarray=None) \
        -> Tuple[np.ndarray, np.ndarray]:
    points2d_1 = correspondences.points_1
    points2d_2 = correspondences.points_2

    normalized_points2d_1 = cv2.undistortPoints(
        points2d_1.reshape(-1, 1, 2),
        intrinsic_mat,
        np.array([])
    ).reshape(-1, 2)
    normalized_points2d_2 = cv2.undistortPoints(
        points2d_2.reshape(-1, 1, 2),
        intrinsic_mat,
        np.array([])
    ).reshape(-1, 2)

    points3d = cv2.triangulatePoints(view_mat_1, view_mat_2,
                                     normalized_points2d_1.T,
                                     normalized_points2d_2.T)
    points3d = cv2.convertPointsFromHomogeneous(points3d.T).reshape(-1, 3)

    reprojection_error_mask = _calc_reprojection_error_mask(
        points3d,
        points2d_1,
        points2d_2,
        view_mat_1,
        view_mat_2,
        intrinsic_mat,
        parameters.max_reprojection_error
    )
    z_mask_1 = _calc_z_mask(points3d, view_mat_1, parameters.min_depth)
    z_mask_2 = _calc_z_mask(points3d, view_mat_2, parameters.min_depth)
    angle_mask = _calc_triangulation_angle_mask(
        view_mat_1,
        view_mat_2,
        points3d,
        parameters.min_triangulation_angle_deg
    )
    if mask is not None:
        common_mask = mask & reprojection_error_mask & z_mask_1 & z_mask_2 & angle_mask
    else:
        common_mask = reprojection_error_mask & z_mask_1 & z_mask_2 & angle_mask

    return points3d[common_mask], correspondences.ids[common_mask]
Code Example #32
def triangulation_world(patternSize, squaresize, K1, K2, D1, D2, left, right):

    # READ IMAGES AND FIND POINTS ----------------------------------------------
    grayl = cv.cvtColor(left, cv.COLOR_BGR2GRAY)
    grayr = cv.cvtColor(right, cv.COLOR_BGR2GRAY)
    # find the corners
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    ret_l, corners_l = cv.findChessboardCorners(grayl, patternSize, None)
    ret_r, corners_r = cv.findChessboardCorners(grayr, patternSize, None)
    if ret_l * ret_r:
        pts_l = cv.cornerSubPix(grayl, corners_l, (11, 11), (-1, -1), criteria)
        pts_r = cv.cornerSubPix(grayr, corners_r, (11, 11), (-1, -1), criteria)
    # --------------------------------------------------------------------------

    # PROJECTION MATRICES -------------------------------------------------------
    objp = coins_damier(patternSize, squaresize)
    ret, rvec1, t1 = cv.solvePnP(objp, pts_l, K1, D1)
    ret, rvec2, t2 = cv.solvePnP(objp, pts_r, K2, D2)
    r1, _ = cv.Rodrigues(rvec1)
    r2, _ = cv.Rodrigues(rvec2)
    p1 = np.column_stack((r1, t1))
    Proj1 = K1 @ p1
    p2 = np.column_stack((r2, t2))
    Proj2 = K2 @ p2
    # --------------------------------------------------------------------------

    # UNDISTORT POINTS ---------------------------------------------------------
    ptsl = cv.undistortPoints(pts_l, K1, D1, None, None,
                              K1)  #not rectification
    ptsr = cv.undistortPoints(pts_r, K2, D2, None, None,
                              K2)  #not rectification
    N = ptsl.shape[0]
    projPoints1 = np.zeros((2, N))
    projPoints2 = np.zeros((2, N))
    for i in range(N):
        projPoints1[:, i] = ptsl[i, 0]
        projPoints2[:, i] = ptsr[i, 0]
    # --------------------------------------------------------------------------

    # TRIANGULATION ------------------------------------------------------------
    points4D = cv.triangulatePoints(Proj1, Proj2, projPoints1, projPoints2)
    points3D = cv.convertPointsFromHomogeneous(points4D.T)

    X, Y, Z = points3D[:, 0, 0], points3D[:, 0, 1], points3D[:, 0, 2]
    points = np.stack((X, Y, Z))
    # --------------------------------------------------------------------------

    return points
Code Example #33
    def get_world_image_points(self, image_1_feature_coordinates,
                               image_2_feature_coordinates, camera_matrix):
        (
            projection_matrix_1,
            projection_matrix_2,
            image_1_feature_coordinates,
            image_2_feature_coordinates,
        ) = self._get_projection_matrix(image_1_feature_coordinates,
                                        image_2_feature_coordinates,
                                        camera_matrix)
        homogenous_world_coordinates = cv2.triangulatePoints(
            projection_matrix_1, projection_matrix_2,
            image_1_feature_coordinates.T, image_2_feature_coordinates.T).T

        points = cv2.convertPointsFromHomogeneous(homogenous_world_coordinates)
        return [p[0] for p in points]
Code Example #34
File: Camera.py Project: lmhonorio/py3DRec
	def project(self,X):
		"""
		project(self,X)
		Project a homogeneous set of points in R(4xn) on the current projective plane and normalize coordinates.

		PARAMETERS:
		@X: a set of R(4xn) points in homogeneous coordinates


		OUTPUT:
	returns an (n x 2) array
		"""

		x = np.dot(self.P,X.T)
		x = cv2.convertPointsFromHomogeneous(x.T).reshape(-1,2)

		return x
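A hedged sketch of the same dehomogenization step without a camera object (the 3x4 matrix is illustrative; assumes numpy and cv2):

P = np.hstack([np.eye(3), np.zeros((3, 1))])  # projection of a camera at the origin
X = np.array([[0.0, 0.0, 2.0, 1.0]])          # one homogeneous world point as a row
x = cv2.convertPointsFromHomogeneous(np.dot(P, X.T).T).reshape(-1, 2)
print(x)  # [[0. 0.]] -- the point lands on the principal point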
Code Example #35
def triangulate(im1, im2): #im1 and im2 must be grayscale
    if show_rt:
        t = time.time()

    #compute Fundamental matrix
    F, pts1, pts2 = compute_F(im1, im2)

    #print '\npts1:\n', pts1
    #print '\npts2:\n', pts2

    #compute Essential matrix
    E = compute_E(F, im1, im2)

    #get translation matrix for translation in the x direction
    R, T = compute_R_T(E)

    #get K
    K = compute_K(im1)

    #get projections: reference camera K[I|0] and the second camera from K, R, T
    P0 = K.dot(np.hstack((np.eye(3), np.zeros((3, 1)))))
    P = compute_P(K, R, T)

    print('\nP.shape:', P.shape, '\n')
    print('\npts1.T.shape:', pts1.T.shape, '\n')
    print('\npts2.T.shape:', pts2.T.shape, '\n')

    #if pts1.T.shape[1] == 0 or pts.T.shape[0] == 0:
     #   return None, None, None # None, np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])

    hom_pts = cv2.triangulatePoints(P0, P, pts1.T, pts2.T)

    #print 'hom_pts:\n', hom_pts.T
    euclid_pts = cv2.convertPointsFromHomogeneous(hom_pts.T)

    xfrm = compute_transform(R, T)

    if show_rt:
        print('\ntriangulate() runtime:', time.time() - t, 'secs')

    return euclid_pts, hom_pts.T, xfrm
Code Example #36
def reconstruct_from_binary_patterns():
    scale_factor = 1.0
    ref_white = cv2.resize(
        cv2.imread("images/pattern000.jpg", cv2.IMREAD_GRAYSCALE) / 255.0, (0,
                                                                            0),
        fx=scale_factor,
        fy=scale_factor)
    ref_black = cv2.resize(
        cv2.imread("images/pattern001.jpg", cv2.IMREAD_GRAYSCALE) / 255.0, (0,
                                                                            0),
        fx=scale_factor,
        fy=scale_factor)
    ref_avg = (ref_white + ref_black) / 2.0
    ref_on = ref_avg + 0.05  # a threshold for ON pixels
    ref_off = ref_avg - 0.05  # add a small buffer region

    h, w = ref_white.shape

    # mask of pixels where there is projection
    proj_mask = (ref_white > (ref_black + 0.05))

    scan_bits = np.zeros((h, w), dtype=np.uint16)

    # analyze the binary patterns from the camera
    for i in range(0, 15):
        # read the file
        patt_gray = cv2.resize(
            cv2.imread("images/pattern%03d.jpg" %
                       (i + 2), cv2.IMREAD_GRAYSCALE) / 255.0, (0, 0),
            fx=scale_factor,
            fy=scale_factor)
        # mask where the pixels are ON

        on_mask = (patt_gray > ref_on) & proj_mask
        on_mask = (on_mask << i).astype(np.uint16)
        # this code corresponds with the binary pattern code
        bit_code = np.uint16(1 << i)
        scan_bits |= (bit_code & on_mask)
        # TODO: populate scan_bits by putting the bit_code according to on_mask

    # the codebook translates from <binary code> to (x,y) in projector screen space
    with open("binary_codes_ids_codebook.pckl", "r") as f:
        binary_codes_ids_codebook = pickle.load(f)

    camera_points = []
    projector_points = []
    correspondence = np.zeros((h, w, 3))
    a0 = cv2.imread('./images/pattern001.jpg', cv2.IMREAD_COLOR)
    color_points = []

    for x in range(w):
        for y in range(h):
            if not proj_mask[y, x]:
                continue  # no projection here
            if scan_bits[y, x] not in binary_codes_ids_codebook:
                continue  # bad binary code
            val = binary_codes_ids_codebook[scan_bits[y, x]]
            if val[0] >= 1279 or val[1] >= 799:  # filter
                continue
            # TODO: use binary_codes_ids_codebook[...] and scan_bits[y,x] to
            # TODO: find for the camera (x,y) the projector (p_x, p_y).
            # TODO: store your points in camera_points and projector_points
            # IMPORTANT!!! : due to differences in calibration and acquisition - divide the camera points by 2

            camera_points.append([x / 2.0, y / 2.0])
            projector_points.append([val[0] * 1.0, val[1] * 1.0])

            correspondence[y, x, 1] = val[1] / 960.0
            correspondence[y, x, 2] = val[0] / 1280.0

            color_points.append([a0[y, x, 2], a0[y, x, 1], a0[y, x, 0]])

    a, b = np.unique(correspondence, return_counts=True)
    correspondence = correspondence * 255
    cv2.imwrite(sys.argv[1] + 'correspondence.jpg',
                correspondence.astype(np.uint8))

    # now that we have 2D-2D correspondances, we can triangulate 3D points!

    # load the prepared stereo calibration between projector and camera

    with open("stereo_calibration.pckl", "r") as f:
        d = pickle.load(f)
        camera_K = d['camera_K']
        camera_d = d['camera_d']
        projector_K = d['projector_K']
        projector_d = d['projector_d']
        projector_R = d['projector_R']
        projector_t = d['projector_t']

    # TODO: use cv2.undistortPoints to get normalized points for camera, use camera_K and camera_d

    camera_points = np.array([camera_points])

    # TODO: use cv2.undistortPoints to get normalized points for projector, use projector_K and projector_d

    projector_points = np.array([projector_points])
    norm_cam_points = cv2.undistortPoints(camera_points, camera_K, camera_d)
    norm_proj_points = cv2.undistortPoints(projector_points, projector_K,
                                           projector_d)

    # TODO: use cv2.triangulatePoints to triangulate the normalized points

    homo_points = cv2.triangulatePoints(
        np.column_stack((np.eye(3), [0, 0, 0])),
        np.column_stack((projector_R, projector_t)), norm_cam_points,
        norm_proj_points)

    # TODO: use cv2.convertPointsFromHomogeneous to get real 3D points
    # TODO: name the resulted 3D points as "points_3d"

    points_3d = cv2.convertPointsFromHomogeneous(homo_points.T)
    color_points = np.array(color_points).reshape(-1, 1, 3)
    points_3d = np.append(points_3d, color_points, 2)

    mask = (points_3d[:, :, 2] > 200) & (points_3d[:, :, 2] < 1400)

    points_3d = points_3d[mask].reshape(-1, 1, 6)

    return points_3d
Code Example #37
            color1 = getKeypointColor(image1, kps1, row_num)
            color2 = getKeypointColor(image2, kps2, row_num)
            red = (color1[0] + color2[0]) / 2
            green = (color1[1] + color2[1]) / 2
            blue = (color1[2] + color2[2]) / 2
            line2 = "%i %i %i" % (red, green, blue)
            writeline(f, line + " " + line2)


points, r, t, newMask = cv2.recoverPose(E, new_pts1, new_pts2, mask=mask)
proj1mat = np.append(np.identity(3), np.zeros((3, 1)), 1)
proj2mat = np.append(r, t, 1)

m = ourTriangulatePoints(proj1mat, proj2mat, new_pts1, new_pts2)
# n = homogeneousCoordinatesToRegular(m)
n = cv2.convertPointsFromHomogeneous(m)
print(n.shape)
ptsToFile(n, 'pts_fixed.ply')
print "hello"

#cmd = "open -a meshlab.app pts_fixed.ply".split(" ")
#
#import subprocess
#p = subprocess.Popen(cmd)
# p.kill()


# print cv2.triangulatePoints(proj1mat,proj2mat,pts1.transpose(),pts2.transpose())


#### DRAWING EPIPOLAR LINES STUFF ####
Code Example #38
	def sparceRecostructionTrueCase(file1,file2,kdef):

		k = np.mat(clsReconstruction.loadData(kdef))
		ki = np.linalg.inv(k)

		im_1 = cv2.imread(file1)
		im_2 = cv2.imread(file2)

		im_b1 = cv2.cvtColor(im_1,cv2.COLOR_RGB2GRAY)
		im_b2 = cv2.cvtColor(im_2,cv2.COLOR_RGB2GRAY)

		myC1 = Camera.myCamera(k)
		myC2 = Camera.myCamera(k)


		#place camera 1 at origin
		myC1.projectiveMatrix(np.mat([0,0,0]).transpose(),[0, 0, 0])


		#return matching points
		Xp_1, Xp_2 = clsReconstruction.getMatchingPoints(file1,file2,kdef,30)


		#evaluate the essential matrix using the camera parameters (using the original points)
		E, mask0 = cv2.findEssentialMat(Xp_1,Xp_2,k,cv2.FM_RANSAC)


		#evaluate Fundamental to get the epipolar lines
		#since we already know the camera intrinsics, it is better to evaluate F from the correspondences rather than from the 8-point routine
		F = ki.T*np.mat(E)*ki

		 
		#retrieve R and t from E
		retval, R, t, mask2 = cv2.recoverPose(E,Xp_1,Xp_2)
		

		#place camera 2
		myC2.projectiveMatrix(np.mat(t),R)


		#clsReconstruction.drawEpipolarLines(Xp_1,Xp_2,F,im_1,im_2)


		#triangulate points
		Str_4D = cv2.triangulatePoints(myC1.P[:3],myC2.P[:3],Xp_1.transpose()[:2],Xp_2.transpose()[:2]).T


		#make them Euclidean
		Str_3D = cv2.convertPointsFromHomogeneous(Str_4D).reshape(-1,3)


		#evaluate reprojection
		Xh_Reprojection_1 = myC1.project(Str_4D)
		Xh_Reprojection_2 = myC2.project(Str_4D)


		#three ways to carry out the bundle adjustment; I am using R, t and K as parameters. Using the points is too time
		# consuming, although the results are much better;
		#nR,nt, R0, R1 = clsReconstruction.bundleAdjustment(Str_4D,Xp_1,Xp_2,k,R,t)
		#Str_4D, nR,nt, R0, R1 = clsReconstruction.bundleAdjustmentwithX(Str_4D,Xp_1,Xp_2,k,R,t)	#### not working right now... 

		nk, nR, nt, R0, R1 = clsReconstruction.bundleAdjustmentwithK(Str_4D,Xp_1,Xp_2,k,R,t)
		print('old value {0:.3f}, optimized pose: {1:.3f} \n'.format(R0,R1))
		nki = np.linalg.inv(nk)


		#reevaluate essential and fundamental matrixes
		nE = clsReconstruction.skew(nt)*np.mat(nR)
		nF = nki.T*np.mat(nE)*nki


		#if we use the 3rd option, we should reinitialize the cameras and the essential matrix, since the projective matrix will change
		myC1 = Camera.myCamera(nk)
		myC2 = Camera.myCamera(nk)
		myC1.projectiveMatrix(np.mat([0,0,0]).transpose(),[0, 0, 0])



		#reevaluate all variables based on new values of nR and nt
		myC2.projectiveMatrix(np.mat(nt),nR)
		Str_4D = cv2.triangulatePoints(myC1.P[:3],myC2.P[:3],Xp_1.transpose()[:2],Xp_2.transpose()[:2]).T
		Str_3D = cv2.convertPointsFromHomogeneous(Str_4D).reshape(-1,3)


		
		#Camera.myCamera.show3Dplot(Str_3D)
		Xh_Opt_1 = myC1.project(Str_4D)#.reshape(-1,2)
		Xh_Opt_2 = myC2.project(Str_4D)#.reshape(-1,2)


		#POSSIBLE IMPLEMENTATION: find residuals bigger than a threshold value and optimize their location in R3

		#clsReconstruction.drawEpipolarLines(Xp_1,Xp_2,nF,im_b1,im_b2)

		im = clsReconstruction.drawPoints(im_1,Xp_1,(50,50,250))
		im = clsReconstruction.drawPoints(im,Xh_Reprojection_1,(50,150,100))
		im = clsReconstruction.drawPoints(im,Xh_Opt_1,(250,250,50))

		im2 = clsReconstruction.drawPoints(im_2,Xp_2,(50,50,250))
		im2 = clsReconstruction.drawPoints(im2,Xh_Reprojection_2,(50,150,100))
		im2 = clsReconstruction.drawPoints(im2,Xh_Opt_2,(250,250,50))


		cv2.imshow("im",im)
		cv2.imshow("im2",im2)
		cv2.waitKey(0)
Code Example #39
def _run_triangulation(data_for_triangulation):
    marker_points_4d = cv2.triangulatePoints(*data_for_triangulation)
    marker_points_3d = cv2.convertPointsFromHomogeneous(marker_points_4d.T)
    marker_points_3d.shape = 4, 3
    return marker_points_3d
Code Example #40
File: MatchLoader.py Project: icguy/RemoteSurf
def find_corners():
    objp_total = []
    imgpt_total = []
    files_dir = "out/2017_4_5__15_31_34/"
    files_dir = "out/2017_4_5__15_57_20/"
    # files_dir = "out/2017_3_8__14_51_22/"
    files = glob(join(files_dir, "*.jpg"))

    grid_size = (3, 6)
    real_size = 2.615
    objp_right = np.float32(np.mgrid[0:0 + grid_size[0], 0: grid_size[1]].T.reshape(-1, 2))
    objp_right_h = np.ones((grid_size[0] * grid_size[1], 3), np.float32)
    objp_right_h[:, :2] = objp_right
    objp_left = np.float32(np.mgrid[6:6 + grid_size[0], 0:grid_size[1]].T.reshape(-1, 2))
    objp_left_h = np.ones((grid_size[0] * grid_size[1], 3), np.float32)
    objp_left_h[:, :2] = objp_left
    objp_all = np.zeros((36, 3))
    objp_all[18:, :2] = objp_left
    objp_all[:18, :2] = objp_right
    objp_all *= real_size

    for f in files:
        print(f)
        img_orig = cv2.imread(f, 0)

        img_color = cv2.imread(f)
        scale = 2
        h, w = img_orig.shape
        img = cv2.resize(img_orig, (w // scale, h // scale))
        img_color = cv2.resize(img_color, (w // scale, h // scale))
        h, w = img.shape
        offset = 0
        cut = w // 2 + offset
        img_left = img[:, :cut]
        img_right = img[:, cut:]
        cv2.imshow("left", img_left)
        cv2.imshow("right", img_right)

        rret, rcorners = cv2.findChessboardCorners(img_right, grid_size,
                                                 flags=cv2.CALIB_CB_ADAPTIVE_THRESH | cv2.CALIB_CB_NORMALIZE_IMAGE)
        lret, lcorners = cv2.findChessboardCorners(img_left, grid_size,
                                                 flags=cv2.CALIB_CB_ADAPTIVE_THRESH | cv2.CALIB_CB_NORMALIZE_IMAGE)
        if not lret and not rret:
            print("ERR")
            continue
        print(f)

        left = lret
        print("left: ", left)
        if left:
            ret, corners = lret, lcorners
        else:
            ret, corners = rret, rcorners

        corners = np.fliplr(np.flipud(corners))
        if not left:
            corners = corners.reshape(-1, 2)
            corners[:, 0] = corners[:, 0] + np.ones((18,)) * cut
        corners = corners.reshape(-1, 1, 2)

        if ret:
            corners_orig = corners * scale
            cv2.cornerSubPix(img_orig, corners_orig, (11, 11), (-1, -1),
                             criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))
            corners = corners_orig.reshape(-1, 2) / scale
            cv2.drawChessboardCorners(img_color, grid_size, corners, ret)
            idx = 0
            offset = 0 if not left else len(corners)
            for i in range(grid_size[0]):
                for j in range(grid_size[1]):
                    cv2.putText(img_color, str(idx + offset), tuple(corners[idx, :]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))
                    idx += 1

        if ret:
            if not left:
                H, mask = cv2.findHomography(objp_right, corners.reshape(-1, 2))
                corners2 = np.float32(H.dot(objp_left_h.T)).T
                corners2 = cv2.convertPointsFromHomogeneous(corners2).reshape((-1, 2))
            else:
                H, mask = cv2.findHomography(objp_left, corners.reshape(-1, 2))
                corners2 = np.float32(H.dot(objp_right_h.T)).T
                corners2 = cv2.convertPointsFromHomogeneous(corners2).reshape((-1, 2))

        if ret:
            corners2_orig = corners2 * scale
            cv2.cornerSubPix(img_orig, corners2_orig, (11, 11), (-1, -1),
                             criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))
            corners2 = corners2_orig / scale
            cv2.drawChessboardCorners(img_color, grid_size, corners2, ret)
            idx = 0
            offset = 0 if left else len(corners)
            for i in range(grid_size[0]):
                for j in range(grid_size[1]):
                    cv2.putText(img_color, str(idx + offset), tuple(corners2[idx, :]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))
                    idx += 1

        corners_all = np.zeros((36, 2))
        if not left:
            corners_all[:18, :] = corners_orig.reshape(-1, 2)
            corners_all[18:, :] = corners2_orig.reshape(-1, 2)
        else:
            corners_all[:18, :] = corners2_orig.reshape(-1, 2)
            corners_all[18:, :] = corners_orig.reshape(-1, 2)

        objp_total.append(objp_all)
        imgpt_total.append(corners_all)

        retval, rvec, tvec = cv2.solvePnP(objp_all, corners_all, Utils.camMtx, Utils.dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
        # print objp_all
        # print corners_all
        datafilename = f.replace("\\", "/").replace("/", "_")
        f_handle = open("cache/points_%s.p" % datafilename, "wb")
        pickle.dump({"objp": objp_all, "imgp": corners_all, "rvec": rvec, "tvec": tvec}, f_handle)
        f_handle.close()
        # print rvec, tvec

        #0 15 20 35
        # cv2.putText(img_color, str("%.2f, %.2f, %.2f" % tuple(objp_all[0,:])), tuple(np.int32(corners_all[0, :]/2)), cv2.FONT_HERSHEY_SIMPLEX,                    0.5, (0, 0, 255))
        # cv2.putText(img_color, str("%.2f, %.2f, %.2f" % tuple(objp_all[15,:])), tuple(np.int32(corners_all[15, :]/2)), cv2.FONT_HERSHEY_SIMPLEX,                    0.5, (0, 0, 255))
        # cv2.putText(img_color, str("%.2f, %.2f, %.2f" % tuple(objp_all[20,:])), tuple(np.int32(corners_all[20, :]/2)), cv2.FONT_HERSHEY_SIMPLEX,                    0.5, (0, 0, 255))
        # cv2.putText(img_color, str("%.2f, %.2f, %.2f" % tuple(objp_all[35,:])), tuple(np.int32(corners_all[35, :]/2)), cv2.FONT_HERSHEY_SIMPLEX,                    0.5, (0, 0, 255))
        cv2.imshow("asd", img_color)
Code Example #41
        t_hom_cld = np.dot(x, n_hom_cld.T)
        #print '\nt_hom_cld.shape:', t_hom_cld.shape, '\n'
        h_size = hom_cld.shape[0]

        hom_cld = np.r_[hom_cld, t_hom_cld.T]
        #print '\nhom_cld.shape:', hom_cld.shape, '\n'

        if hom_cld.shape[0] > h_size:
            print("\nIt's growing...\n")
            print("\n\tIteration", i, "\n")

        #pdb.set_trace()
    #print '\nlen(hom_cld):', len(hom_cld), '\n'
    #print '\nhom_cld.dtype:', hom_cld.dtype, '\n'
    cld = cv2.convertPointsFromHomogeneous(hom_cld.astype(np.float32))
    ply_from_list(cld)

    cld = get_cld(im1, im2)
    ply_from_im(cld)



if MEM_TEST:
    #im1 = cv2.imread('scene_r.bmp', 0)
    #im2 = cv2.imread('scene_l.bmp', 0)

    im1 = cv2.imread('frame1.jpg', 0)
    im2 = cv2.imread('frame2.jpg', 0)
    cld = get_cld(im1, im2)
    ply_from_im(cld)