def get_target_pose(cam):
    # Populate object_points
    object_points = cv.fromarray(
        numpy.array([[p.x, p.y, p.z] for p in cam.features.object_points]))
    image_points = cv.fromarray(
        numpy.array([[p.x, p.y] for p in cam.features.image_points]))
    dist_coeffs = cv.fromarray(numpy.array([[0.0, 0.0, 0.0, 0.0]]))
    camera_matrix = numpy.array(
        [[cam.cam_info.P[0], cam.cam_info.P[1], cam.cam_info.P[2]],
         [cam.cam_info.P[4], cam.cam_info.P[5], cam.cam_info.P[6]],
         [cam.cam_info.P[8], cam.cam_info.P[9], cam.cam_info.P[10]]])
    rot = cv.CreateMat(3, 1, cv.CV_32FC1)
    trans = cv.CreateMat(3, 1, cv.CV_32FC1)
    cv.FindExtrinsicCameraParams2(object_points, image_points,
                                  cv.fromarray(camera_matrix), dist_coeffs,
                                  rot, trans)
    rot3x3 = cv.CreateMat(3, 3, cv.CV_32FC1)
    cv.Rodrigues2(rot, rot3x3)
    frame = PyKDL.Frame()
    for i in range(3):
        frame.p[i] = trans[i, 0]
    for i in range(3):
        for j in range(3):
            frame.M[i, j] = rot3x3[i, j]
    return frame
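
# For reference, a minimal sketch of the same pose computation against the
# modern cv2 API (an assumption; not part of the original snippet):
# cv2.solvePnP replaces cv.FindExtrinsicCameraParams2 and cv2.Rodrigues
# replaces cv.Rodrigues2.  object_pts is (N, 3), image_pts is (N, 2).
def get_target_pose_cv2(object_pts, image_pts, camera_matrix):
    import cv2
    import numpy as np
    ok, rvec, tvec = cv2.solvePnP(object_pts, image_pts, camera_matrix,
                                  np.zeros(4))
    R, _ = cv2.Rodrigues(rvec)  # Rodrigues vector -> 3x3 rotation matrix
    return R, tvec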
Example 2
    def handle_monocular(self, msg):
        def pt2line(x0, y0, x1, y1, x2, y2):
            """ point is (x0, y0), line is (x1, y1, x2, y2) """
            return abs((x2 - x1) * (y1 - y0) - (x1 - x0) *
                       (y2 - y1)) / math.sqrt((x2 - x1)**2 + (y2 - y1)**2)

        (image, camera) = msg
        rgb = self.mkgray(image)
        C = self.image_corners(rgb)
        if C:
            cc = self.board.n_cols
            cr = self.board.n_rows
            errors = []
            for r in range(cr):
                (x1, y1) = C[(cc * r) + 0]
                (x2, y2) = C[(cc * r) + cc - 1]
                for i in range(1, cc - 1):
                    (x0, y0) = C[(cc * r) + i]
                    errors.append(pt2line(x0, y0, x1, y1, x2, y2))
            linearity_rms = math.sqrt(
                sum([e**2 for e in errors]) / len(errors))

            # Add in reprojection check
            image_points = cv.fromarray(numpy.array(C))
            object_points = cv.fromarray(numpy.zeros([cc * cr, 3]))
            for i in range(cr):
                for j in range(cc):
                    object_points[i * cc + j, 0] = j * self.board.dim
                    object_points[i * cc + j, 1] = i * self.board.dim
                    object_points[i * cc + j, 2] = 0.0
            dist_coeffs = cv.fromarray(numpy.array([[0.0, 0.0, 0.0, 0.0]]))
            camera_matrix = numpy.array(
                [[camera.P[0], camera.P[1], camera.P[2]],
                 [camera.P[4], camera.P[5], camera.P[6]],
                 [camera.P[8], camera.P[9], camera.P[10]]])
            rot = cv.CreateMat(3, 1, cv.CV_32FC1)
            trans = cv.CreateMat(3, 1, cv.CV_32FC1)
            cv.FindExtrinsicCameraParams2(object_points, image_points,
                                          cv.fromarray(camera_matrix),
                                          dist_coeffs, rot, trans)
            # Convert rotation into a 3x3 Rotation Matrix
            rot3x3 = cv.CreateMat(3, 3, cv.CV_32FC1)
            cv.Rodrigues2(rot, rot3x3)
            # Reproject model points into image
            object_points_world = numpy.asmatrix(rot3x3) * (
                numpy.asmatrix(object_points).T) + numpy.asmatrix(trans)
            reprojected_h = camera_matrix * object_points_world
            reprojected = (reprojected_h[0:2, :] / reprojected_h[2, :])
            reprojection_errors = image_points - reprojected.T
            reprojection_rms = numpy.sqrt(
                numpy.sum(numpy.array(reprojection_errors)**2) /
                numpy.product(reprojection_errors.shape))

            # Print the results
            print "Linearity RMS Error: %.3f Pixels      Reprojection RMS Error: %.3f Pixels" % (
                linearity_rms, reprojection_rms)
        else:
            print 'no chessboard'
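
# A NumPy-only sketch of the linearity metric above (hypothetical helper,
# not from the original): C is the list of (x, y) corners, cc/cr the board
# column/row counts; the per-point error is the distance to the line through
# each row's end corners, exactly as pt2line computes it.
def linearity_rms_np(C, cc, cr):
    import numpy as np
    errors = []
    for r in range(cr):
        row = np.asarray(C[cc * r:cc * (r + 1)], dtype=float)
        p1, p2 = row[0], row[-1]
        d = p2 - p1
        n = np.array([-d[1], d[0]]) / np.hypot(d[0], d[1])  # unit normal
        errors.extend(np.abs(np.dot(row[1:-1] - p1, n)))
    return np.sqrt(np.mean(np.square(errors)))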
Example 3
def rod(p):
    m = cv.CreateMat(3, 3, cv.CV_32FC1)
    for i in range(3):
        for j in range(3):
            m[i, j] = p.M[i, j]
    r = cv.CreateMat(3, 1, cv.CV_32FC1)
    cv.Rodrigues2(m, r)
    return [r[0, 0], r[1, 0], r[2, 0]]
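
# With the cv2 API the same conversion is a single call; a sketch assuming
# p.M is a 3x3 rotation accessed elementwise as above:
def rod_cv2(p):
    import cv2
    import numpy as np
    M = np.array([[p.M[i, j] for j in range(3)] for i in range(3)])
    rvec, _ = cv2.Rodrigues(M)  # 3x3 rotation matrix -> Rodrigues vector
    return rvec.ravel().tolist()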
Example 4
def check_projection(cam_opts, distorted=True):
    cam = _build_test_camera(**cam_opts)
    R = cam.get_rect()
    if not np.allclose(R, np.eye(3)):
        # opencv's ProjectPoints2 does not take the
        # rectification matrix into account, so we skip this test.
        from nose.plugins.skip import SkipTest
        raise SkipTest("Test %s is skipped: %s" %
                       (check_projection.__name__,
                        'cannot check if rectification matrix is not unity'))

    pts3D = _build_points_3d()
    n_pts = len(pts3D)

    src = numpy2opencv_image(pts3D)
    dst = numpy2opencv_pointmat(np.empty((n_pts, 2)))

    t = np.array(cam.get_translation(), copy=True)
    t.shape = 3, 1

    R = cam.get_rotation()
    rvec = numpy2opencv_image(np.empty((1, 3)))
    cv.Rodrigues2(numpy2opencv_image(R), rvec)

    if distorted:
        K = cam.get_K()
        cv_distortion = numpy2opencv_image(cam.get_D())
    else:
        K = cam.get_P()[:3, :3]
        cv_distortion = numpy2opencv_image(np.zeros((5, 1)))

    cv.ProjectPoints2(src, rvec, numpy2opencv_image(t), numpy2opencv_image(K),
                      cv_distortion, dst)
    result_cv = opencv_pointmat2numpy(dst)

    result_np = cam.project_3d_to_pixel(pts3D, distorted=distorted)
    assert result_cv.shape == result_np.shape
    if cam.is_opencv_compatible():
        try:
            assert np.allclose(result_cv, result_np)
        except:
            debug()
            debug('result_cv')
            debug(result_cv)
            debug('result_np')
            debug(result_np)
            debug()
            raise
    else:
        from nose.plugins.skip import SkipTest
        raise SkipTest(
            "Test %s is skipped: %s" %
            (check_projection.__name__,
             'camera model is not OpenCV compatible, skipping test'))
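
# For comparison, the undistorted projection that cv.ProjectPoints2 performs
# reduces to x = K (R X + t) followed by the perspective divide; a NumPy
# sketch (hypothetical helper, pts3d is (N, 3)):
def project_pinhole(pts3d, R, t, K):
    import numpy as np
    cam = np.dot(R, np.asarray(pts3d).T) + np.reshape(t, (3, 1))
    h = np.dot(K, cam)        # homogeneous pixel coordinates
    return (h[:2] / h[2]).T   # perspective divide -> (N, 2)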
Example 5
def read_observations(meas):
    # Stores the checkerboards observed by two cameras
    # camera_id -> camera_id -> [ (cb pose, cb pose, cb id) ]
    mutual_observations = collections.defaultdict(
        lambda: collections.defaultdict(list))

    # Checkerboard frame (identity; PyKDL.Vector has only three components)
    # 1 0 0 0
    # 0 1 0 0
    # 0 0 1 0
    # 0 0 0 1
    ch = PyKDL.Frame()
    ch.p[0] = 0
    ch.p[1] = 0
    ch.p[2] = 0
    rot = cv.CreateMat(3, 1, cv.CV_32FC1)
    cv.Set(rot, 0)
    rot3x3 = cv.CreateMat(3, 3, cv.CV_32FC1)
    cv.Rodrigues2(rot, rot3x3)
    for i in range(3):
        for j in range(3):
            ch.M[i, j] = rot3x3[i, j]

    checkerboard_id = 0
    for msg in meas:

        # dummy checkerboard observation
        for cam in msg.M_cam:
            p = get_target_pose(cam)
            mutual_observations["checkerboard_id"][cam.camera_id].append(
                (ch, p, checkerboard_id))
        '''
        for M_cam1, M_cam2 in itertools.combinations(msg.M_cam, 2):
            cam1 = M_cam1.camera_id
            cam2 = M_cam2.camera_id
            p1 = get_target_pose(M_cam1)
            p2 = get_target_pose(M_cam2)
            
            mutual_observations[cam1][cam2].append( (p1, p2, checkerboard_id) )
            mutual_observations[cam2][cam1].append( (p2, p1, checkerboard_id) )
        '''

        checkerboard_id += 1

    return mutual_observations
Example 6
def calibrate(cam):
    print "\n\nCamera calibration " + str(cam)

    intrinsicPath = "/var/www/MuseumVisitors/calibration/calibIntrCam" + str(
        cam) + "-R1280x800.yml"
    extrinsicPath = "/var/www/MuseumVisitors/calibration/calibExtr_" + str(
        cam) + ".yaml"

    print "Rotation matrix:"
    VectRot = cv2.cv.Load(extrinsicPath,
                          cv.CreateMemStorage(),
                          name="Rotation")
    MatRot = cv.CreateMat(3, 3, cv.CV_64FC1)
    cv.Rodrigues2(VectRot, MatRot)
    MatRot = np.matrix(MatRot)
    print MatRot

    print "Translation vector:"
    VectTrans = cv2.cv.Load(extrinsicPath,
                            cv.CreateMemStorage(),
                            name="Translation")
    VectTrans = np.matrix(VectTrans)
    print VectTrans

    print "Camera matrix:"
    MatCam = cv2.cv.Load(intrinsicPath,
                         cv.CreateMemStorage(),
                         name="camera_matrix")
    MatCam = np.matrix(MatCam)
    print MatCam

    RotTrans = np.concatenate((MatRot[0:3, 0:2], VectTrans), axis=1)
    print(RotTrans)

    Hw = MatCam * RotTrans
    print(Hw)
    HT = (Hw.T).I
    HI2W = Hw.I
    HW2I = (HI2W.I).T
    print "HI2W:"
    print(HI2W)

    print "HW2I:"
    print(HW2I)
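
# The RotTrans concatenation above builds the plane-to-image homography
# H = MatCam * [r1 r2 t], valid for world points on the Z = 0 plane because
# [R | t] (X, Y, 0, 1)^T == [r1 r2 t] (X, Y, 1)^T.  A NumPy sketch of the
# same mapping (hypothetical K, R, t):
def plane_homography(K, R, t):
    import numpy as np
    return np.dot(K, np.column_stack((R[:, 0], R[:, 1], np.ravel(t))))

def world_to_pixel(H, X, Y):
    import numpy as np
    q = np.dot(H, np.array([X, Y, 1.0]))  # homogeneous pixel
    return q[:2] / q[2]                   # perspective divide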
Example 7
def CombineParams(p):
    K = [[p[0], p[1], p[3]], [0, p[2], p[4]], [0, 0, 1]]
    E = []
    for loopVar0 in range(0, int((len(p) - 5) / 6)):
        temp_E = zeros((3, 4))
        R = cv.fromarray(zeros((3, 3)))
        R_vec = zeros((1, 3))
        #print p[(5+(loopVar0*6)):(8+(loopVar0*6))]
        R_vec[0] = copy(p[(5 + (loopVar0 * 6)):(8 + (loopVar0 * 6))])
        R_vec = cv.fromarray(R_vec)
        cv.Rodrigues2(R_vec, R)
        R = asarray(R)
        t = p[(8 + (loopVar0 * 6)):(11 + (loopVar0 * 6))]
        temp_E[:, 0] = R[:, 0]
        temp_E[:, 1] = R[:, 1]
        temp_E[:, 2] = R[:, 2]
        temp_E[:, 3] = t
        E.append(array(temp_E).tolist())
    return K, E
Example 8
def ExtractParams(K, E):
    p = []
    p.append(K[0][0])
    p.append(K[0][1])
    p.append(K[1][1])
    p.append(K[0][2])
    p.append(K[1][2])
    for loopVar0 in range(len(E)):
        R = cv.fromarray(copy(matrix(E[loopVar0])[:, 0:3]))
        R_vec = cv.fromarray(zeros((1, 3)))
        cv.Rodrigues2(R, R_vec)
        R_vec = asarray(R_vec)
        p.append(R_vec[0][0])
        p.append(R_vec[0][1])
        p.append(R_vec[0][2])
        p.append(E[loopVar0][0][3])
        p.append(E[loopVar0][1][3])
        p.append(E[loopVar0][2][3])
    return p
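
# Usage sketch (hypothetical shapes, matching CombineParams above): the
# vector packs the five intrinsics (fx, skew, fy, cx, cy) followed by a
# Rodrigues triple and a translation triple per view, so the two functions
# should round-trip up to float precision.
def roundtrip_check(K, E):
    p = ExtractParams(K, E)
    assert len(p) == 5 + 6 * len(E)
    K2, E2 = CombineParams(p)  # should reproduce K and E
    return K2, E2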
Example 9
def get_image():
    im = cv.QueryFrame(camera)
    # cv.Flip(im, flipMode=1) # mirror effect
    success, corners = cv.FindChessboardCorners(im, PAT_SIZE, findFlags)
    if success:
        cv.DrawChessboardCorners(im, PAT_SIZE, corners, success)
        cv.PutText(im, "Found: (%.1f, %.1f)" % corners[0], infoOrigin,
                   infoFont, (0, 255, 0))
        cam_matrix = cv.CreateMat(3, 3, cv.CV_32F)
        dist_coeff = cv.CreateMat(1, 4, cv.CV_32F)
        rvecs = cv.CreateMat(1, 9, cv.CV_32F)
        tvecs = cv.CreateMat(1, 3, cv.CV_32F)
        pointArr = numpy.array([(x, y, 0) for y in xrange(PAT_SIZE[1]) for x in xrange(PAT_SIZE[0])], numpy.float32)
        objectPoints = cv.fromarray(pointArr)
        imgPointArr = numpy.array(corners, numpy.float32)
        imagePoints = cv.fromarray(imgPointArr)
        pointCounts = cv.CreateMat(1, 1, cv.CV_32S)
        pointCounts[0, 0] = PAT_SIZE[0] * PAT_SIZE[1]
        # Rodrigues version:
        rvecs3 = cv.CreateMat(1, 3, cv.CV_32F)
        cv.CalibrateCamera2(objectPoints, imagePoints, pointCounts, IMG_SIZE, cam_matrix, dist_coeff, rvecs3, tvecs)
        rmat3 = cv.CreateMat(3, 3, cv.CV_32F)
        cv.Rodrigues2(rvecs3, rmat3)
        # end Rodrigues version
        #cv.CalibrateCamera2(objectPoints, imagePoints, pointCounts, IMG_SIZE, cam_matrix, dist_coeff, rvecs, tvecs)
        #rmat = numpy.asarray(rvecs).reshape((3, 3), order='C')
        #print "RVecs:"
        #print rmat
        print "TVecs:", numpy.asarray(tvecs)
        # 3rd column of R == rotated z axis; angle toward x: z=(2, 2), x=(0, 2)
        yaw = math.atan2(rmat3[0, 2], rmat3[2, 2]) * 180 / math.pi
        # rotated z axis: height y=(1, 2) of a unit-length vector
        pitch = math.asin(rmat3[1, 2]) * 180 / math.pi
        # 1st column of R == rotated x axis: height y=(1, 0) of a unit-length vector
        roll = math.asin(rmat3[1, 0]) * 180 / math.pi
        print "Yaw %5.2f, Pitch %5.2f, Roll %5.2f" % (yaw, pitch, roll)
        #import pdb; pdb.set_trace()
        print '-'*40
    else:
        cv.PutText(im, "Not found.", infoOrigin, infoFont, (0, 255, 0))
    return cv2pygame(im)
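
# Standalone sketch of the Euler extraction used above (assuming R is a 3x3
# rotation matrix indexable as R[row, col] with the same conventions);
# returns angles in degrees:
def euler_from_R(R):
    import math
    yaw = math.atan2(R[0, 2], R[2, 2])  # rotated z axis, angle toward x
    pitch = math.asin(R[1, 2])          # height of the rotated z axis
    roll = math.asin(R[1, 0])           # height of the rotated x axis
    return [math.degrees(a) for a in (yaw, pitch, roll)]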
Example 10
def qsolve(C, Q, matches, dbsiftpath, dbimgpath):

    # Get image information.
    info = os.path.join(C.infodir, os.path.basename(dbimgpath)[:-4] + '.info')
    qsource = render_tags.QueryImageInfo(Q.datasource)
    dbsource = render_tags.EarthmineImageInfo(dbimgpath, info)
    map3d = C.pixelmap.open(dbsiftpath)

    # Get 2d pixels and 3d locations of feature inliers
    matches = [(m['query'][0],m['query'][1],m['db'][0],m['db'][1]) for m in matches]
    matches = list(set(matches))
    q2d = [arr([m[0],m[1]]) for m in matches]
    db2d = [arr([m[2],m[3]]) for m in matches]
    db3d = [map3d[int(d[0]),int(d[1])] for d in db2d]
    i = 0
    while i < len(db3d):
        if db3d[i] is None:
            q2d.pop(i)
            db2d.pop(i)
            db3d.pop(i)
        else:
            i = i+1
    olat,olon,oalt = dbsource.lat,dbsource.lon,dbsource.alt
    qlat,qlon = qsource.lat,qsource.lon
    qzx = geom.lltom(olat,olon,qlat,qlon)
    zx = [geom.lltom(olat,olon,d['lat'],d['lon']) for d in db3d]
    y = [dbsource.alt-d['alt'] for d in db3d]
    xyz = [[zx[i][1],y[i],zx[i][0]] for i in range(len(y))]
    print len(xyz)

    # Set K, Rhat
    wx,wy = qsource.pgmsize[0], qsource.pgmsize[1]
    tx,ty = qsource.view_angle[0], qsource.view_angle[1]
    f1, f2 = (wx-1)/(2*np.tan(tx/2)), (wy-1)/(2*np.tan(ty/2))
    f = (f1+f2) / 2
    K = arr([[f,0,(wx-1)/2.0],
             [0,f,(wy-1)/2.0],
             [0,0,1]])
    y,p,r = qsource.yaw, qsource.pitch, qsource.roll
    print [180*y/np.pi,180*p/np.pi,180*r/np.pi]
    Ry = arr([[np.cos(y),0,np.sin(y)],
              [0,1,0],
              [-np.sin(y),0,np.cos(y)]])
    Rx = arr([[1,0,0],
              [0,np.cos(p),-np.sin(p)],
              [0,np.sin(p),np.cos(p)]])
    Rz = arr([[np.cos(r),-np.sin(r),0],
              [np.sin(r),np.cos(r),0],
              [0,0,1]])
    Rhat = dot(Ry,dot(Rx,Rz)) # camera orientation (camera to world)
    tRhat = tp(Rhat)

    # reference camera matrix
    # f 0 cx
    # 0 f cy
    # 0 0 1
    A = cv.CreateMat(3, 3, cv.CV_64F)
    cv.SetZero(A)
    cv.Set2D(A, 0, 0, cv.Scalar(f))
    cv.Set2D(A, 1, 1, cv.Scalar(f))
    cv.Set2D(A, 2, 2, cv.Scalar(1))
    cv.Set2D(A, 0, 2, cv.Scalar((wx-1)/2.0))
    cv.Set2D(A, 1, 2, cv.Scalar((wy-1)/2.0))

    # convert 2d, 3d points to cvMats
    objectPoints3d = cv.CreateMat(len(xyz), 1, cv.CV_64FC3)
    imagePoints2d = cv.CreateMat(len(xyz), 1, cv.CV_64FC2)
    for i in range(len(xyz)):
        cv.Set2D(imagePoints2d, i, 0, cv.Scalar(*q2d[i]))
        cv.Set2D(objectPoints3d, i, 0, cv.Scalar(*xyz[i]))

    # set initial rotation and translation vectors, distortion coefficients
    coeff = cv.CreateMat(4, 1, cv.CV_64F)
    cv.SetZero(coeff)
    rmat = cv.CreateMat(3, 3, cv.CV_64F)
    cv.Set2D(rmat, 0, 0, cv.Scalar(tRhat[0,0]))
    cv.Set2D(rmat, 0, 1, cv.Scalar(tRhat[0,1]))
    cv.Set2D(rmat, 0, 2, cv.Scalar(tRhat[0,2]))
    cv.Set2D(rmat, 1, 0, cv.Scalar(tRhat[1,0]))
    cv.Set2D(rmat, 1, 1, cv.Scalar(tRhat[1,1]))
    cv.Set2D(rmat, 1, 2, cv.Scalar(tRhat[1,2]))
    cv.Set2D(rmat, 2, 0, cv.Scalar(tRhat[2,0]))
    cv.Set2D(rmat, 2, 1, cv.Scalar(tRhat[2,1]))
    cv.Set2D(rmat, 2, 2, cv.Scalar(tRhat[2,2]))
    print 'YPR init for PnP'
    print 180*np.array(geom.ypr_fromR(Rhat))/np.pi
    rvec = cv.CreateMat(3, 1, cv.CV_64F)
    cv.SetZero(rvec)
    cv.Rodrigues2(rmat,rvec) # convert from rotation matrix to Rodrigues vector
    tvec = cv.CreateMat(3, 1, cv.CV_64F)
    cv.SetZero(tvec)

    # solvepnp
    ret = cv.FindExtrinsicCameraParams2(objectPoints3d, imagePoints2d, A,
        coeff, rvec, tvec, useExtrinsicGuess=False)
    np_rvec = np.matrix(rvec).A
    cv.Rodrigues2(rvec,rmat)
    np_rmat = np.transpose(np.matrix(rmat)).A
    np_tvec = np.dot(np_rmat,np.squeeze(np.matrix(tvec).A))
    print 'Rq from PnP'
    print np_rmat
    print 'YPR from PnP'
    print 180*np.array(geom.ypr_fromR(np_rmat))/np.pi

    return np_rmat, np_tvec
Example 11
def rodrigues(vec):
    cvvec = ut.numpymat2cvmat(vec)
    dst = cv.CreateMat(3, 3, cv.CV_32FC1)
    cv.Rodrigues2(cvvec, dst)
    return ut.cvmat2numpymat(dst).T
Example 12
	def getMatRot(self):
		if not self.MatRot:
			self.MatRot=cv.CreateMat(3,3,cv.CV_64FC1)
			cv.Rodrigues2(self.getVectRot(),self.MatRot)
		return self.MatRot
Example 13
class CrCalibration:
    def __init__(self, size, dim):

        self.chess_size = size
        self.dim = dim

        left_ns = rospy.remap_name('left')
        range_ns = rospy.remap_name('range')
        limg_sub = message_filters.Subscriber(left_ns + '/image_rect',
                                              sensor_msgs.msg.Image)
        rimg_sub = message_filters.Subscriber(range_ns + '/image_rect',
                                              sensor_msgs.msg.Image)
        linfo_sub = message_filters.Subscriber(left_ns + '/camera_info',
                                               sensor_msgs.msg.CameraInfo)
        rinfo_sub = message_filters.Subscriber(range_ns + '/camera_info',
                                               sensor_msgs.msg.CameraInfo)

        ts = message_filters.TimeSynchronizer(
            [limg_sub, linfo_sub, rimg_sub, rinfo_sub], 16)
        ts.registerCallback(self.queue_cr)
        self.bridge = cv_bridge.CvBridge()

        self.frame_list = []
        self.static_pose = None
        self.result_list = []
        self.last_err = 0

    def calc_frame(self, lst):
        pos = PyKDL.Vector()
        ex = 0
        ey = 0
        ez = 0
        for f in lst:
            pos += f.p
            z, y, x = f.M.GetEulerZYX()
            ex += x
            ey += y
            ez += z

        size = len(lst)
        return PyKDL.Frame(
            PyKDL.Rotation.EulerZYX(ez / size, ey / size, ex / size),
            pos / size)

    def calc_distance(self, f1, f2):
        test = f1 * f2.Inverse()
        angle, axis = test.M.GetRotAngle()
        norm = test.p.Norm()
        return (norm + (angle / 10.0))

    def queue_cr(self, limg, linfo, rimg, rinfo):
        #
        lpose = self.find_checkerboard_pose(limg, linfo)
        rpose = self.find_checkerboard_pose(rimg, rinfo)
        if lpose is None or rpose is None:
            if lpose is None:
                rospy.loginfo("can't find CB on left camera image")
            if rpose is None:
                rospy.loginfo("can't find CB on range image")
            return False

        # calculate pose
        lframe = pm.fromMsg(lpose.pose)
        rframe = pm.fromMsg(rpose.pose)
        frame = lframe * rframe.Inverse()

        # check pose uniqueness
        for r in self.result_list:
            if self.calc_distance(r[0], lframe) < 0.075:
                return False

        # check pose_movement
        if len(self.frame_list) == 0:
            self.static_pose = lframe
            self.frame_list.append(frame)
            return False

        dist = self.calc_distance(self.static_pose, lframe)
        print dist
        if dist > 0.012:
            self.frame_list = []
            self.static_pose = None
            return False
        self.frame_list.append(frame)

        if len(self.frame_list) > 5:
            self.result_list.append(
                [self.static_pose,
                 self.calc_frame(self.frame_list)])
            self.frame_list = []
            self.static_pose = None
            # check result list count
            ret = self.calc_frame([r[1] for r in self.result_list])
            err = 0.0
            for r in self.result_list:
                err += self.calc_distance(r[1], ret)

            qx, qy, qz, qw = ret.M.GetQuaternion()
            rospy.loginfo("%f %f %f %f %f %f %f / err = %f" %
                          (ret.p.x(), ret.p.y(), ret.p.z(), qx, qy, qz, qw,
                           err / len(self.result_list)))
            self.last_err = err / len(self.result_list)

            # finish check
            if len(self.result_list) > 7 and self.last_err < 0.1:
                rospy.loginfo("Finished size = %d, err = %f" %
                              (len(self.result_list), self.last_err))
                print "translation: [%f, %f, %f]\nrotation: [%f, %f, %f, %f]" % (
                    ret.p.x(), ret.p.y(), ret.p.z(), qx, qy, qz, qw)
                print "(make-coords :pos #f(%f %f %f) :rot (quaternion2matrix #f(%f %f %f %f)))" % (
                    1000 * ret.p.x(), 1000 * ret.p.y(), 1000 * ret.p.z(), qw,
                    qx, qy, qz)
                #print "<node pkg=\"tf\" type=\"static_transform_publisher\" name=\"cam_link_broadcaster\" args=\"%f %f %f %f %f %f %f link1 link2 30\" />" % (ret.p.x(), ret.p.y(), ret.p.z(), qw, qx, qy, qz)
                exit(-1)

            return True

        return True

    def detect(self, image):
        corners_x = self.chess_size[0]
        corners_y = self.chess_size[1]

        #Here, we'll actually call the openCV detector
        found, corners = cv.FindChessboardCorners(
            image, (corners_x, corners_y), cv.CV_CALIB_CB_ADAPTIVE_THRESH)

        if found:
            board_corners = (corners[0], corners[corners_x - 1],
                             corners[(corners_y - 1) * corners_x],
                             corners[len(corners) - 1])

            #find the perimeter of the checkerboard
            perimeter = 0.0
            for i in range(len(board_corners)):
                next = (i + 1) % 4
                xdiff = board_corners[i][0] - board_corners[next][0]
                ydiff = board_corners[i][1] - board_corners[next][1]
                perimeter += math.sqrt(xdiff * xdiff + ydiff * ydiff)

            #estimate the square size in pixels
            square_size = perimeter / ((corners_x - 1 + corners_y - 1) * 2)
            radius = int(square_size * 0.5 + 0.5)

            corners = cv.FindCornerSubPix(
                image, corners, (radius, radius), (-1, -1),
                (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 30, 0.1))

            #uncomment to debug chessboard detection
            #cv.DrawChessboardCorners(image, (corners_x, corners_y), corners, 1)
            #cv.NamedWindow("image")
            #cv.ShowImage("image", image)
            #cv.WaitKey(600)

            #we'll also generate the object points if the user has specified spacing
            object_points = cv.CreateMat(3, corners_x * corners_y, cv.CV_32FC1)

            for y in range(corners_y):
                for x in range(corners_x):
                    cv.SetReal2D(object_points, 0, y * corners_x + x,
                                 x * self.dim)
                    cv.SetReal2D(object_points, 1, y * corners_x + x,
                                 y * self.dim)
                    cv.SetReal2D(object_points, 2, y * corners_x + x, 0.0)

            #not sure why opencv functions return non opencv compatible datatypes... but they do so we'll convert
            corners_cv = cv.CreateMat(2, corners_x * corners_y, cv.CV_32FC1)
            for i in range(corners_x * corners_y):
                cv.SetReal2D(corners_cv, 0, i, corners[i][0])
                cv.SetReal2D(corners_cv, 1, i, corners[i][1])

            return (corners_cv, object_points)
        else:
            #cv.NamedWindow("image_scaled")
            #cv.ShowImage("image_scaled", image_scaled)
            #cv.WaitKey(600)
            rospy.logwarn("Didn't find checkerboard")
            return (None, None)

    def intrinsic_matrix_from_info(self, cam_info):
        intrinsic_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)

        #Because we only want the upper 3x3 (normal) portion of the rectified intrinsic matrix
        for i in range(0, 3):
            for j in range(0, 3):
                intrinsic_matrix[i, j] = cam_info.P[4 * i + j]
        return intrinsic_matrix
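
    # NumPy equivalent of the loop above (a sketch; hypothetical helper
    # name): cam_info.P is the flat row-major 3x4 projection matrix from
    # sensor_msgs/CameraInfo, so the rectified intrinsic matrix is just its
    # left 3x3 block.
    def intrinsic_matrix_from_info_np(self, cam_info):
        P = numpy.array(cam_info.P, dtype=numpy.float64).reshape(3, 4)
        return P[:, :3]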

    def find_checkerboard_pose(self, ros_image, cam_info):
        #we need to convert the ros image to an opencv image
        try:
            image = self.bridge.imgmsg_to_cv(ros_image, "mono8")
        except CvBridgeError as e:
            rospy.logerr("Error importing image %s" % e)
            return

        corners, model = self.detect(image)

        if corners is None or model is None:
            return None
        else:
            #find the pose of the checkerboard
            rot = cv.CreateMat(3, 1, cv.CV_32FC1)
            trans = cv.CreateMat(3, 1, cv.CV_32FC1)
            kc = cv.CreateMat(1, 4, cv.CV_32FC1)
            cv.Set(kc, 0.0)
            intrinsic_matrix = self.intrinsic_matrix_from_info(cam_info)

            cv.FindExtrinsicCameraParams2(model, corners, intrinsic_matrix, kc,
                                          rot, trans)

            #We want to build a transform now, but first we have to convert the
            #rotation vector we get back from OpenCV into a rotation matrix
            rot_mat = cv.CreateMat(3, 3, cv.CV_32FC1)
            cv.Rodrigues2(rot, rot_mat)

            #Now we need to convert this rotation matrix into a quaternion
            #This can be done better in newer versions of opencv, but we need to be boxturtle compatible
            numpy_mat = numpy.fromstring(rot_mat.tostring(),
                                         dtype=numpy.float32).reshape((3, 3))

            #of course, tf expects all matrices passed to it to be 4x4... so we'll build a full matrix here
            full_pose = numpy.zeros((4, 4))

            #first, copy in the rotation matrix
            full_pose[0:3, 0:3] = numpy_mat[0:3, 0:3]

            #next, we'll copy in the translation
            full_pose[0:3, 3] = [trans[i, 0] for i in range(3)]

            #and make sure to add a 1 in the lower right corner
            full_pose[3][3] = 1.0

            rospy.logdebug("%s" % numpy_mat)
            rospy.logdebug("%s" % full_pose)

            tf_trans = tf.transformations.translation_from_matrix(full_pose)
            tf_rot = tf.transformations.quaternion_from_matrix(full_pose)

            #and now we'll actually build our pose stamped
            board_pose = geometry_msgs.msg.PoseStamped()
            board_pose.header = ros_image.header
            board_pose.pose.position.x = tf_trans[0]
            board_pose.pose.position.y = tf_trans[1]
            board_pose.pose.position.z = tf_trans[2]
            board_pose.pose.orientation.x = tf_rot[0]
            board_pose.pose.orientation.y = tf_rot[1]
            board_pose.pose.orientation.z = tf_rot[2]
            board_pose.pose.orientation.w = tf_rot[3]
            rospy.logdebug("%s" % board_pose)

            #we'll publish the pose so we can display it in rviz
            # self.pose_pub.publish(board_pose)
            return board_pose
Example 14
class ImageCbDetectorNode:
    def __init__(self):
        self.corners_x = rospy.get_param('~corners_x', 6)
        self.corners_y = rospy.get_param('~corners_y', 6)
        self.spacing_x = rospy.get_param('~spacing_x', None)
        self.spacing_y = rospy.get_param('~spacing_y', None)
        self.width_scaling = rospy.get_param('~width_scaling', 1)
        self.height_scaling = rospy.get_param('~height_scaling', 1)

        self.im_cb_detector = ImageCbDetector()

        self.image_sub = rospy.Subscriber("image_stream", Image, self.callback)
        self.caminfo_sub = rospy.Subscriber("camera_info", CameraInfo,
                                            self.cam_info_cb)
        self.pose_pub = rospy.Publisher("board_pose", PoseStamped, latch=True)
        self.pose_srv = None
        self.bridge = CvBridge()
        self.mutex = threading.RLock()
        self.cam_info = None
        self.ros_image = None

    def intrinsic_matrix_from_info(self, cam_info):
        intrinsic_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)

        #Because we only want the upper 3x3 (normal) portion of the rectified intrinsic matrix
        for i in range(0, 3):
            for j in range(0, 3):
                intrinsic_matrix[i, j] = cam_info.P[4 * i + j]

        return intrinsic_matrix

    def attempt_to_advertise(self):
        #we'll only advertise the service after we've received a cam_info/image pair
        if self.ros_image is not None and self.cam_info is not None:
            self.pose_srv = rospy.Service("get_checkerboard_pose",
                                          GetCheckerboardPose,
                                          self.find_checkerboard_service)

    def cam_info_cb(self, cam_info):
        with self.mutex:
            self.cam_info = cam_info
            if self.pose_srv is None:
                self.attempt_to_advertise()

    def callback(self, ros_image):
        #copy over the latest image to be used when a service call is made
        with self.mutex:
            self.ros_image = ros_image
            if self.pose_srv is None:
                self.attempt_to_advertise()

    def find_checkerboard_service(self, req):
        poses = []
        min_x = self.ros_image.width
        min_y = self.ros_image.height
        max_x = 0
        max_y = 0
        for i in range(10):
            rospy.sleep(0.5)

            #copy the image over
            with self.mutex:
                image = self.ros_image

            result = self.find_checkerboard_pose(image, req.corners_x,
                                                 req.corners_y, req.spacing_x,
                                                 req.spacing_y,
                                                 self.width_scaling,
                                                 self.height_scaling)
            if result is None:
                rospy.logerr("Couldn't find a checkerboard")
                continue

            p, corners = result
            poses.append(p)
            for j in range(corners.cols):
                x = cv.GetReal2D(corners, 0, j)
                y = cv.GetReal2D(corners, 1, j)
                min_x = min(min_x, x)
                max_x = max(max_x, x)
                min_y = min(min_y, y)
                max_y = max(max_y, y)

        # convert all poses to twists
        twists = []
        for p in poses:
            twists.append(
                PyKDL.diff(PyKDL.Frame.Identity(), posemath.fromMsg(p.pose)))

        # get average twist
        twist_res = PyKDL.Twist()
        PyKDL.SetToZero(twist_res)
        for t in twists:
            for i in range(3):
                twist_res.vel[i] += t.vel[i] / len(poses)
                twist_res.rot[i] += t.rot[i] / len(poses)

        # get noise
        noise_rot = 0
        noise_vel = 0
        for t in twists:
            n_vel = (t.vel - twist_res.vel).Norm()
            n_rot = (t.rot - twist_res.rot).Norm()
            if n_vel > noise_vel:
                noise_vel = n_vel
            if n_rot > noise_rot:
                noise_rot = n_rot

        # set service result
        pose_res = PyKDL.addDelta(PyKDL.Frame.Identity(), twist_res, 1.0)
        pose_msg = PoseStamped()
        pose_msg.header = poses[0].header
        pose_msg.pose = posemath.toMsg(pose_res)
        return GetCheckerboardPoseResponse(pose_msg, min_x, max_x, min_y,
                                           max_y, noise_vel, noise_rot)

    def find_checkerboard_pose(self, ros_image, corners_x, corners_y,
                               spacing_x, spacing_y, width_scaling,
                               height_scaling):
        #we need to convert the ros image to an opencv image
        try:
            image = self.bridge.imgmsg_to_cv(ros_image, "mono8")
        except CvBridgeError as e:
            rospy.logerr("Error importing image %s" % e)
            return

        corners, model = self.im_cb_detector.detect(image, corners_x,
                                                    corners_y, spacing_x,
                                                    spacing_y, width_scaling,
                                                    height_scaling)

        if corners is not None and model is not None and self.cam_info is not None:
            #find the pose of the checkerboard
            rot = cv.CreateMat(3, 1, cv.CV_32FC1)
            trans = cv.CreateMat(3, 1, cv.CV_32FC1)
            with self.mutex:
                intrinsic_matrix = self.intrinsic_matrix_from_info(
                    self.cam_info)
                kc = cv.CreateMat(1, 4, cv.CV_32FC1)
                cv.Set(kc, 0.0)
                cv.FindExtrinsicCameraParams2(model, corners, intrinsic_matrix,
                                              kc, rot, trans)

            #We want to build a transform now, but first we have to convert the
            #rotation vector we get back from OpenCV into a rotation matrix
            rot_mat = cv.CreateMat(3, 3, cv.CV_32FC1)
            cv.Rodrigues2(rot, rot_mat)

            #Now we need to convert this rotation matrix into a quaternion
            #This can be done better in newer versions of opencv, but we need to be boxturtle compatible
            numpy_mat = numpy.fromstring(rot_mat.tostring(),
                                         dtype=numpy.float32).reshape((3, 3))

            #of course, tf expects all matrices passed to it to be 4x4... so we'll build a full matrix here
            full_pose = numpy.zeros((4, 4))

            #first, copy in the rotation matrix
            full_pose[0:3, 0:3] = numpy_mat[0:3, 0:3]

            #next, we'll copy in the translation
            full_pose[0:3, 3] = [trans[i, 0] for i in range(3)]

            #and make sure to add a 1 in the lower right corner
            full_pose[3][3] = 1.0

            rospy.logdebug("%s" % numpy_mat)
            rospy.logdebug("%s" % full_pose)

            tf_trans = tf.transformations.translation_from_matrix(full_pose)
            tf_rot = tf.transformations.quaternion_from_matrix(full_pose)

            #and now we'll actually build our pose stamped
            board_pose = PoseStamped()
            board_pose.header = ros_image.header
            board_pose.pose.position.x = tf_trans[0]
            board_pose.pose.position.y = tf_trans[1]
            board_pose.pose.position.z = tf_trans[2]
            board_pose.pose.orientation.x = tf_rot[0]
            board_pose.pose.orientation.y = tf_rot[1]
            board_pose.pose.orientation.z = tf_rot[2]
            board_pose.pose.orientation.w = tf_rot[3]
            rospy.logdebug("%s" % board_pose)

            #we'll publish the pose so we can display it in rviz
            self.pose_pub.publish(board_pose)
            return (board_pose, corners)
Example 15
    def calibrate(self):
        self.projector_to_camera_translation_vector = cv.CreateMat(
            3, 1, cv.CV_32FC1)
        self.projector_to_camera_rotation_vector = cv.CreateMat(
            3, 1, cv.CV_32FC1)
        cv.SetZero(self.projector_to_camera_translation_vector)
        cv.SetZero(self.projector_to_camera_rotation_vector)
        for i in range(self.number_of_scenes):
            camera_tvec = cv.CreateMat(3, 1, cv.CV_32FC1)
            camera_rvec = cv.CreateMat(3, 1, cv.CV_32FC1)
            cv.FindExtrinsicCameraParams2(self.object_points[i],
                                          self.camera_image_points[i],
                                          self.camera_model.intrinsicMatrix(),
                                          self.camera_model.distortionCoeffs(),
                                          camera_rvec, camera_tvec)

            print "Camera To Board Vector:"
            for row in range(camera_tvec.height):
                for col in range(camera_tvec.width):
                    print camera_tvec[row, col],
                print
            print
            projector_tvec = cv.CreateMat(3, 1, cv.CV_32FC1)
            projector_rvec = cv.CreateMat(3, 1, cv.CV_32FC1)
            cv.FindExtrinsicCameraParams2(
                self.object_points[i], self.projector_image_points[i],
                self.projector_model.intrinsicMatrix(),
                self.projector_model.distortionCoeffs(), projector_rvec,
                projector_tvec)

            print "Projector To Board Vector:"
            for row in range(projector_tvec.height):
                for col in range(projector_tvec.width):
                    print projector_tvec[row, col],
                print
            print

            camera_rmat = cv.CreateMat(3, 3, cv.CV_32FC1)
            cv.Rodrigues2(camera_rvec, camera_rmat)

            projector_rmat = cv.CreateMat(3, 3, cv.CV_32FC1)
            cv.Rodrigues2(projector_rvec, projector_rmat)

            scene_projector_to_camera_rotation_matrix = cv.CreateMat(
                3, 3, cv.CV_32FC1)
            cv.GEMM(camera_rmat, projector_rmat, 1, None, 0,
                    scene_projector_to_camera_rotation_matrix, cv.CV_GEMM_B_T)
            scene_projector_to_camera_rotation_vector = cv.CreateMat(
                3, 1, cv.CV_32FC1)
            for row in range(3):
                for col in range(3):
                    print scene_projector_to_camera_rotation_matrix[row, col],
                print
            print
            cv.Rodrigues2(scene_projector_to_camera_rotation_matrix,
                          scene_projector_to_camera_rotation_vector)
            print "Scene Rotation Vector:"
            for row in range(scene_projector_to_camera_rotation_vector.height):
                for col in range(
                        scene_projector_to_camera_rotation_vector.width):
                    print scene_projector_to_camera_rotation_vector[row, col],
                print
            print

            scene_projector_to_camera_translation_vector = cv.CreateMat(
                3, 1, cv.CV_32FC1)
            cv.GEMM(projector_rmat, projector_tvec, -1, None, 0,
                    scene_projector_to_camera_translation_vector,
                    cv.CV_GEMM_A_T)
            cv.GEMM(camera_rmat, scene_projector_to_camera_translation_vector,
                    1, camera_tvec, 1,
                    scene_projector_to_camera_translation_vector, 0)
            print "Scene Translation Vector:"
            for row in range(
                    scene_projector_to_camera_translation_vector.height):
                for col in range(
                        scene_projector_to_camera_translation_vector.width):
                    print scene_projector_to_camera_translation_vector[row,
                                                                       col],
                print
            print

            cv.Add(scene_projector_to_camera_translation_vector,
                   self.projector_to_camera_translation_vector,
                   self.projector_to_camera_translation_vector)
            cv.Add(scene_projector_to_camera_rotation_vector,
                   self.projector_to_camera_rotation_vector,
                   self.projector_to_camera_rotation_vector)

        cv.ConvertScale(self.projector_to_camera_translation_vector,
                        self.projector_to_camera_translation_vector,
                        scale=1.0 / self.number_of_scenes)
        cv.ConvertScale(self.projector_to_camera_rotation_vector,
                        self.projector_to_camera_rotation_vector,
                        scale=1.0 / self.number_of_scenes)

        print "Final Translation Vector:"
        for row in range(self.projector_to_camera_translation_vector.height):
            for col in range(
                    self.projector_to_camera_translation_vector.width):
                print self.projector_to_camera_translation_vector[row, col],
            print
        print

        print "Final Rotation Vector:"
        for row in range(self.projector_to_camera_rotation_vector.height):
            for col in range(self.projector_to_camera_rotation_vector.width):
                print self.projector_to_camera_rotation_vector[row, col],
            print
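
    # The three cv.GEMM calls above compose the projector-to-camera transform
    # from the per-scene board poses: R_pc = R_cam * R_proj^T and
    # t_pc = R_cam * (-R_proj^T * t_proj) + t_cam.  A NumPy sketch of the same
    # composition (hypothetical helper; 3x3 / 3x1 float arrays):
    def projector_to_camera_np(self, R_cam, t_cam, R_proj, t_proj):
        import numpy as np
        R = np.dot(R_cam, R_proj.T)
        t = np.dot(R_cam, np.dot(-R_proj.T, t_proj)) + t_cam
        return R, t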
Example 16
    def __init__(self):
        self.pixels_per_scanline = rospy.get_param('~pixels_per_scanline')
        self.scanner_info_file_name = rospy.get_param(
            '~scanner_info_file_name')
        self.threshold = rospy.get_param('~threshold')

        self.mutex = threading.RLock()
        self.image_update_flag = threading.Event()
        self.bridge = CvBridge()
        rospy.Subscriber("image_stream", sensor_msgs.msg.Image,
                         self.update_image)
        rospy.loginfo("Waiting for camera info...")
        self.camera_info = rospy.wait_for_message('camera_info',
                                                  sensor_msgs.msg.CameraInfo)
        rospy.loginfo("Camera info received.")

        rospy.loginfo("Waiting for projector info service...")
        rospy.wait_for_service('get_projector_info')
        rospy.loginfo("Projector info service found.")
        get_projector_info = rospy.ServiceProxy('get_projector_info',
                                                projector.srv.GetProjectorInfo)
        self.projector_info = get_projector_info().projector_info

        self.projector_model = PinholeCameraModel()
        self.projector_model.fromCameraInfo(self.projector_info)

        self.read_scanner_info()

        self.projector_to_camera_rotation_matrix = cv.CreateMat(
            3, 3, cv.CV_32FC1)
        cv.Rodrigues2(self.projector_to_camera_rotation_vector,
                      self.projector_to_camera_rotation_matrix)

        predistortmap_x = cv.CreateMat(self.projector_info.height,
                                       self.projector_info.width, cv.CV_32FC1)
        predistortmap_y = cv.CreateMat(self.projector_info.height,
                                       self.projector_info.width, cv.CV_32FC1)
        InitPredistortMap(self.projector_model.intrinsicMatrix(),
                          self.projector_model.distortionCoeffs(),
                          predistortmap_x, predistortmap_y)

        minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(predistortmap_x)
        cv.AddS(predistortmap_x, -minVal, predistortmap_x)
        uncropped_projection_width = int(math.ceil(maxVal - minVal))
        self.center_pixel = self.projector_model.cx() - minVal

        minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(predistortmap_y)
        cv.AddS(predistortmap_y, -minVal, predistortmap_y)
        uncropped_projection_height = int(math.ceil(maxVal - minVal))

        self.number_of_scanlines = int(
            math.ceil(
                float(uncropped_projection_width) / self.pixels_per_scanline))

        rospy.loginfo("Generating projection patterns...")

        graycodes = []
        for i in range(self.number_of_scanlines):
            graycodes.append(graycodemath.generate_gray_code(i))

        self.number_of_projection_patterns = int(
            math.ceil(math.log(self.number_of_scanlines, 2)))
        self.predistorted_positive_projections = []
        self.predistorted_negative_projections = []

        for i in range(self.number_of_projection_patterns):
            cross_section = cv.CreateMat(1, uncropped_projection_width,
                                         cv.CV_8UC1)

            #Fill in cross section with the associated bit of each gray code
            for pixel in range(uncropped_projection_width):
                scanline = pixel // self.pixels_per_scanline
                scanline_value = graycodemath.get_bit(graycodes[scanline],
                                                      i) * 255
                cross_section[0, pixel] = scanline_value

            #Repeat the cross section over the entire image
            positive_projection = cv.CreateMat(uncropped_projection_height,
                                               uncropped_projection_width,
                                               cv.CV_8UC1)
            cv.Repeat(cross_section, positive_projection)

            #Predistort the projections to compensate for projector optics so that the scanlines are approximately planar
            predistorted_positive_projection = cv.CreateMat(
                self.projector_info.height, self.projector_info.width,
                cv.CV_8UC1)
            cv.Remap(positive_projection,
                     predistorted_positive_projection,
                     predistortmap_x,
                     predistortmap_y,
                     flags=cv.CV_INTER_NN)

            #Create a negative of the pattern for thresholding
            predistorted_negative_projection = cv.CreateMat(
                self.projector_info.height, self.projector_info.width,
                cv.CV_8UC1)
            cv.Not(predistorted_positive_projection,
                   predistorted_negative_projection)

            #Fade the borders of the patterns to avoid artifacts at the edges of projection
            predistorted_positive_projection_faded = fade_edges(
                predistorted_positive_projection, 20)
            predistorted_negative_projection_faded = fade_edges(
                predistorted_negative_projection, 20)

            self.predistorted_positive_projections.append(
                predistorted_positive_projection_faded)
            self.predistorted_negative_projections.append(
                predistorted_negative_projection_faded)

        rospy.loginfo("Waiting for projection setting service...")
        rospy.wait_for_service('set_projection')
        rospy.loginfo("Projection setting service found.")
        self.set_projection = rospy.ServiceProxy('set_projection',
                                                 projector.srv.SetProjection)

        self.pixel_associations_msg = None
        self.point_cloud_msg = None

        rospy.Service("~get_point_cloud", graycode_scanner.srv.GetPointCloud,
                      self.handle_point_cloud_srv)

        point_cloud_pub = rospy.Publisher('~point_cloud',
                                          sensor_msgs.msg.PointCloud)

        rospy.loginfo("Ready.")

        rate = rospy.Rate(1)
        while not rospy.is_shutdown():
            if self.point_cloud_msg is not None:
                point_cloud_pub.publish(self.point_cloud_msg)
            rate.sleep()
Example 17
	def getMatTrans(self):
		if not self.MatTrans:
			self.MatTrans=cv.CreateMat(3,3,cv.CV_64FC1)
			cv.Rodrigues2(self.getVectTrans(),self.MatTrans)
		return self.MatTrans
Example 18
def dbsolve(C, Rhat, matches, dbsiftpath, dbimgpath):

    # Get image information.
    info = os.path.join(C.infodir, os.path.basename(dbimgpath)[:-4] + '.info')
    dbsource = render_tags.EarthmineImageInfo(dbimgpath, info)
    map3d = C.pixelmap.open(dbsiftpath)

    # Get 2d pixels and 3d locations of feature inliers
    matches = [(m['query'][0],m['query'][1],m['db'][0],m['db'][1]) for m in matches]
    matches = list(set(matches))
    db2d = [arr([m[2],m[3]]) for m in matches]
    db3d = [map3d[int(d[0]),int(d[1])] for d in db2d]
    i = 0
    while i < len(db3d):
        if db3d[i] is None:
            db2d.pop(i)
            db3d.pop(i)
        else:
            i = i+1
    olat,olon,oalt = dbsource.lat,dbsource.lon,dbsource.alt
    zx = [geom.lltom(olat,olon,d['lat'],d['lon']) for d in db3d]
    y = [dbsource.alt-d['alt'] for d in db3d]
    xyz = [[zx[i][1],y[i],zx[i][0]] for i in range(len(y))]

    # reference camera matrix Kd
    wx,wy = dbsource.image.size
    fov = dbsource.fov
    f = (wx-1)/(2*np.tan(fov/2))
    # f 0 cx
    # 0 f cy
    # 0 0 1
    A = cv.CreateMat(3, 3, cv.CV_64F)
    cv.SetZero(A)
    cv.Set2D(A, 0, 0, cv.Scalar(f))
    cv.Set2D(A, 1, 1, cv.Scalar(f))
    cv.Set2D(A, 2, 2, cv.Scalar(1))
    cv.Set2D(A, 0, 2, cv.Scalar((wx-1)/2.0))
    cv.Set2D(A, 1, 2, cv.Scalar((wy-1)/2.0))

    # convert 2d, 3d points to cvMats
    objectPoints3d = cv.CreateMat(len(xyz), 1, cv.CV_64FC3)
    imagePoints2d = cv.CreateMat(len(xyz), 1, cv.CV_64FC2)
    for i in range(len(xyz)):
        cv.Set2D(imagePoints2d, i, 0, cv.Scalar(*db2d[i]))
        cv.Set2D(objectPoints3d, i, 0, cv.Scalar(*xyz[i]))

    # set initial rotation and translation vectors, distortion coefficients
    coeff = cv.CreateMat(4, 1, cv.CV_64F)
    cv.SetZero(coeff)
    rmat = cv.CreateMat(3, 3, cv.CV_64F)
    tRhat = tp(Rhat)
    cv.Set2D(rmat, 0, 0, cv.Scalar(tRhat[0,0]))
    cv.Set2D(rmat, 0, 1, cv.Scalar(tRhat[0,1]))
    cv.Set2D(rmat, 0, 2, cv.Scalar(tRhat[0,2]))
    cv.Set2D(rmat, 1, 0, cv.Scalar(tRhat[1,0]))
    cv.Set2D(rmat, 1, 1, cv.Scalar(tRhat[1,1]))
    cv.Set2D(rmat, 1, 2, cv.Scalar(tRhat[1,2]))
    cv.Set2D(rmat, 2, 0, cv.Scalar(tRhat[2,0]))
    cv.Set2D(rmat, 2, 1, cv.Scalar(tRhat[2,1]))
    cv.Set2D(rmat, 2, 2, cv.Scalar(tRhat[2,2]))
    rvec = cv.CreateMat(3, 1, cv.CV_64F)
    cv.SetZero(rvec)
    cv.Rodrigues2(rmat,rvec) # convert from rotation matrix to Rodrigues vector
    tvec = cv.CreateMat(3, 1, cv.CV_64F)
    cv.SetZero(tvec)

    #print Rhat
    # solvepnp
    ret = cv.FindExtrinsicCameraParams2(objectPoints3d, imagePoints2d, A,
        coeff, rvec, tvec, useExtrinsicGuess=False)
    np_rvec = np.matrix(rvec).A
    cv.Rodrigues2(rvec,rmat)
    np_rmat = np.transpose(np.matrix(rmat)).A
    np_tvec = np.dot(np_rmat,np.squeeze(np.matrix(tvec).A))

    return np_rmat, np_tvec
Example 19
    def find_checkerboard_pose(self, ros_image, cam_info):
        #we need to convert the ros image to an opencv image
        try:
            image = self.bridge.imgmsg_to_cv(ros_image, "mono8")
        except CvBridgeError as e:
            rospy.logerr("Error importing image %s" % e)
            return

        corners, model = self.detect(image)

        if corners is None or model is None:
            return None
        else:
            #find the pose of the checkerboard
            rot = cv.CreateMat(3, 1, cv.CV_32FC1)
            trans = cv.CreateMat(3, 1, cv.CV_32FC1)
            kc = cv.CreateMat(1, 4, cv.CV_32FC1)
            cv.Set(kc, 0.0)
            intrinsic_matrix = self.intrinsic_matrix_from_info(cam_info)

            cv.FindExtrinsicCameraParams2(model, corners, intrinsic_matrix, kc,
                                          rot, trans)

            #We want to build a transform now, but first we have to convert the
            #rotation vector we get back from OpenCV into a rotation matrix
            rot_mat = cv.CreateMat(3, 3, cv.CV_32FC1)
            cv.Rodrigues2(rot, rot_mat)

            #Now we need to convert this rotation matrix into a quaternion
            #This can be done better in newer versions of opencv, but we need to be boxturtle compatible
            numpy_mat = numpy.fromstring(rot_mat.tostring(),
                                         dtype=numpy.float32).reshape((3, 3))

            #of course, tf expects all matrices passed to it to be 4x4... so we'll build a full matrix here
            full_pose = numpy.zeros((4, 4))

            #first, copy in the rotation matrix
            full_pose[0:3, 0:3] = numpy_mat[0:3, 0:3]

            #next, we'll copy in the translation
            full_pose[0:3, 3] = [trans[i, 0] for i in range(3)]

            #and make sure to add a 1 in the lower right corner
            full_pose[3][3] = 1.0

            rospy.logdebug("%s" % numpy_mat)
            rospy.logdebug("%s" % full_pose)

            tf_trans = tf.transformations.translation_from_matrix(full_pose)
            tf_rot = tf.transformations.quaternion_from_matrix(full_pose)

            #and now we'll actually build our pose stamped
            board_pose = geometry_msgs.msg.PoseStamped()
            board_pose.header = ros_image.header
            board_pose.pose.position.x = tf_trans[0]
            board_pose.pose.position.y = tf_trans[1]
            board_pose.pose.position.z = tf_trans[2]
            board_pose.pose.orientation.x = tf_rot[0]
            board_pose.pose.orientation.y = tf_rot[1]
            board_pose.pose.orientation.z = tf_rot[2]
            board_pose.pose.orientation.w = tf_rot[3]
            rospy.logdebug("%s" % board_pose)

            #we'll publish the pose so we can display it in rviz
            # self.pose_pub.publish(board_pose)
            return board_pose
Example 20
def really_do_posit(locsar,
                    tags,
                    viewpoint,
                    FOCAL_LENGTH,
                    refpoints=[],
                    qimgsize=(0, 0)):

    # change 3d coordinate systems
    c = map(lambda entry: ({
        'x': entry[0][0],
        'y': entry[0][1]
    }, entry[1]), locsar)
    locpairs = em.ddImageLocstoLPT(viewpoint, c)
    #translate to POSIT Specs
    translation2d = (qimgsize[0] / 2.0, qimgsize[1] / 2.0)
    pts2d = map(
        lambda x: (x[0][0] - translation2d[0], x[0][1] - translation2d[1]),
        locpairs)
    translation3d = locpairs[0][1]
    pts3d = map(lambda x: tuple(x[1] - translation3d), locpairs)

    #convert tag coordinate system
    c = map(
        lambda x: ({
            'x': 0,
            'y': 0
        }, {
            'lat': x.lat,
            'lon': x.lon,
            'alt': x.alt
        }), tags)
    c2 = map(
        lambda x: ({
            'x': 0,
            'y': 0
        }, {
            'lat': x['lat'],
            'lon': x['lon'],
            'alt': x['alt']
        }), refpoints)
    taglocpairs = em.ddImageLocstoLPT(viewpoint, c)
    reflocpairs = em.ddImageLocstoLPT(viewpoint, c2)
    tagpts3d = map(lambda x: tuple(x[1] - translation3d), taglocpairs)
    refpts3d = map(lambda x: tuple(x[1] - translation3d), reflocpairs)

    #compute POSIT
    positobj = cv.CreatePOSITObject(pts3d)
    rotMat, transVec = cv.POSIT(positobj, pts2d, FOCAL_LENGTH,
                                (cv.CV_TERMCRIT_EPS, 0, 0.000001))

    #    print "rotation matrix:\t{0}".format(rotMat)
    #    print "translation matrix:\t{0}".format(transVec)

    #change rotMat to cvMat
    rotMatCV = cv.CreateMat(3, 3, cv.CV_64F)
    for i in range(0, 3):
        for j in range(0, 3):
            cv.Set2D(rotMatCV, i, j, rotMat[i][j])
    #convert rotMatrix to rotVector
    rotVec = cv.CreateMat(3, 1, cv.CV_64F)
    cv.Rodrigues2(rotMatCV, rotVec)

    #change transVec to cvMat
    transVecCV = cv.CreateMat(1, 3, cv.CV_64F)
    for i in range(0, 3):
        transVecCV[0, i] = transVec[i]

    #camera matrix
    cameratrans = cv.CreateMat(3, 3, cv.CV_64F)
    cv.SetIdentity(cameratrans)
    cameratrans[0, 0] = FOCAL_LENGTH
    cameratrans[1, 1] = FOCAL_LENGTH

    #distCoefs
    distCoef = cv.CreateMat(4, 1, cv.CV_64F)
    cv.SetZero(distCoef)

    #change 3d coordinate data format
    pts3d_mat = cv.CreateMat(len(pts3d), 1, cv.CV_64FC3)
    for i, m in enumerate(pts3d):
        cv.Set2D(pts3d_mat, i, 0, cv.Scalar(*m))

    #project points
    d2 = cv.CreateMat(pts3d_mat.rows, 1, cv.CV_64FC2)
    cv.ProjectPoints2(pts3d_mat, rotVec, transVecCV, cameratrans, distCoef, d2)

    #compute self errors
    xerrors = []
    yerrors = []
    for i in range(0, d2.rows):
        xerror = abs(pts2d[i][0] - (d2[i, 0][0]))
        yerror = abs(pts2d[i][1] - (d2[i, 0][1]))
        xerrors.append(xerror)
        yerrors.append(yerror)
    print "avg xerror:\t {0}".format(sum(xerrors) / len(xerrors))
    print "avg yerror:\t {0}".format(sum(yerrors) / len(yerrors))

    #change tag 3d coordinate data format
    tagpts3d_mat = cv.CreateMat(len(tagpts3d), 1, cv.CV_64FC3)
    for i, m in enumerate(tagpts3d):
        cv.Set2D(tagpts3d_mat, i, 0, cv.Scalar(*m))
    refpts3d_mat = cv.CreateMat(len(refpts3d), 1, cv.CV_64FC3)
    for i, m in enumerate(refpts3d):
        cv.Set2D(refpts3d_mat, i, 0, cv.Scalar(*m))
    #project points
    d2 = cv.CreateMat(tagpts3d_mat.rows, 1, cv.CV_64FC2)
    cv.ProjectPoints2(tagpts3d_mat, rotVec, transVecCV, cameratrans, distCoef,
                      d2)
    d22 = cv.CreateMat(refpts3d_mat.rows, 1, cv.CV_64FC2)
    cv.ProjectPoints2(refpts3d_mat, rotVec, transVecCV, cameratrans, distCoef,
                      d22)
    ntags = []
    nrefs = []
    for i in range(0, d2.rows):
        ntags.append((tags[i], (0, (d2[i, 0][0] + translation2d[0],
                                    d2[i, 0][1] + translation2d[1]))))
    for i in range(0, d22.rows):
        nrefs.append((refpoints[i], (0, (d22[i, 0][0] + translation2d[0],
                                         d22[i, 0][1] + translation2d[1]))))
    return ntags, nrefs
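
# cv.POSIT exists only in the legacy API; with modern OpenCV the equivalent
# pose comes from cv2.solvePnP (a sketch, hypothetical helper: the 2d points
# are already centered on the principal point, hence cx = cy = 0):
def posit_like_pose(pts3d, pts2d, focal_length):
    import cv2
    import numpy as np
    K = np.array([[focal_length, 0.0, 0.0],
                  [0.0, focal_length, 0.0],
                  [0.0, 0.0, 1.0]])
    ok, rvec, tvec = cv2.solvePnP(np.asarray(pts3d, dtype=np.float64),
                                  np.asarray(pts2d, dtype=np.float64),
                                  K, np.zeros(4))
    R, _ = cv2.Rodrigues(rvec)
    return R, tvec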
Example 21
    def get_point_cloud(self):

        # Scan the scene
        pixel_associations = self.get_pixel_associations()

        # Project white light onto the scene to get the intensities of each picture (for coloring our point cloud)
        illumination_projection = cv.CreateMat(self.projector_info.height,
                                               self.projector_info.width,
                                               cv.CV_8UC1)
        cv.Set(illumination_projection, 255)
        intensities_image = self.get_picture_of_projection(
            illumination_projection)

        # Turn projector off when done with it
        off_projection = cv.CreateMat(self.projector_info.height,
                                      self.projector_info.width, cv.CV_8UC1)
        cv.SetZero(off_projection)
        off_image_message = self.bridge.cv_to_imgmsg(off_projection,
                                                     encoding="mono8")
        self.set_projection(off_image_message)

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(self.camera_info)

        projector_model = PinholeCameraModel()
        projector_model.fromCameraInfo(self.projector_info)
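        # PinholeCameraModel comes from ROS image_geometry; its
        # intrinsicMatrix() and distortionCoeffs() accessors feed the
        # undistortion calls below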

        projector_to_camera_rotation_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)
        cv.Rodrigues2(self.projector_to_camera_rotation_vector,
                      projector_to_camera_rotation_matrix)

        pixel_associations_x = cv.CreateMat(self.camera_info.height,
                                            self.camera_info.width,
                                            cv.CV_32SC1)
        pixel_associations_y = cv.CreateMat(self.camera_info.height,
                                            self.camera_info.width,
                                            cv.CV_32SC1)
        cv.Split(pixel_associations, pixel_associations_x,
                 pixel_associations_y, None, None)
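        # A decoded coordinate of -1 marks a camera pixel with no projector
        # correspondence, so mask out pixels invalid in either axis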
        valid_points_mask_x = cv.CreateMat(self.camera_info.height,
                                           self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(pixel_associations_x, -1, valid_points_mask_x, cv.CV_CMP_NE)
        valid_points_mask_y = cv.CreateMat(self.camera_info.height,
                                           self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(pixel_associations_y, -1, valid_points_mask_y, cv.CV_CMP_NE)
        valid_points_mask = cv.CreateMat(self.camera_info.height,
                                         self.camera_info.width, cv.CV_8UC1)
        cv.And(valid_points_mask_x, valid_points_mask_y, valid_points_mask)

        number_of_valid_points = cv.CountNonZero(valid_points_mask)

        rectified_camera_coordinates = cv.CreateMat(1, number_of_valid_points,
                                                    cv.CV_32FC2)
        rectified_projector_coordinates = cv.CreateMat(1,
                                                       number_of_valid_points,
                                                       cv.CV_32FC2)
        intensities = cv.CreateMat(1, number_of_valid_points, cv.CV_8UC1)
        i = 0
        for row in range(self.camera_info.height):
            for col in range(self.camera_info.width):
                if valid_points_mask[row, col] != 0:
                    rectified_camera_coordinates[0, i] = (col, row)
                    rectified_projector_coordinates[0, i] = pixel_associations[
                        row, col]
                    intensities[0, i] = intensities_image[row, col]
                    i += 1

        cv.UndistortPoints(rectified_camera_coordinates,
                           rectified_camera_coordinates,
                           camera_model.intrinsicMatrix(),
                           camera_model.distortionCoeffs())

        # Convert scanline numbers to pixel numbers: add 0.5 to address the
        # center of each scanline, then scale by the scanline width in pixels
        cv.AddS(rectified_projector_coordinates, 0.5,
                rectified_projector_coordinates)
        cv.ConvertScale(rectified_projector_coordinates,
                        rectified_projector_coordinates,
                        self.pixels_per_scanline)

        # Rectify the projector pixels
        cv.UndistortPoints(rectified_projector_coordinates,
                           rectified_projector_coordinates,
                           projector_model.intrinsicMatrix(),
                           projector_model.distortionCoeffs())

        camera_rays = projectPixelsTo3dRays(rectified_camera_coordinates,
                                            camera_model)

        projector_rays = projectPixelsTo3dRays(rectified_projector_coordinates,
                                               projector_model)

        # Bring the projector rays into camera coordinates
        cv.Transform(projector_rays, projector_rays,
                     projector_to_camera_rotation_matrix)

        camera_centers = cv.CreateMat(1, number_of_valid_points, cv.CV_32FC3)
        cv.SetZero(camera_centers)

        projector_centers = cv.CreateMat(1, number_of_valid_points,
                                         cv.CV_32FC3)
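        # All projector rays originate at the projector's optical center,
        # expressed here in camera coordinates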
        cv.Set(projector_centers, self.projector_to_camera_translation_vector)

        intersection_points = line_line_intersections(camera_centers,
                                                      camera_rays,
                                                      projector_centers,
                                                      projector_rays)

        self.point_cloud_msg = sensor_msgs.msg.PointCloud()
        self.point_cloud_msg.header.stamp = rospy.Time.now()
        self.point_cloud_msg.header.frame_id = 'base_link'
        channels = []
        channel = sensor_msgs.msg.ChannelFloat32()
        channel.name = "intensity"
        points = []
        intensities_list = []

        for i in range(number_of_valid_points):
            point = geometry_msgs.msg.Point32()
            point.x = intersection_points[0, i][0]
            point.y = intersection_points[0, i][1]
            point.z = intersection_points[0, i][2]
            points.append(point)
            intensity = intensities[0, i]
            intensities_list.append(intensity)

        channel.values = intensities_list
        channels.append(channel)
        self.point_cloud_msg.channels = channels
        self.point_cloud_msg.points = points

        return self.point_cloud_msg
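
The helper line_line_intersections is defined elsewhere in this example; a
minimal sketch of what it is assumed to compute for each camera/projector ray
pair is the midpoint of the shortest segment between the two skew lines:

import numpy as np

def intersect_rays(p1, d1, p2, d2):
    # Hypothetical per-pair helper: midpoint of the common perpendicular
    # between the lines p1 + t*d1 and p2 + s*d2
    d1 = d1 / np.linalg.norm(d1)
    d2 = d2 / np.linalg.norm(d2)
    b = np.dot(d1, d2)
    denom = 1.0 - b * b
    if denom < 1e-12:
        return None  # rays are (nearly) parallel
    r = p2 - p1
    t = (np.dot(r, d1) - b * np.dot(r, d2)) / denom
    s = (b * np.dot(r, d1) - np.dot(r, d2)) / denom
    return 0.5 * ((p1 + t * d1) + (p2 + s * d2))
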
class ImageCbDetectorNode:
    def __init__(self):
        self.mutex = threading.RLock()
        self.corners_x = rospy.get_param('~corners_x', 6)
        self.corners_y = rospy.get_param('~corners_y', 6)
        self.spacing_x = rospy.get_param('~spacing_x', 0.022)
        self.spacing_y = rospy.get_param('~spacing_y', 0.022)
        self.publish_period = rospy.Duration(
            rospy.get_param('~publish_period', 0))
        self.last_publish_time = rospy.Time()
        self.width_scaling = rospy.get_param('~width_scaling', 1)
        self.height_scaling = rospy.get_param('~height_scaling', 1)

        self.im_cb_detector = ImageCbDetector()

        self.pose_srv = None
        self.bridge = CvBridge()
        self.cam_info = None
        self.ros_image = None

        self.image_sub = rospy.Subscriber("image_stream", Image, self.callback)
        self.caminfo_sub = rospy.Subscriber("camera_info", CameraInfo,
                                            self.cam_info_cb)
        self.pose_pub = rospy.Publisher("board_pose", PoseStamped)

    def intrinsic_matrix_from_info(self, cam_info):
        intrinsic_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)

        #P is the rectified 3x4 projection matrix stored row-major; we keep
        #only its left 3x3 (intrinsic) block
        for i in range(0, 3):
            for j in range(0, 3):
                intrinsic_matrix[i, j] = cam_info.P[4 * i + j]

        return intrinsic_matrix
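
    # For reference, a numpy sketch of the same extraction, assuming
    # cam_info.P is the usual flattened row-major 3x4 projection matrix:
    #   numpy.array(cam_info.P).reshape(3, 4)[:, :3]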

    def attempt_to_advertise(self):
        #we'll only advertise the service after we've received a cam_info/image pair
        if self.ros_image is not None and self.cam_info is not None:
            self.pose_srv = rospy.Service("get_checkerboard_pose",
                                          GetCheckerboardPose,
                                          self.find_checkerboard_service)

    def cam_info_cb(self, cam_info):
        with self.mutex:
            self.cam_info = cam_info
            if self.pose_srv is None:
                self.attempt_to_advertise()

    def callback(self, ros_image):
        #copy over the latest image to be used when a service call is made
        with self.mutex:
            self.ros_image = ros_image
            if self.pose_srv is None:
                self.attempt_to_advertise()
            elapsed = rospy.rostime.get_rostime() - self.last_publish_time
            if self.publish_period != rospy.Duration() and \
                    elapsed > self.publish_period:
                pose = self.find_checkerboard_pose(
                    ros_image, self.corners_x, self.corners_y, self.spacing_x,
                    self.spacing_y, self.width_scaling, self.height_scaling)
                if pose:
                    self.pose_pub.publish(pose)
                    self.last_publish_time = rospy.rostime.get_rostime()

    def find_checkerboard_service(self, req):
        #copy the image over
        with self.mutex:
            image = self.ros_image

        pose = self.find_checkerboard_pose(image, req.corners_x, req.corners_y,
                                           req.spacing_x, req.spacing_y,
                                           self.width_scaling,
                                           self.height_scaling)
        return GetCheckerboardPoseResponse(pose)

    def find_checkerboard_pose(self, ros_image, corners_x, corners_y,
                               spacing_x, spacing_y, width_scaling,
                               height_scaling):
        #we need to convert the ros image to an opencv image
        try:
            image = self.bridge.imgmsg_to_cv(ros_image, "mono8")
        except CvBridgeError, e:
            rospy.logerr("Error importing image %s" % e)
            return

        corners, model = self.im_cb_detector.detect(image, corners_x,
                                                    corners_y, spacing_x,
                                                    spacing_y, width_scaling,
                                                    height_scaling)

        if corners is not None and model is not None \
                and self.cam_info is not None:
            #find the pose of the checkerboard
            rot = cv.CreateMat(3, 1, cv.CV_32FC1)
            trans = cv.CreateMat(3, 1, cv.CV_32FC1)
            with self.mutex:
                kc = cv.CreateMat(1, 4, cv.CV_32FC1)
                cv.Set(kc, 0.0)
                intrinsic_matrix = self.intrinsic_matrix_from_info(
                    self.cam_info)

                cv.FindExtrinsicCameraParams2(model, corners, intrinsic_matrix,
                                              kc, rot, trans)

            #We want to build a transform now, but first we have to convert the
            #rotation vector we get back from OpenCV into a rotation matrix
            rot_mat = cv.CreateMat(3, 3, cv.CV_32FC1)
            cv.Rodrigues2(rot, rot_mat)

            #Now we need to convert this rotation matrix into a quaternion
            #This can be done better in newer versions of OpenCV, but we need to be boxturtle compatible
            numpy_mat = numpy.fromstring(rot_mat.tostring(),
                                         dtype=numpy.float32).reshape((3, 3))

            #of course, tf expects all matrices passed to it to be 4x4... so we'll build a full matrix here
            full_pose = numpy.zeros((4, 4))

            #first, copy in the rotation matrix
            full_pose[0:3, 0:3] = numpy_mat[0:3, 0:3]

            #next, we'll copy in the translation
            full_pose[0:3, 3] = [trans[i, 0] for i in range(3)]

            #and make sure to add a 1 in the lower right corner
            full_pose[3, 3] = 1.0

            rospy.logdebug("%s" % numpy_mat)
            rospy.logdebug("%s" % full_pose)

            tf_trans = tf.transformations.translation_from_matrix(full_pose)
            tf_rot = tf.transformations.quaternion_from_matrix(full_pose)

            #and now we'll actually build our pose stamped
            board_pose = PoseStamped()
            board_pose.header = ros_image.header
            board_pose.pose.position.x = tf_trans[0]
            board_pose.pose.position.y = tf_trans[1]
            board_pose.pose.position.z = tf_trans[2]
            board_pose.pose.orientation.x = tf_rot[0]
            board_pose.pose.orientation.y = tf_rot[1]
            board_pose.pose.orientation.z = tf_rot[2]
            board_pose.pose.orientation.w = tf_rot[3]
            rospy.logdebug("%s" % board_pose)

            #we'll publish the pose so we can display it in rviz
            self.pose_pub.publish(board_pose)
            return board_pose
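
The 4x4 detour above exists because tf.transformations.quaternion_from_matrix
expects a homogeneous matrix. A minimal sketch of that step in isolation,
assuming the same tf API (rotation_matrix_to_quaternion is a hypothetical
helper name):

import numpy
import tf.transformations

def rotation_matrix_to_quaternion(rot3x3):
    # Pad the 3x3 rotation into a 4x4 homogeneous matrix; tf returns the
    # quaternion in (x, y, z, w) order
    m = numpy.eye(4)
    m[0:3, 0:3] = rot3x3
    return tf.transformations.quaternion_from_matrix(m)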