Example #1
    def test_ops(self):
        p0 = almath.Position3D(0.1, 0.2, 0.4)
        p1 = almath.Position3D(0.3, -0.2, 0.9)
        pAdd = p0 + p1
        pExp = almath.Position3D(p0.x + p1.x, p0.y + p1.y, p0.z + p1.z)

        self.assertTrue(pAdd.isNear(pExp))

        factor = 0.2
        pFact = p0 * factor
        pExp = almath.Position3D(p0.x * factor, p0.y * factor, p0.z * factor)
        self.assertTrue(pFact.isNear(pExp))
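
The test above exercises addition and scalar multiplication. Position3D also overloads subtraction and scalar division; a minimal companion sketch (not from the original suite, assuming the same almath bindings):

import almath

p0 = almath.Position3D(0.1, 0.2, 0.4)
p1 = almath.Position3D(0.3, -0.2, 0.9)

# Subtraction is element-wise, mirroring the addition checked above.
pSub = p0 - p1
assert pSub.isNear(almath.Position3D(p0.x - p1.x, p0.y - p1.y, p0.z - p1.z))

# Dividing by a scalar is equivalent to multiplying by its inverse.
pDiv = p0 / 2.0
assert pDiv.isNear(p0 * 0.5)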
Example #2
 def convert_laser_position(self, sensor_device, laser_x, laser_y):
     t_world2robot = almath.transformFromPose2D(almath.Pose2D(self.robot_pose))
     t_device2point = almath.transformFromPosition3D(almath.Position3D(laser_x, laser_y, 0))
     t_robot2device = almath.transformFromPosition6D(self.device_position_list[sensor_device])
     t_world2point = t_world2robot * t_robot2device * t_device2point
     p2d_world2point = almath.pose2DFromTransform(t_world2point)
     return (p2d_world2point.x, p2d_world2point.y)
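
A minimal usage sketch of the frame chaining above (the pose and mounting values are illustrative, not from the original project):

import almath

robot_pose = almath.Pose2D(1.0, 0.5, 0.3)  # robot in the world frame
device_in_robot = almath.Position6D(0.1, 0.0, 0.2, 0.0, 0.0, 0.0)  # laser mount

t_world2robot = almath.transformFromPose2D(robot_pose)
t_robot2device = almath.transformFromPosition6D(device_in_robot)
t_device2point = almath.transformFromPosition3D(almath.Position3D(2.0, 0.0, 0.0))

# Chain the frames: world <- robot <- device <- point.
t_world2point = t_world2robot * t_robot2device * t_device2point
p2d_world2point = almath.pose2DFromTransform(t_world2point)
print(p2d_world2point.x, p2d_world2point.y)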
Example #3
 def callback(self, poseStamped):
     pose = Pose(poseStamped.pose.position, poseStamped.pose.orientation)
     # almath.Quaternion takes (w, x, y, z).
     quat = almath.Quaternion(pose.orientation.w, pose.orientation.x,
                              pose.orientation.y, pose.orientation.z)
     rotation = almath.rotation3DFromQuaternion(quat)
     position3d = almath.Position3D(pose.position.x, pose.position.y,
                                    pose.position.z)
     # Collapse the 3D pose to a 2D pose (x, y, yaw) in the world frame.
     worldToTarget = almath.Pose2D(position3d.x, position3d.y, rotation.wz)
     worldToRobot = almath.Pose2D(self.motionProxy.getRobotPosition(True))
     # Express the target in the robot frame and normalize the angle.
     robotToTarget = almath.pinv(worldToRobot) * worldToTarget
     robotToTarget.theta = almath.modulo2PI(robotToTarget.theta)
     self.motionProxy.moveTo(robotToTarget.x, robotToTarget.y,
                             robotToTarget.theta)
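
The key step is almath.pinv, which inverts the world-to-robot pose so the target can be re-expressed in the robot frame. A small sketch with illustrative values:

import almath

worldToTarget = almath.Pose2D(2.0, 1.0, 0.0)
worldToRobot = almath.Pose2D(1.0, 1.0, 0.0)

robotToTarget = almath.pinv(worldToRobot) * worldToTarget
robotToTarget.theta = almath.modulo2PI(robotToTarget.theta)
print(robotToTarget)  # expected: roughly 1 m straight ahead, no rotation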
Example #4
 def convert_laser_position(self, sensor_device, laser_x, laser_y):
     # Transform of the robot position in the world frame
     t_world2robot = almath.transformFromPose2D(
         almath.Pose2D(self.robot_pose))
     # Sensor device -> laser point
     t_device2point = almath.transformFromPosition3D(
         almath.Position3D(laser_x, laser_y, 0))
     # Robot -> sensor device
     t_robot2device = almath.transformFromPosition6D(
         self.device_position_list[sensor_device])
     # Laser point in the world frame
     t_world2point = t_world2robot * t_robot2device * t_device2point
     p2d_world2point = almath.pose2DFromTransform(t_world2point)
     return (p2d_world2point.x, p2d_world2point.y)
Example #5
 def convert_laser_position(self, laser_values):
     try:
         # Robot pose in the world frame.
         t_world2robot = almath.transformFromPose2D(almath.Pose2D(self.robot_pose))
         for sensor_device in range(1):  # hard-coded: a single sensor device
             for point_num in range(15):  # hard-coded: 15 points per scan
                 laser = laser_values[sensor_device][point_num]
                 # Laser point in the sensor-device frame.
                 t_device2point = almath.transformFromPosition3D(almath.Position3D(laser[0], laser[1], 0))
                 # Mounting position and orientation of the sensor device on the robot.
                 t_robot2device = almath.transformFromPosition6D(self.device_position_list[sensor_device])
                 # Laser point in the world frame.
                 t_world2point = t_world2robot * t_robot2device * t_device2point
                 p2d_world2point = almath.pose2DFromTransform(t_world2point)
                 laser_values[sensor_device][point_num] = (p2d_world2point.x, p2d_world2point.y)
     except Exception as errorMsg:
         failure(errorMsg)  # project-specific failure handler
     return laser_values
Example #6
 def test_distance(self):
     p0 = almath.Position3D()
     self.assertAlmostEqual(p0.distance(p0), 0.0)
     p1 = almath.Position3D(0.2, 0.0, 0.0)
     self.assertAlmostEqual(p0.distance(p1), p1.x)
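
distance() is the Euclidean norm of the difference between two points; a quick cross-check against the formula (same bindings as the tests):

import math
import almath

p0 = almath.Position3D(0.1, 0.2, 0.4)
p1 = almath.Position3D(0.3, -0.2, 0.9)

expected = math.sqrt((p0.x - p1.x) ** 2 + (p0.y - p1.y) ** 2 + (p0.z - p1.z) ** 2)
assert abs(p0.distance(p1) - expected) < 1e-6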
Example #7
 def test_init(self):
     p = almath.Position3D()
     self.assertAlmostEqual(p.x, 0.0)
     self.assertAlmostEqual(p.y, 0.0)
     self.assertAlmostEqual(p.z, 0.0)
Example #8
    def calc_marker(self):
        print("calculation marker information")
        start_time = time.time()

        try:
            self.image_remote = self.camera.getImageRemote(self.subscriber_id)
        except Exception as message:
            self.unsubscribe()
            print(str(message))
        if not self.image_remote:
            self.unsubscribe()
            raise Exception("No data in image")
        camera = self.params["camera"]
        camera_name = CAMERAS[camera]
        seconds = self.image_remote[4]
        micro_seconds = self.image_remote[5]
        # Timestamp converted to nanoseconds for _getSensorTransformAtTime.
        t_world2camera = almath.Transform(
            self.motion._getSensorTransformAtTime(
                camera_name, seconds * 1e9 + micro_seconds * 1e3))
        t_robot2camera = almath.Transform(
            self.motion.getTransform(camera_name, 2, True))
        resolution = self.params["resolution"]
        x, y = CAMERA_DATAS_AT_RESOLUTION[resolution]["image_size"]
        color_space, channels = self.params["color_space_and_channels"]
        image = numpy.frombuffer(self.image_remote[6],
                                 dtype=numpy.uint8).reshape(y, x, channels)
        if self.params["color"] and color_space == vd.kBGRColorSpace:
            print("Thresholding image...")
            lower_b = tuple([int(val) for val in self.params["color"]])
            upper_b = (255, 255, 255)
            image = cv2.inRange(image, lower_b, upper_b)
            print("Thresholding image done")

        p6Ds = dict()
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            image, self.params["dictionary"])

        result = False
        if ids is not None:
            try:
                if 328 in ids.flatten():
                    # Find the index of the marker with ID 328.
                    count = 0
                    for _id in ids:
                        print(_id)
                        if _id == [328]:
                            break
                        count = count + 1

                    rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(
                        corners, self.params["size"],
                        CAMERA_DATAS_AT_RESOLUTION[resolution]["matrix"],
                        CAMERA_DISTORTION_COEFF)

                    tvec = tvecs[count][0]  # translation vector
                    # Switch from OpenCV camera coordinates to NAOqi coordinates.
                    x, y, z = tvec[2], -tvec[0], -tvec[1]
                    p3d_camera2target = almath.Position3D(x, y, z)

                    rvec = rvecs[count][0]  # rotation vector
                    wx, wy, wz = rvec[2], -rvec[0], -rvec[1]
                    proj_rvec, _ = cv2.Rodrigues(numpy.array([wx, wy, wz]))

                    r_camera2target = almath.Rotation(proj_rvec.flatten())
                    t_camera2target = almath.transformFromRotationPosition3D(
                        r_camera2target, p3d_camera2target)

                    # Orientation correction: 270 deg around the y axis.
                    r3d_correction = almath.Rotation3D(0., 3 * math.pi / 2, 0.)

                    t_correction = almath.transformFromRotation3D(
                        r3d_correction)
                    t_world2target = t_world2camera * t_camera2target * t_correction
                    t_robot2target = t_robot2camera * t_camera2target * t_correction

                    p6D_world2target = almath.position6DFromTransform(
                        t_world2target)
                    p6D_robot2target = almath.position6DFromTransform(
                        t_robot2target)

                    print("[x,y,theta] = [{},{},{}]".format(
                        p6D_robot2target.x, p6D_robot2target.y,
                        math.degrees(p6D_robot2target.wz)))

                    # p6Ds[ids[count]] = {
                    #     "robot2target": list(p6D_robot2target.toVector()),
                    #     "world2target": list(p6D_world2target.toVector())
                    # }
                    result = True
                print("ID:" + str(ids))
            except Exception as message:
                print("failed: {}".format(str(message)))
                self.unsubscribe()
        else:
            result = False
            print("No Marker")
        delta_time = time.time() - start_time
        print("calc_marker took {:.3f} s".format(delta_time))
        return result
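
The tvec/rvec reshuffling above converts between conventions: OpenCV's camera frame is x right, y down, z forward, while NAOqi uses x forward, y left, z up, hence (x, y, z)_naoqi = (z, -x, -y)_opencv. In isolation (illustrative numbers):

import almath

tvec = [0.05, -0.02, 1.20]  # OpenCV: 5 cm right, 2 cm up, 1.2 m ahead
p3d = almath.Position3D(tvec[2], -tvec[0], -tvec[1])
print(p3d)  # NAOqi: 1.2 m ahead, 5 cm to the right, 2 cm up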
Example #9
print('Check printing of almath structs')
print('Check Pose2D')
pose2D = m.Pose2D(0.0, 1.3, 2.9)
print(repr(pose2D))
print(pose2D)
print('')

print('Check Position2D')
position2D = m.Position2D(0.1, 0.2)
print(repr(position2D))
print(position2D)
print('')

print('Check Position3D')
position3D = m.Position3D(0.1, 0.2, 0.3)
print(repr(position3D))
print(position3D)
print('')

print('Check Position6D')
position6D = m.Position6D(0.1, 0.2, 0.3, 0.4, 0.5, 0.6)
print(repr(position6D))
print(position6D)
print('')

print('Check PositionAndVelocity')
positionAndVelocity = m.PositionAndVelocity(0.1, 0.2)
print(repr(positionAndVelocity))
print(positionAndVelocity)
print('')
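
These structs also convert to plain Python lists via toVector(), which the marker-detection example below relies on to serialize results; a one-liner, assuming m is the almath module as the snippet above suggests:

import almath as m

values = list(m.Position6D(0.1, 0.2, 0.3, 0.4, 0.5, 0.6).toVector())
print(values)  # six floats: x, y, z, wx, wy, wz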
Example #10
    def _detect_markers(self, params, image, t_world2camera, timestamp):
        t = time.time()
        # self.logger.info("_detect_markers...")
        p6Ds = dict()
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            image, params["dictionary"])

        try:
            # ids is None when detectMarkers found nothing.
            ids = [int(_id) for _id in ids.flatten()]
        except Exception:
            raise Exception("No markers found")
        # self.logger.info("@@@@@@@@@ IDs: %s" % ids)
        if ids:
            if params["ids"]:
                id_indices = [
                    indice for (indice, _id) in enumerate(ids)
                    if _id in set(params["ids"]).intersection(set(ids))
                ]
            else:
                id_indices = [indice for (indice, _id) in enumerate(ids)]

            # self.logger.info("@@@@@@@@@ IDs indices: %s" % id_indices)

            if id_indices:
                resolution = params["resolution"]
                rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(
                    corners, params["size"],
                    CAMERA_DATAS_AT_RESOLUTION[resolution]["matrix"],
                    CAMERA_DISTORTION_COEFF)
                for indice in id_indices:
                    # switch from opencv coordinates to NAOqi coordinates
                    tvec = tvecs[indice][0]  # translation vectors
                    x, y, z = tvec[2], -tvec[0], -tvec[1]
                    p3d_camera2target = almath.Position3D(x, y, z)

                    rvec = rvecs[indice][0]  # rotation vectors
                    wx, wy, wz = rvec[2], -rvec[0], -rvec[1]
                    proj_rvec, _ = cv2.Rodrigues(numpy.array([wx, wy, wz]))

                    r_camera2target = almath.Rotation(proj_rvec.flatten())
                    t_camera2target = almath.transformFromRotationPosition3D(
                        r_camera2target, p3d_camera2target)

                    if params["position"] == "floor":
                        r3d_correction = almath.Rotation3D(
                            0., math.pi / 2,
                            0.)  # we have 90° on y axis that is invalid
                    elif params["position"] == "wall":
                        r3d_correction = almath.Rotation3D(
                            0., math.pi,
                            0.)  # we have 180° on y axis that is invalid

                    t_correction = almath.transformFromRotation3D(
                        r3d_correction)
                    t_world2target = t_world2camera * t_camera2target * t_correction

                    p6D_world2target = almath.position6DFromTransform(
                        t_world2target)
                    self.logger.info(
                        "@@@@@@@@@ ID: %s - P6D_WORLD2TARGET: %s" %
                        (ids[indice], p6D_world2target))

                    p6Ds[ids[indice]] = {
                        "world2target": list(p6D_world2target.toVector()),
                        "timestamp": timestamp,
                    }
            else:
                raise Exception("No markers with IDs %s found" % params["ids"])

        d = time.time() - t
        self.logger.info("_detect_markers done %s" % d)
        return p6Ds
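
A hypothetical params dict for _detect_markers, inferred from the keys the method reads; the ArUco dictionary choice, marker size, and resolution key are assumptions, not values from the original project:

import cv2

params = {
    "dictionary": cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50),
    "ids": [328],         # detect only this marker; an empty list keeps all IDs
    "resolution": 1,      # key into CAMERA_DATAS_AT_RESOLUTION
    "size": 0.10,         # marker edge length in meters
    "position": "floor",  # or "wall": selects the orientation correction
}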