Example #1
 def updateLandMarkData(self):
     """
     Update landmark information
     """
     try:
         currentCamera = "CameraTop"
         self.landmarkProxy.subscribe("landmark")
         markData = self.memoryProxy.getData("LandmarkDetected")
         if markData is None or len(markData) == 0:
             self.landmarkProxy.unsubscribe("landmark")
             self.disX = 0
             self.disY = 0
             self.dist = 0
             self.yawAngle = 0
             return
         wzCamera = markData[1][0][0][1]
         wyCamera = markData[1][0][0][2]
         angularSize = markData[1][0][0][3]
         distanceFromCameraToLandmark = self.landMarkSize / (2 * math.tan(angularSize / 2))
         transform = self.motionProxy.getTransform(currentCamera, 2, True)
         transformList = almath.vectorFloat(transform)
         robotToCamera = almath.Transform(transformList)
         cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)
         cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)
         robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
         landMarkX = robotToLandmark.r1_c4
         landMarkY = robotToLandmark.r2_c4
         self.landmarkProxy.unsubscribe("landmark")
         yawAngle = math.atan2(landMarkY, landMarkX)
         self.disX = landMarkX
         self.disY = landMarkY
         self.dist = distanceFromCameraToLandmark
         self.yawAngle = yawAngle
     except Exception, err:
         print(err)
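The method above assumes that self.landmarkProxy, self.memoryProxy, self.motionProxy and self.landMarkSize were set up elsewhere in the class. A minimal sketch of that setup, with hypothetical IP, port and mark size, might look like this:

import math
import almath
from naoqi import ALProxy

class LandmarkTracker(object):
    def __init__(self, ip="127.0.0.1", port=9559):  # hypothetical address
        # Standard NAOqi proxies used by updateLandMarkData
        self.landmarkProxy = ALProxy("ALLandMarkDetection", ip, port)
        self.memoryProxy = ALProxy("ALMemory", ip, port)
        self.motionProxy = ALProxy("ALMotion", ip, port)
        self.landMarkSize = 0.105  # printed mark size in meters (assumed)
        self.disX = self.disY = self.dist = self.yawAngle = 0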
Example #2
def robotTolandmark(allmarkdata):
    currentCamera = "CameraTop"
    landmarkTheoreticalSize = 0.105  # in meters
    wzCamera = allmarkdata[0]
    wyCamera = allmarkdata[1]
    angularSize = allmarkdata[2]
    # xdistance: robot to red ball; sdistance: robot to mark; angle: angle between the two lines
    # Compute the distance
    distanceFromCameraToLandmark = landmarkTheoreticalSize / (2 * math.tan(angularSize / 2))

    # Get the current camera transform in the robot frame
    transform = motionPrx.getTransform(currentCamera, 2, True)
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)

    # Compute the rotation pointing towards the NAO mark
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)

    # Translation from the camera to the NAO mark
    cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

    # Transform from the robot to the NAO mark
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
    x = robotToLandmark.r1_c4
    y = robotToLandmark.r2_c4
    z = robotToLandmark.r3_c4
    sdistance = math.sqrt(x * x + y * y)  # sdistance: distance from the robot to the mark
    angle = math.atan(y/x)
    robotTolandmarkdata = [angle, sdistance, x, y, z]
    print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
    print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
    print "z " + str(robotToLandmark.r3_c4) + " (in meters)"
    return robotTolandmarkdata
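A minimal call sketch for robotTolandmark, assuming motionPrx is an ALMotion proxy created at module level and using the same "LandmarkDetected" layout as the other examples (hypothetical IP and port):

from naoqi import ALProxy

memoryPrx = ALProxy("ALMemory", "127.0.0.1", 9559)  # hypothetical address
markData = memoryPrx.getData("LandmarkDetected")
if markData and len(markData) >= 2:
    # [wzCamera, wyCamera, angularSize], all in radians
    allmarkdata = [markData[1][0][0][1], markData[1][0][0][2], markData[1][0][0][3]]
    angle, sdistance, x, y, z = robotTolandmark(allmarkdata)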
Example #3
def getMarkXYZ (IP, portNumber, markData, landmarkSize):
    print "0"
    currentCamera = "CameraTop"
    print "1"
    # Retrieve landmark angular size in radians.
    angularSize = markData[1][0][0][3]
    print "2"
    # Compute distance to landmark.
    distanceFromCameraToLandmark = landmarkSize / ( 2 * math.tan( angularSize / 2))
    print "3"
    motionProxy = ALProxy("ALMotion", IP, portNumber)
    print "4"
    # Retrieve landmark center position in radians.
    wzCamera = markData[1][0][0][1]
    print "5"
    wyCamera = markData[1][0][0][2]
    print "6"
    # Get current camera position in NAO space.
    transform = motionProxy.getTransform(currentCamera, 2, True)
    print "7"
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)

    # Compute the rotation to point towards the landmark.
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)

    # Compute the translation to reach the landmark.
    cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

    # Combine all transformations to get the landmark position in NAO space.
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

    return robotToLandmark.r1_c4, robotToLandmark.r2_c4, robotToLandmark.r3_c4
Example #4
  def getNaoSpaceLandmark(self):
    # Wait for a mark to be detected.
    landmarkTheoreticalSize = 0.06 #in meters
    currentCamera = "CameraTop"
    markData = self.memory.getData("LandmarkDetected")
    while (len(markData) == 0):
        markData = self.memory.getData("LandmarkDetected")
    print markData
    # Retrieve landmark center position in radians.
    wzCamera = markData[1][0][0][1]
    wyCamera = markData[1][0][0][2]

    # Retrieve landmark angular size in radians.
    angularSize = markData[1][0][0][3]

    # Compute distance to landmark.
    distanceFromCameraToLandmark = landmarkTheoreticalSize / ( 2 * math.tan( angularSize / 2))

    # Get current camera position in NAO space.
    transform = self.motion.getTransform(currentCamera, 2, True)
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)

    # Compute the rotation to point towards the landmark.
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)

    # Compute the translation to reach the landmark.
    cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

    # Combine all transformations to get the landmark position in NAO space.
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

    print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
    print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
    print "z " + str(robotToLandmark.r3_c4) + " (in meters)"
Example #5
def detectLandmark(landmarkProxy):
    ''' Function to make NAO detect NAO marks '''

    # Subscribe to LandmarkDetected event from ALLandMarkDetection proxy.
    landmarkProxy.subscribe("landmarkTest")

    # Wait for a mark to be detected.
    markData = memoryProxy.getData("LandmarkDetected")
    while (len(markData) == 0):
        markData = memoryProxy.getData("LandmarkDetected")

    # Retrieve landmark center position in radians.
    wzCamera = markData[1][0][0][1]
    wyCamera = markData[1][0][0][2]

    # Retrieve landmark angular size in radians.
    angularSize = markData[1][0][0][3]

    # Compute distance to landmark.
    distanceFromCameraToLandmark = LANDMARK_SIZE / ( 2 * math.tan( angularSize / 2))

    # Get current camera position in NAO space.
    transform = motionProxy.getTransform(cameraTop, 2, True)
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)

    # Compute the rotation to point towards the landmark.
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)

    # Compute the translation to reach the landmark.
    cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

    # Combine all transformations to get the landmark position in NAO space.
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

    print "x " + str(round(robotToLandmark.r1_c4/0.0254, 2)) + " (in inches)"
    print "y " + str(round(robotToLandmark.r2_c4/0.0254, 2)) + " (in inches)"
    print "z " + str(round(robotToLandmark.r3_c4/0.0254, 2)) + " (in inches)"

    speakProxy.say("I have detected a landmark.")

    speakProxy.say("It is located at " + str(round(robotToLandmark.r1_c4/0.0254, 2)) + " inches away on my ex axis")
    speakProxy.say("And at " + str(round(robotToLandmark.r2_c4/0.0254, 2)) + " inches away on my why axis")
    speakProxy.say("And at " + str(round(robotToLandmark.r3_c4/0.0254, 2)) + " inches away on my zee axis")

    # Subscribe to sonars, this will launch sonars (at hardware level) and start data acquisition.
    sonarProxy.subscribe("sonarTest")

    # Get sonar left first echo (distance in meters to the first obstacle).
    print "left sonar data = " + str(round(memoryProxy.getData("Device/SubDeviceList/US/Left/Sensor/Value"), 2))

    # Same thing for right.
    print "right sonar data = " + str(round(memoryProxy.getData("Device/SubDeviceList/US/Right/Sensor/Value"), 2))

    speakProxy.say("My left sonar sensor detected at " + str(round(memoryProxy.getData("Device/SubDeviceList/US/Left/Sensor/Value"), 2)))
    speakProxy.say("And my right sonar sensor detected at " + str(round(memoryProxy.getData("Device/SubDeviceList/US/Right/Sensor/Value"), 2)))

    sonarProxy.unsubscribe("sonarTest")
    landmarkProxy.unsubscribe("landmarkTest")
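This example relies on module-level proxies and constants (memoryProxy, motionProxy, sonarProxy, speakProxy, cameraTop, LANDMARK_SIZE) that are not shown. A minimal sketch of that context, with hypothetical values:

from naoqi import ALProxy

ROBOT_IP = "127.0.0.1"  # hypothetical address
PORT = 9559
LANDMARK_SIZE = 0.06    # assumed mark size in meters
cameraTop = "CameraTop"

memoryProxy = ALProxy("ALMemory", ROBOT_IP, PORT)
motionProxy = ALProxy("ALMotion", ROBOT_IP, PORT)
sonarProxy = ALProxy("ALSonar", ROBOT_IP, PORT)
speakProxy = ALProxy("ALTextToSpeech", ROBOT_IP, PORT)
landmarkProxy = ALProxy("ALLandMarkDetection", ROBOT_IP, PORT)

detectLandmark(landmarkProxy)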
Example #6
 def landmarkYmovement(self, detection):
     zCamera = detection[1][0][0][1]
     yCamera = detection[1][0][0][2]
     angularSize = detection[1][0][0][3]
     distance = .06 / (2 * math.tan(angularSize / 2))
     transform = self.motionProxy.getTransform("CameraTop", 2, True)
     transformList = almath.vectorFloat(transform)
     robotToCamera = almath.Transform(transformList)
     cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
         0, yCamera, zCamera)
     cameraToLandmarkTranslationTransform = almath.Transform(distance, 0, 0)
     robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
     if math.fabs(robotToLandmark.r2_c4) > .2 and math.fabs(
             robotToLandmark.r2_c4) < .45:
         self.motionProxy.moveToward(0, 0, 0)
         self.motionProxy.moveTo(0, robotToLandmark.r2_c4, 0)
         firstDetection = robotToLandmark.r2_c4
         secondDetection = self.memoryProxy.getData("LandmarkDetected", 0)
         if not secondDetection == []:
             zCamera = secondDetection[1][0][0][1]
             yCamera = secondDetection[1][0][0][2]
             angularSize = secondDetection[1][0][0][3]
             distance = .06 / (2 * math.tan(angularSize / 2))
             transform = self.motionProxy.getTransform("CameraTop", 2, True)
             transformList = almath.vectorFloat(transform)
             robotToCamera = almath.Transform(transformList)
             cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
                 0, yCamera, zCamera)
             cameraToLandmarkTranslationTransform = almath.Transform(
                 distance, 0, 0)
             robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
             if not math.fabs(robotToLandmark.r2_c4) < .2:
                 if robotToLandmark.r2_c4 < 0:
                     self.motionProxy.moveTo(
                         0, 0, ((robotToLandmark.r2_c4 / 1.06) * 5))
                 else:
                     self.motionProxy.moveTo(
                         0, 0, -((robotToLandmark.r2_c4 / 1.06) * 5))
         else:
             if firstDetection < 0:
                 self.motionProxy.moveTo(0, 0,
                                         ((firstDetection / 1.06) * 5))
             else:
                 self.motionProxy.moveTo(0, 0,
                                         -((firstDetection / 1.06) * 5))
         self.motionProxy.moveToward(.5, .02, -.03)
Example #7
    def detectLandmark(self):
        ''' Function to make NAO detect NAO marks '''

        # Subscribe to LandmarkDetected event from ALLandMarkDetection proxy.
        self.landmarkProxy.subscribe("landmarkTest")

        # Wait for a mark to be detected.
        markData = self.memoryProxy.getData("LandmarkDetected")

        while (len(markData) == 0):
            markData = self.memoryProxy.getData("LandmarkDetected")

        # Retrieve landmark center position in radians.
        wzCamera = markData[1][0][0][1]
        wyCamera = markData[1][0][0][2]

        # Retrieve landmark angular size in radians.
        angularSize = markData[1][0][0][3]

        # Compute distance to landmark.
        distanceFromCameraToLandmark = self.LANDMARK_SIZE / (
            2 * m.tan(angularSize / 2))

        # Get current camera position in NAO space.
        transform = self.motionProxy.getTransform(cameraTop, 2, True)
        transformList = almath.vectorFloat(transform)
        robotToCamera = almath.Transform(transformList)

        # Compute the rotation to point towards the landmark.
        cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
            0, wyCamera, wzCamera)

        # Compute the translation to reach the landmark.
        cameraToLandmarkTranslationTransform = almath.Transform(
            distanceFromCameraToLandmark, 0, 0)

        # Combine all transformations to get the landmark position in NAO space.
        robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

        x = round(robotToLandmark.r1_c4 / 0.0254, 2)
        y = round(robotToLandmark.r2_c4 / 0.0254, 2)
        z = round(robotToLandmark.r3_c4 / 0.0254, 2)

        print "x " + str(x) + " (in inches)"
        print "y " + str(y) + " (in inches)"
        print "z " + str(z) + " (in inches)"

        # Subscribe to sonars, this will launch sonars (at hardware level) and start data acquisition.
        self.sonarProxy.subscribe("sonarTest")

        l = round(
            self.memoryProxy.getData("Device/SubDeviceList/US/Left/Sensor/Value"),
            2)
        r = round(
            self.memoryProxy.getData("Device/SubDeviceList/US/Right/Sensor/Value"),
            2)

        return x, y, z, l, r
Example #8
def arm_dist(motionProxy,effectorName,x,y,z):
    # arm transformation in robot space
    arm_transform = motionProxy.getTransform(effectorName,2,True)
    robot2Arm = almath.Transform(almath.vectorFloat(arm_transform))
    rot=almath.position6DFromTransform(robot2Arm)
    print effectorName,"position",robot2Arm.r1_c4,robot2Arm.r2_c4,robot2Arm.r3_c4
    mx,my,mz=rot.wx,rot.wy,rot.wz
    x_d,y_d,z_d=x-robot2Arm.r1_c4,y-robot2Arm.r2_c4,z-robot2Arm.r3_c4
    print "distances update:"
    print x_d,y_d,z_d
    return [x_d,y_d,z_d],mx
Example #9
 def landmarkYmovement(self, detection):
     zCamera = detection[1][0][0][1]
     yCamera = detection[1][0][0][2]
     angularSize = detection[1][0][0][3]
     distance = .06 / ( 2 * math.tan( angularSize / 2))
     transform = self.motionProxy.getTransform("CameraTop", 2, True)
     transformList = almath.vectorFloat(transform)
     robotToCamera = almath.Transform(transformList)
     cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, yCamera, zCamera)
     cameraToLandmarkTranslationTransform = almath.Transform(distance, 0, 0)
      robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
     if math.fabs(robotToLandmark.r2_c4) > .2 and math.fabs(robotToLandmark.r2_c4) < .45:
         self.motionProxy.moveToward(0, 0, 0)
         self.motionProxy.moveTo(0, robotToLandmark.r2_c4, 0)
         firstDetection = robotToLandmark.r2_c4
         secondDetection = self.memoryProxy.getData("LandmarkDetected", 0)
         if not secondDetection == []:
             zCamera = secondDetection[1][0][0][1]
             yCamera = secondDetection[1][0][0][2]
             angularSize = secondDetection[1][0][0][3]
             distance = .06 / ( 2 * math.tan( angularSize / 2))
             transform = self.motionProxy.getTransform("CameraTop", 2, True)
             transformList = almath.vectorFloat(transform)
             robotToCamera = almath.Transform(transformList)
             cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, yCamera, zCamera)
             cameraToLandmarkTranslationTransform = almath.Transform(distance, 0, 0)
              robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
              if not math.fabs(robotToLandmark.r2_c4) < .2:
                 if robotToLandmark.r2_c4 < 0:
                     self.motionProxy.moveTo(0, 0, ((robotToLandmark.r2_c4 / 1.06) * 5))
                 else:
                     self.motionProxy.moveTo(0, 0, -((robotToLandmark.r2_c4 / 1.06) * 5))
         else:
             if firstDetection < 0:
                 self.motionProxy.moveTo(0, 0, ((firstDetection / 1.06) * 5))
             else:
                 self.motionProxy.moveTo(0, 0, -((firstDetection / 1.06) * 5))
         self.motionProxy.moveToward(.5, .02, -.03)
Example #10
def get_dot(area):
    x, y, w, h = area
    center_x = x + w / 2
    center_y = y + h / 2

    landmark_width_in_pic = w
    currentCamera = "CameraTop"
    # headAngle=headg_motion.getAngles("HeadYaw", True)
    head_yaw_Angle = 10
    # markWithHeadAngle = alpha + headAngle[0]  # angle of the landmark relative to the robot's head
    markWithHeadAngle = center_x + head_yaw_Angle
    # Turn the head to face the landmark
    g_motion.angleInterpolationWithSpeed("HeadYaw", markWithHeadAngle, 0.2)
    # ---------------------------------- Compute distance ----------------------------------#
    # distanceFromCameraToLandmark = landmark["size"] / (2 * math.tan(landmarkWidthInPic / 2))
    distanceFromCameraToLandmark = mark_width / (
        2 * math.tan(landmark_width_in_pic / 2))
    # Get the current camera transform in the robot frame
    transform = g_motion.getTransform(currentCamera, 2, True)
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)
    # Debug prints
    # print("transform:", transform)
    # print("transformList :", transformList)
    # print("robotToCamera :", robotToCamera)

    # Compute the rotation pointing towards the landmark
    # cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, beta, alpha)
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
        0, center_x, center_y)

    # Translation from the camera to the landmark
    cameraToLandmarkTranslationTransform = almath.Transform(
        distanceFromCameraToLandmark, 0, 0)

    # Transform from the robot to the landmark
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
    # Debug prints
    # print("cameraToLandmarkRotationTransform: " ,cameraToLandmarkRotationTransform)
    # print("cameraToLandmarkTranslationTransform: ",cameraToLandmarkTranslationTransform)
    # print("robotToLandmark",robotToLandmark )
    x = robotToLandmark.r1_c4
    y = robotToLandmark.r2_c4
    z = robotToLandmark.r3_c4

    distance = math.sqrt(x**2 + y * y) * 100

    markWithHeadAngle = round(markWithHeadAngle, 2)
    # Return the data
    return distance, markWithHeadAngle
Example #11
def detectLandmark(landmarkProxy, currentCamera='CameraTop'):
    x = 0
    y = 0
    z = 0

    # Wait for a mark to be detected.
    cycles = 0
    markData = memoryProxy.getData("LandmarkDetected")
    while (len(markData) == 0):
        markData = memoryProxy.getData("LandmarkDetected")
        #print cycles
        cycles = cycles + 1
        if (cycles > 10):
            return 0, 0, 0
    #print("number of detections = "+str(len(markData)))

    # Retrieve landmark center position in radians.
    wzCamera = markData[1][0][0][1]
    wyCamera = markData[1][0][0][2]

    # Retrieve landmark angular size in radians.
    angularSize = markData[1][0][0][3]

    # Compute distance to landmark.
    distanceFromCameraToLandmark = LANDMARK_SIZE / (2 *
                                                    math.tan(angularSize / 2))

    # Get current camera position in NAO space.
    transform = motionProxy.getTransform(currentCamera, 2, True)
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)

    # Compute the rotation to point towards the landmark.
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
        0, wyCamera, wzCamera)

    # Compute the translation to reach the landmark.
    cameraToLandmarkTranslationTransform = almath.Transform(
        distanceFromCameraToLandmark, 0, 0)

    # Combine all transformations to get the landmark position in NAO space.
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

    #print "Finish Landmark detect"

    return metersToInches(robotToLandmark.r1_c4), metersToInches(
        robotToLandmark.r2_c4), metersToInches(robotToLandmark.r3_c4)
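The metersToInches helper is not shown in this example; a one-line version consistent with the 0.0254 m per inch conversion used elsewhere could be (hypothetical helper):

def metersToInches(meters):
    # 1 inch = 0.0254 m
    return meters / 0.0254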
Example #12
    def on_landmark_detected(self, markData):
        """
        Callback for event LandmarkDetected.
        """
        if markData == []:  # empty value when the landmark disappears
            self.got_landmark = False
        elif not self.got_landmark:  # only speak the first time a landmark appears
            self.got_landmark = True
            print "I saw a landmark! "
            self.tts.say("I saw a landmark! ")

            # Retrieve landmark center position in radians.
            wzCamera = markData[1][0][0][1]
            wyCamera = markData[1][0][0][2]

            # Retrieve landmark angular size in radians.
            angularSize = markData[1][0][0][3]

            # Compute distance to landmark.
            distanceFromCameraToLandmark = self.landmarkTheoreticalSize / (
                2 * math.tan(angularSize / 2))

            # Get current camera position in NAO space.
            transform = self.motion_service.getTransform(
                self.currentCamera, 2, True)
            transformList = almath.vectorFloat(transform)
            robotToCamera = almath.Transform(transformList)

            # Compute the rotation to point towards the landmark.
            cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
                0, wyCamera, wzCamera)

            # Compute the translation to reach the landmark.
            cameraToLandmarkTranslationTransform = almath.Transform(
                distanceFromCameraToLandmark, 0, 0)

            # Combine all transformations to get the landmark position in NAO space.
            robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

            print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
            print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
            print "z " + str(robotToLandmark.r3_c4) + " (in meters)"

            self.motion_service.moveTo(robotToLandmark.r1_c4,
                                       robotToLandmark.r2_c4,
                                       robotToLandmark.r3_c4)
Example #13
    def updateLandMarkData(self, client="landMark"):
        """
		update landMark information

		Args:
			client: client name
		Return:
			None.
		"""
        if self.cameraProxy.getActiveCamera() != self.cameraId:
            self.cameraProxy.setActiveCamera(self.cameraId)
            time.sleep(1)
        self.landMarkProxy.subscribe(client)
        markData = self.memoryProxy.getData("LandmarkDetected")
        self.landMarkProxy.unsubscribe(client)
        if (markData is None or len(markData) == 0):
            self.disX = 0
            self.disY = 0
            self.dist = 0
            self.yawAngle = 0
        else:
            wzCamera = markData[1][0][0][1]
            wyCamera = markData[1][0][0][2]
            angularSize = markData[1][0][0][3]
            distCameraToLandmark = self.landMarkSize / (
                2 * math.tan(angularSize / 2))
            transform = self.motionProxy.getTransform(self.cameraName, 2, True)
            transformList = almath.vectorFloat(transform)
            robotToCamera = almath.Transform(transformList)
            cameraToLandmarkRotTrans = almath.Transform_from3DRotation(
                0, wyCamera, wzCamera)
            cameraToLandmarkTranslationTrans = almath.Transform(
                distCameraToLandmark, 0, 0)
            robotToLandmark = robotToCamera * \
                  cameraToLandmarkRotTrans * \
                  cameraToLandmarkTranslationTrans
            self.disX = robotToLandmark.r1_c4
            self.disY = robotToLandmark.r2_c4
            self.dist = np.sqrt(self.disX**2 + self.disY**2)
            self.yawAngle = math.atan2(self.disY, self.disX)
Example #14
def CalculateDistanceNew(Weight,Alpha,Beta,IP,PORT,landmarkTheoreticalSize):
    currentCamera = "CameraBottom"
    memoryProxy = ALProxy("ALMemory", IP,PORT)
    motionProxy=ALProxy("ALMotion",IP,PORT)
    angularSize = (Weight/ 640.0) * 60.97 * almath.PI / 180
    distanceFromCameraToLandmark = landmarkTheoreticalSize / (2 * math.tan(angularSize / 2))
    # Get current camera position in NAO space.
    transform = motionProxy.getTransform(currentCamera, 2, True)
    print "transform=",transform
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)
    # Compute the rotation to point towards the landmark.
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, Beta, Alpha)

    # Compute the translation to reach the landmark.
    cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

    # Combine all transformations to get the landmark position in NAO space.
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
    print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
    print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
    print "z " + str(robotToLandmark.r3_c4) + " (in meters)"
    print "robotToLandmark=",robotToLandmark
    # Read the relevant sensor angles and positions from the robot
    headAgl = motionProxy.getAngles(["HeadYaw", "HeadPitch"], True)
    # ColumnAglY = Alpha + headAgl[0]     # horizontal angle difference
    # print "ColumnAglY=",ColumnAglY
    # Position3D=almath.Position3D(robotToLandmark.r1_c4,robotToLandmark.r2_c4,robotToLandmark.r3_c4)
    # Position6D=almath.Position6D(0,0,0,0,0,0)
    # almath.position6DFromPosition3DInPlace(Position3D,Position6D)
    Position6D=almath.position6DFromTransform(robotToLandmark)
    # position6D=almath.vectorFloat(Position6D)
    # ColumnAglY=position6D
    print "Position6D.wz=",Position6D.wz
    print "Position6D",Position6D
    # print "type of Position6D=",type(Position6D)
    ColumnAglY=Position6D.wz
    # print "ColumnAglY=",ColumnAglY
    return robotToLandmark.r1_c4*1000,robotToLandmark.r2_c4*1000,robotToLandmark.r3_c4*1000,ColumnAglY     # return Torso_X and Torso_Y in millimeters
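The angularSize line in CalculateDistanceNew converts a width in pixels into radians using the 640-pixel image width and an assumed horizontal field of view of about 60.97 degrees. A rough worked example of that conversion (illustrative values, not from the original code):

import math
import almath

pixel_width = 64  # hypothetical mark width in pixels
angular_size = (pixel_width / 640.0) * 60.97 * almath.PI / 180  # about 0.106 rad
# With a 0.1 m mark, the distance estimate is roughly 0.1 / (2 * tan(0.053)) = 0.94 m
distance = 0.1 / (2 * math.tan(angular_size / 2))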
Example #15
    def getNaoSpaceLandmark(self):
        # Wait for a mark to be detected.
        landmarkTheoreticalSize = 0.06  #in meters
        currentCamera = "CameraTop"
        markData = self.memory.getData("LandmarkDetected")
        while (len(markData) == 0):
            markData = self.memory.getData("LandmarkDetected")
        print markData
        # Retrieve landmark center position in radians.
        wzCamera = markData[1][0][0][1]
        wyCamera = markData[1][0][0][2]

        # Retrieve landmark angular size in radians.
        angularSize = markData[1][0][0][3]

        # Compute distance to landmark.
        distanceFromCameraToLandmark = landmarkTheoreticalSize / (
            2 * math.tan(angularSize / 2))

        # Get current camera position in NAO space.
        transform = self.motion.getTransform(currentCamera, 2, True)
        transformList = almath.vectorFloat(transform)
        robotToCamera = almath.Transform(transformList)

        # Compute the rotation to point towards the landmark.
        cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
            0, wyCamera, wzCamera)

        # Compute the translation to reach the landmark.
        cameraToLandmarkTranslationTransform = almath.Transform(
            distanceFromCameraToLandmark, 0, 0)

        # Combine all transformations to get the landmark position in NAO space.
        robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

        print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
        print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
        print "z " + str(robotToLandmark.r3_c4) + " (in meters)"
Example #16
def DetecteLandmark():
    camProxy.setParam(18, 0)
    landmarkTheoreticalSize = 0.1
    currentCamera = "CameraTop"
    ip = robotIP
    period = 500
    landMarkProxy.subscribe("Test_LandMark", period, 0.0)
    for i in range(0, 10):
        time.sleep(0.5)
        markData = memoryProxy.getData("LandmarkDetected")
        if (markData is None or len(markData) == 0):
            continue
        else:
            if (len(markData) == 1):
                wzCamera = markData[1][0][0][1]
                wyCamera = markData[1][0][0][2]

                # Retrieve landmark angular size in radians.
                angularSize = markData[1][0][0][3]

                # Compute distance to landmark.
                distanceFromCameraToLandmark = landmarkTheoreticalSize / (
                    2 * math.tan(angularSize / 2))

                motionProxy = ALProxy("ALMotion", ip, 9559)

                # Get current camera position in NAO space.
                transform = motionProxy.getTransform(currentCamera, 2, True)
                transformList = almath.vectorFloat(transform)
                robotToCamera = almath.Transform(transformList)

                # Compute the rotation to point towards the landmark.
                cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
                    0, wyCamera, wzCamera)

                # Compute the translation to reach the landmark.
                cameraToLandmarkTranslationTransform = almath.Transform(
                    distanceFromCameraToLandmark, 0, 0)

                # Combine all transformations to get the landmark position in NAO space.

                robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
                landMarkProxy.unsubscribe("Test_LandMark")
                # print  "landmark detect",robotToLandmark.r1_c4,robotToLandmark.r2_c4,robotToLandmark.r3_c4,distanceFromCameraToLandmark
                return 1, robotToLandmark.r1_c4, robotToLandmark.r2_c4, robotToLandmark.r3_c4, distanceFromCameraToLandmark
            else:
                wzCamera = markData[1][0][0][1]
                wyCamera = markData[1][0][0][2]

                # Retrieve landmark angular size in radians.
                angularSize = markData[1][0][0][3]

                # Compute distance to landmark.
                distanceFromCameraToLandmark = landmarkTheoreticalSize / (
                    2 * math.tan(angularSize / 2))

                motionProxy = ALProxy("ALMotion", ip, 9559)

                # Get current camera position in NAO space.
                transform = motionProxy.getTransform(currentCamera, 2, True)
                transformList = almath.vectorFloat(transform)
                robotToCamera = almath.Transform(transformList)

                # Compute the rotation to point towards the landmark.
                cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
                    0, wyCamera, wzCamera)

                # Compute the translation to reach the landmark.
                cameraToLandmarkTranslationTransform = almath.Transform(
                    distanceFromCameraToLandmark, 0, 0)

                # Combine all transformations to get the landmark position in NAO space.

                robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
                landMarkProxy.unsubscribe("Test_LandMark")
                # print  "landmark detect", robotToLandmark.r1_c4, robotToLandmark.r2_c4, robotToLandmark.r3_c4, distanceFromCameraToLandmark
                return 1, robotToLandmark.r1_c4, robotToLandmark.r2_c4, robotToLandmark.r3_c4, distanceFromCameraToLandmark

    landMarkProxy.unsubscribe("Test_LandMark")
    # print  "landmark detect fff"
    return 0, 0, 0, 0, 0
Example #17
def searchLandmark(turn='left'):
    """
    搜索Landmark,返回
    :return:landmarkData(角度,距离),isFindLandMark
    """
    isFindLandmark = False  # landmark识别符0代表识别到,1代表未识别到。
    robotToLandmarkData = []

    # Set stiffness to 1 so the head can move
    g_motion.setStiffnesses("Head", 1.0)
    # Activate the top camera
    g_camera.setActiveCamera(0)
    # Subscribe to the landmark detection event
    g_landmarkDetection.subscribe("landmarkTest")

    if turn == 'left':
        a = 1
    elif turn == 'right':
        a = -1
    else:
        a = 0

    # g_tts.say('begin to find landmark in %s' %turn)
    print('begin to find landmark in %s' % turn)

    for i in range(8):
        j = i / 4
        k = i % 4
        if j == 0:
            anger = almath.TO_RAD * (k * 30) * a
        else:
            anger = almath.TO_RAD * (90 - k * 30) * a

        # g_motion.angleInterpolationWithSpeed("HeadPitch", anger, 0.1)   #
        g_motion.angleInterpolationWithSpeed("HeadYaw", anger, 0.1)  #
        time.sleep(3)
        markData = g_memory.getData("LandmarkDetected")

        if markData and isinstance(markData, list) and len(markData) >= 2:
            # Notify
            # g_tts.say("find landmark!")
            print("find landmark!")
            # Set the flag to True
            isFindLandmark = True  # landmark detection flag; True means detected
            # Retrieve landmark center position in radians.
            # Extract the data
            alpha = markData[1][0][0][1]
            beta = markData[1][0][0][2]
            landmarkWidthInPic = markData[1][0][0][
                3]  # Retrieve landmark angular size in radians.
            print('there', alpha, beta, landmarkWidthInPic)

            # Get the current head yaw angle
            headAngle = g_motion.getAngles("HeadYaw", True)  # center of view
            markWithHeadAngle = alpha + headAngle[
                0]  # landmark angle relative to the robot's head: head yaw + angle in the image (the vertical angle is not needed here)
            print('markWithHeadAngle:  ', markWithHeadAngle)
            # Turn the head to face the landmark
            g_motion.angleInterpolationWithSpeed("HeadYaw", markWithHeadAngle,
                                                 0.2)

            # ---------------------------------- Compute distance ----------------------------------#
            distanceFromCameraToLandmark = landmark["size"] / (
                2 * math.tan(landmarkWidthInPic / 2))

            # Get the current camera transform in the robot frame
            transform = g_motion.getTransform("CameraTop", 2, True)
            transformList = almath.vectorFloat(transform)
            robotToCamera = almath.Transform(transformList)

            # Compute the rotation pointing towards the landmark
            cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
                0, beta, alpha)  # angle x, angle y, angle z
            print('cameraToLandmarkRotationTransform:  ',
                  cameraToLandmarkRotationTransform)

            # Translation from the camera to the landmark
            cameraToLandmarkTranslationTransform = almath.Transform(
                distanceFromCameraToLandmark, 0, 0)
            print('cameraToLandmarkTranslationTransform: ',
                  cameraToLandmarkTranslationTransform)

            # Transform from the robot to the landmark
            robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
            print('robotToLandmark:  ', robotToLandmark)

            x = robotToLandmark.r1_c4  # row 1, column 4
            y = robotToLandmark.r2_c4
            z = robotToLandmark.r3_c4
            distance = math.sqrt(x**2 + y * y) * 100
            print('distance  ', distance)

            markWithHeadAngle = round(markWithHeadAngle, 2)
            # Store the data in the list
            robotToLandmarkData.append(distance)
            robotToLandmarkData.append(markWithHeadAngle)
            # Unsubscribe from the events
            g_landmarkDetection.unsubscribe("landmarkTest")
            g_camera.unsubscribe("AL::kTopCamera")
            # Reset the head angles
            g_motion.angleInterpolationWithSpeed("HeadPitch", 0.0, 0.2)
            g_motion.angleInterpolationWithSpeed("HeadYaw", 0.0, 0.2)
            return robotToLandmarkData, isFindLandmark
        else:
            # g_tts.say('not find landmark %d time' % i)
            print('not find landmark %d time' % i)

    g_tts.say("landmark is not in sight,over")
    return -1, -1
Example #18
def searchLandmarkInRight():
    """
    搜索Landmark,返回
    :return:landmarkData(角度,距离),isFindLandMark
    """
    global g_motion, g_landmarkDetection, g_camera
    isFindLandmark = False  # landmark识别符0代表识别到,1代表未识别到。
    robotToLandmarkData = []
    headYawAngle = -110 * almath.TO_RAD  # head yaw angle, sweep from right to left
    currentCamera = "CameraTop"
    # Initialization
    # # Reset head angles
    # g_motion.angleInterpolationWithSpeed("HeadPitch", 0.0, 0.3)
    # g_motion.angleInterpolationWithSpeed("HeadYaw", 0.0, 0.3)
    # Set stiffness to 1 so the head can move
    g_motion.setStiffnesses("Head", 1.0)
    # Activate the top camera
    g_camera.setActiveCamera(0)
    # Subscribe to the landmark detection event
    g_landmarkDetection.subscribe("landmarkTest")

    g_motion.angleInterpolationWithSpeed("HeadPitch", -0.1, 0.3)
    # time.sleep(2)
    # Initially -1.5, i.e., observe from right to left
    times = 0
    addAngle = 39 * almath.TO_RAD
    while times < 2:
        time.sleep(3)
        markData = g_memory.getData("LandmarkDetected")
        # Landmark found
        if markData and isinstance(markData, list) and len(markData) >= 2:
            # Notify
            g_tts.say("find landmark!")
            # Set the flag to True
            isFindLandmark = True  # landmark detection flag; True means detected
            # Retrieve landmark center position in radians.
            # Extract the data
            alpha = markData[1][0][0][1]
            beta = markData[1][0][0][2]
            # Retrieve landmark angular size in radians.
            landmarkWidthInPic = markData[1][0][0][3]
            print ("alpha: ",alpha)
            print ("beta: ", beta)
            print landmarkWidthInPic

            # Get the current head yaw angle
            headAngle = g_motion.getAngles("HeadYaw", True)

            print ("headAngle:", headAngle)
            markWithHeadAngle = alpha + headAngle[0]  # landmark angle relative to the robot's head

            # Turn the head to face the landmark
            g_motion.angleInterpolationWithSpeed("HeadYaw", markWithHeadAngle, 0.2)

            # ---------------------------------- Compute distance ----------------------------------#
            distanceFromCameraToLandmark = landmark["size"] / (2 * math.tan(landmarkWidthInPic / 2))

            # Get the current camera transform in the robot frame
            transform = g_motion.getTransform(currentCamera, 2, True)
            transformList = almath.vectorFloat(transform)
            robotToCamera = almath.Transform(transformList)
            # Debug prints
            # print("transform:", transform)
            # print("transformList :", transformList)
            # print("robotToCamera :", robotToCamera)

            # Compute the rotation pointing towards the landmark
            cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, beta, alpha)

            # Translation from the camera to the landmark
            cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

            # Transform from the robot to the landmark
            robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
            # Debug prints
            # print("cameraToLandmarkRotationTransform: " ,cameraToLandmarkRotationTransform)
            # print("cameraToLandmarkTranslationTransform: ",cameraToLandmarkTranslationTransform)
            # print("robotToLandmark",robotToLandmark )
            x = robotToLandmark.r1_c4
            y = robotToLandmark.r2_c4
            z = robotToLandmark.r3_c4

            distance = math.sqrt(x ** 2 + y * y) * 100

            markWithHeadAngle = round(markWithHeadAngle, 2)
            # Store the data in the list
            robotToLandmarkData.append(distance)
            robotToLandmarkData.append(markWithHeadAngle)
            # Log the results
            logging.info("x = " + str(x))
            logging.info("y = " + str(y))
            logging.info("z = " + str(z))
            logging.info("distance from NAO to landmark :: " + str(distance) + "   angle :: " + str(markWithHeadAngle * almath.TO_DEG))
            # Landmark found, break out of the loop
            break

        # Landmark not found, change the angle and keep scanning
        g_motion.angleInterpolationWithSpeed("HeadYaw", headYawAngle, 0.12)  # change the head angle
        if headYawAngle * almath.TO_DEG > 130:
            headYawAngle = -90 * almath.TO_RAD  # head yaw angle, sweep from right to left
            times += 1
            continue
        # elif times == 0:
        #     headYawAngle = headYawAngle + (55 * almath.TO_RAD)
        # elif times == 1:
        headYawAngle = headYawAngle + (39 * almath.TO_RAD)
        # if headYawAngle
        # else:
        #     headYawAngle = headYawAngle - (20 * almath.TO_RAD)
    # Notify
    if not isFindLandmark:
        print "landmark is not in sight !"
        g_tts.say("landmark is not in sight")
    # Unsubscribe from the events
    g_landmarkDetection.unsubscribe("landmarkTest")
    g_camera.unsubscribe("AL::kTopCamera")

    # Reset the head angles
    g_motion.angleInterpolationWithSpeed("HeadPitch", 0.0, 0.2)
    g_motion.angleInterpolationWithSpeed("HeadYaw", 0.0, 0.2)
    return robotToLandmarkData, isFindLandmark
Example #19
def searchLandmarkForAvoid():
    '''
    Search for a landmark.
    :return: landmarkData (angle, distance), isFindLandmark
    '''
    global g_motion, g_landmarkDetection, g_camera
    isFindLandmark = False  # landmark detection flag; True once a landmark has been detected
    robotToLandmarkData = []
    headYawAngle = 110 * almath.TO_RAD  # head yaw angle, sweep from right to left
    currentCamera = "CameraTop"
    # Initialization
    # # Reset head angles
    # g_motion.angleInterpolationWithSpeed("HeadPitch", 0.0, 0.3)
    # g_motion.angleInterpolationWithSpeed("HeadYaw", 0.0, 0.3)
    # Set stiffness to 1 so the head can move
    g_motion.setStiffnesses("Head", 1.0)
    # Activate the top camera
    g_camera.setActiveCamera(0)
    # Subscribe to the landmark detection event
    g_landmarkDetection.subscribe("landmarkTest")

    g_motion.angleInterpolationWithSpeed("HeadPitch", -0.1, 0.3)
    # time.sleep(2)
    # Initially -1.5, i.e., observe from right to left
    times = 0
    addAngle = -39 * almath.TO_RAD
    time.sleep(2.5)
    markData = g_memory.getData("LandmarkDetected")
    # Landmark found
    if (markData and isinstance(markData, list) and len(markData) >= 2):
        # Notify
        g_tts.say("find landmark!")
        # Set the flag to True
        isFindLandmark = True  # landmark detection flag; True means detected
        # Retrieve landmark center position in radians.
        # Extract the data
        alpha = markData[1][0][0][1]
        beta = markData[1][0][0][2]
        # Retrieve landmark angular size in radians.
        landmarkWidthInPic = markData[1][0][0][3]
        # print ("alpha: ",alpha)
        # print ("beta: ", beta)
        # print landmarkWidthInPic

        # Get the current head yaw angle
        headAngle = g_motion.getAngles("HeadYaw", True)

        print ("headAngle:", headAngle)
        markWithHeadAngle = alpha + headAngle[0]  # landmark angle relative to the robot's head

        # Turn the head to face the landmark
        g_motion.angleInterpolationWithSpeed("HeadYaw", markWithHeadAngle, 0.2)

        # ---------------------------------- Compute distance ----------------------------------#
        distanceFromCameraToLandmark = landmark["size"] / (2 * math.tan(landmarkWidthInPic / 2))

        # Get the current camera transform in the robot frame
        transform = g_motion.getTransform(currentCamera, 2, True)
        transformList = almath.vectorFloat(transform)
        robotToCamera = almath.Transform(transformList)
        # Debug prints
        # print("transform:", transform)
        # print("transformList :", transformList)
        # print("robotToCamera :", robotToCamera)

        # Compute the rotation pointing towards the landmark
        cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, beta, alpha)

        # Translation from the camera to the landmark
        cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

        # Transform from the robot to the landmark
        robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
        # Debug prints
        # print("cameraToLandmarkRotationTransform: " ,cameraToLandmarkRotationTransform)
        # print("cameraToLandmarkTranslationTransform: ",cameraToLandmarkTranslationTransform)
        # print("robotToLandmark",robotToLandmark )
        x = robotToLandmark.r1_c4
        y = robotToLandmark.r2_c4
        z = robotToLandmark.r3_c4

        distance = math.sqrt(x ** 2 + y * y) * 100  # distance from the robot to the mark
        # distance = math.sqrt((x+0.055)**2 + y * y) * 100  # distance from the robot to the mark
        # # Some correction handling
        # x*=100
        # y*=100
        # l  = math.sqrt(x**2 + y * y)
        # modifyAngle =  math.asin((x+5.5)/distance) - math.asin(x/l)
        # logging.debug("modifyAngle:::::" + str(modifyAngle * almath.TO_DEG))
        # if markWithHeadAngle > 0:
        #     markWithHeadAngle-=modifyAngle
        # else:
        #     markWithHeadAngle+=modifyAngle
        # Corrected value
        # distance = round( getRealDistanceForLandmark(distance, int(markWithHeadAngle * almath.TO_DEG) ), 2)

        markWithHeadAngle = round(markWithHeadAngle, 2)
        # Store the data in the list
        robotToLandmarkData.append(distance)
        robotToLandmarkData.append(markWithHeadAngle)
        # Log the results
        logging.info("x = " + str(x))
        logging.info("y = " + str(y))
        logging.info("z = " + str(z))
        logging.info("distance from NAO to landmark :: " + str(distance) + "   angle :: " + str(markWithHeadAngle * almath.TO_DEG))
        # Landmark found

    # Notify
    if not isFindLandmark:
        print "landmark is not in sight !"
        g_tts.say("landmark is not in sight")
    # Unsubscribe from the event
    g_landmarkDetection.unsubscribe("landmarkTest")
    g_camera.closeCamera(0)
    # Reset the head angles
    g_motion.angleInterpolationWithSpeed("HeadPitch", 0.0, 0.2)
    g_motion.angleInterpolationWithSpeed("HeadYaw", 0.0, 0.2)
    return robotToLandmarkData, isFindLandmark
Example #20
def landmark():
    # Set here your robot's IP.
    ip = "192.168.3.8"
    # Set here the size of the landmark in meters.
    landmarkTheoreticalSize = 0.06  # in meters
    # Set here the current camera ("CameraTop" or "CameraBottom").
    currentCamera = "CameraTop"

    memoryProxy = ALProxy("ALMemory", ip, 9559)
    landmarkProxy = ALProxy("ALLandMarkDetection", ip, 9559)

    motionProxy.setMoveArmsEnabled(False, False)

    # Subscribe to LandmarkDetected event from ALLandMarkDetection proxy.
    landmarkProxy.subscribe("landmarkTest")

    # Wait for a mark to be detected.
    markData = memoryProxy.getData("LandmarkDetected")
    while (markData is None or len(markData) == 0):
        markData = memoryProxy.getData("LandmarkDetected")

    # Retrieve landmark center position in radians.

    #####################
    markInfoArray = markData[1]
    for markInfo in markInfoArray:
        markShapeInfo = markInfo[0]
        markExtraInfo = markInfo[1]
        alpha = markShapeInfo[1]
        beta = markShapeInfo[2]
        print "mark  ID: %d" % (markExtraInfo[0])
        print "  alpha %.3f - beta %.3f" % (markShapeInfo[1], markShapeInfo[2])
        print "  width %.3f - height %.3f" % (markShapeInfo[3], markShapeInfo[4])

    ############
    wzCamera = markData[1][0][0][1]
    wyCamera = markData[1][0][0][2]

    # Retrieve landmark angular size in radians.
    angularSize = markData[1][0][0][3]

    # Compute distance to landmark.
    distanceFromCameraToLandmark = landmarkTheoreticalSize / (2 * math.tan(angularSize / 2))

    # Get current camera position in NAO space.
    transform = motionProxy.getTransform(currentCamera, 2, True)
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)

    # Compute the rotation to point towards the landmark.
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)

    # Compute the translation to reach the landmark.
    cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

    # Combine all transformations to get the landmark position in NAO space.
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

    print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
    print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
    print "z " + str(robotToLandmark.r3_c4) + " (in meters)"

    x = float(robotToLandmark.r1_c4)
    y = float(robotToLandmark.r2_c4)
    z = float(robotToLandmark.r3_c4)
    print(x, y)
    # print("xxxxxxxxxxxxxxxxxxxxx")
    # print math.sqrt(1 - (math.cos(alpha))**2 + (math.cos(beta))**2)
    # theta = math.acos(math.sqrt(1 - (math.cos(alpha))**2 - (math.cos(beta))**2))
    theta = math.atan(y / x)

    motionProxy.moveTo(x, y, theta)
    # motionProxy.moveTo(x, y, theta)

    landmarkProxy.unsubscribe("landmarkTest")
Example #21
def getMarkXYZ(motionProxyPR, markDataPR, landmarkSizePR):
    """
    Finds and returns the x,y,z position of a nao mark
    relative to nao's camera
    :param motionProxyPR:
    :param markDataPR:
    :param landmarkSizePR:
    """
    '''
    REQUIREMENTS:
    R01 Must return x, y, and z coordinates of the mark relative to
          the NAO

    DESIGN:

    Algorithm
    ---------
    A01 Instantiate currentCamera string
    A02 Instantiate angularSize (in radians)
          from a spot in the 4-dimensional list markDataPR
    A03 Instantiate distanceToLandmark by calculating from the angle
    A04 Instantiate wzCamera and wyCamera as the
          vertical and horizontal offset, respectively,
          of the mark in radians
    A05 Instantiate transform representing position of camera in
          NAO space
    A06 Instantiate transformList
    A07 Instantiate robotToCamera as the transform
          to get robot position from the camera position
    A08 Instantiate cameraToLandmarkRotationTransform as the
          transform to rotate to point at the landmark
    A09 Instantiate cameraToLandmarkTranslationTransform as the
          transform to move to the landmark
    A10 Instantiate robotToLandmark as the combined transformations
    A11 Return x, y, and z coordinates of the transform
    '''
    currentCamera = "CameraTop"  #L01

    angularSize = markDataPR[1][0][0][3]  #L02
    distanceToLandmark = landmarkSizePR / \
                         (2 * math.tan(angularSize / 2))               #L03

    wzCamera, wyCamera = getMarkAngles(markDataPR)  #L04

    transform = motionProxyPR.getTransform(currentCamera, 2, True)  #L05
    transformList = almath.vectorFloat(transform)  #L06
    robotToCamera = almath.Transform(transformList)  #L08

    cameraToLandmarkRotationTransform = \
        almath.Transform_from3DRotation(0, wyCamera, wzCamera)         #L09
    cameraToLandmarkTranslationTransform = \
        almath.Transform(distanceToLandmark, 0, 0)                     #L10

    robotToLandmark = robotToCamera * \
                      cameraToLandmarkRotationTransform * \
                      cameraToLandmarkTranslationTransform             #L11

    return robotToLandmark.r1_c4, \
           robotToLandmark.r2_c4, robotToLandmark.r3_c4                #L12


#getMarkXYZ

#end naoMarkModule.py
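getMarkAngles is referenced above but not included in this example; a sketch consistent with how the other examples index markData (hypothetical helper):

def getMarkAngles(markDataPR):
    # Landmark center position in camera angles (wzCamera, wyCamera), in radians
    return markDataPR[1][0][0][1], markDataPR[1][0][0][2]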
Example #22
# Retrieve landmark center position in radians.
wzCamera = markData[1][0][0][1]
wyCamera = markData[1][0][0][2]

# Retrieve landmark angular size in radians.
angularSize = markData[1][0][0][3]

# Compute distance to landmark.
distanceFromCameraToLandmark = landmarkTheoreticalSize / ( 2 * math.tan( angularSize / 2))

motionProxy = ALProxy("ALMotion", ip, 9559)

# Get current camera position in NAO space.
transform = motionProxy.getTransform(currentCamera, 2, True)
transformList = almath.vectorFloat(transform)
robotToCamera = almath.Transform(transformList)

# Compute the rotation to point towards the landmark.
cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)

# Compute the translation to reach the landmark.
cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

# Combine all transformations to get the landmark position in NAO space.
robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
print "z " + str(robotToLandmark.r3_c4) + " (in meters)"
Example #23
    distanceFromCameraToLandmark = landmarkTheoreticalSize / (
        2 * math.tan(angularSize / 2))

    # Get current camera position in NAO space.
    # print 4X4 matrix values
    # after getTrasform, the value has to converted to vectorFloat
    transform = motionProxy.getTransform(currentCamera, 2, True)
    ##    print "the transform: ",transform
    ##    for i in range(len(transform)):
    ##        if i+4>16:
    ##            break
    ##        temp=[transform[idx] for idx in range(i,i+4)]
    ##        print "-"*len(str(temp))
    ##        print temp
    ##        i=i+4
    transformList = almath.vectorFloat(transform)
    ##    print transformList
    robotToCamera = almath.Transform(transformList)

    ##    print "Robot 2 camera: ",robotToCamera
    # Compute the rotation to point towards the landmark.
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
        0, wyCamera, wzCamera)
    print cameraToLandmarkRotationTransform
    print "--"
    ##    print type(cameraToLandmarkRotationTransform)
    # Compute the translation to reach the landmark.
    cameraToLandmarkTranslationTransform = almath.Transform(
        distanceFromCameraToLandmark, 0, 0)
    print "translation: ", cameraToLandmarkTranslationTransform
Example #24
    # Retrieve landmark angular size in radians.
    angularSize = markData[1][0][0][3]

    # print "angularSize"
    # print angularSize

    # Compute distance to landmark.
    distanceFromCameraToLandmark = landmarkTheoreticalSize / ( 2 * math.tan( angularSize / 2))
##    print "Distance to landMark: ",distanceFromCameraToLandmark


    # Get current camera position in NAO space.
    transform = motionProxy.getTransform(currentCamera, 2, True)#"RHand",2,True)#

##    print "get current cam: ",transform
    transformList = almath.vectorFloat(transform)
##    print "and list: ", transformList
    robotToCamera = almath.Transform(transformList)
##    print "robot to cam: ", robotToCamera

    # Compute the rotation to point towards the landmark.
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)
##    print "Rotation transform: ",cameraToLandmarkRotationTransform
    # Compute the translation to reach the landmark.
    cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

    # Combine all transformations to get the landmark position in NAO space.
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

    x,y,z=robotToLandmark.r1_c4,robotToLandmark.r2_c4,robotToLandmark.r3_c4
    # print "ROBOT TO LANDMARK"
Example #25
import naoqi