예제 #1
0
def main(robotIP, PORT=9559):
    '''Walk the torso through a five-point square path.

    The robot must be in a PoseInit-like posture before this runs.
    '''
    motionProxy = ALProxy("ALMotion", robotIP, PORT)
    postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)

    # Wake the motors and reach a stable standing posture.
    motionProxy.wakeUp()
    postureProxy.goToPosture("StandInit", 0.5)

    effector = "Torso"
    frame = motion.FRAME_WORLD
    axisMask = almath.AXIS_MASK_ALL  # control translation and rotation
    useSensorValues = False

    # Offsets applied to the current torso transform (meters).
    dx = 0.045  # along X
    dy = 0.050  # along Y

    currentTf = motionProxy.getTransform(effector, frame, useSensorValues)

    # Each waypoint is an (x-delta, y-delta) pair applied to the start pose;
    # r1_c4 is the X translation, r2_c4 the Y translation of the 4x4 matrix.
    deltas = [(dx, 0.0), (0.0, -dy), (-dx, 0.0), (0.0, dy), (dx, 0.0)]
    path = []
    for offX, offY in deltas:
        targetTf = almath.Transform(currentTf)
        targetTf.r1_c4 += offX
        targetTf.r2_c4 += offY
        path.append(list(targetTf.toVector()))

    # Close the loop at the starting transform.
    path.append(currentTf)

    times = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]  # seconds, one per waypoint

    motionProxy.transformInterpolations(effector, frame, path, axisMask, times)

    # Relax the motors.
    motionProxy.rest()
예제 #2
0
    def computePath(self, effector, frame):
        """Build a three-point path for *effector* relative to its pose.

        Returns a list of transform vectors: up-and-back with a small pitch,
        up-and-forward, then the starting transform itself.
        """
        stepX = 0.05  # translation along X (meters)
        stepZ = 0.05  # translation along Z (meters)
        pitch = 5.0 * almath.TO_RAD  # rotation around Y (radians)

        # Sensor values disabled: use the command reference pose.
        baseTf = self.motion.getTransform(effector, frame, False)

        waypoints = []

        # 1: back and up, with a small pitch around Y.
        tf = almath.Transform(baseTf)
        tf *= almath.Transform(-stepX, 0.0, stepZ)
        tf *= almath.Transform().fromRotY(pitch)
        waypoints.append(list(tf.toVector()))

        # 2: forward and up.
        tf = almath.Transform(baseTf)
        tf *= almath.Transform(stepX, 0.0, stepZ)
        waypoints.append(list(tf.toVector()))

        # 3: return to the initial transform.
        waypoints.append(baseTf)

        return waypoints
예제 #3
0
def robotTolandmark(allmarkdata):
    currentCamera = "CameraTop"
    landmarkTheoreticalSize = 0.105  # in meters
    wzCamera = allmarkdata[0]
    wyCamera = allmarkdata[1]
    angularSize = allmarkdata[2]
    # 机器人与红球xdistance ,机器人与mark之间sdistance , 两条线的夹角为angle
    # 计算距离
    distanceFromCameraToLandmark = landmarkTheoreticalSize / (2 * math.tan(angularSize / 2))

    # 获取当前机器人到摄像头的距离的变换矩阵
    transform = motionPrx.getTransform(currentCamera, 2, True)
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)

    # 计算指向NAOmark的旋转矩阵
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)

    # 摄像头到NAOmark的矩阵
    cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

    # 机器人到NAOmark的矩阵
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
    x = robotToLandmark.r1_c4
    y = robotToLandmark.r2_c4
    z = robotToLandmark.r3_c4
    sdistance = math.sqrt(x * x + y * y)  # 机器人与mark的距离sdistance
    angle = math.atan(y/x)
    robotTolandmarkdata = [angle, sdistance, x, y, z]
    print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
    print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
    print "z " + str(robotToLandmark.r3_c4) + " (in meters)"
    return robotTolandmarkdata
예제 #4
0
 def updateLandMarkData(self):
     """
     更新LandMark信息
     """
     try:
         currentCamera = "CameraTop"
         self.landmarkProxy.subscribe("landmark")
         markData = self.memoryProxy.getData("LandmarkDetected")
         if markData is None:
             self.disX = 0
             self.disY = 0
             self.dist = 0
             self.yawAngle = 0
             return
         wzCamera = markData[1][0][0][1]
         wyCamera = markData[1][0][0][2]
         angularSize = markData[1][0][0][3]
         distanceFromCameraToLandmark = self.landMarkSize / (2 * math.tan(angularSize / 2))
         transform = self.motionProxy.getTransform(currentCamera, 2, True)
         transformList = almath.vectorFloat(transform)
         robotToCamera = almath.Transform(transformList)
         cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)
         cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)
         robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
         landMarkX = robotToLandmark.r1_c4
         landMarkY = robotToLandmark.r2_c4
         self.landmarkProxy.unsubscribe("landmark")
         yawAngle = math.atan2(landMarkY, landMarkX)
         self.disX = landMarkX
         self.disY = landMarkY
         self.dist = distanceFromCameraToLandmark
         self.yawAngle = yawAngle
     except Exception, err:
         print(err)
예제 #5
0
class kickBall:
    """GOAP-style action: walk up to the ball and kick with the left leg.

    The class attributes describe the planner contract (precondition,
    effect, cost).  Relies on module-level `motionProxy`, `speechProxy`,
    the `motion`/`almath` SDK modules, and an external `stand` action.
    """

    precondition = "at_Ball"  # world state required before running
    effect = "ball_Kicked"    # world state produced on success
    cost = 2                  # planner cost of this action
    ball_Kicked = False       # shared flag, set True after a kick

    def kick(self):
        """Step toward the ball, then swing the left leg forward."""
        print "moving to kick ball"
        motionProxy.moveTo(0.4, 0.0, 0)
        speechProxy.say("Kicking the ball")
        frame = motion.FRAME_TORSO
        axisMask = almath.AXIS_MASK_ALL
        useSensorValues = False
        effector = "LLeg"
        initTf = almath.Transform()  # identity fallback until the read succeeds

        try:
            initTf = almath.Transform(
                motionProxy.getTransform(effector, frame, useSensorValues))
        except Exception, errorMsg:
            # Leg transforms are unavailable on some Nao models.
            print str(errorMsg)
            print "This action will not work on this Nao version."
            exit()

        # 0.4 m forward along the leg's X axis; the RotZ factor is 0 rad,
        # so no extra rotation is applied.
        deltaTf = almath.Transform(0.4, 0, 0.0) * almath.Transform().fromRotZ(
            0.0 * almath.TO_RAD)
        targetTf = initTf * deltaTf
        # NOTE(review): `path` is a flat 16-float vector and `times` a
        # scalar; transformInterpolations usually takes lists of paths and
        # times — confirm this call shape against the NAOqi version in use.
        path = list(targetTf.toVector())
        times = 0.2

        motionProxy.transformInterpolations(effector, frame, path, axisMask,
                                            times)
        kickBall.ball_Kicked = True
        # NOTE(review): `stadning` looks like a typo for `standing` on the
        # external `stand` action object — verify the attribute name there.
        stand.stadning = False
예제 #6
0
def main(robotIP, PORT=9559):
    ''' Example showing a hand ellipsoid
    Warning: Needs a PoseInit before executing
    '''

    motionProxy  = ALProxy("ALMotion", robotIP, PORT)
    postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)

    # Wake up robot
    motionProxy.wakeUp()

    # Send robot to Stand Init
    postureProxy.goToPosture("StandInit", 0.5)

    # Fix: talk to the same robot we move — the address was hard-coded
    # ("168.156.34.195"), which broke the example on any other robot.
    tts = ALProxy("ALTextToSpeech", robotIP, PORT)
    tts.say("Hello, I'm going to move my left arm.")
    tts.say("Then I will go back to the rest position!")

    effector   = "LArm"
    frame      = motion.FRAME_TORSO
    axisMask   = almath.AXIS_MASK_VEL    # just control position
    useSensorValues = False

    path = []
    currentTf = motionProxy.getTransform(effector, frame, useSensorValues)

    # Five ellipse waypoints, each offset from the current hand transform:
    # r2_c4 is the Y translation, r3_c4 the Z translation (meters).
    # point 1
    targetTf  = almath.Transform(currentTf)
    targetTf.r2_c4 -= 0.05 # y
    path.append(list(targetTf.toVector()))

    # point 2
    targetTf  = almath.Transform(currentTf)
    targetTf.r3_c4 += 0.04 # z
    path.append(list(targetTf.toVector()))

    # point 3
    targetTf  = almath.Transform(currentTf)
    targetTf.r2_c4 += 0.04 # y
    path.append(list(targetTf.toVector()))

    # point 4
    targetTf  = almath.Transform(currentTf)
    targetTf.r3_c4 -= 0.02 # z
    path.append(list(targetTf.toVector()))

    # point 5
    targetTf  = almath.Transform(currentTf)
    targetTf.r2_c4 -= 0.05 # y
    path.append(list(targetTf.toVector()))

    # point 6: close the loop at the starting transform
    path.append(currentTf)

    times = [0.5, 1.0, 2.0, 3.0, 4.0, 4.5] # seconds

    motionProxy.transformInterpolations(effector, frame, path, axisMask, times)

    # Go to rest position
    motionProxy.rest()
예제 #7
0
    def shift_weight(self, use_sensor_values, motion_proxy):
        """Shift the robot's weight: lower/offset the torso, then lift LLeg.

        Both moves are blocking transform interpolations over 2 seconds with
        full axis control.  Exits the process if a transform cannot be read.
        """
        axis_mask = almath.AXIS_MASK_ALL  # position + orientation

        # (effector, frame, translation x/y/z in meters)
        moves = [
            ("Torso", mot.FRAME_ROBOT, (0.0, -0.06, -0.03)),  # lean and lower
            ("LLeg", mot.FRAME_TORSO, (0.0, 0.0, 0.04)),      # raise left foot
        ]
        for effector, frame, (dx, dy, dz) in moves:
            times = 2.0  # seconds
            try:
                init_tf = almath.Transform(
                    motion_proxy.getTransform(effector, frame,
                                              use_sensor_values))
            except Exception as e:
                sys.exit(e)
            target_tf = init_tf * almath.Transform(dx, dy, dz)
            motion_proxy.transformInterpolation(effector, frame,
                                                list(target_tf.toVector()),
                                                axis_mask, times, True)
예제 #8
0
def detectLandmark(landmarkProxy):
    ''' Function to make NAO detect NAO marks '''

    # Subscribe to LandmarkDetected event from ALLandMarkDetection proxy.
    landmarkProxy.subscribe("landmarkTest")

    # Wait for a mark to be detected.
    markData = memoryProxy.getData("LandmarkDetected")
    while (len(markData) == 0):
        markData = memoryProxy.getData("LandmarkDetected")

    # Retrieve landmark center position in radians.
    wzCamera = markData[1][0][0][1]
    wyCamera = markData[1][0][0][2]

    # Retrieve landmark angular size in radians.
    angularSize = markData[1][0][0][3]

    # Compute distance to landmark.
    distanceFromCameraToLandmark = LANDMARK_SIZE / ( 2 * math.tan( angularSize / 2))

    # Get current camera position in NAO space.
    transform = motionProxy.getTransform(cameraTop, 2, True)
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)

    # Compute the rotation to point towards the landmark.
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)

    # Compute the translation to reach the landmark.
    cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

    # Combine all transformations to get the landmark position in NAO space.
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

    print "x " + str(round(robotToLandmark.r1_c4/0.0254, 2)) + " (in inches)"
    print "y " + str(round(robotToLandmark.r2_c4/0.0254, 2)) + " (in inches)"
    print "z " + str(round(robotToLandmark.r3_c4/0.0254, 2)) + " (in inches)"

    speakProxy.say("I have detected a landmark.")

    speakProxy.say("It is located at " + str(round(robotToLandmark.r1_c4/0.0254, 2)) + " inches away on my ex axis")
    speakProxy.say("And at " + str(round(robotToLandmark.r2_c4/0.0254, 2)) + " inches away on my why axis")
    speakProxy.say("And at " + str(round(robotToLandmark.r3_c4/0.0254, 2)) + " inches away on my zee axis")

    # Subscribe to sonars, this will launch sonars (at hardware level) and start data acquisition.
    sonarProxy.subscribe("sonarTest")

    # Get sonar left first echo (distance in meters to the first obstacle).
    print "left sonar data = " + str(round(memoryProxy.getData("Device/SubDeviceList/US/Left/Sensor/Value"), 2))

    # Same thing for right.
    print "right sonar data = " + str(round(memoryProxy.getData("Device/SubDeviceList/US/Right/Sensor/Value"), 2))

    speakProxy.say("My left sonar sensor detected at " + str(round(memoryProxy.getData("Device/SubDeviceList/US/Left/Sensor/Value"), 2)))
    speakProxy.say("And my right sonar sensor detected at " + str(round(memoryProxy.getData("Device/SubDeviceList/US/Right/Sensor/Value"), 2)))

    sonarProxy.unsubscribe("sonarTest")
    landmarkProxy.unsubscribe("landmarkTest")
예제 #9
0
    def detectLandmark(self):
        ''' Function to make NAO detect NAO marks '''

        # Subscribe to LandmarkDetected event from ALLandMarkDetection proxy.
        self.landmarkProxy.subscribe("landmarkTest")

        # Wait for a mark to be detected.
        markData = self.memoryProxy.getData("LandmarkDetected")

        while (len(markData) == 0):
            markData = self.memoryProxy.getData("LandmarkDetected")

        # Retrieve landmark center position in radians.
        wzCamera = markData[1][0][0][1]
        wyCamera = markData[1][0][0][2]

        # Retrieve landmark angular size in radians.
        angularSize = markData[1][0][0][3]

        # Compute distance to landmark.
        distanceFromCameraToLandmark = self.LANDMARK_SIZE / (
            2 * m.tan(angularSize / 2))

        # Get current camera position in NAO space.
        transform = self.motionProxy.getTransform(cameraTop, 2, True)
        transformList = almath.vectorFloat(transform)
        robotToCamera = almath.Transform(transformList)

        # Compute the rotation to point towards the landmark.
        cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
            0, wyCamera, wzCamera)

        # Compute the translation to reach the landmark.
        cameraToLandmarkTranslationTransform = almath.Transform(
            distanceFromCameraToLandmark, 0, 0)

        # Combine all transformations to get the landmark position in NAO space.
        robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

        x = round(robotToLandmark.r1_c4 / 0.0254, 2)
        y = round(robotToLandmark.r2_c4 / 0.0254, 2)
        z = round(robotToLandmark.r3_c4 / 0.0254, 2)

        print "x " + str(x) + " (in inches)"
        print "y " + str(y) + " (in inches)"
        print "z " + str(z) + " (in inches)"

        # Subscribe to sonars, this will launch sonars (at hardware level) and start data acquisition.
        self.sonarProxy.subscribe("sonarTest")

        l = round(
            memoryProxy.getData("Device/SubDeviceList/US/Left/Sensor/Value"),
            2)
        r = round(
            memoryProxy.getData("Device/SubDeviceList/US/Right/Sensor/Value"),
            2)

        return x, y, z, l, r
예제 #10
0
def main(session):
    """
    Use transformInterpolations Method on Arm: trace a small ellipse with
    the left hand, then return to rest.
    """
    # Get the services ALMotion & ALRobotPosture.
    motion_service = session.service("ALMotion")
    posture_service = session.service("ALRobotPosture")

    # Wake the motors and reach a stable standing posture.
    motion_service.wakeUp()
    posture_service.goToPosture("StandInit", 0.5)

    effector = "LArm"
    frame = motion.FRAME_TORSO
    axisMask = almath.AXIS_MASK_VEL  # position only, orientation free
    useSensorValues = False

    currentTf = motion_service.getTransform(effector, frame, useSensorValues)

    # Offsets from the current hand transform: r2_c4 is the Y translation,
    # r3_c4 the Z translation of the 4x4 matrix (meters).
    offsets = [
        ("r2_c4", -0.05),
        ("r3_c4", 0.04),
        ("r2_c4", 0.04),
        ("r3_c4", -0.02),
        ("r2_c4", -0.05),
    ]
    path = []
    for attr, delta in offsets:
        targetTf = almath.Transform(currentTf)
        setattr(targetTf, attr, getattr(targetTf, attr) + delta)
        path.append(list(targetTf.toVector()))

    # Close the loop at the starting transform.
    path.append(currentTf)

    times = [0.5, 1.0, 2.0, 3.0, 4.0, 4.5]  # seconds

    motion_service.transformInterpolations(effector, frame, path, axisMask,
                                           times)

    # Relax the motors.
    motion_service.rest()
예제 #11
0
def get_dot(area):
    """Estimate distance and head angle to a landmark from its bounding box.

    area: (x, y, w, h) bounding box of the landmark in the camera image.
    Returns (distance, markWithHeadAngle); distance is in centimeters.

    NOTE(review): relies on module globals `g_motion` (ALMotion proxy) and
    `mark_width` (real landmark width).  `center_x`/`center_y` are pixel
    coordinates but are passed to APIs that expect radians — confirm the
    intended units with the caller.
    """
    x, y, w, h = area
    center_x = x + w / 2
    center_y = y + h / 2

    landmark_width_in_pic = w
    currentCamera = "CameraTop"
    # headAngle=headg_motion.getAngles("HeadYaw", True)
    head_yaw_Angle = 10
    # markWithHeadAngle = alpha + headAngle[0]  # landmark angle relative to the head
    markWithHeadAngle = center_x + head_yaw_Angle
    # Point the head straight at the landmark.
    g_motion.angleInterpolationWithSpeed("HeadYaw", markWithHeadAngle, 0.2)
    # ---------------------------------- compute the distance --------------------------------#
    # distanceFromCameraToLandmark = landmark["size"] / (2 * math.tan(landmarkWidthInPic / 2))
    # Pin-hole model.  NOTE(review): `landmark_width_in_pic` is in pixels,
    # but math.tan expects radians — verify the conversion upstream.
    distanceFromCameraToLandmark = mark_width / (
        2 * math.tan(landmark_width_in_pic / 2))
    # Transform from the robot frame (space 2 = FRAME_ROBOT) to the camera.
    transform = g_motion.getTransform(currentCamera, 2, True)
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)
    # Debug prints:
    # print("transform:", transform)
    # print("transformList :", transformList)
    # print("robotToCamera :", robotToCamera)

    # Rotation that points the camera toward the landmark.
    # cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, beta, alpha)
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
        0, center_x, center_y)

    # Translation from the camera out to the landmark.
    cameraToLandmarkTranslationTransform = almath.Transform(
        distanceFromCameraToLandmark, 0, 0)

    # Robot-to-landmark transform.
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
    # Debug prints:
    # print("cameraToLandmarkRotationTransform: " ,cameraToLandmarkRotationTransform)
    # print("cameraToLandmarkTranslationTransform: ",cameraToLandmarkTranslationTransform)
    # print("robotToLandmark",robotToLandmark )
    x = robotToLandmark.r1_c4
    y = robotToLandmark.r2_c4
    z = robotToLandmark.r3_c4

    # Ground-plane distance, converted from meters to centimeters.
    distance = math.sqrt(x**2 + y * y) * 100

    markWithHeadAngle = round(markWithHeadAngle, 2)
    # Return the measurements.
    return distance, markWithHeadAngle
    def _handle_hand(self, form):
        """Translate one of the bot's hands by the dx/dy/dz offsets in *form*.

        form: a CGI-style form; 'hand' selects the arm and optional
        'dx'/'dy'/'dz' fields give the offsets in meters.
        """
        proxies['motion'].wakeUp()

        # 'left' selects the left arm; anything else moves the right arm.
        name = 'LArm' if form['hand'].value.lower() == 'left' else 'RArm'
        frame = motion.FRAME_TORSO
        useSensorValues = True
        currentTf = proxies['motion'].getTransform(name, frame,
                                                   useSensorValues)

        # Missing offsets default to zero.
        dx = float(form['dx'].value) if 'dx' in form else 0
        dy = float(form['dy'].value) if 'dy' in form else 0
        dz = float(form['dz'].value) if 'dz' in form else 0

        targetTf = almath.Transform(currentTf)
        targetTf.r1_c4 += dx  # x
        targetTf.r2_c4 += dy  # y
        targetTf.r3_c4 += dz  # z (I hope)

        # Position-only control at half of the maximum speed.
        proxies['motion'].setTransform(name, frame, targetTf.toVector(), 0.5,
                                       almath.AXIS_MASK_VEL)

        self.response('Moved {} arm: {} {} {}'.format(form['hand'].value, dx,
                                                      dy, dz))
예제 #13
0
    def _get_image_world2camera_and_timestamp(self, subscriber_id, params):
        """Grab one frame plus the world->camera transform at capture time.

        subscriber_id: ALVideoDevice subscriber handle.
        params: dict with "camera", "resolution",
            "color_space_and_channels", and optional "color" (a BGR lower
            bound triggering a threshold pass).

        Returns (image, t_world2camera, timestamp) where timestamp is
        [seconds, micro_seconds] taken from the frame header.

        Raises Exception when no frame data is available.
        """
        t = time.time()  # wall-clock start, used for the duration log below
        # self.logger.info("_get_image_world2camera_and_timestamp %s..." % subscriber_id)
        image_remote = self.ALVideoDevice.getImageRemote(subscriber_id)
        if not image_remote:
            raise Exception("No data in image")
        camera = params["camera"]
        camera_name = CAMERAS[camera]
        # Frame header fields 4 and 5 carry the capture time.
        seconds = image_remote[4]
        micro_seconds = image_remote[5]
        # NOTE(review): 10e8 == 1e9 and 10e2 == 1e3, so this converts the
        # timestamp to nanoseconds — confirm _getSensorTransformAtTime
        # expects nanoseconds on this NAOqi version.
        t_world2camera = almath.Transform(
            self.ALMotion._getSensorTransformAtTime(
                camera_name, seconds * 10e8 + micro_seconds * 10e2))
        timestamp = [seconds, micro_seconds]  # we store this for TrackEvent
        resolution = params["resolution"]
        x, y = CAMERA_DATAS_AT_RESOLUTION[resolution]["image_size"]
        color_space, channels = params["color_space_and_channels"]
        # Wrap the raw byte buffer as a (height, width, channels) array.
        image = numpy.frombuffer(image_remote[6],
                                 dtype=numpy.uint8).reshape(y, x, channels)
        if params["color"] and color_space == vd.kBGRColorSpace:
            # self.logger.warning("Thresholding image...")
            # Keep only pixels at or above the requested BGR lower bound.
            lower_b = tuple([int(val) for val in params["color"]])
            upper_b = (255, 255, 255)
            image = cv2.inRange(image, lower_b, upper_b)
            # self.logger.warning("Thresholding image done")

        # cv2.imwrite("/home/nao/picture.png", image)
        d = time.time() - t
        self.logger.info("_get_image_world2camera_and_timestamp %s done %s" %
                         (subscriber_id, d))
        return image, t_world2camera, timestamp
def main(robotIP, PORT=9559):
    '''Move the left hand out and back via a two-point transform path.

    The robot should be in a PoseInit-like posture beforehand.
    '''
    motionProxy = ALProxy("ALMotion", robotIP, PORT)
    postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)

    # Wake the motors and stand up.
    motionProxy.wakeUp()
    postureProxy.goToPosture("StandInit", 0.5)

    effector = "LArm"
    frame = motion.FRAME_TORSO
    axisMask = almath.AXIS_MASK_VEL  # position only, orientation free
    useSensorValues = False

    startTf = motionProxy.getTransform(effector, frame, useSensorValues)

    # Target: 3 cm forward (X) and 3 cm left (Y) of the current hand pose.
    goalTf = almath.Transform(startTf)
    goalTf.r1_c4 += 0.03
    goalTf.r2_c4 += 0.03

    # Out to the target, then back to where we started.
    path = [list(goalTf.toVector()), startTf]
    times = [2.0, 4.0]  # seconds

    motionProxy.transformInterpolations(effector, frame, path, axisMask, times)

    # Relax the motors.
    motionProxy.rest()
예제 #15
0
def main(robotIP, PORT=NAO_DEF_PORT):
    """Announce each phase, then repeat a small LArm out-and-back move 5x."""
    proxTTS = ALProxy("ALTextToSpeech", robotIP, PORT)
    proxMotion = ALProxy("ALMotion", robotIP, PORT)
    proxPosture = ALProxy("ALRobotPosture", robotIP, PORT)

    # Wake the motors and reach a stable stand.
    proxMotion.wakeUp()
    proxPosture.goToPosture("StandInit", 0.5)

    proxTTS.say("Variable initialization")
    effector = "LArm"
    frame = motion.FRAME_TORSO
    axisMask = almath.AXIS_MASK_VEL  # position only, orientation free
    useSensorValues = False

    proxTTS.say("Trajectory computation")
    currentTf = proxMotion.getTransform(effector, frame, useSensorValues)
    targetTf = almath.Transform(currentTf)
    targetTf.r1_c4 += 0.03  # 3 cm forward (x)
    targetTf.r2_c4 += 0.03  # 3 cm left (y)

    # Out to the target and back again, five times over.
    path = [list(targetTf.toVector()), currentTf]
    times = [2.0, 4.0]  # seconds

    proxTTS.say("Starting movement")
    for _ in range(5):
        proxMotion.transformInterpolations(effector, frame, path, axisMask,
                                           times)

    proxMotion.rest()
def main(robotIP, PORT=9559):
    """Rotate the left arm -20 degrees about Z while keeping its position."""
    motionProxy = ALProxy("ALMotion", robotIP, PORT)
    postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)

    # Wake the motors and stand up.
    motionProxy.wakeUp()

    # Reach a stable initial posture.
    postureProxy.goToPosture("StandInit", 0.5)

    # Current LArm transform in the torso frame (command values).
    chainName = "LArm"
    frame     = motion.FRAME_TORSO
    useSensor = False
    tf = almath.Transform(motionProxy.getTransform(chainName, frame, useSensor))

    # Pre-multiply by a -20 degree rotation about Z, then restore the original
    # translation so that only the orientation changes.
    tfEnd = almath.Transform.fromRotZ(-20.0*almath.TO_RAD)*tf
    tfEnd.r1_c4 = tf.r1_c4
    tfEnd.r2_c4 = tf.r2_c4
    tfEnd.r3_c4 = tf.r3_c4

    # Mask 63 = 0b111111: command all six axes (translation + rotation).
    axisMask = 63
    fractionMaxSpeed = 0.1
    transform = list(tfEnd.toVector())
    motionProxy.setTransforms(chainName, frame, transform, fractionMaxSpeed, axisMask)

    # Relax the motors.
    motionProxy.rest()
예제 #17
0
def detectLandmark(landmarkProxy, currentCamera='CameraTop'):
    '''Return the detected NAO mark position in inches, or (0, 0, 0).

    Polls ALMemory for "LandmarkDetected" and gives up after the retry
    budget is exhausted.
    '''
    # Poll for a detection, bailing out after the loop has spun 11 times.
    markData = memoryProxy.getData("LandmarkDetected")
    attempts = 0
    while len(markData) == 0:
        markData = memoryProxy.getData("LandmarkDetected")
        attempts += 1
        if attempts > 10:
            return 0, 0, 0

    # Mark center angles (radians) and apparent angular size.
    wzCamera = markData[1][0][0][1]
    wyCamera = markData[1][0][0][2]
    angularSize = markData[1][0][0][3]

    # Pin-hole model: real size over apparent angular size gives the range.
    rangeToMark = LANDMARK_SIZE / (2 * math.tan(angularSize / 2))

    # Camera pose in robot space (space 2 = FRAME_ROBOT).
    robotToCamera = almath.Transform(
        almath.vectorFloat(motionProxy.getTransform(currentCamera, 2, True)))

    # Rotate toward the mark, then translate out along the camera X axis.
    pointAtMark = almath.Transform_from3DRotation(0, wyCamera, wzCamera)
    reachMark = almath.Transform(rangeToMark, 0, 0)
    robotToLandmark = robotToCamera * pointAtMark * reachMark

    # Translation column of the result, converted to inches.
    return (metersToInches(robotToLandmark.r1_c4),
            metersToInches(robotToLandmark.r2_c4),
            metersToInches(robotToLandmark.r3_c4))
예제 #18
0
    def kick(self):
        print "moving to kick ball"
        motionProxy.moveTo(0.4, 0.0, 0)
        speechProxy.say("Kicking the ball")
        frame = motion.FRAME_TORSO
        axisMask = almath.AXIS_MASK_ALL
        useSensorValues = False
        effector = "LLeg"
        initTf = almath.Transform()

        try:
            initTf = almath.Transform(
                motionProxy.getTransform(effector, frame, useSensorValues))
        except Exception, errorMsg:
            print str(errorMsg)
            print "This action will not work on this Nao version."
            exit()
예제 #19
0
 def landmarkYmovement(self, detection):
     """Sidestep to align with a landmark, re-detect, correct heading, walk on.

     detection: an ALMemory "LandmarkDetected" value; detection[1][0][0]
     holds the mark center angles (radians) at indices 1-2 and its apparent
     angular size at index 3.  Only acts when the mark's lateral offset is
     between 0.2 and 0.45 m.
     """
     # Mark center angles (radians) and angular size from the detector.
     zCamera = detection[1][0][0][1]
     yCamera = detection[1][0][0][2]
     angularSize = detection[1][0][0][3]
     # Pin-hole model with a 0.06 m mark: range to the landmark.
     distance = .06 / (2 * math.tan(angularSize / 2))
     # Camera pose in robot space (space 2 = FRAME_ROBOT).
     transform = self.motionProxy.getTransform("CameraTop", 2, True)
     transformList = almath.vectorFloat(transform)
     robotToCamera = almath.Transform(transformList)
     cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
         0, yCamera, zCamera)
     cameraToLandmarkTranslationTransform = almath.Transform(distance, 0, 0)
     # r2_c4 of this product is the mark's lateral (Y) offset in meters.
     robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
     if math.fabs(robotToLandmark.r2_c4) > .2 and math.fabs(
             robotToLandmark.r2_c4) < .45:
         # Stop, then sidestep by the measured lateral offset.
         self.motionProxy.moveToward(0, 0, 0)
         self.motionProxy.moveTo(0, robotToLandmark.r2_c4, 0)
         firstDetection = robotToLandmark.r2_c4
         # Re-read the detector to refine the heading correction.
         secondDetection = self.memoryProxy.getData("LandmarkDetected", 0)
         if not secondDetection == []:
             zCamera = secondDetection[1][0][0][1]
             yCamera = secondDetection[1][0][0][2]
             angularSize = secondDetection[1][0][0][3]
             distance = .06 / (2 * math.tan(angularSize / 2))
             transform = self.motionProxy.getTransform("CameraTop", 2, True)
             transformList = almath.vectorFloat(transform)
             robotToCamera = almath.Transform(transformList)
             cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
                 0, yCamera, zCamera)
             cameraToLandmarkTranslationTransform = almath.Transform(
                 distance, 0, 0)
             robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
             # NOTE(review): this compares a Transform object to a float —
             # it was probably meant to test fabs(robotToLandmark.r2_c4) < .2;
             # confirm the intent before changing it.
             if not robotToLandmark < math.fabs(.2):
                 # Turn by a scaled fraction of the refreshed lateral error.
                 if robotToLandmark.r2_c4 < 0:
                     self.motionProxy.moveTo(
                         0, 0, ((robotToLandmark.r2_c4 / 1.06) * 5))
                 else:
                     self.motionProxy.moveTo(
                         0, 0, -((robotToLandmark.r2_c4 / 1.06) * 5))
         else:
             # Mark lost: fall back to the first measurement for the turn.
             if firstDetection < 0:
                 self.motionProxy.moveTo(0, 0,
                                         ((firstDetection / 1.06) * 5))
             else:
                 self.motionProxy.moveTo(0, 0,
                                         -((firstDetection / 1.06) * 5))
         # Resume walking with a slight drift correction.
         self.motionProxy.moveToward(.5, .02, -.03)
예제 #20
0
    def on_landmark_detected(self, markData):
        """
        Callback for event LandmarkDetected.

        markData: [] when the mark disappears; otherwise markData[1][0][0]
        holds the mark center angles (radians) at indices 1-2 and its
        apparent angular size at index 3.  Announces the first sighting,
        computes the mark's position in robot space, then walks toward it.
        """
        if markData == []:  # empty value when the landmark disappears
            self.got_landmark = False
        elif not self.got_landmark:  # only speak the first time a landmark appears
            self.got_landmark = True
            print "I saw a landmark! "
            self.tts.say("I saw a landmark! ")

            # Retrieve landmark center position in radians.
            wzCamera = markData[1][0][0][1]
            wyCamera = markData[1][0][0][2]

            # Retrieve landmark angular size in radians.
            angularSize = markData[1][0][0][3]

            # Compute distance to landmark (pin-hole model: real size over
            # apparent angular size).
            distanceFromCameraToLandmark = self.landmarkTheoreticalSize / (
                2 * math.tan(angularSize / 2))

            # Get current camera position in NAO space (space 2 = FRAME_ROBOT).
            transform = self.motion_service.getTransform(
                self.currentCamera, 2, True)
            transformList = almath.vectorFloat(transform)
            robotToCamera = almath.Transform(transformList)

            # Compute the rotation to point towards the landmark.
            cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
                0, wyCamera, wzCamera)

            # Compute the translation to reach the landmark.
            cameraToLandmarkTranslationTransform = almath.Transform(
                distanceFromCameraToLandmark, 0, 0)

            # Combine all transformations to get the landmark position in NAO space.
            robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

            print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
            print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
            print "z " + str(robotToLandmark.r3_c4) + " (in meters)"

            # NOTE(review): ALMotion.moveTo takes (x, y, theta); passing the
            # mark's z (height) as the rotation looks unintended — confirm.
            self.motion_service.moveTo(robotToLandmark.r1_c4,
                                       robotToLandmark.r2_c4,
                                       robotToLandmark.r3_c4)
예제 #21
0
def userArmsCartesian(motionProxy):
    """Wave both arms: each hand goes to an offset target and back, twice."""
    effector = ["LArm", "RArm"]
    frame = motion.FRAME_TORSO
    useSensorValues = False

    # Position-only control for both chains.
    axisMask = [motion.AXIS_MASK_VEL, motion.AXIS_MASK_VEL]

    def buildPath(chain, dy):
        # Four waypoints: target, home, target, home.  The Y offset flips
        # sign per arm so both move outward symmetrically.
        homeTf = almath.Transform(
            motionProxy.getTransform(chain, frame, useSensorValues))
        awayTf = almath.Transform(
            motionProxy.getTransform(chain, frame, useSensorValues))
        awayTf.r1_c4 += 0.04  # x
        awayTf.r2_c4 += dy    # y
        awayTf.r3_c4 += 0.10  # z
        return [list(awayTf.toVector()), list(homeTf.toVector()),
                list(awayTf.toVector()), list(homeTf.toVector())]

    pathList = [buildPath("LArm", -0.10), buildPath("RArm", 0.10)]

    # Go to the target and back again.
    timesList = [[1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0]]  # seconds

    motionProxy.transformInterpolations(effector, frame, pathList, axisMask,
                                        timesList)
def armMotionXY(motionNaoProxy):
    """Bounce LArm and RArm between their current pose and a shifted pose.

    Position-only control in the torso frame; both arms move at the same
    time along a four-waypoint there-and-back path.
    """
    affectedArm = ["LArm", "RArm"]
    frameNao = motion.FRAME_TORSO
    usvals = False

    # just control position
    axmsk = [motion.AXIS_MASK_VEL, motion.AXIS_MASK_VEL]

    def bounce_path(chain, dy):
        # Build [target, start, target, start] for one arm. The x and z
        # offsets are the same for both arms; the y offset (dy) differs.
        startTf = almath.Transform(
            motionNaoProxy.getTransform(chain, frameNao, usvals))
        endTf = almath.Transform(
            motionNaoProxy.getTransform(chain, frameNao, usvals))
        endTf.r1_c4 += 0.04  # x
        endTf.r2_c4 += dy    # y
        endTf.r3_c4 += 0.10  # z
        return [list(endTf.toVector()),
                list(startTf.toVector()),
                list(endTf.toVector()),
                list(startTf.toVector())]

    addPaths = [bounce_path("LArm", -0.10),
                bounce_path("RArm", 0.10)]

    # Go to the target and back again
    timesList = [[1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0]]  # seconds

    motionNaoProxy.transformInterpolations(affectedArm, frameNao, addPaths,
                                           axmsk, timesList)
예제 #23
0
def arm_dist(motionProxy,effectorName,x,y,z):
    # arm transformation in robot space
    arm_transform = motionProxy.getTransform(effectorName,2,True)
    robot2Arm = almath.Transform(almath.vectorFloat(arm_transform))
    rot=almath.position6DFromTransform(robot2Arm)
    print effectorName,"position",robot2Arm.r1_c4,robot2Arm.r2_c4,robot2Arm.r3_c4
    mx,my,mz=rot.wx,rot.wy,rot.wz
    x_d,y_d,z_d=x-robot2Arm.r1_c4,y-robot2Arm.r2_c4,z-robot2Arm.r3_c4
    print "distances update:"
    print x_d,y_d,z_d
    return [x_d,y_d,z_d],mx
def main(session):
    """
    Use transformInterpolations Method on Foot.
    """
    # Get the services ALMotion & ALRobotPosture.

    motion_service = session.service("ALMotion")
    posture_service = session.service("ALRobotPosture")

    # Wake up robot
    motion_service.wakeUp()

    # Send robot to Stand Init
    posture_service.goToPosture("StandInit", 0.5)

    frame = motion.FRAME_WORLD
    axisMask = almath.AXIS_MASK_ALL  # full control
    useSensorValues = False

    # Lower the Torso and move to the side
    effector = "Torso"
    initTf = almath.Transform(
        motion_service.getTransform(effector, frame, useSensorValues))
    deltaTf = almath.Transform(0.0, -0.06, -0.03)  # x, y, z
    targetTf = initTf * deltaTf
    path = list(targetTf.toVector())
    times = 2.0  # seconds
    motion_service.transformInterpolations(effector, frame, path, axisMask,
                                           times)

    # LLeg motion
    effector = "LLeg"
    initTf = almath.Transform()

    try:
        initTf = almath.Transform(
            motion_service.getTransform(effector, frame, useSensorValues))
    except Exception, errorMsg:
        print str(errorMsg)
        print "This example is not allowed on this robot."
        exit()
예제 #25
0
    def updateLandMarkData(self, client="landMark"):
        """Refresh the landmark observation cached on this object.

        Subscribes to the landmark extractor, reads one detection sample
        from ALMemory, and updates self.disX, self.disY, self.dist and
        self.yawAngle (robot frame; meters / radians). All four are
        zeroed when nothing is detected.

        Args:
            client: subscriber name used with the landmark extractor.
        Return:
            None.
        """
        # Make sure the extractor reads from the camera this object expects.
        if self.cameraProxy.getActiveCamera() != self.cameraId:
            self.cameraProxy.setActiveCamera(self.cameraId)
            time.sleep(1)  # give the camera switch time to take effect
        self.landMarkProxy.subscribe(client)
        markData = self.memoryProxy.getData("LandmarkDetected")
        # BUGFIX: unsubscribe from the landmark extractor we subscribed to
        # above; the original unsubscribed the camera proxy instead, which
        # left the extractor running after every call.
        self.landMarkProxy.unsubscribe(client)
        if (markData is None or len(markData) == 0):
            self.disX = 0
            self.disY = 0
            self.dist = 0
            self.yawAngle = 0
        else:
            # Angular position of the mark center as seen by the camera.
            wzCamera = markData[1][0][0][1]
            wyCamera = markData[1][0][0][2]
            # Apparent angular size -> metric distance via the known
            # physical size of the mark.
            angularSize = markData[1][0][0][3]
            distCameraToLandmark = self.landMarkSize / (
                2 * math.tan(angularSize / 2))
            # Camera pose in frame 2 (FRAME_ROBOT), from sensor values.
            transform = self.motionProxy.getTransform(self.cameraName, 2, True)
            transformList = almath.vectorFloat(transform)
            robotToCamera = almath.Transform(transformList)
            # Rotate toward the mark, then translate along the line of sight.
            cameraToLandmarkRotTrans = almath.Transform_from3DRotation(
                0, wyCamera, wzCamera)
            cameraToLandmarkTranslationTrans = almath.Transform(
                distCameraToLandmark, 0, 0)
            robotToLandmark = robotToCamera * \
                  cameraToLandmarkRotTrans * \
                  cameraToLandmarkTranslationTrans
            self.disX = robotToLandmark.r1_c4
            self.disY = robotToLandmark.r2_c4
            self.dist = np.sqrt(self.disX**2 + self.disY**2)
            self.yawAngle = math.atan2(self.disY, self.disX)
예제 #26
0
def CalculateDistanceNew(Weight,Alpha,Beta,IP,PORT,landmarkTheoreticalSize):
    currentCamera = "CameraBottom"
    memoryProxy = ALProxy("ALMemory", IP,PORT)
    motionProxy=ALProxy("ALMotion",IP,PORT)
    angularSize = (Weight/ 640.0) * 60.97 * almath.PI / 180
    distanceFromCameraToLandmark = landmarkTheoreticalSize / (2 * math.tan(angularSize / 2))
    # Get current camera position in NAO space.
    transform = motionProxy.getTransform(currentCamera, 2, True)
    print "transform=",transform
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)
    # Compute the rotation to point towards the landmark.
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, Beta, Alpha)

    # Compute the translation to reach the landmark.
    cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

    # Combine all transformations to get the landmark position in NAO space.
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
    print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
    print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
    print "z " + str(robotToLandmark.r3_c4) + " (in meters)"
    print "robotToLandmark=",robotToLandmark
    # 从机器人端读取相关传感器的角度和位置数据
    headAgl = motionProxy.getAngles(["HeadYaw", "HeadPitch"], True)
    # ColumnAglY = Alpha + headAgl[0]     #水平方向角度差
    # print "ColumnAglY=",ColumnAglY
    # Position3D=almath.Position3D(robotToLandmark.r1_c4,robotToLandmark.r2_c4,robotToLandmark.r3_c4)
    # Position6D=almath.Position6D(0,0,0,0,0,0)
    # almath.position6DFromPosition3DInPlace(Position3D,Position6D)
    Position6D=almath.position6DFromTransform(robotToLandmark)
    # position6D=almath.vectorFloat(Position6D)
    # ColumnAglY=position6D
    print "Position6D.wz=",Position6D.wz
    print "Position6D",Position6D
    # print "type of Position6D=",type(Position6D)
    ColumnAglY=Position6D.wz
    # print "ColumnAglY=",ColumnAglY
    return robotToLandmark.r1_c4*1000,robotToLandmark.r2_c4*1000,robotToLandmark.r3_c4*1000,ColumnAglY     #传出Torso_X和Torso_Y的毫米值
예제 #27
0
def moveArm(motionProxy, arm):
    """Trace a small closed loop with one arm in the torso frame.

    Visits five waypoints displaced from the arm's current pose and
    finishes back at the starting transform.
    """
    effector   = arm
    frame      = motion.FRAME_TORSO
    axisMask   = almath.AXIS_MASK_VEL    # just control position
    useSensorValues = False

    currentTf = motionProxy.getTransform(effector, frame, useSensorValues)

    # (dy, dz) offsets in meters for waypoints 1-5; each offset is applied
    # to a fresh copy of the starting transform.
    offsets = [(-0.05, 0.00),
               (0.00, 0.04),
               (0.04, 0.00),
               (0.00, -0.02),
               (-0.05, 0.00)]

    path = []
    for dy, dz in offsets:
        wayTf = almath.Transform(currentTf)
        wayTf.r2_c4 += dy  # y
        wayTf.r3_c4 += dz  # z
        path.append(list(wayTf.toVector()))

    # Final waypoint: come back to the original pose.
    path.append(currentTf)

    times = [0.5, 1.0, 2.0, 3.0, 4.0, 4.5] # seconds

    motionProxy.transformInterpolations(effector, frame, path, axisMask, times)
def main(robotIP, PORT=9559):
    ''' Example of a cartesian foot trajectory
    Warning: Needs a PoseInit before executing
    '''

    motionProxy  = ALProxy("ALMotion", robotIP, PORT)
    postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)

    # Wake up robot
    motionProxy.wakeUp()

    # Send robot to Stand Init
    postureProxy.goToPosture("StandInit", 0.5)

    frame      = motion.FRAME_WORLD
    axisMask   = almath.AXIS_MASK_ALL   # full control
    useSensorValues = False

    # Lower the Torso and move to the side
    effector = "Torso"
    initTf   = almath.Transform(
        motionProxy.getTransform(effector, frame, useSensorValues))
    deltaTf  = almath.Transform(0.0, -0.06, -0.03) # x, y, z
    targetTf = initTf*deltaTf
    path     = list(targetTf.toVector())
    times    = 2.0 # seconds
    motionProxy.transformInterpolations(effector, frame, path, axisMask, times)

    # LLeg motion
    effector = "LLeg"
    initTf = almath.Transform()

    try:
        initTf = almath.Transform(motionProxy.getTransform(effector, frame, useSensorValues))
    except Exception, errorMsg:
        print str(errorMsg)
        print "This example is not allowed on this robot."
        exit()
    def getNaoSpaceLandmark(self):
        # Wait for a mark to be detected.
        landmarkTheoreticalSize = 0.06  #in meters
        currentCamera = "CameraTop"
        markData = self.memory.getData("LandmarkDetected")
        while (len(markData) == 0):
            markData = self.memory.getData("LandmarkDetected")
        print markData
        # Retrieve landmark center position in radians.
        wzCamera = markData[1][0][0][1]
        wyCamera = markData[1][0][0][2]

        # Retrieve landmark angular size in radians.
        angularSize = markData[1][0][0][3]

        # Compute distance to landmark.
        distanceFromCameraToLandmark = landmarkTheoreticalSize / (
            2 * math.tan(angularSize / 2))

        # Get current camera position in NAO space.
        transform = self.motion.getTransform(currentCamera, 2, True)
        transformList = almath.vectorFloat(transform)
        robotToCamera = almath.Transform(transformList)

        # Compute the rotation to point towards the landmark.
        cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(
            0, wyCamera, wzCamera)

        # Compute the translation to reach the landmark.
        cameraToLandmarkTranslationTransform = almath.Transform(
            distanceFromCameraToLandmark, 0, 0)

        # Combine all transformations to get the landmark position in NAO space.
        robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

        print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
        print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
        print "z " + str(robotToLandmark.r3_c4) + " (in meters)"
예제 #30
0
    def test_init(self):
        """A default-constructed Transform must be the 3x4 identity."""
        t = almath.Transform()

        expected = [
            ("r1_c1", 1.0), ("r1_c2", 0.0), ("r1_c3", 0.0), ("r1_c4", 0.0),
            ("r2_c1", 0.0), ("r2_c2", 1.0), ("r2_c3", 0.0), ("r2_c4", 0.0),
            ("r3_c1", 0.0), ("r3_c2", 0.0), ("r3_c3", 1.0), ("r3_c4", 0.0),
        ]
        for attr, value in expected:
            self.assertAlmostEqual(getattr(t, attr), value)