Ejemplo n.º 1
0
def endPosture(ip, port=9559):
    """Send the robot to the 'Crouch' resting posture.

    Connects to ALRobotPosture on the given host/port and performs the
    posture transition at half speed.
    """
    posture_proxy = ALProxy("ALRobotPosture", ip, port)
    posture_proxy.goToPosture("Crouch", 0.5)
Ejemplo n.º 2
0
def yellowStickDetection(yellow_thresholdMin,
                         yellow_thresholdMax,
                         robotIP="127.0.0.1",
                         port=9559):
    MOTION = ALProxy("ALMotion", robotIP, port)
    CAMERA = ALProxy("ALVideoDevice", robotIP, port)
    TTS = ALProxy("ALTextToSpeech", robotIP, port)
    #初始化机器人姿势
    standWithStick(0.1, robotIP, port)
    MOTION.angleInterpolation(["HeadPitch", "HeadYaw"], 0, 0.5, True)
    imageContours = []
    angleFlag = 0
    stickHeight = 0.46
    robotHeight = 0.478 + 0.05
    imageWidth = 640
    imageHeight = 480
    for i in range(0, 5):
        #检测正前方
        MOTION.angleInterpolation("HeadYaw", 0, 0.5, True)
        #获取照片轮廓特征点
        time.sleep(2)
        imageContours = pictureHandle(0, yellow_thresholdMin,
                                      yellow_thresholdMax, robotIP, port)
        if (imageContours != []):
            angleFlag = 0
            break
        #检测左前方45°
        MOTION.angleInterpolation("HeadYaw", 45 * math.pi / 180.0, 0.5, True)
        time.sleep(2)
        imageContours = pictureHandle(0, yellow_thresholdMin,
                                      yellow_thresholdMax, robotIP, port)
        if (imageContours != []):
            angleFlag = 1
            break
        #检测右前方45°
        MOTION.angleInterpolation("HeadYaw", -45 * math.pi / 180.0, 0.5, True)
        time.sleep(2)
        imageContours = pictureHandle(0, yellow_thresholdMin,
                                      yellow_thresholdMax, robotIP, port)
        if (imageContours != []):
            angleFlag = -1
            break
        TTS.say("没有检测到黄杆")
    #检测5次都没有检测到,就返回False
    if (i == 4):
        return False
    #让机器人看向正前方
    MOTION.angleInterpolation("HeadYaw", 0, 0.5, True)

    TTS.say("哈哈,我看到了")
    cnt = imageContours[0]
    #获取图像的距
    M = cv2.moments(cnt)

    #黄杆重心
    cx = int(M['m10'] / M['m00'])
    cy = int(M['m01'] / M['m00'])

    #计算黄杆重心和图片中心的距离,正:黄杆在中心左边   负:黄杆在中心右边
    disX = imageWidth / 2 - cx
    disY = cy - imageHeight / 2
    #计算黄杆重心和图片中心的偏转角度,机器人水平视角为60.97°
    stickAngleH = 60.97 / 640 * disX
    stickAngleV = 47.64 / 480 * disY + 1.2
    if (angleFlag == 1):
        stickAngleH = stickAngleH + 45
    elif (angleFlag == -1):
        stickAngleH = stickAngleH - 45
    else:
        stickAngleH = stickAngleH

    distStick = (robotHeight - stickHeight / 2) / (math.tan(
        stickAngleV * math.pi / 180.0))

    #--------------------------测试代码-----------------------------------#
    print "distStick = ", distStick
    print "stickAngleH = ", stickAngleH
    #imgTest = cv2.imread("camImage.png",1)
    #cv2.circle(imgTest,(cx,cy), 5, (0,0,255), -1)
    #cv2.namedWindow("image",cv2.WINDOW_NORMAL)
    #cv2.namedWindow("image2",cv2.WINDOW_NORMAL)
    #cv2.namedWindow("image3",cv2.WINDOW_NORMAL)
    #cv2.imshow("image",dilation)
    #cv2.imshow("image",imgTest)
    #cv2.imshow("image3",th1)
    #cv2.waitKey(0)
    #--------------------------测试结束-----------------------------------#
    return [distStick, stickAngleH * math.pi / 180]
import time
from naoqi import ALProxy


def main(robotIP):
    """Connect to the robot, stand it up, and start a non-blocking walk.

    Creates ALMotion and ALRobotPosture proxies on *robotIP* (fixed
    port 9559), moves the robot to the StandInit posture, then issues
    a moveTo via the `post` wrapper so the call returns immediately.

    NOTE(review): `sys.exit` is used below but `sys` is not imported in
    this snippet -- confirm the import exists in the full script.
    """
    PORT = 9559

    try:
        motionProxy = ALProxy("ALMotion", robotIP, PORT)
    except Exception,e:
        print "Could not create proxy to ALMotion"
        print "Error was: ",e
        sys.exit(1)

    try:
        postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)
    except Exception, e:
        print "Could not create proxy to ALRobotPosture"
        print "Error was: ", e
        # NOTE(review): unlike the ALMotion handler above, this one does
        # not exit, so the goToPosture call below would raise NameError
        # when the proxy creation fails -- confirm intended.

    # Send NAO to Pose Init
    postureProxy.goToPosture("StandInit", 0.5)

    #####################################
    # A small example using getFootSteps
    #####################################

    # First call of move API
    # with post prefix to not be blocking here.
    motionProxy.post.moveTo(0.3, 0.0, 0.5)
Ejemplo n.º 4
0
# -*- encoding: UTF-8 -*-

import sys
import time
import random
import color
import motion
from naoqi import ALProxy

# Module-level proxies to the NAOqi services used by this script.
# NOTE(review): `config` is referenced below but never imported in this
# snippet -- confirm `import config` exists in the full script.
tts = ALProxy("ALTextToSpeech",config.ipnao,9559)
sr = ALProxy("ALSpeechRecognition",config.ipnao,9559)
memProxy = ALProxy("ALMemory",config.ipnao,9559)
# NOTE(review): this rebinding shadows the `motion` module imported above.
motion = ALProxy("ALMotion",config.ipnao,9559)

# Restrict recognition to a closed French vocabulary (yes/no plus
# colour names); the second argument True enables word spotting.
sr.setVocabulary(["oui","non","rouge","bleu","vert","jaune","orange","rose","blanc"],True)
sr.setAudioExpression(True)   # audible beep feedback while listening
sr.setVisualExpression(True)  # eye-LED feedback while listening

fini = False            # loop-termination flag (used elsewhere)
x = "WordRecognized"    # ALMemory key watched for recognition events
cpt = 3                 # attempts counter (used elsewhere)

def onWordRecognized(x,total):
    """Ici on regarde le mot reconnu si l'indice de fiabilité est supérieur à 0.5 """
    retVal = memProxy.getData("WordRecognized")
    print x, retVal[0], retVal[1]
    print total
    if(retVal[0] != "" and retVal[1]>0.5):
      if(retVal[0] == "rouge"):
		  return True
      elif retVal[0] == "bleu":
Ejemplo n.º 5
0
def redBallDetection(red_thresholdMin,
                     red_thresholdMax,
                     method=1,
                     addAngle=0,
                     robotIP="127.0.0.1",
                     port=9559):
    MOTION = ALProxy("ALMotion", robotIP, port)
    CAMERA = ALProxy("ALVideoDevice", robotIP, port)
    TTS = ALProxy("ALTextToSpeech", robotIP, port)
    #初始化机器人姿势
    MOTION.setMoveArmsEnabled(False, False)
    standWithStick(0.3, robotIP, port)
    MOTION.angleInterpolation("HeadPitch", 0, 0.5, True)
    MOTION.angleInterpolation("HeadYaw", 0, 0.5, True)

    MOTION.angleInterpolation("HeadPitch", addAngle * math.pi / 180.0, 0.5,
                              False)

    angleFlag = 0
    cameraID = 1
    val = []
    robotHeight = 0.478

    TTS.say("开始检测红球")
    if method == 0:
        #首先识别正前方,然后是左边45度,最后是右边45度,三个方向由标志位angleFlag进行标记
        for i in range(0, 5):
            #先打开下方摄像头进行检测
            CAMERA.setActiveCamera(1)
            cameraID = 1
            val = searchBall(0, True, robotIP, port)
            time.sleep(1)
            if (val and isinstance(val, list) and len(val) >= 2):
                angleFlag = 0
                break

            val = searchBall(45, True, robotIP, port)
            time.sleep(1)
            if (val and isinstance(val, list) and len(val) >= 2):
                angleFlag = 1
                break

            val = searchBall(-45, True, robotIP, port)
            time.sleep(1)
            if (val and isinstance(val, list) and len(val) >= 2):
                angleFlag = -1
                break

            #先打开上方摄像头进行检测
            CAMERA.setActiveCamera(0)
            cameraID = 1
            val = searchBall(0, True, robotIP, port)
            time.sleep(1)
            if (val and isinstance(val, list) and len(val) >= 2):
                angleFlag = -1
                break

            val = searchBall(45, True, robotIP, port)
            time.sleep(1)
            if (val and isinstance(val, list) and len(val) >= 2):
                angleFlag = -1
                break

            val = searchBall(-45, True, robotIP, port)
            time.sleep(1)
            if (val and isinstance(val, list) and len(val) >= 2):
                angleFlag = -1
                break
            TTS.say("球在哪里呢")
        #如果5次都没有检测到就返回False
        if (i == 4):
            TTS.say("红球检测失败")
            return False
        #检测完红球后,最后看向正前方
        MOTION.angleInterpolation("HeadYaw", 0, 0.5, True)
        time.sleep(0.5)
        TTS.say("哈哈!我看到了!")
        #看到球之后,获取球的水平垂直偏角
        ballinfo = val[1]
        thetah = ballinfo[0]
        thetav = ballinfo[1]

        #根据红球方向标志位确定红球具体角度值
        if (cameraID == 0):
            thetav = thetav + 39.7 * math.pi / 180.0 + addAngle * math.pi / 180.0
        elif (cameraID == 1):
            thetav = thetav + 1.2 * math.pi / 180.0 + addAngle * math.pi / 180.0
            robotHeight = robotHeight + 0.05
        if (angleFlag == 1):
            theta = thetah + 45 * math.pi / 180.0
        elif (angleFlag == -1):
            theta = thetah - 45 * math.pi / 180.0
        else:
            theta = thetah

        distBall = robotHeight / (math.tan(thetav))
        return [distBall, theta]

    elif method == 1:

        redBallContours = []
        imageWidth = 640
        imageHeight = 480
        for i in range(0, 5):
            CAMERA.setActiveCamera(1)
            cameraID = 1
            MOTION.angleInterpolation("HeadYaw", 0, 0.5, True)
            redBallContours = pictureHandle(cameraID, red_thresholdMin,
                                            red_thresholdMax, robotIP, port)
            if (redBallContours != []):
                angleFlag = 0
                break
            #检测左前方45°
            MOTION.angleInterpolation("HeadYaw", 45 * math.pi / 180, 0.5, True)
            redBallContours = pictureHandle(cameraID, red_thresholdMin,
                                            red_thresholdMax, robotIP, port)
            if (redBallContours != []):
                angleFlag = 1
                break
            #检测右前方45°
            MOTION.angleInterpolation("HeadYaw", -45 * math.pi / 180, 0.5,
                                      True)
            redBallContours = pictureHandle(cameraID, red_thresholdMin,
                                            red_thresholdMax, robotIP, port)
            if (redBallContours != []):
                angleFlag = -1
                break

            CAMERA.setActiveCamera(0)
            cameraID = 0
            MOTION.angleInterpolation("HeadYaw", 0, 0.5, True)
            redBallContours = pictureHandle(cameraID, red_thresholdMin,
                                            red_thresholdMax, robotIP, port)
            if (redBallContours != []):
                angleFlag = 0
                break
            #检测左前方45°
            MOTION.angleInterpolation("HeadYaw", 45 * math.pi / 180, 0.5, True)
            redBallContours = pictureHandle(cameraID, red_thresholdMin,
                                            red_thresholdMax, robotIP, port)
            if (redBallContours != []):
                angleFlag = 1
                break
            #检测右前方45°
            MOTION.angleInterpolation("HeadYaw", -45 * math.pi / 180, 0.5,
                                      True)
            redBallContours = pictureHandle(cameraID, red_thresholdMin,
                                            red_thresholdMax, robotIP, port)
            if (redBallContours != []):
                angleFlag = -1
                break
            TTS.say("球在哪里呢")
        #检测5次都没有检测到,就返回False
        if (i == 4):
            TTS.say("红球检测失败")
            return False
        #让机器人看向正前方
        MOTION.angleInterpolation("HeadYaw", 0, 0.5, True)
        TTS.say("哈哈,我看到了")

        cnt = redBallContours[0]
        #获取图像的距
        M = cv2.moments(cnt)

        #红球重心
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

        #计算红球重心和图片中心的距离,正:红球在中心左边   负:红球在中心右边
        disX = imageWidth / 2 - cx
        disY = cy - imageHeight / 2

        #计算红球重心和图片中心的偏转角度,机器人水平视角为60.97°
        ballAngleH = 60.97 / 640 * disX
        ballAngleV = 47.64 / 480 * disY
        if (cameraID == 0):
            ballAngleV = ballAngleV + 1.2 + addAngle
        elif (cameraID == 1):
            ballAngleV = ballAngleV + 39.7 + addAngle
        if (angleFlag == 1):
            ballAngleH = ballAngleH + 45
        elif (angleFlag == -1):
            ballAngleH = ballAngleH - 45
        else:
            ballAngleH = ballAngleH

        #机器人到球的距离
        distBall = robotHeight / (math.tan(ballAngleV * math.pi / 180.0))

        #---------------------------------测试代码--------------------------------------------#
        print "distBall = ", distBall
        print "ballAngleH = ", ballAngleH * math.pi / 180.0
        #imgTest = cv2.imread("camImage.png",1)
        #cv2.circle(imgTest,(cx,cy), 5, (0,255,255), -1)
        #cv2.namedWindow("image",cv2.WINDOW_NORMAL)
        #cv2.namedWindow("image2",cv2.WINDOW_NORMAL)
        #cv2.namedWindow("image3",cv2.WINDOW_NORMAL)
        #cv2.imshow("image",dilation)
        #cv2.imshow("image",imgTest)
        #cv2.imshow("image3",th1)
        #cv2.waitKey(0)
        #---------------------------------测试结束--------------------------------------------#
        return [distBall, ballAngleH * math.pi / 180.0]
Ejemplo n.º 6
0
 def new(robotIP, port=9559):
     """Create TTS/motion/posture proxies and hand them to Low_Level.

     NOTE(review): scraped fragment -- the 1-space indentation suggests
     this was a method or nested def in the original file.
     """
     tts = ALProxy("ALTextToSpeech", robotIP, port)
     motionProxy  = ALProxy("ALMotion", robotIP, port)
     postureProxy = ALProxy("ALRobotPosture", robotIP, port)
     Low_Level(motionProxy, postureProxy, tts)
    def exe(self, args=None, addr=None):
        """Stop any ongoing robot motion.

        *args* and *addr* are accepted for interface compatibility but
        unused; connects to ALMotion using the module-level Settings
        host/port and calls stopMove().
        """
        # create proxy
        motion = ALProxy("ALMotion", Settings.naoHostName, Settings.naoPort)
        motion.stopMove()
Ejemplo n.º 8
0
def Scan(state):
    """Scan domino-tile barcodes with a NAO camera and collect them.

    Streams images from the robot camera selected by *state*
    ("unsure" -> top camera 0, "deal"/"boneyard" -> bottom camera 1),
    decodes barcodes with pyzbar, and prepends each newly seen barcode
    (as a list of digits) and its pixel position to the module-level
    `bar` and `posBar` lists.  Returns (bar, posBar) once 7 tiles have
    been seen in the "deal" state, or 1 tile in "boneyard"/"unsure";
    pressing 'q' in the preview window aborts the loop instead.

    NOTE(review): the bare `except` blocks below only print a message,
    so a failed proxy/camera call leads to a NameError further down --
    confirm this is acceptable for the demo setting.
    """
    global tracker
    global cameraNo, bar, z, headMovement, posBar
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-o", "--output", type=str, default="barcodes.csv",
            help="path to output CSV file containing barcodes")
    args = vars(ap.parse_args())

    try:
        # Proxy to camera and motion
        print("Starting video stream...")
        camera = ALProxy("ALVideoDevice", "Mario.local", 9559)
        headMovement = ALProxy("ALMotion", "Mario.local", 9559)
        tracker = ALProxy("ALTracker", "Mario.local", 9559)
    except:
        print("Error connecting to proxies")

    # NOTE(review): this local `csv` shadows the stdlib csv module name.
    csv = open(args["output"], "w")
    found = set()   # barcode payloads already recorded this run
    targetName = "Face"
    faceWidth = 0.1 # default
    tracker.registerTarget(targetName, faceWidth)
    # Subscribe to camera
    if state == "unsure":
        cameraNo = 0
    elif state == "deal" or state == "boneyard":
        cameraNo = 1
    # mainDomino.cameraNo
    # mainDomino.cameraNo
    # NOTE(review): resolution ID 3 gives 1280x960 as the sizes below
    # assume, but the name AL_kQVGA is misleading (QVGA is a different
    # ID) -- confirm against the ALVideoDevice constant table.
    AL_kQVGA = 3 # 1280x960
    AL_kBGRColorSpace = 13

    try:
        captureDevice = camera.subscribeCamera("captureDevice", cameraNo, AL_kQVGA, AL_kBGRColorSpace, 10)
    except:
        print("Error subscribing to camera")
    # init = False
    # Create image
    width = 1280
    height = 960
    image = np.zeros((height, width, 3), np.uint8)

    bar = []
    posBar = []

    while True:
        # Position head at center
        moveHeadCenter()


        # Get the image
        try:
            result = camera.getImageRemote(captureDevice)
        except:
            print("Error getting image remotely")

        if result == None:
            print 'Error: Cannot capture.'
        elif result[6] == None:
            print 'Error: No image data string.'
        else:
            # Translate value to mat: copy the raw byte string returned
            # in result[6] pixel-by-pixel into the numpy image buffer.
            values = map(ord, list(result[6]))
            i = 0
            for y in range(0, height):
                for x in range(0, width):
                    image.itemset((y, x, 0), values[i + 0])
                    image.itemset((y, x, 1), values[i + 1])
                    image.itemset((y, x, 2), values[i + 2])
                    i += 3

            # if cameraNo == 1:
            #     tiles = image[300:800, 0:1280]
            # else:
            tiles = image

            grey = cv2.cvtColor(tiles, cv2.COLOR_BGR2GRAY) # convert image to grey
            ret,thresh1 = cv2.threshold(grey,127,255,cv2.THRESH_BINARY)
            ret3,th3 = cv2.threshold(grey,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

            barcodes = pyzbar.decode(th3) # find and decode barcodes

            # Loop over detected barcodes
            for barcode in barcodes:

                (x, y, w, h) = barcode.rect
                # Convert data into string
                barcodeData = barcode.data.decode("utf-8")
                barcodeType = barcode.type

                # Put barcode data, x and y coords on the image
                text = "{}".format(barcodeData)
                cv2.putText(image, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

                if barcodeData not in found:
                    found.add(barcodeData)
                    # Put new found barcodes into an array
                    barcode = map(int, barcodeData)
                    # Put the barcode x,y position into array
                    # NOTE(review): `z` is a module-level global that
                    # must be set elsewhere before Scan() runs.
                    posXYZ = [x, y, z]
                    # Add info to barcode position array, so we know where the barcode was found
                    # (payloads longer than 2 digits are not domino tiles)
                    if len(barcode) > 2:
                        break
                    else:
                        bar.insert(0, barcode)
                        posBar.insert(0, posXYZ)
                    print bar
                    print posBar
                    #moveHeadLeft()
                   # moveHeadRight()

                    if state == "deal":
                        # If we haven't seen all tiles that have been dealt, move head from side to side.
                        # if len(bar) < 7:
                        #     moveHeadLeft()
                        #     moveHeadRight()
                        # If we have seen them all, then stop video stream.
                        if len(bar) == 7:

                            # tracker.track(targetName)
                            camera.releaseImage(captureDevice)
                            camera.unsubscribe(captureDevice)
                            # camera.stop()
                            csv.close()
                            cv2.destroyAllWindows()
                            #camera.stop()
                            return bar, posBar
                    elif state == "boneyard" or state == "unsure":
                        if len(bar) == 1:
                            # tracker.track(targetName)
                            camera.releaseImage(captureDevice)
                            camera.unsubscribe(captureDevice)
                            csv.close()
                            cv2.destroyAllWindows()
                            #camera.stop()
                            # camera.stop()
                            return bar, posBar


            # Show video stream

        cv2.imshow("Barcode Scanner", th3)
        #cv2.imshow("Barcode Scanner", image)
        #cv2.imshow("new image", new_image)

            # cv2.waitKey(1)
        key = cv2.waitKey(1) & 0xFF


            # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # close the output CSV file do a bit of cleanup
    # print("[INFO] cleaning up...")
    csv.close()
    cv2.destroyAllWindows()
    camera.stop()
Ejemplo n.º 9
0
from naoqi import ALProxy

# Open the tablet's web view and point it at the hosted front-end page.
tablet = ALProxy("ALTabletService", "192.168.1.19", 9559)
tablet.showWebview()
tablet.loadUrl("http://bot-front.eu-gb.mybluemix.net")
Ejemplo n.º 10
0
#                   report.txt for details.
# How to run    :   Fix ip variable, then: python run.py

from time import time, sleep
import random, sys
from naoqi import ALModule, ALProxy, ALBroker

# our modules
from gestureRecognition import *

# global variables
# NOTE(review): hard-coded robot address; adjust before running.
ip = "192.168.1.102"
port = 9559

# proxies -- one connection per NAOqi service used by this script
postureProxy = ALProxy("ALRobotPosture", ip, port)
motionProxy = ALProxy("ALMotion", ip, port)
tts = ALProxy("ALTextToSpeech", ip, port)
memory = ALProxy("ALMemory", ip, port)
aup = ALProxy("ALAudioPlayer", ip, port)
LED = ALProxy("ALLeds", ip, port)


################################################################################
# Communication functions
################################################################################
# blink eyes by briefly toggling the face LEDs off and back on
def blinkEyes():
    """Blink the eye LEDs once: off for 150 ms, then back on."""
    LED.off("FaceLeds")
    sleep(0.15)
    LED.on("FaceLeds")
Ejemplo n.º 11
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from naoqi import ALProxy

# Create a proxy to the photo-capture service on the robot
photo = ALProxy("ALPhotoCapture", "192.168.10.74", 9559)

# Select the picture format ("bmp", "dib", "jpeg", "jpg", "jpe", "png",
# "pbm", "pgm", "ppm", "sr", "ras", "tiff" and "tif" are accepted)
photo.setPictureFormat("jpeg")

# Select the resolution ID
# NOTE(review): the original comment listed 0 = kQQVGA, 1 = kQVGA,
# 2 = kVGA, but the value used here is 4 -- confirm intended resolution.
photo.setResolution(4)

# Capture one image into the given folder with the given basename
photo.takePicture("/home/nao/recordings/cameras/", "image1")

Ejemplo n.º 12
0
# Temporal smoothing window / face-crop offsets for the classifier.
frame_window = 10
emotion_offsets = (20, 40)

# loading models
# NOTE(review): load_detection_model/load_model and the *_path
# variables are defined earlier in the full script; scraped fragment.
face_detection = load_detection_model(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)

# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]

# starting lists for calculating modes
emotion_window = []
emotion_window1 = []

# starting video streaming
# NOTE(review): camProxy and the vision_definitions constants are set
# up but never used below -- frames come from cv2.VideoCapture(0).
camProxy = ALProxy("ALVideoDevice", "127.0.0.1", 55955)
cameraIndex = 0
resolution = vision_definitions.kVGA
colorSpace = vision_definitions.kRGBColorSpace
resolution = 2   # overrides the kVGA constant assigned above
colorSpace = 3   # overrides the kRGBColorSpace constant assigned above
cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)
if video_capture.isOpened():
 # NOTE(review): read() returns (ret, frame); here `frame` is bound to
 # the whole tuple -- confirm against the loop that follows.
 frame = video_capture.read()
else:
 rval = False
while True:
    rval, frame = video_capture.read()
    gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
Ejemplo n.º 13
0
def main(robotIP, port):
    names = list()
    times = list()
    keys = list()

    names.append("HeadPitch")
    times.append([
        0.833333, 1.16667, 1.5, 1.83333, 2.16667, 2.83333, 3.16667, 3.5,
        3.83333, 4.16667, 4.5, 5.16667, 5.5
    ])
    keys.append([[-0.00730551, [3, -0.277778, 0], [3, 0.111111, 0]],
                 [-0.00730551, [3, -0.111111, 0], [3, 0.111111, 0]],
                 [-0.00730551, [3, -0.111111, 0], [3, 0.111111, 0]],
                 [-0.00730551, [3, -0.111111, 0], [3, 0.111111, 0]],
                 [-0.00730551, [3, -0.111111, 0], [3, 0.222222, 0]],
                 [-0.00730551, [3, -0.222222, 0], [3, 0.111111, 0]],
                 [-0.00730551, [3, -0.111111, 0], [3, 0.111111, 0]],
                 [-0.00730551, [3, -0.111111, 0], [3, 0.111111, 0]],
                 [-0.00730551, [3, -0.111111, 0], [3, 0.111111, 0]],
                 [-0.00730551, [3, -0.111111, 0], [3, 0.111111, 0]],
                 [-0.00730551, [3, -0.111111, 0], [3, 0.222222, 0]],
                 [-0.00730551, [3, -0.222222, 0], [3, 0.111111, 0]],
                 [-0.00730551, [3, -0.111111, 0], [3, 0, 0]]])

    names.append("HeadYaw")
    times.append([
        0.833333, 1.16667, 1.5, 1.83333, 2.16667, 2.83333, 3.16667, 3.5,
        3.83333, 4.16667, 4.5, 5.16667, 5.5
    ])
    keys.append(
        [[0, [3, -0.277778, 0], [3, 0.111111, 0]],
         [-0.441568, [3, -0.111111, 0.104138], [3, 0.111111, -0.104138]],
         [-0.624828, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.403339, [3, -0.111111, -0.104138], [3, 0.111111, 0.104138]],
         [0, [3, -0.111111, -0.114241], [3, 0.222222, 0.228481]],
         [0.624828, [3, -0.222222, 0], [3, 0.111111, 0]],
         [0, [3, -0.111111, 0.177733], [3, 0.111111, -0.177733]],
         [-0.441568, [3, -0.111111, 0.104138], [3, 0.111111, -0.104138]],
         [-0.624828, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.403339, [3, -0.111111, -0.104138], [3, 0.111111, 0.104138]],
         [0, [3, -0.111111, -0.114241], [3, 0.222222, 0.228481]],
         [0.624828, [3, -0.222222, 0], [3, 0.111111, 0]],
         [0.00643452, [3, -0.111111, 0], [3, 0, 0]]])

    names.append("LAnklePitch")
    times.append([5.5])
    keys.append([[-0.340758, [3, -1.83333, 0], [3, 0, 0]]])

    names.append("LAnkleRoll")
    times.append([5.5])
    keys.append([[-0.00211309, [3, -1.83333, 0], [3, 0, 0]]])

    names.append("LElbowRoll")
    times.append([
        0.833333, 1.16667, 1.5, 2.16667, 2.5, 2.83333, 3.16667, 3.5, 3.83333,
        4.5, 4.83333, 5.16667, 5.5
    ])
    keys.append([[-1.12892, [3, -0.277778, 0], [3, 0.111111, 0]],
                 [-1.12944, [3, -0.111111, 0], [3, 0.111111, 0]],
                 [-1.10481, [3, -0.111111, 0], [3, 0.222222, 0]],
                 [-1.4848, [3, -0.222222, 0], [3, 0.111111, 0]],
                 [-1.39704, [3, -0.111111, 0], [3, 0.111111, 0]],
                 [-1.39704, [3, -0.111111, 0], [3, 0.111111, 0]],
                 [-1.12892, [3, -0.111111, 0], [3, 0.111111, 0]],
                 [-1.12944, [3, -0.111111, 0], [3, 0.111111, 0]],
                 [-1.10481, [3, -0.111111, 0], [3, 0.222222, 0]],
                 [-1.4848, [3, -0.222222, 0], [3, 0.111111, 0]],
                 [-1.39704, [3, -0.111111, 0], [3, 0.111111, 0]],
                 [-1.39704, [3, -0.111111, 0], [3, 0.111111, 0]],
                 [-1.01329, [3, -0.111111, 0], [3, 0, 0]]])

    names.append("LElbowYaw")
    times.append([
        0.833333, 1.16667, 1.5, 2.16667, 2.5, 2.83333, 3.16667, 3.5, 3.83333,
        4.5, 4.83333, 5.16667, 5.5
    ])
    keys.append(
        [[-0.0381204, [3, -0.277778, 0], [3, 0.111111, 0]],
         [-0.0482739, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.000871307, [3, -0.111111, 0], [3, 0.222222, 0]],
         [-0.464231, [3, -0.222222, 0.00273673], [3, 0.111111, -0.00136837]],
         [-0.4656, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.4656, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.0381204, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.0482739, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.000871307, [3, -0.111111, 0], [3, 0.222222, 0]],
         [-0.464231, [3, -0.222222, 0.00273673], [3, 0.111111, -0.00136837]],
         [-0.4656, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.4656, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-1.38861, [3, -0.111111, 0], [3, 0, 0]]])

    names.append("LHand")
    times.append([
        0.833333, 1.16667, 1.5, 2.16667, 2.5, 2.83333, 3.16667, 3.5, 3.83333,
        4.5, 4.83333, 5.16667, 5.5
    ])
    keys.append(
        [[0.29, [3, -0.277778, 0], [3, 0.111111, 0]],
         [0.29, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.314233, [3, -0.111111, -0.024233], [3, 0.222222, 0.048466]],
         [0.740441, [3, -0.222222, -0.112393], [3, 0.111111, 0.0561963]],
         [0.82, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.82, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.29, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.29, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.314233, [3, -0.111111, -0.024233], [3, 0.222222, 0.048466]],
         [0.740441, [3, -0.222222, -0.112393], [3, 0.111111, 0.0561963]],
         [0.82, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.82, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.259986, [3, -0.111111, 0], [3, 0, 0]]])

    names.append("LHipPitch")
    times.append([5.5])
    keys.append([[-0.45, [3, -1.83333, 0], [3, 0, 0]]])

    names.append("LHipRoll")
    times.append([5.5])
    keys.append([[0.00848469, [3, -1.83333, 0], [3, 0, 0]]])

    names.append("LHipYawPitch")
    times.append([5.5])
    keys.append([[-0.00730551, [3, -1.83333, 0], [3, 0, 0]]])

    names.append("LKneePitch")
    times.append([5.5])
    keys.append([[0.709886, [3, -1.83333, 0], [3, 0, 0]]])

    names.append("LShoulderPitch")
    times.append([
        0.833333, 1.16667, 1.5, 2.16667, 2.5, 2.83333, 3.16667, 3.5, 3.83333,
        4.5, 4.83333, 5.16667, 5.5
    ])
    keys.append(
        [[0.896904, [3, -0.277778, 0], [3, 0.111111, 0]],
         [0.901058, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.841751, [3, -0.111111, 0.0593063], [3, 0.222222, -0.118613]],
         [-0.0317612, [3, -0.222222, 0], [3, 0.111111, 0]],
         [0.0200533, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.0200533, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.896904, [3, -0.111111, -0.00415387], [3, 0.111111, 0.00415387]],
         [0.901058, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.841751, [3, -0.111111, 0.0593063], [3, 0.222222, -0.118613]],
         [-0.0317612, [3, -0.222222, 0], [3, 0.111111, 0]],
         [0.0200533, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.0200533, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.39796, [3, -0.111111, 0], [3, 0, 0]]])

    names.append("LShoulderRoll")
    times.append([
        0.833333, 1.16667, 1.5, 2.16667, 2.5, 2.83333, 3.16667, 3.5, 3.83333,
        4.5, 4.83333, 5.16667, 5.5
    ])
    keys.append(
        [[0.323134, [3, -0.277778,
                     0], [3, 0.111111,
                          0]], [0.322036, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.331618, [3, -0.111111, 0],
          [3, 0.222222,
           0]],
         [-0.243711, [3, -0.222222, 0],
          [3, 0.111111,
           0]], [0.172022, [3, -0.111111, -0.162022], [3, 0.111111, 0.162022]],
         [0.728418, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.323134, [3, -0.111111, 0.00109782], [3, 0.111111, -0.00109782]],
         [0.322036, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.331618, [3, -0.111111, 0], [3, 0.222222, 0]],
         [-0.243711, [3, -0.222222, 0], [3, 0.111111, 0]],
         [0.172022, [3, -0.111111, -0.162022], [3, 0.111111, 0.162022]],
         [0.728418, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.300643, [3, -0.111111, 0], [3, 0, 0]]])

    names.append("LWristYaw")
    times.append([
        0.833333, 1.16667, 1.5, 2.16667, 2.5, 2.83333, 3.16667, 3.5, 3.83333,
        4.5, 4.83333, 5.16667, 5.5
    ])
    keys.append(
        [[-0.28573, [3, -0.277778, 0], [3, 0.111111, 0]],
         [-0.277897, [3, -0.111111, -0.00783302], [3, 0.111111, 0.00783302]],
         [0.305228, [3, -0.111111, -0.15925], [3, 0.222222, 0.3185]],
         [1.15535, [3, -0.222222, 0], [3, 0.111111, 0]],
         [1.15175, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.15175, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.28573, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.277897, [3, -0.111111, -0.00783302], [3, 0.111111, 0.00783302]],
         [0.305228, [3, -0.111111, -0.15925], [3, 0.222222, 0.3185]],
         [1.15535, [3, -0.222222, 0], [3, 0.111111, 0]],
         [1.15175, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.15175, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.00264126, [3, -0.111111, 0], [3, 0, 0]]])

    names.append("RAnklePitch")
    times.append([5.5])
    keys.append([[-0.342235, [3, -1.83333, 0], [3, 0, 0]]])

    names.append("RAnkleRoll")
    times.append([5.5])
    keys.append([[-0.00532818, [3, -1.83333, 0], [3, 0, 0]]])

    names.append("RElbowRoll")
    times.append([
        0.833333, 1.16667, 1.5, 1.83333, 2.16667, 2.5, 2.83333, 3.16667, 3.5,
        3.83333, 4.16667, 4.5, 4.83333, 5.16667, 5.5
    ])
    keys.append(
        [[1.46317, [3, -0.277778, 0], [3, 0.111111, 0]],
         [1.48296, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.39704, [3, -0.111111, 0.0859244], [3, 0.111111, -0.0859244]],
         [0.666716, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.11667, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.10481, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.10481, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.46317, [3, -0.111111, -0.0197868], [3, 0.111111, 0.0197868]],
         [1.48296, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.39704, [3, -0.111111, 0.0859244], [3, 0.111111, -0.0859244]],
         [0.666716, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.11667, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.10481, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.10481, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.01181, [3, -0.111111, 0], [3, 0, 0]]])

    names.append("RElbowYaw")
    times.append([
        0.833333, 1.16667, 1.5, 2.16667, 2.5, 2.83333, 3.16667, 3.5, 3.83333,
        4.5, 4.83333, 5.16667, 5.5
    ])
    keys.append(
        [[0.47155, [3, -0.277778, 0], [3, 0.111111, 0]],
         [0.470375, [3, -0.111111, 0.000991634], [3, 0.111111, -0.000991634]],
         [0.4656, [3, -0.111111, 0.00477521], [3, 0.222222, -0.00955043]],
         [-0.00480866, [3, -0.222222, 0], [3, 0.111111, 0]],
         [0.000871307, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.000871307, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.47155, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.470375, [3, -0.111111, 0.000991634], [3, 0.111111, -0.000991634]],
         [0.4656, [3, -0.111111, 0.00477521], [3, 0.222222, -0.00955043]],
         [-0.00480866, [3, -0.222222, 0], [3, 0.111111, 0]],
         [0.000871307, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.000871307, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.38165, [3, -0.111111, 0], [3, 0, 0]]])

    names.append("RHand")
    times.append([
        0.833333, 1.16667, 1.5, 2.16667, 2.5, 2.83333, 3.16667, 3.5, 3.83333,
        4.5, 4.83333, 5.16667, 5.5
    ])
    keys.append(
        [[0.751334, [3, -0.277778, 0], [3, 0.111111, 0]],
         [0.753008, [3, -0.111111, -0.001674], [3, 0.111111, 0.001674]],
         [0.82, [3, -0.111111, 0], [3, 0.222222, 0]],
         [0.308975, [3, -0.222222, 0], [3, 0.111111, 0]],
         [0.314233, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.314233, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.751334, [3, -0.111111, -0.001674], [3, 0.111111, 0.001674]],
         [0.753008, [3, -0.111111, -0.001674], [3, 0.111111, 0.001674]],
         [0.82, [3, -0.111111, 0], [3, 0.222222, 0]],
         [0.308975, [3, -0.222222, 0], [3, 0.111111, 0]],
         [0.314233, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.314233, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.256119, [3, -0.111111, 0], [3, 0, 0]]])

    names.append("RHipPitch")
    times.append([5.5])
    keys.append([[-0.45526, [3, -1.83333, 0], [3, 0, 0]]])

    names.append("RHipRoll")
    times.append([5.5])
    keys.append([[-0.00467092, [3, -1.83333, 0], [3, 0, 0]]])

    names.append("RHipYawPitch")
    times.append([5.5])
    keys.append([[-0.00847434, [3, -1.83333, 0], [3, 0, 0]]])

    names.append("RKneePitch")
    times.append([5.5])
    keys.append([[0.708487, [3, -1.83333, 0], [3, 0, 0]]])

    names.append("RShoulderPitch")
    times.append([
        0.833333, 1.16667, 1.5, 1.83333, 2.16667, 2.5, 2.83333, 3.16667, 3.5,
        3.83333, 4.16667, 4.5, 4.83333, 5.16667, 5.5
    ])
    keys.append(
        [[-0.0943158, [3, -0.277778, 0], [3, 0.111111, 0]],
         [-0.01006, [3, -0.111111, -0.0190615], [3, 0.111111, 0.0190615]],
         [0.0200533, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.10472, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.850918, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.841751, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.841751, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.0943158, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.01006, [3, -0.111111, -0.0190615], [3, 0.111111, 0.0190615]],
         [0.0200533, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.10472, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.850918, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.841751, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.841751, [3, -0.111111, 0], [3, 0.111111, 0]],
         [1.3964, [3, -0.111111, 0], [3, 0, 0]]])

    names.append("RShoulderRoll")
    times.append([
        0.833333, 1.16667, 1.5, 1.83333, 2.16667, 2.5, 2.83333, 3.16667, 3.5,
        3.83333, 4.16667, 4.5, 4.83333, 5.16667, 5.5
    ])
    keys.append(
        [[0.246735, [3, -0.277778, 0], [3, 0.111111, 0]],
         [-0.0620962, [3, -0.111111, 0.162526], [3, 0.111111, -0.162526]],
         [-0.728418, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.267035, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.336307, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.331618, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.331618, [3, -0.111111, 0], [3, 0.111111, 0]],
         [0.246735, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.0620962, [3, -0.111111, 0.162526], [3, 0.111111, -0.162526]],
         [-0.728418, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.267035, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.336307, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.331618, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.331618, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.294826, [3, -0.111111, 0], [3, 0, 0]]])

    names.append("RWristYaw")
    times.append([
        0.833333, 1.16667, 1.5, 2.16667, 2.5, 2.83333, 3.16667, 3.5, 3.83333,
        4.5, 4.83333, 5.16667, 5.5
    ])
    keys.append(
        [[-1.17584, [3, -0.277778, 0], [3, 0.111111, 0]],
         [-1.17422, [3, -0.111111, -0.00161095], [3, 0.111111, 0.00161095]],
         [-1.15175, [3, -0.111111, -0.0224728], [3, 0.222222, 0.0449456]],
         [-0.302801, [3, -0.222222, 0], [3, 0.111111, 0]],
         [-0.305228, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.305228, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-1.17584, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-1.17422, [3, -0.111111, -0.00161095], [3, 0.111111, 0.00161095]],
         [-1.15175, [3, -0.111111, -0.0224728], [3, 0.222222, 0.0449456]],
         [-0.302801, [3, -0.222222, 0], [3, 0.111111, 0]],
         [-0.305228, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.305228, [3, -0.111111, 0], [3, 0.111111, 0]],
         [-0.000403613, [3, -0.111111, 0], [3, 0, 0]]])

    try:
        motion = ALProxy("ALMotion", robotIP, port)
        motion.angleInterpolationBezier(names, times, keys)
        posture = ALProxy("ALRobotPosture", robotIP, port)
        #posture.goToPosture("StandInit", 1.0)
    except BaseException, err:
        print err
                      type="string",
                      dest="IP",
                      default="127.0.0.1")
    parser.add_option("-p",
                      "--broker-port",
                      action="store",
                      type="int",
                      dest="PORT",
                      default=9559)
    parser.add_option("-n", "--no-arms", action="store_true", dest="NOARMS")
    (options, args) = parser.parse_args()

    print("----- Started")

    try:
        navigation_proxy = ALProxy("ALNavigation", options.IP, options.PORT)
        motion_proxy = ALProxy("ALMotion", options.IP, options.PORT)
    except Exception, e:
        print("Could not create proxies to ALNavigation or ALMotion")
        print(str(e))
        exit(1)

    if options.NOARMS:
        print("----- Moving with arms disabled")
        motion_proxy.setMoveArmsEnabled(False, False)

    # get an event
    e = threading.Event()

    # spawn logging thread
    thread = LoggingThread(options.IP, options.PORT, "log.m")
Ejemplo n.º 15
0
import vision_definitions
import time
import Image, cv2
import numpy as np
import sys

IP = "172.20.25.152"  # NAOqi's IP address.
PORT = 9559

# Allow overriding the robot IP from the command line.
if (len(sys.argv) >= 2):
    print("On prend l'IP entree")
    IP = sys.argv[1]

# Create a single proxy on ALVideoDevice, used below as a Generic Video
# Module.  (The original created the same proxy twice back to back; the
# redundant second connection has been removed.)
print("Creating ALVideoDevice proxy to ", IP)
camProxy = ALProxy("ALVideoDevice", IP, PORT)

# Camera configuration for the subscription that follows.
camProxy.setParam(18, 0)	# "kCameraSelectID", 0 : camera top, 1 : camera bottom
resolution = 0			# 0 : QQVGA, 1 : QVGA, 2 : VGA
colorSpace = 11			# RGB
mini = 25   # lower detection threshold -- TODO confirm units against the detection code
maxi = 33   # upper detection threshold -- TODO confirm units against the detection code
seuil = 4   # "seuil" = threshold (French) -- TODO confirm how it is used downstream
try :
    videoClient = camProxy.subscribeCamera("python_client",1, resolution, colorSpace, 5)
    print(videoClient)
except RuntimeError:
Ejemplo n.º 16
0
    # Init proxies.

    try:

        motionProxy = ALProxy("ALMotion", robotIP, port)

    except Exception, e:

        print "Could not create proxy to ALMotion"

        print "Error was: ", e

    try:

        postureProxy = ALProxy("ALRobotPosture", robotIP, port)

    except Exception, e:

        print "Could not create proxy to ALRobotPosture"

        print "Error was: ", e

    try:
        ttsProxy = ALProxy("ALTextToSpeech", robotIP, port)
    except Exception, e:
        print "Could not create proxy to ALTextToSpeech"
        print "Error was: ", e


# Set NAO in Stiffness On
Ejemplo n.º 17
0
import sys
from naoqi import ALProxy

try:
    proxy = ALProxy("ALLeds", IP, PORT)
except Exception,e:
    print "Could not create proxy to ALLeds"
    print "Error was: ",e
    sys.exit(1)

while(true)
proxy.rotateEyes(const int& rgb, const float& timeForRotation, const float& totalDuration);


""" Say 'hello, you' each time a human face is detected

"""

import sys
import time

from naoqi import ALProxy
from naoqi import ALBroker
from naoqi import ALModule

from optparse import OptionParser

NAO_IP = "nao.local"


# NOTE: stale comment from the HumanGreeter example above -- no such
# global is defined; the function below plays a scripted animation.
def main(robotIP, port):
    """Play a short scripted whole-body animation on a NAO robot.

    Builds three parallel lists -- joint names, keyframe times (seconds)
    and target angles (radians) -- and plays them with ALMotion's
    blocking angleInterpolation call.

    robotIP -- IP address of the robot's NAOqi broker
    port    -- NAOqi broker port (typically 9559)
    """
    names = list()  # joint names, one entry per animated joint
    times = list()  # times[i]: keyframe timestamps (s) for names[i]
    keys = list()   # keys[i]: target angles (rad) for names[i]

    names.append("HeadPitch")
    times.append([0.64, 1.16, 1.64, 2.16, 2.64, 3.16, 3.64, 5])
    keys.append([
        -0.0782759, -0.50166, -0.0782759, -0.50166, -0.0782759, -0.50166,
        -0.656595, -0.0245859
    ])

    names.append("HeadYaw")
    times.append([0.64, 1.16, 1.64, 2.16, 2.64, 3.16, 3.64, 5])
    keys.append([
        -0.0859459, -0.0706059, -0.0859459, -0.0706059, -0.0859459, -0.0706059,
        -0.0782759, 0.00609404
    ])

    names.append("LAnklePitch")
    times.append([0.72, 1.72, 3.24, 3.72, 5])
    keys.append([-0.0910584, -0.298148, -0.298148, -0.293215, -0.104485])

    names.append("LAnkleRoll")
    times.append([0.72, 1.72, 3.24, 3.72, 5])
    keys.append([-0.0329368, 0.0192192, 0.0192192, 0.0667732, 0.0092244])

    names.append("LElbowRoll")
    times.append([0.68, 1.2, 1.68, 2.2, 2.68, 3.2, 3.68, 5])
    keys.append(
        [0, -0.612024, 0, -0.612024, 0, -0.612024, -0.523053, -0.326699])

    names.append("LElbowYaw")
    times.append([0.68, 1.2, 1.68, 2.2, 2.68, 3.2, 3.68, 5])
    keys.append([
        -0.420357, -0.374338, -0.420357, -0.374338, -0.420357, -0.374338,
        -0.819198, -0.759372
    ])

    names.append("LHand")
    times.append([1.12, 3.12, 4.12, 5])
    keys.append([0.796751, 0.796751, 0.295298, 0.918933])

    names.append("LHipPitch")
    times.append([0.72, 1.72, 3.24, 3.72, 5])
    keys.append([0.340577, 0.25774, 0.25774, -0.13343, 0.0596046])

    names.append("LHipRoll")
    times.append([0.72, 1.72, 3.24, 3.72, 5])
    keys.append([0.090152, 0.0671419, 0.0671419, -0.0555781, -0.0324061])

    names.append("LHipYawPitch")
    times.append([0.72, 1.72, 3.24, 3.72, 5])
    keys.append([-0.360449, -0.384992, -0.384992, -0.483168, 0.0183645])

    names.append("LKneePitch")
    times.append([0.72, 1.72, 3.24, 3.72, 5])
    keys.append([0.0714636, 0.405876, 0.405876, 0.671257, 0.0702441])

    names.append("LShoulderPitch")
    times.append([0.68, 1.2, 1.68, 2.2, 2.68, 3.2, 3.68, 5])
    keys.append([
        -1.05237, -0.823801, -1.05237, -0.823801, -1.05237, -0.823801,
        -0.510863, 1.56771
    ])

    names.append("LShoulderRoll")
    times.append([0.68, 1.2, 1.68, 2.2, 2.68, 3.2, 3.68, 5])
    keys.append([
        0.432547, 0.0352401, 0.432547, 0.0352401, 0.432547, 0.0352401,
        0.507713, 0.329768
    ])

    names.append("LWristYaw")
    times.append([1.12, 3.12, 4.12, 5])
    keys.append([-1.30394, -1.30394, 0.676451, -1.02629])

    names.append("RAnklePitch")
    times.append([0.72, 1.72, 3.24, 3.72, 5])
    keys.append([-0.0787807, -0.292008, -0.292008, -0.256563, -0.0951351])

    names.append("RAnkleRoll")
    times.append([0.72, 1.72, 3.24, 3.72, 5])
    keys.append([0.165806, 0.0680678, 0.0558505, -0.0933421, -0.00302827])

    names.append("RElbowRoll")
    times.append([0.68, 1.2, 1.68, 2.2, 2.68, 3.2, 3.68, 5])
    keys.append([
        4.19617e-05, 0.656595, 4.19617e-05, 0.656595, 4.19617e-05, 0.656595,
        0.598302, 0.291501
    ])

    names.append("RElbowYaw")
    times.append([0.68, 1.2, 1.68, 2.2, 2.68, 3.2, 3.68, 5])
    keys.append([
        0.233125, 0.170232, 0.233125, 0.170232, 0.233125, 0.170232, 0.984786,
        0.77923
    ])

    names.append("RHand")
    times.append([1.12, 3.12, 4.12, 5])
    keys.append([0.849115, 0.849115, 0.340389, 0.918205])

    names.append("RHipPitch")
    times.append([0.72, 1.72, 3.24, 3.72, 5])
    keys.append([0.355635, 0.26973, 0.26973, -0.185867, 0.0367591])

    names.append("RHipRoll")
    times.append([0.72, 1.72, 3.24, 3.72, 5])
    keys.append([-0.119116, -0.0853684, -0.0853684, 0.0496235, 0.0107584])

    names.append("RKneePitch")
    times.append([0.72, 1.72, 3.24, 3.72, 5])
    keys.append([0.0476684, 0.388217, 0.388217, 0.699619, 0.0839559])

    names.append("RShoulderPitch")
    times.append([0.68, 1.2, 1.68, 2.2, 2.68, 3.2, 3.68, 5])
    keys.append([
        -1.11978, -0.926494, -1.11978, -0.926494, -1.11978, -0.926494,
        -0.404934, 1.56779
    ])

    names.append("RShoulderRoll")
    times.append([0.68, 1.2, 1.68, 2.2, 2.68, 3.2, 3.68, 5])
    keys.append([
        -0.454107, -0.0245859, -0.454107, -0.0245859, -0.454107, -0.0245859,
        -0.604437, -0.320648
    ])

    names.append("RWristYaw")
    times.append([1.12, 3.12, 4.12, 5])
    keys.append([1.31153, 1.31153, -0.421891, 0.967912])

    try:
        motion = ALProxy("ALMotion", robotIP, port)
        # Blocking call; argument order per the ALMotion API is
        # (names, angleLists, timeLists, isAbsolute).
        motion.angleInterpolation(names, keys, times, True)
    except BaseException, err:  # Python 2 except syntax (file-wide style)
        print err
Ejemplo n.º 19
0
#-*- coding: utf-8 -*-
from naoqi import ALProxy

# ----------> Connect to robot <----------
robot_ip = "192.168.1.100"
robot_port = 9559  # default port : 9559

# Proxy to the ALMotion module on the robot's NAOqi broker.
motion = ALProxy("ALMotion", robot_ip, robot_port)

# Print every sensor name ALMotion exposes, one per line.
sensorList = motion.getSensorNames()
for sensor in sensorList:
    print sensor
Ejemplo n.º 20
0
def main(robotIP, port, unit_time):
    """Play a Bezier-interpolated whole-body animation, rescaled in time.

    Builds per-joint Bezier keyframes (each key is
    [angle, [3, leftDt, leftDy], [3, rightDt, rightDy]] as exported by
    Choregraphe), uniformly rescales all key times so the clip lasts
    about ``unit_time`` seconds, and plays it with ALMotion.

    robotIP   -- IP address of the robot's NAOqi broker
    port      -- NAOqi broker port (typically 9559)
    unit_time -- desired total clip duration in seconds
    """
    names = list()  # joint names, one entry per animated joint
    times = list()  # times[i]: keyframe timestamps (s) for names[i]
    keys = list()   # keys[i]: Bezier keys for names[i]

    names.append("HeadPitch")
    times.append([0.64, 1.12, 1.6, 2.12, 2.64, 3.12, 3.88])
    keys.append([[-0.066004, [3, -0.213333, 0], [3, 0.16, 0]], [0.217786, [3, -0.16, 0], [3, 0.16, 0]], [-0.066004, [3, -0.16, 0.0949853], [3, 0.173333, -0.102901]], [-0.375872, [3, -0.173333, 0], [3, 0.173333, 0]], [0.00456004, [3, -0.173333, 0], [3, 0.16, 0]], [-0.365133, [3, -0.16, 0], [3, 0.253333, 0]], [-0.144238, [3, -0.253333, 0], [3, 0, 0]]])

    names.append("HeadYaw")
    times.append([0.64, 1.12, 1.6, 2.12, 2.64, 3.12, 3.88])
    keys.append([[-0.0629359, [3, -0.213333, 0], [3, 0.16, 0]], [-0.918907, [3, -0.16, 0], [3, 0.16, 0]], [-0.0629359, [3, -0.16, 0], [3, 0.173333, 0]], [-1.02936, [3, -0.173333, 0], [3, 0.173333, 0]], [-0.016916, [3, -0.173333, 0], [3, 0.16, 0]], [-1.02015, [3, -0.16, 0], [3, 0.253333, 0]], [-0.032256, [3, -0.253333, 0], [3, 0, 0]]])

    names.append("LAnklePitch")
    times.append([1.2, 2.2, 2.72, 3.2, 3.88])
    keys.append([[-0.0158923, [3, -0.4, 0], [3, 0.333333, 0]], [-0.493928, [3, -0.333333, 0], [3, 0.173333, 0]], [-0.430072, [3, -0.173333, 0], [3, 0.16, 0]], [-0.493928, [3, -0.16, 0], [3, 0.226667, 0]], [0.093532, [3, -0.226667, 0], [3, 0, 0]]])

    names.append("LAnkleRoll")
    times.append([1.2, 2.2, 2.72, 3.2, 3.88])
    keys.append([[-0.0973648, [3, -0.4, 0], [3, 0.333333, 0]], [0.0729092, [3, -0.333333, -0.0528154], [3, 0.173333, 0.027464]], [0.143473, [3, -0.173333, 0], [3, 0.16, 0]], [0.0729092, [3, -0.16, 0.0358642], [3, 0.226667, -0.0508076]], [-0.116542, [3, -0.226667, 0], [3, 0, 0]]])

    names.append("LElbowRoll")
    times.append([0.68, 1.16, 1.64, 2.16, 2.68, 3.16, 3.88])
    keys.append([[-1.44345, [3, -0.226667, 0], [3, 0.16, 0]], [-0.800706, [3, -0.16, 0], [3, 0.16, 0]], [-0.969447, [3, -0.16, 0], [3, 0.173333, 0]], [-0.6335, [3, -0.173333, -0.139594], [3, 0.173333, 0.139594]], [-0.131882, [3, -0.173333, 0], [3, 0.16, 0]], [-0.6335, [3, -0.16, 0], [3, 0.24, 0]], [-0.427944, [3, -0.24, 0], [3, 0, 0]]])

    names.append("LElbowYaw")
    times.append([0.68, 1.16, 1.64, 2.16, 2.68, 3.16, 3.88])
    keys.append([[-0.744032, [3, -0.226667, 0], [3, 0.16, 0]], [-2.05101, [3, -0.16, 0], [3, 0.16, 0]], [-0.0245859, [3, -0.16, 0], [3, 0.173333, 0]], [-0.483252, [3, -0.173333, 0], [3, 0.173333, 0]], [-0.481718, [3, -0.173333, 0], [3, 0.16, 0]], [-0.483252, [3, -0.16, 0.00153411], [3, 0.24, -0.00230117]], [-1.17509, [3, -0.24, 0], [3, 0, 0]]])

    names.append("LHand")
    times.append([0.6, 1.08, 1.56, 2.08, 3.08, 3.88])
    keys.append([[0.613843, [3, -0.2, 0], [3, 0.16, 0]], [0.613843, [3, -0.16, 0], [3, 0.16, 0]], [0.613843, [3, -0.16, 0], [3, 0.173333, 0]], [0.614934, [3, -0.173333, 0], [3, 0.333333, 0]], [0.613479, [3, -0.333333, 0.00145501], [3, 0.266667, -0.00116401]], [0.3048, [3, -0.266667, 0], [3, 0, 0]]])

    names.append("LHipPitch")
    times.append([1.2, 2.2, 2.72, 3.2, 3.88])
    keys.append([[0.274614, [3, -0.4, 0], [3, 0.333333, 0]], [0.34978, [3, -0.333333, -0.0289308], [3, 0.173333, 0.015044]], [0.406538, [3, -0.173333, 0], [3, 0.16, 0]], [0.34978, [3, -0.16, 0.0370256], [3, 0.226667, -0.052453]], [0.138102, [3, -0.226667, 0], [3, 0, 0]]])

    names.append("LHipRoll")
    times.append([1.2, 2.2, 2.72, 3.2, 3.88])
    keys.append([[0.134638, [3, -0.4, 0], [3, 0.333333, 0]], [0.014986, [3, -0.333333, 0], [3, 0.173333, 0]], [0.0870839, [3, -0.173333, 0], [3, 0.16, 0]], [0.014986, [3, -0.16, 0], [3, 0.226667, 0]], [0.11816, [3, -0.226667, 0], [3, 0, 0]]])

    names.append("LHipYawPitch")
    times.append([1.2, 2.2, 2.72, 3.2, 3.88])
    keys.append([[-0.289883, [3, -0.4, 0], [3, 0.333333, 0]], [-0.54146, [3, -0.333333, 0.14694], [3, 0.173333, -0.0764089]], [-0.959931, [3, -0.173333, 0], [3, 0.16, 0]], [-0.54146, [3, -0.16, -0.108712], [3, 0.226667, 0.154009]], [-0.171766, [3, -0.226667, 0], [3, 0, 0]]])

    names.append("LKneePitch")
    times.append([1.2, 2.2, 2.72, 3.2, 3.88])
    keys.append([[0.0346476, [3, -0.4, 0], [3, 0.333333, 0]], [0.692733, [3, -0.333333, 0], [3, 0.173333, 0]], [0.67586, [3, -0.173333, 0], [3, 0.16, 0]], [0.692733, [3, -0.16, 0], [3, 0.226667, 0]], [-0.090548, [3, -0.226667, 0], [3, 0, 0]]])

    names.append("LShoulderPitch")
    times.append([0.68, 1.16, 1.64, 2.16, 2.68, 3.16, 3.88])
    keys.append([[1.72417, [3, -0.226667, 0], [3, 0.16, 0]], [1.54623, [3, -0.16, 0.177943], [3, 0.16, -0.177943]], [0.651908, [3, -0.16, 0], [3, 0.173333, 0]], [1.64287, [3, -0.173333, 0], [3, 0.173333, 0]], [1.64287, [3, -0.173333, 0], [3, 0.16, 0]], [1.64287, [3, -0.16, 0], [3, 0.24, 0]], [1.48027, [3, -0.24, 0], [3, 0, 0]]])

    names.append("LShoulderRoll")
    times.append([0.68, 1.16, 1.64, 2.16, 2.68, 3.16, 3.88])
    keys.append([[0.789967, [3, -0.226667, 0], [3, 0.16, 0]], [0.484702, [3, -0.16, 0.128089], [3, 0.16, -0.128089]], [0.021434, [3, -0.16, 0], [3, 0.173333, 0]], [0.911154, [3, -0.173333, -0.194562], [3, 0.173333, 0.194562]], [1.18881, [3, -0.173333, 0], [3, 0.16, 0]], [0.911154, [3, -0.16, 0.146855], [3, 0.24, -0.220282]], [0.0873961, [3, -0.24, 0], [3, 0, 0]]])

    names.append("LWristYaw")
    times.append([0.6, 1.08, 1.56, 2.08, 3.08, 3.88])
    keys.append([[-0.79312, [3, -0.2, 0], [3, 0.16, 0]], [-0.813062, [3, -0.16, 0], [3, 0.16, 0]], [-0.79312, [3, -0.16, 0], [3, 0.173333, 0]], [-1.42666, [3, -0.173333, 0], [3, 0.333333, 0]], [-1.15514, [3, -0.333333, -0.271517], [3, 0.266667, 0.217214]], [0.145688, [3, -0.266667, 0], [3, 0, 0]]])

    names.append("RAnklePitch")
    times.append([1.2, 2.2, 2.72, 3.2, 3.88])
    keys.append([[0.00098731, [3, -0.4, 0], [3, 0.333333, 0]], [-0.0802851, [3, -0.333333, 0], [3, 0.173333, 0]], [0.258309, [3, -0.173333, 0], [3, 0.16, 0]], [-0.0802851, [3, -0.16, 0], [3, 0.226667, 0]], [0.107422, [3, -0.226667, 0], [3, 0, 0]]])

    names.append("RAnkleRoll")
    times.append([1.2, 2.2, 2.72, 3.2, 3.88])
    keys.append([[0.179769, [3, -0.4, 0], [3, 0.333333, 0]], [0.261799, [3, -0.333333, -0.0271751], [3, 0.173333, 0.014131]], [0.303687, [3, -0.173333, 0], [3, 0.16, 0]], [0.303687, [3, -0.16, 0], [3, 0.226667, 0]], [0.07214, [3, -0.226667, 0], [3, 0, 0]]])

    names.append("RElbowRoll")
    times.append([0.68, 1.16, 1.64, 2.16, 2.68, 3.16, 3.88])
    keys.append([[1.24718, [3, -0.226667, 0], [3, 0.16, 0]], [0.673468, [3, -0.16, 0], [3, 0.16, 0]], [0.833004, [3, -0.16, 0], [3, 0.173333, 0]], [0.398883, [3, -0.173333, 0], [3, 0.173333, 0]], [0.658129, [3, -0.173333, 0], [3, 0.16, 0]], [0.398883, [3, -0.16, 0.00613657], [3, 0.24, -0.00920485]], [0.389678, [3, -0.24, 0], [3, 0, 0]]])

    names.append("RElbowYaw")
    times.append([0.68, 1.16, 1.64, 2.16, 2.68, 3.16, 3.88])
    keys.append([[0.785367, [3, -0.226667, 0], [3, 0.16, 0]], [2.08926, [3, -0.16, 0], [3, 0.16, 0]], [-0.273093, [3, -0.16, 0], [3, 0.173333, 0]], [0.516916, [3, -0.173333, -0.263081], [3, 0.173333, 0.263081]], [1.30539, [3, -0.173333, 0], [3, 0.16, 0]], [0.516916, [3, -0.16, 0], [3, 0.24, 0]], [1.17654, [3, -0.24, 0], [3, 0, 0]]])

    names.append("RHand")
    times.append([0.6, 1.08, 1.56, 2.08, 3.08, 3.88])
    keys.append([[0.585115, [3, -0.2, 0], [3, 0.16, 0]], [0.585115, [3, -0.16, 0], [3, 0.16, 0]], [0.585115, [3, -0.16, 0], [3, 0.173333, 0]], [0.587661, [3, -0.173333, -0.000189252], [3, 0.333333, 0.000363946]], [0.588025, [3, -0.333333, 0], [3, 0.266667, 0]], [0.3068, [3, -0.266667, 0], [3, 0, 0]]])

    names.append("RHipPitch")
    times.append([1.2, 2.2, 2.72, 3.2, 3.88])
    keys.append([[0.277401, [3, -0.4, 0], [3, 0.333333, 0]], [0.0288929, [3, -0.333333, 0], [3, 0.173333, 0]], [0.355635, [3, -0.173333, 0], [3, 0.16, 0]], [0.0288929, [3, -0.16, 0], [3, 0.226667, 0]], [0.131882, [3, -0.226667, 0], [3, 0, 0]]])

    names.append("RHipRoll")
    times.append([1.2, 2.2, 2.72, 3.2, 3.88])
    keys.append([[-0.159001, [3, -0.4, 0], [3, 0.333333, 0]], [-0.439722, [3, -0.333333, 0.107313], [3, 0.173333, -0.0558026]], [-0.648346, [3, -0.173333, 0], [3, 0.16, 0]], [-0.439722, [3, -0.16, -0.0803346], [3, 0.226667, 0.113807]], [-0.06592, [3, -0.226667, 0], [3, 0, 0]]])

    names.append("RHipYawPitch")
    times.append([3.88])
    keys.append([[-0.171766, [3, -1.29333, 0], [3, 0, 0]]])

    names.append("RKneePitch")
    times.append([1.2, 2.2, 2.72, 3.2, 3.88])
    keys.append([[0.0277265, [3, -0.4, 0], [3, 0.333333, 0]], [0.606045, [3, -0.333333, 0], [3, 0.173333, 0]], [0.142776, [3, -0.173333, 0], [3, 0.16, 0]], [0.606045, [3, -0.16, 0], [3, 0.226667, 0]], [-0.091998, [3, -0.226667, 0], [3, 0, 0]]])

    names.append("RShoulderPitch")
    times.append([0.68, 1.16, 1.64, 2.16, 2.68, 3.16, 3.88])
    keys.append([[1.78715, [3, -0.226667, 0], [3, 0.16, 0]], [1.5095, [3, -0.16, 0.137548], [3, 0.16, -0.137548]], [0.96186, [3, -0.16, 0], [3, 0.173333, 0]], [1.17815, [3, -0.173333, -0.055224], [3, 0.173333, 0.055224]], [1.2932, [3, -0.173333, 0], [3, 0.16, 0]], [1.17815, [3, -0.16, 0], [3, 0.24, 0]], [1.49569, [3, -0.24, 0], [3, 0, 0]]])

    names.append("RShoulderRoll")
    times.append([0.68, 1.16, 1.64, 2.16, 2.68, 3.16, 3.88])
    keys.append([[-0.710284, [3, -0.226667, 0], [3, 0.16, 0]], [-0.382007, [3, -0.16, -0.116584], [3, 0.16, 0.116584]], [-0.01078, [3, -0.16, 0], [3, 0.173333, 0]], [-1.63068, [3, -0.173333, 0], [3, 0.173333, 0]], [-1.57546, [3, -0.173333, 0], [3, 0.16, 0]], [-1.63068, [3, -0.16, 0], [3, 0.24, 0]], [-0.104354, [3, -0.24, 0], [3, 0, 0]]])

    names.append("RWristYaw")
    times.append([0.6, 1.08, 1.56, 2.08, 3.08, 3.88])
    keys.append([[0.826783, [3, -0.2, 0], [3, 0.16, 0]], [0.849794, [3, -0.16, 0], [3, 0.16, 0]], [0.826783, [3, -0.16, 0], [3, 0.173333, 0]], [1.0937, [3, -0.173333, 0], [3, 0.333333, 0]], [0.27301, [3, -0.333333, 0.189193], [3, 0.266667, -0.151355]], [0.0720561, [3, -0.266667, 0], [3, 0, 0]]])

    # Rescale every key time so the clip lasts ~unit_time seconds.
    # 3.95 is presumably the reference clip length in seconds (the last
    # keyframe is at 3.88 s) -- TODO confirm.  Requires numpy as `np`
    # at module level.
    mul = unit_time/3.95
    times_unit = [list(np.array(t, dtype=float)*mul) for t in times]

    try:
        motion = ALProxy("ALMotion", robotIP, port)
        motion.angleInterpolationBezier(names, times_unit, keys)
    except BaseException, err:  # Python 2 except syntax (file-wide style)
        print err
Ejemplo n.º 21
0
def main(IP, PORT):

    ##在planB中没有标定,只有阈值来确定目标物与机器人的相对位置

    ##设置定时器计算程序运行总时间
    t0 = time.time()

    ##这里阈值有两个,一个是棒棒的shB;一个是桶的shT;shW为阈值宽度
    ##这两个阈值的作用:
    ##shB为机器人到达相对于目标物(棒棒)指定位置时目标物在底部摄像头拍摄到画面的位置
    ##shT为机器人到达相对于目标物(桶)指定位置时目标物在底部摄像头拍摄到画面的位置
    ##其中棒棒在定位过程中先使棒棒出现在底部摄像头视野内,再使目标在图像中处于阈值位置
    ##桶的原理同上,但在定位过程中,头部向下转20度(Pitch)
    shB = 262
    shT = 270
    shW = 20

    ##该程序中有三个标志位:
    ##flag 0代表顶部摄像头 1代表底部摄像头
    ##flag2 0代表机器人没有低头发现目标 1代表低头20度后发现目标
    ##flag3 机器人在丢失目标时,0代表因为太远而丢失目标 1代表因为太近而丢失目标
    flag = 0
    flag2 = 0
    flag3 = 0

    ##调用ALTextSpeech模块,机器人说hello,说明程序开始
    tts = ALProxy("ALTextToSpeech", IP, PORT)
    tts.say("Hello!")

    ##第一步:找到棒棒+粗定位
    ##转换姿态找到目标:
    for xunzhaocishu in range(4):
        ##第1步:找到目标
        for t in range(4):
            pt.posinit()  #调整姿态
            #前看
            angle0 = 0
            flag = 0
            point, detection = gp1.checkexist(IP, PORT, flag)
            if detection == 1:
                break
            flag = 1
            point, detection = gp1.checkexist(IP, PORT, flag)
            if detection == 1:
                break

            #下看
            pt.posinit2(0.0, 20)
            flag = 1
            point, detection = gp1.checkexist(IP, PORT, flag)
            if detection == 1:
                flag2 = 1
                break
            flag = 0
            point, detection = gp1.checkexist(IP, PORT, flag)
            if detection == 1:
                flag2 = 1
                break
            pt.posinit2(0.0, 0.0)
            ##左看
            angle0 = 30
            headm.headcheck(0, 60)
            flag = 0
            point, detection = gp1.checkexist(IP, PORT, flag)
            if detection == 1:
                break
            flag = 1
            point, detection = gp1.checkexist(IP, PORT, flag)
            if detection == 1:
                break
            ##右看
            angle0 = -30
            headm.headcheck(0, -60)
            flag = 1
            point, detection = gp1.checkexist(IP, PORT, flag)
            if detection == 1:
                break
            flag = 0
            point, detection = gp1.checkexist(IP, PORT, flag)
            if detection == 1:
                break
            ##向左转90度
            mt.move(0, 0, 1.57079633)
        print '找到目标'

        ##找到目标,机器人说I found the bar!
        tts.say("I found the bar!")

        ##第2步:看到了目标向目标转动
        ##开始时如果头偏了应将头偏回并将身子转过去,并重新计算距离角度

        if flag2 == 1:
            if flag == 0:
                #如果是机器人低头且用顶部摄像头发现的先往前走20cm
                distance, angle, x, y = gp2.getPosition(point, flag)
                mt.move(0, 0, -angle)
                mt.move(20, 0, 0)
            if flag == 1:
                #如果是机器人低头且用底部摄像头发现的先往后走20cm
                distance, angle, x, y = gp2.getPosition(point, flag)
                mt.move(0, 0, -angle)
                mt.move(-20, 0, 0)

        if angle0 == 0:  #如果是转头发现的先转过身去
            distance, angle, x, y = gp1.getPosition(point, flag)
        elif angle0 != 0:
            if angle0 == 30:
                pt.posinit()  #调整姿态
                mt.move(0, 0, 0.5236)
            elif angle0 == -30:
                pt.posinit()  #调整姿态
                mt.move(0, 0, -0.5236)

        pt.posinit()  #调整姿态
        point1, detection1 = gp1.checkexist(IP, PORT, 1)
        point0, detection0 = gp1.checkexist(IP, PORT, 0)
        if detection1 == 1:
            flag = 1
            point = point1
            break
        elif detection0 == 1:
            flag = 0
            point = point0
            break

    error = aa.adjustment(IP, PORT, 0, flag)
    if error == 0:
        mt.move(10, 0, 0)
        flag = 1

    #第3步:粗定位之后走到目标前,即底部摄像头可以捕捉到目标
    st = 25
    if flag == 0:  ##如果顶部摄像头出现目标0->顶部 1->底部
        detection = 0
        flag = 1
        while detection == 0:
            print '第一次调整距离'
            mt.move(st, 0, 0)
            pt.posinit()
            point, detection = gp1.checkexist(IP, PORT, flag)
            if detection == 1:
                distance, angle, x, y = gp1.getPosition(point, flag)
                if y > 80:
                    print '底部摄像头出现目标'
                    break
            if detection == 0:
                print '底部摄像头目标没有出现'
                error = aa.adjustment(IP, PORT, 0, 0)
                if error == 0 and detection == 0:
                    error = aa.adjustment2(IP, PORT, 0, 0, 20)

        print '底部摄像头出现目标'

##第二步:走近棒棒+精定位(近,底部摄像头)
#循环调整,精确定位棒的位置,使目标物到达底部摄像头阈值范围内

    ad = 0
    pt.posinit()
    detection = 0
    while detection == 0:
        point, detection = gp1.checkexist(IP, PORT, flag)
        if detection == 1:
            distance, angle, x, y = gp1.getPosition(point, flag)
            break
        else:
            mt.move(10, 0, 0)

    flag3 = 0
    while y > shB + shW / 2 or y < shB - shW / 2:
        aa.adjustment(IP, PORT, 0, flag)
        print '第二次调整距离'
        print y
        if y > 180:
            flag3 = 1
        elif y <= 180:
            flag3 = 0

        ##按接近程度选择步长
        st = 5
        if -40 < shB - y < 40:
            st = 2

        ##根据目标在图像中位置移动
        if y > shB + shW / 2:
            mt.move(-st, 0, 0)
        elif y < shB - shW / 2:
            mt.move(st, 0, 0)

        ##获得目标在图像中位置
        pt.posinit()
        point, detection = gp1.checkexist(IP, PORT, flag)
        while detection == 0:
            print '丢失目标'
            if flag3 == 1:
                mt.move(-st, 0, 0)
            else:
                mt.move(st, 0, 0)
            point, detection = gp1.checkexist(IP, PORT, flag)

        distance, angle, x, y = gp1.getPosition(point, flag)

##第三步:捡棒棒
    mt.move(0, 0, 0.1 - angle)
    gt.grab()
    tts.say("I got it!")
    mt.move(-20, 0, 0.0)

    ##第四步:判断是否成功,修补失误*

    ##第五步:找桶+粗定位(远,顶部摄像头,或近,底部摄像头)
    #转换姿态找到目标:
    #第1步:找到桶
    flag2 = 0
    for xunzhaocishu in range(4):
        for t in range(4):
            pt.posinit()  #调整姿态
            #前看
            angle0 = 0
            flag = 0
            point, detection = gp2.find_boxcontours(IP, PORT, flag)
            if detection == 1:
                break

            flag = 1
            point, detection = gp2.find_boxcontours(IP, PORT, flag)
            if detection == 1:
                break

            #下看
            pt.posinit2(0.0, 20)
            flag = 1
            point, detection = gp2.find_boxcontours2(IP, PORT, flag)
            if detection == 1:
                flag2 = 1
                break
            flag = 0
            point, detection = gp2.find_boxcontours2(IP, PORT, flag)
            if detection == 1:
                flag2 = 1
                break
            pt.posinit2(0.0, 0)

            #左看
            angle0 = 30
            headm.headcheck(0, 30)
            flag = 0
            point, detection = gp2.find_boxcontours(IP, PORT, flag)
            if detection == 1:
                break
            flag = 1
            point, detection = gp2.find_boxcontours(IP, PORT, flag)
            if detection == 1:
                break
            #右看
            angle0 = -30
            headm.headcheck(0, -30)
            flag = 0
            point, detection = gp2.find_boxcontours(IP, PORT, flag)
            if detection == 1:
                break
            flag = 1
            point, detection = gp2.find_boxcontours(IP, PORT, flag)
            if detection == 1:
                break

            #向左转90度
            mt.move(0, 0, 1.57079633)

        print '找到目标'
        tts.say("I find the can!")

        #第2步:看到了目标向目标转动

        if flag2 == 1:
            if flag == 0:  #如果是低头发现的先往前走20cm
                distance, angle, x, y = gp2.getPosition(point, flag)
                mt.move(0, 0, -angle)
                mt.move(20, 0, 0)
            if flag == 1:
                distance, angle, x, y = gp2.getPosition(point, flag)
                mt.move(0, 0, -angle)

    #开始时如果头偏了应将头偏回并将身子转过去,并重新计算距离角度
        if angle0 != 0:
            if angle0 == 30:
                headm.headcheck(0, 0)
                mt.move(0, 0, 0.5236)
            elif angle0 == -30:
                headm.headcheck(0, 0)
                mt.move(0, 0, -0.5236)

        if flag2 == 0 or (flag2 == 1 and flag == 0):

            ##1判断是哪个摄像头捕获目标2确认是否真的识别正确
            pt.posinit()  #调整姿态
            point1, detection1 = gp2.find_boxcontours(IP, PORT, 1)
            point0, detection0 = gp2.find_boxcontours(IP, PORT, 0)
            if detection1 == 1:
                print '摄像头1'
                flag = 1

            elif detection0 == 1:
                print '摄像头0'
                flag = 0

            ##调整角度
            error = aa.adjustment(IP, PORT, 1, flag)
            if error == 0:
                mt.move(-10, 0, 0)
            elif error == 1:
                break

    #第3步:向目标物移动,使之出现在底部摄像头内
    #如果目标在上摄像头视野中,则向前走,步长为10cm,直到下摄像头视野中出现目标
    st = 50
    if flag == 0:
        flag = 1
        detection == 0
        while (detection == 0):
            print '第一次调整距离'
            mt.move(st, 0, 0)
            pt.posinit()  #调整姿态
            point, detection = gp2.find_boxcontours(IP, PORT, flag)

            if detection == 1:
                distance, angle, x, y = gp2.getPosition(point, flag)
                if y > 50:
                    print '底部摄像头出现目标'
                    break
            elif detection == 0:
                print '底部摄像头目标没有出现'
                error = aa.adjustment(IP, PORT, 1, 0)
                if error == 0 and detection == 0:
                    mt.move(-10, 0, 0)
        print '底部摄像头出现目标'

##第六步:走近桶+精定位(近,底部摄像头,头向下转20°)
#循环调整,走到使桶出现在阈值范围内的位置(**需进一步标定,求出shT,以及要走的步长和方向)

#一边调整一边低头至20度,且底部摄像头出现桶
    for i in range(1, 3):
        st = 10
        error = aa.adjustment2(IP, PORT, 1, 1, 10 * t)
        if error == 0:
            error = aa.adjustment2(IP, PORT, 1, 0, 10 * t)
            if error == 0:
                mt.move(st, 0, 0)

    pt.posinit2(0.0, 20)
    point, detection = gp2.find_boxcontours(IP, PORT, flag)
    while detection == 0:
        mt.move(10, 0, 0)
        pt.posinit2(0.0, 20)
        point, detection = gp2.find_boxcontours(IP, PORT, flag)

    ad = 0
    if detection == 1:
        distance, angle, x, y = gp2.getPosition(point, flag)
        flag3 = 0
        while y > shT + shW / 2 or y < shT - shW / 2:
            aa.adjustment2(IP, PORT, 1, flag, 20)
            print '第二次调整距离'
            print y
            if y > 240:
                flag3 == 1
            ##按接近程度选择步长

            st2 = 4
            if -40 < shT - y < 40:
                st2 = 2
            ##根据目标在图像中位置移动
            if y > shT + shW / 2:
                mt.move(-st2, 0, 0)
            elif y < shT - shW / 2:
                mt.move(st2, 0, 0)
            ##获得目标在图像中位置
            pt.posinit2(0.0, 20)
            point, detection = gp2.find_boxcontours(IP, PORT, flag)
            while detection == 0:
                print '丢失目标'
                if flag3 == 1:
                    mt.move(-st, 0, 0)
                else:
                    mt.move(st, 0, 0)

                pt.posinit2(0.0, 20)
                point, detection = gp1.checkexist(IP, PORT, flag)

            distance, angle, x, y = gp1.getPosition(point, flag)


##第七步:判断是否可扔*

##第八步:扔棒棒
    mt.move(0, 0, 0.1 - angle)
    gt.drop()
    t1 = time.time()
    # Time the image transfer.
    print "acquisition delay ", t1 - t0

    mt.move(-10, 0, 0)

    pt.posinit()
    tts.say("I threw it!")
Ejemplo n.º 22
0
            else:
                print("Given argument is gibberish, ignoring...")
        else:
            print("Given argument is gibberish, ignoring...")

    # Load pepper ip
    if load_ip() == 1:
        print(
            "Something went wrong while loading pepper's IP, using '127.0.0.1' instead..."
        )
    else:
        print("Load succes (" + _IP + ")")

    # Setup sound
    try:
        audio = ALProxy("ALAudioPlayer", _IP, 9559)
        meowID = audio.loadFile("/home/nao/audio/cat_meow2.wav")
        audio.setVolume(meowID, 0.25)
        kgggID = audio.loadFile("/home/nao/audio/cat_screech.wav")
        audio.setVolume(kgggID, 0.25)
    except:
        print("Could not connect to Pepper (is her IP correct?)")

    led_thread = LedThread()
    connection = Connection()
    connection.start()

    # Begin a ros subscriber
    rospy.init_node("pepper_animation", anonymous=True)
    # Also start a publisher to push the thing
    #animationPublisher = rospy.Publisher("/animation", String, queue_size=5)
Ejemplo n.º 23
0
# -*- encoding: UTF-8 -*-
# Demo: start a sideways step on a NAO/Pepper robot through ALMotion.
# moveToward(x, y, theta) takes normalized velocities in [-1, 1]:
# +x = forward, +y = robot's left, +theta = counter-clockwise turn.
# NOTE(review): nothing ever stops the walk — the robot keeps stepping
# until another motion command (or moveToward(0,0,0)) is issued.
from naoqi import ALProxy
ip = "192.168.3.2"
port = 9559

# motion proxy on the robot
motion = ALProxy("ALMotion", ip, port)
#motion.moveToward(0.2,0.0,0.0) # forward (x axis; original label "left" was wrong)
#motion.moveToward(-0.2,0.0,0.0) # backward
motion.moveToward(0.0, 0.1, 0.0)  # step left at 10% of max lateral speed
#motion.moveToward(0.0,-0.1,0.0) # step right
Ejemplo n.º 24
0
def find_objects():
    """
	Process the recorded video and determine object information
	Returns an array of Objects
	"""

    broker = ALBroker("broker", "0.0.0.0", 0, "localhost", 9559)

    motion = ALProxy("ALMotion")
    posture = ALProxy("ALRobotPosture")
    camera = ALProxy("ALVideoDevice", "localhost", 9559)

    posture.goToPosture("Stand", 0.5)

    images = record_video(camera, motion)

    fourcc = cv2.cv.CV_FOURCC('M', 'J', 'P', 'G')
    out_video = cv2.VideoWriter('/home/nao/output.avi', fourcc, 20.0,
                                (640, 480))

    objects = []
    old_objects = []

    for pair in images:
        nao_image = pair[0]
        angle = pair[1]
        # Convert NAO Format to OpenCV format
        frame = np.reshape(
            np.frombuffer(nao_image[6], dtype='%iuint8' % nao_image[2]),
            (nao_image[1], nao_image[0], nao_image[2]))

        video_frame = frame.copy()
        cv2.putText(video_frame, str(angle), (20, 460),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255))
        out_video.write(video_frame)

        old_found = []
        new_contours = []

        contours = find_contours(frame)

        for i in range(len(contours)):
            contour = contours[i]
            x, y, w, h = cv2.boundingRect(contour)
            cx = x + w / 2
            cy = y + h / 2

            a = camera.getAngularPositionFromImagePosition(
                1, (float(cx) / 640, float(cy) / 480))
            a[0] += angle

            best = (None, 99999)
            for j in range(len(old_objects)):
                if j in old_found:
                    continue
                u = old_objects[j]
                dist = math.sqrt((cx - u.position[0])**2 +
                                 (cy - u.position[1])**2)
                if dist < best[1]:
                    best = (u, dist)
            if best[1] < 150:
                old_found.append(best[0])
                best[0].update(contour, (cx, cy), frame[y:(y + h), x:(x + w)],
                               a)
            else:
                print best[1]
                new_contours.append(i)

        new_objects = list(old_found)

        for contour in new_contours:
            contour = contours[contour]
            x, y, w, h = cv2.boundingRect(contour)
            cx = x + w / 2
            cy = y + h / 2
            a = camera.getAngularPositionFromImagePosition(
                1, (float(cx) / 640, float(cy) / 480))
            a[0] += angle
            roi = frame[y:(y + h), x:(x + w)]
            obj = Object(contour, (cx, cy), roi, a)
            objects.append(obj)
            new_objects.append(obj)

        old_objects = new_objects
    out_video.release()
    return objects
Ejemplo n.º 25
0
def naoMarkDetection(robotIP="127.0.0.1", port=9559):
    MOTION = ALProxy("ALMotion", robotIP, port)
    CAMERA = ALProxy("ALVideoDevice", robotIP, port)
    TTS = ALProxy("ALTextToSpeech", robotIP, port)
    LANDMARK = ALProxy("ALLandMarkDetection", robotIP, port)
    MEMORY = ALProxy("ALMemory", robotIP, port)
    #初始化机器人姿势
    standWithStick(0.1, robotIP, port)
    #设置机器人上方摄像头开启
    CAMERA.setActiveCamera(0)
    #加载mark识别EVENT
    memvalue1 = "LandmarkDetected"
    alpha = 0
    size = 0
    period = 200
    LANDMARK.subscribe("Test_LandMark", period, 0.0)

    MOTION.angleInterpolation("HeadPitch", 0, 0.5, True)
    time.sleep(1.5)

    for i in range(0, 5):
        #检测正前方
        MOTION.angleInterpolation("HeadYaw", 0, 0.5, True)
        time.sleep(2.5)
        val = MEMORY.getData(memvalue1)
        if (val and isinstance(val, list) and len(val) >= 2):
            TTS.say("前方我看到了")
            markInfo = val[1]
            shapeInfo = markInfo[0]
            rshapeInfo = shapeInfo[0]
            alpha = rshapeInfo[1]
            size = rshapeInfo[4]
            angleFlag = 0
            break
        #检测左边45度
        MOTION.angleInterpolation("HeadYaw", 45 * math.pi / 180.0, 0.5, True)
        time.sleep(2.5)
        val = MEMORY.getData(memvalue1)
        if (val and isinstance(val, list) and len(val) >= 2):
            TTS.say("左边我看到了")
            markInfo = val[1]
            shapeInfo = markInfo[0]
            rshapeInfo = shapeInfo[0]
            alpha = rshapeInfo[1]
            size = rshapeInfo[4]
            angleFlag = 1
            break
        #检测右边45度
        MOTION.angleInterpolation("HeadYaw", -45 * math.pi / 180.0, 0.5, True)
        time.sleep(2.5)
        val = MEMORY.getData(memvalue1)
        if (val and isinstance(val, list) and len(val) >= 2):
            TTS.say("右边我看到了")
            markInfo = val[1]
            shapeInfo = markInfo[0]
            rshapeInfo = shapeInfo[0]
            alpha = rshapeInfo[1]
            size = rshapeInfo[4]
            angleFlag = -1
            break

        TTS.say("没有检测到标志")
    #如果5次都没检测到mark就返回False
    if (i == 4):
        return False

    if (angleFlag == 1):
        alpha += math.pi / 4
    elif (angleFlag == -1):
        alpha -= math.pi / 4
    else:
        alpha = alpha

    #mark的半径,在赛场上应该改为0.12
    r = 0.093 / 2
    #mark与机器人摄像头的高度差,场上应该改为0
    h = 0.05

    #将机器人的头对准mark
    #MOTION.angleInterpolation("HeadYaw",alpha,0.5,True)

    #time.sleep(1.5)
    #val1 = MEMORY.getData(memvalue1)
    #markinfo1 = val1[1]
    #shapeinfo1 = markinfo1[0]
    #rshapeinfo1 = shapeinfo1[0]
    k = 0.0913

    #利用等比例法测出机器人到mark的距离
    #根据标志成像的大小和实际大小的比例
    #size = rshapeInfo[4]
    d1 = k / size

    #返回nao mark距离和角度值
    print "the disMark = ", d1
    print "the markAngle = ", alpha
    return [d1, alpha]
Ejemplo n.º 26
0
 def force_connect(self):
     """Unconditionally (re)create the ALPeoplePerception proxy,
     replacing any proxy previously stored on this instance.
     """
     self.proxy = ALProxy("ALPeoplePerception")
Ejemplo n.º 27
0
def pictureHandle(cameraID,
                  thresholdMin,
                  thresholdMax,
                  robotIP="127.0.0.1",
                  port=9559):
    """Grab one frame from the given NAO camera and return the contours of
    the color blobs whose HSV values fall in [thresholdMin, thresholdMax].

    cameraID     -- 0 = top camera, 1 = bottom camera
    thresholdMin -- lower HSV bound (as accepted by cv2.inRange)
    thresholdMax -- upper HSV bound
    robotIP/port -- NAOqi connection parameters

    Returns the contour list from cv2.findContours (empty when no blob of
    the requested color is visible).
    """
    CAMERA = ALProxy("ALVideoDevice", robotIP, port)
    CAMERA.setActiveCamera(cameraID)
    # resolution 2 = VGA 640x480 (0 would be 160x120)
    resolution = 2
    # color space 11 = RGB
    colorSpace = 11
    videoClient = CAMERA.subscribe("python_client", resolution, colorSpace, 5)
    try:
        # exposure parameter 22 set to mode 2 (auto-exposure algorithm)
        CAMERA.setCamerasParameter(videoClient, 22, 2)
        time.sleep(0.5)
        # grab a single frame
        naoImage = CAMERA.getImageRemote(videoClient)
    finally:
        # always release the subscription, even if the grab raised
        # (the original leaked it on error)
        CAMERA.unsubscribe(videoClient)

    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]

    # convert to a PIL image and round-trip through disk for OpenCV
    img = Image.fromstring("RGB", (imageWidth, imageHeight), array)
    img.save("camImage.png", "PNG")

    imgTest = cv2.imread("camImage.png", 1)
    # BGR -> HSV color space
    hsv = cv2.cvtColor(imgTest, cv2.COLOR_BGR2HSV)
    # mask of pixels inside the requested HSV range
    mask = cv2.inRange(hsv, thresholdMin, thresholdMax)
    # keep only the masked color blobs
    res = cv2.bitwise_and(imgTest, imgTest, mask=mask)
    # NOTE(review): `res` holds BGR data, so COLOR_HSV2RGB is nominally the
    # wrong conversion; kept as-is because the binarization threshold below
    # was tuned against this exact pipeline.
    color = cv2.cvtColor(res, cv2.COLOR_HSV2RGB)
    # grayscale, then binarize
    gray = cv2.cvtColor(color, cv2.COLOR_RGB2GRAY)
    ret, th1 = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)

    # opening (erode then dilate) to drop speckle noise; the tall 4x8
    # dilation kernel favors the stick's vertical shape
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
    erosion = cv2.erode(th1, kernel, iterations=1)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 8))
    dilation = cv2.dilate(erosion, kernel, iterations=1)

    # extract contours (OpenCV 2.x API: two return values)
    contours, hierarchy = cv2.findContours(dilation, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    return contours
Ejemplo n.º 28
0
 def resetPopulation(self):
     """Empty the set of people currently tracked by ALPeoplePerception,
     lazily creating the proxy on first use.
     """
     proxy = self.proxy
     if not proxy:
         proxy = ALProxy("ALPeoplePerception")
         self.proxy = proxy
     return proxy.resetPopulation()
	keys.append([[0.166571, [3, -0.255556, 0], [3, 0.344444, 0]], [0.166208, [3, -0.344444, 0], [3, 0.188889, 0]], [0.166571, [3, -0.188889, 0], [3, 0.311111, 0]], [0.166208, [3, -0.311111, 0], [3, 0.233333, 0]], [0.25, [3, -0.233333, 0], [3, 0, 0]]])

	names.append("RShoulderPitch")
	times.append([0.166667, 0.766667, 1.8, 2.36667, 2.83333, 3.3, 4])
	keys.append([[0.0767419, [3, -0.0555556, 0], [3, 0.2, 0]], [-0.59515, [3, -0.2, 0.11552], [3, 0.344444, -0.19895]], [-0.866668, [3, -0.344444, 0], [3, 0.188889, 0]], [-0.613558, [3, -0.188889, -0.253109], [3, 0.155556, 0.208443]], [0.584497, [3, -0.155556, -0.249275], [3, 0.155556, 0.249275]], [0.882091, [3, -0.155556, -0.108169], [3, 0.233333, 0.162253]], [1.39576, [3, -0.233333, 0], [3, 0, 0]]])

	names.append("RShoulderRoll")
	times.append([0.166667, 0.766667, 1.8, 2.36667, 2.83333, 3.3, 4])
	keys.append([[-0.019984, [3, -0.0555556, 0], [3, 0.2, 0]], [-0.019984, [3, -0.2, 0], [3, 0.344444, 0]], [-0.615176, [3, -0.344444, 0.175025], [3, 0.188889, -0.0959815]], [-0.833004, [3, -0.188889, 0], [3, 0.155556, 0]], [-0.224006, [3, -0.155556, -0.00920487], [3, 0.155556, 0.00920487]], [-0.214801, [3, -0.155556, 0], [3, 0.233333, 0]], [-0.297251, [3, -0.233333, 0], [3, 0, 0]]])

	names.append("RWristYaw")
	times.append([0.766667, 1.8, 2.36667, 3.3, 4])
	keys.append([[-0.058334, [3, -0.255556, 0], [3, 0.344444, 0]], [-0.0521979, [3, -0.344444, 0], [3, 0.188889, 0]], [-0.067538, [3, -0.188889, 0], [3, 0.311111, 0]], [-0.038392, [3, -0.311111, -0.0121571], [3, 0.233333, 0.00911784]], [-0.00371307, [3, -0.233333, 0], [3, 0, 0]]])

	try:
		motion = ALProxy("ALMotion",robotIP,port)
		motion.angleInterpolationBezier(names, times, keys)
	except BaseException, err:
		print err
  
  
if __name__ == "__main__":
	robotIP = "127.0.0.1"
	port = 9559

	if len(sys.argv) <= 1:
		print "(robotIP default: 127.0.0.1)"
	elif len(sys.argv) <= 2:
		robotIP = sys.argv[1]
	else:
		port = int(sys.argv[2])
Ejemplo n.º 30
0
def startPosture(ip, port=9559):
    """Bring the robot up into the "StandInit" posture at 50% speed."""
    ALProxy("ALRobotPosture", ip, port).goToPosture("StandInit", 0.5)