Code example #1
    def __init__(self):
        self.robot = Robot()
        self.current_angles = [0 for i in range(NUM_JOINTS)]
        self.target_angles = [0 for i in range(NUM_JOINTS)]
        self.target_end_effector_state = EndEffectorStateList.IDLE
        self.selected = UD2  # default select JOINT2
        self.shutdown = False
Code example #2
def main():
    # We need to initialize the ROS environment for Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()

    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()
    # Initialize face detector
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # The variable for counting loop
    cnt = 0

    #Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = face_detector.detect(img)

        if (len(dets) > 0):
            face_tracking = None
            distanceFromCenter_min = 1000
            # Find a face near image center
            for face in dets:
                # Draw Rectangle
                cv2.rectangle(img, (face.left(), face.top()),
                              (face.right(), face.bottom()), color_green, 3)

                face_x = (face.left() + face.right()) / 2

                #TODO: compute the distance between the face and the image center; the center is 0.5 * width of the image.
                distanceFromCenter = abs(face_x - camera.width / 2)

                # Find a face that has the smallest distance
                if distanceFromCenter < distanceFromCenter_min:
                    distanceFromCenter_min = distanceFromCenter
                    face_tracking = face

            # Estimate pose
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, face_tracking)
            # Draw pose
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)

        # Show image
        cv2.imshow("Frame", img[..., ::-1])
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
Code example #3
class client:
    def __init__(self):

        self.robot = Robot()
        #rospy.Timer(rospy.Duration(1.0), self.robotUpdate)

    def robotUpdate(self, event):
        # self.robot.test()
        self.robot.move(0, 0)
Code example #4
def main():
    # We need to initialize the ROS environment for Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()
    # Run the nodding motion (comment out to disable)
    startNod(robot)
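startNod is not defined in this snippet. A minimal sketch of what it might do, assuming the robot.move(pan, tilt) and robot.getPosition() APIs used throughout this listing (the angles and timing are placeholders, and time must be imported):

def startNod(robot, times=3):
    # Nod by tilting down and back up around the current pan angle
    pan = robot.getPosition()[0]
    for _ in range(times):
        robot.move(pan, -0.3)  # placeholder tilt-down angle
        time.sleep(0.5)
        robot.move(pan, 0.3)   # placeholder tilt-up angle
        time.sleep(0.5)
    robot.move(pan, 0)  # return to level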
Code example #5
def main():
    # We need to initialize the ROS environment for Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()

    # TODO: change the values in lookatpoint to look at the red square.
    robot.lookatpoint(1, 1, 1)  # placeholder coordinates, not the red square's actual position
    time.sleep(1)  # wait a second

    # TODO: change the values in lookatpoint to look at the green square.
    robot.lookatpoint(1, -1, 1)  # placeholder coordinates, not the green square's actual position
Code example #6
    def __init__(self):
        self.initParam()
        self.pan_speed = 0
        self.tilt_speed = 0

        # init
        self.robot = Robot()
        self.ball_detector = BallDetector()
        self.camera = Camera(self.imageCallback)

        # move robot head to center
        self.robot.center()

        # load timer
        rospy.Timer(rospy.Duration(1.0 / self.rate), self.robotUpdate)
Code example #7
def main():
    # We need to initialize the ROS environment for Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()

    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()
    # Initialize face detector
    detector = dlib.get_frontal_face_detector()
    detector = dlib.get_frontal_face_detector()

    # The variable for counting loop
    cnt = 0

    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = detector(img, 1)
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()),
                          (det.right(), det.bottom()), color_green, 3)

        if (len(dets) > 0):
            tracked_face = dets[0]
            tracked_face_x = (tracked_face.left() + tracked_face.right()) / 2
            tracked_face_y = (tracked_face.top() + tracked_face.bottom()) / 2
            #TODO: convert 2d point to 3d point on the camera coordinates system
            # (one possible completion, following code example #16)
            (x, y, z) = camera.convert2d_3d(tracked_face_x, tracked_face_y)

            #TODO: convert the 3d point on the camera coordinates system to the robot coordinates system
            (x, y, z) = camera.convert3d_3d(x, y, z)

            #TODO: move the robot so that it tracks the face
            robot.lookatpoint(x, y, z)

        # Show image
        cv2.imshow("Frame", img[..., ::-1])

        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
Code example #8
def main():
    #We need to initialize the ROS environment for AICoRE to connect
    ROSEnvironment()

    # Start robot
    robot = Robot()
    robot.start()

    #Initialize AICoRE client
    client = AICoRE()


    #TODO: Set up client name
    client.setUserName('username')

    #Initialize speech recognition
    r = sr.Recognizer()

    #List all the microphone hardware
    for i, item in enumerate(sr.Microphone.list_microphone_names()):
        print( i, item)

    #TODO: Initialize mic and set the device_index
    mic = sr.Microphone(device_index=7)

    print "I'm listening "

    with mic as source:
        r.adjust_for_ambient_noise(source)
        audio = r.listen(source)
    text = r.recognize_google(audio)

    client.send(text)
    answer = client.answer()

    tts = gTTS(answer)
    tts.save('answer.mp3')
    playsound.playsound('answer.mp3')

    #TODO: check if 'yes' in voice input
    if 'yes' in answer.lower():
        #TODO: robot should nod (placeholder motion using the same move() API;
        # assumes time is imported, as in the other examples in this listing)
        robot.move(0, -0.3)
        time.sleep(1)
        robot.move(0, 0)

    #TODO: check if 'no' in voice input
    elif 'no' in answer.lower():
        #TODO: robot should shake its head (placeholder motion: pan left, then back)
        robot.move(-0.3, 0)
        time.sleep(1)
        robot.move(0, 0)
Code example #9
def main():
    # We need to initialize the ROS environment for Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()

    robot.center()
    time.sleep(1)

    #TODO: change the values in left
    robot.left(0.2)
    time.sleep(1)  # wait a second

    #TODO: change the values in right
    robot.right(0.2)
    time.sleep(1)
Code example #10
    def __init__(self):
        self.initParam()
        self.pan_speed = 0
        self.tilt_speed = 0
        self.latestImage = None

        # init 
        self.robot = Robot()
        self.ball_detector = BallDetector()
        self.camera = Camera()
Code example #11
def main():
    # We need to initialize the ROS environment for Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()

    #TODO: change the values in move
    robot.move(0, 0)
    time.sleep(1)
    robot.move(1, 1)
Code example #12
    def init_gui(self):
        self.setting_frame = Frame(self.root)
        
        self.robot = Robot()
        Label(self.setting_frame, text="类型").grid(row=0, column=0, pady=20, sticky=W)  # 类型 = "Type"
        self.robot_model = ttk.Combobox(self.setting_frame, values=MODEL_LIST)
        self.robot_model.grid(row=0, column=1, pady=20)
        self.robot_model.current(0)

        Label(self.setting_frame, text="端口").grid(row=1, column=0, sticky=W)  # 端口 = "Port"
        self.port = ttk.Combobox(self.setting_frame, values=PORT_LIST)
        self.port.grid(row=1, column=1, sticky=W+E)
        self.port.current(0)

        Label(self.setting_frame, text="波特率").grid(row=2, column=0, pady=5, sticky=W)  # 波特率 = "Baud rate"
        self.baudrate = ttk.Combobox(self.setting_frame, values=BAUDRATE_LIST)
        self.baudrate.grid(row=2, column=1, pady=5, sticky=W+E)
        self.baudrate.current(4)    # default option 9600

        Button(self.setting_frame, text="连接", width=10, command=self.start_main).grid(row=3, column=1, pady=10, sticky=E)  # 连接 = "Connect"
        
        self.setting_frame.pack()
Code example #13
File: armMaster.py  Project: unirlm/apx_robot_arm
    def init_gui(self):
        self.setting_frame = Frame(self.root)
        
        self.robot = Robot()
        Label(self.setting_frame, text="类型").grid(row=0, column=0, pady=20, sticky=W)  # 类型 = "Type"
        self.robot_model = ttk.Combobox(self.setting_frame, values=MODEL_LIST)
        self.robot_model.current(0)
        self.robot_model.grid(row=0, column=1, pady=20)

        self.port = StringVar(value="/dev/tty")
        Label(self.setting_frame, text="端口").grid(row=1, column=0, sticky=W)  # 端口 = "Port"
        self.port_entry = Entry(self.setting_frame, textvariable=self.port)
        self.port_entry.grid(row=1, column=1, sticky=W+E)

        self.baudrate = StringVar(value="9600")
        Label(self.setting_frame, text="波特率").grid(row=2, column=0, pady=5, sticky=W)  # 波特率 = "Baud rate"
        self.baudrate_entry = Entry(self.setting_frame, textvariable=self.baudrate)
        self.baudrate_entry.grid(row=2, column=1, pady=5, sticky=W+E)

        Button(self.setting_frame, text="连接", width=10, command=self.start_main).grid(row=3, column=1, pady=10, sticky=E)  # 连接 = "Connect"
        
        self.setting_frame.pack()
Code example #14
def main():
    # We need to initialize the ROS environment for Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()

    
    time.sleep(1) # wait a second

    #TODO: remember the current position
    # (one possible completion, using getPosition() as the other examples here do)
    curr_pos = robot.getPosition()
    curr_pan = curr_pos[0]
    curr_tilt = curr_pos[1]

    #TODO: look somewhere else other than the current position
    robot.move(curr_pan + 0.5, curr_tilt)  # placeholder offset; any other position works
    time.sleep(1) # wait a second

    #TODO: return back to the current position stored
    robot.move(curr_pan, curr_tilt)
    time.sleep(1) # wait a second
Code example #15
class System:
    def __init__(self):
        print('Hello')
        self.robot = Robot()
        # self.camera = Camera()
        rospy.Subscriber("/key_vel", Twist, self.keyCallback, queue_size=10)

    def keyCallback(self, data):

        if data.angular.z > 0:
            self.robot.left()
        elif data.angular.z < 0:
            self.robot.right()
        elif data.linear.x > 0:
            self.robot.down()
        elif data.linear.x < 0:
            self.robot.up()
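The class above only subscribes to /key_vel; nothing in the snippet publishes to it. For testing without a separate teleop node, a minimal publisher sketch using the standard rospy/geometry_msgs API (the topic name is taken from the subscriber above):

import rospy
from geometry_msgs.msg import Twist

def publish_key(angular_z=0.0, linear_x=0.0):
    # One-shot publisher that mimics a key press on /key_vel
    pub = rospy.Publisher("/key_vel", Twist, queue_size=10)
    rospy.sleep(0.5)  # give the subscriber time to connect
    msg = Twist()
    msg.angular.z = angular_z  # > 0 turns left, < 0 turns right (per keyCallback above)
    msg.linear.x = linear_x    # > 0 moves down, < 0 moves up (per keyCallback above)
    pub.publish(msg)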
Code example #16
def main():
    #We need to initialize the ROS environment for Robot and camera to connect/communicate
    ROSEnvironment()
    #Initialize camera
    camera = Camera()
    focal_length = 640

    #start camera
    camera.start()

    robot = Robot()
    #start robot
    robot.start()
    #initialize face detector
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    #counter
    cnt = 1
    #loop
    while True:
        #get image
        img = camera.getImage()

        #gets face detections
        dets = face_detector.detect(img)

        #draw all face detections
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()),
                          (det.right(), det.bottom()), color_green, 3)

        #we only use 1 face to estimate pose
        if (len(dets) > 0):
            det0 = dets[0]
            #estimate pose
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, det0)
            #draw pose
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)

            #converts 2d coordinates to 3d coordinates on camera axis
            (x, y, z) = camera.convert2d_3d((det0.left() + det0.right()) / 2,
                                            (det0.top() + det0.bottom()) / 2)
            print(x, y, z, 'on camera axis')
            #converts 3d coordinates on camera axis to 3d coordinates on robot axis
            (x, y, z) = camera.convert3d_3d(x, y, z)
            print(x, y, z, 'on robot axis')
            #move robot
            robot.lookatpoint(x, y, z, 4)

        cv2.imshow("Frame", img[..., ::-1])
        key = cv2.waitKey(1)
        if key > 0:
            break
Code example #17
def main():
    # We need to initialize the ROS environment for Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()

    #The parameters are 3d coordinates in the real world
    #TODO Change the values in lookatpoint
    robot.lookatpoint(1, 1, 1)
Code example #18
def main():
    # We need to initialize the ROS environment for Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()
    # Initialize face detector
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()
    robot.move(0, 0.5)

    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = face_detector.detect(img)

        # Draw all face detections
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()),
                          (det.right(), det.bottom()), color_green, 3)

        # We only use 1 face to estimate pose
        if (len(dets) > 0):
            # Estimate pose
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, dets[0])
            # Draw pose
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)

            #TODO: find a yaw value from rotation_vector
            print(rotation_vector)
            yaw = rotation_vector[2]

            #TODO: remember current position
            print("Pan angle is ",
                  robot.getPosition()[0], "Tilt angle is",
                  robot.getPosition()[1])
            current_pan = robot.getPosition()[0]
            current_tilt = robot.getPosition()[1]

            #TODO: insert the condition for looking at right
            if yaw > 0.3:
                print('You are looking at right.')
                #TODO: add motion for looking at right
                robot.move(0.5, 0.5)

            #TODO: insert the condition for looking at left
            elif yaw < -0.3:
                print('You are looking at left.')
                #TODO: add motion for looking at left
                robot.move(-0.5, 0.5)

            time.sleep(3)

            #TODO: Looking at the position that is stored.
            robot.move(current_pan, current_tilt)
            time.sleep(5)

        # Show image
        cv2.imshow("Frame", img[..., ::-1])
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
Code example #19
#!/usr/bin/env python
import cv2
import sys
from example_2_ball_detector import BallDetector
sys.path.append('..')
from lib.camera_v2 import Camera
from lib.robot import Robot
from lib.ros_environment import ROSEnvironment

# Initialize camera
camera = Camera()
# Initialize robot
robot = Robot()


def main():
    # We need to initialize the ROS environment for Robot and camera to connect/communicate
    ROSEnvironment()
    # Start camera
    camera.start()
    # Start robot
    robot.start()
    # Initialize ball detector
    ball_detector = BallDetector()

    # Loop
    while True:
        # Get image from camera
        img = camera.getImage()
        # Get the image with the ball detected, plus the ball's center
        (img, center) = ball_detector.detect(img, 640)
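The snippet is cut off inside the loop. A plausible continuation, following the display and tracking pattern the other examples in this listing use (convert2d_3d / convert3d_3d / lookatpoint are the same Camera and Robot methods as in code example #16; this is a sketch, not the original file's ending):

        # If a ball was found, convert its image position to a 3D point and look at it
        if center is not None:
            (x, y, z) = camera.convert2d_3d(center[0], center[1])
            (x, y, z) = camera.convert3d_3d(x, y, z)
            robot.lookatpoint(x, y, z)

        # Show the image and exit when a key is pressed
        cv2.imshow("Frame", img[..., ::-1])
        key = cv2.waitKey(1)
        if key > 0:
            break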
Code example #20
    def __init__(self):
        print('Hello')
        self.robot = Robot()
        # self.camera = Camera()
        rospy.Subscriber("/key_vel", Twist, self.keyCallback, queue_size=10)
Code example #21
def main():
    ROSEnvironment()
    camera = Camera()
    camera.start()
    robot = Robot()
    robot.start()

    # Get image from camera
    cam_image = camera.getImage()

    # Get width and height of image
    input_image = cam_image
    width = input_image.shape[1]
    height = input_image.shape[0]

    # Load the deep neural network from the weights and config files
    net = cv2.dnn.readNet(weight_path, cfg_path)

    #creates a "bob" that is the input image after mean subtraction, normalizing, channel swapping
    #0.00392 is the scale factor
    #(416,416) is the size of the output image
    #(0,0,0) are the mean values that will be subtracted for each channel RGB
    blob = cv2.dnn.blobFromImage(input_image,
                                 0.00392, (416, 416), (0, 0, 0),
                                 True,
                                 crop=False)

    #Inputs blob into the neural network
    net.setInput(blob)

    #gets the output layers "yolo_82', 'yolo_94', 'yolo_106"
    #output layer contains the detection/prediction information
    layer_names = net.getLayerNames()
    #getUnconnectedOutLayers() returns indices of unconnected layers
    #layer_names[i[0] - 1] gets the name of the layers of the indices
    #net.getUnconnectedOutLayers() returns [[200], [227], [254]]
    output_layers = [
        layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()
    ]

    #Runs forward pass to compute output of layer
    #returns predictions/detections at 32, 16 and 8 scale
    preds = net.forward(output_layers)

    #Initialize list that contains class id, confidence values, bounding boxes
    class_ids = []
    confidence_values = []
    bounding_boxes = []

    # TODO: initialize confidence threshold and threshold for non maximal suppression
    conf_threshold = 0.5
    nms_threshold = 0.4

    #for each scale, we go through the detections
    for pred in preds:
        for detection in pred:
            #Use the max score as confidence
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            #Check if confidence is greater than threshold
            if confidence > conf_threshold:
                #Compute x, y, width, height, class id, confidence value
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                x = center_x - w / 2
                y = center_y - h / 2
                class_ids.append(class_id)
                confidence_values.append(float(confidence))
                bounding_boxes.append([x, y, w, h])

    # Apply non-maximal suppression with the chosen thresholds
    indices = cv2.dnn.NMSBoxes(bounding_boxes, confidence_values,
                               conf_threshold, nms_threshold)

    #draw results
    #tracked_object flag for if object is already tracked
    tracked_object = 0
    for i in indices:
        i = i[0]
        box = bounding_boxes[i]
        x = box[0]
        y = box[1]
        w = box[2]
        h = box[3]
        center_x = x + w / 2.0
        center_y = y + h / 2.0
        classid = class_ids[i]
        class_name = str(classes[classid])

        print(class_name)
        conf_value = confidence_values[i]
        draw_boundingbox(input_image, classid, conf_value, round(x), round(y),
                         round(x + w), round(y + h))

    #show image
    cv2.imshow("Object Detection Window", input_image[..., ::-1])
    key = cv2.waitKey(0)
    cv2.imwrite("detected_object.jpg", input_image)
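Note: layer_names[i[0] - 1] (and i = i[0] in the drawing loop) assumes an older OpenCV where getUnconnectedOutLayers() returns nested indices like [[200], [227], [254]]. In recent OpenCV 4.x releases it returns a flat array, so the same lookup would be:

output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]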
Code example #22
from lib.robot import Robot
import sys

interactive = False
if len(sys.argv) > 1:
    if sys.argv[1] == "-i":
        interactive = True
robot = Robot(interactive)
robot.start()

Code example #23
def main():
    faceInCenter_count = 0
    current_pan = 0
    current_tilt = 0
    #We need to initialize the ROS environment for Robot and camera to connect/communicate
    ROSEnvironment()
    #Initialize camera
    camera = Camera()
    #start camera
    camera.start()
    #Initialize robot
    robot = Robot()
    #start robot
    robot.start()
    #initialize face detector
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')
    #face detection result
    dets = None
    # current tracking state
    Tracking = False
    # the time when motion for looking left/right runs
    motion_start_time = None
    cnt = 0
    #loop
    while True:
        #get image
        img = camera.getImage()
        if frame_skip(img):
            continue
        #gets detect face
        dets = face_detector.detect(img)

        #If the number of face detected is greater than 0
        if len(dets)>0:

            #We select the first face detected
            tracked_face = dets[0]
            #Get the x, y position
            tracked_face_x = (tracked_face.left() + tracked_face.right()) / 2
            tracked_face_y = (tracked_face.top() + tracked_face.bottom()) / 2

            # Estimate head pose
            (success, rotation_vector, translation_vector, image_points) = face_detector.estimate_pose(img, tracked_face)

            # Draw bounding box
            cv2.rectangle(img,(tracked_face.left(), tracked_face.top()), (tracked_face.right(), tracked_face.bottom()), color_green, 3)
            # Draw head pose
            img = face_detector.draw_pose(img, rotation_vector, translation_vector, image_points)

            #Check if the head is in the center; returns how many times the head was in the center
            faceInCenter_count = faceInCenter(camera, tracked_face_x, tracked_face_y, faceInCenter_count)
            print(faceInCenter_count)
            print("{} in the center for {} times".format("Face has been", faceInCenter_count))

            #Track until the head has been in the center for a certain number of frames and no head motion is active
            if faceInCenter_count < 5 and motion_start_time is None:
                Tracking = True
            else:
                Tracking = False

            #Start tracking
            if Tracking:
                print("Tracking the Person")
                #TODO: converts 2d coordinates to 3d coordinates on camera axis
                (x, y, z) = camera.convert2d_3d(tracked_face_x, tracked_face_y)
                #TODO: converts 3d coordinates on camera axis to 3d coordinates on robot axis
                (x, y, z) = camera.convert3d_3d(x, y, z)
                #TODO: move robot to track your face
                robot.lookatpoint(x, y, z, 1)

            #When tracking is turned off, estimate the head pose and perform head motion if the conditions are met
            elif Tracking is False:
                print("Stopped Tracking, Starting Head Pose Estimation")
                # yaw is angle of face on z-axis
                yaw = rotation_vector[2]

                #Condition for user looking towards the right
                if yaw > 0.3 and motion_start_time is None:
                    print('You are looking towards the right.')
                    #TODO: Remember the current position
                    current_position = robot.getPosition()
                    current_pan = current_position[0]
                    current_tilt = current_position[1]
                    print("Starting head motion to look right")
                    #TODO: add motion for looking right
                    robot.move(0.8, 0.5)
                    motion_start_time = current_time()

                #Condition for user looking towards the left
                elif yaw < -0.3 and motion_start_time is None:
                    print('You are looking towards the left.')
                    #TODO: Remember the current position
                    current_position = robot.getPosition()
                    current_pan = current_position[0]
                    current_tilt = current_position[1]
                    print("Starting head motion to look left")
                    #TODO: add motion for looking left
                    robot.move(-0.8, 0.5)
                    motion_start_time = current_time()

        #When head motion is activated we start the counter
        if motion_start_time is not None:
            print("{} and it's been {} seconds".format("Look motion activated", current_time() - motion_start_time))

        #After 3 seconds, we have to return to the current position
        if motion_start_time is not None and (current_time() - motion_start_time) > 3:
            #Looking at the position that is stored.
            print("Robot is going back")
            #TODO: make the robot move to the stored current position
            robot.move(current_pan, current_tilt)
            motion_start_time = None
            #Tracking = True

        #Start tracking again once enough frames have passed
        #(the counter must increment every frame, not only inside the if, or it never passes 10)
        cnt = cnt + 1
        if cnt > 10 and motion_start_time is None:
            Tracking = True
        sleep(0.08)

        #show image
        cv2.imshow("Frame", img[...,::-1])
        #Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
Code example #24
class BallTrackingSystem:
    def __init__(self):
        self.initParam()
        self.pan_speed = 0
        self.tilt_speed = 0

        # init
        self.robot = Robot()
        self.ball_detector = BallDetector()
        self.camera = Camera(self.imageCallback)

        # move robot head to center
        self.robot.center()

        # load timer
        rospy.Timer(rospy.Duration(1.0 / self.rate), self.robotUpdate)

    def robotUpdate(self, event):
        # self.robot.test()
        self.robot.move(self.pan_speed, self.tilt_speed)

    def imageCallback(self, frame):
        results = self.ball_detector.detect(frame, 640)
        frame = results[0]
        if results[1] is not None:
            self.set_joint_cmd(results)
        return frame

    def set_joint_cmd(self, msg):

        ball = msg[1]
        frame = msg[0]
        (image_height, image_width) = frame.shape[:2]

        target_offset_x = ball[0] - image_width / 2
        target_offset_y = ball[1] - image_height / 2

        try:
            percent_offset_x = float(target_offset_x) / (float(image_width) /
                                                         2.0)
            percent_offset_y = float(target_offset_y) / (float(image_height) /
                                                         2.0)
        except ZeroDivisionError:
            percent_offset_x = 0
            percent_offset_y = 0

        if abs(percent_offset_x) > self.pan_threshold:
            if target_offset_x > 0:
                self.pan_speed = min(
                    self.max_joint_speed,
                    max(0, self.gain_pan * abs(percent_offset_x)))
            else:
                self.pan_speed = -1 * min(
                    self.max_joint_speed,
                    max(0, self.gain_pan * abs(percent_offset_x)))
        else:
            self.pan_speed = 0

        if abs(percent_offset_y) > self.tilt_threshold:
            if target_offset_y > 0:
                self.tilt_speed = min(
                    self.max_joint_speed,
                    max(0, self.gain_tilt * abs(percent_offset_y)))
            else:
                self.tilt_speed = -1 * min(
                    self.max_joint_speed,
                    max(0, self.gain_tilt * abs(percent_offset_y)))
        else:
            self.tilt_speed = 0

    def initParam(self):
        self.rate = rospy.get_param("~rate", 20)

        # Joint speeds are given in radians per second
        self.max_joint_speed = rospy.get_param('~max_joint_speed', 0.1)

        # The pan/tilt thresholds indicate what percentage of the image window
        # the ROI needs to be off-center before we make a movement
        self.pan_threshold = rospy.get_param("~pan_threshold", 0.05)
        self.tilt_threshold = rospy.get_param("~tilt_threshold", 0.05)

        # The gain_pan and gain_tilt parameter determine how responsive the
        # servo movements are. If these are set too high, oscillation can result.
        self.gain_pan = rospy.get_param("~gain_pan", 1.0)
        self.gain_tilt = rospy.get_param("~gain_tilt", 1.0)

        self.max_pan_angle_radian = rospy.get_param("~max_pan_angle_radian",
                                                    1.0)
        self.max_tilt_angle_radian = rospy.get_param("~max_tilt_angle_radian",
                                                     1.0)
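BallTrackingSystem is driven entirely by its rospy.Timer and the camera's image callback, so it only needs to be constructed and kept alive. A minimal entry point sketch, using the same ROSEnvironment helper as the other examples in this listing:

def main():
    # Set up the ROS environment, construct the tracker, and keep the process
    # alive so the timer and image callback keep firing
    ROSEnvironment()
    BallTrackingSystem()
    rospy.spin()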
Code example #25
    def __init__(self):

        self.robot = Robot()
Code example #26
def main():
    # We need to initialize the ROS environment for Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()
    # Initialize face detector
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()
    robot.move(0,0.3)


    # the time when motion runs
    motion_start_time = None

    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = face_detector.detect(img)

        # Draw all face detections
        for det in dets:
            cv2.rectangle(img,(det.left(), det.top()), (det.right(), det.bottom()), color_green, 3)

        # We only use 1 face to estimate pose
        if(len(dets)>0):
            # Estimate pose
            (success, rotation_vector, translation_vector, image_points) = face_detector.estimate_pose(img, dets[0])
            # Draw pose
            img = face_detector.draw_pose(img, rotation_vector, translation_vector, image_points)

            #The yaw value is the third component (index 2) of the rotation_vector
            print(rotation_vector)
            yaw = rotation_vector[2]

            #prints the current position
            print ("Pan angle is ",robot.getPosition()[0], "Tilt angle is", robot.getPosition()[1])

            #condition when the user is looking right
            if yaw > 0.3 and motion_start_time is None:
                print('You are looking at right.')
                #TODO: store the current position in current_pan and current_tilt
                current_pos = robot.getPosition()
                current_pan = current_pos[0]
                current_tilt = current_pos[1]

                #TODO: add motion for looking right (placeholder values, as in code example #18)
                robot.move(0.5, 0.5)

                motion_start_time = current_time()

            #condition when the user is looking left
            elif yaw < -0.3 and motion_start_time is None:
                print('You are looking at left.')
                #TODO: store the current position in current_pan and current_tilt
                current_pos = robot.getPosition()
                current_pan = current_pos[0]
                current_tilt = current_pos[1]

                #TODO: add motion for looking at left (placeholder values, as in code example #18)
                robot.move(-0.5, 0.5)

                motion_start_time = current_time()
            if motion_start_time is not None:
                print(current_time() - motion_start_time)

        # After the motion runs, check if 3 seconds have passed.
    if motion_start_time is not None and current_time() - motion_start_time > 3:
            #TODO: move the robot so that it returns to the stored current position
            robot.move(current_pan, current_tilt)
            motion_start_time = None

        sleep(0.05)
        # Show image
        cv2.imshow("Frame", img[...,::-1])
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
Code example #27
class KeyboardController():
    def __init__(self):
        self.robot = Robot()
        self.current_angles = [0 for i in range(NUM_JOINTS)]
        self.target_angles = [0 for i in range(NUM_JOINTS)]
        self.target_end_effector_state = EndEffectorStateList.IDLE
        self.selected = UD2  # default select JOINT2
        self.shutdown = False

    def setup(self):
        self.__load_params()
        self.__setup_robot()

    def start(self):
        print(msg)

        feedback_thread = threading.Thread(name='feedback',
                                           target=self.__feedback)
        feedback_thread.setDaemon(True)
        feedback_thread.start()

        listen_key_thread = threading.Thread(name='listen_key',
                                             target=self.__listen_key)
        listen_key_thread.setDaemon(True)
        listen_key_thread.start()

        try:
            while not self.shutdown:
                if not operator.eq(self.current_angles, self.target_angles):
                    self.robot.set_joint_angles(self.target_angles)

                if self.target_end_effector_state != EndEffectorStateList.IDLE:
                    self.robot.set_end_effector_state(
                        self.target_end_effector_state)

                self.target_end_effector_state = EndEffectorStateList.IDLE
                self.cmd = ''  # clear command

        except Exception as e:
            print(e)

        finally:
            shutdown()

    def __load_params(self):
        # get parameters from server
        self.param_arm_model = rospy.get_param('~arm_model', 'unspecified')
        self.param_port = rospy.get_param('~port', '/dev/ttyUSB0')
        self.param_baudrate = rospy.get_param('~baudrate', 9600)

        if self.param_arm_model == 'unspecified':
            print('*** Error: Robot arm model not specified.')
            quit()

    def __setup_robot(self):
        # initialize robot
        if not self.robot.configure(name=self.param_arm_model,
                                    port=self.param_port,
                                    baudrate=self.param_baudrate):
            shutdown()

        if not self.robot.initialize():
            shutdown()

        if not self.robot.start():
            shutdown()

        print("Wait for robot to reset.")
        sleep(ROBOT_INIT_TIME)

    def __feedback(self):
        t = now()
        while (1):
            if (now() - t) > UPDATE_INTERVAL:
                current_angles = self.robot.get_joint_angles()
                if current_angles:
                    for i in range(NUM_JOINTS):
                        self.current_angles[i] = current_angles[i]
                t = now()

    def __listen_key(self):
        try:
            while (1):
                key = getKey()
                if key == '\x03':
                    break

                if key in 'wasd':
                    if key == 'a':  # move left
                        if self.target_angles[LR] > self.robot.robot_config(
                        )['Joints'][LR]['params']['min_angle']:
                            self.target_angles[LR] -= 1
                    elif key == 'd':  # move right
                        if self.target_angles[LR] < self.robot.robot_config(
                        )['Joints'][LR]['params']['max_angle']:
                            self.target_angles[LR] += 1
                    elif key == 'w':  # move up
                        if self.target_angles[
                                self.selected] > self.robot.robot_config(
                                )['Joints'][
                                    self.selected]['params']['min_angle']:
                            self.target_angles[self.selected] -= 1
                    elif key == 's':  # move down
                        if self.target_angles[
                                self.selected] < self.robot.robot_config(
                                )['Joints'][
                                    self.selected]['params']['max_angle']:
                            self.target_angles[self.selected] += 1
                elif key == 'u':
                    self.selected = UD1
                    print('**** JOINT 1 selected.')
                elif key == 'i':
                    self.selected = UD2
                    print('**** JOINT 2 selected.')
                elif key == 'j':
                    self.target_end_effector_state = EndEffectorStateList.GRIP
                elif key == 'k':
                    self.target_end_effector_state = EndEffectorStateList.RELEASE
                else:
                    continue

        except Exception as e:
            print(e)

        finally:
            self.shutdown = True
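A minimal sketch of how this controller might be launched (it reads ~arm_model, ~port, and ~baudrate from the ROS parameter server, so it is presumably run as a node; the node name is a placeholder):

if __name__ == '__main__':
    rospy.init_node('keyboard_controller')  # placeholder node name
    controller = KeyboardController()
    controller.setup()  # load parameters and configure/initialize/start the robot
    controller.start()  # spawn the feedback and key-listener threads, then run the command loop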
Code example #28
File: armMaster.py  Project: unirlm/apx_robot_arm
class Application(object):
    def __init__(self):
        self.root = tk.Tk()
        self.root.geometry(WINDOW_SIZE_1)
        self.root.title("ArmMaster - OPENARM control console")
        self.root.resizable(0, 0)
        sysstr = platform.system()
        if sysstr == "Windows":
            self.root.iconbitmap(bitmap='res/logo.ico')
        elif sysstr == "Linux":
            self.root.iconbitmap(bitmap='@res/logo.xbm')

        self.init_gui()

    def init_gui(self):
        self.setting_frame = Frame(self.root)
        
        self.robot = Robot()
        Label(self.setting_frame, text="类型").grid(row=0, column=0, pady=20, sticky=W)  # 类型 = "Type"
        self.robot_model = ttk.Combobox(self.setting_frame, values=MODEL_LIST)
        self.robot_model.current(0)
        self.robot_model.grid(row=0, column=1, pady=20)

        self.port = StringVar(value="/dev/tty")
        Label(self.setting_frame, text="端口").grid(row=1, column=0, sticky=W)  # 端口 = "Port"
        self.port_entry = Entry(self.setting_frame, textvariable=self.port)
        self.port_entry.grid(row=1, column=1, sticky=W+E)

        self.baudrate = StringVar(value="9600")
        Label(self.setting_frame, text="波特率").grid(row=2, column=0, pady=5, sticky=W)  # 波特率 = "Baud rate"
        self.baudrate_entry = Entry(self.setting_frame, textvariable=self.baudrate)
        self.baudrate_entry.grid(row=2, column=1, pady=5, sticky=W+E)

        Button(self.setting_frame, text="连接", width=10, command=self.start_main).grid(row=3, column=1, pady=10, sticky=E)  # 连接 = "Connect"
        
        self.setting_frame.pack()

    def start_main(self):
        if self.robot_model.current() == -1:
            return

        model = self.robot_model.get()
        port = self.port.get()
        baudrate = int(self.baudrate.get())

        print("Configuring robot ...")
        if not self.robot.configure(model, port, baudrate):
            return
        
        print("Initializing robot ...")
        if not self.robot.initialize():
            return
        
        sleep(2)
        print("Loading ...")
        self.robot.start()

        config_file = 'config/'+model+'.yaml'
        with open(config_file) as f:
            conf = yaml.safe_load(f)  # the with-block closes the file; no explicit close needed

        self.joints = conf['Joints']
        self.num_joints = len(self.joints)
        self.joint_scales = []
        self.joint_angles = [IntVar() for i in range(self.num_joints)]
        self.current_angles = [IntVar() for i in range(self.num_joints)]
        self.joint_progresses = []
        self.joint_feedback = []

        self.main_frame = Frame(self.root)

        # control container
        self.control_container = Frame(self.main_frame)
        # joint scales
        for i in range(self.num_joints):
            Label(self.control_container, text=self.joints[i]["name"]).grid(row=i, column=0, padx=10, pady=10)
            sc = Scale(self.control_container, orient=HORIZONTAL, sliderlength=10, resolution=1, \
                            from_=self.joints[i]['params']['min_angle'], \
                                to=self.joints[i]['params']['max_angle'], \
                                    variable=self.joint_angles[i])
            sc.grid(row=i, column=1, pady=10)
            sc.set(self.joints[i]['params']['norm'])
            self.joint_scales.append(sc)
        
        # set button
        Button(self.control_container, text="设置", activebackground='grey', command=self.set_joints).grid(row=6, column=0, pady=20, sticky=W+E+N+S)  # 设置 = "Set"
        Button(self.control_container, text="重置", activebackground='grey', command=self.reset_joints).grid(row=6, column=1, pady=20, sticky=W+E+N+S)  # 重置 = "Reset"
        
        self.control_container.grid(row=0, column=0, rowspan=6, columnspan=2, padx=10, sticky=W+E+N+S)

        # monitor container
        self.monitor_container = Frame(self.main_frame)
        # current angles info
        Label(self.monitor_container, text='当前关节角度: ').grid(row=0, column=1)  # 当前关节角度 = "Current joint angles"
        for i in range(self.num_joints):
            pb = ttk.Progressbar(self.monitor_container, orient=HORIZONTAL, maximum=180, length=300, mode='determinate', variable=self.current_angles[i])
            pb.grid(row=i+1, column=1, pady=20)
            self.joint_progresses.append(pb)
            lb = Label(self.monitor_container, width=10, textvariable=self.current_angles[i])
            lb.grid(row=i+1, column=2, pady=20)
            self.joint_feedback.append(lb)

        self.monitor_container.grid(row=0, column=2, rowspan=6, columnspan=3, padx=30, sticky=W+E+N+S)

        self.setting_frame.pack_forget()
        self.root.geometry(WINDOW_SIZE_2)
        self.main_frame.pack()

        # monitor thread
        self.monit_loop = threading.Thread(name='monitor', target=self.monitor)
        self.monit_loop.setDaemon(True)
        self.monit_loop.start()

    def loop(self):
        self.root.mainloop()
    
    def init_robot(self):
        self.robot.configure()
        self.robot.initialize()
        sleep(2)
        self.robot.start()

    def set_joints(self):
        angles = []
        for i in range(self.num_joints):
            angles.append(self.joint_angles[i].get())
        self.robot.set_joint_angles(angles)

    def reset_joints(self):
        angles = []
        for i in range(self.num_joints):
            angle = self.joints[i]['params']['norm']
            self.joint_angles[i].set(angle)
            angles.append(angle)
        self.robot.set_joint_angles(angles) 

    def monitor(self):
        t = now()
        while True:
            if (now() - t) > UPDATE_INTERVAL:
                current_angles = self.robot.get_joint_angles()
                if current_angles:
                    for i in range(self.num_joints):
                        self.current_angles[i].set(current_angles[i])
                    t = now()
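The entry point is not shown in this listing. A minimal sketch of how the Application class might be launched, using only its own methods:

if __name__ == '__main__':
    app = Application()  # builds the Tk window and the settings frame
    app.loop()  # enters the Tk main loop; start_main runs when the "连接" (Connect) button is clicked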