    def __init__(self):

        # If the head camera is not available, close the left hand camera to
        # free a slot for it (Baxter can only run two cameras at a time).
        try:
            CameraController('head_camera')
        except AttributeError:
            left_cam = CameraController('left_hand_camera')
            left_cam.close()

        head_cam = CameraController('head_camera')
        head_cam.resolution = (1280, 800)
        head_cam.open()

        # Create the head interface so the pan angle can be adjusted later
        self.head = Head()
        self.head.set_pan(angle=0.)

        # Field of view
        self.CENTER_X = int(1280 / 2)
        self.FOV = pi / 3

        # Range away from center you want the arm to stop within
        self.FACE_RANGE = 50

        self.FACE_CASCADE = cv2.CascadeClassifier(
            'src/baxter_face_tracking_demos/src' +
            '/haarcascade_frontalface_default.xml')
        self.EYE_CASCADE = cv2.CascadeClassifier(
            'src/baxter_face_tracking_demos/src/haarcascade_eye.xml')

        self.subscription = rospy.Subscriber('/cameras/head_camera/image',
                                             Image, self.send)
        rospy.on_shutdown(self.leave_subs_n_pubs)
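    # The self.send callback bound above is not part of this snippet. A
    # hypothetical sketch, assuming it detects a face with the cascade and
    # pans the head toward it (the cv_bridge dependency and the sign of the
    # pan correction are assumptions, not taken from the source):
    def send(self, msg):
        from cv_bridge import CvBridge  # assumed dependency for this sketch
        frame = CvBridge().imgmsg_to_cv2(msg, desired_encoding='bgr8')
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.FACE_CASCADE.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            offset = (x + w // 2) - self.CENTER_X
            if abs(offset) > self.FACE_RANGE:
                # Map the pixel offset to an angle using the 1280 px width
                # and the pi/3 field of view defined above.
                self.head.set_pan(self.head.pan() - offset * self.FOV / 1280.)
            break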
def track():
    def leave_subs_n_pubs():
        subscription.unregister()
        right_hand_pub.unregister()
        display_pub.unregister()

    # Initializing nodes
    rospy.init_node('wave_for_faces', anonymous=True)

    # Defining global publishers
    global display_pub
    display_pub = rospy.Publisher('/robot/xdisplay', Image, queue_size=0)
    global right_hand_pub
    right_hand_pub = rospy.Publisher('/ein/right/forth_commands',
                                     String,
                                     queue_size=0)
    # Needs time to start up
    sleep(3)
    right_hand_pub.publish(String('goHome'))
    # Needs time to go home
    sleep(3)

    # Setting and opening camera
    global camera_name
    camera_name = 'head_camera'
    head_cam = CameraController(camera_name)
    head_cam.resolution = (1280, 800)
    head_cam.open()

    rospy.on_shutdown(leave_subs_n_pubs)

    # Setting subscription
    subscription = rospy.Subscriber('/cameras/' + camera_name + '/image',
                                    Image, send)
    rospy.spin()
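# The send callback used by the subscriber above is not included in this
# snippet. A minimal sketch, assuming it only mirrors the head camera feed to
# Baxter's screen (both topics carry sensor_msgs/Image, so no conversion is
# needed):
def send(msg):
    display_pub.publish(msg)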
Example 3
    def __init__(self, usegripper = True):
        rospy.init_node("Baxter")
        threading.Thread(target=self.__jointtraj_server).start()
        self.trajrgt = Trajectory("right")
        self.trajlft = Trajectory("left")
        if usegripper:
            self.__right_hand = Gripper("right")
            self.__left_hand = Gripper("left")
            self.__right_hand.calibrate()
            self.__left_hand.calibrate()

        self.__right_limb = Limb("right")
        self.__left_limb = Limb("left")
        self.baxter = RobotEnable()
        self.__enabled = self.baxter.state().enabled
        self.enable_baxter()

        self.img = None

        for camname in ['head_camera', 'left_hand_camera', 'right_hand_camera']:
            try:
                cam = CameraController(camname)
                # cam.resolution = (1280, 800)
                cam.resolution = (320, 200)
            except AttributeError:
                # Camera not available (e.g. it could not be located); skip it.
                pass

        print("baxter opened")
def set_camera(camera_name, res):
    print "setting", camera_name
    camera = CameraController(camera_name)
    # camera.resolution = (1280,800)
    camera.resolution = res
    camera.exposure = 50
    camera.gain = 30  # or: camera.gain = CameraController.CONTROL_AUTO
    camera.white_balance_red = CameraController.CONTROL_AUTO
    camera.white_balance_green = CameraController.CONTROL_AUTO
    camera.white_balance_blue = CameraController.CONTROL_AUTO
    camera.open()
    print "done", camera_name
Example 5
    def __init__(self, arm, starting_pose):
        # Range away from center you want the arm to stop within
        self.FACE_RANGE = 50
        # Denominator used to scale rotational movement into small steps.
        self.MOVEMENT_DENOMINATOR = 24

        self.face_count = 0

        # Starts at the end point so that it will see a new face automatically
        self.face_time_counter = 80

        rospy.on_shutdown(self.leave_subs_n_pubs)

        # Finding center point
        desired_resolution = (640, 400)

        # If the arm's camera is not available, the other arm's camera will be
        # closed.
        try:
            CameraController(arm + '_hand_camera')
        except AttributeError:
            if arm == 'right':
                left_cam = CameraController('left_hand_camera')
                left_cam.close()
            elif arm == 'left':
                right_cam = CameraController('right_hand_camera')
                right_cam.close()

        cam = CameraController(arm + '_hand_camera')
        cam.resolution = desired_resolution
        cam.open()

        # Getting the center of the camera
        (x, y) = desired_resolution
        self.CENTER_X = x / 2
        self.CENTER_Y = y / 2

        # Position of last face seen.  Initialized as something impossible.
        self.last_face_x = -maxint
        self.last_face_y = -maxint

        self.FACE_CASCADE = cv2.CascadeClassifier(
            'src/baxter_face_tracking_demos/src' +
            '/haarcascade_frontalface_default.xml')
        self.EYE_CASCADE = cv2.CascadeClassifier(
            'src/baxter_face_tracking_demos/src/haarcascade_eye.xml')

        self.cam_sub = rospy.Subscriber(
            '/cameras/' + arm + '_hand_camera/image', Image, self.follow)
        self.display_pub = rospy.Publisher('/robot/xdisplay',
                                           Image,
                                           queue_size=0)
        self.hand_pub = rospy.Publisher('/ein/' + arm + '/forth_commands',
                                        String,
                                        queue_size=0)
        sleep(3)
        # Assume starting position
        self.hand_pub.publish(
            String(starting_pose + ' createEEPose moveEeToPoseWord'))
        # It takes time to get there
        sleep(1)

        # Setting good camera settings
        exposure_and_gain = '65 40'
        self.hand_pub.publish(
            String(exposure_and_gain + ' 1024 1024 2048 fixCameraLighting'))
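    # The self.follow callback bound above is not shown. A hypothetical helper
    # (not from the source) illustrating how FACE_RANGE is intended to be used:
    # the arm stops adjusting once a face sits within FACE_RANGE pixels of the
    # image center.
    def face_is_centered(self, face_x, face_y):
        return (abs(face_x - self.CENTER_X) <= self.FACE_RANGE and
                abs(face_y - self.CENTER_Y) <= self.FACE_RANGE)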
Example 6
#!/usr/bin/env python

from baxter_interface import CameraController

if __name__ == '__main__':
    camera = CameraController("left_hand_camera")
    camera.open()
    res = (1280, 800)
    camera.resolution = res
    camera.exposure = camera.CONTROL_AUTO
    camera.fps = 1
    camera.gain = camera.CONTROL_AUTO
Example 7
              ([25, 146, 190], [62, 174, 250]), ([103, 86,
                                                  65], [145, 133, 128])]
low1 = [10, 10, 70]
up1 = [70, 70, 205]  #190
lower1 = np.array(low1, dtype="uint8")
upper1 = np.array(up1, dtype="uint8")
low2 = [10, 10, 125]
up2 = [120, 120, 205]  #190
lower2 = np.array(low2, dtype="uint8")
upper2 = np.array(up2, dtype="uint8")
head_camera = CameraController('head_camera')
#right_camera.close()
print("closed right")
left_camera = CameraController('left_hand_camera')
head_camera.open()
left_camera.resolution = (640, 400)
left_camera.open()
print("camera open")

camera_image = None
cv2.namedWindow('image', cv2.WINDOW_NORMAL)


def get_img(msg):
    global camera_image
    camera_image = msg_to_cv(msg)
    mask = cv2.inRange(camera_image, lower1, upper1)
    mask2 = cv2.inRange(camera_image, lower2, upper2)
    mask3 = cv2.bitwise_or(mask, mask2)
    blur = cv2.GaussianBlur(camera_image, (5, 5), 0)
    output = cv2.bitwise_and(camera_image, camera_image, mask=mask3)
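# The snippet defines the window and the callback but not the subscription or
# the display loop. A minimal sketch of wiring it up, following the subscriber
# pattern used in the other examples (the node name and the imshow loop are
# assumptions; rospy and sensor_msgs.msg.Image imports are assumed as well):
rospy.init_node('color_mask_viewer', anonymous=True)
rospy.Subscriber('/cameras/left_hand_camera/image', Image, get_img)
while not rospy.is_shutdown():
    if camera_image is not None:
        cv2.imshow('image', camera_image)
    cv2.waitKey(30)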
Example 8
def main():
    """
    Main Script
    """
    # Set up the camera (note: despite the variable name, this opens the left hand camera)
    head_camera = CameraController("left_hand_camera")
    head_camera.resolution = (1280, 800)
    head_camera.open()

    print("setting balance")
    happy = False

    while not happy:

        e = head_camera.exposure
        print("exposue value: " + str(e))

        e_val = int(input("new exposure value"))

        head_camera.exposure = e_val
        satisfaction = str(raw_input("satisfied? y/n"))
        happy = 'y' == satisfaction


    planner = PathPlanner("right_arm")
    listener = tf.TransformListener()
    grip = Gripper('right')


    grip.calibrate()
    rospy.sleep(3)
    grip.open()


    # creating the table obstacle so that Baxter doesn't hit it
    table_size = np.array([.5, 1, 10])
    table_pose = PoseStamped()
    table_pose.header.frame_id = "base"
    thickness = 1

    table_pose.pose.position.x = .9
    table_pose.pose.position.y = 0.0
    table_pose.pose.position.z = -.112 - thickness / 2
    table_size = np.array([.5, 1, thickness])

    planner.add_box_obstacle(table_size, "table", table_pose)


    raw_input("gripper close")
    grip.close()


    # ### Load cup positions found using cup_detection.py (run in the virtual environment)
    # start_pos_xy = np.load(POURING_CUP_PATH)
    # goal_pos_xy = np.load(RECEIVING_CUP_PATH)

    # start_pos = np.append(start_pos_xy, OBJECT_HEIGHT - GRABBING_OFFSET)
    # goal_pos = np.append(start_pos_xy, OBJECT_HEIGHT - GRABBING_OFFSET)
    # #### END LOADING CUP POSITIONS

    camera_subscriber = rospy.Subscriber("cameras/left_hand_camera/image", Image, get_img)


    Kp = 0.1 * np.array([0.3, 2, 1, 1.5, 2, 2, 3]) # Stolen from 106B Students
    Kd = 0.01 * np.array([2, 1, 2, 0.5, 0.5, 0.5, 0.5]) # Stolen from 106B Students
    Ki = 0.01 * np.array([1, 1, 1, 1, 1, 1, 1]) # Untuned
    Kw = np.array([0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]) # Untuned
    cam_pos= [0.188, -0.012, 0.750]

    ##
    ## Add the obstacle to the planning scene here
    ##

    # Create a path constraint for the arm
    orien_const = OrientationConstraint()
    orien_const.link_name = "right_gripper"
    orien_const.header.frame_id = "base"
    orien_const.orientation.y = -1.0
    orien_const.absolute_x_axis_tolerance = 0.1
    orien_const.absolute_y_axis_tolerance = 0.1
    orien_const.absolute_z_axis_tolerance = 0.1
    orien_const.weight = 1.0
    horizontal = getQuaternion(np.array([0,1,0]), np.pi)

    z_rot_pos = getQuaternion(np.array([0,0,1]), np.pi / 2)

    orig = quatMult(z_rot_pos, horizontal)
    orig = getQuaternion(np.array([0,1,0]), np.pi / 2)

    #IN THE VIEW OF THE CAMERA
    #CORNER1--------->CORNER2
    #   |                |
    #   |                |
    #   |                |
    #CORNER3 ------------|
    width = 0.3
    length = 0.6
    CORNER1 =  np.array([0.799, -0.524, -0.03])
    CORNER2 = CORNER1 + np.array([-width, 0, 0])
    CORNER3 = CORNER1 + np.array([0, length, 0])

    #CREATE THE GRID
    dir1 = CORNER2 - CORNER1
    dir2 = CORNER3 - CORNER1

    grid_vals = []
    ret_vals = []

    for i in range(0, 3):
        for j in range(0, 4):
            grid = CORNER1 + i * dir1 / 2 + j * dir2 / 3
            grid_vals.append(grid) 

            ret_vals.append(np.array([grid[0], grid[1], OBJECT_HEIGHT]))

    i = -1
    while not rospy.is_shutdown():
        for g in grid_vals:
            i += 1
            while not rospy.is_shutdown():
                try:
                    goal_1 = PoseStamped()
                    goal_1.header.frame_id = "base"

                    #x, y, and z position
                    goal_1.pose.position.x = g[0]
                    goal_1.pose.position.y = g[1]
                    goal_1.pose.position.z = g[2]

                    y = - g[1] + cam_pos[1]
                    x = g[0] - cam_pos[0]
                    q = orig

                    #Orientation as a quaternion

                    goal_1.pose.orientation.x = q[1][0]
                    goal_1.pose.orientation.y = q[1][1]
                    goal_1.pose.orientation.z = q[1][2]
                    goal_1.pose.orientation.w = q[0]
                    plan = planner.plan_to_pose(goal_1, [])

                    if planner.execute_plan(plan):
                    # raise Exception("Execution failed")
                        rospy.sleep(1)
                        
                        #grip.open()
                        raw_input("open")
                        rospy.sleep(1)
                        # plt.imshow(camera_image)
                        print("yay: " + str(i))
                       
                        pos = listener.lookupTransform("/reference/right_gripper", "/reference/base", rospy.Time(0))[0]
                        print("move succesfully to " + str(pos))
                        fname = os.path.join(IMAGE_DIR, "calib_{}.jpg".format(i))
                        skimage.io.imsave(fname, camera_image)


                except Exception as e:
                    print(e)
                    print("index: ", i)
                else:
                    break

        print(np.array(ret_vals))
        # save the positions of the gripper so that the homography matrix can be calculated
        np.save(POINTS_DIR, np.array(ret_vals))
        print(np.load(POINTS_DIR))
        break
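# getQuaternion and quatMult are project helpers that are not shown here. The
# indexing above (q[0] as the scalar part, q[1] as the 3-vector part) suggests
# the following shape; this is an inferred sketch, not the project's own code.
def getQuaternion(axis, angle):
    # Axis-angle to a (w, [x, y, z]) pair, matching q[0] / q[1][i] above.
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    return (np.cos(angle / 2.0), np.sin(angle / 2.0) * axis)


def quatMult(q1, q2):
    # Hamilton product in the same (scalar, vector) representation.
    w1, v1 = q1
    w2, v2 = q2
    return (w1 * w2 - np.dot(v1, v2),
            w1 * v2 + w2 * v1 + np.cross(v1, v2))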