Code example #1
    def __init__(self, simulation=True):
        """Initializes various aspects of the Fetch.
        
        TODOs: get things working, also use `simulation` flag to change ROS
        topic names if necessary (especially for the cameras!). UPDATE: actually
        I don't think this is necessary now, they have the same topic names.
        """
        rospy.init_node("fetch")
        self.arm = Arm()
        self.arm_joints = ArmJoints()
        self.base = Base()
        self.camera = RGBD()
        self.head = Head()
        self.gripper = Gripper(self.camera)
        self.torso = Torso()
        self.joint_reader = JointStateReader()

        # Tucked arm starting joint angle configuration
        self.names = ArmJoints().names()
        self.tucked = [1.3200, 1.3999, -0.1998, 1.7199, 0.0, 1.6600, 0.0]
        self.tucked_list = [(x, y) for (x, y) in zip(self.names, self.tucked)]

        # Initial (x,y,yaw) position of the robot wrt map origin. We keep this
        # fixed so that we can reset to this position as needed. The HSR's
        # `omni_base.pose` (i.e., the start pose) returns (x,y,yaw) where yaw is
        # the rotation about that axis (intuitively, the z axis). For the base,
        # `base.odom` supplies both `position` and `orientation` attributes.
        start = copy.deepcopy(self.base.odom.position)
        yaw = Base._yaw_from_quaternion(self.base.odom.orientation)
        self.start_pose = np.array([start.x, start.y, yaw])
        self.TURN_SPEED = 0.3

        self.num_restarts = 0
Code example #2
File: skeleton.py Project: robvcc/cloth-manipulation
    def __init__(self, simulation=True):
        """Initializes various aspects of the Fetch."""
        rospy.init_node("fetch")
        rospy.loginfo("initializing the Fetch...")
        self.arm = Arm()
        self.arm_joints = ArmJoints()
        self.base = Base()
        self.camera = RGBD()
        self.head = Head()
        self.gripper = Gripper(self.camera)
        self.torso = Torso()
        self.joint_reader = JointStateReader()

        # Tucked arm starting joint angle configuration
        self.names = ArmJoints().names()
        self.tucked = [1.3200, 1.3999, -0.1998, 1.7199, 0.0, 1.6600, 0.0]
        self.tucked_list = [(x,y) for (x,y) in zip(self.names, self.tucked)]

        # Initial (x,y,yaw) position of the robot wrt map origin. We keep this
        # fixed so that we can reset to this position as needed. The HSR's
        # `omni_base.pose` (i.e., the start pose) returns (x,y,yaw) where yaw is
        # the rotation about that axis (intuitively, the z axis). For the base,
        # `base.odom` supplies both `position` and `orientation` attributes.
        start = copy.deepcopy(self.base.odom.position)
        yaw = Base._yaw_from_quaternion(self.base.odom.orientation)
        self.start_pose = np.array([start.x, start.y, yaw])
        rospy.loginfo("...finished initialization!")
Code example #3
    def __init__(self, service='/ur_driver/set_io'):
        self.gripper = Gripper(service)
        rospy.init_node('gripper_node', anonymous=True)
        self.subscriber = rospy.Subscriber('/gripper/command',
                                           String,
                                           self.callback,
                                           queue_size=1)
        self.time_wait = 0
        print('Created gripper_node')
        rospy.spin()
Code example #4
File: scoop.py Project: zi-w/Kitchen2D
    def check_legal(self, x):
        rel_x1, rel_y1, rel_x2, rel_y2, rel_x3, rel_y3, grasp_ratio, cw1, ch1 = x
        settings[0]['do_gui'] = self.do_gui
        kitchen = Kitchen2D(**settings[0])
        gripper = Gripper(kitchen, (5, 8), 0)
        cup = ks.make_cup(kitchen, (0, 0), 0, cw1, ch1, 0.5)
        spoon = ks.make_spoon(kitchen, (5, 10), 0, 0.2, 3, 1.)
        gripper.set_grasped(spoon, grasp_ratio, (5, 10), 0)
        dposa1, dposa2 = gripper.get_scoop_init_end_pose(
            cup, (rel_x1, rel_y1), (rel_x3, rel_y3))
        gripper.set_grasped(spoon, grasp_ratio, dposa1[:2], dposa1[2])
        g2 = gripper.simulate_itself()
        collision = g2.check_point_collision(dposa1[:2], dposa1[2])

        if collision:
            return False
        collision = g2.check_point_collision(dposa2[:2], dposa2[2])

        if collision:
            return False
        self.kitchen = kitchen
        self.gripper = gripper
        self.cup = cup
        self.spoon = spoon
        return True
Code example #5
    def __init__(self):
        """ Initializes the environment. """

        # the state space (=observation space) are all possible depth images of the kinect camera
        if (self.flattenImage):
            self.observation_space = spaces.Box(
                low=0,
                high=255,
                shape=[self.imageHeight * self.imageWidth],
                dtype=np.uint8)
        else:
            self.observation_space = spaces.Box(
                low=0,
                high=255,
                shape=[self.imageHeight, self.imageWidth],
                dtype=np.uint8)

        boundaries_r = [self.gripperRadiusMin, self.gripperRadiusMax]
        boundaries_phi = [0, np.pi]

        low = np.array([boundaries_r[0], boundaries_phi[0]])
        high = np.array([boundaries_r[1], boundaries_phi[1]])
        self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)

        self.reward_range = (-np.inf, np.inf)

        self.seed()

        # create object for box (object to pick up)
        self.box = ObjectToPickUp(length=self.boxLength,
                                  width=self.boxWidth,
                                  height=self.boxHeight,
                                  gripperRadius=self.gripperRadiusMax)
        # create object for kinect
        self.kinect = Kinect(self.imageWidth,
                             self.imageHeight,
                             x=0.0,
                             y=0.0,
                             z=1.0)
        # create object for gripper
        self.gripper = Gripper(self.gripperDistance,
                               self.gripperWidth,
                               self.gripperHeight,
                               r=0.0,
                               phi=0.0)
Code example #6
File: fetch.py Project: AugustineLe0501/Fetch_Demo
    def __init__(self):
        rospy.Subscriber("/head_camera/rgb/image_raw", Image, self.RGBImageCallback)
        rospy.Subscriber("/head_camera/depth_registered/image_raw", Image, self.DepthImageCallback)
        rospy.Subscriber("/ar_pose_marker", AlvarMarkers, self.GetArPosesCallBack)
        self.bridge = CvBridge()

        self.Arm = Arm()
        self.Gripper = Gripper()
        self.Head = Head()
        self.PoseProcessing = PoseProcessing()
Code example #7
def make_body(kitchen, name, pose, args):
    '''
    A wrapper to create Box2d bodies based on their name, pose and size.
    '''
    if 'gripper' in name:
        from gripper import Gripper
        body = Gripper(kitchen, init_pos=pose[:2], init_angle=pose[2])
    elif 'cup' in name:
        body = make_cup(kitchen, pose[:2], pose[2], *args[name])
    elif 'spoon' in name:
        body = make_spoon(kitchen, pose[:2], pose[2], *args[name])
    elif 'stir' in name:
        body = make_stirrer(kitchen, pose[:2], pose[2], *args[name])
    elif 'block' in name:
        body = make_block(kitchen, pose[:2], pose[2], *args[name])
    else:
        raise NotImplementedError(name)
    body.name = name
    return body
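A hedged usage sketch of this wrapper follows; the object names and cup sizes are illustrative, and the Kitchen2D/settings setup mirrors the check_legal snippets elsewhere on this page, so treat it as a sketch rather than project code.

kitchen = Kitchen2D(**settings[0])            # constructed as in the check_legal examples
sizes = {'cup0': (4.0, 5.0, 0.5)}             # illustrative cup parameters (the trailing 0.5 matches the snippets above)
gripper = make_body(kitchen, 'gripper0', (5, 8, 0), sizes)   # size args are ignored for grippers
cup = make_body(kitchen, 'cup0', (0, 0, 0), sizes)
gripper.set_grasped(cup, 0.5, (0, 0), 0)      # grasp ratio and pose, following the other examples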
Code example #8
    def __init__(self):
        # create a RobotCommander
        self.robot = RobotCommander()

        # create a PlanningSceneInterface object
        self.scene = PlanningSceneInterface()

        # create arm
        self.arm = Arm("manipulator")

        # create gripper
        self.gripper = Gripper()
Code example #9
File: gripperberry.py Project: IzzyBrand/dro.ne
    def start(self):
        self.server = ServerInterface()
        # self.pixhawk = connect('/dev/cu.usbmodem1', baud = 115200, wait_ready=True) 	# for on mac via USB
        # self.pixhawk = connect('/dev/ttyS0', baud = 57600, wait_ready=True) 			# for on the raspberry PI via telem2
        # # self.pixhawk = connect('/dev/tty.usbserial-DA00BL49', baud = 57600)			# telem radio on mac
        # # self.pixhawk = connect('/dev/tty.SLAB_USBtoUART', baud = 57600)				# telem radio on mac
        # self.pixhawk.wait_ready(timeout=60)
        # self.pixhawk.commands.download()
        #self._log('Connected to pixhawk.')
        #self._prev_pixhawk_mode = ''
        self._prev_command = ''
        #self._arming_window_start = 0
        self._server_connect_timer = time.time()
        #self.current_action = 'idle'
        config_loaded = self._load_config()  # load info about the uid and auth
        online = True  # TODO: verify internet connection
        self.gripper = Gripper(18)  # set up the gripper
        self.button = Button(2)  # set up the button
        self.button.when_pressed = self.gripper.open
        self.button.when_released = self.gripper.close

        return config_loaded and online
Code example #10
File: world.py Project: pinhlin/environment
    def __init__(self):
        # PyGame Initialization
        pygame.display.init()
        self.clock = pygame.time.Clock()
        self.gui = Gui()
        self.gripper = Gripper()
        self.plan = []
        self.cur_plan = []
        self.cur_action = ''
        self.target_name = None
        self.beput_name = None
        self.action_status = True  # to check if the current action is done
        self.plan_status = True  # to check if the plan is done
Code example #11
class GripperNode:
    def __init__(self, service='/ur_driver/set_io'):
        self.gripper = Gripper(service)
        rospy.init_node('gripper_node', anonymous=True)
        self.subscriber = rospy.Subscriber('/gripper/command',
                                           String,
                                           self.callback,
                                           queue_size=1)
        self.time_wait = 0
        print('Created gripper_node')
        rospy.spin()

    def callback(self, data):
        if data.data == 'open':
            self.gripper.open()
        elif data.data == 'close':
            self.gripper.close()
        elif data.data == 'softGrip':
            self.gripper.set_soft_grip()
        elif data.data == 'strongGrip':
            self.gripper.set_strong_grip()
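For reference, here is a hedged companion sketch showing how another node could drive the GripperNode above; the topic name, message type, and command strings come from the snippet itself, while the node name 'gripper_commander' is purely illustrative.

import rospy
from std_msgs.msg import String

rospy.init_node('gripper_commander', anonymous=True)  # illustrative node name
pub = rospy.Publisher('/gripper/command', String, queue_size=1)
rospy.sleep(1.0)                       # give the subscriber time to connect
pub.publish(String(data='close'))      # also accepted: 'open', 'softGrip', 'strongGrip'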
Code example #12
def main():
    gripper = Gripper()
    rospy.init_node('gripper_node', anonymous=True)
    rate = rospy.Rate(500)
    cmds = JointCommands()
    #    time_interval = 0.0
    #    vel_ref = np.array([0.0, 0.0])
    pos_ref = [0.0, 0.0]
    stiffness = [1.0, 1.0]

    while not rospy.is_shutdown():
        #        vel_ref[0] = -4.0 * np.sin(time_interval)
        #        vel_ref[1] = 0.0
        #        time_interval += 2e-3
        #        cmds.vel_sat = vel_ref
        cmds.pos_ref = pos_ref
        cmds.stiffness = stiffness
        gripper.gripperPub.publish(cmds)
        rate.sleep()  # throttle to the 500 Hz rate created above
    joint_cmds = JointCommands()
    time_interval = 0.0
    pos_ref = np.array([0.0, 0.0])
    vel_sat = np.array([3.0, 3.0])
    tau_sat = np.array([5.0, 5.0])
    stiffness = np.array([0.1, 0.1])
    freq_anti_alias = 500.0

    while not rospy.is_shutdown():
        time_interval += 2e-3
        pos_ref[0] = -1.0 * np.sin(time_interval)
        joint_cmds.pos_ref = pos_ref
        joint_cmds.vel_sat = vel_sat
        joint_cmds.tau_sat = tau_sat
        joint_cmds.stiffness = stiffness
        joint_cmds.freq_anti_alias = freq_anti_alias
        gripper.gripperPub.publish(joint_cmds)
        rospy.loginfo("pos_ref[0]: {}".format(joint_cmds.pos_ref[0]))
        rate.sleep()
Code example #13
    def check_legal(self, x):
        grasp_ratio, rel_x, rel_y, dangle, cw1, ch1, cw2, ch2 = x
        dangle *= np.sign(rel_x)
        settings[0]['do_gui'] = self.do_gui
        kitchen = Kitchen2D(**settings[0])
        gripper = Gripper(kitchen, (5, 8), 0)
        cup1 = ks.make_cup(kitchen, (0, 0), 0, cw1, ch1, 0.5)
        cup2 = ks.make_cup(kitchen, (-15, 0), 0, cw2, ch2, 0.5)
        gripper.set_grasped(cup2, grasp_ratio, (-15, 0), 0)
        gripper.set_position((rel_x, rel_y), 0)
        if not kitchen.planning:
            g2 = gripper.simulate_itself()
            _, collision = g2.check_path_collision((rel_x, rel_y), 0,
                                                   (rel_x, rel_y), dangle)

            if collision:
                return False
        self.kitchen = kitchen
        self.gripper = gripper
        self.cup1 = cup1
        self.cup2 = cup2
        return True
Code example #14
def createScene(rootNode):
    """This is my first scene"""
    MainHeader(rootNode, gravity=[0.0, -981.0, 0.0], plugins=["SoftRobots"])
    ContactHeader(rootNode,
                  alarmDistance=4,
                  contactDistance=3,
                  frictionCoef=0.08)

    Gripper(rootNode)

    Floor(rootNode,
          color=[1.0, 0.0, 0.0],
          translation=[0.0, -160.0, 0.0],
          isAStaticObject=True)

    Cube(rootNode,
         uniformScale=20.0,
         color=[1.0, 1.0, 0.0],
         totalMass=0.03,
         volume=20,
         inertiaMatrix=[1000.0, 0.0, 0.0, 0.0, 1000.0, 0.0, 0.0, 0.0, 1000.0],
         translation=[0.0, -130.0, 10.0])

    return rootNode
Code example #15
    def check_legal(self, x):
        # for now, check always returns true
        # n_stirs are always 5, ignore it for now
        rel_pos1_x, rel_pos1_y, rel_pos2_x, rel_pos2_y, cup_w, cup_h, grasp_ratio = x

        # creates the Box2D world objects to execute this action
        kitchen = Kitchen2D(**settings[0])
        gripper = Gripper(kitchen, (5, 8), 0)
        cup = ks.make_cup(kitchen, (0, 0), 0, cup_w, cup_h, 0.5)

        stirrer = ks.make_stirrer(kitchen, (0, 3.5), 0., 0.2, 5., 0.5)
        gripper.set_grasped(stirrer, 0.8, (10, 10), 0)

        # stirrer = ks.make_stirrer(kitchen, (0.5,1.0), 0, 0.2, 5., 0.5)
        # print gripper.set_grasped(stirrer, 0.8, (0.5,1.0), 0)
        dposa1, dposa2 = gripper.get_stir_init_end_pose(
            cup, (rel_pos1_x, rel_pos1_y), (rel_pos2_x, rel_pos2_y))
        # print gripper.set_grasped(stirrer, grasp_ratio, dposa1[:2], dposa1[2])

        # seems that it almost never has collision
        g2 = gripper.simulate_itself()
        collision = g2.check_point_collision(
            dposa1[:2], dposa1[2]) and g2.check_point_collision(
                dposa2[:2], dposa2[2])
        if collision:
            return False

        # print {"rel_pos1_x": rel_pos1_x, "rel_pos1_y": rel_pos1_y, "rel_pos2_x" : rel_pos2_x, \
        # "rel_pos2_y": rel_pos2_y, "cup_w": cup_w, "cup_h": cup_h, "grasp_ratio": grasp_ratio, \
        # "dposa1": dposa1, "dposa2": dposa2}

        self.kitchen = kitchen
        self.gripper = gripper
        self.cup = cup
        self.stirrer = stirrer
        return True
Code example #16
class SmartBotEnv(gym.Env):

    rewardSuccess = 500
    rewardFailure = 0
    rewardUnreachablePosition = -5  # TODO: do we need this anymore?
    # when set to True, the reward will be rewardSuccess if gripper could grasp the object, rewardFailure otherwise
    # when set to False, the reward will be calculated from the distance between the gripper & the position of success
    binaryReward = False

    # some algorithms (like the ddpg from /home/joel/Documents/gym-gazebo/examples/pincher_arm/smartbot_pincher_kinect_ddpg.py)
    # currently assume that the observation_space has shape (x,) instead of (220,300), so for those algorithms set this to True
    flattenImage = False

    state = np.array([])
    imageWidth = 320
    imageHeight = 160
    boxLength = 0.02
    boxWidth = 0.02
    boxHeight = 0.03
    gripperDistance = 0.032  # between the fingers
    gripperHeight = 0.035  # from base to finger tip
    gripperWidth = 0.03
    gripperRadiusMax = 0.2  # maximal distance between robot base and gripper on the floor
    gripperRadiusMin = 0.04  # minimal distance between robot base and gripper on the floor

    firstRender = True

    def __init__(self):
        """ Initializes the environment. """

        # the state space (=observation space) are all possible depth images of the kinect camera
        if (self.flattenImage):
            self.observation_space = spaces.Box(
                low=0,
                high=255,
                shape=[self.imageHeight * self.imageWidth],
                dtype=np.uint8)
        else:
            self.observation_space = spaces.Box(
                low=0,
                high=255,
                shape=[self.imageHeight, self.imageWidth],
                dtype=np.uint8)

        boundaries_r = [self.gripperRadiusMin, self.gripperRadiusMax]
        boundaries_phi = [0, np.pi]

        low = np.array([boundaries_r[0], boundaries_phi[0]])
        high = np.array([boundaries_r[1], boundaries_phi[1]])
        self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)

        self.reward_range = (-np.inf, np.inf)

        self.seed()

        # create object for box (object to pick up)
        self.box = ObjectToPickUp(length=self.boxLength,
                                  width=self.boxWidth,
                                  height=self.boxHeight,
                                  gripperRadius=self.gripperRadiusMax)
        # create object for kinect
        self.kinect = Kinect(self.imageWidth,
                             self.imageHeight,
                             x=0.0,
                             y=0.0,
                             z=1.0)
        # create object for gripper
        self.gripper = Gripper(self.gripperDistance,
                               self.gripperWidth,
                               self.gripperHeight,
                               r=0.0,
                               phi=0.0)

    # __init__

    def seed(self, seed=None):
        """ Seeds the environment (for replicating the pseudo-random processes). """
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    # seed

    def reset(self):
        """ Resets the state of the environment and returns an initial observation."""
        # place box
        self.box.place(randomPlacement=True)

        # for testing purposes
        # self.box.place(randomPlacement=False, x=0.0, y=0.1, phi=np.pi/2)

        # get depth image
        image = self.kinect.getImage(self.box,
                                     filter=False,
                                     flattenImage=self.flattenImage,
                                     saveImage=True)

        self.state = image
        return self.state

    # reset

    def close(self):
        """ Closes the environment and shuts down the simulation. """
        logging.info("closing SmartBotEnv")
        super(SmartBotEnv, self).close()

    # close

    stepcount = 0
    winkel = np.linspace(0, np.pi)

    def step(self, action):
        """ Executes the action (i.e. moves the arm to the pose) and returns the reward and a new state (depth image). """

        # determine if gripper could grasp the ObjectToPickUp
        gripperR = action[0].astype(np.float64)
        gripperPhi = action[1].astype(np.float64)

        # # for testing purposes
        # gripperR = 0.1
        # gripperPhi = self.winkel[self.stepcount]
        # self.stepcount += 1

        self.gripper.place(gripperR, gripperPhi)

        logging.debug("moving arm to position: [{0} {1}]".format(
            gripperR, gripperPhi))
        logging.debug("box position: [{0} {1} {2}]".format(
            self.box.pos[0], self.box.pos[1], self.box.phi))

        reward, graspSuccess = self.calculateReward()
        logging.debug("received reward: " + str(reward))

        # re-place object to pick up if grasp was successful
        # if(graspSuccess):
        self.box.place(randomPlacement=True)

        # get depth image
        image = self.kinect.getImage(self.box,
                                     filter=False,
                                     flattenImage=self.flattenImage,
                                     saveImage=True)

        self.state = image
        done = True
        info = {}

        return self.state, reward, done, info

    # step

    def calculateReward(self):
        """ Calculates the reward for the current timestep, according to the gripper position and the pickup position. 
            A high reward is given if the gripper could grasp the box (pickup) if it would close the gripper. """

        # check if center of gravity of box is between the gripper fingers (i.e. inside the ag-bg-cg-dg polygon)
        # see e.g.: https://stackoverflow.com/a/23453678
        bbPath_gripper = mplPath.Path(
            np.array([
                self.gripper.a, self.gripper.b, self.gripper.c, self.gripper.d
            ]))
        # one finger is between a & b, the other finger is between c & d
        cogBetweenFingers = bbPath_gripper.contains_point(
            (self.box.pos[0], self.box.pos[1]))
        logging.debug("center of gravity is between the fingers: {}".format(
            cogBetweenFingers))

        # check if both gripper fingers don't intersect with the box
        bbPath_box = mplPath.Path(
            np.array([self.box.a, self.box.b, self.box.c, self.box.d]))
        bbPath_gripper_left = mplPath.Path(
            np.array([self.gripper.a, self.gripper.b]))
        bbPath_gripper_right = mplPath.Path(
            np.array([self.gripper.c, self.gripper.d]))
        leftGripperCrashes = bbPath_box.intersects_path(bbPath_gripper_left,
                                                        filled=True)
        rightGripperCrashes = bbPath_box.intersects_path(bbPath_gripper_right,
                                                         filled=True)
        logging.debug("left gripper crashes: {}".format(leftGripperCrashes))
        logging.debug("right gripper crashes: {}".format(rightGripperCrashes))

        # if the center of gravity of the box is between the gripper fingers and none of the fingers collide with the box, we are able to grasp the box
        if (cogBetweenFingers and not leftGripperCrashes
                and not rightGripperCrashes):
            logging.info(
                "********************************************* grasping would be successful! *********************************************"
            )
            graspSuccess = True
        else:
            graspSuccess = False

        if (self.binaryReward):
            if (graspSuccess):
                return self.rewardSuccess, graspSuccess
            else:
                return self.rewardFailure, graspSuccess
        else:
            # calculate reward according to the distance from the gripper to the center of gravity of the box
            distance = np.linalg.norm(self.gripper.pos -
                                      self.box.pos)  # e.g. 0.025
            # invert the distance, because smaller distance == closer to the goal == more reward
            reward = 1.0 / (2 * distance)
            # scale the reward if grasping would be successful
            if (graspSuccess):
                reward = 50 * reward
            # elif(leftGripperCrashes or rightGripperCrashes): # "punishement" for crashing into the box
            #     reward = reward / 5
            reward = min(reward, 1000)
            return reward, graspSuccess
        # if

    # calculateReward

    def render(self, mode='human'):
        if (self.firstRender):
            # display stuff
            plt.ion()
            self.fig, self.ax = plt.subplots()
            self.ax.axis("equal")
            self.ax.set_xlim([
                -self.box.gripperRadius - self.box.length - 0.2,
                self.box.gripperRadius + self.box.length + 0.2
            ])
            self.ax.set_ylim([
                0 - self.box.length - 0.2,
                self.box.gripperRadius + self.box.length + 0.2
            ])
            self.gripperLeftPoly = patches.Polygon(
                [self.gripper.a, self.gripper.b], closed=True, color="black")
            self.gripperRightPoly = patches.Polygon(
                [self.gripper.c, self.gripper.d], closed=True, color="black")
            self.pickMeUpPoly = patches.Polygon(
                [self.box.a, self.box.b, self.box.c, self.box.d],
                closed=True,
                color="red")
            self.ax.add_artist(self.gripperLeftPoly)
            self.ax.add_artist(self.gripperRightPoly)
            self.ax.add_artist(self.pickMeUpPoly)
            self.firstRender = False
        # if
        plt.ion()
        plt.cla()
        self.gripperLeftPoly.set_xy([self.gripper.a, self.gripper.b])
        self.gripperRightPoly.set_xy([self.gripper.c, self.gripper.d])
        self.pickMeUpPoly.set_xy(
            [self.box.a, self.box.b, self.box.c, self.box.d])
        self.ax.add_artist(self.gripperLeftPoly)
        self.ax.add_artist(self.gripperRightPoly)
        self.ax.add_artist(self.pickMeUpPoly)
        # self.fig.canvas.draw()
        plt.pause(0.0001)
        plt.draw()
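A hedged usage sketch of this environment, following the classic gym reset/step loop that the class implements (note that step always returns done=True, so each episode is a single grasp attempt):

env = SmartBotEnv()
obs = env.reset()
for _ in range(10):
    action = env.action_space.sample()          # random (r, phi) gripper placement
    obs, reward, done, info = env.step(action)  # done is True after every attempt
    env.render()
    obs = env.reset()
env.close()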
Code example #17
def main():
    gripper = Gripper()
    rospy.init_node('close', anonymous=True)

    gripper.close()
Code example #18
def startMission():

    vrep.simxFinish(-1)

    clientID = vrep.simxStart('127.0.0.1', 19999, True, True, 5000, 5)
    while clientID <= -1:

        clientID = vrep.simxStart('127.0.0.1', 19999, True, True, 5000, 5)

    if clientID != -1:
        print('Successful !!!')

    else:
        print('connection not successful')
        #sys.exit('could not connect')

    print('connected to remote api server')

    #vrep.simxStartSimulation(clientID,vrep.simx_opmode_oneshot)
    sens = ProxSensor(clientID)
    lineSensors = IRSensors(clientID)
    imu = IMU(clientID)
    regulator = pidControl(0.006, 0.001, 0.001)
    cameraAreaControl = relle(10000, 10500, 0.03)
    cameraPosControl = relle(124, 126, 0.005)
    car = Robot(clientID, 0.04)
    camera = Cam(clientID)
    imuFilter = compliment()
    gripper = Gripper(clientID)

    step = 1
    velocity = 0.1

    crater1 = np.array([3, 4])
    crater2 = np.array([3, 7])

    def moveDir(vel, pos):
        dt = 0
        edgeS = False  # the robot is on the slope
        edgeS2 = False  # the robot has passed the slope

        position = 1
        while (position < pos):
            ts = vrep.simxGetLastCmdTime(clientID)

            camera.getCameraImage()
            sign, x, y, area = camera.trackObject('red')
            cameraPosControl.control(y)
            if area > 120 and area < 140:
                position = 3
            elif area > 180 and area < 200:
                position = 4
            elif area > 260 and area < 300:
                position = 5
            elif area > 450 and area < 550:
                position = 6
            elif area > 1300 and area < 1800:
                position = 7
            elif area > 2400 and area < 3000:
                position = 8

            if (sign):
                U1 = cameraPosControl.U

            else:
                U1 = 0.02

            car.move(-U1 - vel, +U1 - vel, 0, dt)
            print(area, position)
            #if(position>=pos and y<128 and y>122):
            #    break

            #sens.updateAllSensors()
            #regulator.pid(sens.lsVal-0.4,dt)
            #imu.upgrateIMU()
            #imuFilter.filter(imu.accelData,imu.gyroData,dt)
            #if(imuFilter.roll>0.2 and not lineSensors.getGreenColorSignal()):
            #    U=0
            #    if not edgeS2 and not edgeS:
            #        edgeS=True
            #else:
            #    #U=regulator.U
            #    U=0
            #if(imuFilter.roll<0.2 and edgeS):
            #    edgeS2=True
            #    car.position=0
            #    edgeS=False
            #if edgeS:
            #    car.position=0

            #car.move(-vel-U,-vel+U,0,dt)

            dt = (vrep.simxGetLastCmdTime(clientID) - ts) / 1000
        car.stop()
        car.position = 0

    def moveWhile(color):
        dt = 0.001
        lineSensors.updateLineSensors()
        lineSensors.getBlackColorSignal()
        lineSensors.getGreenColorSignal()
        signal = False
        while (not signal):
            ts = vrep.simxGetLastCmdTime(clientID)
            lineSensors.getBlackColorSignal()
            lineSensors.getGreenColorSignal()
            if color == 'green':
                signal = lineSensors.greenSignal
            elif color == 'black':
                signal = lineSensors.blackSignal
                #print(lineSensors.blackSignal )
            car.move(-velocity, -velocity, 0, dt)
            dt = (vrep.simxGetLastCmdTime(clientID) - ts) / 1000
        time.sleep(2)
        car.stop()

    def getBall():
        dt = 0.001
        camera.setCameraPosition(0, 0)
        while True:
            ts = vrep.simxGetLastCmdTime(clientID)
            camera.getCameraImage()
            sign, x, y, area = camera.trackObject('blue')

            cameraAreaControl.control(area)
            cameraPosControl.control(y)
            if (sign):
                U1 = cameraPosControl.U
                U2 = cameraAreaControl.U
                #print(y)
            else:
                U1 = 0.02
                U2 = 0
            car.move(-U2 - U1, -U2 + U1, 0, dt)
            #print(area)
            if (area > 6700 and y < 126 and y > 122):
                car.stop()
                gripper.giveBall()
                break

            camera.showStream()
            dt = (vrep.simxGetLastCmdTime(clientID) - ts) / 1000

    def searchDir(color):
        camera.setCameraPosition(30, 0.8)
        dt = 0.001
        while True:
            ts = vrep.simxGetLastCmdTime(clientID)
            camera.getCameraImage()
            sign, x, y, area = camera.trackObject(color)

            cameraAreaControl.control(area)
            cameraPosControl.control(y)
            if (sign):
                U1 = cameraPosControl.U
                U2 = cameraAreaControl.U

            else:
                U1 = 0.02
                U2 = 0
            car.move(-U2 - U1, -U2 + U1, 0, dt)
            #print(area)
            if (area > 1000 and y < 128 and y > 122):
                car.move(-velocity, -velocity, 0, dt)
                break

            camera.showStream()
            dt = (vrep.simxGetLastCmdTime(clientID) - ts) / 1000

    def colibrate():
        camera.setCameraPosition(30, 0.8)
        dt = 0.001
        while True:
            ts = vrep.simxGetLastCmdTime(clientID)
            camera.getCameraImage()
            sign, x, y, area = camera.trackObject('red')

            cameraAreaControl.control(area)
            cameraPosControl.control(y)
            if (sign):
                U1 = cameraPosControl.U
                U2 = cameraAreaControl.U

            else:
                U1 = 0.02
                U2 = 0
            car.move(-2 * U2 - U1, -2 * U2 + U1, 0, dt)
            #print(area)
            if (y < 128 and y > 122):
                car.stop()
                camera.destroyAllStream()
                break

            camera.showStream()
            dt = (vrep.simxGetLastCmdTime(clientID) - ts) / 1000

    def makeOperation(pos):
        colibrate()
        moveDir(velocity, pos)
        car.rotate(80)
        moveWhile('black')
        getBall()
        if pos > 5:
            searchDir('blue')
        searchDir('green')
        moveWhile('green')
        gripper.brokeBall()

    makeOperation(crater1[1])
    makeOperation(crater2[1])

    car.stop()
    camera.destroyAllStream()
    time.sleep(1)
    vrep.simxFinish(clientID)
Code example #19
import time

from gripper import Gripper
from sensor_system import SensorSystem

gripper = Gripper()
sensors = SensorSystem()
gripper.move_clockwise()
print("moving")
while True:
    if gripper.check_dir() == 0:
        if sensors.detect_bottom():
            print("bottom")
            gripper.stop()
            time.sleep(2)
            if sensors.detect_ball():
                gripper.move_counter_clockwise()
                print("ball caught")
        if sensors.detect_max_pos_d():
            print("maxd")
            gripper.move_counter_clockwise()
    else:
        if sensors.detect_max_pos_u():
            print("maxu")
            gripper.stop()
            time.sleep(3)
            gripper.move_clockwise()
            print("moving")
    time.sleep(0.01)
Code example #20
File: simple_gui_good.py Project: jkurina/cse481
    def __init__(self, context):
        super(SimpleGUI, self).__init__(context)
        self.setObjectName('SimpleGUI')
        self._widget = QWidget()
        self._widget.setFixedSize(600, 600)
        self._sound_client = SoundClient()
        rospy.Subscriber('robotsound', SoundRequest, self.sound_cb)

        QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10))
        self.sound_sig.connect(self.sound_sig_cb)

        large_box = QtGui.QVBoxLayout()

        #Sound textbox
        sound_textbox = QtGui.QLineEdit("Squirtle Squirtle")  #Default Text
        sound_textbox.setFixedWidth(450)
        #Set a handle on the textbox to retrieve the text when button clicked
        self.sound_textbox = sound_textbox

        button_box = QtGui.QHBoxLayout()
        button_box.addItem(QtGui.QSpacerItem(15, 20))
        button_box.addWidget(self.create_button('Speak', self.command_cb))
        button_box.addWidget(sound_textbox)
        button_box.addStretch(1)
        large_box.addLayout(button_box)

        speech_box = QtGui.QHBoxLayout()
        speech_box.addItem(QtGui.QSpacerItem(15, 20))
        self.speech_label = QtGui.QLabel('Robot has not spoken yet.')
        palette = QtGui.QPalette()
        palette.setColor(QtGui.QPalette.Foreground, QtCore.Qt.blue)
        self.speech_label.setPalette(palette)
        speech_box.addWidget(self.speech_label)

        large_box.addLayout(speech_box)
        large_box.addStretch(1)

        #large_box.addItem(QtGui.QSpacerItem(50,20))

        up_head = Head(Head.UP)
        head_box = QtGui.QVBoxLayout()
        up_head_box = QtGui.QHBoxLayout()
        up_head_button = self.create_button('^', up_head.create_closure())
        #large_box.addWidget(up_head_button)
        down_head = Head(Head.DOWN)
        down_head_box = QtGui.QHBoxLayout()
        down_head_button = self.create_button('v', down_head.create_closure())
        #large_box.addWidget(down_head_button)
        right_head = Head(Head.RIGHT)
        right_head_button = self.create_button('>',
                                               right_head.create_closure())
        #large_box.addWidget(right_head_button)
        left_head = Head(Head.LEFT)
        left_head_button = self.create_button('<', left_head.create_closure())
        left_right_head_box = QtGui.QHBoxLayout()

        up_head_box.addItem(QtGui.QSpacerItem(235, 20))
        up_head_box.addWidget(up_head_button)
        up_head_box.addItem(QtGui.QSpacerItem(275, 20))
        left_right_head_box.addItem(QtGui.QSpacerItem(160, 20))
        left_right_head_box.addWidget(left_head_button)
        left_right_head_box.addItem(QtGui.QSpacerItem(60, 20))
        left_right_head_box.addWidget(right_head_button)
        left_right_head_box.addItem(QtGui.QSpacerItem(225, 20))
        down_head_box.addItem(QtGui.QSpacerItem(235, 20))
        down_head_box.addWidget(down_head_button)
        down_head_box.addItem(QtGui.QSpacerItem(275, 20))
        head_box.addLayout(up_head_box)
        head_box.addLayout(left_right_head_box)
        head_box.addLayout(down_head_box)
        large_box.addLayout(head_box)
        #large_box.addItem(QtGui.QSpacerItem(500,20))
        #large_box.addWidget(left_head_button)

        gripper = Gripper(Gripper.RIGHT, Gripper.OPEN)
        right_gripper = self.create_button('Right gripper!',
                                           gripper.create_closure())
        gripper = Gripper(Gripper.LEFT, Gripper.OPEN)
        left_gripper = self.create_button('Left gripper!',
                                          gripper.create_closure())
        large_box.addItem(QtGui.QSpacerItem(100, 250))

        gripper_box = QtGui.QHBoxLayout()
        gripper_box.addItem(QtGui.QSpacerItem(75, 20))
        gripper_box.addWidget(left_gripper)
        gripper_box.addItem(QtGui.QSpacerItem(450, 20))
        gripper_box.addWidget(right_gripper)
        gripper_box.addItem(QtGui.QSpacerItem(75, 20))
        large_box.addLayout(gripper_box)

        base_box = QtGui.QVBoxLayout()

        large_box.addItem(QtGui.QSpacerItem(100, 100))

        #forward
        forward_base_box = QtGui.QHBoxLayout()
        forward_base = Base(Base.FORWARD)
        forward_base_button = self.create_button('move forward',
                                                 forward_base.create_closure())
        forward_base_box.addItem(QtGui.QSpacerItem(400, 20))
        forward_base_box.addWidget(forward_base_button)
        forward_base_box.addItem(QtGui.QSpacerItem(400, 20))
        base_box.addLayout(forward_base_box)
        #large_box.addWidget(forward_base_button)

        #left
        left_right_base_box = QtGui.QHBoxLayout()
        left_base = Base(Base.LEFT)
        left_base_button = self.create_button('move left',
                                              left_base.create_closure())
        #large_box.addWidget(left_base_button)

        #right
        right_base = Base(Base.RIGHT)
        right_base_button = self.create_button('move right',
                                               right_base.create_closure())
        left_right_base_box.addItem(QtGui.QSpacerItem(300, 20))
        left_right_base_box.addWidget(left_base_button)
        left_right_base_box.addItem(QtGui.QSpacerItem(50, 20))
        left_right_base_box.addWidget(right_base_button)
        left_right_base_box.addItem(QtGui.QSpacerItem(300, 20))
        base_box.addLayout(left_right_base_box)
        #large_box.addWidget(right_base_button)

        #backward
        backward_base_box = QtGui.QHBoxLayout()
        backward_base = Base(Base.BACKWARD)
        backward_base_button = self.create_button(
            'move backward', backward_base.create_closure())
        backward_base_box.addItem(QtGui.QSpacerItem(400, 20))
        backward_base_box.addWidget(backward_base_button)
        backward_base_box.addItem(QtGui.QSpacerItem(400, 20))
        base_box.addLayout(backward_base_box)
        #large_box.addWidget(backward_base_button)

        large_box.addLayout(base_box)

        turn_base_box = QtGui.QHBoxLayout()

        #turn left
        turnleft_base = Base(Base.TURNLEFT)
        turnleft_base_button = self.create_button(
            '        /\n<--', turnleft_base.create_closure())
        #large_box.addWidget(turnleft_base_button)

        #turn right
        turnright_base = Base(Base.TURNRIGHT)
        turnright_base_button = self.create_button(
            '\\\n        -->', turnright_base.create_closure())
        turn_base_box.addItem(QtGui.QSpacerItem(75, 20))
        turn_base_box.addWidget(turnright_base_button)
        turn_base_box.addItem(QtGui.QSpacerItem(225, 20))
        turn_base_box.addWidget(turnleft_base_button)
        turn_base_box.addItem(QtGui.QSpacerItem(100, 20))
        large_box.addLayout(turn_base_box)
        #large_box.addWidget(turnright_base_button)
        self._widget.setObjectName('SimpleGUI')
        self._widget.setLayout(large_box)
        context.add_widget(self._widget)
        self._widget.setStyleSheet(
            "QWidget { image: url(%s) }" %
            "/home/vjampala/catkin_ws/src/cse481/hw1/rqt_simplegui/rosie_background.jpg"
        )
Code example #21
File: skeleton.py Project: robvcc/cloth-manipulation
class Robot_Skeleton(object):
    """Basic bare-bones solution for the Fetch robot interface.
    
    We recommend extending this class with additional convenience methods based
    on your application needs.
    """

    def __init__(self, simulation=True):
        """Initializes various aspects of the Fetch."""
        rospy.init_node("fetch")
        rospy.loginfo("initializing the Fetch...")
        self.arm = Arm()
        self.arm_joints = ArmJoints()
        self.base = Base()
        self.camera = RGBD()
        self.head = Head()
        self.gripper = Gripper(self.camera)
        self.torso = Torso()
        self.joint_reader = JointStateReader()

        # Tucked arm starting joint angle configuration
        self.names = ArmJoints().names()
        self.tucked = [1.3200, 1.3999, -0.1998, 1.7199, 0.0, 1.6600, 0.0]
        self.tucked_list = [(x,y) for (x,y) in zip(self.names, self.tucked)]

        # Initial (x,y,yaw) position of the robot wrt map origin. We keep this
        # fixed so that we can reset to this position as needed. The HSR's
        # `omni_base.pose` (i.e., the start pose) returns (x,y,yaw) where yaw is
        # the rotation about that axis (intuitively, the z axis). For the base,
        # `base.odom` supplies both `position` and `orientation` attributes.
        start = copy.deepcopy(self.base.odom.position)
        yaw = Base._yaw_from_quaternion(self.base.odom.orientation)
        self.start_pose = np.array([start.x, start.y, yaw])
        rospy.loginfo("...finished initialization!")


    def body_start_pose(self, start_height=0.10, end_height=0.10, velocity_factor=0.5):
        """Sets the robot's body to some initial configuration.

        Tucks the arm using motion planning. NEVER directly set joints as that
        often leads to collisions.

        Args:
            start_height: Height in meters for Fetch before arm-tuck.
            end_height: Height in meters for Fetch after arm-tuck.
            velocity_factor: controls the speed, closer to 0 means slower,
                closer to 1 means faster. (If 0.0, then it turns into 1.0 for
                some reason.) Values greater than 1.0 are cut to 1.0.
        """
        self.torso.set_height(start_height)
        self.arm.move_to_joint_goal(self.tucked_list, velocity_factor=velocity_factor)
        self.torso.set_height(end_height)


    def head_start_pose(self, pan=0.0, tilt=0.0):
        """Sets the robot's head to some initial configuration.

        Args: 
            pan: Value in radians for head sideways rotation, counterclockwise
                when looking at robot from an aerial view.
            tilt: Value in radians for head up/down movement, positive means
                looking downwards.
        """
        self.head.pan_tilt(pan=pan, tilt=tilt)


    def get_img_data(self):
        """Obtain camera and depth image.
        
        Returns:
            Tuple containing RGB camera image and corresponding depth image.
        """
        c_img = self.camera.read_color_data()
        d_img = self.camera.read_depth_data()
        return (c_img, d_img)


    def create_grasp_pose(self, x, y, z, rot_x, rot_y, rot_z):
        """Creates a pose in the world for the robot's end-effect to go to.
        
        Args:
            x, y, z, rot_x, rot_y, rot_z: A 6-D pose description.
        """
        pose_name = self.gripper.create_grasp_pose_intuitive(
                x, y, z, rot_x, rot_y, rot_z)
        return pose_name


    def move_to_pose(self, pose_name, velocity_factor=None):
        """Moves to a pose.
 
        In the HSR, this moved the `hand_palm_link` to the frame named `pose_name` at
        the correct pose. For the Fetch, we should be able to extract the pose
        from `pose_name` and then call the Arm's `move_to_pose` method.
        
        Args:
            pose_name: A string name for the pose to go to.
            velocity_factor: controls the speed, closer to 0 means slower,
                closer to 1 means faster. (If 0.0, then it turns into 1.0 for
                some reason.) Values greater than 1.0 are cut to 1.0.
        """
        # See: 
        #   http://wiki.ros.org/tf/Tutorials/Writing%20a%20tf%20listener%20%28Python%29
        #   https://answers.ros.org/question/256354/does-tftransformlistenerlookuptransform-return-quaternion-position-or-translation-and-rotation/
        # First frame should be the reference frame, use `base_link`, not `odom`.
        point, quat = self.gripper.tl.lookupTransform('base_link', pose_name, rospy.Time(0))

        # See:
        #   https://github.com/cse481wi18/cse481wi18/blob/indigo-devel/applications/scripts/cart_arm_demo.py
        #   https://github.com/cse481wi18/cse481wi18/wiki/Lab-19%3A-Cartesian-space-manipulation
        ps = PoseStamped()
        ps.header.frame_id = 'base_link'
        ps.pose = Pose(
                Point(point[0], point[1], point[2]),
                Quaternion(quat[0], quat[1], quat[2], quat[3])
        )

        # See `arm.py` written by Justin Huang
        error = self.arm.move_to_pose(pose_stamped=ps, velocity_factor=velocity_factor)
        if error is not None:
            rospy.logerr(error)

    def open_gripper(self):
        self.gripper.open()

    def close_gripper(self, width=0.0, max_effort=100):
        self.gripper.close(width=width, max_effort=max_effort)
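A hedged end-to-end sketch of how this Robot_Skeleton class might be driven; every numeric value below is illustrative rather than taken from the project.

robot = Robot_Skeleton()
robot.body_start_pose(start_height=0.10, end_height=0.10, velocity_factor=0.5)
robot.head_start_pose(pan=0.0, tilt=0.8)       # tilt down toward the workspace (illustrative)
c_img, d_img = robot.get_img_data()
pose_name = robot.create_grasp_pose(0.6, 0.0, 0.8, 0.0, 0.0, 0.0)  # illustrative 6-D grasp pose
robot.move_to_pose(pose_name, velocity_factor=0.3)
robot.close_gripper(width=0.0, max_effort=100)
robot.open_gripper()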
Code example #22
File: waterpulse.py Project: vjampala/cse481
    def __init__(self, context):
        super(WaterPulse, self).__init__(context)
        self.setObjectName('WaterPulse')
        self._widget = QWidget()
        self._widget.setFixedSize(600, 600)
        self._sound_client = SoundClient()
        rospy.Subscriber('robotsound', SoundRequest, self.sound_cb)

        self.l_joint_names = ['l_shoulder_pan_joint',
                              'l_shoulder_lift_joint',
                              'l_upper_arm_roll_joint',
                              'l_elbow_flex_joint',
                              'l_forearm_roll_joint',
                              'l_wrist_flex_joint',
                              'l_wrist_roll_joint']
 
        l_traj_controller_name = '/l_arm_controller/joint_trajectory_action'
        self.l_traj_action_client = SimpleActionClient(l_traj_controller_name,
                                                       JointTrajectoryAction)
        rospy.loginfo('Waiting for a response from the trajectory '
                      + 'action server for LEFT arm...')
        self.l_traj_action_client.wait_for_server()

        QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10))
        self.sound_sig.connect(self.sound_sig_cb)
        
        large_box = QtGui.QVBoxLayout()

        # Textbox to enter words for PR2 to say 
        sound_textbox = QtGui.QLineEdit("Squirtle Squirtle")  # Default Text
        sound_textbox.setFixedWidth(450)
        self.marker_publisher = rospy.Publisher('visualization_marker', Marker)
        
        # Set a handler on the textbox to retrieve the text when button clicked
        self.sound_textbox = sound_textbox

        button_box = QtGui.QHBoxLayout()
        button_box.addItem(QtGui.QSpacerItem(15,20))
        button_box.addWidget(self.create_button('Speak', self.command_cb))
        button_box.addWidget(sound_textbox)
        button_box.addStretch(1)
        large_box.addLayout(button_box)
        
        # 'Trick or Treat' & 'Thank You' Buttons
        halloween_box = QtGui.QHBoxLayout()
        halloween_box.addItem(QtGui.QSpacerItem(15,20))
        halloween_box.addWidget(self.create_button('Trick or Treat', self.command_cb))
        halloween_box.addWidget(self.create_button('Thank You', self.command_cb))
        halloween_box.addStretch(1)
        large_box.addLayout(halloween_box)
        
        # Buttons to move the PR2's head
        up_head = Head(Head.UP, self)
        head_box = QtGui.QVBoxLayout()
        up_head_box = QtGui.QHBoxLayout()
        up_head_button = self.create_button('^', up_head.create_closure())
        down_head = Head(Head.DOWN, self)
        down_head_box = QtGui.QHBoxLayout()
        down_head_button = self.create_button('v', down_head.create_closure())
        right_head = Head(Head.RIGHT, self)
        right_head_button = self.create_button('>', right_head.create_closure())
        left_head = Head(Head.LEFT, self)
        left_head_button = self.create_button('<', left_head.create_closure())
        left_right_head_box = QtGui.QHBoxLayout()

        head_speed_sld = QtGui.QSlider(QtCore.Qt.Horizontal)
        head_speed_sld.setFocusPolicy(QtCore.Qt.NoFocus)
        head_speed_sld.setMinimum(1)
        head_speed_sld.setMaximum(5)
        head_speed_sld.valueChanged[int].connect(Head.set_speed)

        up_head_box.addItem(QtGui.QSpacerItem(235,20))
        up_head_box.addWidget(up_head_button)
        up_head_box.addItem(QtGui.QSpacerItem(275,20))
        left_right_head_box.addItem(QtGui.QSpacerItem(160,20))
        left_right_head_box.addWidget(left_head_button)
        left_right_head_box.addItem(QtGui.QSpacerItem(60,20))
        left_right_head_box.addWidget(right_head_button)
        left_right_head_box.addItem(QtGui.QSpacerItem(225,20))
        down_head_box.addItem(QtGui.QSpacerItem(235,20))
        down_head_box.addWidget(down_head_button)
        down_head_box.addItem(QtGui.QSpacerItem(275,20))
        head_sld_box = QtGui.QHBoxLayout()
        head_sld_box.addItem(QtGui.QSpacerItem(225,20))
        head_sld_box.addWidget(head_speed_sld)
        head_sld_box.addItem(QtGui.QSpacerItem(225,20))
        head_box.addLayout(up_head_box)
        head_box.addLayout(left_right_head_box)
        head_box.addLayout(down_head_box)
        head_box.addLayout(head_sld_box)
        large_box.addLayout(head_box)        

        # Buttons to move the grippers
        gripper = Gripper(Gripper.RIGHT, Gripper.OPEN, self)
        right_gripper = self.create_button('Right gripper',
                gripper.create_closure())
        gripper = Gripper(Gripper.LEFT, Gripper.OPEN, self)
        left_gripper = self.create_button('Left gripper', gripper.create_closure()) 
        knock_button = self.create_button('Knock', self.knock)
        large_box.addItem(QtGui.QSpacerItem(100,250))
        gripper_box = QtGui.QHBoxLayout()
        gripper_box.addItem(QtGui.QSpacerItem(75,20))
        gripper_box.addWidget(left_gripper)
        gripper_box.addWidget(knock_button)
        gripper_box.addItem(QtGui.QSpacerItem(450,20))
        gripper_box.addWidget(right_gripper)
        gripper_box.addItem(QtGui.QSpacerItem(75,20))
        large_box.addLayout(gripper_box)

        # Buttons to move the base
        base_box = QtGui.QVBoxLayout()
        large_box.addItem(QtGui.QSpacerItem(100,100))
        forward_base_box = QtGui.QHBoxLayout()
        forward_base = Base(Base.FORWARD, self)
        forward_base_button = self.create_button('move forward', forward_base.create_closure())
        forward_base_box.addItem(QtGui.QSpacerItem(400,20))
        forward_base_box.addWidget(forward_base_button)
        forward_base_box.addItem(QtGui.QSpacerItem(400,20))
        base_box.addLayout(forward_base_box)

        left_right_base_box = QtGui.QHBoxLayout()
        left_base= Base(Base.LEFT, self)
        left_base_button = self.create_button('move left',
                left_base.create_closure())

        right_base= Base(Base.RIGHT, self)
        right_base_button= self.create_button('move right',
                right_base.create_closure())
        left_right_base_box.addItem(QtGui.QSpacerItem(300,20))
        left_right_base_box.addWidget(left_base_button)
        left_right_base_box.addItem(QtGui.QSpacerItem(50,20))
        left_right_base_box.addWidget(right_base_button)
        left_right_base_box.addItem(QtGui.QSpacerItem(300,20))
        base_box.addLayout(left_right_base_box)

        backward_base_box = QtGui.QHBoxLayout()
        backward_base= Base(Base.BACKWARD, self)
        backward_base_button = self.create_button('move backward',
                backward_base.create_closure())
        backward_base_box.addItem(QtGui.QSpacerItem(400,20))
        backward_base_box.addWidget(backward_base_button)
        backward_base_box.addItem(QtGui.QSpacerItem(400,20))
        base_box.addLayout(backward_base_box)

        large_box.addLayout(base_box)
        
        turn_base_box = QtGui.QHBoxLayout()

        counter_base= Base(Base.COUNTER, self)
        counter_base_button = self.create_button('\\\n        -->',
                counter_base.create_closure())
        
        clockwise_base= Base(Base.CLOCKWISE, self)
        clockwise_base_button = self.create_button('        /\n<--',
                clockwise_base.create_closure())
        turn_base_box.addItem(QtGui.QSpacerItem(75,20))
        turn_base_box.addWidget(counter_base_button)
        turn_base_box.addItem(QtGui.QSpacerItem(225,20))
        turn_base_box.addWidget(clockwise_base_button)
        turn_base_box.addItem(QtGui.QSpacerItem(100,20))
        large_box.addLayout(turn_base_box)

        self._widget.setObjectName('WaterPulse')
        self._widget.setLayout(large_box)
        context.add_widget(self._widget)
        self._widget.setStyleSheet("QWidget { image: url(%s) }" %
                (str(os.path.dirname(os.path.realpath(__file__))) +
                "/../../rosie_background.jpg"))
Code example #23
File: simple_gui_good.py Project: vjampala/cse481
    def __init__(self, context):
        super(SimpleGUI, self).__init__(context)
        self.setObjectName('SimpleGUI')
        self._widget = QWidget()
        self._widget.setFixedSize(600, 600)
        self._sound_client = SoundClient()
        rospy.Subscriber('robotsound', SoundRequest, self.sound_cb)

        QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10))
        self.sound_sig.connect(self.sound_sig_cb)

        large_box = QtGui.QVBoxLayout()

        #Sound textbox
        sound_textbox = QtGui.QLineEdit("Squirtle Squirtle")  #Default Text
        sound_textbox.setFixedWidth(450)
        #Set a handle on the textbox to retrieve the text when button clicked
        self.sound_textbox = sound_textbox

        button_box = QtGui.QHBoxLayout()
        button_box.addItem(QtGui.QSpacerItem(15, 20))
        button_box.addWidget(self.create_button('Speak', self.command_cb))
        button_box.addWidget(sound_textbox)
        button_box.addStretch(1)
        large_box.addLayout(button_box)

        speech_box = QtGui.QHBoxLayout()
        speech_box.addItem(QtGui.QSpacerItem(15, 20))
        self.speech_label = QtGui.QLabel('Robot has not spoken yet.')
        palette = QtGui.QPalette()
        palette.setColor(QtGui.QPalette.Foreground, QtCore.Qt.blue)
        self.speech_label.setPalette(palette)
        speech_box.addWidget(self.speech_label)

        large_box.addLayout(speech_box)
        large_box.addStretch(1)

        #large_box.addItem(QtGui.QSpacerItem(50,20))

        up_head = Head(Head.UP)
        head_box = QtGui.QVBoxLayout()
        up_head_box = QtGui.QHBoxLayout()
        up_head_button = self.create_button('^', up_head.create_closure())
        #large_box.addWidget(up_head_button)
        down_head = Head(Head.DOWN)
        down_head_box = QtGui.QHBoxLayout()
        down_head_button = self.create_button('v', down_head.create_closure())
        #large_box.addWidget(down_head_button)
        right_head = Head(Head.RIGHT)
        right_head_button = self.create_button('>', right_head.create_closure())
        #large_box.addWidget(right_head_button)
        left_head = Head(Head.LEFT)
        left_head_button = self.create_button('<', left_head.create_closure())
        left_right_head_box = QtGui.QHBoxLayout()

        up_head_box.addItem(QtGui.QSpacerItem(235, 20))
        up_head_box.addWidget(up_head_button)
        up_head_box.addItem(QtGui.QSpacerItem(275, 20))
        left_right_head_box.addItem(QtGui.QSpacerItem(160, 20))
        left_right_head_box.addWidget(left_head_button)
        left_right_head_box.addItem(QtGui.QSpacerItem(60, 20))
        left_right_head_box.addWidget(right_head_button)
        left_right_head_box.addItem(QtGui.QSpacerItem(225, 20))
        down_head_box.addItem(QtGui.QSpacerItem(235, 20))
        down_head_box.addWidget(down_head_button)
        down_head_box.addItem(QtGui.QSpacerItem(275, 20))
        head_box.addLayout(up_head_box)
        head_box.addLayout(left_right_head_box)
        head_box.addLayout(down_head_box)
        large_box.addLayout(head_box)
        #large_box.addItem(QtGui.QSpacerItem(500,20))
        #large_box.addWidget(left_head_button)

        gripper = Gripper(Gripper.RIGHT, Gripper.OPEN)
        right_gripper = self.create_button('Right gripper!', gripper.create_closure())
        gripper = Gripper(Gripper.LEFT, Gripper.OPEN)
        left_gripper = self.create_button('Left gripper!', gripper.create_closure())
        large_box.addItem(QtGui.QSpacerItem(100, 250))

        gripper_box = QtGui.QHBoxLayout()
        gripper_box.addItem(QtGui.QSpacerItem(75, 20))
        gripper_box.addWidget(left_gripper)
        gripper_box.addItem(QtGui.QSpacerItem(450, 20))
        gripper_box.addWidget(right_gripper)
        gripper_box.addItem(QtGui.QSpacerItem(75, 20))
        large_box.addLayout(gripper_box)

        base_box = QtGui.QVBoxLayout()

        large_box.addItem(QtGui.QSpacerItem(100, 100))

        #forward
        forward_base_box = QtGui.QHBoxLayout()
        forward_base = Base(Base.FORWARD)
        forward_base_button = self.create_button('move forward', forward_base.create_closure())
        forward_base_box.addItem(QtGui.QSpacerItem(400, 20))
        forward_base_box.addWidget(forward_base_button)
        forward_base_box.addItem(QtGui.QSpacerItem(400, 20))
        base_box.addLayout(forward_base_box)
        #large_box.addWidget(forward_base_button)

        #left
        left_right_base_box = QtGui.QHBoxLayout()
        left_base = Base(Base.LEFT)
        left_base_button = self.create_button('move left', left_base.create_closure())
        #large_box.addWidget(left_base_button)

        #right
        right_base = Base(Base.RIGHT)
        right_base_button = self.create_button('move right', right_base.create_closure())
        left_right_base_box.addItem(QtGui.QSpacerItem(300, 20))
        left_right_base_box.addWidget(left_base_button)
        left_right_base_box.addItem(QtGui.QSpacerItem(50, 20))
        left_right_base_box.addWidget(right_base_button)
        left_right_base_box.addItem(QtGui.QSpacerItem(300, 20))
        base_box.addLayout(left_right_base_box)
        #large_box.addWidget(right_base_button)

        #backward
        backward_base_box = QtGui.QHBoxLayout()
        backward_base = Base(Base.BACKWARD)
        backward_base_button = self.create_button('move backward', backward_base.create_closure())
        backward_base_box.addItem(QtGui.QSpacerItem(400, 20))
        backward_base_box.addWidget(backward_base_button)
        backward_base_box.addItem(QtGui.QSpacerItem(400, 20))
        base_box.addLayout(backward_base_box)
        #large_box.addWidget(backward_base_button)

        large_box.addLayout(base_box)

        turn_base_box = QtGui.QHBoxLayout()

        #turn left
        turnleft_base = Base(Base.TURNLEFT)
        turnleft_base_button = self.create_button('        /\n<--', turnleft_base.create_closure())
        #large_box.addWidget(turnleft_base_button)

        #turn right
        turnright_base = Base(Base.TURNRIGHT)
        turnright_base_button = self.create_button('\\\n        -->', turnright_base.create_closure())
        turn_base_box.addItem(QtGui.QSpacerItem(75, 20))
        turn_base_box.addWidget(turnright_base_button)
        turn_base_box.addItem(QtGui.QSpacerItem(225, 20))
        turn_base_box.addWidget(turnleft_base_button)
        turn_base_box.addItem(QtGui.QSpacerItem(100, 20))
        large_box.addLayout(turn_base_box)
        #large_box.addWidget(turnright_base_button)
        self._widget.setObjectName('SimpleGUI')
        self._widget.setLayout(large_box)
        context.add_widget(self._widget)
        self._widget.setStyleSheet(
            "QWidget { image: url(%s) }" %
            "/home/vjampala/catkin_ws/src/cse481/hw1/rqt_simplegui/rosie_background.jpg"
        )
コード例 #24
0
    def step(self, action, gripper_type=None, is_position=False, **kwargs):
        """Execute action and return reward and next observation.

        Args:
            action: tuple or list (y_pix, x_pix, angle, open_scale).
                y_pix is the row index into the heightmap.
                x_pix is the column index into the heightmap.
                angle is the rotation angle (radians) around the z axis.
                open_scale sets how wide the gripper opens (forwarded to
                    `primitive_grasping`).

        Returns:
            reward: float number
            observation: dict, scene observation + gripper observation
        """
        if self.gripper_selection:
            if len(kwargs) == 0:
                if gripper_type.startswith('barrett_hand-'):
                    palm_joint = float(gripper_type.split('-')[-1])
                    gripper_type = 'barrett_hand'
                    gripper_record = self._gripper_record_dict[(gripper_type,
                                                                palm_joint)]
                else:
                    gripper_record = self._gripper_record_dict[gripper_type]
            else:
                gripper_record = kwargs

            self._gripper = Gripper(gripper_type=gripper_type,
                                    bullet_client=self.bullet_client,
                                    home_position=self._gripper_home_position,
                                    num_side_images=8,
                                    **gripper_record)
            self._gripper.move(self._gripper_home_position, 0)

        a_y, a_x, angle, open_scale = action
        if is_position:
            y, x = a_y, a_x
            y_pix = int(
                (y - self._view_bounds[1, 0]) / self._heightmap_pix_size)
            x_pix = int(
                (x - self._view_bounds[0, 0]) / self._heightmap_pix_size)
        else:
            y_pix, x_pix = a_y, a_x
            x = x_pix * self._heightmap_pix_size + self._view_bounds[0, 0]
            y = y_pix * self._heightmap_pix_size + self._view_bounds[1, 0]

        grasp_pix = np.array([y_pix, x_pix])
        valid_pix = np.array(np.where(self._depth_heightmap > 0)).T  # y,x
        dist_to_valid = np.sqrt(np.sum((valid_pix - grasp_pix)**2, axis=1))
        closest_valid_pix = grasp_pix
        # Snap to the nearest non-zero depth pixel if it is less than 2 pixels away.
        if np.min(dist_to_valid) < 2:
            closest_valid_pix = valid_pix[np.argmin(dist_to_valid), :]

        z = self._depth_heightmap[closest_valid_pix[0],
                                  closest_valid_pix[1]] - 0.05
        z = max(z, 0)
        grasp_position = np.array([x, y, z])
        grasp_position = np.clip(
            grasp_position, self._workspace_bounds[:, 0],
            self._workspace_bounds[:, 1]
        )  # clamp grasp position w.r.t. workspace bounds
        self._gripper.primitive_grasping(grasp_position,
                                         angle % (2 * np.pi),
                                         open_scale=open_scale,
                                         stop_at_contact=True)
        try:
            self.wait_till_stable()
        except Exception:
            for _ in range(240 * 2):
                self.bullet_client.stepSimulation()

        objects_lifted = 0
        target_ids_copy = list(self._target_ids)
        for target_id in target_ids_copy:
            pos = self.bullet_client.getBasePositionAndOrientation(
                target_id)[0]
            if pos[-1] > 0.2:
                self.bullet_client.resetBasePositionAndOrientation(
                    target_id, [1, target_id, 0.3], [0, 0, 0, 1])
                self.bullet_client.stepSimulation()
                objects_lifted += 1
                self._target_ids.remove(target_id)
            elif np.any(pos < self._workspace_bounds[:, 0]) or np.any(
                    pos > self._workspace_bounds[:, 1]):
                self.bullet_client.resetBasePositionAndOrientation(
                    target_id, [1, target_id, 0.3], [0, 0, 0, 1])
                self.bullet_client.stepSimulation()
                self._target_ids.remove(target_id)

        # should not lift obstacles
        for target_id in self._obstacle_ids:
            pos = self.bullet_client.getBasePositionAndOrientation(
                target_id)[0]
            if pos[-1] > 0.2:
                self.bullet_client.resetBasePositionAndOrientation(
                    target_id, [1, target_id, 0.3], [0, 0, 0, 1])
                self.bullet_client.stepSimulation()
                objects_lifted = -1

        reward = objects_lifted == 1
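        # (reward is 1 only when exactly one target object was lifted;
        #  lifting any obstacle forces objects_lifted to -1, so no reward)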

        scene_observation = self._get_scene_observation()
        observation = {**self._gripper_observation, **scene_observation}

        if self.gripper_selection:
            self._gripper.remove()
            self._gripper = None

        return reward, observation
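
The pixel/world conversion in `step` above is an affine map defined by the heightmap origin (`_view_bounds[:, 0]`) and the pixel size. A minimal standalone sketch of that mapping; the bounds and pixel size below are made-up values, not the environment's actual configuration:

import numpy as np

# Hypothetical heightmap geometry: row 0 is the x axis, row 1 the y axis,
# column 0 the lower bound of each axis (assumed values).
view_bounds = np.array([[-0.3, 0.3],
                        [-0.5, 0.1]])
pix_size = 0.002  # meters per heightmap pixel (assumed)

def pix_to_world(y_pix, x_pix):
    """Heightmap indices -> workspace (x, y) in meters."""
    x = x_pix * pix_size + view_bounds[0, 0]
    y = y_pix * pix_size + view_bounds[1, 0]
    return x, y

def world_to_pix(x, y):
    """Inverse map: workspace (x, y) -> heightmap indices (rounded)."""
    x_pix = int(round((x - view_bounds[0, 0]) / pix_size))
    y_pix = int(round((y - view_bounds[1, 0]) / pix_size))
    return y_pix, x_pix

x, y = pix_to_world(120, 80)
assert world_to_pix(x, y) == (120, 80)  # round trip recovers the indices
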
コード例 #25
0
class Robot_Interface(object):
    """For usage with the Fetch robot."""
    def __init__(self, simulation=True):
        """Initializes various aspects of the Fetch.
        
        TODOs: get things working, also use `simulation` flag to change ROS
        topic names if necessary (especially for the cameras!). UPDATE: actually
        I don't think this is necessary now, they have the same topic names.
        """
        rospy.init_node("fetch")
        self.arm = Arm()
        self.arm_joints = ArmJoints()
        self.base = Base()
        self.camera = RGBD()
        self.head = Head()
        self.gripper = Gripper(self.camera)
        self.torso = Torso()
        self.joint_reader = JointStateReader()

        # Tucked arm starting joint angle configuration
        self.names = ArmJoints().names()
        self.tucked = [1.3200, 1.3999, -0.1998, 1.7199, 0.0, 1.6600, 0.0]
        self.tucked_list = [(x, y) for (x, y) in zip(self.names, self.tucked)]

        # Initial (x,y,yaw) position of the robot wrt map origin. We keep this
        # fixed so that we can reset to this position as needed. The HSR's
        # `omni_base.pose` (i.e., the start pose) returns (x,y,yaw) where yaw is
        # the rotation about that axis (intuitively, the z axis). For the base,
        # `base.odom` supplies both `position` and `orientation` attributes.
        start = copy.deepcopy(self.base.odom.position)
        yaw = Base._yaw_from_quaternion(self.base.odom.orientation)
        self.start_pose = np.array([start.x, start.y, yaw])
        self.TURN_SPEED = 0.3

        self.num_restarts = 0

    def body_start_pose(self,
                        start_height=0.10,
                        end_height=0.10,
                        velocity_factor=None):
        """Sets the robot's body to some initial configuration.
        
        The HSR uses `whole_body.move_to_go()` which initializes an appropriate
        posture so that the hand doesn't collide with movement. For the Fetch,
        we should probably make the torso extend, so the arms can extend more
        easily without collisions. Use `move_to_joint_goal` since that uses
        motion planning. Do NOT directly set the joints without planning!!
        """
        self.torso.set_height(start_height)
        self.arm.move_to_joint_goal(self.tucked_list,
                                    velocity_factor=velocity_factor)
        self.torso.set_height(end_height)
        # Specific to the siemens challenge (actually a lot of this stuff is ...)
        if self.num_restarts == 0:
            self.base.turn(angular_distance=45 * DEG_TO_RAD)
            self.num_restarts += 1

    def head_start_pose(self):
        """Hard-coded starting pose for the robot's head.
        
        These values are from the HSR. The Fetch needs a different pan and tilt.
        Positive pan means rotating counterclockwise when looking at robot from
        an aerial view.
        """
        self.head.pan_tilt(pan=0.0, tilt=0.8)

    def position_start_pose(self, offsets=None, do_something=False):
        """Assigns the robot's base to some starting pose.

        Mainly to "reset" the robot to the original starting position (and also,
        rotation about base axis) after it has moved, usually w/no offsets.
        
        Ugly workaround: we have target (x,y), and compute the distance to the
        point and the angle. We turn the Fetch according to that angle, and go
        forward. Finally, we do a second turn which corresponds to the target
        yaw at the end. This turns w.r.t. the current angle, so we undo the
        effect of the first turn.  See `examples/test_position_start_pose.py`
        for tests.
        
        Args:
            offsets: a list of length 3, indicating offsets in the x, y, and
            yaws, respectively, to be added onto the starting pose.
        """
        # Causing problems during my tests of the Siemens demo.
        if not do_something:
            return

        current_pos = copy.deepcopy(self.base.odom.position)
        current_theta = Base._yaw_from_quaternion(
            self.base.odom.orientation)  # [-pi, pi]
        ss = np.array([current_pos.x, current_pos.y, current_theta])

        # Absolute target position and orientation specified with `pp`.
        pp = np.copy(self.start_pose)
        if offsets:
            pp += np.array(offsets)

        # Get distance to travel, critically assumes `pp` is starting position.
        dist = np.sqrt((ss[0] - pp[0])**2 + (ss[1] - pp[1])**2)
        rel_x = ss[0] - pp[0]
        rel_y = ss[1] - pp[1]
        assert -1 <= rel_x / dist <= 1
        assert -1 <= rel_y / dist <= 1

        # But we also need to be *facing* the correct direction, w/input [-1,1].
        # First, get the opposite view (facing "outwards"), then flip by 180.
        desired_facing = np.arctan2(rel_y, rel_x)  # [-pi, pi], facing outward
        desired_theta = math.pi + desired_facing  # [0, 2*pi], flip by 180
        if desired_theta > math.pi:
            desired_theta -= 2 * math.pi  # [-pi, pi]

        # Reconcile with the current theta. Got this by basically trial/error
        angle = desired_theta - current_theta  # [-2*pi, 2*pi]
        if angle > math.pi:
            angle -= 2 * math.pi
        elif angle < -math.pi:
            angle += 2 * math.pi

        self.base.turn(angular_distance=angle, speed=self.TURN_SPEED)
        self.base.go_forward(distance=dist, speed=0.2)

        # Back at the start x, y, but now need to consider the _second_ turn.
        # The robot is facing at `desired_theta` rads, but wants `pp[2]` rads.
        final_angle = pp[2] - desired_theta
        if final_angle > math.pi:
            final_angle -= 2 * math.pi
        elif final_angle < -math.pi:
            final_angle += 2 * math.pi
        self.base.turn(angular_distance=final_angle, speed=self.TURN_SPEED)

    def get_img_data(self):
        """Obtain camera and depth image.
        
        Returns:
            Tuple containing RGB camera image and corresponding depth image.
        """
        c_img = self.camera.read_color_data()
        d_img = self.camera.read_depth_data()
        return (c_img, d_img)

    def get_depth(self, point, d_img):
        """Compute mean depth near grasp point.

        NOTE: assumes that we have a similar `cfg.ZRANGE` as with the HSR. I'm
        not sure where exactly this comes from.
        """
        y, x = int(point[0]), int(point[1])
        # z_box = d_img[y-ZRANGE:y+ZRANGE, x-ZRANGE:x+ZRANGE]
        z_box = d_img[y - 20:y + 20, x - 20:x + 20]
        indx = np.nonzero(z_box)
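        # NOTE: if the crop has no nonzero depth pixels, np.mean over an empty selection returns NaN.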
        z = np.mean(z_box[indx])
        return z
        # y, x = int(point[0]), int(point[1])
        # # z_box = d_img[y-ZRANGE:y+ZRANGE, x-ZRANGE:x+ZRANGE]
        # z_box = d_img[y-10:y + 10, x - 10:x + 10]
        # indx = np.nonzero(z_box)
        # z = np.mean(z_box[indx])
        # return z

    def get_rot(self, direction):
        """Compute rotation of gripper such that given vector is grasped.

        Currently this directly follows the HSR code as there's nothing
        Fetch-dependent.
        """
        dy, dx = direction[0], direction[1]
        dx *= -1
        if dy < 0:
            dx *= -1
            dy *= -1
        rot = np.arctan2(dy, dx)
        rot = np.pi - rot
        return rot

    def create_grasp_pose(self, x, y, z, rot):
        """ If `intuitive=True` then x,y,z,rot interpreted wrt some link in the
        world, e.g., 'odom' for the Fetch. It's False by default to maintain
        backwards compatibility w/Siemens-based code.
        """
        pose_name = self.gripper.create_grasp_pose(x, y, z, rot)
        return pose_name

    def open_gripper(self):
        self.gripper.open()

    def close_gripper(self):
        self.gripper.close()

    def move_to_pose(self, pose_name, z_offset, velocity_factor=None):
        """Moves to a pose.
 
        In the HSR, moved the `hand_palm_link` to the frame named `pose_name` at
        the correct pose. For the Fetch we should be able to extract the pose
        from `pose_name` and then call the Arm's `move_to_pose` method.
        
        Args:
            pose_name: A string name for the pose to go to.
            z_offset: Scalar offset in z-direction, offset is wrt the pose
                specified by `pose_name`.
            velocity_factor: controls the speed, closer to 0 means slower,
                closer to 1 means faster. (If 0.0, then it turns into 1.0 for
                some reason.) Values greater than 1.0 are cut to 1.0.
        """
        # See:
        #   http://wiki.ros.org/tf/Tutorials/Writing%20a%20tf%20listener%20%28Python%29
        #   https://answers.ros.org/question/256354/does-tftransformlistenerlookuptransform-return-quaternion-position-or-translation-and-rotation/
        # First frame should be the reference frame, use `base_link`, not `odom`.
        point, quat = self.gripper.tl.lookupTransform('base_link', pose_name,
                                                      rospy.Time(0))
        z_point = point[2] + z_offset

        # See:
        #   https://github.com/cse481wi18/cse481wi18/blob/indigo-devel/applications/scripts/cart_arm_demo.py
        #   https://github.com/cse481wi18/cse481wi18/wiki/Lab-19%3A-Cartesian-space-manipulation
        ps = PoseStamped()
        ps.header.frame_id = 'base_link'
        ps.pose = Pose(Point(point[0], point[1], z_point),
                       Quaternion(quat[0], quat[1], quat[2], quat[3]))

        # See `arm.py` written by Justin Huang
        error = self.arm.move_to_pose(pose_stamped=ps,
                                      velocity_factor=velocity_factor)
        if error is not None:
            rospy.logerr(error)

    def find_ar(self, ar_number, velocity_factor=None):
        try:
            ar_name = 'ar_marker/' + str(ar_number)

            # HSR code, with two hard-coded offsets?
            #self.whole_body.move_end_effector_pose(geometry.pose(y=0.08, z=-0.3), ar_name)

            # Fetch 'translation'. Note the `ar_name` for pose name.
            point, quat = self.gripper.tl.lookupTransform(
                'base_link', ar_name, rospy.Time(0))
            y_point = point[1] + 0.08
            z_point = point[2] - 0.3

            ps = PoseStamped()
            ps.header.frame_id = 'base_link'
            ps.pose = Pose(Point(point[0], y_point, z_point),
                           Quaternion(quat[0], quat[1], quat[2], quat[3]))

            error = self.arm.move_to_pose(pose_stamped=ps,
                                          velocity_factor=velocity_factor)
            if error is not None:
                rospy.logerr(error)

            return True
        except Exception:
            return False

    def pan_head(self, tilt):
        """Adjusts tilt of the robot, AND set pan at zero.
        
        Args: 
            tilt: Value in radians, positive means looking downwards.
        """
        self.head.pan_tilt(pan=0, tilt=tilt)
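
The two-turn logic in `position_start_pose` above reduces to wrapping angle differences into [-pi, pi]. A small, hedged sketch of that step with a worked example; the poses are made up and `wrap_to_pi` is an illustrative helper, not part of the class:

import math

def wrap_to_pi(angle):
    """Wrap an angle in radians into [-pi, pi]."""
    while angle > math.pi:
        angle -= 2 * math.pi
    while angle < -math.pi:
        angle += 2 * math.pi
    return angle

# Hypothetical current pose `ss` and target start pose `pp`, each (x, y, yaw).
ss = (1.0, 1.0, math.radians(170))
pp = (0.0, 0.0, math.radians(-170))

# Face "back" toward the target point, drive the distance, then turn to the target yaw.
desired_facing = math.atan2(ss[1] - pp[1], ss[0] - pp[0])
desired_theta = wrap_to_pi(math.pi + desired_facing)
first_turn = wrap_to_pi(desired_theta - ss[2])
dist = math.hypot(ss[0] - pp[0], ss[1] - pp[1])
second_turn = wrap_to_pi(pp[2] - desired_theta)
print(first_turn, dist, second_turn)  # ~0.96 rad, ~1.41 m, ~-0.61 rad
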
コード例 #26
0
import rtde_control
import time
import json
from gripper import Gripper
import numpy as np

print("Connecting?")
rtde_c = rtde_control.RTDEControlInterface("192.168.1.6")
print("Connected?")

gripper = Gripper(2, 1)

# Parameters
velocity = 0.25
acceleration = 0.25
dt = 1.0 / 10  # 100 ms control period
lookahead_time = 0.2
gain = 200
joint_q = [-1.54, -1.4, -2.28, -0.59, 1.60, 0.023]

sequence = json.load(open("records/20201217-171704/arm_trajectory.json"))

input("Play?")

#offset = [0.25493689, -0.56119273, -0.02703432, 0, 0, 0]
offset = [0, 0, 0, 0, 0, 0]
# Move to the initial recorded pose with a regular moveL
rtde_c.moveL(np.add(offset, sequence[0][0]).tolist())
print("moved?")
time.sleep(1)
print("Moved to inital position")
コード例 #27
0
ファイル: milestone1.py プロジェクト: vjampala/cse481
class Milestone1GUI(Plugin):

    RECEIVE_FROM_HUMAN_R_POS = [0.00952670015493673, 0.3270780665526253,
    0.03185028324084582, -1.3968658009779173, -3.058799671876592,
    -1.1083678332942686, -1.6314425515258866]
    
    READ_FIDUCIAL_R_POS = [0.41004856860653505, 0.29772362823537935, 
    -0.019944325676627628, -1.8394298656773618, -0.44139252862458106,
    -0.09922194050427624, -4.761735317011306]
    
    NAVIGATE_R_POS = [-0.3594077470836499, 0.971353000916152, -1.9647276598906076,
    -1.431900313132731, -1.1839177367219755, -0.09817772642188527, -1.622044624784374]
    
    PLACE_ON_SHELF_R_POS = [-0.15015144487461773, 0.4539704512093072,
    -0.10846018983280459, -0.9819529421527269, -3.0207362886631235, -0.4990689162195487,
    -1.6026396464199553]

    LOWER_ON_SHELF_R_POS = [-0.2159792990560645, 0.6221451583409631,
    -0.1886376030177479, -0.959223940465513, 9.690004126983537, -0.2866303982732683,
    111.39703078836274]
    
    RELEASE_BOOK_R_POS = [-0.15545746838546493, 0.44145040258984786,
    -0.13267376861465774, -0.972108533778647, -3.028545645401449, -0.38572817936010495,
    0.008017066746929924]
    
    TUCKED_UNDER_L_POS = [0.05688828299939419, 1.2955758539090194, 1.7180547710154033,
    -1.4511548177467404, -0.28718096455759035, -0.0938208188421713, -10.980395466225648]
    
    sound_sig = Signal(SoundRequest)

    def __init__(self, context):
        super(Milestone1GUI, self).__init__(context)
        self._sound_client = SoundClient()
        rospy.Subscriber('robotsound', SoundRequest, self.sound_cb)
        switch_srv_name = 'pr2_controller_manager/switch_controller'
        rospy.loginfo('Waiting for switch controller service...')
        rospy.wait_for_service(switch_srv_name)
        self.switch_service_client = rospy.ServiceProxy(switch_srv_name,
                                                 SwitchController)

        self.r_joint_names = ['r_shoulder_pan_joint',
                              'r_shoulder_lift_joint',
                              'r_upper_arm_roll_joint',
                              'r_elbow_flex_joint',
                              'r_forearm_roll_joint',
                              'r_wrist_flex_joint',
                              'r_wrist_roll_joint']
        self.l_joint_names = ['l_shoulder_pan_joint',
                              'l_shoulder_lift_joint',
                              'l_upper_arm_roll_joint',
                              'l_elbow_flex_joint',
                              'l_forearm_roll_joint',
                              'l_wrist_flex_joint',
                              'l_wrist_roll_joint']

        # Create a trajectory action client
        r_traj_controller_name = '/r_arm_controller/joint_trajectory_action'
        self.r_traj_action_client = SimpleActionClient(r_traj_controller_name,
                JointTrajectoryAction)
        rospy.loginfo('Waiting for a response from the trajectory action server for RIGHT arm...')
        self.r_traj_action_client.wait_for_server()
        
        l_traj_controller_name = '/l_arm_controller/joint_trajectory_action'
        self.l_traj_action_client = SimpleActionClient(l_traj_controller_name,
                JointTrajectoryAction)
        rospy.loginfo('Waiting for a response from the trajectory action server for LEFT arm...')
        self.l_traj_action_client.wait_for_server()

        #init torso
        self.torso = Torso()

        #init gripper
        self.l_gripper = Gripper('l')
        self.r_gripper = Gripper('r')

        #init navigation
        self.navigation = Navigation()

        self._widget = QWidget()
        self._widget.setFixedSize(600, 600)
        QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10))
        
        large_box = QtGui.QVBoxLayout()

        #Button for outreaching to receive book from Human
        button_box = QtGui.QVBoxLayout()
        box_1 = QtGui.QHBoxLayout()
        box_2 = QtGui.QHBoxLayout()
        box_3 = QtGui.QHBoxLayout()
        box_4 = QtGui.QHBoxLayout()
        box_5 = QtGui.QHBoxLayout()
        box_6 = QtGui.QHBoxLayout()
        box_7 = QtGui.QHBoxLayout()
        box_8 = QtGui.QHBoxLayout()
        box_9 = QtGui.QHBoxLayout()
        box_10 = QtGui.QHBoxLayout()

        box_1.addItem(QtGui.QSpacerItem(15,2))
        box_1.addWidget(self.create_button('Prepare To Take', self.prepare_to_take))
        box_1.addItem(QtGui.QSpacerItem(445,2))

        box_2.addItem(QtGui.QSpacerItem(15,2))
        box_2.addWidget(self.create_button('Take From Human', self.take_from_human))
        box_2.addItem(QtGui.QSpacerItem(445,2))

        box_3.addItem(QtGui.QSpacerItem(15,2))
        box_3.addWidget(self.create_button('Prepare To Navigate', self.prepare_to_navigate))
        box_3.addItem(QtGui.QSpacerItem(445,2))

        # Button to move to shelf
        box_5.addItem(QtGui.QSpacerItem(15,2))
        box_5.addWidget(self.create_button('Navigate To Shelf', self.navigate_to_shelf))
        box_5.addItem(QtGui.QSpacerItem(445,2))

        box_4.addItem(QtGui.QSpacerItem(15,2))
        box_4.addWidget(self.create_button('Place On Shelf', self.place_on_shelf))
        box_4.addItem(QtGui.QSpacerItem(445,2))

        box_6.addItem(QtGui.QSpacerItem(15,2))
        box_6.addWidget(self.create_button('Give Information', self.give_information))
        box_6.addItem(QtGui.QSpacerItem(445,2))

        box_7.addItem(QtGui.QSpacerItem(15,2))
        box_7.addWidget(self.create_button('Navigate To Person', self.navigate_to_person))
        box_7.addItem(QtGui.QSpacerItem(445,2))

        self.book_textbox = QtGui.QLineEdit()
        self.book_textbox.setFixedWidth(100)

        box_8.addItem(QtGui.QSpacerItem(15,2))
        box_8.addWidget(self.book_textbox)
        box_8.addWidget(self.create_button('Pick Up Book', self.pick_up_from_shelf_button))
        box_8.addItem(QtGui.QSpacerItem(445,2))
        
        box_9.addItem(QtGui.QSpacerItem(15,2))
        box_9.addWidget(self.create_button('Localize', self.spin_base))
        box_9.addItem(QtGui.QSpacerItem(445,2))
       
        box_10.addItem(QtGui.QSpacerItem(15,2))
        box_10.addWidget(self.create_button('Non-nav Pick Up', self.pick_up_from_shelf))
        box_10.addItem(QtGui.QSpacerItem(445,2))

        button_box.addItem(QtGui.QSpacerItem(20,120))
        button_box.addLayout(box_1)
        button_box.addLayout(box_2)
        button_box.addLayout(box_3)
        button_box.addLayout(box_5)
        button_box.addLayout(box_4)
        button_box.addLayout(box_6)
        button_box.addLayout(box_7)
        button_box.addLayout(box_8)
        button_box.addLayout(box_9)
        button_box.addLayout(box_10)
        button_box.addItem(QtGui.QSpacerItem(20,240))
        large_box.addLayout(button_box)
        self.marker_perception = ReadMarkers()
        self.speech_recognition = SpeechRecognition(self)
        self.bookDB = BookDB()
        self.book_map = self.bookDB.getAllBookCodes()
        self._widget.setObjectName('Milestone1GUI')
        self._widget.setLayout(large_box)
        context.add_widget(self._widget)
        self._widget.setStyleSheet("QWidget { image: url(%s) }" %
                (str(os.path.dirname(os.path.realpath(__file__))) +
                "/../../librarian_gui_background.jpg"))

    def get_function(self, text):
        functions = {
            "bring-me-a-book":
                self.pick_up_from_shelf_routine,
            "put-this-book-back":
                self.prepare_to_take,
            "give-me-information":
                self.give_information_routine,
            "here-you-go":
                self.put_this_book_back_routine,
        }
        if text not in functions:
            print ("Could not find key %s" % text)
            return None
        return functions[text]

    def sound_cb(self, sound_request):
        qWarning('Received sound.')
        self.sound_sig.emit(sound_request)
        
    def create_button(self, name, method):
        btn = QtGui.QPushButton(name, self._widget)
        btn.clicked.connect(method)
        btn.setAutoRepeat(True)
        return btn

    def prepare_to_take(self):
        # Open gripper and move arms to take book
        self.torso.move(.15) # move torso .15 meters up from base (MAX is .2)
        self.saved_l_arm_pose = Milestone1GUI.TUCKED_UNDER_L_POS
        self.saved_r_arm_pose = Milestone1GUI.RECEIVE_FROM_HUMAN_R_POS       
        self.move_arm('l', 5.0)
        self.move_arm('r', 5.0)  # Increase these numbers for slower movement
        self.l_gripper.close_gripper()
        self.r_gripper.open_gripper(True)
        self._sound_client.say("Please give me a book")

    def take_from_human(self):
        self.marker_perception.is_listening = True
        # Close gripper and move arms to see book
        self.r_gripper.close_gripper(True)
        self._sound_client.say("Thank you")
        time.sleep(1)
        self.saved_r_arm_pose = Milestone1GUI.READ_FIDUCIAL_R_POS
        self.move_arm('r', 5.0, True)  # Increase these numbers for slower movement
        self.look_at_r_gripper()
        rospy.loginfo("marker id returned by get_marker_id is: " + 
                 str(self.marker_perception.get_marker_id()))

    def look_at_r_gripper(self):
        name_space = '/head_traj_controller/point_head_action'

        head_client = SimpleActionClient(name_space, PointHeadAction)
        head_client.wait_for_server()

        head_goal = PointHeadGoal()
        head_goal.target.header.frame_id = "r_gripper_tool_frame"
        head_goal.min_duration = rospy.Duration(0.3)
        head_goal.target.point = Point(0, 0, 0)
        head_goal.max_velocity = 1.0
        head_client.send_goal(head_goal)
        head_client.wait_for_result(rospy.Duration(5.0))

        if (head_client.get_state() != GoalStatus.SUCCEEDED):
            rospy.logwarn('Head action unsuccessful.')
        
    def prepare_to_navigate(self):
        self.marker_perception.is_listening = False
        # Tuck arms
        self.saved_r_arm_pose = Milestone1GUI.NAVIGATE_R_POS
        self.move_arm('r', 5.0)  # Increase these numbers for slower movement
        
    #spin 360 * rotate_count degrees clockwise
    def spin_base(self, rotate_count):
        # Adjust height and tuck arms before localization
        self._sound_client.say("Please wait while I orient myself.")
        self.torso.move(.15)
        self.saved_l_arm_pose = Milestone1GUI.TUCKED_UNDER_L_POS
        self.saved_r_arm_pose = Milestone1GUI.NAVIGATE_R_POS       
        self.move_arm('l', 5.0, True)
        self.move_arm('r', 5.0, True)
        self.l_gripper.close_gripper()
        self.r_gripper.close_gripper()
        
        if not rotate_count:
            rotate_count = 2
        topic_name = '/base_controller/command'
        base_publisher = rospy.Publisher(topic_name, Twist)

        twist_msg = Twist()
        twist_msg.linear = Vector3(0.0, 0.0, 0.0)
        twist_msg.angular = Vector3(0.0, 0.0, 0.5)
        start_time = rospy.get_rostime()
        while rospy.get_rostime() < start_time + rospy.Duration(15.0 * rotate_count):
            base_publisher.publish(twist_msg)

    def navigate_to_shelf(self, marker_id = None):
        # Move forward, place book on the shelf, and move back
        if marker_id is None or marker_id is False:
            marker_id = self.marker_perception.get_marker_id()
        rospy.loginfo("marker id returned by get_marker_id is: " + str(marker_id))
        if marker_id is False:
            rospy.loginfo("wuuuuuuuut")
        if marker_id is None:
            self._sound_client.say("I don't think I am holding a book "
                    "right now")
            rospy.logwarn("Navigate to shelf called when marker id is None")
            return
              
        book = self.book_map.get(unicode(marker_id))
        if book is None:
            self._sound_client.say("The book that I am holding is unknown "
                    "to me")
            rospy.logwarn("Navigate to shelf called when marker id is not in database")
            return
        x = book.getXCoordinate()
        y = book.getYCoordinate()
        self.navigation.move_to_shelf(x, y)

    # Ye Olde Dropping way of putting a book on the shelf. Deprecating.
    def drop_on_shelf(self):
        self.saved_r_arm_pose = Milestone1GUI.PLACE_ON_SHELF_R_POS
        self.move_arm('r', 4.0, True)  # Increase these numbers for slower movement
        self.move_base(True)
        self.r_gripper.open_gripper(True)
        self.saved_r_arm_pose = Milestone1GUI.RELEASE_BOOK_R_POS
        self.move_arm('r', 2.0, True)  # Increase these numbers for slower movement
        self.move_base(False)
        self.marker_perception.reset_marker_id()

    def place_on_shelf(self):
        self.saved_r_arm_pose = Milestone1GUI.PLACE_ON_SHELF_R_POS
        self.move_arm('r', 4.0, True)  # Increase these numbers for slower movement
        self.move_base(True)
        self.saved_r_arm_pose = Milestone1GUI.LOWER_ON_SHELF_R_POS
        self.move_arm('r', 2.0, True)  # Increase these numbers for slower movement
        self.r_gripper.open_gripper(True)
        self.move_base(False)
        self.marker_perception.reset_marker_id()

    def give_information(self):
        marker_id = self.marker_perception.get_marker_id()
        rospy.loginfo("marker id returned by get_marker_id is: " + str(marker_id))
        if marker_id is not None:
            book = self.book_map.get(unicode(marker_id))
            if book is None:
                rospy.logwarn("Give information called when marker id is not in database")
                self._sound_client.say("The book that I am holding is unknown to me")
            else:
                self._sound_client.say(book.getInformation())
        else:
            rospy.logwarn("Give information called when marker id is None")
            self._sound_client.say("I don't think I am holding a book right now")
    
    def pick_up_from_shelf_button(self):
        self.pick_up_from_shelf_routine(self.book_textbox.text())

    def pick_up_from_shelf_routine(self, book_title):
        book_id = self.bookDB.getBookIdByTitle(book_title)
        if book_id is None:
            rospy.logwarn("Book asked for was not present in database")
            self._sound_client.say("The book you requested is not present in the database.")
        else:
            self.torso.move(.15) # move torso .15 meters up from base (MAX is .2)
            self.saved_l_arm_pose = Milestone1GUI.TUCKED_UNDER_L_POS
            self.move_arm('l', 5.0)
            self.l_gripper.close_gripper()
            
            #  self.marker_perception.set_marker_id(book_id)
            self.prepare_to_navigate() 
            time.sleep(5)
            self.navigate_to_shelf(book_id)  # Navigate to book location
            self.pick_up_from_shelf()  # Pick up from the shelf 
            self.prepare_to_navigate()
            time.sleep(5)
            self.navigate_to_person()  # Navigate to designated help desk location
            self.give_book()  # Give Book to Human 

    def put_this_book_back_routine(self):
        self.take_from_human()
        time.sleep(5)
        self.prepare_to_navigate()
        time.sleep(5)
        self.navigate_to_shelf()
        self.place_on_shelf()
        self.prepare_to_navigate()
        # TODO: return to human?

    def give_information_routine(self, book_title):
        book_id = self.bookDB.getBookIdByTitle(book_title)
        if book_id is None:
            rospy.logwarn("Book asked for was not present in database")
            self._sound_client.say("The book you requested is not present in the database.")
            return
        book = self.book_map.get(unicode(book_id))
        self._sound_client.say(book.getInformation())


    def pick_up_from_shelf(self):
        self.saved_r_arm_pose = Milestone1GUI.LOWER_ON_SHELF_R_POS
        self.move_arm('r', 4.0, True)  # Increase these numbers for slower movement
        self.r_gripper.open_gripper(True)
        self.move_base(True)
        self.r_gripper.close_gripper(True)
        self.move_base(False)

    def navigate_to_person(self):
        x = 2.82690143585
        y = -0.416650295258
        z = 0.252587109056
        w = 0.967574158573
        self.navigation.move_to_shelf(x, y, z, w)
    
    def give_book(self):
        self.saved_r_arm_pose = Milestone1GUI.RECEIVE_FROM_HUMAN_R_POS
        self.move_arm('r', 4.0, True)
        self.r_gripper.open_gripper(True)
        
    # Moves forward to the bookshelf (or backward if isForward is false)
    def move_base(self, isForward):
        topic_name = '/base_controller/command'
        base_publisher = rospy.Publisher(topic_name, Twist)

        distance = 0.25
        if not isForward:
            distance *= -1

        twist_msg = Twist()
        twist_msg.linear = Vector3(distance, 0.0, 0.0)
        twist_msg.angular = Vector3(0.0, 0.0, 0.0)

        for x in range(0, 15):
            rospy.loginfo("Moving the base")
            base_publisher.publish(twist_msg)
            time.sleep(0.15)
        time.sleep(1.5)

    # Moves arms using kinematics
    def move_arm(self, side_prefix, time_to_joints = 2.0, wait = False):
        # forward kinematics
        if (side_prefix == 'r'):
            if self.saved_r_arm_pose is None:
                rospy.logerr('Target pose for right arm is None, cannot move.')
            else:
                self.freeze_arm(side_prefix)
                self.move_to_joints(side_prefix, self.saved_r_arm_pose, time_to_joints, wait)
        else: # side_prefix == 'l'
            if self.saved_l_arm_pose is None:
                rospy.logerr('Target pose for left arm is None, cannot move.')
            else:
                self.freeze_arm(side_prefix)
                self.move_to_joints(side_prefix, self.saved_l_arm_pose, time_to_joints, wait)
                pass

    def freeze_arm(self, side_prefix):
        controller_name = side_prefix + '_arm_controller'
        start_controllers = [controller_name]
        stop_controllers = []
        self.set_arm_mode(start_controllers, stop_controllers)

    def set_arm_mode(self, start_controllers, stop_controllers):
        try:
            self.switch_service_client(start_controllers, stop_controllers, 1)
        except rospy.ServiceException:
            rospy.logerr('Could not change arm mode.')

    def joint_states_cb(self, msg):
        '''Callback function that saves the joint positions when a
            joint_states message is received'''
        self.lock.acquire()
        self.all_joint_names = msg.name
        self.all_joint_poses = msg.position
        self.joint_sig.emit(msg)
        self.lock.release()

    def joint_sig_cb(self, msg):
        pass

    def get_joint_state(self, side_prefix):
        '''Returns position for arm joints on the requested side (r/l)'''
        if side_prefix == 'r':
            joint_names = self.r_joint_names
        else:
            joint_names = self.l_joint_names

        if self.all_joint_names == []:
            rospy.logerr("No robot_state messages received yet!\n")
            return None
    
        positions = []
        self.lock.acquire()
        for joint_name in joint_names:
            if joint_name in self.all_joint_names:
                index = self.all_joint_names.index(joint_name)
                position = self.all_joint_poses[index]
                positions.append(position)
            else:
                rospy.logerr("Joint %s not found!", joint_name)
                self.lock.release()
                return None

        self.lock.release()
        return positions

    def move_to_joints(self, side_prefix, positions, time_to_joint, wait = False):
        '''Moves the arm to the desired joints'''
        velocities = [0] * len(positions)
        traj_goal = JointTrajectoryGoal()
        traj_goal.trajectory.header.stamp = (rospy.Time.now() + rospy.Duration(0.1))
        traj_goal.trajectory.points.append(JointTrajectoryPoint(positions=positions,
                    velocities=velocities, time_from_start=rospy.Duration(time_to_joint)))
       
        # Pick the correct arm's action client and send the goal once.
        if side_prefix == 'r':
            traj_goal.trajectory.joint_names = self.r_joint_names
            action_client = self.r_traj_action_client
        else:
            traj_goal.trajectory.joint_names = self.l_joint_names
            action_client = self.l_traj_action_client
        action_client.send_goal(traj_goal)
        if wait:
            time.sleep(time_to_joint)
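
`spin_base` above republishes its Twist as fast as the loop allows for a fixed duration, and `move_base` for a fixed number of iterations. As a design note, a rate-limited helper is a common alternative for this open-loop pattern; the sketch below assumes a node is already initialized and uses the same topic, and it is not one of the class's actual methods:

import rospy
from geometry_msgs.msg import Twist, Vector3

def publish_twist_for(duration_s, linear_x=0.0, angular_z=0.0, hz=10):
    """Publish a constant Twist on the base command topic at a fixed rate."""
    pub = rospy.Publisher('/base_controller/command', Twist, queue_size=1)
    twist = Twist(linear=Vector3(linear_x, 0.0, 0.0),
                  angular=Vector3(0.0, 0.0, angular_z))
    rate = rospy.Rate(hz)
    end_time = rospy.get_rostime() + rospy.Duration(duration_s)
    while rospy.get_rostime() < end_time and not rospy.is_shutdown():
        pub.publish(twist)
        rate.sleep()
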
コード例 #28
0
ファイル: visionPick.py プロジェクト: samsgood0310/ur5-ros
#Q2 = [-5.773678247128622, -1.7172182242022913, 2.291447162628174, -2.1666935125934046, -1.5579717795001429, -0.2839697043048304]
#Q2 = [-5.80492484966387, -1.7727258841144007, 2.351362943649292, -2.23500901857485, -1.5296443144427698, -0.3075040022479456]
Q2 = [-5.802346740161077, -1.727706257496969, 2.3336830139160156, -2.251301113759176, -1.537558380757467, -0.30245906511415654]
#Q2 = (-5.790848229323522, -1.6641319433795374, 2.2818641662597656, -2.133308235798971, -1.57269794145693, -0.289194409047262)
#Q2 = [-5.807982299719946, -1.7263277212726038, 2.412261724472046, -2.3110459486590784, -1.5310810248004358, -0.2967436949359339]

Q3 = [i-j for i, j in zip(Q1, Q2)]

Q5 = [-5.233350698147909, -2.263735596333639, 2.0414814949035645, -1.8042591253863733, -1.5754278341876429, -1.077151123677389]
Q6 = [-3.9884613196002405, -1.7695258299456995, 1.7130355834960938, -1.5220916906939905, -1.5590489546405237, -1.6418374220477503]

# moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('move_group_python_interface_tutorial',
                 anonymous=True)

grip = Gripper()
grip.open()
# robot = moveit_commander.RobotCommander()
# scene = moveit_commander.PlanningSceneInterface()
# group_name = "manipulator"
# group = moveit_commander.MoveGroupCommander(group_name)
# display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
#                                                moveit_msgs.msg.DisplayTrajectory,
#                                                queue_size=20)

JOINT_NAMES = ['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint',
               'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']

client = actionlib.SimpleActionClient('follow_joint_trajectory', FollowJointTrajectoryAction)
client.wait_for_server()
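
With the action client connected, the standard way to use it is to wrap one of the joint targets above in a FollowJointTrajectoryGoal. A hedged sketch follows; the 3-second duration is arbitrary, and `rospy` and `Q5` come from parts of the file not shown here:

from control_msgs.msg import FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint

goal = FollowJointTrajectoryGoal()
goal.trajectory.joint_names = JOINT_NAMES
goal.trajectory.points = [
    JointTrajectoryPoint(positions=Q5,
                         velocities=[0.0] * 6,
                         time_from_start=rospy.Duration(3.0))
]
client.send_goal(goal)
client.wait_for_result()
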
コード例 #29
0
from robot_utils import UR5
import _thread
import time
import json
from gripper import Gripper
import numpy as np

gripper = Gripper(2, 1)

robot = UR5(robot_ip="192.168.1.6")

freedrive = False
thread_flag = False


def wait_for_enter():
    print("Starting wait?")
    _thread.start_new_thread(wait_for_enter_thread, ())
    return


def wait_for_enter_thread():
    global gripper, thread_flag
    print("Started wait for enter...")
    thread_flag = True
    while True:
        c = input()
        if c == "t":
            gripper.toggle()
        else:
            break
コード例 #30
0
ファイル: gripperberry.py プロジェクト: IzzyBrand/dro.ne
class Drone:
    _COMMAND_SET_MISSION = 'updatezone'
    _COMMAND_START = 'start'
    _COMMAND_TAKEOFF = 'takeoff'
    _COMMAND_LAND = 'land'
    _COMMAND_PAUSE = 'pause'
    _COMMAND_RTL = 'rtl'
    _COMMAND_ARM = 'arm'
    _COMMAND_DISARM = 'disarm'
    _COMMAND_OPEN = 'gripperOpen'
    _COMMAND_CLOSE = 'gripperClose'
    _COMMAND_MOTORS_ON = 'motorsOn'
    _COMMAND_MOTORS_OFF = 'motorsOff'

    def start(self):
        self.server = ServerInterface()
        # self.pixhawk = connect('/dev/cu.usbmodem1', baud = 115200, wait_ready=True) 	# for on mac via USB
        # self.pixhawk = connect('/dev/ttyS0', baud = 57600, wait_ready=True) 			# for on the raspberry PI via telem2
        # # self.pixhawk = connect('/dev/tty.usbserial-DA00BL49', baud = 57600)			# telem radio on mac
        # # self.pixhawk = connect('/dev/tty.SLAB_USBtoUART', baud = 57600)				# telem radio on mac
        # self.pixhawk.wait_ready(timeout=60)
        # self.pixhawk.commands.download()
        #self._log('Connected to pixhawk.')
        #self._prev_pixhawk_mode = ''
        self._prev_command = ''
        #self._arming_window_start = 0
        self._server_connect_timer = time.time()
        #self.current_action = 'idle'
        config_loaded = self._load_config()  # load info about the uid and auth
        online = True  # TODO: verify internet connection
        self.gripper = Gripper(18)  # set up the gripper
        self.button = Button(2)  # set up the button
        self.button.when_pressed = self.gripper.open
        self.button.when_released = self.gripper.close

        return config_loaded and online

    def stop(self):
        #self.pixhawk.close()
        self.server.disconnect()
        self._log('Stopped.')

    #################################################################################
    # STEP
    #################################################################################
    def step(self):
        # UPDATE PIXHAWK INFORMATION
        #self._read_from_pixhawk()
        #self.print_mode_change()
        # if self.state['voltage'] < voltage_emergency_threshold:
        # RTL

        # SEND STATE UPDATE
        #if self.pixhawk.armed:
        #	if self.pixhawk.channels.overrides['3'] == 1500:
        #		self.current_action = 'throttled'
        #	else: self.current_action = 'armed'
        #else: self.current_action = 'disarmed'
        #self.server.post(self.state)

        # REQUEST COMMAND
        received_command = self.server.get_command()
        # Disarm if we haven't received a new command in more than 30 seconds
        if received_command is not None: self._server_connect_timer = time.time()
        #elif time.time() - self._server_connect_timer > 30: self.pixhawk.armed = False

        # ACT ON NEW COMMAND COMMAND
        if received_command != self._prev_command:
            self._log('received command - ' + str(received_command))
            #if received_command == self._COMMAND_ARM:
            #	self.pixhawk.mode = VehicleMode('STABILIZE')
            #	self.pixhawk.channels.overrides['3'] = 1000
            #	self.pixhawk.armed = True
            #elif received_command == self._COMMAND_DISARM:
            #	self.pixhawk.armed = False
            if received_command == self._COMMAND_OPEN:
                self.gripper.open()
            elif received_command == self._COMMAND_CLOSE:
                self.gripper.close()
            #elif received_command == self._COMMAND_MOTORS_ON:
            #	self.pixhawk.channels.overrides['3'] = 1500
            #elif received_command == self._COMMAND_MOTORS_OFF:
            #	self.pixhawk.channels.overrides['3'] = 1000

            self._prev_command = received_command

        # # ACT ON ONGOING ACTION
        # # the self.current_action allows us to have ongoing instructions
        # if self.current_action == 'prearm':
        # 	self.pixhawk.mode = VehicleMode('GUIDED') # we arm and takeoff in guided mode
        # 	try:
        # 		self.pixhawk.commands.wait_ready() # we can't fly until we have commands list
        # 		self._arming_window_start = time.time()
        # 		self.set_action('wait_arm')
        # 	except APIException:
        # 		print 'commands still downloading.'
        # 		self.set_action('idle')

        # elif self.current_action == 'wait_arm' and time.time() - self._arming_window_start > 60:
        # 	self._log('TIMEOUT - revert to idle')
        # 	self.set_action('idle')

        # elif self.current_action == 'arm':
        # 	self.pixhawk.armed = True
        # 	self._log('arm')
        # 	self.set_action('takeoff')

        # elif self.current_action == 'takeoff' \
        # 	and self.pixhawk.armed: # we need to verify that the pixhawk is armed
        # 	self.pixhawk.simple_takeoff(20)
        # 	self.set_action('mission_start')

        # elif self.current_action == 'mission_start':
        # 	self.pixhawk.commands.next = 0	# start from the first waypoint
        # 	self.pixhawk.mode = VehicleMode('AUTO')
        # 	self.set_action('flying')

        # elif self.current_action == 'flying':
        # 	next_cmd = self.pixhawk.commands.next
        # 	print next_cmd, self.pixhawk.commands[next_cmd].command
        # 	# TODO - Determine if this should be mavutil.mavlink.MAV_CMD_NAV_LAND instead?
        # 	if self.pixhawk.commands[next_cmd].command == mavutil.mavlink.MAV_CMD_NAV_LOITER_UNLIM:
        # 		# we've reached the loiter waypoint
        # 		self.set_action('wait_land')

        # elif self.current_action == 'landing' and not self.pixhawk.armed:
        # 	# we were landing and now we're disarmed, so we must have landed
        # 	# self.gripper.open()
        # 	self.set_action('idle')

        # elif self.current_action == 'disarm':
        # 	self._log('disarm')
        # 	self.pixhawk.armed = False
        # 	if not self.pixhawk.armed: self.set_action('idle')
        # 	else:	self._log('DISARM FAILED')

        # elif self.current_action == 'pause':
        # 	self.pixhawk.mode = VehicleMode('LOITER')
        # 	self.set_action('loiter')

        # elif self.current_action == 'start_rtl':
        # 	self.pixhawk.mode = VehicleMode('RTL')
        # 	self.set_action('rtl')

        # elif (self.current_action == 'rtl' or self.current_action == 'loiter') \
        # and not self.pixhawk.armed:
        # 	self._log('Detected disarm while ' + self.current_action + '. Changing to idle.')
        # 	self.set_action('idle')

    ################################################################################
    # UTIL
    #################################################################################
    # load configuration from file
    def _load_config(self, file='droneconfig.json'):
        # TODO: error check all of this
        config_json = json.loads(open(file).read())
        self.server.config(config_json['uid'], config_json['auth'],
                           config_json['api_url'], config_json['name'])
        self.status = config_json['startup_status']
        self.wp_path = config_json['wp_path']
        self._log('Successfully loaded config from ' + file)
        return True

    # logging abstracted so we can change where we are logging
    def _log(self, msg):
        print "[DEBUG]: {0}".format(msg)

    #################################################################################
    # PIXHAWK CONNECTIVITY
    #################################################################################
    def _read_from_pixhawk(self):
        # TODO: add timeout on this?
        self.state = {
            "status": self.current_action,
            "timestamp": str(datetime.datetime.now()),
            "latitude": self.pixhawk.location.global_relative_frame.lat,
            "longitude": self.pixhawk.location.global_relative_frame.lon,
            "altitude": self.pixhawk.location.global_relative_frame.alt,
            "voltage": self.pixhawk.battery.voltage,
            "speed": self.pixhawk.groundspeed,
            "rssi": self.pixhawk.
            last_heartbeat  # use the time since the last heartbeat becase we don't have internet connectivity
        }
        return self.state

    def print_mode_change(self):
        if self.pixhawk.mode.name != self._prev_pixhawk_mode:
            print 'MODE CHANGED TO', self.pixhawk.mode.name
            self._prev_pixhawk_mode = self.pixhawk.mode.name
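
For reference, the keys read in `_load_config` above imply a config file of roughly the following shape. The values below are placeholders for illustration only, not real credentials or endpoints:

import json

example_config = {
    "uid": "drone-001",                    # placeholder id
    "auth": "REPLACE_ME",                  # placeholder auth token
    "api_url": "https://example.com/api",  # placeholder server URL
    "name": "test-drone",
    "startup_status": "idle",
    "wp_path": "waypoints.json",
}
with open("droneconfig.json", "w") as f:
    json.dump(example_config, f, indent=2)
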
コード例 #31
0
ファイル: demo.py プロジェクト: ryanhoque/cloth_simulation
def move(c, args):
    """If you want to let the cloth settle, just run `c.update()` beforehand.

    Careful, changing width/height will add more points but not make it stable;
    the cloth 'collapses' ... need to investigate code?

    For tensioning, wherever tension it by default has z-coordinate of 0,
    because we assume a tool has pinched it at that point.
    """
    grip = Gripper(cloth=c)
    start_t = time.time()

    # Will put this in a separate class soon. Need a 'pin' and then we can pull.
    circlex = 300
    circley = 300
    c.pin_position(circlex, circley)
    tensioner = c.tensioners[0]

    if args.viz_tool == 'matplotlib':
        if not args.norender:
            # Use `plt.ion()` for interactive plots, requires `plt.pause(...)` later.
            nrows, ncols = 1, 2
            fig = plt.figure(figsize=(12 * ncols, 12 * nrows))
            ax1 = fig.add_subplot(1, 2, 1)
            ax2 = fig.add_subplot(1, 2, 2, projection='3d')
            plt.ion()
            plt.tight_layout()
            cid = fig.canvas.mpl_connect('button_press_event', mouse.clicked)
            rid = fig.canvas.mpl_connect('button_release_event',
                                         mouse.released)
            mid = fig.canvas.mpl_connect('motion_notify_event', mouse.moved)

        for i in range(args.num_sim_iters):
            if i % 10 == 0:
                elapsed_time = (time.time() - start_t) / 60.0
                print("Iteration {}, minutes: {:.1f}".format(i, elapsed_time))
                z_vals = [p.z for p in c.shapepts]
                print("  average z: {}".format(np.mean(z_vals)))
                print("  median z:  {}".format(np.median(z_vals)))
            if not args.norender:
                ax1.cla()
                ax2.cla()
            pull(i, tensioner)
            # ----------------------------------------------------------------------
            # Re-insert the points, with appropriate colors. 2D AND 3D together.
            # ----------------------------------------------------------------------
            if not args.norender:
                pts = np.array([[p.x, p.y, p.z] for p in c.normalpts])
                cpts = np.array([[p.x, p.y, p.z] for p in c.shapepts])
                if len(pts) > 0:
                    ax1.scatter(pts[:, 0], pts[:, 1], c='g')
                    ax2.scatter(pts[:, 0], pts[:, 1], pts[:, 2], c='g')
                if len(cpts) > 0:
                    ax1.scatter(cpts[:, 0], cpts[:, 1], c='b')
                    ax2.scatter(cpts[:, 0], cpts[:, 1], cpts[:, 2], c='b')
                ax2.set_zlim([0, 300])  # only for visualization purposes
                plt.pause(0.001)
            # ----------------------------------------------------------------------
            # Updates (+5 extra) to allow cloth to respond to environment. Think of
            # it as like a 'frame skip' parameter.
            for _ in range(args.updates_per_move):
                c.simulate()

        if not args.norender:
            fig.canvas.mpl_disconnect(cid)
            fig.canvas.mpl_disconnect(mid)
            fig.canvas.mpl_disconnect(rid)

    elif args.viz_tool == 'pyopengl':
        # Note: 1/60 ~ 0.016 so we might as well try this way ...
        target_fps = 60
        clock = pygame.time.Clock()
        dt = 1.0 / float(target_fps)

        # Sequence of update then draw commands
        for i in range(args.num_sim_iters):
            if not get_input(): break

            if i % 10 == 0:
                elapsed_time = (time.time() - start_t) / 60.0
                print("Iteration {}, minutes: {:.1f}".format(i, elapsed_time))
            pull(i, tensioner)
            for _ in range(args.updates_per_move):
                c.simulate()

            # Draw
            draw(c)
            clock.tick(target_fps)
        pygame.quit()

    elapsed_time = (time.time() - start_t) / 60.0
    print("Total time, minutes: {:.2f}".format(elapsed_time))
コード例 #32
0
ファイル: milestone1.py プロジェクト: vjampala/cse481
    def __init__(self, context):
        super(Milestone1GUI, self).__init__(context)
        self._sound_client = SoundClient()
        rospy.Subscriber('robotsound', SoundRequest, self.sound_cb)
        switch_srv_name = 'pr2_controller_manager/switch_controller'
        rospy.loginfo('Waiting for switch controller service...')
        rospy.wait_for_service(switch_srv_name)
        self.switch_service_client = rospy.ServiceProxy(switch_srv_name,
                                                 SwitchController)

        self.r_joint_names = ['r_shoulder_pan_joint',
                              'r_shoulder_lift_joint',
                              'r_upper_arm_roll_joint',
                              'r_elbow_flex_joint',
                              'r_forearm_roll_joint',
                              'r_wrist_flex_joint',
                              'r_wrist_roll_joint']
        self.l_joint_names = ['l_shoulder_pan_joint',
                              'l_shoulder_lift_joint',
                              'l_upper_arm_roll_joint',
                              'l_elbow_flex_joint',
                              'l_forearm_roll_joint',
                              'l_wrist_flex_joint',
                              'l_wrist_roll_joint']

        # Create a trajectory action client
        r_traj_controller_name = '/r_arm_controller/joint_trajectory_action'
        self.r_traj_action_client = SimpleActionClient(r_traj_controller_name,
                JointTrajectoryAction)
        rospy.loginfo('Waiting for a response from the trajectory action server for RIGHT arm...')
        self.r_traj_action_client.wait_for_server()
        
        l_traj_controller_name = '/l_arm_controller/joint_trajectory_action'
        self.l_traj_action_client = SimpleActionClient(l_traj_controller_name,
                JointTrajectoryAction)
        rospy.loginfo('Waiting for a response from the trajectory action server for LEFT arm...')
        self.l_traj_action_client.wait_for_server()

        # Initialize the torso
        self.torso = Torso()

        # Initialize the grippers
        self.l_gripper = Gripper('l')
        self.r_gripper = Gripper('r')

        # Initialize navigation
        self.navigation = Navigation()

        self._widget = QWidget()
        self._widget.setFixedSize(600, 600)
        QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10))
        
        large_box = QtGui.QVBoxLayout()

        # Button for reaching out to receive a book from a human
        button_box = QtGui.QVBoxLayout()
        box_1 = QtGui.QHBoxLayout()
        box_2 = QtGui.QHBoxLayout()
        box_3 = QtGui.QHBoxLayout()
        box_4 = QtGui.QHBoxLayout()
        box_5 = QtGui.QHBoxLayout()
        box_6 = QtGui.QHBoxLayout()
        box_7 = QtGui.QHBoxLayout()
        box_8 = QtGui.QHBoxLayout()
        box_9 = QtGui.QHBoxLayout()
        box_10 = QtGui.QHBoxLayout()

        box_1.addItem(QtGui.QSpacerItem(15,2))
        box_1.addWidget(self.create_button('Prepare To Take', self.prepare_to_take))
        box_1.addItem(QtGui.QSpacerItem(445,2))

        box_2.addItem(QtGui.QSpacerItem(15,2))
        box_2.addWidget(self.create_button('Take From Human', self.take_from_human))
        box_2.addItem(QtGui.QSpacerItem(445,2))

        box_3.addItem(QtGui.QSpacerItem(15,2))
        box_3.addWidget(self.create_button('Prepare To Navigate', self.prepare_to_navigate))
        box_3.addItem(QtGui.QSpacerItem(445,2))

        # Button to move to shelf
        box_5.addItem(QtGui.QSpacerItem(15,2))
        box_5.addWidget(self.create_button('Navigate To Shelf', self.navigate_to_shelf))
        box_5.addItem(QtGui.QSpacerItem(445,2))

        box_4.addItem(QtGui.QSpacerItem(15,2))
        box_4.addWidget(self.create_button('Place On Shelf', self.place_on_shelf))
        box_4.addItem(QtGui.QSpacerItem(445,2))

        box_6.addItem(QtGui.QSpacerItem(15,2))
        box_6.addWidget(self.create_button('Give Information', self.give_information))
        box_6.addItem(QtGui.QSpacerItem(445,2))

        box_7.addItem(QtGui.QSpacerItem(15,2))
        box_7.addWidget(self.create_button('Navigate To Person', self.navigate_to_person))
        box_7.addItem(QtGui.QSpacerItem(445,2))

        self.book_textbox = QtGui.QLineEdit()
        self.book_textbox.setFixedWidth(100)

        box_8.addItem(QtGui.QSpacerItem(15,2))
        box_8.addWidget(self.book_textbox)
        box_8.addWidget(self.create_button('Pick Up Book', self.pick_up_from_shelf_button))
        box_8.addItem(QtGui.QSpacerItem(445,2))
        
        box_9.addItem(QtGui.QSpacerItem(15,2))
        box_9.addWidget(self.create_button('Localize', self.spin_base))
        box_9.addItem(QtGui.QSpacerItem(445,2))
       
        box_10.addItem(QtGui.QSpacerItem(15,2))
        box_10.addWidget(self.create_button('Non-nav Pick Up', self.pick_up_from_shelf))
        box_10.addItem(QtGui.QSpacerItem(445,2))

        button_box.addItem(QtGui.QSpacerItem(20,120))
        button_box.addLayout(box_1)
        button_box.addLayout(box_2)
        button_box.addLayout(box_3)
        button_box.addLayout(box_5)
        button_box.addLayout(box_4)
        button_box.addLayout(box_6)
        button_box.addLayout(box_7)
        button_box.addLayout(box_8)
        button_box.addLayout(box_9)
        button_box.addLayout(box_10)
        button_box.addItem(QtGui.QSpacerItem(20,240))
        large_box.addLayout(button_box)
        self.marker_perception = ReadMarkers()
        self.speech_recognition = SpeechRecognition(self)
        self.bookDB = BookDB()
        self.book_map = self.bookDB.getAllBookCodes()
        self._widget.setObjectName('Milestone1GUI')
        self._widget.setLayout(large_box)
        context.add_widget(self._widget)
        self._widget.setStyleSheet("QWidget { image: url(%s) }" %
                (str(os.path.dirname(os.path.realpath(__file__))) +
                "/../../librarian_gui_background.jpg"))
Code example #33
import rospy
import time

from robot_arm import RobotArm
from ROSInterface import ROSInterface
from gripper import Gripper
from headController import FetchHeadController
from gripInterface import GripInterface
from originInterface import OriginInterface

if __name__ == "__main__":
    # Setup clients
    Ros = ROSInterface()
    Arm = RobotArm()
    GripRos = GripInterface()
    GripHand = Gripper()
    HeadTilt = FetchHeadController()
    OriginROS = OriginInterface()
    HeadTilt.look_at(0.7, 0, 0.7, "base_link")
    print("Yes I looked")

    # Keep retrying until the MATLAB-side interfaces are available.
    while not rospy.is_shutdown():
        try:
            Ros.Subscriber()
            GripRos.GripSubscriber()
            break
        except Exception:
            rospy.loginfo('waiting for matlab')
            rospy.sleep(1.0)  # avoid a tight busy-wait between retries

    while not rospy.is_shutdown():
        Ros.PPublisher()
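The final loop publishes as fast as the interpreter can spin. A hedged variant that rate-limits it (assuming, as above, that Ros.PPublisher() publishes one message per call):

    # Hedged variant: publish at a fixed 10 Hz instead of spinning at full speed.
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        Ros.PPublisher()
        rate.sleep()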
Code example #34
File: simple_gui.py Project: vjampala/cse481
    def __init__(self, context):
        super(SimpleGUI, self).__init__(context)
        self.setObjectName('SimpleGUI')
        self._widget = QWidget()
        self._widget.setFixedSize(600, 600)
        self._sound_client = SoundClient()
        rospy.Subscriber('robotsound', SoundRequest, self.sound_cb)

        QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10))
        self.sound_sig.connect(self.sound_sig_cb)
        
        large_box = QtGui.QVBoxLayout()

        # Textbox to enter words for PR2 to say 
        sound_textbox = QtGui.QLineEdit("Squirtle Squirtle")  # Default Text
        sound_textbox.setFixedWidth(450)
        self.marker_publisher = rospy.Publisher('visualization_marker', Marker)
        
        # Set a handler on the textbox to retrieve the text when button clicked
        self.sound_textbox = sound_textbox

        button_box = QtGui.QHBoxLayout()
        button_box.addItem(QtGui.QSpacerItem(15,20))
        button_box.addWidget(self.create_button('Speak', self.command_cb))
        button_box.addWidget(sound_textbox)
        button_box.addStretch(1)
        large_box.addLayout(button_box)

        speech_box = QtGui.QHBoxLayout()
        speech_box.addItem(QtGui.QSpacerItem(15, 20))
        self.speech_label = QtGui.QLabel('Robot has not spoken yet')
        palette = QtGui.QPalette()
        palette.setColor(QtGui.QPalette.Foreground,QtCore.Qt.blue)
        self.speech_label.setPalette(palette)
        speech_box.addWidget(self.speech_label)

        large_box.addLayout(speech_box)
        large_box.addStretch(1)

        #large_box.addItem(QtGui.QSpacerItem(50,20))

        # Buttons to move the PR2's head
        up_head = Head(Head.UP, self)
        head_box = QtGui.QVBoxLayout()
        up_head_box = QtGui.QHBoxLayout()
        up_head_button = self.create_button('^', up_head.create_closure())
        down_head = Head(Head.DOWN, self)
        down_head_box = QtGui.QHBoxLayout()
        down_head_button = self.create_button('v', down_head.create_closure())
        right_head = Head(Head.RIGHT, self)
        right_head_button = self.create_button('>', right_head.create_closure())
        left_head = Head(Head.LEFT, self)
        left_head_button = self.create_button('<', left_head.create_closure())
        left_right_head_box = QtGui.QHBoxLayout()

        up_head_box.addItem(QtGui.QSpacerItem(235,20))
        up_head_box.addWidget(up_head_button)
        up_head_box.addItem(QtGui.QSpacerItem(275,20))
        left_right_head_box.addItem(QtGui.QSpacerItem(160,20))
        left_right_head_box.addWidget(left_head_button)
        left_right_head_box.addItem(QtGui.QSpacerItem(60,20))
        left_right_head_box.addWidget(right_head_button)
        left_right_head_box.addItem(QtGui.QSpacerItem(225,20))
        down_head_box.addItem(QtGui.QSpacerItem(235,20))
        down_head_box.addWidget(down_head_button)
        down_head_box.addItem(QtGui.QSpacerItem(275,20))
        head_box.addLayout(up_head_box)
        head_box.addLayout(left_right_head_box)
        head_box.addLayout(down_head_box)
        large_box.addLayout(head_box)

        # Buttons to move the grippers
        gripper = Gripper(Gripper.RIGHT, Gripper.OPEN, self)
        right_gripper = self.create_button('Right gripper',
                gripper.create_closure())
        gripper = Gripper(Gripper.LEFT, Gripper.OPEN, self)
        left_gripper = self.create_button('Left gripper', gripper.create_closure()) 
        large_box.addItem(QtGui.QSpacerItem(100,250))
        gripper_box = QtGui.QHBoxLayout()
        gripper_box.addItem(QtGui.QSpacerItem(75,20))
        gripper_box.addWidget(left_gripper)
        gripper_box.addItem(QtGui.QSpacerItem(450,20))
        gripper_box.addWidget(right_gripper)
        gripper_box.addItem(QtGui.QSpacerItem(75,20))
        large_box.addLayout(gripper_box)

        # Buttons to move the base
        base_box = QtGui.QVBoxLayout()
        large_box.addItem(QtGui.QSpacerItem(100,100))
        forward_base_box = QtGui.QHBoxLayout()
        forward_base = Base(Base.FORWARD, self)
        forward_base_button = self.create_button('move forward', forward_base.create_closure())
        forward_base_box.addItem(QtGui.QSpacerItem(400,20))
        forward_base_box.addWidget(forward_base_button)
        forward_base_box.addItem(QtGui.QSpacerItem(400,20))
        base_box.addLayout(forward_base_box)

        left_right_base_box = QtGui.QHBoxLayout()
        left_base= Base(Base.LEFT, self)
        left_base_button = self.create_button('move left',
                left_base.create_closure())

        right_base= Base(Base.RIGHT, self)
        right_base_button= self.create_button('move right',
                right_base.create_closure())
        left_right_base_box.addItem(QtGui.QSpacerItem(300,20))
        left_right_base_box.addWidget(left_base_button)
        left_right_base_box.addItem(QtGui.QSpacerItem(50,20))
        left_right_base_box.addWidget(right_base_button)
        left_right_base_box.addItem(QtGui.QSpacerItem(300,20))
        base_box.addLayout(left_right_base_box)

        backward_base_box = QtGui.QHBoxLayout()
        backward_base= Base(Base.BACKWARD, self)
        backward_base_button = self.create_button('move backward',
                backward_base.create_closure())
        backward_base_box.addItem(QtGui.QSpacerItem(400,20))
        backward_base_box.addWidget(backward_base_button)
        backward_base_box.addItem(QtGui.QSpacerItem(400,20))
        base_box.addLayout(backward_base_box)

        large_box.addLayout(base_box)
        
        turn_base_box = QtGui.QHBoxLayout()

        counter_base= Base(Base.COUNTER, self)
        counter_base_button = self.create_button('\\\n        -->',
                counter_base.create_closure())
        
        clockwise_base= Base(Base.CLOCKWISE, self)
        clockwise_base_button = self.create_button('        /\n<--',
                clockwise_base.create_closure())
        turn_base_box.addItem(QtGui.QSpacerItem(75,20))
        turn_base_box.addWidget(counter_base_button)
        turn_base_box.addItem(QtGui.QSpacerItem(225,20))
        turn_base_box.addWidget(clockwise_base_button)
        turn_base_box.addItem(QtGui.QSpacerItem(100,20))
        large_box.addLayout(turn_base_box)

        self._widget.setObjectName('SimpleGUI')
        self._widget.setLayout(large_box)
        context.add_widget(self._widget)
        self._widget.setStyleSheet("QWidget { image: url(%s) }" %
                (str(os.path.dirname(os.path.realpath(__file__))) +
                "/../../rosie_background.jpg"))
Code example #35
    def animate(i):
        global ax12, plt

        ax12_lock.acquire()
        loads.append(ax12.read_load(1))
        ax12_lock.release()
        fig.clear()
        plt.plot(loads)

    ani = animation.FuncAnimation(fig, animate, interval=100)
    plt.show()


ttyPort = "/dev/serial/by-id/usb-FTDI_USB__-__Serial_Converter_FT3M4HIL-if00-port0"

gripper = Gripper(2, 1)

print("Test:", gripper.is_connected())

gripper.activate()

toggle = False

while True:
    command = input("Press enter to toggle or enter a command:")
    toggle = not toggle

    if command == "load":
        test = Process(target=load_plotting)
        test.start()
    else: