import rospy
from std_msgs.msg import String

# `Gripper` is the project-local UR gripper wrapper, driven via the
# `/ur_driver/set_io` service.


class GripperNode:
    def __init__(self, service='/ur_driver/set_io'):
        self.gripper = Gripper(service)
        rospy.init_node('gripper_node', anonymous=True)
        self.subscriber = rospy.Subscriber('/gripper/command', String,
                                           self.callback, queue_size=1)
        self.time_wait = 0
        print('Created gripper_node')
        rospy.spin()

    def callback(self, data):
        if data.data == 'open':
            self.gripper.open()
        elif data.data == 'close':
            self.gripper.close()
        elif data.data == 'softGrip':
            self.gripper.set_soft_grip()
        elif data.data == 'strongGrip':
            self.gripper.set_strong_grip()
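# A minimal sketch of commanding the node above from another process. The
# topic name, message type, and command strings come straight from the
# subscriber registration; the commander node name is hypothetical.
import rospy
from std_msgs.msg import String

rospy.init_node('gripper_commander', anonymous=True)
pub = rospy.Publisher('/gripper/command', String, queue_size=1)
rospy.sleep(1.0)  # give the subscriber time to connect
pub.publish(String('softGrip'))  # one of: open, close, softGrip, strongGrip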
import copy

import numpy as np
import rospy
from geometry_msgs.msg import Point, Pose, PoseStamped, Quaternion

# Project-local wrappers (Arm, ArmJoints, Base, RGBD, Head, Gripper, Torso,
# JointStateReader) are assumed to come from the surrounding repo.


class Robot_Skeleton(object):
    """Basic bare-bones solution for the Fetch robot interface.

    We recommend extending this class with additional convenience methods
    based on your application needs.
    """

    def __init__(self, simulation=True):
        """Initializes various aspects of the Fetch."""
        rospy.init_node("fetch")
        rospy.loginfo("initializing the Fetch...")
        self.arm = Arm()
        self.arm_joints = ArmJoints()
        self.base = Base()
        self.camera = RGBD()
        self.head = Head()
        self.gripper = Gripper(self.camera)
        self.torso = Torso()
        self.joint_reader = JointStateReader()

        # Tucked arm starting joint angle configuration.
        self.names = ArmJoints().names()
        self.tucked = [1.3200, 1.3999, -0.1998, 1.7199, 0.0, 1.6600, 0.0]
        self.tucked_list = [(x, y) for (x, y) in zip(self.names, self.tucked)]

        # Initial (x,y,yaw) position of the robot wrt the map origin. We keep
        # this fixed so that we can reset to this position as needed. The
        # HSR's `omni_base.pose` (i.e., the start pose) returns (x,y,yaw),
        # where yaw is the rotation about the z axis. For the Fetch's base,
        # `base.odom` supplies both `position` and `orientation` attributes.
        start = copy.deepcopy(self.base.odom.position)
        yaw = Base._yaw_from_quaternion(self.base.odom.orientation)
        self.start_pose = np.array([start.x, start.y, yaw])
        rospy.loginfo("...finished initialization!")

    def body_start_pose(self, start_height=0.10, end_height=0.10,
                        velocity_factor=0.5):
        """Sets the robot's body to some initial configuration.

        Tucks the arm using motion planning. NEVER directly set joints, as
        that often leads to collisions.

        Args:
            start_height: Height in meters for the Fetch before the arm-tuck.
            end_height: Height in meters for the Fetch after the arm-tuck.
            velocity_factor: Controls the speed; closer to 0 means slower,
                closer to 1 means faster. (If 0.0, it turns into 1.0 for some
                reason.) Values greater than 1.0 are cut to 1.0.
        """
        self.torso.set_height(start_height)
        self.arm.move_to_joint_goal(self.tucked_list,
                                    velocity_factor=velocity_factor)
        self.torso.set_height(end_height)

    def head_start_pose(self, pan=0.0, tilt=0.0):
        """Sets the robot's head to some initial configuration.

        Args:
            pan: Value in radians for sideways head rotation,
                counterclockwise when looking at the robot from an aerial
                view.
            tilt: Value in radians for head up/down movement; positive means
                looking downwards.
        """
        self.head.pan_tilt(pan=pan, tilt=tilt)

    def get_img_data(self):
        """Obtain camera and depth image.

        Returns:
            Tuple containing the RGB camera image and the corresponding
            depth image.
        """
        c_img = self.camera.read_color_data()
        d_img = self.camera.read_depth_data()
        return (c_img, d_img)

    def create_grasp_pose(self, x, y, z, rot_x, rot_y, rot_z):
        """Creates a pose in the world for the robot's end-effector to go to.

        Args:
            x, y, z, rot_x, rot_y, rot_z: A 6-D pose description.
        """
        pose_name = self.gripper.create_grasp_pose_intuitive(
                x, y, z, rot_x, rot_y, rot_z)
        return pose_name

    def move_to_pose(self, pose_name, velocity_factor=None):
        """Moves to a pose.

        On the HSR, this moved the `hand_palm_link` to the frame named
        `pose_name` at the correct pose. For the Fetch we should be able to
        extract the pose from `pose_name` and then call the Arm's
        `move_to_pose` method.

        Args:
            pose_name: A string name for the pose to go to.
            velocity_factor: Controls the speed; closer to 0 means slower,
                closer to 1 means faster. (If 0.0, it turns into 1.0 for some
                reason.) Values greater than 1.0 are cut to 1.0.
        """
        # See:
        #   http://wiki.ros.org/tf/Tutorials/Writing%20a%20tf%20listener%20%28Python%29
        #   https://answers.ros.org/question/256354/does-tftransformlistenerlookuptransform-return-quaternion-position-or-translation-and-rotation/
        # The first frame should be the reference frame; use `base_link`,
        # not `odom`.
        point, quat = self.gripper.tl.lookupTransform('base_link', pose_name,
                                                      rospy.Time(0))
        # See:
        #   https://github.com/cse481wi18/cse481wi18/blob/indigo-devel/applications/scripts/cart_arm_demo.py
        #   https://github.com/cse481wi18/cse481wi18/wiki/Lab-19%3A-Cartesian-space-manipulation
        ps = PoseStamped()
        ps.header.frame_id = 'base_link'
        ps.pose = Pose(
                Point(point[0], point[1], point[2]),
                Quaternion(quat[0], quat[1], quat[2], quat[3])
        )
        # See `arm.py` written by Justin Huang.
        error = self.arm.move_to_pose(pose_stamped=ps,
                                      velocity_factor=velocity_factor)
        if error is not None:
            rospy.logerr(error)

    def open_gripper(self):
        self.gripper.open()

    def close_gripper(self, width=0.0, max_effort=100):
        self.gripper.close(width=width, max_effort=max_effort)
import datetime
import json
import time

# Assumed imports: `ServerInterface` and `Gripper` are project-local modules,
# and `Button` (with `when_pressed`/`when_released`) matches gpiozero's API.
from gpiozero import Button


class Drone:
    _COMMAND_SET_MISSION = 'updatezone'
    _COMMAND_START = 'start'
    _COMMAND_TAKEOFF = 'takeoff'
    _COMMAND_LAND = 'land'
    _COMMAND_PAUSE = 'pause'
    _COMMAND_RTL = 'rtl'
    _COMMAND_ARM = 'arm'
    _COMMAND_DISARM = 'disarm'
    _COMMAND_OPEN = 'gripperOpen'
    _COMMAND_CLOSE = 'gripperClose'
    _COMMAND_MOTORS_ON = 'motorsOn'
    _COMMAND_MOTORS_OFF = 'motorsOff'

    def start(self):
        self.server = ServerInterface()
        # self.pixhawk = connect('/dev/cu.usbmodem1', baud=115200, wait_ready=True)  # on a Mac via USB
        # self.pixhawk = connect('/dev/ttyS0', baud=57600, wait_ready=True)  # on the Raspberry Pi via telem2
        # self.pixhawk = connect('/dev/tty.usbserial-DA00BL49', baud=57600)  # telem radio on a Mac
        # self.pixhawk = connect('/dev/tty.SLAB_USBtoUART', baud=57600)  # telem radio on a Mac
        # self.pixhawk.wait_ready(timeout=60)
        # self.pixhawk.commands.download()
        # self._log('Connected to pixhawk.')
        # self._prev_pixhawk_mode = ''
        self._prev_command = ''
        # self._arming_window_start = 0
        self._server_connect_timer = time.time()
        # self.current_action = 'idle'
        config_loaded = self._load_config()  # load info about the uid and auth
        online = True  # TODO: verify internet connection
        self.gripper = Gripper(18)  # set up the gripper
        self.button = Button(2)  # set up the button
        self.button.when_pressed = self.gripper.open
        self.button.when_released = self.gripper.close
        return config_loaded and online

    def stop(self):
        # self.pixhawk.close()
        self.server.disconnect()
        self._log('Stopped.')

    ############################################################################
    # STEP
    ############################################################################

    def step(self):
        # UPDATE PIXHAWK INFORMATION
        # self._read_from_pixhawk()
        # self.print_mode_change()
        # if self.state['voltage'] < voltage_emergency_threshold: RTL

        # SEND STATE UPDATE
        # if self.pixhawk.armed:
        #     if self.pixhawk.channels.overrides['3'] == 1500:
        #         self.current_action = 'throttled'
        #     else:
        #         self.current_action = 'armed'
        # else:
        #     self.current_action = 'disarmed'
        # self.server.post(self.state)

        # REQUEST COMMAND
        received_command = self.server.get_command()

        # Disarm if we haven't received a new command in more than 30 seconds.
        if received_command is not None:
            self._server_connect_timer = time.time()
        # elif time.time() - self._server_connect_timer > 30:
        #     self.pixhawk.armed = False

        # ACT ON NEW COMMAND
        if received_command != self._prev_command:
            self._log('received command - ' + str(received_command))
            # if received_command == self._COMMAND_ARM:
            #     self.pixhawk.mode = VehicleMode('STABILIZE')
            #     self.pixhawk.channels.overrides['3'] = 1000
            #     self.pixhawk.armed = True
            # elif received_command == self._COMMAND_DISARM:
            #     self.pixhawk.armed = False
            if received_command == self._COMMAND_OPEN:
                self.gripper.open()
            elif received_command == self._COMMAND_CLOSE:
                self.gripper.close()
            # elif received_command == self._COMMAND_MOTORS_ON:
            #     self.pixhawk.channels.overrides['3'] = 1500
            # elif received_command == self._COMMAND_MOTORS_OFF:
            #     self.pixhawk.channels.overrides['3'] = 1000
            self._prev_command = received_command

        # ACT ON ONGOING ACTION
        # The self.current_action allows us to have ongoing instructions.
        # if self.current_action == 'prearm':
        #     self.pixhawk.mode = VehicleMode('GUIDED')  # we arm and take off in guided mode
        #     try:
        #         self.pixhawk.commands.wait_ready()  # we can't fly until we have the commands list
        #         self._arming_window_start = time.time()
        #         self.set_action('wait_arm')
        #     except APIException:
        #         print('commands still downloading.')
        #         self.set_action('idle')
        # elif self.current_action == 'wait_arm' and time.time() - self._arming_window_start > 60:
        #     self._log('TIMEOUT - revert to idle')
        #     self.set_action('idle')
        # elif self.current_action == 'arm':
        #     self.pixhawk.armed = True
        #     self._log('arm')
        #     self.set_action('takeoff')
        # elif self.current_action == 'takeoff' and self.pixhawk.armed:
        #     # we need to verify that the pixhawk is armed
        #     self.pixhawk.simple_takeoff(20)
        #     self.set_action('mission_start')
        # elif self.current_action == 'mission_start':
        #     self.pixhawk.commands.next = 0  # start from the first waypoint
        #     self.pixhawk.mode = VehicleMode('AUTO')
        #     self.set_action('flying')
        # elif self.current_action == 'flying':
        #     next_cmd = self.pixhawk.commands.next
        #     print(next_cmd, self.pixhawk.commands[next_cmd].command)
        #     # TODO - Determine if this should be mavutil.mavlink.MAV_CMD_NAV_LAND instead?
        #     if self.pixhawk.commands[next_cmd].command == mavutil.mavlink.MAV_CMD_NAV_LOITER_UNLIM:
        #         # we've reached the loiter waypoint
        #         self.set_action('wait_land')
        # elif self.current_action == 'landing' and not self.pixhawk.armed:
        #     # we were landing and now we're disarmed, so we must have landed
        #     # self.gripper.open()
        #     self.set_action('idle')
        # elif self.current_action == 'disarm':
        #     self._log('disarm')
        #     self.pixhawk.armed = False
        #     if not self.pixhawk.armed:
        #         self.set_action('idle')
        #     else:
        #         self._log('DISARM FAILED')
        # elif self.current_action == 'pause':
        #     self.pixhawk.mode = VehicleMode('LOITER')
        #     self.set_action('loiter')
        # elif self.current_action == 'start_rtl':
        #     self.pixhawk.mode = VehicleMode('RTL')
        #     self.set_action('rtl')
        # elif (self.current_action == 'rtl' or self.current_action == 'loiter') \
        #         and not self.pixhawk.armed:
        #     self._log('Detected disarm while ' + self.current_action + '. Changing to idle.')
        #     self.set_action('idle')

    ############################################################################
    # UTIL
    ############################################################################

    # load configuration from file
    def _load_config(self, file='droneconfig.json'):
        # TODO: error check all of this
        config_json = json.loads(open(file).read())
        self.server.config(config_json['uid'], config_json['auth'],
                           config_json['api_url'], config_json['name'])
        self.status = config_json['startup_status']
        self.wp_path = config_json['wp_path']
        self._log('Successfully loaded config from ' + file)
        return True

    # logging is abstracted so we can change where we are logging
    def _log(self, msg):
        print("[DEBUG]: {0}".format(msg))

    ############################################################################
    # PIXHAWK CONNECTIVITY
    ############################################################################

    def _read_from_pixhawk(self):
        # TODO: add a timeout on this?
        self.state = {
            "status": self.current_action,
            "timestamp": str(datetime.datetime.now()),
            "latitude": self.pixhawk.location.global_relative_frame.lat,
            "longitude": self.pixhawk.location.global_relative_frame.lon,
            "altitude": self.pixhawk.location.global_relative_frame.alt,
            "voltage": self.pixhawk.battery.voltage,
            "speed": self.pixhawk.groundspeed,
            # Use the time since the last heartbeat because we don't have
            # internet connectivity.
            "rssi": self.pixhawk.last_heartbeat,
        }
        return self.state

    def print_mode_change(self):
        if self.pixhawk.mode.name != self._prev_pixhawk_mode:
            print('MODE CHANGED TO', self.pixhawk.mode.name)
            self._prev_pixhawk_mode = self.pixhawk.mode.name
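# A minimal run-loop sketch for the Drone class above. The 1 Hz polling rate
# and the KeyboardInterrupt handling are assumptions; the source does not say
# how often step() is called.
if __name__ == '__main__':
    drone = Drone()
    if drone.start():
        try:
            while True:
                drone.step()
                time.sleep(1.0)
        except KeyboardInterrupt:
            pass
    drone.stop()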
import rospy

# `Gripper` is the project-local gripper wrapper.


def main():
    # Initialize the node before constructing ROS handles.
    rospy.init_node('close', anonymous=True)
    gripper = Gripper()
    gripper.close()


if __name__ == '__main__':
    main()
import copy
import math

import numpy as np
import rospy
from geometry_msgs.msg import Point, Pose, PoseStamped, Quaternion

# Project-local wrappers (Arm, ArmJoints, Base, RGBD, Head, Gripper, Torso,
# JointStateReader) are assumed to come from the surrounding repo.
DEG_TO_RAD = math.pi / 180.0  # assumed module-level constant


class Robot_Interface(object):
    """For usage with the Fetch robot."""

    def __init__(self, simulation=True):
        """Initializes various aspects of the Fetch.

        TODOs: get things working; also use the `simulation` flag to change
        ROS topic names if necessary (especially for the cameras!). UPDATE:
        actually I don't think this is necessary now; they have the same
        topic names.
        """
        rospy.init_node("fetch")
        self.arm = Arm()
        self.arm_joints = ArmJoints()
        self.base = Base()
        self.camera = RGBD()
        self.head = Head()
        self.gripper = Gripper(self.camera)
        self.torso = Torso()
        self.joint_reader = JointStateReader()

        # Tucked arm starting joint angle configuration.
        self.names = ArmJoints().names()
        self.tucked = [1.3200, 1.3999, -0.1998, 1.7199, 0.0, 1.6600, 0.0]
        self.tucked_list = [(x, y) for (x, y) in zip(self.names, self.tucked)]

        # Initial (x,y,yaw) position of the robot wrt the map origin. We keep
        # this fixed so that we can reset to this position as needed. The
        # HSR's `omni_base.pose` (i.e., the start pose) returns (x,y,yaw),
        # where yaw is the rotation about the z axis. For the Fetch's base,
        # `base.odom` supplies both `position` and `orientation` attributes.
        start = copy.deepcopy(self.base.odom.position)
        yaw = Base._yaw_from_quaternion(self.base.odom.orientation)
        self.start_pose = np.array([start.x, start.y, yaw])
        self.TURN_SPEED = 0.3
        self.num_restarts = 0

    def body_start_pose(self, start_height=0.10, end_height=0.10,
                        velocity_factor=None):
        """Sets the robot's body to some initial configuration.

        The HSR uses `whole_body.move_to_go()`, which initializes an
        appropriate posture so that the hand doesn't collide during movement.
        For the Fetch, we should probably make the torso extend, so the arm
        can extend more easily without collisions. Use `move_to_joint_goal`
        since that uses motion planning. Do NOT directly set the joints
        without planning!!
        """
        self.torso.set_height(start_height)
        self.arm.move_to_joint_goal(self.tucked_list,
                                    velocity_factor=velocity_factor)
        self.torso.set_height(end_height)
        # Specific to the Siemens challenge (actually a lot of this stuff is ...).
        if self.num_restarts == 0:
            self.base.turn(angular_distance=45 * DEG_TO_RAD)
            self.num_restarts += 1

    def head_start_pose(self):
        """Hard-coded starting pose for the robot's head.

        These values are from the HSR; the Fetch needs a different pan and
        tilt. Positive pan means rotating counterclockwise when looking at
        the robot from an aerial view.
        """
        self.head.pan_tilt(pan=0.0, tilt=0.8)

    def position_start_pose(self, offsets=None, do_something=False):
        """Assigns the robot's base to some starting pose.

        Mainly to "reset" the robot to the original starting position (and
        also rotation about the base axis) after it has moved, usually with
        no offsets.

        Ugly workaround: we have a target (x,y) and compute the distance to
        the point and the angle. We turn the Fetch according to that angle
        and go forward. Finally, we do a second turn which corresponds to the
        target yaw at the end. This turns w.r.t. the current angle, so we
        undo the effect of the first turn. See
        `examples/test_position_start_pose.py` for tests.

        Args:
            offsets: a list of length 3, indicating offsets in the x, y, and
                yaw, respectively, to be added onto the starting pose.
        """
        # Causing problems during my tests of the Siemens demo.
        if not do_something:
            return

        current_pos = copy.deepcopy(self.base.odom.position)
        current_theta = Base._yaw_from_quaternion(
                self.base.odom.orientation)  # [-pi, pi]
        ss = np.array([current_pos.x, current_pos.y, current_theta])

        # Absolute target position and orientation specified with `pp`.
        pp = np.copy(self.start_pose)
        if offsets:
            pp += np.array(offsets)

        # Get the distance to travel; this critically assumes `pp` is the
        # starting position.
        dist = np.sqrt((ss[0] - pp[0])**2 + (ss[1] - pp[1])**2)
        rel_x = ss[0] - pp[0]
        rel_y = ss[1] - pp[1]
        assert -1 <= rel_x / dist <= 1
        assert -1 <= rel_y / dist <= 1

        # But we also need to be *facing* the correct direction, w/input [-1,1].
        # First, get the opposite view (facing "outwards"), then flip by 180.
        desired_facing = np.arctan2(rel_y, rel_x)  # [-pi, pi], facing outward
        desired_theta = math.pi + desired_facing   # [0, 2*pi], flip by 180
        if desired_theta > math.pi:
            desired_theta -= 2 * math.pi           # [-pi, pi]

        # Reconcile with the current theta. Got this basically by trial/error.
        angle = desired_theta - current_theta      # [-2*pi, 2*pi]
        if angle > math.pi:
            angle -= 2 * math.pi
        elif angle < -math.pi:
            angle += 2 * math.pi

        self.base.turn(angular_distance=angle, speed=self.TURN_SPEED)
        self.base.go_forward(distance=dist, speed=0.2)

        # Back at the start x, y, but now we need to consider the _second_
        # turn. The robot is facing at `desired_theta` radians but wants
        # `pp[2]` radians.
        final_angle = pp[2] - desired_theta
        if final_angle > math.pi:
            final_angle -= 2 * math.pi
        elif final_angle < -math.pi:
            final_angle += 2 * math.pi
        self.base.turn(angular_distance=final_angle, speed=self.TURN_SPEED)

    def get_img_data(self):
        """Obtain camera and depth image.

        Returns:
            Tuple containing the RGB camera image and the corresponding
            depth image.
        """
        c_img = self.camera.read_color_data()
        d_img = self.camera.read_depth_data()
        return (c_img, d_img)

    def get_depth(self, point, d_img):
        """Compute mean depth near the grasp point.

        NOTE: assumes that we have a similar `cfg.ZRANGE` as with the HSR.
        I'm not sure where exactly this comes from.
        """
        y, x = int(point[0]), int(point[1])
        # z_box = d_img[y-ZRANGE:y+ZRANGE, x-ZRANGE:x+ZRANGE]
        z_box = d_img[y - 20:y + 20, x - 20:x + 20]
        indx = np.nonzero(z_box)
        z = np.mean(z_box[indx])
        return z
        # Commented-out variant with a smaller box:
        # y, x = int(point[0]), int(point[1])
        # # z_box = d_img[y-ZRANGE:y+ZRANGE, x-ZRANGE:x+ZRANGE]
        # z_box = d_img[y - 10:y + 10, x - 10:x + 10]
        # indx = np.nonzero(z_box)
        # z = np.mean(z_box[indx])
        # return z

    def get_rot(self, direction):
        """Compute rotation of the gripper such that the given vector is grasped.

        Currently this directly follows the HSR code, as there's nothing
        Fetch-dependent here.
        """
        dy, dx = direction[0], direction[1]
        dx *= -1
        if dy < 0:
            dx *= -1
            dy *= -1
        rot = np.arctan2(dy, dx)
        rot = np.pi - rot
        return rot

    def create_grasp_pose(self, x, y, z, rot):
        """If `intuitive=True`, then x, y, z, rot are interpreted wrt some
        link in the world, e.g., 'odom' for the Fetch. It's False by default
        to maintain backwards compatibility with Siemens-based code.
        """
        pose_name = self.gripper.create_grasp_pose(x, y, z, rot)
        return pose_name

    def open_gripper(self):
        self.gripper.open()

    def close_gripper(self):
        self.gripper.close()

    def move_to_pose(self, pose_name, z_offset, velocity_factor=None):
        """Moves to a pose.

        On the HSR, this moved the `hand_palm_link` to the frame named
        `pose_name` at the correct pose. For the Fetch we should be able to
        extract the pose from `pose_name` and then call the Arm's
        `move_to_pose` method.

        Args:
            pose_name: A string name for the pose to go to.
            z_offset: Scalar offset in the z-direction, wrt the pose
                specified by `pose_name`.
            velocity_factor: Controls the speed; closer to 0 means slower,
                closer to 1 means faster. (If 0.0, it turns into 1.0 for some
                reason.) Values greater than 1.0 are cut to 1.0.
        """
        # See:
        #   http://wiki.ros.org/tf/Tutorials/Writing%20a%20tf%20listener%20%28Python%29
        #   https://answers.ros.org/question/256354/does-tftransformlistenerlookuptransform-return-quaternion-position-or-translation-and-rotation/
        # The first frame should be the reference frame; use `base_link`,
        # not `odom`.
        point, quat = self.gripper.tl.lookupTransform('base_link', pose_name,
                                                      rospy.Time(0))
        z_point = point[2] + z_offset

        # See:
        #   https://github.com/cse481wi18/cse481wi18/blob/indigo-devel/applications/scripts/cart_arm_demo.py
        #   https://github.com/cse481wi18/cse481wi18/wiki/Lab-19%3A-Cartesian-space-manipulation
        ps = PoseStamped()
        ps.header.frame_id = 'base_link'
        ps.pose = Pose(Point(point[0], point[1], z_point),
                       Quaternion(quat[0], quat[1], quat[2], quat[3]))

        # See `arm.py` written by Justin Huang.
        error = self.arm.move_to_pose(pose_stamped=ps,
                                      velocity_factor=velocity_factor)
        if error is not None:
            rospy.logerr(error)

    def find_ar(self, ar_number, velocity_factor=None):
        try:
            ar_name = 'ar_marker/' + str(ar_number)

            # HSR code, with two hard-coded offsets?
            # self.whole_body.move_end_effector_pose(
            #         geometry.pose(y=0.08, z=-0.3), ar_name)

            # Fetch 'translation'. Note the `ar_name` for the pose name.
            point, quat = self.gripper.tl.lookupTransform(
                    'base_link', ar_name, rospy.Time(0))
            y_point = point[1] + 0.08
            z_point = point[2] - 0.3
            ps = PoseStamped()
            ps.header.frame_id = 'base_link'
            ps.pose = Pose(Point(point[0], y_point, z_point),
                           Quaternion(quat[0], quat[1], quat[2], quat[3]))
            error = self.arm.move_to_pose(pose_stamped=ps,
                                          velocity_factor=velocity_factor)
            if error is not None:
                rospy.logerr(error)
            return True
        except Exception:
            return False

    def pan_head(self, tilt):
        """Adjusts the tilt of the robot's head, AND sets the pan to zero.

        Args:
            tilt: Value in radians; positive means looking downwards.
        """
        self.head.pan_tilt(pan=0, tilt=tilt)
g = FollowJointTrajectoryGoal()
g.trajectory = JointTrajectory()
joint_states = rospy.wait_for_message("joint_states", JointState)
joints_pos = joint_states.position
g.trajectory.joint_names = JOINT_NAMES
g.trajectory.points = [
    JointTrajectoryPoint(positions=joints_pos, velocities=[0]*6,
                         time_from_start=rospy.Duration(0))]
g.trajectory.points.append(
    JointTrajectoryPoint(positions=Q4, velocities=[0]*6,
                         time_from_start=rospy.Duration(1)))
print("sending...")
client.send_goal(g)
print("waiting for finish")
client.wait_for_result()
print("finished")
grip.close()

# action = raw_input("Continue?")
# if action != '':
#     grip.open()
#     sys.exit(0)

g = FollowJointTrajectoryGoal()
g.trajectory = JointTrajectory()
joint_states = rospy.wait_for_message("joint_states", JointState)
joints_pos = joint_states.position
g.trajectory.joint_names = JOINT_NAMES
g.trajectory.points = [
    JointTrajectoryPoint(positions=joints_pos, velocities=[0]*6,
                         time_from_start=rospy.Duration(0))]
g.trajectory.points.append(
    JointTrajectoryPoint(positions=Q5, velocities=[0]*6,
                         time_from_start=rospy.Duration(10)))
g.trajectory.points.append(
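# A minimal setup sketch for the names the snippet above assumes (`client`,
# `JOINT_NAMES`, `Q4`/`Q5`, `grip`). The action topic and joint list follow
# common UR driver conventions and are assumptions here; Q4/Q5 are 6-element
# joint-angle targets defined elsewhere in the original script.
import actionlib
import rospy
from control_msgs.msg import FollowJointTrajectoryAction

JOINT_NAMES = ['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint',
               'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']

rospy.init_node('trajectory_demo', anonymous=True)  # hypothetical node name
client = actionlib.SimpleActionClient('follow_joint_trajectory',
                                      FollowJointTrajectoryAction)
client.wait_for_server()
grip = Gripper('/ur_driver/set_io')  # project-local wrapper, as in GripperNode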