def __init__(self, offset=None):
        super(hsr_PoseDecisionConveni, self).__init__(
            outcomes=['continue', 'plan_failed', 'detect_failed'],
            input_keys=[
                'target_poses', 'target_object_names', 'failed_objects'
            ],
            output_keys=[
                'selected_pose_approach', 'selected_pose_grasp',
                'selected_object_name', 'selected_object_status'
            ])
        self._offset = offset

        self.selected_object_name = None
        self.selected_object_status = 'not_detect'
        self.selected_pose_grasp = None
        self.selected_pose_approach = None
        self._move_group = 'whole_body'
        group = moveit_commander.MoveGroupCommander(self._move_group)

        self.eef_link = group.get_end_effector_link()
        self.planning_frame = group.get_planning_frame()
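        # NOTE: the planning frame reported by MoveIt is overridden with the map frame on the next line.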
        self.planning_frame = '/map'
        self._tolerance = 0.01

        self.robot = hsrb_interface.Robot()
        self.current_pose = None

        self._action_topic = '/move_group'
        self._client = actionlib.SimpleActionClient(self._action_topic,
                                                    MoveGroupAction)
        self._client.wait_for_server()
Example #2
    def __init__(self, features, user_name=None):

        self.com = COM()
        self.robot = hsrb_interface.Robot()

        self.noise = 0.1
        self.features = features  # self.com.binary_image
        self.count = 0

        if user_name is not None:
            self.com.Options.setup(self.com.Options.root_dir, user_name)

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.gripper = self.robot.get('gripper')

        self.com.go_to_initial_state(self.whole_body, self.gripper)

        self.joystick = JoyStick_X(self.com)

        self.policy = Policy(self.com, self.features)

        self.com.change_grip(self.gripper)
Example #3
 def __init__(self, ref_frame_id):
     super(hsr_HandPoseRecord, self).__init__(outcomes=['completed'],
                                              output_keys=['hand_pose'])
     self.robot = hsrb_interface.Robot()
     self.whole_body = self.robot.get('whole_body')
     self._ref_frame_id = ref_frame_id
     self.pose = Pose()
Example #4
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool 

            A bool to indicate whether or not to display a training set point for 
            debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})

        self.cam = RGBD()

        self.wl = Python_Labeler(cam=self.cam)

        self.gp = GraspPlanner()
Example #5
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool 

            A bool to indicate whether or not to display a training set point for 
            debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        #PARAMETERS TO CHANGE
        self.side = 'TOP'

        self.r_count = 0

        self.grasp_count = 0
        self.success_count = 0

        self.true_count = 0
        self.grasp = True

        self.r_count = self.get_rollout_number()

        self.cam = RGBD()
        self.com = COM()

        self.joystick = JoyStick_X(self.com)

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(cam=self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)
        self.position_head()

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        #self.test_current_point()
        time.sleep(4)

        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example #6
 def __init__(self, ref_frame_id):
     super(hsr_XtionPoint, self).__init__(outcomes=['finish'],
                                          input_keys=['xtion_point'])
     self.robot = hsrb_interface.Robot()
     self.whole_body = self.robot.get('whole_body')
     self._ref_frame_id = ref_frame_id
     self.goal = Point()
Example #7
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool 

            A bool to indicate whether or not to display a training set point for 
            debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)
        self.ins = InitialSampler(self.cam)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   self.robot.get('gripper'))

        self.g_detector = Analytic_Grasp()

        self.sn = Success_Net(self.whole_body, self.tt, self.cam,
                              self.omni_base)

        c_img = self.cam.read_color_data()

        #self.test_current_point()
        time.sleep(4)
        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example #8
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool 

            A bool to indicate whether or not to display a training set point for 
            debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()

        self.suction = Suction(self.gp, self.cam)

        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example #9
    def __init__(self):

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.web = Web_Labeler()
        print "after thread"
Example #10
    def __init__(self, select="hsr"):

        global omnibase
        global robot
        global whole_body
        self.navigation_setter = select
        self.goal = Float32MultiArray()
        omnibase = 0
        robot = hsrb_interface.Robot()
        omnibase = robot.get("omni_base")
        whole_body = robot.get("whole_body")
        self.pubGlobalGoalXYZ = rospy.Publisher(
            '/navigation/mvn_pln/get_close_xya',
            Float32MultiArray,
            queue_size=1)
        self.pubDistAngle = rospy.Publisher(
            '/navigation/path_planning/simple_move/goal_dist_angle',
            Float32MultiArray,
            queue_size=1)
        self.pubRobotStop = rospy.Publisher('/hardware/robot_state/stop',
                                            Empty,
                                            queue_size=1)
        rospy.Subscriber("/navigation/global_goal_reached", Bool,
                         callback_global_goal_reached)
        rospy.Subscriber("/navigation/goal_reached", Bool,
                         callback_goal_reached)
        rospy.Subscriber("/hardware/robot_state/stop", Empty, callback_stop)
        self.set_navigation_type(select)
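A sketch of sending a goal on the global-goal topic set up above; the [x, y, theta] layout of the Float32MultiArray is an assumption based on the topic name get_close_xya:

from std_msgs.msg import Float32MultiArray

goal = Float32MultiArray()
goal.data = [1.0, 0.5, 0.0]       # assumed order: x, y, theta in the map frame
pub = rospy.Publisher('/navigation/mvn_pln/get_close_xya',
                      Float32MultiArray, queue_size=1)
rospy.sleep(0.5)                  # give the publisher time to connect before sending
pub.publish(goal)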
Example #11
def robot_init():
    robot = hsrb_interface.Robot()
    omni_base = robot.get('omni_base')
    whole_body = robot.get('whole_body')
    gripper = robot.get('gripper')
    tts = robot.get('default', robot.Items.TEXT_TO_SPEECH)
    manip_pos = (5.40, 4.30, 0.0)
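As written, robot_init() only builds local handles; a minimal usage sketch, assuming it is extended to end with return robot, omni_base, whole_body, gripper, tts, manip_pos (that return, the go_abs call, and the spoken text are assumptions, not part of the original snippet):

robot, omni_base, whole_body, gripper, tts, manip_pos = robot_init()
tts.say("Moving to the manipulation position.")   # text-to-speech handle created in robot_init()
x, y, yaw = manip_pos
omni_base.go_abs(x, y, yaw, 100.0)                # assumed: go_abs(x, y, yaw, timeout) from recent hsrb_interface versions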
Example #12
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool 

            A bool to indicate whether or not to display a training set point for 
            debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        #self.com.go_to_initial_state(self.whole_body)

        self.count = 425

        self.joystick = JoyStick_X(self.com)
        self.true_count = 0
Example #13
    def __init__(self, user_name=None, inject_noise=False, noise_scale=1.0):
        self.robot = hsrb_interface.Robot()

        self.noise = 0.1

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.gripper = self.robot.get('gripper')
        self.tl = TransformListener()
        self.cam = RGBD()
        time.sleep(5)
        self.b_d = Bottle_Detect(self.cam.read_info_data())

        self.start_recording = False
        self.stop_recording = False

        self.com = COM()

        if user_name is not None:
            self.com.Options.setup(self.com.Options.root_dir, user_name)

        #self.com.go_to_initial_state(self.whole_body,self.gripper)

        #self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.3})

        self.joystick = JoyStick_X(self.com,
                                   inject_noise=inject_noise,
                                   noise_scale=noise_scale)
        self.torque = Gripper_Torque()
        self.joints = Joint_Positions()
Example #14
 def __init__(self):
     self.robot = robot = hsrb_interface.Robot()
     self.omni_base = robot.get('omni_base')
     self.whole_body = robot.get('whole_body')
     self.cam = RGBD()
     self.data = dict() # list of processed depth images and actions taken (I_t, a_t)
     print("Initialized the data collector! Resting for 2 seconds...")
     time.sleep(2)
Example #15
 def __init__(self, x=0.0, y=0.0, yaw=0.0, time_out=30.0):
     super(hsr_OmniBase,self).__init__(outcomes=['finish'])
     self.robot       = hsrb_interface.Robot()
     self.omni_base   = self.robot.get('omni_base')
     self._x          = x
     self._y          = y
     self._yaw        = yaw
     self._time_out   = time_out
Example #16
def opendoor(req):
    # main(whole_body, gripper, wrist_wrench)
    frame = req.handle_pose.header.frame_id
    handle_pos = req.handle_pose.pose
    # handle_pos = geometry_msgs.msg.PoseStamped()
    res = OpendoorResponse()

    cli = actionlib.SimpleActionClient(
        '/hsrb/omni_base_controller/follow_joint_trajectory',
        control_msgs.msg.FollowJointTrajectoryAction)
    vel_pub = rospy.Publisher('/hsrb/command_velocity',
                              geometry_msgs.msg.Twist, queue_size=10)
    armPub = rospy.Publisher('/hsrb/arm_trajectory_controller/command',
                             JointTrajectory, queue_size=1)

    robot = hsrb_interface.Robot()
    whole_body = robot.get('whole_body')
    gripper = robot.get('gripper')
    wrist_wrench = robot.get('wrist_wrench')
    base = robot.get('omni_base')
    start_theta = base.pose[2]
    # with hsrb_interface.Robot() as robot:
    #     whole_body = robot.get('whole_body')
    #     gripper = robot.get('gripper')
    #     wrist_wrench = robot.get('wrist_wrench')

    try:
        # Open the gripper
        whole_body.move_to_neutral()
        grasp_point_client()
        global recog_pos
        global is_found
        print(recog_pos.pose.position)

        print("Opening the gripper")
        whole_body.move_to_neutral()
        rospy.sleep(2.5)
        switch = ImpedanceControlSwitch()
        gripper.command(1.0)

        # Approach the door
        print("Approaching the door")
        grab_pose = geometry.multiply_tuples(
            geometry.pose(x=recog_pos.pose.position.x - HANDLE_TO_HAND_POS_X,
                          y=recog_pos.pose.position.y - HANDLE_TO_HAND_POS_Y,
                          z=recog_pos.pose.position.z,
                          ej=math.pi / 2),
            geometry.pose(ek=math.pi / 2))

        whole_body.move_end_effector_pose(grab_pose, _ORIGIN_TF)
        # Close the gripper
        wrist_wrench.reset()
        switch.activate("grasping")
        gripper.grasp(-0.01)
        rospy.sleep(1.0)
        switch.inactivate()
        # Rotate the handle
        whole_body.impedance_config = 'grasping'
        traj = JointTrajectory()

        # This controller requires that all joints have values
        traj.joint_names = ["arm_lift_joint", "arm_flex_joint",
                            "arm_roll_joint", "wrist_flex_joint",
                            "wrist_roll_joint"]
Example #17
    def __init__(self):


        self.xbox = XboxController()
        self.robot = hsrb_interface.Robot()
        self.whole_body = self.robot.get('whole_body')
        self.gripper = self.robot.get('gripper')

        self.pubTwist = rospy.Publisher('hsrb/command_velocity', Twist, queue_size=1)
Example #18
    def __init__(self, args):
        """For data collection of bed-making, NOT the deployment.

        Assumes we roll out the robot's policy via code (not via human touch).
        This is the 'slower' way where we have the python interface that the
        human clicks on to indicate grasping points. Good news is, our deployment
        code is probably going to be similar to this.

        For joystick: you only need it plugged in for the initial state sampler,
        which (at the moment) we are not even using.
        """
        self.robot = robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()
        self.omni_base = robot.get('omni_base')
        self.whole_body = robot.get('whole_body')
        self.cam = RGBD()
        self.com = COM()
        self.wl = Python_Labeler(cam=self.cam)

        # View mode: STANDARD (the way I was doing earlier), CLOSE (the way they want).
        self.view_mode = cfg.VIEW_MODE

        # Set up initial state, table, etc.
        self.com.go_to_initial_state(self.whole_body)
        self.tt = TableTop()

        # For now, a workaround. Ugly but it should do the job ...
        #self.tt.find_table(robot)
        self.tt.make_fake_ar()
        self.tt.find_table_workaround(robot)

        #self.ins = InitialSampler(self.cam)
        self.side = 'BOTTOM'
        self.grasp_count = 0

        # Bells and whistles; note the 'success check' to check if transitioning
        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   robot.get('gripper'))
        self.sc = Success_Check(self.whole_body, self.tt, self.cam,
                                self.omni_base)

        time.sleep(4)
        print(
            "Finished creating BedMaker()! Get the bed set up and run bed-making!"
        )
        if cfg.INS_SAMPLE:
            print("TODO: we don't have sampling code here.")

        # When we start, spin this so we can check the frames. Then un-comment,
        # etc. It's the current hack we have to get around crummy AR marker detection.
        if args.phase == 1:
            print("Now doing rospy.spin() because phase = 1.")
            rospy.spin()
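A minimal launch sketch for this data-collection class, assuming an argparse entry point; only the phase flag is taken from the snippet above, and the rest (including the bed_make() call) is hypothetical:

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--phase', type=int, default=2)   # phase 1 only spins so you can inspect frames
    args = parser.parse_args()
    bm = BedMaker(args)      # class name taken from the print statement above
    # bm.bed_make()          # hypothetical rollout entry point, not shown in this snippet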
Example #19
 def __init__(self, offset_z=0.3, offset_dist=0.1, width=0.7, mode='C'):
     super(hsr_CollisionBox, self).__init__(outcomes=['continue'],
                                            input_keys=['box_pose'])
     self.robot = hsrb_interface.Robot()
     self._box_frame_id = 'map'
     self.offset_z = offset_z
     self.offset_dist = offset_dist
     self.width = width
     self.mode = mode
     self.scene = moveit_commander.PlanningSceneInterface()
Example #20
 def __init__(self, arm_lift_joint=0.0, arm_flex_joint=0.0, arm_roll_joint=-3.14/2, wrist_flex_joint=-3.14/2, wrist_roll_joint=0.0, head_pan_joint=0.0, head_tilt_joint=0.0):
     super(hsr_JointPose,self).__init__(outcomes=['continue'])
     self.robot = hsrb_interface.Robot()
     self.whole_body = self.robot.get('whole_body')
     self._arm_lift_joint = arm_lift_joint
     self._arm_flex_joint = arm_flex_joint
     self._arm_roll_joint = arm_roll_joint
     self._wrist_flex_joint = wrist_flex_joint
     self._wrist_roll_joint = wrist_roll_joint
     self._head_pan_joint = head_pan_joint
     self._head_tilt_joint = head_tilt_joint
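The stored joint targets above are presumably applied later in the state's execute step; a minimal sketch of sending them in one call, assuming whole_body.move_to_joint_positions as used elsewhere on this page:

     # Sketch only: apply the stored targets (assumed usage inside the state's execute method).
     self.whole_body.move_to_joint_positions({
         'arm_lift_joint': self._arm_lift_joint,
         'arm_flex_joint': self._arm_flex_joint,
         'arm_roll_joint': self._arm_roll_joint,
         'wrist_flex_joint': self._wrist_flex_joint,
         'wrist_roll_joint': self._wrist_roll_joint,
         'head_pan_joint': self._head_pan_joint,
         'head_tilt_joint': self._head_tilt_joint,
     })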
Example #21
    def __init__(self):
        super(hsr_SelectTidyObjectandPosefromDatasetUltra,
              self).__init__(outcomes=['completed'],
                             input_keys=['load_file', 'detect_objects'],
                             output_keys=['tidy_name_db', 'tidy_pose_db'])
        self.robot = hsrb_interface.Robot()
        rospy.wait_for_service('em_spco_tidy_up/tidy_pose_from_dataset')
        self.select_tidy_pose_from_dataset = rospy.ServiceProxy(
            'em_spco_tidy_up/tidy_pose_from_dataset', spacoty_object2place)

        self.distance_idx = np.array([])
Example #22
 def __init__(self, name):
     self._action_name = name
     self._as = actionlib.SimpleActionServer(self._action_name,
                                             TakePoseAction,
                                             execute_cb=self.execute_cb,
                                             auto_start=False)
     self._as.start()
     self._giskard_wrapper = GiskardWrapper()
     self._robot = hsrb_interface.Robot()
     self._whole_body = self._robot.get('whole_body')
     rospy.loginfo("{} is ready and waiting for orders.".format(
         self._action_name))
Example #23
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool 

            A bool to indicate whether or not to display a training set point for 
            debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        # if cfg.USE_WEB_INTERFACE:
        #     self.wl = Web_Labeler()
        # else:
        #     self.wl = Python_Labeler(cam = self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Lego_Gripper(self.gp, self.cam, self.com.Options,
                                    self.robot.get('gripper'))

        self.RCNN = Depth_Object("bottle")
        #self.test_current_point()

        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example #24
 def __init__(self, grasp_force=0.7, mode=True):
     super(hsr_GrippingObject,
           self).__init__(outcomes=['continue', 'failed'],
                          output_keys=['grasp_success'])
     self._grasp_force = grasp_force
     self.grasp_success = False
     self._mode = mode
     self.robot = hsrb_interface.Robot()
     self.omni_base = self.robot.get('omni_base')
     self.whole_body = self.robot.get('whole_body')
     self.gripper = self.robot.get('gripper')
     self.tts = self.robot.get('default_tts')
Example #25
    def __init__(self, config={}):
        self.map_frame = FLAGS.map_frame
        self.base_frame = FLAGS.base_frame
        self.sensor_frame = FLAGS.sensor_frame
        self.end_effector_frame = FLAGS.end_effector_frame

        rospy.wait_for_service(FLAGS.rs_service)
        self.detector = rospy.ServiceProxy(FLAGS.rs_service, RSQueryService)
        self.robot = hsrb_interface.Robot()
        self.omni_base = self.robot.get(FLAGS.omni_base)
        self.whole_body = self.robot.get(FLAGS.whole_body)
        self.gripper = self.robot.get(FLAGS.gripper)
        self.tf_env = TransformListener()
Example #26
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point for
            debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if not DEBUG:
            self.com.go_to_initial_state(self.whole_body)

            self.tt = TableTop()
            self.tt.find_table(self.robot)

        self.wl = Python_Labeler(cam=self.cam)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.ds = data_saver('tpc_rollouts/rollouts')

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.whole_body,
                                   self.omni_base, self.tt)

        print "after thread"
Example #27
    def __init__(self, offset=0.3):
        super(hsr_CreateApproach, self).__init__(
            outcomes=['continue', 'failed'],
            input_keys=['target_pose'],
            output_keys=['selected_pose_approach', 'selected_pose_grasp'])
        self._offset = offset

        self.selected_pose_approach = None
        self.selected_pose_grasp = None
        self._move_group = 'whole_body'
        group = moveit_commander.MoveGroupCommander(self._move_group)

        self.robot = hsrb_interface.Robot()
        self.current_pose = None
Example #28
 def set_robot(cls, robot=None):
     if robot:
         cls.robot = robot
     else:
         cls.robot = hsrb_interface.Robot()
     cls.whole_body = cls.robot.get("whole_body")
     cls.omni_base = cls.robot.get("omni_base")
     cls.gripper = cls.robot.get("gripper")
     cls.wrist_wrench = cls.robot.try_get("wrist_wrench")
     cls.collision_world = cls.robot.get("global_collision_world")
     cls.tts = cls.robot.get("default_tts")
     cls.tts.language = cls.tts.ENGLISH
     #cls.tts.language = cls.tts.JAPANESE
     cls.tfl = cls.robot._get_tf2_buffer()
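A sketch of how a classmethod like set_robot is typically called, either sharing an existing connection or letting it build one; the holder class name RobotHandles is hypothetical:

import hsrb_interface

robot = hsrb_interface.Robot()
RobotHandles.set_robot(robot)     # reuse an already-connected Robot instance
# RobotHandles.set_robot()        # or let set_robot() construct one itself
RobotHandles.whole_body.move_to_neutral()
RobotHandles.tts.say("Ready.")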
Example #29
    def __init__(self, args):
        self.args = args
        self.robot = robot = hsrb_interface.Robot()
        self.omni_base = robot.get('omni_base')
        self.whole_body = robot.get('whole_body')
        self.cam = RGBD()
        self.data = dict() # list of processed depth images and actions taken (I_t, a_t)

        # We don't use this directly, but it makes a frame that we need for pixels -> world grasp poses.
        self.rgbd_map = RGBD2Map() # makes frame we need but we don't use it otherwise

        # TODO: eventually we need to remove this. Doing this to let us go from
        # camera coordinates to world frame, but we need HSR_CORE to support it.
        # But, should be easy because the grasp planner is pretty simple and we
        # only use it to compute the average depth values in a region.
        self.gp = GraspPlanner()
        self.gripper = Bed_Gripper(self.gp, self.cam, options=None, gripper=robot.get('gripper'))

        # Also this is a bit hacky. We want the HSR to rotate so that it's
        # _facing_ the bed now, whereas it started facing 'sideways'. Makes a
        # target pose for the robot so it goes there, before grasp execution.
        # TODO: make pose here. I think we can get away with rotating wrt the
        # map but in general we want to create our own poses.

        print("Initialized the data collector! Resting for 2 seconds...")
        time.sleep(2)

        # Handle the part about loading the network and pretrained model.
        HEAD  = '/nfs/diskstation/seita/bedmake_ssl'
        MODEL = 'resnet18_2018-11-18-09-50_000'
        PATH  = join(HEAD, MODEL, 'act_predictor.pt')

        # Get old args we used, and put into a newer Namespace object.
        with open(join(HEAD, MODEL, 'args.json'), 'r') as fh:
            saved_args = json.load(fh)
        self.netargs = opt._json_to_args(jsonfile=saved_args)
        
        # Load the pretrained model.
        model = opt.get_pretrained_model(self.netargs)
        self.act_predictor = ActPredictorNet(model, self.netargs)
        self.act_predictor.load_state_dict(torch.load(PATH))
        self.act_predictor.eval()

        self.transforms_valid = transforms.Compose([
            CT.Rescale((256,256)),
            CT.CenterCrop((224,224)),
            CT.ToTensor(),
            CT.Normalize(opt.MEAN, opt.STD),
        ])
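The TODO above says the grasp planner is only used to average depth values in an image region; a standalone sketch of that computation (the function name and window size are assumptions):

import numpy as np

def mean_depth_in_window(d_img, cx, cy, half_width=10):
    """Average the valid (non-zero) depth readings in a square window centered at (cx, cy)."""
    patch = d_img[cy - half_width:cy + half_width, cx - half_width:cx + half_width]
    valid = patch[patch > 0]      # zeros usually mark missing depth readings
    return float(valid.mean()) if valid.size else 0.0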
Example #30
    def __init__(self):

        self.server = actionlib.SimpleActionServer('em_follow_me_action',
                                                   follow_meAction,
                                                   self.execute, False)
        self.server.start()
        print "start server"

        self.movebase = actionlib.SimpleActionClient('/move_base/move',
                                                     MoveBaseAction)
        self.movebase.wait_for_server()

        self.robot = hsrb_interface.Robot()
        self.omni_base = self.robot.get('omni_base')

        self.pos_list = np.empty(0)