def __init__(self, controller_commander, urdf_xml_string, srdf_xml_string,
                 camera_image_topic, camera_trigger_topic, camera_info_topic):
        """Wire up planning, camera, and force/torque inputs for PBVS placement.

        :param controller_commander: robot controller interface used for motion
        :param urdf_xml_string: robot URDF XML, forwarded to the Planner
        :param srdf_xml_string: robot SRDF XML, forwarded to the Planner
        :param camera_image_topic: ROS topic publishing camera images
        :param camera_trigger_topic: ROS service that triggers a camera capture
        :param camera_info_topic: ROS topic providing camera intrinsics
        """
        self.controller_commander = controller_commander
        self.planner = Planner(controller_commander, urdf_xml_string,
                               srdf_xml_string)

        self.tf_listener = PayloadTransformListener()

        # Camera intrinsics are fetched once up front.
        self.camera_info = self.get_camera_info(camera_info_topic)

        # Depth-1 queue: only the most recent image is kept for processing.
        self.img_queue = Queue.Queue(1)
        self.camera_trigger = rospy.ServiceProxy(camera_trigger_topic, Trigger)
        self.camera_sub = rospy.Subscriber(camera_image_topic, Image,
                                           self._ros_img_cb)

        self.controller_state_sub = rospy.Subscriber("controller_state",
                                                     controllerstate,
                                                     self.ft_cb)
        self.FTdata = None    # latest force/torque sample (set by ft_cb)
        self.ft_flag = False  # becomes True once F/T data has been received
        # NOTE(review): FTdata is still None here, so FTdata_0 is always None
        # at construction -- presumably re-captured later; confirm.
        self.FTdata_0 = self.FTdata
        # Compliance controller parameters
        self.F_d_set1 = -100
        self.F_d_set2 = -200
        self.Kc = 0.00002

        self.client = actionlib.SimpleActionClient(
            "joint_trajectory_action", FollowJointTrajectoryAction)
        self.K_pbvs = 0.25  # proportional gain for the PBVS correction step
    def __init__(self, disable_ft=False):
        """Set up the panel pick-and-place process controller.

        :param disable_ft: when True, force/torque pickup thresholds
            are skipped during pickup motions.
        """
        self.urdf = URDF.from_parameter_server()
        self.overhead_vision_client = actionlib.SimpleActionClient(
            "recognize_objects", ObjectRecognitionAction)
        self.execute_trajectory_action = actionlib.SimpleActionClient(
            "execute_trajectory", ExecuteTrajectoryAction)
        self.rapid_node = rapid_node_pkg.RAPIDCommander()
        self.controller_commander = controller_commander_pkg.ControllerCommander(
        )
        # Process bookkeeping
        self.state = 'init'
        self.current_target = None   # payload currently targeted for pickup
        self.current_payload = None  # payload currently held by the gripper
        # Known payload names (key and value identical by convention here).
        self.available_payloads = {
            'leeward_mid_panel': 'leeward_mid_panel',
            'leeward_tip_panel': 'leeward_tip_panel'
        }
        self.desired_controller_mode = self.controller_commander.MODE_AUTO_TRAJECTORY
        self.speed_scalar = 1.0
        self.disable_ft = disable_ft

        self.tf_listener = PayloadTransformListener()
        # Latched so late subscribers still receive the current state.
        self._process_state_pub = rospy.Publisher("process_state",
                                                  ProcessState,
                                                  queue_size=100,
                                                  latch=True)
        self.publish_process_state()

        self.update_payload_pose_srv = rospy.ServiceProxy(
            "update_payload_pose", UpdatePayloadPose)
        self.get_payload_array_srv = rospy.ServiceProxy(
            "get_payload_array", GetPayloadArray)

        # Cached motion plans keyed by process step name.
        self.plan_dictionary = {}
# Ejemplo n.º 3
    def __init__(self):
        """Set up the IBVS (image-based visual servoing) placement controller."""

        # Subscribe to controller_state for force/torque feedback
        self.controller_state_sub = rospy.Subscriber("controller_state", controllerstate, self.callback)
        self.last_ros_image_stamp = rospy.Time(0)
        self.goal_handle=None

        self.listener = PayloadTransformListener()
        self.rapid_node = rapid_node_pkg.RAPIDCommander()
        self.controller_commander = controller_commander_pkg.ControllerCommander()
        self.object_commander = ObjectRecognitionCommander()
        # Initialize aruco boards and detector parameters
        self.aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
        self.parameters =  cv2.aruco.DetectorParameters_create()
        # Sub-pixel corner refinement improves marker pose accuracy.
        self.parameters.cornerRefinementMethod=cv2.aruco.CORNER_REFINE_SUBPIX
        self.parameters.adaptiveThreshWinSizeMax=30
        self.parameters.adaptiveThreshWinSizeStep=7
        # ================== Cam 636
        # --- Subscribe to Gripper camera node for image acquisition
        #TODO: Take in camera names and make subscribers and triggers accordingly
        self.ros_gripper_2_img_sub = rospy.Subscriber('/gripper_camera_2/image', Image, self.object_commander.ros_raw_gripper_2_image_cb)
        self.ros_gripper_2_trigger = rospy.ServiceProxy('/gripper_camera_2/trigger', Trigger)

        # --- Camera parameters
        self.CamParam = CameraParams()
        # --- Camera pose
        #TODO: Substitute transform in here
        # Hard-coded camera extrinsics: rotation R_Jcam and lever arm r_cam
        # assembled into an inverted 6x6 spatial transform -- presumably maps
        # camera-frame twists into the robot frame; TODO confirm convention.
        R_Jcam = np.array([[0.9995,-0.0187,-0.0263],[-0.0191,-0.9997,-0.0135],[-0.0261,0.0140,-0.9996]])
        r_cam = rox.hat(np.array([0.0707, 1.1395, 0.2747]))#rox.hat(np.array([- 0.2811, -1.1397,0.0335]))#rox.hat(np.array([- 0.2811, 1.1397,0.0335]))
        self.R_Jcam = np.linalg.inv(np.vstack([ np.hstack([R_Jcam,np.zeros([3,3])]), np.hstack([np.dot(r_cam,R_Jcam),R_Jcam]) ]))
        self.dt = None      # control-loop timestep, set later
        self.iteration=0

        self.board_ground = None
        # functions like a gain, used with velocity to track position
        self.FTdata = None    # latest force/torque sample
        self.ft_flag = False  # becomes True once F/T data has been received
        #self.FTdata_0 = self.FTdata
        #self.FTdata_0est = self.compute_ft_est()
        #self.result = self.take_image()
        #TODO: replace with trajopt code
        self.client = actionlib.SimpleActionClient("joint_trajectory_action", FollowJointTrajectoryAction)
        # IBVS parameters (populated by the caller before servoing)
        self.du_converge_TH = None   # image-space convergence threshold, u axis
        self.dv_converge_TH = None   # image-space convergence threshold, v axis
        self.iteration_limit = None  # maximum servo iterations
        self.Ki = None
        # Compliance controller parameters
        self.F_d_set1 = None
        self.F_d_set2 = None
        self.Kc = None
        self.ros_data=None
        self.camMatrix=None
        self.distCoeffs=None
        # camera_info callback fills camMatrix/distCoeffs via fill_camera_data.
        self.ros_gripper_2_cam_info_sub=rospy.Subscriber('/gripper_camera_2/camera_info', CameraInfo, self.fill_camera_data)
    def __init__(self, disable_ft=False):
        """Set up the placement process controller (placement-action variant).

        :param disable_ft: when True, force/torque pickup thresholds
            are skipped during pickup motions.
        """
        self.lock = threading.Lock()
        # Robot description parameters, forwarded to the Planner below.
        urdf_xml_string = rospy.get_param("robot_description")
        srdf_xml_string = rospy.get_param("robot_description_semantic")
        self.urdf = URDF.from_parameter_server()
        self.overhead_vision_client = actionlib.SimpleActionClient(
            "recognize_objects", ObjectRecognitionAction)
        self.rapid_node = rapid_node_pkg.RAPIDCommander()
        self.controller_commander = ControllerCommander()
        # Process bookkeeping
        self.state = 'init'
        self.current_target = None   # payload currently targeted for pickup
        self.current_payload = None  # payload currently held by the gripper
        self.available_payloads = {
            'leeward_mid_panel': 'leeward_mid_panel',
            'leeward_tip_panel': 'leeward_tip_panel'
        }
        self.desired_controller_mode = self.controller_commander.MODE_AUTO_TRAJECTORY
        self.speed_scalar = 1.0
        self.disable_ft = disable_ft

        self.tf_listener = PayloadTransformListener()
        # Latched so late subscribers still receive the current state.
        self._process_state_pub = rospy.Publisher("process_state",
                                                  ProcessState,
                                                  queue_size=100,
                                                  latch=True)
        self.publish_process_state()
        self.placement_client = actionlib.SimpleActionClient(
            'placement_step', PBVSPlacementAction)
        #self.placement_client.wait_for_server()
        self.update_payload_pose_srv = rospy.ServiceProxy(
            "update_payload_pose", UpdatePayloadPose)
        self.get_payload_array_srv = rospy.ServiceProxy(
            "get_payload_array", GetPayloadArray)
        self._goal_handle = None
        self._goal_handle_lock = threading.RLock()
        self.subprocess_handle = None
        self.plan_dictionary = {}  # cached plans keyed by process step name
        self.process_starts = {}
        self.process_index = None
        # Ordered sequence of process steps for the pick-and-place cycle.
        self.process_states = [
            "reset_position", "pickup_prepare", "pickup_lower",
            "pickup_grab_first_step", "pickup_grab_second_step",
            "pickup_raise", "transport_payload", "place_payload",
            "gripper_release"
        ]

        self.planner = Planner(self.controller_commander, urdf_xml_string,
                               srdf_xml_string)
# Ejemplo n.º 5
class ObjectRecognitionCommander(object):
    """Thin wrapper around the recognize_objects action server.

    Looks up recognized object poses and derives gripper-target poses
    from the TF tree.
    """

    def __init__(self):
        self.client = actionlib.SimpleActionClient("recognize_objects",
                                                   ObjectRecognitionAction)
        self.listener = PayloadTransformListener()

    def get_object_pose(self, key):
        """Run object recognition and return the pose of the object *key*.

        Raises Exception when the recognizer does not report the object.
        """
        self.client.wait_for_server()

        recognition_goal = ObjectRecognitionGoal(False,
                                                 [-1e10, 1e10, -1e10, 1e10])
        self.client.send_goal(recognition_goal)
        self.client.wait_for_result()
        result = self.client.get_result()

        match = next((obj for obj in result.recognized_objects.objects
                      if obj.type.key == key), None)
        if match is None:
            raise Exception("Requested object not found")
        return rox_msg.msg2transform(match.pose.pose.pose)

    def get_object_gripper_target_pose(self, key):
        """Return the gripper-target pose for the recognized object *key*."""
        recognized_pose = self.get_object_pose(key)

        gripper_offset = self.listener.lookupTransform(key,
                                                       key + "_gripper_target",
                                                       rospy.Time(0))
        return recognized_pose * gripper_offset
# Ejemplo n.º 6
def main():
    """Entry point: iteratively servo the gripper toward a desired transform.

    Command-line arguments:
        1: YAML file containing the desired TransformStamped
        2: camera image topic
        3: camera trigger service topic
        4: camera info topic
    """
    rospy.init_node("pbvs_object_placement")

    urdf_xml_string = rospy.get_param("robot_description")
    srdf_xml_string = rospy.get_param("robot_description_semantic")
    controller_commander = ControllerCommander()

    # FIX: planner is used in the servo loop below; it must be constructed
    # (it was previously commented out, causing a NameError at runtime).
    planner = Planner(controller_commander, urdf_xml_string, srdf_xml_string)

    transform_fname = sys.argv[1]
    camera_image_topic = sys.argv[2]
    camera_trigger_topic = sys.argv[3]
    camera_info_topic = sys.argv[4]

    tf_listener = PayloadTransformListener()

    desired_transform_msg = TransformStamped()

    with open(transform_fname, 'r') as f:
        # safe_load: the transform file is plain data; full yaml.load
        # can construct arbitrary objects and is unsafe/deprecated here.
        transform_yaml = yaml.safe_load(f)

    genpy.message.fill_message_args(desired_transform_msg, transform_yaml)
    desired_transform = rox_msg.msg2transform(desired_transform_msg)

    markers = get_all_payload_markers()

    fixed_marker = markers[desired_transform.parent_frame_id]
    payload_marker = markers[desired_transform.child_frame_id]

    aruco_dict = get_aruco_dictionary(fixed_marker)
    camera_info = get_camera_info(camera_info_topic)

    # Give publishers/subscribers a moment to connect before triggering.
    time.sleep(1)

    # TODO: no convergence criterion -- this loop servos until killed.
    while True:
        img = receive_image(camera_image_topic, camera_trigger_topic)

        target_pose = compute_step_gripper_target_pose(
            img, fixed_marker, payload_marker, desired_transform,
            camera_info, aruco_dict, np.array([0.2] * 6), tf_listener)

        plan = planner.trajopt_plan(target_pose,
                                    json_config_name="panel_pickup")
        controller_commander.set_controller_mode(
            controller_commander.MODE_HALT, 1, [], [])
        controller_commander.set_controller_mode(
            controller_commander.MODE_AUTO_TRAJECTORY, 1, [], [])
        controller_commander.execute_trajectory(plan)
# Ejemplo n.º 7
class TestTesseractDiff(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(TestTesseractDiff, self).__init__(*args, **kwargs)
        self.tf_listener = PayloadTransformListener()

    def test_tesseract_diff(self):

        expected_state_1 = rospy.wait_for_message("/expected_tesseract_diff_1",
                                                  TesseractState,
                                                  timeout=10)
        state_1 = rospy.wait_for_message('/tesseract_diff',
                                         TesseractState,
                                         timeout=10)
        self._assert_tesseract_state_equal(expected_state_1, state_1)

        rospy.sleep(rospy.Duration(1))

        update_pose_proxy = rospy.ServiceProxy("update_payload_pose",
                                               UpdatePayloadPose)

        payload2_to_gripper_target_tf = self.tf_listener.lookupTransform(
            "payload2", "payload2_gripper_target", rospy.Time(0))

        req = UpdatePayloadPoseRequest()
        req.header.frame_id = "vacuum_gripper_tool"
        req.name = "payload2"
        req.pose = rox_msg.transform2pose_msg(
            payload2_to_gripper_target_tf.inv())
        req.confidence = 0.5
        res = update_pose_proxy(req)
        assert res.success

        expected_state_2 = rospy.wait_for_message("/expected_tesseract_diff_2",
                                                  TesseractState,
                                                  timeout=10)
        state_2 = rospy.wait_for_message('/tesseract_diff',
                                         TesseractState,
                                         timeout=10)
        self._assert_tesseract_state_equal(expected_state_2, state_2)

        world_to_fixture2_payload2_target_tf = self.tf_listener.lookupTransform(
            "world", "fixture2_payload2_target", rospy.Time(0))

        #Add in an offset to represent a final placement error
        fixture2_payload2_target_to_payload2_tf = rox.Transform(
            rox.rot([0, 0, 1], np.deg2rad(5)), [0.1, 0, 0],
            "fixture2_payload2_target", "payload2")

        req2 = UpdatePayloadPoseRequest()
        req2.header.frame_id = "world"
        req2.name = "payload2"
        req2.pose = rox_msg.transform2pose_msg(
            world_to_fixture2_payload2_target_tf *
            fixture2_payload2_target_to_payload2_tf)
        res2 = update_pose_proxy(req2)
        assert res2.success

        expected_state_3 = rospy.wait_for_message("/expected_tesseract_diff_3",
                                                  TesseractState,
                                                  timeout=10)
        state_3 = rospy.wait_for_message('/tesseract_diff',
                                         TesseractState,
                                         timeout=10)
        self._assert_tesseract_state_equal(expected_state_3, state_3)

    def _assert_tesseract_state_equal(self, state1, state2):
        self.assertEqual(state1.name, state2.name)
        self.assertEqual(state1.urdf_name, state2.urdf_name)
        self.assertEqual(state1.object_colors, state2.object_colors)
        self.assertEqual(state1.highlight_links, state2.object_colors)
        self.assertEqual(state1.is_diff, state2.is_diff)

        self.assertEqual(len(state1.attachable_objects),
                         len(state2.attachable_objects))
        for i in xrange(len(state1.attachable_objects)):
            self._assert_attachable_object_equal(state1.attachable_objects[i],
                                                 state2.attachable_objects[i])

        self.assertEqual(len(state1.attached_bodies),
                         len(state2.attached_bodies))
        for i in xrange(len(state1.attached_bodies)):
            self._assert_attached_body_equal(state1.attached_bodies[i],
                                             state2.attached_bodies[i])

    def _assert_attachable_object_equal(self, obj1, obj2):
        self.assertEqual(obj1.name, obj2.name)
        self._assert_visual_geometry_equal(obj1.visual, obj2.visual)
        self._assert_collision_geometry_equal(obj1.collision, obj2.collision)

        self._assert_inertia_almost_equal(obj1.inertia, obj2.inertia)

        self.assertEqual(obj1.operation, obj2.operation)

    def _assert_visual_geometry_equal(self, vis1, vis2):
        self.assertEqual(vis1.primitives, vis2.primitives)
        self._assert_pose_almost_equal(vis1.primitive_poses,
                                       vis2.primitive_poses)
        self.assertEqual(vis1.primitive_colors, vis2.primitive_colors)
        for m1, m2 in zip(vis1.meshes, vis2.meshes):
            self._assert_mesh_almost_equal(m1, m2)
        self._assert_pose_almost_equal(vis1.mesh_poses, vis2.mesh_poses)
        self.assertEqual(vis1.mesh_colors, vis2.mesh_colors)
        self.assertEqual(vis1.planes, vis2.planes)
        self._assert_pose_almost_equal(vis1.plane_poses, vis2.plane_poses)
        self.assertEqual(vis1.plane_colors, vis2.plane_colors)
        self.assertEqual(vis1.octomaps, vis2.octomaps)
        self._assert_pose_almost_equal(vis1.octomap_poses, vis2.octomap_poses)
        self.assertEqual(vis1.octomap_colors, vis2.octomap_colors)

    def _assert_collision_geometry_equal(self, col1, col2):
        self.assertEqual(col1.primitives, col2.primitives)
        self._assert_pose_almost_equal(col1.primitive_poses,
                                       col2.primitive_poses)
        self.assertEqual(col1.primitive_collision_object_types,
                         col2.primitive_collision_object_types)
        self.assertEqual(col1.primitive_colors, col2.primitive_colors)
        for m1, m2 in zip(col1.meshes, col2.meshes):
            self._assert_mesh_almost_equal(m1, m2)
        self._assert_pose_almost_equal(col1.mesh_poses, col2.mesh_poses)
        self.assertEqual(col1.mesh_collision_object_types,
                         col2.mesh_collision_object_types)
        self.assertEqual(col1.mesh_colors, col2.mesh_colors)
        self.assertEqual(col1.planes, col2.planes)
        self._assert_pose_almost_equal(col1.plane_poses, col2.plane_poses)
        self.assertEqual(col1.plane_collision_object_types,
                         col2.plane_collision_object_types)
        self.assertEqual(col1.plane_colors, col2.plane_colors)
        self.assertEqual(col1.octomaps, col2.octomaps)
        self._assert_pose_almost_equal(col1.octomap_poses, col2.octomap_poses)
        self.assertEqual(col1.octomap_collision_object_types,
                         col2.octomap_collision_object_types)
        self.assertEqual(col1.octomap_colors, col2.octomap_colors)

    def _assert_attached_body_equal(self, body1, body2):
        self.assertEqual(body1.object_name, body2.object_name)
        self.assertEqual(body1.parent_link_name, body2.parent_link_name)
        self._assert_pose_almost_equal(body1.transform, body2.transform)
        self.assertEqual(body1.touch_links, body2.touch_links)
        self.assertEqual(body1.operation, body2.operation)

    def _assert_mesh_almost_equal(self, m1, m2):
        for t1, t2 in zip(m1.triangles, m2.triangles):
            np.testing.assert_equal(t1.vertex_indices, t2.vertex_indices)
        for v1, v2 in zip(m1.vertices, m2.vertices):
            np.testing.assert_allclose([v1.x, v1.y, v1.z], [v2.x, v2.y, v2.z])

    def _assert_pose_almost_equal(self, pose1, pose2):
        if isinstance(pose1, list):
            for p1, p2 in zip(pose1, pose2):
                self._assert_pose_almost_equal(p1, p2)
            return
        np.testing.assert_allclose([pose1.position.x, pose1.position.y, pose1.position.z], \
                                    [pose2.position.x, pose2.position.y, pose2.position.z])
        np.testing.assert_allclose([pose1.orientation.x, pose1.orientation.y, \
                                pose1.orientation.z, pose1.orientation.w], \
                               [pose2.orientation.x, pose2.orientation.y, \
                                pose2.orientation.z, pose2.orientation.w] )

    def _assert_inertia_almost_equal(self, i1, i2):
        self.assertAlmostEqual(i1.m, i2.m)
        self.assertAlmostEqual(i1.com.x, i2.com.x)
        self.assertAlmostEqual(i1.com.y, i2.com.y)
        self.assertAlmostEqual(i1.com.z, i2.com.z)
        self.assertAlmostEqual(i1.ixx, i2.ixx)
        self.assertAlmostEqual(i1.ixy, i2.ixy)
        self.assertAlmostEqual(i1.ixz, i2.ixz)
        self.assertAlmostEqual(i1.ixy, i2.ixy)
        self.assertAlmostEqual(i1.iyy, i2.iyy)
        self.assertAlmostEqual(i1.iyz, i2.iyz)
        self.assertAlmostEqual(i1.izz, i2.izz)
class ProcessController(object):
    def __init__(self, disable_ft=False):
        """Set up the panel pick-and-place process controller.

        :param disable_ft: when True, force/torque pickup thresholds
            are skipped (get_payload_pickup_ft_threshold returns []).
        """
        self.urdf = URDF.from_parameter_server()
        self.overhead_vision_client = actionlib.SimpleActionClient(
            "recognize_objects", ObjectRecognitionAction)
        self.execute_trajectory_action = actionlib.SimpleActionClient(
            "execute_trajectory", ExecuteTrajectoryAction)
        self.rapid_node = rapid_node_pkg.RAPIDCommander()
        self.controller_commander = controller_commander_pkg.ControllerCommander(
        )
        # Process bookkeeping
        self.state = 'init'
        self.current_target = None   # payload currently targeted for pickup
        self.current_payload = None  # payload currently held by the gripper
        # Known payload names (key and value identical by convention here).
        self.available_payloads = {
            'leeward_mid_panel': 'leeward_mid_panel',
            'leeward_tip_panel': 'leeward_tip_panel'
        }
        self.desired_controller_mode = self.controller_commander.MODE_AUTO_TRAJECTORY
        self.speed_scalar = 1.0
        self.disable_ft = disable_ft

        self.tf_listener = PayloadTransformListener()
        # Latched so late subscribers still receive the current state.
        self._process_state_pub = rospy.Publisher("process_state",
                                                  ProcessState,
                                                  queue_size=100,
                                                  latch=True)
        self.publish_process_state()

        self.update_payload_pose_srv = rospy.ServiceProxy(
            "update_payload_pose", UpdatePayloadPose)
        self.get_payload_array_srv = rospy.ServiceProxy(
            "get_payload_array", GetPayloadArray)

        # Cached motion plans keyed by process step name.
        self.plan_dictionary = {}

    def _vision_get_object_pose(self, key):
        """Run overhead object recognition and return the pose of *key*.

        Raises Exception when the recognizer does not report the object.
        """
        self.overhead_vision_client.wait_for_server()

        recognition_goal = ObjectRecognitionGoal(False,
                                                 [-1e10, 1e10, -1e10, 1e10])
        self.overhead_vision_client.send_goal(recognition_goal)
        self.overhead_vision_client.wait_for_result()
        result = self.overhead_vision_client.get_result()

        for recognized in result.recognized_objects.objects:
            if recognized.type.key != key:
                continue
            pose = rox_msg.msg2transform(recognized.pose.pose.pose)
            pose.parent_frame_id = recognized.pose.header.frame_id
            pose.child_frame_id = key
            return pose

        raise Exception("Requested object not found")

    def _vision_get_object_gripper_target_pose(self, key):
        """Locate *key* via vision; return (gripper_target_pose, object_pose)."""
        object_pose = self._vision_get_object_pose(key)

        gripper_offset = self.tf_listener.lookupTransform(
            key, key + "_gripper_target", rospy.Time(0))
        return object_pose * gripper_offset, object_pose

    def _tf_get_object_gripper_target_pose(self, key):
        """Look up *key* in TF; return (gripper_target_pose, object_pose).

        Raises Exception when the payload pose confidence is below 0.8,
        so a stale or uncertain TF entry never drives motion.
        """
        payload = self._get_payload(key)
        if payload.confidence < 0.8:
            raise Exception("Payload confidence too low for tf lookup")

        object_pose = self.tf_listener.lookupTransform("/world", key,
                                                       rospy.Time(0))
        gripper_offset = self.tf_listener.lookupTransform(
            key, key + "_gripper_target", rospy.Time(0))
        return object_pose * gripper_offset, object_pose

    def get_payload_pickup_ft_threshold(self, payload):
        """Return the pickup force/torque threshold for *payload*.

        Returns an empty list (no threshold) when F/T monitoring is disabled.
        """
        if self.disable_ft:
            return []
        payload_info = self._get_payload(payload)
        return payload_info.gripper_targets[0].pickup_ft_threshold

    def _active_client(self):
        # Action-client "active" callback: the goal was accepted and the
        # robot is now moving.
        self.state = "moving"
        #self.publish_process_state()

    def _finished_client(self, state, result):
        # Action-client "done" callback: publish the new process state and
        # log any non-success result.
        #if(state== actionlib.GoalStatus.SUCCEEDED):

        self.publish_process_state()
        # NOTE(review): `result` is the action result message, yet it is
        # compared against MoveItErrorCodes.SUCCESS -- confirm the action
        # server really returns an error code here.
        if (result != MoveItErrorCodes.SUCCESS):
            rospy.loginfo(MoveItErrorCodes.SUCCESS)
            rospy.loginfo("MoveItErrorCode generated: %s", str(result))

    def get_state(self):
        """Return the current process-state string (e.g. "pickup_prepare")."""
        return self.state

    def get_current_pose(self):
        """Return the robot's current pose from the controller commander."""
        return self.controller_commander.get_current_pose()

    def plan_to_reset_position(self):
        """Plan a move back to the reset position (not yet implemented).

        The original body stored an undefined local ``path`` in the plan
        dictionary, which raised NameError on every call; fail explicitly
        until the reset movement is actually implemented.
        """
        #TODO: Implement reset movement in process controller
        rospy.loginfo("Planning to reset position")
        self.state = "reset_position"
        raise NotImplementedError(
            "reset-position planning is not implemented yet")

    def plan_pickup_prepare(self, target_payload):
        """Locate *target_payload* with vision and plan a move to a hover
        pose 0.5 m above its gripper target.

        :param target_payload: name/key of the payload to pick up
        """
        #TODO: check state and payload

        rospy.loginfo("Begin pickup_prepare for payload %s", target_payload)

        object_target, object_pose = self._vision_get_object_gripper_target_pose(
            target_payload)

        # Record the freshly observed payload pose with high confidence.
        self._update_payload_pose(target_payload,
                                  object_pose,
                                  confidence=0.8,
                                  parent_frame_id=object_pose.parent_frame_id)

        rospy.loginfo("Found payload %s at pose %s", target_payload,
                      object_target)

        # NOTE: pose_target aliases self.pose_target, so the +0.5 m hover
        # offset below is also reflected in self.pose_target.
        self.pose_target = copy.deepcopy(object_target)
        pose_target = self.pose_target
        pose_target.p[2] += 0.5

        rospy.loginfo("Prepare pickup %s at pose %s", target_payload,
                      object_target)
        # FIX: replaced bare Python 2 `print` with rospy logging so the
        # message reaches rosout and parses under Python 3.
        rospy.loginfo("Pickup prepare target position: %s",
                      str(pose_target.p))

        path = self.controller_commander.plan(pose_target)

        self.current_target = target_payload
        self.state = "plan_pickup_prepare"
        self.plan_dictionary['pickup_prepare'] = path

        rospy.loginfo("Finish pickup prepare for payload %s", target_payload)
        self.publish_process_state()

    def move_pickup_prepare(self):
        """Execute the previously planned pickup-prepare trajectory.

        Sends the cached plan asynchronously; removed the dead local
        ``result`` that was assigned but never used.
        """
        self.controller_commander.set_controller_mode(
            self.desired_controller_mode, self.speed_scalar, [], [])
        self.state = "pickup_prepare"
        goal = ExecuteTrajectoryGoal()
        goal.trajectory = self.plan_dictionary['pickup_prepare']
        self.execute_trajectory_action.send_goal(goal,
                                                 active_cb=self._active_client,
                                                 done_cb=self._finished_client)
        #self.controller_commander.async_execute(self.plan_dictionary['pickup_prepare'],result)

    def plan_pickup_lower(self):
        """Plan a Cartesian descent to 0.15 m above the pickup target."""
        #TODO: check change state and target

        rospy.loginfo("Begin pickup_lower for payload %s", self.current_target)

        object_target, _ = self._tf_get_object_gripper_target_pose(
            self.current_target)
        pose_target2 = copy.deepcopy(object_target)
        pose_target2.p[2] += 0.15
        # FIX: replaced bare Python 2 `print` with rospy logging so the
        # message reaches rosout and parses under Python 3.
        rospy.loginfo("Pickup lower target position: %s", str(pose_target2.p))

        path = self.controller_commander.compute_cartesian_path(
            pose_target2, avoid_collisions=False)

        self.state = "plan_pickup_lower"
        self.plan_dictionary['pickup_lower'] = path
        rospy.loginfo("Finish pickup_lower for payload %s",
                      self.current_target)
        self.publish_process_state()

    def move_pickup_lower(self):
        """Execute the planned descent at reduced speed with the payload's
        pickup force/torque threshold active.

        Removed the dead local ``result`` that was assigned but never used.
        """
        self.controller_commander.set_controller_mode(
            self.desired_controller_mode, 0.8 * self.speed_scalar, [],
            self.get_payload_pickup_ft_threshold(self.current_target))
        self.state = "pickup_lower"
        goal = ExecuteTrajectoryGoal()
        goal.trajectory = self.plan_dictionary['pickup_lower']
        self.execute_trajectory_action.send_goal(goal,
                                                 active_cb=self._active_client,
                                                 done_cb=self._finished_client)
        #self.controller_commander.execute(self.plan_dictionary['pickup_lower'])

    def plan_pickup_grab_first_step(self):
        """Plan the descent onto the payload prior to engaging the gripper."""
        #TODO: check change state and target

        rospy.loginfo("Begin pickup_grab for payload %s", self.current_target)

        self.object_target, _ = self._tf_get_object_gripper_target_pose(
            self.current_target)
        descend_target = copy.deepcopy(self.object_target)
        descend_target.p[2] -= 0.15

        descend_path = self.controller_commander.compute_cartesian_path(
            descend_target, avoid_collisions=False)
        self.state = "plan_pickup_grab_first_step"
        self.plan_dictionary['pickup_grab_first_step'] = descend_path
        self.publish_process_state()

    def move_pickup_grab_first_step(self):
        """Execute the planned descent onto the payload.

        Runs at reduced speed with the payload's pickup force/torque
        threshold active so contact aborts the motion. Removed the dead
        local ``result``.
        """
        self.controller_commander.set_controller_mode(self.desired_controller_mode, 0.4*self.speed_scalar, [],\
                                                      self.get_payload_pickup_ft_threshold(self.current_target))

        self.state = "pickup_grab_first_step"
        try:
            goal = ExecuteTrajectoryGoal()
            goal.trajectory = self.plan_dictionary['pickup_grab_first_step']
            self.execute_trajectory_action.send_goal(
                goal,
                active_cb=self._active_client,
                done_cb=self._finished_client)
            #self.controller_commander.execute(self.plan_dictionary['pickup_grab_first_step'])
        except Exception as err:
            # FIX: was a bare Python 2 `print err`; log so the failure
            # reaches rosout instead of being lost on stdout.
            rospy.logerr("pickup_grab_first_step failed: %s", str(err))

    def plan_pickup_grab_second_step(self):
        """Engage the vacuum gripper and plan the lift off the nest.

        Side effects: turns on the vacuum, re-parents the payload to the
        gripper frame, and halts the controller before planning the raise.
        """
        self.rapid_node.set_digital_io("Vacuum_enable", 1)
        time.sleep(1)

        #TODO: check vacuum feedback to make sure we have the panel

        world_to_panel_tf = self.tf_listener.lookupTransform(
            "world", self.current_target, rospy.Time(0))
        world_to_gripper_tf = self.tf_listener.lookupTransform(
            "world", "vacuum_gripper_tool", rospy.Time(0))
        panel_to_gripper_tf = world_to_gripper_tf.inv() * world_to_panel_tf

        # The grabbed target becomes the carried payload.
        self.current_payload = self.current_target
        self.current_target = None

        self._update_payload_pose(self.current_payload, panel_to_gripper_tf,
                                  "vacuum_gripper_tool", 0.5)
        self.controller_commander.set_controller_mode(
            self.controller_commander.MODE_HALT, 1, [], [])
        time.sleep(1)

        pose_target2 = copy.deepcopy(self.object_target)
        pose_target2.p[2] += 0.15

        path = self.controller_commander.compute_cartesian_path(
            pose_target2, avoid_collisions=False)

        self.state = "plan_pickup_grab_second_step"
        self.plan_dictionary['pickup_grab_second_step'] = path
        # FIX: current_target was just cleared to None above, so the log
        # printed "payload None"; log the carried payload instead.
        rospy.loginfo("Finish pickup_grab for payload %s",
                      self.current_payload)
        self.publish_process_state()

    def move_pickup_grab_second_step(self):
        """Execute the planned lift off the nest at reduced speed.

        Removed the dead local ``result`` that was assigned but never used.
        """
        self.controller_commander.set_controller_mode(
            self.desired_controller_mode, 0.4 * self.speed_scalar, [], [])
        self.state = "pickup_grab_second_step"
        goal = ExecuteTrajectoryGoal()
        goal.trajectory = self.plan_dictionary['pickup_grab_second_step']
        self.execute_trajectory_action.send_goal(goal,
                                                 active_cb=self._active_client,
                                                 done_cb=self._finished_client)
        #self.controller_commander.execute(self.plan_dictionary['pickup_grab_second_step'])

    def plan_pickup_raise(self):
        """Plan raising the grabbed payload to a fixed transport pose."""
        #TODO: check change state and target

        rospy.loginfo("Begin pickup_raise for payload %s",
                      self.current_payload)

        #Just use gripper position for now, think up a better way in future
        object_target = self.tf_listener.lookupTransform(
            "world", "vacuum_gripper_tool", rospy.Time(0))
        pose_target2 = copy.deepcopy(object_target)
        # FIX: removed dead `pose_target2.p[2] += 0.8` -- p was immediately
        # overwritten by the hard-coded raise position below.
        pose_target2.p = np.array([-0.02285, -1.840, 1.0])
        pose_target2.R = rox.q2R([0.0, 0.707, 0.707, 0.0])

        path = self.controller_commander.compute_cartesian_path(
            pose_target2, avoid_collisions=False)

        self.state = "plan_pickup_raise"
        self.plan_dictionary['pickup_raise'] = path
        # FIX: this step operates on the carried payload; current_target is
        # None at this point in the cycle.
        rospy.loginfo("Finish pickup_raise for payload %s",
                      self.current_payload)
        self.publish_process_state()

    def move_pickup_raise(self):
        """Execute the planned raise to the transport pose.

        Removed the dead local ``result`` that was assigned but never used.
        """
        self.controller_commander.set_controller_mode(
            self.desired_controller_mode, 0.8 * self.speed_scalar, [], [])
        self.state = "pickup_raise"
        goal = ExecuteTrajectoryGoal()
        goal.trajectory = self.plan_dictionary['pickup_raise']
        self.execute_trajectory_action.send_goal(goal,
                                                 active_cb=self._active_client,
                                                 done_cb=self._finished_client)
        #self.controller_commander.async_execute(self.plan_dictionary['pickup_raise'],result)

    def plan_transport_payload(self, target):
        """Plan the move that carries the current payload toward *target*.

        :param target: name of the target nest/fixture TF frame
        """
        #TODO: check state and payload

        rospy.loginfo("Begin transport_panel for payload %s to %s",
                      self.current_payload, target)

        panel_target_pose = self.tf_listener.lookupTransform(
            "world", target, rospy.Time(0))
        panel_gripper_pose = self.tf_listener.lookupTransform(
            self.current_payload, "vacuum_gripper_tool", rospy.Time(0))
        pose_target = panel_target_pose * panel_gripper_pose
        # NOTE(review): the computed pose is immediately overridden by a
        # hard-coded transport position/orientation below -- presumably a
        # temporary calibration hack; confirm before removing.
        pose_target.p = [
            2.197026484647054, 1.2179574262842452, 0.12376598588449844
        ]
        pose_target.R = np.array([[-0.99804142, 0.00642963, 0.06222524],
                                  [0.00583933, 0.99993626, -0.00966372],
                                  [-0.06228341, -0.00928144, -0.99801535]])
        pose_target.p[2] += 0.35

        plan = self.controller_commander.plan(pose_target)

        self.current_target = target
        self.state = "plan_transport_panel"
        self.plan_dictionary['transport_payload'] = plan
        rospy.loginfo("Finish transport_panel for payload %s to %s",
                      self.current_payload, target)
        self.publish_process_state()

    def move_transport_payload(self):
        """Asynchronously execute the planned 'transport_payload' trajectory."""
        self.controller_commander.set_controller_mode(
            self.desired_controller_mode, 0.8 * self.speed_scalar, [], [])
        exec_result = None
        self.state = "transport_panel"
        plan = self.plan_dictionary['transport_payload']
        self.controller_commander.async_execute(plan, exec_result)
        self.publish_process_state()

    def plan_place_lower(self):
        """Plan lowering the held payload toward the current target nest.

        The goal is the target's world pose composed with the payload-to-gripper
        transform, raised 0.15 m; the plan is stored under 'place_lower'.
        """
        #TODO: check state and payload

        rospy.loginfo("Begin place_lower for payload %s to %s",
                      self.current_payload, self.current_target)

        target_world = self.tf_listener.lookupTransform(
            "world", self.current_target, rospy.Time(0))
        gripper_in_payload = self.tf_listener.lookupTransform(
            self.current_payload, "vacuum_gripper_tool", rospy.Time(0))

        goal_pose = target_world * gripper_in_payload
        # Stop 0.15 m above the seated height.
        goal_pose.p[2] += 0.15

        lower_path = self.controller_commander.plan(goal_pose)

        self.state = "plan_place_lower"
        self.plan_dictionary['place_lower'] = lower_path
        rospy.loginfo("Finish place_lower for payload %s to %s",
                      self.current_payload, self.current_target)
        self.publish_process_state()

    def move_place_lower(self):
        """Asynchronously execute the planned 'place_lower' trajectory."""
        self.controller_commander.set_controller_mode(
            self.desired_controller_mode, 0.8 * self.speed_scalar, [], [])
        exec_result = None
        self.state = "place_lower"
        plan = self.plan_dictionary['place_lower']
        self.controller_commander.async_execute(plan, exec_result)
        self.publish_process_state()

    def plan_place_set_first_step(self):
        """Plan the final 0.15 m straight-line descent that seats the payload.

        Stores the Cartesian path under 'place_set_first_step' in the plan
        dictionary.
        """
        #TODO: check change state and target

        rospy.loginfo("Begin place_set for payload %s target %s",
                      self.current_payload, self.current_target)

        target_world = self.tf_listener.lookupTransform(
            "world", self.current_target, rospy.Time(0))
        gripper_in_payload = self.tf_listener.lookupTransform(
            self.current_payload, "vacuum_gripper_tool", rospy.Time(0))
        seat_pose = copy.deepcopy(target_world * gripper_in_payload)
        # Descend 0.15 m below the approach height to make contact.
        seat_pose.p[2] -= 0.15

        descent_path = self.controller_commander.compute_cartesian_path(
            seat_pose, avoid_collisions=False)
        self.state = "plan_place_set_first_step"
        self.plan_dictionary['place_set_first_step'] = descent_path
        self.publish_process_state()

    def move_place_set_first_step(self):
        """Execute the seating descent, then release the vacuum gripper.

        The motion is started asynchronously and the vacuum is switched off
        one second later (i.e. possibly while the descent is still running).
        """
        # Contact-rich move: attach force/torque thresholds to the controller.
        # NOTE(review): the threshold is looked up by self.current_target,
        # whereas pickup code keys it by payload name — confirm intended.
        self.controller_commander.set_controller_mode(self.desired_controller_mode, 0.4*self.speed_scalar, [], \
                                                      self.get_payload_pickup_ft_threshold(self.current_target))
        result = None
        self.state = "place_set_first_step"
        self.controller_commander.async_execute(
            self.plan_dictionary['place_set_first_step'], result)
        # Release the panel by disabling the vacuum gripper.
        self.rapid_node.set_digital_io("Vacuum_enable", 0)
        time.sleep(1)
        self.publish_process_state()

    def plan_place_set_second_step(self):
        """Detach the payload: re-parent its pose under the nest and clear state.

        Computes the payload's pose relative to ``panel_nest`` from the current
        TF tree, pushes it to the payload pose service, then forgets the
        current payload and target.  The retreat-path planning below is
        commented out, so ``plan_dictionary['place_set_second_step']`` is NOT
        populated here.
        """
        #TODO: check vacuum feedback to make sure we have the panel

        gripper_to_panel_tf = self.tf_listener.lookupTransform(
            "vacuum_gripper_tool", self.current_payload, rospy.Time(0))
        world_to_gripper_tf = self.tf_listener.lookupTransform(
            "world", "vacuum_gripper_tool", rospy.Time(0))
        world_to_panel_nest_tf = self.tf_listener.lookupTransform(
            "world", "panel_nest", rospy.Time(0))
        # Panel pose in the nest frame: nest^-1 * gripper * (gripper->panel).
        panel_to_nest_tf = world_to_panel_nest_tf.inv(
        ) * world_to_gripper_tf * gripper_to_panel_tf

        self._update_payload_pose(self.current_payload, panel_to_nest_tf,
                                  "panel_nest", 0.5)

        self.current_payload = None
        self.current_target = None
        # NOTE(review): the retreat plan is disabled below, so
        # move_place_set_second_step will raise KeyError on
        # 'place_set_second_step' until this block is restored.
        '''
        time.sleep(1)

        pose_target2=copy.deepcopy(pose_target)
        pose_target2.p[2] += 0.15


        path=self.controller_commander.compute_cartesian_path(pose_target2, avoid_collisions=False)


        self.plan_dictionary['place_set_second_step']=path
        rospy.loginfo("Finish place_set for payload %s", self.current_target)
        '''
        self.state = "plan_place_set_second_step"
        self.publish_process_state()

    def move_place_set_second_step(self):
        """Asynchronously execute the planned 'place_set_second_step' trajectory.

        NOTE(review): plan_place_set_second_step currently does not populate
        this plan-dictionary entry, so this lookup raises KeyError — verify.
        """
        self.controller_commander.set_controller_mode(
            self.desired_controller_mode, 0.4 * self.speed_scalar, [], [])
        exec_result = None
        self.state = "place_set_second_step"
        plan = self.plan_dictionary['place_set_second_step']
        self.controller_commander.async_execute(plan, exec_result)
        self.publish_process_state()

    def plan_place_raise(self):
        """Plan a 0.5 m straight-line raise of the gripper after release.

        Stores the Cartesian path under 'place_raise' in the plan dictionary.
        """
        #TODO: check change state and target

        rospy.loginfo("Begin place_raise for payload %s", self.current_payload)

        #Just use gripper position for now, think up a better way in future
        object_target = self.tf_listener.lookupTransform(
            "world", "vacuum_gripper_tool", rospy.Time(0))
        pose_target2 = copy.deepcopy(object_target)
        pose_target2.p[2] += 0.5

        path = self.controller_commander.compute_cartesian_path(
            pose_target2, avoid_collisions=False)

        self.state = "plan_place_raise"
        self.plan_dictionary['place_raise'] = path
        # Log the payload (not the target) to match the "Begin" message above.
        rospy.loginfo("Finish place_raise for payload %s", self.current_payload)
        self.publish_process_state()

    def move_place_raise(self):
        """Asynchronously execute the planned 'place_raise' trajectory."""
        self.controller_commander.set_controller_mode(
            self.desired_controller_mode, 0.8 * self.speed_scalar, [], [])
        exec_result = None
        self.state = "place_raise"
        plan = self.plan_dictionary['place_raise']
        self.controller_commander.async_execute(plan, exec_result)
        self.publish_process_state()

    def _fill_process_state(self):
        """Build a ProcessState message from the current state, payload, and
        target, substituting empty strings for unset (None) fields."""
        msg = ProcessState()
        msg.state = "" if self.state is None else self.state
        msg.payload = "" if self.current_payload is None else self.current_payload
        msg.target = "" if self.current_target is None else self.current_target
        return msg

    def publish_process_state(self):
        """Publish the current process state on the process-state topic."""
        self._process_state_pub.publish(self._fill_process_state())

    def place_lower_temp(self):
        """Stub: allocate 32-entry image-point (2D) and world-point (3D)
        buffers.  Currently has no effect; placeholder for a camera-based
        place-lower routine."""
        image_points = np.zeros((32, 2))
        world_points = np.zeros((32, 3))

    def _update_payload_pose(self,
                             payload_name,
                             pose,
                             parent_frame_id=None,
                             confidence=0.1):
        """Re-express *pose* relative to *parent_frame_id* and push it to the
        payload pose service.

        :param payload_name: name of a payload known to the payload service
        :param pose: transform whose ``parent_frame_id`` attribute names the
            frame it is currently expressed in
        :param parent_frame_id: desired parent frame; defaults to the
            payload's current parent frame
        :param confidence: confidence value forwarded with the update
        :raises Exception: if the update service reports failure
        """
        payload = self._get_payload(payload_name)

        if parent_frame_id is None:
            parent_frame_id = payload.header.frame_id

        # Re-express the pose in the requested parent frame via TF.
        parent_tf = self.tf_listener.lookupTransform(parent_frame_id,
                                                     pose.parent_frame_id,
                                                     rospy.Time(0))
        pose2 = parent_tf.inv() * pose

        req = UpdatePayloadPoseRequest()
        req.name = payload_name
        req.pose = rox_msg.transform2pose_msg(pose2)
        req.header.frame_id = parent_frame_id
        req.confidence = confidence

        res = self.update_payload_pose_srv(req)
        if not res.success:
            raise Exception("Could not update payload pose")

    def _get_payload(self, payload_name):
        """Fetch a single payload message by name from the payload array
        service; raises if the service does not return exactly that payload."""
        res = self.get_payload_array_srv(
            GetPayloadArrayRequest([payload_name]))
        payloads = res.payload_array.payloads
        if len(payloads) != 1 or payloads[0].name != payload_name:
            raise Exception("Invalid payload specified")
        return payloads[0]
class SimulatedVisionServer(object):
    def ros_image_cb(self, ros_data):
        np_arr = np.fromstring(ros_data.data, np.uint8)
        ros_image1 = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)

        if len(ros_image1) > 2 and ros_image1.shape[2] == 4:
            self.ros_image = cv2.cvtColor(ros_image1, cv2.COLOR_BGRA2BGR)
        else:
            self.ros_image = ros_image1
            self.ros_image_stamp = ros_data.header.stamp

    def ros_raw_overhead_image_cb(self, ros_data):
        self.ros_overhead_cam_info.take_image(ros_data)
        self.ros_image_stamp = ros_data.header.stamp

    def ros_raw_gripper_1_image_cb(self, ros_data):
        self.ros_gripper_1_cam_info.take_image(ros_data)
        self.ros_image_stamp = ros_data.header.stamp

    def ros_raw_gripper_2_image_cb(self, ros_data):
        self.ros_gripper_2_cam_info.take_image(ros_data)
        self.ros_image_stamp = ros_data.header.stamp

    def ros_cam_info_cb(self, ros_data):
        if (ros_data.header.frame_id == "overhead_camera"):
            self.ros_overhead_cam_info.fill_data(ros_data)
        if (ros_data.header.frame_id == "gripper_camera_1"):
            self.ros_gripper_1_cam_info.fill_data(ros_data)
        if (ros_data.header.frame_id == "gripper_camera_2"):
            self.ros_gripper_2_cam_info.fill_data(ros_data)

    # TODO: handle different action namespaces, camera namespaces, etc
    def __init__(self,
                 frame_id="world",
                 action_ns_1="recognize_objects",
                 action_ns_2="recognize_objects_gripper"):

        self.bridge = CvBridge()
        self.overhead_server = SimpleActionServer(
            action_ns_1,
            ObjectRecognitionAction,
            execute_cb=self.execute_overhead_callback)
        self.gripper_server = SimpleActionServer(
            action_ns_2,
            ObjectRecognitionAction,
            execute_cb=self.execute_gripper_callback)
        self.recognized_objects = dict()

        self.listener = PayloadTransformListener()
        self.frame_id = "world"
        self.ros_image = None
        self.ros_image_stamp = rospy.Time(0)
        self.last_ros_image_stamp = rospy.Time(0)
        self.ros_overhead_cam_info = CameraInfoStorage()
        self.ros_gripper_1_cam_info = CameraInfoStorage()
        self.ros_gripper_2_cam_info = CameraInfoStorage()

        #TODO: Handle compressed images vs uncompressed images better. Note that
        #the uncompressed 20 MP color images don't seem to be received properly.
        if not "compressed-image" in sys.argv:
            self.ros_overhead_img_sub = rospy.Subscriber(
                '/overhead_camera/image', Image,
                self.ros_raw_overhead_image_cb)
            self.ros_gripper_1_img_sub = rospy.Subscriber(
                '/gripper_camera_1/image', Image,
                self.ros_raw_gripper_1_image_cb)
            self.ros_gripper_2_img_sub = rospy.Subscriber(
                '/gripper_camera_2/image', Image,
                self.ros_raw_gripper_2_image_cb)
        else:
            self.ros_overhead_img_sub = rospy.Subscriber(
                '/overhead_camera/image/compressed', CompressedImage,
                self.ros_image_cb)
            self.ros_gripper_1_img_sub = rospy.Subscriber(
                '/gripper_camera_1/image/compressed', CompressedImage,
                self.ros_image_cb)
            self.ros_gripper_2_img_sub = rospy.Subscriber(
                '/gripper_camera_2/image/compressed', CompressedImage,
                self.ros_image_cb)
        self.ros_overhead_trigger = rospy.ServiceProxy(
            '/overhead_camera/trigger', Trigger)

        self.ros_overhead_cam_info_sub = rospy.Subscriber(
            '/overhead_camera/camera_info', CameraInfo, self.ros_cam_info_cb)

        self.ros_gripper_1_trigger = rospy.ServiceProxy(
            '/gripper_camera_1/trigger', Trigger)
        self.ros_gripper_2_trigger = rospy.ServiceProxy(
            '/gripper_camera_2/trigger', Trigger)
        self.ros_gripper_1_cam_info_sub = rospy.Subscriber(
            '/gripper_camera_1/camera_info', CameraInfo, self.ros_cam_info_cb)
        self.ros_gripper_2_cam_info_sub = rospy.Subscriber(
            '/gripper_camera_2/camera_info', CameraInfo, self.ros_cam_info_cb)

        #self.aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)

        self.payloads = dict()
        self.link_markers = dict()
        self.payloads_lock = threading.Lock()
        self.payloads_sub = rospy.Subscriber("payload", PayloadArray,
                                             self._payload_msg_cb)

    def _payload_msg_cb(self, msg):
        with self.payloads_lock:
            for p in msg.payloads:
                if p.name not in self.payloads:
                    self.payloads[p.name] = p
                else:
                    payload = self.payloads[p.name]
                    #ignore stale data
                    if payload.header.stamp > p.header.stamp:
                        continue
                    self.payloads[p.name] = p

            for l in msg.link_markers:
                if l.header.frame_id not in self.link_markers:
                    self.link_markers[l.header.frame_id] = l
                else:
                    ll = self.link_markers[l.header.frame_id]
                    #ignore stale data
                    if ll.header.stamp > l.header.stamp:
                        continue
                    self.link_markers[l.header.frame_id] = l

            for d in msg.delete_payloads:
                if d in self.payloads:

                    del self.payloads[d]
                if d in self.link_markers:
                    del self.link_markers[d]

    def execute_overhead_callback(self, goal):

        now = rospy.Time.now()

        if self.ros_overhead_cam_info is None:
            raise Exception("Camera Info not received")

        self.last_ros_image_stamp = self.ros_image_stamp

        try:
            self.ros_overhead_trigger.wait_for_service(timeout=0.1)
            self.ros_overhead_trigger()
        except:
            pass

        wait_count = 0
        while self.ros_overhead_cam_info.ros_image is None or self.ros_image_stamp == self.last_ros_image_stamp:
            if wait_count > 250:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1

        print "Past timeout"
        #stored data in local function variable, so changed all references off of self.ros_image
        img = self.ros_overhead_cam_info.ros_image

        if img is None:
            raise Exception("Camera image data not received")
        print "image received"
        r_array = RecognizedObjectArray()
        r_array.header.stamp = now
        r_array.header.frame_id = self.frame_id

        if goal.use_roi:
            raise Warning("use_roi in ObjectRecognitionRequest ignored")

        search_objects = dict()
        with self.payloads_lock:
            for _, p in self.payloads.items():
                search_objects[p.name] = p.markers
            for _, l in self.link_markers.items():
                search_objects[l.header.frame_id] = l.markers

                #TODO: handle multiple aruco dictionaries, currently only use one
        print "past payload lock"
        aruco_dicts = set()
        for m in search_objects.itervalues():
            for m2 in m:
                aruco_dicts.add(m2.marker.dictionary)
        print len(aruco_dicts)
        aruco_dicts.add("DICT_ARUCO_ORIGINAL")
        assert len(
            aruco_dicts
        ) == 1, "Currently all tags must be from the same dictionary"

        if not hasattr(cv2.aruco, next(iter(aruco_dicts))):
            raise ValueError("Invalid aruco-dict value")
        aruco_dict_id = getattr(cv2.aruco, next(iter(aruco_dicts)))
        aruco_dict = cv2.aruco.Dictionary_get(aruco_dict_id)

        c_pose = self.listener.lookupTransform(
            "/world", self.ros_overhead_cam_info.ros_data.header.frame_id,
            rospy.Time(0))
        print "c_pose"
        print c_pose.p
        print c_pose.R
        parameters = cv2.aruco.DetectorParameters_create()
        parameters.cornerRefinementWinSize = 32

        parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            img, aruco_dict, parameters=parameters)
        print ids
        for object_name, payload_markers in search_objects.items():

            tag_object_poses = dict()

            for m in payload_markers:
                board = get_aruco_gridboard(m.marker, aruco_dict)
                #board = cv2.aruco.GridBoard_create(4, 4, .04, .0075, aruco_dict, 16)

                retval, rvec, tvec = cv2.aruco.estimatePoseBoard(
                    corners, ids, board, self.ros_overhead_cam_info.camMatrix,
                    self.ros_overhead_cam_info.distCoeffs)
                print retval
                print rvec
                print tvec
                if (retval > 0):
                    if (m.name == "leeward_mid_panel_marker_1"
                            or m.name == "leeward_tip_panel_marker_1"):
                        print "Found tag: " + m.name

                        try:
                            #o_pose = self.listener.lookupTransform(m.name, object_name, rospy.Time(0))
                            o_pose = rox_msg.msg2transform(m.pose).inv()
                            print object_name
                            print o_pose
                            print ""
                            print "o_pose"
                            print o_pose.p
                            print o_pose.R
                            Ra, b = cv2.Rodrigues(rvec)
                            a_pose = rox.Transform(Ra, tvec)
                            print "a_pose"
                            print a_pose.p
                            print a_pose.R
                            tag_object_poses[m.name] = c_pose * (a_pose *
                                                                 o_pose)

                        except (tf.LookupException, tf.ConnectivityException,
                                tf.ExtrapolationException):
                            continue

                    #TODO: merge tag data if multiple tags detected
            if len(tag_object_poses) > 0:
                p = PoseWithCovarianceStamped()
                p.pose.pose = rox_msg.transform2pose_msg(
                    tag_object_poses.itervalues().next())
                p.header.stamp = now
                p.header.frame_id = self.frame_id

                r = RecognizedObject()
                r.header.stamp = now
                r.header.frame_id = self.frame_id
                r.type.key = object_name
                r.confidence = 1
                r.pose = p

                r_array.objects.append(r)
                """for o in self.object_names:
            try:
                (trans,rot) = self.listener.lookupTransform("/world", o, rospy.Time(0))
                print o
                print trans
                print rot
                print ""

                p=PoseWithCovarianceStamped()
                p.pose.pose=Pose(Point(trans[0], trans[1], trans[2]), Quaternion(rot[0], rot[1], rot[2], rot[3]))
                p.header.stamp=now
                p.header.frame_id=self.frame_id

                r=RecognizedObject()
                r.header.stamp=now
                r.header.frame_id=self.frame_id
                r.type.key=o
                r.confidence=1
                r.pose=p

                r_array.objects.append(r)

                except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                continue"""
        print "Succeeded in finding tags"
        result = ObjectRecognitionResult()
        result.recognized_objects = r_array

        self.overhead_server.set_succeeded(result=result)

    def execute_gripper_callback(self, goal):

        now = rospy.Time.now()

        if self.ros_gripper_1_cam_info is None or self.ros_gripper_2_cam_info is None:
            raise Exception("Camera Info not received")

        self.last_ros_image_stamp = self.ros_image_stamp

        try:
            self.ros_gripper_1_trigger.wait_for_service(timeout=0.1)
            self.ros_gripper_1_trigger()
            self.ros_gripper_2_trigger.wait_for_service(timeout=0.1)
            self.ros_gripper_2_trigger()
        except:
            pass

        wait_count = 0
        while ((self.ros_gripper_1_cam_info.ros_image is None
                and self.ros_gripper_2_cam_info.ros_image is None)
               or self.ros_image_stamp == self.last_ros_image_stamp):
            if wait_count > 250:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1

        print "Past timeout"
        gripper_1_img = ros_gripper_1_cam_info.ros_image
        gripper_2_img = ros_gripper_1_cam_info.ros_image

        if gripper_1_img is None or gripper_2_img is None:
            raise Exception("Camera image data not received")
        print "image received"
        #TODO replace code with gripper camera code for placement
        '''
        r_array=RecognizedObjectArray()
        r_array.header.stamp=now
        r_array.header.frame_id=self.frame_id

        if goal.use_roi:
            raise Warning("use_roi in ObjectRecognitionRequest ignored")


        search_objects=dict()
        with self.payloads_lock:
            for _, p in self.payloads.items():
                search_objects[p.name] = p.markers
            for _, l in self.link_markers.items():
                search_objects[l.header.frame_id] = l.markers

                #TODO: handle multiple aruco dictionaries, currently only use one
        print "past payload lock"
        aruco_dicts=set()
        for m in search_objects.itervalues():
            for m2 in m:
                aruco_dicts.add(m2.marker.dictionary)
        print len(aruco_dicts)

        assert len(aruco_dicts) == 1, "Currently all tags must be from the same dictionary"

        if not hasattr(cv2.aruco, next(iter(aruco_dicts))):
            raise ValueError("Invalid aruco-dict value")
        aruco_dict_id=getattr(cv2.aruco, next(iter(aruco_dicts)))
        aruco_dict = cv2.aruco.Dictionary_get(aruco_dict_id)

        c_pose = self.listener.lookupTransform("/world", self.ros_overhead_cam_info.ros_data.header.frame_id, rospy.Time(0))
        print "c_pose"
        print c_pose.p
        print c_pose.R
        parameters =  cv2.aruco.DetectorParameters_create()
        parameters.cornerRefinementWinSize=32
        parameters.cornerRefinementMethod=cv2.aruco.CORNER_REFINE_CONTOUR
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(self.ros_image, aruco_dict, parameters=parameters)
        for object_name, payload_markers in search_objects.items():

            tag_object_poses=dict()

            for m in payload_markers:
                board = get_aruco_gridboard(m.marker, aruco_dict)
                retval, rvec, tvec = cv2.aruco.estimatePoseBoard(corners, ids, board, self.ros_overhead_cam_info.camMatrix, self.ros_overhead_cam_info.distCoeffs)
                if (retval > 0):
                    if(m.name=="leeward_mid_panel_marker_5"):
                        print "Found tag: " + m.name

                        try:
                            #o_pose = self.listener.lookupTransform(m.name, object_name, rospy.Time(0))
                            o_pose = rox_msg.msg2transform(m.pose).inv()
                            print object_name
                            print o_pose
                            print ""
                            print "o_pose"
                            print o_pose.p
                            print o_pose.R
                            Ra, b = cv2.Rodrigues(rvec)
                            a_pose=rox.Transform(Ra,tvec)
                            print "a_pose"
                            print a_pose.p
                            print a_pose.R
                            tag_object_poses[m.name]=c_pose*(a_pose*o_pose)

                        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                            continue

                    #TODO: merge tag data if multiple tags detected
            if len(tag_object_poses) > 0:
                p=PoseWithCovarianceStamped()
                p.pose.pose=rox_msg.transform2pose_msg(tag_object_poses.itervalues().next())
                p.header.stamp=now
                p.header.frame_id=self.frame_id

                r=RecognizedObject()
                r.header.stamp=now
                r.header.frame_id=self.frker, aruco_dict)
                board = cv2.aruco.GridBoard_createame_id
                r.type.key=object_name
                r.confidence=1
                r.pose=p

                r_array.objects.append(r)

                """for o in self.object_names:
            try:
                (trans,rot) = self.listener.lookupTransform("/world", o, rospy.Time(0))
                print o
                print trans
                print rot
                print ""

                p=PoseWithCovarianceStamped()
                p.pose.pose=Pose(Point(trans[0], trans[1], trans[2]), Quaternion(rot[0], rot[1], rot[2], rot[3]))
                p.header.stamp=now
                p.header.frame_id=self.frame_id

                r=RecognizedObject()
                r.header.stamp=now
                r.header.frame_id=self.frame_id
                r.type.key=o
                r.confidence=1
                r.pose=p

                r_array.objects.append(r)

                except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                continue"""

        result = ObjectRecognitionResult()
        result.recognized_objects=r_array
        '''
        self.gripper_server.set_succeeded(result=result)
class PBVSPlacementController(object):
    def __init__(self, controller_commander, urdf_xml_string, srdf_xml_string,
                 camera_image_topic, camera_trigger_topic, camera_info_topic):
        """Set up the position-based visual servoing (PBVS) placement controller.

        :param controller_commander: robot controller command interface used
            for mode changes and trajectory execution
        :param urdf_xml_string: robot URDF XML, forwarded to the Planner
        :param srdf_xml_string: robot SRDF XML, forwarded to the Planner
        :param camera_image_topic: ROS topic publishing gripper camera images
        :param camera_trigger_topic: ROS service used to trigger a camera frame
        :param camera_info_topic: ROS topic publishing the camera intrinsics

        NOTE: statement order matters below -- the image and controller-state
        subscribers may start delivering callbacks as soon as they are created,
        so the attributes those callbacks touch are initialized around them.
        """
        self.controller_commander = controller_commander
        self.planner = Planner(controller_commander, urdf_xml_string,
                               srdf_xml_string)

        self.tf_listener = PayloadTransformListener()

        # Camera intrinsics are fetched once at construction (1 s timeout).
        self.camera_info = self.get_camera_info(camera_info_topic)

        # Single-slot queue: only the most recent camera frame is retained.
        self.img_queue = Queue.Queue(1)
        self.camera_trigger = rospy.ServiceProxy(camera_trigger_topic, Trigger)
        self.camera_sub = rospy.Subscriber(camera_image_topic, Image,
                                           self._ros_img_cb)

        self.controller_state_sub = rospy.Subscriber("controller_state",
                                                     controllerstate,
                                                     self.ft_cb)
        # Latest force/torque wrench written by ft_cb; None until the first
        # callback arrives.
        self.FTdata = None
        self.ft_flag = False
        # Force/torque bias sample; re-captured at the start of pbvs_jacobian.
        self.FTdata_0 = self.FTdata
        # Compliance controller parameters
        self.F_d_set1 = -100
        self.F_d_set2 = -200
        self.Kc = 0.00002

        self.client = actionlib.SimpleActionClient(
            "joint_trajectory_action", FollowJointTrajectoryAction)
        # Proportional gain for the jacobian-based PBVS step.
        self.K_pbvs = 0.25

    def _ros_img_cb(self, ros_img):
        """Image-topic callback: keep only the newest frame in the queue.

        Converts the ROS image to an OpenCV array, discards any stale frame
        still sitting in the single-slot queue, then stores the new one.
        """
        cv_img = CvBridge().imgmsg_to_cv2(ros_img)
        # Drain the size-1 queue so put_nowait below cannot raise Full.
        try:
            self.img_queue.get_nowait()
        except Queue.Empty:
            pass
        self.img_queue.put_nowait(cv_img)

    def set_goal(self, params_msg):
        """Load placement parameters from a PBVSPlacementGoal-style message.

        Resolves the fixed (nest) and payload markers named by the desired
        transform, caches the aruco dictionary, copies the per-stage gains
        and tolerances, and clears the abort flag.
        """
        self.desired_transform = rox_msg.msg2transform(
            params_msg.desired_transform)
        self.desired_camera2_transform = rox_msg.msg2transform(
            params_msg.desired_camera2_transform)

        self.markers = self.get_all_payload_markers()

        # Fixed (nest) marker and payload marker referenced by the goal.
        self.fixed_marker = self.markers[
            self.desired_transform.parent_frame_id]
        self.payload_marker = self.markers[
            self.desired_transform.child_frame_id]

        self.aruco_dict = self.get_aruco_dictionary(self.fixed_marker)

        # Per-stage convergence tolerances, proportional gains and offsets
        # are copied verbatim from the goal message.
        for attr in ('stage1_tol_p', 'stage1_tol_r', 'stage2_tol_p',
                     'stage2_tol_r', 'stage3_tol_p', 'stage3_tol_r',
                     'stage1_kp', 'stage2_kp', 'stage3_kp',
                     'stage2_z_offset', 'force_ki'):
            setattr(self, attr, getattr(params_msg, attr))

        def _wrench_to_array(w):
            # [tx, ty, tz, fx, fy, fz] ordering (torque first).
            return np.array([w.torque.x, w.torque.y, w.torque.z,
                             w.force.x, w.force.y, w.force.z])

        self.abort_force = _wrench_to_array(params_msg.abort_force)
        self.placement_force = _wrench_to_array(params_msg.placement_force)

        self._aborted = False

    def compute_step_gripper_target_pose(self,
                                         img,
                                         Kp,
                                         no_z=False,
                                         z_offset=0):
        """Compute the next gripper target pose for one PBVS iteration.

        :param img: camera frame containing both aruco boards
        :param Kp: 6-vector gain; [0:3] scales rotation, [3:6] translation
        :param no_z: if True, zero out the z translation error (stage 1)
        :param z_offset: subtracted from the z error so the gripper hovers
            above the nest (stage 2)
        :return: (target pose for vacuum_gripper_tool in world, raw error
            transform before gain scaling)
        """
        fixed_marker_transform, payload_marker_transform, error_transform = self.compute_error_transform(
            img)

        if no_z:
            error_transform.p[2] = 0
        else:
            error_transform.p[2] -= z_offset

        gripper_to_camera_tf = self.tf_listener.lookupTransform(
            "vacuum_gripper_tool", "gripper_camera_2", rospy.Time(0))

        world_to_vacuum_gripper_tool_tf = self.tf_listener.lookupTransform(
            "world", "vacuum_gripper_tool", rospy.Time(0))

        #Scale by Kp
        # Rotation error is scaled in axis-angle form; a near-zero rotation
        # falls back to identity to avoid normalizing a ~zero vector.
        k, theta = rox.R2rot(error_transform.R)
        r = np.multiply(k * theta, Kp[0:3])
        r_norm = np.linalg.norm(r)
        if (r_norm < 1e-6):
            error_transform2_R = np.eye(3)
        else:
            error_transform2_R = rox.rot(r / r_norm, r_norm)
        error_transform2_p = np.multiply(error_transform.p, (Kp[3:6]))

        error_transform2 = rox.Transform(error_transform2_R,
                                         error_transform2_p)

        gripper_to_fixed_marker_tf = gripper_to_camera_tf * fixed_marker_transform
        gripper_to_desired_fixed_marker_tf = gripper_to_fixed_marker_tf * error_transform2

        #print gripper_to_fixed_marker_tf

        # Apply the scaled error, expressed through the fixed-marker frame,
        # to the current world pose of the gripper tool.
        ret = world_to_vacuum_gripper_tool_tf * (
            gripper_to_desired_fixed_marker_tf *
            gripper_to_fixed_marker_tf.inv()).inv()

        #print world_to_vacuum_gripper_tool_tf
        #print ret

        #print error_transform

        return ret, error_transform

    def compute_error_transform(self, img):
        """Detect both markers in img and return (fixed_tf, payload_tf, error_tf).

        error_tf is the offset between the observed fixed-to-payload marker
        transform and the desired transform configured by set_goal.
        """
        fixed_tf = self.detect_marker(img, self.fixed_marker)
        payload_tf = self.detect_marker(img, self.payload_marker)
        observed = fixed_tf.inv() * payload_tf
        err = observed * self.desired_transform.inv()
        return fixed_tf, payload_tf, err

    def receive_image(self):
        """Trigger a camera capture and wait (up to 10 s) for the frame."""
        # Discard any stale frame so the returned image postdates the trigger.
        try:
            self.img_queue.get_nowait()
        except Queue.Empty:
            pass
        self.camera_trigger()
        return self.img_queue.get(True, 10.0)

    def get_all_payload_markers(self):
        """Return {marker_name: marker} for every marker in the next
        PayloadArray message on the "payload" topic (payloads first, then
        link markers)."""
        payload_array = rospy.wait_for_message("payload", PayloadArray,
                                               timeout=1)
        markers = {}
        for group in (payload_array.payloads, payload_array.link_markers):
            for p in group:
                for m in p.markers:
                    markers[m.name] = m.marker
        return markers

    def get_aruco_dictionary(self, marker):
        """Look up the cv2.aruco predefined dictionary named by marker.dictionary.

        :raises ValueError: when the name is not a cv2.aruco attribute
        """
        dict_name = marker.dictionary
        if not hasattr(cv2.aruco, dict_name):
            raise ValueError("Invalid aruco-dict value")
        return cv2.aruco.Dictionary_get(getattr(cv2.aruco, dict_name))

    def get_aruco_gridboard(self, marker):
        """Create a cv2.aruco GridBoard matching a payload marker definition.

        marker.dictionary may be either the name of a cv2.aruco predefined
        dictionary (e.g. "DICT_6X6_250") or the integer dictionary id itself.

        :param marker: marker description with markersX/markersY/markerLength/
            markerSpacing/firstMarker and a `dictionary` field
        :raises ValueError: if marker.dictionary is not a valid name or id
        :return: the constructed cv2.aruco GridBoard
        """
        if isinstance(marker.dictionary, basestring):
            # Named dictionaries must be one of the DICT_* predefined sets.
            if not marker.dictionary.startswith('DICT_'):
                raise ValueError("Invalid aruco-dict value")
            aruco_dict = self.get_aruco_dictionary(marker)
        elif isinstance(marker.dictionary, int):
            # Bug fix: the original resolved the dictionary by attribute name
            # unconditionally first, which raised before an integer id could
            # ever reach this branch (and it also left an unused variable in
            # the fall-through case).
            aruco_dict = cv2.aruco.Dictionary_get(marker.dictionary)
        else:
            raise ValueError("Invalid aruco-dict value")

        board = cv2.aruco.GridBoard_create(marker.markersX, marker.markersY,
                                           marker.markerLength,
                                           marker.markerSpacing, aruco_dict,
                                           marker.firstMarker)
        return board

    def get_camera_info(self, camera_info_topic):
        """Wait (1 s timeout) for one CameraInfo message on the given topic."""
        msg = rospy.wait_for_message(camera_info_topic, CameraInfo, timeout=1)
        return msg

    def detect_marker(self, img, marker):
        """Estimate the pose of a marker grid board in a grayscale image.

        Side effects: opens two cv2.imshow debug windows (detected markers
        and the board axis overlay) at quarter scale.

        :param img: grayscale camera frame (converted with COLOR_GRAY2RGB below)
        :param marker: marker description used to build the grid board
        :raises Exception: if the board pose cannot be estimated from img
        :return: rox.Transform of the board in the camera frame
        """
        camMatrix = np.reshape(self.camera_info.K, (3, 3))
        distCoeffs = np.array(self.camera_info.D)

        parameters = cv2.aruco.DetectorParameters_create()
        parameters.cornerRefinementWinSize = 32
        parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_CONTOUR
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            img, self.aruco_dict, parameters=parameters)

        # Debug view of all detected markers.
        img2 = cv2.aruco.drawDetectedMarkers(img, corners, ids)
        img3 = cv2.resize(img2, (0, 0), fx=0.25, fy=0.25)
        cv2.imshow("", img3)
        cv2.waitKey(1)

        board = self.get_aruco_gridboard(marker)

        retval, rvec, tvec = cv2.aruco.estimatePoseBoard(
            corners, ids, board, camMatrix, distCoeffs)

        if (retval <= 0):
            #cv2.waitKey()
            raise Exception("Invalid image")

        # Convert the OpenCV rotation vector to a rotation matrix.
        Ra, b = cv2.Rodrigues(rvec)
        a_pose = rox.Transform(Ra, tvec)

        frame_with_markers_and_axis = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, camMatrix, distCoeffs, rvec, tvec,
            0.2)
        frame_with_markers_and_axis = cv2.resize(frame_with_markers_and_axis,
                                                 (0, 0),
                                                 fx=0.25,
                                                 fy=0.25)
        cv2.imshow("transform", frame_with_markers_and_axis)
        cv2.waitKey(1)

        return a_pose

    def pbvs(self,
             kp,
             tols_p,
             tols_r,
             abort_force,
             max_iters=25,
             no_z=False,
             z_offset=0):
        """Iterate position-based visual servoing until within tolerance.

        Each pass captures an image, computes a gripper correction, plans the
        step with trajopt and executes it with the given FT abort threshold.

        :param kp: 6-vector proportional gain for the pose step
        :param tols_p: translational convergence tolerance
        :param tols_r: rotational convergence tolerance (rad)
        :param abort_force: FT threshold forwarded to set_controller_mode
        :param max_iters: iteration cap before raising a timeout
        :param no_z: ignore z error (convergence then measured in x/y only)
        :param z_offset: z hover offset subtracted from the error
        :return: (final translational error, final rotational error)
        :raises Exception: on iteration timeout or when abort() was called
        """
        i = 0
        while True:

            if i > max_iters:
                raise Exception("Placement controller timeout")

            # Abort flag is checked both before imaging and again before
            # executing the planned trajectory.
            if self._aborted:
                raise Exception("Operation aborted")

            img = self.receive_image()

            target_pose, err = self.compute_step_gripper_target_pose(
                img, kp, no_z=no_z, z_offset=z_offset)

            err_p = np.linalg.norm(err.p)
            if no_z:
                err_p = np.linalg.norm(err.p[0:2])
            err_r = np.abs(rox.R2rot(err.R)[1])

            if err_p < tols_p and err_r < tols_r:
                return err_p, err_r

            #target_pose.p[1]-=0.01 # Offset a little to avoid panel overlap
            plan = self.planner.trajopt_plan(
                target_pose, json_config_name="panel_placement")

            if self._aborted:
                raise Exception("Operation aborted")
            self.controller_commander.set_controller_mode(
                self.controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [],
                abort_force)
            self.controller_commander.execute_trajectory(plan)
            i += 1

    def pbvs_stage1(self):
        print "stage 1"
        return self.pbvs(self.stage1_kp,
                         self.stage1_tol_p,
                         self.stage1_tol_r,
                         self.abort_force,
                         no_z=True)

    def pbvs_stage2(self):
        print "stage 2"
        return self.pbvs(self.stage2_kp,
                         self.stage2_tol_p,
                         self.stage2_tol_r,
                         self.abort_force,
                         z_offset=self.stage2_z_offset)

    def pbvs_stage3(self):
        print "stage 3"
        return self.pbvs(self.stage3_kp, self.stage3_tol_p, self.stage3_tol_r,
                         self.abort_force)

    def abort(self):
        """Request an abort: set the flag (polled by the pbvs loops) first,
        then stop any trajectory currently executing."""
        self._aborted = True
        self.controller_commander.stop_trajectory()

    # Added by YC
    def ft_cb(self, data):
        """controller_state callback: cache the FT wrench as
        [tx, ty, tz, fx, fy, fz] and mark that data has arrived."""
        w = data.ft_wrench
        self.FTdata = np.array([w.torque.x, w.torque.y, w.torque.z,
                                w.force.x, w.force.y, w.force.z])
        self.ft_flag = True

    def trapezoid_gen(self, target, current_joint_angles, acc, dcc, vmax):
        """Build a 4-point FollowJointTrajectoryGoal following a trapezoidal
        velocity profile from current_joint_angles to target.

        :param target: goal joint vector (6 joints)
        :param current_joint_angles: starting joint vector
        :param acc: profile acceleration
        :param dcc: profile deceleration
        :param vmax: profile peak velocity
        :return: FollowJointTrajectoryGoal with start, end-of-acceleration,
            start-of-deceleration and final points
        """
        goal = FollowJointTrajectoryGoal()
        goal.trajectory.joint_names = [
            'joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6'
        ]
        goal.trajectory.header.frame_id = '/world'

        dist = np.array(target - current_joint_angles)
        # Time the profile on the joint that must travel the farthest.
        xf = max(abs(dist))
        [x0, v0, a0, ta, tb, tf] = trapgen(0, xf, 0, 0, vmax, acc, dcc, 0)
        [xa, va, aa, ta, tb, tf] = trapgen(0, xf, 0, 0, vmax, acc, dcc, ta)
        [xb, vb, ab, ta, tb, tf] = trapgen(0, xf, 0, 0, vmax, acc, dcc, tb)

        start = np.array(current_joint_angles)
        # Start, end-of-acceleration and start-of-deceleration knot points.
        for positions, t in ((current_joint_angles, 0),
                             (start + dist * xa, ta),
                             (start + dist * xb, tb)):
            pt = JointTrajectoryPoint()
            pt.positions = positions
            pt.time_from_start = rospy.Duration(t)
            goal.trajectory.points.append(pt)

        # Final point comes to rest exactly at the target.
        end = JointTrajectoryPoint()
        end.positions = target
        end.velocities = np.zeros((6, ))
        end.accelerations = np.zeros((6, ))
        end.time_from_start = rospy.Duration(tf)
        goal.trajectory.points.append(end)

        return goal

    def test(self):
        img = cv2.imread('12_57_46_523.png')
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        fixed_marker_transform, payload_marker_transform, error_transform = self.compute_error_transform(
            img)
        tag_to_tag_transform = fixed_marker_transform.inv(
        ) * payload_marker_transform
        print "Test1:", fixed_marker_transform, payload_marker_transform
        print "Test2:", tag_to_tag_transform
        print "Test3:", error_transform

    def pbvs_jacobian(self):
        """Final placement: jacobian-based visual servoing fused with z-axis
        force compliance.

        Loops until the marker error is within stage-3 tolerance (z error
        below 1 cm, x/y error below stage3_tol_p, rotation below 1 degree).
        Each pass maps the camera-frame pose error to joint velocities via
        QP_abbirb6640, adds a compliance term from the FT sensor, and sends
        one short trapezoidal joint move. The run is logged to a timestamped
        .mat file at the end.

        :raises Exception: if FT data never arrived or a trajectory fails
        """
        self.controller_commander.set_controller_mode(
            self.controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [], [])
        tvec_err = [100, 100, 100]
        rvec_err = [100, 100, 100]

        # Re-bias the FT sensor against its current reading.
        self.FTdata_0 = self.FTdata

        # Start from a deliberately large error so the loop always runs once.
        error_transform = rox.Transform(rox.rpy2R([2, 2, 2]),
                                        np.array([100, 100, 100]))

        # Per-iteration logs saved to .mat after convergence.
        FT_data_ori = []
        FT_data_biased = []
        err_data_p = []
        err_data_rpy = []
        joint_data = []
        time_data = []
        #TODO: should listen to stage_3_tol_r not 1 degree
        print self.desired_camera2_transform
        #R_desired_cam = self.desired_camera2_transform.R.dot(self.desired_transform.R)
        #R_desired_cam = R_desired_cam.dot(self.desired_camera2_transform.R.transpose())
        #t_desired_cam = -self.desired_camera2_transform.R.dot(self.desired_transform.p)

        while (error_transform.p[2] > 0.01 or np.linalg.norm(
            [error_transform.p[0], error_transform.p[1]]) > self.stage3_tol_p
               or
               np.linalg.norm(rox.R2rpy(error_transform.R)) > np.deg2rad(1)):

            img = self.receive_image()

            fixed_marker_transform, payload_marker_transform, error_transform = self.compute_error_transform(
                img)
            #print self.desired_transform.R.T, -fixed_marker_transform.R.dot(self.desired_transform.p)

            # Desired relative rotation/translation expressed in the camera frame.
            R_desired_cam = fixed_marker_transform.R.dot(
                self.desired_transform.R)
            R_desired_cam = R_desired_cam.dot(
                fixed_marker_transform.R.transpose())
            t_desired_cam = -fixed_marker_transform.R.dot(
                self.desired_transform.p)

            # Compute error directly in the camera frame
            observed_R_difference = np.dot(
                payload_marker_transform.R,
                fixed_marker_transform.R.transpose())
            k, theta = rox.R2rot(
                np.dot(observed_R_difference, R_desired_cam.transpose())
            )  #np.array(rox.R2rpy(rvec_err1))
            rvec_err1 = k * theta

            observed_tvec_difference = fixed_marker_transform.p - payload_marker_transform.p
            tvec_err1 = -fixed_marker_transform.R.dot(
                self.desired_transform.p) - observed_tvec_difference
            #print 'SPOT: ',tvec_err1, rvec_err1
            # Map error to the robot spatial velocity
            world_to_camera_tf = self.tf_listener.lookupTransform(
                "world", "gripper_camera_2", rospy.Time(0))
            camera_to_link6_tf = self.tf_listener.lookupTransform(
                "gripper_camera_2", "link_6", rospy.Time(0))

            t21 = -np.dot(
                rox.hat(
                    np.dot(world_to_camera_tf.R,
                           (camera_to_link6_tf.p -
                            payload_marker_transform.p.reshape((1, 3))).T)),
                world_to_camera_tf.R)  #np.zeros((3,3))#

            # v = R_oc(vc)c + R_oc(omeega_c)_c x (r_pe)_o = R_oc(vc)c - (r_pe)_o x R_oc(omeega_c)_c
            tvec_err = t21.dot(rvec_err1).reshape(
                (3, )) + world_to_camera_tf.R.dot(tvec_err1).reshape((3, ))
            # omeega = R_oc(omeega_c)_c
            rvec_err = world_to_camera_tf.R.dot(rvec_err1).reshape((3, ))

            # Clamp the commanded step so each correction stays small.
            tvec_err = np.clip(tvec_err, -0.2, 0.2)
            rvec_err = np.clip(rvec_err, -np.deg2rad(5), np.deg2rad(5))

            # Once close in z, let the compliance term alone drive descent.
            if tvec_err[2] < 0.03:
                rospy.loginfo("Only Compliance Control")
                tvec_err[2] = 0

            rot_err = rox.R2rpy(error_transform.R)
            rospy.loginfo("tvec difference: %f, %f, %f", error_transform.p[0],
                          error_transform.p[1], error_transform.p[2])
            rospy.loginfo("rvec difference: %f, %f, %f", rot_err[0],
                          rot_err[1], rot_err[2])

            # Proportional spatial-velocity command [omega; v].
            dx = -np.concatenate((rvec_err, tvec_err)) * self.K_pbvs

            print np.linalg.norm([error_transform.p[0], error_transform.p[1]])
            print np.linalg.norm(rox.R2rpy(error_transform.R))

            # Compliance Force Control
            if (not self.ft_flag):
                raise Exception("havent reached FT callback")
            # Compute the exteranl force
            FTread = self.FTdata - self.FTdata_0  # (F)-(F0)
            print '================ FT1 =============:', FTread
            print '================ FT2 =============:', self.FTdata

            # Two-level force setpoint selected by the current contact force.
            if FTread[-1] > (self.F_d_set1 + 50):
                F_d = self.F_d_set1
            else:
                F_d = self.F_d_set2

            if (self.FTdata == 0).all():
                rospy.loginfo("FT data overflow")
                dx[-1] += self.K_pbvs * 0.004
            else:
                # Extra z correction when the x torque magnitude exceeds 80.
                tx_correct = 0
                if abs(self.FTdata[0]) > 80:
                    tx_correct = 0.0002 * (abs(self.FTdata[0]) - 80)

                Vz = self.Kc * (F_d - FTread[-1]) + tx_correct
                dx[-1] = dx[-1] + Vz

            print "dx:", dx

            current_joint_angles = self.controller_commander.get_current_joint_values(
            )
            # Map the spatial velocity to joint velocities via the QP solver.
            joints_vel = QP_abbirb6640(
                np.array(current_joint_angles).reshape(6, 1), np.array(dx))
            goal = self.trapezoid_gen(
                np.array(current_joint_angles) + joints_vel.dot(1),
                np.array(current_joint_angles), 0.008, 0.008,
                0.015)  #acc,dcc,vmax)

            print "joints_vel:", joints_vel

            self.client.wait_for_server()
            self.client.send_goal(goal)
            self.client.wait_for_result()
            res = self.client.get_result()
            if (res.error_code != 0):
                raise Exception("Trajectory execution returned error")

            FT_data_ori.append(self.FTdata)
            FT_data_biased.append(FTread)
            err_data_p.append(error_transform.p)
            err_data_rpy.append(rot_err)
            joint_data.append(current_joint_angles)
            time_data.append(time.time())

        filename_pose = "/home/rpi-cats/Desktop/YC/Data/Panel2_Placement_In_Nest_Pose_" + str(
            time.time()) + ".mat"
        scipy.io.savemat(filename_pose,
                         mdict={
                             'FT_data_ori': FT_data_ori,
                             'FT_data_biased': FT_data_biased,
                             'err_data_p': err_data_p,
                             'err_data_rpy': err_data_rpy,
                             'joint_data': joint_data,
                             'time_data': time_data
                         })

        rospy.loginfo("End  ====================")
    def __init__(self,
                 frame_id="world",
                 action_ns_1="recognize_objects",
                 action_ns_2="recognize_objects_gripper"):
        """Set up the object-recognition action servers and camera plumbing.

        :param frame_id: frame in which recognized object poses are reported
        :param action_ns_1: action namespace for overhead recognition
        :param action_ns_2: action namespace for gripper-camera recognition

        NOTE(review): this __init__ appears inside PBVSPlacementController but
        does not match its other methods -- it looks like a misplaced vision
        server initializer; confirm against the file's intended structure.
        """
        self.bridge = CvBridge()
        self.overhead_server = SimpleActionServer(
            action_ns_1,
            ObjectRecognitionAction,
            execute_cb=self.execute_overhead_callback)
        self.gripper_server = SimpleActionServer(
            action_ns_2,
            ObjectRecognitionAction,
            execute_cb=self.execute_gripper_callback)
        self.recognized_objects = dict()

        self.listener = PayloadTransformListener()
        # Bug fix: the frame_id parameter was previously ignored and "world"
        # was always assigned regardless of the caller's argument.
        self.frame_id = frame_id
        self.ros_image = None
        self.ros_image_stamp = rospy.Time(0)
        self.last_ros_image_stamp = rospy.Time(0)
        self.ros_overhead_cam_info = CameraInfoStorage()
        self.ros_gripper_1_cam_info = CameraInfoStorage()
        self.ros_gripper_2_cam_info = CameraInfoStorage()

        #TODO: Handle compressed images vs uncompressed images better. Note that
        #the uncompressed 20 MP color images don't seem to be received properly.
        if not "compressed-image" in sys.argv:
            self.ros_overhead_img_sub = rospy.Subscriber(
                '/overhead_camera/image', Image,
                self.ros_raw_overhead_image_cb)
            self.ros_gripper_1_img_sub = rospy.Subscriber(
                '/gripper_camera_1/image', Image,
                self.ros_raw_gripper_1_image_cb)
            self.ros_gripper_2_img_sub = rospy.Subscriber(
                '/gripper_camera_2/image', Image,
                self.ros_raw_gripper_2_image_cb)
        else:
            self.ros_overhead_img_sub = rospy.Subscriber(
                '/overhead_camera/image/compressed', CompressedImage,
                self.ros_image_cb)
            self.ros_gripper_1_img_sub = rospy.Subscriber(
                '/gripper_camera_1/image/compressed', CompressedImage,
                self.ros_image_cb)
            self.ros_gripper_2_img_sub = rospy.Subscriber(
                '/gripper_camera_2/image/compressed', CompressedImage,
                self.ros_image_cb)
        self.ros_overhead_trigger = rospy.ServiceProxy(
            '/overhead_camera/trigger', Trigger)

        self.ros_overhead_cam_info_sub = rospy.Subscriber(
            '/overhead_camera/camera_info', CameraInfo, self.ros_cam_info_cb)

        self.ros_gripper_1_trigger = rospy.ServiceProxy(
            '/gripper_camera_1/trigger', Trigger)
        self.ros_gripper_2_trigger = rospy.ServiceProxy(
            '/gripper_camera_2/trigger', Trigger)
        self.ros_gripper_1_cam_info_sub = rospy.Subscriber(
            '/gripper_camera_1/camera_info', CameraInfo, self.ros_cam_info_cb)
        self.ros_gripper_2_cam_info_sub = rospy.Subscriber(
            '/gripper_camera_2/camera_info', CameraInfo, self.ros_cam_info_cb)

        #self.aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)

        self.payloads = dict()
        self.link_markers = dict()
        self.payloads_lock = threading.Lock()
        self.payloads_sub = rospy.Subscriber("payload", PayloadArray,
                                             self._payload_msg_cb)
class ProcessController(object):
    def __init__(self, disable_ft=False):
        """Set up the panel pickup/placement process controller.

        :param disable_ft: when True, force/torque pickup thresholds are
            disabled (get_payload_pickup_ft_threshold returns [])
        """
        self.lock = threading.Lock()
        urdf_xml_string = rospy.get_param("robot_description")
        srdf_xml_string = rospy.get_param("robot_description_semantic")
        self.urdf = URDF.from_parameter_server()
        self.overhead_vision_client = actionlib.SimpleActionClient(
            "recognize_objects", ObjectRecognitionAction)
        self.rapid_node = rapid_node_pkg.RAPIDCommander()
        self.controller_commander = ControllerCommander()
        self.state = 'init'
        self.current_target = None
        self.current_payload = None
        # Payload keys this controller knows how to pick up.
        self.available_payloads = {
            'leeward_mid_panel': 'leeward_mid_panel',
            'leeward_tip_panel': 'leeward_tip_panel'
        }
        self.desired_controller_mode = self.controller_commander.MODE_AUTO_TRAJECTORY
        self.speed_scalar = 1.0
        self.disable_ft = disable_ft

        self.tf_listener = PayloadTransformListener()
        # Latched so late subscribers still receive the most recent state.
        self._process_state_pub = rospy.Publisher("process_state",
                                                  ProcessState,
                                                  queue_size=100,
                                                  latch=True)
        self.publish_process_state()
        self.placement_client = actionlib.SimpleActionClient(
            'placement_step', PBVSPlacementAction)
        #self.placement_client.wait_for_server()
        self.update_payload_pose_srv = rospy.ServiceProxy(
            "update_payload_pose", UpdatePayloadPose)
        self.get_payload_array_srv = rospy.ServiceProxy(
            "get_payload_array", GetPayloadArray)
        self._goal_handle = None
        self._goal_handle_lock = threading.RLock()
        self.subprocess_handle = None
        # Cached plans keyed by step name, and recorded step-start poses
        # consumed by plan_rewind_motion.
        self.plan_dictionary = {}
        self.process_starts = {}
        self.process_index = None
        # Ordered process steps; process_index indexes into this list.
        self.process_states = [
            "reset_position", "pickup_prepare", "pickup_lower",
            "pickup_grab_first_step", "pickup_grab_second_step",
            "pickup_raise", "transport_payload", "place_payload",
            "gripper_release"
        ]

        self.planner = Planner(self.controller_commander, urdf_xml_string,
                               srdf_xml_string)

    def _vision_get_object_pose(self, key):
        """Run overhead object recognition and return the pose of object `key`.

        :raises Exception: if the recognizer does not report `key`
        """
        self.overhead_vision_client.wait_for_server()

        # Effectively unbounded ROI: recognize everything in view.
        goal = ObjectRecognitionGoal(False, [-1e10, 1e10, -1e10, 1e10])
        self.overhead_vision_client.send_goal(goal)
        self.overhead_vision_client.wait_for_result()
        result = self.overhead_vision_client.get_result()

        for obj in result.recognized_objects.objects:
            if obj.type.key != key:
                continue
            pose = rox_msg.msg2transform(obj.pose.pose.pose)
            pose.parent_frame_id = obj.pose.header.frame_id
            pose.child_frame_id = key
            return pose

        raise Exception("Requested object not found")

    def _vision_get_object_gripper_target_pose(self, key):
        """Return (gripper_target_pose, object_pose) for `key` using vision."""
        object_pose = self._vision_get_object_pose(key)
        gripper_rel = self.tf_listener.lookupTransform(
            key, key + "_gripper_target", rospy.Time(0))
        gripper_target = object_pose * gripper_rel
        return gripper_target, object_pose

    def _tf_get_object_gripper_target_pose(self, key):
        """Return (gripper_target_pose, object_pose) for `key` from tf.

        :raises Exception: if the payload's pose confidence is below 0.8
        """
        payload = self._get_payload(key)
        if payload.confidence < 0.8:
            raise Exception("Payload confidence too low for tf lookup")

        object_pose = self.tf_listener.lookupTransform("/world", key,
                                                       rospy.Time(0))
        gripper_rel = self.tf_listener.lookupTransform(
            key, key + "_gripper_target", rospy.Time(0))
        return object_pose * gripper_rel, object_pose

    def get_payload_pickup_ft_threshold(self, payload):
        """Force/torque threshold for picking up `payload`; [] when FT checks
        are disabled."""
        if self.disable_ft:
            return []
        first_target = self._get_payload(payload).gripper_targets[0]
        return first_target.pickup_ft_threshold

    def get_state(self):
        """Return the current process state name (one of process_states or 'init')."""
        return self.state

    def get_current_pose(self):
        """Return the current robot pose as a ROS message."""
        return self.controller_commander.get_current_pose_msg()

    def cancel_step(self, goal=None):
        """Cancel the current process step by stopping robot motion
        (holds the goal-handle lock; goal itself is not used here)."""
        with self._goal_handle_lock:
            self.stop_motion()

    def stop_motion(self):
        """Stop any executing trajectory while holding the goal-handle lock."""
        with self._goal_handle_lock:
            self.controller_commander.stop_trajectory()

    def plan_rewind_motion(self, goal):
        """Plan a motion rewinding the process to a previously recorded
        step-start pose and cache it as 'rewind_motion'.

        Rewinding is refused (the step fails) from process indices with no
        recorded rewind target: None, 0, 4, 5 and 8.
        """
        no_rewind_list = [None, 0, 4, 5, 8]
        self._begin_step(goal)
        if self.process_index in no_rewind_list:
            self._step_failed(
                "Rewind Unavailable, Please Manually Reposition Robot", goal)
            return
        try:
            # From index 3 the rewind target is one state further back.
            if self.process_index == 3:
                self.process_index -= 1
            state_name = self.process_states[self.process_index]
            rewind_target_pose = self.process_starts[state_name]
            path = self._plan(rewind_target_pose,
                              config="reposition_robot",
                              smoother_config="reposition_robot_smoother")
            self.plan_dictionary['rewind_motion'] = path
            self._step_complete(goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def move_rewind_motion(self, mode, goal):
        """Execute the rewind path cached by plan_rewind_motion, then step the
        process index (and state) back by one.

        :param mode: controller mode used to execute the path
        :param goal: process-step action goal being serviced
        """
        self._begin_step(goal)
        try:
            self.controller_commander.set_controller_mode(
                mode, self.speed_scalar, [], [])
            path = self.plan_dictionary['rewind_motion']

            self._execute_path(path, goal)
            self.process_index -= 1
            self.state = self.process_states[self.process_index]

        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def plan_reset_position(self, goal=None):
        """Plan a move back to the fixed reset pose and cache the path as
        'reset_position'."""
        self._begin_step(goal)
        try:
            # Hard-coded reset pose: quaternion Q and position P.
            Q = [0.02196692, -0.10959773, 0.99369529, -0.00868731]
            P = [1.8475985, -0.04983688, 0.82486047]

            rospy.loginfo("Planning to reset position")
            self.state = "reset_position"
            self.process_index = 0

            pose_target = rox.Transform(rox.q2R(Q), np.copy(P))

            self.plan_dictionary['reset_position'] = self._plan(
                pose_target,
                config="reposition_robot",
                smoother_config="reposition_robot_smoother")

            self._step_complete(goal)

        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def move_reset_position(self,
                            mode=ControllerCommander.MODE_AUTO_TRAJECTORY,
                            goal=None):
        """Execute the path cached by plan_reset_position.

        The controller is halted first, then switched into `mode` before the
        cached 'reset_position' path is executed.
        """
        self._begin_step(goal)
        try:
            cc = self.controller_commander
            cc.set_controller_mode(cc.MODE_HALT, self.speed_scalar, [], [])
            cc.set_controller_mode(mode, self.speed_scalar, [], [])
            self._execute_path(self.plan_dictionary['reset_position'], goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def place_panel(self, target_payload, goal=None):
        """Start the PBVS placement step for target_payload via the
        placement_step action server.

        The placement goal is sent asynchronously; done_cb marks the process
        step complete on success or aborts the step with the action's error
        message otherwise.
        """
        self._begin_step(goal)

        def done_cb(status, result):
            rospy.loginfo("ibvs placement generated success")
            if (goal is not None):
                if status == actionlib.GoalStatus.SUCCEEDED:
                    self._step_complete(goal)
                else:
                    # Placement failed: detach this goal handle and abort the
                    # step, forwarding the placement action's error message.
                    with self._goal_handle_lock:
                        if self._goal_handle == goal:
                            self._goal_handle = None
                    res = ProcessStepResult()
                    res.state = self.state
                    res.target = self.current_target if self.current_target is not None else ""
                    res.payload = self.current_payload if self.current_payload is not None else ""
                    res.error_msg = str(result.error_msg)
                    goal.set_aborted(result=res)
                    rospy.loginfo("pbvs placement generated: %s",
                                  result.error_msg)

        # Record this step's start pose so plan_rewind_motion can return here.
        self.process_index = 7
        self.process_starts[self.process_states[
            self.process_index]] = self.get_current_pose()
        with self._goal_handle_lock:

            placement_goal = PBVSPlacementGoal()
            placement_goal.desired_transform = self.load_placement_target_config(
                target_payload)
            # Hard-coded per-stage gains and tolerances for the servo loop.
            placement_goal.stage1_kp = np.array([0.9] * 6)
            placement_goal.stage2_kp = np.array([0.9] * 6)
            placement_goal.stage3_kp = np.array([0.5] * 6)
            placement_goal.stage1_tol_p = 0.05
            placement_goal.stage1_tol_r = np.deg2rad(1)
            placement_goal.stage2_tol_p = 0.05
            placement_goal.stage2_tol_r = np.deg2rad(1)
            placement_goal.stage3_tol_p = 0.001
            placement_goal.stage3_tol_r = np.deg2rad(0.2)
            placement_goal.stage2_z_offset = 0.05

            placement_goal.abort_force = Wrench(Vector3(500, 500, 500),
                                                Vector3(100, 100, 100))
            placement_goal.placement_force = Wrench(Vector3(0, 0, 300),
                                                    Vector3(0, 0, 0))
            placement_goal.force_ki = np.array([1e-6] * 6)

            client_handle = self.placement_client.send_goal(placement_goal,
                                                            done_cb=done_cb)

    def plan_pickup_prepare(self, target_payload, goal=None):
        """Plan a move to a pose 0.5 m above *target_payload*, locating the
        payload with the overhead vision system.

        Stores the plan in self.plan_dictionary['pickup_prepare'], sets
        state to "plan_pickup_prepare", and reports completion/failure on
        *goal* (an actionlib goal handle) when one is supplied.
        """

        self._begin_step(goal)
        try:
            rospy.loginfo("Begin pickup_prepare for payload %s",
                          target_payload)

            # Vision lookup returns both the gripper target pose and the
            # payload's own pose.
            object_target, object_pose = self._vision_get_object_gripper_target_pose(
                target_payload)

            # Push the refreshed payload pose into the planning scene.
            self._update_payload_pose(target_payload,
                                      object_pose,
                                      parent_frame_id="pickup_nest",
                                      confidence=0.8)

            rospy.loginfo("Found payload %s at pose %s", target_payload,
                          object_target)

            self.pose_target = copy.deepcopy(object_target)
            pose_target = self.pose_target
            # Approach from 0.5 m above the payload.
            pose_target.p[2] += 0.5

            rospy.loginfo("Prepare pickup %s at pose %s", target_payload,
                          object_target)
            print pose_target.p

            path = self._plan(pose_target,
                              config="reposition_robot",
                              smoother_config="reposition_robot_smoother")

            self.current_target = target_payload
            self.state = "plan_pickup_prepare"
            self.plan_dictionary['pickup_prepare'] = path

            #rospy.loginfo("Finish pickup prepare for payload %s", target_payload)
            self._step_complete(goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def move_pickup_prepare(self,
                            mode=ControllerCommander.MODE_AUTO_TRAJECTORY,
                            goal=None):
        """Execute the previously planned 'pickup_prepare' trajectory.

        mode: controller mode for the move (defaults to auto trajectory).
        Reports completion/failure on *goal* when one is supplied.
        """
        self._begin_step(goal)
        try:
            self.controller_commander.set_controller_mode(
                mode, self.speed_scalar, [], [])
            result = None  # NOTE(review): unused

            self.state = "pickup_prepare"
            self.process_index = 1
            # Record the pose at which this process step started.
            self.process_starts[self.process_states[
                self.process_index]] = self.get_current_pose()
            plan = self.plan_dictionary['pickup_prepare']
            self._execute_path(plan, goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def plan_pickup_lower(self, goal=None):
        """Plan lowering toward the current target payload (stopping 0.3 m
        above it), using TF rather than vision for the payload pose."""

        #TODO: check change state and target
        self._begin_step(goal)
        try:
            rospy.loginfo("Begin pickup_lower for payload %s",
                          self.current_target)

            object_target, _ = self._tf_get_object_gripper_target_pose(
                self.current_target)
            pose_target2 = copy.deepcopy(object_target)
            # Stop 0.3 m above the payload.
            pose_target2.p[2] += 0.3
            print pose_target2.p

            #path=self.controller_commander.compute_cartesian_path(pose_target2, avoid_collisions=False)
            path = self._plan(pose_target2, config="reposition_robot_short")

            self.state = "plan_pickup_lower"
            self.plan_dictionary['pickup_lower'] = path
            rospy.loginfo("Finish pickup_lower for payload %s",
                          self.current_target)
            self._step_complete(goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def move_pickup_lower(self,
                          mode=ControllerCommander.MODE_AUTO_TRAJECTORY,
                          goal=None):
        """Execute the 'pickup_lower' trajectory with force/torque abort
        thresholds for the current target payload; plans first if needed."""
        self._begin_step(goal)
        try:
            self.controller_commander.set_controller_mode(
                mode, 0.8 * self.speed_scalar, [],
                self.get_payload_pickup_ft_threshold(self.current_target))
            result = None  # NOTE(review): unused
            rospy.loginfo("moving_pickup_lower")
            # Plan on demand if the planning step was skipped.
            if (self.state != "plan_pickup_lower"):
                self.plan_pickup_lower()
            self.state = "pickup_lower"
            self.process_index = 2
            self.process_starts[self.process_states[
                self.process_index]] = self.get_current_pose()
            path = self.plan_dictionary['pickup_lower']
            self._execute_path(path, goal)

        #self.execute_trajectory_action.wait_for_result()
        #self.controller_commander.execute(self.plan_dictionary['pickup_lower'])
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def plan_pickup_grab_first_step(self, goal=None):
        """Plan descending 0.15 m below the nominal gripper target so the
        vacuum gripper makes contact with the panel surface."""
        #TODO: check change state and target
        self._begin_step(goal)
        try:
            rospy.loginfo("Begin pickup_grab for payload %s",
                          self.current_target)

            # Cached on self for reuse by plan_pickup_grab_second_step.
            self.object_target, _ = self._tf_get_object_gripper_target_pose(
                self.current_target)
            pose_target2 = copy.deepcopy(self.object_target)
            pose_target2.p[2] -= 0.15

            #path=self.controller_commander.compute_cartesian_path(pose_target2, avoid_collisions=False)
            path = self._plan(pose_target2, config="panel_pickup")
            self.state = "plan_pickup_grab_first_step"
            self.plan_dictionary['pickup_grab_first_step'] = path
            self._step_complete(goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def move_pickup_grab_first_step(self,
                                    mode=ControllerCommander.
                                    MODE_AUTO_TRAJECTORY,
                                    goal=None):
        """Execute the first grab descent with force/torque thresholds; the
        move stops on force contact (ft_stop=True). Plans first if needed."""

        self._begin_step(goal)

        try:
            self.controller_commander.set_controller_mode(mode, 0.8*self.speed_scalar, [],\
                                                          self.get_payload_pickup_ft_threshold(self.current_target))
            result = None  # NOTE(review): unused
            if (self.state != "plan_pickup_grab_first_step"):
                self.plan_pickup_grab_first_step()
            self.state = "pickup_grab_first_step"
            self.process_index = 3
            self.process_starts[self.process_states[
                self.process_index]] = self.get_current_pose()

            path = self.plan_dictionary['pickup_grab_first_step']
            # ft_stop: abort the trajectory when contact force is detected.
            self._execute_path(path, goal, ft_stop=True)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def plan_pickup_grab_second_step(self, goal=None):
        """Enable the vacuum gripper, attach the panel to the gripper in the
        planning scene, and plan raising 0.20 m with the grasped panel."""
        self._begin_step(goal)
        try:
            self.rapid_node.set_digital_io("Vacuum_enable", 1)
            time.sleep(1)

            #TODO: check vacuum feedback to make sure we have the panel

            # Compute the panel pose relative to the gripper tool frame.
            world_to_panel_tf = self.tf_listener.lookupTransform(
                "world", self.current_target, rospy.Time(0))
            world_to_gripper_tf = self.tf_listener.lookupTransform(
                "world", "vacuum_gripper_tool", rospy.Time(0))
            panel_to_gripper_tf = world_to_gripper_tf.inv() * world_to_panel_tf

            #Add extra cushion for spring extension. This should be a parameter somewhere rather than hard coded.
            if (self.current_target == "leeward_mid_panel"):
                panel_to_gripper_tf.p[2] += 0.07
            else:
                panel_to_gripper_tf.p[2] += 0.05

            # The target now becomes the carried payload.
            self.current_payload = self.current_target
            self.current_target = None

            self._update_payload_pose(self.current_payload,
                                      panel_to_gripper_tf,
                                      "vacuum_gripper_tool", 0.5)
            self.controller_commander.set_controller_mode(
                self.controller_commander.MODE_HALT, 1, [], [])
            time.sleep(1)

            # object_target was cached by plan_pickup_grab_first_step.
            pose_target2 = copy.deepcopy(self.object_target)
            pose_target2.p[2] += 0.20

            #path=self.controller_commander.compute_cartesian_path(pose_target2, avoid_collisions=False)

            path = self._plan(pose_target2, config="panel_pickup")

            self.state = "plan_pickup_grab_second_step"
            self.plan_dictionary['pickup_grab_second_step'] = path
            rospy.loginfo("Finish pickup_grab for payload %s",
                          self.current_payload)
            self._step_complete(goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def move_pickup_grab_second_step(self,
                                     mode=ControllerCommander.
                                     MODE_AUTO_TRAJECTORY,
                                     goal=None):
        """Execute the post-grab raise trajectory; plans first if needed."""
        self._begin_step(goal)
        try:
            self.controller_commander.set_controller_mode(
                mode, 0.8 * self.speed_scalar, [], [])
            result = None  # NOTE(review): unused
            if (self.state != "plan_pickup_grab_second_step"):
                self.plan_pickup_grab_second_step()
            self.state = "pickup_grab_second_step"
            self.process_index = 4
            self.process_starts[self.process_states[
                self.process_index]] = self.get_current_pose()

            path = self.plan_dictionary['pickup_grab_second_step']
            self._execute_path(path, goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def plan_pickup_raise(self, goal=None):
        """Plan raising the grasped payload 0.35 m straight up from the
        current gripper pose.

        Stores the plan in self.plan_dictionary['pickup_raise'] and sets
        state to "plan_pickup_raise".
        """

        #TODO: check change state and target
        self._begin_step(goal)
        try:
            rospy.loginfo("Begin pickup_raise for payload %s",
                          self.current_payload)

            #Just use gripper position for now, think up a better way in future
            object_target = self.tf_listener.lookupTransform(
                "world", "vacuum_gripper_tool", rospy.Time(0))
            pose_target2 = copy.deepcopy(object_target)
            pose_target2.p[2] += 0.35
            #pose_target2.p = np.array([-0.02285,-1.840,1.0])
            #pose_target2.R = rox.q2R([0.0, 0.707, 0.707, 0.0])

            path = self._plan(pose_target2, config="transport_panel_short")

            self.state = "plan_pickup_raise"
            self.plan_dictionary['pickup_raise'] = path
            # BUG FIX: log the carried payload; the original logged
            # self.current_target, which is None at this stage (cleared when
            # the panel was grabbed) and inconsistent with the begin log.
            rospy.loginfo("Finish pickup_raise for payload %s",
                          self.current_payload)
            self._step_complete(goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def move_pickup_raise(self,
                          mode=ControllerCommander.MODE_AUTO_TRAJECTORY,
                          goal=None):
        """Execute the 'pickup_raise' trajectory; plans first if needed."""
        self._begin_step(goal)
        try:
            self.controller_commander.set_controller_mode(
                mode, 0.8 * self.speed_scalar, [], [])
            result = None  # NOTE(review): unused
            if (self.state != "plan_pickup_raise"):
                self.plan_pickup_raise()
            self.state = "pickup_raise"
            self.process_index = 5
            self.process_starts[self.process_states[
                self.process_index]] = self.get_current_pose()

            path = self.plan_dictionary['pickup_raise']
            self._execute_path(path, goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def plan_transport_payload(self, target, goal=None):
        """Plan transporting the carried payload to *target* (a TF frame
        name), approaching from 0.40 m above the computed placement pose."""

        #TODO: check state and payload

        rospy.loginfo("Begin transport_panel for payload %s to %s",
                      self.current_payload, target)
        self._begin_step(goal)
        try:

            # Gripper pose that places the payload at the target frame.
            panel_target_pose = self.tf_listener.lookupTransform(
                "world", target, rospy.Time(0))
            panel_gripper_pose = self.tf_listener.lookupTransform(
                self.current_payload, "vacuum_gripper_tool", rospy.Time(0))
            pose_target = panel_target_pose * panel_gripper_pose
            #pose_target.p = [1.97026484647054, 1.1179574262842452, 0.12376598588449844]
            #pose_target.R = np.array([[-0.99804142,  0.00642963,  0.06222524], [ 0.00583933,  0.99993626, -0.00966372], [-0.06228341, -0.00928144, -0.99801535]])
            #pose_target.p[1] += -0.35
            pose_target.p[2] += 0.40

            #plan=self.controller_commander.plan(pose_target)
            plan = self._plan(pose_target,
                              config="transport_panel",
                              smoother_config="transport_panel_smoother")

            self.current_target = target
            self.state = "plan_transport_payload"
            self.plan_dictionary['transport_payload'] = plan
            rospy.loginfo("Finish transport_panel for payload %s to %s",
                          self.current_payload, target)
            self._step_complete(goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def move_transport_payload(self,
                               mode=ControllerCommander.MODE_AUTO_TRAJECTORY,
                               goal=None):
        """Execute the previously planned 'transport_payload' trajectory."""
        self._begin_step(goal)
        try:
            self.controller_commander.set_controller_mode(
                mode, 0.8 * self.speed_scalar, [], [])
            self.state = "transport_payload"
            self.process_index = 6
            # Record the pose at which this process step started.
            self.process_starts[self.process_states[
                self.process_index]] = self.get_current_pose()
            plan = self.plan_dictionary['transport_payload']
            self._execute_path(plan, goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def load_placement_target_config(self, target_payload):
        """Load the desired placement transform for *target_payload* from its
        marker-transform YAML config file.

        Returns a geometry_msgs TransformStamped filled from the YAML data.
        Raises ValueError for an unknown payload name.
        """
        if (target_payload == "leeward_mid_panel"):
            transform_fname = os.path.join(
                rospkg.RosPack().get_path(
                    'rpi_arm_composites_manufacturing_process'), 'config',
                'leeward_mid_panel_marker_transform.yaml')

        elif (target_payload == "leeward_tip_panel"):
            transform_fname = os.path.join(
                rospkg.RosPack().get_path(
                    'rpi_arm_composites_manufacturing_process'), 'config',
                'leeward_tip_panel_marker_transform.yaml')
        else:
            # BUG FIX: previously fell through with transform_fname unbound,
            # raising a confusing NameError below.
            raise ValueError("Unknown target payload: " +
                             str(target_payload))

        desired_transform_msg = TransformStamped()

        with open(transform_fname, 'r') as f:
            # safe_load: config is plain data; yaml.load without an explicit
            # Loader is deprecated and unsafe on untrusted input.
            transform_yaml = yaml.safe_load(f)

        genpy.message.fill_message_args(desired_transform_msg, transform_yaml)

        return desired_transform_msg

    def plan_gripper_release(self, goal=None):
        """Release the vacuum gripper, re-parent the payload to the panel
        nest in the planning scene, and plan raising the gripper 0.25 m."""
        self._begin_step(goal)
        try:
            self.rapid_node.set_digital_io("Vacuum_enable", 0)
            #time.sleep(1)
            self.controller_commander.set_controller_mode(
                ControllerCommander.MODE_HALT, 0.8 * self.speed_scalar, [], [])
            #TODO: check vacuum feedback to make sure we have the panel
            pose_target2 = self.controller_commander.compute_fk()
            pose_target2.p[2] += 0.25

            #self.current_payload=self.current_target

            # Compute the released payload's pose relative to the nest.
            gripper_to_panel_tf = self.tf_listener.lookupTransform(
                "vacuum_gripper_tool", self.current_payload, rospy.Time(0))
            world_to_gripper_tf = self.tf_listener.lookupTransform(
                "world", "vacuum_gripper_tool", rospy.Time(0))
            world_to_panel_nest_tf = self.tf_listener.lookupTransform(
                "world", "panel_nest", rospy.Time(0))
            panel_to_nest_tf = world_to_panel_nest_tf.inv(
            ) * world_to_gripper_tf * gripper_to_panel_tf

            self._update_payload_pose(self.current_payload, panel_to_nest_tf,
                                      "panel_nest", 0.5)

            # Robot is no longer carrying or targeting anything.
            self.current_payload = None
            self.current_target = None

            time.sleep(1)

            path = self._plan(pose_target2, config="panel_pickup")

            self.state = "plan_gripper_release"
            self.plan_dictionary['gripper_release'] = path
            rospy.loginfo("Finished gripper release")
            self._step_complete(goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def move_gripper_release(self,
                             mode=ControllerCommander.MODE_AUTO_TRAJECTORY,
                             goal=None):
        """Execute the gripper-release raise trajectory; plans first if
        needed."""
        self._begin_step(goal)
        try:
            self.controller_commander.set_controller_mode(
                mode, 0.8 * self.speed_scalar, [], [])
            result = None  # NOTE(review): unused
            if (self.state != "plan_gripper_release"):
                self.plan_gripper_release()
            self.state = "gripper_release"
            self.process_index = 8
            self.process_starts[self.process_states[
                self.process_index]] = self.get_current_pose()

            path = self.plan_dictionary['gripper_release']
            self._execute_path(path, goal)
        except Exception as err:
            traceback.print_exc()
            self._step_failed(err, goal)

    def _fill_process_state(self):
        """Build a ProcessState message from the current state, payload and
        target, substituting empty strings for None values."""
        msg = ProcessState()
        msg.state = "" if self.state is None else self.state
        msg.payload = "" if self.current_payload is None else self.current_payload
        msg.target = "" if self.current_target is None else self.current_target
        return msg

    def publish_process_state(self):
        """Publish the current process state on the process-state topic."""
        self._process_state_pub.publish(self._fill_process_state())

    def place_lower_temp(self):
        # NOTE(review): appears to be an unfinished stub -- it allocates
        # arrays for 32 image points (UV) and 3-D positions (P) and then
        # discards them. Confirm whether this method can be removed.
        UV = np.zeros([32, 2])
        P = np.zeros([32, 3])

    def _update_payload_pose(self,
                             payload_name,
                             pose,
                             parent_frame_id=None,
                             confidence=0.1):
        """Update a payload's pose through the update_payload_pose service.

        pose: transform expressed in pose.parent_frame_id; it is re-expressed
        relative to *parent_frame_id* (defaults to the payload's current
        parent frame) before being sent.
        confidence: confidence value forwarded with the update.
        Raises Exception if the service reports failure.
        """

        payload = self._get_payload(payload_name)

        if parent_frame_id is None:
            parent_frame_id = payload.header.frame_id

        # Re-express the pose in the requested parent frame.
        parent_tf = self.tf_listener.lookupTransform(parent_frame_id,
                                                     pose.parent_frame_id,
                                                     rospy.Time(0))
        pose2 = parent_tf.inv() * pose

        req = UpdatePayloadPoseRequest()
        req.name = payload_name
        req.pose = rox_msg.transform2pose_msg(pose2)
        req.header.frame_id = parent_frame_id
        req.confidence = confidence

        res = self.update_payload_pose_srv(req)
        if not res.success:
            raise Exception("Could not update payload pose")

    def _get_payload(self, payload_name):
        """Fetch the payload message for *payload_name* from the payload
        array service; raises Exception if it is not found exactly once."""
        response = self.get_payload_array_srv(
            GetPayloadArrayRequest([payload_name]))
        payloads = response.payload_array.payloads
        if len(payloads) == 1 and payloads[0].name == payload_name:
            return payloads[0]
        raise Exception("Invalid payload specified")

    def _begin_step(self, goal):
        """Accept *goal* as the single in-flight process step.

        A None goal (direct call without actionlib) is a no-op. If another
        step is already running, the goal is rejected and an Exception is
        raised so the calling step aborts immediately.
        """
        if goal is not None:
            with self._goal_handle_lock:
                if self._goal_handle is not None:
                    res = ProcessStepResult()
                    res.state = self.state
                    res.target = self.current_target if self.current_target is not None else ""
                    res.payload = self.current_payload if self.current_payload is not None else ""
                    res.error_msg = "Attempt to execute new step while previous step running"
                    goal.set_rejected(result=res)
                    rospy.loginfo(
                        "Attempt to execute new step while previous step running"
                    )
                    raise Exception(
                        "Attempt to execute new step while previous step running"
                    )
                else:
                    goal.set_accepted()
                    self._goal_handle = goal

    def _step_complete(self, goal):
        """Publish process state and mark *goal* succeeded, clearing the
        active goal handle. No-op when goal is None."""

        if goal is not None:
            with self._goal_handle_lock:
                self.publish_process_state()
                res = ProcessStepResult()
                res.state = self.state
                res.target = self.current_target if self.current_target is not None else ""
                res.payload = self.current_payload if self.current_payload is not None else ""
                goal.set_succeeded(res)

                if (self._goal_handle == goal):
                    self._goal_handle = None

    def _step_failed(self, err, goal):
        """Report step failure: re-raise *err* when there is no actionlib
        goal, otherwise abort the goal with the error message and clear the
        active goal handle."""
        if goal is None:
            raise err
        else:
            with self._goal_handle_lock:
                self.publish_process_state()
                res = ProcessStepResult()
                res.state = self.state
                res.target = self.current_target if self.current_target is not None else ""
                res.payload = self.current_payload if self.current_payload is not None else ""
                res.error_msg = str(err)
                goal.set_aborted(result=res)
                if (self._goal_handle == goal):
                    self._goal_handle = None

    def _execute_path(self, path, goal, ft_stop=False):
        """Execute *path* on the controller.

        With no actionlib *goal*, executes synchronously and returns.
        Otherwise executes asynchronously, reporting success/failure on
        *goal* via done_cb. ft_stop: stop the trajectory on force/torque
        threshold.
        """

        if goal is None:
            self.controller_commander.execute_trajectory(path, ft_stop=ft_stop)
            return

        def done_cb(err):
            # err is None on success, otherwise the controller error.
            rospy.loginfo("safe_kinematic_controller generated: %s", str(err))
            if (goal is not None):
                if err is None:
                    self._step_complete(goal)
                else:
                    with self._goal_handle_lock:
                        if self._goal_handle == goal:
                            self._goal_handle = None
                    res = ProcessStepResult()
                    res.state = self.state
                    res.target = self.current_target if self.current_target is not None else ""
                    res.payload = self.current_payload if self.current_payload is not None else ""
                    res.error_msg = str(err)
                    goal.set_aborted(result=res)
                    rospy.loginfo("safe_kinematic_controller generated: %s",
                                  str(err))

        with self._goal_handle_lock:
            # Goal may already have been cancelled/aborted before the move.
            if goal.get_goal_status().status != actionlib.GoalStatus.ACTIVE:
                if self._goal_handle == goal:
                    self._goal_handle = None
                res = ProcessStepResult()
                res.state = self.state
                res.target = self.current_target if self.current_target is not None else ""
                res.payload = self.current_payload if self.current_payload is not None else ""
                # BUG FIX: the original assigned str(err) here, but `err` is
                # undefined in this scope and raised NameError instead of
                # aborting cleanly.
                res.error_msg = "goal aborted before move"
                goal.set_aborted(result=res)
                rospy.loginfo("goal aborted before move")
                return

            self.controller_commander.async_execute_trajectory(path,
                                                               done_cb=done_cb,
                                                               ft_stop=ft_stop)

    def _plan(self,
              target_pose,
              waypoints_pose=[],
              speed_scalar=1,
              config=None,
              smoother_config=None):
        """Plan (and optionally smooth) a trajectory to *target_pose* with
        trajopt, retrying up to 3 times on planner failure.

        config / smoother_config: JSON planner config names. Returns the
        smoothed plan when smoother_config is given, else the rough plan.
        Re-raises the planner error after 4 consecutive failures.

        NOTE(review): waypoints_pose and speed_scalar are currently unused;
        kept for interface compatibility. The mutable [] default is benign
        only because it is never modified.
        """
        error_count = 0
        while True:
            try:
                rospy.loginfo("begin rough trajectory planning with config " +
                              str(config))
                plan1 = self.planner.trajopt_plan(target_pose,
                                                  json_config_name=config)
                rospy.loginfo("rough trajectory planning complete " +
                              str(config))
                if smoother_config is None:
                    return plan1
                rospy.loginfo("begin trajectory smoothing with config " +
                              str(smoother_config))
                plan2 = self.planner.trajopt_smooth_trajectory(
                    plan1, json_config_name=smoother_config)
                rospy.loginfo("trajectory smoothing complete with config " +
                              str(smoother_config))
                return plan2
            except Exception:
                # BUG FIX: was a bare `except:`, which also caught
                # KeyboardInterrupt/SystemExit and made Ctrl-C retry
                # planning instead of exiting.
                rospy.logerr("trajectory planning failed with config " +
                             str(config) + " smoother_config " +
                             str(smoother_config))
                error_count += 1
                if error_count > 3:
                    raise
                traceback.print_exc()
# Ejemplo n.º 13 (example separator inserted by a code-scraping tool)
# 0
def main():
    """Scripted pickup (and optional placement) of leeward_mid_panel.

    Command-line flags (checked in sys.argv):
      place-panel -- also place the panel after picking it up
      disable-ft  -- disable force/torque abort thresholds
    """

    t1 = time.time()

    do_place = "place-panel" in sys.argv

    # ft_threshold is a module-level default threshold list.
    if not "disable-ft" in sys.argv:
        ft_threshold1 = ft_threshold
    else:
        ft_threshold1 = []

    rospy.init_node('Vision_MoveIt_new_Cam_wason2', anonymous=True)

    print "============ Starting setup"

    listener = PayloadTransformListener()

    rapid_node = RAPIDCommander()
    controller_commander = ControllerCommander()

    object_commander = ObjectRecognitionCommander()

    # Halt the controller while locating the panel with vision.
    controller_commander.set_controller_mode(controller_commander.MODE_HALT, 1,
                                             [], ft_threshold1)

    object_target = object_commander.get_object_gripper_target_pose(
        "leeward_mid_panel")

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 1, [], ft_threshold1)

    print "============ Printing robot Pose"
    print controller_commander.get_current_pose_msg()
    #print robot.get_current_state().joint_state.position
    print "============ Generating plan 1"

    # Approach pose: 0.5 m above the panel gripper target.
    pose_target = copy.deepcopy(object_target)
    pose_target.p[2] += 0.5

    print 'Target:', pose_target

    print "============ Executing plan1"
    controller_commander.plan_and_move(pose_target)
    print 'Execution Finished.'

    ########## Vertical Path 1 ############

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.8, [], ft_threshold1)

    print "============ Printing robot Pose"
    print controller_commander.get_current_pose_msg()
    print "============ Generating plan 2"

    # Descend to 0.15 m above the panel.
    pose_target2 = copy.deepcopy(object_target)
    pose_target2.p[2] += 0.15

    print 'Target:', pose_target2

    print "============ Executing plan2"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)
    print 'Execution Finished.'

    ########## Vertical Path 2 ############

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, [], ft_threshold1)

    print "============ Printing robot Pose"
    print controller_commander.get_current_pose_msg()
    print "============ Generating plan 3"

    # Press 0.15 m below the nominal target to make contact.
    pose_target2 = copy.deepcopy(object_target)
    pose_target2.p[2] -= 0.15

    print 'Target:', pose_target2

    print "============ Executing plan3"
    try:
        controller_commander.compute_cartesian_path_and_move(
            pose_target2, avoid_collisions=False)
        pass
    except:
        # Best effort: the move is expected to abort when the force/torque
        # threshold trips on contact with the panel.
        pass
    print 'Execution Finished.'

    ########## Lift Path ############

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [], [])

    print "============ Lift panel!"

    # Engage the vacuum gripper and lift 0.5 m.
    rapid_node.set_digital_io("Vacuum_enable", 1)
    time.sleep(0.5)
    pose_target3 = copy.deepcopy(object_target)
    pose_target3.p[2] += 0.5

    print 'Target:', pose_target3

    print "============ Executing plan4"
    controller_commander.compute_cartesian_path_and_move(
        pose_target3, avoid_collisions=False)

    if (do_place):
        print "=========== Do place!"
        print ""

        controller_commander.set_controller_mode(
            controller_commander.MODE_AUTO_TRAJECTORY, 1, [], ft_threshold1)

        print "============ Generating plan 5"

        time.sleep(2)

        # Placement pose: nest target frame composed with the gripper offset.
        panel_target_pose = listener.lookupTransform(
            "world", "panel_nest_leeward_mid_panel_target", rospy.Time(0))
        panel_gripper_target_pose = listener.lookupTransform(
            "leeward_mid_panel", "leeward_mid_panel_gripper_target",
            rospy.Time(0))
        pose_target = panel_target_pose * panel_gripper_target_pose
        print pose_target.R
        print pose_target.p

        pose_target2 = copy.deepcopy(pose_target)
        pose_target2.p[2] += 0.3
        pose_target2.p[1] -= 0.2

        print "============ Executing plan 5"
        controller_commander.plan_and_move(pose_target2)

        controller_commander.set_controller_mode(
            controller_commander.MODE_AUTO_TRAJECTORY, 0.4, [], ft_threshold1)

        print "============ Printing robot Pose"
        print controller_commander.get_current_pose_msg()
        print "============ Generating plan 3"

        pose_target2 = copy.deepcopy(pose_target)
        pose_target2.p[2] -= 0.15

        print 'Target:', pose_target2

        print "============ Executing plan3"
        try:
            controller_commander.compute_cartesian_path_and_move(
                pose_target2, avoid_collisions=False)
        except:
            # Best effort: expected to stop on the force threshold at
            # placement contact.
            pass
        print 'Execution Finished.'

        print "============ Lift gripper!"

        controller_commander.set_controller_mode(
            controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [], [])

        # Release the vacuum and raise the gripper clear of the panel.
        rapid_node.set_digital_io("Vacuum_enable", 0)
        time.sleep(0.5)
        pose_target3 = copy.deepcopy(pose_target)
        pose_target3.p[2] += 0.5

        print 'Target:', pose_target3

        print "============ Executing plan4"
        controller_commander.compute_cartesian_path_and_move(
            pose_target3, avoid_collisions=False)

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2 - t1) + " seconds"
# Ejemplo n.º 14 (example separator inserted by a code-scraping tool)
# 0
 def __init__(self):
     # Action client for the object recognition server.
     self.client = actionlib.SimpleActionClient("recognize_objects",
                                                ObjectRecognitionAction)
     # TF listener that also resolves payload frames.
     self.listener = PayloadTransformListener()
# Ejemplo n.º 15 (example separator inserted by a code-scraping tool)
# 0
 def __init__(self, *args, **kwargs):
     # Standard TestCase init plus a payload-aware TF listener.
     super(TestTesseractDiff, self).__init__(*args, **kwargs)
     self.tf_listener = PayloadTransformListener()
# Ejemplo n.º 16 (example separator inserted by a code-scraping tool)
# 0
 def __init__(self, *args, **kwargs):
     # Standard TestCase init plus a payload-aware TF listener.
     super(TestAttachedCollisionObject, self).__init__(*args, **kwargs)
     self.tf_listener = PayloadTransformListener()
# Ejemplo n.º 17 (example separator inserted by a code-scraping tool)
# 0
class TestAttachedCollisionObject(unittest.TestCase):
    """Integration test for attached collision object handling.

    Calls the "update_payload_pose" service and checks that the scene
    published on /planning_scene matches pre-recorded expected scenes
    (/expected_planning_scene_1..3) after each payload pose update.
    """

    def __init__(self, *args, **kwargs):
        super(TestAttachedCollisionObject, self).__init__(*args, **kwargs)
        # TF listener used to look up payload and fixture transforms.
        self.tf_listener = PayloadTransformListener()

    def test_attached_collision_object(self):
        """Update payload2's pose twice and compare the resulting planning scenes."""

        # Baseline: the current scene must match the first expected scene.
        expected_scene_1 = rospy.wait_for_message("/expected_planning_scene_1",
                                                  PlanningScene,
                                                  timeout=10)
        scene_1 = rospy.wait_for_message('/planning_scene',
                                         PlanningScene,
                                         timeout=10)
        self._assert_planning_scene_equal(expected_scene_1, scene_1)

        rospy.sleep(rospy.Duration(1))

        update_pose_proxy = rospy.ServiceProxy("update_payload_pose",
                                               UpdatePayloadPose)

        payload2_to_gripper_target_tf = self.tf_listener.lookupTransform(
            "payload2", "payload2_gripper_target", rospy.Time(0))

        # First update: express payload2's pose in the vacuum gripper tool frame.
        req = UpdatePayloadPoseRequest()
        req.header.frame_id = "vacuum_gripper_tool"
        req.name = "payload2"
        req.pose = rox_msg.transform2pose_msg(
            payload2_to_gripper_target_tf.inv())
        req.confidence = 0.5
        res = update_pose_proxy(req)
        assert res.success

        expected_scene_2 = rospy.wait_for_message("/expected_planning_scene_2",
                                                  PlanningScene,
                                                  timeout=10)
        scene_2 = rospy.wait_for_message('/planning_scene',
                                         PlanningScene,
                                         timeout=10)
        self._assert_planning_scene_equal(expected_scene_2, scene_2)

        world_to_fixture2_payload2_target_tf = self.tf_listener.lookupTransform(
            "world", "fixture2_payload2_target", rospy.Time(0))

        #Add in an offset to represent a final placement error
        fixture2_payload2_target_to_payload2_tf = rox.Transform(
            rox.rot([0, 0, 1], np.deg2rad(5)), [0.1, 0, 0],
            "fixture2_payload2_target", "payload2")

        # Second update: express payload2's pose in the world frame.
        # NOTE(review): req2.confidence is left at its message default,
        # unlike req -- confirm this is intentional.
        req2 = UpdatePayloadPoseRequest()
        req2.header.frame_id = "world"
        req2.name = "payload2"
        req2.pose = rox_msg.transform2pose_msg(
            world_to_fixture2_payload2_target_tf *
            fixture2_payload2_target_to_payload2_tf)
        res2 = update_pose_proxy(req2)
        assert res2.success

        expected_scene_3 = rospy.wait_for_message("/expected_planning_scene_3",
                                                  PlanningScene,
                                                  timeout=10)
        scene_3 = rospy.wait_for_message('/planning_scene',
                                         PlanningScene,
                                         timeout=10)
        self._assert_planning_scene_equal(expected_scene_3, scene_3)

    def _assert_planning_scene_equal(self, scene1, scene2):
        """Field-by-field equality of two PlanningScene messages (poses compared
        approximately via the helpers below)."""
        self.assertEqual(scene1.name, scene2.name)
        self.assertEqual(scene1.robot_model_name, scene2.robot_model_name)
        self.assertEqual(scene1.fixed_frame_transforms,
                         scene2.fixed_frame_transforms)
        self.assertEqual(scene1.link_padding, scene2.link_padding)
        self.assertEqual(scene1.link_scale, scene2.link_scale)
        self.assertEqual(scene1.object_colors, scene2.object_colors)
        self.assertEqual(scene1.world.collision_objects,
                         scene2.world.collision_objects)
        self.assertEqual(scene1.is_diff, scene2.is_diff)

        self.assertEqual(scene1.robot_state.is_diff,
                         scene2.robot_state.is_diff)
        self.assertEqual(len(scene1.robot_state.attached_collision_objects),
                         len(scene2.robot_state.attached_collision_objects))
        for i in xrange(len(scene1.robot_state.attached_collision_objects)):
            self._assert_attached_collision_object_equal(
                scene1.robot_state.attached_collision_objects[i],
                scene2.robot_state.attached_collision_objects[i])

    def _assert_attached_collision_object_equal(self, aco1, aco2):
        """Equality of two AttachedCollisionObject messages."""
        self.assertEqual(aco1.link_name, aco2.link_name)
        self._assert_collision_object_equal(aco1.object, aco2.object)
        self.assertEqual(aco1.touch_links, aco2.touch_links)
        self.assertEqual(aco1.detach_posture, aco2.detach_posture)
        self.assertEqual(aco1.weight, aco2.weight)

    def _assert_collision_object_equal(self, co1, co2):
        """Equality of two CollisionObject messages; poses and mesh vertices
        are compared with floating-point tolerance."""
        self.assertEqual(co1.header.frame_id, co2.header.frame_id)
        self.assertEqual(co1.id, co2.id)
        self.assertEqual(co1.type, co2.type)
        self.assertEqual(co1.primitives, co2.primitives)
        self._assert_pose_almost_equal(co1.primitive_poses,
                                       co2.primitive_poses)
        for m1, m2 in zip(co1.meshes, co2.meshes):
            self._assert_mesh_almost_equal(m1, m2)
        self._assert_pose_almost_equal(co1.mesh_poses, co2.mesh_poses)
        self.assertEqual(co1.planes, co2.planes)
        self._assert_pose_almost_equal(co1.plane_poses, co2.plane_poses)
        self.assertEqual(co1.operation, co2.operation)

    def _assert_mesh_almost_equal(self, m1, m2):
        """Triangle indices must match exactly; vertices within float tolerance."""
        for t1, t2 in zip(m1.triangles, m2.triangles):
            np.testing.assert_equal(t1.vertex_indices, t2.vertex_indices)
        for v1, v2 in zip(m1.vertices, m2.vertices):
            np.testing.assert_allclose([v1.x, v1.y, v1.z], [v2.x, v2.y, v2.z])

    def _assert_pose_almost_equal(self, pose1, pose2):
        """Compare a Pose (or list of Poses, element-wise) within float tolerance."""
        if isinstance(pose1, list):
            for p1, p2 in zip(pose1, pose2):
                self._assert_pose_almost_equal(p1, p2)
            return
        np.testing.assert_allclose([pose1.position.x, pose1.position.y, pose1.position.z], \
                                    [pose2.position.x, pose2.position.y, pose2.position.z])
        np.testing.assert_allclose([pose1.orientation.x, pose1.orientation.y, \
                                pose1.orientation.z, pose1.orientation.w], \
                               [pose2.orientation.x, pose2.orientation.y, \
                                pose2.orientation.z, pose2.orientation.w] )
Ejemplo n.º 18
0
class PlacementController(object):

    def __init__(self):
        """Set up ROS I/O, camera parameters, and state for the placement controller.

        Subscribes to controller_state (F/T data via self.callback), the
        gripper camera image and camera_info topics, and creates commanders
        for RAPID, the motion controller, and object recognition.  Most
        gains/thresholds (Ki, Kc, F_d_set1/2, convergence thresholds, dt)
        start as None and are expected to be filled in before the servo
        loops (pbvs_to_stage1 / ibvs_placement) run.
        """

        #Subscribe to controller_state 
        self.controller_state_sub = rospy.Subscriber("controller_state", controllerstate, self.callback)
        self.last_ros_image_stamp = rospy.Time(0)
        self.goal_handle=None
        
               
        self.listener = PayloadTransformListener()
        self.rapid_node = rapid_node_pkg.RAPIDCommander()
        self.controller_commander = controller_commander_pkg.ControllerCommander()
        self.object_commander = ObjectRecognitionCommander() 
        # Initilialize aruco boards and parameters
        self.aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
        self.parameters =  cv2.aruco.DetectorParameters_create()
        # Sub-pixel corner refinement improves tag pose accuracy.
        self.parameters.cornerRefinementMethod=cv2.aruco.CORNER_REFINE_SUBPIX
        self.parameters.adaptiveThreshWinSizeMax=30
        self.parameters.adaptiveThreshWinSizeStep=7
        # ================== Cam 636
        # --- Subscribe to Gripper camera node for image acquisition
        #TODO: Take in camera names and make subscribers and triggers accordingly
        self.ros_gripper_2_img_sub = rospy.Subscriber('/gripper_camera_2/image', Image, self.object_commander.ros_raw_gripper_2_image_cb)
        self.ros_gripper_2_trigger = rospy.ServiceProxy('/gripper_camera_2/trigger', Trigger)
        
        # --- Camera parameters
        self.CamParam = CameraParams()
        # --- Camera pose
        #TODO: Substitute transform in here
        # Hard-coded camera-to-robot Jacobian mapping; R_Jcam/r_cam are a
        # fixed calibration, inverted into the 6x6 self.R_Jcam used by IBVS.
        R_Jcam = np.array([[0.9995,-0.0187,-0.0263],[-0.0191,-0.9997,-0.0135],[-0.0261,0.0140,-0.9996]])
        r_cam = rox.hat(np.array([0.0707, 1.1395, 0.2747]))#rox.hat(np.array([- 0.2811, -1.1397,0.0335]))#rox.hat(np.array([- 0.2811, 1.1397,0.0335]))
        self.R_Jcam = np.linalg.inv(np.vstack([ np.hstack([R_Jcam,np.zeros([3,3])]), np.hstack([np.dot(r_cam,R_Jcam),R_Jcam]) ]))
        self.dt = None
        self.iteration=0
        
        self.board_ground = None
        # functions like a gain, used with velocity to track position
        self.FTdata = None
        self.ft_flag = False
        #self.FTdata_0 = self.FTdata
        #self.FTdata_0est = self.compute_ft_est()
        #self.result = self.take_image()
        #TODO: replace with trajopt code
        self.client = actionlib.SimpleActionClient("joint_trajectory_action", FollowJointTrajectoryAction)
        # IBVS parameters
        self.du_converge_TH = None
        self.dv_converge_TH = None
        self.iteration_limit = None
        self.Ki = None   
        # Compliance controller parameters
        self.F_d_set1 = None
        self.F_d_set2 = None
        self.Kc = None
        self.ros_data=None
        # NOTE(review): self.camMatrix/self.distCoeffs are initialized here but
        # fill_camera_data stores intrinsics into self.CamParam instead.
        self.camMatrix=None
        self.distCoeffs=None
        self.ros_gripper_2_cam_info_sub=rospy.Subscriber('/gripper_camera_2/camera_info', CameraInfo, self.fill_camera_data)

    def fill_camera_data(self, ros_data):
        """CameraInfo callback: cache the raw message and copy its intrinsics
        into self.CamParam.

        The 3x3 camera matrix is reshaped from the flattened K field; the
        distortion coefficients are wrapped from D into a 1xN array.
        """
        # Keep the raw CameraInfo message for later inspection.
        self.ros_data = ros_data
        intrinsic_matrix = np.reshape(ros_data.K, (3, 3))
        distortion = np.array([ros_data.D])
        # NOTE(review): the original had a commented-out guard that padded
        # short distortion vectors with zeros; behavior left unchanged.
        self.CamParam.distCoeff = distortion
        self.CamParam.camMatrix = intrinsic_matrix
        
    
            
            
    def callback(self, data):
        """controller_state callback: store the latest F/T wrench as a
        6-vector [tx, ty, tz, fx, fy, fz] and flag that data has arrived."""
        torque = data.ft_wrench.torque
        force = data.ft_wrench.force
        self.FTdata = np.array([torque.x, torque.y, torque.z,
                                force.x, force.y, force.z])
        self.ft_flag = True

    
    def trapezoid_gen(self,target,current_joint_angles,acc,dx):
        """Build a 4-point FollowJointTrajectoryGoal from current_joint_angles
        to target using a trapezoidal velocity profile (via trapgen).

        Parameters:
        target -- goal joint angles (6-vector)
        current_joint_angles -- start joint angles (6-vector)
        acc -- acceleration budget used to pick the move duration
        dx -- NOTE(review): accepted but never used in this body; confirm

        Returns a goal with points at: start, end of acceleration (ta),
        start of deceleration (tb), and the target (tf).
        """
        goal = FollowJointTrajectoryGoal()
        goal.trajectory.joint_names=['joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6']
        goal.trajectory.header.frame_id='/world'
    
        dist = np.array(target-current_joint_angles)
        # Duration sized so the largest joint excursion fits the accel budget.
        duration = np.max(np.sqrt(np.abs(9.0*dist/2.0/acc)))
        
        vmax = 1.5/duration
        amax  = 3*vmax/duration
        dmax  = amax
        # Sample the normalized (0..1) trapezoid at t=0, t=ta (end of accel)
        # and t=tb (start of decel); xa/va/aa etc. scale onto dist below.
        [x0,v0,a0,ta,tb,tf] = trapgen(0,1,0,0,vmax,amax,dmax,0)
        [xa,va,aa,ta,tb,tf] = trapgen(0,1,0,0,vmax,amax,dmax,ta)
        [xb,vb,ab,ta,tb,tf] = trapgen(0,1,0,0,vmax,amax,dmax,tb)

        # NOTE(review): the three recomputed values below are not referenced
        # again in this method (dead code); confirm before removing.
        duration = np.max(np.sqrt(np.abs(9.0*dist/2.0/acc)))
        vmax = 1.5*dist/duration
        acc = 3*vmax/duration

        # Point 1: start pose, zero velocity, initial acceleration.
        p1=JointTrajectoryPoint()
        p1.positions = current_joint_angles
        p1.velocities = np.zeros((6,))
        p1.accelerations = aa*dist
        p1.time_from_start = rospy.Duration(0)
        
        # Point 2: end of the acceleration phase (cruise begins).
        p2=JointTrajectoryPoint()
        p2.positions = np.array(p1.positions) + dist*xa
        p2.velocities = va*dist
        p2.accelerations = np.zeros((6,))
        p2.time_from_start = rospy.Duration(ta)
        
        # Point 3: start of the deceleration phase.
        p3=JointTrajectoryPoint()
        p3.positions = np.array(p1.positions) + dist*xb
        p3.velocities = vb*dist
        p3.accelerations = -ab*dist
        p3.time_from_start = rospy.Duration(tb)
    
        # Point 4: target pose, at rest.
        p4=JointTrajectoryPoint()
        p4.positions = target
        p4.velocities = np.zeros((6,))
        p4.accelerations = np.zeros((6,))
        p4.time_from_start = rospy.Duration(tf)
        
        goal.trajectory.points.append(p1)
        goal.trajectory.points.append(p2)
        goal.trajectory.points.append(p3)
        goal.trajectory.points.append(p4)
        
        return goal
        
    def compute_ft_est(self):
        """Estimate the expected F/T sensor reading for the current arm pose.

        Looks up the base->link_6 transform, builds a gravity direction
        vector (note the 9.8 factor) in a remapped sensor frame, assembles a
        6x10 regressor, and multiplies it by a fixed, pre-identified 10-value
        parameter vector (scaled by 100).  Returns a 6-vector (torque terms
        first, then force).
        """
        # Fixed axis remapping between link_6 and the sensor frame.
        axis_remap = np.array([[0, 0, -1], [0, -1, 0], [1, 0, 0]])
        # Pre-identified load/offset parameters, scaled by 100.
        wrench_params = 100 * np.array([
            0.019296738361905, 0.056232033265447, 0.088644197659430,
            0.620524934626544, -0.517896661195076, 0.279323567303444,
            -0.059640563813256, 0.631460085138371, -0.151143175570223,
            -6.018321330845553]).transpose()
        base_to_link6 = self.listener.lookupTransform("base", "link_6",
                                                      rospy.Time(0))
        # Gravity direction expressed in the remapped end-effector frame.
        gravity_vec = 9.8 * np.matmul(
            np.matmul(base_to_link6.R, axis_remap).transpose(),
            np.array([0, 0, 1]).transpose())
        torque_rows = np.hstack([rox.hat(gravity_vec).transpose(),
                                 np.zeros([3, 1]), np.eye(3),
                                 np.zeros([3, 3])])
        force_rows = np.hstack([np.zeros([3, 3]),
                                gravity_vec.reshape([3, 1]),
                                np.zeros([3, 3]), np.eye(3)])
        regressor = np.vstack([torque_rows, force_rows])
        return np.matmul(regressor, wrench_params)



    def get_pose(self, board, corners, ids, CamParam):
        """Solve the PnP pose of an aruco board from detected tag corners.

        Returns (imgPoints, rvec, tvec, Rca, b): the reshaped image points,
        the solvePnP rotation and translation vectors, the rotation matrix
        for rvec, and the Rodrigues jacobian.
        """
        # 3D object points and matching 2D image points for the board's tags.
        obj_pts, img_pts = aruco.getBoardObjectAndImagePoints(board, corners, ids)
        obj_pts = obj_pts.reshape([obj_pts.shape[0], 3])
        img_pts = img_pts.reshape([img_pts.shape[0], 2])

        # Board pose in the camera frame from the 2D-3D correspondences.
        _, rot_vec, trans_vec = cv2.solvePnP(obj_pts, img_pts,
                                             CamParam.camMatrix,
                                             CamParam.distCoeff)
        rot_mat, rodrigues_jac = cv2.Rodrigues(rot_vec)

        return img_pts, rot_vec, trans_vec, rot_mat, rodrigues_jac
    

    def image_jacobian_gen(self, result, corners, ids, CamParam, board_ground,board_panel,id_start_ground, id_board_ground_size, tag_ground_size, loaded_object_points_ground_in_panel_system, display_window):
        """Build the IBVS image Jacobian and pixel error from detected tags.

        Estimates poses of the ground and panel boards, reprojects the ideal
        ground-tag points through the panel pose, and for each of the 4 tag
        corners stacks a 2-row interaction-matrix block per valid ground tag.

        Returns (du, dv, J_cam, delta_UV_all): mean |u| and |v| pixel errors,
        the stacked camera Jacobian, and the stacked pixel error vector.
        If display_window is truthy it is used as a cv2 window name to show
        detected vs. reprojected points.
        """

        rospy.loginfo(str(id_start_ground))
        rospy.loginfo(str(id_board_ground_size))
        rospy.loginfo(str(tag_ground_size))
        rospy.loginfo(str(corners)) #float32
        rospy.loginfo(str(board_ground))  
        rospy.loginfo(str(board_panel)) 

        # Fixed-size scratch arrays indexed by tag id (ids >= idx_max ignored).
        idx_max = 180
        UV = np.zeros([idx_max,8])
        P = np.zeros([idx_max,3])
        id_valid = np.zeros([idx_max,1])
        
        # Camera intrinsics: focal lengths and principal point.
        f_hat_u = CamParam.camMatrix[0][0]
        f_hat_v = CamParam.camMatrix[1][1]
        f_0_u = CamParam.camMatrix[0][2]
        f_0_v = CamParam.camMatrix[1][2]
        
        imgPoints_ground, rvec_ground, tvec_ground, Rca_ground, b_ground = self.get_pose(board_ground, corners, ids, CamParam)
        imgPoints_panel, rvec_panel, tvec_panel, Rca_panel, b_panel = self.get_pose(board_panel, corners, ids, CamParam)
        
        # Split detections into ground-board vs. panel-board tags by id range.
        corners_ground = []
        corners_panel = []
        for i_ids,i_corners in zip(ids,corners):
            if i_ids<=(id_start_ground+id_board_ground_size):
                corners_ground.append(i_corners)
            else:
                corners_panel.append(i_corners)
        #rospy.loginfo(str(id_start_ground))
        
        # Per-marker poses; panel tags use a hard-coded 0.025 m tag size.
        rvec_all_markers_ground, tvec_all_markers_ground, _ = aruco.estimatePoseSingleMarkers(corners_ground, tag_ground_size, CamParam.camMatrix, CamParam.distCoeff)
        rvec_all_markers_panel, tvec_all_markers_panel, _ = aruco.estimatePoseSingleMarkers(corners_panel, 0.025, CamParam.camMatrix, CamParam.distCoeff)
        #rospy.loginfo(str(tvec_all_markers_ground))
        #rospy.loginfo(str(tvec_all_markers_panel))
        tvec_all=np.concatenate((tvec_all_markers_ground,tvec_all_markers_panel),axis=0)
        
        
        # Scatter corner pixels and marker translations into id-indexed slots.
        for i_ids,i_corners,i_tvec in zip(ids,corners,tvec_all):
            if i_ids<idx_max:
                #print 'i_corners',i_corners,i_corners.reshape([1,8])
                UV[i_ids,:] = i_corners.reshape([1,8]) #np.average(i_corners, axis=1) 
                P[i_ids,:] = i_tvec
                id_valid[i_ids] = 1
    
        
        id_select = range(id_start_ground,(id_start_ground+id_board_ground_size))
        #used to find the height of the tags and the delta change of height, z height at desired position
        Z = P[id_select,2] #- [0.68184539, 0.68560932, 0.68966803, 0.69619578])
        id_valid = id_valid[id_select]
    
        dutmp = []
        dvtmp = []
        
        #Pixel estimates of the ideal ground tag location
        reprojected_imagePoints_ground_2, jacobian2	=	cv2.projectPoints(	loaded_object_points_ground_in_panel_system.transpose(), rvec_panel, tvec_panel, CamParam.camMatrix, CamParam.distCoeff)
        reprojected_imagePoints_ground_2 = reprojected_imagePoints_ground_2.reshape([reprojected_imagePoints_ground_2.shape[0],2])
       
        if(display_window):
            frame_with_markers_and_axis = cv2.cvtColor(result, cv2.COLOR_GRAY2BGR)
            frame_with_markers_and_axis	=	cv2.aruco.drawAxis(	frame_with_markers_and_axis,  CamParam.camMatrix, CamParam.distCoeff, Rca_ground, tvec_ground, 0.2	)
            frame_with_markers_and_axis	=	cv2.aruco.drawAxis(	frame_with_markers_and_axis,  CamParam.camMatrix, CamParam.distCoeff, Rca_panel, tvec_panel, 0.2	)
        
            #plot image points for ground tag from corner detection and from re-projections
            for point1,point2 in zip(imgPoints_ground,np.float32(reprojected_imagePoints_ground_2)):
                cv2.circle(frame_with_markers_and_axis,tuple(point1),5,(0,0,255),3)
                cv2.circle(frame_with_markers_and_axis,tuple(point2),5,(255,0,0),3) 
                
            height, width, channels = frame_with_markers_and_axis.shape
            cv2.imshow(display_window,cv2.resize(frame_with_markers_and_axis, (width/4, height/4)))
            cv2.waitKey(1)
            #Save
            #filename_image = "/home/rpi-cats/Desktop/DJ/Code/Images/Panel2_Acquisition_"+str(t1)+"_"+str(iteration)+".jpg"
            #scipy.misc.imsave(filename_image, frame_with_markers_and_axis)
        
        reprojected_imagePoints_ground_2 = np.reshape(reprojected_imagePoints_ground_2,(id_board_ground_size,8))
        #Go through a particular point in all tags to build the complete Jacobian
        for ic in range(4):
            #uses first set of tags, numbers used to offset camera frame, come from camera parameters               
            UV_target = np.vstack([reprojected_imagePoints_ground_2[:,2*ic]-f_0_u, reprojected_imagePoints_ground_2[:,2*ic+1]-f_0_v]).T
            uc = UV_target[:,0]
            vc = UV_target[:,1]
    
            UV_current = np.vstack([UV[id_select,2*ic]-f_0_u, UV[id_select,2*ic+1]-f_0_v]).T
            #find difference between current and desired tag difference
            delta_UV = UV_target-UV_current
    
            # Stack a 2x6 interaction-matrix block per valid ground tag;
            # tags never detected are recorded in delet_idx and dropped.
            delet_idx = []
            J_cam_tmp =np.array([])
            for tag_i in range(id_board_ground_size):
                if id_valid[tag_i] == 1:
                    tmp = 1.0*np.array([[uc[tag_i]*vc[tag_i]/f_hat_u, -1.0*(uc[tag_i]*uc[tag_i]/f_hat_u + f_hat_u), vc[tag_i],-f_hat_u/Z[tag_i], 0.0, uc[tag_i]/Z[tag_i]],
                                               [ vc[tag_i]*vc[tag_i]/f_hat_v+f_hat_v, -1.0*uc[tag_i]*vc[tag_i]/f_hat_v, -uc[tag_i],0.0, -f_hat_v/Z[tag_i], vc[tag_i]/Z[tag_i]]])
                    if not (J_cam_tmp).any():
                        J_cam_tmp = tmp
                    else:
                        J_cam_tmp= np.concatenate((J_cam_tmp, tmp), axis=0)
                else:
                    delet_idx.append(tag_i)
                    
            delta_UV = np.delete(delta_UV, delet_idx, 0)
            dutmp.append(np.mean(delta_UV[:,0]))
            dvtmp.append(np.mean(delta_UV[:,1]))
            #camera jacobian
            if ic ==0:
                J_cam = J_cam_tmp
                delta_UV_all = delta_UV.reshape(np.shape(delta_UV)[0]*np.shape(delta_UV)[1],1)
                UV_target_all = UV_target.reshape(np.shape(UV_target)[0]*np.shape(UV_target)[1],1)
            else:
                J_cam = np.vstack([J_cam,J_cam_tmp])
                delta_UV_all = np.vstack([delta_UV_all,delta_UV.reshape(np.shape(delta_UV)[0]*np.shape(delta_UV)[1],1)]) 
                UV_target_all = np.vstack([UV_target_all,UV_target.reshape(np.shape(UV_target)[0]*np.shape(UV_target)[1],1)])
    
        du = np.mean(np.absolute(dutmp))
        dv = np.mean(np.absolute(dvtmp))
        print 'Average du of all points:',du
        print 'Average dv of all points:',dv
        
        return du, dv, J_cam, delta_UV_all
    
        
    
    def take_image(self):
        """Trigger the gripper camera and block until a new frame arrives.

        Stores the new frame in self.result.  Raises Exception("Image
        receive timeout") if no new image shows up within ~12.5 s
        (50 polls at 0.25 s).
        """
        ####################### SET INITIAL POSE BASED ON PBVS #######################
        #### Final Nest Placement Error Calculation ===============================
        # Remember the previous frame's stamp so a new frame is detectable.
        self.last_ros_image_stamp = self.object_commander.ros_image_stamp
        try:
            self.ros_gripper_2_trigger.wait_for_service(timeout=0.1)
            self.ros_gripper_2_trigger()
        except Exception:
            # Best-effort trigger (original behavior), but no longer a bare
            # except: -- SystemExit/KeyboardInterrupt now propagate.
            pass
        wait_count = 0
        while self.object_commander.ros_image is None or \
                self.object_commander.ros_image_stamp == self.last_ros_image_stamp:
            if wait_count > 50:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1
        self.result = self.object_commander.ros_image
        
    
    def move_to_initial_pose(self):
        """Switch the controller to trajectory mode and drive the arm to
        self.initial_pose along a Cartesian path (collisions not avoided)."""
        # Mode change: MODE_AUTO_TRAJECTORY with factor 0.4, no ft thresholds
        # (argument semantics per ControllerCommander).
        self.controller_commander.set_controller_mode(
            self.controller_commander.MODE_AUTO_TRAJECTORY, 0.4, [], [])
        time.sleep(0.5)

        rospy.loginfo("Executing initial path ====================")
        #TODO: Change to trajopt planning
        self.controller_commander.compute_cartesian_path_and_move(
            self.initial_pose, avoid_collisions=False)
    
    def pbvs_to_stage1(self,with_lower):
        """Position-based visual servo loop toward the stage-1 reference.

        Repeats: capture image, detect aruco tags, estimate ground and panel
        board poses, compute the translation/rotation error against the
        loaded stage-1 reference difference, and command a small joint move,
        until the translation error norm drops below 0.06.

        Parameters:
        with_lower -- False: zero the z error (no lowering);
                      True: offset the z error by -0.04 (lowering move).

        Raises Exception if the trajectory action reports a nonzero error code.
        """
        self.controller_commander.set_controller_mode(self.controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [])
         
        # Start with a large error so the loop runs at least once.
        tvec_err = np.array([1000,1000,1000])

        while(np.linalg.norm(tvec_err) > 0.06):
            
            self.take_image() 
            #Detect tag corners in aqcuired image using aruco
            corners, ids, _ = cv2.aruco.detectMarkers(self.result, self.aruco_dict, parameters=self.parameters)    
            #Sort corners and ids according to ascending order of ids
            #rospy.loginfo("tvec difference: %f, %f",corners,ids)
            corners, ids = sort_corners(corners,ids)
            
            # Estimate Poses  
            imgPoints_ground,rvec_ground, tvec_ground, Rca_ground, b_ground = self.get_pose(self.board_ground, corners, ids, self.CamParam)
            imgPoints_panel,rvec_panel, tvec_panel, Rca_panel, b_panel = self.get_pose(self.board_panel, corners, ids, self.CamParam)
            rospy.loginfo("camera params")
            rospy.loginfo(str(self.CamParam.distCoeff))
            rospy.loginfo(str(self.CamParam.camMatrix))
            # Error of the observed ground-panel pose difference vs. the
            # calibrated stage-1 reference difference.
            observed_tvec_difference = tvec_ground-tvec_panel
            observed_rvec_difference = rvec_ground-rvec_panel

            tvec_err = self.loaded_tvec_difference_stage1-observed_tvec_difference
            rvec_err = self.loaded_rvec_difference_stage1-observed_rvec_difference 
            rospy.loginfo("tvec difference: %f, %f, %f",tvec_err[0],tvec_err[1],tvec_err[2])
            rospy.loginfo("rvec difference: %f, %f, %f",rvec_err[0],rvec_err[1],rvec_err[2])

            if(not with_lower):
                tvec_err[2] = 0
            else:
                tvec_err[2] -= 0.04               
            # Translation-only correction, scaled by 0.7 (rotation disabled).
            dx = np.array([0,0,0, -tvec_err[0], tvec_err[1],tvec_err[2]])*0.7
            #dx = np.array([rvec_err[0],rvec_err[1],rvec_err[2], -tvec_err[0], tvec_err[1],tvec_err[2]])*0.7
            # Adjustment
            rospy.loginfo("PBVS to initial Position ====================")
            current_joint_angles = self.controller_commander.get_current_joint_values()
            
            print dx

            # QP resolves the Cartesian correction into joint velocities.
            joints_vel = QP_abbirb6640(np.array(current_joint_angles).reshape(6, 1),np.array(dx))
            goal = self.trapezoid_gen(np.array(current_joint_angles) + joints_vel.dot(1),np.array(current_joint_angles),0.25,np.array(dx))
            #TODO replace with trajopt code
            self.client.wait_for_server()
            self.client.send_goal(goal)
            self.client.wait_for_result()
            res = self.client.get_result()
            if (res.error_code != 0):
                raise Exception("Trajectory execution returned error")

        rospy.loginfo("End of Initial Pose ====================")
        ### End of initial pose 

    def _measure_nest_error(self, banner):
        """Capture an image, detect aruco tags, estimate ground/panel board
        poses, and return (tvec_err, rvec_err) vs. the loaded reference
        difference.  Logs banner and the errors."""
        self.take_image()

        #Detect tag corners in aqcuired image using aruco
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(self.result, self.aruco_dict, parameters=self.parameters)
        #Sort corners and ids according to ascending order of ids
        corners, ids = sort_corners(corners, ids)

        # Estimate Poses
        imgPoints_ground, rvec_ground, tvec_ground, Rca_ground, b_ground = self.get_pose(self.board_ground, corners, ids, self.CamParam)
        imgPoints_panel, rvec_panel, tvec_panel, Rca_panel, b_panel = self.get_pose(self.board_panel, corners, ids, self.CamParam)

        observed_tvec_difference = tvec_ground - tvec_panel
        observed_rvec_difference = rvec_ground - rvec_panel
        rospy.loginfo(banner)
        tvec_err = self.loaded_tvec_difference - observed_tvec_difference
        rvec_err = self.loaded_rvec_difference - observed_rvec_difference
        rospy.loginfo("tvec difference: %f, %f, %f", tvec_err[0], tvec_err[1], tvec_err[2])
        rospy.loginfo("rvec difference: %f, %f, %f", rvec_err[0], rvec_err[1], rvec_err[2])
        return tvec_err, rvec_err

    def final_adjustment(self):
        """Measure the nest placement error, command one small x/y correction
        move, then re-measure and log the residual error.

        Raises Exception if the trajectory action reports a nonzero error code.
        """
        ############################## FINAL ADJUSTMENT ##############################
        rospy.loginfo("Final Adjustment")
        self.controller_commander.set_controller_mode(self.controller_commander.MODE_AUTO_TRAJECTORY, 0.2, [],[])

        # Measurement before the correction move (duplicated logic factored
        # into _measure_nest_error; log strings preserved from the original).
        tvec_err, rvec_err = self._measure_nest_error(
            "============== Difference in nest position (before adjusment)")

        current_joint_angles = self.controller_commander.get_current_joint_values()
        # Translation-only correction in x/y (z and rotation held at zero).
        dx = np.array([0, 0, 0, -tvec_err[0], tvec_err[1], 0])
        joints_vel = QP_abbirb6640(np.array(current_joint_angles).reshape(6, 1), np.array(dx))
        goal = self.trapezoid_gen(np.array(current_joint_angles) + joints_vel.dot(1), np.array(current_joint_angles), 0.25, np.array(dx))
        #TODO replace with trajopt code
        self.client.wait_for_server()
        self.client.send_goal(goal)
        self.client.wait_for_result()
        res = self.client.get_result()
        if (res.error_code != 0):
            raise Exception("Trajectory execution returned error")

        # Measurement after the correction move (logged only).
        self._measure_nest_error(
            "============== Difference in nest position (after adjusment)")


    def release_suction_cups(self):
        """Release the vacuum gripper and lift the tool 0.25 m straight up."""
        ################## RELEASE SUCTION CUPS AND LIFT THE GRIPPER ##################
        rospy.loginfo("============ Release Suction Cups...")
        self.controller_commander.set_controller_mode(
            self.controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [])
        # Drop the vacuum so the payload is released before lifting.
        self.rapid_node.set_digital_io("Vacuum_enable", 0)
        time.sleep(0.5)

        # Build a lift target from the current pose: same orientation,
        # z raised by 0.25 m.
        current_pose_msg = self.controller_commander.get_current_pose_msg()
        orientation = current_pose_msg.pose.orientation
        position = current_pose_msg.pose.position
        quat = [orientation.w, orientation.x, orientation.y, orientation.z]
        translation = [position.x, position.y, position.z]
        lift_target = rox.Transform(
            rox.q2R([quat[0], quat[1], quat[2], quat[3]]), translation)
        lift_target.p[2] += 0.25

        rospy.loginfo("============ Lift gripper...")
        #TODO: Change to trajopt planning
        self.controller_commander.compute_cartesian_path_and_move(
            lift_target, avoid_collisions=False)
        
    def ibvs_placement(self):
        """Image-based visual servo loop with a compliance force term.

        Iterates: capture image, detect tags, build the image Jacobian
        (image_jacobian_gen), solve a camera-space QP for a Cartesian step,
        add a vertical compliance velocity from the F/T error, clamp the
        step size, and execute a joint move -- until the mean pixel errors
        fall below du/dv thresholds (see precedence NOTE on the loop).
        """
        # INPUT: 
        #            Raw image from gripper camera (Cam#636)
        #            Robot state
        #            Panel ID - 
        #                   (1) Desired placement calibration results
        #                   (2) Initial position
        #                   (3) Board info 
        #            F/T sensor data
        #            Camera info    
        # OUTPUT: 
        #            Sequential robot joint command to achieve the desired results
        # PARAMETERS: 
        #            IBVS - 
        #                   (1) Convergence threshold (du_converge_TH, dv_converge_TH, iteration_limit)
        #                   (2) Image Jacobian Gain (Ki)
        #            Complaince controller:
        #                   (1) Desired force set points (F_d_set1, F_d_set2)
        #                   (2) Complaince control gain (Kc)
 
        #test = 0            
        
        ################################# BEGIN IBVS #################################
        self.controller_commander.set_controller_mode(self.controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [])
        step_size_min = self.step_size_min
        loaded_object_points_ground_in_panel_system = self.loaded_object_points_ground_in_panel_system_stage_2 
        du = 100.0
        dv = 100.0     
        
        # Baseline F/T reading and its model estimate, used to zero the sensor.
        self.FTdata_0 = self.FTdata
        self.FTdata_0est = self.compute_ft_est()
        
        # NOTE(review): because `and` binds looser than `|`, this parses as
        # (du>TH) | ((dv>TH) and (iteration<limit)) -- the iteration limit
        # does not bound the loop while du alone exceeds its threshold.
        # Confirm intended before changing.
        while ((du>self.du_converge_TH) | (dv>self.dv_converge_TH) and (self.iteration<self.iteration_limit)):    
            #try changing du and dv to lower values(number of iterations increases)
            self.iteration += 1
        
            #Print current robot pose at the beginning of this iteration
            Cur_Pose = self.controller_commander.get_current_pose_msg()
            
            self.take_image()
            
            #Detect tag corners in aqcuired image using aruco
            corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(self.result, self.aruco_dict, parameters=self.parameters)    
            #Sort corners and ids according to ascending order of ids
            corners, ids = sort_corners(corners,ids)
    
            #check if all tags detected
            if len(ids) >10:#retVal_ground != 0 and retVal_panel !=0:        
                du, dv, J_cam, delta_UV_all = self.image_jacobian_gen(self.result, corners, ids, self.CamParam, self.board_ground,self.board_panel,self.id_start_ground, 
                                                                      self.id_board_row_ground*self.id_board_col_ground,self.tag_ground_size,loaded_object_points_ground_in_panel_system,'Cam0')
       
                # Camera-space QP step from the pixel error (gain Ki).
                dx = QP_Cam(np.dot(J_cam, self.R_Jcam),self.Ki*delta_UV_all)
                dx = dx.reshape([6,1])
       
                # Compliance Force Control
                FTdata_est = self.compute_ft_est()
                if(not self.ft_flag):
                    raise Exception("havent reached callback")
                # Sensor reading zeroed by the baseline and model estimates.
                FTread = self.FTdata - self.FTdata_0 - FTdata_est + self.FTdata_0est
                print '=====================FT============:',FTread[-1], self.FTdata[-1], self.FTdata_0[-1], FTdata_est[-1], self.FTdata_0est[-1]
                # Two-level force set point: switch to the deeper set point
                # once contact force exceeds F_d_set1+50.
                if FTread[-1]> (self.F_d_set1+50):
                    F_d = self.F_d_set1                   
                else:
                    F_d = self.F_d_set2
    
                if (self.FTdata==0).all():
                    rospy.loginfo("FT data overflow")
                else:
                    # Vertical compliance velocity added to the IBVS step.
                    Vz = self.Kc*(F_d - FTread[-1])
                    dx[-1] = dx[-1]+Vz

                current_joint_angles = self.controller_commander.get_current_joint_values()
    
                # Monotonically shrinking step-size clamp.
                step_size_tmp = np.linalg.norm(dx)
                if step_size_tmp <= step_size_min:
                    step_size_min = step_size_tmp
                else:
                    dx = dx/step_size_tmp*step_size_min
    
                joints_vel = QP_abbirb6640(np.array(current_joint_angles).reshape(6, 1),np.array(dx))
    
                goal = self.trapezoid_gen(np.array(current_joint_angles) + joints_vel.dot(self.dt),np.array(current_joint_angles),0.25,np.array(dx))
    
                #TODO: replace with trajopt code
                self.client.wait_for_server()     
                self.client.send_goal(goal)
                self.client.wait_for_result()
                res = self.client.get_result()
                if (res.error_code != 0):
                    raise Exception("Trajectory execution returned error")

                rospy.loginfo('Current Iteration Finished.')


        rospy.loginfo("============= iteration =============")
        rospy.loginfo("iteration: %f",self.iteration)
        
    def pointarray_to_array(self,data):
        """Convert a sequence of point messages into a 3xN numpy array.

        Parameters
        ----------
        data : iterable
            Point-like objects exposing ``x``, ``y``, ``z`` attributes
            (presumably geometry_msgs/Point -- TODO confirm message type).

        Returns
        -------
        numpy.ndarray
            Array of shape (3, len(data)), one column per point;
            shape (3, 0) when ``data`` is empty.
        """
        if len(data) == 0:
            # np.array([]).T would not have the expected 2-D shape.
            return np.zeros((3, 0))
        # Build one [x, y, z] row per point, then transpose to columns.
        return np.array([[p.x, p.y, p.z] for p in data], dtype=float).T
                
        
    def vector3_to_array(self,data):
        """Convert a vector3-like message into a 3x1 numpy column vector.

        Parameters
        ----------
        data : object
            Exposes ``x``, ``y``, ``z`` attributes (presumably
            geometry_msgs/Vector3 -- TODO confirm message type).

        Returns
        -------
        numpy.ndarray
            Column vector of shape (3, 1).
        """
        # Nested single-element lists give the (3, 1) column shape directly.
        return np.asarray([[data.x], [data.y], [data.z]])
    
    # NOTE(review): was marked "NOT FUNCTIONING"; the original routed the
    # name through a throwaway one-element set before the lookup. The
    # lookup itself is unchanged -- verify against a live cv2.aruco build.
    def aruco_dicts(self,aruco_dict_str):
        """Resolve a predefined aruco dictionary name to a cv2 dictionary.

        Parameters
        ----------
        aruco_dict_str : str
            Name of a ``cv2.aruco`` predefined dictionary constant,
            e.g. ``"DICT_4X4_50"``.

        Returns
        -------
        cv2.aruco dictionary object from ``Dictionary_get``.

        Raises
        ------
        ValueError
            If ``aruco_dict_str`` is not an attribute of ``cv2.aruco``.
        """
        if not hasattr(cv2.aruco, aruco_dict_str):
            raise ValueError("Invalid aruco-dict value")
        aruco_dict_id = getattr(cv2.aruco, aruco_dict_str)
        return cv2.aruco.Dictionary_get(aruco_dict_id)
        
    def single_camera_placement(self,data,camera_1_ground,camera_1_place):
        """Run the single-camera panel placement sequence.

        Parses IBVS and compliance-control parameters plus the aruco
        board definitions from the goal message, then executes the
        placement stages (PBVS to stage 1, IBVS placement, final
        adjustment). Aborts the active goal handle on any failure,
        otherwise marks it succeeded.

        Parameters
        ----------
        data
            Placement goal message carrying ``ibvs_parameters``,
            ``compliance_control_parameters``, stage pose/point
            differences and the initial pose.
        camera_1_ground
            Aruco grid-board description of the ground markers
            (markersX/Y, markerLength, markerSpacing, dictionary name,
            firstMarker).
        camera_1_place
            Aruco grid-board description of the panel markers
            (same fields).
        """
        try:
            self.dt = data.ibvs_parameters.IBVSdt
            self.iteration=0
            # Dictionary fields carry names; resolve to cv2.aruco objects.
            aruco_dict_panel = self.aruco_dicts(camera_1_place.dictionary)
            aruco_dict_ground = self.aruco_dicts(camera_1_ground.dictionary)
            self.board_panel = cv2.aruco.GridBoard_create(
                camera_1_place.markersX, camera_1_place.markersY,
                camera_1_place.markerLength, camera_1_place.markerSpacing,
                aruco_dict_panel, camera_1_place.firstMarker)
            self.board_ground = cv2.aruco.GridBoard_create(
                camera_1_ground.markersX, camera_1_ground.markersY,
                camera_1_ground.markerLength, camera_1_ground.markerSpacing,
                aruco_dict_ground, camera_1_ground.firstMarker)

            # Ground-board layout parameters used by the IBVS loop.
            self.id_start_ground=camera_1_ground.firstMarker
            self.id_board_row_ground=camera_1_ground.markersX
            self.id_board_col_ground=camera_1_ground.markersY
            self.tag_ground_size=camera_1_ground.markerLength
            self.loaded_object_points_ground_in_panel_system_stage_2 = self.pointarray_to_array(data.point_difference_stage2)

            # --- Ideal pose difference information from the goal message
            self.loaded_rvec_difference_stage1 = self.vector3_to_array(data.rvec_difference_stage1)
            self.loaded_tvec_difference_stage1 = self.vector3_to_array(data.tvec_difference_stage1)
            # Offset the stage-1 target along the second axis; presumably a
            # standoff before final placement -- TODO confirm magic 0.03.
            self.loaded_tvec_difference_stage1[1]+=0.03
            self.loaded_rvec_difference = self.vector3_to_array(data.rvec_difference_stage2)
            self.loaded_tvec_difference = self.vector3_to_array(data.tvec_difference_stage2)

            # IBVS convergence thresholds / gains
            self.du_converge_TH = data.ibvs_parameters.du_converge_th
            self.dv_converge_TH = data.ibvs_parameters.dv_converge_th
            self.iteration_limit = data.ibvs_parameters.iteration_limit
            self.Ki = data.ibvs_parameters.Ki
            # Compliance controller parameters
            self.F_d_set1 = data.compliance_control_parameters.F_d_set1
            self.F_d_set2 = data.compliance_control_parameters.F_d_set2
            self.Kc = data.compliance_control_parameters.Kc
            self.initial_pose=data.initial
            self.step_size_min = data.ibvs_parameters.step_size_min
        except Exception as err:
            rospy.loginfo("Input values for placement controller are invalid "+str(err))
            traceback.print_exc()
            res = PlacementStepResult()
            res.state="Error"
            res.error_msg=str(err)
            # Abort with the result message (was str(err), inconsistent
            # with the handler below).
            self.goal_handle.set_aborted(res)
            # BUG FIX: previously fell through, running the placement
            # stages -- and eventually set_succeeded -- on an aborted goal.
            return

        try:
            self.pbvs_to_stage1(False)
            self.pbvs_to_stage1(True)
            self.ibvs_placement()
            self.final_adjustment()
            # NOTE(review): suction-cup release is intentionally skipped
            # here (release_suction_cups not called) -- TODO confirm.
        except Exception as err:
            traceback.print_exc()
            rospy.loginfo("Placement controller failed with error: "+str(err))
            res = PlacementStepResult()
            res.state="Error"
            res.error_msg=str(err)
            self.goal_handle.set_aborted(res)
            # BUG FIX: previously continued to set_succeeded after abort.
            return

        self.goal_handle.set_succeeded(None)
        
        
    def two_camera_placement(self,data,camera_1_ground,camera_1_place,camera_2_ground,camera_2_place):
        """Run the two-camera panel placement sequence.

        Mirrors ``single_camera_placement`` but builds aruco boards for a
        second camera as well. The PBVS/IBVS stages are currently disabled
        on this path: only the initial move, final adjustment and suction
        cup release are executed.

        Parameters
        ----------
        data
            Placement goal message (ibvs_parameters,
            compliance_control_parameters, pose/point differences,
            initial pose).
        camera_1_ground, camera_1_place, camera_2_ground, camera_2_place
            Aruco grid-board descriptions (markersX/Y, markerLength,
            markerSpacing, dictionary name, firstMarker) per camera.
        """
        self.dt = data.ibvs_parameters.IBVSdt
        self.iteration=0
        # BUG FIX: GridBoard_create needs a cv2.aruco dictionary object;
        # the goal carries the dictionary *name*, so resolve it first
        # (as single_camera_placement does).
        aruco_dict_panel = self.aruco_dicts(camera_1_place.dictionary)
        aruco_dict_ground = self.aruco_dicts(camera_1_ground.dictionary)
        aruco_dict_panel2 = self.aruco_dicts(camera_2_place.dictionary)
        aruco_dict_ground2 = self.aruco_dicts(camera_2_ground.dictionary)
        self.board_panel = cv2.aruco.GridBoard_create(
            camera_1_place.markersX, camera_1_place.markersY,
            camera_1_place.markerLength, camera_1_place.markerSpacing,
            aruco_dict_panel, camera_1_place.firstMarker)
        self.board_ground = cv2.aruco.GridBoard_create(
            camera_1_ground.markersX, camera_1_ground.markersY,
            camera_1_ground.markerLength, camera_1_ground.markerSpacing,
            aruco_dict_ground, camera_1_ground.firstMarker)
        self.board_panel2 = cv2.aruco.GridBoard_create(
            camera_2_place.markersX, camera_2_place.markersY,
            camera_2_place.markerLength, camera_2_place.markerSpacing,
            aruco_dict_panel2, camera_2_place.firstMarker)
        self.board_ground2 = cv2.aruco.GridBoard_create(
            camera_2_ground.markersX, camera_2_ground.markersY,
            camera_2_ground.markerLength, camera_2_ground.markerSpacing,
            aruco_dict_ground2, camera_2_ground.firstMarker)
        self.loaded_object_points_ground_in_panel_system_stage_2 = self.pointarray_to_array(data.point_difference_stage2)
        # --- Ideal pose difference information from the goal message
        self.loaded_rvec_difference_stage1 = self.vector3_to_array(data.rvec_difference_stage1)
        self.loaded_tvec_difference_stage1 = self.vector3_to_array(data.tvec_difference_stage1)
        # Offset the stage-1 target along the second axis; presumably a
        # standoff before final placement -- TODO confirm magic 0.03.
        self.loaded_tvec_difference_stage1[1]+=0.03
        self.loaded_rvec_difference = self.vector3_to_array(data.rvec_difference_stage2)
        self.loaded_tvec_difference = self.vector3_to_array(data.tvec_difference_stage2)

        # IBVS convergence thresholds / gains
        self.du_converge_TH = data.ibvs_parameters.du_converge_th
        self.dv_converge_TH = data.ibvs_parameters.dv_converge_th
        self.iteration_limit = data.ibvs_parameters.iteration_limit
        self.Ki = data.ibvs_parameters.Ki
        # Compliance controller parameters
        self.F_d_set1 = data.compliance_control_parameters.F_d_set1
        self.F_d_set2 = data.compliance_control_parameters.F_d_set2
        self.Kc = data.compliance_control_parameters.Kc
        self.initial_pose=data.initial
        self.step_size_min = data.ibvs_parameters.step_size_min
        self.move_to_initial_pose()
        # NOTE(review): pbvs_to_stage1 / ibvs_placement are disabled on
        # the two-camera path -- TODO confirm this is intentional.
        self.final_adjustment()
        self.release_suction_cups()
        self.goal_handle.set_succeeded(None)