# Imports assumed by this excerpt: RoboCompCameraRGBDSimple and
# RoboCompKinovaArm are RoboComp-generated interface modules, and
# GenericWorker / TimeControl come from the component scaffold.
import time

import cv2
import numpy as np
from pyrep import PyRep
from pyrep.objects.dummy import Dummy
from pyrep.objects.shape import Shape
from pyrep.objects.vision_sensor import VisionSensor


class SpecificWorker(GenericWorker):
    def __init__(self, proxy_map):
        super(SpecificWorker, self).__init__(proxy_map)
        self.pinza = False

    def __del__(self):
        print('SpecificWorker destructor')

    def setParams(self, params):

        SCENE_FILE = '../etc/gen3-robolab.ttt'
        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=False)
        self.pr.start()
        self.mode = 0
        self.bloqueo = True

        #self.robot = Viriato()
        #self.robot = YouBot()
        self.robot_object = Shape("gen3")

        self.cameras = {}
        self.camera_arm_name = "camera_arm"
        cam = VisionSensor(self.camera_arm_name)
        self.cameras[self.camera_arm_name] = {
            "handle": cam,
            "id": 0,
            "angle": np.radians(cam.get_perspective_angle()),
            "width": cam.get_resolution()[0],
            "height": cam.get_resolution()[1],
            "focal": (cam.get_resolution()[0] / 2) /
                     np.tan(np.radians(cam.get_perspective_angle() / 2)),
            "rgb": np.array(0),
            "depth": np.ndarray(0)
        }
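        # "focal" is the pinhole focal length in pixels:
        # focal = (w / 2) / tan(fov / 2) for image width w and horizontal field
        # of view fov; e.g. w = 640 px and fov = 60 deg give 320 / tan(30 deg),
        # roughly 554 px.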

        self.joystick_newdata = []
        self.last_received_data_time = 0

    def compute(self):
        tc = TimeControl(0.05)

        while True:
            self.pr.step()
            self.read_joystick()
            self.read_camera(self.cameras[self.camera_arm_name])

            # pr.script_call takes "functionName@scriptName": "close@RG2" calls
            # close() in the script attached to the RG2 gripper; the second
            # argument is the script type (1 = child script).
            if self.pinza:
                self.pr.script_call("close@RG2", 1)
            else:
                self.pr.script_call("open@RG2", 1)

            tc.wait()

    ###########################################
    def read_camera(self, cam):
        image_float = cam["handle"].capture_rgb()
        depth = cam["handle"].capture_depth(True)
        image = cv2.normalize(src=image_float,
                              dst=None,
                              alpha=0,
                              beta=255,
                              norm_type=cv2.NORM_MINMAX,
                              dtype=cv2.CV_8U)
        cam["rgb"] = RoboCompCameraRGBDSimple.TImage(cameraID=cam["id"],
                                                     width=cam["width"],
                                                     height=cam["height"],
                                                     depth=3,
                                                     focalx=cam["focal"],
                                                     focaly=cam["focal"],
                                                     alivetime=time.time(),
                                                     image=image.tobytes())
        cam["depth"] = RoboCompCameraRGBDSimple.TDepth(
            cameraID=cam["id"],
            width=cam["handle"].get_resolution()[0],
            height=cam["handle"].get_resolution()[1],
            focalx=cam["focal"],
            focaly=cam["focal"],
            alivetime=time.time(),
            depthFactor=1.0,
            depth=depth.tobytes())

    ###########################################
    ### JOYSTICK read and move the robot
    ###########################################
    def read_joystick(self):
        if self.joystick_newdata:
            print(self.joystick_newdata)
            self.update_joystick(self.joystick_newdata[0])
            self.joystick_newdata = None
            self.last_received_data_time = time.time()
        else:
            elapsed = time.time() - self.last_received_data_time
            if 2 < elapsed < 3:
                pass  # placeholder: no joystick-timeout behaviour implemented

    def update_joystick(self, datos):
        adv = advR = 0.0
        rot = rotR = 0.0
        side = sideR = 0.0
        print(datos.buttons)
        for x in datos.buttons:
            if x.name == "mode":
                # toggle between translation (mode 0) and rotation (mode 1)
                self.mode += x.step
                self.bloqueo = (self.mode % 2 == 1)
        for x in datos.axes:
            print(x.name + " " + str(x.value))
            # axis dead-zones: X/Z compare against a wider threshold than Y
            if x.name == "X_axis":
                adv = advR = x.value if np.abs(x.value) > 1 else 0
            if x.name == "Y_axis":
                rot = rotR = x.value if np.abs(x.value) > 0.01 else 0
            if x.name == "Z_axis":
                side = sideR = x.value if np.abs(x.value) > 1 else 0
            if x.name == "gripper":
                if x.value > 1:
                    self.pr.script_call("open@RG2", 1)
                    print("opening")
                if x.value < -1:
                    print("closing")
                    self.pr.script_call("close@RG2", 1)
        # apply the accumulated axis values once per joystick update,
        # after every axis has been read
        dummy = Dummy("target")
        parent_frame_object = None
        position = dummy.get_position()
        orientation = dummy.get_orientation()
        if not self.bloqueo:
            print("mode 0\n\n")
            dummy.set_position([
                position[0] + adv / 1000, position[1] + rot / 1000,
                position[2] + side / 1000
            ], parent_frame_object)
        else:
            print("mode 1\n\n")
            dummy.set_orientation([
                orientation[0] + advR / 1000, orientation[1] + rotR / 1000,
                orientation[2] + sideR / 1000
            ], parent_frame_object)
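
    # Control scheme (assuming the scene's IK rig tracks the "target" dummy):
    # the arm follows that dummy continuously, so the joystick only nudges the
    # dummy's position or orientation by value / 1000 per tick and the
    # simulator's IK solver does the rest.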

    ##################################################################################
    # SUBSCRIPTION to sendData method from JoystickAdapter interface
    ###################################################################################
    def JoystickAdapter_sendData(self, data):
        self.joystick_newdata = [data, time.time()]

    ##################################################################################
    #                       Methods for CameraRGBDSimple
    # ===============================================================================
    #
    # getAll
    #
    def CameraRGBDSimple_getAll(self, camera):
        return RoboCompCameraRGBDSimple.TRGBD(self.cameras[camera]["rgb"],
                                              self.cameras[camera]["depth"])

    #
    # getDepth
    #
    def CameraRGBDSimple_getDepth(self, camera):
        return self.cameras[camera]["depth"]

    #
    # getImage
    #
    def CameraRGBDSimple_getImage(self, camera):
        return self.cameras[camera]["rgb"]

    # ===================================================================
    # CoppeliaUtils
    # ===================================================================
    def CoppeliaUtils_addOrModifyDummy(self, type, name, pose):
        # create the dummy on first use, then (re)apply the requested pose
        if not Dummy.exists(name):
            dummy = Dummy.create(0.1)
            dummy.set_name(name)
        else:
            dummy = Dummy(name)
        parent_frame_object = Shape("gen3")
        #print("Coppelia ", name, pose.x/1000, pose.y/1000, pose.z/1000)
        #dummy.set_position([pose.x / 1000., pose.y / 1000., pose.z / 1000.], parent_frame_object)
        dummy.set_position([pose.x, pose.y, pose.z], parent_frame_object)
        print(pose.x, pose.y, pose.z)
        print(dummy.get_position())
        dummy.set_orientation([pose.rx, pose.ry, pose.rz],
                              parent_frame_object)

    def KinovaArm_getCenterOfTool(self, referencedTo):
        ret = RoboCompKinovaArm.TPose()
        parent_frame_object = Shape('gen3')
        tip = Dummy("tip")
        pos = tip.get_position(parent_frame_object)
        rot = tip.get_orientation(parent_frame_object)
        ret.x = pos[0]
        ret.y = pos[1]
        ret.z = pos[2]
        ret.rx = rot[0]
        ret.ry = rot[1]
        ret.rz = rot[2]
        return ret

    def KinovaArm_openGripper(self):
        #self.pr.script_call("open@RG2", 1)
        print("Opening gripper")
        self.pinza = False

    def KinovaArm_closeGripper(self):
        #self.pr.script_call("close@RG2", 1)
        print("Closing gripper")
        self.pinza = True

    #
    # IMPLEMENTATION of setCenterOfTool method from KinovaArm interface
    #
    def KinovaArm_setCenterOfTool(self, pose, referencedTo):
        target = Dummy("target")
        parent_frame_object = Shape('gen3')
        position = target.get_position(parent_frame_object)
        #target.set_position([position[0] + pose.x / 1000, position[1] + pose.y / 1000, position[2] + pose.z / 1000], parent_frame_object)
        target.set_position([
            position[0] + pose.x / 1000, position[1] + pose.y / 1000,
            position[2] + pose.z / 1000
        ], parent_frame_object)

    def KinovaArm_setPosition(self, pose, referencedTo):
        target = Dummy("target")
        parent_frame_object = Shape('gen3')
        target.set_position([pose.x, pose.y, pose.z], parent_frame_object)
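
    # Minimal driving sketch (hypothetical; in a real RoboComp component the
    # generated main program builds the worker and its proxy map):
    #
    #   worker = SpecificWorker(proxy_map)
    #   worker.setParams({})
    #   worker.compute()   # blocks in the simulation loop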

# ============================== Example 2 ==============================

# Imports assumed by this excerpt: RoboCompCameraRGBDSimple is a
# RoboComp-generated interface module, GenericWorker and the
# objectposeestimationrgb(d)_proxy attributes come from the component
# scaffold, and vrepConst is assumed to be the constants module bundled
# with the PyRep backend used by these components.
import os
import pickle
from random import randint

import cv2
import numpy as np
from scipy.spatial.transform import Rotation as R
from pyrep import PyRep
from pyrep.backend import vrepConst
from pyrep.objects.dummy import Dummy
from pyrep.objects.joint import Joint
from pyrep.objects.shape import Shape
from pyrep.objects.vision_sensor import VisionSensor

class SpecificWorker(GenericWorker):
    def __init__(self, proxy_map):
        super(SpecificWorker, self).__init__(proxy_map)

    def __del__(self):
        print('SpecificWorker destructor')
        self.pr.stop()
        self.pr.shutdown()

    def setParams(self, params):
        SCENE_FILE = params["scene_dir"]
        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=False)
        self.pr.start()

        self.cameras = {}
        cam = VisionSensor("Camera_Shoulder")
        self.cameras["Camera_Shoulder"] = {
            "handle": cam,
            "id": 1,
            "angle": np.radians(cam.get_perspective_angle()),
            "width": cam.get_resolution()[0],
            "height": cam.get_resolution()[1],
            # note: unlike example 1, this is w / tan(fov) rather than the
            # pinhole (w / 2) / tan(fov / 2)
            "focal": cam.get_resolution()[0] /
                     np.tan(np.radians(cam.get_perspective_angle())),
            "position": cam.get_position(),
            "rotation": cam.get_quaternion(),
            "image_rgb": np.array(0),
            "image_rgbd": np.array(0),
            "depth": np.ndarray(0)
        }

        self.grasping_objects = {}
        can = Shape("can")
        self.grasping_objects["002_master_chef_can"] = {
            "handle": can,
            "sim_pose": None,
            "pred_pose_rgb": None,
            "pred_pose_rgbd": None
        }

        with open("objects_pcl.pickle", "rb") as file:
            self.object_pcl = pickle.load(file)

        shoulder_cam = self.cameras["Camera_Shoulder"]
        self.intrinsics = np.array(
            [[shoulder_cam["focal"], 0.0, shoulder_cam["width"] / 2.0],
             [0.0, shoulder_cam["focal"], shoulder_cam["height"] / 2.0],
             [0.0, 0.0, 1.0]])

        self.arm_ops = {
            "MoveToHome": 1,
            "MoveToObj": 2,
            "CloseGripper": 3,
            "OpenGripper": 4
        }
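        # these integer codes are the contract with the lua child script
        # embedded in the gen3 arm model: move_arm() and move_gripper() send
        # one of them via pr.script_call("setFunction@gen3", ..., ints=[code])
        # and the script dispatches to the matching motion routine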

        self.grasping_iter = 10

        self.arm_base = Shape("gen3")
        self.arm_target = Dummy("target")
        self.gripper = Joint("RG2_openCloseJoint")

    def compute(self):
        print('SpecificWorker.compute...')
        try:
            self.pr.step()

            # open the arm gripper
            self.move_gripper(self.arm_ops["OpenGripper"])

            # read arm camera RGB signal
            cam = self.cameras["Camera_Shoulder"]
            image_float = cam["handle"].capture_rgb()
            depth = cam["handle"].capture_depth(in_meters=False)
            image = cv2.normalize(src=image_float,
                                  dst=None,
                                  alpha=0,
                                  beta=255,
                                  norm_type=cv2.NORM_MINMAX,
                                  dtype=cv2.CV_8U)
            cam["image_rgb"] = RoboCompCameraRGBDSimple.TImage(
                width=cam["width"],
                height=cam["height"],
                depth=3,
                focalx=cam["focal"],
                focaly=cam["focal"],
                image=image.tobytes())
            cam["image_rgbd"] = RoboCompCameraRGBDSimple.TImage(
                width=cam["width"],
                height=cam["height"],
                depth=3,
                focalx=cam["focal"],
                focaly=cam["focal"],
                image=image.tobytes())
            cam["depth"] = RoboCompCameraRGBDSimple.TDepth(
                width=cam["width"],
                height=cam["height"],
                depthFactor=1.0,
                depth=depth.tobytes())

            # get objects' poses from the simulator
            for obj_name in self.grasping_objects.keys():
                obj = self.grasping_objects[obj_name]
                obj["sim_pose"] = obj["handle"].get_pose()

            # get objects' poses from RGB
            pred_poses = self.objectposeestimationrgb_proxy.getObjectPose(
                cam["image_rgb"])
            self.visualize_poses(image_float, pred_poses, "rgb_pose.png")
            for pose in pred_poses:
                if pose.objectname in self.grasping_objects.keys():
                    obj_trans = [pose.x, pose.y, pose.z]
                    obj_quat = [pose.qx, pose.qy, pose.qz, pose.qw]
                    obj_pose = self.process_pose(obj_trans, obj_quat)
                    self.grasping_objects[pose.objectname]["pred_pose_rgb"] = obj_pose

            # get objects' poses from RGBD
            pred_poses = self.objectposeestimationrgbd_proxy.getObjectPose(
                cam["image_rgbd"], cam["depth"])
            self.visualize_poses(image_float, pred_poses, "rgbd_pose.png")
            for pose in pred_poses:
                if pose.objectname in self.grasping_objects.keys():
                    obj_trans = [pose.x, pose.y, pose.z]
                    obj_quat = [pose.qx, pose.qy, pose.qz, pose.qw]
                    obj_pose = self.process_pose(obj_trans, obj_quat)
                    self.grasping_objects[pose.objectname]["pred_pose_rgbd"] = obj_pose

            # create a dummy for arm path planning
            approach_dummy = Dummy.create()
            approach_dummy.set_name("approach_dummy")

            # initialize approach dummy in embedded lua scripts
            call_ret = self.pr.script_call(
                "initDummy@gen3", vrepConst.sim_scripttype_childscript)

            # set object pose for the arm to follow
            # NOTE : choose simulator or predicted pose
            dest_pose = self.grasping_objects["002_master_chef_can"]["pred_pose_rgbd"]
            # add a small offset along the z-axis to grasp the object's top
            dest_pose[2] += 0.04

            # set dummy pose with the pose of object to be grasped
            approach_dummy.set_pose(dest_pose)

            # move gen3 arm to the object
            self.move_arm(approach_dummy, self.arm_ops["MoveToObj"])

            # close the arm gripper
            self.move_gripper(self.arm_ops["CloseGripper"])

            # change approach dummy pose to the final destination pose
            dest_pose[2] += 0.4
            approach_dummy.set_pose(dest_pose)

            # move gen3 arm to the final destination
            self.move_arm(approach_dummy, self.arm_ops["MoveToObj"])

            # remove the created approach dummy
            approach_dummy.remove()

        except Exception as e:
            print(e)
        return True

    def process_pose(self, obj_trans, obj_rot):
        # convert an object pose from camera frame to world frame
        # define camera pose and z-axis flip matrix
        cam_trans = self.cameras["Camera_Shoulder"]["position"]
        cam_rot_mat = R.from_quat(self.cameras["Camera_Shoulder"]["rotation"])
        z_flip = R.from_matrix(np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]))
        # get object position in world coordinates
        obj_trans = np.dot(cam_rot_mat.as_matrix(),
                           np.dot(z_flip.as_matrix(),
                                  np.array(obj_trans).reshape(-1)))
        final_trans = obj_trans + cam_trans
        # get object orientation in world coordinates
        obj_rot_mat = R.from_quat(obj_rot)
        final_rot_mat = obj_rot_mat * z_flip * cam_rot_mat
        final_rot = final_rot_mat.as_quat()
        # return final object pose in world coordinates
        final_pose = list(final_trans)
        final_pose.extend(list(final_rot))
        return final_pose
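
    # In matrix form, with F the z-flip and (R_c, t_c) the shoulder camera's
    # rotation and position: t_world = R_c @ (F @ t_obj) + t_c. The final
    # orientation is the scipy composition obj_rot * z_flip * cam_rot, which
    # applies right to left on column vectors.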

    def visualize_poses(self, image, poses, img_name):
        # visualize the predicted poses on RGB image
        image = np.uint8(image * 255.0)
        for pose in poses:
            # visualize only defined objects
            if pose.objectname not in self.grasping_objects.keys():
                continue
            obj_pcl = self.object_pcl[pose.objectname]
            obj_trans = np.array([pose.x, pose.y, pose.z])
            if img_name == "rgb_pose.png":
                obj_trans[2] -= 0.2
            obj_rot = R.from_quat([pose.qx, pose.qy, pose.qz, pose.qw]).as_matrix()
            proj_pcl = self.vertices_reprojection(obj_pcl, obj_rot, obj_trans,
                                                  self.intrinsics)
            color = (randint(0, 255), randint(0, 255), randint(0, 255))
            image = self.draw_pcl(image, proj_pcl, r=1, color=color)
        cv2.imwrite(os.path.join("output", img_name), image)

    def vertices_reprojection(self, vertices, r, t, k):
        # re-project vertices in pixel space
        p = np.matmul(k, np.matmul(r, vertices.T) + t.reshape(-1, 1))
        p[0] = p[0] / (p[2] + 1e-5)
        p[1] = p[1] / (p[2] + 1e-5)
        return p[:2].T
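
    # This is the standard pinhole projection: [u, v, w]^T = K (R X + t),
    # with pixel coordinates (u / w, v / w); the 1e-5 guards against division
    # by zero for points on the camera plane.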

    def draw_pcl(self, img, p2ds, r=1, color=(255, 0, 0)):
        # draw object point cloud on RGB image
        h, w = img.shape[0], img.shape[1]
        for pt_2d in p2ds:
            pt_2d[0] = np.clip(pt_2d[0], 0, w - 1)
            pt_2d[1] = np.clip(pt_2d[1], 0, h - 1)
            img = cv2.circle(img, (int(pt_2d[0]), int(pt_2d[1])), r, color, -1)
        return img

    def move_arm(self, dummy_dest, func_number):
        # move arm to destination
        # NOTE : this function is using remote lua scripts embedded in the arm
        # for better path planning, so make sure to use the correct arm model
        call_function = True
        init_pose = np.array(
            self.arm_target.get_pose(relative_to=self.arm_base))
        # loop until the arm reached the object
        while True:
            # step the simulation
            self.pr.step()
            # set function index to the desired operation
            if call_function:
                try:
                    # call threaded child lua scripts via PyRep
                    call_ret = self.pr.script_call(
                        "setFunction@gen3",
                        vrepConst.sim_scripttype_childscript,
                        ints=[func_number])
                except Exception as e:
                    print(e)
            # get current poses to compare
            actual_pose = self.arm_target.get_pose(relative_to=self.arm_base)
            object_pose = dummy_dest.get_pose(relative_to=self.arm_base)
            # compare poses to check for operation end
            pose_diff = np.abs(np.array(actual_pose) - np.array(init_pose))
            if call_function and (pose_diff[0] > 0.01 or pose_diff[1] > 0.01
                                  or pose_diff[2] > 0.01):
                call_function = False
            # check whether the arm reached the target
            dest_pose_diff = np.abs(np.array(actual_pose) - np.array(object_pose))
            if (dest_pose_diff[0] < 0.015 and dest_pose_diff[1] < 0.015
                    and dest_pose_diff[2] < 0.015):
                break
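
    # Design note: call_function stays True, re-sending the command every
    # step, until the target pose visibly diverges from init_pose (i.e. the
    # lua script demonstrably picked the command up); after that the loop
    # just polls until the arm is within 1.5 cm of the destination dummy.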

    def move_gripper(self, func_number):
        # open or close the arm gripper
        # NOTE : this function is using remote lua scripts embedded in the arm
        # for better path planning, so make sure to use the correct arm model
        call_function = True
        open_percentage = init_position = self.gripper.get_joint_position()
        # loop (at most self.grasping_iter steps) until the gripper stops moving
        for _ in range(self.grasping_iter):
            # step the simulation
            self.pr.step()
            # set function index to the desired operation
            if call_function:
                try:
                    # call threaded child lua scripts via PyRep
                    call_ret = self.pr.script_call(
                        "setFunction@gen3",
                        vrepConst.sim_scripttype_childscript,
                        ints=[func_number])
                except Exception as e:
                    print(e)
                # compare the gripper position to determine whether the gripper moved
                if abs(self.gripper.get_joint_position() -
                       init_position) > 0.005:
                    call_function = False
            # once the command has been picked up, stop when the opening stops
            # changing between consecutive steps (fully open or fully closed)
            if not call_function and abs(
                    open_percentage - self.gripper.get_joint_position()) < 0.003:
                break
            # update the opening percentage
            open_percentage = self.gripper.get_joint_position()