def __init__(self, robot):
    super(NikPathBase, self).__init__(robot.get_object_name(robot._handle))
    # Motion planning handles
    self.intermediate_target_base = Dummy.create()
    self.target_base = Dummy.create()
    self._path_done = False
    self._collision_collection = self.create_collection('Collection'.encode('ascii'))
    # Robot parameters and handle
    self.target_z = self.target_base.get_position()[-1]
    # Make sure the dummies are orphans if loaded with a ttm
    self.intermediate_target_base.set_parent(None)
    self.target_base.set_parent(None)
    # Path-following state
    self.cummulative_error = 0
    self.prev_error = 0
    # PID controller gains
    self.Kp = 1.0
    self.Ki = 0.01
    self.Kd = 0.1
    self.desired_velocity = 0.05
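# A minimal sketch (not part of the original class) of how the gains above
# might drive a path-following update each simulation step. `path_error`
# (the robot's deviation from the path) and the method name are hypothetical;
# only the attributes come from the constructor above.
def _pid_velocity(self, path_error, dt):
    self.cummulative_error += path_error * dt            # integral term
    derivative = (path_error - self.prev_error) / dt     # derivative term
    self.prev_error = path_error
    correction = (self.Kp * path_error
                  + self.Ki * self.cummulative_error
                  + self.Kd * derivative)
    return self.desired_velocity + correction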
def create_object(obj_path, scaling_factor=0.001):
    obj_name, ext = get_file_name(obj_path)
    if ext != ".obj":
        print("[ERROR] please check obj file {}".format(obj_path))
        exit()
    obj = Shape.import_mesh(obj_path, scaling_factor=scaling_factor)
    frame = Dummy.create()
    frame.set_parent(obj)
    instruction_frame = Dummy.create()
    instruction_frame.set_position([0, 0, 0], relative_to=obj)
    instruction_frame.set_parent(obj)
    return ObjObject(obj, frame, instruction_frame)
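# Hypothetical usage of create_object (the mesh path is an assumption;
# scaling_factor=0.001 converts a millimetre-unit mesh to metres):
part = create_object("meshes/bracket.obj", scaling_factor=0.001)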
def get_waypoints_esf(self, wp_params: np.array):
    # wp_params holds two (radius, theta) pairs; "radio"/"tita" are
    # Spanish for radius/theta.
    radio1 = wp_params[0]
    tita1 = wp_params[1]
    # Polar offset in the XY plane, relative to the task's initial position
    pos1_rel = np.array([radio1 * np.sin(tita1), radio1 * np.cos(tita1), 0])
    pos1_abs = pos1_rel + self.task.initial_pos.get_position()
    waypoint1 = Dummy.create()
    waypoint1.set_position(pos1_abs)
    # Second waypoint: another polar offset, chained from the first waypoint
    radio2 = wp_params[2]
    tita2 = wp_params[3]
    pos2_rel = np.array([radio2 * np.sin(tita2), radio2 * np.cos(tita2), 0])
    pos2_abs = pos2_rel + pos1_abs
    waypoint2 = Dummy.create()
    waypoint2.set_position(pos2_abs)
    return waypoint1, waypoint2
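# Standalone check of the polar-offset math above (illustrative values only).
# theta ("tita") is measured from the +Y axis, so sin -> x and cos -> y:
import numpy as np

radius, theta = 0.2, np.pi / 2
offset = np.array([radius * np.sin(theta), radius * np.cos(theta), 0])
print(offset)  # ~[0.2, 0.0, 0.0]: a 0.2 m step along +X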
def _load_task_to_scene(self):
    self.scene.unload()
    self.task = self.task_class(self.pr, self.robot)  # task constructor
    try:
        # Try to load the task
        self.scene.load(self.task)
    except FileNotFoundError:
        # The .ttt file does not exist yet, so create a model base for it
        handle = Dummy.create()
        handle.set_name(self.task_file.replace('.py', ''))
        handle.set_model(True)
        # Put the dummy at the centre of the workspace
        self.task.get_base().set_position(Shape('workspace').get_position())
def CoppeliaUtils_addOrModifyDummy(self, type, name, pose):
    if not Dummy.exists(name):
        dummy = Dummy.create(0.1)
        dummy.set_name(name)
    else:
        # Reuse the existing dummy with this name
        dummy = Dummy(name)
    # The pose is expressed relative to the gen3 arm's frame
    parent_frame_object = Shape("gen3")
    dummy.set_position([pose.x, pose.y, pose.z], parent_frame_object)
    dummy.set_orientation([pose.rx, pose.ry, pose.rz], parent_frame_object)
def __init__(self, scene: str = "room.ttt", dt: float = 0.05):
    """A class constructor.

    Args:
        scene: Name of the scene file to load.
        dt: Delta time of the simulation.
    """
    super(NavigationEnv, self).__init__(scene=scene, dt=dt, headless_mode=False)
    self._goal_threshold = 0.05
    self._collision_dist = 0.05
    self._robot = SmartBot(enable_vision=self.enable_vision)
    self._obstacles = sim.simGetObjectHandle("Obstacles_visual")
    self._navigation = self.navigation_type(self._robot, dt)

    self._goal = Dummy.create(size=0.1)
    self._goal.set_renderable(True)
    self._goal.set_name("Goal")

    # Differential-drive limits: linear v = r*(wl+wr)/2, angular w = r*(wr-wl)/d
    max_linear_vel = (
        self._robot.wheel_radius * 2 * self._robot.velocity_limit[1] / 2
    )
    max_angular_vel = (
        self._robot.wheel_radius
        / self._robot.wheel_distance
        * np.diff(self._robot.velocity_limit)
    )[0]

    self.action_space = spaces.Box(
        *self._robot.velocity_limit,
        shape=self._robot.velocity_limit.shape,
        dtype="float32")

    low = self._get_lower_observation(max_angular_vel)
    high = self._get_upper_observation(max_linear_vel, max_angular_vel)

    if self.enable_vision:
        self.observation_space = spaces.Dict(dict(
            image=spaces.Box(low=0, high=255,
                             shape=self._robot.image.shape, dtype=np.uint8),
            scalars=spaces.Box(low=low, high=high, dtype=np.float32),
        ))
    else:
        self.observation_space = spaces.Box(low=low, high=high, dtype=np.float32)
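# Hypothetical usage, assuming NavigationEnv implements the standard gym
# reset/step API on top of this constructor (reset/step are not shown above):
env = NavigationEnv(scene="room.ttt", dt=0.05)
obs = env.reset()
for _ in range(100):
    action = env.action_space.sample()           # random wheel velocities
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()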
def render(self, mode='human'):
    '''gym render function.

    To render the simulator during simulation, call render(mode='human')
    once. To create rgb pictures, call the function every time you want
    to render a frame.
    '''
    if self._gym_cam is None:
        # Add the camera to the scene
        cam_placeholder = Dummy.create()
        cam_placeholder.set_pose([0, -0.5, 5, 1, 0, 0, 0])
        self._gym_cam = VisionSensor.create([640, 360])
        self._gym_cam2 = VisionSensor('Vision_sensor')
        self._gym_cam.set_pose(cam_placeholder.get_pose())
        self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
        self._gym_cam2.set_render_mode(RenderMode.OPENGL3_WINDOWED)
        if mode == "rgb_array":
            self._gym_cam.set_render_mode(RenderMode.OPENGL3)
    if mode == "rgb_array":
        return self._gym_cam.capture_rgb()
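# Hypothetical usage: 'human' opens a windowed view once and keeps rendering,
# while 'rgb_array' must be called per frame and returns the captured image:
env.render(mode='human')                # call once during the episode
frame = env.render(mode='rgb_array')    # numpy array from capture_rgb()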
def _create_connection_points(self, connection_locations):
    connection_points = []
    for idx, location in enumerate(connection_locations):
        connection_point = Dummy.create()
        rand = float(np.random.rand())
        dummy_name = "connection_point_{0:04f}_".format(rand).replace(".", "")
        dummy_name += str(idx)
        connection_point.set_name(dummy_name)
        # Relative to the bounding box centre
        # TODO: connection frame
        connection_point.set_position(location, relative_to=self.base_obj.frame)
        connection_point.set_parent(self.base_obj.frame)
        connection_points.append(connection_point)
    return tuple(connection_points)
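# Hypothetical usage of the helper above, inside the same class (the offsets
# are illustrative; units are metres relative to the base object's frame):
points = self._create_connection_points([[0.05, 0.0, 0.0], [-0.05, 0.0, 0.0]])
for p in points:
    print(p.get_name(), p.get_position(relative_to=self.base_obj.frame))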
def CoppeliaUtils_addOrModifyDummy(self, type, name, pose):
    if not Dummy.exists(name):
        dummy = Dummy.create(0.1)
        # One colour for each type of dummy
        if type == RoboCompCoppeliaUtils.TargetTypes.Info:
            pass
        if type == RoboCompCoppeliaUtils.TargetTypes.Hand:
            pass
        if type == RoboCompCoppeliaUtils.TargetTypes.HeadCamera:
            pass
        dummy.set_name(name)
    else:
        dummy = Dummy(name)
    parent_frame_object = None
    if type == RoboCompCoppeliaUtils.TargetTypes.HeadCamera:
        parent_frame_object = Dummy("viriato_head_camera_pan_tilt")
    # Poses arrive in millimetres; convert to metres for CoppeliaSim
    dummy.set_position([pose.x / 1000., pose.y / 1000., pose.z / 1000.],
                       parent_frame_object)
    dummy.set_orientation([pose.rx, pose.ry, pose.rz], parent_frame_object)
while not done:
    done = arm_path.step()
    pr.step()
arm_path.clear_visualization()

SCENE_FILE = join(dirname(abspath(__file__)), 'double_panda.ttt')
pr = PyRep()
pr.launch(SCENE_FILE, headless=False)
pr.start()

panda = Panda()
panda1 = Panda(1)

cubes = []
pos = [[0.675, -0.45, 0.82], [0.65, -0.225, 0.82], [0.45, -0.175, 0.82],
       [0.425, 0.2, 0.82], [0.625, 0.275, 0.82], [0.625, 0.525, 0.82]]
dummies = [Dummy.create() for i in range(0, 6)]
for i in range(0, 6):
    cube = Shape.create(type=PrimitiveShape.CUBOID,
                        size=[0.1, 0.1, 0.1],
                        color=[1.0, 0.1, 0.1])
    cubes.append(cube)
    cube.set_position(pos[i])
    dummies[i].set_position(pos[i])
    dummies[i].set_parent(cubes[i])
    pr.step()

prev_pos, quat = panda.get_tip().get_position(), panda.get_tip().get_quaternion()
prev_pos1, quat1 = panda1.get_tip().get_position(), panda1.get_tip().get_quaternion()

for i in range(0, 3):
    try:
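        # (The source is truncated at the `try:` above. Below is a hedged
        # sketch of how such PyRep examples usually continue: plan a tip
        # path to a point above cube i and execute it. The 0.2 m offset,
        # the except clause, and the ConfigurationPathError import from
        # pyrep.errors are assumptions, not the original code.)
        target = dummies[i].get_position()
        arm_path = panda.get_path(
            position=[target[0], target[1], target[2] + 0.2],
            quaternion=quat)
        arm_path.visualize()
        done = False
        while not done:
            done = arm_path.step()
            pr.step()
        arm_path.clear_visualization()
    except ConfigurationPathError:
        # No collision-free path was found for this cube; skip it.
        continue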
def compute(self):
    print('SpecificWorker.compute...')
    try:
        self.pr.step()

        # Open the arm gripper
        self.move_gripper(self.arm_ops["OpenGripper"])

        # Read the arm camera RGB and depth signals
        cam = self.cameras["Camera_Shoulder"]
        image_float = cam["handle"].capture_rgb()
        depth = cam["handle"].capture_depth(in_meters=False)
        image = cv2.normalize(src=image_float, dst=None, alpha=0, beta=255,
                              norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        cam["image_rgb"] = RoboCompCameraRGBDSimple.TImage(
            width=cam["width"], height=cam["height"], depth=3,
            focalx=cam["focal"], focaly=cam["focal"], image=image.tobytes())
        cam["image_rgbd"] = RoboCompCameraRGBDSimple.TImage(
            width=cam["width"], height=cam["height"], depth=3,
            focalx=cam["focal"], focaly=cam["focal"], image=image.tobytes())
        cam["depth"] = RoboCompCameraRGBDSimple.TDepth(
            width=cam["width"], height=cam["height"], depthFactor=1.0,
            depth=depth.tobytes())

        # Get objects' poses from the simulator
        for obj_name in self.grasping_objects.keys():
            self.grasping_objects[obj_name]["sim_pose"] = \
                self.grasping_objects[obj_name]["handle"].get_pose()

        # Get objects' poses predicted from RGB
        pred_poses = self.objectposeestimationrgb_proxy.getObjectPose(cam["image_rgb"])
        self.visualize_poses(image_float, pred_poses, "rgb_pose.png")
        for pose in pred_poses:
            if pose.objectname in self.grasping_objects.keys():
                obj_trans = [pose.x, pose.y, pose.z]
                obj_quat = [pose.qx, pose.qy, pose.qz, pose.qw]
                obj_pose = self.process_pose(obj_trans, obj_quat)
                self.grasping_objects[pose.objectname]["pred_pose_rgb"] = obj_pose

        # Get objects' poses predicted from RGBD
        pred_poses = self.objectposeestimationrgbd_proxy.getObjectPose(
            cam["image_rgbd"], cam["depth"])
        self.visualize_poses(image_float, pred_poses, "rgbd_pose.png")
        for pose in pred_poses:
            if pose.objectname in self.grasping_objects.keys():
                obj_trans = [pose.x, pose.y, pose.z]
                obj_quat = [pose.qx, pose.qy, pose.qz, pose.qw]
                obj_pose = self.process_pose(obj_trans, obj_quat)
                self.grasping_objects[pose.objectname]["pred_pose_rgbd"] = obj_pose

        # Create a dummy for arm path planning
        approach_dummy = Dummy.create()
        approach_dummy.set_name("approach_dummy")

        # Initialize the approach dummy in the embedded Lua scripts
        call_ret = self.pr.script_call("initDummy@gen3",
                                       vrepConst.sim_scripttype_childscript)

        # Set the object pose for the arm to follow
        # NOTE: choose the simulator or the predicted pose
        dest_pose = self.grasping_objects["002_master_chef_can"]["pred_pose_rgbd"]
        dest_pose[2] += 0.04  # small z-axis offset to grasp the object from the top

        # Set the dummy pose to the pose of the object to be grasped
        approach_dummy.set_pose(dest_pose)

        # Move the gen3 arm to the object
        self.move_arm(approach_dummy, self.arm_ops["MoveToObj"])

        # Close the arm gripper
        self.move_gripper(self.arm_ops["CloseGripper"])

        # Move the approach dummy to the final destination pose
        dest_pose[2] += 0.4
        approach_dummy.set_pose(dest_pose)

        # Move the gen3 arm to the final destination
        self.move_arm(approach_dummy, self.arm_ops["MoveToObj"])

        # Remove the created approach dummy
        approach_dummy.remove()
    except Exception as e:
        print(e)
    return True
def test_create_dummy(self):
    d = Dummy.create(0.01)
    self.assertIsInstance(d, Dummy)