def edit_camera_xform(world_fn, xform=None, title=None):
    """Visual editor of the camera position."""
    world = WorldModel()
    world.readFile(world_fn)
    world.readFile("camera.rob")
    robot = world.robot(0)
    sensor = robot.sensor(0)
    if xform is not None:
        sensing.set_sensor_xform(sensor, xform)
    vis.createWindow()
    if title is not None:
        vis.setWindowTitle(title)
    vis.resizeWindow(1024, 768)
    vis.add("world", world)
    vis.add("sensor", sensor)
    vis.add("sensor_xform", sensing.get_sensor_xform(sensor, robot))
    vis.edit("sensor_xform")

    def update_sensor_xform():
        sensor_xform = vis.getItemConfig("sensor_xform")
        #the editor stores the transform as a flat 12-vector; split into (R,t)
        sensor_xform = sensor_xform[:9], sensor_xform[9:]
        sensing.set_sensor_xform(sensor, sensor_xform)

    vis.loop(callback=update_sensor_xform)
    sensor_xform = vis.getItemConfig("sensor_xform")
    return sensor_xform[:9], sensor_xform[9:]
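# A minimal usage sketch for the editor above.  The world file name
# "table_world.xml" is hypothetical; any Klampt world file containing the
# scene you want to view will do.  The function returns the edited camera
# pose as an se3 pair (R, t), which you can save and pass back in via the
# xform argument on a later run.
if __name__ == '__main__':
    xform = edit_camera_xform("table_world.xml", title="Camera pose editor")
    print("Edited camera transform:")
    print("  R =", xform[0])
    print("  t =", xform[1])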
def idle(self):
    #print("Idle...")
    #this uses the simulation
    sim.simulate(0.01)
    sim.updateWorld()
    #this commented out line just uses the world and kinematic simulation
    #sensor.kinematicSimulate(world,0.01)
    try:
        #self.rgb,self.depth = processDepthSensor(sensor)
        if self.compute_pc:
            t0 = time.time()
            if read_local:
                #local point cloud
                #self.pc = sensing.camera_to_points(sensor,points_format='numpy',color_format='rgb')
                #self.pc = sensing.camera_to_points(sensor,points_format='native')
                self.pc = sensing.camera_to_points(
                    sensor,
                    points_format='Geometry3D',
                    all_points=True,
                    color_format='rgb')
                #need to calculate the camera transform yourself
                T = sensing.get_sensor_xform(sensor, robot)
                self.pc.setCurrentTransform(*T)
                self.pc_appearance = klampt.Appearance()
            else:
                #world point cloud
                #self.pc = sensing.camera_to_points_world(sensor,robot,points_format='numpy')
                self.pc = sensing.camera_to_points_world(
                    sensor, robot,
                    points_format='Geometry3D',
                    color_format='rgb')
                self.pc_appearance = klampt.Appearance()
            #print("Read and process PC time", time.time()-t0)
    except Exception as e:
        print(e)
        import traceback
        traceback.print_exc()
        exit(-1)
    return True
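# A hedged sketch of the 'numpy' alternative mentioned in the comments above.
# Assumptions: `sensor` and `robot` are the same module-level globals that
# idle() uses, and numpy is imported as np.  With points_format='numpy' and
# color_format='channels', camera_to_points is expected to return an n x 6
# array with columns x,y,z,r,g,b (colors in [0,1]), expressed in the camera's
# local frame; applying the sensor transform moves the points to world
# coordinates.
def points_as_numpy():
    pts = sensing.camera_to_points(sensor, points_format='numpy',
                                   all_points=False, color_format='channels')
    #transform the xyz columns into the world frame using the sensor pose
    T = sensing.get_sensor_xform(sensor, robot)
    xyz_world = np.array([se3.apply(T, p[:3]) for p in pts])
    return xyz_world, pts[:, 3:]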
def make_grasp_map(world, id):
    """Estimates a grasp quality and regression map for a sensor image.

    Input:
        world (WorldModel): contains a robot and a sensor

    Output: a 4-channel numpy array of size (h,w,4), with w x h the
    sensor dimensions and all values in the range [0,1].

    The first channel encodes the quality.  The second encodes the grasp
    opening amount.  The third and fourth encode the orientation of the
    grasp relative to the camera frame, as a heading and elevation.

    Make sure to note how you've transformed these quantities to [0,1]!
    These will be needed for decoding.
    """
    robot = world.robot(0)
    sensor = robot.sensor(0)
    sensor_xform = sensing.get_sensor_xform(sensor, robot)
    w = int(sensor.getSetting("xres"))
    h = int(sensor.getSetting("yres"))

    #You'll be filling this image out
    result = np.zeros((h, w, 4))
    result[:, :, 3] = 0.5

    #this shows how to go from a point in space to a pixel
    # point = (0,0,0)
    # projected = sensing.camera_project(sensor,robot,point)
    # if projected is None:
    #     pass
    # else:
    #     x,y,z = projected
    #     if x < 0 or x >= w or y < 0 or y >= h:
    #         pass
    #     else:
    #         result[int(y),int(x),0] = 1
    # result[50,10,0] = 1

    #load the gripper info and grasp database
    source_gripper = robotiq_85
    global gripper_robot, gripper_world, grasp_db
    if gripper_robot is None:
        gripper_world = WorldModel()
        if not gripper_world.readFile(source_gripper.klampt_model):
            raise ValueError("Unable to read gripper file " +
                             source_gripper.klampt_model)
        gripper_robot = gripper_world.robot(0)
    gripper_geom_open = source_gripper.get_geometry(gripper_robot,
                                                    source_gripper.open_config)
    if grasp_db is None:
        db = grasp_database.GraspDatabase(source_gripper)
        if not db.load("../data/grasps/robotiq_85_more_sampled_grasp_db.json"):
            raise RuntimeError("Can't load grasp database?")
        grasp_db = db

    for i in range(world.numRigidObjects()):
        obj = world.rigidObject(i)
        ycb_obj = obj.getName()
        if ycb_obj not in grasp_db.object_to_grasps:
            print("Can't find object", ycb_obj, "in database")
            print("Database only contains objects", grasp_db.objects)
            raise ValueError()
        grasps = grasp_db.object_to_grasps[ycb_obj]
        gripper_tip = vectorops.madd(source_gripper.center,
                                     source_gripper.primary_axis,
                                     source_gripper.finger_length)
        num_grasps_valid = 0
        for gindex, g in enumerate(grasps):
            #TODO: problem 1B: do collision testing of gripper_geom
            found_approach = False
            if (id, ycb_obj, gindex) in grasps_feasible:
                found_approach = grasps_feasible[(id, ycb_obj, gindex)]
            else:
                #this is the Geometry3D corresponding to the gripper at the
                #grasp's finger configuration
                gripper_geom = source_gripper.get_geometry(gripper_robot,
                                                           g.finger_config)
                local_pt, world_pt = g.ik_constraint.getPosition()
                local_axis, world_axis = g.ik_constraint.getRotationAxis()
                #TODO: put your code here
                R = so3.vector_rotation(source_gripper.primary_axis, world_axis)
                t = vectorops.sub(world_pt, source_gripper.center)
                #first test: the open gripper must be collision-free against
                #all objects in the scene.  Note that the gripper model lives
                #in its own world, so it never appears among world's rigid
                #objects and needs no self-exclusion here.
                gripper_geom_open.setCurrentTransform(R, t)
                non_collision = True
                for j in range(world.numRigidObjects()):
                    if gripper_geom_open.collides(
                            world.rigidObject(j).geometry()):
                        non_collision = False
                        break
                found_approach = non_collision
                if found_approach:
                    #second test: the closed gripper may touch the target
                    #object but nothing else
                    gripper_geom.setCurrentTransform(R, t)
                    for j in range(world.numRigidObjects()):
                        if j == i:
                            continue
                        if gripper_geom.collides(
                                world.rigidObject(j).geometry()):
                            found_approach = False
                            break
                grasps_feasible[(id, ycb_obj, gindex)] = found_approach
            if not found_approach:
                continue
            num_grasps_valid += 1

            Tfixed = g.ik_constraint.closestMatch(*se3.identity())
            Tworld = se3.mul(obj.getTransform(), Tfixed)
            gworld = se3.apply(Tworld, gripper_tip)
            projected = sensing.camera_project(sensor, robot, gworld)
            if projected is not None:
                x, y, z = projected
                x = int(x)
                y = int(y)
                if x < 0 or x >= w or y < 0 or y >= h:
                    continue
                gripper_opening_axis = so3.apply(Tworld[0],
                                                 source_gripper.secondary_axis)
                gripper_opening_axis_cam = so3.apply(so3.inv(sensor_xform[0]),
                                                     gripper_opening_axis)
                #flip so the camera-frame y component is nonnegative; the
                #heading then lies in [0,pi]
                if gripper_opening_axis_cam[1] < 0:
                    gripper_opening_axis_cam = vectorops.mul(
                        gripper_opening_axis_cam, -1)
                gripper_axis_heading = math.atan2(gripper_opening_axis_cam[1],
                                                  gripper_opening_axis_cam[0])
                xy = vectorops.norm(gripper_opening_axis_cam[:2])
                if xy < 1e-7:
                    gripper_axis_elevation = math.pi * 0.5
                else:
                    gripper_axis_elevation = math.atan(
                        gripper_opening_axis_cam[2] / xy)

                score0 = math.exp(-g.score)
                window = 10
                std = 5
                #splat a Gaussian blob of this grasp's attributes around the
                #projected pixel, keeping the best score per pixel
                for u in range(-window, window + 1):
                    if y + u < 0 or y + u >= h:
                        continue
                    for v in range(-window, window + 1):
                        if x + v < 0 or x + v >= w:
                            continue
                        score = score0 * math.exp(-(u**2 + v**2) /
                                                  (2 * std**2))
                        if score > result[y + u, x + v, 0]:
                            result[y + u, x + v, :] = [
                                score,
                                source_gripper.config_to_opening(
                                    g.finger_config),
                                gripper_axis_heading / math.pi,
                                gripper_axis_elevation / math.pi + 0.5
                            ]
        print("{}/{} grasps valid for object {}".format(
            num_grasps_valid, len(grasps), ycb_obj))
    return result
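# A decoding sketch matching the encoding used in make_grasp_map above.  These
# conventions come from that code, not from any library API: channel 0 is the
# score, channel 1 the opening fraction, channel 2 stores heading/pi (heading
# in [0,pi] since the axis was flipped to make its camera-frame y component
# nonnegative), and channel 3 stores elevation/pi + 0.5 (elevation in
# [-pi/2,pi/2]).
def decode_grasp_pixel(result, x, y):
    score, opening, h01, e01 = result[y, x, :]
    heading = h01 * math.pi            #undo heading -> [0,1] mapping
    elevation = (e01 - 0.5) * math.pi  #undo elevation -> [0,1] mapping
    return score, opening, heading, elevation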
def make_grasp_map(world):
    """Estimates a grasp quality and regression map for a sensor image.

    Input:
        world (WorldModel): contains a robot and a sensor

    Output: a 4-channel numpy array of size (h,w,4), with w x h the
    sensor dimensions and all values in the range [0,1].

    The first channel encodes the score.  The second encodes the grasp
    opening amount (0 closed, 1 open).  The third and fourth encode the
    orientation of the grasp relative to the camera frame, as a heading
    and elevation.

    IR2 section: Make sure to note how you've transformed these
    quantities to [0,1]!  These will be needed for decoding.
    """
    robot = world.robot(0)
    sensor = robot.sensor(0)
    sensor_xform = sensing.get_sensor_xform(sensor, robot)
    w = int(sensor.getSetting("xres"))
    h = int(sensor.getSetting("yres"))

    #You'll be filling this image out
    result = np.zeros((h, w, 4))

    #this shows how to go from a point in space to a pixel
    point = (0, 0, 0)
    projected = sensing.camera_project(sensor, robot, point)
    if projected is not None:
        x, y, z = projected
        if 0 <= x < w and 0 <= y < h:
            result[int(y), int(x), 0] = 1
    result[50, 10, 0] = 1

    #load the gripper info and grasp database
    source_gripper = robotiq_85
    db = grasp_database.GraspDatabase(source_gripper)
    if not db.load("../data/grasps/robotiq_85_sampled_grasp_db.json"):
        raise RuntimeError("Can't load grasp database?")

    for i in range(world.numRigidObjects()):
        obj = world.rigidObject(i)
        ycb_obj = obj.getName()
        if ycb_obj not in db.object_to_grasps:
            print("Can't find object", ycb_obj, "in database")
            print("Database only contains objects", db.objects)
            raise ValueError()
        grasps = db.object_to_grasps[ycb_obj]
        gripper_tip = vectorops.madd(source_gripper.center,
                                     source_gripper.primary_axis,
                                     source_gripper.finger_length)
        for g in grasps:
            Tfixed = g.ik_constraint.closestMatch(*se3.identity())
            Tworld = se3.mul(obj.getTransform(), Tfixed)
            gworld = se3.apply(Tworld, gripper_tip)
            projected = sensing.camera_project(sensor, robot, gworld)
            if projected is not None:
                x, y, z = projected
                x = int(x)
                y = int(y)
                if x < 0 or x >= w or y < 0 or y >= h:
                    continue
                #TODO: implement me for problem 1c
                result[y, x, 0] = g.score   #hmm... this might be more than 1?
                print("Set prediction", x, y, result[y, x, :])
    return result
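# A minimal driver sketch for make_grasp_map.  The world file "problem1.xml"
# is hypothetical; substitute a world containing a robot with a simulated
# camera plus the YCB objects referenced by the grasp database.  matplotlib is
# only assumed here for visualization and is not otherwise required.
if __name__ == '__main__':
    world = WorldModel()
    if not world.readFile("problem1.xml"):
        raise RuntimeError("Unable to load world")
    grasp_map = make_grasp_map(world)
    import matplotlib.pyplot as plt
    plt.imshow(grasp_map[:, :, 0])   #the score channel
    plt.colorbar()
    plt.show()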