def edit_camera_xform(world_fn, xform=None, title=None):
    """Visual editor of the camera position"""
    world = WorldModel()
    world.readFile(world_fn)
    world.readFile("camera.rob")
    robot = world.robot(0)
    sensor = robot.sensor(0)
    if xform is not None:
        sensing.set_sensor_xform(sensor, xform)
    vis.createWindow()
    if title is not None:
        vis.setWindowTitle(title)
    vis.resizeWindow(1024, 768)
    vis.add("world", world)
    vis.add("sensor", sensor)
    vis.add("sensor_xform", sensing.get_sensor_xform(sensor, robot))
    vis.edit("sensor_xform")

    def update_sensor_xform():
        sensor_xform = vis.getItemConfig("sensor_xform")
        sensor_xform = sensor_xform[:9], sensor_xform[9:]
        sensing.set_sensor_xform(sensor, sensor_xform)

    vis.loop(callback=update_sensor_xform)
    sensor_xform = vis.getItemConfig("sensor_xform")
    return sensor_xform[:9], sensor_xform[9:]
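# A minimal usage sketch for edit_camera_xform, assuming a hypothetical world
# file "myworld.xml". The editor returns the pose in Klampt's se3 convention
# (9-element rotation list, 3-element translation), which can be saved with
# klampt.io.resource for later runs.
from klampt.io import resource

xform = edit_camera_xform("myworld.xml", title="Camera pose editor")
resource.set("camera_00.xform", xform, type='RigidTransform')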
def _randomlyRotateCamera(self, min_r=0, max_r=70, dist=DIST_FROM_BOARD):
    """Randomly rotates the camera about the x-axis and zooms out from the
    center of the tabletop.

    :param min_r: the minimum random rotation in degrees
    :param max_r: the maximum random rotation in degrees
    :param dist: the distance to zoom out from the center of the table
    :return: the sampled angle of rotation, in radians
    """
    min_r = math.radians(min_r)
    max_r = math.radians(max_r)
    table_center = self.chessEngine.getTableCenter()
    table_R = so3.from_axis_angle(([1, 0, 0], -math.pi))
    table_t = table_center
    rot_rad = random.uniform(min_r, max_r)  # sampled tilt angle, in radians
    zoom_out_R = so3.from_axis_angle(([1, 0, 0], rot_rad))
    zoom_out_t = vectorops.mul([0, math.sin(rot_rad), -math.cos(rot_rad)], dist)
    xform = se3.mul((table_R, table_t), (zoom_out_R, zoom_out_t))
    sensing.set_sensor_xform(self.sensor, xform)
    return rot_rad
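# Usage sketch, assuming `gen` is an instance of the (hypothetical) class that
# owns _randomlyRotateCamera: sample a tilt and log it in degrees. The method
# returns radians, since min_r/max_r are converted before sampling.
angle_rad = gen._randomlyRotateCamera(min_r=10, max_r=60)
print("camera tilted by %.1f degrees" % math.degrees(angle_rad))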
def grasp_plan_main():
    world = WorldModel()
    world.readFile("camera.rob")
    robot = world.robot(0)
    sensor = robot.sensor(0)
    world.readFile("table.xml")
    xform = resource.get("table_camera_00.xform", type='RigidTransform')
    sensing.set_sensor_xform(sensor, xform)
    box = GeometricPrimitive()
    box.setAABB([0, 0, 0], [1, 1, 1])
    box = resource.get('table.geom', 'GeometricPrimitive', default=box,
                       world=world, doedit='auto')
    bmin, bmax = list(box.properties[0:3]), list(box.properties[3:6])

    nobj = 5
    obj_fns = []
    for o in range(nobj):
        fn = random.choice(ycb_objects)
        obj = make_ycb_object(fn, world)
        #TODO: you might want to mess with colors here too
        obj.appearance().setSilhouette(0)
        obj_fns.append(fn)
    for i in range(world.numTerrains()):
        #TODO: you might want to mess with colors here too
        world.terrain(i).appearance().setSilhouette(0)
    problem1.arrange_objects(world, obj_fns, bmin, bmax)

    intrinsics = dict()
    w = int(sensor.getSetting('xres'))
    h = int(sensor.getSetting('yres'))
    xfov = float(sensor.getSetting('xfov'))
    yfov = float(sensor.getSetting('yfov'))
    intrinsics['cx'] = w/2
    intrinsics['cy'] = h/2
    # standard pinhole model: f = (resolution/2) / tan(FOV/2); the original
    # computed fx and fy identically from xfov, which was a bug
    intrinsics['fx'] = w/(2*math.tan(xfov*0.5))
    intrinsics['fy'] = h/(2*math.tan(yfov*0.5))
    print("Intrinsics", intrinsics)
    planner = ImageBasedGraspPredictor()

    def do_grasp_plan(event=None, world=world, sensor=sensor, planner=planner,
                      camera_xform=xform, camera_intrinsics=intrinsics):
        sensor.kinematicReset()
        sensor.kinematicSimulate(world, 0.01)
        rgb, depth = sensing.camera_to_images(sensor)
        grasps, scores = planner.generate(camera_xform, camera_intrinsics, rgb, depth)
        for i, (g, s) in enumerate(zip(grasps, scores)):
            color = (1-s, s, 0, 1)
            g.add_to_vis("grasp_"+str(i), color=color)

    def resample_objects(event=None, world=world, obj_fns=obj_fns, bmin=bmin, bmax=bmax):
        problem1.arrange_objects(world, obj_fns, bmin, bmax)

    vis.add("world", world)
    vis.add("sensor", sensor)
    vis.addAction(do_grasp_plan, 'Run grasp planner', 'p')
    vis.addAction(resample_objects, 'Sample new arrangement', 's')
    vis.run()
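# Sketch: back-projecting a depth image into camera-frame 3D points with the
# pinhole intrinsics computed above. This is the standard pinhole formula, not
# a Klampt API; `depth` is the HxW array from sensing.camera_to_images.
import numpy as np

def depth_to_points(depth, intrinsics):
    h, w = depth.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    x = (u - intrinsics['cx']) * depth / intrinsics['fx']
    y = (v - intrinsics['cy']) * depth / intrinsics['fy']
    return np.stack([x, y, depth], axis=-1)  # HxWx3 array of (x,y,z) points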
def loop_through_sensors(world=world, sensor=sensor,
                         world_camera_viewpoints=world_camera_viewpoints,
                         index=index, counters=counters):
    viewno = counters[0]
    variation = counters[1]
    total_count = counters[2]
    print("Generating data for sensor view", viewno, "variation", variation)
    sensor_xform = world_camera_viewpoints[viewno]
    sensing.set_sensor_xform(sensor, sensor_xform)
    rgb_filename = "image_dataset/color_%04d_var%04d.png" % (total_count, variation)
    depth_filename = "image_dataset/depth_%04d_var%04d.png" % (total_count, variation)
    grasp_filename = "image_dataset/grasp_%04d.png" % (total_count,)
    if PROBLEM == '1b':
        sensor.kinematicReset()
        sensor.kinematicSimulate(world, 0.01)
        print("Done with kinematic simulate")
        rgb, depth = sensing.camera_to_images(sensor)
        print("Saving to", rgb_filename, depth_filename)
        Image.fromarray(rgb).save(rgb_filename)
        depth_scale = 8000
        depth_quantized = (depth * depth_scale).astype(np.uint32)
        Image.fromarray(depth_quantized).save(depth_filename)
        with open("image_dataset/metadata.csv", 'a') as f:
            line = "{},{},{},{},{},{},{}\n".format(
                index, viewno, loader.write(sensor_xform, 'RigidTransform'),
                os.path.basename(rgb_filename), os.path.basename(depth_filename),
                os.path.basename(grasp_filename), variation)
            f.write(line)
    if PROBLEM == '1c' and variation == 0:
        #calculate grasp map and save it
        grasp_map = make_grasp_map(world)
        grasp_map_quantized = (grasp_map*255).astype(np.uint8)
        channels = ['score', 'opening', 'axis_heading', 'axis_elevation']
        for i, c in enumerate(channels):
            base, ext = os.path.splitext(grasp_filename)
            fn = base + '_' + c + ext
            Image.fromarray(grasp_map_quantized[:, :, i]).save(fn)
    #loop through variations and views
    counters[1] += 1
    if counters[1] >= max_variations:
        counters[1] = 0
        counters[0] += 1
        if counters[0] >= len(world_camera_viewpoints):
            vis.show(False)
        counters[2] += 1
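# Sketch: recovering metric depth from the quantized PNGs written above. The
# files store depth * depth_scale as 32-bit integers, so dividing by the same
# scale (8000 in the code above) restores the original units.
import numpy as np
from PIL import Image

def load_depth(filename, depth_scale=8000):
    depth_quantized = np.asarray(Image.open(filename))
    return depth_quantized.astype(np.float32) / depth_scale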
world = klampt.WorldModel()
world.readFile("../../data/simulation_test_worlds/sensortest.xml")
#world.readFile("../../data/tx90scenario0.xml")
robot = world.robot(0)
vis.add("world", world)

sim = klampt.Simulator(world)
sensor = sim.controller(0).sensor("rgbd_camera")
print("sensor.getSetting('link'):", sensor.getSetting("link"))
print("sensor.getSetting('Tsensor'):", sensor.getSetting("Tsensor"))
input("Press enter to continue...")
#T = (so3.sample(),[0,0,1.0])
T = (so3.mul(so3.rotation([1, 0, 0], math.radians(-10)),
             [1, 0, 0, 0, 0, -1, 0, 1, 0]),
     [0, -2.0, 0.5])
sensing.set_sensor_xform(sensor, T, link=-1)
vis.add("sensor", sensor)
read_local = True

#Note: GLEW sensor simulation only runs if it occurs in the visualization thread (e.g., the idle loop)
class SensorTestWorld(GLPluginInterface):
    def __init__(self):
        GLPluginInterface.__init__(self)
        robot.randomizeConfig()
        #sensor.kinematicSimulate(world,0.01)
        sim.controller(0).setPIDCommand(robot.getConfig(), [0.0] * 7)
        self.rgb = None
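# Sketch of how a GLPluginInterface subclass like SensorTestWorld is typically
# launched with the vis module; the original script is truncated here, so this
# follows the standard Klampt plugin pattern rather than the original code.
import time

vis.pushPlugin(SensorTestWorld())
vis.show()
while vis.shown():
    time.sleep(0.1)
vis.kill()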
def loop_through_sensors(world=world, sensor=sensor,
                         world_camera_viewpoints=world_camera_viewpoints,
                         index=index, counters=counters):
    viewno = counters[0]
    variation = counters[1]
    total_count = counters[2]
    print("Generating data for sensor view", viewno, "variation", variation)
    sensor_xform = world_camera_viewpoints[viewno]

    #TODO: problem 1.B: perturb camera and change colors
    # Orientation perturbation: rotate about a random unit axis by a random
    # angle of 10-20 degrees, with a random sign
    rand_axis = vectorops.unit(np.random.rand(3))
    rand_angle = np.random.choice([-1, 1]) * (np.random.rand() * 10 + 10)
    R = so3.from_axis_angle((rand_axis, np.radians(rand_angle)))
    # Position perturbation: translate 10-20 cm along a random unit direction
    rand_axis = vectorops.unit(np.random.random(3))
    rand_dist = np.random.random() * 0.10 + 0.10
    t = vectorops.mul(rand_axis, rand_dist)
    sensor_xform = se3.mul((R, t), sensor_xform)
    sensing.set_sensor_xform(sensor, sensor_xform)

    rgb_filename = "image_dataset/color_%04d_var%04d.png" % (total_count, variation)
    depth_filename = "image_dataset/depth_%04d_var%04d.png" % (total_count, variation)
    grasp_filename = "image_dataset/grasp_%04d_var%04d.png" % (total_count, variation)

    #Generate sensor images
    sensor.kinematicReset()
    sensor.kinematicSimulate(world, 0.01)
    print("Done with kinematic simulate")
    rgb, depth = sensing.camera_to_images(sensor)
    print("Saving to", rgb_filename, depth_filename)
    Image.fromarray(rgb).save(rgb_filename)
    depth_scale = 8000
    depth_quantized = (depth * depth_scale).astype(np.uint32)
    Image.fromarray(depth_quantized).save(depth_filename)
    with open("image_dataset/metadata.csv", 'a') as f:
        line = "{},{},{},{},{},{},{}\n".format(
            index, viewno, loader.write(sensor_xform, 'RigidTransform'),
            os.path.basename(rgb_filename), os.path.basename(depth_filename),
            os.path.basename(grasp_filename), variation)
        f.write(line)

    #calculate grasp map and save it
    grasp_map = make_grasp_map(world, total_count)
    grasp_map_quantized = (grasp_map * 255).astype(np.uint8)
    channels = ['score', 'opening', 'axis_heading', 'axis_elevation']
    for i, c in enumerate(channels):
        base, ext = os.path.splitext(grasp_filename)
        fn = base + '_' + c + ext
        Image.fromarray(grasp_map_quantized[:, :, i]).save(fn)

    #loop through variations and views
    counters[1] += 1
    if counters[1] >= max_variations:
        counters[1] = 0
        counters[0] += 1
        if counters[0] >= len(world_camera_viewpoints):
            vis.show(False)
        counters[2] += 1
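# Sketch: parsing a metadata.csv row written above back into a Klampt
# RigidTransform with klampt.io.loader. The column order mirrors the format
# string in the code: index, viewno, xform, rgb, depth, grasp, variation;
# the xform field contains no commas, so a plain split is safe.
from klampt.io import loader

def parse_metadata_line(line):
    index, viewno, xform_str, rgb_fn, depth_fn, grasp_fn, variation = line.strip().split(',')
    xform = loader.read('RigidTransform', xform_str)
    return int(index), int(viewno), xform, rgb_fn, depth_fn, grasp_fn, int(variation)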
def update_sensor_xform():
    # vis.getItemConfig flattens a RigidTransform into 12 numbers: the
    # 9-element so3 rotation followed by the 3-element translation
    sensor_xform = vis.getItemConfig("sensor_xform")
    sensor_xform = sensor_xform[:9], sensor_xform[9:]
    sensing.set_sensor_xform(sensor, sensor_xform)
from __future__ import division
import klampt, time
from klampt import vis
from klampt import *
from klampt.model import sensing
import matplotlib.pyplot as plt

world = WorldModel()
world.readFile('../../data/simulation_test_worlds/sensortest.xml')  # a world with a RobotiQ gripper and a camera
sim = Simulator(world)
camera = sim.controller(0).sensor('rgbd_camera')
T = ([1, 0, 0, 0, 0, -1, 0, 1, 0], [0, -2.0, 0.5])
sensing.set_sensor_xform(camera, T, link=-1)
sim.simulate(0.01)
sim.updateWorld()
rgb, depth = sensing.camera_to_images(camera)
print(rgb.shape)
print(depth.shape)
plt.imshow(depth)  # <---- THIS LINE PREVENTS VIS.SHOW() FROM SHOWING
plt.show()
vis.add('world', world)
vis.show()
while vis.shown():
    time.sleep(0.1)
vis.kill()
plt.show()
quit()
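# Workaround sketch for the matplotlib/vis conflict flagged above: render the
# matplotlib figure to a file instead of opening a window, or call plt.show()
# only after vis.kill(), so the two GUI event loops never run simultaneously.
# This is a common pattern, not an official Klampt recipe.
plt.imshow(depth)
plt.savefig('depth_preview.png')  # writes the figure without opening a window
plt.close()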