def load(self, fn):
    """Loads a path/trajectory resource from fn, inferring its type from the file extension."""
    if fn.endswith('xml'):
        self.type = 'MultiPath'
        self.data = MultiPath()
        self.data.load(fn)
    elif fn.endswith('path') or fn.endswith('traj'):
        self.type = 'Trajectory'
        self.data = Trajectory()
        self.data.load(fn)
    elif fn.endswith('configs') or fn.endswith('milestones'):
        self.type = 'Configs'
        self.data = Trajectory()
        self.data.configs = loader.load('Configs', fn)
        self.data.times = list(range(len(self.data.configs)))
    else:
        print("Unknown extension on file", fn, ", loading as Trajectory")
        self.type = 'Trajectory'
        self.data = Trajectory()
        self.data.load(fn)

def add(self, fn, openDir=True, warn=True):
    #assert fn not in self.active
    if fn in self.active:
        print("add(): Warning, file", fn, "is already active")
        return
    for i, (cfn, citem) in enumerate(self.visCache):
        if cfn == fn:
            print()
            print("klampt_browser: PULLED", fn, "FROM CACHE")
            print()
            self.active[fn] = citem
            return True
    if len(self.active) >= MAX_VIS_ITEMS:
        return
    if os.path.isdir(fn):
        if openDir:
            failures = []
            successes = []
            for f in os.listdir(fn):
                if f not in ['.', '..'] and os.path.splitext(f)[1] != '':
                    if not self.add(os.path.join(fn, f), openDir=False, warn=False):
                        failures.append(f)
                    else:
                        successes.append(f)
            if len(failures) != 0 and len(successes) != 0:
                QtWidgets.QMessageBox.warning(
                    self.splitter, "Invalid items",
                    "Could not load files " + ', '.join(failures) + " as Klamp't elements")
            return True
        else:
            return False
    path, ext = os.path.splitext(fn)
    #print("Extension is", ext)
    if ext in world_item_extensions:
        try:
            worldid = self.tempWorld.loadElement(fn)
        except Exception:
            if warn:
                QtWidgets.QMessageBox.warning(
                    self.splitter, "Invalid item",
                    "Could not load " + fn + " as a Klamp't world element")
            return False
        if worldid < 0:
            if warn:
                QtWidgets.QMessageBox.warning(
                    self.splitter, "Invalid item",
                    "Could not load " + fn + " as a Klamp't world element")
            return False
        obj = None
        for i in range(self.tempWorld.numRobots()):
            if self.tempWorld.robot(i).getID() == worldid:
                obj = self.tempWorld.robot(i)
                break
        for i in range(self.tempWorld.numRigidObjects()):
            if self.tempWorld.rigidObject(i).getID() == worldid:
                obj = self.tempWorld.rigidObject(i)
                break
        for i in range(self.tempWorld.numTerrains()):
            if self.tempWorld.terrain(i).getID() == worldid:
                obj = self.tempWorld.terrain(i)
                break
        assert obj is not None, "Hmm... couldn't find world id %d in world?" % (worldid, )
        self.loadedItem(fn, obj)
        return True
    try:
        type = loader.filenameToType(fn)
    except RuntimeError:
        if warn:
            QtWidgets.QMessageBox.warning(
                self.splitter, "Invalid item",
                "Could not load file " + fn + " as a known Klamp't type")
        return False
    if type == 'xml':
        #try loading a world
        try:
            world = WorldModel()
            res = world.readFile(fn)
            if not res:
                try:
                    obj = loader.load('MultiPath', fn)
                except Exception as e:
                    if warn:
                        print("klampt_browser: Trying MultiPath load, got exception", e)
                        import traceback
                        traceback.print_exc()
                        QtWidgets.QMessageBox.warning(
                            self.splitter, "Invalid WorldModel",
                            "Could not load " + fn + " as a world XML file")
                    return False
                self.loadedItem(fn, obj)
                return True
        except IOError:
            if warn:
                QtWidgets.QMessageBox.warning(
                    self.splitter, "Invalid WorldModel",
                    "Could not load " + fn + " as a world XML file")
            return False
        self.loadedItem(fn, world)
        return True
    elif type == 'json':
        import json
        with open(fn, 'r') as f:
            jsonobj = json.load(f)
        try:
            obj = loader.fromJson(jsonobj)
        except Exception:
            if warn:
                QtWidgets.QMessageBox.warning(
                    self.splitter, "Invalid JSON",
                    "Could not recognize " + fn + " as a known Klamp't type")
            return False
    else:
        try:
            obj = loader.load(type, fn)
        except Exception as e:
            if warn:
                QtWidgets.QMessageBox.warning(
                    self.splitter, "Invalid item",
                    "Error while loading file " + fn + ": " + str(e))
            return False
    self.loadedItem(fn, obj)
    return True

def calculation():
    """This is the main function of the camera calibration calculation."""
    joint_positions = []
    data_file = open(calidata_path + 'calibration_joint_positions.txt', 'r')
    for line in data_file:
        line = line.rstrip()
        l = [num for num in line.split(' ')]
        l2 = [float(num) for num in l]
        joint_positions.append(l2)
    data_file.close()

    #the marker transforms here are actually just points, not transformations
    marker_transforms = []
    data_file = open(calidata_path + 'calibration_marker_transforms.txt', 'r')
    for line in data_file:
        line = line.rstrip()
        l = [num for num in line.split(' ')]
        l2 = [float(num) for num in l]
        marker_transforms.append(l2)
    data_file.close()

    T_marker_assumed = loader.load(
        'RigidTransform', calidata_path + 'assumed_marker_world_transform.txt')
    T_marker_assumed = (so3.inv(T_marker_assumed[0]), T_marker_assumed[1])

    world = WorldModel()
    res = world.readFile("robot_model_data/ur5Blocks.xml")
    if not res:
        raise RuntimeError("unable to load model")
    #vis.add("world",world)
    robot = world.robot(0)
    link = robot.link(7)
    q0 = robot.getConfig()
    Tcameras = []
    Tlinks = []
    for i in range(len(joint_positions)):
        q = robot.getConfig()
        q[1:7] = joint_positions[i][0:6]
        robot.setConfig(q)
        Tlinks.append(link.getTransform())
        #vis.add("ghost"+str(i),q)
        #vis.setColor("ghost"+str(i),0,1,0,0.5)
    robot.setConfig(q0)
    #vis.add("MARKER",se3.mul(link.getTransform(),T_marker_assumed))
    #vis.setAttribute("MARKER","width",2)

    for loop in range(NLOOPS if ESTIMATE_MARKER_TRANSFORM else 1):
        ###Calculate Tcamera in EE (only needed for transform fit..)
        ###Tcameras = [se3.mul(se3.inv(Tl),se3.mul(T_marker_assumed,se3.inv(Tm_c))) for (Tl,Tm_c) in zip(Tlinks,marker_transforms)]
        #actually using only point fit here....
        if METHOD == 'point fit':
            Tcamera2 = point_fit_transform(
                [Tm_c for Tm_c in marker_transforms],
                [se3.mul(se3.inv(Tl), T_marker_assumed)[1] for Tl in Tlinks])
            Tcamera2 = [so3.from_matrix(Tcamera2[0]), Tcamera2[1]]
            Tcamera = Tcamera2
        #else:
        #    Tcamera = transform_average(Tcameras)

        #with the Tcamera from the current iteration, calculate marker points in world
        Tmarkers_world = [
            se3.apply(se3.mul(Tl, Tcamera), Tm_c)
            for (Tl, Tm_c) in zip(Tlinks, marker_transforms)
        ]
        #Tmarkers_link = [se3.mul(se3.inv(Tl),Tm) for Tl,Tm in zip(Tlinks,Tmarkers)]
        if ESTIMATE_MARKER_TRANSFORM:
            T_marker_assumed_inv = point_fit_transform(
                [[0, 0, 0] for Tm_c in marker_transforms],
                [
                    se3.apply(se3.mul(Tl, Tcamera2), Tm_c)
                    for (Tl, Tm_c) in zip(Tlinks, marker_transforms)
                ])
            T_marker_assumed_inv = [
                so3.from_matrix(T_marker_assumed_inv[0]), T_marker_assumed_inv[1]
            ]
            #print(T_marker_assumed_inv)
            T_marker_assumed = T_marker_assumed_inv
            #print("New assumed marker position",T_marker_assumed)
        else:
            pass
            #print("Would want new marker transform",transform_average(Tmarkers_world))
        #print("New estimated camera position",Tcamera)
        SSE_t = 0
        for (Tm_c, Tl) in zip(marker_transforms, Tlinks):
            Tm_c_est = se3.mul(se3.mul(se3.inv(Tcamera), se3.inv(Tl)), T_marker_assumed)
            SSE_t += vectorops.distanceSquared(Tm_c, Tm_c_est[1])
        #print("RMSE rotation (radians)",np.sqrt(SSE_R/len(Tlinks)))
        print("RMSE translations (meters)", np.sqrt(SSE_t / len(Tlinks)))
        #Tcameras = [se3.mul(se3.inv(Tl),se3.mul(T_marker_assumed,se3.inv(Tm_c))) for (Tl,Tm_c) in zip(Tlinks,marker_transforms)]
        #Tmarkers = [se3.mul(Tl,se3.mul(Tcamera,Tm_c)) for (Tl,Tm_c) in zip(Tlinks,marker_transforms)]

    print("Saving to calibrated_camera_xform.txt")
    cali_txt_path = calidata_path + "calibrated_camera_xform.txt"
    loader.save(Tcamera, "RigidTransform", cali_txt_path)

    if ESTIMATE_MARKER_TRANSFORM:
        print("Saving to calibrated_marker_world_xform.txt")
        loader.save(T_marker_assumed, "RigidTransform",
                    calidata_path + "calibrated_marker_world_xform.txt")

    #SSE_R = 0
    SSE_t = 0
    for (Tm_c, Tl) in zip(marker_transforms, Tlinks):
        Tm_c_est = se3.mul(se3.mul(se3.inv(Tcamera), se3.inv(Tl)), T_marker_assumed)
        SSE_t += vectorops.distanceSquared(Tm_c, Tm_c_est[1])
    print("RMSE translations (meters)", np.sqrt(SSE_t / len(Tlinks)))

def gen_grasp_images(max_variations=10):
    """Generates grasp training images for Problem 1b"""
    global camera_viewpoints
    if len(camera_viewpoints) == 0:
        #This code pops up the viewpoint editor
        edited = False
        for base in base_files:
            camera_viewpoints[base] = []
            camera_fn_template = os.path.join(
                "resources", os.path.splitext(base)[0] + '_camera_%02d.xform')
            index = 0
            while True:
                camera_fn = camera_fn_template % (index, )
                if not os.path.exists(camera_fn):
                    break
                xform = loader.load('RigidTransform', camera_fn)
                camera_viewpoints[base].append(xform)
                index += 1
            if len(camera_viewpoints[base]) > 0:
                #TODO: if you want to edit the camera transforms, comment this line out and replace it with "pass"
                continue
                #pass
            edited = True
            for i, xform in enumerate(camera_viewpoints[base]):
                print("Camera transform", base, i)
                sensor_xform = edit_camera_xform(
                    base, xform, title="Camera transform {} {}".format(base, i))
                camera_viewpoints[base][i] = sensor_xform
                camera_fn = camera_fn_template % (i, )
                loader.save(sensor_xform, 'RigidTransform', camera_fn)
            while True:
                if len(camera_viewpoints[base]) > 0:
                    print("Do you want to add another? (y/n)")
                    choice = input()
                else:
                    choice = 'y'
                if choice.strip() == 'y':
                    sensor_xform = edit_camera_xform(
                        base, title="New camera transform {}".format(base))
                    camera_viewpoints[base].append(sensor_xform)
                    camera_fn = camera_fn_template % (len(camera_viewpoints[base]) - 1, )
                    loader.save(sensor_xform, 'RigidTransform', camera_fn)
                else:
                    break
        if edited:
            print("Quitting; run me again to render images")
            return

    #Here's where the main loop begins
    try:
        os.mkdir('image_dataset')
    except Exception:
        pass
    try:
        os.remove("image_dataset/metadata.csv")
    except Exception:
        pass
    with open("image_dataset/metadata.csv", 'w') as f:
        f.write("world,view,view_transform,color_fn,depth_fn,grasp_fn,variation\n")

    total_image_count = 0
    for fn in glob.glob("generated_worlds/world_*.xml"):
        ofs = fn.find('.xml')
        index = fn[ofs - 4:ofs]
        world = WorldModel()
        world.readFile(fn)
        wtype = autodetect_world_type(world)
        if wtype == 'unknown':
            print("WARNING: DONT KNOW WORLD TYPE OF", fn)
        world_camera_viewpoints = camera_viewpoints[wtype + '.xml']
        assert len(world_camera_viewpoints) > 0

        #TODO: change appearances
        for i in range(world.numRigidObjects()):
            world.rigidObject(i).appearance().setSilhouette(0)
        for i in range(world.numTerrains()):
            world.terrain(i).appearance().setSilhouette(0)

        world.readFile('camera.rob')
        robot = world.robot(0)
        sensor = robot.sensor(0)
        vis.add("world", world)

        counters = [0, 0, total_image_count]

        def loop_through_sensors(world=world,
                                 sensor=sensor,
                                 world_camera_viewpoints=world_camera_viewpoints,
                                 index=index,
                                 counters=counters):
            viewno = counters[0]
            variation = counters[1]
            total_count = counters[2]
            print("Generating data for sensor view", viewno, "variation", variation)
            sensor_xform = world_camera_viewpoints[viewno]

            #TODO: problem 1.B: perturb camera and change colors
            # Generate random axis, random angle, rotate about angle for orientation perturbation
            # Generate random axis, random dist, translate distance over axis for position perturbation
            rand_axis = np.random.rand(3)
            rand_axis = vectorops.unit(rand_axis)
            multiplier = np.random.choice([True, False], 1)
            rand_angle = (np.random.rand(1)[0] * 10 + 10) * (-1 if multiplier else 1)
            # print(rand_angle)
            R = so3.from_axis_angle((rand_axis, np.radians(rand_angle)))
            rand_axis = vectorops.unit(np.random.random(3))
            rand_dist = np.random.random(1)[0] * .10 + .10
            # print(rand_dist)
            t = vectorops.mul(rand_axis, rand_dist)
            sensor_xform = se3.mul((R, t), sensor_xform)
            sensing.set_sensor_xform(sensor, sensor_xform)

            rgb_filename = "image_dataset/color_%04d_var%04d.png" % (total_count, variation)
            depth_filename = "image_dataset/depth_%04d_var%04d.png" % (total_count, variation)
            grasp_filename = "image_dataset/grasp_%04d_var%04d.png" % (total_count, variation)

            #Generate sensor images
            sensor.kinematicReset()
            sensor.kinematicSimulate(world, 0.01)
            print("Done with kinematic simulate")
            rgb, depth = sensing.camera_to_images(sensor)
            print("Saving to", rgb_filename, depth_filename)
            Image.fromarray(rgb).save(rgb_filename)
            depth_scale = 8000
            depth_quantized = (depth * depth_scale).astype(np.uint32)
            Image.fromarray(depth_quantized).save(depth_filename)
            with open("image_dataset/metadata.csv", 'a') as f:
                line = "{},{},{},{},{},{},{}\n".format(
                    index, viewno, loader.write(sensor_xform, 'RigidTransform'),
                    os.path.basename(rgb_filename),
                    os.path.basename(depth_filename),
                    os.path.basename(grasp_filename), variation)
                f.write(line)

            #calculate grasp map and save it
            grasp_map = make_grasp_map(world, total_count)
            grasp_map_quantized = (grasp_map * 255).astype(np.uint8)
            channels = ['score', 'opening', 'axis_heading', 'axis_elevation']
            for i, c in enumerate(channels):
                base, ext = os.path.splitext(grasp_filename)
                fn = base + '_' + c + ext
                Image.fromarray(grasp_map_quantized[:, :, i]).save(fn)

            #loop through variations and views
            counters[1] += 1
            if counters[1] >= max_variations:
                counters[1] = 0
                counters[0] += 1
                if counters[0] >= len(world_camera_viewpoints):
                    vis.show(False)
            counters[2] += 1

        print("Running render loop")
        vis.loop(callback=loop_through_sensors)
        total_image_count = counters[2]

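# Note on the depth encoding used above: depth images are written as 32-bit
# integer PNGs scaled by depth_scale = 8000, so metric depth is recovered by
# dividing by the same constant.  A minimal reader sketch; load_depth_meters
# is a hypothetical helper name, not part of the dataset code.
def load_depth_meters(depth_png_path, depth_scale=8000):
    """Recover metric depth (in meters) from a quantized depth PNG."""
    import numpy as np
    from PIL import Image
    depth_quantized = np.asarray(Image.open(depth_png_path))
    return depth_quantized.astype(np.float32) / depth_scale
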
def testload(type, fn):
    t0 = time.time()
    res = loader.load(type, fn)
    t1 = time.time()
    print("Time to load", fn, ":", t1 - t0)
    return res

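# Example usage (hypothetical file paths; 'Trajectory' and 'RigidTransform'
# are standard Klamp't loader types used elsewhere in this section):
#
#   traj = testload('Trajectory', 'motions/athlete_flex_opt.path')
#   xform = testload('RigidTransform', 'resources/assumed_marker_world_transform.txt')
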
def main():
    print("================================================================================")
    print(sys.argv[0] + ": Simulates a robot file and Python controller")
    if len(sys.argv) <= 1:
        print("USAGE: klampt_sim [world_file] [trajectory (.traj/.path) or controller (.py)]")
        print()
        print("  Try: klampt_sim athlete_plane.xml motions/athlete_flex_opt.path ")
        print("  [run from Klampt-examples/data/]")
        print("================================================================================")
    if len(sys.argv) <= 1:
        exit()
    world = WorldModel()
    #load up any world items, control modules, or paths
    control_modules = []
    for fn in sys.argv[1:]:
        path, base = os.path.split(fn)
        mod_name, file_ext = os.path.splitext(base)
        if file_ext == '.py' or file_ext == '.pyc':
            sys.path.append(os.path.abspath(path))
            mod = importlib.import_module(mod_name, base)
            control_modules.append(mod)
        elif file_ext == '.path':
            control_modules.append(fn)
        else:
            res = world.readFile(fn)
            if not res:
                print("Unable to load model " + fn)
                print("Quitting...")
                sys.exit(1)
    viewer = MyGLViewer(world)
    for i, c in enumerate(control_modules):
        if isinstance(c, str):
            from klampt.control.blocks import trajectory_tracking
            from klampt.model.trajectory import RobotTrajectory
            from klampt.io import loader
            #it's a path file, try to load it
            traj = loader.load('Trajectory', c)
            rtraj = RobotTrajectory(world.robot(i), traj.times, traj.milestones)
            controller = trajectory_tracking.TrajectoryPositionController(rtraj)
        else:
            try:
                maker = c.make
            except AttributeError:
                print("Module", c.__name__, "must have a make() method")
                print("Quitting...")
                sys.exit(1)
            controller = maker(world.robot(i))
        viewer.sim.setController(world.robot(i), controller)
    vis.setWindowTitle("Klamp't Simulation Tester")
    if SPLIT_SCREEN_TEST:
        viewer2 = MyGLViewer(world)
        vis.setPlugin(viewer)
        vis.addPlugin(viewer2)
        viewer2.window.broadcast = True
        vis.show()
        while vis.shown():
            time.sleep(0.01)
        vis.kill()
    else:
        vis.run(viewer)