def dotest(name, numTrials, **args):
    logFile = 'output/robotplanopt_' + name + '.csv'
    print("*** Dumping log to %s ***" % (logFile,))
    flog = open(logFile, 'w')
    res = None  #ensure res is defined even if numTrials == 0
    try:
        for trial in range(numTrials):
            robot.setConfig(configs[0])
            res = planopt.planOptimizedTrajectory(
                world, robot, configs[1],
                kinematicsCache=kinematicscache,
                logFile=flog, **args)
            if res is not None:
                loader.save(
                    res, 'auto',
                    'output/robotplanopt_' + name + "_" + str(trial) + ".path")
    except Exception as e:
        import traceback
        traceback.print_exc()
        print("Exception encountered during dotest", name, ":", e)
        res = None
    print("*** Log dumped to %s ***" % (logFile,))
    flog.close()
    return res
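# Example invocation (a sketch with hypothetical planner settings; assumes the
# module-level `world`, `robot`, `configs`, and `kinematicscache` that dotest()
# relies on have been initialized, and that planOptimizedTrajectory accepts the
# extra keyword arguments you pass through **args):
#
#   traj = dotest('baseline', numTrials=5)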
def save(obj, fn):
    if hasattr(obj, 'saveFile'):
        return obj.saveFile(fn)
    if hasattr(obj, 'save'):
        return obj.save(fn)
    ftype = loader.filenameToType(fn)  #renamed from `type` to avoid shadowing the builtin
    return loader.save(obj, ftype, fn)
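# Example: a robot configuration (a plain list) has neither a saveFile nor a
# save method, so the call falls through to loader type detection from the
# file extension (the path is illustrative):
#
#   save(robot.getConfig(), 'output/start.config')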
def calculation():
    """Main function for the camera calibration calculation."""
    joint_positions = []
    data_file = open(calidata_path + 'calibration_joint_positions.txt', 'r')
    for line in data_file:
        values = [float(num) for num in line.rstrip().split(' ')]
        joint_positions.append(values)
    data_file.close()

    #the marker "transforms" here are actually just points, not full transforms
    marker_transforms = []
    data_file = open(calidata_path + 'calibration_marker_transforms.txt', 'r')
    for line in data_file:
        values = [float(num) for num in line.rstrip().split(' ')]
        marker_transforms.append(values)
    data_file.close()

    T_marker_assumed = loader.load(
        'RigidTransform',
        calidata_path + 'assumed_marker_world_transform.txt')
    T_marker_assumed = (so3.inv(T_marker_assumed[0]), T_marker_assumed[1])

    world = WorldModel()
    res = world.readFile("robot_model_data/ur5Blocks.xml")
    if not res:
        raise RuntimeError("unable to load model")
    #vis.add("world",world)
    robot = world.robot(0)
    link = robot.link(7)
    q0 = robot.getConfig()
    Tcameras = []
    Tlinks = []
    for i in range(len(joint_positions)):
        q = robot.getConfig()
        q[1:7] = joint_positions[i][0:6]
        robot.setConfig(q)
        Tlinks.append(link.getTransform())
        #vis.add("ghost"+str(i),q)
        #vis.setColor("ghost"+str(i),0,1,0,0.5)
    robot.setConfig(q0)
    #vis.add("MARKER",se3.mul(link.getTransform(),T_marker_assumed))
    #vis.setAttribute("MARKER","width",2)

    for loop in range(NLOOPS if ESTIMATE_MARKER_TRANSFORM else 1):
        ###Calculate Tcamera in the EE frame (only needed for transform fitting):
        ###Tcameras = [se3.mul(se3.inv(Tl),se3.mul(T_marker_assumed,se3.inv(Tm_c))) for (Tl,Tm_c) in zip(Tlinks,marker_transforms)]
        #only point fitting is actually used here
        if METHOD == 'point fit':
            Tcamera2 = point_fit_transform(
                [Tm_c for Tm_c in marker_transforms],
                [se3.mul(se3.inv(Tl), T_marker_assumed)[1] for Tl in Tlinks])
            Tcamera2 = [so3.from_matrix(Tcamera2[0]), Tcamera2[1]]
            Tcamera = Tcamera2
        #else:
        #    Tcamera = transform_average(Tcameras)

        #with the Tcamera from the current iteration, calculate the marker points in world coordinates
        Tmarkers_world = [
            se3.apply(se3.mul(Tl, Tcamera), Tm_c)
            for (Tl, Tm_c) in zip(Tlinks, marker_transforms)
        ]
        #Tmarkers_link = [se3.mul(se3.inv(Tl),Tm) for Tl,Tm in zip(Tlinks,Tmarkers)]
        if ESTIMATE_MARKER_TRANSFORM:
            T_marker_assumed_inv = point_fit_transform(
                [[0, 0, 0] for Tm_c in marker_transforms],
                [
                    se3.apply(se3.mul(Tl, Tcamera2), Tm_c)
                    for (Tl, Tm_c) in zip(Tlinks, marker_transforms)
                ])
            T_marker_assumed_inv = [
                so3.from_matrix(T_marker_assumed_inv[0]),
                T_marker_assumed_inv[1]
            ]
            #print(T_marker_assumed_inv)
            T_marker_assumed = T_marker_assumed_inv
            #print("New assumed marker position", T_marker_assumed)
        else:
            pass
            #print("Would want new marker transform", transform_average(Tmarkers_world))
        #print("New estimated camera position", Tcamera)

        SSE_t = 0
        for (Tm_c, Tl) in zip(marker_transforms, Tlinks):
            Tm_c_est = se3.mul(se3.mul(se3.inv(Tcamera), se3.inv(Tl)),
                               T_marker_assumed)
            SSE_t += vectorops.distanceSquared(Tm_c, Tm_c_est[1])
        #print("RMSE rotation (radians)", np.sqrt(SSE_R / len(Tlinks)))
        print("RMSE translation (meters)", np.sqrt(SSE_t / len(Tlinks)))

    #Tcameras = [se3.mul(se3.inv(Tl),se3.mul(T_marker_assumed,se3.inv(Tm_c))) for (Tl,Tm_c) in zip(Tlinks,marker_transforms)]
    #Tmarkers = [se3.mul(Tl,se3.mul(Tcamera,Tm_c)) for (Tl,Tm_c) in zip(Tlinks,marker_transforms)]
    print("Saving to calibrated_camera_xform.txt")
    cali_txt_path = calidata_path + "calibrated_camera_xform.txt"
    loader.save(Tcamera, "RigidTransform", cali_txt_path)

    if ESTIMATE_MARKER_TRANSFORM:
        print("Saving to calibrated_marker_world_xform.txt")
        loader.save(T_marker_assumed, "RigidTransform",
                    calidata_path + "calibrated_marker_world_xform.txt")

    #SSE_R = 0
    SSE_t = 0
    for (Tm_c, Tl) in zip(marker_transforms, Tlinks):
        Tm_c_est = se3.mul(se3.mul(se3.inv(Tcamera), se3.inv(Tl)),
                           T_marker_assumed)
        SSE_t += vectorops.distanceSquared(Tm_c, Tm_c_est[1])
    print("RMSE translation (meters)", np.sqrt(SSE_t / len(Tlinks)))
def gen_grasp_images(max_variations=10):
    """Generates grasp training images for Problem 1b."""
    global camera_viewpoints
    if len(camera_viewpoints) == 0:
        #This code pops up the viewpoint editor
        edited = False
        for base in base_files:
            camera_viewpoints[base] = []
            camera_fn_template = os.path.join(
                "resources",
                os.path.splitext(base)[0] + '_camera_%02d.xform')
            index = 0
            while True:
                camera_fn = camera_fn_template % (index,)
                if not os.path.exists(camera_fn):
                    break
                xform = loader.load('RigidTransform', camera_fn)
                camera_viewpoints[base].append(xform)
                index += 1
            if len(camera_viewpoints[base]) > 0:
                #TODO: if you want to edit the camera transforms, comment out this "continue" and replace it with "pass"
                continue
                #pass
            edited = True
            for i, xform in enumerate(camera_viewpoints[base]):
                print("Camera transform", base, i)
                sensor_xform = edit_camera_xform(
                    base, xform,
                    title="Camera transform {} {}".format(base, i))
                camera_viewpoints[base][i] = sensor_xform
                camera_fn = camera_fn_template % (i,)
                loader.save(sensor_xform, 'RigidTransform', camera_fn)
            while True:
                if len(camera_viewpoints[base]) > 0:
                    print("Do you want to add another? (y/n)")
                    choice = input()
                else:
                    choice = 'y'
                if choice.strip() == 'y':
                    sensor_xform = edit_camera_xform(
                        base, title="New camera transform {}".format(base))
                    camera_viewpoints[base].append(sensor_xform)
                    camera_fn = camera_fn_template % (
                        len(camera_viewpoints[base]) - 1,)
                    loader.save(sensor_xform, 'RigidTransform', camera_fn)
                else:
                    break
        if edited:
            print("Quitting; run me again to render images")
            return

    #Here's where the main loop begins
    try:
        os.mkdir('image_dataset')
    except Exception:
        pass
    try:
        os.remove("image_dataset/metadata.csv")
    except Exception:
        pass
    with open("image_dataset/metadata.csv", 'w') as f:
        f.write("world,view,view_transform,color_fn,depth_fn,grasp_fn,variation\n")

    total_image_count = 0
    for fn in glob.glob("generated_worlds/world_*.xml"):
        ofs = fn.find('.xml')
        index = fn[ofs - 4:ofs]
        world = WorldModel()
        world.readFile(fn)
        wtype = autodetect_world_type(world)
        if wtype == 'unknown':
            print("WARNING: DON'T KNOW WORLD TYPE OF", fn)
        world_camera_viewpoints = camera_viewpoints[wtype + '.xml']
        assert len(world_camera_viewpoints) > 0

        #TODO: change appearances
        for i in range(world.numRigidObjects()):
            world.rigidObject(i).appearance().setSilhouette(0)
        for i in range(world.numTerrains()):
            world.terrain(i).appearance().setSilhouette(0)

        world.readFile('camera.rob')
        robot = world.robot(0)
        sensor = robot.sensor(0)
        vis.add("world", world)

        counters = [0, 0, total_image_count]

        def loop_through_sensors(world=world,
                                 sensor=sensor,
                                 world_camera_viewpoints=world_camera_viewpoints,
                                 index=index,
                                 counters=counters):
            viewno = counters[0]
            variation = counters[1]
            total_count = counters[2]
            print("Generating data for sensor view", viewno, "variation", variation)
            sensor_xform = world_camera_viewpoints[viewno]

            #TODO: problem 1.B: perturb camera and change colors
            #Sample a random axis and a random angle for the orientation
            #perturbation, then a random axis and distance for the position
            #perturbation
            rand_axis = vectorops.unit(np.random.rand(3))
            flip = np.random.choice([True, False])
            rand_angle = (np.random.rand() * 10 + 10) * (-1 if flip else 1)
            #print(rand_angle)
            R = so3.from_axis_angle((rand_axis, np.radians(rand_angle)))
            rand_axis = vectorops.unit(np.random.random(3))
            rand_dist = np.random.random() * 0.10 + 0.10
            #print(rand_dist)
            t = vectorops.mul(rand_axis, rand_dist)
            sensor_xform = se3.mul((R, t), sensor_xform)
            sensing.set_sensor_xform(sensor, sensor_xform)

            rgb_filename = "image_dataset/color_%04d_var%04d.png" % (total_count, variation)
            depth_filename = "image_dataset/depth_%04d_var%04d.png" % (total_count, variation)
            grasp_filename = "image_dataset/grasp_%04d_var%04d.png" % (total_count, variation)

            #Generate sensor images
            sensor.kinematicReset()
            sensor.kinematicSimulate(world, 0.01)
            print("Done with kinematic simulate")
            rgb, depth = sensing.camera_to_images(sensor)
            print("Saving to", rgb_filename, depth_filename)
            Image.fromarray(rgb).save(rgb_filename)
            depth_scale = 8000
            depth_quantized = (depth * depth_scale).astype(np.uint32)
            Image.fromarray(depth_quantized).save(depth_filename)

            with open("image_dataset/metadata.csv", 'a') as f:
                line = "{},{},{},{},{},{},{}\n".format(
                    index, viewno,
                    loader.write(sensor_xform, 'RigidTransform'),
                    os.path.basename(rgb_filename),
                    os.path.basename(depth_filename),
                    os.path.basename(grasp_filename), variation)
                f.write(line)

            #calculate the grasp map and save its channels as separate images
            grasp_map = make_grasp_map(world, total_count)
            grasp_map_quantized = (grasp_map * 255).astype(np.uint8)
            channels = ['score', 'opening', 'axis_heading', 'axis_elevation']
            for i, c in enumerate(channels):
                base, ext = os.path.splitext(grasp_filename)
                fn = base + '_' + c + ext
                Image.fromarray(grasp_map_quantized[:, :, i]).save(fn)

            #advance through variations and views
            counters[1] += 1
            if counters[1] >= max_variations:
                counters[1] = 0
                counters[0] += 1
                if counters[0] >= len(world_camera_viewpoints):
                    vis.show(False)
            counters[2] += 1

        print("Running render loop")
        vis.loop(callback=loop_through_sensors)
        total_image_count = counters[2]
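# Decoding a saved depth image (a sketch; the divisor must match the
# depth_scale used in loop_through_sensors, and the filename is illustrative):
#
#   img = Image.open('image_dataset/depth_0000_var0000.png')
#   depth = np.asarray(img, dtype=np.float32) / 8000.0   # back to meters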
def testsave(obj, type, fn):
    t0 = time.time()
    res = loader.save(obj, type, fn)
    t1 = time.time()
    print("Time to save", fn, ":", t1 - t0)
    return res
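# Example timing run (hypothetical object and path):
#
#   testsave(Tcamera, 'RigidTransform', 'output/test_xform.txt')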