def show_friction_cone(folder="FrictionCones/StanfordMicroSpineUnit"):
    V = Volume()
    V.load(folder)
    for C in V.convex_decomposition:
        print("Convex decomposition")
        print(np.amin(C.points, axis=0))
        print(np.amax(C.points, axis=0))
    Chull = np.vstack([V.convex_hull.points[u] for u in V.convex_hull.vertices])
    #print(Chull)
    Chull *= 0.05
    for i, C in enumerate(V.convex_decomposition):
        if len(V.convex_decomposition) == 1:
            g = 0
        else:
            g = float(i) / (len(V.convex_decomposition) - 1)
        #for j,v in enumerate(C.vertices):
        #    vis.add("pt%d,%d"%(i,j),vectorops.mul(C.points[v,:].tolist(),0.05))
        #    vis.setColor("pt%d,%d"%(i,j),1,g,0)
        #    vis.hideLabel("pt%d,%d"%(i,j))
        for j, pt in enumerate(C.points):
            vis.add("pt%d,%d" % (i, j), vectorops.mul(pt.tolist(), 0.05))
            vis.setColor("pt%d,%d" % (i, j), 1, g, 0)
            vis.hideLabel("pt%d,%d" % (i, j))
    for i in range(Chull.shape[0]):
        vis.add("CH%d" % (i,), Chull[i, :].tolist())
        vis.hideLabel("CH%d" % (i,))
    vis.add("origin", (0, 0, 0))
    vis.setColor("origin", 0, 1, 0)
    vis.dialog()
def multiwindow_template(world):
    """Tests multiple windows and views."""
    vis.add("world", world)
    vp = vis.getViewport()
    vp.w, vp.h = 400, 600
    vis.setViewport(vp)
    vis.addText("label1", "This is Window 1", (20, 20))
    vis.setWindowTitle("Window 1")
    vis.show()
    id1 = vis.getWindow()
    print("First window ID:", id1)

    id2 = vis.createWindow("Window 2")
    vis.add("Lone point", [0, 0, 0])
    vis.setViewport(vp)
    vis.addText("label1", "This is Window 2", (20, 20))
    print("Second window ID:", vis.getWindow())
    vis.setWindow(id2)
    vis.spin(float('inf'))

    #restore back to 1 window, clear the text
    vis.setWindow(id1)
    vis.clearText()

    vp = vis.getViewport()
    vp.w, vp.h = 800, 800
    vis.setViewport(vp)
    vis.setWindowTitle("vis.spin test: will close in 5 seconds...")
    vis.spin(5.0)

    #Now testing ability to re-launch windows
    vis.setWindowTitle("Shown again. Close me to proceed.")
    vis.spin(float('inf'))

    vis.setWindowTitle("Dialog test. Close me to proceed.")
    vp = vis.getViewport()
    vp.w, vp.h = 400, 600
    vis.setViewport(vp)
    vis.dialog()

    vp.w, vp.h = 640, 480
    vis.setViewport(vp)
    for i in range(3):
        widgets = GLWidgetPlugin()
        widgets.addWidget(RobotPoser(world.robot(0)))
        vis.addPlugin(widgets)
    vis.setWindowTitle("Split screen test")
    vis.spin(float('inf'))

    vis.setPlugin(None)
    vis.setWindowTitle("Back to normal. Close me to quit.")
    vis.dialog()
    vis.kill()
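# A minimal usage sketch for multiwindow_template, assuming a Klampt world
# file of your own (the path below is hypothetical):
if __name__ == "__main__":
    from klampt import WorldModel

    world = WorldModel()
    if not world.readFile("data/myworld.xml"):  #hypothetical world file
        raise RuntimeError("Unable to load world file")
    multiwindow_template(world)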
def edit_config():
    global world, robot
    resource.setDirectory("resources/")
    qhands = [r.getConfig() for r in hand_subrobots.values()]
    q = resource.get("robosimian_body_stability.config",
                     default=robot.getConfig(),
                     doedit=False)
    robot.setConfig(q)
    for r, qhand in zip(hand_subrobots.values(), qhands):
        r.setConfig(qhand)
    vis.add("world", world)
    vis.edit(("world", robot.getName()), True)
    vis.dialog()
    vis.edit(("world", robot.getName()), False)
    resource.set("robosimian_body_stability.config", robot.getConfig())
def vis_interaction_test(world):
    """Runs some tests of the visualization module interacting with the
    resource module."""
    print("Showing robot in modal dialog box")
    vis.add("robot", world.robot(0))
    vis.add("ee", world.robot(0).link(11).getTransform())
    vis.dialog()

    config = resource.get("resourcetest1.config",
                          description="Should show up without a hitch...",
                          doedit=True,
                          editor='visual',
                          world=world)

    import time
    if MULTITHREADED:
        print("Showing threaded visualization (this will fail on GLUT or Mac OS)")
        vis.show()
        for i in range(3):
            vis.lock()
            q = world.robot(0).getConfig()
            q[9] = 3.0
            world.robot(0).setConfig(q)
            vis.unlock()
            time.sleep(1.0)
            if not vis.shown():
                break
            vis.lock()
            q = world.robot(0).getConfig()
            q[9] = -1.0
            world.robot(0).setConfig(q)
            vis.unlock()
            time.sleep(1.0)
            if not vis.shown():
                break
        print("Hiding visualization window")
        vis.show(False)
    else:
        print("Showing single-threaded visualization for 5s")
        vis.spin(5.0)
    config = resource.get("resourcetest1.config",
                          description="Should show up without a hitch...",
                          doedit=True,
                          editor='visual',
                          world=world)
def debug_stable_faces(obj, faces):
    from klampt import vis, Geometry3D, GeometricPrimitive
    from klampt.math import se3
    import random
    vis.createWindow()
    obj.setTransform(*se3.identity())
    vis.add("object", obj)
    for i, f in enumerate(faces):
        gf = GeometricPrimitive()
        gf.setPolygon(np.stack(f).flatten())
        color = (1, 0.5 + 0.5 * random.random(), 0.5 + 0.5 * random.random(), 0.5)
        vis.add("face{}".format(i), Geometry3D(gf), color=color, hide_label=True)
    vis.setWindowTitle("Stable faces")
    vis.dialog()
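# A hedged usage sketch for debug_stable_faces; stable_faces is assumed to be
# a companion routine in the same module that computes the stable-face
# polygons of a rigid object, and the object file path is hypothetical.
if __name__ == "__main__":
    from klampt import WorldModel

    world = WorldModel()
    obj = world.loadRigidObject("data/objects/block.obj")  #hypothetical path
    faces = stable_faces(obj)  #assumed companion function
    debug_stable_faces(obj, faces)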
#add the world elements individually to the visualization
vis.add("robot", robot)
for i in range(1, world.numRobots()):
    vis.add("robot" + str(i), world.robot(i))
for i in range(world.numRigidObjects()):
    vis.add("rigidObject" + str(i), world.rigidObject(i))
for i in range(world.numTerrains()):
    vis.add("terrain" + str(i), world.terrain(i))

#if you want to just see the robot in a pop up window...
if DO_SIMPLIFY and DEBUG_SIMPLIFY:
    print("#########################################")
    print("Showing the simplified robot")
    print("#########################################")
    vis.setWindowTitle("Simplified robot")
    vis.dialog()

#Automatic construction of space
if not CLOSED_LOOP_TEST:
    if not MANUAL_SPACE_CREATION:
        space = robotplanning.makeSpace(world=world,
                                        robot=robot,
                                        edgeCheckResolution=1e-3,
                                        movingSubset='all')
    else:
        #Manual construction of space
        collider = WorldCollider(world)
        space = robotcspace.RobotCSpace(robot, collider)
        space.eps = 1e-3
        space.setup()
else:
def calibrate_xform_camera(camera_link_transforms,
                           marker_link_transforms,
                           marker_observations,
                           marker_ids,
                           observation_relative_errors=None,
                           camera_initial_guess=None,
                           marker_initial_guess=None,
                           regularizationFactor=0,
                           maxIters=100,
                           tolerance=1e-7):
    """Single camera calibration function for a camera and markers on some
    set of rigid bodies.

    Given body transforms and a list of estimated calibration marker
    observations in the camera frame, estimates both the camera transform
    relative to the camera link as well as the marker transforms relative
    to their links.

    M: the set of m markers. By default there is at most one marker per
    link. Markers can either be point markers (e.g., a mocap ball), or
    transform markers (e.g., an AR tag or checkerboard pattern).

    O: the set of n observations, each consisting of a reading
    (Tc_i,Tm_i,o_i,l_i) where Tc_i is the camera link's transform, Tm_i is
    the marker link's transform, o_i is the reading, which consists of
    either a point or transform estimate in the camera frame, and l_i is
    the ID of the marker (by default, just its link).

    Output: a tuple (err,Tc,marker_dict) where err is the norm of the
    reconstruction residual, Tc is the estimated camera transform relative
    to the camera's link, and marker_dict is a dict mapping each marker id
    to its estimated position or transform on the marker's link.

    Arguments:
    - camera_link_transforms: a list of the camera link's transforms
      Tc_1,...,Tc_n, one per observation.
    - marker_link_transforms: a list of the marker links' transforms
      Tm_1,...,Tm_n, one per observation.
    - marker_observations: a list of estimated positions or transformations
      of calibration markers o_1,...,o_n, given in the camera's reference
      frame (z forward, x right, y down). If o_i is a 3-vector, the marker
      is considered to be a point marker. If an se3 element (R,t) is given,
      the marker is considered to be a transform marker. You may not mix
      point and transform observations for a single marker ID.
    - marker_ids: a list of marker IDs l_1,...,l_n corresponding to each
      observation, e.g., the link index on which each marker lies.
    - observation_relative_errors: if you have an idea of the magnitude of
      each observation error, it can be placed into this list. Must be a
      list of n floats, 3-lists (point markers), or 6-lists (transform
      markers). By default, errors will be set proportionally to the
      observed distance between the camera and marker.
    - camera_initial_guess: if not None, an initial guess for the camera
      transform.
    - marker_initial_guess: if not None, a dictionary containing initial
      guesses for the marker transforms.
    - regularizationFactor: if nonzero, the optimization penalizes deviation
      of the estimated camera transform and marker transforms from zero
      proportionally to this factor.
    - maxIters: maximum number of iterations for optimization.
    - tolerance: optimization convergence tolerance.
      Stops when the change of estimates falls below this threshold.
    """
    if len(camera_link_transforms) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as camera transforms")
    if len(marker_link_transforms) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as marker transforms")
    if len(marker_observations) != len(marker_ids):
        raise ValueError("Must provide the same number of marker observations as marker transforms")
    #get all unique marker ids
    marker_id_list = list(set(marker_ids))
    #detect marker types
    marker_types = dict((v, None) for v in marker_id_list)
    for i, (obs, id) in enumerate(zip(marker_observations, marker_ids)):
        if len(obs) == 3:
            if marker_types[id] == 't':
                raise ValueError("Provided both point and transform observations for observation #%d, id %s\n" % (i, str(id)))
            marker_types[id] = 'p'
        elif len(obs) == 2 and len(obs[0]) == 9 and len(obs[1]) == 3:
            if marker_types[id] == 'p':
                raise ValueError("Provided both point and transform observations for observation #%d, id %s\n" % (i, str(id)))
            marker_types[id] = 't'
        else:
            raise ValueError("Invalid observation for observation #%d, id %s\n" % (i, str(id)))

    n = len(marker_observations)
    m = len(marker_id_list)

    #get all the observation weights
    observation_weights = []
    if observation_relative_errors is None:
        #default weights: proportional to distance
        for obs in marker_observations:
            if len(obs) == 3:
                observation_weights.append(1.0 / vectorops.norm(obs))
            else:
                observation_weights.append(1.0 / vectorops.norm(obs[1]))
        #NOTE: currently overridden to uniform weights
        observation_weights = [1.0] * len(observation_weights)
    else:
        if len(observation_relative_errors) != n:
            raise ValueError("Invalid length of observation errors")
        for err in observation_relative_errors:
            if hasattr(err, '__iter__'):
                observation_weights.append([1.0 / v for v in err])
            else:
                observation_weights.append(1.0 / err)

    #initial guesses
    if camera_initial_guess is None:
        camera_initial_guess = se3.identity()
        if any(v == 't' for v in marker_types.values()):
            #estimate camera rotation from point estimates because rotations
            #are more prone to initialization failures
            point_observations = []
            marker_point_rel = []
            for i, obs in enumerate(marker_observations):
                if len(obs) == 2:
                    point_observations.append(obs[1])
                else:
                    point_observations.append(obs)
                marker_point_rel.append(se3.mul(se3.inv(camera_link_transforms[i]), marker_link_transforms[i])[1])
            camera_initial_guess = (point_fit_rotation_3d(point_observations, marker_point_rel), [0.0] * 3)
            print("Estimated camera rotation from points:", camera_initial_guess)
    if marker_initial_guess is None:
        marker_initial_guess = dict((l, (se3.identity() if marker_types[l] == 't' else [0.0] * 3)) for l in marker_id_list)
    else:
        marker_initial_guess = marker_initial_guess.copy()
        for l in marker_id_list:
            if l not in marker_initial_guess:
                marker_initial_guess[l] = (se3.identity() if marker_types[l] == 't' else [0.0] * 3)
    camera_transform = camera_initial_guess
    marker_transforms = marker_initial_guess.copy()

    if DO_VISUALIZATION:
        rgroup = coordinates.addGroup("calibration")
        rgroup.addFrame("camera link", worldCoordinates=camera_link_transforms[-1])
        rgroup.addFrame("marker link", worldCoordinates=marker_link_transforms[-1])
        rgroup.addFrame("camera estimate", parent="camera link", relativeCoordinates=camera_transform)
        rgroup.addFrame("marker estimate", parent="marker link", relativeCoordinates=list(marker_transforms.values())[0])
        for i, obs in enumerate(marker_observations):
            rgroup.addFrame("obs" + str(i) + " estimate", parent="camera estimate", relativeCoordinates=obs)
        vis.add("coordinates", rgroup)
        vis.dialog()
    point_observations_only = all(marker_types[marker] == 'p' for marker in marker_id_list)
    xform_observations_only = all(marker_types[marker] == 't' for marker in marker_id_list)
    if not point_observations_only and not xform_observations_only:
        raise NotImplementedError("Can't calibrate camera from mixed point/transform markers yet")

    for iters in range(maxIters):
        #attempt to minimize the error on the following over all observations i:
        #  camera_link_transform(q_i)*camera_transform*observation_i = marker_link_transform(l_i,q_i)*marker_transform(l_i)
        #first, we'll assume the camera transform is fixed and optimize the marker transforms.
        #then, we'll assume the marker transforms are fixed and optimize the camera transform.
        #finally, we'll check the error to detect convergence

        #1. Estimate marker transforms from current camera transform
        new_marker_transforms = dict((l, (TransformStats(zero=marker_initial_guess[l], prior=regularizationFactor)
                                          if marker_types[l] == 't'
                                          else VectorStats(zero=[0.0] * 3, prior=regularizationFactor)))
                                     for l in marker_id_list)
        for i in range(n):
            marker = marker_ids[i]
            Tclink = camera_link_transforms[i]
            Tmlink = marker_link_transforms[i]
            obs = marker_observations[i]
            Trel = se3.mul(se3.inv(Tmlink), se3.mul(Tclink, camera_transform))
            if marker_types[marker] == 't':
                estimate = se3.mul(Trel, obs)
            else:
                estimate = se3.apply(Trel, obs)
            new_marker_transforms[marker].add(estimate, observation_weights[i])
        print("ITERATION", iters)
        #print("  ESTIMATED MARKER TRANSFORMS:", dict((k, v.average) for (k, v) in new_marker_transforms.items()))

        #2. Estimate camera transform from current marker transforms
        new_camera_transform = TransformStats(zero=camera_initial_guess, prior=regularizationFactor)
        if point_observations_only:
            #TODO: weighted point fitting
            relative_points = []
            for i in range(n):
                marker = marker_ids[i]
                Tclink = camera_link_transforms[i]
                Tmlink = marker_link_transforms[i]
                obs = marker_observations[i]
                pRel = se3.apply(se3.inv(Tclink), se3.apply(Tmlink, new_marker_transforms[marker].average))
                relative_points.append(pRel)
            new_camera_transform.add(point_fit_xform_3d(marker_observations, relative_points),
                                     sum(observation_weights))
        else:
            for i in range(n):
                marker = marker_ids[i]
                Tclink = camera_link_transforms[i]
                Tmlink = marker_link_transforms[i]
                obs = marker_observations[i]
                Trel = se3.mul(se3.inv(Tclink), se3.mul(Tmlink, new_marker_transforms[marker].average))
                estimate = se3.mul(Trel, se3.inv(obs))
                new_camera_transform.add(estimate, observation_weights[i])
        #print("  ESTIMATED CAMERA TRANSFORMS:", new_camera_transform.average)

        #3. compute difference between last and current estimates
        diff = 0.0
        diff += vectorops.normSquared(se3.error(camera_transform, new_camera_transform.average))
        for marker in marker_id_list:
            if marker_types[marker] == 't':
                diff += vectorops.normSquared(se3.error(marker_transforms[marker], new_marker_transforms[marker].average))
            else:
                diff += vectorops.distanceSquared(marker_transforms[marker], new_marker_transforms[marker].average)
        camera_transform = new_camera_transform.average
        for marker in marker_id_list:
            marker_transforms[marker] = new_marker_transforms[marker].average
        if math.sqrt(diff) < tolerance:
            #converged!
print "Converged with diff %g on iteration %d"%(math.sqrt(diff),iters) break print " ESTIMATE CHANGE:",math.sqrt(diff) error = 0.0 for i in xrange(n): marker = marker_ids[i] Tclink = camera_link_transforms[i] Tmlink = marker_link_transforms[i] obs = marker_observations[i] Tc = se3.mul(Tclink,camera_transform) if marker_types[marker] == 't': Tm = se3.mul(Tmlink,marker_transforms[marker]) error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm)) else: Tm = se3.apply(Tmlink,marker_transforms[marker]) error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm) print " OBSERVATION ERROR:",math.sqrt(error) #raw_input() if DO_VISUALIZATION: rgroup.setFrameCoordinates("camera estimate",camera_transform) rgroup.setFrameCoordinates("marker estimate",marker_transforms.values()[0]) for i,obs in enumerate(marker_observations): rgroup.setFrameCoordinates("obs"+str(i)+" estimate",obs) vis.add("coordinates",rgroup) vis.dialog() if math.sqrt(diff) >= tolerance: print "Max iters reached" error = 0.0 for i in xrange(n): marker = marker_ids[i] Tclink = camera_link_transforms[i] Tmlink = marker_link_transforms[i] obs = marker_observations[i] Tc = se3.mul(Tclink,camera_transform) if marker_types[marker] == 't': Tm = se3.mul(Tmlink,marker_transforms[marker]) error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm)) else: Tm = se3.apply(Tmlink,marker_transforms[marker]) error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm) return (math.sqrt(error),camera_transform,marker_transforms)