def projectDrillDemoInCamera():
    q = om.findObjectByName('camera overlay')
    om.removeFromObjectModel(q)

    imageView = cameraview.views['CAMERA_LEFT']
    imageView.imageActor.SetOpacity(0.2)

    drawFrameInCamera(drillDemo.drill.frame.transform, 'drill frame', visible=False)

    tf = transformUtils.copyFrame(drillDemo.drill.frame.transform)
    tf.PreMultiply()
    tf.Concatenate(drillDemo.drill.drillToButtonTransform)
    drawFrameInCamera(tf, 'drill button')

    tf2 = transformUtils.copyFrame(tf)
    tf2.PreMultiply()
    tf2.Concatenate(transformUtils.frameFromPositionAndRPY([0, 0, 0], [180, 0, 0]))
    drawFrameInCamera(tf2, 'drill button flip')

    drawObjectInCamera('drill', visible=False)
    drawObjectInCamera('sensed pointer tip')
    obj = om.findObjectByName('sensed pointer tip frame')
    if obj is not None:
        drawFrameInCamera(obj.transform, 'sensed pointer tip frame', visible=False)

    #drawObjectInCamera('left robotiq', visible=False)
    #drawObjectInCamera('right pointer', visible=False)

    v = imageView.view
    v.render()
def toggleFootstepWidget(displayPoint, view, useHorizontalWidget=False):

    obj, _ = vis.findPickedObject(displayPoint, view)
    if not obj:
        return False

    name = obj.getProperty('Name')

    if name in ('footstep widget', 'footstep widget frame'):
        om.removeFromObjectModel(om.findObjectByName('footstep widget'))
        return True

    match = re.match('^step (\d+)$', name)
    if not match:
        return False

    stepIndex = int(match.group(1))

    existingWidget = om.findObjectByName('footstep widget')
    if existingWidget:
        previousStep = existingWidget.stepIndex
        print 'have existing widget for step:', stepIndex
        om.removeFromObjectModel(existingWidget)
        if previousStep == stepIndex:
            print 'returning because widget was for selected step'
            return True

    footMesh = shallowCopy(obj.polyData)
    footFrame = transformUtils.copyFrame(obj.getChildFrame().transform)

    if useHorizontalWidget:
        rpy = [0.0, 0.0, transformUtils.rollPitchYawFromTransform(footFrame)[2]]
        footFrame = transformUtils.frameFromPositionAndRPY(footFrame.GetPosition(), np.degrees(rpy))

    footObj = vis.showPolyData(footMesh, 'footstep widget', parent='planning', alpha=0.2)
    footObj.stepIndex = stepIndex
    frameObj = vis.showFrame(footFrame, 'footstep widget frame', parent=footObj, scale=0.2)
    footObj.actor.SetUserTransform(frameObj.transform)
    footObj.setProperty('Color', obj.getProperty('Color'))
    frameObj.setProperty('Edit', True)

    rep = frameObj.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)
    frameObj.widget.HandleRotationEnabledOff()

    walkGoal = om.findObjectByName('walking goal')
    if walkGoal:
        walkGoal.setProperty('Edit', False)

    def onFootWidgetChanged(frame):
        footstepsDriver.onStepModified(stepIndex - 1, frame)

    frameObj.connectFrameModified(onFootWidgetChanged)
    return True
def vtkICP(self):
    object_name = str(self.ui.alignObject.text)
    if object_name == "" or not om.findObjectByName(object_name):
        print "object: " + object_name + " not found"
        return

    model = vtk.vtkPolyData()
    om.findObjectByName(object_name + ".urdf").model.getModelMesh(model)
    scene = om.findObjectByName(object_name).polyData

    icp = vtk.vtkIterativeClosestPointTransform()
    icp.SetMaximumNumberOfIterations(100)
    # need to shift centroid to center of object after clicking on it
    icp.StartByMatchingCentroidsOn()
    icp.SetSource(model)
    icp.SetTarget(scene)
    icp.GetLandmarkTransform().SetModeToRigidBody()
    icp.Modified()
    icp.Update()

    t = vtk.vtkTransformPolyDataFilter()
    t.SetInput(model)
    t.SetTransform(icp)
    t.Update()
    transformedObject = t.GetOutput()
    print transformedObject

    vis.showPolyData(transformedObject, object_name + "_transform", color=[0, 1, 0],
                     parent=self.getTransformedObjectsFolder())
    self.packageAlignmentResult(object_name, icp.GetMatrix())
def __init__(self, robotSystem, view):
    self.robotStateModel = robotSystem.robotStateModel
    self.robotStateJointController = robotSystem.robotStateJointController
    self.robotSystem = robotSystem
    self.lFootFtFrameId = self.robotStateModel.model.findLinkID(self.robotSystem.ikPlanner.leftFootLink)
    self.rFootFtFrameId = self.robotStateModel.model.findLinkID(self.robotSystem.ikPlanner.rightFootLink)
    self.leftInContact = 0
    self.rightInContact = 0
    self.view = view
    self.ddDrakeWrapper = PythonQt.dd.ddDrakeWrapper()

    self.warningButton = QtGui.QLabel('COP Warning')
    self.warningButton.setStyleSheet("background-color:white")
    self.dialogVisible = False

    d = DebugData()
    vis.updatePolyData(d.getPolyData(), 'measured cop', view=self.view, parent='robot state model')
    om.findObjectByName('measured cop').setProperty('Visible', False)

    self.robotStateModel.connectModelChanged(self.update)
def addGraspFrameFromProperty(self, name=None, use_grasped=False, offset=(0, 0, 0)):
    if name is None:
        name = self.params.getPropertyEnumValue(self.place_target_name)
    position = self.params.getProperty(name)
    xyz = [position[i] + offset[i] for i in xrange(0, 3)]
    rpy = position[3:6]
    dims = self._default_target_dimensions

    if use_grasped:
        obj = om.findObjectByName(self.planner.getAffordanceName())
        dims = obj.getProperty('Dimensions')
        # TODO(sam.creasey) this is wrong we should get the
        # rotation from somewhere.
        #dims = [dims[1], dims[2], dims[0]]
        dims = [dims[0], dims[2], dims[1]]
        object_to_world = obj.getChildFrame().transform
        grasp_to_world = om.findObjectByName(
            'grasp to world%s' % self.planner.getGraspFrameSuffix()).transform
        print "object to world", object_to_world.GetPosition(), object_to_world.GetOrientation()
        print "grasp to world", grasp_to_world.GetPosition(), grasp_to_world.GetOrientation()

    iiwaplanning.setBoxGraspTarget(xyz, rpy, dims)
    self.planner.setAffordanceName('box')
    (x_offset, _, _) = getOffsets(dims)
    print "dimensions", dims, "x_offset", x_offset
    self.planner.addBoxGraspFrames(graspOffset=([-x_offset, 0., dims[2] / 2.], [0, 0, 0]))
def makeGraspFrames(obj, graspOffset, pregraspOffset=(-0.08, 0, 0), suffix=''):
    pos, rpy = graspOffset
    objectToWorld = obj.getChildFrame().transform
    graspToObject = transformUtils.frameFromPositionAndRPY(pos, rpy)
    preGraspToGrasp = transformUtils.frameFromPositionAndRPY(pregraspOffset, [0, 0, 0])
    graspToWorld = transformUtils.concatenateTransforms([graspToObject, objectToWorld])
    preGraspToWorld = transformUtils.concatenateTransforms([preGraspToGrasp, graspToWorld])

    graspFrameName = 'grasp to world%s' % suffix
    pregraspFrameName = 'pregrasp to world%s' % suffix

    om.removeFromObjectModel(om.findObjectByName(graspFrameName))
    om.removeFromObjectModel(om.findObjectByName(pregraspFrameName))

    graspFrame = vis.showFrame(graspToWorld, graspFrameName, scale=0.1, parent=obj, visible=False)
    preGraspFrame = vis.showFrame(preGraspToWorld, pregraspFrameName, scale=0.1, parent=obj, visible=False)

    obj.getChildFrame().getFrameSync().addFrame(graspFrame, ignoreIncoming=True)
    graspFrame.getFrameSync().addFrame(preGraspFrame, ignoreIncoming=True)
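# Hypothetical usage sketch for makeGraspFrames() above (assumes the same imports
# as the surrounding snippets and that an affordance named 'box' already exists in
# the object model; the offsets and suffix are illustrative only, not from the
# original source).
boxObj = om.findObjectByName('box')
if boxObj is not None:
    # grasp 5 cm above the box origin, pregrasp 8 cm back along the grasp x axis
    makeGraspFrames(boxObj, graspOffset=([0.0, 0.0, 0.05], [0, 0, 0]), suffix=' box')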
def updateIntersection(frame):

    origin = np.array(frame.transform.GetPosition())
    rayLength = 5

    for i in range(0, numRays):
        ray = rays[:, i]
        rayTransformed = np.array(frame.transform.TransformNormal(ray))
        intersection = computeIntersection(locator, origin, origin + rayTransformed * rayLength)
        name = 'ray intersection ' + str(i)

        if intersection is not None:
            om.removeFromObjectModel(om.findObjectByName(name))
            d = DebugData()
            d.addLine(origin, intersection)
            color = [1, 0, 0]
            # d.addSphere(intersection, radius=0.04)
            vis.updatePolyData(d.getPolyData(), name, color=color)
        else:
            om.removeFromObjectModel(om.findObjectByName(name))
            d = DebugData()
            d.addLine(origin, origin + rayTransformed * rayLength)
            color = [0, 1, 0]
            # d.addSphere(intersection, radius=0.04)
            vis.updatePolyData(d.getPolyData(), name, color=color)
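# A minimal sketch (not from the original source) of how the module-level
# 'numRays' and 'rays' used by updateIntersection() above might be defined:
# a fan of unit direction vectors in the sensor's XY plane. The 90 degree
# spread and ray count are assumptions for illustration only.
import numpy as np

numRays = 20
_angles = np.radians(np.linspace(-45, 45, numRays))
rays = np.zeros((3, numRays))
rays[0, :] = np.cos(_angles)
rays[1, :] = np.sin(_angles)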
def colorizeMaps(enabled):
    if enabled:
        om.findObjectByName('Map Server').source.colorizeCallback = colorizeMapCallback
        for name in _colorizeMapNames:
            colorizeMapCallback(om.findObjectByName(name))
    else:
        om.findObjectByName('Map Server').source.colorizeCallback = None
def onClearClicked(self):
    om.removeFromObjectModel(om.findObjectByName('walking goal'))
    om.removeFromObjectModel(om.findObjectByName('footstep widget'))
    om.removeFromObjectModel(om.findObjectByName('LCM GL'))

    self.clearFootstepPlan()

    if self.toolbarWidget:
        self.execButton.setEnabled(False)
def onExecClicked(self):
    self.commitFootstepPlan(self.lastFootstepPlan)

    om.removeFromObjectModel(om.findObjectByName('footstep widget'))
    walkGoal = om.findObjectByName('walking goal')
    if walkGoal:
        walkGoal.setProperty('Edit', False)

    self.execButton.setEnabled(False)
def toggleFootstepWidget(displayPoint, view, useHorizontalWidget=False):

    obj, _ = vis.findPickedObject(displayPoint, view)
    if not obj:
        return False

    name = obj.getProperty("Name")

    if name in ("footstep widget", "footstep widget frame"):
        om.removeFromObjectModel(om.findObjectByName("footstep widget"))
        return True

    match = re.match("^step (\d+)$", name)
    if not match:
        return False

    stepIndex = int(match.group(1))

    existingWidget = om.findObjectByName("footstep widget")
    if existingWidget:
        previousStep = existingWidget.stepIndex
        om.removeFromObjectModel(existingWidget)
        if previousStep == stepIndex:
            return True

    footMesh = shallowCopy(obj.polyData)
    footFrame = transformUtils.copyFrame(obj.getChildFrame().transform)

    if useHorizontalWidget:
        rpy = [0.0, 0.0, transformUtils.rollPitchYawFromTransform(footFrame)[2]]
        footFrame = transformUtils.frameFromPositionAndRPY(
            footFrame.GetPosition(), np.degrees(rpy)
        )

    footObj = vis.showPolyData(
        footMesh, "footstep widget", parent="planning", alpha=0.2
    )
    footObj.stepIndex = stepIndex
    frameObj = vis.showFrame(
        footFrame, "footstep widget frame", parent=footObj, scale=0.2
    )
    footObj.actor.SetUserTransform(frameObj.transform)
    footObj.setProperty("Color", obj.getProperty("Color"))
    frameObj.setProperty("Edit", True)

    rep = frameObj.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)
    frameObj.widget.HandleRotationEnabledOff()

    walkGoal = om.findObjectByName("walking goal")
    if walkGoal:
        walkGoal.setProperty("Edit", False)

    return True
def colorizeMaps(enabled):
    if enabled:
        om.findObjectByName('Map Server').source.colorizeCallback = colorizeMapCallback
        for name in _colorizeMapNames:
            colorizeMapCallback(om.findObjectByName(name))
    else:
        om.findObjectByName('Map Server').source.colorizeCallback = None
def ungraspAffordance(self, affordanceName):
    try:
        del self.frameSyncs[affordanceName]
        del self.attachedAffordances[affordanceName]
    except KeyError:
        pass

    if not self.frameSyncs:
        om.removeFromObjectModel(om.findObjectByName('l_hand frame'))
        om.removeFromObjectModel(om.findObjectByName('r_hand frame'))
def ungraspAffordance(self, affordanceName):
    try:
        del self.frameSyncs[affordanceName]
        del self.attachedAffordances[affordanceName]
    except KeyError:
        pass

    if not self.frameSyncs:
        om.removeFromObjectModel(om.findObjectByName("l_hand frame"))
        om.removeFromObjectModel(om.findObjectByName("r_hand frame"))
def initialize(self):
    self.dt = self.options['dt']
    self.controllerTypeOrder = ['defaultRandom', 'learnedRandom', 'learned', 'default']

    self.setDefaultOptions()

    self.Sensor = SensorObj(rayLength=self.options['Sensor']['rayLength'],
                            numRays=self.options['Sensor']['numRays'])
    self.Controller = ControllerObj(self.Sensor)
    self.Car = CarPlant(controller=self.Controller,
                        velocity=self.options['Car']['velocity'])
    self.Reward = Reward(self.Sensor,
                         collisionThreshold=self.collisionThreshold,
                         actionCost=self.options['Reward']['actionCost'],
                         collisionPenalty=self.options['Reward']['collisionPenalty'],
                         raycastCost=self.options['Reward']['raycastCost'])
    self.setSARSA()

    # create the things needed for simulation
    om.removeFromObjectModel(om.findObjectByName('world'))
    self.world = World.buildCircleWorld(
        percentObsDensity=self.options['World']['percentObsDensity'],
        circleRadius=self.options['World']['circleRadius'],
        nonRandom=self.options['World']['nonRandomWorld'],
        scale=self.options['World']['scale'],
        randomSeed=self.options['World']['randomSeed'],
        obstaclesInnerFraction=self.options['World']['obstaclesInnerFraction'])

    om.removeFromObjectModel(om.findObjectByName('robot'))
    self.robot, self.frame = World.buildRobot()
    self.locator = World.buildCellLocator(self.world.visObj.polyData)
    self.Sensor.setLocator(self.locator)
    self.frame = self.robot.getChildFrame()
    self.frame.setProperty('Scale', 3)
    self.frame.setProperty('Edit', True)
    self.frame.widget.HandleRotationEnabledOff()
    rep = self.frame.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)

    self.supervisedTrainingTime = self.options['runTime']['supervisedTrainingTime']
    self.learningRandomTime = self.options['runTime']['learningRandomTime']
    self.learningEvalTime = self.options['runTime']['learningEvalTime']
    self.defaultControllerTime = self.options['runTime']['defaultControllerTime']

    self.Car.setFrame(self.frame)
    print "Finished initialization"
def reset(self, robot, frame_name):
    self._tick_count = 0
    robot.init_angle()
    self.clear_locator()
    om.removeFromObjectModel(om.findObjectByName("line1"))
    om.removeFromObjectModel(om.findObjectByName("line2"))
    self._play_count += 1
    self.set_safe_position(robot)
    print("count:", self._play_count)
    self._update_moving_object(robot, frame_name)
    robot._ctrl.save()
def onMergeIntoPointCloud():
    allPointClouds = om.findObjectByName("point clouds")
    if allPointClouds:
        allPointClouds = [i.getProperty("Name") for i in allPointClouds.children()]

    sel = QtGui.QInputDialog.getItem(
        None,
        "Point Cloud Merging",
        "Pick point cloud to merge into:",
        allPointClouds,
        current=0,
        editable=False,
    )
    sel = om.findObjectByName(sel)

    # Make a copy of each in same frame
    polyDataInto = vtk.vtkPolyData()
    polyDataInto.ShallowCopy(sel.polyData)
    if sel.getChildFrame():
        polyDataInto = segmentation.transformPolyData(polyDataInto, sel.getChildFrame().transform)

    polyDataFrom = vtk.vtkPolyData()
    polyDataFrom.DeepCopy(pointCloudObj.polyData)
    if pointCloudObj.getChildFrame():
        polyDataFrom = segmentation.transformPolyData(polyDataFrom, pointCloudObj.getChildFrame().transform)

    # Actual merge
    append = filterUtils.appendPolyData([polyDataFrom, polyDataInto])
    if sel.getChildFrame():
        polyDataInto = segmentation.transformPolyData(polyDataInto, sel.getChildFrame().transform.GetInverse())

    # resample
    append = segmentationroutines.applyVoxelGrid(append, 0.01)
    append = segmentation.addCoordArraysToPolyData(append)

    # Recenter the frame
    sel.setPolyData(append)
    t = vtk.vtkTransform()
    t.PostMultiply()
    t.Translate(filterUtils.computeCentroid(append))
    segmentation.makeMovable(sel, t)

    # Hide the old one
    if pointCloudObj.getProperty("Name") in allPointClouds:
        pointCloudObj.setProperty("Visible", False)
def initialize(self):
    self.dt = self.options["dt"]
    self.controllerTypeOrder = ["default"]

    self.setDefaultOptions()

    self.Sensor = SensorObj(
        rayLength=self.options["Sensor"]["rayLength"],
        numRays=self.options["Sensor"]["numRays"]
    )
    self.SensorApproximator = SensorApproximatorObj(
        numRays=self.options["Sensor"]["numRays"],
        circleRadius=self.options["World"]["circleRadius"]
    )
    self.Controller = ControllerObj(self.Sensor, self.SensorApproximator)
    self.Car = CarPlant(controller=self.Controller, velocity=self.options["Car"]["velocity"])
    self.Controller.initializeVelocity(self.Car.v)
    self.ActionSet = ActionSetObj()

    # create the things needed for simulation
    om.removeFromObjectModel(om.findObjectByName("world"))
    self.world = World.buildLineSegmentTestWorld(
        percentObsDensity=self.options["World"]["percentObsDensity"],
        circleRadius=self.options["World"]["circleRadius"],
        nonRandom=self.options["World"]["nonRandomWorld"],
        scale=self.options["World"]["scale"],
        randomSeed=self.options["World"]["randomSeed"],
        obstaclesInnerFraction=self.options["World"]["obstaclesInnerFraction"],
    )

    om.removeFromObjectModel(om.findObjectByName("robot"))
    self.robot, self.frame = World.buildRobot()

    self.frame = self.robot.getChildFrame()
    self.frame.setProperty("Scale", 3)
    # self.frame.setProperty('Visible', False)
    # self.frame.setProperty('Edit', True)
    self.frame.widget.HandleRotationEnabledOff()
    rep = self.frame.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)

    self.defaultControllerTime = self.options["runTime"]["defaultControllerTime"]

    self.Car.setFrame(self.frame)
    print "Finished initialization"
def initialize(self):
    self.dt = self.options['dt']
    self.controllerTypeOrder = ['default']

    self.setDefaultOptions()

    self.Sensor = SensorObj(rayLength=self.options['Sensor']['rayLength'],
                            numRays=self.options['Sensor']['numRays'])
    self.SensorManual = SensorObjManual(rayLength=self.options['Sensor']['rayLength'],
                                        numRays=self.options['Sensor']['numRays'])
    self.SensorApproximator = SensorApproximatorObj(numRays=self.options['Sensor']['numRays'],
                                                    circleRadius=self.options['World']['circleRadius'])
    self.Controller = ControllerObj(self.Sensor, self.SensorApproximator)
    self.Car = CarPlant(controller=self.Controller,
                        velocity=self.options['Car']['velocity'])
    self.Controller.initializeVelocity(self.Car.v)

    # create the things needed for simulation
    om.removeFromObjectModel(om.findObjectByName('world'))
    self.world = World.buildLineSegmentTestWorld(
        percentObsDensity=self.options['World']['percentObsDensity'],
        circleRadius=self.options['World']['circleRadius'],
        nonRandom=self.options['World']['nonRandomWorld'],
        scale=self.options['World']['scale'],
        randomSeed=self.options['World']['randomSeed'],
        obstaclesInnerFraction=self.options['World']['obstaclesInnerFraction'])

    om.removeFromObjectModel(om.findObjectByName('robot'))
    self.robot, self.frame = World.buildRobot()

    self.frame = self.robot.getChildFrame()
    self.frame.setProperty('Scale', 3)
    #self.frame.setProperty('Visible', False)
    #self.frame.setProperty('Edit', True)
    self.frame.widget.HandleRotationEnabledOff()
    rep = self.frame.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)

    self.defaultControllerTime = self.options['runTime']['defaultControllerTime']

    self.Car.setFrame(self.frame)
    print "Finished initialization"
def _addPlanItem(plan, name, itemClass):
    assert plan is not None
    item = itemClass(name)
    item.plan = plan
    om.removeFromObjectModel(om.findObjectByName(name))
    om.addToObjectModel(item, om.getOrCreateContainer('segmentation'))
    return item
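# Hypothetical usage sketch for _addPlanItem() above (not from the original
# source). 'FootstepPlanItem' and the dummy plan message are stand-ins for
# whatever ObjectModelItem subclass and LCM plan message this codebase pairs up.
class FootstepPlanItem(om.ObjectModelItem):
    pass

dummyPlanMsg = object()  # placeholder for a real plan message
planItem = _addPlanItem(dummyPlanMsg, 'footstep plan item', FootstepPlanItem)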
def fitObjectToPointcloud(self, objectName, pointCloud=None, downsampleObject=True,
                          objectPolyData=None, filename=None, visualize=True,
                          algorithm="GoICP"):
    if objectPolyData is None:
        if filename is None:
            filename = utils.getObjectMeshFilename(objectName)
        objectPolyData = ioUtils.readPolyData(filename)

    # downsample the object poly data
    objectPolyDataForAlgorithm = objectPolyData
    if downsampleObject:
        objectPolyDataForAlgorithm = segmentation.applyVoxelGrid(objectPolyData, leafSize=0.002)

    if pointCloud is None:
        pointCloud = om.findObjectByName('reconstruction').polyData

    sceneToModelTransform = GlobalRegistration.runRegistration(algorithm, pointCloud,
                                                               objectPolyDataForAlgorithm)

    objectToWorld = sceneToModelTransform.GetLinearInverse()
    self.objectToWorldTransform[objectName] = objectToWorld

    if visualize:
        alignedObj = vis.updatePolyData(objectPolyData, objectName + ' aligned')
        alignedObj.actor.SetUserTransform(objectToWorld)

    return objectToWorld
def onMouseMove(self, displayPoint, modifiers=None):

    om.removeFromObjectModel(om.findObjectByName('link selection'))
    self.linkName = None
    self.pickedPoint = None

    selection = self.getSelection(displayPoint)
    if selection is None:
        return

    pickedPoint, linkName, normal, pickedCellId = selection

    d = DebugData()
    d.addSphere(pickedPoint, radius=0.01)
    d.addLine(pickedPoint, np.array(pickedPoint) + 0.1 * np.array(normal), radius=0.005)
    obj = vis.updatePolyData(d.getPolyData(), 'link selection', color=[0, 1, 0])
    obj.actor.SetPickable(False)

    self.linkName = linkName
    self.pickedPoint = pickedPoint
    self.normal = normal
    self.pickedCellId = pickedCellId

    modifiers = QtGui.QApplication.keyboardModifiers()
    if modifiers == QtCore.Qt.ControlModifier and self.cellCaptureMode:
        self.provisionallyCaptureCell(linkName, pickedCellId)
def testAlign(modelName, sceneName):

    removeFile('model_features.bin')
    removeFile('scene_features.bin')
    removeFile('features.bin')

    for objType, objName in [('model', modelName), ('scene', sceneName)]:

        print 'process', objName
        pd = om.findObjectByName(objName).polyData
        pd = segmentation.applyVoxelGrid(pd, leafSize=0.005)

        print 'compute normals...'
        pd = segmentation.normalEstimation(pd, searchRadius=0.05, useVoxelGrid=False,
                                           voxelGridLeafSize=0.01)

        print 'compute features...'
        computePointFeatureHistograms(pd, searchRadius=0.10)
        renameFeaturesFile(objType + '_features.bin')

    print 'run registration...'
    subprocess.check_call(['bash', 'runFGR.sh'])
    showTransformedData(sceneName)

    print 'done.'
def drawForce(self, name, linkName, forceLocation, force, color, key=None):

    forceDirection = force / np.linalg.norm(force)
    # om.removeFromObjectModel(om.findObjectByName(name))

    linkToWorld = self.robotStateModel.getLinkFrame(linkName)
    forceLocationInWorld = np.array(linkToWorld.TransformPoint(forceLocation))
    forceDirectionInWorld = np.array(linkToWorld.TransformDoubleVector(forceDirection))

    # point = forceLocationInWorld - 0.1*forceDirectionInWorld

    # d = DebugData()
    #
    # d.addArrow(point, forceLocationInWorld, headRadius=0.025, tubeRadius=0.005, color=color)
    # d.addSphere(forceLocationInWorld, radius=0.01)
    # d.addLine(point, forceLocationInWorld, radius=0.005)

    transformForVis = transformUtils.getTransformFromOriginAndNormal(forceLocationInWorld,
                                                                     forceDirectionInWorld)

    obj = vis.updatePolyData(self.plungerPolyData, name, color=color)
    obj.actor.SetUserTransform(transformForVis)

    if key is not None and om.findObjectByName(name) is not None:
        obj.addProperty('magnitude', self.externalForces[key]['forceMagnitude'])
        obj.addProperty('linkName', linkName)
        obj.addProperty('key', key)
        obj.connectRemovedFromObjectModel(self.removeForceFromFrameObject)
        obj.properties.connectPropertyChanged(functools.partial(self.onPropertyChanged, obj))

    return obj
def __init__(self, view):

    self.view = view

    loader = QtUiTools.QUiLoader()
    uifile = QtCore.QFile(':/ui/ddFrameVisualization.ui')
    assert uifile.open(uifile.ReadOnly)

    self.widget = loader.load(uifile)
    self.ui = WidgetDict(self.widget.children())

    self.botFrameUpdater = BotFrameUpdater(self.ui.botFramesListWidget)

    robotModel = om.findObjectByName('robot state model')
    self.linkFrameUpdater = LinkFrameUpdater(robotModel, self.ui.linkFramesListWidget)

    self.eventFilter = PythonQt.dd.ddPythonEventFilter()
    self.ui.scrollArea.installEventFilter(self.eventFilter)
    self.eventFilter.addFilteredEventType(QtCore.QEvent.Resize)
    self.eventFilter.connect('handleEvent(QObject*, QEvent*)', self.onEvent)

    PythonQt.dd.ddGroupBoxHider(self.ui.botFramesGroup)
    PythonQt.dd.ddGroupBoxHider(self.ui.linkFramesGroup)

    self.updateTimer = TimerCallback(targetFps=60)
    self.updateTimer.callback = self.updateFrames
    self.updateTimer.start()
def _remove_subscriber(self):
    if self._subscriber is None:
        return

    lcmUtils.removeSubscriber(self._subscriber)
    om.removeFromObjectModel(om.findObjectByName(self._folder_name))
    self._subscriber = None
def computeRewardFromFrameLocation(self, u=0.0):
    carFrame = om.findObjectByName('robot frame')
    raycastDistance = self.sensorObj.raycastAll(carFrame)
    carState = 0.0  # just a placeholder for now
    S = (carState, raycastDistance)
    return self.computeReward(S, u)
def run(self):

    polyData = self.getPointCloud()
    annotation = self.getAnnotationInput()
    annotationPoint = annotation.annotationPoints[0]

    planePoints, normal = segmentation.applyLocalPlaneFit(polyData, annotationPoint,
                                                          searchRadius=0.1, searchRadiusEnd=0.2)
    viewDirection = segmentation.SegmentationContext.getGlobalInstance().getViewDirection()

    if np.dot(normal, viewDirection) < 0:
        normal = -normal

    xaxis = normal
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)

    t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(annotationPoint)

    polyData = annotation.polyData
    polyData = segmentation.transformPolyData(polyData, t.GetLinearInverse())

    annotation.setProperty('Visible', False)
    om.removeFromObjectModel(om.findObjectByName('wall'))
    obj = vis.showPolyData(polyData, 'wall')
    obj.actor.SetUserTransform(t)
    vis.showFrame(t, 'wall frame', scale=0.2, parent=obj)
def run(self):
    polyData = self.getPointCloud()
    om.removeFromObjectModel(om.findObjectByName('pointcloud snapshot'))
    vis.showPolyData(polyData, 'pointcloud snapshot', parent='segmentation', visible=False)
def getGroundFrame(self):
    frame = om.findObjectByName(self.properties.getProperty('Ground frame name'))
    if not frame:
        self.fail('could not find ground frame')
    return frame
def add_target(self, target):
    data = DebugData()
    center = [target[0], target[1], 1]
    axis = [0, 0, 1]  # Upright cylinder.
    data.addCylinder(center, axis, 2, 3)
    om.removeFromObjectModel(om.findObjectByName("target"))
    self._add_polydata(data.getPolyData(), "target", [0, 0.8, 0])
def planReach(self):

    ikPlanner = self.robotSystem.ikPlanner
    startPose = self.getPlanningStartPose()
    startPoseName = 'q_reach_start'
    endPoseName = 'q_reach_end'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    side = 'right'

    movingReachConstraint = ikPlanner.createMovingReachConstraints(startPoseName, lockBase=True,
                                                                   lockBack=True, lockArm=True,
                                                                   side=side)

    palmToHand = ikPlanner.getPalmToHandLink(side=side)
    targetFrame = om.findObjectByName('reach frame').transform

    poseConstraints = ikPlanner.createPositionOrientationGraspConstraints(side, targetFrame,
                                                                          graspToHandLinkFrame=palmToHand,
                                                                          angleToleranceInDegrees=5.0)

    constraints = []
    constraints.extend(movingReachConstraint)
    constraints.extend(poseConstraints)

    constraintSet = ikplanner.ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    constraintSet.ikParameters = IkParameters(maxDegreesPerSecond=30)

    seedPose = ikPlanner.getMergedPostureFromDatabase(startPose, 'surprise:switch', 'above_switch',
                                                      side='right')
    seedPoseName = 'q_above_switch'
    self.robotSystem.ikPlanner.addPose(seedPose, seedPoseName)
    constraintSet.seedPoseName = seedPoseName
    constraintSet.nominalPoseName = seedPoseName

    endPose, info = constraintSet.runIk()
    plan = constraintSet.planEndPoseGoal()

    self.addPlan(plan)
def updateReachFrame(self):
    graspFrame = transformUtils.copyFrame(self.pinchToBox)
    boxFrame = om.findObjectByName('Switch Box').getChildFrame().transform
    graspFrame.PostMultiply()
    graspFrame.Concatenate(boxFrame)
    vis.updateFrame(graspFrame, 'pinch reach frame', scale=0.2)
def showFusedMaps(self):
    om.removeFromObjectModel(om.findObjectByName('stereo'))
    om.getOrCreateContainer('stereo')

    q = imageManager.queue
    cameraToLocalNow = vtk.vtkTransform()
    utime = q.getCurrentImageTime('CAMERA_TSDF')

    q.getTransform('CAMERA_LEFT', 'local', utime, cameraToLocalNow)
    cameraToLocalFusedNow = vtk.vtkTransform()
    q.getTransform('CAMERA_LEFT_ALT', 'local', utime, cameraToLocalFusedNow)

    for i in range(len(self.pointClouds)):

        fusedNowToLocalNow = vtk.vtkTransform()
        fusedNowToLocalNow.PreMultiply()
        fusedNowToLocalNow.Concatenate(cameraToLocalNow)
        fusedNowToLocalNow.Concatenate(cameraToLocalFusedNow.GetLinearInverse())

        fusedTransform = vtk.vtkTransform()
        fusedTransform.PreMultiply()
        fusedTransform.Concatenate(fusedNowToLocalNow)
        fusedTransform.Concatenate(self.cameraToLocalFusedTransforms[i])

        pd = filterUtils.transformPolyData(self.pointClouds[i], fusedTransform)

        vis.showFrame(fusedTransform, 'cloud frame ' + str(i), visible=True, scale=0.2, parent='stereo')
        vis.showPolyData(pd, 'stereo ' + str(i), parent='stereo', colorByName='rgb_colors')
def colorizeMapsOff():
    obj = om.findObjectByName('Map Server')
    obj.source.colorizeCallback = None
    alpha = 0.7
    pointSize = 1.0
    obj.setProperty('Alpha', alpha)
    obj.setProperty('Point Size', pointSize)
def getDebugFolder():
    obj = om.findObjectByName("debug")
    if obj is None:
        obj = om.getOrCreateContainer("debug", om.getOrCreateContainer("segmentation"))
        om.collapse(obj)
    return obj
def moveFunnelRandomly():
    xyz = [np.random.uniform(0.25, 0.75),
           np.random.uniform(-0.3, 0.3),
           np.random.uniform(0.0, 0.5)]
    rpy = [np.random.uniform(-120, -60),
           np.random.uniform(-30, 30),
           np.random.uniform(-30, 30)]
    t = transformUtils.frameFromPositionAndRPY(xyz, rpy)
    om.findObjectByName('blue funnel').getChildFrame().copyFrame(t)
def getBDIAdjustedFootstepsFolder():
    obj = om.findObjectByName('BDI adj footstep plan')
    if obj is None:
        obj = om.getOrCreateContainer('BDI adj footstep plan')
        obj.setIcon(om.Icons.Feet)
        om.collapse(obj)
    return obj
def getFootstepRelativeTransform(self):
    self.footstepsToPlan = []
    for n in xrange(1, self.numFootsteps + 1):
        stepFrameName = 'step ' + str(n) + ' frame'
        fToWorld = transformUtils.copyFrame(om.findObjectByName(stepFrameName).transform)
        fToPlan = self.getTransformToPlanningFrame(fToWorld)
        self.footstepsToPlan.append(fToPlan)
def saveObjectPolyData(objectName):
    visObj = om.findObjectByName(objectName)
    filename = os.path.join(getLabelFusionDataDir(), 'object-meshes', objectName + '_aligned.vtp')
    polyData = filterUtils.transformPolyData(visObj.polyData, visObj.actor.GetUserTransform())
    ioUtils.writePolyData(polyData, filename)
def getSupportSearchPoint(supportName='cylinder'):
    obj = om.findObjectByName(supportName)
    t = obj.getChildFrame().transform
    zCoord = obj.getProperty('Length') / 2.0
    p = np.array(t.TransformPoint([0, 0, zCoord]))
    return p
def handle_message(self, msg):
    # Limits the rate of message handling, since redrawing is done in the
    # message handler.
    self._sub.setSpeedLimit(30)

    # Removes the folder completely.
    om.removeFromObjectModel(om.findObjectByName(self._folder_name))

    # Recreates folder.
    folder = om.getOrCreateContainer(self._folder_name)

    # Though strangely named, DebugData() is the object through which
    # drawing is done in DrakeVisualizer.
    d = DebugData()

    # Iterate over all triangles, drawing the contact surface as a triangle
    # outlined in blue.
    for surface in msg.hydroelastic_contacts:
        for tri in surface.triangles:
            va = np.array([tri.p_WA[0], tri.p_WA[1], tri.p_WA[2]])
            vb = np.array([tri.p_WB[0], tri.p_WB[1], tri.p_WB[2]])
            vc = np.array([tri.p_WC[0], tri.p_WC[1], tri.p_WC[2]])
            d.addLine(p1=va, p2=vb, radius=0, color=[0, 0, 1])
            d.addLine(p1=vb, p2=vc, radius=0, color=[0, 0, 1])
            d.addLine(p1=va, p2=vc, radius=0, color=[0, 0, 1])

        key = (str(surface.body1_name), str(surface.body2_name))
        vis.showPolyData(d.getPolyData(), str(key), parent=folder, color=[0, 0, 1])
def showFusedMaps(self):
    om.removeFromObjectModel(om.findObjectByName("stereo"))
    om.getOrCreateContainer("stereo")

    q = imageManager.queue
    cameraToLocalNow = vtk.vtkTransform()
    utime = q.getCurrentImageTime("CAMERA_TSDF")

    q.getTransform("CAMERA_LEFT", "local", utime, cameraToLocalNow)
    cameraToLocalFusedNow = vtk.vtkTransform()
    q.getTransform("CAMERA_LEFT_ALT", "local", utime, cameraToLocalFusedNow)

    for i in range(len(self.pointClouds)):

        fusedNowToLocalNow = vtk.vtkTransform()
        fusedNowToLocalNow.PreMultiply()
        fusedNowToLocalNow.Concatenate(cameraToLocalNow)
        fusedNowToLocalNow.Concatenate(cameraToLocalFusedNow.GetLinearInverse())

        fusedTransform = vtk.vtkTransform()
        fusedTransform.PreMultiply()
        fusedTransform.Concatenate(fusedNowToLocalNow)
        fusedTransform.Concatenate(self.cameraToLocalFusedTransforms[i])

        pd = filterUtils.transformPolyData(self.pointClouds[i], fusedTransform)

        vis.showFrame(fusedTransform, ("cloud frame " + str(i)), visible=True, scale=0.2, parent="stereo")
        vis.showPolyData(pd, ("stereo " + str(i)), parent="stereo", colorByName="rgb_colors")
def getFootstepsFolder():
    obj = om.findObjectByName('footstep plan')
    if obj is None:
        obj = om.getOrCreateContainer('footstep plan', parentObj=om.getOrCreateContainer('planning'))
        obj.setIcon(om.Icons.Feet)
        om.collapse(obj)
    return obj
def getTerrainSlicesFolder():
    obj = om.findObjectByName('terrain slices')
    if obj is None:
        obj = om.getOrCreateContainer('terrain slices', parentObj=getFootstepsFolder())
        obj.setProperty('Visible', False)
        om.collapse(obj)
    return obj
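# Several folder getters in this collection (getDebugFolder, getBDIAdjustedFootstepsFolder,
# getFootstepsFolder, getTerrainSlicesFolder) share the same find-or-create-then-collapse
# pattern. A generic helper along these lines could factor it out; this is a sketch, not
# part of the original code, and the keyword arguments are assumptions.
def getOrCreateCollapsedFolder(name, parentObj=None, icon=None, visible=True):
    obj = om.findObjectByName(name)
    if obj is None:
        obj = om.getOrCreateContainer(name, parentObj=parentObj)
        if icon is not None:
            obj.setIcon(icon)
        if not visible:
            obj.setProperty('Visible', False)
        om.collapse(obj)
    return obj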
def cropPointCloud(self, point=None, pointCloud=None, radius=0.3, visualize=True):
    """
    Crop pointcloud using sphere around given point and radius
    :param point: defaults to point chosen using measurement panel
    :param pointCloud: defaults to 'reconstruction'
    :return: cropped point cloud, polyData
    """
    if point is None:
        if len(self.measurementPanel.pickPoints) == 0:
            print "you haven't selected any points using measurement panel"
            return

        point = self.measurementPanel.pickPoints[-1]

    if pointCloud is None:
        pointCloud = om.findObjectByName('reconstruction').polyData

    croppedPointCloud = segmentation.cropToSphere(pointCloud, point, radius=radius)
    if visualize:
        vis.updatePolyData(croppedPointCloud, 'cropped pointcloud')

    return croppedPointCloud
def fitSupport(pickPoint=[0.92858565, 0.00213802, 0.30315629]):

    om.removeFromObjectModel(om.findObjectByName('cylinder'))

    polyData = getPointCloud()
    t = vtk.vtkTransform()
    t.Translate(pickPoint)
    polyData = segmentation.cropToBox(polyData, t, [0.3, 0.3, 0.5])

    addHSVArrays(polyData)
    vis.updatePolyData(polyData, 'crop region', colorByName='rgb_colors', visible=False)

    zMax = getMaxZCoordinate(polyData)

    cyl = makeCylinder()
    cyl.setProperty('Radius', 0.03)
    cyl.setProperty('Length', zMax)

    origin = segmentation.computeCentroid(polyData)
    origin[2] = zMax / 2.0

    t = transformUtils.frameFromPositionAndRPY(origin, [0, 0, 0])
    cyl.getChildFrame().copyFrame(t)
def colorizeMultisense(enabled):
    obj = om.findObjectByName('Multisense')
    if not obj:
        return

    setVisProperties(obj, enabled)
    colorBy = 'Camera RGB' if enabled else 'Solid Color'
    obj.setProperty('Color By', colorBy)
def remove_subscriber(self):
    if self._sub is None:
        return

    lcmUtils.removeSubscriber(self._sub)
    self._sub = None
    om.removeFromObjectModel(om.findObjectByName(self._folder_name))
    print self._name + " subscriber removed."
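# A possible counterpart to remove_subscriber() above, sketched under the assumption
# that the class stores its channel name and LCM message type on self._channel and
# self._message_class (placeholder attribute names, not from the original source).
# lcmUtils.addSubscriber is director's standard subscription helper.
def add_subscriber(self):
    if self._sub is not None:
        return

    self._sub = lcmUtils.addSubscriber(self._channel,
                                       messageClass=self._message_class,
                                       callback=self.handle_message)
    print self._name + " subscriber added."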
def removeHandFrames(self):
    sides = ['left', 'right']
    for side in sides:
        linkName = self.ikPlanner.getHandLink(side)
        frameName = '%s constraint frame' % linkName
        frame = om.findObjectByName(frameName)
        if frame:
            om.removeFromObjectModel(frame)
def update(self, robotModel):
    name = "camera frustum %s" % self.robotModel.getProperty("Name")
    obj = om.findObjectByName(name)

    if obj and not obj.getProperty("Visible"):
        return

    vis.updatePolyData(self.getCameraFrustumGeometry(self.rayLength), name,
                       parent=self.robotModel, visible=False)
def __init__(self):

    om.init()
    self.view = PythonQt.dd.ddQVTKWidgetView()

    # init grid
    self.gridObj = vis.showGrid(self.view, parent='scene')
    self.gridObj.setProperty('Surface Mode', 'Surface with edges')
    self.gridObj.setProperty('Color', [0, 0, 0])
    self.gridObj.setProperty('Alpha', 0.1)

    # init view options
    self.viewOptions = vis.ViewOptionsItem(self.view)
    om.addToObjectModel(self.viewOptions, parentObj=om.findObjectByName('scene'))
    self.viewOptions.setProperty('Background color', [0.3, 0.3, 0.35])
    self.viewOptions.setProperty('Background color 2', [0.95, 0.95, 1])

    # setup camera
    applogic.setCameraTerrainModeEnabled(self.view, True)
    applogic.resetCamera(viewDirection=[-1, 0, -0.3], view=self.view)

    # add view behaviors
    viewBehaviors = viewbehaviors.ViewBehaviors(self.view)
    applogic._defaultRenderView = self.view

    self.mainWindow = QtGui.QMainWindow()
    self.mainWindow.setCentralWidget(self.view)
    self.mainWindow.resize(768 * (16 / 9.0), 768)
    self.mainWindow.setWindowTitle('Drake Visualizer')
    self.mainWindow.setWindowIcon(QtGui.QIcon(':/images/drake_logo.png'))
    self.mainWindow.show()

    self.drakeVisualizer = DrakeVisualizer(self.view)
    self.lcmglManager = lcmgl.LCMGLManager(self.view) if lcmgl.LCMGL_AVAILABLE else None

    self.screenGrabberPanel = ScreenGrabberPanel(self.view)
    self.screenGrabberDock = self.addWidgetToDock(self.screenGrabberPanel.widget,
                                                  QtCore.Qt.RightDockWidgetArea)
    self.screenGrabberDock.setVisible(False)

    self.cameraBookmarksPanel = camerabookmarks.CameraBookmarkWidget(self.view)
    self.cameraBookmarksDock = self.addWidgetToDock(self.cameraBookmarksPanel.widget,
                                                    QtCore.Qt.RightDockWidgetArea)
    self.cameraBookmarksDock.setVisible(False)

    model = om.getDefaultObjectModel()
    model.getTreeWidget().setWindowTitle('Scene Browser')
    model.getPropertiesPanel().setWindowTitle('Properties Panel')
    model.setActiveObject(self.viewOptions)

    self.sceneBrowserDock = self.addWidgetToDock(model.getTreeWidget(),
                                                 QtCore.Qt.LeftDockWidgetArea)
    self.propertiesDock = self.addWidgetToDock(self.wrapScrollArea(model.getPropertiesPanel()),
                                               QtCore.Qt.LeftDockWidgetArea)
    self.sceneBrowserDock.setVisible(False)
    self.propertiesDock.setVisible(False)

    applogic.addShortcut(self.mainWindow, 'Ctrl+Q', self.applicationInstance().quit)
    applogic.addShortcut(self.mainWindow, 'F1', self._toggleObjectModel)
    applogic.addShortcut(self.mainWindow, 'F2', self._toggleScreenGrabber)
    applogic.addShortcut(self.mainWindow, 'F3', self._toggleCameraBookmarks)
    applogic.addShortcut(self.mainWindow, 'F8', applogic.showPythonConsole)
def initialize(self):
    self.dt = self.options['dt']
    self.controllerTypeOrder = ['default']

    self.setDefaultOptions()

    self.Controller = ControllerObj()
    self.Car = CarPlant(controller=self.Controller,
                        velocity=self.options['Car']['velocity'])
    self.Controller.initializeVelocity(self.Car.v)
    self.ActionSet = ActionSetObj()

    # create the things needed for simulation
    om.removeFromObjectModel(om.findObjectByName('world'))
    self.world = World.buildCircleWorld(
        percentObsDensity=self.options['World']['percentObsDensity'],
        circleRadius=self.options['World']['circleRadius'],
        nonRandom=self.options['World']['nonRandomWorld'],
        scale=self.options['World']['scale'],
        randomSeed=self.options['World']['randomSeed'],
        obstaclesInnerFraction=self.options['World']['obstaclesInnerFraction'])

    om.removeFromObjectModel(om.findObjectByName('robot'))
    self.robot, self.frame = World.buildRobot()
    self.locator = World.buildCellLocator(self.world.visObj.polyData)
    # will need to set locator for purple trajectories
    #self.Sensor.setLocator(self.locator)

    self.frame = self.robot.getChildFrame()
    self.frame.setProperty('Scale', 3)
    #self.frame.setProperty('Visible', False)
    #self.frame.setProperty('Edit', True)
    self.frame.widget.HandleRotationEnabledOff()
    rep = self.frame.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)

    self.defaultControllerTime = self.options['runTime']['defaultControllerTime']

    self.Car.setFrame(self.frame)
    print "Finished initialization"
def run(self):
    planName = self.properties.getProperty('Plan name')
    plan = om.findObjectByName(planName)
    if not isinstance(plan, ManipulationPlanItem):
        self.fail('could not find manipulation plan')
    plan = plan.plan

    robotSystem.manipPlanner.commitManipPlan(plan)
def updateLinkFrame(self, robotModel, linkName, create=True):

    linkFrameName = '%s frame' % linkName

    if not create and not om.findObjectByName(linkFrameName):
        return

    t = robotModel.getLinkFrame(linkName)
    return vis.updateFrame(t, linkFrameName, scale=0.2, visible=False, parent=self.robotModel)