def updateIntersection(frame):
    """Cast the sensor rays out of *frame* and visualize each ray.

    Rays that hit geometry (tested against the cell locator) are drawn in
    red up to the hit point; rays that miss are drawn in green at full
    length.  One debug polydata item is maintained per ray index.
    """
    origin = np.array(frame.transform.GetPosition())
    rayLength = 5

    for i in range(0, numRays):
        ray = rays[:, i]
        rayTransformed = np.array(frame.transform.TransformNormal(ray))
        intersection = computeIntersection(
            locator, origin, origin + rayTransformed * rayLength)

        name = 'ray intersection ' + str(i)
        om.removeFromObjectModel(om.findObjectByName(name))

        d = DebugData()
        if intersection is not None:
            # hit: draw only up to the intersection point, in red
            d.addLine(origin, intersection)
            color = [1, 0, 0]
        else:
            # miss: draw the full-length ray, in green
            d.addLine(origin, origin + rayTransformed * rayLength)
            color = [0, 1, 0]
        vis.updatePolyData(d.getPolyData(), name, color=color)
def promotePolyDataItem(cls, obj):
    """Convert a plain PolyDataItem into a MeshAffordanceItem.

    The geometry is re-expressed relative to the object's child frame (or a
    new frame placed at the mesh centroid when no child frame exists),
    registered with the mesh manager, and the original object is replaced
    in the object model by a MeshAffordanceItem referencing the stored
    mesh.  Children and properties are carried over to the new item.

    Returns the newly created MeshAffordanceItem.
    """
    parent = obj.parent()
    view = obj.views[0]
    name = obj.getProperty('Name')
    polyData = obj.polyData
    props = obj.properties._properties
    childFrame = obj.getChildFrame()
    if childFrame:
        t = transformUtils.copyFrame(childFrame.transform)
    else:
        # no child frame: anchor the new frame at the mesh centroid
        t = vtk.vtkTransform()
        t.PostMultiply()
        t.Translate(filterUtils.computeCentroid(polyData))

    # express the mesh in the frame's local coordinates
    polyData = filterUtils.transformPolyData(polyData, t.GetLinearInverse())

    # keep every child except the old frame (a fresh frame is added below)
    children = [c for c in obj.children() if c is not childFrame]

    meshId = cls.getMeshManager().add(polyData)

    om.removeFromObjectModel(obj)
    obj = MeshAffordanceItem(name, view)
    obj.setProperty('Filename', meshId)
    om.addToObjectModel(obj, parentObj=parent)
    frame = vis.addChildFrame(obj)
    frame.copyFrame(t)

    # re-attach the original children to the new affordance item
    for child in children:
        om.addToObjectModel(child, parentObj=obj)

    obj.syncProperties(props)
    return obj
def _addPlanItem(plan, name, itemClass):
    """Wrap *plan* in an *itemClass* object model item named *name*.

    Any existing item with the same name is removed first; the new item is
    placed under the 'segmentation' container and returned.
    """
    assert plan is not None
    newItem = itemClass(name)
    newItem.plan = plan
    om.removeFromObjectModel(om.findObjectByName(name))
    om.addToObjectModel(newItem, om.getOrCreateContainer('segmentation'))
    return newItem
def run(self):
    """Refresh the hidden 'pointcloud snapshot' item under 'segmentation'."""
    cloud = self.getPointCloud()
    existing = om.findObjectByName('pointcloud snapshot')
    om.removeFromObjectModel(existing)
    vis.showPolyData(cloud, 'pointcloud snapshot',
                     parent='segmentation', visible=False)
def onHideBDIButton(self):
    """Hide the BDI robot model and delete both BDI footstep plan folders."""
    self.driver.showBDIPlan = False
    self.driver.bdiRobotModel.setProperty('Visible', False)
    # getOrCreateContainer guarantees a folder exists before removal
    for folderName in ("BDI footstep plan", "BDI adj footstep plan"):
        om.removeFromObjectModel(om.getOrCreateContainer(folderName))
def run(self):
    """Fit a wall plane at the annotation point and display it as 'wall'.

    A local plane is fit around the clicked point, its normal is flipped
    toward the viewer, and an upright wall frame is built with x along the
    normal.  The annotation's points are shown in that frame.
    """
    polyData = self.getPointCloud()
    annotation = self.getAnnotationInput()
    pickPoint = annotation.annotationPoints[0]

    _, normal = segmentation.applyLocalPlaneFit(
        polyData, pickPoint, searchRadius=0.1, searchRadiusEnd=0.2)

    # orient the plane normal toward the viewer
    viewDirection = segmentation.SegmentationContext.getGlobalInstance().getViewDirection()
    if np.dot(normal, viewDirection) < 0:
        normal = -normal

    # orthonormal wall frame: x along the (projected) normal, z vertical
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, normal)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)

    t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(pickPoint)

    wallPoints = segmentation.transformPolyData(annotation.polyData,
                                                t.GetLinearInverse())
    annotation.setProperty('Visible', False)

    om.removeFromObjectModel(om.findObjectByName('wall'))
    obj = vis.showPolyData(wallPoints, 'wall')
    obj.actor.SetUserTransform(t)
    vis.showFrame(t, 'wall frame', scale=0.2, parent=obj)
def showFusedMaps(self):
    """Re-display all stored fused point clouds in the current local frame.

    Each stored cloud was captured in a (drifting) fused camera frame; this
    computes the correction between the fused frame and the local frame at
    the current image time and re-renders every cloud, with its frame,
    under a fresh 'stereo' container.
    """
    om.removeFromObjectModel(om.findObjectByName('stereo'))
    om.getOrCreateContainer('stereo')

    q = imageManager.queue

    # current camera pose in both the local and the fused coordinate systems
    cameraToLocalNow = vtk.vtkTransform()
    utime = q.getCurrentImageTime('CAMERA_TSDF')
    q.getTransform('CAMERA_LEFT', 'local', utime, cameraToLocalNow)
    cameraToLocalFusedNow = vtk.vtkTransform()
    q.getTransform('CAMERA_LEFT_ALT', 'local', utime, cameraToLocalFusedNow)

    for i in range(len(self.pointClouds)):
        # correction mapping the fused frame into the local frame, now
        fusedNowToLocalNow = vtk.vtkTransform()
        fusedNowToLocalNow.PreMultiply()
        fusedNowToLocalNow.Concatenate(cameraToLocalNow)
        fusedNowToLocalNow.Concatenate(
            cameraToLocalFusedNow.GetLinearInverse())

        # capture-time pose composed with the correction above
        fusedTransform = vtk.vtkTransform()
        fusedTransform.PreMultiply()
        fusedTransform.Concatenate(fusedNowToLocalNow)
        fusedTransform.Concatenate(self.cameraToLocalFusedTransforms[i])

        pd = filterUtils.transformPolyData(self.pointClouds[i],
                                           fusedTransform)
        vis.showFrame(fusedTransform, ('cloud frame ' + str(i)),
                      visible=True, scale=0.2, parent='stereo')
        vis.showPolyData(pd, ('stereo ' + str(i)), parent='stereo',
                         colorByName='rgb_colors')
def run(self):
    """Fit a plane to the cloud near the annotation and show it as 'wall'."""
    cloud = self.getPointCloud()
    annotation = self.getAnnotationInput()
    seedPoint = annotation.annotationPoints[0]

    planePoints, normal = segmentation.applyLocalPlaneFit(
        cloud, seedPoint, searchRadius=0.1, searchRadiusEnd=0.2)

    # flip the fitted normal so it faces the camera
    context = segmentation.SegmentationContext.getGlobalInstance()
    if np.dot(normal, context.getViewDirection()) < 0:
        normal = -normal

    # build an upright frame: x along the normal projected to horizontal
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, normal)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)

    wallFrame = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
    wallFrame.PostMultiply()
    wallFrame.Translate(seedPoint)

    localPoints = segmentation.transformPolyData(
        annotation.polyData, wallFrame.GetLinearInverse())
    annotation.setProperty('Visible', False)

    om.removeFromObjectModel(om.findObjectByName('wall'))
    wallObj = vis.showPolyData(localPoints, 'wall')
    wallObj.actor.SetUserTransform(wallFrame)
    vis.showFrame(wallFrame, 'wall frame', scale=0.2, parent=wallObj)
def testAffordanceToUrdf(): affs = [func() for func in newSphere, newBox, newCylinder, newCapsule, newMesh] print affordanceurdf.urdfStringFromAffordances(affs) for aff in affs: om.removeFromObjectModel(aff)
def _onPropertyChanged(self, propertySet, propertyName):
    """Dispatch frame-item property edits to the matching side effect.

    'Scale' resizes the widget and axes; 'Edit' attaches the interactive
    widget to the current view and registers the frame for updates;
    'Trace' creates or tears down a FrameTraceVisualizer; 'Tube' rebuilds
    the axes geometry.
    """
    PolyDataItem._onPropertyChanged(self, propertySet, propertyName)

    if propertyName == 'Scale':
        scale = self.getProperty(propertyName)
        self.rep.SetWorldSize(scale)
        self._updateAxesGeometry()
    elif propertyName == 'Edit':
        view = app.getCurrentRenderView()
        # fall back to the item's first view if the current one doesn't show it
        if view not in self.views:
            view = self.views[0]
        self.widget.SetInteractor(view.renderWindow().GetInteractor())
        self.widget.SetEnabled(self.getProperty(propertyName))
        isEditing = self.getProperty(propertyName)
        if isEditing:
            frameupdater.registerFrame(self)
    elif propertyName == 'Trace':
        trace = self.getProperty(propertyName)
        if trace and not self.traceData:
            # start tracing: lazily create the visualizer
            self.traceData = FrameTraceVisualizer(self)
        elif not trace and self.traceData:
            # stop tracing: remove the accumulated trace geometry
            om.removeFromObjectModel(self.traceData.getTraceData())
            self.traceData = None
    elif propertyName == 'Tube':
        self._updateAxesGeometry()
def computeGraspFrameSamplesBoard(self):
    """Generate eight candidate grasp frames around the board affordance.

    Two approach orientations are sampled on each of the four board edges;
    each sample is published as a hidden 'sample grasp frame N' child of
    the affordance, replacing any previous sample of the same name.
    """
    affordanceFrame = self.getAffordanceFrame()

    extraOffset = 0.0
    yoffset = 0.5 * self.affordance.params['ywidth'] + extraOffset
    xoffset = 0.5 * self.affordance.params['xwidth'] + extraOffset

    # (position, roll-pitch-yaw) samples relative to the affordance frame
    samples = [
        ([0.0, yoffset, 0.0], [0.0, 90, 180.0]),
        ([0.0, yoffset, 0.0], [0.0, -90, 180.0]),
        ([0.0, -yoffset, 0.0], [0.0, 90, 0.0]),
        ([0.0, -yoffset, 0.0], [0.0, -90, 0.0]),
        ([xoffset, 0.0, 0.0], [-90, -90, 180.0]),
        ([xoffset, 0.0, 0.0], [90, 90, 180.0]),
        ([-xoffset, 0.0, 0.0], [90, -90, 180.0]),
        ([-xoffset, 0.0, 0.0], [-90, 90, 180.0]),
    ]

    for i, (pos, rpy) in enumerate(samples):
        t = transformUtils.frameFromPositionAndRPY(pos, rpy)
        t.Concatenate(affordanceFrame.transform)
        name = 'sample grasp frame %d' % i
        om.removeFromObjectModel(self.findAffordanceChild(name))
        vis.showFrame(copyFrame(t), name, parent=self.affordance,
                      visible=False, scale=0.2)
def resetTargetPath(self):
    """Delete every 'target frame desired' and 'target frame desired path'
    object from the object model."""
    for targetName in ('target frame desired', 'target frame desired path'):
        for obj in om.getObjects():
            if obj.getProperty('Name') == targetName:
                om.removeFromObjectModel(obj)
def projectDrillDemoInCamera():
    """Project the drill-demo frames and geometry into the CAMERA_LEFT view.

    Rebuilds the 'camera overlay' container from scratch: drill frame,
    drill button frame (and its flipped copy), the drill geometry, and the
    sensed pointer tip, then re-renders the image view.
    """
    # clear any previous overlay before redrawing
    q = om.findObjectByName('camera overlay')
    om.removeFromObjectModel(q)

    imageView = cameraview.views['CAMERA_LEFT']
    imageView.imageActor.SetOpacity(.2)

    drawFrameInCamera(drillDemo.drill.frame.transform, 'drill frame',visible=False)

    # button frame: drill frame composed with the drill-to-button offset
    tf = transformUtils.copyFrame( drillDemo.drill.frame.transform )
    tf.PreMultiply()
    tf.Concatenate( drillDemo.drill.drillToButtonTransform )
    drawFrameInCamera(tf, 'drill button')

    # same frame rolled 180 degrees
    tf2 = transformUtils.copyFrame( tf )
    tf2.PreMultiply()
    tf2.Concatenate( transformUtils.frameFromPositionAndRPY( [0,0,0] , [180,0,0] ) )
    drawFrameInCamera(tf2, 'drill button flip')

    drawObjectInCamera('drill',visible=False)
    drawObjectInCamera('sensed pointer tip')
    obj = om.findObjectByName('sensed pointer tip frame')
    if (obj is not None):
        drawFrameInCamera(obj.transform, 'sensed pointer tip frame',visible=False)

    #drawObjectInCamera('left robotiq',visible=False)
    #drawObjectInCamera('right pointer',visible=False)

    v = imageView.view
    v.render()
def showFusedMaps(self):
    """Redraw all stored fused clouds, corrected into the current local frame."""
    om.removeFromObjectModel(om.findObjectByName('stereo'))
    om.getOrCreateContainer('stereo')

    q = imageManager.queue
    utime = q.getCurrentImageTime('CAMERA_TSDF')

    cameraToLocalNow = vtk.vtkTransform()
    q.getTransform('CAMERA_LEFT', 'local', utime, cameraToLocalNow)
    cameraToLocalFusedNow = vtk.vtkTransform()
    q.getTransform('CAMERA_LEFT_ALT', 'local', utime, cameraToLocalFusedNow)

    for i, cloud in enumerate(self.pointClouds):
        # correction that maps the fused frame into the local frame, now
        correction = vtk.vtkTransform()
        correction.PreMultiply()
        correction.Concatenate(cameraToLocalNow)
        correction.Concatenate(cameraToLocalFusedNow.GetLinearInverse())

        fusedTransform = vtk.vtkTransform()
        fusedTransform.PreMultiply()
        fusedTransform.Concatenate(correction)
        fusedTransform.Concatenate(self.cameraToLocalFusedTransforms[i])

        pd = filterUtils.transformPolyData(cloud, fusedTransform)
        vis.showFrame(fusedTransform, ('cloud frame ' + str(i)),
                      visible=True, scale=0.2, parent='stereo')
        vis.showPolyData(pd, ('stereo ' + str(i)), parent='stereo',
                         colorByName='rgb_colors')
def computeGraspFrameSamplesBoard(self):
    """Publish eight hidden candidate grasp frames around the board.

    Each of the four board edges gets two approach orientations; samples
    are named 'sample grasp frame N' and parented to the affordance.
    """
    boardFrame = self.getAffordanceFrame()

    padding = 0.0
    halfY = 0.5 * self.affordance.params['ywidth'] + padding
    halfX = 0.5 * self.affordance.params['xwidth'] + padding

    # candidate poses as (xyz offset, roll-pitch-yaw in degrees)
    poses = [
        [[0.0, halfY, 0.0], [0.0, 90, 180.0]],
        [[0.0, halfY, 0.0], [0.0, -90, 180.0]],
        [[0.0, -halfY, 0.0], [0.0, 90, 0.0]],
        [[0.0, -halfY, 0.0], [0.0, -90, 0.0]],
        [[halfX, 0.0, 0.0], [-90, -90, 180.0]],
        [[halfX, 0.0, 0.0], [90, 90, 180.0]],
        [[-halfX, 0.0, 0.0], [90, -90, 180.0]],
        [[-halfX, 0.0, 0.0], [-90, 90, 180.0]],
    ]

    for index, pose in enumerate(poses):
        position, rpy = pose
        sampleFrame = transformUtils.frameFromPositionAndRPY(position, rpy)
        sampleFrame.Concatenate(boardFrame.transform)
        frameName = 'sample grasp frame %d' % index
        om.removeFromObjectModel(self.findAffordanceChild(frameName))
        vis.showFrame(copyFrame(sampleFrame), frameName,
                      parent=self.affordance, visible=False, scale=0.2)
def toggleFootstepWidget(displayPoint, view, useHorizontalWidget=False): obj, _ = vis.findPickedObject(displayPoint, view) if not obj: return False name = obj.getProperty('Name') if name in ('footstep widget', 'footstep widget frame'): om.removeFromObjectModel(om.findObjectByName('footstep widget')) return True match = re.match('^step (\d+)$', name) if not match: return False stepIndex = int(match.group(1)) existingWidget = om.findObjectByName('footstep widget') if existingWidget: previousStep = existingWidget.stepIndex print 'have existing widget for step:', stepIndex om.removeFromObjectModel(existingWidget) if previousStep == stepIndex: print 'returning because widget was for selected step' return True footMesh = shallowCopy(obj.polyData) footFrame = transformUtils.copyFrame(obj.getChildFrame().transform) if useHorizontalWidget: rpy = [0.0, 0.0, transformUtils.rollPitchYawFromTransform(footFrame)[2]] footFrame = transformUtils.frameFromPositionAndRPY(footFrame.GetPosition(), np.degrees(rpy)) footObj = vis.showPolyData(footMesh, 'footstep widget', parent='planning', alpha=0.2) footObj.stepIndex = stepIndex frameObj = vis.showFrame(footFrame, 'footstep widget frame', parent=footObj, scale=0.2) footObj.actor.SetUserTransform(frameObj.transform) footObj.setProperty('Color', obj.getProperty('Color')) frameObj.setProperty('Edit', True) rep = frameObj.widget.GetRepresentation() rep.SetTranslateAxisEnabled(2, False) rep.SetRotateAxisEnabled(0, False) rep.SetRotateAxisEnabled(1, False) frameObj.widget.HandleRotationEnabledOff() walkGoal = om.findObjectByName('walking goal') if walkGoal: walkGoal.setProperty('Edit', False) def onFootWidgetChanged(frame): footstepsDriver.onStepModified(stepIndex - 1, frame) frameObj.connectFrameModified(onFootWidgetChanged) return True
def onExecClicked(self):
    """Commit the last footstep plan and tear down the editing UI state."""
    self.commitFootstepPlan(self.lastFootstepPlan)

    om.removeFromObjectModel(om.findObjectByName('footstep widget'))
    goalObj = om.findObjectByName('walking goal')
    if goalObj:
        goalObj.setProperty('Edit', False)

    # disable execute until a new plan arrives
    self.execButton.setEnabled(False)
def ungraspAffordance(self, affordanceName):
    """Stop frame-syncing *affordanceName*; drop hand frames when no grasps remain."""
    # pop with a default replaces the try/del-KeyError idiom
    self.frameSyncs.pop(affordanceName, None)
    if not self.frameSyncs:
        for frameName in ('l_hand frame', 'r_hand frame'):
            om.removeFromObjectModel(om.findObjectByName(frameName))
def computeFootstepPlanSafeRegions(self, blocks, robotPose, standingFootName): print 'planning with safe regions. %d blocks.' % len(blocks) folder = om.getOrCreateContainer('Safe terrain regions') om.removeFromObjectModel(folder) footsteps = [] for i, block in enumerate(blocks): corners = block.getCorners() rpy = np.radians(block.cornerTransform.GetOrientation()) #rpy = [0.0, 0.0, 0.0] self.convertStepToSafeRegion(corners, rpy) lastBlock = blocks[-1] goalFrame = transformUtils.copyFrame(lastBlock.cornerTransform) goalOffset = vtk.vtkTransform() goalOffset.Translate(0.3, lastBlock.rectWidth/2.0, 0.0) goalFrame.PreMultiply() goalFrame.Concatenate(goalOffset) goalPosition = np.array(goalFrame.GetPosition()) if len(blocks) > 1: goalFrame = transformUtils.copyFrame(blocks[-2].cornerTransform) goalFrame.Translate(goalPosition - np.array(goalFrame.GetPosition())) vis.updateFrame(goalFrame, 'footstep plan goal', scale=0.2) request = self.footstepsPanel.driver.constructFootstepPlanRequest(robotPose, goalFrame) assert standingFootName in (self.ikPlanner.leftFootLink, self.ikPlanner.rightFootLink) if standingFootName == self.ikPlanner.rightFootLink: leadingFoot = lcmdrc.footstep_plan_params_t.LEAD_RIGHT else: leadingFoot = lcmdrc.footstep_plan_params_t.LEAD_LEFT request.params.leading_foot = leadingFoot request.params.max_forward_step = 0.5 request.params.nom_forward_step = 0.12 request.params.nom_step_width = 0.22 request.params.max_num_steps = 8 #2*len(blocks) plan = self.footstepsPanel.driver.sendFootstepPlanRequest(request, waitForResponse=True) if not plan: return [] #print 'received footstep plan with %d steps.' % len(plan.footsteps) footsteps = [] for i, footstep in enumerate(plan.footsteps): footstepTransform = self.transformFromFootstep(footstep) footsteps.append(Footstep(footstepTransform, footstep.is_right_foot)) return footsteps[2:]
def computeGraspFrame(self):
    """Copy the currently selected sample grasp frame to the 'grasp frame' child."""
    sampleName = 'sample grasp frame %d' % self.graspSample
    sampleFrame = self.getAffordanceChild(sampleName)

    graspName = 'grasp frame'
    om.removeFromObjectModel(self.findAffordanceChild(graspName))
    vis.showFrame(copyFrame(sampleFrame.transform), graspName,
                  parent=self.affordance, visible=False, scale=0.2)
def initialize(self):
    """Build sensor, controller, car plant, reward, SARSA learner, world
    and robot, then wire them together for simulation.

    Rebuilds the 'world' and 'robot' object-model items from scratch and
    configures the robot's frame widget for planar (x/y + yaw) editing.
    """
    self.dt = self.options['dt']
    # order in which controllers are exercised during a run
    self.controllerTypeOrder = [
        'defaultRandom', 'learnedRandom', 'learned', 'default'
    ]

    self.setDefaultOptions()

    self.Sensor = SensorObj(rayLength=self.options['Sensor']['rayLength'],
                            numRays=self.options['Sensor']['numRays'])
    self.Controller = ControllerObj(self.Sensor)
    self.Car = CarPlant(controller=self.Controller,
                        velocity=self.options['Car']['velocity'])
    self.Reward = Reward(
        self.Sensor,
        collisionThreshold=self.collisionThreshold,
        actionCost=self.options['Reward']['actionCost'],
        collisionPenalty=self.options['Reward']['collisionPenalty'],
        raycastCost=self.options['Reward']['raycastCost'])
    self.setSARSA()

    # create the things needed for simulation
    om.removeFromObjectModel(om.findObjectByName('world'))
    self.world = World.buildCircleWorld(
        percentObsDensity=self.options['World']['percentObsDensity'],
        circleRadius=self.options['World']['circleRadius'],
        nonRandom=self.options['World']['nonRandomWorld'],
        scale=self.options['World']['scale'],
        randomSeed=self.options['World']['randomSeed'],
        obstaclesInnerFraction=self.options['World']['obstaclesInnerFraction'])

    om.removeFromObjectModel(om.findObjectByName('robot'))
    self.robot, self.frame = World.buildRobot()
    # raycasts are tested against the world geometry via this locator
    self.locator = World.buildCellLocator(self.world.visObj.polyData)
    self.Sensor.setLocator(self.locator)
    self.frame = self.robot.getChildFrame()
    self.frame.setProperty('Scale', 3)
    self.frame.setProperty('Edit', True)
    self.frame.widget.HandleRotationEnabledOff()
    # constrain the edit widget to planar motion: no z, no roll/pitch
    rep = self.frame.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)

    # per-phase run durations
    self.supervisedTrainingTime = self.options['runTime']['supervisedTrainingTime']
    self.learningRandomTime = self.options['runTime']['learningRandomTime']
    self.learningEvalTime = self.options['runTime']['learningEvalTime']
    self.defaultControllerTime = self.options['runTime']['defaultControllerTime']

    self.Car.setFrame(self.frame)
    print "Finished initialization"
def drawBDIFootstepPlanAdjusted(self):
    """Redraw the adjusted BDI footstep plan into a fresh folder."""
    if self.bdi_plan_adjusted is None:
        return

    # recreate the container so stale items are discarded
    om.removeFromObjectModel(om.getOrCreateContainer('BDI adj footstep plan'))
    folder = om.getOrCreateContainer('BDI adj footstep plan')
    folder.setIcon(om.Icons.Feet)
    om.collapse(folder)

    self.drawFootstepPlan(self.bdi_plan_adjusted, folder,
                          [1.0, 1.0, 0.0], [0.0, 1.0, 1.0])
def showHandSamples(self, numberOfSamples=15):
    """Place hand models at evenly spaced points along the grasp spline.

    The samples live in a hidden 'sampled hands' container under 'debug'.
    """
    om.removeFromObjectModel(om.findObjectByName('sampled hands'))
    debugFolder = om.getOrCreateContainer('debug')
    handFolder = om.getOrCreateContainer('sampled hands', parentObj=debugFolder)

    lastIndex = float(numberOfSamples - 1)
    for i in xrange(numberOfSamples):
        t = self.splineInterp(i / lastIndex)
        handObj, _ = self.handFactory.placeHandModelWithTransform(
            t, self.view, side=self.side, name='sample %d' % i,
            parent=handFolder)
        handObj.setProperty('Alpha', 0.3)

    handFolder.setProperty('Visible', False)
def testCollection():
    """Affordances register in and deregister from the shared collection."""
    affordanceCollection = affordanceManager.collection
    assert len(affordanceCollection.collection) == 0

    aff = newBox()
    assert len(affordanceCollection.collection) == 1
    # the collection is keyed by the affordance's uuid property
    assert aff.getProperty('uuid') in affordanceCollection.collection

    om.removeFromObjectModel(aff)
    assert len(affordanceCollection.collection) == 0
def initialize(self):
    """Build sensor, controller, car plant, reward, SARSA learner, world
    and robot, then wire them together for simulation.

    Rebuilds the 'world' and 'robot' object-model items and constrains the
    robot frame widget to planar (x/y + yaw) editing.
    """
    self.dt = self.options["dt"]
    # order in which controllers are exercised during a run
    self.controllerTypeOrder = ["defaultRandom", "learnedRandom", "learned", "default"]

    self.setDefaultOptions()

    self.Sensor = SensorObj(
        rayLength=self.options["Sensor"]["rayLength"],
        numRays=self.options["Sensor"]["numRays"]
    )
    self.Controller = ControllerObj(self.Sensor)
    self.Car = CarPlant(controller=self.Controller,
                        velocity=self.options["Car"]["velocity"])
    self.Reward = Reward(
        self.Sensor,
        collisionThreshold=self.collisionThreshold,
        actionCost=self.options["Reward"]["actionCost"],
        collisionPenalty=self.options["Reward"]["collisionPenalty"],
        raycastCost=self.options["Reward"]["raycastCost"],
    )
    self.setSARSA()

    # create the things needed for simulation
    om.removeFromObjectModel(om.findObjectByName("world"))
    self.world = World.buildCircleWorld(
        percentObsDensity=self.options["World"]["percentObsDensity"],
        circleRadius=self.options["World"]["circleRadius"],
        nonRandom=self.options["World"]["nonRandomWorld"],
        scale=self.options["World"]["scale"],
        randomSeed=self.options["World"]["randomSeed"],
        obstaclesInnerFraction=self.options["World"]["obstaclesInnerFraction"],
    )

    om.removeFromObjectModel(om.findObjectByName("robot"))
    self.robot, self.frame = World.buildRobot()
    # raycasts are tested against the world geometry via this locator
    self.locator = World.buildCellLocator(self.world.visObj.polyData)
    self.Sensor.setLocator(self.locator)
    self.frame = self.robot.getChildFrame()
    self.frame.setProperty("Scale", 3)
    self.frame.setProperty("Edit", True)
    self.frame.widget.HandleRotationEnabledOff()
    # constrain the edit widget to planar motion: no z, no roll/pitch
    rep = self.frame.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)

    # per-phase run durations
    self.supervisedTrainingTime = self.options["runTime"]["supervisedTrainingTime"]
    self.learningRandomTime = self.options["runTime"]["learningRandomTime"]
    self.learningEvalTime = self.options["runTime"]["learningEvalTime"]
    self.defaultControllerTime = self.options["runTime"]["defaultControllerTime"]

    self.Car.setFrame(self.frame)
    print "Finished initialization"
def run(self):
    """Fit a shelf item near the annotation and show it as a movable object."""
    cloud = self.getPointCloud()
    annotation = self.getAnnotationInput()
    seedPoint = annotation.annotationPoints[0]

    tolerance = self.properties.getProperty('Cluster tolerance')
    mesh = segmentation.fitShelfItem(cloud, seedPoint, clusterTolerance=tolerance)

    annotation.setProperty('Visible', False)
    om.removeFromObjectModel(om.findObjectByName('shelf item'))

    itemObj = vis.showPolyData(mesh, 'shelf item', color=[0, 1, 0])
    # movable handle placed at the mesh centroid, axis-aligned
    centroidFrame = transformUtils.frameFromPositionAndRPY(
        segmentation.computeCentroid(mesh), [0, 0, 0])
    segmentation.makeMovable(itemObj, centroidFrame)
def selectCandidate(self, selectedObj, candidates):
    """Keep *selectedObj* from *candidates*, optionally deleting the rest
    and renaming the selection (and its children) to the 'New name' property."""
    if self.properties.getProperty('Delete candidates'):
        for candidate in candidates:
            if candidate != selectedObj:
                om.removeFromObjectModel(candidate)

    newName = self.properties.getProperty('New name')
    if newName:
        oldName = selectedObj.getProperty('Name')
        selectedObj.setProperty('Name', newName)
        # propagate the rename into each child's name
        for child in selectedObj.children():
            childName = child.getProperty('Name')
            child.setProperty('Name', childName.replace(oldName, newName))
def drawFrameInCamera(t, frameName='new frame',visible=True):
    """Project the axes of frame *t* into the CAMERA_LEFT image overlay.

    A throwaway frame item is created only to obtain axes polydata; that
    geometry is then moved into camera coordinates, projected into the
    image, and shown as 'overlay <frameName>' under 'camera overlay'.
    """
    v = imageView.view
    q = cameraview.imageManager.queue
    localToCameraT = vtk.vtkTransform()
    q.getTransform('local', 'CAMERA_LEFT', localToCameraT)

    # create a temporary frame item just to get axes polydata, then discard it
    res = vis.showFrame( vtk.vtkTransform() , 'temp',view=v, visible=True, scale = 0.2)
    om.removeFromObjectModel(res)
    pd = res.polyData
    # move axes into the target frame, then into camera coordinates
    pd = filterUtils.transformPolyData(pd, t)
    pd = filterUtils.transformPolyData(pd, localToCameraT)
    # project the 3D axes points into image pixel coordinates
    q.projectPoints('CAMERA_LEFT', pd )
    vis.showPolyData(pd, ('overlay ' + frameName), view=v, colorByName='Axes',parent='camera overlay',visible=visible)
def selectCandidate(self, selectedObj, candidates):
    """Confirm *selectedObj* as the chosen fit.

    Other candidates are deleted when 'Delete candidates' is set, and the
    selection (plus children) is renamed when 'New name' is non-empty.
    """
    deleteOthers = self.properties.getProperty('Delete candidates')
    if deleteOthers:
        rejected = [c for c in candidates if c != selectedObj]
        for obj in rejected:
            om.removeFromObjectModel(obj)

    newName = self.properties.getProperty('New name')
    if newName:
        oldName = selectedObj.getProperty('Name')
        selectedObj.setProperty('Name', newName)
        for child in selectedObj.children():
            renamed = child.getProperty('Name').replace(oldName, newName)
            child.setProperty('Name', renamed)
def onImageViewDoubleClick(self, displayPoint, modifiers, imageView):
    """On ctrl+double-click in the image view, re-fit the valve affordance
    at the clicked location."""
    if modifiers != QtCore.Qt.ControlModifier:
        return

    pixel = imageView.getImagePixel(displayPoint)
    cameraPos, ray = imageView.getWorldPositionAndRay(pixel)

    cloud = self.updatePointcloudSnapshot().polyData
    pickPoint = segmentation.extractPointsAlongClickRay(cameraPos, ray, cloud)

    om.removeFromObjectModel(om.findObjectByName('valve'))
    segmentation.segmentValveByBoundingBox(cloud, pickPoint)
    self.findAffordance()
def onImageViewDoubleClick(self, displayPoint, modifiers, imageView):
    """Re-fit the valve at a ctrl+double-clicked point in the image view."""
    # only ctrl+double-click triggers a refit
    if modifiers != QtCore.Qt.ControlModifier:
        return

    imagePixel = imageView.getImagePixel(displayPoint)
    cameraPos, ray = imageView.getWorldPositionAndRay(imagePixel)

    polyData = self.updatePointcloudSnapshot().polyData
    pickPoint = segmentation.extractPointsAlongClickRay(cameraPos, ray,
                                                        polyData)

    om.removeFromObjectModel(om.findObjectByName('valve'))
    segmentation.segmentValveByBoundingBox(polyData, pickPoint)
    self.findAffordance()
def selectLink(self, displayPoint, view):
    """Toggle selection of the robot link under *displayPoint*.

    Selecting a link fades all other links, shows a caption and a
    'selected link frame'; clicking the already-selected link clears the
    selection and restores full opacity.  Returns True when the click was
    handled, False when it did not hit a robot link.
    """
    if not self.enabled():
        return False

    robotModel, _ = vis.findPickedObject(displayPoint, view)

    # only robot-model items expose getLinkNameForMesh
    try:
        robotModel.model.getLinkNameForMesh
    except AttributeError:
        return False

    model = robotModel.model

    pickedPoint, _, polyData = vis.pickProp(displayPoint, view)

    linkName = model.getLinkNameForMesh(polyData)
    if not linkName:
        return False

    # clicking the selected link deselects: fade everything back to opaque
    fadeValue = 1.0 if linkName == self.selectedLink else 0.05

    for name in model.getLinkNames():
        linkColor = model.getLinkColor(name)
        linkColor.setAlphaF(fadeValue)
        model.setLinkColor(name, linkColor)

    if linkName == self.selectedLink:
        # deselect: clear caption and selection frame
        self.selectedLink = None
        vis.hideCaptionWidget()
        om.removeFromObjectModel(
            om.findObjectByName('selected link frame'))
    else:
        # select: highlight the link and show caption + frame
        self.selectedLink = linkName
        linkColor = model.getLinkColor(self.selectedLink)
        linkColor.setAlphaF(1.0)
        model.setLinkColor(self.selectedLink, linkColor)
        vis.showCaptionWidget(robotModel.getLinkFrame(
            self.selectedLink).GetPosition(), self.selectedLink, view=view)
        vis.updateFrame(robotModel.getLinkFrame(self.selectedLink),
                        'selected link frame', scale=0.2, parent=robotModel)

    return True
def dropTableObject(self, side='left'):
    """Hide the next table object on *side* and purge it from all bookkeeping."""
    obj, _ = self.getNextTableObject(side)

    obj.setProperty('Visible', False)
    for child in obj.children():
        child.setProperty('Visible', False)

    self.clusterObjects.remove(obj)  # forget the segmentation cluster
    om.removeFromObjectModel(obj)    # and drop its object-model entry

    if self.useCollisionEnvironment:
        # disable the matching collision affordance as well
        affordanceName = obj.getProperty('Name') + ' affordance'
        objAffordance = om.findObjectByName(affordanceName)
        objAffordance.setProperty('Collision Enabled', False)
        objAffordance.setProperty('Visible', False)

    self.affordanceUpdater.ungraspAffordance(obj.getProperty('Name'))
def initialize(self):
    """Wire up sensor, controller, car plant, reward, SARSA learner, world
    and robot for a simulation run.

    The 'world' and 'robot' object-model items are rebuilt from scratch,
    and the robot frame widget is constrained to planar (x/y + yaw) edits.
    """
    self.dt = self.options['dt']
    # order in which controllers are exercised during a run
    self.controllerTypeOrder = ['defaultRandom', 'learnedRandom', 'learned', 'default']

    self.setDefaultOptions()

    self.Sensor = SensorObj(rayLength=self.options['Sensor']['rayLength'],
                            numRays=self.options['Sensor']['numRays'])
    self.Controller = ControllerObj(self.Sensor)
    self.Car = CarPlant(controller=self.Controller,
                        velocity=self.options['Car']['velocity'])
    self.Reward = Reward(self.Sensor,
                         collisionThreshold=self.collisionThreshold,
                         actionCost=self.options['Reward']['actionCost'],
                         collisionPenalty=self.options['Reward']['collisionPenalty'],
                         raycastCost=self.options['Reward']['raycastCost'])
    self.setSARSA()

    # create the things needed for simulation
    om.removeFromObjectModel(om.findObjectByName('world'))
    self.world = World.buildCircleWorld(
        percentObsDensity=self.options['World']['percentObsDensity'],
        circleRadius=self.options['World']['circleRadius'],
        nonRandom=self.options['World']['nonRandomWorld'],
        scale=self.options['World']['scale'],
        randomSeed=self.options['World']['randomSeed'],
        obstaclesInnerFraction=self.options['World']['obstaclesInnerFraction'])

    om.removeFromObjectModel(om.findObjectByName('robot'))
    self.robot, self.frame = World.buildRobot()
    # raycasts are tested against the world geometry via this locator
    self.locator = World.buildCellLocator(self.world.visObj.polyData)
    self.Sensor.setLocator(self.locator)
    self.frame = self.robot.getChildFrame()
    self.frame.setProperty('Scale', 3)
    self.frame.setProperty('Edit', True)
    self.frame.widget.HandleRotationEnabledOff()
    # constrain the edit widget to planar motion: no z, no roll/pitch
    rep = self.frame.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)

    # per-phase run durations
    self.supervisedTrainingTime = self.options['runTime']['supervisedTrainingTime']
    self.learningRandomTime = self.options['runTime']['learningRandomTime']
    self.learningEvalTime = self.options['runTime']['learningEvalTime']
    self.defaultControllerTime = self.options['runTime']['defaultControllerTime']

    self.Car.setFrame(self.frame)
    print "Finished initialization"
def run(self):
    """Fit a shelf item at the annotation point and make it movable."""
    polyData = self.getPointCloud()
    annotation = self.getAnnotationInput()
    annotationPoint = annotation.annotationPoints[0]

    mesh = segmentation.fitShelfItem(
        polyData, annotationPoint,
        clusterTolerance=self.properties.getProperty('Cluster tolerance'))

    annotation.setProperty('Visible', False)

    # replace any previous fit result
    om.removeFromObjectModel(om.findObjectByName('shelf item'))
    obj = vis.showPolyData(mesh, 'shelf item', color=[0, 1, 0])

    # attach a movable handle at the mesh centroid
    handleFrame = transformUtils.frameFromPositionAndRPY(
        segmentation.computeCentroid(mesh), [0, 0, 0])
    segmentation.makeMovable(obj, handleFrame)
def onSegmentTable(self, p1, p2): print p1 print p2 self.picker.stop() om.removeFromObjectModel(self.picker.annotationObj) self.picker = None om.removeFromObjectModel(om.findObjectByName('table demo')) self.tableData = segmentation.segmentTableEdge(self.getInputPointCloud(), p1, p2) self.tableObj = vis.showPolyData(self.tableData.mesh, 'table', parent='table demo', color=[0,1,0]) self.tableFrame = vis.showFrame(self.tableData.frame, 'table frame', parent=self.tableObj, scale=0.2) self.tableBox = vis.showPolyData(self.tableData.box, 'table box', parent=self.tableObj, color=[0,1,0], visible=False) self.tableObj.actor.SetUserTransform(self.tableFrame.transform) self.tableBox.actor.SetUserTransform(self.tableFrame.transform) if self.useCollisionEnvironment: self.addCollisionObject(self.tableObj)
def initialize(self):
    """Build sensor, controller, quad plant, world and robot for simulation.

    Rebuilds the 'world' and 'robot' object-model items and constrains the
    robot frame widget to planar (x/y + yaw) editing.
    """
    self.dt = self.options['dt']
    self.controllerTypeOrder = ['default']

    self.setDefaultOptions()

    self.Sensor = SensorObj(rayLength=self.options['Sensor']['rayLength'],
                            numRays=self.options['Sensor']['numRays'])
    self.Controller = ControllerObj(self.Sensor)
    self.Quad = QuadPlant(controller=self.Controller,
                          velocity=self.options['Quad']['velocity'])

    # create the things needed for simulation
    om.removeFromObjectModel(om.findObjectByName('world'))
    self.world = World.buildWarehouseWorld(
        percentObsDensity=self.options['World']['percentObsDensity'],
        circleRadius=self.options['World']['circleRadius'],
        nonRandom=self.options['World']['nonRandomWorld'],
        scale=self.options['World']['scale'],
        randomSeed=self.options['World']['randomSeed'],
        obstaclesInnerFraction=self.options['World']['obstaclesInnerFraction'])

    om.removeFromObjectModel(om.findObjectByName('robot'))
    self.robot, self.frame = World.buildRobot()
    # raycasts are tested against the world geometry via this locator
    self.locator = World.buildCellLocator(self.world.visObj.polyData)
    self.Sensor.setLocator(self.locator)
    self.frame = self.robot.getChildFrame()
    self.frame.setProperty('Scale', 3)
    self.frame.setProperty('Edit', True)
    self.frame.widget.HandleRotationEnabledOff()
    # constrain the edit widget to planar motion: no z, no roll/pitch
    rep = self.frame.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)

    self.defaultControllerTime = self.options['runTime']['defaultControllerTime']

    self.Quad.setFrame(self.frame)
    print "Finished initialization"
def spawnTargetAffordance(self):
    """Create (or recreate) the square 'target' affordance and its frame."""
    for obj in om.getObjects():
        if obj.getProperty('Name') == 'target':
            om.removeFromObjectModel(obj)

    targetFrame = transformUtils.frameFromPositionAndRPY([0.6, 0.2, 0.6],
                                                         [180, 0, 90])
    folder = om.getOrCreateContainer('affordances')

    # square outline in the x-z plane, traced corner to corner
    corners = [
        np.array([0, 0, 0]),
        np.array([-self.boxLength, 0, 0]),
        np.array([-self.boxLength, 0, self.boxLength]),
        np.array([0, 0, self.boxLength]),
    ]
    z = DebugData()
    for start, end in zip(corners, corners[1:] + corners[:1]):
        z.addLine(start, end, radius=0.02)
    targetMesh = z.getPolyData()

    self.targetAffordance = vis.showPolyData(
        targetMesh, 'target', color=[0.0, 1.0, 0.0],
        cls=affordanceitems.FrameAffordanceItem, parent=folder, alpha=0.3)
    self.targetAffordance.actor.SetUserTransform(targetFrame)
    self.targetFrame = vis.showFrame(targetFrame, 'target frame',
                                     parent=self.targetAffordance,
                                     visible=False, scale=0.2)
    # keep only the vtkTransform, matching how callers use targetFrame
    self.targetFrame = self.targetFrame.transform

    params = dict(length=self.boxLength, otdf_type='target',
                  friendly_name='target')
    self.targetAffordance.setAffordanceParams(params)
    self.targetAffordance.updateParamsFromActorTransform()
def drawPolytope(msg):
    """Draw (or delete) an IRIS polytope described by *msg*."""
    _id = msg.id
    if msg.remove:
        # a remove message clears the whole polytope container
        om.removeFromObjectModel(om.findObjectByName('IRIS polytopes'))
        return

    color = [.8, .2, .2] if msg.highlighted else [.8, .8, .8]

    name = 'polytope {:d}'.format(_id)
    existing = om.findObjectByName(name)
    if existing:
        om.removeFromObjectModel(existing)

    # mesh the vertex set via a 3D Delaunay triangulation
    vertices = np.array(msg.V)
    polyData = vnp.numpyToPolyData(vertices.T.copy())
    vol_mesh = filterUtils.computeDelaunay3D(polyData)

    debug = DebugData()
    debug.addPolyData(vol_mesh)
    vis.showPolyData(debug.getPolyData(), name, color=color, alpha=0.4,
                     parent='IRIS polytopes')
def updateGraspEndPose(self, enableSearch=True):
    """Recompute the grasp end pose, optionally re-running the search.

    When enableSearch is True, cached 'desired grasp frame/hand' children
    are removed so the sample/search pipeline runs fresh; otherwise a
    cached frame (if present) short-circuits straight to end-pose
    computation.
    """
    self.computeInitialState()
    self.findAffordance()

    if enableSearch:
        # discard cached search results so the search runs fresh
        om.removeFromObjectModel(self.findAffordanceChild('desired grasp frame'))
        om.removeFromObjectModel(self.findAffordanceChild('desired grasp hand'))

    # NOTE(review): when enableSearch is True the frame was just removed
    # above, so this branch is presumably always taken in that case —
    # confirm that is the intent.
    if not self.findAffordanceChild('desired grasp frame'):
        self.computeGraspFrameSamples()
        self.computeGraspFrame()
        self.computeGraspEndPoseSearch()
        self.computeGraspEndPoseFrames()
    else:
        self.computeGraspEndPose()

    self.computePreGraspFrame()
    self.computePreGraspEndPose()
def drawPolytope(msg):
    """Render the IRIS polytope from *msg*, or clear all polytopes on remove."""
    if msg.remove:
        om.removeFromObjectModel(om.findObjectByName("IRIS polytopes"))
        return

    if msg.highlighted:
        faceColor = [0.8, 0.2, 0.2]
    else:
        faceColor = [0.8, 0.8, 0.8]

    name = "polytope {:d}".format(msg.id)
    previous = om.findObjectByName(name)
    if previous:
        om.removeFromObjectModel(previous)

    # triangulate the vertex set into a solid mesh
    vertexArray = np.array(msg.V)
    cloud = vnp.numpyToPolyData(vertexArray.T.copy())
    mesh = filterUtils.computeDelaunay3D(cloud)

    debug = DebugData()
    debug.addPolyData(mesh)
    vis.showPolyData(debug.getPolyData(), name, color=faceColor, alpha=0.4,
                     parent="IRIS polytopes")
def onShowMapButton(self):
    """Load the octomap point cloud from disk and display it as 'prior map'."""
    # reload from disk every time, in case the file changed
    filename = ddapp.getDRCBaseDir() + "/software/build/data/octomap.pcd"
    self.octomap_cloud = io.readPolyData(filename)  # a vtkPolyData
    assert (self.octomap_cloud.GetNumberOfPoints() !=0 )

    # TODO: clip the cloud to the robot's height; cropToLineSegment over
    # [0,0,-10]..[0,0,3] didn't work well yet.  Z values would come from
    # vnp.getNumpyFromVtk(self.octomap_cloud, 'Points')[:, 2].

    # remove the previously shown map, if any
    folder = om.getOrCreateContainer("segmentation")
    om.removeFromObjectModel(folder)

    vis.showPolyData(self.octomap_cloud, 'prior map', alpha=1.0,
                     color=[0, 0, 0.4])
def onSegmentBin(self, p1, p2): print p1 print p2 self.picker.stop() om.removeFromObjectModel(self.picker.annotationObj) self.picker = None om.removeFromObjectModel(om.findObjectByName('bin frame')) binEdge = p2 - p1 zaxis = [0.0, 0.0, 1.0] xaxis = np.cross(binEdge, zaxis) xaxis /= np.linalg.norm(xaxis) yaxis = np.cross(zaxis, xaxis) t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis) t.PostMultiply() t.Translate(p1) self.binFrame = vis.showFrame(t, 'bin frame', parent=None, scale=0.2)