def processSnippet():
    """Fit footsteps to the canned terrain-block point-cloud snippet.

    Reads the stored terrain snapshot (stereo or lidar, selected by
    ``continuouswalkingDemo.processContinuousStereo``), finds horizontal
    surfaces in it, extracts block rectangles, and draws fitted steps.
    Returns early (after printing a warning) if no surfaces are found.
    """
    obj = om.getOrCreateContainer('continuous')
    om.getOrCreateContainer('cont debug', obj)

    if continuouswalkingDemo.processContinuousStereo:
        polyData = ioUtils.readPolyData(os.path.join(dataDir, 'terrain/block_snippet_stereo.vtp'))
        # Stereo clouds are dense; voxelize so segmentation stays fast.
        polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01)
    else:
        polyData = ioUtils.readPolyData(os.path.join(dataDir, 'terrain/block_snippet.vtp'))
    vis.updatePolyData(polyData, 'walking snapshot trimmed', parent='continuous')

    standingFootName = 'l_foot'
    standingFootFrame = robotStateModel.getLinkFrame(standingFootName)
    vis.updateFrame(standingFootFrame, standingFootName, parent='continuous', visible=False)

    # Step 2: find all the surfaces in front of the robot (about 0.75sec)
    clusters = segmentation.findHorizontalSurfaces(polyData)
    if clusters is None:
        # print() form so the module also parses under Python 3.
        print("No cluster found, stop walking now!")
        return

    # Step 3: find the corners of the minimum bounding rectangles
    blocks, groundPlane = cwdemo.extractBlocksFromSurfaces(clusters, standingFootFrame)
    footsteps = cwdemo.placeStepsOnBlocks(blocks, groundPlane, standingFootName, standingFootFrame)
    cwdemo.drawFittedSteps(footsteps)
def processSingleBlock(robotStateModel, whichFile=0):
    """Fit a minimum bounding rectangle to one stored block scan.

    ``whichFile`` selects the snapshot: 0 loads the table-top scan,
    any other value loads the terrain block scan.
    """
    if whichFile == 0:
        snapshotName = 'tabletop/table_top_45.vtp'
    else:
        snapshotName = 'terrain/block_top.vtp'
    cloud = ioUtils.readPolyData(os.path.join(dataDir, snapshotName))

    standingFootName = 'l_foot'
    footFrame = robotStateModel.getLinkFrame(standingFootName)
    cwdemo.findMinimumBoundingRectangle(cloud, footFrame)
def buildRobot(x=0, y=0):
    """Load the car mesh, show it as 'robot', and attach an editable frame.

    Interaction with the frame is restricted to planar motion: handle
    rotation, z translation, and the two out-of-plane rotations are
    disabled.  Returns (object, frame).
    """
    meshScale = 0.04
    xform = vtk.vtkTransform()
    xform.RotateZ(90)
    xform.Scale(meshScale, meshScale, meshScale)
    mesh = filterUtils.transformPolyData(ioUtils.readPolyData('celica.obj'), xform)

    obj = vis.showPolyData(mesh, 'robot')
    robotFrame = vis.addChildFrame(obj)
    robotFrame.setProperty('Scale', 3)
    robotFrame.setProperty('Edit', True)

    # this makes sure we can only rotate it in two dimensions
    robotFrame.widget.HandleRotationEnabledOff()
    rep = robotFrame.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)
    return obj, robotFrame
def onOpenGeometry(filename):
    """Read geometry from ``filename`` and show it under the 'files' folder.

    Pops an error dialog instead when the file yields no points.
    """
    polyData = io.readPolyData(filename)
    if polyData and polyData.GetNumberOfPoints():
        vis.showPolyData(polyData, os.path.basename(filename), parent='files')
    else:
        app.showErrorMessage('Failed to read any data from file: %s' % filename, title='Reader error')
def loadMap(self, filename, transform=None):
    """Load a point-cloud map from ``filename`` and display it as 'map'.

    If ``transform`` is not None the cloud is moved into the local frame
    before display.  NOTE(review): the argument is only used as a flag —
    the transform actually applied is ``self.cameraToLocalInit``; confirm
    that is intended.
    """
    print(filename)
    pd = io.readPolyData(filename)
    if transform is not None:
        pd = filterUtils.transformPolyData(pd, self.cameraToLocalInit)
    pdi = vis.updatePolyData(pd, 'map')
    try:
        pdi.setProperty('Color By', 'rgb_colors')
    except Exception:
        # Clouds without an rgb_colors array keep their default coloring;
        # 'except Exception as/print()' forms are valid on Python 2.6+ and 3.
        print("Could not set color to RGB - not an element")
def __init__(self):
    """Show the rehearsal course point cloud and its named walk-goal frames.

    Each ``*WalkFrame`` below is a hard-coded (position, quaternion) pose
    expressed relative to the course model; a FrameSync keeps them rigidly
    attached to the movable cloud, so dragging the cloud drags every goal
    frame along with it.
    """
    # NOTE(review): 'pose' is computed but never used afterwards.
    pose = transformUtils.poseFromTransform(vtk.vtkTransform())
    self.pointcloud = ioUtils.readPolyData(ddapp.getDRCBaseDir() + '/software/models/rehearsal_pointcloud.vtp')
    self.pointcloudPD = vis.showPolyData(self.pointcloud, 'coursemodel', parent=None)
    # Attach an editable child frame at the identity pose so the whole
    # course model can be dragged into registration with live data.
    segmentation.makeMovable(self.pointcloudPD, transformUtils.transformFromPose(array([0, 0, 0]), array([1.0, 0., 0., 0.0])))
    self.originFrame = self.pointcloudPD.getChildFrame()

    # Walk-goal frames (position + quaternion), in the course-model frame.
    t = transformUtils.transformFromPose(array([-4.39364111, -0.51507392, -0.73125563]), array([0.93821625, 0., 0., -0.34604951]))
    self.valveWalkFrame = vis.updateFrame(t, 'ValveWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-3.31840048, 0.36408685, -0.67413123]), array([0.93449475, 0., 0., -0.35597691]))
    self.drillPreWalkFrame = vis.updateFrame(t, 'DrillPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-2.24553758, -0.52990939, -0.73255338]), array([0.93697004, 0., 0., -0.34940972]))
    self.drillWalkFrame = vis.updateFrame(t, 'DrillWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-2.51306835, -0.92994004, -0.74173541]), array([-0.40456572, 0., 0., 0.91450893]))
    self.drillWallWalkFarthestSafeFrame = vis.updateFrame(t, 'DrillWallWalkFarthestSafe', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-2.5314524, -0.27401861, -0.71302976]), array([0.98691519, 0., 0., -0.16124022]))
    self.drillWallWalkBackFrame = vis.updateFrame(t, 'DrillWallWalkBack', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-1.16122318, 0.04723203, -0.67493468]), array([0.93163145, 0., 0., -0.36340451]))
    self.surprisePreWalkFrame = vis.updateFrame(t, 'SurprisePreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-0.5176186, -1.00151554, -0.70650799]), array([0.84226497, 0., 0., -0.53906374]))
    self.surpriseWalkFrame = vis.updateFrame(t, 'SurpriseWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-0.69100097, -0.43713269, -0.68495922]), array([0.98625075, 0., 0., -0.16525575]))
    self.surpriseWalkBackFrame = vis.updateFrame(t, 'SurpriseWalkBack', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([0.65827322, -0.08028796, -0.77370834]), array([0.94399977, 0., 0., -0.3299461]))
    self.terrainPreWalkFrame = vis.updateFrame(t, 'TerrainPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([5.47126425, -0.09790393, -0.70504679]), array([1., 0., 0., 0.]))
    self.stairsPreWalkFrame = vis.updateFrame(t, 'StairsPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    # Rigidly link every goal frame to the course model's frame.
    self.frameSync = vis.FrameSync()
    self.frameSync.addFrame(self.originFrame)
    # NOTE(review): originFrame IS the cloud's child frame, so the next
    # call may re-add the same frame with ignoreIncoming set — confirm.
    self.frameSync.addFrame(self.pointcloudPD.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.valveWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillPreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWallWalkFarthestSafeFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWallWalkBackFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surprisePreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surpriseWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surpriseWalkBackFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.terrainPreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.stairsPreWalkFrame, ignoreIncoming=True)
def onShowMapButton(self):
    """Load the Kintinuous map from disk and display it as 'map'.

    The cloud is transformed by ``self.cameraToLocalInit`` so it lines
    up with the initial camera frame, which is also shown for reference.
    """
    vis.updateFrame(self.cameraToLocalInit, 'initial cam')
    filename = os.path.expanduser('~/Kinect_Logs/Kintinuous.pcd')
    # print() form so the module also parses under Python 3.
    print(filename)
    pd = io.readPolyData(filename)
    pd = filterUtils.transformPolyData(pd, self.cameraToLocalInit)
    pdi = vis.updatePolyData(pd, 'map')
    pdi.setProperty('Color By', 'rgb_colors')
def loadFootMeshes():
    """Return [leftFootMesh, rightFootMesh], each shrunk by 2%."""
    shrink = vtk.vtkTransform()
    shrink.Scale(0.98, 0.98, 0.98)

    meshes = []
    for side in range(0, 2):
        combined = DebugData()
        for meshFile in _footMeshFiles[side]:
            combined.addPolyData(ioUtils.readPolyData(meshFile, computeNormals=True))
        meshes.append(filterUtils.transformPolyData(combined.getPolyData(), shrink))
    return meshes
def loadPolyDataMeshes(geom):
    """Load the mesh(es) named by ``geom.string_data`` and return them as a list.

    '.wrl' files are redirected to their '.obj' counterpart.  When texture
    meshes are enabled and a '.vtm' multi-block file exists alongside the
    mesh, that multi-block file is loaded instead of the single mesh.
    """
    filename = geom.string_data
    basename, ext = os.path.splitext(filename)
    if ext.lower() == '.wrl':
        filename = basename + '.obj'

    alternateFilename = basename + '.vtm'
    if USE_TEXTURE_MESHES and os.path.isfile(alternateFilename):
        return Geometry.loadMultiBlockMeshes(alternateFilename)
    return [ioUtils.readPolyData(filename)]
def buildRobot(x=0, y=0):
    """Load the car mesh, orient and scale it, and show it as 'robot'.

    Returns (object, childFrame).
    """
    meshScale = 0.04
    xform = vtk.vtkTransform()
    xform.RotateZ(90)
    xform.Scale(meshScale, meshScale, meshScale)
    mesh = filterUtils.transformPolyData(ioUtils.readPolyData('celica.obj'), xform)

    obj = vis.showPolyData(mesh, 'robot')
    robotFrame = vis.addChildFrame(obj)
    return obj, robotFrame
def buildRobot(x=0, y=0):
    """Load the crazyflie mesh, orient and scale it, and show it as 'robot'.

    Returns (object, childFrame).
    """
    meshScale = 0.01
    xform = vtk.vtkTransform()
    xform.RotateX(90)
    xform.Scale(meshScale, meshScale, meshScale)
    mesh = filterUtils.transformPolyData(ioUtils.readPolyData('crazyflie.obj'), xform)

    obj = vis.showPolyData(mesh, 'robot')
    robotFrame = vis.addChildFrame(obj)
    return obj, robotFrame
def onShowMapButton(self):
    """(Re)load the octomap prior from disk and display it as 'prior map'.

    The cloud is re-read from the DRC build tree on every click so that
    changes on disk are picked up.  Any existing segmentation output is
    removed before the map is shown.
    """
    filename = ddapp.getDRCBaseDir() + "/software/build/data/octomap.pcd"
    self.octomap_cloud = io.readPolyData(filename)
    assert self.octomap_cloud.GetNumberOfPoints() != 0

    # Drop any previous segmentation results before showing the new map.
    folder = om.getOrCreateContainer("segmentation")
    om.removeFromObjectModel(folder)

    vis.showPolyData(self.octomap_cloud, 'prior map', alpha=1.0, color=[0, 0, 0.4])
def updateGeometryFromProperties(self):
    """Refresh this object's mesh from its 'Filename' property.

    Resolution order: empty name -> empty polydata; mesh-manager hit ->
    managed mesh; existing file (relative names resolved against the DRC
    base dir) -> file contents; otherwise a small axes placeholder.
    """
    filename = self.getProperty('Filename')
    if not filename:
        self.setPolyData(vtk.vtkPolyData())
        return

    polyData = self.getMeshManager().get(filename)
    if not polyData:
        if not os.path.isabs(filename):
            filename = os.path.join(ddapp.getDRCBaseDir(), filename)
        if os.path.isfile(filename):
            polyData = ioUtils.readPolyData(filename)
        else:
            # use axes as a placeholder mesh
            placeholder = DebugData()
            placeholder.addFrame(vtk.vtkTransform(), scale=0.1, tubeRadius=0.005)
            polyData = placeholder.getPolyData()
    self.setPolyData(polyData)
# Test script: cluster-based table segmentation on a stored lidar scene.
from ddapp import roboturdf

app = ConsoleApp()

# create a view
view = app.createView()
segmentation._defaultSegmentationView = view

robotStateModel, robotStateJointController = roboturdf.loadRobotModel('robot state model', view, parent='sensors', color=roboturdf.getRobotGrayColor(), visible=True)
segmentationroutines.SegmentationContext.initWithRobot(robotStateModel)

# Move robot to near to table:
robotStateJointController.q[5] = math.radians(120)
robotStateJointController.q[0] = -1.5
robotStateJointController.q[1] = 2
robotStateJointController.q[2] = 0.95
robotStateJointController.push()

# load poly data
dataDir = app.getTestingDataDirectory()
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'tabletop/table-and-bin-scene.vtp'))
vis.showPolyData(polyData, 'pointcloud snapshot')

# Segment clusters on the table near the given seed point.
#segmentation.segmentTableScene(polyData, [-1.58661389, 2.91242337, 0.79958105] )
segmentation.segmentTableSceneClusters(polyData, [-1.58661389, 2.91242337, 0.79958105], clusterInXY=True)

if app.getTestingInteractiveEnabled():
    view.show()
    app.showObjectModel()
    app.start()
robotStateJointController.q[5] = -0.63677# math.radians(120) robotStateJointController.q[0] = 0.728 robotStateJointController.q[1] = -0.7596 robotStateJointController.q[2] = 0.79788 robotStateJointController.q[33] = 0.5 # head down robotStateJointController.push() ''' groundHeight = 0.0 viewFrame = segmentation.transformUtils.frameFromPositionAndRPY([1, -1, groundHeight + 1.5], [0, 0, -35]) segmentationroutines.SegmentationContext.initWithUser(groundHeight, viewFrame) # load poly data dataDir = app.getTestingDataDirectory() polyData = ioUtils.readPolyData(os.path.join(dataDir, 'tabletop/table-sparse-stereo.vtp')) vis.showPolyData(polyData, 'pointcloud snapshot original', colorByName='rgb_colors') polyData = segmentationroutines.sparsifyStereoCloud( polyData ) vis.showPolyData(polyData, 'pointcloud snapshot') # Use only returns near the robot: polyData = segmentation.addCoordArraysToPolyData(polyData) polyData = segmentation.thresholdPoints(polyData, 'distance_along_view_x', [0, 1.3]) segmentation.segmentTableThenFindDrills(polyData, [1.2864902, -0.93351376, 1.10208917]) if app.getTestingInteractiveEnabled(): v = applogic.getCurrentView() v.camera().SetPosition([3,3,3]) v.camera().SetFocalPoint([0,0,0])
def loadTableTopPointCloud():
    """Read the stored table-and-door scene and show it as 'pointcloud snapshot'."""
    sceneFile = os.path.join(app.getTestingDataDirectory(), 'tabletop/table-and-door-scene.vtp')
    return vis.showPolyData(ioUtils.readPolyData(sceneFile), 'pointcloud snapshot')
# Test script fragment: cluster-based table segmentation (PEP8 variant).
robotStateModel, robotStateJointController = roboturdf.loadRobotModel(
    'robot state model', view, parent='sensors',
    color=roboturdf.getRobotGrayColor(), visible=True)
segmentationroutines.SegmentationContext.initWithRobot(robotStateModel)

# Move robot to near to table:
robotStateJointController.q[5] = math.radians(120)
robotStateJointController.q[0] = -1.5
robotStateJointController.q[1] = 2
robotStateJointController.q[2] = 0.95
robotStateJointController.push()

# load poly data
dataDir = app.getTestingDataDirectory()
polyData = ioUtils.readPolyData(
    os.path.join(dataDir, 'tabletop/table-and-bin-scene.vtp'))
vis.showPolyData(polyData, 'pointcloud snapshot')

# Segment clusters on the table near the given seed point.
#segmentation.segmentTableScene(polyData, [-1.58661389, 2.91242337, 0.79958105] )
segmentation.segmentTableSceneClusters(polyData, [-1.58661389, 2.91242337, 0.79958105], clusterInXY=True)

if app.getTestingInteractiveEnabled():
    view.show()
    app.showObjectModel()
    app.start()
def loadTableTopPointCloud():
    """Read the stored table-and-door scene and show it as 'pointcloud snapshot'."""
    scenePath = os.path.join(app.getTestingDataDirectory(), 'tabletop/table-and-door-scene.vtp')
    cloud = ioUtils.readPolyData(scenePath)
    return vis.showPolyData(cloud, 'pointcloud snapshot')
# Test script fragment: automatic valve + lever fitting from a lidar scene.
robotStateModel, robotStateJointController = roboturdf.loadRobotModel('robot state model', view, parent='sensors', color=roboturdf.getRobotGrayColor(), visible=True)
segmentationroutines.SegmentationContext.initWithRobot(robotStateModel)

# Move robot to near to valve wall:
# 0degrees
#robotStateJointController.q[5] = math.radians(120)
#robotStateJointController.q[0] = 0
#robotStateJointController.q[1] = 0
# 30,60,90
robotStateJointController.q[5] = math.radians(-120)
robotStateJointController.q[0] = 1
robotStateJointController.q[1] = 1
robotStateJointController.q[2] = 0.85
robotStateJointController.push()

# load poly data (alternate scenes kept for the 0/30/60 degree cases)
dataDir = app.getTestingDataDirectory()
#polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene.vtp'))
#polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene-30.vtp'))
#polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene-60.vtp'))
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene-90.vtp'))
vis.showPolyData(polyData, 'pointcloud snapshot')

# Fit both the valve and the lever; ground removed via full ground fit.
segmentation.segmentValveWallAuto(.2, mode='both', removeGroundMethod=segmentation.removeGround)

if app.getTestingInteractiveEnabled():
    view.show()
    app.showObjectModel()
    app.start()
# create a view view = app.createView() segmentation._defaultSegmentationView = view robotStateModel, robotStateJointController = roboturdf.loadRobotModel( 'robot state model', view, parent='sensors', color=roboturdf.getRobotGrayColor(), visible=True) segmentationroutines.SegmentationContext.initWithRobot(robotStateModel) # load poly data dataDir = app.getTestingDataDirectory() polyData = ioUtils.readPolyData( os.path.join(dataDir, 'amazon-pod/01-small-changes.vtp')) vis.showPolyData(polyData, 'pointcloud snapshot', visible=False) # remove ground and clip to just the pod: groundPoints, polyData = segmentation.removeGround(polyData) vis.showPolyData(polyData, 'scene', visible=False) polyData = segmentation.addCoordArraysToPolyData(polyData) polyData = segmentation.thresholdPoints(polyData, 'y', [1, 1.6]) polyData = segmentation.thresholdPoints(polyData, 'x', [-1.2, 0.5]) vis.showPolyData(polyData, 'clipped', visible=False) # remove outliers polyData = segmentation.labelOutliers(polyData, searchRadius=0.03, neighborsInSearchRadius=40) polyData = segmentation.thresholdPoints(polyData, 'is_outlier', [0, 0])
        # NOTE(review): fragment — this is the tail of an if/elif chain
        # inside onPlanStatus; the opening 'if' is outside this view.
        for link in links:
            robotHighlighter.highlightLink(link, [1,0.4,0.0])
    elif msg.plan_type == lcmdrc.plan_status_t.BRACING:
        # Bracing: highlight affected links in red.
        for link in links:
            robotHighlighter.highlightLink(link, [1, 0, 0])
    else:
        # Any other plan state clears the highlight.
        for link in links:
            robotHighlighter.dehighlightLink(link)

# Subscribe the fall-detector highlighter to plan status, rate-limited.
fallDetectorSub = lcmUtils.addSubscriber("PLAN_EXECUTION_STATUS", lcmdrc.plan_status_t, onPlanStatus)
fallDetectorSub.setSpeedLimit(10)

if useDataFiles:
    # Show any point-cloud files passed on the command line.
    for filename in drcargs.args().data_files:
        polyData = io.readPolyData(filename)
        if polyData:
            vis.showPolyData(polyData, os.path.basename(filename))

if useImageWidget:
    imageWidget = cameraview.ImageWidget(cameraview.imageManager, 'CAMERA_LEFT', view)
    #imageWidget = cameraview.ImageWidget(cameraview.imageManager, 'KINECT_RGB', view)

if useCameraFrustumVisualizer and cameraview.CameraFrustumVisualizer.isCompatibleWithConfig():
    cameraFrustumVisualizer = cameraview.CameraFrustumVisualizer(robotStateModel, cameraview.imageManager, 'CAMERA_LEFT')


class ImageOverlayManager(object):
    # NOTE(review): class continues beyond this view.

    def __init__(self):
        self.viewName = 'CAMERA_LEFT'
    # NOTE(review): fragment — these two lines are the tail of a function
    # whose definition is outside this view.
    vis.updatePolyData(polyData, 'rest', parent='segmentation', visible=False)
    return polyData

app = ConsoleApp()

# create a view
view = app.createView()
segmentation._defaultSegmentationView = view

robotStateModel, robotStateJointController = roboturdf.loadRobotModel('robot state model', view, parent='sensors', color=roboturdf.getRobotGrayColor(), visible=True)
segmentationroutines.SegmentationContext.initWithRobot(robotStateModel)

# load poly data
dataDir = app.getTestingDataDirectory()
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'amazon-pod/01-small-changes.vtp'))
vis.showPolyData(polyData, 'pointcloud snapshot', visible=False)

# remove ground and clip to just the pod:
groundPoints, polyData = segmentation.removeGround(polyData)
vis.showPolyData(polyData, 'scene', visible=False)
polyData = segmentation.addCoordArraysToPolyData(polyData)
polyData = segmentation.thresholdPoints(polyData, 'y', [1, 1.6])
polyData = segmentation.thresholdPoints(polyData, 'x', [-1.2, 0.5])
vis.showPolyData(polyData, 'clipped', visible=False)

# remove outliers
polyData = segmentation.labelOutliers(polyData, searchRadius=0.03, neighborsInSearchRadius=40)
polyData = segmentation.thresholdPoints(polyData, 'is_outlier', [0, 0])
vis.showPolyData(polyData, 'inliers', visible=False)
# Test script: automatic valve fitting from a sparse stereo point cloud.
from ddapp import roboturdf

app = ConsoleApp()

# create a view
view = app.createView()
segmentation._defaultSegmentationView = view
segmentation.initAffordanceManager(view)

robotStateModel, robotStateJointController = roboturdf.loadRobotModel('robot state model', view, parent='sensors', color=roboturdf.getRobotGrayColor(), visible=True)
segmentationroutines.SegmentationContext.initWithRobot(robotStateModel)

# load poly data
dataDir = app.getTestingDataDirectory()
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-sparse-stereo.pcd'))
vis.showPolyData(polyData, 'pointcloud snapshot original', colorByName='rgb_colors')
polyData = segmentationroutines.sparsifyStereoCloud(polyData)
vis.showPolyData(polyData, 'pointcloud snapshot')

# fit valve and lever
segmentation.segmentValveWallAuto(.2, mode='valve', removeGroundMethod=segmentation.removeGroundSimple)

if app.getTestingInteractiveEnabled():
    v = applogic.getCurrentView()
    v.camera().SetPosition([3, 3, 3])
    v.camera().SetFocalPoint([0, 0, 0])
    view.show()
    app.showObjectModel()
# Test script fragment (PEP8 variant): find drills on a table in stereo data.
# NOTE(review): the joint assignments below appear to sit inside a
# triple-quoted (disabled) block whose opening quote is outside this
# view — the closing ''' follows push(); confirm against the full file.
robotStateJointController.q[0] = 0.728
robotStateJointController.q[1] = -0.7596
robotStateJointController.q[2] = 0.79788
robotStateJointController.q[33] = 0.5 # head down
robotStateJointController.push()
'''

# Active path: user-specified ground height and view frame instead of
# the robot-derived segmentation context.
groundHeight = 0.0
viewFrame = segmentation.transformUtils.frameFromPositionAndRPY(
    [1, -1, groundHeight + 1.5], [0, 0, -35])
segmentationroutines.SegmentationContext.initWithUser(groundHeight, viewFrame)

# load poly data
dataDir = app.getTestingDataDirectory()
polyData = ioUtils.readPolyData(
    os.path.join(dataDir, 'tabletop/table-sparse-stereo.vtp'))
vis.showPolyData(polyData, 'pointcloud snapshot original', colorByName='rgb_colors')
polyData = segmentationroutines.sparsifyStereoCloud(polyData)
vis.showPolyData(polyData, 'pointcloud snapshot')

# Use only returns near the robot:
polyData = segmentation.addCoordArraysToPolyData(polyData)
polyData = segmentation.thresholdPoints(polyData, 'distance_along_view_x', [0, 1.3])

segmentation.segmentTableThenFindDrills(polyData, [1.2864902, -0.93351376, 1.10208917])

# NOTE(review): the body of this 'if' continues beyond this view.
if app.getTestingInteractiveEnabled():
# create a view view = app.createView() segmentation._defaultSegmentationView = view robotStateModel, robotStateJointController = roboturdf.loadRobotModel( 'robot state model', view, parent='sensors', color=roboturdf.getRobotGrayColor(), visible=True) segmentationroutines.SegmentationContext.initWithRobot(robotStateModel) # load poly data dataDir = app.getTestingDataDirectory() polyData = ioUtils.readPolyData( os.path.join(dataDir, 'valve/valve-sparse-stereo.pcd')) vis.showPolyData(polyData, 'pointcloud snapshot original', colorByName='rgb_colors') polyData = segmentationroutines.sparsifyStereoCloud(polyData) vis.showPolyData(polyData, 'pointcloud snapshot') # fit valve and lever segmentation.segmentValveWallAuto( .2, mode='valve', removeGroundMethod=segmentation.removeGroundSimple) if app.getTestingInteractiveEnabled(): v = applogic.getCurrentView() v.camera().SetPosition([3, 3, 3]) v.camera().SetFocalPoint([0, 0, 0])