def processSingleBlock(robotStateModel, whichFile=0): if whichFile == 0: polyData = ioUtils.readPolyData(os.path.join(dataDir, "terrain/terrain_simple_ihmc.vtp")) vis.updatePolyData(polyData, "terrain_simple_ihmc.vtp", parent="continuous") else: polyData = ioUtils.readPolyData(os.path.join(dataDir, "terrain/terrain_flagstones_ihmc.vtp")) cwdemo.chosenTerrain = "stairs" cwdemo.supportContact = lcmdrc.footstep_params_t.SUPPORT_GROUPS_MIDFOOT_TOE vis.updatePolyData(polyData, "terrain_stairs_ihmc.vtp", parent="continuous") if drcargs.args().directorConfigFile.find("atlas") != -1: standingFootName = "l_foot" else: standingFootName = "leftFoot" standingFootFrame = robotStateModel.getLinkFrame(standingFootName) vis.updateFrame(standingFootFrame, standingFootName, parent="continuous", visible=False) # Step 1: filter the data down to a box in front of the robot: polyData = cwdemo.getRecedingTerrainRegion(polyData, footstepsDriver.getFeetMidPoint(robotStateModel)) # Step 2: find all the surfaces in front of the robot (about 0.75sec) clusters = segmentation.findHorizontalSurfaces(polyData) if clusters is None: print "No cluster found, stop walking now!" return # Step 3: find the corners of the minimum bounding rectangles blocks, match_idx, groundPlane = cwdemo.extractBlocksFromSurfaces(clusters, standingFootFrame) footsteps = cwdemo.placeStepsOnBlocks(blocks, groundPlane, standingFootName, standingFootFrame) cwdemo.drawFittedSteps(footsteps)
def processSnippet(): obj = om.getOrCreateContainer("continuous") om.getOrCreateContainer("cont debug", obj) if continuouswalkingDemo.processContinuousStereo: polyData = ioUtils.readPolyData(os.path.join(dataDir, "terrain/block_snippet_stereo.vtp")) polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01) else: polyData = ioUtils.readPolyData(os.path.join(dataDir, "terrain/block_snippet.vtp")) vis.updatePolyData(polyData, "walking snapshot trimmed", parent="continuous") standingFootName = "l_foot" standingFootFrame = robotStateModel.getLinkFrame(standingFootName) vis.updateFrame(standingFootFrame, standingFootName, parent="continuous", visible=False) # Step 2: find all the surfaces in front of the robot (about 0.75sec) clusters = segmentation.findHorizontalSurfaces(polyData) if clusters is None: print "No cluster found, stop walking now!" return # Step 3: find the corners of the minimum bounding rectangles blocks, match_idx, groundPlane = cwdemo.extractBlocksFromSurfaces(clusters, standingFootFrame) footsteps = cwdemo.placeStepsOnBlocks(blocks, groundPlane, standingFootName, standingFootFrame) cwdemo.drawFittedSteps(footsteps)
def processSingleBlock(robotStateModel, whichFile=0):
    """Fit a minimum bounding rectangle to a canned point cloud.

    whichFile 0 loads the 45-degree table-top scan; anything else loads
    the block-top scan.  The robot's left foot frame anchors the fit.
    """
    if whichFile == 0:
        cloud = ioUtils.readPolyData(os.path.join(dataDir, 'tabletop/table_top_45.vtp'))
    else:
        cloud = ioUtils.readPolyData(os.path.join(dataDir, 'terrain/block_top.vtp'))

    footLink = cwdemo.ikPlanner.leftFootLink
    footFrame = robotStateModel.getLinkFrame(footLink)
    segmentation.findMinimumBoundingRectangle(cloud, footFrame)
def processSingleBlock(robotStateModel, whichFile=0):
    """Fit a minimum bounding rectangle (via cwdemo) to a canned point cloud.

    whichFile 0 loads the 45-degree table-top scan; anything else loads
    the block-top scan.  The robot's l_foot frame anchors the fit.
    """
    relPath = "tabletop/table_top_45.vtp" if whichFile == 0 else "terrain/block_top.vtp"
    cloud = ioUtils.readPolyData(os.path.join(dataDir, relPath))
    footFrame = robotStateModel.getLinkFrame("l_foot")
    cwdemo.findMinimumBoundingRectangle(cloud, footFrame)
def updateGeometryFromProperties(self):
    """Rebuild this object's polydata from its Filename and Scale properties.

    Mesh resolution order: the mesh manager cache, then the file on disk
    (relative names are resolved against the DRC base directory).  A
    non-unit Scale is applied with a vtkTransformPolyDataFilter.  When no
    mesh can be loaded, a small axes frame is used as a placeholder so the
    object remains visible.
    """
    filename = self.getProperty('Filename')
    scale = self.getProperty('Scale')

    if not filename:
        polyData = vtk.vtkPolyData()
    else:
        polyData = self.getMeshManager().get(filename)

    if not polyData:
        if not os.path.isabs(filename):
            filename = os.path.join(director.getDRCBaseDir(), filename)
        if os.path.isfile(filename):
            polyData = ioUtils.readPolyData(filename)
            # Idiom fix: direct inequality instead of "not scale == ...".
            if scale != [1, 1, 1]:
                transform = vtk.vtkTransform()
                transform.Scale(scale)
                transformFilter = vtk.vtkTransformPolyDataFilter()
                transformFilter.SetInput(polyData)
                transformFilter.SetTransform(transform)
                transformFilter.Update()
                polyData = transformFilter.GetOutput()
        else:
            # use axes as a placeholder mesh
            d = DebugData()
            d.addFrame(vtk.vtkTransform(), scale=0.1, tubeRadius=0.005)
            polyData = d.getPolyData()

    self.setPolyData(polyData)
def loadPolyDataMeshes(geom): filename = Geometry.resolvePackageFilename(geom.string_data) basename, ext = os.path.splitext(filename) preferredExtensions = [".vtm", ".vtp", ".obj"] for x in preferredExtensions: if os.path.isfile(basename + x): filename = basename + x break if not os.path.isfile(filename): print "warning, cannot find file:", filename return [] if filename.endswith("vtm"): polyDataList = ioUtils.readMultiBlock(filename) else: polyDataList = [ioUtils.readPolyData(filename)] if USE_TEXTURE_MESHES: for polyData in polyDataList: Geometry.loadTextureForMesh(polyData, filename) return polyDataList
def _openGeometry(self, filename):
    """Read a geometry file and show it under the 'files' folder.

    Shows an error dialog instead when the file yields no points.
    """
    polyData = ioUtils.readPolyData(filename)
    if polyData and polyData.GetNumberOfPoints():
        vis.showPolyData(polyData, os.path.basename(filename), parent='files')
    else:
        self._showErrorMessage('Failed to read any data from file: %s' % filename, title='Reader error')
def loadMap(self, filename, transform=None): print filename pd = io.readPolyData(filename) if transform is not None: pd = filterUtils.transformPolyData(pd, self.cameraToLocalInit) pdi = vis.updatePolyData(pd, 'map') try: pdi.setProperty('Color By', 'rgb_colors') except Exception, e: print "Could not set color to RGB - not an element" #raise e
def __init__(self):
    """Course model for rehearsal: a prior point cloud plus named
    walking-target frames, all kept rigidly attached to the cloud by a
    FrameSync so dragging the cloud moves every target with it.
    """
    pose = transformUtils.poseFromTransform(vtk.vtkTransform())  # NOTE(review): 'pose' is never used — confirm it can be removed
    self.pointcloud = ioUtils.readPolyData(director.getDRCBaseDir() + '/software/models/rehearsal_pointcloud.vtp')
    self.pointcloudPD = vis.showPolyData(self.pointcloud, 'coursemodel', parent=None)
    # make the course cloud draggable, starting at the identity pose
    segmentation.makeMovable(self.pointcloudPD, transformUtils.transformFromPose(array([0, 0, 0]), array([1.0, 0., 0., 0.0])))
    self.originFrame = self.pointcloudPD.getChildFrame()

    # Hard-coded (position, quaternion) target poses surveyed during rehearsal,
    # each shown as a frame parented to the course cloud.
    t = transformUtils.transformFromPose(array([-4.39364111, -0.51507392, -0.73125563]), array([0.93821625, 0., 0., -0.34604951]))
    self.valveWalkFrame = vis.updateFrame(t, 'ValveWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-3.31840048, 0.36408685, -0.67413123]), array([0.93449475, 0., 0., -0.35597691]))
    self.drillPreWalkFrame = vis.updateFrame(t, 'DrillPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-2.24553758, -0.52990939, -0.73255338]), array([0.93697004, 0., 0., -0.34940972]))
    self.drillWalkFrame = vis.updateFrame(t, 'DrillWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-2.51306835, -0.92994004, -0.74173541]), array([-0.40456572, 0., 0., 0.91450893]))
    self.drillWallWalkFarthestSafeFrame = vis.updateFrame(t, 'DrillWallWalkFarthestSafe', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-2.5314524, -0.27401861, -0.71302976]), array([0.98691519, 0., 0., -0.16124022]))
    self.drillWallWalkBackFrame = vis.updateFrame(t, 'DrillWallWalkBack', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-1.16122318, 0.04723203, -0.67493468]), array([0.93163145, 0., 0., -0.36340451]))
    self.surprisePreWalkFrame = vis.updateFrame(t, 'SurprisePreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-0.5176186, -1.00151554, -0.70650799]), array([0.84226497, 0., 0., -0.53906374]))
    self.surpriseWalkFrame = vis.updateFrame(t, 'SurpriseWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-0.69100097, -0.43713269, -0.68495922]), array([0.98625075, 0., 0., -0.16525575]))
    self.surpriseWalkBackFrame = vis.updateFrame(t, 'SurpriseWalkBack', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([0.65827322, -0.08028796, -0.77370834]), array([0.94399977, 0., 0., -0.3299461]))
    self.terrainPreWalkFrame = vis.updateFrame(t, 'TerrainPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([5.47126425, -0.09790393, -0.70504679]), array([1., 0., 0., 0.]))
    self.stairsPreWalkFrame = vis.updateFrame(t, 'StairsPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    # Sync every target frame to the origin frame; ignoreIncoming keeps the
    # targets from driving the cloud in the opposite direction.
    self.frameSync = vis.FrameSync()
    self.frameSync.addFrame(self.originFrame)
    self.frameSync.addFrame(self.pointcloudPD.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.valveWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillPreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWallWalkFarthestSafeFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWallWalkBackFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surprisePreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surpriseWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surpriseWalkBackFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.terrainPreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.stairsPreWalkFrame, ignoreIncoming=True)
def _openGeometry(self, filename):
    """Read a geometry file and show it under the 'files' folder.

    VRML (.wrl) files are delegated to onOpenVrml; other files that yield
    no points produce an error dialog instead.
    """
    if filename.lower().endswith("wrl"):
        self.onOpenVrml(filename)
        return

    polyData = ioUtils.readPolyData(filename)
    if polyData and polyData.GetNumberOfPoints():
        vis.showPolyData(polyData, os.path.basename(filename), parent="files")
    else:
        self._showErrorMessage("Failed to read any data from file: %s" % filename, title="Reader error")
def loadFootMeshes():
    """Load the two foot meshes, slightly shrunk (2%) before display.

    Returns a two-element list of vtkPolyData built from the files listed
    in the module-level _footMeshFiles.
    """
    meshes = []
    for side in range(2):
        d = DebugData()
        for meshFile in _footMeshFiles[side]:
            d.addPolyData(ioUtils.readPolyData(meshFile, computeNormals=True))
        shrink = vtk.vtkTransform()
        shrink.Scale(0.98, 0.98, 0.98)
        meshes.append(filterUtils.transformPolyData(d.getPolyData(), shrink))
    return meshes
def onOpenGeometry(filename):
    """Open a geometry file and show it under the 'files' folder.

    VRML (.wrl) files are delegated to onOpenVrml; files that yield no
    points produce an error dialog instead.
    """
    if filename.lower().endswith('wrl'):
        onOpenVrml(filename)
        return

    polyData = io.readPolyData(filename)
    if polyData and polyData.GetNumberOfPoints():
        vis.showPolyData(polyData, os.path.basename(filename), parent='files')
    else:
        app.showErrorMessage('Failed to read any data from file: %s' % filename, title='Reader error')
def openGeometry(self, filename):
    """Open a geometry file, show it under the root folder, and give it a
    movable child frame.

    VRML (.wrl) files are delegated to onOpenVrml; files that yield no
    points produce an error dialog instead.
    """
    if filename.lower().endswith('wrl'):
        self.onOpenVrml(filename)
        return

    polyData = ioUtils.readPolyData(filename)
    if not polyData or not polyData.GetNumberOfPoints():
        self.app.showErrorMessage('Failed to read any data from file: %s' % filename, title='Reader error')
        return

    item = vis.showPolyData(polyData, os.path.basename(filename), parent=self.getRootFolder())
    vis.addChildFrame(item)
def loadPolyDataMeshes(geom):
    """Load mesh polydata for a geometry message.

    .wrl filenames are redirected to their .obj siblings.  When texture
    meshes are enabled and a .vtm multiblock sibling exists, it is
    preferred; otherwise the file is read as a single polydata.
    """
    filename = geom.string_data
    basename, ext = os.path.splitext(filename)
    if ext.lower() == '.wrl':
        filename = basename + '.obj'

    multiBlockFilename = basename + '.vtm'
    if USE_TEXTURE_MESHES and os.path.isfile(multiBlockFilename):
        return Geometry.loadMultiBlockMeshes(multiBlockFilename)
    return [ioUtils.readPolyData(filename)]
def buildRobot(x=0, y=0):
    """Show the crazyflie model as a vis object with a movable child frame.

    x and y are accepted for API compatibility but not used by the visible
    code; reposition the robot via the returned frame.  Returns
    (object, frame).
    """
    mesh = ioUtils.readPolyData('crazyflie.obj')
    s = 0.005
    t = vtk.vtkTransform()
    t.RotateX(90)
    t.Scale(s, s, s)
    mesh = filterUtils.transformPolyData(mesh, t)
    obj = vis.showPolyData(mesh, 'robot')
    robotFrame = vis.addChildFrame(obj)
    return obj, robotFrame
def __init__(self, velocity=25.0, scale=0.15, exploration=0.5, model="A10.obj"):
    """Constructs a Robot.

    Args:
        velocity: Velocity of the robot in the forward direction.
        scale: Scale of the model.
        exploration: Exploration rate.
        model: Object model to use.
    """
    self._target = (0, 0)
    self._exploration = exploration

    # scale the model mesh before handing it to the base class
    scaling = vtk.vtkTransform()
    scaling.Scale(scale, scale, scale)
    mesh = ioUtils.readPolyData(model)
    mesh = filterUtils.transformPolyData(mesh, scaling)

    super(Robot, self).__init__(velocity, mesh)
    self._ctrl = Controller()
def onShowMapButton(self):
    """Show the prior octomap point cloud, clearing any segmentation first.

    The map is re-read from disk on every call so on-disk changes are
    picked up.
    """
    filename = director.getDRCBaseDir() + "/software/build/data/octomap.pcd"
    self.octomap_cloud = io.readPolyData(filename)  # vtkPolyData
    assert self.octomap_cloud.GetNumberOfPoints() != 0

    # TODO: clip the cloud to the robot's height; earlier attempts with
    # segmentation.cropToLineSegment on z did not work well yet.

    # remove any previous segmentation results before showing the map
    om.removeFromObjectModel(om.getOrCreateContainer("segmentation"))

    vis.showPolyData(self.octomap_cloud, "prior map", alpha=1.0, color=[0, 0, 0.4])
def updateGeometryFromProperties(self):
    """Refresh this object's polydata from its Filename property.

    Lookup order: mesh manager, then the file on disk (relative names are
    resolved against the DRC base directory).  Falls back to a small axes
    frame placeholder when nothing can be loaded.
    """
    filename = self.getProperty('Filename')

    if not filename:
        polyData = vtk.vtkPolyData()
    else:
        polyData = self.getMeshManager().get(filename)

    if not polyData:
        if not os.path.isabs(filename):
            filename = os.path.join(director.getDRCBaseDir(), filename)
        if os.path.isfile(filename):
            polyData = ioUtils.readPolyData(filename)
        else:
            # placeholder: a small axes frame keeps the object visible
            d = DebugData()
            d.addFrame(vtk.vtkTransform(), scale=0.1, tubeRadius=0.005)
            polyData = d.getPolyData()

    self.setPolyData(polyData)
def createPolyDataFromFiles(geom): filename = Geometry.resolvePackageFilename(geom.string_data) basename, ext = os.path.splitext(filename) if filename in Geometry.MeshCache: return Geometry.MeshCache[filename] preferredExtensions = ['.vtm', '.vtp', '.obj'] for x in preferredExtensions: if os.path.isfile(basename + x): filename = basename + x break if not os.path.isfile(filename): print 'warning, cannot find file:', filename return [] visInfo = None if filename.endswith('vtm'): polyDataList = ioUtils.readMultiBlock(filename) else: if filename.endswith('obj'): polyDataList, actors = ioUtils.readObjMtl(filename) if actors: visInfo = Geometry.makeVisInfoFromActors(actors) else: polyDataList = [ioUtils.readPolyData(filename)] for polyData in polyDataList: Geometry.loadTextureForMesh(polyData, filename) result = (polyDataList, visInfo) Geometry.MeshCache[filename] = result return result
def getObjectPolyData(objectData, objectName):
    """Return the mesh for the named object as vtkPolyData."""
    meshFilename = getObjectMeshFilename(objectData, objectName)
    return ioUtils.readPolyData(meshFilename)
robotStateJointController.q[0] = 0.728
robotStateJointController.q[1] = -0.7596
robotStateJointController.q[2] = 0.79788
robotStateJointController.q[33] = 0.5 # head down
robotStateJointController.push()
'''

# NOTE(review): the lines above sit inside a triple-quoted string whose
# opening quotes are earlier in the file (commented-out robot placement).

# Set up a user view frame for segmentation at ground level.
groundHeight = 0.0
viewFrame = segmentation.transformUtils.frameFromPositionAndRPY([1, -1, groundHeight + 1.5], [0, 0, -35])
segmentationroutines.SegmentationContext.initWithUser(groundHeight, viewFrame)

# load poly data
dataDir = app.getTestingDataDirectory()
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'tabletop/table-sparse-stereo.vtp'))
vis.showPolyData(polyData, 'pointcloud snapshot original', colorByName='rgb_colors')
polyData = segmentationroutines.sparsifyStereoCloud(polyData)
vis.showPolyData(polyData, 'pointcloud snapshot')

# Use only returns near the robot:
polyData = segmentation.addCoordArraysToPolyData(polyData)
polyData = segmentation.thresholdPoints(polyData, 'distance_along_view_x', [0, 1.3])

# I removed segmentTableThenFindDrills() because it used a depreciated function:
#segmentation.segmentTableThenFindDrills(polyData, [1.2864902, -0.93351376, 1.10208917])
segmentation.segmentTableSceneClusters(polyData, [1.2864902, -0.93351376, 1.10208917],
input file with the ply extension replaced with vtp. Note, the ply file
needs to be in ascii format and not binary. The vtk ply reader seems to
crash on binary ply files. You can use meshlab to open a binary ply file
and re-save it as an ascii ply file.
'''

import os

from director import ioUtils
from director import vtkNumpy as vnp


if __name__ == '__main__':

    # NOTE(review): 'sys' is used below but its import is not visible in
    # this chunk — confirm it is imported near the top of the file.
    filename = sys.argv[1]
    outputFilename = os.path.splitext(filename)[0] + '.vtp'

    polyData = ioUtils.readPolyData(filename)

    # TODO:
    # This should just be fixed in ioUtils.readPolyData, but for now
    # there is a workaround for an issue with the ply reader.
    # The output of the ply reader has points but not vertex cells,
    # so create a new polydata with vertex cells and copy the cells over.
    points = vnp.getNumpyFromVtk(polyData, 'Points')
    newPolyData = vnp.numpyToPolyData(points, createVertexCells=True)
    polyData.SetVerts(newPolyData.GetVerts())

    print 'writing:', outputFilename
    ioUtils.writePolyData(polyData, outputFilename)
def load_poly_data(self):
    """Read the reconstruction mesh, transform it into world frame, crop it."""
    raw = ioUtils.readPolyData(self.reconstruction_filename)
    self.poly_data_raw = raw
    self.poly_data = filterUtils.transformPolyData(raw, self._reconstruction_to_world)
    self.crop_poly_data()
robotStateJointController.q[5] = -0.63677# math.radians(120) robotStateJointController.q[0] = 0.728 robotStateJointController.q[1] = -0.7596 robotStateJointController.q[2] = 0.79788 robotStateJointController.q[33] = 0.5 # head down robotStateJointController.push() ''' groundHeight = 0.0 viewFrame = segmentation.transformUtils.frameFromPositionAndRPY([1, -1, groundHeight + 1.5], [0, 0, -35]) segmentationroutines.SegmentationContext.initWithUser(groundHeight, viewFrame) # load poly data dataDir = app.getTestingDataDirectory() polyData = ioUtils.readPolyData(os.path.join(dataDir, 'tabletop/table-sparse-stereo.vtp')) vis.showPolyData(polyData, 'pointcloud snapshot original', colorByName='rgb_colors') polyData = segmentationroutines.sparsifyStereoCloud( polyData ) vis.showPolyData(polyData, 'pointcloud snapshot') # Use only returns near the robot: polyData = segmentation.addCoordArraysToPolyData(polyData) polyData = segmentation.thresholdPoints(polyData, 'distance_along_view_x', [0, 1.3]) segmentation.segmentTableThenFindDrills(polyData, [1.2864902, -0.93351376, 1.10208917]) if app.getTestingInteractiveEnabled(): v = applogic.getCurrentView() v.camera().SetPosition([3,3,3]) v.camera().SetFocalPoint([0,0,0])
def load_poly_data(self):
    """Read the reconstruction mesh and crop it (no world-frame transform)."""
    self.poly_data_raw = ioUtils.readPolyData(self.reconstruction_filename)
    # raw and working polydata are the same object here
    self.poly_data = self.poly_data_raw
    self.crop_poly_data()
from director import roboturdf

app = ConsoleApp()

# create a view
view = app.createView()
segmentation._defaultSegmentationView = view
segmentation.initAffordanceManager(view)

# load a robot model so segmentation has a robot frame to work from
robotStateModel, robotStateJointController = roboturdf.loadRobotModel('robot state model', view, parent='sensors', color=roboturdf.getRobotGrayColor(), visible=True)
segmentationroutines.SegmentationContext.initWithRobot(robotStateModel)

# load poly data
dataDir = app.getTestingDataDirectory()
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-sparse-stereo.pcd'))
vis.showPolyData(polyData, 'pointcloud snapshot original', colorByName='rgb_colors')
polyData = segmentationroutines.sparsifyStereoCloud(polyData)
vis.showPolyData(polyData, 'pointcloud snapshot')

# fit valve and lever
segmentation.segmentValveWallAuto(.2, mode='valve', removeGroundMethod=segmentation.removeGroundSimple)

if app.getTestingInteractiveEnabled():
    v = applogic.getCurrentView()
    v.camera().SetPosition([3,3,3])
    v.camera().SetFocalPoint([0,0,0])
    view.show()
    app.showObjectModel()
# Move robot to near to valve wall: # 0degrees #robotStateJointController.q[5] = math.radians(120) #robotStateJointController.q[0] = 0 #robotStateJointController.q[1] = 0 # 30,60,90 robotStateJointController.q[5] = math.radians(-120) robotStateJointController.q[0] = 1 robotStateJointController.q[1] = 1 robotStateJointController.q[2] = 0.85 robotStateJointController.push() # load poly data dataDir = app.getTestingDataDirectory() #polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene.vtp')) #polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene-30.vtp')) #polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene-60.vtp')) polyData = ioUtils.readPolyData( os.path.join(dataDir, 'valve/valve-lever-scene-90.vtp')) vis.showPolyData(polyData, 'pointcloud snapshot') segmentation.segmentValveWallAuto(.2, mode='both', removeGroundMethod=segmentation.removeGround) if app.getTestingInteractiveEnabled(): view.show() app.showObjectModel() app.start()
def load_poly_data(self):
    """Read the reconstruction mesh, move it into world frame, and crop it."""
    self.poly_data_raw = ioUtils.readPolyData(self.reconstruction_filename)
    worldMesh = filterUtils.transformPolyData(self.poly_data_raw, self._reconstruction_to_world)
    self.poly_data = worldMesh
    self.crop_poly_data()
from director import roboturdf

app = ConsoleApp()

# create a view
view = app.createView()
segmentation._defaultSegmentationView = view

# load a robot model so segmentation has a robot frame to work from
robotStateModel, robotStateJointController = roboturdf.loadRobotModel('robot state model', view, parent='sensors', color=roboturdf.getRobotGrayColor(), visible=True)
segmentationroutines.SegmentationContext.initWithRobot(robotStateModel)

# Move robot to near to table:
robotStateJointController.q[5] = math.radians(120)
robotStateJointController.q[0] = -1.5
robotStateJointController.q[1] = 2
robotStateJointController.q[2] = 0.95
robotStateJointController.push()

# load poly data
dataDir = app.getTestingDataDirectory()
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'tabletop/table-and-bin-scene.vtp'))
vis.showPolyData(polyData, 'pointcloud snapshot')

#segmentation.segmentTableScene(polyData, [-1.58661389, 2.91242337, 0.79958105] )
segmentation.segmentTableSceneClusters(polyData, [-1.58661389, 2.91242337, 0.79958105], clusterInXY=True)

if app.getTestingInteractiveEnabled():
    view.show()
    app.showObjectModel()
    app.start()
def __init__(self, velocity=10.0, scale=0.5, exploration=0.5, model="drone.obj",
             move_data=None, path=None, altitude=0., battery=4500.0,
             total_current=8000.0, return_base=True, base=None):
    """Constructs a Robot.

    Args:
        :param velocity: Velocity of the robot in the forward direction.
        :param scale: Scale of the model.
        :param exploration: Exploration rate.
        :param model: Object model to use.
        :param move_data: Visit points related to each cell.
        :param path: Path of drone.
        :param altitude: Altitude of drone.
        :param battery: Battery power of drone mAh.
        :param total_current: Mean current usage of drone.
        :param return_base: If robot needs to return base at the end of mission.
        :param base: Base location coordinates.
    """
    # avoid a mutable default argument for the base coordinates
    if base is None:
        base = [0, 0]
    self._target = (0, 0)
    self._exploration = exploration

    """ If drone is working on task or not. """
    self.working = True

    """ Power related parameters """
    self._battery = battery
    self._battery_consumption = 0.
    self._battery_percentage = 0.
    self._current = total_current
    # mean current draw converted to a per-second drain (mAh per second)
    self._unit_power_consumption = self._current * 1 / 3600
    # reserve 32% of capacity as the emergency threshold
    self._safety_limit_for_battery = 0.32 * self._battery
    self._return_base = return_base
    self._base = base
    self.emergency_land = False
    self.emergency_base_return = False

    """ Render robot in simulator. """
    # NOTE(review): unlike other Robot variants in this codebase, no
    # super().__init__ call is visible here — confirm base-class setup.
    t = vtk.vtkTransform()
    t.Scale(scale, scale, scale)
    t.RotateX(90.0)
    polydata = ioUtils.readPolyData(model)
    polydata = filterUtils.transformPolyData(polydata, t)

    """ Initialize data for motion """
    self._state = np.array([0., 0., 0.])
    self.altitude = altitude
    self._dt = 1.0 / 30.0  # simulation step: 30 Hz
    self._velocity = float(velocity)
    self._unit_shift = self.velocity * self._dt
    self._unit_angle_shift = self._dt * np.pi / 5
    self._raw_polydata = polydata
    self._polydata = polydata
    self._sensors = []
    self._moveData = move_data
    self._path = path
    self._target_cell_index = 0
    self._target_cell = 6
    self._target_move_index = 0
    self._target_point = None
    self.cell_change = True
    self._last_target_point = None
    self._obstacle_ahead = False
    self.total_motion_distance = 0
    # state flags used while acquiring / circling a target
    self._target_acquisition_movements = {
        "turn_face": False,
        "get_closer": False,
        "rotate": False,
        "total_angular_movement": 0.,
        "circle_completed": False,
        "rotation_angle": self._dt * np.pi / 15
    }
    # compass headings in radians
    self.directions = {
        "left": np.pi,
        "top": np.pi / 2,
        "right": 0,
        "bottom": -np.pi / 2
    }
    self.init_move_data()
def loadTableTopPointCloud():
    """Load the canned table-and-door scene and show it as a snapshot."""
    sceneFile = os.path.join(app.getTestingDataDirectory(), 'tabletop/table-and-door-scene.vtp')
    return vis.showPolyData(ioUtils.readPolyData(sceneFile), 'pointcloud snapshot')
    # NOTE(review): these first two statements are the tail of a function
    # whose definition is above this chunk — indentation reconstructed.
    vis.updatePolyData(polyData, 'rest', parent='segmentation', visible=False)
    return polyData


app = ConsoleApp()

# create a view
view = app.createView()
segmentation._defaultSegmentationView = view

robotStateModel, robotStateJointController = roboturdf.loadRobotModel('robot state model', view, parent='sensors', color=roboturdf.getRobotGrayColor(), visible=True)
segmentationroutines.SegmentationContext.initWithRobot(robotStateModel)

# load poly data
dataDir = app.getTestingDataDirectory()
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'amazon-pod/01-small-changes.vtp'))
vis.showPolyData(polyData, 'pointcloud snapshot', visible=False)

# remove ground and clip to just the pod:
groundPoints, polyData = segmentation.removeGround(polyData)
vis.showPolyData(polyData, 'scene', visible=False)
polyData = segmentation.addCoordArraysToPolyData(polyData)
polyData = segmentation.thresholdPoints(polyData, 'y', [1, 1.6])
polyData = segmentation.thresholdPoints(polyData, 'x', [-1.2, 0.5])
vis.showPolyData(polyData, 'clipped', visible=False)

# remove outliers
polyData = segmentation.labelOutliers(polyData, searchRadius=0.03, neighborsInSearchRadius=40)
polyData = segmentation.thresholdPoints(polyData, 'is_outlier', [0, 0])
vis.showPolyData(polyData, 'inliers', visible=False)
def __init__(self):
    """Course model for rehearsal: a prior point cloud plus named
    walking-target frames, all kept rigidly attached to the cloud by a
    FrameSync so dragging the cloud moves every target with it.
    """
    pose = transformUtils.poseFromTransform(vtk.vtkTransform())  # NOTE(review): 'pose' is never used — confirm it can be removed
    self.pointcloud = ioUtils.readPolyData(
        director.getDRCBaseDir() + '/software/models/rehearsal_pointcloud.vtp')
    self.pointcloudPD = vis.showPolyData(self.pointcloud, 'coursemodel', parent=None)
    # make the course cloud draggable, starting at the identity pose
    segmentation.makeMovable(
        self.pointcloudPD,
        transformUtils.transformFromPose(array([0, 0, 0]), array([1.0, 0., 0., 0.0])))
    self.originFrame = self.pointcloudPD.getChildFrame()

    # Hard-coded (position, quaternion) target poses surveyed during
    # rehearsal, each shown as a frame parented to the course cloud.
    t = transformUtils.transformFromPose(
        array([-4.39364111, -0.51507392, -0.73125563]),
        array([0.93821625, 0., 0., -0.34604951]))
    self.valveWalkFrame = vis.updateFrame(t, 'ValveWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(
        array([-3.31840048, 0.36408685, -0.67413123]),
        array([0.93449475, 0., 0., -0.35597691]))
    self.drillPreWalkFrame = vis.updateFrame(t, 'DrillPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(
        array([-2.24553758, -0.52990939, -0.73255338]),
        array([0.93697004, 0., 0., -0.34940972]))
    self.drillWalkFrame = vis.updateFrame(t, 'DrillWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(
        array([-2.51306835, -0.92994004, -0.74173541]),
        array([-0.40456572, 0., 0., 0.91450893]))
    self.drillWallWalkFarthestSafeFrame = vis.updateFrame(
        t, 'DrillWallWalkFarthestSafe', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(
        array([-2.5314524, -0.27401861, -0.71302976]),
        array([0.98691519, 0., 0., -0.16124022]))
    self.drillWallWalkBackFrame = vis.updateFrame(t, 'DrillWallWalkBack', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(
        array([-1.16122318, 0.04723203, -0.67493468]),
        array([0.93163145, 0., 0., -0.36340451]))
    self.surprisePreWalkFrame = vis.updateFrame(t, 'SurprisePreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(
        array([-0.5176186, -1.00151554, -0.70650799]),
        array([0.84226497, 0., 0., -0.53906374]))
    self.surpriseWalkFrame = vis.updateFrame(t, 'SurpriseWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(
        array([-0.69100097, -0.43713269, -0.68495922]),
        array([0.98625075, 0., 0., -0.16525575]))
    self.surpriseWalkBackFrame = vis.updateFrame(t, 'SurpriseWalkBack', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(
        array([0.65827322, -0.08028796, -0.77370834]),
        array([0.94399977, 0., 0., -0.3299461]))
    self.terrainPreWalkFrame = vis.updateFrame(t, 'TerrainPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(
        array([5.47126425, -0.09790393, -0.70504679]),
        array([1., 0., 0., 0.]))
    self.stairsPreWalkFrame = vis.updateFrame(t, 'StairsPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    # Sync every target frame to the origin frame; ignoreIncoming keeps
    # the targets from driving the cloud in the opposite direction.
    self.frameSync = vis.FrameSync()
    self.frameSync.addFrame(self.originFrame)
    self.frameSync.addFrame(self.pointcloudPD.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.valveWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillPreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWallWalkFarthestSafeFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWallWalkBackFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surprisePreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surpriseWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surpriseWalkBackFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.terrainPreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.stairsPreWalkFrame, ignoreIncoming=True)
def load_polydata(filename="/home/manuelli/code/modules/dense_correspondence_manipulation/scripts/test_reconstruction.ply"):
    """Load a reconstruction mesh and show it as 'test'.

    Generalized: the previously hard-coded absolute path is now a defaulted
    parameter, so callers can point at any mesh file while existing calls
    keep working unchanged.
    """
    poly_data = ioUtils.readPolyData(filename)
    vis.showPolyData(poly_data, 'test')
file and re-save it as an ascii ply file.
'''

import os

from director import ioUtils
from director import vtkNumpy as vnp


if __name__ == '__main__':

    # NOTE(review): 'sys' is used below but its import is not visible in
    # this chunk — confirm it is imported near the top of the file.
    filename = sys.argv[1]
    outputFilename = os.path.splitext(filename)[0] + '.vtp'

    print "reading poly data"
    polyData = ioUtils.readPolyData(filename)
    print "finished reading poly data"

    # TODO:
    # This should just be fixed in ioUtils.readPolyData, but for now
    # there is a workaround for an issue with the ply reader.
    # The output of the ply reader has points but not vertex cells,
    # so create a new polydata with vertex cells and copy the cells over.
    points = vnp.getNumpyFromVtk(polyData, 'Points')
    newPolyData = vnp.numpyToPolyData(points, createVertexCells=True)
    polyData.SetVerts(newPolyData.GetVerts())

    print 'writing:', outputFilename
    ioUtils.writePolyData(polyData, outputFilename)
# create a view view = app.createView() segmentation._defaultSegmentationView = view robotStateModel, robotStateJointController = roboturdf.loadRobotModel( 'robot state model', view, parent='sensors', color=roboturdf.getRobotGrayColor(), visible=True) segmentationroutines.SegmentationContext.initWithRobot(robotStateModel) # load poly data dataDir = app.getTestingDataDirectory() polyData = ioUtils.readPolyData( os.path.join(dataDir, 'amazon-pod/01-small-changes.vtp')) vis.showPolyData(polyData, 'pointcloud snapshot', visible=False) # remove ground and clip to just the pod: groundPoints, polyData = segmentation.removeGround(polyData) vis.showPolyData(polyData, 'scene', visible=False) polyData = segmentation.addCoordArraysToPolyData(polyData) polyData = segmentation.thresholdPoints(polyData, 'y', [1, 1.6]) polyData = segmentation.thresholdPoints(polyData, 'x', [-1.2, 0.5]) vis.showPolyData(polyData, 'clipped', visible=False) # remove outliers polyData = segmentation.labelOutliers(polyData, searchRadius=0.03, neighborsInSearchRadius=40) polyData = segmentation.thresholdPoints(polyData, 'is_outlier', [0, 0])
# load a robot model so segmentation has a robot frame to work from
robotStateModel, robotStateJointController = roboturdf.loadRobotModel(
    'robot state model', view, parent='sensors',
    color=roboturdf.getRobotGrayColor(), visible=True)
segmentationroutines.SegmentationContext.initWithRobot(robotStateModel)

# Move robot to near to table:
robotStateJointController.q[5] = math.radians(120)
robotStateJointController.q[0] = -1.5
robotStateJointController.q[1] = 2
robotStateJointController.q[2] = 0.95
robotStateJointController.push()

# load poly data
dataDir = app.getTestingDataDirectory()
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'tabletop/table-and-bin-scene.vtp'))
vis.showPolyData(polyData, 'pointcloud snapshot')

# segment the table scene around a hand-picked point on the table
p1 = [-1.58661389, 2.91242337, 0.79958105]
data = segmentation.segmentTableScene(polyData, p1)
vis.showClusterObjects(data.clusters, parent='segmentation')
segmentation.showTable(data.table, parent='segmentation')

if app.getTestingInteractiveEnabled():
    view.show()
    app.showObjectModel()
    app.start()
        # NOTE(review): this chunk begins inside the body of a plan-status
        # callback whose 'def' and first 'if' are above this view —
        # indentation reconstructed.
        for link in links:
            robotHighlighter.highlightLink(link, [1, 0.4, 0.0])
    elif msg.plan_type == lcmdrc.plan_status_t.BRACING:
        # bracing plans highlight the links in red
        for link in links:
            robotHighlighter.highlightLink(link, [1, 0, 0])
    else:
        for link in links:
            robotHighlighter.dehighlightLink(link)


fallDetectorSub = lcmUtils.addSubscriber("PLAN_EXECUTION_STATUS", lcmdrc.plan_status_t, onPlanStatus)
fallDetectorSub.setSpeedLimit(10)

if useDataFiles:
    for filename in drcargs.args().data_files:
        polyData = io.readPolyData(filename)
        if polyData:
            vis.showPolyData(polyData, os.path.basename(filename))

if useImageWidget:
    imageWidget = cameraview.ImageWidget(cameraview.imageManager, 'CAMERA_LEFT', view)
    #imageWidget = cameraview.ImageWidget(cameraview.imageManager, 'KINECT_RGB', view)

if useCameraFrustumVisualizer and cameraview.CameraFrustumVisualizer.isCompatibleWithConfig():
    cameraFrustumVisualizer = cameraview.CameraFrustumVisualizer(robotStateModel, cameraview.imageManager, 'CAMERA_LEFT')


class ImageOverlayManager(object):

    def __init__(self):
        # default camera stream shown in the overlay
        self.viewName = 'CAMERA_LEFT'
# load a robot model so segmentation has a robot frame to work from
robotStateModel, robotStateJointController = roboturdf.loadRobotModel('robot state model', view, parent='sensors', color=roboturdf.getRobotGrayColor(), visible=True)
segmentationroutines.SegmentationContext.initWithRobot(robotStateModel)

# Move robot to near to valve wall:
# 0degrees
#robotStateJointController.q[5] = math.radians(120)
#robotStateJointController.q[0] = 0
#robotStateJointController.q[1] = 0

# 30,60,90
robotStateJointController.q[5] = math.radians(-120)
robotStateJointController.q[0] = 1
robotStateJointController.q[1] = 1
robotStateJointController.q[2] = 0.85
robotStateJointController.push()

# load poly data
dataDir = app.getTestingDataDirectory()
#polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene.vtp'))
#polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene-30.vtp'))
#polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene-60.vtp'))
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'valve/valve-lever-scene-90.vtp'))
vis.showPolyData(polyData, 'pointcloud snapshot')

# fit both the valve and the lever, removing the ground plane first
segmentation.segmentValveWallAuto(.2, mode='both', removeGroundMethod=segmentation.removeGround)

if app.getTestingInteractiveEnabled():
    view.show()
    app.showObjectModel()
    app.start()
if __name__ == '__main__':

    # setup: window size and command-line arguments
    view_height = 640
    view_width = 480
    data_dir = sys.argv[1]
    num_im = int(sys.argv[2])
    mesh = sys.argv[3]

    # launch app
    application = mainwindowapp.construct()

    # globals?
    view = app.getCurrentRenderView()
    #app.resetCamera(viewDirection=[-1,0,0], view=view)

    # load mesh TODO: load raw point cloud and convert to mesh automatically
    polyData = ioUtils.readPolyData(data_dir + '/' + sys.argv[3])
    obj = vis.showPolyData(polyData, name=mesh)

    # setup the render view and camera
    view.setFixedSize(view_height, view_width)
    setCameraInstrinsicsAsus(view)
    setCameraTransform(view.camera(), vtk.vtkTransform())
    view.forceRender()

    # NOTE(review): filter1 is constructed twice; the first instance is
    # immediately discarded — confirm the duplicate can be removed.
    filter1 = vtk.vtkWindowToImageFilter()
    filter1 = vtk.vtkWindowToImageFilter()
    imageWriter = vtk.vtkBMPWriter()
    scale = vtk.vtkImageShiftScale()
    filter1.SetInput(view.renderWindow())
    filter1.SetMagnification(1)