Example #1
0
 def getPointCloud(self):
     assert self.pointCloudSource in ('lidar', 'stereo')
     if self.pointCloudSource == 'stereo':
         return segmentation.getDisparityPointCloud(decimation=1,
                                                    removeOutliers=False)
     else:
         return segmentation.getCurrentRevolutionData()
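The snippet above switches between the stereo and lidar segmentation sources based on a pointCloudSource attribute. A minimal usage sketch, assuming panel is whatever object defines getPointCloud() (the name panel is illustrative, not from the source):

# hypothetical usage; only 'lidar'/'stereo' and getPointCloud() come from the snippet
panel.pointCloudSource = 'stereo'    # or 'lidar' for the default branch
polyData = panel.getPointCloud()     # vtkPolyData from the selected sensor
print(polyData.GetNumberOfPoints())  # quick sanity check on the result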
Example #2
0
    def updatePointcloudSnapshot(self):

        if (self.useLidar is True):
            return vis.updatePolyData(segmentation.getCurrentRevolutionData(),
                                      'pointcloud snapshot', parent='segmentation')
        else:
            return vis.updatePolyData(segmentation.getDisparityPointCloud(4),
                                      'pointcloud snapshot', parent='segmentation')
Example #3
0
    def updatePointcloudSnapshot(self):

        if (self.useLidar is True):
            return vis.updatePolyData(segmentation.getCurrentRevolutionData(),
                                      'pointcloud snapshot',
                                      parent='segmentation')
        else:
            return vis.updatePolyData(segmentation.getDisparityPointCloud(4),
                                      'pointcloud snapshot',
                                      parent='segmentation')
Example #4
0
    def getInputPointCloud(self):
        polyData = segmentation.getCurrentRevolutionData()
        if polyData is None:
            obj = om.findObjectByName('scene')
            if obj:
                polyData = obj.polyData
            else: # fall back to map in case we used mapping rather than loading of a scene
                obj = om.findObjectByName('map')
                if obj:
                    polyData = obj.polyData

        return polyData
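The method above prefers live revolution data and falls back to a loaded 'scene' object, then to a 'map' object. A hedged sketch of the same fallback expressed as a loop over candidate names, assuming om.findObjectByName and the polyData attribute behave as in the snippet (getInputPointCloudOrNamed is a hypothetical helper, not part of the source):

def getInputPointCloudOrNamed(objectNames=('scene', 'map')):
    # prefer live sensor data when a lidar revolution is available
    polyData = segmentation.getCurrentRevolutionData()
    if polyData is not None:
        return polyData
    # otherwise fall back to the first matching object in the object model
    for name in objectNames:
        obj = om.findObjectByName(name)
        if obj is not None:
            return obj.polyData
    return None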
Example #5
0
    def makeReplanRequest(self, standingFootName, removeFirstLeftStep = True, nextDoubleSupportPose=None):

        if (self.processContinuousStereo):
            polyData = self.cameraView.getStereoPointCloud(2,'CAMERA_FUSED', cameraName='CAMERA_TSDF', removeSize=4000)
            doStereoFiltering = True
            print "makeReplanRequest processContinuousStereo"
        elif (self.processRawStereo):
            polyData = self.cameraView.getStereoPointCloud(2,'CAMERA')
            doStereoFiltering = True
            print "makeReplanRequest processRawStereo"
        else:
            polyData = segmentation.getCurrentRevolutionData()
            doStereoFiltering = False

        self.replanFootsteps(polyData, standingFootName, removeFirstLeftStep, doStereoFiltering, nextDoubleSupportPose)
Example #6
0
    def makeReplanRequest(self, standingFootName, removeFirstLeftStep = True, nextDoubleSupportPose=None):

        if (self.processContinuousStereo):
            polyData = self.cameraView.getStereoPointCloud(2,'CAMERA_FUSED', cameraName='CAMERA_TSDF', removeSize=4000)
            doStereoFiltering = True
            print "makeReplanRequest processContinuousStereo"
        elif (self.processRawStereo):
            polyData = self.cameraView.getStereoPointCloud(2,'CAMERA')
            doStereoFiltering = True
            print "makeReplanRequest processRawStereo"
        else:
            polyData = segmentation.getCurrentRevolutionData()
            doStereoFiltering = False

        self.replanFootsteps(polyData, standingFootName, removeFirstLeftStep, doStereoFiltering, nextDoubleSupportPose)
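Both copies of makeReplanRequest repeat the same three-way point cloud selection. A sketch of how that selection could be factored into one helper, reusing only the calls shown above (_selectReplanPointCloud is a hypothetical name, not part of the source):

    def _selectReplanPointCloud(self):
        # returns (polyData, doStereoFiltering) for the active stereo/lidar mode
        if self.processContinuousStereo:
            polyData = self.cameraView.getStereoPointCloud(
                2, 'CAMERA_FUSED', cameraName='CAMERA_TSDF', removeSize=4000)
            return polyData, True
        if self.processRawStereo:
            return self.cameraView.getStereoPointCloud(2, 'CAMERA'), True
        return segmentation.getCurrentRevolutionData(), False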
Example #7
0
 def getPointCloud(self):
     return segmentation.getCurrentRevolutionData()
Example #8
0
 def getPointCloud(self):
     return segmentation.getCurrentRevolutionData()
Example #9
0
 def fitPosts():
     segmentation.fitVerticalPosts(segmentation.getCurrentRevolutionData())
     affordancePanel.onGetRaycastTerrain()
Example #10
0
    def fitRunningBoardAtFeet(self):

        # get stance frame
        startPose = self.getPlanningStartPose()
        stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(self.robotSystem.robotStateModel, useWorldZ=False)
        stanceFrameAxes = transformUtils.getAxesFromTransform(stanceFrame)

        # get pointcloud and extract search region covering the running board
        polyData = segmentation.getCurrentRevolutionData()
        polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01)
        _, polyData = segmentation.removeGround(polyData)
        polyData = segmentation.cropToBox(polyData, stanceFrame, [1.0, 1.0, 0.1])

        if not polyData.GetNumberOfPoints():
            print('empty search region point cloud')
            return

        vis.updatePolyData(polyData, 'running board search points', parent=segmentation.getDebugFolder(), color=[0,1,0], visible=False)

        # extract maximal points along the stance x axis
        perpAxis = stanceFrameAxes[0]
        edgeAxis = stanceFrameAxes[1]
        edgePoints = segmentation.computeEdge(polyData, edgeAxis, perpAxis)
        edgePoints = vnp.getVtkPolyDataFromNumpyPoints(edgePoints)
        vis.updatePolyData(edgePoints, 'edge points', parent=segmentation.getDebugFolder(), visible=True)

        # ransac fit a line to the edge points
        linePoint, lineDirection, fitPoints = segmentation.applyLineFit(edgePoints)
        if np.dot(lineDirection, stanceFrameAxes[1]) < 0:
            lineDirection = -lineDirection

        linePoints = segmentation.thresholdPoints(fitPoints, 'ransac_labels', [1.0, 1.0])
        dists = np.dot(vnp.getNumpyFromVtk(linePoints, 'Points')-linePoint, lineDirection)
        p1 = linePoint + lineDirection*np.min(dists)
        p2 = linePoint + lineDirection*np.max(dists)
        vis.updatePolyData(fitPoints, 'line fit points', parent=segmentation.getDebugFolder(), colorByName='ransac_labels', visible=False)


        # compute a new frame that is in plane with the stance frame
        # and matches the orientation and position of the detected edge
        origin = np.array(stanceFrame.GetPosition())
        normal = np.array(stanceFrameAxes[2])

        # project stance origin to edge, then back to foot frame
        originProjectedToEdge = linePoint + lineDirection*np.dot(origin - linePoint, lineDirection)
        originProjectedToPlane = segmentation.projectPointToPlane(originProjectedToEdge, origin, normal)
        zaxis = np.array(stanceFrameAxes[2])
        yaxis = np.array(lineDirection)
        xaxis = np.cross(yaxis, zaxis)
        xaxis /= np.linalg.norm(xaxis)
        yaxis = np.cross(zaxis, xaxis)
        yaxis /= np.linalg.norm(yaxis)

        d = DebugData()
        d.addSphere(p1, radius=0.005)
        d.addSphere(p2, radius=0.005)
        d.addLine(p1, p2)
        d.addSphere(originProjectedToEdge, radius=0.001, color=[1,0,0])
        d.addSphere(originProjectedToPlane, radius=0.001, color=[0,1,0])
        d.addLine(originProjectedToPlane, origin, color=[0,1,0])
        d.addLine(originProjectedToEdge, origin, color=[1,0,0])
        vis.updatePolyData(d.getPolyData(), 'running board edge', parent=segmentation.getDebugFolder(), colorByName='RGB255', visible=False)

        # update the running board box affordance position and orientation to
        # fit the detected edge
        box = self.spawnRunningBoardAffordance()
        boxDimensions = box.getProperty('Dimensions')
        t = transformUtils.getTransformFromAxesAndOrigin(xaxis, yaxis, zaxis, originProjectedToPlane)
        t.PreMultiply()
        t.Translate(-boxDimensions[0]/2.0, 0.0, -boxDimensions[2]/2.0)
        box.getChildFrame().copyFrame(t)

        self.initialize()
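The frame construction near the end of fitRunningBoardAtFeet (the cross products followed by normalization) turns the fitted edge direction and the stance plane normal into a right-handed orthonormal frame. A standalone sketch of just that step, using only numpy (the function name is illustrative, not part of the source):

import numpy as np

def axesFromEdgeAndNormal(lineDirection, planeNormal):
    # z follows the stance plane normal, y follows the detected edge direction,
    # x completes a right-handed frame; y is recomputed so all three axes
    # end up exactly orthonormal even if the inputs were not quite orthogonal
    zaxis = np.asarray(planeNormal, dtype=float)
    zaxis /= np.linalg.norm(zaxis)
    yaxis = np.asarray(lineDirection, dtype=float)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    return xaxis, yaxis, zaxis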
Example #11
0
 def getPointCloud(self):
     assert self.pointCloudSource in ('lidar', 'stereo')
     if self.pointCloudSource == 'stereo':
         return segmentation.getDisparityPointCloud(decimation=1, removeOutliers=False)
     else:
         return segmentation.getCurrentRevolutionData()
Example #12
0
    def fitRunningBoardAtFeet(self):

        # get stance frame
        startPose = self.getPlanningStartPose()
        stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(
            self.robotSystem.robotStateModel, useWorldZ=False)
        stanceFrameAxes = transformUtils.getAxesFromTransform(stanceFrame)

        # get pointcloud and extract search region covering the running board
        polyData = segmentation.getCurrentRevolutionData()
        polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01)
        _, polyData = segmentation.removeGround(polyData)
        polyData = segmentation.cropToBox(polyData, stanceFrame,
                                          [1.0, 1.0, 0.1])

        if not polyData.GetNumberOfPoints():
            print('empty search region point cloud')
            return

        vis.updatePolyData(polyData,
                           'running board search points',
                           parent=segmentation.getDebugFolder(),
                           color=[0, 1, 0],
                           visible=False)

        # extract maximal points along the stance x axis
        perpAxis = stanceFrameAxes[0]
        edgeAxis = stanceFrameAxes[1]
        edgePoints = segmentation.computeEdge(polyData, edgeAxis, perpAxis)
        edgePoints = vnp.getVtkPolyDataFromNumpyPoints(edgePoints)
        vis.updatePolyData(edgePoints,
                           'edge points',
                           parent=segmentation.getDebugFolder(),
                           visible=True)

        # ransac fit a line to the edge points
        linePoint, lineDirection, fitPoints = segmentation.applyLineFit(
            edgePoints)
        if np.dot(lineDirection, stanceFrameAxes[1]) < 0:
            lineDirection = -lineDirection

        linePoints = segmentation.thresholdPoints(fitPoints, 'ransac_labels',
                                                  [1.0, 1.0])
        dists = np.dot(
            vnp.getNumpyFromVtk(linePoints, 'Points') - linePoint,
            lineDirection)
        p1 = linePoint + lineDirection * np.min(dists)
        p2 = linePoint + lineDirection * np.max(dists)
        vis.updatePolyData(fitPoints,
                           'line fit points',
                           parent=segmentation.getDebugFolder(),
                           colorByName='ransac_labels',
                           visible=False)

        # compute a new frame that is in plane with the stance frame
        # and matches the orientation and position of the detected edge
        origin = np.array(stanceFrame.GetPosition())
        normal = np.array(stanceFrameAxes[2])

        # project stance origin to edge, then back to foot frame
        originProjectedToEdge = linePoint + lineDirection * np.dot(
            origin - linePoint, lineDirection)
        originProjectedToPlane = segmentation.projectPointToPlane(
            originProjectedToEdge, origin, normal)
        zaxis = np.array(stanceFrameAxes[2])
        yaxis = np.array(lineDirection)
        xaxis = np.cross(yaxis, zaxis)
        xaxis /= np.linalg.norm(xaxis)
        yaxis = np.cross(zaxis, xaxis)
        yaxis /= np.linalg.norm(yaxis)

        d = DebugData()
        d.addSphere(p1, radius=0.005)
        d.addSphere(p2, radius=0.005)
        d.addLine(p1, p2)
        d.addSphere(originProjectedToEdge, radius=0.001, color=[1, 0, 0])
        d.addSphere(originProjectedToPlane, radius=0.001, color=[0, 1, 0])
        d.addLine(originProjectedToPlane, origin, color=[0, 1, 0])
        d.addLine(originProjectedToEdge, origin, color=[1, 0, 0])
        vis.updatePolyData(d.getPolyData(),
                           'running board edge',
                           parent=segmentation.getDebugFolder(),
                           colorByName='RGB255',
                           visible=False)

        # update the running board box affordance position and orientation to
        # fit the detected edge
        box = self.spawnRunningBoardAffordance()
        boxDimensions = box.getProperty('Dimensions')
        t = transformUtils.getTransformFromAxesAndOrigin(
            xaxis, yaxis, zaxis, originProjectedToPlane)
        t.PreMultiply()
        t.Translate(-boxDimensions[0] / 2.0, 0.0, -boxDimensions[2] / 2.0)
        box.getChildFrame().copyFrame(t)

        self.initialize()