Example #1
def removePlaneAndBeyond(polyData, expectedNormal=[1,0,0], filterRange=[-np.inf, -0.03], whichAxis=1, whichAxisLetter='y', percentile = 95):

    yvalues = vnp.getNumpyFromVtk(polyData, 'Points')[:, whichAxis]
    backY = np.percentile(yvalues, percentile)

    if ( percentile > 50):
        searchRegion = segmentation.thresholdPoints(polyData, whichAxisLetter, [backY - 0.1, np.inf])
    else:
        searchRegion = segmentation.thresholdPoints(polyData, whichAxisLetter, [-np.inf, backY + 0.1])

    vis.updatePolyData(searchRegion, 'search region', parent="segmentation", colorByName=whichAxisLetter, visible=False)

    # find the plane of the back wall, remove it and the points behind it:
    _, origin, normal = segmentation.applyPlaneFit(searchRegion, distanceThreshold=0.02, expectedNormal=expectedNormal, perpendicularAxis=expectedNormal, returnOrigin=True)

    points = vnp.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vnp.addNumpyToVtk(polyData, dist, 'dist_to_plane')

    backFrame = transformUtils.getTransformFromOriginAndNormal(origin, normal, normalAxis=2)
    vis.updateFrame(backFrame, 'back frame', parent='segmentation', scale=0.15 , visible=False)
    vis.updatePolyData(polyData, 'dist to back', parent='segmentation', visible=False)

    polyData = segmentation.thresholdPoints(polyData, 'dist_to_plane', filterRange)
    vis.updatePolyData(polyData, 'back off and all', parent='segmentation', visible=False)

    return polyData
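
A minimal numpy sketch of the two steps removePlaneAndBeyond relies on: pick a search band around the given percentile of one axis, then score every point by its signed distance to the fitted plane. plane_band_demo is a hypothetical helper, and the hard-coded origin/normal stand in for the plane fit that segmentation.applyPlaneFit performs.

import numpy as np

def plane_band_demo(points, axis=1, percentile=95, band=0.1):
    coords = points[:, axis]
    back = np.percentile(coords, percentile)
    # high percentile: search just in front of the back wall; low percentile: in front of the front wall
    search = coords > back - band if percentile > 50 else coords < back + band

    # stand-in for segmentation.applyPlaneFit: pretend the fit returned this origin and normal
    origin = points[search].mean(axis=0)
    normal = np.array([0.0, 1.0, 0.0])

    # signed point-to-plane distance, the same formula used for the 'dist_to_plane' array
    dist = np.dot(points - origin, normal)
    return points[dist < -0.03]        # filterRange=[-np.inf, -0.03] keeps points in front of the plane

pts = np.random.rand(1000, 3)
print(plane_band_demo(pts).shape)
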
Example #2
def fitObjectsOnShelf(polyData, maxHeight = 0.25):
    # find the shelf plane:
    polyDataWithoutFront, _ = segmentation.removeMajorPlane(polyData, distanceThreshold=0.02)
    polyDataPlaneFit, origin, normal = segmentation.applyPlaneFit(polyDataWithoutFront,  expectedNormal=np.array([0.0,0.0,1.0]), perpendicularAxis=np.array([0.0,0.0,1.0]), returnOrigin=True)
    vis.updatePolyData(polyDataPlaneFit, 'polyDataPlaneFit', parent='segmentation', visible=False)

    shelfSurfacePoints = segmentation.thresholdPoints(polyDataPlaneFit, 'dist_to_plane', [-0.01, 0.01])
    shelfCenter = segmentation.computeCentroid(shelfSurfacePoints)
    shelfFrame = transformUtils.getTransformFromOriginAndNormal(shelfCenter, normal, normalAxis=2)
    vis.showFrame(shelfFrame, 'shelfFrame', parent='segmentation', scale=0.15 , visible=False)

    # find the points near to the shelf plane and find objects on it:
    points = vnp.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vnp.addNumpyToVtk(polyData, dist, 'dist_to_plane')
    shelfPoints = segmentation.thresholdPoints(polyData, 'dist_to_plane', [-0.01, maxHeight])
    vis.updatePolyData(shelfPoints, 'shelf', parent='segmentation', visible=False)

    data = segmentation.segmentTableScene(shelfPoints, shelfCenter, filterClustering = False )
    vis.showClusterObjects(data.clusters + [data.table], parent='segmentation')

    # remove the points that we considered from the original cloud
    dists = vnp.getNumpyFromVtk(polyData, 'dist_to_plane')
    # adding and subtracting 0.1 casts the boolean mask to a float array that VTK can store
    diffShelf = (((dists > maxHeight) + (dists < -0.01))) + 0.1 - 0.1
    vnp.addNumpyToVtk(polyData, diffShelf, 'diff_shelf')
    polyData = segmentation.thresholdPoints(polyData, 'diff_shelf', [1, 1])

    vis.updatePolyData(polyData, 'rest', parent='segmentation', visible=False)
    return polyData
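
The diff_shelf step above keeps only the points outside the [-0.01, maxHeight] band measured from the shelf plane; the 0.1 trick simply turns the boolean mask into floats. A short numpy sketch of the same mask, assuming the dist_to_plane values are already in hand:

import numpy as np

dists = np.array([-0.05, 0.0, 0.1, 0.3])     # signed heights relative to the shelf plane
maxHeight = 0.25

# 1.0 where a point lies outside the shelf band, 0.0 where it lies on or just above the shelf
outside = ((dists > maxHeight) | (dists < -0.01)).astype(float)
print(outside)                               # [1. 0. 0. 1.]

rest = dists[outside == 1.0]                 # equivalent of thresholding 'diff_shelf' on [1, 1]
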
Example #3
def removePlaneAndBeyond(polyData,
                         expectedNormal=[1, 0, 0],
                         filterRange=[-np.inf, -0.03],
                         whichAxis=1,
                         whichAxisLetter='y',
                         percentile=95):

    yvalues = vnp.getNumpyFromVtk(polyData, 'Points')[:, whichAxis]
    backY = np.percentile(yvalues, percentile)

    if (percentile > 50):
        searchRegion = segmentation.thresholdPoints(polyData, whichAxisLetter,
                                                    [backY - 0.1, np.inf])
    else:
        searchRegion = segmentation.thresholdPoints(polyData, whichAxisLetter,
                                                    [-np.inf, backY + 0.1])

    vis.updatePolyData(searchRegion,
                       'search region',
                       parent="segmentation",
                       colorByName=whichAxisLetter,
                       visible=False)

    # find the plane of the back wall, remove it and the points behind it:
    _, origin, normal = segmentation.applyPlaneFit(
        searchRegion,
        distanceThreshold=0.02,
        expectedNormal=expectedNormal,
        perpendicularAxis=expectedNormal,
        returnOrigin=True)

    points = vnp.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vnp.addNumpyToVtk(polyData, dist, 'dist_to_plane')

    backFrame = transformUtils.getTransformFromOriginAndNormal(origin,
                                                               normal,
                                                               normalAxis=2)
    vis.updateFrame(backFrame,
                    'back frame',
                    parent='segmentation',
                    scale=0.15,
                    visible=False)
    vis.updatePolyData(polyData,
                       'dist to back',
                       parent='segmentation',
                       visible=False)

    polyData = segmentation.thresholdPoints(polyData, 'dist_to_plane',
                                            filterRange)
    vis.updatePolyData(polyData,
                       'back off and all',
                       parent='segmentation',
                       visible=False)

    return polyData
Example #4
def fitObjectsOnShelf(polyData, maxHeight=0.25):
    # find the shelf plane:
    polyDataWithoutFront, _ = segmentation.removeMajorPlane(
        polyData, distanceThreshold=0.02)
    polyDataPlaneFit, origin, normal = segmentation.applyPlaneFit(
        polyDataWithoutFront,
        expectedNormal=np.array([0.0, 0.0, 1.0]),
        perpendicularAxis=np.array([0.0, 0.0, 1.0]),
        returnOrigin=True)
    vis.updatePolyData(polyDataPlaneFit,
                       'polyDataPlaneFit',
                       parent='segmentation',
                       visible=False)

    shelfSurfacePoints = segmentation.thresholdPoints(polyDataPlaneFit,
                                                      'dist_to_plane',
                                                      [-0.01, 0.01])
    shelfCenter = segmentation.computeCentroid(shelfSurfacePoints)
    shelfFrame = transformUtils.getTransformFromOriginAndNormal(shelfCenter,
                                                                normal,
                                                                normalAxis=2)
    vis.showFrame(shelfFrame,
                  'shelfFrame',
                  parent='segmentation',
                  scale=0.15,
                  visible=False)

    # find the points near to the shelf plane and find objects on it:
    points = vnp.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vnp.addNumpyToVtk(polyData, dist, 'dist_to_plane')
    shelfPoints = segmentation.thresholdPoints(polyData, 'dist_to_plane',
                                               [-0.01, maxHeight])
    vis.updatePolyData(shelfPoints,
                       'shelf',
                       parent='segmentation',
                       visible=False)

    data = segmentation.segmentTableScene(shelfPoints,
                                          shelfCenter,
                                          filterClustering=False)
    vis.showClusterObjects(data.clusters + [data.table], parent='segmentation')

    # remove the points that we considered from the original cloud
    dists = vnp.getNumpyFromVtk(polyData, 'dist_to_plane')
    # adding and subtracting 0.1 casts the boolean mask to a float array that VTK can store
    diffShelf = (((dists > maxHeight) + (dists < -0.01))) + 0.1 - 0.1
    vnp.addNumpyToVtk(polyData, diffShelf, 'diff_shelf')
    polyData = segmentation.thresholdPoints(polyData, 'diff_shelf', [1, 1])

    vis.updatePolyData(polyData, 'rest', parent='segmentation', visible=False)
    return polyData
Example #5
    def onNewWalkingGoal(self, walkingGoal=None):

        walkingGoal = walkingGoal or self.newWalkingGoalFrame(self.robotModel)
        frameObj = vis.updateFrame(walkingGoal,
                                   'walking goal',
                                   parent='planning',
                                   scale=0.25)
        frameObj.setProperty('Edit', True)

        rep = frameObj.widget.GetRepresentation()
        rep.SetTranslateAxisEnabled(2, False)
        rep.SetRotateAxisEnabled(0, False)
        rep.SetRotateAxisEnabled(1, False)
        frameObj.widget.HandleRotationEnabledOff()

        if self.placer:
            self.placer.stop()

        terrain = om.findObjectByName('HEIGHT_MAP_SCENE')
        if terrain:

            pos = np.array(frameObj.transform.GetPosition())

            polyData = filterUtils.removeNonFinitePoints(terrain.polyData)
            if polyData.GetNumberOfPoints():
                polyData = segmentation.labelDistanceToLine(
                    polyData, pos, pos + [0, 0, 1])
                polyData = segmentation.thresholdPoints(
                    polyData, 'distance_to_line', [0.0, 0.1])
                if polyData.GetNumberOfPoints():
                    pos[2] = np.nanmax(
                        vnp.getNumpyFromVtk(polyData, 'Points')[:, 2])
                    frameObj.transform.Translate(
                        pos - np.array(frameObj.transform.GetPosition()))

            d = DebugData()
            d.addSphere((0, 0, 0), radius=0.03)
            handle = vis.showPolyData(d.getPolyData(),
                                      'walking goal terrain handle',
                                      parent=frameObj,
                                      visible=True,
                                      color=[1, 1, 0])
            handle.actor.SetUserTransform(frameObj.transform)
            self.placer = PlacerWidget(app.getCurrentRenderView(), handle,
                                       terrain)

            def onFramePropertyModified(propertySet, propertyName):
                if propertyName == 'Edit':
                    if propertySet.getProperty(propertyName):
                        self.placer.start()
                    else:
                        self.placer.stop()

            frameObj.properties.connectPropertyChanged(onFramePropertyModified)
            onFramePropertyModified(frameObj, 'Edit')

        frameObj.connectFrameModified(self.onWalkingGoalModified)
        self.onWalkingGoalModified(frameObj)
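
The terrain block above labels each height-map point by its distance to a vertical line through the goal and lifts the goal to the highest point within 10 cm. A plain-numpy sketch of that geometry (snap_z_to_terrain is a hypothetical helper, not one of the director calls used above):

import numpy as np

def snap_z_to_terrain(pos, terrain_points, radius=0.1):
    # horizontal distance from each terrain point to the vertical line through pos
    d_xy = np.linalg.norm(terrain_points[:, :2] - pos[:2], axis=1)
    near = terrain_points[d_xy < radius]
    if near.size:
        pos = pos.copy()
        pos[2] = np.nanmax(near[:, 2])       # highest terrain return near the goal
    return pos

terrain = np.array([[0.0, 0.0, 0.2], [0.05, 0.0, 0.35], [2.0, 2.0, 1.0]])
print(snap_z_to_terrain(np.array([0.0, 0.0, 0.0]), terrain))   # z snaps to 0.35
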
Example #6
    def __init__(self, uid, view, seed_pose, irisDriver, existing_region=None):

        d = DebugData()
        self.uid = uid
        vis.PolyDataItem.__init__(self, "IRIS region {:d}".format(uid), d.getPolyData(), view)
        self.transform = seed_pose
        d.addSphere((0,0,0), radius=0.02)
        self.seedObj = vis.showPolyData(d.getPolyData(), 'region seed', parent=om.getOrCreateContainer('IRIS region seeds'))
        self.seedObj.actor.SetUserTransform(self.transform)
        self.frameObj = vis.showFrame(self.transform, 'region seed frame',
                                      scale=0.2,
                                      visible=False,
                                      parent=self.seedObj)
        self.frameObj.setProperty('Edit', True)

        self.frameObj.widget.HandleRotationEnabledOff()

        terrain = om.findObjectByName('HEIGHT_MAP_SCENE')
        if terrain:
            rep = self.frameObj.widget.GetRepresentation()
            rep.SetTranslateAxisEnabled(2, False)
            rep.SetRotateAxisEnabled(0, False)
            rep.SetRotateAxisEnabled(1, False)

            pos = np.array(self.frameObj.transform.GetPosition())
            polyData = filterUtils.removeNonFinitePoints(terrain.polyData)
            if polyData.GetNumberOfPoints():
                polyData = segmentation.labelDistanceToLine(polyData, pos, pos+[0,0,1])
                polyData = segmentation.thresholdPoints(polyData, 'distance_to_line', [0.0, 0.1])
                if polyData.GetNumberOfPoints():
                    pos[2] = np.nanmax(vnp.getNumpyFromVtk(polyData, 'Points')[:,2])
                    self.frameObj.transform.Translate(pos - np.array(self.frameObj.transform.GetPosition()))

            self.placer = PlacerWidget(view, self.seedObj, terrain)
            self.placer.start()
        else:
            self.frameObj.setProperty('Edit', True)
            self.frameObj.setProperty('Visible', True)


        self.driver = irisDriver
        self.safe_region = None
        self.addProperty('Visible', True)
        self.addProperty('Enabled for Walking', True)
        self.addProperty('Alpha', 1.0)
        self.addProperty('Color', QtGui.QColor(200,200,20))

        self.frameObj.connectFrameModified(self.onFrameModified)
        if existing_region is None:
            self.onFrameModified(self.frameObj)
        else:
            self.setRegion(existing_region)

        self.setProperty('Alpha', 0.5)
        self.setProperty('Color', QtGui.QColor(220,220,220))
Example #7
    def getRecedingTerrainRegion(self, polyData, linkFrame):
        ''' Find the point cloud in front of the foot frame'''

        #polyData = shallowCopy(polyData)
        points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
        #vtkNumpy.addNumpyToVtk(polyData, points[:,0].copy(), 'x')
        #vtkNumpy.addNumpyToVtk(polyData, points[:,1].copy(), 'y')
        #vtkNumpy.addNumpyToVtk(polyData, points[:,2].copy(), 'z')

        viewOrigin = linkFrame.TransformPoint([0.0, 0.0, 0.0])
        viewX = linkFrame.TransformVector([1.0, 0.0, 0.0])
        viewY = linkFrame.TransformVector([0.0, 1.0, 0.0])
        viewZ = linkFrame.TransformVector([0.0, 0.0, 1.0])
        polyData = segmentation.labelPointDistanceAlongAxis(polyData, viewX, origin=viewOrigin, resultArrayName='distance_along_foot_x')
        polyData = segmentation.labelPointDistanceAlongAxis(polyData, viewY, origin=viewOrigin, resultArrayName='distance_along_foot_y')
        polyData = segmentation.labelPointDistanceAlongAxis(polyData, viewZ, origin=viewOrigin, resultArrayName='distance_along_foot_z')

        polyData = segmentation.thresholdPoints(polyData, 'distance_along_foot_x', [0.12, 1.6])
        polyData = segmentation.thresholdPoints(polyData, 'distance_along_foot_y', [-0.4, 0.4])
        polyData = segmentation.thresholdPoints(polyData, 'distance_along_foot_z', [-0.4, 0.4])

        vis.updatePolyData( polyData, 'walking snapshot trimmed', parent='cont debug', visible=True)
        return polyData
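
The threshold chain above is just a box crop expressed in the foot frame. Assuming the frame is available as an origin plus orthonormal axes, the same crop in plain numpy (crop_box_in_front is a hypothetical helper, not part of director):

import numpy as np

def crop_box_in_front(points, origin, x_axis, y_axis, z_axis):
    rel = points - origin
    dx = rel @ x_axis                 # distance along the foot's forward axis
    dy = rel @ y_axis
    dz = rel @ z_axis
    # same bounds as the thresholdPoints calls: 0.12-1.6 m ahead, +/-0.4 m sideways and vertically
    keep = (dx > 0.12) & (dx < 1.6) & (np.abs(dy) < 0.4) & (np.abs(dz) < 0.4)
    return points[keep]

pts = np.random.uniform(-2.0, 2.0, size=(500, 3))
print(crop_box_in_front(pts, np.zeros(3), *np.eye(3)).shape)
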
Example #8
    def onNewWalkingGoal(self, walkingGoal=None):

        walkingGoal = walkingGoal or self.newWalkingGoalFrame(self.robotModel)
        frameObj = vis.updateFrame(walkingGoal, 'walking goal', parent='planning', scale=0.25)
        frameObj.setProperty('Edit', True)


        rep = frameObj.widget.GetRepresentation()
        rep.SetTranslateAxisEnabled(2, False)
        rep.SetRotateAxisEnabled(0, False)
        rep.SetRotateAxisEnabled(1, False)
        frameObj.widget.HandleRotationEnabledOff()

        if self.placer:
            self.placer.stop()

        terrain = om.findObjectByName('HEIGHT_MAP_SCENE')
        if terrain:

            pos = np.array(frameObj.transform.GetPosition())

            polyData = filterUtils.removeNonFinitePoints(terrain.polyData)
            if polyData.GetNumberOfPoints():
                polyData = segmentation.labelDistanceToLine(polyData, pos, pos+[0,0,1])
                polyData = segmentation.thresholdPoints(polyData, 'distance_to_line', [0.0, 0.1])
                if polyData.GetNumberOfPoints():
                    pos[2] = np.nanmax(vnp.getNumpyFromVtk(polyData, 'Points')[:,2])
                    frameObj.transform.Translate(pos - np.array(frameObj.transform.GetPosition()))

            d = DebugData()
            d.addSphere((0,0,0), radius=0.03)
            handle = vis.showPolyData(d.getPolyData(), 'walking goal terrain handle', parent=frameObj, visible=True, color=[1,1,0])
            handle.actor.SetUserTransform(frameObj.transform)
            self.placer = PlacerWidget(app.getCurrentRenderView(), handle, terrain)

            def onFramePropertyModified(propertySet, propertyName):
                if propertyName == 'Edit':
                    if propertySet.getProperty(propertyName):
                        self.placer.start()
                    else:
                        self.placer.stop()

            frameObj.properties.connectPropertyChanged(onFramePropertyModified)
            onFramePropertyModified(frameObj, 'Edit')

        frameObj.connectFrameModified(self.onWalkingGoalModified)
        self.onWalkingGoalModified(frameObj)
Example #9
robotStateJointController.push()
'''

groundHeight = 0.0
viewFrame = segmentation.transformUtils.frameFromPositionAndRPY([1, -1, groundHeight + 1.5], [0, 0, -35])

segmentationroutines.SegmentationContext.initWithUser(groundHeight, viewFrame)

# load poly data
dataDir = app.getTestingDataDirectory()
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'tabletop/table-sparse-stereo.vtp'))
vis.showPolyData(polyData, 'pointcloud snapshot original', colorByName='rgb_colors')
polyData = segmentationroutines.sparsifyStereoCloud( polyData )
vis.showPolyData(polyData, 'pointcloud snapshot')

# Use only returns near the robot:
polyData = segmentation.addCoordArraysToPolyData(polyData)
polyData = segmentation.thresholdPoints(polyData, 'distance_along_view_x', [0, 1.3])

segmentation.segmentTableThenFindDrills(polyData, [1.2864902,  -0.93351376,  1.10208917])

if app.getTestingInteractiveEnabled():

    v = applogic.getCurrentView()
    v.camera().SetPosition([3,3,3])
    v.camera().SetFocalPoint([0,0,0])

    view.show()
    app.showObjectModel()
    app.start()
Example #10
segmentationroutines.SegmentationContext.initWithUser(groundHeight, viewFrame)

# load poly data
dataDir = app.getTestingDataDirectory()
polyData = ioUtils.readPolyData(
    os.path.join(dataDir, 'tabletop/table-sparse-stereo.vtp'))
vis.showPolyData(polyData,
                 'pointcloud snapshot original',
                 colorByName='rgb_colors')
polyData = segmentationroutines.sparsifyStereoCloud(polyData)
vis.showPolyData(polyData, 'pointcloud snapshot')

# Use only returns near the robot:
polyData = segmentation.addCoordArraysToPolyData(polyData)
polyData = segmentation.thresholdPoints(polyData, 'distance_along_view_x',
                                        [0, 1.3])

segmentation.segmentTableThenFindDrills(polyData,
                                        [1.2864902, -0.93351376, 1.10208917])

if app.getTestingInteractiveEnabled():

    v = applogic.getCurrentView()
    v.camera().SetPosition([3, 3, 3])
    v.camera().SetFocalPoint([0, 0, 0])

    view.show()
    app.showObjectModel()
    app.start()
Example #11
    def __init__(self, uid, view, seed_pose, irisDriver, existing_region=None):

        d = DebugData()
        self.uid = uid
        vis.PolyDataItem.__init__(self, "IRIS region {:d}".format(uid),
                                  d.getPolyData(), view)
        self.transform = seed_pose
        d.addSphere((0, 0, 0), radius=0.02)
        self.seedObj = vis.showPolyData(
            d.getPolyData(),
            'region seed',
            parent=om.getOrCreateContainer('IRIS region seeds'))
        self.seedObj.actor.SetUserTransform(self.transform)
        self.frameObj = vis.showFrame(self.transform,
                                      'region seed frame',
                                      scale=0.2,
                                      visible=False,
                                      parent=self.seedObj)
        self.frameObj.setProperty('Edit', True)

        self.frameObj.widget.HandleRotationEnabledOff()

        terrain = om.findObjectByName('HEIGHT_MAP_SCENE')
        if terrain:
            rep = self.frameObj.widget.GetRepresentation()
            rep.SetTranslateAxisEnabled(2, False)
            rep.SetRotateAxisEnabled(0, False)
            rep.SetRotateAxisEnabled(1, False)

            pos = np.array(self.frameObj.transform.GetPosition())
            polyData = filterUtils.removeNonFinitePoints(terrain.polyData)
            if polyData.GetNumberOfPoints():
                polyData = segmentation.labelDistanceToLine(
                    polyData, pos, pos + [0, 0, 1])
                polyData = segmentation.thresholdPoints(
                    polyData, 'distance_to_line', [0.0, 0.1])
                if polyData.GetNumberOfPoints():
                    pos[2] = np.nanmax(
                        vnp.getNumpyFromVtk(polyData, 'Points')[:, 2])
                    self.frameObj.transform.Translate(
                        pos - np.array(self.frameObj.transform.GetPosition()))

            self.placer = PlacerWidget(view, self.seedObj, terrain)
            self.placer.start()
        else:
            self.frameObj.setProperty('Edit', True)
            self.frameObj.setProperty('Visible', True)

        self.driver = irisDriver
        self.safe_region = None
        self.addProperty('Visible', True)
        self.addProperty('Enabled for Walking', True)
        self.addProperty('Alpha', 1.0)
        self.addProperty('Color', QtGui.QColor(200, 200, 20))

        self.frameObj.connectFrameModified(self.onFrameModified)
        if existing_region is None:
            self.onFrameModified(self.frameObj)
        else:
            self.setRegion(existing_region)

        self.setProperty('Alpha', 0.5)
        self.setProperty('Color', QtGui.QColor(220, 220, 220))
Example #12
    def fitRunningBoardAtFeet(self):

        # get stance frame
        startPose = self.getPlanningStartPose()
        stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(
            self.robotSystem.robotStateModel, useWorldZ=False)
        stanceFrameAxes = transformUtils.getAxesFromTransform(stanceFrame)

        # get pointcloud and extract search region covering the running board
        polyData = segmentation.getCurrentRevolutionData()
        polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01)
        _, polyData = segmentation.removeGround(polyData)
        polyData = segmentation.cropToBox(polyData, stanceFrame,
                                          [1.0, 1.0, 0.1])

        if not polyData.GetNumberOfPoints():
            print('empty search region point cloud')
            return

        vis.updatePolyData(polyData,
                           'running board search points',
                           parent=segmentation.getDebugFolder(),
                           color=[0, 1, 0],
                           visible=False)

        # extract maximal points along the stance x axis
        perpAxis = stanceFrameAxes[0]
        edgeAxis = stanceFrameAxes[1]
        edgePoints = segmentation.computeEdge(polyData, edgeAxis, perpAxis)
        edgePoints = vnp.getVtkPolyDataFromNumpyPoints(edgePoints)
        vis.updatePolyData(edgePoints,
                           'edge points',
                           parent=segmentation.getDebugFolder(),
                           visible=True)

        # ransac fit a line to the edge points
        linePoint, lineDirection, fitPoints = segmentation.applyLineFit(
            edgePoints)
        if np.dot(lineDirection, stanceFrameAxes[1]) < 0:
            lineDirection = -lineDirection

        linePoints = segmentation.thresholdPoints(fitPoints, 'ransac_labels',
                                                  [1.0, 1.0])
        dists = np.dot(
            vnp.getNumpyFromVtk(linePoints, 'Points') - linePoint,
            lineDirection)
        p1 = linePoint + lineDirection * np.min(dists)
        p2 = linePoint + lineDirection * np.max(dists)
        vis.updatePolyData(fitPoints,
                           'line fit points',
                           parent=segmentation.getDebugFolder(),
                           colorByName='ransac_labels',
                           visible=False)

        # compute a new frame that is in plane with the stance frame
        # and matches the orientation and position of the detected edge
        origin = np.array(stanceFrame.GetPosition())
        normal = np.array(stanceFrameAxes[2])

        # project stance origin to edge, then back to foot frame
        originProjectedToEdge = linePoint + lineDirection * np.dot(
            origin - linePoint, lineDirection)
        originProjectedToPlane = segmentation.projectPointToPlane(
            originProjectedToEdge, origin, normal)
        zaxis = np.array(stanceFrameAxes[2])
        yaxis = np.array(lineDirection)
        xaxis = np.cross(yaxis, zaxis)
        xaxis /= np.linalg.norm(xaxis)
        yaxis = np.cross(zaxis, xaxis)
        yaxis /= np.linalg.norm(yaxis)

        d = DebugData()
        d.addSphere(p1, radius=0.005)
        d.addSphere(p2, radius=0.005)
        d.addLine(p1, p2)
        d.addSphere(originProjectedToEdge, radius=0.001, color=[1, 0, 0])
        d.addSphere(originProjectedToPlane, radius=0.001, color=[0, 1, 0])
        d.addLine(originProjectedToPlane, origin, color=[0, 1, 0])
        d.addLine(originProjectedToEdge, origin, color=[1, 0, 0])
        vis.updatePolyData(d.getPolyData(),
                           'running board edge',
                           parent=segmentation.getDebugFolder(),
                           colorByName='RGB255',
                           visible=False)

        # update the running board box affordance position and orientation to
        # fit the detected edge
        box = self.spawnRunningBoardAffordance()
        boxDimensions = box.getProperty('Dimensions')
        t = transformUtils.getTransformFromAxesAndOrigin(
            xaxis, yaxis, zaxis, originProjectedToPlane)
        t.PreMultiply()
        t.Translate(-boxDimensions[0] / 2.0, 0.0, -boxDimensions[2] / 2.0)
        box.getChildFrame().copyFrame(t)

        self.initialize()
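
The frame construction near the end takes the fitted edge direction as the y axis, the stance normal as the z axis, and re-orthogonalizes with cross products. A small numpy sketch of just that step (frame_from_edge_and_normal is a hypothetical helper):

import numpy as np

def frame_from_edge_and_normal(line_direction, plane_normal):
    zaxis = np.asarray(plane_normal, dtype=float)
    yaxis = np.asarray(line_direction, dtype=float)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)    # recompute y so the triad is exactly orthogonal
    yaxis /= np.linalg.norm(yaxis)
    return xaxis, yaxis, zaxis

# an edge roughly along +y on level ground
print(frame_from_edge_and_normal([0.1, 1.0, 0.0], [0.0, 0.0, 1.0]))
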
Example #13
view = app.createView()
segmentation._defaultSegmentationView = view

robotStateModel, robotStateJointController = roboturdf.loadRobotModel('robot state model', view, parent='sensors', color=roboturdf.getRobotGrayColor(), visible=True)
segmentationroutines.SegmentationContext.initWithRobot(robotStateModel)

# load poly data
dataDir = app.getTestingDataDirectory()
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'amazon-pod/01-small-changes.vtp'))
vis.showPolyData(polyData, 'pointcloud snapshot', visible=False)

# remove ground and clip to just the pod:
groundPoints, polyData = segmentation.removeGround(polyData)
vis.showPolyData(polyData, 'scene', visible=False)
polyData = segmentation.addCoordArraysToPolyData(polyData)
polyData = segmentation.thresholdPoints(polyData, 'y', [1, 1.6])
polyData = segmentation.thresholdPoints(polyData, 'x', [-1.2, 0.5])
vis.showPolyData(polyData, 'clipped', visible=False)

# remove outliers
polyData = segmentation.labelOutliers(polyData, searchRadius=0.03, neighborsInSearchRadius=40)
polyData = segmentation.thresholdPoints(polyData, 'is_outlier', [0, 0])
vis.showPolyData(polyData, 'inliers', visible=False)

# remove walls, and points behind them:
polyData = removePlaneAndBeyond(polyData, expectedNormal=[0,1,0], filterRange=[-np.inf, -0.03], whichAxis=1, whichAxisLetter='y', percentile = 95)
polyData = removePlaneAndBeyond(polyData, expectedNormal=[1,0,0], filterRange=[-np.inf, -0.03], whichAxis=0, whichAxisLetter='x', percentile = 95)
polyData = removePlaneAndBeyond(polyData, expectedNormal=[1,0,0], filterRange=[0.03, np.inf], whichAxis=0, whichAxisLetter='x', percentile = 5)

vis.updatePolyData(polyData, 'only shelves', parent='segmentation', visible=False)
Example #14
    def replanFootsteps(self, polyData, standingFootName, removeFirstLeftStep=True, doStereoFiltering=True, nextDoubleSupportPose=None):

        obj = om.getOrCreateContainer('continuous')
        om.getOrCreateContainer('cont debug', obj)

        vis.updatePolyData( polyData, 'walking snapshot', parent='cont debug', visible=False)

        standingFootFrame = self.robotStateModel.getLinkFrame(standingFootName)
        vis.updateFrame(standingFootFrame, standingFootName, parent='cont debug', visible=False)
        # TODO: remove the pitch and roll of this frame to support it being on uneven ground

        # Step 1: filter the data down to a box in front of the robot:
        polyData = self.getRecedingTerrainRegion(polyData, footstepsdriver.FootstepsDriver.getFeetMidPoint(self.robotStateModel))
        if (doStereoFiltering is True):
            # used for stereo data:
            polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01)
            polyData = segmentation.labelOutliers(polyData, searchRadius=0.06, neighborsInSearchRadius=15) # 0.06 and 10 originally
            vis.updatePolyData(polyData, 'voxel plane points', parent='cont debug', colorByName='is_outlier', visible=False)
            polyData = segmentation.thresholdPoints(polyData, 'is_outlier', [0, 0])
            vis.updatePolyData( polyData, 'walking snapshot trimmed', parent='cont debug', visible=True)

        # Step 2: find all the surfaces in front of the robot (about 0.75sec)
        clusters = segmentation.findHorizontalSurfaces(polyData, removeGroundFirst=False, normalEstimationSearchRadius=0.05,
                                                       clusterTolerance=0.025, distanceToPlaneThreshold=0.0025, normalsDotUpRange=[0.95, 1.0])
        if (clusters is None):
            print("No cluster found, stop walking now!")
            return

        # Step 3: find the corners of the minimum bounding rectangles
        blocks,groundPlane = self.extractBlocksFromSurfaces(clusters, standingFootFrame)

        # Step 5: Find the two foot positions we should plan with: the next committed step and the current standing foot
        '''
        if (self.committedStep is not None):
          #print "i got a committedStep. is_right_foot?" , self.committedStep.is_right_foot
          if (self.committedStep.is_right_foot):
              standingFootTransform = self.robotStateModel.getLinkFrame(self.ikPlanner.leftFootLink)
              nextDoubleSupportPose = self.getNextDoubleSupportPose(standingFootTransform, self.committedStep.transform)
          else:
              standingFootTransform = self.robotStateModel.getLinkFrame(self.ikPlanner.rightFootLink)
              nextDoubleSupportPose = self.getNextDoubleSupportPose(self.committedStep.transform, standingFootTransform)

          comm_mesh,comm_color = self.getMeshAndColor(self.committedStep.is_right_foot)
          comm_color[1] = 0.75 ; comm_color[2] = 0.25
          stand_mesh, stand_color = self.getMeshAndColor( not self.committedStep.is_right_foot )
          stand_color[1] = 0.75 ; stand_color[2] = 0.25
          vis.updateFrame(self.committedStep.transform, 'committed foot', parent='foot placements', scale=0.2, visible=False)
          obj = vis.showPolyData(comm_mesh, 'committed step', color=comm_color, alpha=1.0, parent='steps')
          obj.actor.SetUserTransform(self.committedStep.transform)
          vis.updateFrame(standingFootTransform, 'standing foot', parent='foot placements', scale=0.2, visible=False)
          obj = vis.showPolyData(stand_mesh, 'standing step', color=stand_color, alpha=1.0, parent='steps')
          obj.actor.SetUserTransform(standingFootTransform)

        else:
            # don't have a committed footstep, assume we are standing still
            nextDoubleSupportPose = self.robotStateJointController.getPose('EST_ROBOT_STATE')
        '''


        self.displayExpectedPose(nextDoubleSupportPose)


        if not self.useManualFootstepPlacement and self.queryPlanner:
            footsteps = self.computeFootstepPlanSafeRegions(blocks, nextDoubleSupportPose, standingFootName)

        else:

            footsteps = self.placeStepsOnBlocks(blocks, groundPlane, standingFootName, standingFootFrame, removeFirstLeftStep)

            if not len(footsteps):
                return

            if self.queryPlanner:
                self.sendPlanningRequest(footsteps, nextDoubleSupportPose)
            else:
                self.drawFittedSteps(footsteps)
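
Step 1's stereo filtering drops isolated returns by counting neighbours within a search radius. The sketch below reproduces that idea with scipy's cKDTree; segmentation.labelOutliers does this inside the VTK/PCL pipeline, so treat this as an illustration rather than the director implementation.

import numpy as np
from scipy.spatial import cKDTree

def label_outliers(points, search_radius=0.06, min_neighbors=15):
    tree = cKDTree(points)
    # neighbour counts include the query point itself
    counts = np.array([len(tree.query_ball_point(p, search_radius)) for p in points])
    return counts < min_neighbors     # True where a point is considered an outlier

pts = np.random.uniform(0.0, 1.0, size=(200, 3))
print(label_outliers(pts).sum(), "points flagged as outliers")
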
Example #15
    def fitRunningBoardAtFeet(self):

        # get stance frame
        startPose = self.getPlanningStartPose()
        stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(self.robotSystem.robotStateModel, useWorldZ=False)
        stanceFrameAxes = transformUtils.getAxesFromTransform(stanceFrame)

        # get pointcloud and extract search region covering the running board
        polyData = segmentation.getCurrentRevolutionData()
        polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01)
        _, polyData = segmentation.removeGround(polyData)
        polyData = segmentation.cropToBox(polyData, stanceFrame, [1.0, 1.0, 0.1])

        if not polyData.GetNumberOfPoints():
            print('empty search region point cloud')
            return

        vis.updatePolyData(polyData, 'running board search points', parent=segmentation.getDebugFolder(), color=[0,1,0], visible=False)

        # extract maximal points along the stance x axis
        perpAxis = stanceFrameAxes[0]
        edgeAxis = stanceFrameAxes[1]
        edgePoints = segmentation.computeEdge(polyData, edgeAxis, perpAxis)
        edgePoints = vnp.getVtkPolyDataFromNumpyPoints(edgePoints)
        vis.updatePolyData(edgePoints, 'edge points', parent=segmentation.getDebugFolder(), visible=True)

        # ransac fit a line to the edge points
        linePoint, lineDirection, fitPoints = segmentation.applyLineFit(edgePoints)
        if np.dot(lineDirection, stanceFrameAxes[1]) < 0:
            lineDirection = -lineDirection

        linePoints = segmentation.thresholdPoints(fitPoints, 'ransac_labels', [1.0, 1.0])
        dists = np.dot(vnp.getNumpyFromVtk(linePoints, 'Points')-linePoint, lineDirection)
        p1 = linePoint + lineDirection*np.min(dists)
        p2 = linePoint + lineDirection*np.max(dists)
        vis.updatePolyData(fitPoints, 'line fit points', parent=segmentation.getDebugFolder(), colorByName='ransac_labels', visible=False)


        # compute a new frame that is in plane with the stance frame
        # and matches the orientation and position of the detected edge
        origin = np.array(stanceFrame.GetPosition())
        normal = np.array(stanceFrameAxes[2])

        # project stance origin to edge, then back to foot frame
        originProjectedToEdge = linePoint + lineDirection*np.dot(origin - linePoint, lineDirection)
        originProjectedToPlane = segmentation.projectPointToPlane(originProjectedToEdge, origin, normal)
        zaxis = np.array(stanceFrameAxes[2])
        yaxis = np.array(lineDirection)
        xaxis = np.cross(yaxis, zaxis)
        xaxis /= np.linalg.norm(xaxis)
        yaxis = np.cross(zaxis, xaxis)
        yaxis /= np.linalg.norm(yaxis)

        d = DebugData()
        d.addSphere(p1, radius=0.005)
        d.addSphere(p2, radius=0.005)
        d.addLine(p1, p2)
        d.addSphere(originProjectedToEdge, radius=0.001, color=[1,0,0])
        d.addSphere(originProjectedToPlane, radius=0.001, color=[0,1,0])
        d.addLine(originProjectedToPlane, origin, color=[0,1,0])
        d.addLine(originProjectedToEdge, origin, color=[1,0,0])
        vis.updatePolyData(d.getPolyData(), 'running board edge', parent=segmentation.getDebugFolder(), colorByName='RGB255', visible=False)

        # update the running board box affordance position and orientation to
        # fit the detected edge
        box = self.spawnRunningBoardAffordance()
        boxDimensions = box.getProperty('Dimensions')
        t = transformUtils.getTransformFromAxesAndOrigin(xaxis, yaxis, zaxis, originProjectedToPlane)
        t.PreMultiply()
        t.Translate(-boxDimensions[0]/2.0, 0.0, -boxDimensions[2]/2.0)
        box.getChildFrame().copyFrame(t)

        self.initialize()
Example #16
    parent='sensors',
    color=roboturdf.getRobotGrayColor(),
    visible=True)
segmentationroutines.SegmentationContext.initWithRobot(robotStateModel)

# load poly data
dataDir = app.getTestingDataDirectory()
polyData = ioUtils.readPolyData(
    os.path.join(dataDir, 'amazon-pod/01-small-changes.vtp'))
vis.showPolyData(polyData, 'pointcloud snapshot', visible=False)

# remove ground and clip to just the pod:
groundPoints, polyData = segmentation.removeGround(polyData)
vis.showPolyData(polyData, 'scene', visible=False)
polyData = segmentation.addCoordArraysToPolyData(polyData)
polyData = segmentation.thresholdPoints(polyData, 'y', [1, 1.6])
polyData = segmentation.thresholdPoints(polyData, 'x', [-1.2, 0.5])
vis.showPolyData(polyData, 'clipped', visible=False)

# remove outliers
polyData = segmentation.labelOutliers(polyData,
                                      searchRadius=0.03,
                                      neighborsInSearchRadius=40)
polyData = segmentation.thresholdPoints(polyData, 'is_outlier', [0, 0])
vis.showPolyData(polyData, 'inliers', visible=False)

# remove walls, and points behind them:
polyData = removePlaneAndBeyond(polyData,
                                expectedNormal=[0, 1, 0],
                                filterRange=[-np.inf, -0.03],
                                whichAxis=1,