Code example #1
File: iiwaplanning.py Project: manuelli/spartan-1
def fitSupport(pickPoint=[0.92858565, 0.00213802, 0.30315629]):

    om.removeFromObjectModel(om.findObjectByName('cylinder'))

    polyData = getPointCloud()

    t = vtk.vtkTransform()
    t.Translate(pickPoint)
    polyData = segmentation.cropToBox(polyData, t, [0.3, 0.3, 0.5])

    addHSVArrays(polyData)

    vis.updatePolyData(polyData,
                       'crop region',
                       colorByName='rgb_colors',
                       visible=False)

    zMax = getMaxZCoordinate(polyData)

    cyl = makeCylinder()
    cyl.setProperty('Radius', 0.03)
    cyl.setProperty('Length', zMax)

    origin = segmentation.computeCentroid(polyData)
    origin[2] = zMax / 2.0

    t = transformUtils.frameFromPositionAndRPY(origin, [0, 0, 0])
    cyl.getChildFrame().copyFrame(t)
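
A minimal usage sketch for fitSupport above, assuming it runs inside the Director/Spartan application where om, vis, and segmentation are already set up; the pick point below is a placeholder value, not one taken from the original project.

# Hypothetical call: fit a support cylinder at a user-picked point.
pickPoint = [0.93, 0.00, 0.30]   # world-frame [x, y, z], e.g. from a mouse pick
fitSupport(pickPoint=pickPoint)

# The fitted affordance appears to be registered under the name 'cylinder'
# (see the removeFromObjectModel call at the top of fitSupport):
cyl = om.findObjectByName('cylinder')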
Code example #2
    def cropPointCloudToModelBoundingBox(pointCloud,
                                         objectPointCloud,
                                         scaleFactor=1.5):
        print "cropping pointcloud to box"
        # f = segmentation.makePolyDataFields(objectPointCloud)
        f = GlobalRegistrationUtils.getOrientedBoundingBox(objectPointCloud)

        croppedPointCloud = segmentation.cropToBox(
            pointCloud, f.frame, scaleFactor * np.array(f.dims))
        return croppedPointCloud
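
A hedged call-site sketch for the helper above; scenePointCloud is an assumed variable name, and the enclosing class is not shown in the snippet, so the method is invoked directly here.

# Hypothetical usage: crop a scene cloud to 1.5x the oriented bounding box
# of the object model cloud before running global registration.
croppedScene = cropPointCloudToModelBoundingBox(scenePointCloud,
                                                objectPointCloud,
                                                scaleFactor=1.5)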
Code example #3
    def fit(self, polyData, points):
        iiwaplanning.fitSupport(pickPoint=points[0])
        # early return: everything below is unreachable legacy code, kept as a
        # reference for the manual HSV-threshold fitting pipeline
        return


        pickPoint = points[0]
        t = vtk.vtkTransform()
        t.Translate(pickPoint)

        print('pick point:', pickPoint)
        print('crop')

        polyData = segmentation.cropToBox(polyData, t, [0.3,0.3,0.5])


        import colorsys
        rgb = vnp.getNumpyFromVtk(polyData, 'rgb_colors')/255.0
        hsv = np.array([colorsys.rgb_to_hsv(*t) for t in rgb])
        vnp.addNumpyToVtk(polyData, hsv[:,0].copy(), 'hue')
        vnp.addNumpyToVtk(polyData, hsv[:,1].copy(), 'saturation')
        vnp.addNumpyToVtk(polyData, hsv[:,2].copy(), 'value')


        vis.updatePolyData(polyData, 'crop region', colorByName='rgb_colors', visible=False)

        # hide input data
        #om.findObjectByName(self.pointCloudObjectName).setProperty('Visible', False)

        #cluster = segmentation.makePolyDataFields(polyData)
        #vis.showClusterObjects([cluster], parent='segmentation')

        hueRange = [0.12, 0.14]
        valueRange = [0.5, 1.0]

        print('thresh')
        points = segmentation.thresholdPoints(
            segmentation.thresholdPoints(polyData, 'hue', hueRange),
            'value', valueRange)
        #points = segmentation.extractLargestCluster(points,  minClusterSize=10, clusterTolerance=0.02)

        vis.updatePolyData(points, 'pole points', color=[0,0,1], visible=False)


        maxZ = np.nanmax(vnp.getNumpyFromVtk(points, 'Points')[:,2])

        print('maxZ', maxZ)
        pickPoint = pickPoint[0], pickPoint[1], maxZ + 0.2
        print(pickPoint)


        t = vtk.vtkTransform()
        t.Translate(pickPoint)

        print('crop2')
        polyData = segmentation.cropToBox(polyData, t, [0.15,0.15,0.4])

        vis.updatePolyData(polyData, 'object points', colorByName='rgb_colors', visible=False)

        if polyData.GetNumberOfPoints() > 5:
            print('make fields')
            cluster = segmentation.makePolyDataFields(polyData)
            vis.showClusterObjects([cluster], parent='segmentation')

        print('done')
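
The unreachable branch above selects points by hue and value; a self-contained numpy sketch of that band-pass logic (independent of Director's thresholdPoints) might look like this:

import numpy as np

def hsv_band_pass(hue, value, hueRange=(0.12, 0.14), valueRange=(0.5, 1.0)):
    # boolean mask of points whose hue and value fall inside both ranges
    hue, value = np.asarray(hue), np.asarray(value)
    return ((hue >= hueRange[0]) & (hue <= hueRange[1]) &
            (value >= valueRange[0]) & (value <= valueRange[1]))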
Code example #4
    def fitRunningBoardAtFeet(self):

        # get stance frame
        startPose = self.getPlanningStartPose()
        stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(
            self.robotSystem.robotStateModel, useWorldZ=False)
        stanceFrameAxes = transformUtils.getAxesFromTransform(stanceFrame)

        # get pointcloud and extract search region covering the running board
        polyData = segmentation.getCurrentRevolutionData()
        polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01)
        _, polyData = segmentation.removeGround(polyData)
        polyData = segmentation.cropToBox(polyData, stanceFrame,
                                          [1.0, 1.0, 0.1])

        if not polyData.GetNumberOfPoints():
            print('empty search region point cloud')
            return

        vis.updatePolyData(polyData,
                           'running board search points',
                           parent=segmentation.getDebugFolder(),
                           color=[0, 1, 0],
                           visible=False)

        # extract maximal points along the stance x axis
        perpAxis = stanceFrameAxes[0]
        edgeAxis = stanceFrameAxes[1]
        edgePoints = segmentation.computeEdge(polyData, edgeAxis, perpAxis)
        edgePoints = vnp.getVtkPolyDataFromNumpyPoints(edgePoints)
        vis.updatePolyData(edgePoints,
                           'edge points',
                           parent=segmentation.getDebugFolder(),
                           visible=True)

        # ransac fit a line to the edge points
        linePoint, lineDirection, fitPoints = segmentation.applyLineFit(
            edgePoints)
        if np.dot(lineDirection, stanceFrameAxes[1]) < 0:
            lineDirection = -lineDirection

        linePoints = segmentation.thresholdPoints(fitPoints, 'ransac_labels',
                                                  [1.0, 1.0])
        dists = np.dot(
            vnp.getNumpyFromVtk(linePoints, 'Points') - linePoint,
            lineDirection)
        p1 = linePoint + lineDirection * np.min(dists)
        p2 = linePoint + lineDirection * np.max(dists)
        vis.updatePolyData(fitPoints,
                           'line fit points',
                           parent=segmentation.getDebugFolder(),
                           colorByName='ransac_labels',
                           visible=False)

        # compute a new frame that is in plane with the stance frame
        # and matches the orientation and position of the detected edge
        origin = np.array(stanceFrame.GetPosition())
        normal = np.array(stanceFrameAxes[2])

        # project stance origin to edge, then back to foot frame
        originProjectedToEdge = linePoint + lineDirection * np.dot(
            origin - linePoint, lineDirection)
        originProjectedToPlane = segmentation.projectPointToPlane(
            originProjectedToEdge, origin, normal)
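        # build an orthonormal, right-handed frame: z taken from the stance
        # frame, y along the detected edge, x completing the triad; yaxis is
        # then recomputed so the axes stay orthonormal even if lineDirection
        # is not exactly perpendicular to zaxis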
        zaxis = np.array(stanceFrameAxes[2])
        yaxis = np.array(lineDirection)
        xaxis = np.cross(yaxis, zaxis)
        xaxis /= np.linalg.norm(xaxis)
        yaxis = np.cross(zaxis, xaxis)
        yaxis /= np.linalg.norm(yaxis)

        d = DebugData()
        d.addSphere(p1, radius=0.005)
        d.addSphere(p2, radius=0.005)
        d.addLine(p1, p2)
        d.addSphere(originProjectedToEdge, radius=0.001, color=[1, 0, 0])
        d.addSphere(originProjectedToPlane, radius=0.001, color=[0, 1, 0])
        d.addLine(originProjectedToPlane, origin, color=[0, 1, 0])
        d.addLine(originProjectedToEdge, origin, color=[1, 0, 0])
        vis.updatePolyData(d.getPolyData(),
                           'running board edge',
                           parent=segmentation.getDebugFolder(),
                           colorByName='RGB255',
                           visible=False)

        # update the running board box affordance position and orientation to
        # fit the detected edge
        box = self.spawnRunningBoardAffordance()
        boxDimensions = box.getProperty('Dimensions')
        t = transformUtils.getTransformFromAxesAndOrigin(
            xaxis, yaxis, zaxis, originProjectedToPlane)
        t.PreMultiply()
        t.Translate(-boxDimensions[0] / 2.0, 0.0, -boxDimensions[2] / 2.0)
        box.getChildFrame().copyFrame(t)

        self.initialize()
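
The last step above shifts the fitted frame by half the affordance dimensions along its own local axes; a standalone sketch of that vtkTransform pattern (the dimensions and rotation are placeholders, not values from the project):

import vtk

boxDimensions = [0.5, 1.0, 0.1]     # placeholder dimensions, in meters
t = vtk.vtkTransform()              # stands in for the frame built from xaxis/yaxis/zaxis
t.RotateZ(30.0)                     # arbitrary orientation for illustration
t.PreMultiply()                     # later calls are applied in the local (frame) coordinates
t.Translate(-boxDimensions[0] / 2.0, 0.0, -boxDimensions[2] / 2.0)
print(t.GetPosition())              # origin moved along the frame's own -x and -z axes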