Code example #1
File: surprisetask.py  Project: rdeits/director
    def fitSwitchBox(self, polyData, points):
        boxPosition = points[0]
        wallPoint = points[1]


        # find a frame that is aligned with wall
        searchRadius = 0.2
        planePoints, normal = segmentation.applyLocalPlaneFit(polyData, points[0], searchRadius=np.linalg.norm(points[1] - points[0]), searchRadiusEnd=1.0)

        obj = vis.updatePolyData(planePoints, 'wall plane points', color=[0,1,0], visible=False)
        obj.setProperty('Point Size', 7)

        viewDirection = segmentation.SegmentationContext.getGlobalInstance().getViewDirection()
        if np.dot(normal, viewDirection) < 0:
            normal = -normal

        origin = segmentation.computeCentroid(planePoints)

        zaxis = [0,0,1]
        xaxis = normal
        yaxis = np.cross(zaxis, xaxis)
        yaxis /= np.linalg.norm(yaxis)
        zaxis = np.cross(xaxis, yaxis)
        zaxis /= np.linalg.norm(zaxis)

        t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)

        # translate that frame to the box position
        t.PostMultiply()
        t.Translate(boxPosition)

        boxFrame = transformUtils.copyFrame(t)
        self.switchPlanner.spawnBoxAffordanceAtFrame(boxFrame)
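
These snippets are method bodies lifted out of the director codebase, so the module aliases they rely on (np, segmentation, vis, transformUtils, and, further down, vnp, om, vtkNumpy, math, qhull_2d, min_bounding_rect) are never shown. Below is a minimal sketch of the imports the examples appear to assume; the exact module paths are an assumption based on the director package layout, not something this listing confirms, and the qhull_2d / min_bounding_rect helpers used later are not reproduced here.

import math
import numpy as np

# Assumed director module aliases (hypothetical reconstruction of the missing imports)
from director import segmentation
from director import transformUtils
from director import visualization as vis
from director import vtkNumpy as vnp   # referenced directly as vtkNumpy in the later examples
from director import objectmodel as om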
Code example #2
File: testAmazonPod.py  Project: mlab-upenn/arch-apex
def fitObjectsOnShelf(polyData, maxHeight = 0.25):
    # find the shelf plane:
    polyDataWithoutFront, _ = segmentation.removeMajorPlane(polyData, distanceThreshold=0.02)
    polyDataPlaneFit, origin, normal = segmentation.applyPlaneFit(polyDataWithoutFront,  expectedNormal=np.array([0.0,0.0,1.0]), perpendicularAxis=np.array([0.0,0.0,1.0]), returnOrigin=True)
    vis.updatePolyData(polyDataPlaneFit, 'polyDataPlaneFit', parent='segmentation', visible=False)

    shelfSurfacePoints = segmentation.thresholdPoints(polyDataPlaneFit, 'dist_to_plane', [-0.01, 0.01])
    shelfCenter = segmentation.computeCentroid(shelfSurfacePoints)
    shelfFrame = transformUtils.getTransformFromOriginAndNormal(shelfCenter, normal, normalAxis=2)
    vis.showFrame(shelfFrame, 'shelfFrame', parent='segmentation', scale=0.15 , visible=False)

    # find the points near to the shelf plane and find objects on it:
    points = vnp.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vnp.addNumpyToVtk(polyData, dist, 'dist_to_plane')
    shelfPoints = segmentation.thresholdPoints(polyData, 'dist_to_plane', [-0.01, maxHeight])
    vis.updatePolyData(shelfPoints, 'shelf', parent='segmentation', visible=False)

    data = segmentation.segmentTableScene(shelfPoints, shelfCenter, filterClustering = False )
    vis.showClusterObjects(data.clusters + [data.table], parent='segmentation')

    # remove the points that we considered from the original cloud
    dists = vnp.getNumpyFromVtk(polyData, 'dist_to_plane')
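    # boolean mask of points outside the [-0.01, maxHeight] shelf band; the "+ 0.1 - 0.1" below just casts it to a float array for thresholdPoints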
    diffShelf = ( ((dists > maxHeight) + (dists < -0.01))) + 0.1 -0.1
    vnp.addNumpyToVtk(polyData, diffShelf, 'diff_shelf')
    polyData = segmentation.thresholdPoints(polyData, 'diff_shelf', [1, 1])

    vis.updatePolyData(polyData, 'rest', parent='segmentation', visible=False)
    return polyData
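
The object-on-shelf step above hinges on the signed point-to-plane distance dist = np.dot(points - origin, normal), which is then thresholded to keep everything from just below the shelf surface up to maxHeight above it. Here is a self-contained sketch of that classification in plain numpy, with synthetic data and no director dependencies:

import numpy as np

# Synthetic stand-in for a point cloud: a flat shelf at z = 0.8 plus a small box sitting on it.
rng = np.random.default_rng(0)
shelf = np.column_stack([rng.uniform(0.0, 1.0, 500), rng.uniform(0.0, 0.4, 500), np.full(500, 0.8)])
box = np.column_stack([rng.uniform(0.4, 0.5, 100), rng.uniform(0.1, 0.2, 100), rng.uniform(0.8, 0.95, 100)])
points = np.vstack([shelf, box])

origin = np.array([0.5, 0.2, 0.8])   # any point on the fitted shelf plane
normal = np.array([0.0, 0.0, 1.0])   # plane normal returned by the plane fit
maxHeight = 0.25

# Signed distance of every point to the plane, positive along the normal.
dist = np.dot(points - origin, normal)

# Same band as fitObjectsOnShelf: keep points from slightly below the plane up to maxHeight above it.
onShelf = (dist > -0.01) & (dist < maxHeight)
print('points kept on or above the shelf:', int(onShelf.sum()), 'of', len(points))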
Code example #3
File: tabledemo.py  Project: edowson/director
    def segmentTableObjects(self):

        tableCentroid = segmentation.computeCentroid(self.tableData.box)
        self.tableData.frame.TransformPoint(tableCentroid, tableCentroid)

        data = segmentation.segmentTableScene(self.getInputPointCloud(), tableCentroid)
        data.clusters = self.sortClustersOnTable(data.clusters)

        self.clusterObjects = vis.showClusterObjects(data.clusters, parent='segmentation')
        self.segmentationData = data
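
Here the table centroid is pushed through the table's frame (a vtkTransform) before it is handed to segmentTableScene; the two-argument TransformPoint call writes the result back into the same array. A minimal sketch of that transform step with plain VTK and made-up numbers, assuming VTK's Python bindings are available:

import vtk

# A frame roughly like tableData.frame: a yaw plus a translation.
frame = vtk.vtkTransform()
frame.PostMultiply()
frame.RotateZ(30.0)           # degrees
frame.Translate(1.0, 2.0, 0.9)

centroidLocal = (0.1, -0.2, 0.0)
# The single-argument form returns the mapped point as a tuple; the snippet above
# uses the two-argument form, which fills its second argument in place instead.
centroidWorld = frame.TransformPoint(centroidLocal)
print(centroidWorld)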
Code example #4
File: robottasks.py  Project: simalpha/director
    def run(self):

        polyData = self.getPointCloud()
        annotation = self.getAnnotationInput()
        annotationPoint = annotation.annotationPoints[0]

        mesh = segmentation.fitShelfItem(polyData, annotationPoint, clusterTolerance=self.properties.getProperty('Cluster tolerance'))

        annotation.setProperty('Visible', False)
        om.removeFromObjectModel(om.findObjectByName('shelf item'))
        obj = vis.showPolyData(mesh, 'shelf item', color=[0,1,0])
        t = transformUtils.frameFromPositionAndRPY(segmentation.computeCentroid(mesh), [0,0,0])
        segmentation.makeMovable(obj, t)
Code example #5
def fitObjectsOnShelf(polyData, maxHeight=0.25):
    # find the shelf plane:
    polyDataWithoutFront, _ = segmentation.removeMajorPlane(
        polyData, distanceThreshold=0.02)
    polyDataPlaneFit, origin, normal = segmentation.applyPlaneFit(
        polyDataWithoutFront,
        expectedNormal=np.array([0.0, 0.0, 1.0]),
        perpendicularAxis=np.array([0.0, 0.0, 1.0]),
        returnOrigin=True)
    vis.updatePolyData(polyDataPlaneFit,
                       'polyDataPlaneFit',
                       parent='segmentation',
                       visible=False)

    shelfSurfacePoints = segmentation.thresholdPoints(polyDataPlaneFit,
                                                      'dist_to_plane',
                                                      [-0.01, 0.01])
    shelfCenter = segmentation.computeCentroid(shelfSurfacePoints)
    shelfFrame = transformUtils.getTransformFromOriginAndNormal(shelfCenter,
                                                                normal,
                                                                normalAxis=2)
    vis.showFrame(shelfFrame,
                  'shelfFrame',
                  parent='segmentation',
                  scale=0.15,
                  visible=False)

    # find the points near to the shelf plane and find objects on it:
    points = vnp.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vnp.addNumpyToVtk(polyData, dist, 'dist_to_plane')
    shelfPoints = segmentation.thresholdPoints(polyData, 'dist_to_plane',
                                               [-0.01, maxHeight])
    vis.updatePolyData(shelfPoints,
                       'shelf',
                       parent='segmentation',
                       visible=False)

    data = segmentation.segmentTableScene(shelfPoints,
                                          shelfCenter,
                                          filterClustering=False)
    vis.showClusterObjects(data.clusters + [data.table], parent='segmentation')

    # remove the points that we considered from the original cloud
    dists = vnp.getNumpyFromVtk(polyData, 'dist_to_plane')
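    # boolean mask of points outside the [-0.01, maxHeight] shelf band; the "+ 0.1 - 0.1" below just casts it to a float array for thresholdPoints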
    diffShelf = (((dists > maxHeight) + (dists < -0.01))) + 0.1 - 0.1
    vnp.addNumpyToVtk(polyData, diffShelf, 'diff_shelf')
    polyData = segmentation.thresholdPoints(polyData, 'diff_shelf', [1, 1])

    vis.updatePolyData(polyData, 'rest', parent='segmentation', visible=False)
    return polyData
Code example #6
File: robottasks.py  Project: liutaotao369/director
    def run(self):

        polyData = self.getPointCloud()
        annotation = self.getAnnotationInput()
        annotationPoint = annotation.annotationPoints[0]

        mesh = segmentation.fitShelfItem(
            polyData,
            annotationPoint,
            clusterTolerance=self.properties.getProperty('Cluster tolerance'))

        annotation.setProperty('Visible', False)
        om.removeFromObjectModel(om.findObjectByName('shelf item'))
        obj = vis.showPolyData(mesh, 'shelf item', color=[0, 1, 0])
        t = transformUtils.frameFromPositionAndRPY(
            segmentation.computeCentroid(mesh), [0, 0, 0])
        segmentation.makeMovable(obj, t)
Code example #7
    def findMinimumBoundingRectangle(self, polyData, linkFrame):
        '''
        Find minimum bounding rectangle.
        The input is assumed to be a rectangular point cloud of cinder blocks
        Returns transform of far right corner (pointing away from robot)
        '''
        # TODO: for non-z up surfaces, this needs work
        # TODO: return other parameters

        # Originally From: https://github.com/dbworth/minimum-area-bounding-rectangle
        polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.02)
        #vis.updatePolyData( polyData, 'block top', parent='cont debug', visible=False)
        polyDataCentroid = segmentation.computeCentroid(polyData)
        pts =vtkNumpy.getNumpyFromVtk( polyData , 'Points' )

        xy_points =  pts[:,[0,1]]
        vis.updatePolyData( get2DAsPolyData(xy_points) , 'xy_points', parent='cont debug', visible=False)
        hull_points = qhull_2d.qhull2D(xy_points)
        vis.updatePolyData( get2DAsPolyData(hull_points) , 'hull_points', parent='cont debug', visible=False)
        # Reverse order of points, to match output from other qhull implementations
        hull_points = hull_points[::-1]
        # print 'Convex hull points: \n', hull_points, "\n"

        # Find minimum area bounding rectangle
        (rot_angle, rectArea, rectDepth, rectWidth, center_point, corner_points_ground) = min_bounding_rect.minBoundingRect(hull_points)
        vis.updatePolyData( get2DAsPolyData(corner_points_ground) , 'corner_points_ground', parent='cont debug', visible=False)
        cornerPoints = np.vstack((corner_points_ground.T, polyDataCentroid[2]*np.ones( corner_points_ground.shape[0]) )).T
        cornerPolyData = vtkNumpy.getVtkPolyDataFromNumpyPoints(cornerPoints)

        # Create a frame at the far right point - which points away from the robot
        farRightCorner = self.findFarRightCorner(cornerPolyData , linkFrame)
        viewDirection = segmentation.SegmentationContext.getGlobalInstance().getViewDirection()
        robotYaw = math.atan2( viewDirection[1], viewDirection[0] )*180.0/np.pi
        blockAngle =  rot_angle*(180/math.pi)
        #print "robotYaw   ", robotYaw
        #print "blockAngle ", blockAngle
        blockAngleAll = np.array([blockAngle , blockAngle+90 , blockAngle+180, blockAngle+270])
        #print blockAngleAll
        for i in range(0, 4):
            if blockAngleAll[i] > 180:
                blockAngleAll[i] = blockAngleAll[i] - 360
        #print blockAngleAll
        values = abs(blockAngleAll - robotYaw)
        #print values
        min_idx = np.argmin(values)
        if min_idx == 1 or min_idx == 3:
            #print "flip rectDepth and rectWidth as angle is not away from robot"
            rectWidth, rectDepth = rectDepth, rectWidth

        #print "best angle", blockAngleAll[min_idx]
        rot_angle = blockAngleAll[min_idx]*math.pi/180.0


        # Optional: overwrite all of the above with the yaw of the robot when it was standing in front of the blocks:
        if (self.fixBlockYawWithInitial):
            rot_angle = self.initialRobotYaw

        cornerTransform = transformUtils.frameFromPositionAndRPY( farRightCorner , [0,0, np.rad2deg(rot_angle) ] )

        #print "Minimum area bounding box:"
        #print "Rotation angle:", rot_angle, "rad  (", rot_angle*(180/math.pi), "deg )"
        #print "rectDepth:", rectDepth, " rectWidth:", rectWidth, "  Area:", rectArea
        #print "Center point: \n", center_point # numpy array
        #print "Corner points: \n", cornerPoints, "\n"  # numpy array
        return cornerTransform, rectDepth, rectWidth, rectArea
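
The orientation step above builds four candidate yaw angles (the rectangle angle plus 90-degree multiples), folds anything above 180 degrees back into range, and keeps the candidate closest to the robot's view yaw, swapping rectDepth and rectWidth when a 90- or 270-degree candidate wins. A self-contained numpy sketch of that selection with made-up inputs (the wrap is written with a modulo here for brevity; the original only folds angles above 180 degrees):

import numpy as np

def pickBlockYaw(rectYawDeg, robotYawDeg, rectDepth, rectWidth):
    # Four equivalent orientations of the rectangle, wrapped into [-180, 180).
    candidates = rectYawDeg + np.array([0.0, 90.0, 180.0, 270.0])
    candidates = (candidates + 180.0) % 360.0 - 180.0

    # Keep the candidate whose yaw is closest to the robot's heading.
    idx = int(np.argmin(np.abs(candidates - robotYawDeg)))

    # A 90- or 270-degree candidate means depth and width trade places.
    if idx in (1, 3):
        rectDepth, rectWidth = rectWidth, rectDepth
    return candidates[idx], rectDepth, rectWidth

print(pickBlockYaw(rectYawDeg=10.0, robotYawDeg=95.0, rectDepth=0.4, rectWidth=0.2))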