Example No. 1
import numpy as np

from director import transformUtils  # transform utilities as provided by the director framework (assumed import path)


def getCameraTransform(camera):
    """
    Camera transform has X - right, Y - down, Z - forward.
    Note that in VTK, GetViewUp and the forward direction (focal point minus
    position) are not necessarily orthogonal.
    """
    focal_point = np.array(camera.GetFocalPoint())
    position = np.array(camera.GetPosition())
    view_up = np.array(camera.GetViewUp())

    forward_dir = focal_point - position
    if np.linalg.norm(forward_dir) < 1e-8:
        print("forward_dir norm was very small, setting to [1, 0, 0]")
        forward_dir = np.array([1.0, 0.0, 0.0])

    up_dir = np.array(view_up)

    # Keep the forward direction exact and re-derive the down axis so the
    # three axes form a right-handed, mutually orthogonal frame.
    yaxis = -up_dir
    zaxis = forward_dir
    xaxis = np.cross(yaxis, zaxis)
    yaxis = np.cross(zaxis, xaxis)

    # normalize the axes
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis /= np.linalg.norm(zaxis)

    return transformUtils.getTransformFromAxesAndOrigin(
        xaxis, yaxis, zaxis, position)
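
For context, here is a minimal usage sketch of the function above. It assumes VTK's Python bindings and that the getCameraTransform definition (with its imports) is in scope; the camera parameters are arbitrary illustration values.

import vtk

camera = vtk.vtkCamera()
camera.SetPosition(1.0, 2.0, 3.0)      # arbitrary example pose
camera.SetFocalPoint(0.0, 0.0, 0.0)
camera.SetViewUp(0.0, 0.0, 1.0)

# Camera-to-world transform with X right, Y down, Z forward.
cameraToWorld = getCameraTransform(camera)
print(cameraToWorld.GetPosition())  # translation component equals camera.GetPosition()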
Example No. 3
    def spawnTableFrame(self):
        pointOnCloseTableEdge = self.measurementPanel.pickPoints[0]
        pointOnTable = self.measurementPanel.pickPoints[1]
        pointAboveTable = self.measurementPanel.pickPoints[2]
        scenePolyData = self.openniDepthPointCloud.polyData
        d = GRUtils.segmentTable(scenePolyData=scenePolyData,
                                 searchRadius=0.3,
                                 visualize=False,
                                 thickness=0.01,
                                 pointOnTable=pointOnTable,
                                 pointAboveTable=pointAboveTable)

        origin = d['pointOnTable']
        normal = d['normal']

        yaxis = -normal
        zaxis = pointOnTable - pointOnCloseTableEdge
        xaxis = np.cross(yaxis, zaxis)
        # frame = transformUtils.getTransformFromOriginAndNormal(origin, normal)
        frame = transformUtils.getTransformFromAxesAndOrigin(
            xaxis, yaxis, zaxis, origin)
        self.tableFrame = vis.updateFrame(frame,
                                          'table frame',
                                          parent=self.visFolder,
                                          scale=0.15)
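
Note that the axes passed to getTransformFromAxesAndOrigin here are neither unit length nor guaranteed to be mutually orthogonal (the picked edge direction need not be perpendicular to the table normal). If the helper does not orthonormalize its inputs, the axes could first be re-orthogonalized with the same cross-product construction used in Example No. 1. A minimal numpy sketch follows; the helper name is hypothetical and the axis naming mirrors the snippet above.

import numpy as np

def orthonormalizeTableAxes(yaxis, zaxis):
    # Keep the table edge direction (zaxis) exact and rebuild a right-handed,
    # orthonormal triad around it, re-deriving yaxis (the downward normal).
    zaxis = np.asarray(zaxis, dtype=float)
    zaxis = zaxis / np.linalg.norm(zaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis = xaxis / np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)  # unit length by construction
    return xaxis, yaxis, zaxis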
Example No. 4
    def fitRunningBoardAtFeet(self):

        # get stance frame
        startPose = self.getPlanningStartPose()
        stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(
            self.robotSystem.robotStateModel, useWorldZ=False)
        stanceFrameAxes = transformUtils.getAxesFromTransform(stanceFrame)

        # get pointcloud and extract search region covering the running board
        polyData = segmentation.getCurrentRevolutionData()
        polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01)
        _, polyData = segmentation.removeGround(polyData)
        polyData = segmentation.cropToBox(polyData, stanceFrame,
                                          [1.0, 1.0, 0.1])

        if not polyData.GetNumberOfPoints():
            print('empty search region point cloud')
            return

        vis.updatePolyData(polyData,
                           'running board search points',
                           parent=segmentation.getDebugFolder(),
                           color=[0, 1, 0],
                           visible=False)

        # extract maximal points along the stance x axis
        perpAxis = stanceFrameAxes[0]
        edgeAxis = stanceFrameAxes[1]
        edgePoints = segmentation.computeEdge(polyData, edgeAxis, perpAxis)
        edgePoints = vnp.getVtkPolyDataFromNumpyPoints(edgePoints)
        vis.updatePolyData(edgePoints,
                           'edge points',
                           parent=segmentation.getDebugFolder(),
                           visible=True)

        # ransac fit a line to the edge points
        linePoint, lineDirection, fitPoints = segmentation.applyLineFit(
            edgePoints)
        if np.dot(lineDirection, stanceFrameAxes[1]) < 0:
            lineDirection = -lineDirection

        linePoints = segmentation.thresholdPoints(fitPoints, 'ransac_labels',
                                                  [1.0, 1.0])
        dists = np.dot(
            vnp.getNumpyFromVtk(linePoints, 'Points') - linePoint,
            lineDirection)
        p1 = linePoint + lineDirection * np.min(dists)
        p2 = linePoint + lineDirection * np.max(dists)
        vis.updatePolyData(fitPoints,
                           'line fit points',
                           parent=segmentation.getDebugFolder(),
                           colorByName='ransac_labels',
                           visible=False)

        # compute a new frame that is in plane with the stance frame
        # and matches the orientation and position of the detected edge
        origin = np.array(stanceFrame.GetPosition())
        normal = np.array(stanceFrameAxes[2])

        # project stance origin to edge, then back to foot frame
        originProjectedToEdge = linePoint + lineDirection * np.dot(
            origin - linePoint, lineDirection)
        originProjectedToPlane = segmentation.projectPointToPlane(
            originProjectedToEdge, origin, normal)
        zaxis = np.array(stanceFrameAxes[2])
        yaxis = np.array(lineDirection)
        xaxis = np.cross(yaxis, zaxis)
        xaxis /= np.linalg.norm(xaxis)
        yaxis = np.cross(zaxis, xaxis)
        yaxis /= np.linalg.norm(yaxis)

        d = DebugData()
        d.addSphere(p1, radius=0.005)
        d.addSphere(p2, radius=0.005)
        d.addLine(p1, p2)
        d.addSphere(originProjectedToEdge, radius=0.001, color=[1, 0, 0])
        d.addSphere(originProjectedToPlane, radius=0.001, color=[0, 1, 0])
        d.addLine(originProjectedToPlane, origin, color=[0, 1, 0])
        d.addLine(originProjectedToEdge, origin, color=[1, 0, 0])
        vis.updatePolyData(d.getPolyData(),
                           'running board edge',
                           parent=segmentation.getDebugFolder(),
                           colorByName='RGB255',
                           visible=False)

        # update the running board box affordance position and orientation to
        # fit the detected edge
        box = self.spawnRunningBoardAffordance()
        boxDimensions = box.getProperty('Dimensions')
        t = transformUtils.getTransformFromAxesAndOrigin(
            xaxis, yaxis, zaxis, originProjectedToPlane)
        t.PreMultiply()
        t.Translate(-boxDimensions[0] / 2.0, 0.0, -boxDimensions[2] / 2.0)
        box.getChildFrame().copyFrame(t)

        self.initialize()
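
For reference, the point-to-plane projection used above (segmentation.projectPointToPlane) can be understood as removing the component of the point's offset along the plane normal. The sketch below is a hypothetical stand-in written with numpy, not the library function itself.

import numpy as np

def projectPointToPlaneSketch(point, planeOrigin, planeNormal):
    # Subtract the offset's component along the (unit) plane normal.
    n = np.asarray(planeNormal, dtype=float)
    n = n / np.linalg.norm(n)
    point = np.asarray(point, dtype=float)
    return point - np.dot(point - planeOrigin, n) * n

The final PreMultiply/Translate pair then shifts the fitted frame by half the box dimensions along its local x and z axes, so the frame origin on the detected edge lands on an edge of the box affordance rather than at its center.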