def setCameraTransform(camera, transform):
    '''Set camera transform so that view direction is +Z and view up is -Y

    :param camera: vtkCamera object, modified in place
    :param transform: vtkTransform giving the desired camera pose
    :return: None
    '''
    origin = np.array(transform.GetPosition())
    axes = transformUtils.getAxesFromTransform(transform)
    camera.SetPosition(origin)
    # look along the transform's +Z axis; view-up is the negated +Y axis
    camera.SetFocalPoint(origin + axes[2])
    camera.SetViewUp(-axes[1])
    # notify VTK observers of the change, consistent with the other
    # setCameraTransform variants in this file
    camera.Modified()
def setCameraTransform(camera, transform):
    """
    Camera transform is of the Right-Down-Forward (XYZ) convention.
    See http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/OWENS/LECT9/node2.html
    """
    position = np.array(transform.GetPosition())
    frameAxes = transformUtils.getAxesFromTransform(transform)
    camera.SetPosition(position)
    # focal point one unit along the forward (+Z) axis
    focalPoint = position + frameAxes[2]
    camera.SetFocalPoint(focalPoint)
    # view-up opposes the down (+Y) axis
    camera.SetViewUp(-frameAxes[1])
    camera.Modified()
def setCameraTransform(camera, transform):
    """
    Camera transform is of the Right-Down-Forward (XYZ) convention.
    See http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/OWENS/LECT9/node2.html
    """
    axes = transformUtils.getAxesFromTransform(transform)
    eye = np.array(transform.GetPosition())
    camera.SetPosition(eye)
    camera.SetFocalPoint(eye + axes[2])
    camera.SetViewUp(-axes[1])
    camera.Modified()
def cropToBox(polyData, transform, dimensions, data_type='cells'):
    '''
    dimensions is length 3 describing box dimensions
    '''
    center = np.array(transform.GetPosition())
    boxAxes = transformUtils.getAxesFromTransform(transform)
    # successively clip the data between the two box faces along each axis
    for boxAxis, edgeLength in zip(boxAxes, dimensions):
        halfExtent = np.array(boxAxis) * (edgeLength / 2.0)
        polyData = cropToLineSegment(polyData,
                                     center - halfExtent,
                                     center + halfExtent)
    return polyData
def cropToBox(polyData, transform, dimensions, data_type='cells'):
    '''
    dimensions is length 3 describing box dimensions
    '''
    boxOrigin = np.array(transform.GetPosition())
    # clip along each of the box frame's axes in turn; each pass keeps only
    # the points within +/- half the box length on that axis
    for frameAxis, boxLength in zip(transformUtils.getAxesFromTransform(transform),
                                    dimensions):
        offset = np.array(frameAxis) * (boxLength / 2.0)
        polyData = cropToLineSegment(polyData, boxOrigin - offset, boxOrigin + offset)
    return polyData
def testTransform():
    '''
    test transformFromPose --> getAxesFromTransform is same as quat --> matrix

    Builds a frame from a random pose, then checks that the axes extracted
    from the frame equal the columns of the rotation part of the frame's
    4x4 matrix, and that the full matrix matches the one constructed
    directly from the quaternion.
    '''
    quat = transformations.random_quaternion()
    pos = np.random.rand(3)
    frame = transformUtils.transformFromPose(pos, quat)
    axes = transformUtils.getAxesFromTransform(frame)
    mat = transformUtils.getNumpyFromTransform(frame)
    # axes are rows in the returned list, so transpose to compare as columns
    assert np.allclose(mat[:3, :3], np.array(axes).transpose())
    mat2 = transformations.quaternion_matrix(quat)
    mat2[:3, 3] = pos
    # fixed: Python 2 print statements ('print mat') replaced with the
    # print() function, consistent with the other testTransform variant
    print(mat)
    print(mat2)
    assert np.allclose(mat, mat2)
def testTransform():
    '''
    test transformFromPose --> getAxesFromTransform is same as quat --> matrix
    '''
    quat = transformations.random_quaternion()
    pos = np.random.rand(3)
    frame = transformUtils.transformFromPose(pos, quat)
    # the extracted axes, stacked as columns, should equal the rotation block
    rotationColumns = np.array(transformUtils.getAxesFromTransform(frame)).transpose()
    mat = transformUtils.getNumpyFromTransform(frame)
    assert np.allclose(mat[:3, :3], rotationColumns)
    # build the expected matrix directly from the quaternion and position
    expected = transformations.quaternion_matrix(quat)
    expected[:3, 3] = pos
    print(mat)
    print(expected)
    assert np.allclose(mat, expected)
def createUtorsoGazeConstraints(self, tspan):
    """Return two utorso gaze constraints active over tspan.

    The first aligns the torso's x axis with the x axis of the left foot
    egress-outside frame; the second keeps the torso's z axis aligned with
    world z. Both use self.coneThreshold as the cone tolerance.
    """
    def makeGazeConstraint(targetAxis, bodyAxis):
        # one-line purpose: build a WorldGazeDirConstraint on the utorso link
        c = ik.WorldGazeDirConstraint()
        c.linkName = 'utorso'
        c.targetFrame = vtk.vtkTransform()
        c.targetAxis = targetAxis
        c.bodyAxis = bodyAxis
        c.coneThreshold = self.coneThreshold
        c.tspan = tspan
        return c

    egressAxes = transformUtils.getAxesFromTransform(
        self.polaris.leftFootEgressOutsideFrame.transform)
    return [makeGazeConstraint(egressAxes[0], [1, 0, 0]),
            makeGazeConstraint([0, 0, 1], [0, 0, 1])]
def setCameraTransform(camera, transform):
    """
    Camera transform is of the Right-Down-Forward (XYZ) convention.
    See http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/OWENS/LECT9/node2.html

    Note you may need to re-render afterwards by calling view.forceRender()

    :param camera: vtkCamera object, modified in place
    :type camera:
    :param transform: vtkTransform giving the desired camera pose
    :type transform:
    :return: None
    :rtype:
    """
    cameraOrigin = np.array(transform.GetPosition())
    rightAxis, downAxis, forwardAxis = transformUtils.getAxesFromTransform(transform)
    camera.SetPosition(cameraOrigin)
    # look along the forward (+Z) axis; view-up opposes the down (+Y) axis
    camera.SetFocalPoint(cameraOrigin + forwardAxis)
    camera.SetViewUp(-downAxis)
    camera.Modified()
def fitRunningBoardAtFeet(self):
    """Fit the running board box affordance to an edge detected near the feet.

    Pipeline: crop the current point cloud to a box around the feet midpoint
    frame, extract maximal edge points along the stance x axis, RANSAC-fit a
    line to them, then reposition the running board affordance so its edge
    lies on the fitted line while staying in the stance plane. Emits debug
    geometry under the segmentation debug folder. Returns early (None) when
    the cropped search region contains no points.
    """
    # get stance frame
    # NOTE(review): startPose is assigned but never used below — confirm
    # whether getPlanningStartPose() is needed here for a side effect.
    startPose = self.getPlanningStartPose()
    stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(
        self.robotSystem.robotStateModel, useWorldZ=False)
    stanceFrameAxes = transformUtils.getAxesFromTransform(stanceFrame)

    # get pointcloud and extract search region covering the running board
    polyData = segmentation.getCurrentRevolutionData()
    polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01)
    _, polyData = segmentation.removeGround(polyData)
    # 1.0 x 1.0 x 0.1 m box centered on the stance frame
    polyData = segmentation.cropToBox(polyData, stanceFrame, [1.0, 1.0, 0.1])

    if not polyData.GetNumberOfPoints():
        print('empty search region point cloud')
        return

    vis.updatePolyData(polyData,
                       'running board search points',
                       parent=segmentation.getDebugFolder(),
                       color=[0, 1, 0],
                       visible=False)

    # extract maximal points along the stance x axis
    perpAxis = stanceFrameAxes[0]
    edgeAxis = stanceFrameAxes[1]
    edgePoints = segmentation.computeEdge(polyData, edgeAxis, perpAxis)
    edgePoints = vnp.getVtkPolyDataFromNumpyPoints(edgePoints)
    vis.updatePolyData(edgePoints,
                       'edge points',
                       parent=segmentation.getDebugFolder(),
                       visible=True)

    # ransac fit a line to the edge points
    linePoint, lineDirection, fitPoints = segmentation.applyLineFit(
        edgePoints)
    # orient the line direction to agree with the stance y axis
    if np.dot(lineDirection, stanceFrameAxes[1]) < 0:
        lineDirection = -lineDirection

    # keep only the inlier points (label 1.0) and find the line extent
    linePoints = segmentation.thresholdPoints(fitPoints, 'ransac_labels',
                                              [1.0, 1.0])
    dists = np.dot(
        vnp.getNumpyFromVtk(linePoints, 'Points') - linePoint, lineDirection)
    p1 = linePoint + lineDirection * np.min(dists)
    p2 = linePoint + lineDirection * np.max(dists)
    vis.updatePolyData(fitPoints,
                       'line fit points',
                       parent=segmentation.getDebugFolder(),
                       colorByName='ransac_labels',
                       visible=False)

    # compute a new frame that is in plane with the stance frame
    # and matches the orientation and position of the detected edge
    origin = np.array(stanceFrame.GetPosition())
    normal = np.array(stanceFrameAxes[2])

    # project stance origin to edge, then back to foot frame
    originProjectedToEdge = linePoint + lineDirection * np.dot(
        origin - linePoint, lineDirection)
    originProjectedToPlane = segmentation.projectPointToPlane(
        originProjectedToEdge, origin, normal)
    # build an orthonormal frame: z from the stance plane normal, y along
    # the fitted edge, x completing the right-handed triad
    zaxis = np.array(stanceFrameAxes[2])
    yaxis = np.array(lineDirection)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)

    # debug visualization of the fitted edge and projections
    d = DebugData()
    d.addSphere(p1, radius=0.005)
    d.addSphere(p2, radius=0.005)
    d.addLine(p1, p2)
    d.addSphere(originProjectedToEdge, radius=0.001, color=[1, 0, 0])
    d.addSphere(originProjectedToPlane, radius=0.001, color=[0, 1, 0])
    d.addLine(originProjectedToPlane, origin, color=[0, 1, 0])
    d.addLine(originProjectedToEdge, origin, color=[1, 0, 0])
    vis.updatePolyData(d.getPolyData(),
                       'running board edge',
                       parent=segmentation.getDebugFolder(),
                       colorByName='RGB255',
                       visible=False)

    # update the running board box affordance position and orientation to
    # fit the detected edge
    box = self.spawnRunningBoardAffordance()
    boxDimensions = box.getProperty('Dimensions')
    t = transformUtils.getTransformFromAxesAndOrigin(
        xaxis, yaxis, zaxis, originProjectedToPlane)
    t.PreMultiply()
    # shift so the box's far edge (not its center) sits on the fitted line
    t.Translate(-boxDimensions[0] / 2.0, 0.0, -boxDimensions[2] / 2.0)
    box.getChildFrame().copyFrame(t)
    self.initialize()
def fitRunningBoardAtFeet(self):
    """Fit the running board box affordance to an edge detected near the feet.

    Crops the current point cloud to a box around the feet midpoint frame,
    extracts maximal edge points along the stance x axis, RANSAC-fits a line
    to them, and repositions the running board affordance so its edge lies on
    the fitted line while staying in the stance plane. Emits debug geometry
    under the segmentation debug folder. Returns early (None) when the
    cropped search region contains no points.
    """
    # get stance frame
    # NOTE(review): startPose is unused below; the call is kept in case
    # getPlanningStartPose() has a required side effect — confirm.
    startPose = self.getPlanningStartPose()
    stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(
        self.robotSystem.robotStateModel, useWorldZ=False)
    stanceFrameAxes = transformUtils.getAxesFromTransform(stanceFrame)

    # get pointcloud and extract search region covering the running board
    polyData = segmentation.getCurrentRevolutionData()
    polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01)
    _, polyData = segmentation.removeGround(polyData)
    polyData = segmentation.cropToBox(polyData, stanceFrame, [1.0, 1.0, 0.1])

    if not polyData.GetNumberOfPoints():
        # fixed: Python 2 print statement replaced with the print()
        # function, consistent with the other fitRunningBoardAtFeet variant
        print('empty search region point cloud')
        return

    vis.updatePolyData(polyData, 'running board search points',
                       parent=segmentation.getDebugFolder(),
                       color=[0, 1, 0], visible=False)

    # extract maximal points along the stance x axis
    perpAxis = stanceFrameAxes[0]
    edgeAxis = stanceFrameAxes[1]
    edgePoints = segmentation.computeEdge(polyData, edgeAxis, perpAxis)
    edgePoints = vnp.getVtkPolyDataFromNumpyPoints(edgePoints)
    vis.updatePolyData(edgePoints, 'edge points',
                       parent=segmentation.getDebugFolder(), visible=True)

    # ransac fit a line to the edge points
    linePoint, lineDirection, fitPoints = segmentation.applyLineFit(edgePoints)
    # orient the line direction to agree with the stance y axis
    if np.dot(lineDirection, stanceFrameAxes[1]) < 0:
        lineDirection = -lineDirection

    # keep only the inlier points (label 1.0) and find the line extent
    linePoints = segmentation.thresholdPoints(fitPoints, 'ransac_labels',
                                              [1.0, 1.0])
    dists = np.dot(vnp.getNumpyFromVtk(linePoints, 'Points') - linePoint,
                   lineDirection)
    p1 = linePoint + lineDirection * np.min(dists)
    p2 = linePoint + lineDirection * np.max(dists)
    vis.updatePolyData(fitPoints, 'line fit points',
                       parent=segmentation.getDebugFolder(),
                       colorByName='ransac_labels', visible=False)

    # compute a new frame that is in plane with the stance frame
    # and matches the orientation and position of the detected edge
    origin = np.array(stanceFrame.GetPosition())
    normal = np.array(stanceFrameAxes[2])

    # project stance origin to edge, then back to foot frame
    originProjectedToEdge = linePoint + lineDirection * np.dot(
        origin - linePoint, lineDirection)
    originProjectedToPlane = segmentation.projectPointToPlane(
        originProjectedToEdge, origin, normal)
    # build an orthonormal frame: z from the stance plane normal, y along
    # the fitted edge, x completing the right-handed triad
    zaxis = np.array(stanceFrameAxes[2])
    yaxis = np.array(lineDirection)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)

    # debug visualization of the fitted edge and projections
    d = DebugData()
    d.addSphere(p1, radius=0.005)
    d.addSphere(p2, radius=0.005)
    d.addLine(p1, p2)
    d.addSphere(originProjectedToEdge, radius=0.001, color=[1, 0, 0])
    d.addSphere(originProjectedToPlane, radius=0.001, color=[0, 1, 0])
    d.addLine(originProjectedToPlane, origin, color=[0, 1, 0])
    d.addLine(originProjectedToEdge, origin, color=[1, 0, 0])
    vis.updatePolyData(d.getPolyData(), 'running board edge',
                       parent=segmentation.getDebugFolder(),
                       colorByName='RGB255', visible=False)

    # update the running board box affordance position and orientation to
    # fit the detected edge
    box = self.spawnRunningBoardAffordance()
    boxDimensions = box.getProperty('Dimensions')
    t = transformUtils.getTransformFromAxesAndOrigin(
        xaxis, yaxis, zaxis, originProjectedToPlane)
    t.PreMultiply()
    # shift so the box's far edge (not its center) sits on the fitted line
    t.Translate(-boxDimensions[0] / 2.0, 0.0, -boxDimensions[2] / 2.0)
    box.getChildFrame().copyFrame(t)
    self.initialize()