def onTransformModified(self, caller, event):
    """Re-center the output transform so the catheter sits on the nearest artery path point.

    Fired whenever the observed catheter transform changes. Computes the vector
    from the closest artery fiducial (in catheter coordinates) to the catheter
    origin and stores it as a translation on the output transform node.

    :param caller: the transform node that fired the event (unused directly)
    :param event: the VTK event id (unused directly)
    """
    import numpy as np
    catheterToRasNode = self.catheterSelector.currentNode()
    # Full transform chain from catheter coordinate system to world (RAS).
    catheterToRasTransform = vtk.vtkGeneralTransform()
    catheterToRasNode.GetTransformToWorld(catheterToRasTransform)
    # Catheter origin expressed in its own coordinate system.
    catheterPosition_Catheter = np.array([0.0, 0.0, 0.0])
    catheterPosition_Ras = catheterToRasTransform.TransformFloatPoint(
        catheterPosition_Catheter)
    # pathPoint_Ras is filled in place by closestPointFiducials — presumably
    # with the fiducial nearest to the catheter tip (TODO confirm helper contract).
    pathPoint_Ras = np.array([0.0, 0.0, 0.0])
    ArteryFids = self.fiducialSelector.currentNode()
    self.closestPointFiducials(ArteryFids, catheterPosition_Ras, pathPoint_Ras)
    # Build the inverse chain (RAS -> catheter) from a fresh copy of the
    # world transform, then invert it.
    rasToCatheterTransform = vtk.vtkGeneralTransform()
    catheterToRasNode.GetTransformToWorld(rasToCatheterTransform)
    rasToCatheterTransform.Inverse()
    rasToCatheterTransform.Update()
    pathPoint_Catheter = rasToCatheterTransform.TransformFloatPoint(
        pathPoint_Ras)
    # Translation (in catheter coordinates) that moves the path point onto
    # the catheter origin.
    catheterToCenterTransform = vtk.vtkTransform()
    catheterToPathArray = catheterPosition_Catheter - pathPoint_Catheter
    catheterToCenterTransform.Translate(catheterToPathArray)
    # Get output transform
    output = self.outputSelector.currentNode()
    output.SetAndObserveTransformToParent(catheterToCenterTransform)
    return
def assignTransformDataToCameraNode(self, viewToRasTransformNode, viewNode, modelNode):
    """Drive the camera of *viewNode* from a view-to-RAS transform node.

    Sets the camera position to the transformed view origin, the view-up to the
    transformed +Y axis, and the focal point either to a default point 100 mm
    in front of the camera or to the depth of *modelNode*'s center if given.

    :param viewToRasTransformNode: transform node mapping view coords to RAS
    :param viewNode: the 3D view whose active camera is updated
    :param modelNode: optional model used to pick the focal depth
    """
    camerasLogic = slicer.modules.cameras.logic()
    cameraNode = camerasLogic.GetViewActiveCameraNode(viewNode)
    # Fix: the original allocated a vtkGeneralTransform and immediately
    # discarded it by rebinding the name; GetTransformToParent() already
    # returns the transform object to use.
    viewToRasTransform = viewToRasTransformNode.GetTransformToParent()
    originView = [0, 0, 0]
    originRas = viewToRasTransform.TransformPoint(originView)
    cameraNode.SetPosition(originRas)
    upDirectionView = [0, 1, 0]
    upDirectionRas = [0, 0, 0]
    viewToRasTransform.TransformVectorAtPoint(originView, upDirectionView, upDirectionRas)
    cameraNode.SetViewUp(upDirectionRas)
    focalPointView = [0, 0, -100]  # default, but changes if modelNode is provided
    if modelNode:
        # Place the focal point at the model center's depth in view coordinates.
        modelParentNode = modelNode.GetParentTransformNode()
        modelToViewTransform = vtk.vtkGeneralTransform()
        slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(
            modelParentNode, viewToRasTransformNode, modelToViewTransform)
        modelBounds = [0, 0, 0, 0, 0, 0]
        modelNode.GetBounds(modelBounds)
        modelCenterX = (modelBounds[0] + modelBounds[1]) / 2
        modelCenterY = (modelBounds[2] + modelBounds[3]) / 2
        modelCenterZ = (modelBounds[4] + modelBounds[5]) / 2
        modelPosition = [modelCenterX, modelCenterY, modelCenterZ]
        modelPositionView = modelToViewTransform.TransformPoint(modelPosition)
        focalPointView = [0, 0, modelPositionView[2]]
    focalPointRas = viewToRasTransform.TransformPoint(focalPointView)
    cameraNode.SetFocalPoint(focalPointRas)
    self.resetCameraClippingRange(viewNode)
def preview(self, xy):
    """Calculate the current level trace outline if the mouse is inside the volume extent.

    :param xy: (x, y) mouse position in slice-view coordinates
    """
    # Get master volume image data
    import vtkSegmentationCorePython as vtkSegmentationCore
    masterImageData = self.effect.scriptedEffect.masterVolumeImageData()
    segmentationNode = self.effect.scriptedEffect.parameterSetNode().GetSegmentationNode()
    parentTransformNode = None
    if segmentationNode:
        parentTransformNode = segmentationNode.GetParentTransformNode()

    self.xyPoints.Reset()
    ijk = self.effect.xyToIjk(xy, self.sliceWidget, masterImageData, parentTransformNode)
    dimensions = masterImageData.GetDimensions()
    # Fix: `dimensions` was computed but never used here. The tracing filter
    # crashes if it receives a seed point at the edge of the image, so only
    # accept the point if it is inside the image and at least one voxel away
    # from the edge (same guard as the other preview() variants in this file).
    for index in range(3):
        if ijk[index] < 1 or ijk[index] >= dimensions[index] - 1:
            return

    self.tracingFilter.SetInputData(masterImageData)
    self.tracingFilter.SetSeed(ijk)

    # Select the plane corresponding to current slice orientation for the
    # input volume: whichever IJK axis is constant across the view is fixed.
    sliceNode = self.effect.scriptedEffect.viewNode(self.sliceWidget)
    offset = max(sliceNode.GetDimensions())
    i0, j0, k0 = self.effect.xyToIjk((0, 0), self.sliceWidget, masterImageData, parentTransformNode)
    i1, j1, k1 = self.effect.xyToIjk((offset, offset), self.sliceWidget, masterImageData, parentTransformNode)
    if i0 == i1:
        self.tracingFilter.SetPlaneToJK()
    if j0 == j1:
        self.tracingFilter.SetPlaneToIK()
    if k0 == k1:
        self.tracingFilter.SetPlaneToIJ()
    self.tracingFilter.Update()
    polyData = self.tracingFilter.GetOutput()

    # Get master volume IJK to slice XY transform
    xyToRas = sliceNode.GetXYToRAS()
    rasToIjk = vtk.vtkMatrix4x4()
    masterImageData.GetImageToWorldMatrix(rasToIjk)
    rasToIjk.Invert()
    xyToIjk = vtk.vtkGeneralTransform()
    xyToIjk.PostMultiply()
    xyToIjk.Concatenate(xyToRas)
    if parentTransformNode:
        # Account for the segmentation's parent transform between RAS and IJK.
        worldToSegmentation = vtk.vtkMatrix4x4()
        parentTransformNode.GetMatrixTransformFromWorld(worldToSegmentation)
        xyToIjk.Concatenate(worldToSegmentation)
    xyToIjk.Concatenate(rasToIjk)
    ijkToXy = xyToIjk.GetInverse()
    ijkToXy.TransformPoints(polyData.GetPoints(), self.xyPoints)

    self.polyData.DeepCopy(polyData)
    self.polyData.GetPoints().DeepCopy(self.xyPoints)
    self.sliceWidget.sliceView().scheduleRender()
def __init__(self, sliceWidget):
    """Create the level-tracing tool: logic, state, and the 2D outline pipeline."""
    super(LevelTracingEffectTool, self).__init__(sliceWidget)

    # create a logic instance to do the non-gui work
    self.logic = LevelTracingEffectLogic(self.sliceWidget.sliceLogic())

    # instance variables
    self.actionState = ''

    # initialization
    self.xyPoints = vtk.vtkPoints()
    self.rasPoints = vtk.vtkPoints()
    self.polyData = vtk.vtkPolyData()
    self.tracingFilter = vtkITK.vtkITKLevelTracingImageFilter()
    self.ijkToXY = vtk.vtkGeneralTransform()

    self.mapper = vtk.vtkPolyDataMapper2D()
    self.actor = vtk.vtkActor2D()
    if vtk.VTK_MAJOR_VERSION <= 5:
        self.mapper.SetInput(self.polyData)
    else:
        self.mapper.SetInputData(self.polyData)
    self.actor.SetMapper(self.mapper)
    # Fix: the original set the color twice on the same vtkProperty2D object
    # (green 107/190/99, then yellow); only the final yellow value had any
    # effect, so the dead first assignment is removed.
    property_ = self.actor.GetProperty()
    property_.SetColor(1, 1, 0)
    property_.SetLineWidth(1)

    self.renderer.AddActor2D(self.actor)
    self.actors.append(self.actor)
def __init__(self, sliceWidget):
    """Set up the level-tracing tool: logic helper, state, and 2D render pipeline."""
    super(LevelTracingEffectTool, self).__init__(sliceWidget)

    # Non-GUI work is delegated to a dedicated logic object.
    self.logic = LevelTracingEffectLogic(self.sliceWidget.sliceLogic())

    # Interaction state.
    self.actionState = ''

    # Geometry containers and the ITK tracing filter.
    self.xyPoints = vtk.vtkPoints()
    self.rasPoints = vtk.vtkPoints()
    self.polyData = vtk.vtkPolyData()
    self.tracingFilter = vtkITK.vtkITKLevelTracingImageFilter()
    self.ijkToXY = vtk.vtkGeneralTransform()

    # 2D pipeline: polydata -> mapper -> actor.
    self.mapper = vtk.vtkPolyDataMapper2D()
    self.actor = vtk.vtkActor2D()
    outlineProperty = self.actor.GetProperty()
    outlineProperty.SetColor(107 / 255., 190 / 255., 99 / 255.)
    outlineProperty.SetLineWidth(1)
    if vtk.VTK_MAJOR_VERSION <= 5:
        self.mapper.SetInput(self.polyData)
    else:
        self.mapper.SetInputData(self.polyData)
    self.actor.SetMapper(self.mapper)
    # Final outline appearance (overrides the color set above).
    outlineProperty = self.actor.GetProperty()
    outlineProperty.SetColor(1, 1, 0)
    outlineProperty.SetLineWidth(1)

    self.renderer.AddActor2D(self.actor)
    self.actors.append(self.actor)
def pointDistancesLabelsFromSurface(self, inputPoints, inputModel):
    """Calculate closest point to point distance.

    Returns a numpy array of signed distances from each control point of
    *inputPoints* to the surface of *inputModel*, plus the point labels.
    """
    import numpy as np
    modelPolyData = inputModel.GetPolyData()
    if not modelPolyData or modelPolyData.GetNumberOfPoints() == 0:
        raise ValueError("Empty input model")

    # Bring the model geometry into the world coordinate system first.
    parentTransformNode = inputModel.GetParentTransformNode()
    if parentTransformNode:
        modelToWorldTransform = vtk.vtkGeneralTransform()
        slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(
            parentTransformNode, None, modelToWorldTransform)
        toWorldFilter = vtk.vtkTransformPolyDataFilter()
        toWorldFilter.SetTransform(modelToWorldTransform)
        toWorldFilter.SetInputData(modelPolyData)
        toWorldFilter.Update()
        surface_World = toWorldFilter.GetOutput()
    else:
        surface_World = modelPolyData

    # Implicit distance function evaluated per control point.
    distanceFunction = vtk.vtkImplicitPolyDataDistance()
    distanceFunction.SetInput(surface_World)

    pointCount = inputPoints.GetNumberOfFiducials()
    distances = np.zeros(pointCount)
    labels = [""] * pointCount
    for pointIndex in range(pointCount):
        point_World = np.zeros(3)
        inputPoints.GetNthControlPointPositionWorld(pointIndex, point_World)
        closestPointOnSurface_World = np.zeros(3)
        distances[pointIndex] = distanceFunction.EvaluateFunctionAndGetClosestPoint(
            point_World, closestPointOnSurface_World)
        labels[pointIndex] = inputPoints.GetNthControlPointLabel(pointIndex)
    return distances, labels
def generateMergedLabelmapInReferenceGeometry(self, segmentationNode, referenceVolumeNode):
    """Merge all segments of *segmentationNode* into one labelmap resampled to
    the geometry of *referenceVolumeNode*.

    :param segmentationNode: segmentation to flatten into a single labelmap
    :param referenceVolumeNode: volume defining the output geometry
    :return: vtkOrientedImageData in the reference volume's geometry, or None on error
    """
    if segmentationNode is None:
        logging.error("Invalid segmentation node")
        return None
    if referenceVolumeNode is None:
        logging.error("Invalid reference volume node")
        return None

    # Set reference image geometry (in the reference node coordinate system).
    # Fix: the original pre-allocated referenceGeometry_Reference,
    # referenceGeometry_Segmentation and mergedLabelmap_Reference up front and
    # then re-allocated/shadowed some of them; each object is now created
    # exactly once, where it is used.
    referenceGeometry_Reference = slicer.vtkOrientedImageData()
    referenceGeometry_Reference.SetExtent(referenceVolumeNode.GetImageData().GetExtent())
    ijkToRasMatrix = vtk.vtkMatrix4x4()
    referenceVolumeNode.GetIJKToRASMatrix(ijkToRasMatrix)
    referenceGeometry_Reference.SetGeometryFromImageToWorldMatrix(ijkToRasMatrix)

    # Transform it to the segmentation node coordinate system.
    referenceGeometry_Segmentation = slicer.vtkOrientedImageData()
    referenceGeometry_Segmentation.DeepCopy(referenceGeometry_Reference)

    # Get transform between reference volume and segmentation node.
    # Left as identity when both share the same parent transform.
    referenceGeometryToSegmentationTransform = vtk.vtkGeneralTransform()
    if (referenceVolumeNode.GetParentTransformNode() != segmentationNode.GetParentTransformNode()):
        slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(
            referenceVolumeNode.GetParentTransformNode(),
            segmentationNode.GetParentTransformNode(),
            referenceGeometryToSegmentationTransform)
        slicer.vtkOrientedImageDataResample.TransformOrientedImage(
            referenceGeometry_Segmentation,
            referenceGeometryToSegmentationTransform, True)

    # Generate shared labelmap for the exported segments in segmentation coordinates.
    sharedImage_Segmentation = slicer.vtkOrientedImageData()
    if (not segmentationNode.GenerateMergedLabelmapForAllSegments(
            sharedImage_Segmentation, 0, None)):
        logging.error("ExportSegmentsToLabelmapNode: Failed to generate shared labelmap")
        return None

    # Transform shared labelmap to reference geometry coordinate system.
    segmentationToReferenceGeometryTransform = referenceGeometryToSegmentationTransform.GetInverse()
    segmentationToReferenceGeometryTransform.Update()

    mergedLabelmap_Reference = slicer.vtkOrientedImageData()
    slicer.vtkOrientedImageDataResample.ResampleOrientedImageToReferenceOrientedImage(
        sharedImage_Segmentation, referenceGeometry_Reference,
        mergedLabelmap_Reference, False, False,
        segmentationToReferenceGeometryTransform)
    return mergedLabelmap_Reference
def checkVesselLocation(self):
    """Check the vessel model position against the retractor reference point
    and re-center the vessel if it drifted too far from the fiducial path.

    TODO: implement re-centering for vessel and cutter models.
    """
    # vessel -> RAS transform chain (updated in place).
    vesselToRasTransform = vtk.vtkGeneralTransform()
    self.vesselModelToVessel.GetTransformToWorld(vesselToRasTransform)
    # RAS -> vessel. Fix: the original passed two vtkGeneralTransform objects
    # to vtk.vtkMatrix4x4.Invert(), which expects matrices and would raise a
    # TypeError; GetInverse() yields the inverse transform directly.
    rasToVesselTransform = vesselToRasTransform.GetInverse()

    # Get retractor fiducial point in RAS coordinates (4th entry is the
    # homogeneous coordinate, dropped below).
    retractorReferenceNode = slicer.util.getNode('Retractor Reference')
    retractorRasCoordinates = [0, 0, 0, 0]
    retractorReferenceNode.GetNthFiducialWorldCoordinates(0, retractorRasCoordinates)
    retractorLocationRas = retractorRasCoordinates[:-1]

    # Transform retractor reference point from RAS to the vessel coordinate system.
    retractorLocationVesselCoordinates = rasToVesselTransform.TransformFloatPoint(
        retractorLocationRas)

    # Find the path fiducial closest to the retractor point.
    # NOTE(review): NUM_VESSEL_FIDS and pathFiducialsNode are assumed to be
    # defined at module level — confirm.
    minDistance = float('inf')
    closestPoint = None
    for i in range(NUM_VESSEL_FIDS):
        pathFiducialRas = [0, 0, 0, 0]
        # Fix: the original passed the undefined name `vesselFiducialRas` here.
        pathFiducialsNode.GetNthFiducialWorldCoordinates(i, pathFiducialRas)
        pathFiducialVesselCoordinates = rasToVesselTransform.TransformFloatPoint(
            pathFiducialRas[:-1])
        distance = math.sqrt(vtk.vtkMath.Distance2BetweenPoints(
            retractorLocationVesselCoordinates, pathFiducialVesselCoordinates))
        if distance < minDistance:
            minDistance = distance
            closestPoint = pathFiducialVesselCoordinates

    if closestPoint is not None and minDistance > 400:
        # Fix: the original mixed `closestPoint` and the undefined name
        # `vesselLocation` in this vector; closestPoint is used consistently.
        translateVesselToRetractor_vessel = [
            retractorLocationVesselCoordinates[0] - closestPoint[0],
            retractorLocationVesselCoordinates[1] - closestPoint[1],
            retractorLocationVesselCoordinates[2] - closestPoint[2],
        ]
        vesselToPath = vtk.vtkTransform()
        vesselToPath.Translate(translateVesselToRetractor_vessel[0],
                               translateVesselToRetractor_vessel[1],
                               translateVesselToRetractor_vessel[2])
        vesselModelToVessel = slicer.util.getNode('VesselModelToVessel')
        vesselModelToVessel.SetAndObserveTransformToParent(vesselToPath)
def createISORegionOverlay(self, curveNode):
    """Paint an "ISO-Regions" point scalar array onto the curve's surface model.

    Region 0 is the set of surface points closest to the curve; each further
    region index marks the next ring of adjacent, not-yet-visited points.

    :param curveNode: the curve that the overlay will be created from
        (vtkMRMLMarkupsCurveNode with a shortest-distance surface)
    """
    polyData = curveNode.GetShortestDistanceSurfaceNode().GetPolyData()
    if polyData is None:
        # TODO
        return

    # Transform the surface into world coordinates so it matches the curve points.
    transformFilter = vtk.vtkTransformPolyDataFilter()
    transformFilter.SetInputData(polyData)
    modelToWorldTransform = vtk.vtkGeneralTransform()
    slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(
        curveNode.GetShortestDistanceSurfaceNode().GetParentTransformNode(),
        None, modelToWorldTransform)
    transformFilter.SetTransform(modelToWorldTransform)
    transformFilter.Update()
    polyData = transformFilter.GetOutput()

    pointLocator = vtk.vtkPointLocator()
    pointLocator.SetDataSet(transformFilter.GetOutput())
    pointLocator.BuildLocator()

    curvePoints = curveNode.GetCurvePointsWorld()
    isoRegionsArrayName = "ISO-Regions"
    pointData = polyData.GetPointData()
    isoRegionsArray = pointData.GetArray(isoRegionsArrayName)
    if isoRegionsArray is None:
        isoRegionsArray = vtk.vtkIdTypeArray()
        isoRegionsArray.SetName(isoRegionsArrayName)
        isoRegionsArray.SetNumberOfValues(polyData.GetNumberOfPoints())
        isoRegionsArray.Fill(-1)  # -1 marks "not part of any region"
        curveNode.GetShortestDistanceSurfaceNode().AddPointScalars(isoRegionsArray)

    # Perf fix: visitedPoints was a list, making each `in` check O(n) inside
    # nested loops; a set gives O(1) membership with identical semantics.
    visitedPoints = set()
    previousISORegion = []
    for regionIndex in range(7):
        currentISORegion = []
        if regionIndex == 0:
            # Seed region: surface points nearest to each curve point.
            for i in range(curvePoints.GetNumberOfPoints()):
                curvePoint_World = curvePoints.GetPoint(i)
                currentISORegion.append(pointLocator.FindClosestPoint(curvePoint_World))
        else:
            currentISORegion = self.getAdjacentPoints(polyData, previousISORegion)
        for isoPointID in currentISORegion:
            if isoPointID in visitedPoints:
                continue
            isoRegionsArray.SetValue(isoPointID, regionIndex)
        visitedPoints.update(currentISORegion)
        previousISORegion = currentISORegion
def preview(self, xy):
    """Recompute the level-trace outline for mouse position *xy* (slice XY
    coordinates) and schedule a repaint of the slice view."""
    # Get master volume image data.
    import vtkSegmentationCorePython as vtkSegmentationCore
    volumeImage = self.effect.scriptedEffect.masterVolumeImageData()

    self.xyPoints.Reset()
    seedIjk = self.effect.xyToIjk(xy, self.sliceWidget, volumeImage)
    dims = volumeImage.GetDimensions()
    # The tracing filter crashes if it receives a seed point at the edge of the
    # image, so only accept seeds strictly inside the image, at least one voxel
    # away from every face.
    for axis in xrange(3):
        if seedIjk[axis] < 1 or seedIjk[axis] >= dims[axis] - 1:
            return

    self.tracingFilter.SetInputData(volumeImage)
    self.tracingFilter.SetSeed(seedIjk)

    # Pick the tracing plane matching the current slice orientation: the IJK
    # axis that does not change between opposite view corners is the fixed one.
    sliceNode = self.effect.scriptedEffect.viewNode(self.sliceWidget)
    span = max(sliceNode.GetDimensions())
    cornerA = self.effect.xyToIjk((0, 0), self.sliceWidget, volumeImage)
    cornerB = self.effect.xyToIjk((span, span), self.sliceWidget, volumeImage)
    if cornerA[0] == cornerB[0]:
        self.tracingFilter.SetPlaneToJK()
    if cornerA[1] == cornerB[1]:
        self.tracingFilter.SetPlaneToIK()
    if cornerA[2] == cornerB[2]:
        self.tracingFilter.SetPlaneToIJ()
    self.tracingFilter.Update()
    tracedPolyData = self.tracingFilter.GetOutput()

    # Build master volume IJK -> slice XY by composing XY -> RAS -> IJK and
    # inverting the result.
    rasToIjkMatrix = vtk.vtkMatrix4x4()
    volumeImage.GetImageToWorldMatrix(rasToIjkMatrix)
    rasToIjkMatrix.Invert()
    xyToIjkTransform = vtk.vtkGeneralTransform()
    xyToIjkTransform.PostMultiply()
    xyToIjkTransform.Concatenate(sliceNode.GetXYToRAS())
    xyToIjkTransform.Concatenate(rasToIjkMatrix)
    xyToIjkTransform.GetInverse().TransformPoints(tracedPolyData.GetPoints(), self.xyPoints)

    self.polyData.DeepCopy(tracedPolyData)
    self.polyData.GetPoints().DeepCopy(self.xyPoints)
    self.sliceWidget.sliceView().scheduleRender()
def preview(self,xy):
    """Calculate the current level trace outline if the mouse is inside the
    volume extent, and schedule a render of the slice view.

    :param xy: (x, y) mouse position in slice-view coordinates
    """
    # Get master volume image data (filled in place by the scripted effect).
    import vtkSegmentationCore
    masterImageData = vtkSegmentationCore.vtkOrientedImageData()
    self.effect.scriptedEffect.masterVolumeImageData(masterImageData)

    self.xyPoints.Reset()
    ijk = self.effect.xyToIjk(xy, self.sliceWidget, masterImageData)
    dimensions = masterImageData.GetDimensions()
    # NOTE: xrange — this variant targets Python 2.
    for index in xrange(3):
        # TracingFilter crashes if it receives a seed point at the edge of the image,
        # so only accept the point if it is inside the image and is at least one pixel away from the edge
        if ijk[index] < 1 or ijk[index] >= dimensions[index]-1:
            return

    self.tracingFilter.SetInputData(masterImageData)
    self.tracingFilter.SetSeed(ijk)

    # Select the plane corresponding to current slice orientation
    # for the input volume: the IJK axis that stays constant between two
    # opposite view corners is the fixed (out-of-plane) axis.
    sliceNode = self.effect.scriptedEffect.viewNode(self.sliceWidget)
    offset = max(sliceNode.GetDimensions())
    i0,j0,k0 = self.effect.xyToIjk((0,0), self.sliceWidget, masterImageData)
    i1,j1,k1 = self.effect.xyToIjk((offset,offset), self.sliceWidget, masterImageData)
    if i0 == i1:
        self.tracingFilter.SetPlaneToJK()
    if j0 == j1:
        self.tracingFilter.SetPlaneToIK()
    if k0 == k1:
        self.tracingFilter.SetPlaneToIJ()
    self.tracingFilter.Update()
    polyData = self.tracingFilter.GetOutput()

    # Get master volume IJK to slice XY transform: compose XY->RAS with
    # RAS->IJK, then invert the whole chain.
    xyToRas = sliceNode.GetXYToRAS()
    rasToIjk = vtk.vtkMatrix4x4()
    masterImageData.GetImageToWorldMatrix(rasToIjk)
    rasToIjk.Invert()
    xyToIjk = vtk.vtkGeneralTransform()
    xyToIjk.PostMultiply()
    xyToIjk.Concatenate(xyToRas)
    xyToIjk.Concatenate(rasToIjk)
    ijkToXy = xyToIjk.GetInverse()
    ijkToXy.TransformPoints(polyData.GetPoints(), self.xyPoints)

    self.polyData.DeepCopy(polyData)
    self.polyData.GetPoints().DeepCopy(self.xyPoints)
    self.sliceWidget.sliceView().scheduleRender()
def warp(srcLandmark, dstLandmark, subj):
    """Warp *subj* with a thin-plate-spline defined by landmark correspondences.

    :param srcLandmark: polydata whose points are the source landmarks
    :param dstLandmark: polydata whose points are the target landmarks
    :param subj: polydata to be warped
    :return: warped polydata
    """
    spline = vtkThinPlateSplineTransform()
    spline.SetSourceLandmarks(srcLandmark.GetPoints())
    spline.SetTargetLandmarks(dstLandmark.GetPoints())
    spline.SetBasisToR()
    # Wrap the spline in a general transform for the polydata filter.
    generalTransform = vtkGeneralTransform()
    generalTransform.SetInput(spline)
    warpFilter = vtkTransformPolyDataFilter()
    warpFilter.SetInput(subj)
    warpFilter.SetTransform(generalTransform)
    warpFilter.Update()
    return warpFilter.GetOutput()
def warp(srcLandmark,dstLandmark,subj):
    """Warp *subj* with a thin-plate-spline transform defined by matching
    source/target landmark points.

    :param srcLandmark: polydata whose points are the source landmarks
    :param dstLandmark: polydata whose points are the target landmarks
    :param subj: polydata to be warped
    :return: warped polydata
    """
    tps = vtkThinPlateSplineTransform()
    tps.SetSourceLandmarks(srcLandmark.GetPoints())
    tps.SetTargetLandmarks(dstLandmark.GetPoints())
    tps.SetBasisToR()
    # Wrap the spline in a general transform for the polydata filter.
    t1 = vtkGeneralTransform()
    t1.SetInput(tps)
    tf = vtkTransformPolyDataFilter()
    tf.SetInput(subj)
    tf.SetTransform(t1)
    tf.Update()
    warped = tf.GetOutput()
    return warped
def AlignSurfaceLM(self, szeReader):
    """Rotate the surface (and its copy) so it lines up with the landmarks."""
    # The raw surface is not aligned with the landmarks: rotate -90 deg about
    # Z, then +90 deg about X.
    alignTransform = vtk.vtkGeneralTransform()
    alignTransform.RotateZ(-90)
    alignTransform.RotateX(90)

    mainFilter = vtk.vtkTransformPolyDataFilter()
    mainFilter.SetInputData(szeReader.actor.GetMapper().GetInput())
    mainFilter.SetTransform(alignTransform)

    copyFilter = vtk.vtkTransformPolyDataFilter()
    copyFilter.SetInputData(szeReader.actorCopy.GetMapper().GetInput())
    copyFilter.SetTransform(alignTransform)

    # Rewire both mappers to the rotated outputs and refresh them.
    szeReader.actorCopy.GetMapper().SetInputConnection(copyFilter.GetOutputPort())
    szeReader.actor.GetMapper().SetInputConnection(mainFilter.GetOutputPort())
    szeReader.actorCopy.GetMapper().Update()
    szeReader.actor.GetMapper().Update()
def computeExtentsOfModelInViewport(self, viewNode, modelNode):
    """Return [xmin, xmax, ymin, ymax, zmin, zmax] of the model's points in
    normalized viewport coordinates of *viewNode*.

    :param viewNode: view whose viewport the points are projected into
    :param modelNode: model whose polydata points are measured
    """
    # Robustness fix: GetParentTransformNode() returns None when the model is
    # not under a transform; the original then crashed on GetTransformToWorld.
    # A fresh vtkGeneralTransform is identity, which is correct in that case.
    modelToRasTransform = vtk.vtkGeneralTransform()
    modelToRasTransformNode = modelNode.GetParentTransformNode()
    if modelToRasTransformNode is not None:
        modelToRasTransformNode.GetTransformToWorld(modelToRasTransform)
    transformFilter = vtk.vtkTransformFilter()
    transformFilter.SetTransform(modelToRasTransform)
    transformFilter.SetInputData(modelNode.GetPolyData())
    transformFilter.Update()

    pointsRas = transformFilter.GetOutput().GetPoints()
    numberOfPoints = pointsRas.GetNumberOfPoints()
    minimumXViewport = float('inf')
    maximumXViewport = float('-inf')
    minimumYViewport = float('inf')
    maximumYViewport = float('-inf')
    minimumZViewport = float('inf')
    maximumZViewport = float('-inf')
    # Accumulate min/max per axis over all projected points.
    for pointIndex in xrange(0, numberOfPoints):
        pointRas = [0, 0, 0]
        pointsRas.GetPoint(pointIndex, pointRas)
        pointViewport = self.convertRasToViewport(viewNode, pointRas)
        xViewport = pointViewport[0]
        if xViewport < minimumXViewport:
            minimumXViewport = xViewport
        if xViewport > maximumXViewport:
            maximumXViewport = xViewport
        yViewport = pointViewport[1]
        if yViewport < minimumYViewport:
            minimumYViewport = yViewport
        if yViewport > maximumYViewport:
            maximumYViewport = yViewport
        zViewport = pointViewport[2]
        if zViewport < minimumZViewport:
            minimumZViewport = zViewport
        if zViewport > maximumZViewport:
            maximumZViewport = zViewport
    extentsViewport = [
        minimumXViewport, maximumXViewport, minimumYViewport,
        maximumYViewport, minimumZViewport, maximumZViewport
    ]
    return extentsViewport
def __init__(self, effect, sliceWidget):
    """Create the level-tracing tool helper: state and the 2D outline pipeline."""
    self.effect = effect
    self.sliceWidget = sliceWidget
    self.actionState = ''

    self.xyPoints = vtk.vtkPoints()
    self.rasPoints = vtk.vtkPoints()
    self.polyData = vtk.vtkPolyData()

    self.tracingFilter = vtkITK.vtkITKLevelTracingImageFilter()
    self.ijkToXY = vtk.vtkGeneralTransform()

    self.mapper = vtk.vtkPolyDataMapper2D()
    self.actor = vtk.vtkActor2D()
    self.mapper.SetInputData(self.polyData)
    self.actor.SetMapper(self.mapper)
    # Fix: the original set the color twice on the same vtkProperty2D object
    # (green 107/190/99, then yellow); only the final yellow value had any
    # effect, so the dead first assignment is removed.
    actorProperty = self.actor.GetProperty()
    actorProperty.SetColor(1, 1, 0)
    actorProperty.SetLineWidth(1)
def __init__(self, effect, sliceWidget):
    """Tool state and 2D rendering pipeline for the level-tracing effect."""
    self.effect = effect
    self.sliceWidget = sliceWidget
    self.actionState = ''

    # Geometry containers and the ITK tracing filter.
    self.xyPoints = vtk.vtkPoints()
    self.rasPoints = vtk.vtkPoints()
    self.polyData = vtk.vtkPolyData()
    self.tracingFilter = vtkITK.vtkITKLevelTracingImageFilter()
    self.ijkToXY = vtk.vtkGeneralTransform()

    # 2D pipeline: polydata -> mapper -> actor.
    self.mapper = vtk.vtkPolyDataMapper2D()
    self.actor = vtk.vtkActor2D()
    outlineProperty = self.actor.GetProperty()
    outlineProperty.SetColor(107 / 255., 190 / 255., 99 / 255.)
    outlineProperty.SetLineWidth(1)
    self.mapper.SetInputData(self.polyData)
    self.actor.SetMapper(self.mapper)
    # Final outline appearance (overrides the color set above).
    outlineProperty = self.actor.GetProperty()
    outlineProperty.SetColor(1, 1, 0)
    outlineProperty.SetLineWidth(1)
# Target landmark positions for the thin-plate-spline transform.
# NOTE(review): `tpoints`, `spoints` and `ap` are assumed to be created
# earlier in the script — confirm against the preceding lines.
tpoints.SetPoint(0,0.000,0.000,0.800)
tpoints.SetPoint(1,0.000,0.000,-0.200)
tpoints.SetPoint(2,0.433,0.000,0.350)
tpoints.SetPoint(3,0.433,0.000,-0.150)
tpoints.SetPoint(4,-0.000,0.233,0.350)
tpoints.SetPoint(5,-0.000,0.433,-0.150)
tpoints.SetPoint(6,-0.433,-0.000,0.350)
tpoints.SetPoint(7,-0.433,-0.000,-0.150)
tpoints.SetPoint(8,0.000,-0.233,0.350)
tpoints.SetPoint(9,0.000,-0.433,-0.150)

# Thin-plate-spline transform mapping source landmarks to target landmarks,
# using the R2*log(R) radial basis (the 2D basis).
thin = vtk.vtkThinPlateSplineTransform()
thin.SetSourceLandmarks(spoints)
thin.SetTargetLandmarks(tpoints)
thin.SetBasisToR2LogR()
# thin Inverse
t1 = vtk.vtkGeneralTransform()
t1.SetInput(thin)

# Apply the transform to the source geometry and render it.
f11 = vtk.vtkTransformPolyDataFilter()
f11.SetInputConnection(ap.GetOutputPort())
f11.SetTransform(t1)
m11 = vtk.vtkDataSetMapper()
m11.SetInputConnection(f11.GetOutputPort())
a11 = vtk.vtkActor()
a11.SetMapper(m11)
a11.RotateY(90)
a11.GetProperty().SetColor(1,0,0)
#[a11 GetProperty] SetRepresentationToWireframe

# Renderer in the top-left quarter of the window.
ren11 = vtk.vtkRenderer()
ren11.SetViewport(0.0,0.5,0.25,1.0)
ren11.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren11.AddActor(a11)
def runCurveOptimization(self, inputCurveNode, outputTableNode):
    """Evaluate every combination of 8 FreeSurfer path weights against the
    user-placed curve and record the comparison metrics in a table.

    :param inputCurveNode: user placed curve node to be optimized
    :param outputTableNode: table node that receives one row of metrics per
        weight combination
    """
    # Fix: removed the unused local `metrics = []` from the original.

    # Locator over the user curve's world-space points.
    inputCurvePolyData = vtk.vtkPolyData()
    inputCurvePolyData.SetPoints(inputCurveNode.GetCurvePointsWorld())
    inputPointLocator = vtk.vtkPointLocator()
    inputPointLocator.SetDataSet(inputCurvePolyData)
    inputPointLocator.BuildLocator()

    # Locator over the surface model, transformed into world coordinates.
    transformFilter = vtk.vtkTransformPolyDataFilter()
    transformFilter.SetInputData(
        inputCurveNode.GetShortestDistanceSurfaceNode().GetPolyData())
    modelToWorldTransform = vtk.vtkGeneralTransform()
    slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(
        inputCurveNode.GetShortestDistanceSurfaceNode().GetParentTransformNode(),
        None, modelToWorldTransform)
    transformFilter.SetTransform(modelToWorldTransform)
    transformFilter.Update()
    inputPolyDataLocator = vtk.vtkPointLocator()
    inputPolyDataLocator.SetDataSet(transformFilter.GetOutput())
    inputPolyDataLocator.BuildLocator()

    self.createISORegionOverlay(inputCurveNode)

    # One output column per metric.
    weightArray = vtk.vtkStringArray()
    weightArray.SetName(self.WEIGHTS_COLUMN_NAME)
    averageDistanceArray = vtk.vtkDoubleArray()
    averageDistanceArray.SetName(self.AVERAGE_DISTANCE_COLUMN_NAME)
    maxDistanceArray = vtk.vtkDoubleArray()
    maxDistanceArray.SetName(self.MAX_DISTANCE_COLUMN_NAME)
    overlapPercentArray = vtk.vtkDoubleArray()
    overlapPercentArray.SetName(self.OVERLAP_PERCENT_COLUMN_NAME)
    isoOverlapArray = vtk.vtkDoubleArray()
    isoOverlapArray.SetName(self.ISO_OVERLAP_COLUMN_NAME)

    table = vtk.vtkTable()
    table.AddColumn(weightArray)
    table.AddColumn(averageDistanceArray)
    table.AddColumn(maxDistanceArray)
    table.AddColumn(overlapPercentArray)
    table.AddColumn(isoOverlapArray)
    outputTableNode.SetAndObserveTable(table)

    # Reuse (or create) the preview curve used to test each weight combination.
    optimizerCurve = slicer.mrmlScene.GetFirstNodeByName("CurveComparisonPreview")
    if optimizerCurve is None:
        optimizerCurve = slicer.mrmlScene.AddNewNodeByClass(
            "vtkMRMLMarkupsFreeSurferCurveNode", "CurveComparisonPreview")
    optimizerCurve.SetAndObserveShortestDistanceSurfaceNode(
        inputCurveNode.GetShortestDistanceSurfaceNode())
    optimizerCurve.SetCurveTypeToShortestDistanceOnSurface()

    # The preview curve only keeps the endpoints of the user curve.
    numberOfControlPoints = inputCurveNode.GetNumberOfControlPoints()
    startPoint_World = [0, 0, 0]
    inputCurveNode.GetNthControlPointPositionWorld(0, startPoint_World)
    endPoint_World = [0, 0, 0]
    inputCurveNode.GetNthControlPointPositionWorld(
        numberOfControlPoints - 1, endPoint_World)
    points = vtk.vtkPoints()
    points.InsertNextPoint(startPoint_World)
    points.InsertNextPoint(endPoint_World)
    optimizerCurve.SetControlPointPositionsWorld(points)

    # Exhaustively try every non-zero 8-bit weight combination.
    for i in range(1, pow(2, 8)):
        weights = self.binaryArray(i, 8)
        self.evaluateWeights(inputCurveNode, optimizerCurve, weights,
                             inputPointLocator, inputPolyDataLocator,
                             outputTableNode)
    table.Modified()
def computeStatistics(self, segmentID):
    """Compute intensity statistics (voxel count, volume, min/max/mean/stdev)
    for one segment over the selected scalar volume.

    :param segmentID: id of the segment within the selected segmentation
    :return: dict with only the requested keys (may be empty)
    """
    import vtkSegmentationCorePython as vtkSegmentationCore
    requestedKeys = self.getRequestedKeys()
    segmentationNode = slicer.mrmlScene.GetNodeByID(
        self.getParameterNode().GetParameter("Segmentation"))
    grayscaleNode = slicer.mrmlScene.GetNodeByID(
        self.getParameterNode().GetParameter("ScalarVolume"))
    if len(requestedKeys) == 0:
        return {}

    containsLabelmapRepresentation = segmentationNode.GetSegmentation().ContainsRepresentation(
        vtkSegmentationCore.vtkSegmentationConverter.GetSegmentationBinaryLabelmapRepresentationName())
    if not containsLabelmapRepresentation:
        return {}
    if grayscaleNode is None or grayscaleNode.GetImageData() is None:
        return {}

    # Get geometry of grayscale volume node as oriented image data
    # (reference geometry in reference node coordinate system).
    referenceGeometry_Reference = vtkSegmentationCore.vtkOrientedImageData()
    referenceGeometry_Reference.SetExtent(grayscaleNode.GetImageData().GetExtent())
    ijkToRasMatrix = vtk.vtkMatrix4x4()
    grayscaleNode.GetIJKToRASMatrix(ijkToRasMatrix)
    referenceGeometry_Reference.SetGeometryFromImageToWorldMatrix(ijkToRasMatrix)

    # Get transform between grayscale volume and segmentation.
    segmentationToReferenceGeometryTransform = vtk.vtkGeneralTransform()
    slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(
        segmentationNode.GetParentTransformNode(),
        grayscaleNode.GetParentTransformNode(),
        segmentationToReferenceGeometryTransform)

    # Voxel volume in mm^3 is the product of the three spacing components.
    # Fix: the original used a bare `reduce(...)`, which is a builtin only in
    # Python 2 (NameError on Python 3 unless imported); the explicit product
    # is equivalent and version-independent.
    spacing = referenceGeometry_Reference.GetSpacing()
    cubicMMPerVoxel = spacing[0] * spacing[1] * spacing[2]
    ccPerCubicMM = 0.001

    # Resample the segment's binary labelmap into the reference geometry.
    segment = segmentationNode.GetSegmentation().GetSegment(segmentID)
    segBinaryLabelName = vtkSegmentationCore.vtkSegmentationConverter.GetSegmentationBinaryLabelmapRepresentationName()
    segmentLabelmap = segment.GetRepresentation(segBinaryLabelName)
    segmentLabelmap_Reference = vtkSegmentationCore.vtkOrientedImageData()
    vtkSegmentationCore.vtkOrientedImageDataResample.ResampleOrientedImageToReferenceOrientedImage(
        segmentLabelmap, referenceGeometry_Reference,
        segmentLabelmap_Reference,
        False,  # nearest neighbor interpolation
        False,  # no padding
        segmentationToReferenceGeometryTransform)

    # We need to know exactly the value of the segment voxels: apply a
    # threshold to force the selected label value.
    labelValue = 1
    backgroundValue = 0
    thresh = vtk.vtkImageThreshold()
    thresh.SetInputData(segmentLabelmap_Reference)
    thresh.ThresholdByLower(0)
    thresh.SetInValue(backgroundValue)
    thresh.SetOutValue(labelValue)
    thresh.SetOutputScalarType(vtk.VTK_UNSIGNED_CHAR)
    thresh.Update()

    # Use binary labelmap as a stencil for the accumulator.
    stencil = vtk.vtkImageToImageStencil()
    stencil.SetInputData(thresh.GetOutput())
    stencil.ThresholdByUpper(labelValue)
    stencil.Update()

    stat = vtk.vtkImageAccumulate()
    stat.SetInputData(grayscaleNode.GetImageData())
    stat.SetStencilData(stencil.GetOutput())
    stat.Update()

    # Create statistics dict with only the requested keys.
    stats = {}
    if "voxel_count" in requestedKeys:
        stats["voxel_count"] = stat.GetVoxelCount()
    if "volume_mm3" in requestedKeys:
        stats["volume_mm3"] = stat.GetVoxelCount() * cubicMMPerVoxel
    if "volume_cm3" in requestedKeys:
        stats["volume_cm3"] = stat.GetVoxelCount() * cubicMMPerVoxel * ccPerCubicMM
    if stat.GetVoxelCount() > 0:
        if "min" in requestedKeys:
            stats["min"] = stat.GetMin()[0]
        if "max" in requestedKeys:
            stats["max"] = stat.GetMax()[0]
        if "mean" in requestedKeys:
            stats["mean"] = stat.GetMean()[0]
        if "stdev" in requestedKeys:
            stats["stdev"] = stat.GetStandardDeviation()[0]
    return stats
def getStencilForVolume(self, segmentationNode, segmentID, grayscaleNode):
    """Build an image stencil covering the given segment in the grayscale
    volume's geometry, for use with vtkImageAccumulate-style filters.

    :param segmentationNode: segmentation containing the segment
    :param segmentID: id of the segment to convert
    :param grayscaleNode: scalar volume defining the output geometry
    :return: an updated vtkImageToImageStencil, or None if inputs are unusable
    """
    import vtkSegmentationCorePython as vtkSegmentationCore
    containsLabelmapRepresentation = segmentationNode.GetSegmentation().ContainsRepresentation(
        vtkSegmentationCore.vtkSegmentationConverter.GetSegmentationBinaryLabelmapRepresentationName())
    if not containsLabelmapRepresentation:
        return None
    if (not grayscaleNode or not grayscaleNode.GetImageData()
            or not grayscaleNode.GetImageData().GetPointData()
            or not grayscaleNode.GetImageData().GetPointData().GetScalars()):
        # Input grayscale node does not contain valid image data
        return None

    # Get geometry of grayscale volume node as oriented image data
    # (reference geometry in reference node coordinate system).
    referenceGeometry_Reference = vtkSegmentationCore.vtkOrientedImageData()
    referenceGeometry_Reference.SetExtent(grayscaleNode.GetImageData().GetExtent())
    ijkToRasMatrix = vtk.vtkMatrix4x4()
    grayscaleNode.GetIJKToRASMatrix(ijkToRasMatrix)
    referenceGeometry_Reference.SetGeometryFromImageToWorldMatrix(ijkToRasMatrix)

    # Get transform between grayscale volume and segmentation.
    segmentationToReferenceGeometryTransform = vtk.vtkGeneralTransform()
    slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(
        segmentationNode.GetParentTransformNode(),
        grayscaleNode.GetParentTransformNode(),
        segmentationToReferenceGeometryTransform)

    segmentLabelmap = vtkSegmentationCore.vtkOrientedImageData()
    segmentationNode.GetBinaryLabelmapRepresentation(segmentID, segmentLabelmap)
    if (not segmentLabelmap or not segmentLabelmap.GetPointData()
            or not segmentLabelmap.GetPointData().GetScalars()):
        # No input label data
        return None

    # Resample the segment labelmap into the grayscale volume's geometry.
    segmentLabelmap_Reference = vtkSegmentationCore.vtkOrientedImageData()
    vtkSegmentationCore.vtkOrientedImageDataResample.ResampleOrientedImageToReferenceOrientedImage(
        segmentLabelmap, referenceGeometry_Reference,
        segmentLabelmap_Reference,
        False,  # nearest neighbor interpolation
        False,  # no padding
        segmentationToReferenceGeometryTransform)

    # We need to know exactly the value of the segment voxels: apply a
    # threshold to force the selected label value.
    labelValue = 1
    backgroundValue = 0
    thresh = vtk.vtkImageThreshold()
    thresh.SetInputData(segmentLabelmap_Reference)
    thresh.ThresholdByLower(0)
    thresh.SetInValue(backgroundValue)
    thresh.SetOutValue(labelValue)
    thresh.SetOutputScalarType(vtk.VTK_UNSIGNED_CHAR)
    thresh.Update()

    # Use binary labelmap as a stencil
    stencil = vtk.vtkImageToImageStencil()
    stencil.SetInputData(thresh.GetOutput())
    stencil.ThresholdByUpper(labelValue)
    stencil.Update()
    return stencil
def process(self, inputModelA, inputModelB, outputModel, operation):
    """
    Run the processing algorithm.
    Can be used without GUI widget.
    :param inputModelA: first input model node
    :param inputModelB: second input model node
    :param outputModel: result model node, if empty then a new output node will be created
    :param operation: union, intersection, difference, difference2
    :raises ValueError: if any node is missing or the operation name is unknown
    """
    if not inputModelA or not inputModelB or not outputModel:
        raise ValueError("Input or output model nodes are invalid")

    import time
    startTime = time.time()
    logging.info('Processing started')

    import vtkSlicerCombineModelsModuleLogicPython as vtkbool
    combine = vtkbool.vtkPolyDataBooleanFilter()
    if operation == 'union':
        combine.SetOperModeToUnion()
    elif operation == 'intersection':
        combine.SetOperModeToIntersection()
    elif operation == 'difference':
        combine.SetOperModeToDifference()
    elif operation == 'difference2':
        combine.SetOperModeToDifference2()
    else:
        raise ValueError("Invalid operation: " + operation)

    def polyDataConnectionInOutputSpace(inputModel):
        # Return a polydata output connection for inputModel, transformed into
        # outputModel's coordinate system when their parent transforms differ.
        # (The transform filter stays alive through the pipeline connection.)
        if inputModel.GetParentTransformNode() == outputModel.GetParentTransformNode():
            return inputModel.GetPolyDataConnection()
        transformToOutput = vtk.vtkGeneralTransform()
        slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(
            inputModel.GetParentTransformNode(),
            outputModel.GetParentTransformNode(), transformToOutput)
        transformer = vtk.vtkTransformPolyDataFilter()
        transformer.SetTransform(transformToOutput)
        transformer.SetInputConnection(inputModel.GetPolyDataConnection())
        return transformer.GetOutputPort()

    # Both inputs must be expressed in the output node's coordinate system.
    combine.SetInputConnection(0, polyDataConnectionInOutputSpace(inputModelA))
    combine.SetInputConnection(1, polyDataConnectionInOutputSpace(inputModelB))

    # These parameters might be useful to expose:
    # combine.MergeRegsOn()  # default off
    # combine.DecPolysOff()  # default on
    combine.Update()

    outputModel.SetAndObservePolyData(combine.GetOutput())
    outputModel.CreateDefaultDisplayNodes()
    # The filter creates a few scalars, don't show them by default, as they would be somewhat distracting
    outputModel.GetDisplayNode().SetScalarVisibility(False)

    stopTime = time.time()
    logging.info(
        'Processing completed in {0:.2f} seconds'.format(stopTime - startTime))
def computeStatistics(self, segmentID):
    """Compute all requested statistics for the segment with the given ID.

    Thresholds the segment's binary labelmap to a clean 0/1 image, accumulates
    voxel-count/volume statistics, and optionally computes shape statistics via
    vtkITKLabelShapeStatistics. Returns a dict mapping statistic keys to values;
    an empty dict when nothing is requested or no valid labelmap is available.
    """
    import vtkSegmentationCorePython as vtkSegmentationCore
    requestedKeys = self.getRequestedKeys()
    segmentationNode = slicer.mrmlScene.GetNodeByID(
        self.getParameterNode().GetParameter("Segmentation"))
    if len(requestedKeys) == 0:
        return {}
    # Statistics require the binary labelmap representation to be present.
    containsLabelmapRepresentation = segmentationNode.GetSegmentation(
    ).ContainsRepresentation(
        vtkSegmentationCore.vtkSegmentationConverter.
        GetSegmentationBinaryLabelmapRepresentationName())
    if not containsLabelmapRepresentation:
        return {}
    segmentLabelmap = slicer.vtkOrientedImageData()
    segmentationNode.GetBinaryLabelmapRepresentation(
        segmentID, segmentLabelmap)
    if (not segmentLabelmap or not segmentLabelmap.GetPointData()
            or not segmentLabelmap.GetPointData().GetScalars()):
        # No input label data
        return {}
    # We need to know exactly the value of the segment voxels, apply threshold
    # to force the selected label value (voxels > 0 become labelValue).
    labelValue = 1
    backgroundValue = 0
    thresh = vtk.vtkImageThreshold()
    thresh.SetInputData(segmentLabelmap)
    thresh.ThresholdByLower(0)
    thresh.SetInValue(backgroundValue)
    thresh.SetOutValue(labelValue)
    thresh.SetOutputScalarType(vtk.VTK_UNSIGNED_CHAR)
    thresh.Update()
    # Use binary labelmap as a stencil
    stencil = vtk.vtkImageToImageStencil()
    stencil.SetInputData(thresh.GetOutput())
    stencil.ThresholdByUpper(labelValue)
    stencil.Update()
    stat = vtk.vtkImageAccumulate()
    stat.SetInputData(thresh.GetOutput())
    stat.SetStencilData(stencil.GetOutput())
    stat.Update()
    # Add data to statistics list
    cubicMMPerVoxel = reduce(lambda x, y: x * y,
                             segmentLabelmap.GetSpacing())
    ccPerCubicMM = 0.001
    stats = {}
    if "voxel_count" in requestedKeys:
        stats["voxel_count"] = stat.GetVoxelCount()
    if "volume_mm3" in requestedKeys:
        stats["volume_mm3"] = stat.GetVoxelCount() * cubicMMPerVoxel
    if "volume_cm3" in requestedKeys:
        stats["volume_cm3"] = stat.GetVoxelCount(
        ) * cubicMMPerVoxel * ccPerCubicMM
    # Shape statistics are only computed if at least one shape key was requested.
    calculateShapeStats = False
    for shapeKey in self.shapeKeys:
        if shapeKey in requestedKeys:
            calculateShapeStats = True
            break
    if calculateShapeStats:
        directions = vtk.vtkMatrix4x4()
        segmentLabelmap.GetDirectionMatrix(directions)
        # Remove oriented bounding box from requested keys and replace with individual keys
        requestedOptions = requestedKeys
        statFilterOptions = self.shapeKeys
        calculateOBB = ("obb_diameter_mm" in requestedKeys
                        or "obb_origin_ras" in requestedKeys
                        or "obb_direction_ras_x" in requestedKeys
                        or "obb_direction_ras_y" in requestedKeys
                        or "obb_direction_ras_z" in requestedKeys)
        if calculateOBB:
            # Replace the individual OBB keys with the single filter option
            # "oriented_bounding_box" that the ITK filter understands.
            temp = statFilterOptions
            statFilterOptions = []
            for option in temp:
                if not option in self.obbKeys:
                    statFilterOptions.append(option)
            statFilterOptions.append("oriented_bounding_box")
            temp = requestedOptions
            requestedOptions = []
            for option in temp:
                if not option in self.obbKeys:
                    requestedOptions.append(option)
            requestedOptions.append("oriented_bounding_box")
        shapeStat = vtkITK.vtkITKLabelShapeStatistics()
        shapeStat.SetInputData(thresh.GetOutput())
        shapeStat.SetDirections(directions)
        for shapeKey in statFilterOptions:
            shapeStat.SetComputeShapeStatistic(
                self.keyToShapeStatisticNames[shapeKey],
                shapeKey in requestedOptions)
        shapeStat.Update()
        # If segmentation node is transformed, apply that transform to get RAS coordinates
        transformSegmentToRas = vtk.vtkGeneralTransform()
        slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(
            segmentationNode.GetParentTransformNode(), None,
            transformSegmentToRas)
        statTable = shapeStat.GetOutput()
        # Point-valued results (centroid, OBB origin) are transformed to RAS;
        # direction vectors use TransformVectorAtPoint at the OBB origin.
        if "centroid_ras" in requestedKeys:
            centroidRAS = [0, 0, 0]
            centroidArray = statTable.GetColumnByName(
                self.keyToShapeStatisticNames["centroid_ras"])
            centroid = centroidArray.GetTuple(0)
            transformSegmentToRas.TransformPoint(centroid, centroidRAS)
            stats["centroid_ras"] = centroidRAS
        if "roundness" in requestedKeys:
            roundnessArray = statTable.GetColumnByName(
                self.keyToShapeStatisticNames["roundness"])
            roundness = roundnessArray.GetTuple(0)[0]
            stats["roundness"] = roundness
        if "flatness" in requestedKeys:
            flatnessArray = statTable.GetColumnByName(
                self.keyToShapeStatisticNames["flatness"])
            flatness = flatnessArray.GetTuple(0)[0]
            stats["flatness"] = flatness
        if "feret_diameter_mm" in requestedKeys:
            feretDiameterArray = statTable.GetColumnByName(
                self.keyToShapeStatisticNames["feret_diameter_mm"])
            feretDiameter = feretDiameterArray.GetTuple(0)[0]
            stats["feret_diameter_mm"] = feretDiameter
        if "surface_area_mm2" in requestedKeys:
            perimeterArray = statTable.GetColumnByName(
                self.keyToShapeStatisticNames["surface_area_mm2"])
            perimeter = perimeterArray.GetTuple(0)[0]
            stats["surface_area_mm2"] = perimeter
        if "obb_origin_ras" in requestedKeys:
            obbOriginRAS = [0, 0, 0]
            obbOriginArray = statTable.GetColumnByName(
                self.keyToShapeStatisticNames["obb_origin_ras"])
            obbOrigin = obbOriginArray.GetTuple(0)
            transformSegmentToRas.TransformPoint(obbOrigin, obbOriginRAS)
            stats["obb_origin_ras"] = obbOriginRAS
        if "obb_diameter_mm" in requestedKeys:
            obbDiameterArray = statTable.GetColumnByName(
                self.keyToShapeStatisticNames["obb_diameter_mm"])
            obbDiameterMM = list(obbDiameterArray.GetTuple(0))
            stats["obb_diameter_mm"] = obbDiameterMM
        if "obb_direction_ras_x" in requestedKeys:
            obbOriginArray = statTable.GetColumnByName(
                self.keyToShapeStatisticNames["obb_origin_ras"])
            obbOrigin = obbOriginArray.GetTuple(0)
            obbDirectionXArray = statTable.GetColumnByName(
                self.keyToShapeStatisticNames["obb_direction_ras_x"])
            obbDirectionX = list(obbDirectionXArray.GetTuple(0))
            transformSegmentToRas.TransformVectorAtPoint(
                obbOrigin, obbDirectionX, obbDirectionX)
            stats["obb_direction_ras_x"] = obbDirectionX
        if "obb_direction_ras_y" in requestedKeys:
            obbOriginArray = statTable.GetColumnByName(
                self.keyToShapeStatisticNames["obb_origin_ras"])
            obbOrigin = obbOriginArray.GetTuple(0)
            obbDirectionYArray = statTable.GetColumnByName(
                self.keyToShapeStatisticNames["obb_direction_ras_y"])
            obbDirectionY = list(obbDirectionYArray.GetTuple(0))
            transformSegmentToRas.TransformVectorAtPoint(
                obbOrigin, obbDirectionY, obbDirectionY)
            stats["obb_direction_ras_y"] = obbDirectionY
        if "obb_direction_ras_z" in requestedKeys:
            obbOriginArray = statTable.GetColumnByName(
                self.keyToShapeStatisticNames["obb_origin_ras"])
            obbOrigin = obbOriginArray.GetTuple(0)
            obbDirectionZArray = statTable.GetColumnByName(
                self.keyToShapeStatisticNames["obb_direction_ras_z"])
            obbDirectionZ = list(obbDirectionZArray.GetTuple(0))
            transformSegmentToRas.TransformVectorAtPoint(
                obbOrigin, obbDirectionZ, obbDirectionZ)
            stats["obb_direction_ras_z"] = obbDirectionZ
    return stats
# read it back tpsReader = vtk.vtkMNITransformReader() if (tpsReader.CanReadFile(filename) != 0): tpsReader.SetFileName(filename) thinPlate = tpsReader.GetTransform() # make a linear transform linearTransform = vtk.vtkTransform() linearTransform.PostMultiply() linearTransform.Translate(-127.5, -127.5, 0) linearTransform.RotateZ(30) linearTransform.Translate(+127.5, +127.5, 0) # remove the linear part of the thin plate tpsGeneral = vtk.vtkGeneralTransform() tpsGeneral.SetInput(thinPlate) tpsGeneral.PreMultiply() tpsGeneral.Concatenate(linearTransform.GetInverse().GetMatrix()) # convert the thin plate spline into a grid transformToGrid = vtk.vtkTransformToGrid() transformToGrid.SetInput(tpsGeneral) transformToGrid.SetGridSpacing(16, 16, 1) transformToGrid.SetGridOrigin(-64.5, -64.5, 0) transformToGrid.SetGridExtent(0, 24, 0, 24, 0, 0) transformToGrid.Update() gridTransform = vtk.vtkGridTransform() gridTransform.SetDisplacementGridConnection( transformToGrid.GetOutputPort())
def cornerstoneannotationsToSlicerSegments(self, masterVolume, auth_token,
                                           jobid, projectjson, useremail):
    """Fetch cornerstone freehand annotations for every instance of the first
    series in projectjson and convert them into segments of a new
    vtkMRMLSegmentationNode in the Slicer scene.

    :param masterVolume: volume node the annotations are defined on
    :param auth_token: API token for the TrainingData service
    :param jobid: labeling job identifier
    :param projectjson: project description (series/instances and labeling tools)
    :param useremail: user identity passed to the API
    """
    print('cornerstoneannotationsToSlicerSegments()', flush=True)
    # voxels = slicer.util.arrayFromVolume(masterVolume)
    ijkToRasMatrix = vtk.vtkMatrix4x4()
    masterVolume.GetIJKToRASMatrix(ijkToRasMatrix)
    transformVolumeRasToRas = vtk.vtkGeneralTransform()
    slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(
        masterVolume.GetParentTransformNode(), None, transformVolumeRasToRas)
    tdioClientAPI = TDIOClientAPI()
    instanceindex = -1
    contoursmap = {}  # label -> list of contours (each a list of IJK points)
    colorsmap = {}    # label -> last RGB color seen for that label
    segmentsmap = {}  # segment name -> vtkSegment
    seriesjson = projectjson['images']['seriesList'][0]
    for instance in seriesjson['instanceList']:
        instanceindex = instanceindex + 1
        instanceid = instance['instanceId']
        response = tdioClientAPI.getcornerstoneannotation(
            'https://app.trainingdata.io', auth_token, jobid,
            seriesjson['seriesId'], instanceid, useremail)
        cannotations = response
        if len(cannotations) == 0:
            continue
        jsonobj = json.loads(cannotations[0]['jsonstring'])
        annotations = jsonobj['annotations']
        print('instanceid1', instanceid, len(annotations))
        for annotation in annotations:
            toolData = annotation['toolData']
            toolType = annotation['toolName']
            color = TDIOUtils.hex_to_rgb(annotation['selectedColor'])
            aclass = annotation['annotationClass']
            # Strip non-printable characters from the class name.
            label = ''.join(filter(lambda x: x in printable, aclass))
            for data in toolData:
                if data['color']:  # per-stroke color overrides annotation color
                    if len(data['color']) == 0:
                        continue
                    color = TDIOUtils.hex_to_rgb(data['color'])
                if toolType == 'freehand':
                    if label not in contoursmap:
                        contoursmap[label] = []
                    points = getPointsFromPolygon(data)
                    # points = [[10,10], [110,10], [110,110], [10, 110]]
                    contour = []
                    for p in points:
                        try:
                            # ijkToRas may raise for out-of-volume points;
                            # in that case the point is skipped.
                            ras = self.ijkToRas(
                                ijkToRasMatrix, transformVolumeRasToRas,
                                p[0], p[1], instanceindex)
                            # print('ras:', ras)
                            # BUGFIX: np.int was removed in NumPy 1.24;
                            # the builtin int is the equivalent dtype.
                            contour.append(
                                np.array([p[0], p[1], instanceindex],
                                         dtype=int))
                        except Exception as ex:
                            print(ex)
                    contoursmap[label].append(contour)
                    colorsmap[label] = color
    segmentationNode = slicer.mrmlScene.AddNewNodeByClass(
        "vtkMRMLSegmentationNode")
    segmentationNode.CreateDefaultDisplayNodes()
    segmentationNode.SetReferenceImageGeometryParameterFromVolumeNode(
        masterVolume)  # I tried with and without this line
    # Pre-create one segment per labeling tool declared in the project.
    labelinginterface = projectjson['labelinginterface']
    if 'tools' in labelinginterface and len(
            labelinginterface['tools']) > 0:
        for tool in labelinginterface['tools']:
            name = tool['name']
            color = TDIOUtils.hex_to_rgb(tool['color'])
            print(color)
            segment = slicer.vtkSegment()
            segment.SetName(name)
            segment.SetColor(color)
            segmentsmap[name] = segment
    # print(colorsmap, contoursmap)
    for key in contoursmap:
        segmentName = key
        segment = segmentsmap[key]
        contours = contoursmap[key]
        self.addContoursToSegment(ijkToRasMatrix, transformVolumeRasToRas,
                                  segment, contours, segmentName)
    for key in segmentsmap:
        segment = segmentsmap[key]
        segmentationNode.GetSegmentation().AddSegment(segment)
def write_transforms_to_itk_format(transform_list, outdir, subject_ids=None):
    """Write VTK affine or spline transforms to ITK 4 text file formats.

    Input transforms are in VTK RAS space and are forward transforms. Output
    transforms are in LPS space and are the corresponding inverse transforms,
    according to the conventions for these file formats and for resampling
    images. The affine transform is straightforward. The spline transform
    file format is a list of displacements that have to be in the same order
    as they are stored in ITK C code. In the nonrigid case, we also output a
    vtk native spline transform file using MNI format.

    :param transform_list: list of vtkTransform / vtkThinPlateSplineTransform /
        vtkBSplineTransform objects (forward transforms in RAS)
    :param outdir: output directory
    :param subject_ids: optional list of ids used in output file names
    :return: list of .tfm file names written
    """
    # NOTE: originally Python 2 code; print statements and '/' integer
    # divisions were converted for Python 3 ('//' keeps grid indices integral).
    idx = 0
    tx_fnames = list()
    for tx in transform_list:
        # Save the vtk transform itself to an MNI .xfm text file
        # (the MNI writer does not handle vtkBSplineTransform).
        if tx.GetClassName() != 'vtkBSplineTransform':
            writer = vtk.vtkMNITransformWriter()
            writer.AddTransform(tx)
            if subject_ids is not None:
                fname = 'vtk_txform_' + str(subject_ids[idx]) + '.xfm'
            else:
                fname = 'vtk_txform_{0:05d}.xfm'.format(idx)
            writer.SetFileName(os.path.join(outdir, fname))
            writer.Write()

        # file name for itk transform written below
        if subject_ids is not None:
            fname = 'itk_txform_' + str(subject_ids[idx]) + '.tfm'
        else:
            fname = 'itk_txform_{0:05d}.tfm'.format(idx)
        fname = os.path.join(outdir, fname)
        tx_fnames.append(fname)

        # Save the itk transform as the inverse of this transform (resampling
        # transform) and in LPS. This shows the same transform in the Slicer
        # GUI as the vtk transform stored in the .xfm file above.
        # To apply our transform to resample a volume in LPS:
        # convert to RAS, use inverse of transform to resample, convert back to LPS.
        if tx.GetClassName() == 'vtkThinPlateSplineTransform' or tx.GetClassName() == 'vtkBSplineTransform':
            # Deep copy to avoid modifying input transform that will be applied to polydata
            if tx.GetClassName() == 'vtkThinPlateSplineTransform':
                tps = vtk.vtkThinPlateSplineTransform()
            else:
                tps = vtk.vtkBSplineTransform()
            tps.DeepCopy(tx)

            # invert to get the transform suitable for resampling an image
            tps.Inverse()

            # convert the inverse spline transform from RAS to LPS
            ras_2_lps = vtk.vtkTransform()
            ras_2_lps.Scale(-1, -1, 1)
            lps_2_ras = vtk.vtkTransform()
            lps_2_ras.Scale(-1, -1, 1)
            spline_inverse_lps = vtk.vtkGeneralTransform()
            spline_inverse_lps.Concatenate(lps_2_ras)
            spline_inverse_lps.Concatenate(tps)
            spline_inverse_lps.Concatenate(ras_2_lps)

            # Loop through LPS space and record the effect of the inverse
            # transform on each grid point (what vtkTransformToGrid does, but
            # in LPS). The file format has no inverse flag, so the inverse
            # must be stored densely; this high-resolution grid keeps
            # numerical differences under .1mm in tests, at the cost of
            # large (~47M) files.
            grid_size = [105, 105, 105]
            grid_spacing = 2

            # '//' keeps extents integral so range() below accepts them.
            extent_0 = [-(grid_size[0] - 1) // 2,
                        -(grid_size[1] - 1) // 2,
                        -(grid_size[2] - 1) // 2]
            extent_1 = [(grid_size[0] - 1) // 2,
                        (grid_size[1] - 1) // 2,
                        (grid_size[2] - 1) // 2]

            origin = -grid_spacing * (numpy.array(extent_1) - numpy.array(extent_0)) / 2.0

            grid_points_LPS = list()
            # ordering of grid points must match itk-style array order for images
            for s in range(extent_0[0], extent_1[0] + 1):
                for p in range(extent_0[1], extent_1[1] + 1):
                    for l in range(extent_0[2], extent_1[2] + 1):
                        grid_points_LPS.append(
                            [l * grid_spacing, p * grid_spacing, s * grid_spacing])

            print("LPS grid for storing transform:",
                  grid_points_LPS[0], grid_points_LPS[-1], grid_spacing)

            # Transform all grid points at once, then compute displacements.
            lps_points = vtk.vtkPoints()
            lps_points2 = vtk.vtkPoints()
            for gp_lps in grid_points_LPS:
                lps_points.InsertNextPoint(gp_lps[0], gp_lps[1], gp_lps[2])
            spline_inverse_lps.TransformPoints(lps_points, lps_points2)

            displacements_LPS = list()
            pidx = 0
            for gp_lps in grid_points_LPS:
                pt = lps_points2.GetPoint(pidx)
                diff_lps = [pt[0] - gp_lps[0], pt[1] - gp_lps[1], pt[2] - gp_lps[2]]
                pidx += 1
                displacements_LPS.append(diff_lps)

            # save the points and displacement vectors in ITK format.
            with open(fname, 'w') as f:
                f.write('#Insight Transform File V1.0\n')
                f.write('# Transform 0\n')
                # ITK version 4 transform type (version 3 used
                # BSplineDeformableTransform_double_3_3 with an additive affine).
                f.write('Transform: BSplineTransform_double_3_3\n')
                f.write('Parameters: ')
                # The BSpline parameters are 3D displacement vectors for each
                # grid node: one block of dx for all nodes, then dy, then dz.
                for block in [0, 1, 2]:
                    for diff in displacements_LPS:
                        f.write('{0} '.format(diff[block]))
                # FixedParameters: size x3, origin x3, spacing x3, then
                # direction cosines (identity).
                f.write('\nFixedParameters:')
                f.write(' {0}'.format(grid_size[0]))
                f.write(' {0}'.format(grid_size[1]))
                f.write(' {0}'.format(grid_size[2]))
                f.write(' {0}'.format(origin[0]))
                f.write(' {0}'.format(origin[1]))
                f.write(' {0}'.format(origin[2]))
                f.write(' {0} {0} {0}'.format(grid_spacing))
                f.write(' 1 0 0 0 1 0 0 0 1\n')
        else:
            # Affine case: invert, conjugate RAS->LPS, and write the 3x3
            # matrix followed by the translation.
            tx_inverse = vtk.vtkTransform()
            tx_inverse.DeepCopy(tx)
            tx_inverse.Inverse()
            ras_2_lps = vtk.vtkTransform()
            ras_2_lps.Scale(-1, -1, 1)
            lps_2_ras = vtk.vtkTransform()
            lps_2_ras.Scale(-1, -1, 1)
            tx2 = vtk.vtkTransform()
            tx2.Concatenate(lps_2_ras)
            tx2.Concatenate(tx_inverse)
            tx2.Concatenate(ras_2_lps)
            three_by_three = list()
            translation = list()
            for i in range(0, 3):
                for j in range(0, 3):
                    three_by_three.append(tx2.GetMatrix().GetElement(i, j))
            translation.append(tx2.GetMatrix().GetElement(0, 3))
            translation.append(tx2.GetMatrix().GetElement(1, 3))
            translation.append(tx2.GetMatrix().GetElement(2, 3))
            with open(fname, 'w') as f:
                f.write('#Insight Transform File V1.0\n')
                f.write('# Transform 0\n')
                f.write('Transform: AffineTransform_double_3_3\n')
                f.write('Parameters: ')
                for el in three_by_three:
                    f.write('{0} '.format(el))
                for el in translation:
                    f.write('{0} '.format(el))
                f.write('\nFixedParameters: 0 0 0\n')
        idx += 1
    return tx_fnames
def run(self, inputModel, outputModel, annotationROI):
    """Crop inputModel with the box defined by annotationROI.

    Both inputs are brought into world coordinates, intersected, and the
    result is transformed back into inputModel's local coordinate system.
    If outputModel is None a new model node named '<input>_cropped' is created.
    :return: True on completion
    """
    logging.info('Processing started')

    bounds = [0] * 6
    annotationROI.GetBounds(bounds)
    cube = vtk.vtkCubeSource()
    cube.SetBounds(bounds)

    # Bring the ROI cube into world coordinates if the ROI is transformed.
    triangleCube = vtk.vtkTriangleFilter()
    if annotationROI.GetTransformNodeID():
        trfNode = slicer.mrmlScene.GetNodeByID(
            annotationROI.GetTransformNodeID())
        trfFromWorld = vtk.vtkGeneralTransform()
        trfNode.GetTransformToWorld(trfFromWorld)
        transformFilter = vtk.vtkTransformPolyDataFilter()
        transformFilter.SetTransform(trfFromWorld)
        transformFilter.SetInputConnection(cube.GetOutputPort())
        triangleCube.SetInputConnection(transformFilter.GetOutputPort())
    else:
        triangleCube.SetInputConnection(cube.GetOutputPort())

    # Bring the input model into world coordinates as well.
    triangleInput = vtk.vtkTriangleFilter()
    if inputModel.GetTransformNodeID():
        trfNode = slicer.mrmlScene.GetNodeByID(
            inputModel.GetTransformNodeID())
        trfFromWorld = vtk.vtkGeneralTransform()
        trfNode.GetTransformToWorld(trfFromWorld)
        transformFilter = vtk.vtkTransformPolyDataFilter()
        transformFilter.SetTransform(trfFromWorld)
        transformFilter.SetInputConnection(
            inputModel.GetPolyDataConnection())
        triangleInput.SetInputConnection(transformFilter.GetOutputPort())
    else:
        triangleInput.SetInputConnection(
            inputModel.GetPolyDataConnection())

    boolean = vtk.vtkBooleanOperationPolyDataFilter()
    boolean.SetInputConnection(0, triangleInput.GetOutputPort())
    boolean.SetInputConnection(1, triangleCube.GetOutputPort())
    boolean.SetOperationToIntersection()

    # Transform the result back into the input model's local coordinate
    # system, so the output can be placed under the same parent transform.
    output = vtk.vtkPolyData()
    if inputModel.GetTransformNodeID():
        trfNode = slicer.mrmlScene.GetNodeByID(
            inputModel.GetTransformNodeID())
        trfFromWorld = vtk.vtkGeneralTransform()
        trfNode.GetTransformFromWorld(trfFromWorld)
        transformFilter = vtk.vtkTransformPolyDataFilter()
        transformFilter.SetTransform(trfFromWorld)
        transformFilter.SetInputConnection(boolean.GetOutputPort())
        transformFilter.Update()
        output.DeepCopy(transformFilter.GetOutput())
    else:
        boolean.Update()
        output.DeepCopy(boolean.GetOutput())

    if outputModel is None:
        modelsLogic = slicer.modules.models.logic()
        outputModel = modelsLogic.AddModel(output)
        outputModel.GetDisplayNode().SetSliceIntersectionVisibility(True)
        outputModel.SetName(inputModel.GetName() + '_cropped')
    else:
        # BUGFIX: previously called surface.GetOutput() but 'surface' was
        # never defined (NameError); the computed polydata is 'output'.
        outputModel.SetAndObservePolyData(output)
        if outputModel.GetDisplayNode() is None:
            outputModel.CreateDefaultDisplayNodes()
        outputModel.GetDisplayNode().SetSliceIntersectionVisibility(True)

    #outputModel.SetAndObserveTransformNodeID('')
    if inputModel.GetTransformNodeID():
        outputModel.SetAndObserveTransformNodeID(
            inputModel.GetTransformNodeID())

    # copy attributes
    names = vtk.vtkStringArray()
    inputModel.GetAttributeNames(names)
    for n in range(names.GetNumberOfValues()):
        outputModel.SetAttribute(
            names.GetValue(n), inputModel.GetAttribute(names.GetValue(n)))

    logging.info('Processing completed')
    return True
def addGrayscaleVolumeStatistics(self):
    """Add grayscale-intensity statistics (voxel count, volume, min/max/mean/
    stdev) for every segment in self.statistics["SegmentIDs"], computed over
    self.grayscaleNode within each segment's resampled binary labelmap.

    Results are written into self.statistics under (segmentID, "GS ...") keys.
    Returns early if no binary labelmap representation or grayscale image exists.
    """
    import vtkSegmentationCorePython as vtkSegmentationCore
    containsLabelmapRepresentation = self.segmentationNode.GetSegmentation().ContainsRepresentation(
        vtkSegmentationCore.vtkSegmentationConverter.GetSegmentationBinaryLabelmapRepresentationName())
    if not containsLabelmapRepresentation:
        return
    if self.grayscaleNode is None or self.grayscaleNode.GetImageData() is None:
        return
    # Get geometry of grayscale volume node as oriented image data
    referenceGeometry_Reference = vtkSegmentationCore.vtkOrientedImageData()  # reference geometry in reference node coordinate system
    referenceGeometry_Reference.SetExtent(self.grayscaleNode.GetImageData().GetExtent())
    ijkToRasMatrix = vtk.vtkMatrix4x4()
    self.grayscaleNode.GetIJKToRASMatrix(ijkToRasMatrix)
    referenceGeometry_Reference.SetGeometryFromImageToWorldMatrix(ijkToRasMatrix)
    # Get transform between grayscale volume and segmentation
    segmentationToReferenceGeometryTransform = vtk.vtkGeneralTransform()
    slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(self.segmentationNode.GetParentTransformNode(),
        self.grayscaleNode.GetParentTransformNode(), segmentationToReferenceGeometryTransform)
    cubicMMPerVoxel = reduce(lambda x,y: x*y, referenceGeometry_Reference.GetSpacing())
    ccPerCubicMM = 0.001
    for segmentID in self.statistics["SegmentIDs"]:
        segment = self.segmentationNode.GetSegmentation().GetSegment(segmentID)
        segmentLabelmap = segment.GetRepresentation(vtkSegmentationCore.vtkSegmentationConverter.GetSegmentationBinaryLabelmapRepresentationName())
        # Resample the segment labelmap onto the grayscale volume's grid.
        segmentLabelmap_Reference = vtkSegmentationCore.vtkOrientedImageData()
        vtkSegmentationCore.vtkOrientedImageDataResample.ResampleOrientedImageToReferenceOrientedImage(
            segmentLabelmap, referenceGeometry_Reference, segmentLabelmap_Reference,
            False, # nearest neighbor interpolation
            False, # no padding
            segmentationToReferenceGeometryTransform)
        # We need to know exactly the value of the segment voxels, apply
        # threshold to force the selected label value (voxels > 0 become 1).
        labelValue = 1
        backgroundValue = 0
        thresh = vtk.vtkImageThreshold()
        thresh.SetInputData(segmentLabelmap_Reference)
        thresh.ThresholdByLower(0)
        thresh.SetInValue(backgroundValue)
        thresh.SetOutValue(labelValue)
        thresh.SetOutputScalarType(vtk.VTK_UNSIGNED_CHAR)
        thresh.Update()
        # Use binary labelmap as a stencil
        stencil = vtk.vtkImageToImageStencil()
        stencil.SetInputData(thresh.GetOutput())
        stencil.ThresholdByUpper(labelValue)
        stencil.Update()
        # Accumulate grayscale statistics only inside the stencil.
        stat = vtk.vtkImageAccumulate()
        stat.SetInputData(self.grayscaleNode.GetImageData())
        stat.SetStencilData(stencil.GetOutput())
        stat.Update()
        # Add data to statistics list
        self.statistics[segmentID,"GS voxel count"] = stat.GetVoxelCount()
        self.statistics[segmentID,"GS volume mm3"] = stat.GetVoxelCount() * cubicMMPerVoxel
        self.statistics[segmentID,"GS volume cc"] = stat.GetVoxelCount() * cubicMMPerVoxel * ccPerCubicMM
        # Intensity statistics are undefined for empty segments.
        if stat.GetVoxelCount()>0:
            self.statistics[segmentID,"GS min"] = stat.GetMin()[0]
            self.statistics[segmentID,"GS max"] = stat.GetMax()[0]
            self.statistics[segmentID,"GS mean"] = stat.GetMean()[0]
            self.statistics[segmentID,"GS stdev"] = stat.GetStandardDeviation()[0]
def updateOutputTable(self, inputVolume, inputCurve, outputTable, lineResolution):
    """Sample inputVolume intensities along inputCurve and fill outputTable
    with distance, proportional (%) distance, and intensity columns.

    Clears the table and returns early when inputs are missing or the curve
    has fewer than two defined control points.
    """
    if inputCurve is None or inputVolume is None or outputTable is None:
        return
    if inputCurve.GetNumberOfDefinedControlPoints() < 2:
        outputTable.GetTable().SetNumberOfRows(0)
        return
    curvePoints_RAS = inputCurve.GetCurvePointsWorld()
    closedCurve = inputCurve.IsA('vtkMRMLClosedCurveNode')
    curveLengthMm = slicer.vtkMRMLMarkupsCurveNode.GetCurveLength(curvePoints_RAS, closedCurve)
    # Need to get the start/end point of the line in the IJK coordinate system
    # as VTK filters cannot take into account direction cosines
    # We transform the curve points from RAS coordinate system (instead of directly from the inputCurve coordinate system)
    # to make sure the curve is transformed to RAS exactly the same way as it is done for display.
    inputVolumeToIJK = vtk.vtkMatrix4x4()
    inputVolume.GetRASToIJKMatrix(inputVolumeToIJK)
    rasToInputVolumeTransform = vtk.vtkGeneralTransform()
    slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(None,
        inputVolume.GetParentTransformNode(), rasToInputVolumeTransform)
    rasToIJKTransform = vtk.vtkGeneralTransform()
    # rasToIJKTransform = inputVolumeToIJK * rasToInputVolumeTransform
    rasToIJKTransform.Concatenate(inputVolumeToIJK)
    rasToIJKTransform.Concatenate(rasToInputVolumeTransform)
    curvePoly_RAS = vtk.vtkPolyData()
    curvePoly_RAS.SetPoints(curvePoints_RAS)
    transformRasToIjk = vtk.vtkTransformPolyDataFilter()
    transformRasToIjk.SetInputData(curvePoly_RAS)
    transformRasToIjk.SetTransform(rasToIJKTransform)
    transformRasToIjk.Update()
    curvePoly_IJK = transformRasToIjk.GetOutput()
    curvePoints_IJK = curvePoly_IJK.GetPoints()
    if curvePoints_IJK.GetNumberOfPoints() < 2:
        # We checked before that there are at least two control points, so it should not happen
        raise ValueError()
    startPointIndex = 0
    endPointIndex = curvePoints_IJK.GetNumberOfPoints() - 1
    lineStartPoint_IJK = curvePoints_IJK.GetPoint(startPointIndex)
    lineEndPoint_IJK = curvePoints_IJK.GetPoint(endPointIndex)
    # Special case: single-slice volume
    # vtkProbeFilter treats vtkImageData as a general data set and it considers its bounds to end
    # in the middle of edge voxels. This makes single-slice volumes to have zero thickness, which
    # can be easily missed by a line that that is drawn on the plane (e.g., they happen to be
    # extremely on the same side of the plane, very slightly off, due to rounding errors).
    # We move the start/end points very close to the plane and force them to be on opposite sides of the plane.
    # NOTE(review): GetPoint() may return an immutable tuple in VTK Python;
    # the item assignments below presumably rely on it being mutable — verify.
    dims = inputVolume.GetImageData().GetDimensions()
    for axisIndex in range(3):
        if dims[axisIndex] == 1:
            if abs(lineStartPoint_IJK[axisIndex]) < 0.5 and abs(lineEndPoint_IJK[axisIndex]) < 0.5:
                # both points are inside the volume plane
                # keep their distance the same (to keep the overall length of the line the same)
                # but make sure the points are on the opposite side of the plane (to ensure probe filter
                # considers the line crossing the image plane)
                pointDistance = max(abs(lineStartPoint_IJK[axisIndex]-lineEndPoint_IJK[axisIndex]), 1e-6)
                lineStartPoint_IJK[axisIndex] = -0.5 * pointDistance
                lineEndPoint_IJK[axisIndex] = 0.5 * pointDistance
                curvePoints_IJK.SetPoint(startPointIndex, lineStartPoint_IJK)
                curvePoints_IJK.SetPoint(endPointIndex, lineEndPoint_IJK)
    # Resample the curve at uniform arc-length spacing in IJK space.
    sampledCurvePoints_IJK = vtk.vtkPoints()
    samplingDistance = curveLengthMm / lineResolution
    slicer.vtkMRMLMarkupsCurveNode.ResamplePoints(curvePoints_IJK,
        sampledCurvePoints_IJK, samplingDistance, closedCurve)
    sampledCurvePoly_IJK = vtk.vtkPolyData()
    sampledCurvePoly_IJK.SetPoints(sampledCurvePoints_IJK)
    # Probe the volume intensities at the resampled curve points.
    probeFilter = vtk.vtkProbeFilter()
    probeFilter.SetInputData(sampledCurvePoly_IJK)
    probeFilter.SetSourceData(inputVolume.GetImageData())
    probeFilter.ComputeToleranceOff()
    probeFilter.Update()
    probedPoints = probeFilter.GetOutput()
    # Create arrays of data
    distanceArray = self.getArrayFromTable(outputTable, DISTANCE_ARRAY_NAME)
    relativeDistanceArray = self.getArrayFromTable(outputTable, PROPORTIONAL_DISTANCE_ARRAY_NAME)
    intensityArray = self.getArrayFromTable(outputTable, INTENSITY_ARRAY_NAME)
    outputTable.GetTable().SetNumberOfRows(probedPoints.GetNumberOfPoints())
    x = range(0, probedPoints.GetNumberOfPoints())
    xStep = curveLengthMm/(probedPoints.GetNumberOfPoints()-1)
    probedPointScalars = probedPoints.GetPointData().GetScalars()
    # Total distance covered by the sampled points, used for the % column.
    xLength = x[len(x) - 1] * xStep
    for i in range(len(x)):
        distanceArray.SetValue(i, x[i]*xStep)
        relativeDistanceArray.SetValue(i, (x[i]*xStep / xLength) * 100)
        intensityArray.SetValue(i, probedPointScalars.GetTuple(i)[0])
    distanceArray.Modified()
    relativeDistanceArray.Modified()
    intensityArray.Modified()
    outputTable.GetTable().Modified()
def addGrayscaleVolumeStatistics(self):
    """Compute grayscale-volume intensity statistics for each segment.

    For every segment ID in self.statistics["SegmentIDs"], resamples the
    segment's binary labelmap onto the grayscale volume geometry, builds an
    image stencil from it, and accumulates voxel count, volume (mm3 and cc)
    and - for non-empty segments - min/max/mean/stdev of the grayscale
    intensities. Results are written into self.statistics[(segmentID, key)].
    Returns early (computing nothing) when the segmentation has no binary
    labelmap representation or the grayscale node has no image data.
    """
    import vtkSegmentationCorePython as vtkSegmentationCore
    # Bail out unless the segmentation has a binary labelmap representation.
    containsLabelmapRepresentation = self.segmentationNode.GetSegmentation(
    ).ContainsRepresentation(
        vtkSegmentationCore.vtkSegmentationConverter.
        GetSegmentationBinaryLabelmapRepresentationName())
    if not containsLabelmapRepresentation:
        return
    # Bail out when there is no grayscale image to sample.
    if self.grayscaleNode is None or self.grayscaleNode.GetImageData(
    ) is None:
        return
    # Get geometry of grayscale volume node as oriented image data
    referenceGeometry_Reference = vtkSegmentationCore.vtkOrientedImageData(
    )  # reference geometry in reference node coordinate system
    referenceGeometry_Reference.SetExtent(
        self.grayscaleNode.GetImageData().GetExtent())
    ijkToRasMatrix = vtk.vtkMatrix4x4()
    self.grayscaleNode.GetIJKToRASMatrix(ijkToRasMatrix)
    referenceGeometry_Reference.SetGeometryFromImageToWorldMatrix(
        ijkToRasMatrix)
    # Get transform between grayscale volume and segmentation
    segmentationToReferenceGeometryTransform = vtk.vtkGeneralTransform()
    slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(
        self.segmentationNode.GetParentTransformNode(),
        self.grayscaleNode.GetParentTransformNode(),
        segmentationToReferenceGeometryTransform)
    # Voxel volume in mm3 = product of the three spacings.
    # NOTE(review): relies on builtin `reduce` (Python 2); under Python 3
    # this needs functools.reduce - confirm the file-level imports.
    cubicMMPerVoxel = reduce(lambda x, y: x * y,
                             referenceGeometry_Reference.GetSpacing())
    ccPerCubicMM = 0.001
    for segmentID in self.statistics["SegmentIDs"]:
        segment = self.segmentationNode.GetSegmentation().GetSegment(
            segmentID)
        segmentLabelmap = segment.GetRepresentation(
            vtkSegmentationCore.vtkSegmentationConverter.
            GetSegmentationBinaryLabelmapRepresentationName())
        # Resample this segment's labelmap into the grayscale volume geometry.
        segmentLabelmap_Reference = vtkSegmentationCore.vtkOrientedImageData(
        )
        vtkSegmentationCore.vtkOrientedImageDataResample.ResampleOrientedImageToReferenceOrientedImage(
            segmentLabelmap, referenceGeometry_Reference,
            segmentLabelmap_Reference,
            False,  # nearest neighbor interpolation
            False,  # no padding
            segmentationToReferenceGeometryTransform)
        # We need to know exactly the value of the segment voxels, apply
        # threshold to force the selected label value
        labelValue = 1
        backgroundValue = 0
        thresh = vtk.vtkImageThreshold()
        thresh.SetInputData(segmentLabelmap_Reference)
        thresh.ThresholdByLower(0)
        thresh.SetInValue(backgroundValue)
        thresh.SetOutValue(labelValue)
        thresh.SetOutputScalarType(vtk.VTK_UNSIGNED_CHAR)
        thresh.Update()
        # Use binary labelmap as a stencil
        stencil = vtk.vtkImageToImageStencil()
        stencil.SetInputData(thresh.GetOutput())
        stencil.ThresholdByUpper(labelValue)
        stencil.Update()
        # Accumulate grayscale statistics within the stenciled region.
        stat = vtk.vtkImageAccumulate()
        stat.SetInputData(self.grayscaleNode.GetImageData())
        stat.SetStencilData(stencil.GetOutput())
        stat.Update()
        # Add data to statistics list
        self.statistics[segmentID, "GS voxel count"] = stat.GetVoxelCount()
        self.statistics[
            segmentID,
            "GS volume mm3"] = stat.GetVoxelCount() * cubicMMPerVoxel
        self.statistics[segmentID, "GS volume cc"] = stat.GetVoxelCount(
        ) * cubicMMPerVoxel * ccPerCubicMM
        # Intensity stats are only meaningful for non-empty segments.
        if stat.GetVoxelCount() > 0:
            self.statistics[segmentID, "GS min"] = stat.GetMin()[0]
            self.statistics[segmentID, "GS max"] = stat.GetMax()[0]
            self.statistics[segmentID, "GS mean"] = stat.GetMean()[0]
            self.statistics[segmentID,
                            "GS stdev"] = stat.GetStandardDeviation()[0]
def run(self, segmentationNode, volumeNode, axis, tableNode, plotChartNode):
    """
    Compute per-slice cross-section areas for every visible segment and
    fill a table node and a plot chart node with the results.
    Can be used without GUI widget.
    :param segmentationNode: cross section area will be computed on this
    :param volumeNode: optional reference volume (to determine slice positions and directions)
    :param axis: axis name ("row", "column" or "slice") to compute cross section areas along
    :param tableNode: result table node (existing columns are removed)
    :param plotChartNode: result chart node (one series per segment is added)
    :raises ValueError: if segmentationNode is invalid, no segments are
        visible, or the axis name is not recognized
    """
    import numpy as np
    logging.info('Processing started')
    if not segmentationNode:
        raise ValueError("Segmentation node is invalid")
    # Collect visible segment IDs; hidden segments are excluded from the output.
    visibleSegmentIds = vtk.vtkStringArray()
    segmentationNode.GetDisplayNode().GetVisibleSegmentIDs(
        visibleSegmentIds)
    if visibleSegmentIds.GetNumberOfValues() == 0:
        raise ValueError(
            "SliceAreaPlot will not return any results: there are no visible segments"
        )
    # Map the axis name onto the corresponding IJK axis index.
    if axis == "row":
        axisIndex = 0
    elif axis == "column":
        axisIndex = 1
    elif axis == "slice":
        axisIndex = 2
    else:
        raise ValueError("Invalid axis name: " + axis)
    #
    # Make a table and set the first column as the slice number. This is used
    # as the X axis for plots.
    #
    tableNode.RemoveAllColumns()
    table = tableNode.GetTable()
    # Make a plot chart node. Plot series nodes will be added to this in the
    # loop below that iterates over each segment.
    plotChartNode.SetTitle('Segment cross-section area (' + axis + ')')
    plotChartNode.SetXAxisTitle(axis + " index")
    plotChartNode.SetYAxisTitle('Area in mm^2')  # TODO: use length unit
    #
    # For each segment, get the area and put it in the table in a new column.
    #
    try:
        # Create temporary volume node that each segment is exported into,
        # one at a time.
        # NOTE(review): if AddNewNodeByClass raised, the finally block below
        # would hit a NameError - assumed not to happen in practice.
        tempSegmentLabelmapVolumeNode = slicer.mrmlScene.AddNewNodeByClass(
            'vtkMRMLLabelMapVolumeNode', "SegmentCrossSectionAreaTemp")
        for segmentIndex in range(visibleSegmentIds.GetNumberOfValues()):
            segmentID = visibleSegmentIds.GetValue(segmentIndex)
            segmentList = vtk.vtkStringArray()
            segmentList.InsertNextValue(segmentID)
            # Export just this segment; skip it if the export fails.
            if not slicer.modules.segmentations.logic(
            ).ExportSegmentsToLabelmapNode(segmentationNode, segmentList,
                                           tempSegmentLabelmapVolumeNode,
                                           volumeNode):
                continue
            if segmentIndex == 0:
                # The first exported segment defines the shared slice
                # geometry used for the Index and Position columns.
                volumeExtents = tempSegmentLabelmapVolumeNode.GetImageData(
                ).GetExtent()
                numSlices = volumeExtents[axisIndex * 2 +
                                          1] - volumeExtents[axisIndex *
                                                             2] + 1
                # Start/end voxel positions (homogeneous coordinates): the
                # volume center on the two fixed axes, the extent limits
                # along the chosen axis.
                startPosition_Ijk = [
                    (volumeExtents[0] + volumeExtents[1]) /
                    2.0 if axisIndex != 0 else volumeExtents[0],
                    (volumeExtents[2] + volumeExtents[3]) /
                    2.0 if axisIndex != 1 else volumeExtents[2],
                    (volumeExtents[4] + volumeExtents[5]) /
                    2.0 if axisIndex != 2 else volumeExtents[4], 1
                ]
                endPosition_Ijk = [
                    (volumeExtents[0] + volumeExtents[1]) /
                    2.0 if axisIndex != 0 else volumeExtents[1],
                    (volumeExtents[2] + volumeExtents[3]) /
                    2.0 if axisIndex != 1 else volumeExtents[3],
                    (volumeExtents[4] + volumeExtents[5]) /
                    2.0 if axisIndex != 2 else volumeExtents[5], 1
                ]
                # Get physical coordinates from voxel coordinates
                volumeIjkToRas = vtk.vtkMatrix4x4()
                tempSegmentLabelmapVolumeNode.GetIJKToRASMatrix(
                    volumeIjkToRas)
                startPosition_Ras = np.array([0.0, 0.0, 0.0, 1.0])
                volumeIjkToRas.MultiplyPoint(startPosition_Ijk,
                                             startPosition_Ras)
                endPosition_Ras = np.array([0.0, 0.0, 0.0, 1.0])
                volumeIjkToRas.MultiplyPoint(endPosition_Ijk,
                                             endPosition_Ras)
                volumePositionIncrement_Ras = np.array([0, 0, 0, 1])
                if numSlices > 1:
                    volumePositionIncrement_Ras = (
                        endPosition_Ras - startPosition_Ras) / (numSlices -
                                                                1.0)
                # If volume node is transformed, apply that transform to get volume's RAS coordinates
                transformVolumeRasToRas = vtk.vtkGeneralTransform()
                slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(
                    tempSegmentLabelmapVolumeNode.GetParentTransformNode(),
                    None, transformVolumeRasToRas)
                sliceNumberArray = vtk.vtkIntArray()
                sliceNumberArray.SetName("Index")
                slicePositionArray = vtk.vtkFloatArray()
                slicePositionArray.SetNumberOfComponents(3)
                slicePositionArray.SetComponentName(0, "R")
                slicePositionArray.SetComponentName(1, "A")
                slicePositionArray.SetComponentName(2, "S")
                slicePositionArray.SetName("Position")
                # Record slice index and RAS center position for each slice.
                for i in range(numSlices):
                    sliceNumberArray.InsertNextValue(i)
                    point_VolumeRas = startPosition_Ras + i * volumePositionIncrement_Ras
                    point_Ras = transformVolumeRasToRas.TransformPoint(
                        point_VolumeRas[0:3])
                    slicePositionArray.InsertNextTuple3(*point_Ras)
                table.AddColumn(sliceNumberArray)
                tableNode.SetColumnDescription(sliceNumberArray.GetName(),
                                               "Index of " + axis)
                tableNode.SetColumnUnitLabel(sliceNumberArray.GetName(),
                                             "voxel")
                table.AddColumn(slicePositionArray)
                tableNode.SetColumnDescription(
                    slicePositionArray.GetName(),
                    "RAS position of slice center")
                tableNode.SetColumnUnitLabel(slicePositionArray.GetName(),
                                             "mm")  # TODO: use length unit
            narray = slicer.util.arrayFromVolume(
                tempSegmentLabelmapVolumeNode)
            areaArray = vtk.vtkFloatArray()
            segmentName = segmentationNode.GetSegmentation().GetSegment(
                segmentID).GetName()
            areaArray.SetName(segmentName)
            # Convert number of voxels to area in mm2: product of the two
            # in-plane spacings (total spacing product divided by the
            # stepped axis' spacing).
            spacing = tempSegmentLabelmapVolumeNode.GetSpacing()
            areaOfPixelMm2 = spacing[0] * spacing[1] * spacing[
                2] / spacing[axisIndex]
            # Count number of >0 voxels for each slice.
            # NOTE(review): the slicing assumes the numpy array from
            # arrayFromVolume is ordered [K, J, I] - confirm.
            for i in range(numSlices):
                if axisIndex == 0:
                    areaBySliceInVoxels = np.count_nonzero(narray[:, :, i])
                elif axisIndex == 1:
                    areaBySliceInVoxels = np.count_nonzero(narray[:, i, :])
                elif axisIndex == 2:
                    areaBySliceInVoxels = np.count_nonzero(narray[i, :, :])
                areaBySliceInMm2 = areaBySliceInVoxels * areaOfPixelMm2
                areaArray.InsertNextValue(areaBySliceInMm2)
            tableNode.AddColumn(areaArray)
            tableNode.SetColumnUnitLabel(areaArray.GetName(),
                                         "mm2")  # TODO: use length unit
            tableNode.SetColumnDescription(areaArray.GetName(),
                                           "Cross-section area")
            # Make a plot series node for this column.
            plotSeriesNode = slicer.mrmlScene.AddNewNodeByClass(
                "vtkMRMLPlotSeriesNode", segmentName)
            plotSeriesNode.SetAndObserveTableNodeID(tableNode.GetID())
            plotSeriesNode.SetXColumnName("Index")
            plotSeriesNode.SetYColumnName(segmentName)
            plotSeriesNode.SetUniqueColor()
            # Add this series to the plot chart node created above.
            plotChartNode.AddAndObservePlotSeriesNodeID(
                plotSeriesNode.GetID())
    finally:
        # Remove temporary volume node (and its display color node, if any).
        colorNode = tempSegmentLabelmapVolumeNode.GetDisplayNode(
        ).GetColorNode()
        if colorNode:
            slicer.mrmlScene.RemoveNode(colorNode)
        slicer.mrmlScene.RemoveNode(tempSegmentLabelmapVolumeNode)
    logging.info('Processing completed')
def performAffineAndThinPlateRegistration(self, state, landmarks):
    """Perform a linear landmark registration first, then use the linearly
    transformed moving landmarks as the source of a thin plate spline
    transform, and set the concatenation on state.transform.

    The linear stage mode follows self.linearMode ('Rigid', 'Similarity'
    or 'Affine'), falling back to rigid when fewer than 3 fiducial pairs
    are available (similarity/affine are under-constrained below that).

    :param state: registration state object providing fixed/moving volume
        nodes, fiducial nodes, a logic helper and the output transform node
    :param landmarks: unused; kept for interface compatibility
    :raises ValueError: if the fixed and moving landmark counts differ
    """
    volumeNodes = (state.fixed, state.moving)
    fiducialNodes = (state.fixedFiducials, state.movingFiducials)
    points = state.logic.vtkPointsForVolumes(volumeNodes, fiducialNodes)

    # Fail early, before any transform state is touched, if the landmark
    # lists cannot be paired up. (Previously this raised an undefined name
    # -- `raise hell` -- and only after both transforms had been updated.)
    if points[state.moving].GetNumberOfPoints() != points[state.fixed].GetNumberOfPoints():
        raise ValueError(
            "Fixed and moving landmark point counts do not match: {0} vs {1}".format(
                points[state.fixed].GetNumberOfPoints(),
                points[state.moving].GetNumberOfPoints()))

    if not self.landmarkTransform:
        self.landmarkTransform = vtk.vtkLandmarkTransform()
    if self.linearMode == 'Rigid':
        self.landmarkTransform.SetModeToRigidBody()
    if self.linearMode == 'Similarity':
        self.landmarkTransform.SetModeToSimilarity()
    if self.linearMode == 'Affine':
        self.landmarkTransform.SetModeToAffine()
    # Fewer than 3 point pairs cannot constrain similarity/affine modes.
    if state.fixedFiducials.GetNumberOfFiducials() < 3:
        self.landmarkTransform.SetModeToRigidBody()
    self.landmarkTransform.SetSourceLandmarks(points[state.moving])
    self.landmarkTransform.SetTargetLandmarks(points[state.fixed])
    self.landmarkTransform.Update()

    # Transform the moving landmarks with the linear result; the residual
    # is then absorbed by the thin plate spline stage.
    affine_transformed_moving_points = vtk.vtkPoints()
    self.landmarkTransform.TransformPoints(points[state.moving],
                                           affine_transformed_moving_points)

    # Thin plate spline: since this is a resampling transform, the source
    # is the (linearly pre-aligned) moving space and the target is the
    # fixed space.
    if not self.thinPlateTransform:
        self.thinPlateTransform = vtk.vtkThinPlateSplineTransform()
    self.thinPlateTransform.SetBasisToR()  # for 3D transform
    self.thinPlateTransform.SetSourceLandmarks(affine_transformed_moving_points)
    self.thinPlateTransform.SetTargetLandmarks(points[state.fixed])
    self.thinPlateTransform.Update()

    # Concatenate the two stages with vtkGeneralTransform; order matters:
    # the linear transform is applied first, then the thin plate spline.
    transform = vtk.vtkGeneralTransform()
    transform.Concatenate(self.thinPlateTransform)
    transform.Concatenate(self.landmarkTransform)
    state.transform.SetAndObserveTransformToParent(transform)
def write_transforms_to_itk_format(transform_list, outdir, subject_ids=None):
    """Write VTK affine or spline transforms to ITK 4 text file formats.

    Input transforms are in VTK RAS space and are forward transforms.
    Output transforms are in LPS space and are the corresponding inverse
    transforms, according to the conventions for these file formats and for
    resampling images. The affine transform is straightforward. The spline
    transform file format is just a list of displacements that have to be in
    the same order as they are stored in ITK C code. This now outputs an ITK
    transform that works correctly to transform the tracts (or any volume in
    the same space) in Slicer. In the nonrigid case, we also output a vtk
    native spline transform file using MNI format.

    :param transform_list: iterable of vtkTransform / vtkThinPlateSplineTransform /
        vtkBSplineTransform objects (forward, RAS)
    :param outdir: directory the .tfm (and .xfm) files are written into
    :param subject_ids: optional sequence, indexed in step with
        transform_list, used to build per-subject file names
    :returns: list of the .tfm file paths written
    """
    idx = 0
    tx_fnames = list()
    for tx in transform_list:
        # save out the vtk transform to a text file as it is
        # The MNI transform reader/writer are available in vtk so use those
        # (vtkBSplineTransform is not supported by the MNI writer, skip it):
        if tx.GetClassName() != 'vtkBSplineTransform':
            writer = vtk.vtkMNITransformWriter()
            writer.AddTransform(tx)
            if subject_ids is not None:
                fname = 'vtk_txform_' + str(subject_ids[idx]) + '.xfm'
            else:
                fname = 'vtk_txform_{0:05d}.xfm'.format(idx)
            writer.SetFileName(os.path.join(outdir, fname))
            writer.Write()
        # file name for itk transform written below
        if subject_ids is not None:
            fname = 'itk_txform_' + str(subject_ids[idx]) + '.tfm'
        else:
            fname = 'itk_txform_{0:05d}.tfm'.format(idx)
        fname = os.path.join(outdir, fname)
        tx_fnames.append(fname)
        # Save the itk transform as the inverse of this transform (resampling transform) and in LPS.
        # This will show the same transform in the slicer GUI as the vtk transform we internally computed
        # that is stored in the .xfm text file, above.
        # To apply our transform to resample a volume in LPS:
        # convert to RAS, use inverse of transform to resample, convert back to LPS
        if tx.GetClassName(
        ) == 'vtkThinPlateSplineTransform' or tx.GetClassName(
        ) == 'vtkBSplineTransform':
            #print 'Saving nonrigid transform displacements in ITK format'
            # Deep copy to avoid modifying input transform that will be applied to polydata
            if tx.GetClassName() == 'vtkThinPlateSplineTransform':
                tps = vtk.vtkThinPlateSplineTransform()
            else:
                tps = vtk.vtkBSplineTransform()
            tps.DeepCopy(tx)
            #extent = tps.GetCoefficients().GetExtent()
            #origin = tps.GetCoefficients().GetOrigin()
            #spacing = tps.GetCoefficients().GetSpacing()
            #dims = tps.GetCoefficients().GetDimensions()
            #print "E:", extent
            #print "O:", origin
            #print "S:", spacing
            #print "D:", dims
            # invert to get the transform suitable for resampling an image
            tps.Inverse()
            # convert the inverse spline transform from RAS to LPS by
            # sandwiching it between two axis-flip transforms (x and y negate)
            ras_2_lps = vtk.vtkTransform()
            ras_2_lps.Scale(-1, -1, 1)
            lps_2_ras = vtk.vtkTransform()
            lps_2_ras.Scale(-1, -1, 1)
            spline_inverse_lps = vtk.vtkGeneralTransform()
            spline_inverse_lps.Concatenate(lps_2_ras)
            spline_inverse_lps.Concatenate(tps)
            spline_inverse_lps.Concatenate(ras_2_lps)
            # Now, loop through LPS space. Find the effect of the
            # inverse transform on each point. This is essentially what
            # vtk.vtkTransformToGrid() does, but this puts things into
            # LPS.
            # This low-res grid produced small differences (order of 1-2mm) when transforming
            # polydatas inside Slicer vs. in this code.
            #grid_size = [15, 15, 15]
            #grid_spacing = 10
            # This higher-res grid has fewer small numerical differences
            # grid_size = [50, 50, 50]
            # grid_spacing = 5
            # This higher-res grid has fewer small numerical differences, but files are larger
            #grid_size = [70, 70, 70]
            #grid_spacing = 3
            # This higher-res grid is sufficient to limit numerical
            # differences to under .1mm in tests. However, files are
            # quite large (47M). As this is still much smaller than
            # the tractography files, and correctness is desired, we
            # will produce large transform files. A preferable
            # solution would be to store the forward transform we
            # compute at the grid points at which it is defined, but
            # there is no inverse flag available in the file
            # format. Therefore the inverse must be stored at high
            # resolution.
            grid_size = [105, 105, 105]
            grid_spacing = 2
            # Symmetric extent around the origin, in grid-node units.
            # NOTE(review): integer division here relies on Python 2
            # semantics; under Python 3 these would become floats.
            extent_0 = [
                -(grid_size[0] - 1) / 2, -(grid_size[1] - 1) / 2,
                -(grid_size[2] - 1) / 2
            ]
            extent_1 = [(grid_size[0] - 1) / 2, (grid_size[1] - 1) / 2,
                        (grid_size[2] - 1) / 2]
            origin = -grid_spacing * (numpy.array(extent_1) -
                                      numpy.array(extent_0)) / 2.0
            grid_points_LPS = list()
            grid_points_RAS = list()
            # ordering of grid points must match itk-style array order for images
            # (S slowest, then P, then L fastest)
            for s in range(extent_0[0], extent_1[0] + 1):
                for p in range(extent_0[1], extent_1[1] + 1):
                    for l in range(extent_0[2], extent_1[2] + 1):
                        grid_points_RAS.append([
                            -l * grid_spacing, -p * grid_spacing,
                            s * grid_spacing
                        ])
                        grid_points_LPS.append([
                            l * grid_spacing, p * grid_spacing,
                            s * grid_spacing
                        ])
            displacements_LPS = list()
            print "LPS grid for storing transform:", grid_points_LPS[
                0], grid_points_LPS[-1], grid_spacing
            # Transform all grid points in one call, then compute per-point
            # displacement vectors (transformed - original).
            lps_points = vtk.vtkPoints()
            lps_points2 = vtk.vtkPoints()
            for gp_lps in grid_points_LPS:
                lps_points.InsertNextPoint(gp_lps[0], gp_lps[1], gp_lps[2])
            spline_inverse_lps.TransformPoints(lps_points, lps_points2)
            pidx = 0
            for gp_lps in grid_points_LPS:
                pt = lps_points2.GetPoint(pidx)
                diff_lps = [
                    pt[0] - gp_lps[0], pt[1] - gp_lps[1], pt[2] - gp_lps[2]
                ]
                pidx += 1
                ## # this tested grid definition and origin were okay.
                ## diff_lps = [20,30,40]
                ## # this tested that the ordering of L,P,S is correct:
                ## diff_lps = [0, gp_lps[1], 0]
                ## diff_lps = [gp_lps[0], 0, 0]
                ## diff_lps = [0, 0, gp_lps[2]]
                ## # this tested that the ordering of grid points is correct
                ## # only the R>0, A>0, S<0 region shows a transform.
                ## if gp_lps[0] < 0 and gp_lps[1] < 0 and gp_lps[2] < 0:
                ##     diff_lps = [gp_lps[0]/2.0, 0, 0]
                ## else:
                ##     diff_lps = [0, 0, 0]
                displacements_LPS.append(diff_lps)
            # save the points and displacement vectors in ITK format.
            #print 'Saving in ITK transform format.'
            f = open(fname, 'w')
            f.write('#Insight Transform File V1.0\n')
            f.write('# Transform 0\n')
            # ITK version 3 that included an additive (!) affine transform
            #f.write('Transform: BSplineDeformableTransform_double_3_3\n')
            # ITK version 4 that does not include a second transform in the file
            f.write('Transform: BSplineTransform_double_3_3\n')
            f.write('Parameters: ')
            # "Here the data are: The bulk of the BSpline part are 3D
            # displacement vectors for each of the BSpline grid-nodes
            # in physical space, i.e. for each grid-node, there will
            # be three blocks of displacements defining dx,dy,dz for
            # all grid nodes."
            for block in [0, 1, 2]:
                for diff in displacements_LPS:
                    f.write('{0} '.format(diff[block]))
            #FixedParameters: size size size origin origin origin origin spacing spacing spacing (then direction cosines: 1 0 0 0 1 0 0 0 1)
            f.write('\nFixedParameters:')
            #f.write(' {0} {0} {0}'.format(2*sz+1))
            f.write(' {0}'.format(grid_size[0]))
            f.write(' {0}'.format(grid_size[1]))
            f.write(' {0}'.format(grid_size[2]))
            f.write(' {0}'.format(origin[0]))
            f.write(' {0}'.format(origin[1]))
            f.write(' {0}'.format(origin[2]))
            f.write(' {0} {0} {0}'.format(grid_spacing))
            f.write(' 1 0 0 0 1 0 0 0 1\n')
            f.close()
        else:
            # Linear case: invert, convert RAS->LPS, and write the 3x3
            # matrix plus translation as an ITK AffineTransform.
            tx_inverse = vtk.vtkTransform()
            tx_inverse.DeepCopy(tx)
            tx_inverse.Inverse()
            ras_2_lps = vtk.vtkTransform()
            ras_2_lps.Scale(-1, -1, 1)
            lps_2_ras = vtk.vtkTransform()
            lps_2_ras.Scale(-1, -1, 1)
            tx2 = vtk.vtkTransform()
            tx2.Concatenate(lps_2_ras)
            tx2.Concatenate(tx_inverse)
            tx2.Concatenate(ras_2_lps)
            # Row-major 3x3 rotation/scale block, then the translation column.
            three_by_three = list()
            translation = list()
            for i in range(0, 3):
                for j in range(0, 3):
                    three_by_three.append(tx2.GetMatrix().GetElement(i, j))
            translation.append(tx2.GetMatrix().GetElement(0, 3))
            translation.append(tx2.GetMatrix().GetElement(1, 3))
            translation.append(tx2.GetMatrix().GetElement(2, 3))
            f = open(fname, 'w')
            f.write('#Insight Transform File V1.0\n')
            f.write('# Transform 0\n')
            f.write('Transform: AffineTransform_double_3_3\n')
            f.write('Parameters: ')
            for el in three_by_three:
                f.write('{0} '.format(el))
            for el in translation:
                f.write('{0} '.format(el))
            f.write('\nFixedParameters: 0 0 0\n')
            f.close()
        idx += 1
    return (tx_fnames)
def run(self, inputVolume, inputMRIVolume, Thresholdvalue, enableScreenshots=0):
    """Register an MRI to the input CT, extract the brain, segment the
    electrodes and add one fiducial per detected electrode contact.

    :param inputVolume: fixed CT volume node
    :param inputMRIVolume: optional moving MRI volume node; when None the
        bundled atlas image is used instead (with an extra BSpline stage)
    :param Thresholdvalue: threshold forwarded to the electrode segmentor CLI
    :param enableScreenshots: unused; kept for interface compatibility
    :returns: True on completion

    Side effects: recreates this module's Tmp working directory, runs the
    bundled Elastix/Transformix executables and several Slicer CLI modules,
    and adds volume, transform and markup nodes to the scene.
    """
    logging.info('Processing started')

    now = datetime.now()
    # One timestamp reused for all node/directory names created by this run.
    timestamp = str(now.strftime("%m%d%Y_%H%M%S"))
    distinctsubpathname = timestamp
    moduleDir = os.path.dirname(__file__)

    # Recreate the temporary working directory tree for this run.
    if os.path.exists(moduleDir + "/Tmp"):
        self.remove(moduleDir + "/Tmp")
    os.mkdir(moduleDir + "/Tmp")
    os.mkdir(moduleDir + "/Tmp/" + distinctsubpathname)
    os.mkdir(moduleDir + "/Tmp/" + distinctsubpathname + "/transform")
    os.mkdir(moduleDir + "/Tmp/" + distinctsubpathname + "/result")

    self.ElastixExecutable = moduleDir + "/Elastix/bin/elastix"
    self.TransformixExecutable = moduleDir + "/Elastix/bin/transformix"
    self.fixedInputFile = moduleDir + "/Tmp/" + distinctsubpathname + "/fixedtmp.nii"
    self.movingInputFile = moduleDir + "/Tmp/" + distinctsubpathname + "/movingtmp.nii"
    self.transformpath = moduleDir + "/Tmp/" + distinctsubpathname + "/transform"
    self.resultelastixpath = moduleDir + "/Tmp/" + distinctsubpathname + "/result"
    self.parameterfile = moduleDir + "/Resources/Parameters_Rigid.txt"
    self.transformparameterfile = self.transformpath + "/TransformParameters.0.txt"
    self.resourcespath = moduleDir + "/Resources"
    self.electrodefilepath = moduleDir + "/Tmp/" + distinctsubpathname

    volumesLogic = slicer.modules.volumes.logic()
    properties = {'useCompression': 0}
    if inputMRIVolume is None:
        # No patient MRI given: register the bundled atlas instead, with an
        # additional BSpline stage on top of the rigid one.
        namemriaux = "MRI_ATLAS_" + timestamp
        slicer.util.loadVolume(self.resourcespath + "/atlasImage.mha",
                               {'name': namemriaux})
        self.parameterfile2 = moduleDir + "/Resources/Parameters_BSpline.txt"
        self.transformparameterfile = self.transformpath + "/TransformParameters.1.txt"
        slicer.util.saveNode(inputVolume, self.fixedInputFile, properties)
        slicer.util.saveNode(slicer.util.getNode(namemriaux),
                             self.movingInputFile, properties)
        # Argument lists (shell=False) instead of concatenated shell strings:
        # robust against spaces in paths and avoids shell injection concerns.
        subprocess.call([self.ElastixExecutable,
                         '-f', self.fixedInputFile,
                         '-m', self.movingInputFile,
                         '-out', self.transformpath,
                         '-p', self.parameterfile,
                         '-p', self.parameterfile2])
        subprocess.call([self.TransformixExecutable,
                         '-tp', self.transformparameterfile,
                         '-out', self.resultelastixpath,
                         '-in', self.movingInputFile,
                         '-def', 'all'])
    else:
        slicer.util.saveNode(inputVolume, self.fixedInputFile, properties)
        slicer.util.saveNode(inputMRIVolume, self.movingInputFile, properties)
        subprocess.call([self.ElastixExecutable,
                         '-f', self.fixedInputFile,
                         '-m', self.movingInputFile,
                         '-out', self.transformpath,
                         '-p', self.parameterfile])
        subprocess.call([self.TransformixExecutable,
                         '-tp', self.transformparameterfile,
                         '-out', self.resultelastixpath,
                         '-in', self.movingInputFile])

    # Wrap the Elastix deformation field in a named transform node.
    outputTransformNode = slicer.vtkMRMLTransformNode()
    outputTransformNode.SetName("transform_" + timestamp)
    slicer.mrmlScene.AddNode(outputTransformNode)
    outputTransformPath = os.path.join(self.resultelastixpath, "deformationField.mhd")
    [success, loadedOutputTransformNode] = slicer.util.loadTransform(
        outputTransformPath, returnNode=True)
    if success:
        if loadedOutputTransformNode.GetReadAsTransformToParent():
            outputTransformNode.SetAndObserveTransformToParent(
                loadedOutputTransformNode.GetTransformToParent())
        else:
            outputTransformNode.SetAndObserveTransformFromParent(
                loadedOutputTransformNode.GetTransformFromParent())

    # Load the registered MRI and run ROBEX brain extraction on it.
    nameaux = "Brain_Reg_" + timestamp
    slicer.util.loadVolume(self.resultelastixpath + "/result.mhd", {'name': nameaux})
    outputVolume = volumesLogic.CloneVolume(slicer.mrmlScene, inputVolume,
                                            "Aux_Crop_" + timestamp)
    brainmask = volumesLogic.CreateAndAddLabelVolume(slicer.mrmlScene,
                                                     inputVolume, "brainmask")
    cliParams = {}
    cliParams["inputVolume"] = slicer.util.getNode(nameaux).GetID()
    cliParams["outputVolume"] = outputVolume.GetID()
    cliParams["brainMask"] = brainmask.GetID()
    # BUGFIX: compare strings with '==', not identity ('is') -- `is` on a
    # string literal is not guaranteed to hold even for equal strings.
    if platform.system() == "Windows":
        cliParams["datPath"] = moduleDir + '\\Resources\\dat\\'
        cliParams["refVolsPath"] = moduleDir + '\\Resources\\ref_vols'
    else:
        cliParams["datPath"] = moduleDir + '/Resources/dat/'
        cliParams["refVolsPath"] = moduleDir + '/Resources/ref_vols'
    slicer.cli.run(slicer.modules.robexbrainextractioncore, None, cliParams,
                   wait_for_completion=True)

    # Cast the extracted brain volume to a scalar volume.
    BrainCropVolume = volumesLogic.CloneVolume(slicer.mrmlScene, inputVolume,
                                               "Brain_Crop_" + timestamp)
    cliParams = {}
    cliParams["InputVolume"] = outputVolume.GetID()
    cliParams["OutputVolume"] = BrainCropVolume.GetID()
    slicer.cli.run(slicer.modules.castscalarvolume, None, cliParams,
                   wait_for_completion=True)

    # Mask the CT with the brain mask before electrode segmentation.
    PreprocessesCT = volumesLogic.CloneVolume(slicer.mrmlScene, inputVolume,
                                              "Preprocessed_CT_" + timestamp)
    cliParams = {'InputVolume': inputVolume.GetID(),
                 'MaskVolume': brainmask.GetID(),
                 'OutputVolume': PreprocessesCT.GetID()}
    slicer.cli.run(slicer.modules.maskscalarvolume, None, cliParams,
                   wait_for_completion=True)

    # Segment the electrodes from the masked CT.
    electrodeslabelimage = volumesLogic.CreateAndAddLabelVolume(
        slicer.mrmlScene, inputVolume, "electrodes")
    cliParams = {'inputVolume': PreprocessesCT.GetID(),
                 'outputVolume': electrodeslabelimage.GetID(),
                 'threshold': Thresholdvalue,
                 'datPath': self.electrodefilepath}
    slicer.cli.run(slicer.modules.fastelectrodesegmentor, None, cliParams,
                   wait_for_completion=True)

    Brain3D = volumesLogic.CreateAndAddLabelVolume(slicer.mrmlScene,
                                                   BrainCropVolume, "Brain3D")
    cliParams = {'inputVolume': BrainCropVolume.GetID(),
                 'outputVolume': Brain3D.GetID()}
    slicer.cli.run(slicer.modules.modifiedqentropysegmentation, None,
                   cliParams, wait_for_completion=True)

    # Rendering Electrodes
    self.makeModel(Brain3D, "1,2,3", 5, "BRAIN")

    # Parse the segmentor's coordinate file: runs of "col,row,slice" lines
    # separated by blank lines; each run is summarized by its median voxel.
    electrodes = []
    columnarray = []
    rowarray = []
    slicenumarray = []
    with open(self.electrodefilepath + "/electrodescordinatesFile.txt", 'r') as filehandle:
        for line in filehandle:
            currentPlace = line[:-1]
            if len(currentPlace.strip()) != 0:
                aux = currentPlace.split(",")
                columnarray.append(int(aux[0]))
                rowarray.append(int(aux[1]))
                slicenumarray.append(int(aux[2]))
            elif columnarray:
                # Blank separator: close the current run. The `elif columnarray`
                # guard fixes a crash (np.median of an empty list) on leading
                # or consecutive blank lines.
                targetarray = [round(np.median(columnarray)),
                               round(np.median(rowarray)),
                               round(np.median(slicenumarray))]
                if not self.checkifelectrodeexists(electrodes, targetarray):
                    electrodes.append(targetarray)
                columnarray = []
                rowarray = []
                slicenumarray = []
    # NOTE(review): a trailing run without a final blank line is dropped,
    # matching the original behavior -- confirm the file always ends blank.

    # Add one fiducial per detected electrode at its RAS position.
    nameelectrodesfiducial = "Electrodes_" + timestamp
    mlogic = slicer.modules.markups.logic()
    fidNode = slicer.util.getNode(mlogic.AddNewFiducialNode(nameelectrodesfiducial))
    volumeNode = inputVolume
    for electrodenum, electrode in enumerate(electrodes, start=1):
        point_Ijk = [electrode[0], electrode[1], electrode[2]]
        # Get physical coordinates from voxel coordinates
        volumeIjkToRas = vtk.vtkMatrix4x4()
        volumeNode.GetIJKToRASMatrix(volumeIjkToRas)
        point_VolumeRas = [0, 0, 0, 1]
        volumeIjkToRas.MultiplyPoint(np.append(point_Ijk, 1.0), point_VolumeRas)
        # If volume node is transformed, apply that transform to get volume's RAS coordinates
        transformVolumeRasToRas = vtk.vtkGeneralTransform()
        slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(
            volumeNode.GetParentTransformNode(), None, transformVolumeRasToRas)
        point_Ras = transformVolumeRasToRas.TransformPoint(point_VolumeRas[0:3])
        fidNode.AddFiducial(point_Ras[0], point_Ras[1], point_Ras[2],
                            "Contact_" + str(electrodenum))

    logging.info('Processing completed')
    return True
def applyMarkupsLabel(self, input_sequence_browser_node, input_markup_node,
                      input_volume_node, output_text_node,
                      output_sequence_browser_node):
    """Replay every item of the input sequence browser and label each frame.

    For each frame, every control point of ``input_markup_node`` is
    transformed into the volume's IJK space; the first point that falls
    inside the image extent (within a tolerance) provides the frame's label
    text, otherwise the text is "None". The label text and the image are
    recorded into sequences synchronized with ``output_sequence_browser_node``.

    :param input_sequence_browser_node: browser whose items are replayed
    :param input_markup_node: markups node whose control points are tested
    :param input_volume_node: proxy volume node providing the image frames
    :param output_text_node: proxy text node receiving each frame's label
    :param output_sequence_browser_node: browser collecting the output text,
        image, and transform sequences
    """
    # Create (or reuse) the output text sequence and clear any previous data.
    text_sequence_node = output_sequence_browser_node.GetSequenceNode(
        output_text_node)
    if text_sequence_node is None:
        text_sequence_node = slicer.mrmlScene.AddNewNodeByClass(
            "vtkMRMLSequenceNode",
            input_volume_node.GetName() + "_LabelSequence")
        output_sequence_browser_node.AddSynchronizedSequenceNode(
            text_sequence_node)
        output_sequence_browser_node.AddProxyNode(
            output_text_node, text_sequence_node, False)
    text_sequence_node.RemoveAllDataNodes()

    # Create (or reuse) the output image sequence and clear any previous data.
    image_sequence_node = output_sequence_browser_node.GetSequenceNode(
        input_volume_node)
    if image_sequence_node is None:
        image_sequence_node = slicer.mrmlScene.AddNewNodeByClass(
            "vtkMRMLSequenceNode",
            input_volume_node.GetName() + "_ImageSequence")
        output_sequence_browser_node.AddSynchronizedSequenceNode(
            image_sequence_node)
        output_sequence_browser_node.AddProxyNode(
            input_volume_node, image_sequence_node, False)
    image_sequence_node.RemoveAllDataNodes()

    masterSequence = input_sequence_browser_node.GetMasterSequenceNode()
    for i in range(input_sequence_browser_node.GetNumberOfItems()):
        # Selecting the item updates the proxy volume (and any proxy
        # transforms) to this frame's data.
        input_sequence_browser_node.SetSelectedItemNumber(i)
        sequenceValue = masterSequence.GetNthIndexValue(i)
        extent = input_volume_node.GetImageData().GetExtent()

        # Build the transform chain markup parent -> volume parent (RAS),
        # then RAS -> IJK, so control points can be tested against the
        # image extent.
        markupToVolumeTransform = vtk.vtkGeneralTransform()
        slicer.vtkMRMLTransformNode.GetTransformBetweenNodes(
            input_markup_node.GetParentTransformNode(),
            input_volume_node.GetParentTransformNode(),
            markupToVolumeTransform)
        rasToIJKMatrix = vtk.vtkMatrix4x4()
        input_volume_node.GetRASToIJKMatrix(rasToIJKMatrix)
        rasToIJKTransform = vtk.vtkTransform()
        rasToIJKTransform.SetMatrix(rasToIJKMatrix)
        markupToVolumeTransform.Concatenate(rasToIJKTransform)

        # Find the first control point inside the (tolerance-expanded)
        # image extent for this frame.
        overlappingMarkupIndex = -1
        for pointIndex in range(
                input_markup_node.GetNumberOfControlPoints()):
            position_Markup = [0, 0, 0]
            input_markup_node.GetNthControlPointPosition(
                pointIndex, position_Markup)
            point_IJK = [0, 0, 0]
            markupToVolumeTransform.TransformPoint(position_Markup, point_IJK)
            # NOTE(review): the tolerance is named in mm but is compared
            # directly against IJK voxel indices — confirm intended units.
            threshold_mm = 10  # TODO: Make threshold markup parameter
            if (point_IJK[0] > extent[0] - threshold_mm
                    and point_IJK[0] < extent[1] + threshold_mm
                    and point_IJK[1] > extent[2] - threshold_mm
                    and point_IJK[1] < extent[3] + threshold_mm
                    and point_IJK[2] > extent[4] - threshold_mm
                    and point_IJK[2] < extent[5] + threshold_mm):
                overlappingMarkupIndex = pointIndex
                break

        labelText = "None"
        if overlappingMarkupIndex >= 0:
            labelText = input_markup_node.GetNthControlPointLabel(
                overlappingMarkupIndex)
            # Removed dead assignment "overlappingMarkupIndex = threshold_mm":
            # the index is reset to -1 each iteration and never read again
            # after the label lookup.
        output_text_node.SetText(labelText)
        text_sequence_node.SetDataNodeAtValue(output_text_node, sequenceValue)
        image_sequence_node.SetDataNodeAtValue(input_volume_node,
                                               sequenceValue)

    # Synchronize every transform in the volume's parent-transform chain so
    # the output browser replays each frame with the correct pose.
    currentTransformNode = input_volume_node.GetParentTransformNode()
    while currentTransformNode is not None:
        sequenceNode = input_sequence_browser_node.GetSequenceNode(
            currentTransformNode)
        if sequenceNode:
            output_sequence_browser_node.AddSynchronizedSequenceNode(
                sequenceNode)
            output_sequence_browser_node.AddProxyNode(
                currentTransformNode, sequenceNode, False)
        currentTransformNode = currentTransformNode.GetParentTransformNode()