def __init__(self, sliceWidget):
    """Set up the level-tracing tool: non-GUI logic, the tracing pipeline
    objects, and a yellow 2D outline actor added to the slice renderer."""
    super(LevelTracingEffectTool, self).__init__(sliceWidget)

    # create a logic instance to do the non-gui work
    self.logic = LevelTracingEffectLogic(self.sliceWidget.sliceLogic())

    # instance variables
    self.actionState = ""

    # initialization
    self.xyPoints = vtk.vtkPoints()
    self.rasPoints = vtk.vtkPoints()
    self.polyData = vtk.vtkPolyData()

    self.tracingFilter = vtkITK.vtkITKLevelTracingImageFilter()
    self.ijkToXY = vtk.vtkGeneralTransform()

    self.mapper = vtk.vtkPolyDataMapper2D()
    self.actor = vtk.vtkActor2D()
    # SetInput was removed in VTK 6; use SetInputData on modern VTK
    if vtk.VTK_MAJOR_VERSION <= 5:
        self.mapper.SetInput(self.polyData)
    else:
        self.mapper.SetInputData(self.polyData)
    self.actor.SetMapper(self.mapper)
    # outline drawn in yellow, one pixel wide
    # (the original also set a green color first, immediately overwritten —
    # that dead assignment is removed here)
    property_ = self.actor.GetProperty()
    property_.SetColor(1, 1, 0)
    property_.SetLineWidth(1)
    self.renderer.AddActor2D(self.actor)
    self.actors.append(self.actor)
def __init__(self, sliceWidget):
    """Set up the level-tracing tool: non-GUI logic, the tracing pipeline
    objects, and a yellow 2D outline actor added to the slice renderer."""
    super(LevelTracingEffectTool, self).__init__(sliceWidget)

    # create a logic instance to do the non-gui work
    self.logic = LevelTracingEffectLogic(self.sliceWidget.sliceLogic())

    # instance variables
    self.actionState = ''

    # initialization
    self.xyPoints = vtk.vtkPoints()
    self.rasPoints = vtk.vtkPoints()
    self.polyData = vtk.vtkPolyData()

    self.tracingFilter = vtkITK.vtkITKLevelTracingImageFilter()
    self.ijkToXY = vtk.vtkGeneralTransform()

    self.mapper = vtk.vtkPolyDataMapper2D()
    self.actor = vtk.vtkActor2D()
    # SetInput was removed in VTK 6; pick the API for the running VTK
    if vtk.VTK_MAJOR_VERSION <= 5:
        self.mapper.SetInput(self.polyData)
    else:
        self.mapper.SetInputData(self.polyData)
    self.actor.SetMapper(self.mapper)
    # outline drawn in yellow, one pixel wide
    # (the original also set a green color first, immediately overwritten —
    # that dead assignment is removed here)
    property_ = self.actor.GetProperty()
    property_.SetColor(1, 1, 0)
    property_.SetLineWidth(1)
    self.renderer.AddActor2D(self.actor)
    self.actors.append(self.actor)
def onReferencePointMarkupPlace(self, fieldIndex, enable):
    """Handle activation/completion of reference-point placement for an input field.

    On activation (enable=True) the old labeled markup is removed; on
    completion the temporary point is read back, transformed into the valve's
    probe coordinate system, optionally snapped to the annulus contour, and
    stored as a labeled annulus markup.

    :param fieldIndex: index into ``self.measurementPreset.inputFields``
    :param enable: True when point placement starts, False when it completes
    """
    # Get markup label
    field = self.measurementPreset.inputFields[fieldIndex]
    # Get valve model from selected valve node
    valveId = field[FIELD_VALVE_ID]
    if valveId not in self.inputValveModels:
        # lazy %-style args: only formatted if the record is emitted
        logging.error('onReferencePointMarkupPlace failed: no %s valve node is selected', valveId)
        # defer removal: we may be inside a markup-node event handler
        qt.QTimer.singleShot(0, self.pointFieldMarkupsNode[fieldIndex].RemoveAllMarkups)
        return
    valveModel = self.inputValveModels[valveId]
    label = field[FIELD_NAME]
    if enable:
        # Point placement activated - remove old markup
        valveModel.removeAnnulusMarkupLabel(label)
        self.pointFieldMarkupsNode[fieldIndex].RemoveAllMarkups()
    else:
        # Point placement completed add new markup on the contour
        if self.pointFieldMarkupsNode[fieldIndex].GetNumberOfMarkups() == 0:
            # duplicate update event
            return
        # Get point from temporary markup node
        pointPositionWorld = [0, 0, 0]
        self.pointFieldMarkupsNode[fieldIndex].GetNthControlPointPositionWorld(0, pointPositionWorld)
        qt.QTimer.singleShot(0, self.pointFieldMarkupsNode[fieldIndex].RemoveAllMarkups)
        # Add label on closest point on contour
        worldToProbeTransform = vtk.vtkGeneralTransform()
        valveModel.getProbeToRasTransformNode().GetTransformFromWorld(worldToProbeTransform)
        pointPositionAnnulus = worldToProbeTransform.TransformDoublePoint(pointPositionWorld[0:3])
        if field[FIELD_ON_ANNULUS_CONTOUR] is True:
            # snap to the nearest point of the annulus contour curve
            [closestPointOnAnnulusCurve, _] = valveModel.annulusContourCurve.getClosestPoint(pointPositionAnnulus)
            pointPosition = closestPointOnAnnulusCurve
        else:
            # NB: no snapping or restriction
            pointPosition = pointPositionAnnulus
        valveModel.setAnnulusMarkupLabel(label, pointPosition)
        self.measurementPreset.onInputFieldChanged(
            field[FIELD_ID], self.inputValveModels, self.inputFieldValues,
            computeDependentValues=True)
def transformPolyData(self, transformNode):
    """Push the model's polydata through transformNode's world transform
    and hand the result to the observed transformed-model node."""
    worldTransform = vtk.vtkGeneralTransform()
    transformNode.GetTransformToWorld(worldTransform)
    polyFilter = self.transformPolyDataFilter
    polyFilter.SetTransform(worldTransform)
    polyFilter.SetInputData(self.modelNode.GetPolyData())
    polyFilter.Update()
    self.transformedModel.SetAndObservePolyData(polyFilter.GetOutput())
def run(self, referenceSequenceNode, inputNode, transformSequenceNode, outputSequenceNode):
    """Resample inputNode into each reference frame of the sequence.

    For every data node in referenceSequenceNode, builds the combined
    input-IJK -> input-RAS -> transformed-RAS -> reference-IJK transform,
    reslices the input image through it, and stores the result in
    outputSequenceNode under the matching index value.

    :returns: True on completion
    """
    if outputSequenceNode:
        outputSequenceNode.RemoveAllDataNodes()
    numOfImageNodes = referenceSequenceNode.GetNumberOfDataNodes()
    # loop-invariant: the input volume's IJK->RAS matrix does not change per frame
    inputIJK2RASMatrix = vtk.vtkMatrix4x4()
    inputNode.GetIJKToRASMatrix(inputIJK2RASMatrix)
    # range works on Python 2 and 3 (original used Python-2-only xrange)
    for i in range(numOfImageNodes):
        referenceNode = referenceSequenceNode.GetNthDataNode(i)
        referenceNodeIndexValue = referenceSequenceNode.GetNthIndexValue(i)
        dimensions = [1, 1, 1]
        referenceNode.GetImageData().GetDimensions(dimensions)
        transformNode = transformSequenceNode.GetNthDataNode(i)

        referenceRAS2IJKMatrix = vtk.vtkMatrix4x4()
        referenceNode.GetRASToIJKMatrix(referenceRAS2IJKMatrix)
        inputRAS2RASMatrix = transformNode.GetTransformToParent()

        # concatenate IJK->RAS, the per-frame RAS->RAS transform, and RAS->IJK,
        # then invert: vtkImageReslice expects the output-to-input transform
        resampleTransform = vtk.vtkGeneralTransform()
        resampleTransform.Identity()
        resampleTransform.PostMultiply()
        resampleTransform.Concatenate(inputIJK2RASMatrix)
        resampleTransform.Concatenate(inputRAS2RASMatrix)
        resampleTransform.Concatenate(referenceRAS2IJKMatrix)
        resampleTransform.Inverse()

        resampler = vtk.vtkImageReslice()
        # SetInput was removed in VTK 6
        if vtk.VTK_MAJOR_VERSION <= 5:
            resampler.SetInput(inputNode.GetImageData())
        else:
            resampler.SetInputData(inputNode.GetImageData())
        resampler.SetOutputOrigin(0, 0, 0)
        resampler.SetOutputSpacing(1, 1, 1)
        # VTK extents are inclusive: a dim-voxel axis spans 0..dim-1
        # (original passed dim, producing one extra slice per axis)
        resampler.SetOutputExtent(0, dimensions[0] - 1,
                                  0, dimensions[1] - 1,
                                  0, dimensions[2] - 1)
        resampler.SetResliceTransform(resampleTransform)
        resampler.Update()

        outputNode = slicer.vtkMRMLScalarVolumeNode()
        outputNode.CopyOrientation(referenceNode)
        outputNode.SetAndObserveImageData(resampler.GetOutput())
        outputSequenceNode.SetDataNodeAtValue(outputNode, referenceNodeIndexValue)
    return True
def run(self, referenceSequenceNode, inputNode, transformSequenceNode, outputSequenceNode):
    """Resample inputNode into each reference frame of the sequence.

    For every data node in referenceSequenceNode, builds the combined
    input-IJK -> input-RAS -> transformed-RAS -> reference-IJK transform,
    reslices the input image through it, and stores the result in
    outputSequenceNode under the matching index value.

    :returns: True on completion
    """
    if outputSequenceNode:
        outputSequenceNode.RemoveAllDataNodes()
    numOfImageNodes = referenceSequenceNode.GetNumberOfDataNodes()
    # loop-invariant: the input volume's IJK->RAS matrix does not change per frame
    inputIJK2RASMatrix = vtk.vtkMatrix4x4()
    inputNode.GetIJKToRASMatrix(inputIJK2RASMatrix)
    # range works on Python 2 and 3 (original used Python-2-only xrange)
    for i in range(numOfImageNodes):
        referenceNode = referenceSequenceNode.GetNthDataNode(i)
        referenceNodeIndexValue = referenceSequenceNode.GetNthIndexValue(i)
        dimensions = [1, 1, 1]
        referenceNode.GetImageData().GetDimensions(dimensions)
        transformNode = transformSequenceNode.GetNthDataNode(i)

        referenceRAS2IJKMatrix = vtk.vtkMatrix4x4()
        referenceNode.GetRASToIJKMatrix(referenceRAS2IJKMatrix)
        inputRAS2RASMatrix = transformNode.GetTransformToParent()

        # concatenate IJK->RAS, the per-frame RAS->RAS transform, and RAS->IJK,
        # then invert: vtkImageReslice expects the output-to-input transform
        resampleTransform = vtk.vtkGeneralTransform()
        resampleTransform.Identity()
        resampleTransform.PostMultiply()
        resampleTransform.Concatenate(inputIJK2RASMatrix)
        resampleTransform.Concatenate(inputRAS2RASMatrix)
        resampleTransform.Concatenate(referenceRAS2IJKMatrix)
        resampleTransform.Inverse()

        resampler = vtk.vtkImageReslice()
        # SetInput was removed in VTK 6
        if vtk.VTK_MAJOR_VERSION <= 5:
            resampler.SetInput(inputNode.GetImageData())
        else:
            resampler.SetInputData(inputNode.GetImageData())
        resampler.SetOutputOrigin(0, 0, 0)
        resampler.SetOutputSpacing(1, 1, 1)
        # VTK extents are inclusive: a dim-voxel axis spans 0..dim-1
        # (original passed dim, producing one extra slice per axis)
        resampler.SetOutputExtent(0, dimensions[0] - 1,
                                  0, dimensions[1] - 1,
                                  0, dimensions[2] - 1)
        resampler.SetResliceTransform(resampleTransform)
        resampler.Update()

        outputNode = slicer.vtkMRMLScalarVolumeNode()
        outputNode.CopyOrientation(referenceNode)
        outputNode.SetAndObserveImageData(resampler.GetOutput())
        outputSequenceNode.SetDataNodeAtValue(outputNode, referenceNodeIndexValue)
    return True
def updateViewpointCamera(self):
    """Drive the viewpoint camera from the tool transform and toggle
    point-of-view model visibility accordingly."""
    # no logging - it slows Slicer down a *lot*
    # Need to set camera attributes according to the concatenated transform
    toolCameraToRAS = vtk.vtkGeneralTransform()
    self.transformNode.GetTransformToWorld(toolCameraToRAS)

    origin = self.computeCameraOriginInRASMm(toolCameraToRAS)
    focalPoint = self.computeCameraFocalPointInRASMm(toolCameraToRAS)
    upDirection = self.computeCameraUpDirectionInRAS(toolCameraToRAS, origin, focalPoint)
    self.setCameraParameters(origin, focalPoint, upDirection)

    # model visibility: hide the "POV off" model, show the "POV on" model
    if self.modelPOVOffNode:
        self.modelPOVOffNode.GetDisplayNode().SetVisibility(False)
    if self.modelPOVOnNode:
        self.modelPOVOnNode.GetDisplayNode().SetVisibility(True)
def resliceThroughTransform(sourceNode, transform, referenceNode, targetNode):
    """
    Fills the targetNode's vtkImageData with the source after
    applying the transform.  Uses spacing from referenceNode. Ignores
    any vtkMRMLTransforms.
    sourceNode, referenceNode, targetNode: vtkMRMLScalarVolumeNodes
    transform: vtkAbstractTransform
    """
    # transform from RAS back into the source volume's pixel (IJK) space
    sourceRASToIJK = vtk.vtkMatrix4x4()
    sourceNode.GetRASToIJKMatrix(sourceRASToIJK)

    # transform from the target image space into RAS
    referenceIJKToRAS = vtk.vtkMatrix4x4()
    targetNode.GetIJKToRASMatrix(referenceIJKToRAS)

    # concatenate ijkToRAS with the passed-in (abstract) transform
    resliceTransform = vtk.vtkGeneralTransform()
    for piece in (sourceRASToIJK, transform, referenceIJKToRAS):
        resliceTransform.Concatenate(piece)

    # reslice the source volume through the combined transform
    reslice = vtk.vtkImageReslice()
    reslice.SetInterpolationModeToLinear()
    reslice.InterpolateOn()
    reslice.SetResliceTransform(resliceTransform)
    if vtk.VTK_MAJOR_VERSION <= 5:
        reslice.SetInput(sourceNode.GetImageData())
    else:
        reslice.SetInputConnection(sourceNode.GetImageDataConnection())

    xDim, yDim, zDim = referenceNode.GetImageData().GetDimensions()
    reslice.SetOutputExtent(0, xDim - 1, 0, yDim - 1, 0, zDim - 1)
    reslice.SetOutputOrigin((0, 0, 0))
    reslice.SetOutputSpacing((1, 1, 1))

    reslice.UpdateWholeExtent()
    targetNode.SetAndObserveImageData(reslice.GetOutput())
def resliceThroughTransform(self, sourceNode, transform, referenceNode, targetNode):
    """
    Fills the targetNode's vtkImageData with the source after applying the transform.
    Uses spacing from referenceNode. Ignores any vtkMRMLTransforms.
    sourceNode, referenceNode, targetNode: vtkMRMLScalarVolumeNodes
    transform: vtkAbstractTransform
    """
    # get the transform from RAS back to source pixel space
    sourceRASToIJK = vtk.vtkMatrix4x4()
    sourceNode.GetRASToIJKMatrix(sourceRASToIJK)
    # get the transform from target image space to RAS
    referenceIJKToRAS = vtk.vtkMatrix4x4()
    targetNode.GetIJKToRASMatrix(referenceIJKToRAS)
    # this is the ijkToRAS concatenated with the passed in (abstract)transform
    self.resliceTransform = vtk.vtkGeneralTransform()
    self.resliceTransform.Concatenate(sourceRASToIJK)
    self.resliceTransform.Concatenate(transform)
    self.resliceTransform.Concatenate(referenceIJKToRAS)
    # use the matrix to extract the volume and convert it to an array
    self.reslice = vtk.vtkImageReslice()
    self.reslice.SetInterpolationModeToLinear()
    self.reslice.InterpolateOn()
    self.reslice.SetResliceTransform(self.resliceTransform)
    # SetInput was removed in VTK 6; pick the API for the running VTK
    if vtk.VTK_MAJOR_VERSION <= 5:
        self.reslice.SetInput(sourceNode.GetImageData())
    else:
        self.reslice.SetInputConnection(sourceNode.GetImageDataConnection())
    dimensions = referenceNode.GetImageData().GetDimensions()
    # VTK extents are inclusive, hence dim-1
    self.reslice.SetOutputExtent(0, dimensions[0] - 1,
                                 0, dimensions[1] - 1,
                                 0, dimensions[2] - 1)
    self.reslice.SetOutputOrigin((0, 0, 0))
    self.reslice.SetOutputSpacing((1, 1, 1))
    self.reslice.UpdateWholeExtent()
    targetNode.SetAndObserveImageData(self.reslice.GetOutput())
def transformPolyData(self, modelNode, transformNode):
    """Create (or reuse) a hidden 'Transformed Model' node holding modelNode's
    polydata pushed through transformNode's world transform."""
    outputModel = slicer.util.getNode('Transformed Model')
    if not outputModel:
        # first call: build the output model node with a green display node
        outputModel = slicer.vtkMRMLModelNode()
        outputModel.SetName('Transformed Model')
        outputModel.SetAndObservePolyData(modelNode.GetPolyData())

        displayNode = slicer.vtkMRMLModelDisplayNode()
        displayNode.SetSliceIntersectionVisibility(True)
        displayNode.SetColor(0, 1, 0)
        slicer.mrmlScene.AddNode(displayNode)
        outputModel.SetAndObserveDisplayNodeID(displayNode.GetID())
        slicer.mrmlScene.AddNode(outputModel)
        outputModel.SetDisplayVisibility(False)

    # harden the transform into the polydata
    worldTransform = vtk.vtkGeneralTransform()
    transformNode.GetTransformToWorld(worldTransform)
    polyFilter = vtk.vtkTransformPolyDataFilter()
    polyFilter.SetTransform(worldTransform)
    polyFilter.SetInputData(modelNode.GetPolyData())
    polyFilter.Update()
    outputModel.SetAndObservePolyData(polyFilter.GetOutput())