Example #1
0
 def setInputNode(self, node):
     """Select the module's input volume node.

     For a vtkMRMLStreamingVolumeNode, a companion vector volume named
     '<node name>_Image' is reused or created to hold the frame image
     data (so IGTLink sends IMAGE rather than VIDEO messages), the
     streaming node is observed for new frames, and the companion image
     becomes the input.  Any other node is used directly.
     """
     if node.GetClassName() == "vtkMRMLStreamingVolumeNode":
         try:
             # Reuse an existing companion image node if present.
             self.videoImage = slicer.util.getNode(node.GetName() +
                                                   '_Image')
             self.videoImage.SetAndObserveImageData(node.GetImageData())
         except slicer.util.MRMLNodeNotFoundException:
             # Create a node to store the image data of the video so that
             # IGTLink is sending an IMAGE message, not a VIDEO message.
             imageSpacing = [0.2, 0.2, 0.2]
             self.videoImage = slicer.vtkMRMLVectorVolumeNode()
             self.videoImage.SetName(node.GetName() + '_Image')
             self.videoImage.SetSpacing(imageSpacing)
             self.videoImage.SetAndObserveImageData(node.GetImageData())
             # Add volume to scene
             slicer.mrmlScene.AddNode(self.videoImage)
         # Common tail for both paths: observe new frames and record the
         # streaming node.  Hoisted here to remove the duplicated
         # AddObserver calls and the redundant streamingNode assignment
         # that the try branch used to perform.
         node.AddObserver(
             slicer.vtkMRMLStreamingVolumeNode.FrameModifiedEvent,
             self.referenceImageModified)
         self.inputNode = self.videoImage
         self.streamingNode = node
     else:
         self.inputNode = node
 def onWIPButtonClicked(self):
   """Work-in-progress helper: download a fixed test PNG, convert it to
   vtkImageData and show it in the scene as a vector volume named "WEB".
   """
   import urllib2
   reply = urllib2.urlopen('http://www.osa.sunysb.edu/erich.png')
   # Fix: close the HTTP response even if read() fails (the response was
   # previously never closed, leaking the socket).
   try:
       byte_array = reply.read()
   finally:
       reply.close()
   image = qt.QImage(qt.QImage.Format_RGB888)
   image.loadFromData(byte_array)
   imageData = self.QImage2vtkImage(image)
   volumeNode = slicer.vtkMRMLVectorVolumeNode()
   volumeNode.SetName("WEB")
   volumeNode.SetAndObserveImageData(imageData)
   displayNode = slicer.vtkMRMLVectorVolumeDisplayNode()
   slicer.mrmlScene.AddNode(volumeNode)
   slicer.mrmlScene.AddNode(displayNode)
   volumeNode.SetAndObserveDisplayNodeID(displayNode.GetID())
   displayNode.SetAndObserveColorNodeID('vtkMRMLColorTableNodeGrey')
   self.mutate()
  def openTargetImage(self):
    import string
    p = self.v.page() 
    m = p.mainFrame()
    imageBound=m.evaluateJavaScript('viewer.viewport.viewportToImageRectangle(viewer.viewport.getBounds().x, viewer.viewport.getBounds().y, viewer.viewport.getBounds().width, viewer.viewport.getBounds().height)')
    x=imageBound[u'x']
    y=imageBound[u'y']
    width=imageBound[u'width']
    height=imageBound[u'height']
    self.j['x'] = x
    self.j['y'] = y
    self.j['width'] = width
    self.j['height'] = height
    imagedata = m.evaluateJavaScript('imagedata')
    tmpfilename=  imagedata[u'metaData'][1]
    imageFileName=string.rstrip(tmpfilename,'.dzi')
    self.tilename = imagedata[u'imageId']
    print self.tilename
    self.parameterNode.SetParameter("SlicerPathology,tilename", self.tilename)
    current_weburl ='http://quip1.uhmc.sunysb.edu/fcgi-bin/iipsrv.fcgi?IIIF=' + imageFileName +'/' + str(x) + ','+ str(y) + ',' + str(width) + ',' + str(height) + '/full/0/default.jpg'
    print current_weburl
    self.v.setUrl(qt.QUrl(current_weburl))
    self.v.show()

    reply = urllib2.urlopen(current_weburl)
    byte_array = reply.read()
    image = qt.QImage(qt.QImage.Format_RGB888)
    image.loadFromData(byte_array)
    imageData = self.QImage2vtkImage(image)
    volumeNode = slicer.vtkMRMLVectorVolumeNode()
    volumeNode.SetName("WEB")
    volumeNode.SetAndObserveImageData(imageData)
    displayNode = slicer.vtkMRMLVectorVolumeDisplayNode()
    slicer.mrmlScene.AddNode(volumeNode)
    slicer.mrmlScene.AddNode(displayNode)
    volumeNode.SetAndObserveDisplayNodeID(displayNode.GetID())
    displayNode.SetAndObserveColorNodeID('vtkMRMLColorTableNodeGrey')
    self.mutate()
    def setup(self):
        """Build the Data Collection module GUI.

        Creates the parameters panel (recording node, dataset, video ID,
        file type, collect-from-sequence checkbox and problem-type
        selectors plus the per-problem-type frames), wires the widget
        signals, and makes sure a 'Webcam_Reference' vector volume with
        its PLUS/OpenIGTLink connector and reslice driver exists.
        """
        ScriptedLoadableModuleWidget.setup(self)
        self.logic = DataCollectionLogic()

        self.moduleDir = os.path.dirname(slicer.modules.datacollection.path)
        datasetDirectory = os.path.join(self.moduleDir, os.pardir, "Datasets")
        # Create the Datasets directory next to the module on first use.
        try:
            os.listdir(datasetDirectory)
        except FileNotFoundError:
            os.mkdir(datasetDirectory)

        #
        # Parameters Area
        #
        parametersCollapsibleButton = ctk.ctkCollapsibleButton()
        parametersCollapsibleButton.text = "Parameters"
        self.layout.addWidget(parametersCollapsibleButton)

        # Layout within the dummy collapsible button
        parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)

        #self.imageSaveDirectory = qt.QLineEdit("Select directory to save images")
        #parametersFormLayout.addRow(self.imageSaveDirectory)

        # Recording-node selector, pre-populated with every volume node
        # currently in the scene.
        self.selectRecordingNodeComboBox = qt.QComboBox()
        self.selectRecordingNodeComboBox.addItems(["Select Image Node"])
        self.recordingNodes = slicer.util.getNodesByClass("vtkMRMLVolumeNode")
        recordingNodeNames = []
        for recordingNode in self.recordingNodes:
            recordingNodeNames.append(recordingNode.GetName())
        self.selectRecordingNodeComboBox.addItems(recordingNodeNames)
        parametersFormLayout.addRow(self.selectRecordingNodeComboBox)

        # Dataset selector: existing dataset directories (entries without
        # a dot) plus a "Create New Dataset" entry.
        self.datasetSelector = qt.QComboBox()
        self.datasetSelector.addItems(["Select Dataset"])
        datasetDirectoryContents = os.listdir(
            os.path.join(self.moduleDir, os.pardir, "Datasets"))
        datasetNames = [
            dir for dir in datasetDirectoryContents if dir.find(".") == -1
        ]
        self.datasetSelector.addItems(["Create New Dataset"])
        self.datasetSelector.addItems(datasetNames)
        parametersFormLayout.addRow(self.datasetSelector)

        self.videoIDComboBox = qt.QComboBox()
        self.videoIDComboBox.addItems(
            ["Select video ID", "Create new video ID"])
        parametersFormLayout.addRow(self.videoIDComboBox)

        # Image file format used when writing collected frames.
        self.fileTypeComboBox = qt.QComboBox()
        self.fileTypeComboBox.addItems([".jpg", ".png", ".bmp", ".tiff"])
        parametersFormLayout.addRow(self.fileTypeComboBox)
        self.fileType = self.fileTypeComboBox.currentText

        self.collectFromSequenceCheckBox = qt.QCheckBox(
            "Collect from Sequence")
        self.collectingFromSequence = False
        parametersFormLayout.addRow(self.collectFromSequenceCheckBox)

        # Problem-type selector with one (initially hidden) options frame
        # per problem type; frames are shown by onProblemTypeSelected.
        self.problemTypeComboBox = qt.QComboBox()
        self.problemTypeComboBox.addItems([
            "Select problem type", "Classification", "Detection",
            "Segmentation"
        ])
        parametersFormLayout.addRow(self.problemTypeComboBox)
        self.classificationFrame = qt.QFrame()
        self.classificationLayout()
        parametersFormLayout.addRow(self.classificationFrame)
        self.classificationFrame.visible = False
        self.detectionFrame = qt.QFrame()
        self.detectionLayout()
        parametersFormLayout.addRow(self.detectionFrame)
        self.detectionFrame.visible = False
        self.segmentationFrame = qt.QFrame()
        self.segmentationLayout()
        parametersFormLayout.addRow(self.segmentationFrame)
        self.segmentationFrame.visible = False

        #
        # Start/Stop Image Collection Button
        #
        self.startStopCollectingImagesButton = qt.QPushButton(
            "Start Image Collection")
        self.startStopCollectingImagesButton.toolTip = "Collect images."
        self.startStopCollectingImagesButton.enabled = False
        parametersFormLayout.addRow(self.startStopCollectingImagesButton)

        self.infoLabel = qt.QLabel("")
        parametersFormLayout.addRow(self.infoLabel)
        # connections
        self.fileTypeComboBox.connect('currentIndexChanged(int)',
                                      self.onFileTypeSelected)
        # NOTE(review): self.inputSegmentationSelector is not created in
        # this method — presumably built by segmentationLayout(); confirm
        # it exists before this connect runs.
        self.inputSegmentationSelector.connect(
            'currentIndexChanged(int)', self.onSegmentationInputSelected)
        self.selectRecordingNodeComboBox.connect("currentIndexChanged(int)",
                                                 self.onRecordingNodeSelected)
        self.datasetSelector.connect('currentIndexChanged(int)',
                                     self.onDatasetSelected)
        self.startStopCollectingImagesButton.connect(
            'clicked(bool)', self.onStartStopCollectingImagesButton)
        self.videoIDComboBox.connect('currentIndexChanged(int)',
                                     self.onVideoIDSelected)
        self.collectFromSequenceCheckBox.connect(
            'stateChanged(int)', self.onCollectFromSequenceChecked)
        self.problemTypeComboBox.connect('currentIndexChanged(int)',
                                         self.onProblemTypeSelected)

        # Add vertical spacer
        self.layout.addStretch(1)

        # Refresh Start/Stop Collecting Images Button state
        self.onSelect()
        # getNode raises when the node is absent; build a placeholder
        # black 640x480 webcam volume in that case.
        try:
            self.webcamReference = slicer.util.getNode('Webcam_Reference')
        except slicer.util.MRMLNodeNotFoundException:
            imageSpacing = [0.2, 0.2, 0.2]
            imageData = vtk.vtkImageData()
            imageData.SetDimensions(640, 480, 1)
            imageData.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
            thresholder = vtk.vtkImageThreshold()
            thresholder.SetInputData(imageData)
            thresholder.SetInValue(0)
            thresholder.SetOutValue(0)
            # Create volume node
            self.webcamReference = slicer.vtkMRMLVectorVolumeNode()
            self.webcamReference.SetName('Webcam_Reference')
            self.webcamReference.SetSpacing(imageSpacing)
            self.webcamReference.SetImageDataConnection(
                thresholder.GetOutputPort())
            # Add volume to scene
            slicer.mrmlScene.AddNode(self.webcamReference)
            displayNode = slicer.vtkMRMLVectorVolumeDisplayNode()
            slicer.mrmlScene.AddNode(displayNode)
            self.webcamReference.SetAndObserveDisplayNodeID(
                displayNode.GetID())

        self.webcamConnectorNode = self.createWebcamPlusConnector()
        self.webcamConnectorNode.Start()
        self.setupWebcamResliceDriver()
Example #5
0
    def run(self, charVolume, testThicknessVolume, testLabel, outputLabel):
        """Run the characterization step.

        Validates the inputs, extracts texture features from charVolume,
        z-scores them, optionally folds in thickness information, ensures
        the classifier's Python dependencies are importable and finally
        runs the classification into outputLabel.

        Returns True on success, False when input validation fails.
        """
        # Input validation — bail out early with a user-facing error.
        if not charVolume:
            slicer.util.errorDisplay('No input volumes were selected.')
            return False
        if not self.isValidInputNode(charVolume):
            slicer.util.errorDisplay(
                'Characterization selected volumes is invalid.')
            return False
        if not outputLabel:
            slicer.util.errorDisplay('No output volume was selected.')
            return False

        # Default feature-extraction switches.
        doHaralick = True
        doHistogram = False
        logging.info('Processing started')

        # Characterization: features are written into a vector volume.
        vectorFeatureNodes = slicer.vtkMRMLVectorVolumeNode()
        slicer.mrmlScene.AddNode(vectorFeatureNodes)

        doWithThickness = testThicknessVolume is not None

        self.runCharacterizationCLI(charVolume, vectorFeatureNodes, doHaralick,
                                    doHistogram)

        # Flatten the vector image into a per-voxel feature list.
        listFeatures = []
        self.createListFromVectorImage(vectorFeatureNodes, listFeatures)

        # Z-score step.
        listZFeatures = []
        self.generateZScore(charVolume, listFeatures, testThicknessVolume,
                            listZFeatures)

        # All features, ordered the way the classifier expects them.
        listFull = []
        self.composeListWithAllFeatures(listFeatures, listZFeatures,
                                        doWithThickness, listFull)

        # Make sure the classifier dependencies are importable, installing
        # any missing one into the user site-packages with pip.
        import subprocess

        for pip_name, import_name in (('scipy', 'scipy'),
                                      ('scikit-learn', 'sklearn'),
                                      ('joblib', 'joblib')):
            try:
                __import__(import_name)
            except ImportError:
                logging.info(
                    "{0} was not found. Attempting to install {0}.".format(
                        pip_name))
                subprocess.call([
                    sys.executable, "-m", "pip", "install", "--user", pip_name
                ])
            else:
                logging.info("{0} was found. Successfully imported".format(
                    pip_name))

        self.doClassification(listFull, testLabel, outputLabel,
                              doWithThickness)

        logging.info('Processing completed')

        return True
  def setup(self):
    """Build the Collect Training Images GUI.

    Creates the model / image-class selectors and the start/stop button,
    wires the widget signals, and makes sure a 'Webcam_Reference' vector
    volume with its PLUS/OpenIGTLink connector and reslice driver exists.
    """
    ScriptedLoadableModuleWidget.setup(self)
    self.logic = Collect_Training_ImagesLogic()

    self.moduleDir = os.path.dirname(slicer.modules.collect_training_images.path)

    #
    # Parameters Area
    #
    parametersCollapsibleButton = ctk.ctkCollapsibleButton()
    parametersCollapsibleButton.text = "Parameters"
    self.layout.addWidget(parametersCollapsibleButton)

    # Layout within the dummy collapsible button
    parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)

    #self.imageSaveDirectory = qt.QLineEdit("Select directory to save images")
    #parametersFormLayout.addRow(self.imageSaveDirectory)

    # Model selector: existing model directories (entries without a dot,
    # excluding the Dockerfile) plus a "Create new model" entry.
    self.modelSelector = qt.QComboBox()
    self.modelSelector.addItems(["Select model"])
    modelDirectoryContents = os.listdir(os.path.join(self.moduleDir,os.pardir,"Models"))
    modelNames = [dir for dir in modelDirectoryContents if dir.find(".") == -1 and dir != "Dockerfile"]
    self.modelSelector.addItems(["Create new model"])
    self.modelSelector.addItems(modelNames)
    parametersFormLayout.addRow(self.modelSelector)

    #self.imageSaveDirectoryLabel = qt.QLabel("Training photo directory:")
    #parametersFormLayout.addRow(self.imageSaveDirectoryLabel)

    # Path line edit is configured but never added to the layout (see the
    # commented-out addRow below).
    self.imageSaveDirectoryLineEdit = ctk.ctkPathLineEdit()
    #node = self.logic.getParameterNode()
    imageSaveDirectory = os.path.dirname(slicer.modules.collect_training_images.path)
    self.imageSaveDirectoryLineEdit.currentPath = imageSaveDirectory
    self.imageSaveDirectoryLineEdit.filters = ctk.ctkPathLineEdit.Dirs
    # NOTE(review): the second assignment overwrites the first options
    # value — presumably the two flags were meant to be combined; confirm.
    self.imageSaveDirectoryLineEdit.options = ctk.ctkPathLineEdit.DontUseSheet
    self.imageSaveDirectoryLineEdit.options = ctk.ctkPathLineEdit.ShowDirsOnly
    self.imageSaveDirectoryLineEdit.showHistoryButton = False
    self.imageSaveDirectoryLineEdit.setMinimumWidth(100)
    self.imageSaveDirectoryLineEdit.setMaximumWidth(500)
    #parametersFormLayout.addRow(self.imageSaveDirectoryLineEdit)

    self.imageClassComboBox = qt.QComboBox()
    self.imageClassComboBox.addItems(["Select image class","Create new image class"])

    parametersFormLayout.addRow(self.imageClassComboBox)

    #
    # Start/Stop Image Collection Button
    #
    self.startStopCollectingImagesButton = qt.QPushButton("Start Image Collection")
    self.startStopCollectingImagesButton.toolTip = "Collect training images."
    self.startStopCollectingImagesButton.enabled = False
    parametersFormLayout.addRow(self.startStopCollectingImagesButton)


    self.infoLabel = qt.QLabel("")
    parametersFormLayout.addRow(self.infoLabel)

    # connections
    self.modelSelector.connect('currentIndexChanged(int)',self.onModelSelected)
    self.startStopCollectingImagesButton.connect('clicked(bool)', self.onStartStopCollectingImagesButton)
    self.imageClassComboBox.connect('currentIndexChanged(int)',self.onImageClassSelected)

    # Add vertical spacer
    self.layout.addStretch(1)

    # Refresh Start/Stop Collecting Images Button state
    self.onSelect()
    # getNode raises when the node is absent; build a placeholder black
    # 640x480 webcam volume in that case.
    try:
      self.webcamReference = slicer.util.getNode('Webcam_Reference')
    except slicer.util.MRMLNodeNotFoundException:
      imageSpacing = [0.2, 0.2, 0.2]
      imageData = vtk.vtkImageData()
      imageData.SetDimensions(640, 480, 1)
      imageData.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
      thresholder = vtk.vtkImageThreshold()
      thresholder.SetInputData(imageData)
      thresholder.SetInValue(0)
      thresholder.SetOutValue(0)
      # Create volume node
      self.webcamReference = slicer.vtkMRMLVectorVolumeNode()
      self.webcamReference.SetName('Webcam_Reference')
      self.webcamReference.SetSpacing(imageSpacing)
      self.webcamReference.SetImageDataConnection(thresholder.GetOutputPort())
      # Add volume to scene
      slicer.mrmlScene.AddNode(self.webcamReference)
      displayNode = slicer.vtkMRMLVectorVolumeDisplayNode()
      slicer.mrmlScene.AddNode(displayNode)
      self.webcamReference.SetAndObserveDisplayNodeID(displayNode.GetID())

    self.webcamConnectorNode = self.createWebcamPlusConnector()
    self.webcamConnectorNode.Start()
    self.setupWebcamResliceDriver()
    def loadVolume(self, outputNode=None):
        """Load the files in self._filePaths into outputNode and return it.

        Slices are read with SimpleITK, optionally reversed, cropped to
        the selected extent and decimated by integer step sizes derived
        from the requested output spacing.  A new scalar or vector volume
        node is created when outputNode is None.

        Raises ValueError when slice sizes are inconsistent or when the
        existing output node type does not match the loaded data.

        TODO: currently downsample is done with nearest neighbor filtering
        (i.e. skip slices and take every other row/column).
        It would be better to do a high order spline or other
        method, but there is no convenient streaming option for these.
        One option will be do to a box filter by averaging adjacent
        slices/row/columns which should be easy in numpy
        and give good results for a pixel aligned 50% scale operation.
        """

        ijkToRAS, extent, numberOfScalarComponents = self.outputVolumeGeometry(
        )
        outputSpacing = [numpy.linalg.norm(ijkToRAS[0:3, i]) for i in range(3)]
        originalVolumeSpacing = [
            numpy.linalg.norm(self.originalVolumeIJKToRAS[0:3, i])
            for i in range(3)
        ]

        # Integer decimation factor per axis (nearest-neighbor sampling).
        stepSize = [
            int(outputSpacing[i] / originalVolumeSpacing[i]) for i in range(3)
        ]

        # Keep only every stepSize[2]-th slice along K.
        paths = self._filePaths[::stepSize[2]]

        if self.reverseSliceOrder:
            paths.reverse()

        volumeArray = None
        sliceIndex = 0
        firstArrayFullShape = None
        for inputSliceIndex, path in enumerate(paths):
            if inputSliceIndex < extent[4] or inputSliceIndex > extent[5]:
                # out of selected bounds
                continue
            reader = sitk.ImageFileReader()
            reader.SetFileName(path)
            image = reader.Execute()
            sliceArray = sitk.GetArrayFromImage(image)
            if len(sliceArray.shape) == 3 and self.outputGrayscale:
                # We convert to grayscale by simply taking the first component, which is appropriate for cases when grayscale image is stored as R=G=B,
                # but to convert real RGB images it could better to compute the mean or luminance.
                # (Reuse the already-extracted array instead of calling
                # GetArrayFromImage a second time.)
                sliceArray = sliceArray[:, :, 0]
            currentArrayFullShape = sliceArray.shape
            if firstArrayFullShape is None:
                firstArrayFullShape = currentArrayFullShape

            if volumeArray is None:
                # Allocate the full output array on the first kept slice.
                shape = [
                    extent[5] - extent[4] + 1, extent[3] - extent[2] + 1,
                    extent[1] - extent[0] + 1
                ]
                if len(sliceArray.shape) == 3:
                    shape.append(sliceArray.shape[2])
                volumeArray = numpy.zeros(shape, dtype=sliceArray.dtype)
            if len(sliceArray.shape) == 3:
                # vector volume
                sliceArray = sliceArray[extent[2] *
                                        stepSize[1]:(extent[3] + 1) *
                                        stepSize[1]:stepSize[1], extent[0] *
                                        stepSize[0]:(extent[1] + 1) *
                                        stepSize[0]:stepSize[0], :]
            else:
                # grayscale volume
                sliceArray = sliceArray[extent[2] *
                                        stepSize[1]:(extent[3] + 1) *
                                        stepSize[1]:stepSize[1], extent[0] *
                                        stepSize[0]:(extent[1] + 1) *
                                        stepSize[0]:stepSize[0]]
            if (sliceIndex > 0) and (volumeArray[sliceIndex].shape !=
                                     sliceArray.shape):
                # Fix: the scalar-components count used to be passed to
                # logging.debug instead of str.format (7 placeholders but
                # only 6 arguments), which raised IndexError here instead
                # of logging the message.
                logging.debug(
                    "After downsampling, {} size is {} x {}\n\n{} size is {} x {} ({} scalar components)"
                    .format(paths[0], volumeArray[0].shape[0],
                            volumeArray[0].shape[1], path, sliceArray.shape[0],
                            sliceArray.shape[1],
                            sliceArray.shape[2]
                            if len(sliceArray.shape) == 3 else 1))
                # Fix: firstArrayFullShape/currentArrayFullShape are shape
                # tuples, so index them directly — calling '.shape' on a
                # tuple raised AttributeError while composing the message.
                message = "There are multiple datasets in the folder. Please select a single file as a sample or specify a pattern.\nDetails:\n{0} size is {1} x {2} ({6} scalar components)\n\n{3} size is {4} x {5} ({7} scalar components)".format(
                    paths[0], firstArrayFullShape[0], firstArrayFullShape[1],
                    path, currentArrayFullShape[0], currentArrayFullShape[1],
                    firstArrayFullShape[2]
                    if len(firstArrayFullShape) == 3 else 1,
                    currentArrayFullShape[2]
                    if len(currentArrayFullShape) == 3 else 1)
                raise ValueError(message)
            volumeArray[sliceIndex] = sliceArray
            sliceIndex += 1

        if not outputNode:
            # Create a new output node matching the data dimensionality.
            if len(volumeArray.shape) == 3:
                outputNode = slicer.vtkMRMLScalarVolumeNode()
            else:
                outputNode = slicer.vtkMRMLVectorVolumeNode()
                if volumeArray.shape[3] == 3:
                    outputNode.SetVoxelVectorType(
                        outputNode.VoxelVectorTypeColorRGB)
                elif volumeArray.shape[3] == 4:
                    outputNode.SetVoxelVectorType(
                        outputNode.VoxelVectorTypeColorRGBA)
            slicer.mrmlScene.AddNode(outputNode)
            # Name the node after the first file (without extension).
            path = paths[0]
            fileName = os.path.basename(path)
            name = os.path.splitext(fileName)[0]
            outputNode.SetName(name)
        else:
            # Output volume already exists, check if it is the correct type
            if len(volumeArray.shape) == 4:
                if outputNode.IsA("vtkMRMLScalarVolumeNode"):
                    raise ValueError(
                        "Select a vector volume as output volume or force grayscale output."
                    )
            else:
                if outputNode.IsA("vtkMRMLVectorVolumeNode"):
                    raise ValueError(
                        "Select a scalar volume as output volume.")

        ijkToRAS = slicer.util.vtkMatrixFromArray(ijkToRAS)
        outputNode.SetIJKToRASMatrix(ijkToRAS)
        slicer.util.updateVolumeFromArray(outputNode, volumeArray)
        slicer.util.setSliceViewerLayers(background=outputNode, fit=True)
        return outputNode
 def onSaveButtonClicked(self):
   """Save every label-map segmentation plus the original tile into a
   timestamped zip archive, together with a JSON manifest describing
   each layer and the execution metadata (Python 2 code).
   """
   import zipfile
   import os.path
   import uuid
   # Layer metadata was stashed in the parameter node by QuickTCGAEffect.
   bundle = EditUtil.EditUtil().getParameterNode().GetParameter('QuickTCGAEffect,erich')
   tran = json.loads(bundle)
   layers = []
   for key in tran:
     nn = tran[key]
     nn["file"] = key + '.tif'
     layers.append(tran[key])
   self.j['layers'] = layers
   self.j['username'] = self.setupUserName.text
   self.j['sourcetile'] = self.tilename
   self.j['generator'] = slicer.app.applicationVersion
   self.j['timestamp'] = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
   self.j['execution_id'] = self.setupExecutionID.text + "-"+ uuid.uuid4().get_urn()
   labelNodes = slicer.util.getNodes('vtkMRMLLabelMapVolumeNode*')
   # NOTE(review): savedMessage is built but never used in this method.
   savedMessage = 'Segmentations for the following series were saved:\n\n'
   zfname = os.path.join(self.dataDirButton.directory, self.tilename + "_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S") + '.zip')
   print "zipfile name"
   print zfname
   zf = zipfile.ZipFile(zfname, mode='w')
   # The foreground volume of the Red slice view is the original tile.
   red_logic = slicer.app.layoutManager().sliceWidget("Red").sliceLogic()
   red_cn = red_logic.GetSliceCompositeNode()
   fg = red_cn.GetForegroundVolumeID()
   ff = slicer.util.getNode(fg)
   sNode = slicer.vtkMRMLVolumeArchetypeStorageNode()
   # Write the original tile to a temp file, add it to the archive, then
   # delete the temp file.
   sNode.SetFileName("original.tif")
   sNode.SetWriteFileFormat('tif')
   sNode.SetURI(None)
   success = sNode.WriteData(ff)
   zf.write("original.tif")
   os.remove("original.tif")
   # One .tif per label map, plus a '-comp.tif' composite of the label
   # over the original image.
   for label in labelNodes.values():
     labelName = label.GetName()
     labelFileName = os.path.join(self.dataDirButton.directory, labelName + '.tif')
     compFileName = os.path.join(self.dataDirButton.directory, labelName + '-comp.tif')
     sNode.SetFileName(labelFileName)
     success = sNode.WriteData(label)
     if success:
       print "adding "+labelFileName+" to zipfile"
       zf.write(labelFileName,os.path.basename(labelFileName))
       os.remove(labelFileName)
     else:
       print "failed writing "+labelFileName
     comp = self.WriteLonI(label.GetImageData(),ff.GetImageData())
     volumeNode = slicer.vtkMRMLVectorVolumeNode()
     volumeNode.SetName("COMP")
     volumeNode.SetAndObserveImageData(comp)
     sNode.SetFileName(compFileName)
     success = sNode.WriteData(volumeNode)
     if success:
       print "adding "+compFileName+" to zipfile"
       zf.write(compFileName,os.path.basename(compFileName))
       os.remove(compFileName)
     else:
       print "failed writing "+compFileName
   # Write the manifest, add it to the archive, then clean up.
   jstr = json.dumps(self.j,sort_keys=True, indent=4, separators=(',', ': '))
   mfname = os.path.join(self.dataDirButton.directory, 'manifest.json')
   f = open(mfname,'w')
   f.write(jstr)
   f.close()
   zf.write(mfname,os.path.basename(mfname))
   zf.close()
   os.remove(mfname)
Example #9
0
    def setup(self):
        """Build the CNN Image Classifier GUI.

        Creates the model selector, Start button, detected-object table
        and confidence-threshold slider, wires the widget signals, and
        makes sure a 'Webcam_Reference' vector volume with its
        PLUS/OpenIGTLink connector and reslice driver exists.
        """
        ScriptedLoadableModuleWidget.setup(self)
        self.logic = CNN_Image_ClassifierLogic()
        # NOTE(review): the module directory is derived from the
        # collect_training_images module's path — confirm this is intended
        # for the CNN classifier module as well.
        self.moduleDir = os.path.dirname(
            slicer.modules.collect_training_images.path)

        # Instantiate and connect widgets ...

        #
        # Parameters Area
        #
        parametersCollapsibleButton = ctk.ctkCollapsibleButton()
        parametersCollapsibleButton.text = "Parameters"
        self.layout.addWidget(parametersCollapsibleButton)

        # Layout within the dummy collapsible button
        parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)

        # Model selector: existing retrain-container model directories
        # (entries without a dot, excluding the Dockerfile) plus a
        # "Create new model" entry.
        self.modelSelector = qt.QComboBox()
        self.modelSelector.addItems(["Select model"])
        modelDirectoryContents = os.listdir(
            os.path.join(self.moduleDir, os.pardir, "Models/retrainContainer"))
        modelNames = [
            dir for dir in modelDirectoryContents
            if dir.find(".") == -1 and dir != "Dockerfile"
        ]
        self.modelSelector.addItems(["Create new model"])
        self.modelSelector.addItems(modelNames)
        parametersFormLayout.addRow(self.modelSelector)

        #
        # Apply Button
        #
        self.applyButton = qt.QPushButton("Start")
        self.applyButton.toolTip = "Run the algorithm."
        self.applyButton.enabled = False
        parametersFormLayout.addRow(self.applyButton)

        #
        # Object table
        #
        self.objectTable = qt.QTableWidget()
        self.objectTable.setColumnCount(3)
        self.objectTable.setHorizontalHeaderLabels(
            ["Name", "Found", "Confidence"])
        parametersFormLayout.addRow(self.objectTable)

        #
        # Adjust Confidence Thresholds
        #
        confidenceThresholdsCollapsibleButton = ctk.ctkCollapsibleButton()
        confidenceThresholdsCollapsibleButton.text = "Confidence Thresholds"
        self.layout.addWidget(confidenceThresholdsCollapsibleButton)

        confidenceFormLayout = qt.QFormLayout(
            confidenceThresholdsCollapsibleButton)

        self.confidenceSlider = qt.QSlider(0x1)  #horizontal slider
        self.confidenceSlider.setRange(0, 100)
        self.confidenceSlider.setTickInterval(5)
        self.confidenceSlider.setTickPosition(2)  #Ticks appear below slider
        self.confidenceSlider.setSliderPosition(80)
        self.confidenceSlider.setToolTip(
            "Set the minimum degree of confidence that must be met for an object to be considered found"
        )
        confidenceFormLayout.addRow("Confidence: ", self.confidenceSlider)
        self.confidenceLabel = qt.QLabel("80%")
        confidenceFormLayout.addRow(self.confidenceLabel)

        # connections
        self.applyButton.connect('clicked(bool)', self.onApplyButton)
        self.modelSelector.connect('currentIndexChanged(int)',
                                   self.onModelSelected)
        self.confidenceSlider.connect('sliderMoved(int)',
                                      self.onConfidenceChanged)

        # Add vertical spacer
        self.layout.addStretch(1)

        # Refresh Apply button state
        self.onSelect()

        # Fix: slicer.util.getNode() raises MRMLNodeNotFoundException when
        # no node matches (it does not return None), so the previous
        # 'if not self.webcamReference:' fallback could never run.  Use
        # the same try/except pattern as the other setup() implementations
        # in this file.
        try:
            self.webcamReference = slicer.util.getNode('Webcam_Reference')
        except slicer.util.MRMLNodeNotFoundException:
            imageSpacing = [0.2, 0.2, 0.2]
            # Placeholder black 640x480 image until real frames arrive.
            imageData = vtk.vtkImageData()
            imageData.SetDimensions(640, 480, 1)
            imageData.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
            thresholder = vtk.vtkImageThreshold()
            thresholder.SetInputData(imageData)
            thresholder.SetInValue(0)
            thresholder.SetOutValue(0)
            # Create volume node
            self.webcamReference = slicer.vtkMRMLVectorVolumeNode()
            self.webcamReference.SetName('Webcam_Reference')
            self.webcamReference.SetSpacing(imageSpacing)
            self.webcamReference.SetImageDataConnection(
                thresholder.GetOutputPort())
            # Add volume to scene
            slicer.mrmlScene.AddNode(self.webcamReference)
            displayNode = slicer.vtkMRMLVectorVolumeDisplayNode()
            slicer.mrmlScene.AddNode(displayNode)
            self.webcamReference.SetAndObserveDisplayNodeID(
                displayNode.GetID())

        self.webcamConnectorNode = self.createWebcamPlusConnector()
        self.webcamConnectorNode.Start()
        self.setupWebcamResliceDriver()