Example 1
    def BuildGUI(self):
        """Build the module GUI.

        Registers the module page, adds the standard Help/About frame, and
        fills a collapsible frame with a read-only, hyperlink-enabled text
        widget showing the module's help and main text.
        """
        # FIX(review): the page is registered under self._title, but the
        # original code looked it up under the hard-coded name
        # "DiffusionWelcome" — a mismatch makes GetPageWidget return None.
        # Use self._title consistently and reuse the fetched widget.
        self.GetUIPanel().AddPage(self._title, self._title, "")
        pageWidget = self.GetUIPanel().GetPageWidget(self._title)
        self.BuildHelpAndAboutFrame(pageWidget, self._helpText, self._aboutText)

        # Collapsible frame that hosts the welcome text, packed into the page.
        moduleFrame = slicer.vtkSlicerModuleCollapsibleFrame()
        moduleFrame.SetParent(pageWidget)
        moduleFrame.Create()
        moduleFrame.SetLabelText(self._title)
        moduleFrame.ExpandFrame()
        slicer.TkCall("pack %s -side top -anchor nw -fill x -padx 2 -pady 2 -in %s"
                      % (moduleFrame.GetWidgetName(), pageWidget.GetWidgetName()))

        # Read-only, word-wrapped text widget with hyperlink support.
        text = slicer.vtkKWTextWithHyperlinksWithScrollbars()
        text.SetParent(moduleFrame.GetFrame())
        text.Create()
        text.SetHorizontalScrollbarVisibility(0)
        text.GetWidget().SetReliefToFlat()
        text.GetWidget().SetWrapToWord()
        text.GetWidget().QuickFormattingOn()
        text.GetWidget().SetHeight(35)
        text.SetText(self._helpText + "\n\n\n" + self._mainText)
        # Important: ReadOnlyOn must come after SetText, otherwise SetText
        # has no effect.
        text.GetWidget().ReadOnlyOn()
        slicer.TkCall("pack %s -side top -anchor nw -fill x -padx 2 -pady 2"
                      % text.GetWidgetName())
Example 2
    def BuildGUI(self):
        """Build the module GUI.

        Registers the module page, adds the standard Help/About frame, and
        fills a collapsible frame with a read-only, hyperlink-enabled text
        widget showing the module's help and main text.
        """
        # FIX(review): the page is registered under self._title, but the
        # original code looked it up under the hard-coded name
        # "DiffusionWelcome" — a mismatch makes GetPageWidget return None.
        # Use self._title consistently and reuse the fetched widget.
        self.GetUIPanel().AddPage(self._title, self._title, "")
        pageWidget = self.GetUIPanel().GetPageWidget(self._title)
        self.BuildHelpAndAboutFrame(pageWidget, self._helpText,
                                    self._aboutText)

        # Collapsible frame that hosts the welcome text, packed into the page.
        moduleFrame = slicer.vtkSlicerModuleCollapsibleFrame()
        moduleFrame.SetParent(pageWidget)
        moduleFrame.Create()
        moduleFrame.SetLabelText(self._title)
        moduleFrame.ExpandFrame()
        slicer.TkCall(
            "pack %s -side top -anchor nw -fill x -padx 2 -pady 2 -in %s" %
            (moduleFrame.GetWidgetName(), pageWidget.GetWidgetName()))

        # Read-only, word-wrapped text widget with hyperlink support.
        text = slicer.vtkKWTextWithHyperlinksWithScrollbars()
        text.SetParent(moduleFrame.GetFrame())
        text.Create()
        text.SetHorizontalScrollbarVisibility(0)
        text.GetWidget().SetReliefToFlat()
        text.GetWidget().SetWrapToWord()
        text.GetWidget().QuickFormattingOn()
        text.GetWidget().SetHeight(35)
        text.SetText(self._helpText + "\n\n\n" + self._mainText)
        # Important: ReadOnlyOn must come after SetText, otherwise SetText
        # has no effect.
        text.GetWidget().ReadOnlyOn()
        slicer.TkCall("pack %s -side top -anchor nw -fill x -padx 2 -pady 2" %
                      text.GetWidgetName())
Example 3
    def BuildGUI(self):
        """Build the SegmentationWelcome GUI.

        Registers the "SegmentationWelcome" page, adds the standard
        Help/About frame, and fills a collapsible frame with a read-only,
        hyperlink-enabled overview of Slicer's segmentation modules.
        """
        self.GetUIPanel().AddPage("SegmentationWelcome", "SegmentationWelcome",
                                  "")
        pageWidget = self.GetUIPanel().GetPageWidget("SegmentationWelcome")
        helpText = "This module provides a quick guide for choosing the segmentation options most appropriate for your task. More detailed information can be found on Slicer's wiki: <a>http://www.slicer.org/slicerWiki/index.php/Modules:SegmentationOverview3.6</a>.\n\nThe following provides a brief overview of the main and auxiliary  modules Slicer has to offer for image segmentation. Selecting the best module will depend on your input data and the underlying questions you are asking. The spectrum of approaches ranges from fully automated to manual segmentation; most modules are generic and can handle any image content, but a few are designed specifically for brain images."
        aboutText = "This work is supported by NA-MIC, NAC, Harvard Catalyst, NCIGT, and the Slicer Community. See http://www.slicer.org for details.  Module implemented by Wendy Plesniak and Steve Pieper."
        self.BuildHelpAndAboutFrame(pageWidget, helpText, aboutText)

        # Collapsible frame that hosts the welcome text, packed into the page
        # via Tk's pack geometry manager.
        moduleFrame = slicer.vtkSlicerModuleCollapsibleFrame()
        moduleFrame.SetParent(
            self.GetUIPanel().GetPageWidget("SegmentationWelcome"))
        moduleFrame.Create()
        moduleFrame.SetLabelText("SegmentationWelcome")
        moduleFrame.ExpandFrame()
        widgetName = moduleFrame.GetWidgetName()
        pageWidgetName = self.GetUIPanel().GetPageWidget(
            "SegmentationWelcome").GetWidgetName()
        slicer.TkCall(
            "pack %s -side top -anchor nw -fill x -padx 2 -pady 2 -in %s" %
            (widgetName, pageWidgetName))

        # Read-only, word-wrapped text widget with hyperlink support that
        # displays the module overview below.
        text = slicer.vtkKWTextWithHyperlinksWithScrollbars()
        text.SetParent(moduleFrame.GetFrame())
        text.Create()
        text.SetHorizontalScrollbarVisibility(0)
        text.GetWidget().SetReliefToFlat()
        text.GetWidget().SetWrapToWord()
        text.GetWidget().QuickFormattingOn()
        text.GetWidget().SetHeight(35)
        text.SetText(
            """This module provides a quick guide for choosing the segmentation options most appropriate for your task. More detailed information can be found on Slicer's wiki: <a>http://www.slicer.org/slicerWiki/index.php/Modules:SegmentationOverview3.6</a>.
        
The following provides a brief overview of the main modules Slicer has to offer for image segmentation. Selecting the best module will depend on your input data and the underlying questions you are asking. The spectrum of approaches ranges from fully automated to manual segmentation; most modules are generic and can handle any image content, but a few are designed specifically for brain images. Slicer modules are listed below:

**Interactive Editor**
Slicer's interactive editor is a module for manual segmentation of volumes. Some of the tools mimic a painting interface like photoshop or gimp, but work on 3D arrays of voxels rather than on 2D pixels. The overall goal is to allow users to efficiently and precisely define structures within their volumes as label map volumes. These label maps can be used for building models, or further processing (using, for example, the Label Statistics module).

**EM Segment Template Builder**
This module is designed for users who perform atlas based medical image segmentation and has been tested on brain MRI images. Familiarity with statistical modeling may help but is not required. It allows the user to configure the algorithm ---step-by-step--- to a variety of imaging protocols and anatomical structures, and then apply the algorithm to segment data. Configuration settings are stored in an EMSegment parameters node in the Slicer3 MRML tree. These settings can be saved and later applied to new data via any of the EMSegment interfaces within Slicer3 or the command-line EMSegment executable.

**EMSegment Command-Line**
This module is used to simplify the process of segmenting large collections of images by providing a command line interface to the EMSegment algorithm for script and batch processing. The primary function of the EMSegment Template Builder module is to step the user through the process of calibrating, via algorithm parameters, the segmentation algorithm to a particular set of input data. However, once a successful collection of parameters is established, the user will commonly want to bypass this detailed calibration process when segmenting new images by using those parameters collected from the calibration process in EMSegment Template Builder module. The command-line executable provides this batch processing capability.

**EMSegment Simple**
This module provide a simplified ~~one-click~~ GUI interface for the EMSegment Command-line Executable. The interface is simple because the number of required command-line parameters is minimized, and it is flexible because any EM algorithm parameter can be modified, within the MRML scene, via the EMSegment GUI interface. Similar to the EMSegment Command-line Executable, once a set of parameters for segmentation algorithm is deemed satisfactory through the process of calibrating in the Template Builder module, the user will commonly want to bypass this detailed calibration process when segmenting new images. This module provides a simple interface to specify a small number of required command-line parameters --- a MRML scene (containing algorithm parameters), a target image (or multiple target images, e.g., T1 and T2), and an output labelmap image --- and to allow any EM algorithm parameters to be modified, within the MRML scene, via the EMSegment GUI interface.

**Fast Marching Segmentation**
This module is most useful to segment image regions that have similar intensity. Initialization is accomlished by selecting points within the region to be segmented and expected volume of the segmented structure. The segmentation is completed relatively quickly for typical images, allowing experimentation with choosing optimal parameters. The resulting volume can be adjusted interactively by scrolling through the evolution of the label contour.

** Otsu Treshold Segmentation**
This filter creates a labeled image from a grayscale image. First, it calculates an optimal threshold that separates the image into foreground and background. This threshold separates those two classes so that their intra-class variance is minimal (see <a>http://en.wikipedia.org/wiki/Otsu%27s_method</a>). Then the filter runs a connected component algorithm to generate unique labels for each connected region of the foreground. Finally, the resulting image is relabeled to provide consecutive numbering.

** Simple Region Growing**
Simple Region Growing is a statistical region growing algorithm. The algorithm takes one or more seeds as input. It executes using the following steps:

    * A statistical model of the foreground (mean and standard deviation of intensity) is estimated over neighborhoods of the seed points. The statistical model is converted to a scalar threshold range using the mean intensity of a seed point plus or minus a multiplier or the standard deviation.

    * The algorithm then constructs a segmentation by labeling all voxels that are connected to the seed voxels and satisfy the scalar threshold range.

After this initial segmentation, the statistical model can be iteratively refined by re-calculating the mean and standard deviation of the intensity of the voxels in the initial segmentation. The refined statistical model in turn is converted to a new scalar threshold range as described in the preceding paragraph. This is followed by a new segmentation where the algorithm labels all voxels connected to the seed voxels and satisfy the new scalar threshold range. The number of repetitions for the segmentation process is specified using an iteration parameter to the algorithm.

Through this process, Simple Region Growing attempts to adapt to the statistical properties of the image. Initially, the statistical model is based strictly on the neighborhoods about the seeds. This statistical model is precise (being based on the user supplied seeds) but also uncertain (because the number of samples in the model can be rather small). After the initial segmentation, the statistics are recalculated which yields a more certain model (because the number of samples in the model can be rather large).

** Robust Statistics **
This module is a general purpose segmenter. The target object is initialized by a label map. An active contour model then evolves to extract the desired boundary of the object.

**BRAINSROIAuto**
BRAINSROIAuto automatically generates a Binary Image (or Mask) to encompass the region in an brain image volume occupied by the brain. In general, BRAINSROIAuto takes a brain image volume, and generates a mask without requiring parameter tweaking.
""")
        # Important: ReadOnlyOn must come after SetText, otherwise SetText
        # has no effect.
        text.GetWidget().ReadOnlyOn()
        slicer.TkCall("pack %s -side top -anchor nw -fill x -padx 2 -pady 2" %
                      text.GetWidgetName())
    def __init__(self, parentFrame, parentClass):
        """Initialize the page skeleton and allocate the welcome text widget.

        NOTE(review): the widget is only constructed here; presumably it is
        configured and packed later in BuildGUI — confirm against the full
        class definition.
        """
        SlicerVMTKAdvancedPageSkeleton.__init__(self, parentFrame, parentClass)

        # Hyperlink-enabled scrollable text widget shown on this page.
        self._welcomeMessage = slicer.vtkKWTextWithHyperlinksWithScrollbars()
Example 5
    def BuildGUI(self):
        """Build the RegistrationWelcome GUI.

        Registers the "RegistrationWelcome" page, adds the standard
        Help/About frame, and fills a collapsible frame with a read-only,
        hyperlink-enabled overview of Slicer's registration modules.
        """
        self.GetUIPanel().AddPage("RegistrationWelcome", "RegistrationWelcome",
                                  "")
        pageWidget = self.GetUIPanel().GetPageWidget("RegistrationWelcome")
        helpText = "This module provides a quick guide for choosing the registration option most optimal for your task. More detailed information can be found on Slicer's wiki: <a>http://www.slicer.org/slicerWiki/index.php/Slicer3:Registration</a>.\n\nThe following is a brief overview of the main and auxiliary modules Slicer has to offer for image registration. Most modules are generic and can handle any image content, but a few are designed specifically for brain images."
        aboutText = "This work is supported by NA-MIC, NAC, Harvard Catalyst, NCIGT, and the Slicer Community. See http://www.slicer.org for details.  Module implemented by Wendy Plesniak and Steve Pieper."
        self.BuildHelpAndAboutFrame(pageWidget, helpText, aboutText)

        # Collapsible frame that hosts the welcome text, packed into the page
        # via Tk's pack geometry manager.
        moduleFrame = slicer.vtkSlicerModuleCollapsibleFrame()
        moduleFrame.SetParent(
            self.GetUIPanel().GetPageWidget("RegistrationWelcome"))
        moduleFrame.Create()
        moduleFrame.SetLabelText("RegistrationWelcome")
        moduleFrame.ExpandFrame()
        widgetName = moduleFrame.GetWidgetName()
        pageWidgetName = self.GetUIPanel().GetPageWidget(
            "RegistrationWelcome").GetWidgetName()
        slicer.TkCall(
            "pack %s -side top -anchor nw -fill x -padx 2 -pady 2 -in %s" %
            (widgetName, pageWidgetName))

        # Read-only, word-wrapped text widget with hyperlink support that
        # displays the module overview below.
        text = slicer.vtkKWTextWithHyperlinksWithScrollbars()
        text.SetParent(moduleFrame.GetFrame())
        text.Create()
        text.SetHorizontalScrollbarVisibility(0)
        text.GetWidget().SetReliefToFlat()
        text.GetWidget().SetWrapToWord()
        text.GetWidget().QuickFormattingOn()
        text.GetWidget().SetHeight(35)
        text.SetText(
            """This module provides a quick guide for choosing the registration option most optimal for your task. More detailed information can be found on Slicer's wiki: <a>http://www.slicer.org/slicerWiki/index.php/Slicer3:Registration</a>.\n\nThe following is a brief overview of the main and auxiliary modules Slicer has to offer for image registration. Most modules are generic and can handle any image content, but a few are designed specifically for brain images. Slicer modules are listed below by category:\n\n

**FAST REGISTRATION**\n\n

**  Transforms:** The Transformations module creates and edits slicer Transform nodes. Transformation nodes are used in Slicer to define spacial relationships between different nodes (such as volumes, models, fiducials, ROI's, or other Transform nodes) or between the nodes and the global RAS space. You can establish these relations by dragging the nodes under the Transformation nodes in the Data module. \n

**  Fast Affine Registration:** This module implements a registration algorithm based on the Mattes mutual information registration metric and the affine transformation. Both the fixed and moving images may be optionally smoothed before registration. \n

**  Fast Rigid Registration:** This module implements a registration algorithm based on the Mattes mutual information registration metric. The transformation mapping the moving image to the fixed image consists of 3 translations and 3 rotations. Thus, only rigid body transformations are permitted. Both the fixed and moving images may be optionally smoothed before registration. The module optionally breaks the optimization into multiple stages, each with a different learning rate and number of iterations. \n

**  Fast Nonrigid Registration:** This module performs non-rigid alignment/registration of two volumes based on a B-spline interpolation scheme, driven by points on a (cubic) control grid. The number of points on the grid is expected to be low to obtain a fast and feasible solution. Output consists in a transform node and/or resampled target volume. For finer grids a scale-space approach of multiple runs with gradually increasing grid size is recommended. Note that non-rigid transforms as produced here are not available for instant view in the data module in the way linear transforms are, i.e. moving a new volume under a B-spline transform in the Data/MRML tree will not show an effect in the views. To see the result the volume must be resampled, using the ResampleScalarVectorDWIVolume module. \n

**ROBUST REGISTRATION**\n\n

**  Expert Automated Registration:** This module is an integrated framework providing access to ITK registration technologies. Algorithms can be run in single mode or pipelined. Depending on the size of the data sets, a significant amount of memory is needed. There is an option to trade off speed for memory. Most of the code is parallelized and will take advantage of multicore capabilities, if available. \n

**  Robust Multiresolution Affine Registration:** This module implements mutual information based affine registration using a multi-resolution optimization strategy. Several parts of the algorithm are based on a description of the FLIRT algorithm, see the Reference section. \n

**  BRAINSDemoWarp:** BRAINSDemonWarp is a command line program for image registration by using different methods including Thirion and diffeomorphic demons algorithms. The function takes in a template image and a target image along with other optional parameters and registers the template image onto the target image. The resultant deformation fields and metric values can be written to a file. The program uses the Insight Toolkit (www.ITK.org) for all the computations, and can operate on any of the image types supported by that library. \n

**  BRAINSFit:** \n

**  BRAINSResample:** \n

**BRAIN-ONLY REGISTRATION**\n\n

**  ACPC Transform:** The ACPC Transform Module is used to orient brain images along predefined anatomical landmarks: (manually defined) fiducials for the inter-hemispheral midline, anterior- and posterior commissure are used to align an image such that these landmarks become vertical and horizontal, respectively. This transformation can then be applied to a volume using the Resample Scalar/Vector/DWI Volume module. **Note: renamed from RealignVolume**. \n

**NON-RASTER-IMAGE DATA REGISTRATION**\n\n

**  Fiducial Registration:** The Fiducial Alignment Module can align images based on pairs of manually selected fiducial points (rigid and affine). Two sets of fiducials (fiducial lists) are required, forming matching pairs to be aligned. The transform can either be translation only, rigid transform, or similarity transform. \n

**  Surface Registration: ** The ICP Surface Registration Module performs automated registration of surfaces (not images) using the Iterative Closest Point algorithm using rigid, similarity and affine transforms. This is useful if image data directly is unreliable, but surfaces can be produced from segmentations that provide good information about desired alignment. \n """
        )

        # Important: ReadOnlyOn must come after SetText, otherwise SetText
        # has no effect.
        text.GetWidget().ReadOnlyOn()
        slicer.TkCall("pack %s -side top -anchor nw -fill x -padx 2 -pady 2" %
                      text.GetWidgetName())
    def __init__(self, parentFrame, parentClass):
        """Initialize the page skeleton and allocate the welcome text widget.

        NOTE(review): the widget is only constructed here; presumably it is
        configured and packed later in BuildGUI — confirm against the full
        class definition.
        """
        SlicerVMTKAdvancedPageSkeleton.__init__(self, parentFrame, parentClass)

        # Hyperlink-enabled scrollable text widget shown on this page.
        self._welcomeMessage = slicer.vtkKWTextWithHyperlinksWithScrollbars()
Example 7
    def BuildGUI(self):
        """Build the RegistrationWelcome GUI.

        Registers the "RegistrationWelcome" page, adds the standard
        Help/About frame, and fills a collapsible frame with a read-only,
        hyperlink-enabled overview of Slicer's registration modules.
        """
        self.GetUIPanel().AddPage("RegistrationWelcome", "RegistrationWelcome", "")
        pageWidget = self.GetUIPanel().GetPageWidget("RegistrationWelcome")
        helpText = "This module provides a quick guide for choosing the registration option most optimal for your task. More detailed information can be found on Slicer's wiki: <a>http://www.slicer.org/slicerWiki/index.php/Slicer3:Registration</a>.\n\nThe following is a brief overview of the main and auxiliary modules Slicer has to offer for image registration. Most modules are generic and can handle any image content, but a few are designed specifically for brain images."
        aboutText = "This work is supported by NA-MIC, NAC, Harvard Catalyst, NCIGT, and the Slicer Community. See http://www.slicer.org for details.  Module implemented by Wendy Plesniak and Steve Pieper."
        self.BuildHelpAndAboutFrame(pageWidget, helpText, aboutText)

        # Collapsible frame that hosts the welcome text, packed into the page
        # via Tk's pack geometry manager.
        moduleFrame = slicer.vtkSlicerModuleCollapsibleFrame()
        moduleFrame.SetParent(self.GetUIPanel().GetPageWidget("RegistrationWelcome"))
        moduleFrame.Create()
        moduleFrame.SetLabelText("RegistrationWelcome")
        moduleFrame.ExpandFrame()
        widgetName = moduleFrame.GetWidgetName()
        pageWidgetName = self.GetUIPanel().GetPageWidget("RegistrationWelcome").GetWidgetName()
        slicer.TkCall("pack %s -side top -anchor nw -fill x -padx 2 -pady 2 -in %s" % (widgetName, pageWidgetName))

        # Read-only, word-wrapped text widget with hyperlink support that
        # displays the module overview below.
        text = slicer.vtkKWTextWithHyperlinksWithScrollbars()
        text.SetParent(moduleFrame.GetFrame())
        text.Create()
        text.SetHorizontalScrollbarVisibility(0)
        text.GetWidget().SetReliefToFlat()
        text.GetWidget().SetWrapToWord()
        text.GetWidget().QuickFormattingOn()
        text.GetWidget().SetHeight(35)
        text.SetText(
            """This module provides a quick guide for choosing the registration option most optimal for your task. More detailed information can be found on Slicer's wiki: <a>http://www.slicer.org/slicerWiki/index.php/Slicer3:Registration</a>.\n\nThe following is a brief overview of the main and auxiliary modules Slicer has to offer for image registration. Most modules are generic and can handle any image content, but a few are designed specifically for brain images. Slicer modules are listed below by category:\n\n

**FAST REGISTRATION**\n\n

**  Transforms:** The Transformations module creates and edits slicer Transform nodes. Transformation nodes are used in Slicer to define spacial relationships between different nodes (such as volumes, models, fiducials, ROI's, or other Transform nodes) or between the nodes and the global RAS space. You can establish these relations by dragging the nodes under the Transformation nodes in the Data module. \n

**  Fast Affine Registration:** This module implements a registration algorithm based on the Mattes mutual information registration metric and the affine transformation. Both the fixed and moving images may be optionally smoothed before registration. \n

**  Fast Rigid Registration:** This module implements a registration algorithm based on the Mattes mutual information registration metric. The transformation mapping the moving image to the fixed image consists of 3 translations and 3 rotations. Thus, only rigid body transformations are permitted. Both the fixed and moving images may be optionally smoothed before registration. The module optionally breaks the optimization into multiple stages, each with a different learning rate and number of iterations. \n

**  Fast Nonrigid Registration:** This module performs non-rigid alignment/registration of two volumes based on a B-spline interpolation scheme, driven by points on a (cubic) control grid. The number of points on the grid is expected to be low to obtain a fast and feasible solution. Output consists in a transform node and/or resampled target volume. For finer grids a scale-space approach of multiple runs with gradually increasing grid size is recommended. Note that non-rigid transforms as produced here are not available for instant view in the data module in the way linear transforms are, i.e. moving a new volume under a B-spline transform in the Data/MRML tree will not show an effect in the views. To see the result the volume must be resampled, using the ResampleScalarVectorDWIVolume module. \n

**ROBUST REGISTRATION**\n\n

**  Expert Automated Registration:** This module is an integrated framework providing access to ITK registration technologies. Algorithms can be run in single mode or pipelined. Depending on the size of the data sets, a significant amount of memory is needed. There is an option to trade off speed for memory. Most of the code is parallelized and will take advantage of multicore capabilities, if available. \n

**  Robust Multiresolution Affine Registration:** This module implements mutual information based affine registration using a multi-resolution optimization strategy. Several parts of the algorithm are based on a description of the FLIRT algorithm, see the Reference section. \n

**  BRAINSDemoWarp:** BRAINSDemonWarp is a command line program for image registration by using different methods including Thirion and diffeomorphic demons algorithms. The function takes in a template image and a target image along with other optional parameters and registers the template image onto the target image. The resultant deformation fields and metric values can be written to a file. The program uses the Insight Toolkit (www.ITK.org) for all the computations, and can operate on any of the image types supported by that library. \n

**  BRAINSFit:** \n

**  BRAINSResample:** \n

**BRAIN-ONLY REGISTRATION**\n\n

**  ACPC Transform:** The ACPC Transform Module is used to orient brain images along predefined anatomical landmarks: (manually defined) fiducials for the inter-hemispheral midline, anterior- and posterior commissure are used to align an image such that these landmarks become vertical and horizontal, respectively. This transformation can then be applied to a volume using the Resample Scalar/Vector/DWI Volume module. **Note: renamed from RealignVolume**. \n

**NON-RASTER-IMAGE DATA REGISTRATION**\n\n

**  Fiducial Registration:** The Fiducial Alignment Module can align images based on pairs of manually selected fiducial points (rigid and affine). Two sets of fiducials (fiducial lists) are required, forming matching pairs to be aligned. The transform can either be translation only, rigid transform, or similarity transform. \n

**  Surface Registration: ** The ICP Surface Registration Module performs automated registration of surfaces (not images) using the Iterative Closest Point algorithm using rigid, similarity and affine transforms. This is useful if image data directly is unreliable, but surfaces can be produced from segmentations that provide good information about desired alignment. \n """
        )

        # Important: ReadOnlyOn must come after SetText, otherwise SetText
        # has no effect.
        text.GetWidget().ReadOnlyOn()
        slicer.TkCall("pack %s -side top -anchor nw -fill x -padx 2 -pady 2" % text.GetWidgetName())
Example 8
    def BuildGUI(self):
        """Build the SegmentationWelcome GUI.

        Registers the "SegmentationWelcome" page, adds the standard
        Help/About frame, and fills a collapsible frame with a read-only,
        hyperlink-enabled overview of Slicer's segmentation modules.
        """
        self.GetUIPanel().AddPage("SegmentationWelcome","SegmentationWelcome","")
        pageWidget = self.GetUIPanel().GetPageWidget("SegmentationWelcome")
        helpText = "This module provides a quick guide for choosing the segmentation options most appropriate for your task. More detailed information can be found on Slicer's wiki: <a>http://www.slicer.org/slicerWiki/index.php/Modules:SegmentationOverview3.6</a>.\n\nThe following provides a brief overview of the main and auxiliary  modules Slicer has to offer for image segmentation. Selecting the best module will depend on your input data and the underlying questions you are asking. The spectrum of approaches ranges from fully automated to manual segmentation; most modules are generic and can handle any image content, but a few are designed specifically for brain images."
        aboutText = "This work is supported by NA-MIC, NAC, Harvard Catalyst, NCIGT, and the Slicer Community. See http://www.slicer.org for details.  Module implemented by Wendy Plesniak and Steve Pieper."
        self.BuildHelpAndAboutFrame(pageWidget,helpText,aboutText)

        # Collapsible frame that hosts the welcome text, packed into the page
        # via Tk's pack geometry manager.
        moduleFrame = slicer.vtkSlicerModuleCollapsibleFrame()
        moduleFrame.SetParent(self.GetUIPanel().GetPageWidget("SegmentationWelcome"))
        moduleFrame.Create()
        moduleFrame.SetLabelText("SegmentationWelcome")
        moduleFrame.ExpandFrame()
        widgetName = moduleFrame.GetWidgetName()
        pageWidgetName = self.GetUIPanel().GetPageWidget("SegmentationWelcome").GetWidgetName()
        slicer.TkCall("pack %s -side top -anchor nw -fill x -padx 2 -pady 2 -in %s" % (widgetName,pageWidgetName))

        # Read-only, word-wrapped text widget with hyperlink support that
        # displays the module overview below.
        text = slicer.vtkKWTextWithHyperlinksWithScrollbars()
        text.SetParent(moduleFrame.GetFrame())
        text.Create()
        text.SetHorizontalScrollbarVisibility(0)
        text.GetWidget().SetReliefToFlat()
        text.GetWidget().SetWrapToWord()
        text.GetWidget().QuickFormattingOn()
        text.GetWidget().SetHeight(35)
        text.SetText("""This module provides a quick guide for choosing the segmentation options most appropriate for your task. More detailed information can be found on Slicer's wiki: <a>http://www.slicer.org/slicerWiki/index.php/Modules:SegmentationOverview3.6</a>.
        
The following provides a brief overview of the main modules Slicer has to offer for image segmentation. Selecting the best module will depend on your input data and the underlying questions you are asking. The spectrum of approaches ranges from fully automated to manual segmentation; most modules are generic and can handle any image content, but a few are designed specifically for brain images. Slicer modules are listed below:

**Interactive Editor**
Slicer's interactive editor is a module for manual segmentation of volumes. Some of the tools mimic a painting interface like photoshop or gimp, but work on 3D arrays of voxels rather than on 2D pixels. The overall goal is to allow users to efficiently and precisely define structures within their volumes as label map volumes. These label maps can be used for building models, or further processing (using, for example, the Label Statistics module).

**EM Segment Template Builder**
This module is designed for users who perform atlas based medical image segmentation and has been tested on brain MRI images. Familiarity with statistical modeling may help but is not required. It allows the user to configure the algorithm ---step-by-step--- to a variety of imaging protocols and anatomical structures, and then apply the algorithm to segment data. Configuration settings are stored in an EMSegment parameters node in the Slicer3 MRML tree. These settings can be saved and later applied to new data via any of the EMSegment interfaces within Slicer3 or the command-line EMSegment executable.

**EMSegment Command-Line**
This module is used to simplify the process of segmenting large collections of images by providing a command line interface to the EMSegment algorithm for script and batch processing. The primary function of the EMSegment Template Builder module is to step the user through the process of calibrating, via algorithm parameters, the segmentation algorithm to a particular set of input data. However, once a successful collection of parameters is established, the user will commonly want to bypass this detailed calibration process when segmenting new images by using those parameters collected from the calibration process in EMSegment Template Builder module. The command-line executable provides this batch processing capability.

**EMSegment Simple**
This module provide a simplified ~~one-click~~ GUI interface for the EMSegment Command-line Executable. The interface is simple because the number of required command-line parameters is minimized, and it is flexible because any EM algorithm parameter can be modified, within the MRML scene, via the EMSegment GUI interface. Similar to the EMSegment Command-line Executable, once a set of parameters for segmentation algorithm is deemed satisfactory through the process of calibrating in the Template Builder module, the user will commonly want to bypass this detailed calibration process when segmenting new images. This module provides a simple interface to specify a small number of required command-line parameters --- a MRML scene (containing algorithm parameters), a target image (or multiple target images, e.g., T1 and T2), and an output labelmap image --- and to allow any EM algorithm parameters to be modified, within the MRML scene, via the EMSegment GUI interface.

**Fast Marching Segmentation**
This module is most useful to segment image regions that have similar intensity. Initialization is accomlished by selecting points within the region to be segmented and expected volume of the segmented structure. The segmentation is completed relatively quickly for typical images, allowing experimentation with choosing optimal parameters. The resulting volume can be adjusted interactively by scrolling through the evolution of the label contour.

** Otsu Treshold Segmentation**
This filter creates a labeled image from a grayscale image. First, it calculates an optimal threshold that separates the image into foreground and background. This threshold separates those two classes so that their intra-class variance is minimal (see <a>http://en.wikipedia.org/wiki/Otsu%27s_method</a>). Then the filter runs a connected component algorithm to generate unique labels for each connected region of the foreground. Finally, the resulting image is relabeled to provide consecutive numbering.

** Simple Region Growing**
Simple Region Growing is a statistical region growing algorithm. The algorithm takes one or more seeds as input. It executes using the following steps:

    * A statistical model of the foreground (mean and standard deviation of intensity) is estimated over neighborhoods of the seed points. The statistical model is converted to a scalar threshold range using the mean intensity of a seed point plus or minus a multiplier or the standard deviation.

    * The algorithm then constructs a segmentation by labeling all voxels that are connected to the seed voxels and satisfy the scalar threshold range.

After this initial segmentation, the statistical model can be iteratively refined by re-calculating the mean and standard deviation of the intensity of the voxels in the initial segmentation. The refined statistical model in turn is converted to a new scalar threshold range as described in the preceding paragraph. This is followed by a new segmentation where the algorithm labels all voxels connected to the seed voxels and satisfy the new scalar threshold range. The number of repetitions for the segmentation process is specified using an iteration parameter to the algorithm.

Through this process, Simple Region Growing attempts to adapt to the statistical properties of the image. Initially, the statistical model is based strictly on the neighborhoods about the seeds. This statistical model is precise (being based on the user supplied seeds) but also uncertain (because the number of samples in the model can be rather small). After the initial segmentation, the statistics are recalculated which yields a more certain model (because the number of samples in the model can be rather large).

** Robust Statistics **
This module is a general purpose segmenter. The target object is initialized by a label map. An active contour model then evolves to extract the desired boundary of the object.

**BRAINSROIAuto**
BRAINSROIAuto automatically generates a Binary Image (or Mask) to encompass the region in an brain image volume occupied by the brain. In general, BRAINSROIAuto takes a brain image volume, and generates a mask without requiring parameter tweaking.
""")
        # Important: ReadOnlyOn must come after SetText, otherwise SetText
        # has no effect.
        text.GetWidget().ReadOnlyOn()
        slicer.TkCall("pack %s -side top -anchor nw -fill x -padx 2 -pady 2" % text.GetWidgetName())