Example #1
def genren_AGG(sls,
               sls2=None,
               niidata=None,
               roi1=None,
               roi2=None,
               roi3=None,
               aff=np.eye(4),
               putpath='test.png',
               showme=False,
               showaxes=False):

    renderer = window.Renderer()
    renderer.set_camera(position=(-606.93, -153.23, 28.70),
                        focal_point=(2.78, 11.06, 15.66),
                        view_up=(0, 0, 1))

    stream_actor = actor.line(sls)
    renderer.add(stream_actor)

    if sls2 is not None:
        stream_actor2 = actor.line(sls2, colors=(1, 1, 1))
        renderer.add(stream_actor2)

    if roi1 is not None:
        contour_actor1 = actor.contour_from_roi(roi1,
                                                affine=aff,
                                                color=(1., 1., 0.),
                                                opacity=0.5)
        renderer.add(contour_actor1)

    if roi2 is not None:
        contour_actor2 = actor.contour_from_roi(roi2,
                                                affine=aff,
                                                color=(1., 0., 0.),
                                                opacity=0.5)
        renderer.add(contour_actor2)

    if roi3 is not None:
        contour_actor3 = actor.contour_from_roi(roi3,
                                                affine=aff,
                                                color=(0., 0., 1.),
                                                opacity=0.5)
        renderer.add(contour_actor3)

    if niidata is not None:
        slice_actor = actor.slicer(niidata, affine=aff)
        renderer.add(slice_actor)

    if showaxes:
        axes = actor.axes()
        renderer.add(axes)

    if showme:
        window.show(renderer, size=(500, 500), reset_camera=False)
    window.record(renderer, out_path=putpath, size=(500, 500))
    # renderer.camera_info()
    del renderer
    return putpath
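A hedged usage sketch for calling genren_AGG; the file names below are placeholders, and the streamlines are assumed to already be in the same space as the ROI:

# Hypothetical inputs, for illustration only.
import nibabel as nib
from dipy.io.streamline import load_tractogram

sft = load_tractogram('tracks.trk', 'same', bbox_valid_check=False)
roi_img = nib.load('roi.nii.gz')

genren_AGG(sft.streamlines,
           roi1=roi_img.get_fdata(),
           aff=roi_img.affine,
           putpath='tracks_with_roi.png')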
Example #2
def window_show_test(bundles, mask_roi, anat, interactive=True, outpath=None):
    """

    :param bundles:
    :param mask_roi:
    :param anat:
    :param interactive:
    :param outpath:
    :return:
    """

    candidate_streamlines_actor = actor.streamtube(
        bundles, cmap.line_colors(bundles))
    ROI_actor = actor.contour_from_roi(mask_roi,
                                       color=(1., 1., 0.),
                                       opacity=0.5)

    # Add display objects to canvas
    ren = window.Renderer()
    ren.add(candidate_streamlines_actor)
    ren.add(ROI_actor)

    if anat is not None:
        vol_actor = actor.slicer(anat)
        vol_actor.display(x=40)
        vol_actor2 = vol_actor.copy()
        vol_actor2.display(z=35)
        ren.add(vol_actor)
        ren.add(vol_actor2)
    if outpath is not None:
        window.record(ren, n_frames=1, out_path=outpath, size=(800, 800))
    if interactive:
        window.show(ren)
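A minimal usage sketch for window_show_test; the paths are assumptions, and the streamlines and mask are expected to be co-registered:

# Hypothetical inputs, for illustration only.
import nibabel as nib

bundle = nib.streamlines.load('bundle.trk').streamlines
mask = nib.load('mask.nii.gz').get_fdata() > 0
anat = nib.load('t1.nii.gz').get_fdata()

window_show_test(bundle, mask, anat, interactive=False,
                 outpath='bundle_roi.png')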
Example #3
def visualize_roi(roi,
                  affine_or_mapping=None,
                  static_img=None,
                  roi_affine=None,
                  static_affine=None,
                  reg_template=None,
                  scene=None,
                  color=np.array([1, 0, 0]),
                  opacity=1.0,
                  inline=False,
                  interact=False):
    """
    Render a region of interest into a VTK viz as a volume
    """
    if not isinstance(roi, np.ndarray):
        if isinstance(roi, str):
            roi = nib.load(roi).get_fdata()
        else:
            roi = roi.get_fdata()

    if affine_or_mapping is not None:
        if isinstance(affine_or_mapping, np.ndarray):
            # This is an affine:
            if (static_img is None or roi_affine is None
                    or static_affine is None):
                raise ValueError(
                    "If using an affine to transform an ROI, "
                    "need to also specify all of the following "
                    "inputs: `static_img`, `roi_affine`, `static_affine`")
            roi = reg.resample(roi, static_img, roi_affine, static_affine)
        else:
            # Assume it is a mapping:
            if (isinstance(affine_or_mapping, str)
                    or isinstance(affine_or_mapping, nib.Nifti1Image)):
                if reg_template is None or static_img is None:
                    raise ValueError(
                        "If using a mapping to transform an ROI, need to "
                        "also specify all of the following inputs: "
                        "`reg_template`, `static_img`")
                affine_or_mapping = reg.read_mapping(affine_or_mapping,
                                                     static_img, reg_template)

            roi = auv.patch_up_roi(
                affine_or_mapping.transform_inverse(
                    roi, interpolation='nearest')).astype(bool)

    if scene is None:
        scene = window.Scene()

    roi_actor = actor.contour_from_roi(roi, color=color, opacity=opacity)
    scene.add(roi_actor)

    if inline:
        tdir = tempfile.gettempdir()
        fname = op.join(tdir, "fig.png")
        window.snapshot(scene, fname=fname)
        display.display_png(display.Image(fname))

    return _inline_interact(scene, inline, interact)
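A short sketch of calling visualize_roi on an ROI that is already in the viewer's space, so no affine or mapping is passed; the file name is a placeholder:

scene = visualize_roi('roi.nii.gz',
                      color=np.array([0, 1, 0]),
                      opacity=0.5,
                      interact=True)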
Example #4
def create_roi_actor(roi_list, affine, opacity=0.8):
    roi_actor_list = []
    for i in range(len(roi_list)):
        random = np.random.RandomState(i)
        color = random.uniform(0., 1., size=3)
        print(color)
        i_actor = actor.contour_from_roi(roi_list[i],
                                         affine=affine,
                                         color=color,
                                         opacity=opacity)
        roi_actor_list.append(i_actor)

    return roi_actor_list
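A brief usage sketch, assuming a list of binary ROI volumes and their shared affine have been loaded elsewhere (the names here are placeholders):

# rois: list of 3-D binary arrays; img_affine: their voxel-to-world affine.
roi_actors = create_roi_actor(rois, img_affine, opacity=0.6)

ren = window.Renderer()
for roi_actor in roi_actors:
    ren.add(roi_actor)
window.show(ren)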
Example #5
def left_button_clicked(i_ren, obj, file_select_text):
    """ A callback to handle left click for this UI element.

    Parameters
    ----------
    i_ren: :class:`CustomInteractorStyle`
    obj: :class:`vtkActor`
        The picked actor
    file_select_text: :class:`FileSelectMenuText2D`

    """

    if file_select_text.file_type == "directory":
        file_select_text.file_select.select_file(file_name="")
        file_select_text.file_select.window_offset = 0
        file_select_text.file_select.current_directory = os.path.abspath(
            os.path.join(file_select_text.file_select.current_directory,
                         file_select_text.text_actor.message))
        file_select_text.file_select.window = 0
        file_select_text.file_select.fill_text_actors()
        print "----------------------"
    else:
        file_select_text.file_select.select_file(
            file_name=file_select_text.file_name)
        file_select_text.file_select.fill_text_actors()
        file_select_text.mark_selected()
        print "+++++++++++++++++++++++"
        data_path = os.path.join(
            file_select_text.file_select.current_directory,
            file_select_text.file_name)
        suffix = os.path.split(data_path)[-1].split('.')[-1]
        print()
        if suffix == 'tck':
            fibs = nib.streamlines.tck.TckFile.load(data_path)
            ac = actor.line(fibs.streamlines)
            ren.add(ac)
        if suffix == 'gz':
            img = nib.load(data_path)
            im = actor.contour_from_roi(img.get_data(), affine=img.affine)
            ren.add(im)

    i_ren.force_render()
    i_ren.event.abort()  # Stop propagating the event.
Example #6
"""
We will create a streamline actor from the streamlines.
"""

streamlines_actor = actor.line(streamlines, cmap.line_colors(streamlines))
"""
Next, we create a surface actor from the corpus callosum seed ROI. We
provide the ROI data, the affine, the color in [R,G,B], and the opacity as
a decimal between zero and one. Here, we set the color as blue/green with
50% opacity.
"""

surface_opacity = 0.5
surface_color = [0, 1, 1]

seedroi_actor = actor.contour_from_roi(seed_mask, affine, surface_color,
                                       surface_opacity)
"""
Next, we initialize a ``Renderer`` object and add both actors
to the rendering.
"""

ren = window.ren()
ren.add(streamlines_actor)
ren.add(seedroi_actor)
"""
If you set interactive to True (below), the rendering will pop up in an
interactive window.
"""

interactive = False
if interactive:
    window.show(ren)
Example #7
"""
We can use some of DIPY_'s visualization tools to display the ROI we targeted
above and all the streamlines that pass through that ROI. The ROI is the yellow
region near the center of the axial image.
"""

from dipy.viz import window, actor, colormap as cmap

# Enables/disables interactive visualization
interactive = False

# Make display objects
color = cmap.line_colors(cc_streamlines)
cc_streamlines_actor = actor.line(cc_streamlines,
                                  cmap.line_colors(cc_streamlines))
cc_ROI_actor = actor.contour_from_roi(cc_slice, color=(1., 1., 0.),
                                      opacity=0.5)

vol_actor = actor.slicer(t1_data)

vol_actor.display(x=40)
vol_actor2 = vol_actor.copy()
vol_actor2.display(z=35)

# Add display objects to canvas
r = window.Renderer()
r.add(vol_actor)
r.add(vol_actor2)
r.add(cc_streamlines_actor)
r.add(cc_ROI_actor)

# Save figures
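The snippet is truncated at this point; a plausible completion, mirroring the recording calls used in the other examples (the output file name is an assumption):

window.record(r, n_frames=1, out_path='corpuscallosum_axial.png',
              size=(800, 800))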
Example #8
# Make a corpus callosum seed mask for tracking
seed_mask = labels == 2
seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1], affine=affine)

# Make a streamline bundle model of the corpus callosum ROI connectivity
streamlines = LocalTracking(csa_peaks, classifier, seeds, affine,
                            step_size=2)
streamlines = Streamlines(streamlines)

# Visualize the streamlines and the Path Length Map base ROI
# (in this case also the seed ROI)

streamlines_actor = actor.line(streamlines, cmap.line_colors(streamlines))
surface_opacity = 0.5
surface_color = [0, 1, 1]
seedroi_actor = actor.contour_from_roi(seed_mask, affine,
                                       surface_color, surface_opacity)

ren = window.Renderer()
ren.add(streamlines_actor)
ren.add(seedroi_actor)

"""
If you set interactive to True (below), the rendering will pop up in an
interactive window.
"""

interactive = False
if interactive:
    window.show(ren)

window.record(ren, n_frames=1, out_path='plm_roi_sls.png',
              size=(800, 800))
Example #9
def visualize_roi(roi,
                  affine_or_mapping=None,
                  static_img=None,
                  roi_affine=None,
                  static_affine=None,
                  reg_template=None,
                  name='ROI',
                  figure=None,
                  color=np.array([1, 0, 0]),
                  flip_axes=None,
                  opacity=1.0,
                  inline=False,
                  interact=False):
    """
    Render a region of interest into a VTK viz as a volume

    Parameters
    ----------
    roi : str or Nifti1Image
        The ROI information

    affine_or_mapping : ndarray, Nifti1Image, or str, optional
       An affine transformation or mapping to apply to the ROIs before
       visualization. Default: no transform.

    static_img: str or Nifti1Image, optional
        Template to resample roi to.
        Default: None

    roi_affine: ndarray, optional
        Default: None

    static_affine: ndarray, optional
        Default: None

    reg_template: str or Nifti1Image, optional
        Template to use for registration.
        Default: None

    name: str, optional
        Name of ROI for the legend.
        Default: 'ROI'

    color : ndarray, optional
        RGB color for ROI.
        Default: np.array([1, 0, 0])

    flip_axes : None
        This parameter exists to conform to the fury and plotly APIs.

    opacity : float, optional
        Opacity of ROI.
        Default: 1.0

    figure : fury Scene object, optional
        If provided, the visualization will be added to this Scene. Default:
        Initialize a new Scene.

    interact : bool
        Whether to provide an interactive VTK window for interaction.
        Default: False

    inline : bool
        Whether to embed the visualization inline in a notebook. Only works
        in the notebook context. Default: False.

    Returns
    -------
    Fury Scene object
    """
    roi = vut.prepare_roi(roi, affine_or_mapping, static_img, roi_affine,
                          static_affine, reg_template)

    if figure is None:
        figure = window.Scene()

    roi_actor = actor.contour_from_roi(roi, color=color, opacity=opacity)
    figure.add(roi_actor)

    return _inline_interact(figure, inline, interact)
Example #10
"""
We can use some of dipy_'s visualization tools to display the ROI we targeted
above and all the streamlines that pass through that ROI. The ROI is the yellow
region near the center of the axial image.
"""

from dipy.viz import window, actor
from dipy.viz.colormap import line_colors

# Enables/disables interactive visualization
interactive = False

# Make display objects
color = line_colors(cc_streamlines)
cc_streamlines_actor = actor.line(cc_streamlines, line_colors(cc_streamlines))
cc_ROI_actor = actor.contour_from_roi(cc_slice, color=(1., 1., 0.),
                                      opacity=0.5)

vol_actor = actor.slicer(t1_data)

vol_actor.display(x=40)
vol_actor2 = vol_actor.copy()
vol_actor2.display(z=35)

# Add display objects to canvas
r = window.Renderer()
r.add(vol_actor)
r.add(vol_actor2)
r.add(cc_streamlines_actor)
r.add(cc_ROI_actor)

# Save figures
Example #11
File: app.py Project: kerkelae/dipy
    def build_show(self, scene):

        title = 'Horizon ' + horizon_version
        self.show_m = window.ShowManager(
            scene,
            title=title,
            size=(1200, 900),
            order_transparent=self.order_transparent,
            reset_camera=False)
        self.show_m.initialize()

        if self.cluster and self.tractograms:

            lengths = np.array([self.cla[c]['length'] for c in self.cla])
            szs = [self.cla[c]['size'] for c in self.cla]
            sizes = np.array(szs)

            # global self.panel2, slider_length, slider_size
            self.panel2 = ui.Panel2D(size=(320, 200),
                                     position=(870, 520),
                                     color=(1, 1, 1),
                                     opacity=0.1,
                                     align="right")

            cluster_panel_label = build_label(text="Cluster panel", bold=True)

            slider_label_threshold = build_label(text="Threshold")
            print("Cluster threshold", self.cluster_thr)
            slider_threshold = ui.LineSlider2D(min_value=5,
                                               max_value=25,
                                               initial_value=self.cluster_thr,
                                               text_template="{value:.0f}",
                                               length=140,
                                               shape='square')
            _color_slider(slider_threshold)

            slider_label_length = build_label(text="Length")
            slider_length = ui.LineSlider2D(
                min_value=lengths.min(),
                max_value=np.percentile(lengths, 98),
                initial_value=np.percentile(lengths, 25),
                text_template="{value:.0f}",
                length=140)
            _color_slider(slider_length)

            slider_label_size = build_label(text="Size")
            slider_size = ui.LineSlider2D(min_value=sizes.min(),
                                          max_value=np.percentile(sizes, 98),
                                          initial_value=np.percentile(
                                              sizes, 50),
                                          text_template="{value:.0f}",
                                          length=140)
            _color_slider(slider_size)

            # global self.length_min, size_min
            self.size_min = sizes.min()
            self.length_min = lengths.min()

            def change_threshold(istyle, obj, slider):
                sv = np.round(slider.value, 0)
                self.remove_cluster_actors(scene)
                self.add_cluster_actors(scene, self.tractograms, threshold=sv)

                # TODO need to double check if this section is still needed
                lengths = np.array([self.cla[c]['length'] for c in self.cla])
                szs = [self.cla[c]['size'] for c in self.cla]
                sizes = np.array(szs)

                slider_length.min_value = lengths.min()
                slider_length.max_value = lengths.max()
                slider_length.value = lengths.min()
                slider_length.update()

                slider_size.min_value = sizes.min()
                slider_size.max_value = sizes.max()
                slider_size.value = sizes.min()
                slider_size.update()

                self.length_min = min(lengths)
                self.size_min = min(sizes)

                self.show_m.render()

            slider_threshold.handle_events(slider_threshold.handle.actor)
            slider_threshold.on_left_mouse_button_released = change_threshold

            def hide_clusters_length(slider):
                self.length_min = np.round(slider.value)

                for k in self.cla:
                    if (self.cla[k]['length'] < self.length_min
                            or self.cla[k]['size'] < self.size_min):
                        self.cla[k]['centroid_actor'].SetVisibility(0)
                        if k.GetVisibility() == 1:
                            k.SetVisibility(0)
                    else:
                        self.cla[k]['centroid_actor'].SetVisibility(1)
                self.show_m.render()

            def hide_clusters_size(slider):
                self.size_min = np.round(slider.value)

                for k in self.cla:
                    if (self.cla[k]['length'] < self.length_min
                            or self.cla[k]['size'] < self.size_min):
                        self.cla[k]['centroid_actor'].SetVisibility(0)
                        if k.GetVisibility() == 1:
                            k.SetVisibility(0)
                    else:
                        self.cla[k]['centroid_actor'].SetVisibility(1)
                self.show_m.render()

            slider_length.on_change = hide_clusters_length

            # Clustering panel
            self.panel2.add_element(slider_label_threshold, coords=(0.1, 0.15))
            self.panel2.add_element(slider_threshold, coords=(0.42, 0.15))

            self.panel2.add_element(slider_label_length, coords=(0.1, 0.4))
            self.panel2.add_element(slider_length, coords=(0.42, 0.4))

            slider_size.on_change = hide_clusters_size

            self.panel2.add_element(slider_label_size, coords=(0.1, 0.65))
            self.panel2.add_element(slider_size, coords=(0.42, 0.65))

            self.panel2.add_element(cluster_panel_label, coords=(0.05, 0.85))

            scene.add(self.panel2)

            # Information panel
            text_block = build_label(HELP_MESSAGE, 18)
            text_block.message = HELP_MESSAGE

            self.help_panel = ui.Panel2D(size=(320, 200),
                                         position=(10, 10),
                                         color=(0.8, 0.8, 1),
                                         opacity=0.2,
                                         align="left")

            self.help_panel.add_element(text_block, coords=(0.05, 0.1))
            scene.add(self.help_panel)

        if len(self.images) > 0:
            # Only first non-binary image loading supported for now
            first_img = True
            first_roi = True
            if self.roi_images:
                roi_color = self.roi_colors
                for img in self.images:
                    img_data, img_affine = img
                    dim = np.unique(img_data).shape[0]
                    if dim == 2:
                        if 'rois' in self.random_colors:
                            roi_color = next(self.color_gen)
                        roi_actor = actor.contour_from_roi(
                            img_data,
                            affine=img_affine,
                            color=roi_color,
                            opacity=self.mem.roi_opacity)
                        self.mem.slicer_roi_actor.append(roi_actor)
                        scene.add(roi_actor)

                        if first_roi:
                            self.panel3 = ui.Panel2D(size=(320, 100),
                                                     position=(870, 730),
                                                     color=(1, 1, 1),
                                                     opacity=0.1,
                                                     align="right")

                            rois_panel_label = build_label(text="ROIs panel",
                                                           bold=True)

                            slider_label_opacity = build_label(text="Opacity")
                            slider_opacity = ui.LineSlider2D(
                                min_value=0.0,
                                max_value=1.0,
                                initial_value=self.mem.roi_opacity,
                                length=140,
                                text_template="{ratio:.0%}")
                            _color_slider(slider_opacity)

                            def change_opacity(slider):
                                roi_opacity = slider.value
                                self.mem.roi_opacity = roi_opacity
                                for contour in self.mem.slicer_roi_actor:
                                    contour.GetProperty().SetOpacity(
                                        roi_opacity)

                            slider_opacity.on_change = change_opacity

                            self.panel3.add_element(slider_label_opacity,
                                                    coords=(0.1, 0.3))
                            self.panel3.add_element(slider_opacity,
                                                    coords=(0.42, 0.3))

                            self.panel3.add_element(rois_panel_label,
                                                    coords=(0.05, 0.7))

                            scene.add(self.panel3)

                            first_roi = False
                    else:
                        if first_img:
                            data, affine = img
                            self.vox2ras = affine

                            if len(self.pams) > 0:
                                pam = self.pams[0]
                            else:
                                pam = None
                            self.panel = slicer_panel(scene,
                                                      self.show_m.iren,
                                                      data,
                                                      affine,
                                                      self.world_coords,
                                                      pam=pam,
                                                      mem=self.mem)
                            first_img = False
            else:
                data, affine = self.images[0]
                self.vox2ras = affine

                if len(self.pams) > 0:
                    pam = self.pams[0]
                else:
                    pam = None
                self.panel = slicer_panel(scene,
                                          self.show_m.iren,
                                          data,
                                          affine,
                                          self.world_coords,
                                          pam=pam,
                                          mem=self.mem)
        else:
            data = None
            affine = None
            pam = None

        self.win_size = scene.GetSize()

        def win_callback(obj, event):
            if self.win_size != obj.GetSize():
                size_old = self.win_size
                self.win_size = obj.GetSize()
                size_change = [self.win_size[0] - size_old[0], 0]
                if data is not None:
                    self.panel.re_align(size_change)
                if self.cluster:
                    self.panel2.re_align(size_change)
                    self.help_panel.re_align(size_change)
                if self.roi_images:
                    self.panel3.re_align(size_change)

        self.show_m.initialize()

        self.hide_centroids = True
        self.select_all = False

        def hide():
            if self.hide_centroids:
                for ca in self.cea:
                    if (self.cea[ca]['length'] >= self.length_min
                            or self.cea[ca]['size'] >= self.size_min):
                        if self.cea[ca]['selected'] == 0:
                            ca.VisibilityOff()
            else:
                for ca in self.cea:
                    if (self.cea[ca]['length'] >= self.length_min
                            and self.cea[ca]['size'] >= self.size_min):
                        if self.cea[ca]['selected'] == 0:
                            ca.VisibilityOn()
            self.hide_centroids = not self.hide_centroids
            self.show_m.render()

        def invert():
            for ca in self.cea:
                if (self.cea[ca]['length'] >= self.length_min
                        and self.cea[ca]['size'] >= self.size_min):
                    self.cea[ca]['selected'] = \
                        not self.cea[ca]['selected']
                    cas = self.cea[ca]['cluster_actor']
                    self.cla[cas]['selected'] = \
                        self.cea[ca]['selected']
            self.show_m.render()

        def save():
            saving_streamlines = Streamlines()
            for bundle in self.cla.keys():
                if bundle.GetVisibility():
                    t = self.cla[bundle]['tractogram']
                    c = self.cla[bundle]['cluster']
                    indices = self.tractogram_clusters[t][c]
                    saving_streamlines.extend(Streamlines(indices))
            print('Saving result in tmp.trk')

            # Using the header of the first of the tractograms
            sft_new = StatefulTractogram(saving_streamlines,
                                         self.tractograms[0], Space.RASMM)
            save_tractogram(sft_new, 'tmp.trk', bbox_valid_check=False)
            print('Saved!')

        def new_window():
            active_streamlines = Streamlines()
            for bundle in self.cla.keys():
                if bundle.GetVisibility():
                    t = self.cla[bundle]['tractogram']
                    c = self.cla[bundle]['cluster']
                    indices = self.tractogram_clusters[t][c]
                    active_streamlines.extend(Streamlines(indices))

            # Using the header of the first of the tractograms
            active_sft = StatefulTractogram(active_streamlines,
                                            self.tractograms[0], Space.RASMM)
            hz2 = Horizon([active_sft],
                          self.images,
                          cluster=True,
                          cluster_thr=self.cluster_thr / 2.,
                          random_colors=self.random_colors,
                          length_lt=np.inf,
                          length_gt=0,
                          clusters_lt=np.inf,
                          clusters_gt=0,
                          world_coords=True,
                          interactive=True)
            ren2 = hz2.build_scene()
            hz2.build_show(ren2)

        def show_all():
            if self.select_all is False:
                for ca in self.cea:
                    if (self.cea[ca]['length'] >= self.length_min
                            and self.cea[ca]['size'] >= self.size_min):
                        self.cea[ca]['selected'] = 1
                        cas = self.cea[ca]['cluster_actor']
                        self.cla[cas]['selected'] = \
                            self.cea[ca]['selected']
                self.show_m.render()
                self.select_all = True
            else:
                for ca in self.cea:
                    if (self.cea[ca]['length'] >= self.length_min
                            and self.cea[ca]['size'] >= self.size_min):
                        self.cea[ca]['selected'] = 0
                        cas = self.cea[ca]['cluster_actor']
                        self.cla[cas]['selected'] = \
                            self.cea[ca]['selected']
                self.show_m.render()
                self.select_all = False

        def expand():
            for c in self.cea:
                if self.cea[c]['selected']:
                    if not self.cea[c]['expanded']:
                        len_ = self.cea[c]['length']
                        sz_ = self.cea[c]['size']
                        if (len_ >= self.length_min and sz_ >= self.size_min):
                            self.cea[c]['cluster_actor']. \
                                VisibilityOn()
                            c.VisibilityOff()
                            self.cea[c]['expanded'] = 1

            self.show_m.render()

        def reset():
            for c in self.cea:

                if (self.cea[c]['length'] >= self.length_min
                        and self.cea[c]['size'] >= self.size_min):
                    self.cea[c]['cluster_actor'].VisibilityOff()
                    c.VisibilityOn()
                    self.cea[c]['expanded'] = 0

            self.show_m.render()

        def key_press(obj, event):
            key = obj.GetKeySym()
            if self.cluster:

                # hide on/off unselected centroids
                if key == 'h' or key == 'H':
                    hide()

                # invert selection
                if key == 'i' or key == 'I':
                    invert()

                # retract help panel
                if key == 'o' or key == 'O':
                    self.help_panel._set_position((-300, 0))
                    self.show_m.render()

                # save current result
                if key == 's' or key == 'S':
                    save()

                if key == 'y' or key == 'Y':
                    new_window()

                if key == 'a' or key == 'A':
                    show_all()

                if key == 'e' or key == 'E':
                    expand()

                if key == 'r' or key == 'R':
                    reset()

        options = [
            r'un\hide centroids', 'invert selection', r'un\select all',
            'expand clusters', 'collapse clusters', 'save streamlines',
            'recluster'
        ]
        listbox = ui.ListBox2D(values=options,
                               position=(10, 300),
                               size=(200, 270),
                               multiselection=False,
                               font_size=18)

        def display_element():
            action = listbox.selected[0]
            if action == r'un\hide centroids':
                hide()
            if action == 'invert selection':
                invert()
            if action == r'un\select all':
                show_all()
            if action == 'expand clusters':
                expand()
            if action == 'collapse clusters':
                reset()
            if action == 'save streamlines':
                save()
            if action == 'recluster':
                new_window()

        listbox.on_change = display_element
        listbox.panel.opacity = 0.2
        listbox.set_visibility(0)

        self.show_m.scene.add(listbox)

        def left_click_centroid_callback(obj, event):

            self.cea[obj]['selected'] = not self.cea[obj]['selected']
            self.cla[self.cea[obj]['cluster_actor']]['selected'] = \
                self.cea[obj]['selected']
            self.show_m.render()

        def right_click_centroid_callback(obj, event):
            for lactor in listbox._get_actors():
                lactor.SetVisibility(not lactor.GetVisibility())

            listbox.scroll_bar.set_visibility(False)
            self.show_m.render()

        def left_click_cluster_callback(obj, event):

            if self.cla[obj]['selected']:
                self.cla[obj]['centroid_actor'].VisibilityOn()
                ca = self.cla[obj]['centroid_actor']
                self.cea[ca]['selected'] = 0
                obj.VisibilityOff()
                self.cea[ca]['expanded'] = 0

            self.show_m.render()

        def right_click_cluster_callback(obj, event):
            print('Cluster Area Selected')
            self.show_m.render()

        for cl in self.cla:
            cl.AddObserver('LeftButtonPressEvent', left_click_cluster_callback,
                           1.0)
            cl.AddObserver('RightButtonPressEvent',
                           right_click_cluster_callback, 1.0)
            self.cla[cl]['centroid_actor'].AddObserver(
                'LeftButtonPressEvent', left_click_centroid_callback, 1.0)
            self.cla[cl]['centroid_actor'].AddObserver(
                'RightButtonPressEvent', right_click_centroid_callback, 1.0)

        self.mem.window_timer_cnt = 0

        def timer_callback(obj, event):

            self.mem.window_timer_cnt += 1
            # TODO possibly add automatic rotation option
            # self.show_m.scene.azimuth(0.01 * self.mem.window_timer_cnt)
            # self.show_m.render()

        scene.reset_camera()
        scene.zoom(1.5)
        scene.reset_clipping_range()

        if self.interactive:

            self.show_m.add_window_callback(win_callback)
            self.show_m.add_timer_callback(True, 200, timer_callback)
            self.show_m.iren.AddObserver('KeyPressEvent', key_press)

            if self.return_showm:
                return self.show_m

            if self.recorded_events is None:
                self.show_m.render()
                self.show_m.start()

            else:

                # set to True if event recorded file needs updating
                recording = False
                recording_filename = self.recorded_events

                if recording:
                    self.show_m.record_events_to_file(recording_filename)
                else:
                    self.show_m.play_events_from_file(recording_filename)

        else:

            window.record(scene,
                          out_path=self.out_png,
                          size=(1200, 900),
                          reset_camera=False)
Example #12
def test_contour_from_roi():

    # Render volume
    renderer = window.renderer()
    data = np.zeros((50, 50, 50))
    data[20:30, 25, 25] = 1.
    data[25, 20:30, 25] = 1.
    affine = np.eye(4)
    surface = actor.contour_from_roi(data, affine,
                                     color=np.array([1, 0, 1]),
                                     opacity=.5)
    renderer.add(surface)

    renderer.reset_camera()
    renderer.reset_clipping_range()
    # window.show(renderer)

    # Test binarization
    renderer2 = window.renderer()
    data2 = np.zeros((50, 50, 50))
    data2[20:30, 25, 25] = 1.
    data2[35:40, 25, 25] = 1.
    affine = np.eye(4)
    surface2 = actor.contour_from_roi(data2, affine,
                                      color=np.array([0, 1, 1]),
                                      opacity=.5)
    renderer2.add(surface2)

    renderer2.reset_camera()
    renderer2.reset_clipping_range()
    # window.show(renderer2)

    arr = window.snapshot(renderer, 'test_surface.png', offscreen=True)
    arr2 = window.snapshot(renderer2, 'test_surface2.png', offscreen=True)

    report = window.analyze_snapshot(arr, find_objects=True)
    report2 = window.analyze_snapshot(arr2, find_objects=True)

    npt.assert_equal(report.objects, 1)
    npt.assert_equal(report2.objects, 2)

    # test on real streamlines using tracking example
    from dipy.data import read_stanford_labels
    from dipy.reconst.shm import CsaOdfModel
    from dipy.data import default_sphere
    from dipy.direction import peaks_from_model
    from dipy.tracking.local import ThresholdTissueClassifier
    from dipy.tracking import utils
    from dipy.tracking.local import LocalTracking
    from dipy.viz.colormap import line_colors

    hardi_img, gtab, labels_img = read_stanford_labels()
    data = hardi_img.get_data()
    labels = labels_img.get_data()
    affine = hardi_img.get_affine()

    white_matter = (labels == 1) | (labels == 2)

    csa_model = CsaOdfModel(gtab, sh_order=6)
    csa_peaks = peaks_from_model(csa_model, data, default_sphere,
                                 relative_peak_threshold=.8,
                                 min_separation_angle=45,
                                 mask=white_matter)

    classifier = ThresholdTissueClassifier(csa_peaks.gfa, .25)

    seed_mask = labels == 2
    seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1], affine=affine)

    # Initialization of LocalTracking.
    # The computation happens in the next step.
    streamlines = LocalTracking(csa_peaks, classifier, seeds, affine,
                                step_size=2)

    # Compute streamlines and store as a list.
    streamlines = list(streamlines)

    # Prepare the display objects.
    streamlines_actor = actor.line(streamlines, line_colors(streamlines))
    seedroi_actor = actor.contour_from_roi(seed_mask, affine, [0, 1, 1], 0.5)

    # Create the 3d display.
    r = window.ren()
    r2 = window.ren()
    r.add(streamlines_actor)
    arr3 = window.snapshot(r, 'test_surface3.png', offscreen=True)
    report3 = window.analyze_snapshot(arr3, find_objects=True)
    r2.add(streamlines_actor)
    r2.add(seedroi_actor)
    arr4 = window.snapshot(r2, 'test_surface4.png', offscreen=True)
    report4 = window.analyze_snapshot(arr4, find_objects=True)

    # assert that the seed ROI rendering is not far
    # away from the streamlines (affine error)
    npt.assert_equal(report3.objects, report4.objects)
Example #13
native_streamlines = "native_streamline_%s_bin-%d.vtk" % (subs_strings[s], i)
unfold_streamlines = "unfold_streamlines_%s_bin-%d.vtk" % (subs_strings[s], i)
from_unfold_streamlines = "from_unfold_streamlines_%s_bin-%d.vtk" % (
    subs_strings[s], i)
roi_nii = nib.load('data/oldUnfold/U.nii.gz')
roi = roi_nii.get_fdata()

nlines = load_vtk_streamlines('data/oldUnfold/' + native_streamlines)
ulines = load_vtk_streamlines('data/oldUnfold/' + from_unfold_streamlines)
#unfoldlines=load_vtk_streamlines('data/oldUnfold/Unfolded/'+unfold_streamlines)

surface_opacity = 0.05
surface_color = [0, 1, 1]

roi_actor = actor.contour_from_roi(roi, roi_nii.affine, surface_color,
                                   surface_opacity)


def plot_streamlines(streamlines, roi_actor):
    if has_fury:
        # Prepare the display objects.
        color = colormap.line_colors(streamlines)

        streamlines_actor = actor.line(streamlines,
                                       colormap.line_colors(streamlines))

        # Create the 3D display.
        scene = window.Scene()
        scene.add(roi_actor)
        scene.add(streamlines_actor)
Example #15
fib = nib.streamlines.tck.TckFile.load(fib_file)
streamlines = fib.streamlines
from pyfat.algorithm.fiber_selection import select_by_vol_roi

print(len(streamlines))
# # create a rendering renderer
# ren = window.Renderer()
# stream_actor = actor.line(streamlines)

roi = nib.load(roi_file)
roi_data = roi.get_data()
roi1 = nib.load(roi_file1)
roi1_data = roi1.get_data()
ROI_actor = actor.contour_from_roi(roi_data,
                                   affine=roi.affine,
                                   color=(1., 1., 0.),
                                   opacity=0.8)
ROI_actor1 = actor.contour_from_roi(roi1_data,
                                    affine=roi1.affine,
                                    color=(1., 1., 0.),
                                    opacity=0.8)

####################################################
roi_vis = nib.load(roi_vis)
roi_vis_data = roi_vis.get_data()
label = list(set(roi_vis_data[roi_vis_data.nonzero()]))
roi_vis_data0 = np.zeros(roi_vis_data.shape)
roi_vis_data0[roi_vis_data == label[0]] = 1.0
roi_vis_data1 = np.zeros(roi_vis_data.shape)
roi_vis_data1[roi_vis_data == label[1]] = 1.0
roi_vis_data2 = np.zeros(roi_vis_data.shape)