def mark_selected(self):  # what different from scatter selection
        """Highlight the currently selected voxels by overlaying them in red.

        Builds a masked copy of the volume in which every voxel outside
        ``self.selected`` is zeroed, appends it to the volume pool with a
        translucent red colormap, and pushes the updated pool to the
        multi-volume visual.
        """
        reds = get_translucent_cmap(1, 0, 0)

        # Work on a copy: np.transpose returns a *view*, so calling
        # np.place on it directly would permanently zero out the
        # unselected voxels of self._vol_data itself.
        select_data = np.transpose(self._vol_data).copy()
        not_select = np.logical_not(self.selected)
        np.place(select_data, not_select, 0)
        select_data = np.transpose(select_data)

        self._volume_pool.append((select_data, (1, 6), reds))

        # TODO: no set_data function available in multi_volume_visual
        self._volume._update_all_volumes(self._volume_pool)
        print('self.volume_pool', len(self._volume_pool))
    def __init__(self, keys='interactive'):
        """Assemble the demo scene: Qt layout, vispy canvas, turntable
        camera, the multi-volume visual and the shared selection helper.

        Parameters
        ----------
        keys : str
            Key-binding scheme forwarded to ``scene.SceneCanvas``.
        """
        super(DemoScene, self).__init__()

        # Qt layout hosting the native vispy canvas widget
        layout = QtGui.QVBoxLayout(self)
        self.resize(500, 500)
        self.setLayout(layout)
        self.canvas = scene.SceneCanvas(keys=keys)
        layout.addWidget(self.canvas.native)

        # Turntable camera looking at the origin
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.cameras.TurntableCamera(
            elevation=25, azimuth=20, distance=2.0, center=(0, 0, 0))

        # Load the data cube, replacing NaNs with zeros
        hdulist = pyfits.open('l1448_13co.fits')
        cube = np.nan_to_num(hdulist[0].data)
        self.vol_data = cube

        # The transpose here and below resolves the coordinate mismatch
        # between the volume visual's input data and its rendering result:
        # what is drawn on the 2D screen is the displaying transform of the
        # *transposed* input, so selection operates on "what we see" rather
        # than on the raw input array.
        transposed = np.transpose(cube)

        # TODO: replace the min&max threshold with real settings in Glue UI
        lo = np.min(self.vol_data)
        hi = np.max(self.vol_data)
        self.pos_data = np.argwhere(transposed >= lo)  # get voxel positions

        white = get_translucent_cmap(1, 1, 1)

        self.volume_pool = [(cube, (1, 6), white)]
        self.volume = MultiVolume(self.volume_pool)
        # Translation that centers the cube on the origin (note the
        # reversed axis order w.r.t. the numpy shape).
        self.trans = [-cube.shape[2] / 2.,
                      -cube.shape[1] / 2.,
                      -cube.shape[0] / 2.]
        self.volume.transform = scene.STTransform(translate=self.trans)
        self.view.add(self.volume)

        # 3D axis to keep us oriented (kept alive by its parent)
        scene.visuals.XYZAxis(parent=self.view.scene)

        self.selection = SelectionCommon(canvas=self.canvas, view=self.view,
                                         vol_data=self.vol_data,
                                         volume=self.volume,
                                         volume_pool=self.volume_pool,
                                         pos_data=self.pos_data)
    def mark_selected(self):
        """Overlay the currently selected voxels in red on the base volume.

        Zeroes every voxel outside ``self.selected`` in a copy of the cube,
        appends the result to the volume pool with a translucent red
        colormap, pushes the pool to the multi-volume visual, and requests
        a canvas redraw.
        """
        reds = get_translucent_cmap(1, 0, 0)

        # Work on a copy: np.transpose returns a *view* of self.vol_data,
        # so np.place on the view would permanently zero the unselected
        # voxels of the original cube shown in the base gray volume.
        select_data = np.transpose(self.vol_data).copy()
        not_select = np.logical_not(self.selected)
        np.place(select_data, not_select, 0)
        select_data = np.transpose(select_data)

        print('select_data is', select_data, select_data.shape)
        maxpos = np.unravel_index(select_data.argmax(), select_data.shape)
        print('got the max pos', maxpos)
        self.volume_pool.append((select_data, (1, 6), reds))

        # TODO: no set_data function available in multi_volume_visual
        self.volume._update_all_volumes(self.volume_pool)
        print('self.volume_pool', len(self.volume_pool))
        self.canvas.update()
    def __init__(self, keys='interactive'):
        """Set up the demo scene: Qt layout, vispy canvas, mouse/key event
        wiring, camera, the data volume, and the lasso-selection state.

        Parameters
        ----------
        keys : str
            Key-binding set forwarded to ``scene.SceneCanvas``.
        """
        super(DemoScene, self).__init__()

        # Layout and canvas creation
        box = QtGui.QVBoxLayout(self)
        self.resize(800, 600)
        self.setLayout(box)
        self.canvas = scene.SceneCanvas(keys=keys)
        box.addWidget(self.canvas.native)

        # Connect events (handlers defined elsewhere on this class)
        self.canvas.events.mouse_press.connect(self.on_mouse_press)
        self.canvas.events.mouse_release.connect(self.on_mouse_release)
        self.canvas.events.mouse_move.connect(self.on_mouse_move)
        self.canvas.events.key_press.connect(self.on_key_press)

        # Setup some defaults
        self.mesh = None
        self.selected = []  # current selection; consumed by mark_selected()
        self.white = (1.0, 1.0, 1.0, 1.0)  # opaque white (RGBA)
        self.black = (0.0, 0.0, 0.0, 0.0)  # fully transparent black (RGBA)

        # Camera
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.cameras.TurntableCamera(elevation = 25, azimuth=20, distance = 2.0, center=(0,0,0))

        # Data: load the 13CO cube, replacing NaNs with zeros
        fitsdata = pyfits.open('l1448_13co.fits')
        self.vol_data = np.nan_to_num(fitsdata[0].data)

        """
        The transpose here and after is for solving the coordinate mismatch between volume visual input data and its
        rendering result. The rendered volume shown on 2D screen, or 'what we see', is through displaying transform
        (self.tr here) of 'transposed input data', thus we use 'transpose' to enable our selection focusing on 'what
        we see' on the screen rather than the real input data of volume.

        """
        new_pos = np.transpose(self.vol_data)

        # TODO: replace the min&max threshold with real settings in Glue UI
        # NOTE(review): >= min keeps every voxel — placeholder until real thresholds exist.
        self.pos_data = np.argwhere(new_pos >= np.min(self.vol_data))  # get voxel positions

        grays = get_translucent_cmap(1, 1, 1)

        self.volume_pool = [(self.vol_data, (1, 6), grays)]
        self.volume = MultiVolume(self.volume_pool)
        # Translation centering the cube on the origin (axes reversed w.r.t. array shape)
        self.trans = [-self.vol_data.shape[2]/2., -self.vol_data.shape[1]/2., -self.vol_data.shape[0]/2.]
        self.volume.transform = scene.STTransform(translate=self.trans)
        self.view.add(self.volume)

        self.tr = self.volume.node_transform(self.view)  # ChainTransform

        # create a volume for showing the selected part (hidden until needed)
        self.volume1 = scene.visuals.Volume(self.vol_data, clim=(4, 6), parent=self.view.scene)
        self.volume1.transform = scene.STTransform(translate=self.trans)
        self.volume1.visible = False

        # Add a text instruction (empty until a mode/message is shown)
        self.text = scene.visuals.Text('', color='white', pos=(self.canvas.size[0]/4.0,  20), parent=self.canvas.scene)

        # Add a 3D axis to keep us oriented
        axis = scene.visuals.XYZAxis(parent=self.view.scene)

        # Set up for lasso drawing
        self.line_pos = []
        self.line = scene.visuals.Line(color='yellow', method='gl', parent=self.canvas.scene)

        # Selection: mode ids map number keys to selection shapes
        self.selection_flag = False
        self.selection_pool = {'1': 'lasso', '2': 'rectangle', '3': 'ellipse', '4': 'pick', '5': 'floodfill'}
        self.selection_id = '1'  # default as 1
        self.selection_origin = (0, 0)
Ejemplo n.º 5
0
# NOTE(review): fragment — `data`, `keep` and `subset1` are defined above this
# excerpt and not visible here. The visible pattern suggests each subsetN keeps
# only the voxels of one 33-voxel corner of `data` exceeding 0.5 — confirm
# against the full source.
subset2[:,:33,-33:][keep] = data[:,:33,-33:][keep]

subset3 = np.zeros_like(data)
keep = data[:,-33:,-33:] > 0.5  # bright voxels in this corner only
subset3[:,-33:,-33:][keep] = data[:,-33:,-33:][keep]

subset4 = np.zeros_like(data)
keep = data[:,-33:,:33] > 0.5
subset4[:,-33:,:33][keep] = data[:,-33:,:33][keep]

# Create Vispy visualization

canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
view = canvas.central_widget.add_view()

# Translucent single-hue colormaps: white for the full cube, one hue per subset
grays = get_translucent_cmap(1, 1, 1)
reds = get_translucent_cmap(1, 0, 0)
greens = get_translucent_cmap(0, 1, 0)
blues = get_translucent_cmap(0, 0, 1)
oranges = get_translucent_cmap(1, 0.5, 0)

# Create the volume visuals, only one is visible
# Each tuple is (data, clim, cmap) — presumably consumed by MultiVolume; verify.
print(data.max())
volumes = [
           (data, (0, 6), grays),
           (subset1, (0, 4), reds),
           (subset2, (0, 4), greens),
           (subset3, (0, 4), blues),
           (subset4, (0, 4), oranges)
       ]
       
Ejemplo n.º 6
0
    def __init__(self, keys='interactive'):
        """Set up the demo scene: Qt layout, vispy canvas, mouse/key event
        wiring, camera, the data volume, and the lasso-selection state.

        Parameters
        ----------
        keys : str
            Key-binding set forwarded to ``scene.SceneCanvas``.
        """
        super(DemoScene, self).__init__()

        # Layout and canvas creation
        box = QtGui.QVBoxLayout(self)
        self.resize(800, 600)
        self.setLayout(box)
        self.canvas = scene.SceneCanvas(keys=keys)
        box.addWidget(self.canvas.native)

        # Connect events (handlers defined elsewhere on this class)
        self.canvas.events.mouse_press.connect(self.on_mouse_press)
        self.canvas.events.mouse_release.connect(self.on_mouse_release)
        self.canvas.events.mouse_move.connect(self.on_mouse_move)
        self.canvas.events.key_press.connect(self.on_key_press)

        # Setup some defaults
        self.mesh = None
        self.selected = []  # current selection; consumed by mark_selected()
        self.white = (1.0, 1.0, 1.0, 1.0)  # opaque white (RGBA)
        self.black = (0.0, 0.0, 0.0, 0.0)  # fully transparent black (RGBA)

        # Camera
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.cameras.TurntableCamera(elevation=25,
                                                         azimuth=20,
                                                         distance=2.0,
                                                         center=(0, 0, 0))

        # Data: load the 13CO cube, replacing NaNs with zeros
        fitsdata = pyfits.open('l1448_13co.fits')
        vol1 = np.nan_to_num(fitsdata[0].data)
        self.vol_data = vol1
        """
        The transpose here and after is for solving the coordinate mismatch between volume visual input data and its
        rendering result. The rendered volume shown on 2D screen, or 'what we see', is through displaying transform
        (self.tr here) of 'transposed input data', thus we use 'transpose' to enable our selection focusing on 'what
        we see' on the screen rather than the real input data of volume.

        """
        new_pos = np.transpose(vol1)

        # TODO: replace the min&max threshold with real settings in Glue UI
        # NOTE(review): max_threshold is computed but unused — placeholder for the UI setting.
        min_threshold = np.min(self.vol_data)
        max_threshold = np.max(self.vol_data)
        self.pos_data = np.argwhere(
            new_pos >= min_threshold)  # get voxel positions

        grays = get_translucent_cmap(1, 1, 1)

        self.volume_pool = [(vol1, (1, 6), grays)]
        self.volume = MultiVolume(self.volume_pool)
        # Translation centering the cube on the origin (axes reversed w.r.t. array shape)
        self.trans = [
            -vol1.shape[2] / 2., -vol1.shape[1] / 2., -vol1.shape[0] / 2.
        ]
        self.volume.transform = scene.STTransform(translate=self.trans)
        self.view.add(self.volume)

        self.tr = self.volume.node_transform(self.view)  # ChainTransform

        # Add a text instruction (empty until a mode/message is shown)
        self.text = scene.visuals.Text('',
                                       color='white',
                                       pos=(self.canvas.size[0] / 4.0, 20),
                                       parent=self.canvas.scene)

        # Add a 3D axis to keep us oriented
        axis = scene.visuals.XYZAxis(parent=self.view.scene)

        # Set up for lasso drawing
        self.line_pos = []
        self.line = scene.visuals.Line(color='yellow',
                                       method='gl',
                                       parent=self.canvas.scene)

        # Selection: mode ids map number keys to selection shapes
        self.selection_flag = False
        self.selection_pool = {
            '1': 'lasso',
            '2': 'rectangle',
            '3': 'ellipse',
            '4': 'pick'
        }
        self.selection_id = '1'  # default as 1
        self.selection_origin = (0, 0)
Ejemplo n.º 7
0
# Read volume
# NOTE(review): vol2 is loaded and re-oriented but never used below —
# presumably left over from the upstream vispy volume example; confirm
# before removing.
vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0']
vol2 = np.load(io.load_data_file('brain/mri.npz'))['data']
vol2 = np.flipud(np.rollaxis(vol2, 1))

# Prepare canvas
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
canvas.measure_fps()

# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()

# Set whether we are emulating a 3D texture
emulate_texture = False

# Translucent single-hue colormaps for blending the two renderings
reds = get_translucent_cmap(1, 0, 0)
blues = get_translucent_cmap(0, 0, 1)

# Create the volume visuals, only one is visible
# Blue: the stent cube as-is; red: the same cube mirrored on all three axes.
# Each tuple is (data, clim, cmap) — clim=None presumably auto-scales; verify.
volumes = [(vol1, None, blues), (vol1[::-1,::-1,::-1], None, reds)]
volume1 = MultiVolume(volumes, parent=view.scene, threshold=0.225,
                               emulate_texture=emulate_texture)
volume1.transform = scene.STTransform(translate=(64, 64, 0))

# Create three cameras (Fly, Turntable and Arcball)
# NOTE(review): only the turntable camera is actually created here.
fov = 60.
cam2 = scene.cameras.TurntableCamera(parent=view.scene, fov=fov,
                                     name='Turntable')
view.camera = cam2  # Select turntable at first

canvas.update()
Ejemplo n.º 8
0
# Read volume
# NOTE(review): excerpt appears truncated after the camera creation; vol2 is
# loaded and re-oriented but unused in the visible portion — confirm against
# the full source.
vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0']
vol2 = np.load(io.load_data_file('brain/mri.npz'))['data']
vol2 = np.flipud(np.rollaxis(vol2, 1))

# Prepare canvas
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
canvas.measure_fps()

# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()

# Set whether we are emulating a 3D texture
emulate_texture = False

# Translucent single-hue colormaps for blending the two renderings
reds = get_translucent_cmap(1, 0, 0)
blues = get_translucent_cmap(0, 0, 1)

# Create the volume visuals, only one is visible
# Blue: the stent cube as-is; red: the same cube mirrored on all three axes.
volumes = [(vol1, None, blues), (vol1[::-1, ::-1, ::-1], None, reds)]
volume1 = MultiVolume(volumes,
                      parent=view.scene,
                      threshold=0.225,
                      emulate_texture=emulate_texture)
volume1.transform = scene.STTransform(translate=(64, 64, 0))

# Create three cameras (Fly, Turntable and Arcball)
# NOTE(review): only the turntable camera is visible in this excerpt.
fov = 60.
cam2 = scene.cameras.TurntableCamera(parent=view.scene,
                                     fov=fov,
                                     name='Turntable')