# ===== Example 1 =====
    def __init__(self, keys='interactive'):
        """Build the widget: Qt layout + vispy canvas, camera, and FITS volume.

        Parameters
        ----------
        keys : str
            Forwarded to ``scene.SceneCanvas``; 'interactive' enables vispy's
            default key bindings.
        """
        super(DemoScene, self).__init__()
        # Layout and canvas creation
        box = QtGui.QVBoxLayout(self)
        self.resize(500, 500)
        self.setLayout(box)
        self.canvas = scene.SceneCanvas(keys=keys)
        box.addWidget(self.canvas.native)

        # Camera
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.cameras.TurntableCamera(elevation=25,
                                                         azimuth=20,
                                                         distance=2.0,
                                                         center=(0, 0, 0))

        # Data: load the cube and replace NaNs so rendering/statistics work.
        fitsdata = pyfits.open('l1448_13co.fits')
        vol1 = np.nan_to_num(fitsdata[0].data)
        self.vol_data = vol1
        """
        The transpose here and after is for solving the coordinate mismatch between volume visual input data and its
        rendering result. The rendered volume shown on 2D screen, or 'what we see', is through displaying transform
        (self.tr here) of 'transposed input data', thus we use 'transpose' to enable our selection focusing on 'what
        we see' on the screen rather than the real input data of volume.

        """
        new_pos = np.transpose(vol1)

        # TODO: replace the min&max threshold with real settings in Glue UI
        min_threshold = np.min(self.vol_data)
        # NOTE(review): max_threshold is computed but never used below -- kept
        # pending the TODO above.
        max_threshold = np.max(self.vol_data)
        self.pos_data = np.argwhere(
            new_pos >= min_threshold)  # get voxel positions

        grays = get_translucent_cmap(1, 1, 1)

        # One (data, clim, cmap) entry per rendered component.
        self.volume_pool = [(vol1, (1, 6), grays)]
        self.volume = MultiVolume(self.volume_pool)
        # Translate by half the extent of each axis (note the reversed shape
        # indices) so the cube is centered on the origin.
        self.trans = [
            -vol1.shape[2] / 2., -vol1.shape[1] / 2., -vol1.shape[0] / 2.
        ]
        self.volume.transform = scene.STTransform(translate=self.trans)
        self.view.add(self.volume)

        # Add a 3D axis to keep us oriented
        axis = scene.visuals.XYZAxis(parent=self.view.scene)

        # Delegate mouse/keyboard selection handling to the shared helper.
        self.selection = SelectionCommon(canvas=self.canvas,
                                         view=self.view,
                                         vol_data=self.vol_data,
                                         volume=self.volume,
                                         volume_pool=self.volume_pool,
                                         pos_data=self.pos_data)
    def __init__(self, keys='interactive'):
        """Build the widget: Qt layout, vispy canvas, event wiring, camera,
        FITS volume, and the state used by the selection event handlers.

        Parameters
        ----------
        keys : str
            Forwarded to ``scene.SceneCanvas``; 'interactive' enables vispy's
            default key bindings.
        """
        super(DemoScene, self).__init__()

        # Layout and canvas creation
        box = QtGui.QVBoxLayout(self)
        self.resize(800, 600)
        self.setLayout(box)
        self.canvas = scene.SceneCanvas(keys=keys)
        box.addWidget(self.canvas.native)

        # Connect events
        self.canvas.events.mouse_press.connect(self.on_mouse_press)
        self.canvas.events.mouse_release.connect(self.on_mouse_release)
        self.canvas.events.mouse_move.connect(self.on_mouse_move)
        self.canvas.events.key_press.connect(self.on_key_press)

        # Setup some defaults
        self.mesh = None
        self.selected = []  # later replaced by a per-voxel selection mask
        self.white = (1.0, 1.0, 1.0, 1.0)
        self.black = (0.0, 0.0, 0.0, 0.0)

        # Camera
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.cameras.TurntableCamera(elevation = 25, azimuth=20, distance = 2.0, center=(0,0,0))

        # Data: load the cube and replace NaNs so rendering/statistics work.
        fitsdata = pyfits.open('l1448_13co.fits')
        self.vol_data = np.nan_to_num(fitsdata[0].data)

        """
        The transpose here and after is for solving the coordinate mismatch between volume visual input data and its
        rendering result. The rendered volume shown on 2D screen, or 'what we see', is through displaying transform
        (self.tr here) of 'transposed input data', thus we use 'transpose' to enable our selection focusing on 'what
        we see' on the screen rather than the real input data of volume.

        """
        new_pos = np.transpose(self.vol_data)

        # TODO: replace the min&max threshold with real settings in Glue UI
        self.pos_data = np.argwhere(new_pos >= np.min(self.vol_data))  # get voxel positions

        grays = get_translucent_cmap(1, 1, 1)

        # One (data, clim, cmap) entry per rendered component.
        self.volume_pool = [(self.vol_data, (1, 6), grays)]
        self.volume = MultiVolume(self.volume_pool)
        # Translate by half the extent of each axis (note the reversed shape
        # indices) so the cube is centered on the origin.
        self.trans = [-self.vol_data.shape[2]/2., -self.vol_data.shape[1]/2., -self.vol_data.shape[0]/2.]
        self.volume.transform = scene.STTransform(translate=self.trans)
        self.view.add(self.volume)

        self.tr = self.volume.node_transform(self.view)  # ChainTransform

        # create a volume for showing the selected part
        self.volume1 = scene.visuals.Volume(self.vol_data, clim=(4, 6), parent=self.view.scene)
        self.volume1.transform = scene.STTransform(translate=self.trans)
        self.volume1.visible = False

        # Add a text instruction
        self.text = scene.visuals.Text('', color='white', pos=(self.canvas.size[0]/4.0,  20), parent=self.canvas.scene)

        # Add a 3D axis to keep us oriented
        axis = scene.visuals.XYZAxis(parent=self.view.scene)

        # Set up for lasso drawing
        self.line_pos = []
        self.line = scene.visuals.Line(color='yellow', method='gl', parent=self.canvas.scene)

        # Selection state: a flag toggling view vs. selection mode, the map of
        # key -> selection tool, the current tool, and the drag origin.
        self.selection_flag = False
        self.selection_pool = {'1': 'lasso', '2': 'rectangle', '3': 'ellipse', '4': 'pick', '5': 'floodfill'}
        self.selection_id = '1'  # default as 1
        self.selection_origin = (0, 0)
class DemoScene(QtGui.QWidget):
    """Qt widget embedding a vispy SceneCanvas for interactive 3D volume
    selection.

    A FITS cube is rendered through a ``MultiVolume`` visual; the keyboard
    toggles between camera ("view") mode and one of five selection modes:

        '1' lasso, '2' rectangle, '3' ellipse, '4' pick, '5' floodfill

    Selected voxels are re-rendered in red; floodfill results are shown in a
    second ``Volume`` visual.
    """

    def __init__(self, keys='interactive'):
        """Build the UI, load the data cube, and set up visuals and state.

        Parameters
        ----------
        keys : str
            Forwarded to ``scene.SceneCanvas``; 'interactive' enables vispy's
            default key bindings.
        """
        super(DemoScene, self).__init__()

        # Layout and canvas creation
        box = QtGui.QVBoxLayout(self)
        self.resize(800, 600)
        self.setLayout(box)
        self.canvas = scene.SceneCanvas(keys=keys)
        box.addWidget(self.canvas.native)

        # Connect events
        self.canvas.events.mouse_press.connect(self.on_mouse_press)
        self.canvas.events.mouse_release.connect(self.on_mouse_release)
        self.canvas.events.mouse_move.connect(self.on_mouse_move)
        self.canvas.events.key_press.connect(self.on_key_press)

        # Setup some defaults
        self.mesh = None
        self.selected = []  # later replaced by a per-voxel selection mask
        self.white = (1.0, 1.0, 1.0, 1.0)
        self.black = (0.0, 0.0, 0.0, 0.0)

        # Camera
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.cameras.TurntableCamera(elevation=25,
                                                         azimuth=20,
                                                         distance=2.0,
                                                         center=(0, 0, 0))

        # Data: load the cube and replace NaNs so rendering/statistics work.
        fitsdata = pyfits.open('l1448_13co.fits')
        self.vol_data = np.nan_to_num(fitsdata[0].data)

        # The transpose here (and below) resolves the coordinate mismatch
        # between the volume visual's input data and its rendering: what we
        # see on the 2D screen is the displaying transform (self.tr) of the
        # *transposed* input, so selection works in transposed coordinates
        # rather than against the raw cube.
        new_pos = np.transpose(self.vol_data)

        # TODO: replace the min&max threshold with real settings in Glue UI
        self.pos_data = np.argwhere(new_pos >= np.min(self.vol_data))  # get voxel positions

        grays = get_translucent_cmap(1, 1, 1)

        # One (data, clim, cmap) entry per rendered component.
        self.volume_pool = [(self.vol_data, (1, 6), grays)]
        self.volume = MultiVolume(self.volume_pool)
        # Translate by half the extent of each axis (note the reversed shape
        # indices) so the cube is centered on the origin.
        self.trans = [-self.vol_data.shape[2] / 2.,
                      -self.vol_data.shape[1] / 2.,
                      -self.vol_data.shape[0] / 2.]
        self.volume.transform = scene.STTransform(translate=self.trans)
        self.view.add(self.volume)

        self.tr = self.volume.node_transform(self.view)  # ChainTransform

        # create a volume for showing the selected part (floodfill mode)
        self.volume1 = scene.visuals.Volume(self.vol_data, clim=(4, 6),
                                            parent=self.view.scene)
        self.volume1.transform = scene.STTransform(translate=self.trans)
        self.volume1.visible = False

        # Add a text instruction
        self.text = scene.visuals.Text('', color='white',
                                       pos=(self.canvas.size[0] / 4.0, 20),
                                       parent=self.canvas.scene)

        # Add a 3D axis to keep us oriented
        axis = scene.visuals.XYZAxis(parent=self.view.scene)

        # Set up for lasso drawing
        self.line_pos = []
        self.line = scene.visuals.Line(color='yellow', method='gl',
                                       parent=self.canvas.scene)

        # Selection state
        self.selection_flag = False
        self.selection_pool = {'1': 'lasso', '2': 'rectangle', '3': 'ellipse',
                               '4': 'pick', '5': 'floodfill'}
        self.selection_id = '1'  # default as 1
        self.selection_origin = (0, 0)

    def event_connect(self, flag):
        """Detach (flag=True) or re-attach (flag=False) the camera's own
        mouse handlers, so selection drags do not also move the camera."""
        events = self.view.camera._viewbox.events
        handler = self.view.camera.viewbox_mouse_event
        if flag:
            events.mouse_move.disconnect(handler)
            events.mouse_press.disconnect(handler)
            events.mouse_release.disconnect(handler)
            events.mouse_wheel.disconnect(handler)
        else:
            events.mouse_move.connect(handler)
            events.mouse_press.connect(handler)
            events.mouse_release.connect(handler)
            events.mouse_wheel.connect(handler)

#================================= Functionality Functions Start ==================================#

    def mark_selected(self):
        """Overlay the currently selected voxels as a red sub-volume.

        BUGFIX: ``np.transpose`` returns a *view*, so calling ``np.place`` on
        it used to zero out ``self.vol_data`` itself; work on a copy instead.
        """
        # Change the color of the selected point
        reds = get_translucent_cmap(1, 0, 0)

        select_data = np.transpose(self.vol_data).copy()
        not_select = np.logical_not(self.selected)
        np.place(select_data, not_select, 0)
        select_data = np.transpose(select_data)

        print('select_data is', select_data, select_data.shape)
        maxpos = np.unravel_index(select_data.argmax(), select_data.shape)
        print('got the max pos', maxpos)
        self.volume_pool.append((select_data, (1, 6), reds))

        # TODO: no set_data function available in multi_volume_visual
        self.volume._update_all_volumes(self.volume_pool)
        print('self.volume_pool', len(self.volume_pool))
        self.canvas.update()

    def get_max_pos(self):
        """Return the index of the brightest voxel whose projection falls in
        an 8x8 pixel box around ``self.selection_origin``, or ``None`` when
        nothing is picked."""
        # Ray intersection on the CPU to highlight the selected point(s)
        data = self.tr.map(self.pos_data)[:, :2]  # Map coordinates
        print('data after tr.map', data)
        m1 = data > (self.selection_origin - 4)
        m2 = data < (self.selection_origin + 4)
        max_value = 0.
        max_pos = None
        pick_selected = np.argwhere(m1[:, 0] & m1[:, 1] & m2[:, 0] & m2[:, 1])
        for item in pick_selected:
            # NOTE(review): 'item' is a row index into pos_data (coordinates
            # of the *transposed* cube), not a flat index into vol_data;
            # unravel_index against vol_data.shape only matches when the two
            # orderings coincide -- confirm against the transposed layout.
            index = np.unravel_index(item, self.vol_data.shape)
            if self.vol_data[index] > max_value:
                max_value = self.vol_data[index]
                max_pos = np.array(index).flatten()
        print('maxpos, maxvalue', max_pos, max_value)
        if max_pos is None:
            # Nothing under the cursor; previously raised TypeError on
            # max_pos[0].
            return None
        return (max_pos[0], max_pos[1], max_pos[2])  # tuple argument for flood_fill_3d.cyfill()

    def draw_floodfill_visual(self, threhold):
        """Flood-fill from the brightest picked voxel and display the result.

        Parameters
        ----------
        threhold : float
            Fill tolerance (parameter name kept -- sic -- for interface
            compatibility).
        """
        formate_data = np.asarray(self.vol_data, np.float64)
        pos = self.get_max_pos()
        if pos is None:
            return  # no seed voxel under the cursor

        selec_vol = flood_fill_3d.cyfill(formate_data, pos, 5, threhold)  # (3d data, start pos, replaced val, thresh)

        self.volume1.set_data(selec_vol)
        self.volume1.visible = True
        self.volume.visible = False

        self.canvas.update()


#================================= Event Functions Start ==================================#

    def on_key_press(self, event):
        """Toggle between view mode and the selection mode bound to keys
        '1'-'5', updating the instruction text accordingly."""
        if event.text in self.selection_pool.keys():
            if not self.selection_flag:
                self.text.text = 'Now is %s selection mode, press %s to switch' % (self.selection_pool[event.text],
                                                                                   event.text)
                self.selection_flag = True
            else:
                self.text.text = 'Now is view mode, press %s to switch' % event.text
                self.selection_flag = False
            self.event_connect(self.selection_flag)
            self.selection_id = event.text
            # self.volume.visible = True

    def on_mouse_press(self, event):
        """Pick mode ('4'): select voxels near the click; other modes:
        remember the drag origin."""
        print('I wanna know mouse pos', event.pos)

        # Realize picking functionality and set origin mouse pos
        if event.button == 1 and self.selection_flag:
            if self.selection_id == '4':
                # Ray intersection on the CPU to highlight the selected point(s)
                data = self.tr.map(self.pos_data)[:, :2]  # Map coordinates
                print('data after tr.map', data)
                m1 = data > (event.pos - 4)
                m2 = data < (event.pos + 4)

                pick_selected = np.argwhere(m1[:, 0] & m1[:, 1] & m2[:, 0] & m2[:, 1])
                len_mask = self.vol_data.shape[0] * self.vol_data.shape[1] * self.vol_data.shape[2]

                full_mask = np.zeros(len_mask)
                full_mask[pick_selected] = True
                self.selected = full_mask
                print('self.selected is', self.selected, len(self.selected))
                self.mark_selected()

            else:
                self.selection_origin = event.pos

    def on_mouse_release(self, event):
        """Finish a lasso/rectangle/ellipse selection and mark the voxels."""
        # BUGFIX: was "self.selection_id is not '4'" -- identity comparison
        # with a string literal is implementation-dependent; use !=.
        if event.button == 1 and self.selection_flag and self.selection_id != '4':
            data = self.tr.map(self.pos_data)[:, :2]

            if self.selection_id in ['1', '2', '3']:
                selection_path = path.Path(self.line_pos, closed=True)
                mask = selection_path.contains_points(data)

                self.selected = mask
                print('mask len', len(mask), mask)
                self.mark_selected()

                # Reset lasso
                self.line_pos = []  # TODO: Empty pos input is not allowed for line_visual
                self.line.set_data(np.array(self.line_pos))
                self.line.update()

            if self.selection_id in ['2', '3']:
                self.selection_origin = None

    def on_mouse_move(self, event):
        """Draw the lasso/rectangle/ellipse outline while dragging, or drive
        the floodfill threshold from the drag distance."""
        if event.button == 1 and event.is_dragging and self.selection_flag:
            if self.selection_id == '1':
                self.line_pos.append(event.pos)
                self.line.set_data(np.array(self.line_pos))

            if self.selection_id in ['2', '3']:
                width = event.pos[0] - self.selection_origin[0]
                height = event.pos[1] - self.selection_origin[1]
                center = (width / 2. + self.selection_origin[0],
                          height / 2. + self.selection_origin[1], 0)

                if self.selection_id == '2':
                    self.line_pos = rectangle_vertice(center, height, width)
                    self.line.set_data(np.array(self.line_pos))

                if self.selection_id == '3':
                    self.line_pos = ellipse_vertice(center,
                                                    radius=(np.abs(width / 2.), np.abs(height / 2.)),
                                                    start_angle=0., span_angle=360., num_segments=500)
                    self.line.set_data(pos=np.array(self.line_pos), connect='strip')

            if self.selection_id == '5':
                # calculate the threshold and call draw visual
                width = event.pos[0] - self.selection_origin[0]
                height = event.pos[1] - self.selection_origin[1]
                drag_distance = math.sqrt(width ** 2 + height ** 2)
                canvas_diag = math.sqrt(self.canvas.size[0] ** 2 + self.canvas.size[1] ** 2)
                # normalize the threshold between max and min value
                normalize = (np.max(self.vol_data) - np.min(self.vol_data)) / canvas_diag
                self.draw_floodfill_visual(drag_distance * normalize)
# ===== Example 4 =====
# Script: render five stacked translucent volumes (full cube + four subsets)
# in one MultiVolume visual with a turntable camera.
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
view = canvas.central_widget.add_view()

# One translucent single-channel colormap per component.
grays = get_translucent_cmap(1, 1, 1)
reds = get_translucent_cmap(1, 0, 0)
greens = get_translucent_cmap(0, 1, 0)
blues = get_translucent_cmap(0, 0, 1)
oranges = get_translucent_cmap(1, 0.5, 0)

# Create the volume visuals, only one is visible
print(data.max())
# Each entry is (volume data, clim, colormap).
volumes = [(data, (0, 6), grays), (subset1, (0, 4), reds),
           (subset2, (0, 4), greens), (subset3, (0, 4), blues),
           (subset4, (0, 4), oranges)]

volume1 = MultiVolume(volumes, parent=view.scene)
volume1.transform = scene.STTransform(translate=(64, 64, 0))

view.camera = scene.cameras.TurntableCamera(parent=view.scene,
                                            fov=60.,
                                            name='Turntable')

canvas.update()
#
# # create colormaps that work well for translucent and additive volume rendering
#
#
# # for testing performance
# # @canvas.connect
# # def on_draw(ev):
# # canvas.update()
# ===== Example 5 =====
# Script: render five stacked translucent volumes (full cube + four subsets)
# in one MultiVolume visual with a turntable camera.
#
# BUGFIX: 'grays' was referenced in the volumes list below without ever being
# defined in this example (NameError at runtime); define it alongside the
# other colormaps, matching the sibling example.
grays = get_translucent_cmap(1, 1, 1)
reds = get_translucent_cmap(1, 0, 0)
greens = get_translucent_cmap(0, 1, 0)
blues = get_translucent_cmap(0, 0, 1)
oranges = get_translucent_cmap(1, 0.5, 0)

# Create the volume visuals, only one is visible
print(data.max())
# Each entry is (volume data, clim, colormap).
volumes = [
    (data, (0, 6), grays),
    (subset1, (0, 4), reds),
    (subset2, (0, 4), greens),
    (subset3, (0, 4), blues),
    (subset4, (0, 4), oranges),
]

volume1 = MultiVolume(volumes, parent=view.scene)
volume1.transform = scene.STTransform(translate=(64, 64, 0))


view.camera = scene.cameras.TurntableCamera(parent=view.scene, fov=60.,
                                            name='Turntable')


canvas.update()
#
# # create colormaps that work well for translucent and additive volume rendering
#
#
# # for testing performance
# # @canvas.connect
# # def on_draw(ev):
# ===== Example 6 =====
    def __init__(self, keys='interactive'):
        """Build the widget: Qt layout, vispy canvas, event wiring, camera,
        FITS volume, and the state used by the selection event handlers.

        Parameters
        ----------
        keys : str
            Forwarded to ``scene.SceneCanvas``; 'interactive' enables vispy's
            default key bindings.
        """
        super(DemoScene, self).__init__()

        # Layout and canvas creation
        box = QtGui.QVBoxLayout(self)
        self.resize(800, 600)
        self.setLayout(box)
        self.canvas = scene.SceneCanvas(keys=keys)
        box.addWidget(self.canvas.native)

        # Connect events
        self.canvas.events.mouse_press.connect(self.on_mouse_press)
        self.canvas.events.mouse_release.connect(self.on_mouse_release)
        self.canvas.events.mouse_move.connect(self.on_mouse_move)
        self.canvas.events.key_press.connect(self.on_key_press)

        # Setup some defaults
        self.mesh = None
        self.selected = []  # later replaced by a per-voxel selection mask
        self.white = (1.0, 1.0, 1.0, 1.0)
        self.black = (0.0, 0.0, 0.0, 0.0)

        # Camera
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.cameras.TurntableCamera(elevation=90,
                                                         azimuth=0,
                                                         fov=60,
                                                         center=(0, 0, 0))

        # Data: load the cube and replace NaNs so rendering/statistics work.
        fitsdata = fits.open('l1448_13co.fits')
        self.vol_data = np.nan_to_num(fitsdata[0].data)
        """
        The transpose here and after is for solving the coordinate mismatch between volume visual input data and its
        rendering result. The rendered volume shown on 2D screen, or 'what we see', is through displaying transform
        (self.tr here) of 'transposed input data', thus we use 'transpose' to enable our selection focusing on 'what
        we see' on the screen rather than the real input data of volume.

        """
        # NOTE(review): new_pos is computed but not used below in this
        # variant; pos_data is built from np.indices instead.
        new_pos = np.transpose(self.vol_data)

        # TODO: replace the min&max threshold with real settings in Glue UI
        # All voxel coordinates of the cube, one (z, y, x) row per voxel.
        self.pos_data = np.indices(self.vol_data.shape).reshape(
            3, -1).transpose()

        grays = get_translucent_cmap(1, 1, 1)

        # One (data, clim, cmap) entry per rendered component.
        self.volume_pool = [(self.vol_data, (1, 6), grays)]
        self.volume = MultiVolume(self.volume_pool)
        # Translate by half the extent of each axis (note the reversed shape
        # indices) so the cube is centered on the origin.
        self.trans = [
            -self.vol_data.shape[2] / 2., -self.vol_data.shape[1] / 2.,
            -self.vol_data.shape[0] / 2.
        ]
        self.volume.transform = scene.STTransform(translate=self.trans)
        self.view.add(self.volume)

        # Visual -> canvas transform, used to project voxels to screen space.
        self.tr = self.volume.get_transform(map_from='visual', map_to='canvas')

        # create a volume for showing the selected part
        self.volume1 = scene.visuals.Volume(self.vol_data,
                                            clim=(4, 6),
                                            parent=self.view.scene)
        self.volume1.transform = scene.STTransform(translate=self.trans)
        self.volume1.visible = False

        # Add a text instruction
        self.text = scene.visuals.Text('',
                                       color='white',
                                       pos=(self.canvas.size[0] / 4.0, 20),
                                       parent=self.canvas.scene)

        # Add a 3D axis to keep us oriented
        axis = scene.visuals.XYZAxis(parent=self.view.scene)

        # Set up for lasso drawing
        self.line_pos = []
        self.line = scene.visuals.Line(color='yellow',
                                       method='gl',
                                       parent=self.canvas.scene)

        # Selection state: a flag toggling view vs. selection mode, the map of
        # key -> selection tool, the current tool, and the drag origin.
        self.selection_flag = False
        self.selection_pool = {
            '1': 'lasso',
            '2': 'rectangle',
            '3': 'ellipse',
            '4': 'pick',
            '5': 'floodfill'
        }
        self.selection_id = '1'  # default as 1
        self.selection_origin = (0, 0)
# ===== Example 7 =====
class DemoScene(QtGui.QWidget):
    """Qt widget embedding a vispy SceneCanvas for interactive 3D volume
    selection.

    A FITS cube is rendered through a ``MultiVolume`` visual; the keyboard
    toggles between camera ("view") mode and one of five selection modes:

        '1' lasso, '2' rectangle, '3' ellipse, '4' pick, '5' floodfill

    Selected voxels are re-rendered in red; floodfill results are shown in a
    second ``Volume`` visual.
    """

    def __init__(self, keys='interactive'):
        """Build the UI, load the data cube, and set up visuals and state.

        Parameters
        ----------
        keys : str
            Forwarded to ``scene.SceneCanvas``; 'interactive' enables vispy's
            default key bindings.
        """
        super(DemoScene, self).__init__()

        # Layout and canvas creation
        box = QtGui.QVBoxLayout(self)
        self.resize(800, 600)
        self.setLayout(box)
        self.canvas = scene.SceneCanvas(keys=keys)
        box.addWidget(self.canvas.native)

        # Connect events
        self.canvas.events.mouse_press.connect(self.on_mouse_press)
        self.canvas.events.mouse_release.connect(self.on_mouse_release)
        self.canvas.events.mouse_move.connect(self.on_mouse_move)
        self.canvas.events.key_press.connect(self.on_key_press)

        # Setup some defaults
        self.mesh = None
        self.selected = []  # later replaced by a per-voxel selection mask
        self.white = (1.0, 1.0, 1.0, 1.0)
        self.black = (0.0, 0.0, 0.0, 0.0)

        # Camera
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.cameras.TurntableCamera(elevation=90,
                                                         azimuth=0,
                                                         fov=60,
                                                         center=(0, 0, 0))

        # Data: load the cube and replace NaNs so rendering/statistics work.
        fitsdata = fits.open('l1448_13co.fits')
        self.vol_data = np.nan_to_num(fitsdata[0].data)

        # The transpose here resolves the coordinate mismatch between the
        # volume visual's input data and its rendering: what we see on the 2D
        # screen is the displaying transform (self.tr) of the *transposed*
        # input, so selection works in screen-facing coordinates rather than
        # against the raw cube.
        new_pos = np.transpose(self.vol_data)

        # TODO: replace the min&max threshold with real settings in Glue UI
        # All voxel coordinates of the cube, one (z, y, x) row per voxel.
        self.pos_data = np.indices(self.vol_data.shape).reshape(
            3, -1).transpose()

        grays = get_translucent_cmap(1, 1, 1)

        # One (data, clim, cmap) entry per rendered component.
        self.volume_pool = [(self.vol_data, (1, 6), grays)]
        self.volume = MultiVolume(self.volume_pool)
        # Translate by half the extent of each axis (note the reversed shape
        # indices) so the cube is centered on the origin.
        self.trans = [
            -self.vol_data.shape[2] / 2., -self.vol_data.shape[1] / 2.,
            -self.vol_data.shape[0] / 2.
        ]
        self.volume.transform = scene.STTransform(translate=self.trans)
        self.view.add(self.volume)

        # Visual -> canvas transform, used to project voxels to screen space.
        self.tr = self.volume.get_transform(map_from='visual', map_to='canvas')

        # create a volume for showing the selected part (floodfill mode)
        self.volume1 = scene.visuals.Volume(self.vol_data,
                                            clim=(4, 6),
                                            parent=self.view.scene)
        self.volume1.transform = scene.STTransform(translate=self.trans)
        self.volume1.visible = False

        # Add a text instruction
        self.text = scene.visuals.Text('',
                                       color='white',
                                       pos=(self.canvas.size[0] / 4.0, 20),
                                       parent=self.canvas.scene)

        # Add a 3D axis to keep us oriented
        axis = scene.visuals.XYZAxis(parent=self.view.scene)

        # Set up for lasso drawing
        self.line_pos = []
        self.line = scene.visuals.Line(color='yellow',
                                       method='gl',
                                       parent=self.canvas.scene)

        # Selection state
        self.selection_flag = False
        self.selection_pool = {
            '1': 'lasso',
            '2': 'rectangle',
            '3': 'ellipse',
            '4': 'pick',
            '5': 'floodfill'
        }
        self.selection_id = '1'  # default as 1
        self.selection_origin = (0, 0)

    def transform(self, data):
        """Project voxel coordinates to 2D canvas coordinates.

        Applies ``self.tr`` and normalizes by the homogeneous (w) component
        before dropping to the first two columns.
        """
        data = self.tr.map(data)
        data /= data[:, 3:]  # normalize with homogeneous coordinates
        return data[:, :2]

    def event_connect(self, flag):
        """Detach (flag=True) or re-attach (flag=False) the camera's own
        mouse handlers, so selection drags do not also move the camera."""
        events = self.view.camera._viewbox.events
        handler = self.view.camera.viewbox_mouse_event
        if flag:
            events.mouse_move.disconnect(handler)
            events.mouse_press.disconnect(handler)
            events.mouse_release.disconnect(handler)
            events.mouse_wheel.disconnect(handler)
        else:
            events.mouse_move.connect(handler)
            events.mouse_press.connect(handler)
            events.mouse_release.connect(handler)
            events.mouse_wheel.connect(handler)

#================================= Functionality Functions Start ==================================#

    def mark_selected(self):
        """Overlay the currently selected voxels as a red sub-volume.

        BUGFIX: ``np.transpose`` returns a *view*, so calling ``np.place`` on
        it used to zero out ``self.vol_data`` itself; work on a copy instead.
        """
        # Change the color of the selected point
        reds = get_translucent_cmap(1, 0, 0)

        select_data = np.transpose(self.vol_data).copy()
        not_select = np.logical_not(self.selected)
        np.place(select_data, not_select, 0)
        select_data = np.transpose(select_data)

        print('select_data is', select_data, select_data.shape)
        maxpos = np.unravel_index(select_data.argmax(), select_data.shape)
        print('got the max pos', maxpos)
        self.volume_pool.append((select_data, (1, 6), reds))

        # TODO: no set_data function available in multi_volume_visual
        self.volume._update_all_volumes(self.volume_pool)
        print('self.volume_pool', len(self.volume_pool))
        self.canvas.update()

    def get_max_pos(self):
        """Return the (z, y, x) index of the brightest voxel whose projection
        falls in an 8x8 pixel box around ``self.selection_origin``, or
        ``None`` when nothing is picked."""
        # Ray intersection on the CPU to highlight the selected point(s)
        data = self.transform(self.pos_data)  # Map coordinates
        m1 = data > (self.selection_origin - 4)
        m2 = data < (self.selection_origin + 4)
        max_value = 0.
        max_pos = None
        pick_selected = np.argwhere(m1[:, 0] & m1[:, 1] & m2[:, 0] & m2[:, 1])
        for item in pick_selected:
            index = tuple(self.pos_data[item].flatten())
            if self.vol_data[index] > max_value:
                max_value = self.vol_data[index]
                max_pos = index
        return max_pos  # tuple argument for the floodfill seed

    def draw_floodfill_visual(self, threshold):
        """Flood-fill from the brightest picked voxel and display the result.

        Parameters
        ----------
        threshold : float
            Drag fraction in [0, 1]; remapped below before flood-filling.
        """
        formate_data = np.asarray(self.vol_data, np.float64)
        pos = self.get_max_pos()
        # BUGFIX: get_max_pos may return None when nothing is under the
        # cursor; bail out instead of passing None to floodfill_scipy.
        if pos is None:
            return

        # Normalize the threshold so that it returns values in the range 1.01
        # to 101 (since it can currently be between 0 and 1)
        threshold = 1 + 10 ** (threshold * 4 - 2)

        selec_vol = floodfill_scipy(formate_data, pos,
                                    threshold).astype(float) * 5

        self.volume1.set_data(selec_vol)
        self.volume1.visible = True
        self.volume.visible = False

        self.canvas.update()


#================================= Event Functions Start ==================================#

    def on_key_press(self, event):
        """Toggle between view mode and the selection mode bound to keys
        '1'-'5', updating the instruction text accordingly."""
        if event.text in self.selection_pool.keys():
            if not self.selection_flag:
                self.text.text = 'Now is %s selection mode, press %s to switch' % (
                    self.selection_pool[event.text], event.text)
                self.selection_flag = True
            else:
                self.text.text = 'Now is view mode, press %s to switch' % event.text
                self.selection_flag = False
            self.event_connect(self.selection_flag)
            self.selection_id = event.text
            # self.volume.visible = True

    def on_mouse_press(self, event):
        """Pick mode ('4'): select voxels near the click; other modes:
        remember the drag origin."""
        print('I wanna know mouse pos', event.pos)

        # Realize picking functionality and set origin mouse pos
        if event.button == 1 and self.selection_flag:
            if self.selection_id == '4':
                # Ray intersection on the CPU to highlight the selected point(s)
                data = self.transform(self.pos_data)  # Map coordinates
                print('data after tr.map', data)
                m1 = data > (event.pos - 4)
                m2 = data < (event.pos + 4)

                pick_selected = np.argwhere(m1[:, 0] & m1[:, 1] & m2[:, 0]
                                            & m2[:, 1])
                len_mask = self.vol_data.shape[0] * self.vol_data.shape[
                    1] * self.vol_data.shape[2]

                full_mask = np.zeros(len_mask)
                full_mask[pick_selected] = True
                self.selected = full_mask
                print('self.selected is', self.selected, len(self.selected))
                self.mark_selected()

            else:
                self.selection_origin = event.pos

    def on_mouse_release(self, event):
        """Finish a lasso/rectangle/ellipse selection and mark the voxels."""
        # BUGFIX: was "self.selection_id is not '4'" -- identity comparison
        # with a string literal is implementation-dependent; use !=.
        if event.button == 1 and self.selection_flag and self.selection_id != '4':
            data = self.transform(self.pos_data)

            if self.selection_id in ['1', '2', '3']:
                selection_path = path.Path(self.line_pos, closed=True)
                mask = selection_path.contains_points(data)

                self.selected = mask
                print('mask len', len(mask), mask)
                self.mark_selected()

                # Reset lasso
                self.line_pos = [
                ]  # TODO: Empty pos input is not allowed for line_visual
                self.line.set_data(np.array(self.line_pos))
                self.line.update()

            if self.selection_id in ['2', '3']:
                self.selection_origin = None

    def on_mouse_move(self, event):
        """Draw the lasso/rectangle/ellipse outline while dragging, or drive
        the floodfill threshold from the drag distance."""
        if event.button == 1 and event.is_dragging and self.selection_flag:
            if self.selection_id == '1':
                self.line_pos.append(event.pos)
                self.line.set_data(np.array(self.line_pos))

            if self.selection_id in ['2', '3']:
                width = event.pos[0] - self.selection_origin[0]
                height = event.pos[1] - self.selection_origin[1]
                center = (width / 2. + self.selection_origin[0],
                          height / 2. + self.selection_origin[1], 0)

                if self.selection_id == '2':
                    self.line_pos = rectangle_vertice(center, height, width)
                    self.line.set_data(np.array(self.line_pos))

                if self.selection_id == '3':
                    self.line_pos = ellipse_vertice(
                        center,
                        radius=(np.abs(width / 2.), np.abs(height / 2.)),
                        start_angle=0.,
                        span_angle=360.,
                        num_segments=500)
                    self.line.set_data(pos=np.array(self.line_pos),
                                       connect='strip')

            if self.selection_id == '5':
                # calculate the threshold (drag length as a fraction of the
                # canvas diagonal) and call draw visual
                width = event.pos[0] - self.selection_origin[0]
                height = event.pos[1] - self.selection_origin[1]
                drag_distance = math.sqrt(width ** 2 + height ** 2)
                canvas_diag = math.sqrt(self.canvas.size[0] ** 2 +
                                        self.canvas.size[1] ** 2)
                self.draw_floodfill_visual(drag_distance / canvas_diag)
class DemoScene(QtGui.QWidget):
    """Qt widget that renders a FITS data cube as a translucent volume and
    lets the user highlight voxels with lasso / rectangle / ellipse / pick
    selections drawn over the 2D canvas.

    Press a key from ``selection_pool`` ('1'-'4') to toggle between camera
    (view) mode and the matching selection mode, then drag with the left
    mouse button to select.

    Parameters
    ----------
    keys : str
        Keyboard shortcut mode forwarded to ``scene.SceneCanvas``.
    """

    def __init__(self, keys='interactive'):
        super(DemoScene, self).__init__()

        # Layout and canvas creation
        box = QtGui.QVBoxLayout(self)
        self.resize(800, 600)
        self.setLayout(box)
        self.canvas = scene.SceneCanvas(keys=keys)
        box.addWidget(self.canvas.native)

        # Route canvas events to the selection handlers below.
        self.canvas.events.mouse_press.connect(self.on_mouse_press)
        self.canvas.events.mouse_release.connect(self.on_mouse_release)
        self.canvas.events.mouse_move.connect(self.on_mouse_move)
        self.canvas.events.key_press.connect(self.on_key_press)

        # Setup some defaults
        self.mesh = None
        self.selected = []  # current selection: boolean mask or index array
        self.white = (1.0, 1.0, 1.0, 1.0)
        self.black = (0.0, 0.0, 0.0, 0.0)

        # Camera
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.cameras.TurntableCamera(elevation=25,
                                                         azimuth=20,
                                                         distance=2.0,
                                                         center=(0, 0, 0))

        # Data: load the cube and replace NaNs so min/max are well defined.
        fitsdata = pyfits.open('l1448_13co.fits')
        vol1 = np.nan_to_num(fitsdata[0].data)
        self.vol_data = vol1

        # The transpose solves the coordinate mismatch between the volume
        # visual's input data and its rendering: the displayed volume is
        # the transform (self.tr) of the *transposed* input, so selections
        # must be computed against 'what we see' on screen rather than the
        # raw input array.
        new_pos = np.transpose(vol1)

        # TODO: replace the min threshold with real settings in Glue UI
        min_threshold = np.min(self.vol_data)
        self.pos_data = np.argwhere(
            new_pos >= min_threshold)  # get voxel positions

        grays = get_translucent_cmap(1, 1, 1)

        self.volume_pool = [(vol1, (1, 6), grays)]
        self.volume = MultiVolume(self.volume_pool)
        # Center the cube on the origin (axis order reversed vs. the array).
        self.trans = [
            -vol1.shape[2] / 2., -vol1.shape[1] / 2., -vol1.shape[0] / 2.
        ]
        self.volume.transform = scene.STTransform(translate=self.trans)
        self.view.add(self.volume)

        self.tr = self.volume.node_transform(self.view)  # ChainTransform

        # On-screen mode instruction
        self.text = scene.visuals.Text('',
                                       color='white',
                                       pos=(self.canvas.size[0] / 4.0, 20),
                                       parent=self.canvas.scene)

        # Add a 3D axis to keep us oriented
        axis = scene.visuals.XYZAxis(parent=self.view.scene)

        # Yellow outline used to draw the selection shape on the canvas.
        self.line_pos = []
        self.line = scene.visuals.Line(color='yellow',
                                       method='gl',
                                       parent=self.canvas.scene)

        # Selection state
        self.selection_flag = False
        self.selection_pool = {
            '1': 'lasso',
            '2': 'rectangle',
            '3': 'ellipse',
            '4': 'pick'
        }
        self.selection_id = '1'  # default as 1
        self.selection_origin = (0, 0)

    def event_connect(self, flag):
        """Toggle camera interaction.

        While a selection mode is active (``flag`` is True) the camera's
        mouse handlers are disconnected so drags draw shapes instead of
        rotating the view; they are reconnected when returning to view mode.
        """
        events = self.view.camera._viewbox.events
        handler = self.view.camera.viewbox_mouse_event
        if flag:
            events.mouse_move.disconnect(handler)
            events.mouse_press.disconnect(handler)
            events.mouse_release.disconnect(handler)
            events.mouse_wheel.disconnect(handler)
        else:
            events.mouse_move.connect(handler)
            events.mouse_press.connect(handler)
            events.mouse_release.connect(handler)
            events.mouse_wheel.connect(handler)

    def mark_selected(self):
        """Add a red overlay volume containing only the selected voxels."""
        reds = get_translucent_cmap(1, 0, 0)

        # BUGFIX: np.transpose returns a *view*, so zeroing the unselected
        # voxels with np.place used to destroy self.vol_data in place.
        # Work on a copy instead.
        select_data = np.transpose(self.vol_data).copy()
        not_select = np.logical_not(self.selected)
        np.place(select_data, not_select, 0)
        select_data = np.transpose(select_data)

        self.volume_pool.append((select_data, (1, 6), reds))

        # TODO: no set_data function available in multi_volume_visual
        self.volume._update_all_volumes(self.volume_pool)
        print('self.volume_pool', len(self.volume_pool))
        self.canvas.update()

    def on_key_press(self, event):
        """Toggle between view mode and the selection mode named by the key,
        updating the instruction text and camera event wiring."""
        if event.text in self.selection_pool.keys():
            if not self.selection_flag:
                self.text.text = 'Now is %s selection mode, press %s to switch' % (
                    self.selection_pool[event.text], event.text)
                self.selection_flag = True
            else:
                self.text.text = 'Now is view mode, press %s to switch' % event.text
                self.selection_flag = False
            self.event_connect(self.selection_flag)
            self.selection_id = event.text
            self.volume.visible = True

    def on_mouse_press(self, event):
        """Pick voxels near the cursor (mode '4') or record the drag origin
        for the shape-based selection modes."""
        if event.button == 1 and self.selection_flag:
            if self.selection_id == '4':
                # Ray intersection on the CPU: project every voxel position
                # to canvas coordinates and keep those inside a 4px box
                # around the click.
                data = self.tr.map(self.pos_data)[:, :2]  # Map coordinates
                print('data after tr.map', data)
                m1 = data > (event.pos - 4)
                m2 = data < (event.pos + 4)

                self.selected = np.argwhere(m1[:, 0] & m1[:, 1] & m2[:, 0]
                                            & m2[:, 1])
                print('self.selected is', self.selected)
                self.mark_selected()

            else:
                self.selection_origin = event.pos

    def on_mouse_release(self, event):
        """Finish a lasso/rectangle/ellipse selection: test the projected
        voxels against the drawn path, highlight them, and reset the line."""
        # BUGFIX: was "is not '4'" -- identity comparison with a str literal
        # is implementation-dependent; use inequality.
        if event.button == 1 and self.selection_flag and self.selection_id != '4':
            data = self.tr.map(self.pos_data)[:, :2]

            if self.selection_id in ['1', '2', '3']:
                selection_path = path.Path(self.line_pos, closed=True)
                mask = selection_path.contains_points(data)

                self.selected = mask
                print('mask len', len(mask))
                self.mark_selected()

                # Reset lasso
                self.line_pos = [
                ]  # TODO: Empty pos input is not allowed for line_visual
                self.line.set_data(np.array(self.line_pos))
                self.line.update()

            if self.selection_id in ['2', '3']:
                self.selection_origin = None

    def on_mouse_move(self, event):
        """Draw the lasso/rectangle/ellipse outline while dragging."""
        if event.button == 1 and event.is_dragging and self.selection_flag:
            if self.selection_id == '1':
                self.line_pos.append(event.pos)
                self.line.set_data(np.array(self.line_pos))

            if self.selection_id in ['2', '3']:
                width = event.pos[0] - self.selection_origin[0]
                height = event.pos[1] - self.selection_origin[1]
                center = (width / 2. + self.selection_origin[0],
                          height / 2. + self.selection_origin[1], 0)

                if self.selection_id == '2':
                    self.line_pos = rectangle_vertice(center, height, width)
                    self.line.set_data(np.array(self.line_pos))

                if self.selection_id == '3':
                    self.line_pos = ellipse_vertice(
                        center,
                        radius=(np.abs(width / 2.), np.abs(height / 2.)),
                        start_angle=0.,
                        span_angle=360.,
                        num_segments=500)
                    self.line.set_data(pos=np.array(self.line_pos),
                                       connect='strip')
# Example #9
class DemoScene(QtGui.QWidget):
    """Qt widget that renders a FITS data cube as a translucent volume with
    lasso / rectangle / ellipse selections, dendrogram-based picking and
    flood-fill selection.

    Press a key from ``selection_pool`` ('1'-'5') to toggle between camera
    (view) mode and the matching selection mode, then drag with the left
    mouse button.

    Parameters
    ----------
    keys : str
        Keyboard shortcut mode forwarded to ``scene.SceneCanvas``.
    """

    def __init__(self, keys='interactive'):
        super(DemoScene, self).__init__()

        # Layout and canvas creation
        box = QtGui.QVBoxLayout(self)
        self.resize(800, 600)
        self.setLayout(box)
        self.canvas = scene.SceneCanvas(keys=keys)
        box.addWidget(self.canvas.native)

        # Route canvas events to the selection handlers below.
        self.canvas.events.mouse_press.connect(self.on_mouse_press)
        self.canvas.events.mouse_release.connect(self.on_mouse_release)
        self.canvas.events.mouse_move.connect(self.on_mouse_move)
        self.canvas.events.key_press.connect(self.on_key_press)

        # Setup some defaults
        self.mesh = None
        self.selected = []  # current selection: boolean mask or index array
        self.white = (1.0, 1.0, 1.0, 1.0)
        self.black = (0.0, 0.0, 0.0, 0.0)

        # Camera
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.cameras.TurntableCamera(elevation=25,
                                                         azimuth=20,
                                                         distance=2.0,
                                                         center=(0, 0, 0))

        # Data: load the cube and replace NaNs so min/max are well defined.
        fitsdata = pyfits.open('l1448_13co.fits')
        self.vol_data = np.nan_to_num(fitsdata[0].data)

        # The transpose solves the coordinate mismatch between the volume
        # visual's input data and its rendering: the displayed volume is
        # the transform (self.tr) of the *transposed* input, so selections
        # must be computed against 'what we see' on screen rather than the
        # raw input array.
        new_pos = np.transpose(self.vol_data)

        # TODO: replace the min&max threshold with real settings in Glue UI
        self.pos_data = np.argwhere(
            new_pos >= np.min(self.vol_data))  # get voxel positions

        grays = get_translucent_cmap(1, 1, 1)

        self.volume_pool = [(self.vol_data, (1, 6), grays)]
        self.volume = MultiVolume(self.volume_pool)
        # Center the cube on the origin (axis order reversed vs. the array).
        self.trans = [
            -self.vol_data.shape[2] / 2., -self.vol_data.shape[1] / 2.,
            -self.vol_data.shape[0] / 2.
        ]
        self.volume.transform = scene.STTransform(translate=self.trans)
        self.view.add(self.volume)

        self.tr = self.volume.node_transform(self.view)  # ChainTransform

        # Second volume used to display only the selected part; hidden
        # until a pick or flood fill happens.
        self.volume1 = scene.visuals.Volume(
            self.vol_data, parent=self.view.scene)  #clim=(4, 6) for floodfill
        self.volume1.transform = scene.STTransform(translate=self.trans)
        self.volume1.visible = False

        # On-screen mode instruction
        self.text = scene.visuals.Text('',
                                       color='white',
                                       pos=(self.canvas.size[0] / 4.0, 20),
                                       parent=self.canvas.scene)

        # Add a 3D axis to keep us oriented
        axis = scene.visuals.XYZAxis(parent=self.view.scene)

        # Yellow outline used to draw the selection shape on the canvas.
        self.line_pos = []
        self.line = scene.visuals.Line(color='yellow',
                                       method='gl',
                                       parent=self.canvas.scene)

        # Selection state
        self.selection_flag = False
        self.selection_pool = {
            '1': 'lasso',
            '2': 'rectangle',
            '3': 'ellipse',
            '4': 'pick',
            '5': 'floodfill'
        }
        self.selection_id = '1'  # default as 1
        self.selection_origin = (0, 0)

    def event_connect(self, flag):
        """Toggle camera interaction.

        While a selection mode is active (``flag`` is True) the camera's
        mouse handlers are disconnected so drags draw shapes instead of
        rotating the view; they are reconnected when returning to view mode.
        """
        events = self.view.camera._viewbox.events
        handler = self.view.camera.viewbox_mouse_event
        if flag:
            events.mouse_move.disconnect(handler)
            events.mouse_press.disconnect(handler)
            events.mouse_release.disconnect(handler)
            events.mouse_wheel.disconnect(handler)
        else:
            events.mouse_move.connect(handler)
            events.mouse_press.connect(handler)
            events.mouse_release.connect(handler)
            events.mouse_wheel.connect(handler)

#================================= Functionality Functions Start ==================================#

    def mark_selected(self):
        """Add a red overlay volume containing only the selected voxels."""
        reds = get_translucent_cmap(1, 0, 0)

        # BUGFIX: np.transpose returns a *view*, so zeroing the unselected
        # voxels with np.place used to destroy self.vol_data in place.
        # Work on a copy instead.
        select_data = np.transpose(self.vol_data).copy()
        not_select = np.logical_not(self.selected)
        np.place(select_data, not_select, 0)
        select_data = np.transpose(select_data)

        print('select_data is', select_data, select_data.shape)
        maxpos = np.unravel_index(select_data.argmax(), select_data.shape)
        print('got the max pos', maxpos)
        self.volume_pool.append((select_data, (1, 6), reds))

        # TODO: no set_data function available in multi_volume_visual
        self.volume._update_all_volumes(self.volume_pool)
        print('self.volume_pool', len(self.volume_pool))
        self.canvas.update()

    def get_max_pos(self):
        """Return the (z, y, x) index of the brightest voxel whose canvas
        projection falls within a 4px box around the last mouse press.

        Raises
        ------
        ValueError
            If no voxel projects into the pick box (previously this crashed
            with an opaque ``TypeError`` when ``max_pos`` stayed ``None``).
        """
        # Ray intersection on the CPU to highlight the selected point(s)
        data = self.tr.map(self.pos_data)[:, :2]  # Map coordinates
        print('data after tr.map', data)
        m1 = data > (self.selection_origin - 4)
        m2 = data < (self.selection_origin + 4)
        max_value = 0.
        max_pos = None
        pick_selected = np.argwhere(m1[:, 0] & m1[:, 1] & m2[:, 0] & m2[:, 1])
        for item in pick_selected:
            index = np.unravel_index(item, self.vol_data.shape)
            if self.vol_data[index] > max_value:
                max_value = self.vol_data[index]
                max_pos = np.array(index).flatten()
        print('maxpos, maxvalue', max_pos, max_value)
        if max_pos is None:
            raise ValueError('no voxel found under the cursor')
        return (max_pos[0], max_pos[1], max_pos[2]
                )  # tuple argument for flood_fill_3d.cyfill()

    def draw_floodfill_visual(self, threhold):
        """Flood-fill from the brightest picked voxel and show the result.

        ``threhold`` (sic -- name kept for call compatibility) is the
        normalized intensity tolerance of the fill.
        """
        formate_data = np.asarray(self.vol_data, np.float64)
        pos = self.get_max_pos()

        selec_vol = flood_fill_3d.cyfill(
            formate_data, pos, 5,
            threhold)  # (3d data, start pos, replaced val, thresh)

        # Show only the filled region; hide the full multi-volume.
        self.volume1.set_data(selec_vol)
        self.volume1.visible = True
        self.volume.visible = False

        self.canvas.update()

    def get_dendrogram(self, max_pos):
        """Return the boolean mask of the dendrogram structure containing
        ``max_pos`` (expected order: (z, y, x)), computing and caching the
        dendrogram on disk on first use."""
        # Compute once and cache; subsequent runs just load the file.
        if not os.path.isfile('dendrogram.fits'):
            dendro = Dendrogram.compute(self.vol_data,
                                        min_value=1.0,
                                        min_delta=1,
                                        min_npix=10,
                                        verbose=True)
            dendro.save_to('dendrogram.fits')

        dendro = Dendrogram.load_from('dendrogram.fits')
        substructure = dendro.structure_at(
            max_pos)  # max_pos should be (z, y, x)
        dendro_mask = substructure.get_mask(shape=self.vol_data.shape)
        return dendro_mask


#================================= Event Functions Start ==================================#

    def on_key_press(self, event):
        """Toggle between view mode and the selection mode named by the key,
        updating the instruction text and camera event wiring."""
        if event.text in self.selection_pool.keys():
            if not self.selection_flag:
                self.text.text = 'Now is %s selection mode, press %s to switch' % (
                    self.selection_pool[event.text], event.text)
                self.selection_flag = True
            else:
                self.text.text = 'Now is view mode, press %s to switch' % event.text
                self.selection_flag = False
            self.event_connect(self.selection_flag)
            self.selection_id = event.text

    def on_mouse_press(self, event):
        """Record the press position; in pick mode ('4') show the dendrogram
        substructure containing the brightest voxel under the cursor."""
        self.selection_origin = event.pos
        if event.button == 1 and self.selection_flag:
            # dendrogram selection here
            if self.selection_id == '4':

                max_pos = self.get_max_pos()
                dendro_mask = self.get_dendrogram(max_pos)
                select_data = np.copy(self.vol_data)
                select_data[np.logical_not(dendro_mask)] = 0

                self.volume1.set_data(select_data)
                self.volume1.visible = True
                self.volume.visible = False

                self.canvas.update()

    def on_mouse_release(self, event):
        """Finish a lasso/rectangle/ellipse selection: test the projected
        voxels against the drawn path, highlight them, and reset the line."""
        # BUGFIX: was "is not '4'" -- identity comparison with a str literal
        # is implementation-dependent; use inequality.
        if event.button == 1 and self.selection_flag and self.selection_id != '4':
            data = self.tr.map(self.pos_data)[:, :2]

            if self.selection_id in ['1', '2', '3']:
                selection_path = path.Path(self.line_pos, closed=True)
                mask = selection_path.contains_points(data)

                self.selected = mask
                print('mask len', len(mask), mask)
                self.mark_selected()

                # Reset lasso
                self.line_pos = [
                ]  # TODO: Empty pos input is not allowed for line_visual
                self.line.set_data(np.array(self.line_pos))
                self.line.update()

            if self.selection_id in ['2', '3']:
                self.selection_origin = None

    def on_mouse_move(self, event):
        """Draw the selection outline while dragging; in flood-fill mode
        ('5') map the drag distance to a normalized intensity threshold."""
        if event.button == 1 and event.is_dragging and self.selection_flag:
            if self.selection_id == '1':
                self.line_pos.append(event.pos)
                self.line.set_data(np.array(self.line_pos))

            if self.selection_id in ['2', '3']:
                width = event.pos[0] - self.selection_origin[0]
                height = event.pos[1] - self.selection_origin[1]
                center = (width / 2. + self.selection_origin[0],
                          height / 2. + self.selection_origin[1], 0)

                if self.selection_id == '2':
                    self.line_pos = rectangle_vertice(center, height, width)
                    self.line.set_data(np.array(self.line_pos))

                if self.selection_id == '3':
                    self.line_pos = ellipse_vertice(
                        center,
                        radius=(np.abs(width / 2.), np.abs(height / 2.)),
                        start_angle=0.,
                        span_angle=360.,
                        num_segments=500)
                    self.line.set_data(pos=np.array(self.line_pos),
                                       connect='strip')

            if self.selection_id == '5':
                # calculate the threshold and call draw visual
                width = event.pos[0] - self.selection_origin[0]
                height = event.pos[1] - self.selection_origin[1]
                drag_distance = math.sqrt(width**2 + height**2)
                canvas_diag = math.sqrt(self.canvas.size[0]**2 +
                                        self.canvas.size[1]**2)
                # normalize the threshold between max and min value
                normalize = (np.max(self.vol_data) -
                             np.min(self.vol_data)) / canvas_diag
                self.draw_floodfill_visual(drag_distance * normalize)
# Example #10
# Interactive canvas with an on-screen FPS counter.
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
canvas.measure_fps()

# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()

# Set whether we are emulating a 3D texture
emulate_texture = False

reds = get_translucent_cmap(1, 0, 0)
blues = get_translucent_cmap(0, 0, 1)

# Create the volume visuals, only one is visible.
# NOTE(review): overlays the cube (blue) with its fully reversed copy (red);
# `vol1` is defined earlier in the file -- not visible in this chunk.
volumes = [(vol1, None, blues), (vol1[::-1, ::-1, ::-1], None, reds)]
volume1 = MultiVolume(volumes,
                      parent=view.scene,
                      threshold=0.225,
                      emulate_texture=emulate_texture)
volume1.transform = scene.STTransform(translate=(64, 64, 0))

# Create three cameras (Fly, Turntable and Arcball)
fov = 60.
cam2 = scene.cameras.TurntableCamera(parent=view.scene,
                                     fov=fov,
                                     name='Turntable')
view.camera = cam2  # Select turntable at first

canvas.update()

if __name__ == '__main__':
    print(__doc__)
    app.run()