Example #1
    def __init__(self, keys='interactive'):
        super(DemoScene, self).__init__()
        # Layout and canvas creation
        box = QtGui.QVBoxLayout(self)
        self.resize(500, 500)
        self.setLayout(box)
        self.canvas = scene.SceneCanvas(keys=keys)
        box.addWidget(self.canvas.native)

        # Camera
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.cameras.TurntableCamera(elevation=25,
                                                         azimuth=20,
                                                         distance=2.0,
                                                         center=(0, 0, 0))

        # Data
        fitsdata = pyfits.open('l1448_13co.fits')
        vol1 = np.nan_to_num(fitsdata[0].data)
        self.vol_data = vol1
        """
        The transpose here and after is for solving the coordinate mismatch between volume visual input data and its
        rendering result. The rendered volume shown on 2D screen, or 'what we see', is through displaying transform
        (self.tr here) of 'transposed input data', thus we use 'transpose' to enable our selection focusing on 'what
        we see' on the screen rather than the real input data of volume.

        """
        new_pos = np.transpose(vol1)

        # TODO: replace the min/max thresholds with the real settings from the Glue UI
        min_threshold = np.min(self.vol_data)
        max_threshold = np.max(self.vol_data)
        self.pos_data = np.argwhere(
            new_pos >= min_threshold)  # get voxel positions
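
        # Because new_pos is the transposed cube, each row of self.pos_data is an
        # (x, y, z) triple in rendered order; the same voxel in the original cube
        # is self.vol_data[z, y, x], i.e. for any row:
        #   x, y, z = self.pos_data[0]
        #   assert new_pos[x, y, z] == self.vol_data[z, y, x]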

        grays = get_translucent_cmap(1, 1, 1)

        self.volume_pool = [(vol1, (1, 6), grays)]
        self.volume = MultiVolume(self.volume_pool)
        self.trans = [
            -vol1.shape[2] / 2., -vol1.shape[1] / 2., -vol1.shape[0] / 2.
        ]
        self.volume.transform = scene.STTransform(translate=self.trans)
        self.view.add(self.volume)

        # Add a 3D axis to keep us oriented
        axis = scene.visuals.XYZAxis(parent=self.view.scene)

        self.selection = SelectionCommon(canvas=self.canvas,
                                         view=self.view,
                                         vol_data=self.vol_data,
                                         volume=self.volume,
                                         volume_pool=self.volume_pool,
                                         pos_data=self.pos_data)
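
# The get_translucent_cmap helper used above and below is not included in this
# snippet. A minimal sketch of what it could look like, assuming it follows
# vispy's BaseColormap pattern (a single hue whose alpha tracks intensity):
from vispy.color import BaseColormap

def get_translucent_cmap(r, g, b):
    # Hypothetical re-implementation for illustration only.
    class TranslucentCmap(BaseColormap):
        glsl_map = """
        vec4 translucent_cmap(float t) {
            return vec4(%0.3f, %0.3f, %0.3f, t);
        }
        """ % (r, g, b)
    return TranslucentCmap()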
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
view = canvas.central_widget.add_view()

# Create colormaps that work well for translucent and additive volume rendering
grays = get_translucent_cmap(1, 1, 1)
reds = get_translucent_cmap(1, 0, 0)
greens = get_translucent_cmap(0, 1, 0)
blues = get_translucent_cmap(0, 0, 1)
oranges = get_translucent_cmap(1, 0.5, 0)

# Sanity-check the data range before building the volume layers
print(data.max())

# Build the volume layers: the full data cube plus four subsets
volumes = [(data, (0, 6), grays), (subset1, (0, 4), reds),
           (subset2, (0, 4), greens), (subset3, (0, 4), blues),
           (subset4, (0, 4), oranges)]
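
# Each entry above is a (data, clim, colormap) tuple; MultiVolume blends the
# layers together using their translucent colormaps.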

volume1 = MultiVolume(volumes, parent=view.scene)
volume1.transform = scene.STTransform(translate=(64, 64, 0))

view.camera = scene.cameras.TurntableCamera(parent=view.scene,
                                            fov=60.,
                                            name='Turntable')

canvas.update()
# For testing performance, force a redraw on every frame:
# @canvas.connect
# def on_draw(ev):
#     canvas.update()
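
# Note: as a standalone script this would normally finish by starting the vispy
# event loop, as Example #3 below does:
# if __name__ == '__main__':
#     from vispy import app
#     app.run()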
Example #3
    def __init__(self, keys='interactive'):
        super(DemoScene, self).__init__()

        # Layout and canvas creation
        box = QtGui.QVBoxLayout(self)
        self.resize(800, 600)
        self.setLayout(box)
        self.canvas = scene.SceneCanvas(keys=keys)
        box.addWidget(self.canvas.native)

        # Connect events
        self.canvas.events.mouse_press.connect(self.on_mouse_press)
        self.canvas.events.mouse_release.connect(self.on_mouse_release)
        self.canvas.events.mouse_move.connect(self.on_mouse_move)
        self.canvas.events.key_press.connect(self.on_key_press)

        # Setup some defaults
        self.mesh = None
        self.selected = []
        self.white = (1.0, 1.0, 1.0, 1.0)
        self.black = (0.0, 0.0, 0.0, 0.0)

        # Camera
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.cameras.TurntableCamera(elevation=90,
                                                         azimuth=0,
                                                         fov=60,
                                                         center=(0, 0, 0))

        # Data
        fitsdata = fits.open('l1448_13co.fits')
        self.vol_data = np.nan_to_num(fitsdata[0].data)
        """
        The transpose here and after is for solving the coordinate mismatch between volume visual input data and its
        rendering result. The rendered volume shown on 2D screen, or 'what we see', is through displaying transform
        (self.tr here) of 'transposed input data', thus we use 'transpose' to enable our selection focusing on 'what
        we see' on the screen rather than the real input data of volume.

        """
        new_pos = np.transpose(self.vol_data)

        # TODO: replace the min/max thresholds with the real settings from the Glue UI
        self.pos_data = np.indices(self.vol_data.shape).reshape(
            3, -1).transpose()  # all voxel indices as an (N, 3) array

        grays = get_translucent_cmap(1, 1, 1)

        self.volume_pool = [(self.vol_data, (1, 6), grays)]
        self.volume = MultiVolume(self.volume_pool)
        self.trans = [
            -self.vol_data.shape[2] / 2., -self.vol_data.shape[1] / 2.,
            -self.vol_data.shape[0] / 2.
        ]
        self.volume.transform = scene.STTransform(translate=self.trans)
        self.view.add(self.volume)

        self.tr = self.volume.get_transform(map_from='visual', map_to='canvas')
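
        # Sketch (assumption, for illustration): self.tr projects points from the
        # volume's visual coordinates to canvas coordinates, which is what lets a
        # screen-space selection be compared against voxel positions, e.g.:
        #   pts = self.tr.map(self.pos_data)      # homogeneous (N, 4) result
        #   canvas_xy = pts[:, :2] / pts[:, 3:4]  # perspective divide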

        # create a volume for showing the selected part
        self.volume1 = scene.visuals.Volume(self.vol_data,
                                            clim=(4, 6),
                                            parent=self.view.scene)
        self.volume1.transform = scene.STTransform(translate=self.trans)
        self.volume1.visible = False

        # Add a text instruction
        self.text = scene.visuals.Text('',
                                       color='white',
                                       pos=(self.canvas.size[0] / 4.0, 20),
                                       parent=self.canvas.scene)

        # Add a 3D axis to keep us oriented
        axis = scene.visuals.XYZAxis(parent=self.view.scene)

        # Set up for lasso drawing
        self.line_pos = []
        self.line = scene.visuals.Line(color='yellow',
                                       method='gl',
                                       parent=self.canvas.scene)

        # Selection
        self.selection_flag = False
        self.selection_pool = {
            '1': 'lasso',
            '2': 'rectangle',
            '3': 'ellipse',
            '4': 'pick',
            '5': 'floodfill'
        }
        self.selection_id = '1'  # default as 1
        self.selection_origin = (0, 0)
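
    # The mouse/key handlers connected in __init__ are not shown in this snippet.
    # A minimal sketch (assumption) of how on_key_press could switch the active
    # selection mode using self.selection_pool:
    #
    # def on_key_press(self, event):
    #     if event.text in self.selection_pool:
    #         self.selection_id = event.text
    #         self.text.text = 'Mode: ' + self.selection_pool[self.selection_id]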
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
canvas.measure_fps()

# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()

# Set whether we are emulating a 3D texture
emulate_texture = False

reds = get_translucent_cmap(1, 0, 0)
blues = get_translucent_cmap(0, 0, 1)

# Create the multi-layer volume visual: the cube in blue plus a reversed copy in red
volumes = [(vol1, None, blues), (vol1[::-1, ::-1, ::-1], None, reds)]
volume1 = MultiVolume(volumes,
                      parent=view.scene,
                      threshold=0.225,
                      emulate_texture=emulate_texture)
volume1.transform = scene.STTransform(translate=(64, 64, 0))

# Create three cameras (Fly, Turntable and Arcball)
fov = 60.
cam2 = scene.cameras.TurntableCamera(parent=view.scene,
                                     fov=fov,
                                     name='Turntable')
view.camera = cam2  # Select turntable at first

canvas.update()

if __name__ == '__main__':
    print(__doc__)
    app.run()