def _update_scaled_data(self, label):
    """
    Upload the (rescaled) volume data for *label* to its 3-d OpenGL texture.

    Parameters
    ----------
    label : hashable
        Key into ``self.volumes``; the entry provides the texture ``index``,
        the optional contrast limits ``clim`` (min, max), and the ``data``
        object — presumably a glue data proxy exposing
        ``compute_fixed_resolution_buffer`` — TODO confirm.
    """

    # If the data slice hasn't been set yet, we should stop here
    if self._data_bounds is None:
        return

    index = self.volumes[label]['index']
    clim = self.volumes[label].get('clim', None)
    data = self.volumes[label]['data']

    # Resolve the texture uniform once rather than re-formatting and
    # re-looking-up the name for every chunk upload below.
    texture = self.shared_program['u_volumetex_{0:d}'.format(index)]

    # With certain graphics cards, sending the data in one chunk to OpenGL
    # causes artifacts in the rendering - see e.g.
    # https://github.com/vispy/vispy/issues/1412
    # To avoid this, we process the data in chunks. Since we need to do
    # this, we can also do the copy and renormalization on the chunk to
    # avoid excessive memory usage.

    # To start off we need to tell the texture about the new shape
    texture.resize(data.shape)

    # Determine the chunk shape - the value of 128 as the per-dimension cap
    # is arbitrary but appears to work nicely. We can reduce that in future
    # if needed.

    sliced_data = data.compute_fixed_resolution_buffer(self._data_bounds)

    chunk_shape = [min(x, 128, self.resolution) for x in sliced_data.shape]

    # FIXME: shouldn't be needed!
    texture.set_data(np.zeros(self._vol_shape, dtype=np.float32))

    # Now loop over chunks

    for view in iterate_chunks(sliced_data.shape, chunk_shape=chunk_shape):

        chunk = sliced_data[view]

        # Skip empty chunks *before* doing any casting/rescaling work
        if chunk.size == 0:
            continue

        # astype always copies here, so the in-place ops below cannot
        # modify sliced_data.
        chunk = chunk.astype(np.float32)

        # Rescale to [0, 1] using the contrast limits, when provided
        if clim is not None:
            chunk -= clim[0]
            chunk *= 1 / (clim[1] - clim[0])

        # PERF: nan_to_num doesn't actually help memory usage as it runs
        # isnan internally, and it's slower, so we just use the following
        # method. In future we could do this directly with a C extension.
        chunk[np.isnan(chunk)] = 0.

        # Offset of this chunk within the full texture
        offset = tuple([s.start for s in view])

        texture.set_data(chunk, offset=offset)
Ejemplo n.º 2
0
    def contains3d(self, x, y, z):
        """
        Test whether the projected coordinates are contained in the 2d ROI.
        """

        if not self.defined():
            raise UndefinedROI

        x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)

        # The projection can significantly increase the memory usage, so
        # process the coordinates in chunks. In future we could likely use
        # e.g. vaex, dask, or other multi-threaded/fast libraries to speed
        # this and other ROI code up.

        inside = np.zeros(x.shape, dtype=bool)

        for chunk in iterate_chunks(x.shape, n_max=1000000):

            xs, ys, zs = x[chunk], y[chunk], z[chunk]

            # Homogeneous coordinates let us support perspective
            # projections as well.
            homogeneous = np.array([xs, ys, zs, np.ones(xs.shape)])

            # Homogeneous screen coordinates for this chunk
            projected = np.tensordot(self.projection_matrix,
                                     homogeneous,
                                     axes=(1, 0))

            # Divide through by w; the screen z coordinate is irrelevant here
            screen_x, screen_y = projected[:2] / projected[3]

            inside[chunk] = self.roi_2d.contains(screen_x, screen_y)

        return inside
Ejemplo n.º 3
0
Archivo: roi.py Proyecto: glue-viz/glue
    def contains3d(self, x, y, z):
        """
        Test whether the projected coordinates are contained in the 2d ROI.

        Parameters
        ----------
        x, y, z : array-like
            Coordinates to test; all three are assumed to share the same
            shape — TODO confirm against callers.

        Returns
        -------
        mask : `~numpy.ndarray` of bool
            Array with the same shape as ``x``; True where the projected
            point falls inside ``self.roi_2d``.

        Raises
        ------
        UndefinedROI
            If the ROI has not been defined yet.
        """

        if not self.defined():
            raise UndefinedROI

        x = np.asarray(x)
        y = np.asarray(y)
        z = np.asarray(z)

        # Since the projection can significantly increase the memory usage, we
        # do the following operation in chunks. In future we could likely use
        # e.g. vaex, dask, or other multi-threaded/fast libraries to speed this
        # and other ROI code up.

        mask = np.zeros(x.shape, dtype=bool)

        for slices in iterate_chunks(x.shape, n_max=1000000):

            # Work in homogeneous coordinates so we can support perspective
            # projections as well
            x_sub, y_sub, z_sub = x[slices], y[slices], z[slices]
            vertices = np.array([x_sub, y_sub, z_sub, np.ones(x_sub.shape)])

            # The following returns homogeneous screen coordinates
            # (projection_matrix is presumably 4x4 — TODO confirm)
            screen_h = np.tensordot(self.projection_matrix,
                                    vertices, axes=(1, 0))

            # Convert to screen coordinates, as we don't care about z;
            # dividing by the w component handles perspective division
            screen_x, screen_y = screen_h[:2] / screen_h[3]

            mask[slices] = self.roi_2d.contains(screen_x, screen_y)

        return mask