Example #1
    def compute(self, data, view=None):

        left = self._left
        right = self._right

        if not isinstance(self._left, numbers.Number):
            left = data[self._left, view]
        if not isinstance(self._right, numbers.Number):
            right = data[self._right, view]

        # As described in more detail in ComponentLink.compute, we can
        # 'unbroadcast' the arrays to ensure a minimal operation

        original_shape = None

        if isinstance(left, np.ndarray):
            original_shape = left.shape
            left = unbroadcast(left)

        if isinstance(right, np.ndarray):
            original_shape = right.shape
            right = unbroadcast(right)

        if original_shape is not None:
            left, right = np.broadcast_arrays(left, right)

        result = self._op(left, right)

        if original_shape is None:
            return result
        else:
            return broadcast_to(result, original_shape)
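
The binary-link pattern above relies on two helpers from glue.utils: unbroadcast, which collapses any dimension that numpy broadcasting has filled with repeated values, and broadcast_to, which restores the original shape afterwards. As a minimal sketch (using stride tricks; the packaged implementation may differ in detail), a broadcast dimension is exactly one whose stride is 0:

import numpy as np
from numpy.lib.stride_tricks import as_strided

def unbroadcast(array):
    # A dimension created by broadcasting has stride 0: every element along
    # it is the same value, so it can be collapsed to length 1 losslessly.
    if array.ndim == 0:
        return array
    new_shape = np.where(np.array(array.strides) == 0, 1, array.shape)
    return as_strided(array, shape=new_shape)

# For the purposes of these examples, glue's broadcast_to can be treated as
# numpy's:
broadcast_to = np.broadcast_to

# Round trip: a (1000, 5) broadcast view collapses to (1, 5) and back.
full = np.broadcast_to(np.arange(5.), (1000, 5))
small = unbroadcast(full)
assert small.shape == (1, 5)
assert np.array_equal(np.broadcast_to(small, full.shape), full)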
Example #2
def points_inside_poly(x, y, vx, vy):

    if x.dtype.kind == 'M' and vx.dtype.kind == 'M':
        vx = vx.astype(x.dtype).astype(float)
        x = x.astype(float)

    if y.dtype.kind == 'M' and vy.dtype.kind == 'M':
        vy = vy.astype(y.dtype).astype(float)
        y = y.astype(float)

    original_shape = x.shape

    x = unbroadcast(x)
    y = unbroadcast(y)

    x = x.astype(float)
    y = y.astype(float)

    x, y = np.broadcast_arrays(x, y)

    reduced_shape = x.shape

    x = x.flat
    y = y.flat

    from matplotlib.path import Path
    p = Path(np.column_stack((vx, vy)))

    keep = ((x >= np.min(vx)) &
            (x <= np.max(vx)) &
            (y >= np.min(vy)) &
            (y <= np.max(vy)))

    inside = np.zeros(len(x), bool)

    x = x[keep]
    y = y[keep]

    coords = np.column_stack((x, y))

    inside[keep] = p.contains_points(coords).astype(bool)

    # Chained fancy indexing (inside[keep][~good] = ...) would assign into a
    # temporary copy and be lost, so we combine the masks with &= instead.
    good = np.isfinite(x) & np.isfinite(y)
    inside[keep] &= good

    inside = inside.reshape(reduced_shape)
    inside = broadcast_to(inside, original_shape)

    return inside
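
A hedged usage sketch for the function above (the triangle and grid are made up): when x and y are broadcast views that share constant axes, the function only tests the points it has to. Here both coordinates are constant along the first axis of a (50, 100, 200) cube, so only one 100 x 200 plane of points is actually tested before the mask is broadcast back out:

import numpy as np

# Vertices of a triangle (illustrative values)
vx = np.array([0.0, 1.0, 0.5])
vy = np.array([0.0, 0.0, 1.0])

# x and y vary only along the last two axes, as celestial coordinates do in
# a simple spectral cube; no (50, 100, 200) copies are made.
x = np.broadcast_to(np.linspace(0, 1, 200), (50, 100, 200))
y = np.broadcast_to(np.linspace(0, 1, 100)[:, np.newaxis], (50, 100, 200))

mask = points_inside_poly(x, y, vx, vy)
assert mask.shape == (50, 100, 200)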
Example #3
    def compute(self, data, view=None):
        """
        For a given data set, compute the component comp_to given the data
        associated with each comp_from and the ``using`` function

        This raises an :class:`glue.core.exceptions.IncompatibleAttribute` if the
        data set doesn't have all the ComponentIDs needed for the transformation

        Parameters
        ----------
        data : `~glue.core.data.Data`
            The data set to use
        view : `None` or `slice` or `tuple`
            Optional view (e.g. slice) through the data to use

        Returns
        -------
        result
            The data associated with comp_to component
        """

        # First we get the values of all the 'from' components.
        args = [data[join_component_view(f, view)] for f in self._from]

        # We keep track of the original shape of the arguments
        original_shape = args[0].shape
        logger.debug("shape of first argument: %s", original_shape)

        # We now unbroadcast the arrays to only compute the link with the
        # smallest number of values we can. This can help for cases where
        # the link depends only on e.g. pixel components or world coordinates
        # that themselves only depend on a subset of pixel components.
        # Unbroadcasting is the act of returning the smallest array that
        # contains all the information needed to broadcast it back to its
        # full shape.
        args = [unbroadcast(arg) for arg in args]

        # We now broadcast these to the smallest common shape in case the
        # linking functions don't know how to broadcast arrays with different
        # shapes.
        args = np.broadcast_arrays(*args)

        # We call the actual linking function
        result = self._using(*args)

        # We call asarray since link functions may return Python scalars in some cases
        result = np.asarray(result)

        # In some cases, linking functions return ravelled arrays, so we
        # fix this here.
        logger.debug("shape of result: %s", result.shape)
        if result.shape != args[0].shape:
            logger.debug("ComponentLink function %s changed shape. Fixing",
                         self._using.__name__)
            result.shape = args[0].shape

        # Finally we broadcast the final result to desired shape
        result = broadcast_to(result, original_shape)

        return result
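
The economy of the unbroadcast/compute/re-broadcast pattern is easy to see in isolation. This illustrative sketch (plain numpy, not glue's link machinery, using the unbroadcast sketch from earlier) feeds in an argument that is constant along three of its four axes, so the stand-in link function only ever sees two values instead of 120:

import numpy as np

def expensive(x):
    return x ** 2  # stand-in for a costly link function

full = np.broadcast_to(np.arange(2.).reshape(2, 1, 1, 1), (2, 3, 4, 5))

small = unbroadcast(full)     # shape (2, 1, 1, 1): 2 values, not 120
result = np.broadcast_to(expensive(small), full.shape)

assert result.shape == (2, 3, 4, 5)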
Example #4
def test_efficiency():

    def add(x, y):
        # Make sure we don't benefit from broadcasting here
        return x.copy() + y.copy()

    data = Data(x=np.ones((2, 3, 4, 5)), y=np.ones((2, 3, 4, 5)))

    for i, from_ids in enumerate(([data.id['x'], data.id['y']],
                                  data.world_component_ids[:2],
                                  data.pixel_component_ids[:2])):

        if i == 0:
            expected_shape = (2, 3, 4, 5)
        else:
            expected_shape = (2, 3, 1, 1)

        for cls in [ComponentLink, BinaryComponentLink]:

            if cls is ComponentLink:
                link = ComponentLink(from_ids, ComponentID('test'), using=add)
            else:
                link = BinaryComponentLink(from_ids[0], from_ids[1], add)

            result = link.compute(data)
            assert result.shape == (2, 3, 4, 5)
            assert unbroadcast(result).shape == expected_shape
Example #5
def test_efficiency():
    def add(x, y):
        # Make sure we don't benefit from broadcasting here
        return x.copy() + y.copy()

    data = Data(x=np.ones((2, 3, 4, 5)),
                y=np.ones((2, 3, 4, 5)),
                coords=IdentityCoordinates(n_dim=4))

    for i, from_ids in enumerate(([data.id['x'], data.id['y']],
                                  data.world_component_ids[:2],
                                  data.pixel_component_ids[:2])):

        if i == 0:
            expected_shape = (2, 3, 4, 5)
        else:
            expected_shape = (2, 3, 1, 1)

        for cls in [ComponentLink, BinaryComponentLink]:

            if cls is ComponentLink:
                link = ComponentLink(from_ids, ComponentID('test'), using=add)
            else:
                link = BinaryComponentLink(from_ids[0], from_ids[1], add)

            result = link.compute(data)
            assert result.shape == (2, 3, 4, 5)
            assert unbroadcast(result).shape == expected_shape
Example #6
def test_efficient_pixel_to_pixel_simple():

    # Simple test to make sure that when WCS only returns one world coordinate
    # this still works correctly (since this requires special treatment behind
    # the scenes).

    wcs1 = WCS(naxis=2)
    wcs1.wcs.ctype = 'DEC--TAN', 'RA---TAN'
    wcs1.wcs.set()

    wcs2 = WCS(naxis=2)
    wcs2.wcs.ctype = 'GLON-CAR', 'GLAT-CAR'
    wcs2.wcs.set()

    # First try with scalars
    x, y = efficient_pixel_to_pixel(wcs1, wcs2, 1, 2)
    assert x.shape == ()
    assert y.shape == ()

    # Now try with broadcasted arrays
    x = np.linspace(10, 20, 10)
    y = np.linspace(10, 20, 20)
    Y1, X1 = np.meshgrid(y, x, indexing='ij', copy=False)
    Y2, X2 = efficient_pixel_to_pixel(wcs1, wcs2, X1, Y1)

    # The final arrays should have the correct shape
    assert X2.shape == (20, 10)
    assert Y2.shape == (20, 10)

    # and there are no efficiency gains here since the celestial axes are correlated
    assert unbroadcast(X2).shape == (20, 10)
Example #7
    def pixel2world_single_axis(self, *pixel, **kwargs):
        """
        Convert pixel to world coordinates, preserving input type/shape.

        This is a wrapper around pixel2world which returns the result for just
        one axis, and also determines whether the calculation can be sped up
        if broadcasting is present in the input arrays.

        Parameters
        ----------
        *pixel : scalars, lists, or Numpy arrays
            The pixel coordinates (0-based) to convert
        axis : int
            The world axis for which to return coordinates. This keyword is
            required; restricting the computation to a single axis makes it
            much more efficient.

        Returns
        -------
        world : `numpy.ndarray`
            The world coordinates for the requested axis
        """

        # 'axis' is effectively keyword-only; it is read from kwargs because
        # Python 2 has no keyword-only argument syntax.
        axis = kwargs.get('axis', None)

        if axis is None:
            raise ValueError("axis needs to be set")

        if np.size(pixel[0]) == 0:
            return np.array([], dtype=float)

        original_shape = pixel[0].shape
        pixel_new = []

        # NOTE: the axis passed to this function is the WCS axis not the Numpy
        # axis, so we need to convert it as needed.
        dep_axes = self.dependent_axes(len(pixel) - 1 - axis)
        for ip, p in enumerate(pixel):
            if (len(pixel) - 1 - ip) in dep_axes:
                pixel_new.append(unbroadcast(p))
            else:
                pixel_new.append(p.flat[0])
        pixel = np.broadcast_arrays(*pixel_new)

        result = self.pixel2world(*pixel)

        return broadcast_to(result[axis], original_shape)
Example #8
def world2pixel_single_axis(wcs, *world, pixel_axis=None):
    """
    Convert world to pixel coordinates, preserving input type/shape.

    This is a wrapper around world_to_pixel_values which returns the result for
    just one axis, and also determines whether the calculation can be sped up
    if broadcasting is present in the input arrays.

    Parameters
    ----------
    *world : scalars, lists, or Numpy arrays
        The world coordinates to convert
    pixel_axis : int
        The index of the pixel coordinate that is needed. This keyword is
        required.

    Returns
    -------
    pixel : `numpy.ndarray`
        The pixel coordinates for the requested axis
    """

    if pixel_axis is None:
        raise ValueError("pixel_axis needs to be set")

    if np.size(world[0]) == 0:
        return np.array([], dtype=float)

    original_shape = world[0].shape
    world_new = []

    # Now find all the world coordinates that are needed to calculate this
    # world coordinate, using the axis correlation matrix
    world_dep = wcs.axis_correlation_matrix[:, pixel_axis]

    for iw, w in enumerate(world):
        if world_dep[iw]:
            world_new.append(unbroadcast(w))
        else:
            world_new.append(w.flat[0])
    world = np.broadcast_arrays(*world_new)

    result = wcs.world_to_pixel_values(*world)

    return broadcast_to(result[pixel_axis], original_shape)
Example #9
def pixel2world_single_axis(wcs, *pixel, world_axis=None):
    """
    Convert pixel to world coordinates, preserving input type/shape.

    This is a wrapper around pixel_to_world_values which returns the result for
    just one axis, and also determines whether the calculation can be sped up
    if broadcasting is present in the input arrays.

    Parameters
    ----------
    *pixel : scalars, lists, or Numpy arrays
        The pixel coordinates (0-based) to convert
    world_axis : int
        The index of the world coordinate that is needed. This keyword is
        required.

    Returns
    -------
    world : `numpy.ndarray`
        The world coordinates for the requested axis
    """

    if world_axis is None:
        raise ValueError("world_axis needs to be set")

    if np.size(pixel[0]) == 0:
        return np.array([], dtype=float)

    original_shape = pixel[0].shape
    pixel_new = []

    # Now find all the pixel coordinates that are needed to calculate this
    # world coordinate, using the axis correlation matrix
    pixel_dep = wcs.axis_correlation_matrix[world_axis, :]

    for ip, p in enumerate(pixel):
        if pixel_dep[ip]:
            pixel_new.append(unbroadcast(p))
        else:
            pixel_new.append(p.flat[0])
    pixel = np.broadcast_arrays(*pixel_new)

    result = wcs.pixel_to_world_values(*pixel)

    return broadcast_to(result[world_axis], original_shape)
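
A possible usage sketch for the function above, assuming an APE 14-style astropy WCS (which provides pixel_to_world_values and axis_correlation_matrix) and the unbroadcast/broadcast_to sketch from earlier:

import numpy as np
from astropy.wcs import WCS

wcs = WCS(naxis=2)
wcs.wcs.ctype = 'RA---TAN', 'DEC--TAN'
wcs.wcs.set()

# Broadcast views of the pixel grid - no full (4, 5) copies are made
px = np.broadcast_to(np.linspace(0, 10, 5), (4, 5))
py = np.broadcast_to(np.linspace(0, 10, 4)[:, np.newaxis], (4, 5))

ra = pixel2world_single_axis(wcs, px, py, world_axis=0)
assert ra.shape == (4, 5)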
Example #10
def efficient_pixel_to_pixel(wcs1, wcs2, *inputs):
    """
    Wrapper that performs a pixel -> world -> pixel transformation with two
    WCS instances, un-broadcasting arrays whenever possible for efficiency.
    """

    # Shortcut for scalars
    if np.isscalar(inputs[0]):
        world_outputs = wcs1.pixel_to_world(*inputs)
        if not isinstance(world_outputs, (tuple, list)):
            world_outputs = (world_outputs, )
        return wcs2.world_to_pixel(*world_outputs)

    # Remember original shape
    original_shape = inputs[0].shape

    matrix = pixel_to_pixel_correlation_matrix(wcs1, wcs2)
    split_info = split_matrix(matrix)

    outputs = [None] * wcs2.pixel_n_dim

    for (pixel_in_indices, pixel_out_indices) in split_info:

        pixel_inputs = []
        for ipix in range(wcs1.pixel_n_dim):
            if ipix in pixel_in_indices:
                pixel_inputs.append(unbroadcast(inputs[ipix]))
            else:
                pixel_inputs.append(inputs[ipix].flat[0])

        pixel_inputs = np.broadcast_arrays(*pixel_inputs)

        world_outputs = wcs1.pixel_to_world(*pixel_inputs)
        if not isinstance(world_outputs, (tuple, list)):
            world_outputs = (world_outputs, )
        pixel_outputs = wcs2.world_to_pixel(*world_outputs)

        for ipix in range(wcs2.pixel_n_dim):
            if ipix in pixel_out_indices:
                outputs[ipix] = broadcast_to(pixel_outputs[ipix],
                                             original_shape)

    return outputs
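
pixel_to_pixel_correlation_matrix and split_matrix are helpers not shown on this page; the comment sketch below only illustrates the partitioning they are assumed to perform, based on the behaviour exercised by the tests that follow:

# For a pair of cubes where wcs1 has (DEC, FREQ, RA) pixel axes and wcs2 has
# (GLON, GLAT, FREQ), the celestial axes map onto each other while FREQ maps
# only to FREQ, so a (pixel-out x pixel-in) correlation matrix such as
#
#     [[True,  False, True ],     # GLON <- DEC, RA
#      [True,  False, True ],     # GLAT <- DEC, RA
#      [False, True,  False]]     # FREQ <- FREQ
#
# would split into two independent (inputs, outputs) groups, ([0, 2], [0, 1])
# and ([1], [2]), so each group is transformed using only the pixel inputs
# it actually depends on.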
Example #11
def test_efficient_pixel_to_pixel():

    wcs1 = WCS(naxis=3)
    wcs1.wcs.ctype = 'DEC--TAN', 'FREQ', 'RA---TAN'
    wcs1.wcs.set()

    wcs2 = WCS(naxis=3)
    wcs2.wcs.ctype = 'GLON-CAR', 'GLAT-CAR', 'FREQ'
    wcs2.wcs.set()

    # First try with scalars
    x, y, z = efficient_pixel_to_pixel(wcs1, wcs2, 1, 2, 3)
    assert x.shape == ()
    assert y.shape == ()
    assert z.shape == ()

    # Now try with broadcasted arrays
    x = np.linspace(10, 20, 10)
    y = np.linspace(10, 20, 20)
    z = np.linspace(10, 20, 30)
    Z1, Y1, X1 = np.meshgrid(z, y, x, indexing='ij', copy=False)
    X2, Y2, Z2 = efficient_pixel_to_pixel(wcs1, wcs2, X1, Y1, Z1)

    # The final arrays should have the correct shape
    assert X2.shape == (30, 20, 10)
    assert Y2.shape == (30, 20, 10)
    assert Z2.shape == (30, 20, 10)

    # But behind the scenes should also be broadcasted
    assert unbroadcast(X2).shape == (30, 1, 10)
    assert unbroadcast(Y2).shape == (30, 1, 10)
    assert unbroadcast(Z2).shape == (1, 20, 1)

    # We can put the values back through the function to ensure round-tripping
    X3, Y3, Z3 = efficient_pixel_to_pixel(wcs2, wcs1, X2, Y2, Z2)

    # The round-tripped arrays should have the correct shape
    assert X3.shape == (30, 20, 10)
    assert Y3.shape == (30, 20, 10)
    assert Z3.shape == (30, 20, 10)

    # But behind the scenes should also be broadcasted
    assert unbroadcast(X3).shape == (30, 1, 10)
    assert unbroadcast(Y3).shape == (1, 20, 1)
    assert unbroadcast(Z3).shape == (30, 1, 10)

    # And these arrays should match the input
    assert_allclose(X1, X3)
    assert_allclose(Y1, Y3)
    assert_allclose(Z1, Z3)
Example #12
    def world2pixel_single_axis(self, *world, **kwargs):
        """
        Convert world to pixel coordinates, preserving input type/shape.

        This is a wrapper around world2pixel which returns the result for just
        one axis, and also determines whether the calculation can be sped up
        if broadcasting is present in the input arrays.

        Parameters
        ----------
        *world : scalars, lists, or Numpy arrays
            The world coordinates to convert
        axis : int
            The pixel axis for which to return coordinates. This keyword is
            required; restricting the computation to a single axis makes it
            much more efficient.

        Returns
        -------
        pixel : `numpy.ndarray`
            The pixel coordinates for the requested axis
        """

        # 'axis' is effectively keyword-only; it is read from kwargs because
        # Python 2 has no keyword-only argument syntax.
        axis = kwargs.get('axis', None)

        if axis is None:
            raise ValueError("axis needs to be set")

        original_shape = world[0].shape
        world_new = []
        dep_axes = self.dependent_axes(axis)
        for iw, w in enumerate(world):
            if iw in dep_axes:
                world_new.append(unbroadcast(w))
            else:
                world_new.append(w.flat[0])
        world = np.broadcast_arrays(*world_new)

        result = self.world2pixel(*world)

        return broadcast_to(result[axis], original_shape)
Example #13
def pixel_cid_to_pixel_cid_matrix(data1, data2):
    """
    Given two datasets, return a boolean matrix indicating which of the pixel
    components are linked.

    The returned matrix has the shape (data1.ndim, data2.ndim).
    """

    # For simplicity, and to avoid duplicating code used elsewhere, we rely
    # on the fact that independent coordinates are broadcast during
    # conversions.

    matrix = np.zeros((data1.ndim, data2.ndim), dtype=bool)

    for idim, pix_cid in enumerate(data1.pixel_component_ids):
        try:
            pix_coords = unbroadcast(data2[pix_cid, [slice(2)] * data2.ndim])
            matrix[idim] = np.array(pix_coords.shape) == 2
        except IncompatibleAttribute:
            pass

    return matrix
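
The [slice(2)] * data2.ndim trick above is worth spelling out: requesting just two pixels along every dimension and unbroadcasting the result exposes which dimensions the converted coordinate really depends on, because any dimension that was merely broadcast collapses to length 1. A minimal numpy illustration, using the unbroadcast sketch from earlier:

import numpy as np

# A coordinate over a (2, 2) corner that depends only on the first axis:
coord = np.broadcast_to(np.arange(2.)[:, np.newaxis], (2, 2))

print(np.array(unbroadcast(coord).shape) == 2)   # [ True False]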
Example #14
def compute_fixed_resolution_buffer(data, bounds, target_data=None, target_cid=None,
                                    subset_state=None, broadcast=True, cache_id=None):
    """
    Get a fixed-resolution buffer for a dataset.

    Parameters
    ----------
    data : `~glue.core.Data`
        The dataset from which to extract a fixed resolution buffer
    bounds : list
        The list of bounds for the fixed resolution buffer. This list should
        have as many items as there are dimensions in ``target_data``. Each
        item should either be a scalar value, or a tuple of ``(min, max, nsteps)``.
    target_data : `~glue.core.Data`, optional
        The data in whose frame of reference the bounds are defined. Defaults
        to ``data``.
    target_cid : `~glue.core.component_id.ComponentID`, optional
        If specified, gives the component ID giving the component to use for the
        data values. Alternatively, use ``subset_state`` to get a subset mask.
    subset_state : `~glue.core.subset.SubsetState`, optional
        If specified, gives the subset state for which to compute a mask.
        Alternatively, use ``target_cid`` if you want to get data values.
    broadcast : bool, optional
        If `True`, then if a dimension in ``target_data`` for which ``bounds``
        is not a scalar does not affect any of the dimensions in ``data``,
        the final array will be effectively broadcast along this dimension;
        otherwise an error will be raised.
    cache_id : str, optional
        If specified, the resulting array is cached under this ID together
        with a hash of the call arguments, and re-used on subsequent calls
        when possible.
    """

    if target_data is None:
        target_data = data

    if target_cid is None and subset_state is None:
        raise ValueError("Either target_cid or subset_state should be specified")

    if target_cid is not None and subset_state is not None:
        raise ValueError("Either target_cid or subset_state should be specified (not both)")

    # If cache_id is specified, we keep a cached version of the resulting array
    # indexed by cache_id as well as a hash formed of the call arguments to this
    # function. We then check if the resulting array already exists in the cache.

    if cache_id is not None:

        if subset_state is None:
            # Use uuid for component ID since otherwise component IDs don't return
            # False when comparing two different CIDs (instead they return a subset state).
            # For bounds we use a special wrapper that can identify wildcards.
            current_array_hash = (data, bounds, target_data, target_cid.uuid, broadcast)
        else:
            current_array_hash = (data, bounds, target_data, subset_state, broadcast)

        current_pixel_hash = (data, target_data)

        if cache_id in ARRAY_CACHE:
            if ARRAY_CACHE[cache_id]['hash'] == current_array_hash:
                return ARRAY_CACHE[cache_id]['array']

        # To save time later, if the pixel cache doesn't match at the level of the
        # data and target_data, we just reset the cache.
        if cache_id in PIXEL_CACHE:
            if PIXEL_CACHE[cache_id]['hash'] != current_pixel_hash:
                PIXEL_CACHE.pop(cache_id)

    # Start off by generating arrays of coordinates in the original dataset
    pixel_coords = [np.linspace(*bound) if isinstance(bound, tuple) else bound for bound in bounds]
    pixel_coords = np.meshgrid(*pixel_coords, indexing='ij', copy=False)

    # Keep track of the original shape of these arrays
    original_shape = pixel_coords[0].shape

    # Now loop through the dimensions of 'data' to find the corresponding
    # coordinates in the frame of view of this dataset.

    translated_coords = []
    dimensions_all = []

    invalid_all = np.zeros(original_shape, dtype=bool)

    for ipix, pix in enumerate(data.pixel_component_ids):

        # At this point, if cache_id is in PIXEL_CACHE, we know that data and
        # target_data match so we just check the bounds. Note that the bounds
        # include the AnyScalar wildcard for any dimensions that don't impact
        # the pixel coordinates here. We do this so that we don't have to
        # recompute the pixel coordinates when e.g. slicing through cubes.

        if cache_id in PIXEL_CACHE and ipix in PIXEL_CACHE[cache_id] and PIXEL_CACHE[cache_id][ipix]['bounds'] == bounds:

            translated_coord = PIXEL_CACHE[cache_id][ipix]['translated_coord']
            dimensions = PIXEL_CACHE[cache_id][ipix]['dimensions']
            invalid = PIXEL_CACHE[cache_id][ipix]['invalid']

        else:

            translated_coord, dimensions = translate_pixel(target_data, pixel_coords, pix)

            # The returned coordinates may often be a broadcasted array. To convert
            # the coordinates to integers and check which ones are within bounds, we
            # thus operate on the un-broadcasted array, before broadcasting it back
            # to the original shape.
            translated_coord = np.round(unbroadcast(translated_coord)).astype(int)
            invalid = (translated_coord < 0) | (translated_coord >= data.shape[ipix])

            # Since we are going to be using these coordinates later on to index an
            # array, we need the coordinates to be within the array, so we reset
            # any invalid coordinates and keep track of which pixels are invalid
            # to reset them later.
            translated_coord[invalid] = 0

            # We now populate the cache
            if cache_id is not None:

                if cache_id not in PIXEL_CACHE:
                    PIXEL_CACHE[cache_id] = {'hash': current_pixel_hash}

                PIXEL_CACHE[cache_id][ipix] = {'translated_coord': translated_coord,
                                               'dimensions': dimensions,
                                               'invalid': invalid,
                                               'bounds': bounds_for_cache(bounds, dimensions)}

        invalid_all |= invalid

        # Broadcast back to the original shape and add to the list
        translated_coords.append(broadcast_to(translated_coord, original_shape))

        # Also keep track of all the dimensions that contributed to this coordinate
        dimensions_all.extend(dimensions)

    translated_coords = tuple(translated_coords)

    # If a dimension from the target data for which bounds was set to an interval
    # did not actually contribute to any of the coordinates in data, then if
    # broadcast is set to False we raise an error, otherwise we proceed and
    # implicitly broadcast values along that dimension of the target data.

    if data is not target_data and not broadcast:
        for i in range(target_data.ndim):
            if isinstance(bounds[i], tuple) and i not in dimensions_all:
                raise IncompatibleDataException()

    # PERF: optimize further - check if we can extract a sub-region that
    # contains all the valid values.

    # Take subset_state into account, if present
    if subset_state is None:
        array = data.get_data(target_cid, view=translated_coords).astype(float)
        invalid_value = -np.inf
    else:
        array = data.get_mask(subset_state, view=translated_coords)
        invalid_value = False

    if np.any(invalid_all):
        if not array.flags.writeable:
            array = np.array(array, dtype=type(invalid_value))
        array[invalid_all] = invalid_value

    # Drop dimensions for which bounds were scalars
    slices = []
    for bound in bounds:
        if isinstance(bound, tuple):
            slices.append(slice(None))
        else:
            slices.append(0)

    array = array[tuple(slices)]

    if cache_id is not None:

        # For the bounds, we use a special wildcard for bounds that don't affect
        # the result. This will allow the cache to match regardless of the
        # value for those bounds. However, we only do this for scalar bounds.

        cache_bounds = bounds_for_cache(bounds, dimensions_all)

        current_array_hash = current_array_hash[:1] + (cache_bounds,) + current_array_hash[2:]

        ARRAY_CACHE[cache_id] = {'hash': current_array_hash, 'array': array}

    return array
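
A hedged usage sketch (the dataset and component names are made up): extract a 256 x 256 buffer from one plane of a three-dimensional glue Data object, fixing the first dimension at slice 5.

# 'cube' is assumed to be a 3-D glue Data object with a 'flux' component.
buffer = compute_fixed_resolution_buffer(
    cube,
    bounds=[5, (0, 127, 256), (0, 127, 256)],
    target_cid=cube.id['flux'])

# Scalar bounds are dropped from the output, leaving the two stepped axes.
assert buffer.shape == (256, 256)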
Example #15
    def get_sliced_data(self, view=None):

        # Getting the sliced data can be computationally expensive in some cases
        # in particular when reprojecting data/subsets. To avoid recomputing
        # these in cases where it isn't necessary, for example if the reference
        # data is a spectral cube and the layer is a 2D mosaic, we set up a
        # cache at the end of this method, and we then set up callbacks to
        # reset the cache if any of the following properties change. We need
        # to set a very high priority so that this is the first thing to happen.
        # Note that we need to set up the callbacks here as the viewer_state is
        # not always set in the __init__, for example when loading up sessions.
        # We also need to make sure that the cache gets reset when the links
        # change or when the subset changes. This is taken care of by calling
        # reset_cache in the layer artist update() method, which gets called
        # for these cases.

        if not self._viewer_callbacks_set:
            self.viewer_state.add_callback('slices', self.reset_cache_from_slices,
                                           echo_old=True, priority=100000)
            self.viewer_state.add_callback('x_att', self.reset_cache, priority=100000)
            self.viewer_state.add_callback('y_att', self.reset_cache, priority=100000)
            if self.is_callback_property('attribute'):  # this isn't the case for subsets
                self.add_callback('attribute', self.reset_cache, priority=100000)
            self._viewer_callbacks_set = True

        if self._image_cache is not None:
            if view == self._image_cache['view']:
                return self._image_cache['image']

        # In the cache, we need to keep track of which slice indices should
        # cause the cache to be reset. By default, we assume that any changes
        # in slices should cause the cache to get reset, and in the reprojection
        # code below we then set up more specific conditions.
        reset_slices = True

        full_view, agg_func, transpose = self.viewer_state.numpy_slice_aggregation_transpose

        # The view should apply just to the data slice, not to all the
        # dimensions of the data - thus it should have at most two elements.

        if view is not None:

            if len(view) > 2:
                raise ValueError('view should have at most two elements')
            if len(view) == 1:
                view = view + [slice(None)]

            x_axis = self.viewer_state.x_att.axis
            y_axis = self.viewer_state.y_att.axis

            full_view[x_axis] = view[1]
            full_view[y_axis] = view[0]

        # First, check whether the data is simply the reference data - if so
        # we can just use _get_image (which assumes alignment with reference_data)
        # to get the image to use.

        if self.layer.data is self.viewer_state.reference_data:
            image = self._get_image(view=tuple(full_view))
        else:

            # Second, we check whether the current data is linked pixel-wise with
            # the reference data.

            order = self.layer.data.pixel_aligned_data.get(self.viewer_state.reference_data)

            if order is not None:

                # order gives the order of the pixel components of the reference
                # data in the current data. With this we adjust the view and then
                # check that the result is a 2D array - if not, it means for example
                # that the layer is a 2D image and the reference data is a 3D cube
                # and that we are not slicing one of the dimensions in the 3D cube
                # that is also in the 2D image, resulting in a 1D array (which
                # doesn't make sense to show).

                full_view = [full_view[idx] for idx in order]
                image = self._get_image(view=tuple(full_view))

                if image.ndim != 2:
                    raise IncompatibleDataException()
                else:
                    # Now check whether we need to transpose the image - we need
                    # to update this since the previously defined ``transpose``
                    # value assumed data in the order of the reference data
                    x_axis = self.viewer_state.x_att.axis
                    y_axis = self.viewer_state.y_att.axis
                    transpose = order.index(x_axis) < order.index(y_axis)

            else:

                # Now the real fun begins! The pixel grids are not lined up. Fun
                # times!

                # Let's make sure there are no AggregateSlice variables in
                # the view as we can't deal with this currently
                if any(isinstance(v, AggregateSlice) for v in full_view):
                    raise IncompatibleDataException()
                else:
                    agg_func = None

                # Start off by finding all the pixel coordinates of the current
                # view in the reference frame of the current layer data. In
                # principle we could do something as simple as:
                #
                #   pixel_coords = [self.viewer_state.reference_data[pix, full_view]
                #                   for pix in self.layer.pixel_component_ids]
                #   coords = [np.round(p.ravel()).astype(int) for p in pixel_coords]
                #
                # However this is sub-optimal because in reality some of these
                # pixel coordinate conversions won't change when the view is
                # changed (e.g. when a slice index changes). We therefore
                # cache each transformed pixel coordinate.

                if self._pixel_cache is None:
                    # The cache hasn't been set yet or has been reset so we
                    # initialize it here.
                    self._pixel_cache = {'reset_slices': [None] * self.layer.ndim,
                                         'coord': [None] * self.layer.ndim,
                                         'shape': [None] * self.layer.ndim,
                                         'view': None}

                coords = []

                sub_data_view = [slice(0, 2)] * self.viewer_state.reference_data.ndim

                for ipix, pix in enumerate(self.layer.pixel_component_ids):

                    if self._pixel_cache['view'] != view or self._pixel_cache['coord'][ipix] is None:

                        # Start off by finding all the pixel coordinates of the current
                        # view in the reference frame of the current layer data.
                        pixel_coord = self.viewer_state.reference_data[pix, full_view]
                        coord = np.round(pixel_coord.ravel()).astype(int)

                        # Now update cache - basically check which dimensions in
                        # the output of the transformation rely on broadcasting.
                        # The 'reset_slices' item is a list that indicates
                        # whether the cache should be reset when the index along
                        # a given dimension changes.
                        sub_data = self.viewer_state.reference_data[pix, sub_data_view]
                        sub_data = unbroadcast(sub_data)
                        self._pixel_cache['reset_slices'][ipix] = [x > 1 for x in sub_data.shape]
                        self._pixel_cache['coord'][ipix] = coord
                        self._pixel_cache['shape'][ipix] = pixel_coord.shape
                        original_shape = pixel_coord.shape

                    else:

                        coord = self._pixel_cache['coord'][ipix]
                        original_shape = self._pixel_cache['shape'][ipix]

                    coords.append(coord)

                self._pixel_cache['view'] = view

                # TODO: add test when image is smaller than cube

                # We now do a nearest-neighbor interpolation. We don't use
                # map_coordinates because it is picky about array endian-ness
                # and if we just use normal Numpy slicing we can preserve the
                # data type (and avoid memory copies)
                keep = np.ones(len(coords[0]), dtype=bool)
                image = np.zeros(len(coords[0])) * np.nan
                for icoord, coord in enumerate(coords):
                    keep[(coord < 0) | (coord >= self.layer.shape[icoord])] = False
                coords = [coord[keep] for coord in coords]
                image[keep] = self._get_image(view=tuple(coords))

                # Finally convert array back to a 2D array
                image = image.reshape(original_shape)

                # Determine which slice indices should cause the cache to get
                # reset and the image to be re-projected.

                reset_slices = []
                single_pixel = (0,) * self.layer.ndim
                for pix in self.viewer_state.reference_data.pixel_component_ids:
                    try:
                        self.layer[pix, single_pixel]
                        reset_slices.append(True)
                    except IncompatibleAttribute:
                        reset_slices.append(False)

        # Apply aggregation functions if needed

        if agg_func is None:

            if image.ndim != 2:
                raise IncompatibleDataException()

        else:

            if image.ndim != len(agg_func):
                raise ValueError("Sliced image dimensions ({0}) does not match "
                                 "aggregation function list ({1})"
                                 .format(image.ndim, len(agg_func)))

            for axis in range(image.ndim - 1, -1, -1):
                func = agg_func[axis]
                if func is not None:
                    image = func(image, axis=axis)

            if image.ndim != 2:
                raise ValueError("Image after aggregation should have two dimensions")

        if transpose:
            image = image.transpose()

        self._image_cache = {'view': view, 'image': image, 'reset_slices': reset_slices}

        return image
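
The nearest-neighbour lookup near the end of the method can be shown standalone; this sketch (with made-up shapes and coordinates) mirrors the keep/NaN-fill pattern used above, where out-of-bounds output pixels stay NaN:

import numpy as np

layer = np.arange(12.).reshape(3, 4)          # stand-in for the layer data

# Rounded pixel coordinates of each output pixel in the layer's frame
coords = [np.array([0, 2, 5, -1]), np.array([1, 3, 0, 2])]

keep = np.ones(len(coords[0]), dtype=bool)
image = np.zeros(len(coords[0])) * np.nan
for icoord, coord in enumerate(coords):
    keep[(coord < 0) | (coord >= layer.shape[icoord])] = False

coords = [coord[keep] for coord in coords]
image[keep] = layer[tuple(coords)]            # plain integer-array indexing

print(image)                                  # [ 1. 11. nan nan]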
Beispiel #24
0
    def get_sliced_data(self, view=None):

        # Getting the sliced data can be computationally expensive in some cases
        # in particular when reprojecting data/subsets. To avoid recomputing
        # these in cases where it isn't necessary, for example if the reference
        # data is a spectral cube and the layer is a 2D mosaic, we set up a
        # cache at the end of this method, and we then set up callbacks to
        # reset the cache if any of the following properties change. We need
        # to set a very high priority so that this is the first thing to happen.
        # Note that we need to set up the callbacks here as the viewer_state is
        # not always set in the __init__, for example when loading up sessions.
        # We also need to make sure that the cache gets reset when the links
        # change or when the subset changes. This is taken care of by calling
        # reset_cache in the layer artist update() method, which gets called
        # for these cases.

        if not self._viewer_callbacks_set:
            self.viewer_state.add_callback('slices',
                                           self.reset_cache_from_slices,
                                           echo_old=True,
                                           priority=100000)
            self.viewer_state.add_callback('x_att',
                                           self.reset_cache,
                                           priority=100000)
            self.viewer_state.add_callback('y_att',
                                           self.reset_cache,
                                           priority=100000)
            if self.is_callback_property(
                    'attribute'):  # this isn't the case for subsets
                self.add_callback('attribute',
                                  self.reset_cache,
                                  priority=100000)
            self._viewer_callbacks_set = True

        if self._image_cache is not None:
            if view == self._image_cache['view']:
                return self._image_cache['image']

        # In the cache, we need to keep track of which slice indices should
        # cause the cache to be reset. By default, we assume that any changes
        # in slices should cause the cache to get reset, and in the reprojection
        # code below we then set up more specific conditions.
        reset_slices = True

        full_view, agg_func, transpose = self.viewer_state.numpy_slice_aggregation_transpose

        # The view should be that which should just be applied to the data
        # slice, not to all the dimensions of the data - thus it should have at
        # most two dimension

        if view is not None:

            if len(view) > 2:
                raise ValueError('view should have at most two elements')
            if len(view) == 1:
                view = view + [slice(None)]

            x_axis = self.viewer_state.x_att.axis
            y_axis = self.viewer_state.y_att.axis

            full_view[x_axis] = view[1]
            full_view[y_axis] = view[0]

        # First, check whether the data is simply the reference data - if so
        # we can just use _get_image (which assumed alignment with reference_data)
        # to get the image to use.

        if self.layer.data is self.viewer_state.reference_data:
            image = self._get_image(view=tuple(full_view))
        else:

            # Second, we check whether the current data is linked pixel-wise with
            # the reference data.

            order = self.layer.data.pixel_aligned_data.get(
                self.viewer_state.reference_data)

            if order is not None:

                # order gives the order of the pixel components of the reference
                # data in the current data. With this we adjust the view and then
                # check that the result is a 2D array - if not, it means for example
                # that the layer is a 2D image and the reference data is a 3D cube
                # and that we are not slicing one of the dimensions in the 3D cube
                # that is also in the 2D image, resulting in a 1D array (which it
                # doesn't make sense to show.

                full_view = [full_view[idx] for idx in order]
                image = self._get_image(view=tuple(full_view))

                if image.ndim != 2:
                    raise IncompatibleDataException()
                else:
                    # Now check whether we need to transpose the image - we need
                    # to update this since the previously defined ``tranpose``
                    # value assumed data in the order of the reference data
                    x_axis = self.viewer_state.x_att.axis
                    y_axis = self.viewer_state.y_att.axis
                    transpose = order.index(x_axis) < order.index(y_axis)

            else:

                # Now the real fun begins! The pixel grids are not lined up. Fun
                # times!

                # Let's make sure there are no AggregateSlice variables in
                # the view as we can't deal with this currently
                if any(isinstance(v, AggregateSlice) for v in full_view):
                    raise IncompatibleDataException()
                else:
                    agg_func = None

                # Start off by finding all the pixel coordinates of the current
                # view in the reference frame of the current layer data. In
                # principle we could do something as simple as:
                #
                #   pixel_coords = [self.viewer_state.reference_data[pix, full_view]
                #                   for pix in self.layer.pixel_component_ids]
                #   coords = [np.round(p.ravel()).astype(int) for p in pixel_coords]
                #
                # However this is sub-optimal because in reality some of these
                # pixel coordinate conversions won't change when the view is
                # changed (e.g. when a slice index changes). We therefore
                # cache each transformed pixel coordinate.

                if self._pixel_cache is None:
                    # The cache hasn't been set yet or has been reset so we
                    # initialize it here.
                    self._pixel_cache = {
                        'reset_slices': [None] * self.layer.ndim,
                        'coord': [None] * self.layer.ndim,
                        'shape': [None] * self.layer.ndim,
                        'view': None
                    }

                coords = []

                sub_data_view = [slice(0, 2)
                                 ] * self.viewer_state.reference_data.ndim

                for ipix, pix in enumerate(self.layer.pixel_component_ids):

                    if self._pixel_cache['view'] != view or self._pixel_cache[
                            'coord'][ipix] is None:

                        # Start off by finding all the pixel coordinates of the current
                        # view in the reference frame of the current layer data.
                        pixel_coord = self.viewer_state.reference_data[
                            pix, full_view]
                        coord = np.round(pixel_coord.ravel()).astype(int)

                        # Now update cache - basically check which dimensions in
                        # the output of the transformation rely on broadcasting.
                        # The 'reset_slices' item is a list that indicates
                        # whether the cache should be reset when the index along
                        # a given dimension changes.
                        sub_data = self.viewer_state.reference_data[
                            pix, sub_data_view]
                        sub_data = unbroadcast(sub_data)
                        self._pixel_cache['reset_slices'][ipix] = [
                            x > 1 for x in sub_data.shape
                        ]
                        self._pixel_cache['coord'][ipix] = coord
                        self._pixel_cache['shape'][ipix] = pixel_coord.shape
                        original_shape = pixel_coord.shape

                    else:

                        coord = self._pixel_cache['coord'][ipix]
                        original_shape = self._pixel_cache['shape'][ipix]

                    coords.append(coord)

                self._pixel_cache['view'] = view

                # TODO: add test when image is smaller than cube

                # We now do a nearest-neighbor interpolation. We don't use
                # map_coordinates because it is picky about array endian-ness
                # and if we just use normal Numpy slicing we can preserve the
                # data type (and avoid memory copies)
                keep = np.ones(len(coords[0]), dtype=bool)
                image = np.zeros(len(coords[0])) * np.nan
                for icoord, coord in enumerate(coords):
                    keep[(coord < 0) |
                         (coord >= self.layer.shape[icoord])] = False
                coords = [coord[keep] for coord in coords]
                image[keep] = self._get_image(view=tuple(coords))

                # Finally convert array back to a 2D array
                image = image.reshape(original_shape)

                # Determine which slice indices should cause the cache to get
                # reset and the image to be re-projected.

                reset_slices = []
                single_pixel = (0, ) * self.layer.ndim
                for pix in self.viewer_state.reference_data.pixel_component_ids:
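                    # If the reference data's pixel coordinate can be computed
                    # for this layer, the layer depends on that dimension, so
                    # changing the slice index along it should reset the cache.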
                    try:
                        self.layer[pix, single_pixel]
                        reset_slices.append(True)
                    except IncompatibleAttribute:
                        reset_slices.append(False)

        # Apply aggregation functions if needed

        if agg_func is None:

            if image.ndim != 2:
                raise IncompatibleDataException()

        else:

            if image.ndim != len(agg_func):
                raise ValueError(
                    "Sliced image dimensions ({0}) does not match "
                    "aggregation function list ({1})".format(
                        image.ndim, len(agg_func)))

            for axis in range(image.ndim - 1, -1, -1):
                func = agg_func[axis]
                if func is not None:
                    image = func(image, axis=axis)

            if image.ndim != 2:
                raise ValueError(
                    "Image after aggregation should have two dimensions")

        if transpose:
            image = image.transpose()

        self._image_cache = {
            'view': view,
            'image': image,
            'reset_slices': reset_slices
        }

        return image
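
A note on the pattern above: the out-of-bounds handling is easier to see in
isolation. Below is a minimal, self-contained sketch (plain NumPy; the
'layer_data' array is a hypothetical stand-in for the layer) of how rounding
to integer indices, masking samples that fall outside the array, and filling
the rest with NaN amounts to a nearest-neighbor lookup without scipy's
map_coordinates:

import numpy as np

# Hypothetical 2-d layer and fractional sample positions along each axis
layer_data = np.arange(12, dtype=float).reshape(3, 4)
coords = [np.array([-0.6, 0.4, 1.2, 2.9]),   # fractional row positions
          np.array([0.1, 3.6, 2.0, 1.1])]    # fractional column positions

# Round to the nearest integer pixel (nearest-neighbor interpolation)
coords = [np.round(c).astype(int) for c in coords]

# Mask any sample that falls outside the array along either axis
keep = np.ones(len(coords[0]), dtype=bool)
for icoord, coord in enumerate(coords):
    keep[(coord < 0) | (coord >= layer_data.shape[icoord])] = False

# Out-of-bounds samples become NaN; in-bounds samples use plain NumPy
# indexing, which preserves the data type and avoids unnecessary copies
result = np.full(len(coords[0]), np.nan)
result[keep] = layer_data[tuple(coord[keep] for coord in coords)]

print(result)  # [nan nan  6. nan]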
Example #25
def compute_fixed_resolution_buffer(data,
                                    bounds,
                                    target_data=None,
                                    target_cid=None,
                                    subset_state=None,
                                    broadcast=True,
                                    cache_id=None):
    """
    Get a fixed-resolution buffer for a dataset.

    Parameters
    ----------
    data : `~glue.core.Data`
        The dataset from which to extract a fixed resolution buffer
    bounds : list
        The list of bounds for the fixed resolution buffer. This list should
        have as many items as there are dimensions in ``target_data``. Each
        item should either be a scalar value, or a tuple of ``(min, max, nsteps)``.
    target_data : `~glue.core.Data`, optional
        The data in whose frame of reference the bounds are defined. Defaults
        to ``data``.
    target_cid : `~glue.core.component_id.ComponentID`, optional
        If specified, the component ID to use for the data values.
        Alternatively, use ``subset_state`` to get a subset mask.
    subset_state : `~glue.core.subset.SubsetState`, optional
        If specified, the subset state for which to compute a mask.
        Alternatively, use ``target_cid`` if you want data values.
    broadcast : bool, optional
        If `True`, and a dimension of ``target_data`` with non-scalar
        ``bounds`` does not affect any of the dimensions in ``data``, the
        final array is effectively broadcast along that dimension; if
        `False`, an error is raised in that case.
    cache_id : str, optional
        If specified, the resulting array is cached under this ID, together
        with a hash of the call arguments, in the module-level ``ARRAY_CACHE``
        and ``PIXEL_CACHE`` dictionaries, so that repeated calls with matching
        arguments return immediately.
    """

    if target_data is None:
        target_data = data

    if target_cid is None and subset_state is None:
        raise ValueError(
            "Either target_cid or subset_state should be specified")

    if target_cid is not None and subset_state is not None:
        raise ValueError(
            "Either target_cid or subset_state should be specified (not both)")

    # If cache_id is specified, we keep a cached version of the resulting array
    # indexed by cache_id as well as a hash formed of the call arguments to this
    # function. We then check if the resulting array already exists in the cache.

    if cache_id is not None:

        if subset_state is None:
            # Use the uuid for the component ID, since comparing two different
            # CIDs with == returns a subset state rather than False. For the
            # bounds we later use a special wrapper that can identify wildcards.
            current_array_hash = (data, bounds, target_data, target_cid.uuid,
                                  broadcast)
        else:
            current_array_hash = (data, bounds, target_data, subset_state,
                                  broadcast)

        current_pixel_hash = (data, target_data)

        if cache_id in ARRAY_CACHE:
            if ARRAY_CACHE[cache_id]['hash'] == current_array_hash:
                return ARRAY_CACHE[cache_id]['array']

        # To save time later, if the pixel cache doesn't match at the level of the
        # data and target_data, we just reset the cache.
        if cache_id in PIXEL_CACHE:
            if PIXEL_CACHE[cache_id]['hash'] != current_pixel_hash:
                PIXEL_CACHE.pop(cache_id)

    # Start off by generating arrays of coordinates in the original dataset
    pixel_coords = [
        np.linspace(*bound) if isinstance(bound, tuple) else bound
        for bound in bounds
    ]
    pixel_coords = np.meshgrid(*pixel_coords, indexing='ij', copy=False)
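    # For example, bounds of [2, (0, 9, 10)] give [2, array([0., 1., ..., 9.])]
    # above, and meshgrid then broadcasts these to two arrays of shape (1, 10).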

    # Keep track of the original shape of these arrays
    original_shape = pixel_coords[0].shape

    # Now loop through the dimensions of 'data' to find the corresponding
    # coordinates in the frame of reference of this dataset.

    translated_coords = []
    dimensions_all = []

    invalid_all = np.zeros(original_shape, dtype=bool)

    for ipix, pix in enumerate(data.pixel_component_ids):

        # At this point, if cache_id is in PIXEL_CACHE, we know that data and
        # target_data match so we just check the bounds. Note that the bounds
        # include the AnyScalar wildcard for any dimensions that don't impact
        # the pixel coordinates here. We do this so that we don't have to
        # recompute the pixel coordinates when e.g. slicing through cubes.

        if (cache_id in PIXEL_CACHE and ipix in PIXEL_CACHE[cache_id]
                and PIXEL_CACHE[cache_id][ipix]['bounds'] == bounds):

            translated_coord = PIXEL_CACHE[cache_id][ipix]['translated_coord']
            dimensions = PIXEL_CACHE[cache_id][ipix]['dimensions']
            invalid = PIXEL_CACHE[cache_id][ipix]['invalid']

        else:

            translated_coord, dimensions = translate_pixel(
                target_data, pixel_coords, pix)

            # The returned coordinates may often be a broadcasted array. To convert
            # the coordinates to integers and check which ones are within bounds, we
            # thus operate on the un-broadcasted array, before broadcasting it back
            # to the original shape.
            translated_coord = np.round(unbroadcast(translated_coord)).astype(int)
            invalid = (translated_coord < 0) | (translated_coord >= data.shape[ipix])

            # Since we are going to be using these coordinates later on to index an
            # array, we need the coordinates to be within the array, so we reset
            # any invalid coordinates and keep track of which pixels are invalid
            # to reset them later.
            translated_coord[invalid] = 0

            # We now populate the cache
            if cache_id is not None:

                if cache_id not in PIXEL_CACHE:
                    PIXEL_CACHE[cache_id] = {'hash': current_pixel_hash}

                PIXEL_CACHE[cache_id][ipix] = {
                    'translated_coord': translated_coord,
                    'dimensions': dimensions,
                    'invalid': invalid,
                    'bounds': bounds_for_cache(bounds, dimensions)
                }

        invalid_all |= invalid

        # Broadcast back to the original shape and add to the list
        translated_coords.append(broadcast_to(translated_coord, original_shape))

        # Also keep track of all the dimensions that contributed to this coordinate
        dimensions_all.extend(dimensions)

    translated_coords = tuple(translated_coords)

    # If a dimension of the target data whose bounds were given as an interval
    # did not actually contribute to any of the coordinates in 'data', we
    # raise an error if broadcast is False; otherwise we proceed and
    # implicitly broadcast values along that dimension of the target data.

    if data is not target_data and not broadcast:
        for i in range(target_data.ndim):
            if isinstance(bounds[i], tuple) and i not in dimensions_all:
                raise IncompatibleDataException()

    # PERF: optimize further - check if we can extract a sub-region that
    # contains all the valid values.

    # Take subset_state into account, if present
    if subset_state is None:
        array = data.get_data(target_cid, view=translated_coords).astype(float)
        invalid_value = -np.inf
    else:
        array = data.get_mask(subset_state, view=translated_coords)
        invalid_value = False
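    # Samples that fell outside 'data' are reset to the sentinel value:
    # -inf for data values (the array was cast to float above) and False
    # for subset masks.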

    if np.any(invalid_all):
        if not array.flags.writeable:
            array = np.array(array, dtype=type(invalid_value))
        array[invalid_all] = invalid_value

    # Drop dimensions for which bounds were scalars
    slices = []
    for bound in bounds:
        if isinstance(bound, tuple):
            slices.append(slice(None))
        else:
            slices.append(0)

    array = array[tuple(slices)]

    if cache_id is not None:

        # For the bounds, we use a special wildcard for bounds that don't affect
        # the result. This will allow the cache to match regardless of the
        # value for those bounds. However, we only do this for scalar bounds.

        cache_bounds = bounds_for_cache(bounds, dimensions_all)
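        # e.g. if dimensions_all == [1, 2], bounds of [5, (0, 19, 20),
        # (0, 29, 30)] are stored as [AnyScalar(), (0, 19, 20), (0, 29, 30)],
        # which matches any scalar value along the first dimension.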

        current_array_hash = (current_array_hash[:1] + (cache_bounds,) +
                              current_array_hash[2:])

        ARRAY_CACHE[cache_id] = {'hash': current_array_hash, 'array': array}

    return array
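
As a rough usage sketch (assuming the usual glue Data class; the dataset and
component names here are illustrative): a scalar bound slices a dimension away,
while (min, max, nsteps) tuples define the sampling grid along the others.

import numpy as np
from glue.core import Data

# A hypothetical 3-d dataset, e.g. a small spectral cube
cube = Data(flux=np.random.random((10, 20, 30)), label='cube')

# Slice at pixel 5 along the first axis (scalar bound, dimension dropped)
# and sample the remaining two axes on regular (min, max, nsteps) grids
buffer = compute_fixed_resolution_buffer(
    cube,
    bounds=[5, (0, 19, 20), (0, 29, 30)],
    target_cid=cube.id['flux'])

print(buffer.shape)  # (20, 30)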