Example #1
    def update_profile(self, update_limits=True):

        if self._profile_cache is not None:
            return self._profile_cache

        if not self._viewer_callbacks_set:
            self.viewer_state.add_callback('x_att',
                                           self.reset_cache,
                                           priority=100000)
            self.viewer_state.add_callback('function',
                                           self.reset_cache,
                                           priority=100000)
            if self.is_callback_property('attribute'):
                self.add_callback('attribute',
                                  self.reset_cache,
                                  priority=100000)
            self._viewer_callbacks_set = True

        if self.viewer_state is None or self.viewer_state.x_att is None or self.attribute is None:
            raise IncompatibleDataException()

        # Check which pixel axis in the current dataset x_att corresponds to
        pix_cid = is_convertible_to_single_pixel_cid(
            self.layer, self.viewer_state.x_att_pixel)

        if pix_cid is None:
            raise IncompatibleDataException()

        # If we get here, then x_att does correspond to a single pixel axis in
        # the cube, so we now prepare a list of axes to collapse over.
        axes = tuple(i for i in range(self.layer.ndim) if i != pix_cid.axis)

        # We now get the y values for the data

        # TODO: in future we should optimize the case where the mask is much
        # smaller than the data to just average the relevant 'spaxels' in the
        # data rather than collapsing the whole cube.

        if isinstance(self.layer, Subset):
            data = self.layer.data
            subset_state = self.layer.subset_state
        else:
            data = self.layer
            subset_state = None

        profile_values = data.compute_statistic(self.viewer_state.function,
                                                self.attribute,
                                                axis=axes,
                                                subset_state=subset_state)

        if np.all(np.isnan(profile_values)):
            self._profile_cache = [], []
        else:
            axis_view = [0] * data.ndim
            axis_view[pix_cid.axis] = slice(None)
            axis_values = data[self.viewer_state.x_att, tuple(axis_view)]
            self._profile_cache = axis_values, profile_values

        if update_limits:
            self.update_limits(update_profile=False)
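The heart of this example is the collapse over every axis except the one shown on the x axis. Below is a minimal NumPy sketch of that pattern; the cube, the chosen axis, and the nanmean statistic are illustrative stand-ins, not the glue API:

import numpy as np

# Hypothetical stand-ins for the layer data and the pixel axis found above.
cube = np.random.random((4, 5, 6))   # e.g. (z, y, x)
profile_axis = 0                     # the axis kept for the profile

# Collapse over every axis except the profile axis, as in update_profile.
axes = tuple(i for i in range(cube.ndim) if i != profile_axis)
profile_values = np.nanmean(cube, axis=axes)

print(profile_values.shape)  # (4,) - one value per plane along profile_axis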
Example #2
    def add_data(self, data):

        # Check if data already exists in viewer
        if not self.allow_duplicate_data and data in self._layer_artist_container:
            return True

        if self.large_data_size is not None and data.size >= self.large_data_size:
            proceed = warn('Add large data set?', 'Data set {0:s} has {1:d} points, and '
                           'may render slowly.'.format(data.label, data.size), default='Cancel',
                           setting='show_large_data_warning')
            if not proceed:
                return False

        if data not in self.session.data_collection:
            raise IncompatibleDataException("Data not in DataCollection")

        # Create layer artist and add to container
        layer = self.get_data_layer_artist(data)

        if layer is None:
            return False

        self._layer_artist_container.append(layer)
        layer.update()

        # Add existing subsets to viewer
        for subset in data.subsets:
            self.add_subset(subset)

        self.redraw()

        return True
Example #3
    def add_data(self, data):

        # Check if data already exists in viewer
        if not self.allow_duplicate_data and data in self._layer_artist_container:
            return True

        if data not in self.session.data_collection:
            raise IncompatibleDataException("Data not in DataCollection")

        # Create layer artist and add to container
        layer = self.get_data_layer_artist(data)

        if layer is None:
            return False

        self._layer_artist_container.append(layer)
        layer.update()

        # Add existing subsets to viewer
        for subset in data.subsets:
            self.add_subset(subset)

        self.redraw()

        return True
Example #4
    def add_data(self, data):
        if data.ndim != 1:
            raise IncompatibleDataException(
                "Only 1-D data can be added to "
                "the dendrogram viewer (tried to add a {}-D "
                "dataset)".format(data.ndim))
        return super(DendrogramViewer, self).add_data(data)
Example #5
    def update_histogram(self):

        if self._histogram_cache is not None:
            return self._histogram_cache

        if not self._viewer_callbacks_set:
            self.viewer_state.add_callback('x_att',
                                           self.reset_cache,
                                           priority=100000)
            self.viewer_state.add_callback('x_log',
                                           self.reset_cache,
                                           priority=100000)
            self.viewer_state.add_callback('hist_x_min',
                                           self.reset_cache,
                                           priority=100000)
            self.viewer_state.add_callback('hist_x_max',
                                           self.reset_cache,
                                           priority=100000)
            self.viewer_state.add_callback('hist_n_bin',
                                           self.reset_cache,
                                           priority=100000)
            self._viewer_callbacks_set = True

        if (self.viewer_state is None or self.viewer_state.x_att is None
                or self.viewer_state.hist_x_min is None
                or self.viewer_state.hist_x_max is None
                or self.viewer_state.hist_n_bin is None
                or self.viewer_state.x_log is None):
            raise IncompatibleDataException()

        if isinstance(self.layer, Subset):
            data = self.layer.data
            subset_state = self.layer.subset_state
        else:
            data = self.layer
            subset_state = None

        range = sorted(
            (self.viewer_state.hist_x_min, self.viewer_state.hist_x_max))

        hist_values = data.compute_histogram(
            [self._viewer_state.x_att],
            range=[range],
            bins=[self._viewer_state.hist_n_bin],
            log=[self._viewer_state.x_log],
            subset_state=subset_state)

        # TODO: determine whether this belongs here or in the layer artist
        if isinstance(range[0], np.datetime64):
            range = [datetime64_to_mpl(range[0]), datetime64_to_mpl(range[1])]

        if self._viewer_state.x_log:
            hist_edges = np.logspace(np.log10(range[0]), np.log10(range[1]),
                                     self._viewer_state.hist_n_bin + 1)
        else:
            hist_edges = np.linspace(range[0], range[1],
                                     self._viewer_state.hist_n_bin + 1)

        self._histogram_cache = hist_edges, hist_values
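The bin edges computed at the end are spaced evenly in log10 when x_log is set and linearly otherwise. A standalone check with illustrative values:

import numpy as np

x_min, x_max, n_bin = 1.0, 100.0, 4

log_edges = np.logspace(np.log10(x_min), np.log10(x_max), n_bin + 1)
lin_edges = np.linspace(x_min, x_max, n_bin + 1)

print(log_edges)  # [  1.           3.16227766  10.          31.6227766  100. ]
print(lin_edges)  # [  1.    25.75  50.5   75.25 100.  ]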
Example #6
    def add_layer(self, layer):
        if layer.data not in self.data:
            raise IncompatibleDataException("Layer not in data collection")

        self._ensure_layer_data_present(layer)
        if self.layer_present(layer):
            return self._artists[layer][0]

        art = HistogramLayerArtist(layer, self._axes)
        self._artists.append(art)

        self._ensure_subsets_present(layer)
        self._sync_layer(layer)
        self._redraw()
        return art
Example #7
    def update_histogram(self):

        current_settings = (id(self.viewer_state.x_att),
                            self.viewer_state.x_log,
                            self.viewer_state.hist_x_min,
                            self.viewer_state.hist_x_max,
                            self.viewer_state.hist_n_bin)

        if (self._histogram_cache is not None
                and self._histogram_cache[0] == current_settings):
            return self._histogram_cache[1]

        if (self.viewer_state is None or self.viewer_state.x_att is None
                or self.viewer_state.hist_x_min is None
                or self.viewer_state.hist_x_max is None
                or self.viewer_state.hist_n_bin is None
                or self.viewer_state.x_log is None):
            raise IncompatibleDataException()

        if isinstance(self.layer, Subset):
            data = self.layer.data
            subset_state = self.layer.subset_state
        else:
            data = self.layer
            subset_state = None

        range = sorted(
            (self.viewer_state.hist_x_min, self.viewer_state.hist_x_max))

        hist_values = data.compute_histogram(
            [self._viewer_state.x_att],
            range=[range],
            bins=[self._viewer_state.hist_n_bin],
            log=[self._viewer_state.x_log],
            subset_state=subset_state)

        # TODO: determine whether this belongs here or in the layer artist
        if isinstance(range[0], np.datetime64):
            range = [datetime64_to_mpl(range[0]), datetime64_to_mpl(range[1])]

        if self._viewer_state.x_log:
            hist_edges = np.logspace(np.log10(range[0]), np.log10(range[1]),
                                     self._viewer_state.hist_n_bin + 1)
        else:
            hist_edges = np.linspace(range[0], range[1],
                                     self._viewer_state.hist_n_bin + 1)

        self._histogram_cache = current_settings, (hist_edges, hist_values)
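Unlike Example #5, this variant keys its cache on a tuple of the current settings rather than resetting it via callbacks. A minimal sketch of that memoization pattern, with a hypothetical compute() standing in for the histogram call:

import numpy as np

class SettingsCache(object):

    def __init__(self):
        self._cache = None  # (settings, result) or None

    def update(self, x_min, x_max, n_bin):
        settings = (x_min, x_max, n_bin)
        # Return the cached result as long as the settings are unchanged.
        if self._cache is not None and self._cache[0] == settings:
            return self._cache[1]
        result = self.compute(x_min, x_max, n_bin)  # the expensive step
        self._cache = settings, result
        return result

    def compute(self, x_min, x_max, n_bin):
        return np.linspace(x_min, x_max, n_bin + 1)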
Example #8
    def add_data(self, data):

        if data in self._layer_artist_container:
            return True

        if data not in self.session.data_collection:
            raise IncompatibleDataException("Data not in DataCollection")

        # Create layer artist and add to container
        layer = self._data_artist_cls(self._axes, self.state, layer=data)
        self._layer_artist_container.append(layer)
        layer.update()

        # Add existing subsets to viewer
        for subset in data.subsets:
            self.add_subset(subset)

        return True
Example #9
    def add_data(self, data):

        # Check if data already exists in viewer
        if not self.allow_duplicate_data and data in self._layer_artist_container:
            return True

        if self.large_data_size is not None and data.size >= self.large_data_size:
            proceed = self.warn('Add large data set?',
                                'Data set {0:s} has {1:d} points, and '
                                'may render slowly.'.format(
                                    data.label, data.size),
                                default='Cancel',
                                setting='show_large_data_warning')
            if not proceed:
                return False

        if data not in self.session.data_collection:
            raise IncompatibleDataException("Data not in DataCollection")

        # Create layer artist and add to container. First check whether any
        # plugins want to make a custom layer artist.
        layer = (get_layer_artist_from_registry(data, self)
                 or self.get_data_layer_artist(data))

        if layer is None:
            return False

        # When adding a layer artist to the layer artist container, zorder
        # gets set automatically - however since we call a forced update of the
        # layer after adding it to the container we can ignore any callbacks
        # related to zorder. We also then need to set layer.state.zorder manually.
        with ignore_callback(layer.state, 'zorder'):
            self._layer_artist_container.append(layer)
        layer.update()
        # Needs to be called here because callbacks are ignored in the previous step.
        self.draw_legend()

        # Add existing subsets to viewer
        for subset in data.subsets:
            self.add_subset(subset)

        return True
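The ignore_callback context manager used above comes from the echo package, which glue's state classes build on. A small illustrative sketch of its effect; the State class and print callback are made up for the demo:

from echo import CallbackProperty, add_callback, ignore_callback

class State(object):
    zorder = CallbackProperty(0)

state = State()
add_callback(state, 'zorder', lambda value: print('zorder ->', value))

with ignore_callback(state, 'zorder'):
    state.zorder = 5   # changed silently - no callback fires
state.zorder = 6       # prints 'zorder -> 6'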
Example #10
    def get_sliced_data(self, view=None):

        # Getting the sliced data can be computationally expensive in some cases
        # in particular when reprojecting data/subsets. To avoid recomputing
        # these in cases where it isn't necessary, for example if the reference
        # data is a spectral cube and the layer is a 2D mosaic, we set up a
        # cache at the end of this method, and we then set up callbacks to
        # reset the cache if any of the following properties change. We need
        # to set a very high priority so that this is the first thing to happen.
        # Note that we need to set up the callbacks here as the viewer_state is
        # not always set in the __init__, for example when loading up sessions.
        # We also need to make sure that the cache gets reset when the links
        # change or when the subset changes. This is taken care of by calling
        # reset_cache in the layer artist update() method, which gets called
        # for these cases.

        if not self._viewer_callbacks_set:
            self.viewer_state.add_callback('slices',
                                           self.reset_cache_from_slices,
                                           echo_old=True,
                                           priority=100000)
            self.viewer_state.add_callback('x_att',
                                           self.reset_cache,
                                           priority=100000)
            self.viewer_state.add_callback('y_att',
                                           self.reset_cache,
                                           priority=100000)
            # this isn't the case for subsets
            if self.is_callback_property('attribute'):
                self.add_callback('attribute',
                                  self.reset_cache,
                                  priority=100000)
            self._viewer_callbacks_set = True

        if self._image_cache is not None:
            if view == self._image_cache['view']:
                return self._image_cache['image']

        # In the cache, we need to keep track of which slice indices should
        # cause the cache to be reset. By default, we assume that any changes
        # in slices should cause the cache to get reset, and in the reprojection
        # code below we then set up more specific conditions.
        reset_slices = True

        full_view, agg_func, transpose = self.viewer_state.numpy_slice_aggregation_transpose

        # The view should be the one applied just to the data slice, not to
        # all the dimensions of the data - thus it should have at most two
        # dimensions

        if view is not None:

            if len(view) > 2:
                raise ValueError('view should have at most two elements')
            if len(view) == 1:
                view = view + [slice(None)]

            x_axis = self.viewer_state.x_att.axis
            y_axis = self.viewer_state.y_att.axis

            full_view[x_axis] = view[1]
            full_view[y_axis] = view[0]

        # First, check whether the data is simply the reference data - if so
        # we can just use _get_image (which assumes alignment with reference_data)
        # to get the image to use.

        if self.layer.data is self.viewer_state.reference_data:
            image = self._get_image(view=tuple(full_view))
        else:

            # Second, we check whether the current data is linked pixel-wise with
            # the reference data.

            order = self.layer.data.pixel_aligned_data.get(
                self.viewer_state.reference_data)

            if order is not None:

                # order gives the order of the pixel components of the reference
                # data in the current data. With this we adjust the view and then
                # check that the result is a 2D array - if not, it means for example
                # that the layer is a 2D image and the reference data is a 3D cube
                # and that we are not slicing one of the dimensions in the 3D cube
                # that is also in the 2D image, resulting in a 1D array (which it
                # doesn't make sense to show).

                full_view = [full_view[idx] for idx in order]
                image = self._get_image(view=tuple(full_view))

                if image.ndim != 2:
                    raise IncompatibleDataException()
                else:
                    # Now check whether we need to transpose the image - we need
                    # to update this since the previously defined ``transpose``
                    # value assumed data in the order of the reference data
                    x_axis = self.viewer_state.x_att.axis
                    y_axis = self.viewer_state.y_att.axis
                    transpose = order.index(x_axis) < order.index(y_axis)

            else:

                # Now the real fun begins! The pixel grids are not lined up. Fun
                # times!

                # Let's make sure there are no AggregateSlice variables in
                # the view as we can't deal with this currently
                if any(isinstance(v, AggregateSlice) for v in full_view):
                    raise IncompatibleDataException()
                else:
                    agg_func = None

                # Start off by finding all the pixel coordinates of the current
                # view in the reference frame of the current layer data. In
                # principle we could do something as simple as:
                #
                #   pixel_coords = [self.viewer_state.reference_data[pix, full_view]
                #                   for pix in self.layer.pixel_component_ids]
                #   coords = [np.round(p.ravel()).astype(int) for p in pixel_coords]
                #
                # However this is sub-optimal because in reality some of these
                # pixel coordinate conversions won't change when the view is
                # changed (e.g. when a slice index changes). We therefore
                # cache each transformed pixel coordinate.

                if self._pixel_cache is None:
                    # The cache hasn't been set yet or has been reset so we
                    # initialize it here.
                    self._pixel_cache = {
                        'reset_slices': [None] * self.layer.ndim,
                        'coord': [None] * self.layer.ndim,
                        'shape': [None] * self.layer.ndim,
                        'view': None
                    }

                coords = []

                sub_data_view = [slice(0, 2)] * self.viewer_state.reference_data.ndim

                for ipix, pix in enumerate(self.layer.pixel_component_ids):

                    if (self._pixel_cache['view'] != view
                            or self._pixel_cache['coord'][ipix] is None):

                        # Start off by finding all the pixel coordinates of the current
                        # view in the reference frame of the current layer data.
                        pixel_coord = self.viewer_state.reference_data[
                            pix, full_view]
                        coord = np.round(pixel_coord.ravel()).astype(int)

                        # Now update cache - basically check which dimensions in
                        # the output of the transformation rely on broadcasting.
                        # The 'reset_slices' item is a list that indicates
                        # whether the cache should be reset when the index along
                        # a given dimension changes.
                        sub_data = self.viewer_state.reference_data[
                            pix, sub_data_view]
                        sub_data = unbroadcast(sub_data)
                        self._pixel_cache['reset_slices'][ipix] = [
                            x > 1 for x in sub_data.shape
                        ]
                        self._pixel_cache['coord'][ipix] = coord
                        self._pixel_cache['shape'][ipix] = pixel_coord.shape
                        original_shape = pixel_coord.shape

                    else:

                        coord = self._pixel_cache['coord'][ipix]
                        original_shape = self._pixel_cache['shape'][ipix]

                    coords.append(coord)

                self._pixel_cache['view'] = view

                # TODO: add test when image is smaller than cube

                # We now do a nearest-neighbor interpolation. We don't use
                # map_coordinates because it is picky about array endian-ness
                # and if we just use normal Numpy slicing we can preserve the
                # data type (and avoid memory copies)
                keep = np.ones(len(coords[0]), dtype=bool)
                image = np.zeros(len(coords[0])) * np.nan
                for icoord, coord in enumerate(coords):
                    keep[(coord < 0) |
                         (coord >= self.layer.shape[icoord])] = False
                coords = [coord[keep] for coord in coords]
                image[keep] = self._get_image(view=tuple(coords))

                # Finally convert array back to a 2D array
                image = image.reshape(original_shape)

                # Determine which slice indices should cause the cache to get
                # reset and the image to be re-projected.

                reset_slices = []
                single_pixel = (0, ) * self.layer.ndim
                for pix in self.viewer_state.reference_data.pixel_component_ids:
                    try:
                        self.layer[pix, single_pixel]
                        reset_slices.append(True)
                    except IncompatibleAttribute:
                        reset_slices.append(False)

        # Apply aggregation functions if needed

        if agg_func is None:

            if image.ndim != 2:
                raise IncompatibleDataException()

        else:

            if image.ndim != len(agg_func):
                raise ValueError(
                    "Sliced image dimensions ({0}) does not match "
                    "aggregation function list ({1})".format(
                        image.ndim, len(agg_func)))

            for axis in range(image.ndim - 1, -1, -1):
                func = agg_func[axis]
                if func is not None:
                    image = func(image, axis=axis)

            if image.ndim != 2:
                raise ValueError(
                    "Image after aggregation should have two dimensions")

        if transpose:
            image = image.transpose()

        self._image_cache = {
            'view': view,
            'image': image,
            'reset_slices': reset_slices
        }

        return image
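The unaligned branch above does its nearest-neighbour lookup by rounding pixel coordinates, masking the out-of-bounds ones, and filling those positions with NaN. A self-contained sketch of that trick with made-up coordinates:

import numpy as np

layer = np.arange(16.).reshape(4, 4)   # stand-in for the layer data
coords = [np.array([0, 1, 5, -1]),     # rounded coordinates along axis 0
          np.array([2, 3, 0, 0])]      # rounded coordinates along axis 1

keep = np.ones(len(coords[0]), dtype=bool)
image = np.zeros(len(coords[0])) * np.nan
for icoord, coord in enumerate(coords):
    keep[(coord < 0) | (coord >= layer.shape[icoord])] = False
coords = [coord[keep] for coord in coords]
image[keep] = layer[tuple(coords)]

print(image)  # [ 2.  7. nan nan]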
Example #11
    def get_sliced_data(self, view=None, bounds=None):

        full_view, agg_func, transpose = self.viewer_state.numpy_slice_aggregation_transpose

        x_axis = self.viewer_state.x_att.axis
        y_axis = self.viewer_state.y_att.axis

        # For this method, we make use of Data.compute_fixed_resolution_buffer,
        # which requires us to specify bounds in the form (min, max, nsteps).
        # We also allow view to be passed here (which is a normal Numpy view)
        # and, if given, translate it to bounds. If neither are specified,
        # we behave as if view was [slice(None), slice(None)].

        def slice_to_bound(slc, size):
            min, max, step = slc.indices(size)
            n = (max - min - 1) // step
            max = min + step * n
            return (min, max, n + 1)

        if bounds is None:

            # The view should be the one applied just to the data slice, not to
            # all the dimensions of the data - thus it should have at most two
            # dimensions

            if view is None:
                view = [slice(None), slice(None)]
            elif len(view) == 1:
                view = view + [slice(None)]
            elif len(view) > 2:
                raise ValueError('view should have at most two elements')

            full_view[x_axis] = view[1]
            full_view[y_axis] = view[0]

        else:

            full_view[x_axis] = bounds[1]
            full_view[y_axis] = bounds[0]

        for i in range(self.viewer_state.reference_data.ndim):
            if isinstance(full_view[i], slice):
                full_view[i] = slice_to_bound(
                    full_view[i], self.viewer_state.reference_data.shape[i])

        # We now get the fixed resolution buffer

        if isinstance(self.layer, BaseData):
            image = self.layer.compute_fixed_resolution_buffer(
                full_view,
                target_data=self.viewer_state.reference_data,
                target_cid=self.attribute,
                broadcast=False,
                cache_id=self.uuid)
        else:
            image = self.layer.data.compute_fixed_resolution_buffer(
                full_view,
                target_data=self.viewer_state.reference_data,
                subset_state=self.layer.subset_state,
                broadcast=False,
                cache_id=self.uuid)

        # We apply aggregation functions if needed

        if agg_func is None:
            if image.ndim != 2:
                raise IncompatibleDataException()
        else:
            if image.ndim != len(agg_func):
                raise ValueError(
                    "Sliced image dimensions ({0}) does not match "
                    "aggregation function list ({1})".format(
                        image.ndim, len(agg_func)))
            for axis in range(image.ndim - 1, -1, -1):
                func = agg_func[axis]
                if func is not None:
                    image = func(image, axis=axis)
            if image.ndim != 2:
                raise ValueError(
                    "Image after aggregation should have two dimensions")

        # And finally we transpose the data if the order of x/y is different
        # from the native order.

        if transpose:
            image = image.transpose()

        return image
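The slice_to_bound helper converts a Numpy-style slice into the (min, max, nsteps) bounds format that compute_fixed_resolution_buffer expects. Restated standalone with two quick checks:

def slice_to_bound(slc, size):
    min, max, step = slc.indices(size)
    n = (max - min - 1) // step
    max = min + step * n
    return (min, max, n + 1)

print(slice_to_bound(slice(None), 10))     # (0, 9, 10) - every pixel
print(slice_to_bound(slice(2, 8, 2), 10))  # (2, 6, 3)  - pixels 2, 4, 6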
Example #12
def compute_fixed_resolution_buffer(data,
                                    bounds,
                                    target_data=None,
                                    target_cid=None,
                                    subset_state=None,
                                    broadcast=True,
                                    cache_id=None):
    """
    Get a fixed-resolution buffer for a dataset.

    Parameters
    ----------
    data : `~glue.core.Data`
        The dataset from which to extract a fixed resolution buffer
    bounds : list
        The list of bounds for the fixed resolution buffer. This list should
        have as many items as there are dimensions in ``target_data``. Each
        item should either be a scalar value, or a tuple of ``(min, max, nsteps)``.
    target_data : `~glue.core.Data`, optional
        The data in whose frame of reference the bounds are defined. Defaults
        to ``data``.
    target_cid : `~glue.core.component_id.ComponentID`, optional
        If specified, gives the component ID giving the component to use for the
        data values. Alternatively, use ``subset_state`` to get a subset mask.
    subset_state : `~glue.core.subset.SubsetState`, optional
        If specified, gives the subset state for which to compute a mask.
        Alternatively, use ``target_cid`` if you want to get data values.
    broadcast : bool, optional
        If `True`, then if a dimension in ``target_data`` for which ``bounds``
        is not a scalar does not affect any of the dimensions in ``data``,
        then the final array will be effectively broadcast along this
        dimension, otherwise an error will be raised.
    cache_id : str, optional
        If specified, the resulting array is cached under this ID, keyed by a
        hash of the call arguments, and returned directly on a later cache hit.
    """

    if target_data is None:
        target_data = data

    if target_cid is None and subset_state is None:
        raise ValueError(
            "Either target_cid or subset_state should be specified")

    if target_cid is not None and subset_state is not None:
        raise ValueError(
            "Either target_cid or subset_state should be specified (not both)")

    # If cache_id is specified, we keep a cached version of the resulting array
    # indexed by cache_id as well as a hash formed of the call arguments to this
    # function. We then check if the resulting array already exists in the cache.

    if cache_id is not None:

        if subset_state is None:
            # Use uuid for component ID since otherwise component IDs don't return
            # False when comparing two different CIDs (instead they return a subset state).
            # For bounds we use a special wrapper that can identify wildcards.
            current_array_hash = (data, bounds, target_data, target_cid.uuid,
                                  broadcast)
        else:
            current_array_hash = (data, bounds, target_data, subset_state,
                                  broadcast)

        current_pixel_hash = (data, target_data)

        if cache_id in ARRAY_CACHE:
            if ARRAY_CACHE[cache_id]['hash'] == current_array_hash:
                return ARRAY_CACHE[cache_id]['array']

        # To save time later, if the pixel cache doesn't match at the level of the
        # data and target_data, we just reset the cache.
        if cache_id in PIXEL_CACHE:
            if PIXEL_CACHE[cache_id]['hash'] != current_pixel_hash:
                PIXEL_CACHE.pop(cache_id)

    # Start off by generating arrays of coordinates in the original dataset
    pixel_coords = [
        np.linspace(*bound) if isinstance(bound, tuple) else bound
        for bound in bounds
    ]
    pixel_coords = np.meshgrid(*pixel_coords, indexing='ij', copy=False)

    # Keep track of the original shape of these arrays
    original_shape = pixel_coords[0].shape

    # Now loop through the dimensions of 'data' to find the corresponding
    # coordinates in the frame of view of this dataset.

    translated_coords = []
    dimensions_all = []

    invalid_all = np.zeros(original_shape, dtype=bool)

    for ipix, pix in enumerate(data.pixel_component_ids):

        # At this point, if cache_id is in PIXEL_CACHE, we know that data and
        # target_data match so we just check the bounds. Note that the bounds
        # include the AnyScalar wildcard for any dimensions that don't impact
        # the pixel coordinates here. We do this so that we don't have to
        # recompute the pixel coordinates when e.g. slicing through cubes.

        if (cache_id in PIXEL_CACHE and ipix in PIXEL_CACHE[cache_id]
                and PIXEL_CACHE[cache_id][ipix]['bounds'] == bounds):

            translated_coord = PIXEL_CACHE[cache_id][ipix]['translated_coord']
            dimensions = PIXEL_CACHE[cache_id][ipix]['dimensions']
            invalid = PIXEL_CACHE[cache_id][ipix]['invalid']

        else:

            translated_coord, dimensions = translate_pixel(
                target_data, pixel_coords, pix)

            # The returned coordinates may often be a broadcasted array. To convert
            # the coordinates to integers and check which ones are within bounds, we
            # thus operate on the un-broadcasted array, before broadcasting it back
            # to the original shape.
            translated_coord = np.round(
                unbroadcast(translated_coord)).astype(int)
            invalid = (translated_coord < 0) | (translated_coord >=
                                                data.shape[ipix])

            # Since we are going to be using these coordinates later on to index an
            # array, we need the coordinates to be within the array, so we reset
            # any invalid coordinates and keep track of which pixels are invalid
            # to reset them later.
            translated_coord[invalid] = 0

            # We now populate the cache
            if cache_id is not None:

                if cache_id not in PIXEL_CACHE:
                    PIXEL_CACHE[cache_id] = {'hash': current_pixel_hash}

                PIXEL_CACHE[cache_id][ipix] = {
                    'translated_coord': translated_coord,
                    'dimensions': dimensions,
                    'invalid': invalid,
                    'bounds': bounds_for_cache(bounds, dimensions)
                }

        invalid_all |= invalid

        # Broadcast back to the original shape and add to the list
        translated_coords.append(broadcast_to(translated_coord,
                                              original_shape))

        # Also keep track of all the dimensions that contributed to this coordinate
        dimensions_all.extend(dimensions)

    translated_coords = tuple(translated_coords)

    # If a dimension from the target data for which bounds was set to an interval
    # did not actually contribute to any of the coordinates in data, then if
    # broadcast is set to False we raise an error, otherwise we proceed and
    # implicitly broadcast values along that dimension of the target data.

    if data is not target_data and not broadcast:
        for i in range(target_data.ndim):
            if isinstance(bounds[i], tuple) and i not in dimensions_all:
                raise IncompatibleDataException()

    # PERF: optimize further - check if we can extract a sub-region that
    # contains all the valid values.

    # Take subset_state into account, if present
    if subset_state is None:
        array = data.get_data(target_cid, view=translated_coords).astype(float)
        invalid_value = -np.inf
    else:
        array = data.get_mask(subset_state, view=translated_coords)
        invalid_value = False

    if np.any(invalid_all):
        if not array.flags.writeable:
            array = np.array(array, dtype=type(invalid_value))
        array[invalid_all] = invalid_value

    # Drop dimensions for which bounds were scalars
    slices = []
    for bound in bounds:
        if isinstance(bound, tuple):
            slices.append(slice(None))
        else:
            slices.append(0)

    array = array[tuple(slices)]

    if cache_id is not None:

        # For the bounds, we use a special wildcard for bounds that don't affect
        # the result. This will allow the cache to match regardless of the
        # value for those bounds. However, we only do this for scalar bounds.

        cache_bounds = bounds_for_cache(bounds, dimensions_all)

        current_array_hash = (current_array_hash[:1] + (cache_bounds,)
                              + current_array_hash[2:])

        # The stored entry is the same whether we computed data values or a
        # subset mask; the hash above already distinguishes the two cases.
        ARRAY_CACHE[cache_id] = {'hash': current_array_hash,
                                 'array': array}

    return array
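To round off, a hedged usage sketch for compute_fixed_resolution_buffer, assuming the function above is in scope (in glue it lives in glue.core.fixed_resolution_buffer); the dataset and component below are illustrative:

import numpy as np
from glue.core import Data

data = Data(flux=np.random.random((10, 20)), label='image')

# Resample 'flux' onto a 5 x 5 grid spanning the full pixel extent of the
# data; each bound is a (min, max, nsteps) tuple in pixel coordinates.
buffer = compute_fixed_resolution_buffer(data,
                                         bounds=[(0, 9, 5), (0, 19, 5)],
                                         target_cid=data.id['flux'])

print(buffer.shape)  # (5, 5)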