Example #1
    def test_args(self):
        # shape and ndim must match
        t = ArrayTrait(ndim=2, shape=(2, 2))

        with pytest.raises(ValueError):
            ArrayTrait(ndim=1, shape=(2, 2))

        # dtype lookup
        t = ArrayTrait(dtype="datetime64")
        assert t.dtype == np.datetime64

        # invalid dtype
        with pytest.raises(ValueError):
            ArrayTrait(dtype="notatype")
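The constructor checks exercised by this test pair naturally with trait assignment. A minimal usage sketch, assuming (as the later examples suggest) that ArrayTrait lives in podpac.core.utils, coerces assigned values to numpy arrays, and rejects values that violate the declared dtype, ndim, or shape:

import numpy as np
import traitlets as tl
from podpac.core.utils import ArrayTrait  # assumed import path

class Sketch(tl.HasTraits):
    a = ArrayTrait(dtype=float)    # values coerced to a float array
    b = ArrayTrait(ndim=2)         # must be 2-dimensional
    c = ArrayTrait(shape=(2, 2))   # must have shape (2, 2)

obj = Sketch()
obj.a = [1, 2, 3]         # coerced to np.array([1., 2., 3.])
obj.b = np.zeros((3, 4))  # ok: ndim == 2
obj.c = np.eye(2)         # ok: shape == (2, 2)
# obj.c = np.eye(3)       # would raise a TraitError (wrong shape)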
Example #2
class MockArrayDataSource(InterpolationMixin, DataSource):
    data = ArrayTrait().tag(attr=True)
    coordinates = tl.Instance(Coordinates).tag(attr=True)

    def get_data(self, coordinates, coordinates_index):
        return self.create_output_array(coordinates,
                                        data=self.data[coordinates_index])
Example #3
class Array(DataSource):
    """Create a DataSource from an array
    
    Attributes
    ----------
    source : np.ndarray
        Numpy array containing the source data
        
    Notes
    -----
    `native_coordinates` needs to be supplied by the user when instantiating this node.
    """

    source = ArrayTrait()

    @tl.validate('source')
    def _validate_source(self, d):
        a = d['value']
        try:
            a.astype(float)
        except:
            raise ValueError("Array source must be numerical")
        return a

    @common_doc(COMMON_DATA_DOC)
    def get_data(self, coordinates, coordinates_index):
        """{get_data}
        """
        s = coordinates_index
        d = self.create_output_array(coordinates, data=self.source[s])
        return d
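A minimal usage sketch for this node, assuming the podpac.Coordinates and clinspace helpers shown in the later examples; per the Notes above, native_coordinates must be supplied explicitly:

import numpy as np
import podpac

data = np.random.rand(21, 21)
coords = podpac.Coordinates([podpac.clinspace(-10, 10, 21, 'lat'),
                             podpac.clinspace(-10, 10, 21, 'lon')])

# older API: the array goes in 'source' and the node cannot infer its own coordinates
node = podpac.data.Array(source=data, native_coordinates=coords)
output = node.eval(coords)  # UnitsDataArray filled from source[coordinates_index]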
Example #4
class ArrayCoordinates1d(Coordinates1d):
    """
    1-dimensional array of coordinates.

    ArrayCoordinates1d is a basic array of 1d coordinates created from an array of coordinate values. Numerical
    coordinate values are converted to ``float``, and time coordinate values are converted to numpy ``datetime64``.
    For convenience, podpac automatically converts datetime strings such as ``'2018-01-01'`` to ``datetime64``. The
    coordinate values must all be of the same type.

    Parameters
    ----------
    name : str
        Dimension name, one of 'lat', 'lon', 'time', or 'alt'.
    coordinates : array, read-only
        Full array of coordinate values.
    units : podpac.Units
        Coordinate units.
    coord_ref_sys : str
        Coordinate reference system.
    ctype : str
        Coordinates type: 'point', 'left', 'right', or 'midpoint'.
    segment_lengths : array, float, timedelta
        When ctype is a segment type, the segment lengths for the coordinates.

    See Also
    --------
    :class:`Coordinates1d`, :class:`UniformCoordinates1d`
    """

    coords = ArrayTrait(ndim=1, read_only=True)
    coords.__doc__ = ":array: User-defined coordinate values"

    def __init__(self, coords,
                       name=None, ctype=None, units=None, segment_lengths=None, coord_ref_sys=None):
        """
        Create 1d coordinates from an array.

        Arguments
        ---------
        coords : array-like
            coordinate values.
        name : str, optional
            Dimension name, one of 'lat', 'lon', 'time', or 'alt'.
        units : Units, optional
            Coordinate units.
        coord_ref_sys : str, optional
            Coordinate reference system.
        ctype : str, optional
            Coordinates type: 'point', 'left', 'right', or 'midpoint'.
        segment_lengths : array, optional
            When ctype is a segment type, the segment lengths for the coordinates. The segment_lengths are required
            for nonmonotonic coordinates. The segment lengths can be inferred from coordinate values for monotonic coordinates.
        """

        # validate and set coords
        self.set_trait('coords', make_coord_array(coords))

        # precalculate once
        if self.coords.size == 0:
            self._is_monotonic = None
            self._is_descending = None
            self._is_uniform = None

        elif self.coords.size == 1:
            self._is_monotonic = True
            self._is_descending = None
            self._is_uniform = True

        else:
            deltas = (self.coords[1:] - self.coords[:-1]).astype(float) * (self.coords[1] - self.coords[0]).astype(float)
            if np.any(deltas <= 0):
                self._is_monotonic = False
                self._is_descending = None
                self._is_uniform = False
            else:
                self._is_monotonic = True
                self._is_descending = self.coords[1] < self.coords[0]
                self._is_uniform = np.allclose(deltas, deltas[0])
        
        # set common properties
        super(ArrayCoordinates1d, self).__init__(
            name=name, ctype=ctype, units=units, segment_lengths=segment_lengths, coord_ref_sys=coord_ref_sys)

        # check segment lengths
        if segment_lengths is None:
            if self.ctype == 'point' or self.size == 0:
                self.set_trait('segment_lengths', None)
            elif self.dtype == np.datetime64:
                raise TypeError("segment_lengths required for datetime coordinates (if ctype != 'point')")
            elif self.size == 1:
                raise TypeError("segment_lengths required for coordinates of size 1 (if ctype != 'point')")
            elif not self.is_monotonic:
                raise TypeError("segment_lengths required for nonmonotonic coordinates (if ctype != 'point')")

    @tl.default('ctype')
    def _default_ctype(self):
        if self.size == 0 or self.size == 1 or not self.is_monotonic or self.dtype == np.datetime64:
            return 'point'
        else:
            return 'midpoint'

    @tl.default('segment_lengths')
    def _default_segment_lengths(self):
        if self.is_uniform:
            return np.abs(self.coords[1] - self.coords[0])

        deltas = np.abs(self.coords[1:] - self.coords[:-1])
        if self.is_descending:
            deltas = deltas[::-1]

        segment_lengths = np.zeros(self.coords.size)
        if self.ctype == 'left':
            segment_lengths[:-1] = deltas
            segment_lengths[-1] = segment_lengths[-2]
        elif self.ctype == 'right':
            segment_lengths[1:] = deltas
            segment_lengths[0] = segment_lengths[1]
        elif self.ctype == 'midpoint':
            segment_lengths[:-1] = deltas
            segment_lengths[1:] += deltas
            segment_lengths[1:-1] /= 2

        if self.is_descending:
            segment_lengths = segment_lengths[::-1]
        
        segment_lengths.setflags(write=False)
        return segment_lengths

    def __eq__(self, other):
        if not super(ArrayCoordinates1d, self).__eq__(other):
            return False

        if not np.array_equal(self.coordinates, other.coordinates):
            return False

        return True

    # ------------------------------------------------------------------------------------------------------------------
    # Alternate Constructors
    # ------------------------------------------------------------------------------------------------------------------

    @classmethod
    def from_xarray(cls, x, **kwargs):
        """
        Create 1d Coordinates from named xarray coords.

        Arguments
        ---------
        x : xarray.DataArray
            Named DataArray of the coordinate values
        units : Units, optional
            Coordinate units.
        coord_ref_sys : str, optional
            Coordinate reference system.
        ctype : str, optional
            Coordinates type: 'point', 'left', 'right', or 'midpoint'.
        segment_lengths : (low, high), optional
            When ctype is a segment type, the segment lengths for the coordinates. The segment_lengths are required
            for nonmonotonic coordinates. The segment lengths can be inferred from coordinate values for monotonic coordinates.

        Returns
        -------
        :class:`ArrayCoordinates1d`
            1d coordinates
        """

        return cls(x.data, name=x.name, **kwargs)

    @classmethod
    def from_definition(cls, d):
        """
        Create 1d coordinates from a coordinates definition.

        The definition must contain the coordinate values::

            c = ArrayCoordinates1d.from_definition({
                "values": [0, 1, 2, 3]
            })

        The definition may also contain any of the 1d Coordinates properties::

            c = ArrayCoordinates1d.from_definition({
                "values": [0, 1, 2, 3],
                "name": "lat",
                "ctype": "points"
            })

        Arguments
        ---------
        d : dict
            1d coordinates array definition

        Returns
        -------
        :class:`ArrayCoordinates1d`
            1d Coordinates

        See Also
        --------
        definition
        """

        if 'values' not in d:
            raise ValueError('ArrayCoordinates1d definition requires "values" property')

        coords = d.pop('values')
        return cls(coords, **d)

    def copy(self):
        """
        Make a deep copy of the 1d Coordinates array.

        Returns
        -------
        :class:`ArrayCoordinates1d`
            Copy of the coordinates.
        """

        kwargs = self.properties
        if self._segment_lengths:
            kwargs['segment_lengths'] = self.segment_lengths
        return ArrayCoordinates1d(self.coords, **kwargs)

    # ------------------------------------------------------------------------------------------------------------------
    # standard methods, array-like
    # ------------------------------------------------------------------------------------------------------------------

    def __len__(self):
        return self.size

    def __getitem__(self, index):
        coords = self.coords[index]
        kwargs = self.properties
        
        if self.ctype != 'point':
            if isinstance(self.segment_lengths, np.ndarray):
                kwargs['segment_lengths'] = self.segment_lengths[index]
            else:
                kwargs['segment_lengths'] = self.segment_lengths
            
            if (coords.size == 0 or coords.size == 1) and 'ctype' not in self.properties:
                kwargs['ctype'] = self.ctype

        return ArrayCoordinates1d(coords, **kwargs)

    # ------------------------------------------------------------------------------------------------------------------
    # Properties
    # ------------------------------------------------------------------------------------------------------------------

    @property
    def coordinates(self):
        """:array, read-only: Coordinate values."""

        # get coordinates and ensure read-only array with correct dtype
        coordinates = self.coords.copy()
        coordinates.setflags(write=False)
        return coordinates

    @property
    def size(self):
        """ Number of coordinates. """
        return self.coords.size

    @property
    def dtype(self):
        """:type: Coordinates dtype.

        ``float`` for numerical coordinates and numpy ``datetime64`` for datetime coordinates.
        """

        if self.size == 0:
            return None
        elif self.coords.dtype == float:
            return float
        elif np.issubdtype(self.coords.dtype, np.datetime64):
            return np.datetime64

    @property
    def is_monotonic(self):
        return self._is_monotonic

    @property
    def is_descending(self):
        return self._is_descending

    @property
    def is_uniform(self):
        return self._is_uniform

    @property
    def bounds(self):
        """ Low and high coordinate bounds. """

        # TODO are we sure this can't be a tuple?

        if self.size == 0:
            lo, hi = np.nan, np.nan
        elif self.is_monotonic:
            lo, hi = sorted([self.coords[0], self.coords[-1]])
        elif self.dtype is np.datetime64:
            lo, hi = np.min(self.coords), np.max(self.coords)
        else:
            lo, hi = np.nanmin(self.coords), np.nanmax(self.coords)

        # read-only array with the correct dtype
        bounds = np.array([lo, hi], dtype=self.dtype)
        bounds.setflags(write=False)
        return bounds

    @property
    def argbounds(self):
        if not self.is_monotonic:
            return np.argmin(self.coords), np.argmax(self.coords)
        elif not self.is_descending:
            return 0, -1
        else:
            return -1, 0

    @property
    def definition(self):
        """:dict: Serializable 1d coordinates array definition.

        The ``definition`` can be used to create new ArrayCoordinates1d::

            c = podpac.ArrayCoordinates1d([0, 1, 2, 3])
            c2 = podpac.ArrayCoordinates1d.from_definition(c.definition)

        See Also
        --------
        from_definition
        """

        d = OrderedDict()
        d['values'] = self.coords
        if self._segment_lengths:
            d['segment_lengths'] = self.segment_lengths
        d.update(self.properties)
        return d

    # ------------------------------------------------------------------------------------------------------------------
    # Methods
    # ------------------------------------------------------------------------------------------------------------------

    def select(self, bounds, outer=False, return_indices=False):
        """
        Get the coordinate values that are within the given bounds.

        The default selection returns coordinates that are within the other coordinates bounds::

            In [1]: c = ArrayCoordinates1d([0, 1, 2, 3], name='lat')

            In [2]: c.select([1.5, 2.5]).coordinates
            Out[2]: array([2.])

        The *outer* selection returns the minimal set of coordinates that contain the other coordinates::
        
            In [3]: c.select([1.5, 2.5], outer=True).coordinates
            Out[3]: array([1., 2., 3.])

        The *outer* selection also returns a boundary coordinate if the other coordinates are outside this
        coordinates bounds but *inside* its area bounds::
        
            In [4]: c.select([3.25, 3.35], outer=True).coordinates
            Out[4]: array([3.0], dtype=float64)

            In [5]: c.select([10.0, 11.0], outer=True).coordinates
            Out[5]: array([], dtype=float64)
        
        Arguments
        ---------
        bounds : low, high
            selection bounds
        outer : bool, optional
            If True, do an *outer* selection. Default False.
        return_indices : bool, optional
            If True, return slice or indices for the selection in addition to coordinates. Default False.

        Returns
        -------
        selection : :class:`ArrayCoordinates1d`
            ArrayCoordinates1d object with coordinates within the other coordinates bounds.
        I : slice or list
            index or slice for the intersected coordinates (only if return_indices=True)
        """

        bounds = make_coord_value(bounds[0]), make_coord_value(bounds[1])

        # empty
        if self.size == 0:
            return self._select_empty(return_indices)

        # full
        if self.bounds[0] >= bounds[0] and self.bounds[1] <= bounds[1]:
            return self._select_full(return_indices)

        # none
        if self.area_bounds[0] > bounds[1] or self.area_bounds[1] < bounds[0]:
            return self._select_empty(return_indices)

        if not outer:
            gt = self.coordinates >= bounds[0]
            lt = self.coordinates <= bounds[1]
            I = np.where(gt & lt)[0]

        elif self.is_monotonic:
            gt = np.where(self.coords >= bounds[0])[0]
            lt = np.where(self.coords <= bounds[1])[0]
            lo, hi = bounds[0], bounds[1]
            if self.is_descending:
                lt, gt = gt, lt
                lo, hi = hi, lo
            if self.coords[gt[0]] != lo:
                gt[0] -= 1
            if self.coords[lt[-1]] != hi:
                lt[-1] += 1
            start = max(0, gt[0])
            stop = min(self.size-1, lt[-1])
            I = slice(start, stop+1)

        else:
            try:
                gt = self.coords >= max(self.coords[self.coords <= bounds[0]])
            except ValueError as e:
                gt = self.coords >= -np.inf
            try:
                lt = self.coords <= min(self.coords[self.coords >= bounds[1]])
            except ValueError as e:
                lt = self.coords <= np.inf
            I = np.where(gt & lt)[0]

        if return_indices:
            return self[I], I
        else:
            return self[I]
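The docstrings above already show the key calls; collected into one runnable sketch (ArrayCoordinates1d is assumed to be exported at the top level of podpac, as in the definition docstring):

import podpac

c = podpac.ArrayCoordinates1d([0, 1, 2, 3], name='lat')

c.select([1.5, 2.5]).coordinates              # array([2.])
c.select([1.5, 2.5], outer=True).coordinates  # array([1., 2., 3.])

# round trip through the serializable definition
c2 = podpac.ArrayCoordinates1d.from_definition(c.definition)
assert c == c2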
Example #5
class MyNodeWithArrayInput(Node):
    my_array = ArrayTrait().tag(attr=True)
Example #6
class MyNode(Node):
    my_attr = ArrayTrait().tag(attr=True)
Example #7
class MyClass(tl.HasTraits):
    a = ArrayTrait(dtype=float)
Example #8
class MyClass(tl.HasTraits):
    a = ArrayTrait(ndim=2)
Example #9
class MyClass(tl.HasTraits):
    a = ArrayTrait(shape=(2, 2))
Example #10
class ArrayCoordinates1d(Coordinates1d):
    """
    1-dimensional array of coordinates.

    ArrayCoordinates1d is a basic array of 1d coordinates created from an array of coordinate values. Numerical
    coordinate values are converted to ``float``, and time coordinate values are converted to numpy ``datetime64``.
    For convenience, podpac automatically converts datetime strings such as ``'2018-01-01'`` to ``datetime64``. The
    coordinate values must all be of the same type.

    Parameters
    ----------
    name : str
        Dimension name, one of 'lat', 'lon', 'time', or 'alt'.
    coordinates : array, read-only
        Full array of coordinate values.

    See Also
    --------
    :class:`Coordinates1d`, :class:`UniformCoordinates1d`
    """

    coordinates = ArrayTrait(read_only=True)

    _is_monotonic = None
    _is_descending = None
    _is_uniform = None
    _step = None
    _start = None
    _stop = None

    def __init__(self, coordinates, name=None, **kwargs):
        """
        Create 1d coordinates from an array.

        Arguments
        ---------
        coordinates : array-like
            coordinate values.
        name : str, optional
            Dimension name, one of 'lat', 'lon', 'time', or 'alt'.
        """

        # validate and set coordinates
        coordinates = make_coord_array(coordinates)
        self.set_trait("coordinates", coordinates)
        self.not_a_trait = coordinates

        # precalculate once
        if self.coordinates.size == 0:
            pass

        elif self.coordinates.size == 1:
            self._is_monotonic = True

        elif self.coordinates.ndim > 1:
            self._is_monotonic = None
            self._is_descending = None
            self._is_uniform = None

        else:
            deltas = self.deltas
            if np.any(deltas <= 0):
                self._is_monotonic = False
                self._is_descending = False
                self._is_uniform = False
            else:
                self._is_monotonic = True
                self._is_descending = self.coordinates[1] < self.coordinates[0]
                self._is_uniform = np.allclose(deltas, deltas[0])
                if self._is_uniform:
                    self._start = self.coordinates[0]
                    self._stop = self.coordinates[-1]
                    self._step = (self._stop -
                                  self._start) / (self.coordinates.size - 1)

        # set common properties
        super(ArrayCoordinates1d, self).__init__(name=name, **kwargs)

    def __eq__(self, other):
        if not self._eq_base(other):
            return False

        if not np.array_equal(self.coordinates, other.coordinates):
            return False

        return True

    # ------------------------------------------------------------------------------------------------------------------
    # Alternate Constructors
    # ------------------------------------------------------------------------------------------------------------------

    @classmethod
    def from_xarray(cls, x, **kwargs):
        """
        Create 1d Coordinates from named xarray coordinates.

        Arguments
        ---------
        x : xarray.DataArray
            Named DataArray of the coordinate values

        Returns
        -------
        :class:`ArrayCoordinates1d`
            1d coordinates
        """

        return cls(x.data, name=x.name, **kwargs).simplify()

    @classmethod
    def from_definition(cls, d):
        """
        Create 1d coordinates from a coordinates definition.

        The definition must contain the coordinate values::

            c = ArrayCoordinates1d.from_definition({
                "values": [0, 1, 2, 3]
            })

        The definition may also contain any of the 1d Coordinates properties::

            c = ArrayCoordinates1d.from_definition({
                "values": [0, 1, 2, 3],
                "name": "lat"
            })

        Arguments
        ---------
        d : dict
            1d coordinates array definition

        Returns
        -------
        :class:`ArrayCoordinates1d`
            1d Coordinates

        See Also
        --------
        definition
        """

        if "values" not in d:
            raise ValueError(
                'ArrayCoordinates1d definition requires "values" property')

        coordinates = d["values"]
        kwargs = {k: v for k, v in d.items() if k != "values"}
        return cls(coordinates, **kwargs)

    def copy(self):
        """
        Make a deep copy of the 1d Coordinates array.

        Returns
        -------
        :class:`ArrayCoordinates1d`
            Copy of the coordinates.
        """

        return ArrayCoordinates1d(self.coordinates, **self.properties)

    def unique(self, return_index=False):
        """
        Remove duplicate coordinate values from each dimension.

        Arguments
        ---------
        return_index : bool, optional
            If True, return index for the unique coordinates in addition to the coordinates. Default False.

        Returns
        -------
        unique : :class:`ArrayCoordinates1d`
            New ArrayCoordinates1d object with unique, sorted coordinate values.
        unique_index : list of indices
            index
        """

        # shortcut, monotonic coordinates are already unique
        if self.is_monotonic:
            if return_index:
                return self.flatten(), np.arange(self.size).tolist()
            else:
                return self.flatten()

        a, I = np.unique(self.coordinates, return_index=True)
        if return_index:
            return self.flatten()[I], I
        else:
            return self.flatten()[I]

    def simplify(self):
        """Get the simplified/optimized representation of these coordinates.

        Returns
        -------
        :class:`ArrayCoordinates1d`, :class:`UniformCoordinates1d`
            UniformCoordinates1d if the coordinates are uniform, otherwise ArrayCoordinates1d
        """

        from podpac.core.coordinates.uniform_coordinates1d import UniformCoordinates1d

        if self.is_uniform:
            return UniformCoordinates1d(self.start, self.stop, self.step,
                                        **self.properties)

        return self

    def flatten(self):
        """
        Get a copy of the coordinates with a flattened array (wraps numpy.flatten).

        Returns
        -------
        :class:`ArrayCoordinates1d`
            Flattened coordinates.
        """

        if self.ndim == 1:
            return self.copy()

        return ArrayCoordinates1d(self.coordinates.flatten(),
                                  **self.properties)

    def reshape(self, newshape):
        """
        Get a copy of the coordinates with a reshaped array (wraps numpy.reshape).

        Arguments
        ---------
        newshape: int, tuple
            The new shape.

        Returns
        -------
        :class:`ArrayCoordinates1d`
            Reshaped coordinates.
        """

        return ArrayCoordinates1d(self.coordinates.reshape(newshape),
                                  **self.properties)

    # ------------------------------------------------------------------------------------------------------------------
    # standard methods, array-like
    # ------------------------------------------------------------------------------------------------------------------

    def __getitem__(self, index):
        # The following 3 lines are copied by UniformCoordinates1d.__getitem__
        if self.ndim == 1 and np.ndim(index) > 1 and np.array(
                index).dtype == int:
            index = np.array(index).flatten().tolist()
        return ArrayCoordinates1d(self.coordinates[index], **self.properties)

    # ------------------------------------------------------------------------------------------------------------------
    # Properties
    # ------------------------------------------------------------------------------------------------------------------

    @property
    def deltas(self):
        return (self.coordinates[1:] - self.coordinates[:-1]
                ).astype(float) * np.sign(self.coordinates[1] -
                                          self.coordinates[0]).astype(float)

    @property
    def ndim(self):
        return self.coordinates.ndim

    @property
    def size(self):
        """ Number of coordinates. """
        return self.coordinates.size

    @property
    def shape(self):
        return self.coordinates.shape

    @property
    def dtype(self):
        """:type: Coordinates dtype.

        ``float`` for numerical coordinates and numpy ``datetime64`` for datetime coordinates.
        """

        if self.size == 0:
            return None
        elif self.coordinates.dtype == float:
            return float
        elif np.issubdtype(self.coordinates.dtype, np.datetime64):
            return np.datetime64

    @property
    def is_monotonic(self):
        return self._is_monotonic

    @property
    def is_descending(self):
        return self._is_descending

    @property
    def is_uniform(self):
        return self._is_uniform

    @property
    def start(self):
        return self._start

    @property
    def stop(self):
        return self._stop

    @property
    def step(self):
        return self._step

    @property
    def bounds(self):
        """ Low and high coordinate bounds. """

        if self.size == 0:
            lo, hi = np.nan, np.nan
        elif self.is_monotonic:
            lo, hi = sorted([self.coordinates[0], self.coordinates[-1]])
        elif self.dtype is np.datetime64:
            lo, hi = np.min(self.coordinates), np.max(self.coordinates)
        else:
            lo, hi = np.nanmin(self.coordinates), np.nanmax(self.coordinates)

        return lo, hi

    @property
    def argbounds(self):
        if self.size == 0:
            raise RuntimeError("Cannot get argbounds for empty coordinates")

        if not self.is_monotonic:
            argbounds = np.argmin(self.coordinates), np.argmax(
                self.coordinates)
            return np.unravel_index(argbounds[0],
                                    self.shape), np.unravel_index(
                                        argbounds[1], self.shape)
        elif not self.is_descending:
            return 0, -1
        else:
            return -1, 0

    def _get_definition(self, full=True):
        d = OrderedDict()
        d["values"] = self.coordinates
        d.update(self._full_properties if full else self.properties)
        return d

    # ------------------------------------------------------------------------------------------------------------------
    # Methods
    # ------------------------------------------------------------------------------------------------------------------

    def _select(self, bounds, return_index, outer):
        if self.dtype == np.datetime64:
            _, bounds = higher_precision_time_bounds(self.bounds, bounds,
                                                     outer)

        if not outer:
            gt = self.coordinates >= bounds[0]
            lt = self.coordinates <= bounds[1]
            b = gt & lt

        elif self.is_monotonic:
            gt = np.where(self.coordinates >= bounds[0])[0]
            lt = np.where(self.coordinates <= bounds[1])[0]
            lo, hi = bounds[0], bounds[1]
            if self.is_descending:
                lt, gt = gt, lt
                lo, hi = hi, lo
            if self.coordinates[gt[0]] != lo:
                gt[0] -= 1
            if self.coordinates[lt[-1]] != hi:
                lt[-1] += 1
            start = max(0, gt[0])
            stop = min(self.size - 1, lt[-1])
            b = slice(start, stop + 1)

        else:
            try:
                gt = self.coordinates >= max(
                    self.coordinates[self.coordinates <= bounds[0]])
            except ValueError as e:
                if self.dtype == np.datetime64:
                    gt = ~np.isnat(self.coordinates)
                else:
                    gt = self.coordinates >= -np.inf
            try:
                lt = self.coordinates <= min(
                    self.coordinates[self.coordinates >= bounds[1]])
            except ValueError as e:
                if self.dtype == np.datetime64:
                    lt = ~np.isnat(self.coordinates)
                else:
                    lt = self.coordinates <= np.inf

            b = gt & lt

        if return_index:
            return self[b], b
        else:
            return self[b]
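A short interactive sketch of the precalculated flags and the simplify/unique helpers defined above (top-level podpac export assumed):

import podpac

# uniform values: simplify() returns an equivalent UniformCoordinates1d
c = podpac.ArrayCoordinates1d([0.0, 1.0, 2.0, 3.0], name='lat')
c.is_uniform             # True
c.start, c.stop, c.step  # (0.0, 3.0, 1.0)
u = c.simplify()         # UniformCoordinates1d

# non-monotonic values: unique() returns sorted, de-duplicated coordinates
c = podpac.ArrayCoordinates1d([2.0, 0.0, 1.0, 1.0], name='lat')
c.is_monotonic           # False
c.unique().coordinates   # array([0., 1., 2.])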
Example #11
class Convolution(UnaryAlgorithm):
    """Compute a general convolution over a source node.

    This node automatically resizes the requested coordinates to avoid edge effects.

    Attributes
    ----------
    source : podpac.Node
        Source node on which convolution will be performed.
    kernel : np.ndarray, optional
        The convolution kernel. This kernel must include the dimensions of source node outputs. The dimensions for this
        array are labelled by `kernel_dims`. Any dimensions not in the source nodes outputs will be summed over.
    kernel_dims : list, optional
        A list of the dimension names for the kernel axes. If a dimension in this list is not present in the
        source coordinates, that kernel axis is removed by summing over it; dimensions in the source that are
        not in this list are not convolved with any kernel.

    kernel_type : str, optional
        If kernel is not defined, kernel_type will create a kernel based on the inputs, and it will have the
        same number of axes as kernel_dims.
        The format for the created kernels is '<kernel_type>, <kernel_size>, <kernel_params>'.
        Any kernel defined in `scipy.signal` as well as `mean` can be used. For example:
        kernel_type = 'mean, 8' or kernel_type = 'gaussian,16,8' are both valid.
        Note: These kernels are automatically normalized such that kernel.sum() == 1
    """

    kernel = ArrayTrait(dtype=float).tag(attr=True)
    kernel_dims = tl.List().tag(attr=True)
    # Takes one or the other, which is hard to implement in a GUI
    kernel_type = tl.Unicode().tag(attr=True)

    def _first_init(self, kernel=None, kernel_dims=None, kernel_type=None, kernel_ndim=None, **kwargs):
        if kernel_dims is None:
            raise TypeError("Convolution expected 'kernel_dims' to be specified when giving a 'kernel' array")

        if kernel is not None and kernel_type is not None:
            raise TypeError("Convolution expected 'kernel' or 'kernel_type', not both")

        if kernel is None:
            if kernel_type is None:
                raise TypeError("Convolution requires 'kernel' array or 'kernel_type' string")
            kernel = self._make_kernel(kernel_type, len(kernel_dims))

        if len(kernel_dims) != len(np.array(kernel).shape):
            raise TypeError(
                "The kernel_dims should contain the same number of dimensions as the number of axes in 'kernel', but len(kernel_dims) {} != len(kernel.shape) {}".format(
                    len(kernel_dims), len(np.array(kernel).shape)
                )
            )

        kwargs["kernel"] = kernel
        kwargs["kernel_dims"] = kernel_dims
        return super(Convolution, self)._first_init(**kwargs)

    @common_doc(COMMON_DOC)
    def _eval(self, coordinates, output=None, _selector=None):
        """Evaluates this nodes using the supplied coordinates.

        Parameters
        ----------
        coordinates : podpac.Coordinates
            {requested_coordinates}
        output : podpac.UnitsDataArray, optional
            {eval_output}
        _selector: callable(coordinates, request_coordinates)
            {eval_selector}

        Returns
        -------
        {eval_return}
        """
        # The size of this kernel is used to figure out the expanded size
        full_kernel = self.kernel

        # expand the coordinates
        # The next line effectively drops extra coordinates, so we have to add those later in case the
        # source is some sort of reduction Node.
        kernel_dims = [kd for kd in coordinates.dims if kd in self.kernel_dims]
        missing_dims = [kd for kd in coordinates.dims if kd not in self.kernel_dims]

        exp_coords = []
        exp_slice = []
        for dim in kernel_dims:
            coord = coordinates[dim]
            s = full_kernel.shape[self.kernel_dims.index(dim)]
            if s == 1 or not isinstance(coord, (UniformCoordinates1d, ArrayCoordinates1d)):
                exp_coords.append(coord)
                exp_slice.append(slice(None))
                continue

            if isinstance(coord, UniformCoordinates1d):
                s_start = -s // 2
                s_end = max(s // 2 - ((s + 1) % 2), 1)
                # The 1e-14 is for floating point error because if endpoint is slightly
                # in front of step * N then the endpoint is excluded
                # ALSO: MUST use size instead of step otherwise floating point error
                # makes the xarray arrays not align. The following HAS to be true:
                #     np.diff(coord.coordinates).mean() == coord.step
                exp_coords.append(
                    UniformCoordinates1d(
                        add_coord(coord.start, s_start * coord.step),
                        add_coord(coord.stop, s_end * coord.step + 1e-14 * coord.step),
                        size=coord.size - s_start + s_end,  # HAVE to use size, see note above
                        **coord.properties
                    )
                )
                exp_slice.append(slice(-s_start, -s_end))
            elif isinstance(coord, ArrayCoordinates1d):
                if not coord.is_monotonic or coord.size < 2:
                    exp_coords.append(coord)
                    exp_slice.append(slice(None))
                    continue

                arr_coords = coord.coordinates
                delta_start = arr_coords[1] - arr_coords[0]
                extra_start = np.arange(arr_coords[0] - delta_start * (s // 2), arr_coords[0], delta_start)
                delta_end = arr_coords[-1] - arr_coords[-2]
                # The 1e-14 is for floating point error to make sure endpoint is included
                extra_end = np.arange(
                    arr_coords[-1] + delta_end, arr_coords[-1] + delta_end * (s // 2) + delta_end * 1e-14, delta_end
                )
                arr_coords = np.concatenate([extra_start, arr_coords, extra_end])
                exp_coords.append(ArrayCoordinates1d(arr_coords, **coord.properties))
                exp_slice.append(slice(extra_start.size, -extra_end.size))

        # Add missing dims back in -- this is needed in case the source is a reduce node.
        exp_coords += [coordinates[d] for d in missing_dims]

        # Create expanded coordinates
        exp_slice = tuple(exp_slice)
        expanded_coordinates = Coordinates(exp_coords, crs=coordinates.crs, validate_crs=False)

        if settings["DEBUG"]:
            self._expanded_coordinates = expanded_coordinates

        # evaluate source using expanded coordinates, convolve, and then slice out original coordinates
        source = self.source.eval(expanded_coordinates, _selector=_selector)

        kernel_dims_u = kernel_dims
        kernel_dims = self.kernel_dims
        sum_dims = [d for d in kernel_dims if d not in source.dims]
        # Sum out the extra dims
        full_kernel = full_kernel.sum(axis=tuple([kernel_dims.index(d) for d in sum_dims]))
        exp_slice = [exp_slice[i] for i in range(len(kernel_dims_u)) if kernel_dims_u[i] not in sum_dims]
        kernel_dims = [d for d in kernel_dims if d in source.dims]

        # Put the kernel axes in the correct order
        # The (if d in kernel_dims) takes care of "output", which can be optionally present
        full_kernel = full_kernel.transpose([kernel_dims.index(d) for d in source.dims if (d in kernel_dims)])

        # Check for extra dimensions in the source and reshape the kernel appropriately
        if any([d not in kernel_dims for d in source.dims if d != "output"]):
            new_axis = []
            new_exp_slice = []
            for d in source.dims:
                if d in kernel_dims:
                    new_axis.append(slice(None))
                    new_exp_slice.append(exp_slice[kernel_dims.index(d)])
                else:
                    new_axis.append(None)
                    new_exp_slice.append(slice(None))
            full_kernel = full_kernel[tuple(new_axis)]
            exp_slice = new_exp_slice

        if np.any(np.isnan(source)):
            method = "direct"
        else:
            method = "auto"

        if ("output" not in source.dims) or ("output" in source.dims and "output" in kernel_dims):
            result = scipy.signal.convolve(source, full_kernel, mode="same", method=method)
        else:
            # source with multiple outputs
            result = np.stack(
                [
                    scipy.signal.convolve(source.sel(output=output), full_kernel, mode="same", method=method)
                    for output in source.coords["output"]
                ],
                axis=source.dims.index("output"),
            )
        result = result[exp_slice]

        if output is None:
            missing_dims = [d for d in coordinates.dims if d not in source.dims]
            output = self.create_output_array(coordinates.drop(missing_dims), data=result)
        else:
            output[:] = result

        return output

    @staticmethod
    def _make_kernel(kernel_type, ndim):
        ktype = kernel_type.split(",")[0]
        size = int(kernel_type.split(",")[1])
        if ktype == "mean":
            k = np.ones([size] * ndim)
        else:
            args = [float(a) for a in kernel_type.split(",")[2:]]
            f = getattr(scipy.signal, ktype)
            k1d = f(size, *args)
            k = k1d.copy()
            for i in range(ndim - 1):
                k = np.tensordot(k, k1d, 0)

        return k / k.sum()
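A minimal usage sketch for this version of the node, assuming the module path below and the Array data source from the other examples:

import numpy as np
import podpac
from podpac.core.algorithm.signal import Convolution  # assumed module path

coords = podpac.Coordinates([podpac.clinspace(0, 1, 32, 'lat'),
                             podpac.clinspace(0, 1, 32, 'lon')])
source = podpac.data.Array(source=np.random.rand(32, 32), coordinates=coords)

# 5x5 mean kernel over lat/lon, built by _make_kernel and normalized so that kernel.sum() == 1
smooth = Convolution(source=source, kernel_type='mean, 5', kernel_dims=['lat', 'lon'])
output = smooth.eval(coords)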
Example #12
class ArrayBase(NoCacheMixin, DataSource):
    """Create a DataSource from an array -- this node is mostly meant for small experiments

    Attributes
    ----------
    source : np.ndarray
        Numpy array containing the source data
    coordinates : podpac.Coordinates
        The coordinates of the source data

    Notes
    -----
    `coordinates` needs to be supplied by the user when instantiating this node.

    This Node is not meant for large arrays, and can cause issues with caching. As such, this Node overrides the
    default cache behavior to have no cache -- its data is already in RAM and caching is not helpful.

    Example
    ---------
    >>> # Create a time series of 10 32x34 images with R-G-B channels
    >>> import numpy as np
    >>> import podpac
    >>> data = np.random.rand(10, 32, 34, 3)
    >>> coords = podpac.Coordinates([podpac.clinspace(1, 10, 10, 'time'),
    ...                              podpac.clinspace(1, 32, 32, 'lat'),
    ...                              podpac.clinspace(1, 34, 34, 'lon')])
    >>> node = podpac.data.Array(source=data, coordinates=coords, outputs=['R', 'G', 'B'])
    >>> output = node.eval(coords)
    """

    source = ArrayTrait().tag(attr=True)
    coordinates = tl.Instance(Coordinates).tag(attr=True)

    _repr_keys = ["shape", "interpolation"]

    @tl.validate("source")
    def _validate_source(self, d):
        try:
            d["value"].astype(float)
        except:
            raise ValueError("Array 'source' data must be numerical")
        return d["value"]

    def _first_init(self, **kwargs):
        # If the coordinates were supplied explicitly, they may need to be deserialized.
        if isinstance(kwargs.get("coordinates"), OrderedDict):
            kwargs["coordinates"] = Coordinates.from_definition(
                kwargs["coordinates"])
        elif isinstance(kwargs.get("coordinates"), string_types):
            kwargs["coordinates"] = Coordinates.from_json(
                kwargs["coordinates"])

        return kwargs

    @property
    def shape(self):
        """Returns the shape of :attr:`self.source`

        Returns
        -------
        tuple
            Shape of :attr:`self.source`
        """
        return self.source.shape

    @common_doc(COMMON_DATA_DOC)
    def get_data(self, coordinates, coordinates_index):
        """{get_data}"""
        d = self.create_output_array(coordinates,
                                     data=self.source[coordinates_index])
        return d

    def set_coordinates(self, value):
        """ Not needed. """
        pass
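Because _first_init deserializes a coordinates attribute given as a definition dict or JSON string, the serialized form round-trips. A small sketch, assuming the public podpac.data.Array wrapper from the docstring above and a Coordinates.json property (the inverse of the Coordinates.from_json call used in _first_init):

import numpy as np
import podpac

coords = podpac.Coordinates([podpac.clinspace(1, 32, 32, 'lat'),
                             podpac.clinspace(1, 34, 34, 'lon')])

# the coordinates attr may arrive already serialized; _first_init turns it back into Coordinates
node = podpac.data.Array(source=np.random.rand(32, 34), coordinates=coords.json)
assert node.coordinates == coords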
Example #13
class Convolution(Algorithm):
    """Compute a general convolution over a source node.

    This node automatically resizes the requested coordinates to avoid edge effects.
    
    Attributes
    ----------
    source : podpac.Node
        Source node on which convolution will be performed. 
    kernel : np.ndarray
        The convolution kernel
    kernel_ndim : int
        Number of dimensions of the kernel
    kernel_type : str, optional
        If kernel is not defined, kernel_type will create a kernel based on the inputs. 
        The format for the created kernels is '<kernel_type>, <kernel_size>, <kernel_params>'.
        Any kernel defined in `scipy.signal` as well as `mean` can be used. For example:
        kernel_type = 'mean, 8' or kernel_type = 'gaussian,16,8' are both valid. 
        Note: These kernels are automatically normalized such that kernel.sum() == 1
    """

    source = tl.Instance(Node)
    kernel = ArrayTrait(dtype=float).tag(attr=True)
    kernel_type = tl.Unicode().tag(attr=True)
    kernel_ndim = tl.Int().tag(attr=True)

    _expanded_coordinates = tl.Instance(Coordinates)
    _full_kernel = ArrayTrait(dtype=float)

    @common_doc(COMMON_DOC)
    @node_eval
    def eval(self, coordinates, output=None):
        """Evaluates this nodes using the supplied coordinates.
        
        Parameters
        ----------
        coordinates : podpac.Coordinates
            {requested_coordinates}
        output : podpac.UnitsDataArray, optional
            {eval_output}
        
        Returns
        -------
        {eval_return}
        """
        # This should be aligned with coordinates' dimension order
        # The size of this kernel is used to figure out the expanded size
        self._full_kernel = self.get_full_kernel(coordinates)

        if len(self._full_kernel.shape) != len(coordinates.shape):
            raise ValueError(
                "shape mismatch, kernel does not match source data (%s != %s)"
                % (self._full_kernel.shape, coordinates.shape))

        # expand the coordinates
        exp_coords = []
        exp_slice = []
        for dim, s in zip(coordinates.dims, self._full_kernel.shape):
            coord = coordinates[dim]
            if s == 1 or not isinstance(coord, UniformCoordinates1d):
                exp_coords.append(coord)
                exp_slice.append(slice(None))
                continue

            s_start = -s // 2
            s_end = s // 2 - ((s + 1) % 2)
            # The 1e-07 is for floating point error because if endpoint is slightly
            # in front of step * N then the endpoint is excluded
            exp_coords.append(
                UniformCoordinates1d(
                    add_coord(coord.start, s_start * coord.step),
                    add_coord(coord.stop,
                              s_end * coord.step + 1e-07 * coord.step),
                    coord.step, **coord.properties))
            exp_slice.append(slice(-s_start, -s_end))
        exp_slice = tuple(exp_slice)
        self._expanded_coordinates = Coordinates(exp_coords)

        # evaluate source using expanded coordinates, convolve, and then slice out original coordinates
        source = self.source.eval(self._expanded_coordinates)

        if np.any(np.isnan(source)):
            method = 'direct'
        else:
            method = 'auto'

        result = scipy.signal.convolve(source,
                                       self._full_kernel,
                                       mode='same',
                                       method=method)
        result = result[exp_slice]

        if output is None:
            output = self.create_output_array(coordinates, data=result)
        else:
            output[:] = result

        return output

    @tl.default('kernel')
    def _kernel_default(self):
        kernel_type = self.kernel_type
        if not kernel_type:
            raise ValueError("Need to supply either 'kernel' as a numpy array,"
                             " or 'kernel_type' as a string.")
        ktype = kernel_type.split(',')[0]
        size = int(kernel_type.split(',')[1])
        args = [float(a) for a in kernel_type.split(',')[2:]]
        if ktype == 'mean':
            k = np.ones([size] * self.kernel_ndim)
        else:
            f = getattr(scipy.signal, ktype)
            k1d = f(size, *args)
            k = k1d.copy()
            for i in range(self.kernel_ndim - 1):
                k = np.tensordot(k, k1d, 0)

        return k / k.sum()

    def get_full_kernel(self, coordinates):
        """{full_kernel}
        """
        return self.kernel
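For reference, the kernel that _kernel_default builds for kernel_type='gaussian,16,8' with kernel_ndim=2 can be reproduced directly (scipy.signal.windows.gaussian is used here; the node itself looks the window up via getattr(scipy.signal, ktype)):

import numpy as np
import scipy.signal

k1d = scipy.signal.windows.gaussian(16, 8.0)  # '<kernel_type>, <kernel_size>, <kernel_params>'
k = np.tensordot(k1d, k1d, 0)                 # expand the 1d window to kernel_ndim=2 axes
k = k / k.sum()                               # normalized so that kernel.sum() == 1
assert k.shape == (16, 16) and abs(k.sum() - 1.0) < 1e-12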
Example #14
class RotatedCoordinates(StackedCoordinates):
    """
    A grid of rotated latitude and longitude coordinates.

    RotatedCoordinates are parameterized spatial coordinates defined by a shape, rotation angle, upper left corner, and
    step size. The lower right corner can be specified instead of the step. RotatedCoordinates can also be converted
    to/from GDAL geotransform.

    Parameters
    ----------
    shape : tuple
        shape (m, n) of the grid.
    theta : float
        rotation angle, in radians
    origin : np.ndarray(shape=(2,), dtype=float)
        origin coordinates (position [0, 0])
    corner : np.ndarray(shape=(2,), dtype=float)
        opposing corner coordinates (position [m-1, n-1])
    step : np.ndarray(shape=(2,), dtype=float)
        Rotated distance between points in the grid, in each dimension. This is equivalent to the scaling of the
        affine transformation used to calculate the coordinates.
    dims : tuple
        Tuple of dimension names.
    coords : dict-like
        xarray coordinates (container of coordinate arrays)
    coordinates : tuple
        Tuple of 2d coordinate values in each dimension.
    """

    shape = tl.Tuple(tl.Integer(), tl.Integer(), read_only=True)
    theta = tl.Float(read_only=True)
    origin = ArrayTrait(shape=(2,), dtype=float, read_only=True)
    step = ArrayTrait(shape=(2,), dtype=float, read_only=True)
    dims = tl.Tuple(tl.Unicode(), tl.Unicode(), read_only=True)

    def __init__(self, shape=None, theta=None, origin=None, step=None, corner=None, dims=None):
        """
        Create a grid of rotated coordinates from a `shape`, `theta`, `origin`, and `step` or `corner`.

        Parameters
        ----------
        shape : tuple
            shape (m, n) of the grid.
        theta : float
            rotation angle, in radians
        origin : np.ndarray(shape=(2,), dtype=float)
            origin coordinates
        corner : np.ndarray(shape=(2,), dtype=float)
            opposing corner coordinates (corner or step required)
        step : np.ndarray(shape=(2,), dtype=float)
            Scaling, ie rotated distance between points in the grid, in each dimension. (corner or step required)
        dims : tuple (required)
            tuple of dimension names ('lat', 'lon', 'time', or 'alt').
        """

        self.set_trait("shape", shape)
        self.set_trait("theta", theta)
        self.set_trait("origin", origin)
        if step is None:
            deg = np.rad2deg(theta)
            a = ~rasterio.Affine.rotation(deg) * ~rasterio.Affine.translation(*origin)
            d = np.array(a * corner) - np.array(a * origin)
            step = d / np.array([shape[0] - 1, shape[1] - 1])
        self.set_trait("step", step)
        if dims is not None:
            self.set_trait("dims", dims)

    @tl.validate("dims")
    def _validate_dims(self, d):
        val = d["value"]
        for dim in val:
            if dim not in ["lat", "lon"]:
                raise ValueError("RotatedCoordinates dims must be 'lat' or 'lon', not '%s'" % dim)
        if val[0] == val[1]:
            raise ValueError("Duplicate dimension '%s'" % val[0])
        return val

    @tl.validate("shape")
    def _validate_shape(self, d):
        val = d["value"]
        if val[0] <= 0 or val[1] <= 0:
            raise ValueError("Invalid shape %s, shape must be positive" % (val,))
        return val

    @tl.validate("step")
    def _validate_step(self, d):
        val = d["value"]
        if val[0] == 0 or val[1] == 0:
            raise ValueError("Invalid step %s, step cannot be 0" % val)
        return val

    def _set_name(self, value):
        self._set_dims(value.split("_"))

    def _set_dims(self, dims):
        self.set_trait("dims", dims)

    # ------------------------------------------------------------------------------------------------------------------
    # Alternate Constructors
    # ------------------------------------------------------------------------------------------------------------------

    @classmethod
    def from_geotransform(cls, geotransform, shape, dims=None):
        affine = rasterio.Affine.from_gdal(*geotransform)
        origin = affine.f, affine.c
        deg = affine.rotation_angle
        scale = ~affine.rotation(deg) * ~affine.translation(*origin) * affine
        step = np.array([scale.e, scale.a])
        origin = affine.f + step[0] / 2, affine.c + step[1] / 2
        return cls(shape, np.deg2rad(deg), origin, step, dims=dims)

    @classmethod
    def from_definition(cls, d):
        """
        Create RotatedCoordinates from a rotated coordinates definition.

        Arguments
        ---------
        d : dict
            rotated coordinates definition

        Returns
        -------
        :class:`RotatedCoordinates`
            rotated coordinates object

        See Also
        --------
        definition
        """

        if "shape" not in d:
            raise ValueError('RotatedCoordinates definition requires "shape" property')
        if "theta" not in d:
            raise ValueError('RotatedCoordinates definition requires "theta" property')
        if "origin" not in d:
            raise ValueError('RotatedCoordinates definition requires "origin" property')
        if "step" not in d and "corner" not in d:
            raise ValueError('RotatedCoordinates definition requires "step" or "corner" property')
        if "dims" not in d:
            raise ValueError('RotatedCoordinates definition requires "dims" property')

        shape = d["shape"]
        theta = d["theta"]
        origin = d["origin"]
        kwargs = {k: v for k, v in d.items() if k not in ["shape", "theta", "origin"]}
        return RotatedCoordinates(shape, theta, origin, **kwargs)

    # ------------------------------------------------------------------------------------------------------------------
    # standard methods
    # ------------------------------------------------------------------------------------------------------------------

    def __repr__(self):
        return "%s(%s): Origin%s, Corner%s, rad[%.4f], shape%s" % (
            self.__class__.__name__,
            self.dims,
            self.origin,
            self.corner,
            self.theta,
            self.shape,
        )

    def __eq__(self, other):
        if not isinstance(other, RotatedCoordinates):
            return False

        if self.dims != other.dims:
            return False

        if self.shape != other.shape:
            return False

        if self.affine != other.affine:
            return False

        return True

    def __getitem__(self, index):
        if isinstance(index, slice):
            index = index, slice(None)

        if isinstance(index, tuple) and isinstance(index[0], slice) and isinstance(index[1], slice):
            I = np.arange(self.shape[0])[index[0]]
            J = np.arange(self.shape[1])[index[1]]
            origin = self.affine * [I[0], J[0]]
            step = self.step * [index[0].step or 1, index[1].step or 1]
            shape = I.size, J.size
            return RotatedCoordinates(shape, self.theta, origin, step, dims=self.dims)

        else:
            # convert to raw StackedCoordinates (which creates the _coords attribute that the indexing requires)
            return StackedCoordinates(self.coordinates, dims=self.dims).__getitem__(index)

    # ------------------------------------------------------------------------------------------------------------------
    # Properties
    # ------------------------------------------------------------------------------------------------------------------

    @property
    def _coords(self):
        raise RuntimeError("RotatedCoordinates do not have a _coords attribute.")

    @property
    def ndim(self):
        return 2

    @property
    def deg(self):
        """ :float: rotation angle in degrees. """
        return np.rad2deg(self.theta)

    @property
    def affine(self):
        """:rasterio.Affine: affine transformation for computing the coordinates from indexing values. Contains the
        translation, rotation, and scaling.
        """
        t = rasterio.Affine.translation(*self.origin)
        r = rasterio.Affine.rotation(self.deg)
        s = rasterio.Affine.scale(*self.step)
        return t * r * s

    @property
    def corner(self):
        """ :array: lower right corner. """
        return np.array(self.affine * np.array([self.shape[0] - 1, self.shape[1] - 1]))

    @property
    def geotransform(self):
        """:tuple: GDAL geotransform.
        Note: This property may not provide the correct order of lat/lon in the geotransform as this class does not
        always have knowledge of the dimension order of the specified dataset. As such it always supplies
        geotransforms assuming that dims = ['lat', 'lon']
        """
        t = rasterio.Affine.translation(self.origin[1] - self.step[1] / 2, self.origin[0] - self.step[0] / 2)
        r = rasterio.Affine.rotation(self.deg)
        s = rasterio.Affine.scale(*self.step[::-1])
        return (t * r * s).to_gdal()

    @property
    def coordinates(self):
        """ :tuple: computed coordinave values for each dimension. """
        I = np.arange(self.shape[0])
        J = np.arange(self.shape[1])
        c1, c2 = self.affine * np.meshgrid(I, J)
        return c1.T, c2.T

    @property
    def definition(self):
        d = OrderedDict()
        d["dims"] = self.dims
        d["shape"] = self.shape
        d["theta"] = self.theta
        d["origin"] = self.origin
        d["step"] = self.step
        return d

    @property
    def full_definition(self):
        return self.definition

    # ------------------------------------------------------------------------------------------------------------------
    # Methods
    # ------------------------------------------------------------------------------------------------------------------

    def copy(self):
        """
        Make a copy of the rotated coordinates.

        Returns
        -------
        :class:`RotatedCoordinates`
            Copy of the rotated coordinates.
        """
        return RotatedCoordinates(self.shape, self.theta, self.origin, self.step, dims=self.dims)

    def get_area_bounds(self, boundary):
        """Get coordinate area bounds, including boundary information, for each unstacked dimension.

        Arguments
        ---------
        boundary : dict
            dictionary of boundary offsets for each unstacked dimension. Point dimensions can be omitted.

        Returns
        -------
        area_bounds : dict
            Dictionary of (low, high) coordinates area_bounds in each unstacked dimension
        """

        # TODO the boundary offsets need to be rotated
        warnings.warn("RotatedCoordinates area_bounds are not yet correctly implemented.")
        return super(RotatedCoordinates, self).get_area_bounds(boundary)

    def select(self, bounds, outer=False, return_index=False):
        """
        Get the coordinate values that are within the given bounds in all dimensions.

        *Note: you should not generally need to call this method directly.*

        Parameters
        ----------
        bounds : dict
            dictionary of dim -> (low, high) selection bounds
        outer : bool, optional
            If True, do *outer* selections. Default False.
        return_index : bool, optional
            If True, return index for the selections in addition to coordinates. Default False.

        Returns
        -------
        selection : :class:`RotatedCoordinates`, :class:`DependentCoordinates`, :class:`StackedCoordinates`
            rotated, dependent, or stacked coordinates consisting of the selection in all dimensions.
        selection_index : list
            index for the selected coordinates, only if ``return_index`` is True.
        """

        # TODO return RotatedCoordinates when possible
        return super(RotatedCoordinates, self).select(bounds, outer=outer, return_index=return_index)
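# A minimal standalone sketch of the affine composition used by the `affine` and
# `coordinates` properties above. It assumes rasterio is installed; the origin,
# step, and rotation values are hypothetical.
import numpy as np
import rasterio

origin = (10.0, 20.0)         # coordinate value of index (0, 0)
step = (1.0, 2.0)             # step size along each array axis
theta = np.deg2rad(30.0)      # rotation angle in radians

t = rasterio.Affine.translation(*origin)
r = rasterio.Affine.rotation(np.rad2deg(theta))
s = rasterio.Affine.scale(*step)
affine = t * r * s            # translation * rotation * scale, as in the property

# Map index (i, j) = (3, 4) into coordinate space; the `coordinates` property does
# this for the full meshgrid of indices and transposes the result.
print(affine * (3, 4))

I, J = np.arange(3), np.arange(4)
c1, c2 = affine * np.meshgrid(I, J)
print(c1.T.shape, c2.T.shape)  # (3, 4) (3, 4)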
Example #15
0
class PolarCoordinates(StackedCoordinates):
    """
    Parameterized spatial coordinates defined by a center, radius coordinates, and theta coordinates.

    Attributes
    ----------
    center
    radius
    theta
    """

    center = ArrayTrait(shape=(2, ), dtype=float, read_only=True)
    radius = tl.Instance(Coordinates1d, read_only=True)
    theta = tl.Instance(Coordinates1d, read_only=True)
    dims = tl.Tuple(tl.Unicode(), tl.Unicode(), read_only=True)

    def __init__(self, center, radius, theta=None, theta_size=None, dims=None):

        # radius
        if not isinstance(radius, Coordinates1d):
            radius = ArrayCoordinates1d(radius)

        # theta
        if theta is not None and theta_size is not None:
            raise TypeError(
                "PolarCoordinates expected theta or theta_size, not both.")
        if theta is None and theta_size is None:
            raise TypeError("PolarCoordinates requires theta or theta_size.")

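        # theta_size evenly samples [0, 2*pi), dropping the duplicate endpoint at 2*pi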
        if theta_size is not None:
            theta = UniformCoordinates1d(start=0,
                                         stop=2 * np.pi,
                                         size=theta_size + 1)[:-1]
        elif not isinstance(theta, Coordinates1d):
            theta = ArrayCoordinates1d(theta)

        self.set_trait("center", center)
        self.set_trait("radius", radius)
        self.set_trait("theta", theta)
        if dims is not None:
            self.set_trait("dims", dims)

    @tl.validate("dims")
    def _validate_dims(self, d):
        val = d["value"]
        for dim in val:
            if dim not in ["lat", "lon"]:
                raise ValueError(
                    "PolarCoordinates dims must be 'lat' or 'lon', not '%s'" %
                    dim)
        if val[0] == val[1]:
            raise ValueError("Duplicate dimension '%s'" % val[0])
        return val

    @tl.validate("radius")
    def _validate_radius(self, d):
        val = d["value"]
        if np.any(val.coordinates <= 0):
            raise ValueError("PolarCoordinates radius must all be positive")
        return val

    def _set_name(self, value):
        self._set_dims(value.split("_"))

    def _set_dims(self, dims):
        self.set_trait("dims", dims)

    # ------------------------------------------------------------------------------------------------------------------
    # Alternate Constructors
    # ------------------------------------------------------------------------------------------------------------------

    @classmethod
    def from_definition(cls, d):
        if "center" not in d:
            raise ValueError(
                'PolarCoordinates definition requires "center" property')
        if "radius" not in d:
            raise ValueError(
                'PolarCoordinates definition requires "radius" property')
        if "theta" not in d and "theta_size" not in d:
            raise ValueError(
                'PolarCoordinates definition requires "theta" or "theta_size" property'
            )
        if "dims" not in d:
            raise ValueError(
                'PolarCoordinates definition requires "dims" property')

        # center
        center = d["center"]

        # radius
        if isinstance(d["radius"], list):
            radius = ArrayCoordinates1d(d["radius"])
        elif "values" in d["radius"]:
            radius = ArrayCoordinates1d.from_definition(d["radius"])
        elif "start" in d["radius"] and "stop" in d["radius"] and (
                "step" in d["radius"] or "size" in d["radius"]):
            radius = UniformCoordinates1d.from_definition(d["radius"])
        else:
            raise ValueError(
                "Could not parse radius coordinates definition with keys %s" %
                d.keys())

        # theta
        if "theta" not in d:
            theta = None
        elif isinstance(d["theta"], list):
            theta = ArrayCoordinates1d(d["theta"])
        elif "values" in d["theta"]:
            theta = ArrayCoordinates1d.from_definition(d["theta"])
        elif "start" in d["theta"] and "stop" in d["theta"] and (
                "step" in d["theta"] or "size" in d["theta"]):
            theta = UniformCoordinates1d.from_definition(d["theta"])
        else:
            raise ValueError(
                "Could not parse theta coordinates definition with keys %s" %
                d.keys())

        kwargs = {
            k: v
            for k, v in d.items() if k not in ["center", "radius", "theta"]
        }
        return PolarCoordinates(center, radius, theta, **kwargs)

    # ------------------------------------------------------------------------------------------------------------------
    # standard methods
    # ------------------------------------------------------------------------------------------------------------------

    def __repr__(self):
        return "%s(%s): center%s, shape%s" % (
            self.__class__.__name__, self.dims, self.center, self.shape)

    def __eq__(self, other):
        if not isinstance(other, PolarCoordinates):
            return False

        if not np.allclose(self.center, other.center):
            return False

        if self.radius != other.radius:
            return False

        if self.theta != other.theta:
            return False

        return True

    def __getitem__(self, index):
        if isinstance(index, slice):
            index = index, slice(None)

        if isinstance(index, tuple) and isinstance(
                index[0], slice) and isinstance(index[1], slice):
            return PolarCoordinates(self.center,
                                    self.radius[index[0]],
                                    self.theta[index[1]],
                                    dims=self.dims)
        else:
            # convert to raw StackedCoordinates (which creates the _coords attribute that the indexing requires)
            return StackedCoordinates(self.coordinates,
                                      dims=self.dims).__getitem__(index)

    # ------------------------------------------------------------------------------------------------------------------
    # Properties
    # ------------------------------------------------------------------------------------------------------------------

    @property
    def _coords(self):
        raise RuntimeError("PolarCoordinates do not have a _coords attribute.")

    @property
    def ndim(self):
        return 2

    @property
    def shape(self):
        return self.radius.size, self.theta.size

    @property
    def xdims(self):
        return ("r", "t")

    @property
    def coordinates(self):
        r, theta = np.meshgrid(self.radius.coordinates, self.theta.coordinates)
        lat = r * np.sin(theta) + self.center[0]
        lon = r * np.cos(theta) + self.center[1]
        return lat.T, lon.T

    @property
    def definition(self):
        d = OrderedDict()
        d["dims"] = self.dims
        d["center"] = self.center
        d["radius"] = self.radius.definition
        d["theta"] = self.theta.definition
        return d

    @property
    def full_definition(self):
        return self.definition

    # ------------------------------------------------------------------------------------------------------------------
    # Methods
    # ------------------------------------------------------------------------------------------------------------------

    def copy(self):
        return PolarCoordinates(self.center,
                                self.radius,
                                self.theta,
                                dims=self.dims)
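# A minimal NumPy sketch of the mapping performed by `PolarCoordinates.coordinates`
# above; the center, radius, and theta_size values are hypothetical.
import numpy as np

center = np.array([40.0, -105.0])    # (lat, lon) center
radius = np.array([1.0, 2.0, 3.0])   # radial coordinates
theta_size = 8

# equivalent to the theta_size branch of __init__: sample [0, 2*pi) without the
# duplicate endpoint at 2*pi
theta = np.linspace(0, 2 * np.pi, theta_size, endpoint=False)

r, t = np.meshgrid(radius, theta)
lat = r * np.sin(t) + center[0]
lon = r * np.cos(t) + center[1]

# transposed so the axes are (radius, theta), matching the shape property
print(lat.T.shape, lon.T.shape)      # (3, 8) (3, 8)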
Example #16
0
class Compositor(Node):
    """Compositor
    
    Attributes
    ----------
    cache_native_coordinates : Bool
        Default is True. If native_coordinates are requested by the user, it may take a long time to calculate if the
        Compositor points to many sources. The result is relatively small and is cached by default. Caching may not be
        desired if the data sources change or are updated.
    interpolation : str, dict, optional
        {interpolation}
    is_source_coordinates_complete : Bool
        Default is False. The source_coordinates do not have to completely describe the source. For example, the source
        coordinates could include the year-month-day of the source, but the actual source also has hour-minute-second
        information. In that case, source_coordinates is incomplete. This flag is used to automatically construct
        native_coordinates.
    n_threads : int
        Default is 10 -- used when threaded is True.
        NASA data servers seem to have a hard limit of 10 simultaneous requests, which determined the default value.
    shared_coordinates : :class:`podpac.Coordinates`, optional
        Coordinates that are shared amongst all of the composited sources
    source : str
        The source is used for a unique name to cache composited products.
    source_coordinates : :class:`podpac.Coordinates`, optional
        Coordinates that make each source unique. This is used for subsetting which sources to evaluate based on the
        user-requested coordinates. It is an optimization.
    sources : :class:`np.ndarray`
        An array of sources. This is a numpy array as opposed to a list so that boolean indexing may be used to
        subselect the nodes that will be evaluated.
    threaded : bool, optional
        Default is False.
        When threaded is False, the compositor stops evaluating sources once the output is completely filled.
        When threaded is True, the compositor must evaluate every source.
        The result is the same, but note that because of this, threaded=False could be faster than threaded=True,
        especially if n_threads is low. For example, threaded with n_threads=1 could be much slower than non-threaded
        if the output is completely filled after the first few sources.
    
    Notes
    -----
    Developers of new Compositor nodes need to implement the `composite` method.
    """
    shared_coordinates = tl.Instance(Coordinates, allow_none=True)
    source_coordinates = tl.Instance(Coordinates, allow_none=True)
    is_source_coordinates_complete = tl.Bool(
        False,
        help=("This allows some optimizations but assumes that a node's "
              "native_coordinates=source_coordinate + shared_coordinate "
              "IN THAT ORDER"))

    source = tl.Unicode().tag(attr=True)
    sources = ArrayTrait(ndim=1)
    cache_native_coordinates = tl.Bool(True)

    interpolation = interpolation_trait(default_value=None)

    threaded = tl.Bool(False)
    n_threads = tl.Int(10)

    @tl.default('source')
    def _source_default(self):
        source = []
        for s in self.sources[:3]:
            source.append(str(s))
        return '_'.join(source)

    @tl.default('source_coordinates')
    def _source_coordinates_default(self):
        return self.get_source_coordinates()

    def get_source_coordinates(self):
        """
        Returns the coordinates describing each source.
        This may be implemented by derived classes, and is an optimization that allows evaluation of a subset of sources.
        
        Returns
        -------
        :class:`podpac.Coordinates`
            Coordinates describing each source.
        """
        return None

    @tl.default('shared_coordinates')
    def _shared_coordinates_default(self):
        return self.get_shared_coordinates()

    def get_shared_coordinates(self):
        """Coordinates shared by each source.
        
        Raises
        ------
        NotImplementedError
            Raised unless a derived class implements this method.
        """
        raise NotImplementedError()

    def select_sources(self, coordinates):
        """Downselect compositor sources based on requested coordinates.
        
        This is used during the :meth:`eval` process as an optimization
        when :attr:`source_coordinates` are not pre-defined.
        
        Parameters
        ----------
        coordinates : :class:`podpac.Coordinates`
            Coordinates to evaluate at compositor sources
        
        Returns
        -------
        :class:`np.ndarray`
            Array of downselected sources
        """

        # if source coordinates are defined, use intersect
        if self.source_coordinates is not None:
            # intersecting sources only
            try:
                _, I = self.source_coordinates.intersect(coordinates,
                                                         outer=True,
                                                         return_indices=True)

            except Exception:  # Likely non-monotonic coordinates
                _, I = self.source_coordinates.intersect(coordinates,
                                                         outer=False,
                                                         return_indices=True)

            src_subset = self.sources[I]

        # no downselection possible - get all sources compositor
        else:
            src_subset = self.sources

        return src_subset

    def composite(self, outputs, result=None):
        """Implements the rules for compositing multiple sources together.
        
        Parameters
        ----------
        outputs : list
            A list of outputs that need to be composited together
        result : UnitDataArray, optional
            An optional pre-filled array may be supplied, otherwise the output will be allocated.
        
        Raises
        ------
        NotImplementedError
        """
        raise NotImplementedError()

    def iteroutputs(self, coordinates):
        """Summary
        
        Parameters
        ----------
        coordinates : :class:`podpac.Coordinates`
            Coordinates to evaluate at compositor sources
        
        Yields
        ------
        :class:`podpac.core.units.UnitsDataArray`
            Output from source node eval method
        """
        # downselect sources based on coordinates
        src_subset = self.select_sources(coordinates)

        if len(src_subset) == 0:
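            # no sources intersect the requested coordinates; yield a single output
            # array allocated for the requested coordinates and stop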
            yield self.create_output_array(coordinates)
            return

        # Set the interpolation properties for sources
        if self.interpolation is not None:
            for s in src_subset.ravel():
                if trait_is_defined(self, 'interpolation'):
                    s.interpolation = self.interpolation

        # Optimization: if coordinates complete and source coords is 1D,
        # set native_coordinates unless they are set already
        # WARNING: this assumes
        #              native_coords = source_coords + shared_coordinates
        #         NOT  native_coords = shared_coords + source_coords
        if self.is_source_coordinates_complete and self.source_coordinates.ndim == 1:
            coords_subset = list(
                self.source_coordinates.intersect(
                    coordinates, outer=True).coords.values())[0]
            coords_dim = list(self.source_coordinates.dims)[0]
            for s, c in zip(src_subset, coords_subset):
                nc = merge_dims([
                    Coordinates(np.atleast_1d(c), dims=[coords_dim]),
                    self.shared_coordinates
                ])

                if not trait_is_defined(s, 'native_coordinates'):
                    s.native_coordinates = nc

        if self.threaded:
            # TODO pool of pre-allocated scratch space
            # TODO: docstring?
            def f(src):
                return src.eval(coordinates)

            pool = ThreadPool(processes=self.n_threads)
            results = [pool.apply_async(f, [src]) for src in src_subset]

            for src, res in zip(src_subset, results):
                yield res.get()
                #src._output = None # free up memory

        else:
            output = None  # scratch space
            for src in src_subset:
                output = src.eval(coordinates, output)
                yield output
                #output[:] = np.nan

    @node_eval
    @common_doc(COMMON_COMPOSITOR_DOC)
    def eval(self, coordinates, output=None):
        """Evaluates this nodes using the supplied coordinates. 

        Parameters
        ----------
        coordinates : :class:`podpac.Coordinates`
            {requested_coordinates}
        output : podpac.UnitsDataArray, optional
            {eval_output}
            
        Returns
        -------
        {eval_return}
        """

        self._requested_coordinates = coordinates

        outputs = self.iteroutputs(coordinates)
        output = self.composite(outputs, output)
        return output

    def find_coordinates(self):
        """
        Get the available native coordinates for the Node.

        Returns
        -------
        coords_list : list
            list of available coordinates (Coordinate objects)
        """

        raise NotImplementedError("TODO")

    @property
    @common_doc(COMMON_COMPOSITOR_DOC)
    def base_definition(self):
        """Base node defintion for Compositor nodes. 
        
        Returns
        -------
        {definition_return}
        """
        d = super(Compositor, self).base_definition
        d['sources'] = self.sources
        d['interpolation'] = self.interpolation
        return d
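# A minimal standalone sketch of the threaded fan-out used by `Compositor.iteroutputs`
# above. `fake_source_eval` stands in for a source node's eval() and is purely
# illustrative; only the ThreadPool pattern mirrors the code.
from multiprocessing.pool import ThreadPool

def fake_source_eval(src):
    # placeholder for src.eval(coordinates)
    return src * 10

sources = [1, 2, 3, 4]
pool = ThreadPool(processes=2)
results = [pool.apply_async(fake_source_eval, [src]) for src in sources]

# results come back in submission order, matching zip(src_subset, results)
outputs = [res.get() for res in results]
pool.close()
pool.join()
print(outputs)  # [10, 20, 30, 40]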