Example #1
    def get_coordinates(self):
        """
        Get the full WCS grid.
        """

        metadata = self.client.contents[self.layer]

        # TODO select correct boundingbox by crs

        # coordinates
        w, s, e, n = metadata.boundingBoxWGS84
        low = metadata.grid.lowlimits
        high = metadata.grid.highlimits
        xsize = int(high[0]) - int(low[0])
        ysize = int(high[1]) - int(low[1])

        coords = []
        coords.append(UniformCoordinates1d(s, n, size=ysize, name="lat"))
        coords.append(UniformCoordinates1d(w, e, size=xsize, name="lon"))

        if metadata.timepositions:
            coords.append(
                ArrayCoordinates1d(metadata.timepositions, name="time"))

        if metadata.timelimits:
            raise NotImplementedError("TODO")

        return Coordinates(coords, crs=self.crs)
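
For reference, the metadata attributes read above come straight from OWSLib; a minimal sketch of inspecting them directly is shown below (the service URL and layer name are placeholders, not values from the source):

from owslib.wcs import WebCoverageService

# Hypothetical WCS endpoint and layer id, used only to show the attributes
# that get_coordinates() reads from the OWSLib metadata object.
client = WebCoverageService("https://example.com/wcs", version="1.0.0")
metadata = client.contents["my_layer"]
print(metadata.boundingBoxWGS84)                          # (w, s, e, n)
print(metadata.grid.lowlimits, metadata.grid.highlimits)  # grid index bounds
print(metadata.timepositions)                             # optional time steps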
Example #2
    def get_coordinates(self):
        """
        Get the full WCS grid.
        """

        metadata = self.client.contents[self.layer]

        # coordinates
        bbox = metadata.boundingBoxWGS84
        crs = "EPSG:4326"
        logging.debug("WCS available boundingboxes: {}".format(
            metadata.boundingboxes))
        for bboxes in metadata.boundingboxes:
            if bboxes["nativeSrs"] == self.crs:
                bbox = bboxes["bbox"]
                crs = self.crs
                break

        low = metadata.grid.lowlimits
        high = metadata.grid.highlimits
        xsize = int(high[0]) - int(low[0])
        ysize = int(high[1]) - int(low[1])

        # Based on https://www.ctps.org/geoserver/web/wicket/bookmarkable/org.geoserver.wcs.web.demo.WCSRequestBuilder;jsessionid=9E2AA99F95410C694D05BA609F25527C?0
        # The above link points to a geoserver implementation, which is the reference implementation.
        # WCS version 1.0.0 always has order lon/lat while version 1.1.1 actually follows the CRS
        if self.version == "1.0.0":
            rbbox = {
                "lat": [bbox[1], bbox[3], ysize],
                "lon": [bbox[0], bbox[2], xsize]
            }
        else:
            rbbox = resolve_bbox_order(bbox, crs, (xsize, ysize))

        coords = []
        coords.append(
            UniformCoordinates1d(rbbox["lat"][0],
                                 rbbox["lat"][1],
                                 size=rbbox["lat"][2],
                                 name="lat"))
        coords.append(
            UniformCoordinates1d(rbbox["lon"][0],
                                 rbbox["lon"][1],
                                 size=rbbox["lon"][2],
                                 name="lon"))

        if metadata.timepositions:
            coords.append(
                ArrayCoordinates1d(metadata.timepositions, name="time"))

        if metadata.timelimits:
            raise NotImplementedError("TODO")

        return Coordinates(coords, crs=crs)
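
The version branch above exists because WCS 1.0.0 always reports the bounding box in lon/lat order, while 1.1.1 follows the native axis order of the CRS. Below is a minimal sketch of what a helper in the spirit of resolve_bbox_order might do; it is an illustration only, not podpac's implementation, and the lat-first CRS list is an assumption:

def resolve_bbox_order_sketch(bbox, crs, size):
    # Assumption: treat these geographic CRSs as lat/lon axis order and
    # everything else as lon/lat (x/y); a real helper would query the CRS.
    lat_first = crs.upper() in ("EPSG:4326", "EPSG:4269")
    if lat_first:
        return {"lat": [bbox[0], bbox[2], size[1]], "lon": [bbox[1], bbox[3], size[0]]}
    return {"lat": [bbox[1], bbox[3], size[1]], "lon": [bbox[0], bbox[2], size[0]]}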
Example #3
    def select_coordinates(self, udims, source_coordinates,
                           source_coordinates_index, eval_coordinates):
        """
        {interpolator_select}
        """
        new_coords = []
        new_coords_idx = []

        # iterate over the source coordinate dims in case they are stacked
        for src_dim, idx in zip(source_coordinates, source_coordinates_index):

            # TODO: handle stacked coordinates
            if isinstance(source_coordinates[src_dim], StackedCoordinates):
                raise InterpolationException(
                    'NearestPreview select does not yet support stacked dimensions'
                )

            if src_dim in eval_coordinates.dims:
                src_coords = source_coordinates[src_dim]
                dst_coords = eval_coordinates[src_dim]

                if isinstance(dst_coords, UniformCoordinates1d):
                    dst_start = dst_coords.start
                    dst_stop = dst_coords.stop
                    dst_delta = dst_coords.step
                else:
                    dst_start = dst_coords.coordinates[0]
                    dst_stop = dst_coords.coordinates[-1]
                    dst_delta = (dst_stop - dst_start) / (dst_coords.size - 1)

                if isinstance(src_coords, UniformCoordinates1d):
                    src_start = src_coords.start
                    src_stop = src_coords.stop
                    src_delta = src_coords.step
                else:
                    src_start = src_coords.coordinates[0]
                    src_stop = src_coords.coordinates[-1]
                    src_delta = (src_stop - src_start) / (src_coords.size - 1)

                ndelta = max(1, np.round(dst_delta / src_delta))
                if src_coords.size == 1:
                    c = src_coords.copy()
                else:
                    c = UniformCoordinates1d(src_start, src_stop,
                                             ndelta * src_delta,
                                             **src_coords.properties)

                if isinstance(idx, slice):
                    idx = slice(idx.start, idx.stop, int(ndelta))
                else:
                    idx = slice(idx[0], idx[-1], int(ndelta))
            else:
                c = source_coordinates[src_dim]

            new_coords.append(c)
            new_coords_idx.append(idx)

        return Coordinates(new_coords), tuple(new_coords_idx)
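
A worked illustration of the stride computation above, with hypothetical spacings: a source grid with step 1.0 previewed at a requested step of 3.3 keeps every third source coordinate.

import numpy as np

src_step = 1.0                                    # source coordinate spacing
dst_step = 3.3                                    # requested (eval) spacing
ndelta = max(1, np.round(dst_step / src_step))    # -> 3.0
# The selector then strides through the source index with int(ndelta),
# e.g. slice(0, 11, 3) keeps coordinates 0, 3, 6, 9 of a 0..10 grid.
print(int(ndelta))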
Example #4
    def get_native_coordinates(self):
        """{get_native_coordinates}
        
        The default implementation tries to find the lat/lon coordinates based on dataset.affine.
        It cannot determine the alt or time dimensions, so child classes may
        have to overload this method.
        """

        # check to see if the coordinates are rotated using the affine transform
        affine = self.dataset.transform
        if affine[1] != 0.0 or affine[3] != 0.0:
            raise NotImplementedError(
                "Rotated coordinates are not yet supported")

        # TODO: fix coordinate reference system handling
        # try:
        #     crs = self.dataset.crs['init'].upper()
        #     if crs == 'EPSG:3857':
        #         crs = 'SPHER_MERC'
        #     elif crs == 'EPSG:4326':
        #         crs = 'WGS84'
        #     else:
        #         crs = None
        # except:
        #     crs = None
        crs = None

        # get bounds
        left, bottom, right, top = self.dataset.bounds

        # rasterio reads data upside-down from coordinate conventions, so lat goes from top to bottom
        return Coordinates([
            UniformCoordinates1d(top,
                                 bottom,
                                 size=self.dataset.height,
                                 name='lat',
                                 coord_ref_sys=crs),
            UniformCoordinates1d(left,
                                 right,
                                 size=self.dataset.width,
                                 name='lon',
                                 coord_ref_sys=crs)
        ],
                           coord_ref_sys=crs)
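
The same rotation check and bounds lookup can be reproduced directly against a rasterio dataset; the file path below is a placeholder:

import rasterio

with rasterio.open("example.tif") as dataset:     # hypothetical path
    affine = dataset.transform
    # affine[1] and affine[3] are the rotation terms; non-zero means a rotated grid
    assert affine[1] == 0.0 and affine[3] == 0.0, "rotated rasters not supported"
    left, bottom, right, top = dataset.bounds
    # lat runs from top to bottom because rasterio stores rows north-to-south
    print((top, bottom, dataset.height), (left, right, dataset.width))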
Example #5
    def native_coordinates(self):
        """{native_coordinates}
        
        Returns
        -------
        Coordinates
            {native_coordinates}
            
        Notes
        ------
        This is a little tricky and doesn't fit into the usual PODPAC method, as the service is actually doing the 
        data wrangling for us...
        """

        # TODO update so that we don't rely on _requested_coordinates if possible
        if not self._requested_coordinates:
            return self.wcs_coordinates

        cs = []
        for dim in self.wcs_coordinates.dims:
            if dim in self._requested_coordinates.dims:
                c = self._requested_coordinates[dim]
                if c.size == 1:
                    cs.append(ArrayCoordinates1d(c.coordinates[0], name=dim))
                elif isinstance(c, UniformCoordinates1d):
                    cs.append(
                        UniformCoordinates1d(c.bounds[0],
                                             c.bounds[1],
                                             abs(c.step),
                                             name=dim))
                else:
                    # TODO: generalize/fix this
                    # WCS calls require a regular grid (otherwise we would have to make multiple WCS calls)
                    cs.append(
                        UniformCoordinates1d(c.bounds[0],
                                             c.bounds[1],
                                             size=c.size,
                                             name=dim))
            else:
                cs.append(self.wcs_coordinates[dim])
        c = Coordinates(cs)
        return c
Example #6
    def get_modified_coordinates1d(self, coords, dim):
        """Returns the expanded coordinates for the requested dimension, depending on the expansion parameter for the
        given dimension.
        
        Parameters
        ----------
        coords : Coordinates
            The requested input coordinates
        dim : str
            Dimension to expand
        
        Returns
        -------
        expanded : Coordinates1d
            Expanded coordinates
        """

        coords1d = coords[dim]
        expansion = getattr(self, dim)

        if not expansion:  # i.e. if list is empty
            # no expansion in this dimension
            return coords1d

        if len(expansion) == 2:
            # use available native coordinates
            dstart = make_coord_delta(expansion[0])
            dstop = make_coord_delta(expansion[1])

            available_coordinates = self.coordinates_source.find_coordinates()
            if len(available_coordinates) != 1:
                raise ValueError(
                    "Cannot implicity expand coordinates; too many available coordinates"
                )
            acoords = available_coordinates[0][dim]
            cs = [
                acoords.select((add_coord(x, dstart), add_coord(x, dstop)))
                for x in coords1d.coordinates
            ]

        elif len(expansion) == 3:
            # use an explicit step size
            dstart = make_coord_delta(expansion[0])
            dstop = make_coord_delta(expansion[1])
            step = make_coord_delta(expansion[2])
            cs = [
                UniformCoordinates1d(add_coord(x, dstart), add_coord(x, dstop),
                                     step) for x in coords1d.coordinates
            ]

        else:
            raise ValueError("Invalid expansion attrs for '%s'" % dim)

        return ArrayCoordinates1d(np.concatenate([c.coordinates for c in cs]),
                                  **coords1d.properties)
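
Hypothetical expansion attrs illustrating the two branches above: a two-element window pulls the source's own coordinates inside the window, while a three-element window generates uniform coordinates with an explicit step.

# Values are illustrative only, e.g. for a 'lat' dimension in degrees.
expand_from_source = [-0.5, 0.5]        # 2 elements: use available source coordinates
expand_with_step = [-0.5, 0.5, 0.1]     # 3 elements: explicit window and step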
Example #7
    def get_modified_coordinates1d(self, coords, dim):
        """
        Get the desired 1d coordinates for the given dimension, depending on the selection attr for the given
        dimension.

        Parameters
        ----------
        coords : Coordinates
            The requested input coordinates
        dim : str
            Dimension for doing the selection

        Returns
        -------
        coords1d : ArrayCoordinates1d
            The selected coordinates for the given dimension.
        """

        coords1d = coords[dim]
        selection = getattr(self, dim)

        if not selection:
            # no selection in this dimension
            return coords1d

        if len(selection) == 1 or ((len(selection) == 2) and
                                   (selection[0] == selection[1])):
            # a single value
            coords1d = ArrayCoordinates1d(selection, **coords1d.properties)

        elif len(selection) == 2:
            # use available source coordinates within the selected bounds
            available_coordinates = self.coordinates_source.find_coordinates()
            if len(available_coordinates) != 1:
                raise ValueError(
                    "SelectCoordinates Node cannot determine the step size between bounds for dimension"
                    +
                    "{} because source node (source.find_coordinates()) has {} different coordinates."
                    .format(dim, len(available_coordinates)) +
                    "Please specify step-size for this dimension.")
            coords1d = available_coordinates[0][dim].select(selection)

        elif len(selection) == 3:
            # uniform coordinates using start, stop, and step
            coords1d = UniformCoordinates1d(*selection, **coords1d.properties)

        else:
            raise ValueError("Invalid selection attrs for '%s'" % dim)

        return coords1d
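
Hypothetical selection attrs illustrating the three branches above:

# Values are illustrative only, e.g. for a 'lat' dimension in degrees.
select_single = [45.0]                  # 1 value: select exactly that coordinate
select_bounds = [40.0, 50.0]            # 2 values: source coordinates within the bounds
select_uniform = [40.0, 50.0, 0.25]     # 3 values: uniform start, stop, step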
Example #8
    def get_modified_coordinates1d(self, coords, dim):
        """
        Get the desired 1d coordinates for the given dimension, depending on the selection attr for the given
        dimension.

        Parameters
        ----------
        coords : Coordinates
            The requested input coordinates
        dim : str
            Dimension for doing the selection

        Returns
        -------
        coords1d : ArrayCoordinates1d
            The selected coordinates for the given dimension.
        """

        coords1d = coords[dim]
        selection = getattr(self, dim)

        if not selection:
            # no selection in this dimension
            return coords1d

        if len(selection) == 1:
            # a single value
            coords1d = ArrayCoordinates1d(selection, **coords1d.properties)

        elif len(selection) == 2:
            # use available source coordinates within the selected bounds
            available_coordinates = self.coordinates_source.find_coordinates()
            if len(available_coordinates) != 1:
                raise ValueError(
                    "Cannot select within bounds; too many available coordinates"
                )
            coords1d = available_coordinates[0][dim].select(selection)

        elif len(selection) == 3:
            # uniform coordinates using start, stop, and step
            coords1d = UniformCoordinates1d(*selection, **coords1d.properties)

        else:
            raise ValueError("Invalid selection attrs for '%s'" % dim)

        return coords1d
Example #9
    def select_coordinates(self,
                           udims,
                           source_coordinates,
                           eval_coordinates,
                           index_type="numpy"):
        """
        {interpolator_select}
        """
        new_coords = []
        new_coords_idx = []

        source_coords, source_coords_index = source_coordinates.intersect(
            eval_coordinates, outer=True, return_index=True)

        if source_coords.size == 0:
            return source_coords, source_coords_index

        # iterate over the source coordinate dims in case they are stacked
        for src_dim, idx in zip(source_coords, source_coords_index):

            # TODO: handle stacked coordinates
            if isinstance(source_coords[src_dim], StackedCoordinates):
                raise InterpolatorException(
                    "NearestPreview select does not yet support stacked dimensions"
                )

            if src_dim in eval_coordinates.dims:
                src_coords = source_coords[src_dim]
                dst_coords = eval_coordinates[src_dim]

                if isinstance(dst_coords, UniformCoordinates1d):
                    dst_start = dst_coords.start
                    dst_stop = dst_coords.stop
                    dst_delta = dst_coords.step
                else:
                    dst_start = dst_coords.coordinates[0]
                    dst_stop = dst_coords.coordinates[-1]
                    with np.errstate(invalid="ignore"):
                        dst_delta = (dst_stop - dst_start) / (dst_coords.size -
                                                              1)

                if isinstance(src_coords, UniformCoordinates1d):
                    src_start = src_coords.start
                    src_stop = src_coords.stop
                    src_delta = src_coords.step
                else:
                    src_start = src_coords.coordinates[0]
                    src_stop = src_coords.coordinates[-1]
                    with np.errstate(invalid="ignore"):
                        src_delta = (src_stop - src_start) / (src_coords.size -
                                                              1)

                ndelta = max(1, np.round(np.abs(dst_delta / src_delta)))
                idx_offset = 0
                if src_coords.size == 1:
                    c = src_coords.copy()
                else:
                    c_test = UniformCoordinates1d(src_start, src_stop,
                                                  ndelta * src_delta,
                                                  **src_coords.properties)
                    bounds = source_coordinates[src_dim].bounds
                    # The delta/2 ensures the endpoint is included when there is a floating point rounding error.
                    # It is more padding than strictly needed, but it does guarantee inclusion.
                    src_stop = np.clip(src_stop + ndelta * src_delta / 2,
                                       bounds[0], bounds[1])
                    c = UniformCoordinates1d(src_start, src_stop,
                                             ndelta * src_delta,
                                             **src_coords.properties)
                    if c.size > c_test.size:  # need to adjust the index as well
                        idx_offset = int(ndelta)

                idx_start = idx.start if isinstance(idx, slice) else idx[0]
                idx_stop = idx.stop if isinstance(idx, slice) else idx[-1]
                if idx_stop is not None:
                    idx_stop += idx_offset
                idx = slice(idx_start, idx_stop, int(ndelta))
            else:
                c = source_coords[src_dim]

            new_coords.append(c)
            new_coords_idx.append(idx)

        return Coordinates(new_coords,
                           validate_crs=False), tuple(new_coords_idx)
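
A small illustration of the floating point endpoint problem the delta/2 clip above guards against (numbers are hypothetical):

start, step, stop = 0.0, 0.1, 0.7
point = start + 7 * step                # 0.7000000000000001, just past stop
print(point <= stop)                    # False: the endpoint would be dropped
print(point <= stop + step / 2)         # True: the half-step pad keeps it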
Example #10
    def get_wcs_coordinates(self):
        """Retrieves the native coordinates reported by the WCS service.
        
        Returns
        -------
        Coordinates
            The native coordinates reported by the WCS service.
        
        Notes
        -------
        This assumes a `time`, `lat`, `lon` order for the coordinates, and currently doesn't handle `alt` coordinates
        
        Raises
        ------
        Exception
            Raises this if the required dependencies are not installed.
        """
        if requests is not None:
            capabilities = requests.get(self.get_capabilities_url)
            if capabilities.status_code != 200:
                raise Exception("Could not get capabilities from WCS server")
            capabilities = capabilities.text

        # TODO: remove urllib3 support - requests is sufficient
        elif urllib3 is not None:
            if certifi is not None:
                http = urllib3.PoolManager(ca_certs=certifi.where())
            else:
                http = urllib3.PoolManager()

            r = http.request('GET', self.get_capabilities_url)
            capabilities = r.data
            if r.status != 200:
                raise Exception("Could not get capabilities from WCS server")
        else:
            raise Exception(
                "Do not have a URL request library to get WCS data.")

        if lxml is not None:  # could skip using lxml and always use html.parser instead, which seems to work but lxml might be faster
            capabilities = bs4.BeautifulSoup(capabilities, 'lxml')
        else:
            capabilities = bs4.BeautifulSoup(capabilities, 'html.parser')

        domain = capabilities.find('wcs:spatialdomain')
        pos = domain.find('gml:envelope').get_text().split()
        lonlat = np.array(pos, float).reshape(2, 2)
        grid_env = domain.find('gml:gridenvelope')
        low = np.array(grid_env.find('gml:low').text.split(), int)
        high = np.array(grid_env.find('gml:high').text.split(), int)
        size = high - low
        dlondlat = (lonlat[1, :] - lonlat[0, :]) / size
        bottom = lonlat[:, 1].min() + dlondlat[1] / 2
        top = lonlat[:, 1].max() - dlondlat[1] / 2
        left = lonlat[:, 0].min() + dlondlat[0] / 2
        right = lonlat[:, 0].max() - dlondlat[0] / 2

        timedomain = capabilities.find("wcs:temporaldomain")
        if timedomain is None:
            return Coordinates([
                UniformCoordinates1d(top, bottom, size=size[1], name='lat'),
                UniformCoordinates1d(left, right, size=size[0], name='lon')
            ])

        date_re = re.compile(
            '[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}')
        times = str(timedomain).replace('<gml:timeposition>',
                                        '').replace('</gml:timeposition>',
                                                    '').split('\n')
        times = np.array([t for t in times if date_re.match(t)], np.datetime64)

        return Coordinates([
            ArrayCoordinates1d(times, name='time'),
            UniformCoordinates1d(top, bottom, size=size[1], name='lat'),
            UniformCoordinates1d(left, right, size=size[0], name='lon')
        ])
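
A tiny illustration of the BeautifulSoup lookups above, on an abbreviated, hypothetical capabilities fragment; note that html.parser lowercases tag names, which is why the queries use lowercase:

import bs4

xml = """
<wcs:spatialDomain>
  <gml:Envelope>
    <gml:pos>-180 -90</gml:pos>
    <gml:pos>180 90</gml:pos>
  </gml:Envelope>
  <gml:GridEnvelope>
    <gml:low>0 0</gml:low>
    <gml:high>360 180</gml:high>
  </gml:GridEnvelope>
</wcs:spatialDomain>
"""
soup = bs4.BeautifulSoup(xml, "html.parser")
domain = soup.find("wcs:spatialdomain")
print(domain.find("gml:envelope").get_text().split())                 # ['-180', '-90', '180', '90']
print(domain.find("gml:gridenvelope").find("gml:high").text.split())  # ['360', '180']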
Example #11
    def _eval(self, coordinates, output=None, _selector=None):
        """Evaluates this nodes using the supplied coordinates.

        Parameters
        ----------
        coordinates : podpac.Coordinates
            {requested_coordinates}
        output : podpac.UnitsDataArray, optional
            {eval_output}
        _selector: callable(coordinates, request_coordinates)
            {eval_selector}

        Returns
        -------
        {eval_return}
        """
        # The size of this kernel is used to figure out the expanded size
        full_kernel = self.kernel

        # expand the coordinates
        # The next line effectively drops extra coordinates, so we have to add those later in case the
        # source is some sort of reduction Node.
        kernel_dims = [kd for kd in coordinates.dims if kd in self.kernel_dims]
        missing_dims = [kd for kd in coordinates.dims if kd not in self.kernel_dims]

        exp_coords = []
        exp_slice = []
        for dim in kernel_dims:
            coord = coordinates[dim]
            s = full_kernel.shape[self.kernel_dims.index(dim)]
            if s == 1 or not isinstance(coord, (UniformCoordinates1d, ArrayCoordinates1d)):
                exp_coords.append(coord)
                exp_slice.append(slice(None))
                continue

            if isinstance(coord, UniformCoordinates1d):
                s_start = -s // 2
                s_end = max(s // 2 - ((s + 1) % 2), 1)
                # The 1e-14 guards against floating point error: if the stop falls
                # slightly short of start + step * N, the endpoint would be excluded.
                # ALSO: MUST use size instead of step otherwise floating point error
                # makes the xarray arrays not align. The following HAS to be true:
                #     np.diff(coord.coordinates).mean() == coord.step
                exp_coords.append(
                    UniformCoordinates1d(
                        add_coord(coord.start, s_start * coord.step),
                        add_coord(coord.stop, s_end * coord.step + 1e-14 * coord.step),
                        size=coord.size - s_start + s_end,  # HAVE to use size, see note above
                        **coord.properties
                    )
                )
                exp_slice.append(slice(-s_start, -s_end))
            elif isinstance(coord, ArrayCoordinates1d):
                if not coord.is_monotonic or coord.size < 2:
                    exp_coords.append(coord)
                    exp_slice.append(slice(None))
                    continue

                arr_coords = coord.coordinates
                delta_start = arr_coords[1] - arr_coords[0]
                extra_start = np.arange(arr_coords[0] - delta_start * (s // 2), arr_coords[0], delta_start)
                delta_end = arr_coords[-1] - arr_coords[-2]
                # The 1e-14 is for floating point error to make sure endpoint is included
                extra_end = np.arange(
                    arr_coords[-1] + delta_end, arr_coords[-1] + delta_end * (s // 2) + delta_end * 1e-14, delta_end
                )
                arr_coords = np.concatenate([extra_start, arr_coords, extra_end])
                exp_coords.append(ArrayCoordinates1d(arr_coords, **coord.properties))
                exp_slice.append(slice(extra_start.size, -extra_end.size))

        # Add missing dims back in -- this is needed in case the source is a reduce node.
        exp_coords += [coordinates[d] for d in missing_dims]

        # Create expanded coordinates
        exp_slice = tuple(exp_slice)
        expanded_coordinates = Coordinates(exp_coords, crs=coordinates.crs, validate_crs=False)

        if settings["DEBUG"]:
            self._expanded_coordinates = expanded_coordinates

        # evaluate source using expanded coordinates, convolve, and then slice out original coordinates
        source = self.source.eval(expanded_coordinates, _selector=_selector)

        kernel_dims_u = kernel_dims
        kernel_dims = self.kernel_dims
        sum_dims = [d for d in kernel_dims if d not in source.dims]
        # Sum out the extra dims
        full_kernel = full_kernel.sum(axis=tuple([kernel_dims.index(d) for d in sum_dims]))
        exp_slice = [exp_slice[i] for i in range(len(kernel_dims_u)) if kernel_dims_u[i] not in sum_dims]
        kernel_dims = [d for d in kernel_dims if d in source.dims]

        # Put the kernel axes in the correct order
        # The (if d in kernel_dims) takes care of "output", which can be optionally present
        full_kernel = full_kernel.transpose([kernel_dims.index(d) for d in source.dims if (d in kernel_dims)])

        # Check for extra dimensions in the source and reshape the kernel appropriately
        if any([d not in kernel_dims for d in source.dims if d != "output"]):
            new_axis = []
            new_exp_slice = []
            for d in source.dims:
                if d in kernel_dims:
                    new_axis.append(slice(None))
                    new_exp_slice.append(exp_slice[kernel_dims.index(d)])
                else:
                    new_axis.append(None)
                    new_exp_slice.append(slice(None))
            full_kernel = full_kernel[new_axis]
            exp_slice = new_exp_slice

        if np.any(np.isnan(source)):
            method = "direct"
        else:
            method = "auto"

        if ("output" not in source.dims) or ("output" in source.dims and "output" in kernel_dims):
            result = scipy.signal.convolve(source, full_kernel, mode="same", method=method)
        else:
            # source with multiple outputs
            result = np.stack(
                [
                    scipy.signal.convolve(source.sel(output=output), full_kernel, mode="same", method=method)
                    for output in source.coords["output"]
                ],
                axis=source.dims.index("output"),
            )
        result = result[exp_slice]

        if output is None:
            missing_dims = [d for d in coordinates.dims if d not in source.dims]
            output = self.create_output_array(coordinates.drop(missing_dims), data=result)
        else:
            output[:] = result

        return output
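
A worked illustration of the padding arithmetic above, using hypothetical kernel widths: for a kernel of width s along a dimension, the coordinates are extended by -s // 2 points before and max(s // 2 - ((s + 1) % 2), 1) points after, and exp_slice later removes exactly that padding.

for s in (3, 4, 5):                                # hypothetical kernel widths
    s_start = -s // 2                              # number of points prepended (negated)
    s_end = max(s // 2 - ((s + 1) % 2), 1)         # number of points appended
    print(s, -s_start, s_end, slice(-s_start, -s_end))
# s=3 -> pad 2 before, 1 after, slice(2, -1)
# s=4 -> pad 2 before, 1 after, slice(2, -1)
# s=5 -> pad 3 before, 2 after, slice(3, -2)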
Example #12
    def _eval(self, coordinates, output=None, _selector=None):
        """Evaluates this nodes using the supplied coordinates.

        Parameters
        ----------
        coordinates : podpac.Coordinates
            {requested_coordinates}
        output : podpac.UnitsDataArray, optional
            {eval_output}
        _selector: callable(coordinates, request_coordinates)
            {eval_selector}

        Returns
        -------
        {eval_return}
        """
        # The size of this kernel is used to figure out the expanded size
        full_kernel = self._get_full_kernel(coordinates)

        # expand the coordinates
        # The next line effectively drops extra coordinates, so we have to add those later in case the
        # source is some sort of reduction Node.
        kernel_dims = [kd for kd in coordinates.dims if kd in self.kernel_dims]
        missing_dims = [
            kd for kd in coordinates.dims if kd not in self.kernel_dims
        ]

        exp_coords = []
        exp_slice = []
        for dim in kernel_dims:
            coord = coordinates[dim]
            s = full_kernel.shape[self.kernel_dims.index(dim)]
            if s == 1 or not isinstance(coord, UniformCoordinates1d):
                exp_coords.append(coord)
                exp_slice.append(slice(None))
                continue

            s_start = -s // 2
            s_end = max(s // 2 - ((s + 1) % 2), 1)
            # The 1e-07 guards against floating point error: if the stop falls
            # slightly short of start + step * N, the endpoint would be excluded.
            exp_coords.append(
                UniformCoordinates1d(
                    add_coord(coord.start, s_start * coord.step),
                    add_coord(coord.stop,
                              s_end * coord.step + 1e-07 * coord.step),
                    coord.step, **coord.properties))
            exp_slice.append(slice(-s_start, -s_end))

        # Add missing dims back in -- this is needed in case the source is a reduce node.
        exp_coords += [coordinates[d] for d in missing_dims]
        # exp_slice += [slice(None) for d in missing_dims]

        # Create expanded coordinates
        exp_slice = tuple(exp_slice)
        expanded_coordinates = Coordinates(exp_coords,
                                           crs=coordinates.crs,
                                           validate_crs=False)

        if settings["DEBUG"]:
            self._expanded_coordinates = expanded_coordinates

        # evaluate source using expanded coordinates, convolve, and then slice out original coordinates
        source = self.source.eval(expanded_coordinates, _selector=_selector)

        # Check dimensions
        if any([d not in kernel_dims for d in source.dims if d != "output"]):
            raise ValueError(
                "Kernel dims must contain all of the dimensions in source but not all of {} is in kernel_dims={}"
                .format(source.dims, kernel_dims))

        full_kernel = self._get_full_kernel(coordinates)
        kernel_dims = self.kernel_dims
        sum_dims = [d for d in kernel_dims if d not in source.dims]
        # Sum out the extra dims
        full_kernel = full_kernel.sum(
            axis=tuple([kernel_dims.index(d) for d in sum_dims]))
        kernel_dims = [d for d in kernel_dims if d in source.dims]

        # Put the kernel axes in the correct order
        # The (if d in kernel_dims) takes care of "output", which can be optionally present
        full_kernel = full_kernel.transpose(
            [kernel_dims.index(d) for d in source.dims if (d in kernel_dims)])

        if np.any(np.isnan(source)):
            method = "direct"
        else:
            method = "auto"

        if ("output" not in source.dims) or ("output" in source.dims
                                             and "output" in kernel_dims):
            result = scipy.signal.convolve(source,
                                           full_kernel,
                                           mode="same",
                                           method=method)
        else:
            # source with multiple outputs
            result = np.stack(
                [
                    scipy.signal.convolve(source.sel(output=output),
                                          full_kernel,
                                          mode="same",
                                          method=method)
                    for output in source.coords["output"]
                ],
                axis=source.dims.index("output"),
            )
        result = result[exp_slice]

        if output is None:
            missing_dims = [
                d for d in coordinates.dims if d not in source.dims
            ]
            output = self.create_output_array(coordinates.drop(missing_dims),
                                              data=result)
        else:
            output[:] = result

        return output
Example #13
    def eval(self, coordinates, output=None):
        """Evaluates this nodes using the supplied coordinates.
        
        Parameters
        ----------
        coordinates : podpac.Coordinates
            {requested_coordinates}
        output : podpac.UnitsDataArray, optional
            {eval_output}
        
        Returns
        -------
        {eval_return}
        """
        # This should be aligned with coordinates' dimension order
        # The size of this kernel is used to figure out the expanded size
        self._full_kernel = self.get_full_kernel(coordinates)

        if len(self._full_kernel.shape) != len(coordinates.shape):
            raise ValueError(
                "shape mismatch, kernel does not match source data (%s != %s)"
                % (self._full_kernel.shape, coordinates.shape))

        # expand the coordinates
        exp_coords = []
        exp_slice = []
        for dim, s in zip(coordinates.dims, self._full_kernel.shape):
            coord = coordinates[dim]
            if s == 1 or not isinstance(coord, UniformCoordinates1d):
                exp_coords.append(coord)
                exp_slice.append(slice(None))
                continue

            s_start = -s // 2
            s_end = s // 2 - ((s + 1) % 2)
            # The 1e-07 guards against floating point error: if the stop falls
            # slightly short of start + step * N, the endpoint would be excluded.
            exp_coords.append(
                UniformCoordinates1d(
                    add_coord(coord.start, s_start * coord.step),
                    add_coord(coord.stop,
                              s_end * coord.step + 1e-07 * coord.step),
                    coord.step, **coord.properties))
            exp_slice.append(slice(-s_start, -s_end))
        exp_slice = tuple(exp_slice)
        self._expanded_coordinates = Coordinates(exp_coords)

        # evaluate source using expanded coordinates, convolve, and then slice out original coordinates
        source = self.source.eval(self._expanded_coordinates)

        if np.any(np.isnan(source)):
            method = 'direct'
        else:
            method = 'auto'

        result = scipy.signal.convolve(source,
                                       self._full_kernel,
                                       mode='same',
                                       method=method)
        result = result[exp_slice]

        if output is None:
            output = self.create_output_array(coordinates, data=result)
        else:
            output[:] = result

        return output