Code Example #1
    def _run_tool(self):

        dataset = self.dataset
        orig_metadata = get_metadata(dataset)[dataset]
        src_path = orig_metadata['file_path']

        # run filter
        with rasterio.open(src_path) as src:
            out_image = self._run(src, orig_metadata)
            out_meta = src.profile
        # save the resulting raster
        out_meta.update({"dtype": out_image.dtype,
                         "height": out_image.shape[0],
                         "width": out_image.shape[1],
                         "transform": None})

        new_metadata = {
            'parameter': orig_metadata['parameter'],
            'datatype': orig_metadata['datatype'],
            'file_format': orig_metadata['file_format'],
            'unit': orig_metadata['unit']
        }

        new_dset, file_path, catalog_entry = self._create_new_dataset(
            old_dataset=dataset,
            ext='.tif',
            dataset_metadata=new_metadata,
        )

        with rasterio.open(file_path, "w", **out_meta) as dest:
            dest.write(out_image)

        return {'datasets': new_dset, 'catalog_entries': catalog_entry}
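Note: the "transform": None update above discards the source georeferencing when the output is written. A minimal sketch, not taken from the quest codebase and using a hypothetical array-in/array-out function some_filter, of the more common rasterio pattern that reuses the source profile (including its transform):

import rasterio

def write_filtered(src_path, dst_path, some_filter):
    # read band 1, apply the filter, and reuse the source profile so the
    # georeferencing (transform, CRS) is preserved in the output
    with rasterio.open(src_path) as src:
        out = some_filter(src.read(1))
        profile = src.profile
    profile.update(dtype=out.dtype.name, height=out.shape[0], width=out.shape[1])
    with rasterio.open(dst_path, 'w', **profile) as dst:
        dst.write(out, 1)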
Code Example #2
File: ts_base.py  Project: douggallup/quest
    def _run_tool(self):
        dataset = self.dataset

        io = load_plugins('io', 'timeseries-hdf5')['timeseries-hdf5']
        orig_metadata = get_metadata(dataset)[dataset]
        if orig_metadata['file_path'] is None:
            raise IOError('No data file available for this dataset')

        df = io.read(orig_metadata['file_path'])

        # run filter
        new_df = self._run(df)

        # setup new dataset
        new_metadata = {
            'parameter': new_df.metadata.get('parameter'),
            'unit': new_df.metadata.get('unit'),
            'datatype': orig_metadata['datatype'],
            'file_format': orig_metadata['file_format'],
        }

        new_dset, file_path, catalog_entry = self._create_new_dataset(
            old_dataset=dataset,
            ext='.h5',
            dataset_metadata=new_metadata,
        )

        # save dataframe
        io.write(file_path, new_df, new_metadata)

        return {'datasets': new_dset}
Code Example #3
    def _run_tool(self):
        dataset = self.dataset

        # get metadata, path etc from dataset

        orig_metadata = get_metadata(dataset)[dataset]
        elev_file = orig_metadata['file_path']

        new_dset, file_path, catalog_entry = self._create_new_dataset(
            old_dataset=dataset,
            ext='.tif',
            dataset_metadata={
                'parameter': 'streams',
                'datatype': orig_metadata['datatype'],
                'file_format': orig_metadata['file_format'],
            })

        fa = self.flow_accumulation
        if fa is None:
            fa = wbt.d_inf_flow_accumulation(elev_file)
        # fa = wbt.d8_flow_accumulation(fill)
        wbt.extract_streams(
            flow_accum=fa,
            threshold=self.stream_threshold,
            output=file_path,
        )

        return {'datasets': new_dset}
Code Example #4
    def set_threshold_bounds(cls):
        if cls.dataset:
            orig_metadata = get_metadata(cls.dataset)[cls.dataset]
            elev_file = orig_metadata['file_path']
            fa = cls.flow_accumulation = wbt.d_inf_flow_accumulation(elev_file)
            amax = np.nanmax(fa) * 0.5
            amin = np.nanmean(fa)
            threshold = cls.params()['stream_threshold']
            threshold.bounds = (amin, amax)
            threshold.default = amax * 0.1
Code Example #5
File: ts_flow_duration.py  Project: dloney/quest
    def _run_tool(self):

        dataset = self.dataset

        input_ts = load_plugins('io', 'timeseries-hdf5')['timeseries-hdf5']
        orig_metadata = get_metadata(dataset)[dataset]
        parameter = orig_metadata['parameter']
        if orig_metadata['file_path'] is None:
            raise IOError('No data file available for this dataset')

        df = input_ts.read(orig_metadata['file_path'])

        # apply transformation

        # run filter
        # new_df = self._run(df, options)
        metadata = df.metadata
        if 'file_path' in metadata:
            del metadata['file_path']
        df.sort_values([parameter],
                       ascending=False,
                       na_position='last',
                       inplace=True)
        df['Rank'] = df[parameter].rank(method='min', ascending=False)
        df.dropna(inplace=True)
        df['Percent Exceeded'] = (df['Rank'] /
                                  (df[parameter].count() + 1)) * 100
        df.index = df['Percent Exceeded']

        setattr_on_dataframe(df, 'metadata', metadata)
        new_df = df
        # setup new dataset
        new_metadata = {
            'parameter': new_df.metadata.get('parameter'),
            'datatype': orig_metadata['datatype'],
            'options': self.set_options,
            'file_format': orig_metadata['file_format'],
            'unit': new_df.metadata.get('unit'),
        }

        new_dset, file_path, catalog_entry = self._create_new_dataset(
            old_dataset=dataset,
            ext='.h5',
            dataset_metadata=new_metadata,
        )

        # save dataframe
        output = load_plugins('io', 'xy-hdf5')['xy-hdf5']
        output.write(file_path, new_df, new_metadata)

        return {'datasets': new_dset, 'catalog_entries': catalog_entry}
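The sort/rank block above is a standard flow-duration-curve computation: values are ranked from largest to smallest, and the percent of time each value is equaled or exceeded is estimated with the Weibull plotting position, rank / (n + 1). A self-contained pandas sketch, independent of quest (the column name 'flow' is only a placeholder):

import pandas as pd

def flow_duration(series):
    # rank values from largest to smallest and convert each rank to the
    # percent of time that value is equaled or exceeded
    df = series.dropna().sort_values(ascending=False).to_frame('flow')
    df['Rank'] = df['flow'].rank(method='min', ascending=False)
    df['Percent Exceeded'] = df['Rank'] / (df['flow'].count() + 1) * 100
    return df.set_index('Percent Exceeded')

# e.g. flow_duration(pd.Series([3.0, 1.0, 4.0, 1.5, 9.0]))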
Code Example #6
File: rst_reprojection.py  Project: douggallup/quest
    def _run_tool(self):

        dataset = self.dataset

        # get metadata, path etc from first dataset, i.e. assume all datasets
        # are in same folder. This will break if you try and combine datasets
        # from different providers

        orig_metadata = get_metadata(dataset)[dataset]
        src_path = orig_metadata['file_path']

        if self.new_crs is None:
            raise ValueError(
                "A new coordinate reference system MUST be provided")

        dst_crs = self.new_crs

        new_metadata = {
            'parameter': orig_metadata['parameter'],
            'datatype': orig_metadata['datatype'],
            'file_format': orig_metadata['file_format'],
        }

        new_dset, file_path, catalog_entry = self._create_new_dataset(
            old_dataset=dataset,
            ext='.tif',
            dataset_metadata=new_metadata,
        )

        # run filter
        with rasterio.open(src_path) as src:
            # write out tif file
            subprocess.check_output([
                'gdalwarp', src_path, file_path, '-s_srs',
                src.crs.to_string(), '-t_srs', dst_crs
            ])

        with rasterio.open(file_path) as f:
            geometry = util.bbox2poly(f.bounds.left,
                                      f.bounds.bottom,
                                      f.bounds.right,
                                      f.bounds.top,
                                      as_shapely=True)
        update_metadata(catalog_entry,
                        quest_metadata={'geometry': geometry.to_wkt()})

        return {'datasets': new_dset, 'catalog_entries': catalog_entry}
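This example shells out to the gdalwarp command line tool to do the actual reprojection. The same result can usually be obtained in-process with rasterio.warp; a minimal sketch (assuming rasterio 1.x, and not how quest implements it):

import rasterio
from rasterio.warp import calculate_default_transform, reproject, Resampling

def reproject_tif(src_path, dst_path, dst_crs):
    with rasterio.open(src_path) as src:
        # compute the output grid (transform and shape) for the target CRS
        transform, width, height = calculate_default_transform(
            src.crs, dst_crs, src.width, src.height, *src.bounds)
        profile = src.profile
        profile.update(crs=dst_crs, transform=transform,
                       width=width, height=height)
        with rasterio.open(dst_path, 'w', **profile) as dst:
            for band in range(1, src.count + 1):
                reproject(source=rasterio.band(src, band),
                          destination=rasterio.band(dst, band),
                          src_transform=src.transform,
                          src_crs=src.crs,
                          dst_transform=transform,
                          dst_crs=dst_crs,
                          resampling=Resampling.nearest)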
Code Example #7
    def _run_tool(self):
        dataset = self.dataset

        orig_metadata = get_metadata(dataset)[dataset]
        elev_file = orig_metadata['file_path']

        new_dset, file_path, catalog_entry = self._create_new_dataset(
            old_dataset=dataset, ext='.tif')

        wbt.fill_depressions(elev_file, output=file_path)

        quest_metadata = {
            'parameter': 'streams',
            'datatype': orig_metadata['datatype'],
            'file_format': orig_metadata['file_format'],
        }

        update_metadata(new_dset, quest_metadata=quest_metadata)

        return {'datasets': new_dset}
Code Example #8
    def _run_tool(self):
        dataset = self.elevation_dataset

        # get metadata, path etc from dataset

        orig_metadata = get_metadata(dataset)[dataset]
        elev_file = orig_metadata['file_path']

        try:
            original_outlets = [
                f['geometry'] for f in get_metadata(self.outlets)
            ]
        except Exception:
            # fall back to using self.outlets directly (e.g. raw coordinates
            # rather than dataset names)
            original_outlets = self.outlets

        new_dset, file_path, catalog_entry = self._create_new_dataset(
            old_dataset=dataset,
            ext='.tif',
            dataset_metadata={
                'parameter': 'watershed_boundary',
                'datatype': orig_metadata['datatype'],
                'file_format': orig_metadata['file_format'],
            })

        d8 = wbt.d8_pointer(elev_file)
        point_shp = points_to_shp(original_outlets)
        # initialized here so the return value below is defined even when
        # snapping is skipped
        snapped_outlets = None

        if self.snap_distance > 0:
            pp = wbt.vector_points_to_raster(point_shp, base=elev_file)
            snap_options = {
                'pour_pts': pp,
                'snap_dist': self.snap_distance,
            }
            fa = None
            if self.algorithm == 'nearest-stream':
                st = self.streams_dataset
                if st:
                    st = open_dataset(st, with_nodata=True, isel_band=0)
                else:
                    fa = wbt.d_inf_flow_accumulation(elev_file)
                    st = wbt.extract_streams(fa, threshold=.1)
                snap_options.update(streams=st)
            else:
                fa = fa or wbt.d_inf_flow_accumulation(elev_file)
                # fa = wbt.d8_flow_accumulation(elev_file)
                snap_options.update(flow_accum=fa)

            snap_function = self.SNAP_DISTANCE_ALGORITHMS[self.algorithm]
            snapped = snap_function(**snap_options)

            indices = np.nonzero(np.nan_to_num(snapped))
            snapped_outlets = [(snapped.x.values[row], snapped.y.values[col])
                               for col, row in zip(*indices)]
            point_shp = points_to_shp(snapped_outlets)

        wbt.watershed(
            d8_pntr=d8,
            pour_pts=point_shp,
            output=file_path,
        )

        new_catalog_entries = raster_to_polygons(file_path)

        quest_metadata = {
            'parameter': 'watershed_boundary',
            'datatype': orig_metadata['datatype'],
            'file_format': orig_metadata['file_format'],
            'file_path': file_path,
        }

        update_metadata(new_dset, quest_metadata=quest_metadata)

        return {
            'datasets': new_dset,
            'catalog_entries': [new_catalog_entries, snapped_outlets, catalog_entry],
        }
Code Example #9
    def _run_tool(self):

        # if len(datasets) < 2:
        #     raise ValueError('There must be at LEAST two datasets for this filter')

        datasets = self.datasets

        orig_metadata = get_metadata(datasets[0])[datasets[0]]
        raster_files = [get_metadata(dataset)[dataset]['file_path'] for dataset in datasets]

        for dataset in datasets:
            if get_metadata(dataset)[dataset]['parameter'] != orig_metadata['parameter']:
                raise ValueError('Parameters must match for all datasets')
            if get_metadata(dataset)[dataset]['unit'] != orig_metadata['unit']:
                raise ValueError('Units must match for all datasets')

        new_metadata = {
            'parameter': orig_metadata['parameter'],
            'datatype': orig_metadata['datatype'],
            'file_format': orig_metadata['file_format'],
            'unit': orig_metadata['unit'],
        }

        new_dset, file_path, catalog_entry = self._create_new_dataset(
            old_dataset=datasets[0],
            ext='.tif',
            dataset_metadata=new_metadata,
        )

        open_datasets = [rasterio.open(d) for d in raster_files]
        profile = open_datasets[0].profile
        # hack to avoid nodata out of range of dtype error for NED datasets
        profile['nodata'] = -32768.0 if profile['nodata'] == -3.4028234663853e+38 else profile['nodata']
        new_data, transform = rasterio.merge.merge(open_datasets, nodata=profile['nodata'])
        for d in open_datasets:
            d.close()
        profile.pop('tiled', None)
        profile.update(
            height=new_data.shape[1],
            width=new_data.shape[2],
            transform=transform,
            driver='GTiff'
        )
        with rasterio.open(file_path, 'w', **profile) as output:
            output.write(new_data.astype(profile['dtype']))

        bbox = self.bbox

        if bbox is not None:
            bbox = box(*bbox)
            geo = gpd.GeoDataFrame({'geometry': bbox}, index=[0], crs=from_epsg(4326))
            geo = geo.to_crs(crs=profile['crs'])
            bbox = geo.geometry

            with rasterio.open(file_path, 'r') as merged:
                new_data, transform = rasterio.mask.mask(dataset=merged, shapes=bbox, all_touched=True, crop=True)

            # profile.pop('tiled', None)
            profile.update(
                height=new_data.shape[1],
                width=new_data.shape[2],
                transform=transform,
            )
            with rasterio.open(file_path, 'w', **profile) as clipped:
                clipped.write(new_data)

        with rasterio.open(file_path) as f:
            geometry = util.bbox2poly(f.bounds.left, f.bounds.bottom, f.bounds.right, f.bounds.top, as_shapely=True)
        update_metadata(catalog_entry, quest_metadata={'geometry': geometry.to_wkt()})

        return {'datasets': new_dset, 'catalog_entries': catalog_entry}
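Note that rasterio.merge.merge and rasterio.mask.mask live in submodules that are not imported by the top-level rasterio package, so the snippet above relies on import rasterio.merge and import rasterio.mask (or equivalent from-imports) appearing elsewhere in the module. A self-contained sketch of just the merge-and-write step, with placeholder paths:

import rasterio
from rasterio.merge import merge

def merge_rasters(paths, out_path):
    sources = [rasterio.open(p) for p in paths]
    try:
        # merge returns a (bands, rows, cols) array plus the new transform
        data, transform = merge(sources)
        profile = sources[0].profile
        profile.update(driver='GTiff',
                       height=data.shape[1],
                       width=data.shape[2],
                       transform=transform)
    finally:
        for src in sources:
            src.close()
    with rasterio.open(out_path, 'w', **profile) as dst:
        dst.write(data.astype(profile['dtype']))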