Code Example #1
import logging
import os

import _utils  # project-local helper module (url_download)
def main(output_folder, overwrite_flag=False):
    """Download soil Available Water Capacity (AWC) raster

    Parameters
    ----------
    output_folder : str
        Folder path where files will be saved.
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).

    Returns
    -------
    None
    
    """
    # Composite SSURGO/STATSGO
    download_url = 'https://storage.googleapis.com/openet/ssurgo/AWC_WTA_0to10cm_composite.tif'

    # STATSGO Only
    # download_url = 'https://storage.googleapis.com/openet/statsgo/AWC_WTA_0to10cm_statsgo.tif'

    output_name = download_url.split('/')[-1]
    output_path = os.path.join(output_folder, output_name)
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)

    if not os.path.isfile(output_path) or overwrite_flag:
        logging.info('\nDownloading AWC')
        logging.info('  {}'.format(download_url))
        logging.info('  {}'.format(output_path))
        _utils.url_download(download_url, output_path)
    else:
        logging.debug('\nAWC raster already downloaded')
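All of the examples on this page delegate the transfer itself to _utils.url_download, a project-local helper whose source is not shown here. A minimal sketch of what such a helper might look like, assuming it streams the response to disk and removes any partial file on failure (consistent with the commented-out urlretrieve fallback in Code Example #11); the actual pymetric implementation may differ:

import logging
import os

import requests


def url_download(download_url, output_path, chunk_size=1024 * 1024):
    """Hypothetical stand-in for the project-local _utils.url_download"""
    try:
        response = requests.get(download_url, stream=True, timeout=60)
        response.raise_for_status()
        with open(output_path, 'wb') as output_f:
            for chunk in response.iter_content(chunk_size=chunk_size):
                output_f.write(chunk)
    except requests.exceptions.RequestException as e:
        logging.error('  ERROR: {}'.format(e))
        # The file may not have completely downloaded, so remove it
        if os.path.isfile(output_path):
            os.remove(output_path)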
Code Example #2
import logging
import os
import shutil
import sys
import zipfile

import _utils  # project-local helper module (url_download)
def main(output_folder, overwrite_flag=False):
    """Download Landsat WRS2 descending footprint shapefile

    Parameters
    ----------
    output_folder : str
        Folder path where files will be saved.
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).

    Returns
    -------
    None

    """
    download_url = ('https://landsat.usgs.gov/sites/default/files/documents/'
                    'WRS2_descending.zip')

    zip_name = 'wrs2_descending.zip'
    zip_path = os.path.join(output_folder, zip_name)

    output_name = zip_name.replace('.zip', '.shp')
    output_path = os.path.join(output_folder, output_name)
    # output_path = os.path.join(
    #     output_folder, os.path.splitext(zip_name)[0], output_name)

    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)

    if ((not os.path.isfile(zip_path) and not os.path.isfile(output_path))
            or overwrite_flag):
        logging.info('\nDownloading Landsat WRS2 descending shapefile')
        logging.info('  {}'.format(download_url))
        logging.info('  {}'.format(zip_path))
        _utils.url_download(download_url, zip_path)
    else:
        logging.info('\nFootprint shapefile already downloaded')

    if ((overwrite_flag or not os.path.isfile(output_path))
            and os.path.isfile(zip_path)):
        logging.info('\nExtracting Landsat WRS2 descending shapefile')
        logging.debug('  {}'.format(output_path))
        with zipfile.ZipFile(zip_path) as zf:
            zf.extractall(output_folder)
    else:
        logging.info('\nFootprint shapefile already extracted')

    # If the wrs2_tile_utm_zones.json doesn't exist in the output folder,
    #   copy it there.  Use the script location to figure out the input folder
    json_name = 'wrs2_tile_utm_zones.json'
    input_folder = os.path.join(os.path.dirname(os.path.dirname(sys.path[0])),
                                'landsat', 'footprints')
    input_json_path = os.path.join(input_folder, json_name)
    output_json_path = os.path.join(output_folder, json_name)
    if not os.path.isfile(output_json_path) and os.path.isfile(
            input_json_path):
        logging.info(
            '\nCopying {} to the output footprints folder'.format(json_name))
        shutil.copy(input_json_path, output_json_path)
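These main() functions are meant to be driven from the command line; the scripts' argparse blocks are not included in these excerpts. A sketch of the kind of entry point that typically accompanies them (the flag names here are illustrative, not the scripts' actual interfaces):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Download Landsat WRS2 descending footprint shapefile')
    parser.add_argument(
        '--output', default='footprints', help='Output folder path')
    parser.add_argument(
        '--overwrite', default=False, action='store_true',
        help='Force overwrite of existing files')
    args = parser.parse_args()
    main(output_folder=args.output, overwrite_flag=args.overwrite)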
Code Example #3
File: download_nlcd.py Project: crobi56/pymetric-1
import logging
import os
import zipfile

import _utils  # project-local helper module (url_download)
def main(output_folder, year='2011', overwrite_flag=False):
    """Download NLCD raster

    Parameters
    ----------
    output_folder : str
        Folder path where files will be saved.
    year : {'2001', '2006', '2011'}, optional
        NLCD year (the default is '2011').
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).

    Returns
    -------
    None

    """

    download_url = (
        'https://prd-tnm.s3.amazonaws.com/StagedProducts/NLCD2011/Land_Cover/'
        'CONUS/nlcd_{}_landcover_2011_edition_2014_10_10.zip').format(year)
    # download_url = (
    #     'http://www.landfire.gov/bulk/downloadfile.php?'
    #     'TYPE=nlcd{0}&FNAME=nlcd_{0}_landcover_2011_edition_2014_10_10.zip').format(year)
    # download_url = (
    #     'http://gisdata.usgs.gov/TDDS/DownloadFile.php?'
    #     'TYPE=nlcd{0}&FNAME=nlcd_{0}_landcover_2011_edition_2014_10_10.zip').format(year)

    zip_name = 'nlcd_{}_landcover_2011_edition_2014_10_10.zip'.format(year)
    zip_path = os.path.join(output_folder, zip_name)

    output_name = zip_name.replace('.zip', '.img')
    # output_path = os.path.join(output_folder, output_name)
    output_path = os.path.join(
        output_folder, os.path.splitext(zip_name)[0], output_name)

    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)

    if ((not os.path.isfile(zip_path) and not os.path.isfile(output_path)) or
            overwrite_flag):
        logging.info('\nDownloading NLCD')
        logging.info('  {}'.format(download_url))
        logging.info('  {}'.format(zip_path))
        _utils.url_download(download_url, zip_path)
    else:
        logging.info('\nNLCD raster already downloaded')

    if ((overwrite_flag or not os.path.isfile(output_path)) and
            os.path.isfile(zip_path)):
        logging.info('\nExtracting NLCD files')
        logging.debug('  {}'.format(output_path))
        with zipfile.ZipFile(zip_path) as zf:
            zf.extractall(output_folder)
    else:
        logging.info('\nNLCD raster already extracted')
Code Example #4
import logging
import os
import zipfile

import _utils  # project-local helper module (url_download)
def main(output_folder, overwrite_flag=False):
    """Download Landsat WRS2 descending footprint shapefile

    Parameters
    ----------
    output_folder : str
        Folder path where files will be saved.
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).

    Returns
    -------
    None

    """
    download_url = (
        'https://landsat.usgs.gov/sites/default/files/documents/wrs2_descending.zip')

    zip_name = 'wrs2_descending.zip'
    zip_path = os.path.join(output_folder, zip_name)

    output_name = zip_name.replace('.zip', '.shp')
    output_path = os.path.join(output_folder, output_name)
    # output_path = os.path.join(
    #     output_folder, os.path.splitext(zip_name)[0], output_name)

    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)

    if ((not os.path.isfile(zip_path) and not os.path.isfile(output_path)) or
            overwrite_flag):
        logging.info('\nDownloading Landsat WRS2 descending shapefile')
        logging.info('  {}'.format(download_url))
        logging.info('  {}'.format(zip_path))
        _utils.url_download(download_url, zip_path)
    else:
        logging.debug('\nFootprint shapefile already downloaded')

    if ((overwrite_flag or not os.path.isfile(output_path)) and
            os.path.isfile(zip_path)):
        logging.info('\nExtracting Landsat WRS2 descending shapefile')
        logging.debug('  {}'.format(output_path))
        with zipfile.ZipFile(zip_path) as zf:
            zf.extractall(output_folder)
    else:
        logging.debug('\nFootprint shapefile already extracted')
Code Example #5
import datetime as dt
import logging
import os
import sys

import _utils  # project-local helper module (date_range, url_download)
def main(start_dt, end_dt, netcdf_ws, variables=['all'],
         overwrite_flag=False):
    """Download DAYMET netcdf files

    Data is currently only available for 1980-2017

    Parameters
    ----------
    start_dt : datetime
        Start date.
    end_dt : datetime
        End date.
    netcdf_ws : str
        Root folder of DAYMET data.
    variables : list, optional
        DAYMET variables to download ('prcp', 'srad', 'vp', 'tmin', 'tmax').
        Set as ['all'] to download all available variables.
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).

    Returns
    -------
    None

    Notes
    -----
    https://thredds.daac.ornl.gov/thredds/catalog/ornldaac/1328/catalog.html

    """
    logging.info('\nDownloading DAYMET data')
    logging.debug('  Start date: {}'.format(start_dt))
    logging.debug('  End date:   {}'.format(end_dt))

    site_url = 'http://thredds.daac.ornl.gov/thredds/fileServer/ornldaac/1328'

    # DAYMET rasters to extract
    var_full_list = ['prcp', 'srad', 'vp', 'tmin', 'tmax']
    if not variables:
        logging.error('\nERROR: variables parameter is empty\n')
        sys.exit()
    elif not isinstance(variables, list):
        # DEADBEEF - I could try converting comma separated strings to lists?
        logging.error('\nERROR: variables parameter must be a list\n')
        sys.exit()
    elif 'all' in variables:
        logging.info('\nDownloading all variables\n  {}'.format(
            ','.join(var_full_list)))
        var_list = var_full_list
    elif not set(variables).issubset(set(var_full_list)):
        logging.error('\nERROR: variables parameter is invalid\n  {}'.format(
            variables))
        sys.exit()
    else:
        var_list = variables[:]

    # Build output workspace if it doesn't exist
    if not os.path.isdir(netcdf_ws):
        os.makedirs(netcdf_ws)

    # DAYMET data is stored by year
    year_list = sorted(list(set([
        i_dt.year for i_dt in _utils.date_range(
            start_dt, end_dt + dt.timedelta(1))])))
    year_list = list(map(lambda x: '{:04d}'.format(x), year_list))

    # Set variable names to lower case for comparison
    var_list = list(map(lambda x: x.lower(), var_list))

    # One netCDF file per variable per year
    logging.info('')
    for year_str in year_list:
        logging.info(year_str)

        # Process each file in sub folder
        for variable in var_list:
            file_name = 'daymet_v3_{}_{}_na.nc4'.format(variable, year_str)
            file_url = '{}/{}/{}'.format(site_url, year_str, file_name)
            save_path = os.path.join(netcdf_ws, file_name)

            logging.info('  {}'.format(file_name))
            logging.debug('    {}'.format(file_url))
            logging.debug('    {}'.format(save_path))
            if os.path.isfile(save_path):
                if not overwrite_flag:
                    logging.debug('    File already exists, skipping')
                    continue
                else:
                    logging.debug('    File already exists, removing existing')
                    os.remove(save_path)

            _utils.url_download(file_url, save_path)

    logging.debug('\nScript Complete')
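This example (and the GRIDMET and CIMIS downloaders below) steps through days with _utils.date_range, another project-local helper not shown on this page. A minimal sketch, assuming it yields one datetime per day over the half-open interval [start_dt, end_dt), which would explain why callers pass end_dt + dt.timedelta(1):

import datetime as dt


def date_range(start_dt, end_dt):
    """Hypothetical stand-in for the project-local _utils.date_range"""
    curr_dt = start_dt
    while curr_dt < end_dt:
        yield curr_dt
        curr_dt += dt.timedelta(days=1)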
Code Example #6
import logging
import os
import shutil
import zipfile

import _utils  # project-local helper module (url_download)
def main(output_folder, version='140', overwrite_flag=False):
    """Download LANDFIRE veg. type

    Parameters
    ----------
    output_folder : str
        Folder path where files will be saved.
    version : {'105', '110', '120', '130', '140'}
        LANDFIRE version string (the default is '140').
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).

    Returns
    -------
    None

    """
    version = str(version).replace('.', '')

    base_url = 'http://www.landfire.gov/bulk/downloadfile.php?FNAME='
    zip_dict = {
        '140': 'US_{0}_mosaic-US_{0}EVT_04252017.zip&TYPE=landfire'.format(version),
        '130': 'US_{0}_Mosaic-US_{0}_EVT_04232015.zip&TYPE=landfire'.format(version),
        '120': 'US_{0}_Mosaic-US_{0}_EVT_06142017.zip&TYPE=landfire'.format(version),
        '110': 'US_{0}_mosaic_Refresh-US_{0}EVT_09122104.zip&TYPE=landfire'.format(version),
        '105': 'US_{0}_mosaic_Refresh-US_{0}evt_09122104.zip&TYPE=landfire'.format(version),
    }
    download_url = base_url + zip_dict[version]

    output_name = 'US_{}_EVT'.format(version)
    zip_path = os.path.join(output_folder, output_name + '.zip')

    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)

    if not os.path.isfile(zip_path) or overwrite_flag:
        logging.info('\nDownloading LANDFIRE vegetation type')
        logging.info('  {}'.format(download_url))
        logging.info('  {}'.format(zip_path))
        _utils.url_download(download_url, zip_path)
    else:
        logging.debug('\nLANDFIRE raster already downloaded')

    if os.path.isfile(zip_path):
        logging.info('\nExtracting LANDFIRE files')
        with zipfile.ZipFile(zip_path) as zf:
            # Extract files using zip naming and folder structure
            # zf.extractall(output_folder)

            # Ignore top level zip folder name
            for member in zf.namelist():
                # Replace root folder and switch to OS separator
                output_path = list(member.split('/'))
                output_path[0] = output_name
                output_path = os.sep.join(output_path)
                output_ws = os.path.join(
                    output_folder, os.path.dirname(output_path))

                # Skip directories
                if not os.path.basename(output_path):
                    continue
                # Only keep "grid" files
                if 'Grid' not in output_path:
                    continue

                # Build output directories
                if not os.path.isdir(output_ws):
                    os.makedirs(output_ws)

                # Extract
                logging.debug('  {}'.format(output_path))
                source = zf.open(member)
                target = open(os.path.join(output_folder, output_path), "wb")
                with source, target:
                    shutil.copyfileobj(source, target)
    else:
        logging.debug('\nLANDFIRE zip file not found, skipping extraction')
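The member loop above rewrites the zip's top-level folder to US_140_EVT and keeps only the ESRI "Grid" files, so the extracted tree is independent of how LANDFIRE packaged the archive. A hypothetical invocation (the output folder name is illustrative):

main('landfire', version='140')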
Code Example #7
File: gridmet_download.py Project: davidj1s/pymetric
import datetime as dt
import logging
import os
import sys

import _utils  # project-local helper module (date_range, url_download)
def main(netcdf_ws=os.getcwd(),
         variables=['etr', 'pr'],
         start_date=None,
         end_date=None,
         overwrite_flag=False):
    """Download GRIDMET netcdf files

    Parameters
    ----------
    netcdf_ws : str
        Folder of GRIDMET netcdf files.
    variables : list, optional
        GRIDMET variables to download (the default is ['etr', 'pr']).
        Choices: 'eto', 'etr', 'pr', 'srad', 'sph', 'tmmn', 'tmmx', 'vs'
    start_date : str, optional
        ISO format date (YYYY-MM-DD).
    end_date : str, optional
        ISO format date (YYYY-MM-DD).
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).

    Returns
    -------
    None
    
    """
    logging.info('Downloading GRIDMET data\n')
    site_url = 'https://www.northwestknowledge.net/metdata/data'

    # If a date is not set, process 2017
    try:
        start_dt = dt.datetime.strptime(start_date, '%Y-%m-%d')
        logging.debug('  Start date: {}'.format(start_dt))
    except (TypeError, ValueError):
        start_dt = dt.datetime(2017, 1, 1)
        logging.info('  Start date: {}'.format(start_dt))
    try:
        end_dt = dt.datetime.strptime(end_date, '%Y-%m-%d')
        logging.debug('  End date:   {}'.format(end_dt))
    except (TypeError, ValueError):
        end_dt = dt.datetime(2017, 12, 31)
        logging.info('  End date:   {}'.format(end_dt))

    # GRIDMET rasters to extract
    data_full_list = ['eto', 'etr', 'pr', 'srad', 'sph', 'tmmn', 'tmmx', 'vs']
    if not variables:
        logging.error('\nERROR: variables parameter is empty\n')
        sys.exit()
    elif not isinstance(variables, list):
        # DEADBEEF - I could try converting comma separated strings to lists?
        logging.error('\nERROR: variables parameter must be a list\n')
        sys.exit()
    # elif 'all' in variables:
    #     logging.error('Downloading all variables\n  {}'.format(
    #         ','.join(data_full_list)))
    #     data_list = data_full_list
    # elif 'eto' in variables or 'etr' in variables:
    #     data_etr_list = ['srad', 'sph', 'tmmn', 'tmmx', 'vs']
    #     logging.error(
    #         'Downloading all variables needed to compute ETr/ETo\n  {}'.format(
    #             ','.join(data_etr_list)))
    #     data_list = data_etr_list
    elif not set(variables).issubset(set(data_full_list)):
        logging.error(
            '\nERROR: variables parameter is invalid\n  {}'.format(variables))
        sys.exit()
    else:
        data_list = variables

    # Build output workspace if it doesn't exist
    if not os.path.isdir(netcdf_ws):
        os.makedirs(netcdf_ws)

    # GRIDMET data is stored by year
    year_list = sorted(list(set([
        i_dt.year for i_dt in _utils.date_range(
            start_dt, end_dt + dt.timedelta(1))])))
    year_list = list(map(lambda x: '{:04d}'.format(x), year_list))

    # Set variable names to lower case for comparison
    data_list = list(map(lambda x: x.lower(), data_list))

    # One netCDF file per variable per year
    logging.info('')
    for year_str in year_list:
        logging.info(year_str)

        # Process each file in sub folder
        for data_str in data_list:
            file_name = '{}_{}.nc'.format(data_str, year_str)
            file_url = '{}/{}'.format(site_url, file_name)
            save_path = os.path.join(netcdf_ws, file_name)

            logging.info('  {}'.format(file_name))
            logging.debug('    {}'.format(file_url))
            logging.debug('    {}'.format(save_path))
            if os.path.isfile(save_path):
                if not overwrite_flag:
                    logging.debug('    File already exists, skipping')
                    continue
                else:
                    logging.debug('    File already exists, removing existing')
                    os.remove(save_path)

            _utils.url_download(file_url, save_path)

    logging.debug('\nScript Complete')
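For example, downloading the 2017 ETr and precipitation netCDF files might look like this hypothetical call (the folder name is illustrative):

main(netcdf_ws='gridmet_netcdf', variables=['etr', 'pr'],
     start_date='2017-01-01', end_date='2017-12-31')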
Code Example #8
File: nldas_ancillary.py Project: crobi56/pymetric-1
import logging
import os
import subprocess

import drigo  # GDAL raster helper package used throughout pymetric
import numpy as np
import pandas as pd

import _utils  # project-local helper module (url_download)
def main(ancillary_ws=os.getcwd(),
         zero_elev_nodata_flag=False,
         overwrite_flag=False):
    """Process NLDAS ancillary data

    Parameters
    ----------
    ancillary_ws : str
        Folder of ancillary rasters.
    zero_elev_nodata_flag : bool, optional
        If True, set elevation nodata values to 0 (the default is False).
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).

    Returns
    -------
    None

    """
    logging.info('\nProcess NLDAS ancillary data')

    # Site URLs
    mask_url = 'http://ldas.gsfc.nasa.gov/nldas/asc/NLDASmask_UMDunified.asc'
    elev_url = 'http://ldas.gsfc.nasa.gov/nldas/asc/gtopomean15k.asc'

    nldas_epsg = 'EPSG:4269'
    # nldas_epsg = 'EPSG:4326'

    nldas_nodata = -9999.0

    # Site URLs
    # file_re = re.compile(
    #    'NLDAS_FORA0125_H.A(?P<YEAR>\d{4})(?P<MONTH>\d{2})(?P<DAY>\d{2}).' +
    #    '(?P<TIME>\d{4}).002.grb')
    # file_re = re.compile(
    #    'NLDAS_FORA0125_H.A(?P<DATE>\d{8}).(?P<TIME>\d{4}).002.grb')

    # Build output workspace if it doesn't exist
    if not os.path.isdir(ancillary_ws):
        os.makedirs(ancillary_ws)

    # Input paths
    input_elev_ascii = os.path.join(ancillary_ws, os.path.basename(elev_url))
    input_mask_ascii = os.path.join(ancillary_ws, os.path.basename(mask_url))

    # Output paths
    elev_ascii = os.path.join(ancillary_ws, 'nldas_elev.asc')
    mask_ascii = os.path.join(ancillary_ws, 'nldas_mask.asc')
    lat_ascii = os.path.join(ancillary_ws, 'nldas_lat.asc')
    lon_ascii = os.path.join(ancillary_ws, 'nldas_lon.asc')
    elev_raster = os.path.join(ancillary_ws, 'nldas_elev.img')
    mask_raster = os.path.join(ancillary_ws, 'nldas_mask.img')
    lat_raster = os.path.join(ancillary_ws, 'nldas_lat.img')
    lon_raster = os.path.join(ancillary_ws, 'nldas_lon.img')

    # Download the elevation data if necessary
    logging.info('\nDownloading ASCII files')
    if overwrite_flag or not os.path.isfile(input_elev_ascii):
        logging.info("  {}".format(os.path.basename(elev_url)))
        logging.debug("    {}".format(elev_url))
        logging.debug("    {}".format(input_elev_ascii))
        _utils.url_download(elev_url, input_elev_ascii)

    # Download the land/water mask if necessary
    if overwrite_flag or not os.path.isfile(input_mask_ascii):
        logging.info("  {}".format(os.path.basename(mask_url)))
        logging.debug("    {}".format(elev_url))
        logging.debug("    {}".format(input_elev_ascii))
        _utils.url_download(mask_url, input_mask_ascii)

    # The XYZ ASCII format expects LON/LAT/VALUE (i.e. X, Y, value)
    # Export new asc files with just the needed columns for each raster
    logging.debug('\nParsing input ASCII files')

    logging.debug('  {}'.format(input_elev_ascii))
    elev_df = pd.read_csv(input_elev_ascii,
                          header=None,
                          sep=r"\s+",
                          engine='python',
                          names=['COL', 'ROW', 'LAT', 'LON', 'VALUE'])
    elev_df = elev_df.sort_values(['LAT', 'LON'])
    if zero_elev_nodata_flag:
        elev_df.loc[elev_df['VALUE'] == nldas_nodata, 'VALUE'] = 0
    elev_df[['LON', 'LAT', 'VALUE']].to_csv(elev_ascii,
                                            header=None,
                                            index=False)

    logging.debug('  {}'.format(input_mask_ascii))
    mask_df = pd.read_csv(input_mask_ascii,
                          header=None,
                          sep=r"\s+",
                          engine='python',
                          names=['COL', 'ROW', 'LAT', 'LON', 'VALUE'])
    mask_df = mask_df.sort_values(['LAT', 'LON'])
    mask_df[['LON', 'LAT', 'VALUE']].to_csv(mask_ascii,
                                            header=None,
                                            index=False)
    mask_df[['LON', 'LAT', 'LAT']].to_csv(lat_ascii, header=None, index=False)
    mask_df[['LON', 'LAT', 'LON']].to_csv(lon_ascii, header=None, index=False)

    # Remove existing rasters if necessary
    #   -overwrite argument could be passed to gdalwarp instead
    if overwrite_flag:
        logging.info('\nRemoving existing rasters')
        if os.path.isfile(elev_raster):
            logging.info('  {}'.format(elev_raster))
            subprocess.call(['gdalmanage', 'delete', elev_raster])
        if os.path.isfile(mask_raster):
            logging.info('  {}'.format(mask_raster))
            subprocess.call(['gdalmanage', 'delete', mask_raster])
        if os.path.isfile(lat_raster):
            logging.info('  {}'.format(lat_raster))
            subprocess.call(['gdalmanage', 'delete', lat_raster])
        if os.path.isfile(lon_raster):
            logging.info('  {}'.format(lon_raster))
            subprocess.call(['gdalmanage', 'delete', lon_raster])

    # Convert XYZ ascii to raster
    logging.info('\nConverting ASCII to raster')
    if not os.path.isfile(elev_raster):
        logging.info('  {}'.format(elev_ascii))
        subprocess.call([
            'gdalwarp', '-of', 'HFA', '-t_srs', nldas_epsg, '-co',
            'COMPRESSED=TRUE', elev_ascii, elev_raster, '-ot', 'Float32',
            '-srcnodata',
            str(nldas_nodata), '-dstnodata',
            str(drigo.numpy_type_nodata(np.float32))
        ],
                        cwd=ancillary_ws)
        # subprocess.call(
        #     ['gdal_translate', '-of', 'HFA', '-a_srs', nldas_epsg,
        #      '-co', 'COMPRESSED=TRUE', elev_ascii, elev_raster],
        #     cwd=ancillary_ws)
    if not os.path.isfile(mask_raster):
        logging.info('  {}'.format(mask_ascii))
        subprocess.call([
            'gdalwarp', '-of', 'HFA', '-t_srs', nldas_epsg, '-co',
            'COMPRESSED=TRUE', mask_ascii, mask_raster
        ],
                        cwd=ancillary_ws)
    if not os.path.isfile(lat_raster):
        logging.info('  {}'.format(lat_ascii))
        subprocess.call([
            'gdalwarp', '-of', 'HFA', '-t_srs', nldas_epsg, '-co',
            'COMPRESSED=TRUE', lat_ascii, lat_raster
        ],
                        cwd=ancillary_ws)
    if not os.path.isfile(lon_raster):
        logging.info('  {}'.format(lon_ascii))
        subprocess.call([
            'gdalwarp', '-of', 'HFA', '-t_srs', nldas_epsg, '-co',
            'COMPRESSED=TRUE', lon_ascii, lon_raster
        ],
                        cwd=ancillary_ws)

    # Cleanup
    os.remove(elev_ascii)
    os.remove(mask_ascii)
    os.remove(lat_ascii)
    os.remove(lon_ascii)

    logging.debug('\nScript Complete')
Code Example #9
File: cimis_download.py Project: crobi56/pymetric-1
import datetime as dt
import logging
import os
import re
import sys

import requests

import _utils  # project-local helper module (date_range, url_download)
def main(start_dt, end_dt, output_ws, variables=['all'], overwrite_flag=False):
    """Download CIMIS data

    Parameters
    ----------
    start_dt : datetime
        Start date.
    end_dt : datetime
        End date.
    output_ws : str
        Folder path of the output ascii files.
    variables : list, optional
        Choices: 'ETo', 'Rs', 'Tdew', 'Tn', 'Tx', 'U2'.
        'K', 'Rnl', and 'Rso' can be downloaded but are not needed.
        Set as ['all'] to download all variables.
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).

    Returns
    -------
    None

    Notes
    -----
    The files on the CIMIS server do not appear to be compressed even though
      they have a .asc.gz file extension.
    The files will be saved directly as ASCII.

    """
    logging.info('\nDownloading CIMIS data\n')
    logging.debug('  Start date: {}'.format(start_dt))
    logging.debug('  End date:   {}'.format(end_dt))

    # Site URL
    site_url = 'http://cimis.casil.ucdavis.edu/cimis'

    # CIMIS rasters to extract
    data_full_list = ['ETo', 'Rso', 'Rs', 'Tdew', 'Tn', 'Tx', 'U2']
    if not variables:
        logging.error('\nERROR: variables parameter is empty\n')
        sys.exit()
    elif type(variables) is not list:
        logging.error('\nERROR: variables parameter must be a list\n')
        sys.exit()
    elif 'all' in variables:
        logging.info('Downloading all variables\n  {}'.format(
            ','.join(data_full_list)))
        data_list = data_full_list[:]
    elif not set(variables).issubset(set(data_full_list)):
        logging.error(
            '\nERROR: variables parameter is invalid\n  {}'.format(variables))
        sys.exit()
    else:
        data_list = variables

    # Build output workspace if it doesn't exist
    if not os.path.isdir(output_ws):
        os.makedirs(output_ws)

    # Set variable names to lower case for comparison
    data_list = list(map(lambda x: x.lower(), data_list))

    # Each sub folder in the main folder has all imagery for 1 day
    # The path for each subfolder is the /YYYY/MM/DD
    logging.info('')
    for input_date in _utils.date_range(start_dt, end_dt + dt.timedelta(1)):
        logging.info('{}'.format(input_date.date()))
        date_url = site_url + '/' + input_date.strftime("%Y/%m/%d")
        logging.debug('  {}'.format(date_url))

        # Download a list of all files in the date sub folder
        try:
            date_html = requests.get(date_url + '/').text
        except requests.exceptions.RequestException:
            logging.error("  ERROR: {}".format(date_url))
            continue
        file_list = sorted(
            list(set(re.findall(r'href=[\'"]?([^\'" >]+)', date_html))))
        if not file_list:
            logging.debug('  Empty file list, skipping date')
            continue

        # Create a separate folder for each day
        year_ws = os.path.join(output_ws, input_date.strftime("%Y"))
        if not os.path.isdir(year_ws):
            os.mkdir(year_ws)
        date_ws = os.path.join(year_ws, input_date.strftime("%Y_%m_%d"))
        if not os.path.isdir(date_ws):
            os.mkdir(date_ws)

        # Process each file in sub folder
        for file_name in file_list:
            if not file_name.endswith('.asc.gz'):
                continue
            elif file_name.replace('.asc.gz', '').lower() not in data_list:
                continue

            file_url = '{}/{}'.format(date_url, file_name)

            # DEADBEEF - The files on the CIMIS server do not appear to be
            #   compressed even though they have a .asc.gz file extension.
            # The files are saved directly as ASCII.
            save_path = os.path.join(date_ws,
                                     file_name.replace('.asc.gz', '.asc'))
            # save_path = os.path.join(date_ws, file_name)

            logging.info('  {}'.format(os.path.basename(save_path)))
            logging.debug('    {}'.format(file_url))
            logging.debug('    {}'.format(save_path))
            if os.path.isfile(save_path):
                if not overwrite_flag:
                    logging.debug('    File already exists, skipping')
                    continue
                else:
                    logging.debug('    File already exists, removing existing')
                    os.remove(save_path)

            _utils.url_download(file_url, save_path)

    logging.debug('\nScript Complete')
Code Example #10
File: cimis_ancillary.py Project: crobi56/pymetric-1
import logging
import os
import subprocess
import zipfile

import drigo  # GDAL raster helper package used throughout pymetric
import numpy as np

import _utils  # project-local helper module (url_download)
def main(ancillary_ws, overwrite_flag=False):
    """Process CIMIS ancillary data

    Parameters
    ----------
    ancillary_ws : str
        Folder of ancillary rasters.
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).

    Returns
    -------
    None
    
    """
    logging.info('\nProcess CIMIS ancillary data')

    # Site URL
    site_url = 'http://cimis.casil.ucdavis.edu/cimis'

    # DEM for air pressure calculation
    # http://topotools.cr.usgs.gov/gmted_viewer/gmted2010_global_grids.php
    elev_full_url = 'http://edcintl.cr.usgs.gov/downloads/sciweb1/shared/topo/downloads/GMTED/Grid_ZipFiles/mn30_grd.zip'
    elev_full_zip = os.path.join(ancillary_ws, 'mn30_grd.zip')
    elev_full_raster = os.path.join(ancillary_ws, 'mn30_grd')

    # Get CIMIS grid properties from 2010/01/01 ETo raster
    # Grid of the spatial cimis input rasters
    # cimis_extent = drigo.Extent((-410000, -660000, 610000, 460000))
    # cimis_cs = 2000
    # cimis_geo = drigo.extent_geo(cimis_extent, cimis_cs)

    # Spatial reference parameters
    cimis_proj4 = (
        '+proj=aea +lat_1=34 +lat_2=40.5 +lat_0=0 +lon_0=-120 +x_0=0 '
        '+y_0=-4000000 +ellps=GRS80 +datum=NAD83 +units=m +no_defs')
    cimis_osr = drigo.proj4_osr(cimis_proj4)
    # cimis_epsg = 3310  # NAD_1983_California_Teale_Albers
    # cimis_osr = drigo.epsg_osr(cimis_epsg)
    # Comment this line out if building GeoTIFF instead of IMG
    cimis_osr.MorphToESRI()
    cimis_proj = cimis_osr.ExportToWkt()

    # snap_xmin, snap_ymin = (0, 0)

    # Build output workspace if it doesn't exist
    if not os.path.isdir(ancillary_ws):
        os.makedirs(ancillary_ws)

    # File paths
    mask_url = site_url + '/2010/01/01/ETo.asc.gz'
    # mask_gz = os.path.join(ancillary_ws, 'cimis_mask.asc.gz')
    mask_ascii = os.path.join(ancillary_ws, 'cimis_mask.asc')
    mask_raster = os.path.join(ancillary_ws, 'cimis_mask.img')
    elev_raster = os.path.join(ancillary_ws, 'cimis_elev.img')
    lat_raster = os.path.join(ancillary_ws, 'cimis_lat.img')
    lon_raster = os.path.join(ancillary_ws, 'cimis_lon.img')

    # Download an ETo ASCII raster to generate the mask raster
    if overwrite_flag or not os.path.isfile(mask_raster):
        logging.info('\nCIMIS mask')
        logging.debug('  Downloading')
        logging.debug("    {}".format(mask_url))
        logging.debug("    {}".format(mask_ascii))
        _utils.url_download(mask_url, mask_ascii)

        # DEADBEEF - The files do not appear to be compressed even though
        #   they have a .asc.gz file extension on the server.
        # logging.debug("    {}".format(mask_gz))
        # _utils.url_download(mask_url, mask_gz)
        #
        # # Uncompress '.gz' file to a new file
        # logging.debug('  Uncompressing')
        # logging.debug('    {}'.format(mask_ascii))
        # try:
        #     input_f = gzip.open(mask_gz, 'rb')
        #     output_f = open(mask_ascii, 'wb')
        #     output_f.write(input_f.read())
        #     output_f.close()
        #     input_f.close()
        #     del input_f, output_f
        # except:
        #     logging.error("  ERROR EXTRACTING FILE")
        # os.remove(mask_gz)

        # # Set spatial reference of the ASCII files
        # if build_prj_flag:
        #     prj_file = open(mask_asc.replace('.asc','.prj'), 'w')
        #     prj_file.write(output_proj)
        #     prj_file.close()

        # Convert the ASCII raster to an IMG raster
        logging.debug('  Computing mask')
        logging.debug('    {}'.format(mask_raster))
        mask_array = drigo.raster_to_array(mask_ascii, return_nodata=False)
        cimis_geo = drigo.raster_path_geo(mask_ascii)
        # cimis_extent = drigo.raster_path_extent(mask_ascii)
        logging.debug('    {}'.format(cimis_geo))
        mask_array = np.isfinite(mask_array).astype(np.uint8)
        drigo.array_to_raster(mask_array,
                              mask_raster,
                              output_geo=cimis_geo,
                              output_proj=cimis_proj,
                              output_nodata=0)
        # drigo.ascii_to_raster(
        #     mask_ascii, mask_raster, np.float32, cimis_proj)
        os.remove(mask_ascii)

    # Compute latitude/longitude rasters
    if ((overwrite_flag or not os.path.isfile(lat_raster)
         or not os.path.isfile(lon_raster)) and os.path.isfile(mask_raster)):
        logging.info('\nCIMIS latitude/longitude')
        # Read the geo-transform from the mask raster since cimis_geo is
        #   only set above when the mask raster is rebuilt
        cimis_geo = drigo.raster_path_geo(mask_raster)
        logging.debug('    {}'.format(lat_raster))
        lat_array, lon_array = drigo.raster_lat_lon_func(mask_raster)
        drigo.array_to_raster(lat_array,
                              lat_raster,
                              output_geo=cimis_geo,
                              output_proj=cimis_proj)
        logging.debug('    {}'.format(lon_raster))
        drigo.array_to_raster(lon_array,
                              lon_raster,
                              output_geo=cimis_geo,
                              output_proj=cimis_proj)

    # Compute DEM raster
    if overwrite_flag or not os.path.isfile(elev_raster):
        logging.info('\nCIMIS DEM')
        logging.debug('  Downloading GMTED2010 DEM')
        logging.debug("    {}".format(elev_full_url))
        logging.debug("    {}".format(elev_full_zip))
        if overwrite_flag or not os.path.isfile(elev_full_zip):
            _utils.url_download(elev_full_url, elev_full_zip)

        # Extract the zip file
        logging.debug('  Extracting')
        logging.debug('    {}'.format(elev_full_raster))
        if overwrite_flag or not os.path.isfile(elev_full_raster):
            try:
                with zipfile.ZipFile(elev_full_zip, "r") as z:
                    z.extractall(ancillary_ws)
            except Exception:
                logging.error("  ERROR EXTRACTING FILE")
            os.remove(elev_full_zip)

        # Get the extent and cellsize from the mask
        logging.debug('  Projecting to CIMIS grid')
        cimis_cs = drigo.raster_path_cellsize(mask_raster)[0]
        cimis_extent = drigo.raster_path_extent(mask_raster)
        logging.debug('    Extent: {}'.format(cimis_extent))
        logging.debug('    Cellsize: {}'.format(cimis_cs))

        logging.info('  {}'.format(elev_raster))
        if overwrite_flag and os.path.isfile(elev_raster):
            subprocess.call(['gdalmanage', 'delete', elev_raster])
        if not os.path.isfile(elev_raster):
            subprocess.call([
                'gdalwarp', '-r', 'average', '-t_srs', cimis_proj4, '-te',
                str(cimis_extent.xmin),
                str(cimis_extent.ymin),
                str(cimis_extent.xmax),
                str(cimis_extent.ymax), '-tr',
                str(cimis_cs),
                str(cimis_cs), '-of', 'HFA', '-co', 'COMPRESSED=TRUE',
                elev_full_raster, elev_raster
            ],
                            cwd=ancillary_ws)

    logging.debug('\nScript Complete')
Code Example #11
import logging
import os
import sys

import drigo  # GDAL raster helper package used throughout pymetric
import netCDF4
import numpy as np
from osgeo import osr

import _utils  # project-local helper module (url_download)
def main(ancillary_ws=os.getcwd(),
         zero_elev_nodata_flag=False,
         overwrite_flag=False):
    """Process GRIDMET ancillary data

    Parameters
    ----------
    ancillary_ws : str
        Folder of ancillary rasters.
    zero_elev_nodata_flag : bool, optional
        If True, set elevation nodata values to 0 (the default is False).
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).

    Returns
    -------
    None

    """
    logging.info('\nProcess GRIDMET ancillary rasters')

    # Site URL
    elev_url = 'https://climate.northwestknowledge.net/METDATA/data/metdata_elevationdata.nc'

    # Manually define the spatial reference and extent of the GRIDMET data
    # This could be read in from a raster
    gridmet_osr = osr.SpatialReference()
    # Assume GRIDMET data is in WGS84 not NAD83 (need to check with John)
    gridmet_osr.ImportFromEPSG(4326)
    gridmet_proj = drigo.osr_proj(gridmet_osr)
    gridmet_cs = 1. / 24  # 0.041666666666666666
    gridmet_x = -125 + gridmet_cs * 5
    gridmet_y = 49 + gridmet_cs * 10
    # gridmet_y = lon_array[0,0] - 0.5 * gridmet_cs
    # gridmet_y = lat_array[0,0] + 0.5 * gridmet_cs
    # gridmet_rows, gridmet_cols = elev_array.shape
    gridmet_geo = (gridmet_x, gridmet_cs, 0., gridmet_y, 0., -gridmet_cs)
    # gridmet_extent = drigo.geo_extent(
    #     gridmet_geo, gridmet_rows, gridmet_cols)
    # Keep track of the original/full geo-transform and extent
    # gridmet_full_geo = (
    #     gridmet_x, gridmet_cs, 0., gridmet_y, 0., -gridmet_cs)
    # gridmet_full_extent = drigo.geo_extent(
    #     gridmet_geo, gridmet_rows, gridmet_cols)
    logging.debug('  X/Y: {} {}'.format(gridmet_x, gridmet_y))
    logging.debug('  Geo: {}'.format(gridmet_geo))
    logging.debug('  Cellsize: {}'.format(gridmet_cs))

    # Build output workspace if it doesn't exist
    if not os.path.isdir(ancillary_ws):
        os.makedirs(ancillary_ws)

    # Output paths
    elev_nc = os.path.join(ancillary_ws, os.path.basename(elev_url))
    elev_raster = os.path.join(ancillary_ws, 'gridmet_elev.img')
    lat_raster = os.path.join(ancillary_ws, 'gridmet_lat.img')
    lon_raster = os.path.join(ancillary_ws, 'gridmet_lon.img')

    # Compute DEM raster
    if overwrite_flag or not os.path.isfile(elev_raster):
        logging.info('\nGRIDMET DEM')
        logging.info('  Downloading')
        logging.debug('    {}'.format(elev_url))
        logging.debug('    {}'.format(elev_nc))
        _utils.url_download(elev_url, elev_nc)
        # try:
        #     urllib.urlretrieve(elev_url, elev_nc)
        # except:
        #     logging.error("  ERROR: {}\n  FILE: {}".format(
        #         sys.exc_info()[0], elev_nc))
        #     # Try to remove the file since it may not have completely downloaded
        #     os.remove(elev_nc)

        logging.info('  Extracting')
        logging.debug('    {}'.format(elev_raster))
        elev_nc_f = netCDF4.Dataset(elev_nc, 'r')
        elev_ma = elev_nc_f.variables['elevation'][0, :, :]
        elev_array = elev_ma.data.astype(np.float32)
        # elev_nodata = float(elev_ma.fill_value)
        elev_array[(elev_array == elev_ma.fill_value) |
                   (elev_array <= -300)] = np.nan
        if zero_elev_nodata_flag:
            elev_array[np.isnan(elev_array)] = 0
        if np.all(np.isnan(elev_array)):
            logging.error(
                '\nERROR: The elevation array is all nodata, exiting\n')
            sys.exit()
        drigo.array_to_raster(elev_array,
                              elev_raster,
                              output_geo=gridmet_geo,
                              output_proj=gridmet_proj)
        elev_nc_f.close()
        # del elev_nc_f, elev_ma, elev_array, elev_nodata
        del elev_nc_f, elev_ma, elev_array
        os.remove(elev_nc)

    # Compute latitude/longitude rasters
    if ((overwrite_flag or not os.path.isfile(lat_raster)
         or not os.path.isfile(lon_raster)) and os.path.isfile(elev_raster)):
        logging.info('\nGRIDMET Latitude/Longitude')
        logging.debug('    {}'.format(lat_raster))
        lat_array, lon_array = drigo.raster_lat_lon_func(elev_raster)
        # Handle the conversion to radians in the other GRIDMET scripts
        # lat_array *= (math.pi / 180)
        drigo.array_to_raster(lat_array,
                              lat_raster,
                              output_geo=gridmet_geo,
                              output_proj=gridmet_proj)
        logging.debug('    {}'.format(lon_raster))
        drigo.array_to_raster(lon_array,
                              lon_raster,
                              output_geo=gridmet_geo,
                              output_proj=gridmet_proj)
        del lat_array, lon_array

    logging.debug('\nScript Complete')
Code Example #12
File: download_ned.py Project: crobi56/pymetric-1
import logging
import os
import re
import zipfile

import drigo  # GDAL raster helper package used throughout pymetric
from osgeo import ogr

import utils  # project-local helper module (html_link_list, url_download)
def main(extent_path, output_folder, overwrite_flag=False):
    """Download NED tiles that intersect the study_area

    Parameters
    ----------
    extent_path : str 
        File path to study area shapefile.
    output_folder : str 
        Folder path where files will be saved.
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).

    Returns
    -------
    None
    
    Notes
    -----
    Script assumes DEM data is in 1x1 WGS84 degree tiles.
    Download 10m (1/3 arc-second) or 30m (1 arc-second) versions from:
        10m: rockyftp.cr.usgs.gov/vdelivery/Datasets/Staged/Elevation/13/IMG
        30m: rockyftp.cr.usgs.gov/vdelivery/Datasets/Staged/Elevation/1/IMG
    This example downloads only the 30m DEM.

    """
    logging.info('\nDownload NED tiles')
    # site_url = 'rockyftp.cr.usgs.gov'
    site_url = 'https://prd-tnm.s3.amazonaws.com'

    # site_folder = 'vdelivery/Datasets/Staged/Elevation/1/IMG'
    site_folder = 'StagedProducts/Elevation/1/IMG'

    # This path is what must be queried to list the links
    site_file_list_path = 'https://prd-tnm.s3.amazonaws.com/index.html?prefix=StagedProducts/Elevation/1/IMG/'

    # Use 1 degree snap point and "cellsize" to get 1x1 degree tiles
    tile_osr = drigo.epsg_osr(4326)
    tile_x, tile_y, tile_cs = 0, 0, 1

    buffer_cells = 0

    # Error checking
    if not os.path.isfile(extent_path):
        logging.error('\nERROR: The extent_path does not exist\n')
        return False
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)

    # Check that input is a shapefile

    # Get the extent of each feature
    logging.debug('  Reading extents')
    lat_lon_list = []
    shp_driver = ogr.GetDriverByName('ESRI Shapefile')
    input_ds = shp_driver.Open(extent_path, 0)  # open read-only
    input_osr = drigo.feature_ds_osr(input_ds)
    input_layer = input_ds.GetLayer()
    input_ftr = input_layer.GetNextFeature()
    while input_ftr:
        input_geom = input_ftr.GetGeometryRef()
        input_extent = drigo.Extent(input_geom.GetEnvelope())
        input_extent = input_extent.ogrenv_swap()
        input_ftr = input_layer.GetNextFeature()
        logging.debug('Input Extent:  {}'.format(input_extent))

        # Project study area extent to input raster coordinate system
        output_extent = drigo.project_extent(input_extent, input_osr, tile_osr)
        logging.debug('Output Extent: {}'.format(output_extent))

        # Extent needed to select 1x1 degree tiles
        tile_extent = output_extent.copy()
        tile_extent.adjust_to_snap('EXPAND', tile_x, tile_y, tile_cs)
        logging.debug('Tile Extent:   {}'.format(tile_extent))

        # Get list of available tiles that intersect the extent
        lat_lon_list.extend([
            (lat, -lon)
            for lon in range(int(tile_extent.xmin), int(tile_extent.xmax))
            for lat in range(int(tile_extent.ymax), int(tile_extent.ymin), -1)
        ])
    lat_lon_list = sorted(list(set(lat_lon_list)))

    # Retrieve a list of files available on the site (keyed by lat/lon)
    logging.debug('  Retrieving NED tile list from server')
    zip_files = {
        m.group(1): x.split('/')[-1]
        for x in utils.html_link_list(site_file_list_path)
        for m in [re.search(r'[\w]*(n\d{2}w\d{3})[\w]*\.zip', x)] if m
    }
    # logging.debug(zip_files[:10])

    # Attempt to download the tiles
    logging.debug('\nDownloading tiles')
    logging.info('')
    for lat_lon in lat_lon_list:
        logging.info('Tile: {}'.format(lat_lon))
        lat_lon_key = 'n{:02d}w{:03d}'.format(*lat_lon)

        try:
            zip_name = zip_files[lat_lon_key]
        except KeyError:
            logging.exception(
                'Error finding zip file for {}, skipping tile'.format(lat_lon))
            continue
        zip_url = '/'.join([site_url, site_folder, zip_name])
        zip_path = os.path.join(output_folder, zip_name)

        tile_path = os.path.join(output_folder, '{}.img'.format(lat_lon_key))

        logging.debug('  {}'.format(zip_url))
        logging.debug('  {}'.format(zip_path))
        logging.debug('  {}'.format(tile_path))
        if os.path.isfile(tile_path):
            if not overwrite_flag:
                logging.debug('  tile already exists, skipping')
                continue
            else:
                logging.debug('  tile already exists, removing')
                os.remove(tile_path)

        utils.url_download(zip_url, zip_path)

        logging.debug('  Extracting')
        try:
            zip_f = zipfile.ZipFile(zip_path)
            img_name = [
                x for x in zip_f.namelist()
                if re.search(r'[\w]*(n\d{2}w\d{3})[\w]*\.img$', x)
            ][0]
            img_path = os.path.join(output_folder, img_name)
            zip_f.extract(img_name, output_folder)
            zip_f.close()
            os.rename(img_path, tile_path)
        except Exception as e:
            logging.info('  Unhandled exception: {}'.format(e))

        try:
            os.remove(zip_path)
        except Exception as e:
            logging.info('  Unhandled exception: {}'.format(e))
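The tile keys above are built from the (lat, -lon) tuples collected earlier, so a 1x1 degree tile whose upper-left corner sits at 39N, 120W is stored as (39, 120) and formatted as 'n39w120'. A small illustrative check of that formatting:

lat_lon = (39, 120)  # (lat, -lon) for the tile with upper-left corner at 39N, 120W
assert 'n{:02d}w{:03d}'.format(*lat_lon) == 'n39w120'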