Example 1
import os
import time

import netCDF4
import numpy as np
from pybob.bob_tools import mkdir_p


def create_nc(img, outfile='mmaster_stack.nc', clobber=False, t0=None):
    """
    Create a NetCDF dataset with x, y, and time variables.

    :param img: Input GeoImg to base shape of x, y variables on.
    :param outfile: Filename for output NetCDF file.
    :param clobber: clobber existing dataset when creating NetCDF file.
    :param t0: Initial time for creation of time variable. Default is 01 Jan 1900.
    :type img: pybob.GeoImg
    :type outfile: str
    :type clobber: bool
    :type t0: np.datetime64
    :returns: nco, to, xo, yo -- the output NetCDF dataset and its time, x, and y variables.
    """
    nrows, ncols = img.shape

    # NetCDF file creation fails if the parent directory does not exist, so create it first
    outdir = os.path.dirname(outfile)
    if outdir == '':
        outdir = '.'
    mkdir_p(outdir)

    nco = netCDF4.Dataset(outfile, 'w', clobber=clobber)
    nco.createDimension('x', ncols)
    nco.createDimension('y', nrows)
    nco.createDimension('time', None)
    nco.Conventions = 'CF-1.7'
    nco.description = "Stack of co-registered DEMs produced using MMASTER (+ other sources). \n" + \
                      "MMASTER scripts and documentation: https://github.com/luc-girod/MMASTER-workflows \n" + \
                      "pybob source and documentation: https://github.com/iamdonovan/pybob"
    nco.history = "Created " + time.ctime(time.time())
    nco.source = "Robert McNabb ([email protected])"

    to = nco.createVariable('time', 'f4', ('time'))
    if t0 is None:
        to.units = 'days since 1900-01-01'
    else:
        to.units = 'days since {}'.format(np.datetime_as_string(t0))
    to.calendar = 'standard'
    to.standard_name = 'date'

    xo = nco.createVariable('x', 'f4', ('x'))
    xo.units = 'm'
    xo.standard_name = 'projection_x_coordinate'
    xo.axis = 'X'

    yo = nco.createVariable('y', 'f4', ('y'))
    yo.units = 'm'
    yo.standard_name = 'projection_y_coordinate'
    yo.axis = 'Y'

    return nco, to, xo, yo
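A minimal usage sketch (not from the source), assuming a pybob GeoImg loaded from a hypothetical GeoTIFF; the output path and t0 value are illustrative:

from pybob.GeoImg import GeoImg

img = GeoImg('my_dem.tif')  # hypothetical input DEM
nco, to, xo, yo = create_nc(img, outfile='stacks/my_stack.nc', clobber=True,
                            t0=np.datetime64('2000-01-01'))
# ... fill to/xo/yo and add elevation variables here, then:
nco.close()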
Example 2
            list_lat.append(lat)
            list_lon.append(lon)
    else:
        print('Working on: ' + str(nproc) + ' cores...')
        argsin = [(fn_dem, i, len(list_dem))
                  for i, fn_dem in enumerate(list_dem)]
        pool = mp.Pool(nproc, maxtasksperchild=1)
        outputs = pool.map(wrapper_raster_to_point, argsin, chunksize=1)
        pool.close()
        pool.join()

        zipped = list(zip(*outputs))

        list_h = zipped[0]
        list_lat = zipped[1]
        list_lon = zipped[2]

    dfs = []
    for i in range(len(list_h)):
        df = pd.DataFrame()
        df = df.assign(h=list_h[i], lat=list_lat[i], lon=list_lon[i])
        df['t'] = list_dt[i]
        df['frame'] = list_f[i]
        dfs.append(df)

    df_reg = pd.concat(dfs)

    fn_out = os.path.join(output_dir, reg,
                          'IODEM3_' + reg + '_' + str(out_res) + '_pt.csv')
    mkdir_p(os.path.dirname(fn_out))
    df_reg.to_csv(fn_out, index=False)
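wrapper_raster_to_point is not shown in this excerpt; a minimal sketch of a compatible wrapper, where raster_to_point is a hypothetical helper returning per-pixel elevation, latitude, and longitude arrays:

def wrapper_raster_to_point(argsin):
    # unpack the (filename, index, total) tuple built in the parallel branch
    fn_dem, i, itot = argsin
    print('Working on DEM ' + str(i + 1) + ' out of ' + str(itot))
    h, lat, lon = raster_to_point(fn_dem)  # hypothetical helper
    return h, lat, lon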
Example 3
import os
import pyddem.fit_tools as ft
import xarray as xr
import multiprocessing as mp
import numpy as np
from pybob.bob_tools import mkdir_p
from glob import glob

# example to extract elevation change maps (.tif) from the elevation time series (.nc)

# all files worldwide
world_dir = '/calcul/santo/hugonnet/worldwide'
out_pdir = '/data/icesat/travail_en_cours/romain/all_dhdts'
mkdir_p(out_pdir)
# number of cores for processing
nproc = 32

# list of periods for which to derive elevation change
list_tlims = [(np.datetime64('2000-01-01'), np.datetime64('2020-01-01')),
              (np.datetime64('2000-01-01'), np.datetime64('2010-01-01')),
              (np.datetime64('2010-01-01'), np.datetime64('2020-01-01')),
              (np.datetime64('2000-01-01'), np.datetime64('2005-01-01')),
              (np.datetime64('2005-01-01'), np.datetime64('2010-01-01')),
              (np.datetime64('2010-01-01'), np.datetime64('2015-01-01')),
              (np.datetime64('2015-01-01'), np.datetime64('2020-01-01'))]


# wrapper for multi-processing
def wrapper_get_dh_stack(argsin):
    tlim, fn_stack, out_dir, i, itot = argsin
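The wrapper body is cut off in this excerpt. As a rough sketch of the differencing it drives, assuming the fitted stacks store elevation in a variable named 'z' (an assumption, not pyddem's documented layout):

def sketch_dh_from_stack(fn_stack, tlim):
    t0, t1 = tlim
    with xr.open_dataset(fn_stack) as ds:
        # interpolate the elevation variable to both dates and difference them
        dh = (ds['z'].interp(time=t1) - ds['z'].interp(time=t0)).load()
    return dh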

Example 4
import os
from glob import glob

from osgeo import gdal
from pybob.bob_tools import mkdir_p

# setsm_dir and extract_file_from_tar_gz are defined earlier in the original script
tiles = os.listdir(setsm_dir)

for tile in tiles:
    print('Searching for tile ' + tile + ' in folder ' + setsm_dir + '...')
    subtile_dir = os.path.join(setsm_dir, tile)
    seg_tar_gz_list = [os.path.join(subtile_dir, tar_file) for tar_file in os.listdir(subtile_dir) if
                       tar_file.endswith('.tar.gz')]
    print('Found ' + str(len(seg_tar_gz_list)) + ' segments in tile folder.')

    # 2/ EXTRACT ALL STRIPS

    tmp_dir = os.path.join(setsm_dir, tile, 'all_strips')

    mkdir_p(tmp_dir)

    list_tmp_dem = [os.path.join(tmp_dir, os.path.splitext(os.path.splitext(os.path.basename(seg_tar_gz))[0])[0] + '_dem.tif')
                    for seg_tar_gz in seg_tar_gz_list]
    for i, seg_tar_gz in enumerate(seg_tar_gz_list):
        print('Extracting dem file of segment ' + str(i + 1) + ' out of ' + str(len(seg_tar_gz_list)))
        extract_file_from_tar_gz(seg_tar_gz,
                                 os.path.splitext(os.path.splitext(os.path.basename(seg_tar_gz))[0])[0] + '_dem.tif',
                                 list_tmp_dem[i])
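extract_file_from_tar_gz is used above but not defined in this excerpt; a minimal sketch using the standard tarfile module (matching members by basename is an assumption):

import tarfile

def extract_file_from_tar_gz(fn_tar_gz, member_name, fn_out):
    # extract a single member of a .tar.gz archive to fn_out
    with tarfile.open(fn_tar_gz, 'r:gz') as tar:
        member = next(m for m in tar.getmembers()
                      if os.path.basename(m.name) == member_name)
        with tar.extractfile(member) as f_in, open(fn_out, 'wb') as f_out:
            f_out.write(f_in.read())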


list_files = glob(os.path.join(setsm_dir, '**/*_dem.tif'), recursive=True)
fn_shp = '/data/icesat/travail_en_cours/romain/data/outlines/rgi60/regions/rgi60_merge.shp'

ds_shp = gdal.OpenEx(fn_shp, gdal.OF_VECTOR)
layer_name = os.path.splitext(os.path.basename(fn_shp))[0]
layer = ds_shp.GetLayer()
epsg_base = 4326
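An illustrative continuation (not from the source): looping over the outline features in the opened layer with the standard OGR API:

for feat in layer:
    geom = feat.GetGeometryRef()
    xmin, xmax, ymin, ymax = geom.GetEnvelope()  # extent, in EPSG:4326 here
    # ... match the outline extent against list_files ...
layer.ResetReading()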
Example 5
import os
from glob import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pybob.ddem_tools import nmad
import scipy
from pybob.bob_tools import mkdir_p
import pyddem.fit_tools as ft

dir_valid = '/data/icesat/travail_en_cours/romain/results/valid'
dir_valid_out = '/data/icesat/travail_en_cours/romain/results/valid_summary_random_sys'

mkdir_p(dir_valid_out)

list_fn_valid = glob(os.path.join(dir_valid, '*.csv'))

print('Found validation file list:')
print(list_fn_valid)

print('Concatenating data...')
dfs = []
for fn_valid in list_fn_valid:
    tmp_df = pd.read_csv(fn_valid)
    reg = int(os.path.basename(fn_valid).split('_')[2])
    if os.path.basename(fn_valid).split('_')[1] == 'ICESat':
        sensor = 'ICS'
    else:
        sensor = 'IB'
    tmp_df = tmp_df.assign(reg=reg, sensor=sensor)
    dfs.append(tmp_df)
df = pd.concat(dfs)
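An illustrative follow-up (not in the source): the imported nmad could summarize per-region/sensor spread; the residual column name 'dh' is an assumption about the validation CSVs:

print(df.groupby(['reg', 'sensor'])['dh'].agg(nmad))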
Example 6
    region = '06_rgi60'
    print('Working on region: ' + region)

    out_dir = os.path.join(world_calc_dir, region, 'sensitivity', name,
                           'stacks')
    results_dir = os.path.join(world_calc_dir, region, 'sensitivity', name,
                               'vol')

    print(results_dir)

    outfile = os.path.join(results_dir, 'dh_' + region + '_' + name + '.csv')

    if not os.path.exists(outfile):

        mkdir_p(results_dir)

        # integrate glaciers globally
        fn_shp = glob(os.path.join(dir_shp, '**/*' + region + '*.shp'),
                      recursive=True)[0]
        dir_stack = out_dir
        list_fn_stack = glob(os.path.join(dir_stack, '**/*_final.nc'),
                             recursive=True)
        tt.hypsocheat_postproc_stacks_tvol(list_fn_stack,
                                           fn_shp,
                                           nproc=nproc,
                                           outfile=outfile)

        # add info from the base glacier dataframe (missing glaciers, region identification, etc.)
        infile = os.path.join(results_dir,
                              'dh_' + region + '_' + name + '_int.csv')
Example 7
import os
import shutil
from datetime import timedelta
from glob import glob

import numpy as np
from pybob.bob_tools import mkdir_p

# parse_aster_filename, sliding_window and _argparser are helpers from the
# surrounding module (a sketch of sliding_window follows this example)


def main():
    parser = _argparser()
    args = parser.parse_args()

    if args.folder is not None:
        os.chdir(args.folder)

    print('Looking in folder {}'.format(os.getcwd()))

    flist = glob('*.zip.met')
    filenames = np.array([f.rsplit('.zip', 1)[0] for f in flist])
    filenames.sort()
    dates = [parse_aster_filename(f) for f in filenames]

    striplist = []
    # loop through the dates
    for i, s in enumerate(dates):
        # get a list of all the scenes we're currently using
        current_striplist = [item for sublist in striplist for item in sublist]
        # if the current filename is already in the sorted list, move on.
        if filenames[i] in current_striplist:
            continue
        else:
            td_list = np.array([d - s for d in dates])
            # because we sorted the filelist, we don't have to consider timedeltas
            # less than zero (i.e., scenes within a single day are chronologically ordered)
            matched_inds = np.where(
                np.logical_and(td_list >= timedelta(0),
                               td_list < timedelta(0, 600)))[0]
            # if we only get one index back, it's the scene itself.
            if len(matched_inds) == 1:
                striplist.append(filenames[matched_inds])
                continue
            # now, check that we have continuity (if we have a difference of more than 12 seconds,
            # then the scenes aren't continuous even if they come from the same day)
            matched_diff = np.diff(td_list[matched_inds])
            break_inds = np.where(matched_diff > timedelta(0, 12))[0]
            if len(break_inds) == 0:
                pass
            else:
                # we only need the first index, add 1 because of diff
                break_ind = break_inds[0] + 1
                matched_inds = matched_inds[0:break_ind]
            # here, we make sure that we only return strips that are at most max_length long.
            for strip in sliding_window(matched_inds, args.max_length,
                                        args.max_length - 1):
                strip = list(strip)
                if (len(matched_inds) > args.max_length
                        and len(strip) == args.max_length - 1):
                    strip.insert(0, strip[0] - 1)
                striplist.append(filenames[strip])
    print('Found {} strips, out of {} individual scenes'.format(
        len(striplist), len(filenames)))
    # now that the individual scenes are sorted into "strips",
    # we can create "strip" and "single" folders
    print('Moving strips to individual folders.')
    mkdir_p('sorted')

    for s in striplist:
        mkdir_p(os.path.join('sorted', s[0][0:25]))
        if len(s) == 1:
            shutil.move(s[0] + '.zip', os.path.join('sorted', s[0][0:25]))
            shutil.move(s[0] + '.zip.met', os.path.join('sorted', s[0][0:25]))
        else:
            for ss in s:
                shutil.copy(ss + '.zip', os.path.join('sorted', s[0][0:25]))
                shutil.copy(ss + '.zip.met', os.path.join('sorted', s[0][0:25]))
    # now, clean up the current folder.
    for f in glob('*.zip*'):
        os.remove(f)
    print('Fin.')
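sliding_window is called above but not defined in this excerpt; a minimal sketch of a compatible generator, with the window/step semantics inferred from the call site:

def sliding_window(seq, size, step):
    # yield windows of at most `size` items, moving forward `step` items each time
    seq = list(seq)
    if len(seq) <= size:
        yield seq
        return
    for i in range(0, len(seq) - size + step, step):
        yield seq[i:i + size]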
Example 8
    # tile = 'S48W074'

    for tile in list_tiles[list_regions.index(region)]:
        lat, lon = SRTMGL1_naming_to_latlon(tile)
        _, utm = latlon_to_UTM(lat, lon)

        ref_utm_dir = os.path.join(world_data_dir, region, 'ref', utm)
        fn_stack = glob(os.path.join(dir_stack, utm, tile + '_final.nc'),
                        recursive=True)[0]

        fn_orig = os.path.join(
            os.path.dirname(fn_stack),
            os.path.basename(fn_stack).split('_')[0] + '.nc')
        ref_vrt = os.path.join(ref_utm_dir, 'tmp_' + utm + '.vrt')
        ref_list = glob(os.path.join(ref_utm_dir, '**/*.tif'), recursive=True)
        if not os.path.exists(ref_vrt):
            gdal.BuildVRT(ref_vrt, ref_list, resampleAlg='bilinear')

        out_dir = os.path.join(main_out_dir, region)
        mkdir_p(out_dir)

        ft.manual_refine_sampl_temporal_vgm(
            fn_orig,
            ref_vrt,
            out_dir,
            filt_ref='both',
            time_filt_thresh=[-30, 5],
            ref_dem_date=np.datetime64('2015-01-01'),
            inc_mask=inc_mask,
            gla_mask=gla_mask,
            nproc=nproc)
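SRTMGL1_naming_to_latlon is imported from the surrounding package; for illustration, a rough re-implementation of the SRTMGL1 tile-naming convention it parses (the function name here is a hypothetical stand-in):

def srtm_tile_to_latlon(tile):
    # 'N46E007' -> (46, 7); 'S48W074' -> (-48, -74): the tile's lower-left corner
    lat = int(tile[1:3]) * (1 if tile[0] == 'N' else -1)
    lon = int(tile[4:7]) * (1 if tile[3] == 'E' else -1)
    return lat, lon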