Example #1
    def test_shape(self):
        """Is the transformation doing well?"""

        from salem import read_shapefile

        so = read_shapefile(get_demo_file('Hintereisferner.shp'))
        sref = read_shapefile(get_demo_file('Hintereisferner_UTM.shp'))
        st = gis.transform_geopandas(so, to_crs=sref.crs)
        self.assertFalse(st is so)
        assert_allclose(st.geometry[0].exterior.coords,
                        sref.geometry[0].exterior.coords)

        sti = gis.transform_geopandas(so, to_crs=sref.crs, inplace=True)
        self.assertTrue(sti is so)
        assert_allclose(so.geometry[0].exterior.coords,
                        sref.geometry[0].exterior.coords)
        assert_allclose(sti.geometry[0].exterior.coords,
                        sref.geometry[0].exterior.coords)

        g = Grid(nxny=(1, 1), dxdy=(1, 1), ll_corner=(10., 46.), proj=wgs84)
        so = read_shapefile(get_demo_file('Hintereisferner.shp'))
        st = gis.transform_geopandas(so, to_crs=g)

        ref = np.array(so.geometry[0].exterior.coords)
        ref = ref - np.floor(ref)
        assert_allclose(ref, st.geometry[0].exterior.coords)
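The test above pins down the two calling conventions: by default transform_geopandas returns a reprojected copy, while inplace=True reprojects the input GeoDataFrame itself, and to_crs accepts either a CRS or a salem Grid. A minimal sketch under those assumptions (the shapefile path is a placeholder):

import salem

gdf = salem.read_shapefile('outlines.shp')  # placeholder path
# default: the input is left untouched and a reprojected copy is returned
copy = salem.transform_geopandas(gdf, to_crs=salem.wgs84)
# inplace=True: gdf itself is modified (and also returned)
salem.transform_geopandas(gdf, to_crs=salem.wgs84, inplace=True)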
Example #2
    def test_cache_working(self):

        f1 = "f1.shp"
        f1 = create_dummy_shp(f1)
        cf1 = utils.cached_shapefile_path(f1)
        self.assertFalse(os.path.exists(cf1))
        _ = read_shapefile(f1)
        self.assertFalse(os.path.exists(cf1))
        _ = read_shapefile(f1, cached=True)
        self.assertTrue(os.path.exists(cf1))
        # nested calls
        self.assertTrue(cf1 == utils.cached_shapefile_path(cf1))

        # wait a bit
        time.sleep(0.1)
        f1 = create_dummy_shp(f1)
        cf2 = utils.cached_shapefile_path(f1)
        self.assertFalse(os.path.exists(cf1))
        _ = read_shapefile(f1, cached=True)
        self.assertFalse(os.path.exists(cf1))
        self.assertTrue(os.path.exists(cf2))
        df = read_shapefile(f1, cached=True)
        np.testing.assert_allclose(df.min_x, [1.0, 2.0])
        np.testing.assert_allclose(df.max_x, [2.0, 3.0])
        np.testing.assert_allclose(df.min_y, [1.0, 1.3])
        np.testing.assert_allclose(df.max_y, [2.0, 2.3])

        self.assertRaises(ValueError, read_shapefile, "f1.sph")
        self.assertRaises(ValueError, utils.cached_shapefile_path, "f1.splash")
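For reference, the caching contract exercised here: read_shapefile only writes a cache when cached=True, utils.cached_shapefile_path maps a shapefile to its cache location (and is a no-op when given a cache path), and the cache path changes when the source file changes (compare cf1 and cf2 above). A short sketch, with a placeholder path:

import salem
from salem import utils

cache_path = utils.cached_shapefile_path('basins.shp')  # placeholder path
df = salem.read_shapefile('basins.shp', cached=True)    # writes cache_path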
Example #3
def plot_googlemap(gdir, ax=None):
    """Plots the glacier over a googlemap."""

    # TODO: center grid or corner grid???
    crs = gdir.grid.center_grid

    dofig = False
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        dofig = True

    s = salem.read_shapefile(gdir.get_filepath('outlines'))
    gm = salem.GoogleVisibleMap(np.array(s.geometry[0].exterior.xy[0]),
                                np.array(s.geometry[0].exterior.xy[1]),
                                crs=s.crs)

    img = gm.get_vardata()[..., 0:3]  # sometimes there is an alpha
    cmap = salem.Map(gm.grid, countries=False, nx=gm.grid.nx)
    cmap.set_rgb(img)

    cmap.set_shapefile(gdir.get_filepath('outlines'))

    cmap.plot(ax)
    title = gdir.rgi_id
    if gdir.name is not None and gdir.name != '':
        title += ': ' + gdir.name
    ax.set_title(title)

    if dofig:
        plt.tight_layout()
Example #4
    def test_read_to_grid(self):

        g = GeoTiff(utils.get_demo_file("hef_srtm.tif"))
        sf = utils.get_demo_file("Hintereisferner_UTM.shp")

        df1 = read_shapefile_to_grid(sf, g.grid)

        df2 = transform_geopandas(read_shapefile(sf), to_crs=g.grid)
        assert_allclose(df1.geometry[0].exterior.coords, df2.geometry[0].exterior.coords)
Example #5
    def test_shapefile_output(self):

        # Just to increase coveralls, hehe
        gdirs = up_to_inversion()
        fpath = os.path.join(TEST_DIR, "centerlines.shp")
        write_centerlines_to_shape(gdirs, fpath)

        import salem

        shp = salem.read_shapefile(fpath)
        self.assertTrue(shp is not None)
Example #6
    def test_shapefile_output(self):

        # Just to increase coveralls, hehe
        gdirs = up_to_climate()
        fpath = os.path.join(TEST_DIR, 'centerlines.shp')
        write_centerlines_to_shape(gdirs, path=fpath)

        import salem
        shp = salem.read_shapefile(fpath)
        self.assertTrue(shp is not None)
        shp = shp.loc[shp.RGIID == 'RGI50-11.00897']
        self.assertEqual(len(shp), 3)
        self.assertEqual(shp.loc[shp.LE_SEGMENT.idxmax()].MAIN, 1)
Example #7
    def get_ref_data(self, gdir):

        # Reference data
        df = salem.read_shapefile(get_demo_file('IceThick_SouthGlacier.shp'))
        coords = np.array([p.xy for p in df.geometry]).squeeze()
        df['lon'] = coords[:, 0]
        df['lat'] = coords[:, 1]
        df = df[['lon', 'lat', 'thick']]
        ii, jj = gdir.grid.transform(df['lon'], df['lat'], crs=salem.wgs84,
                                     nearest=True)
        df['i'] = ii
        df['j'] = jj
        df['ij'] = ['{:04d}_{:04d}'.format(i, j) for i, j in zip(ii, jj)]
        return df.groupby('ij').mean()
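The grouping trick above relies on grid.transform with nearest=True, which maps lon/lat points to integer (i, j) indices of the glacier grid, so all thickness measurements falling into one grid cell share an 'ij' key and get averaged together. A self-contained sketch on a dummy grid:

import salem

grid = salem.Grid(proj=salem.wgs84, x0y0=(10., 46.), nxny=(4, 4), dxdy=(0.5, 0.5))
# nearest=True snaps each point to the index of the closest grid cell
ii, jj = grid.transform([10.4, 11.2], [46.3, 47.1],
                        crs=salem.wgs84, nearest=True)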
Example #8
def test_plot_on_map():
    import salem
    from salem.utils import get_demo_file
    ds = salem.open_wrf_dataset(get_demo_file('wrfout_d01.nc'))
    t2_sub = ds.salem.subset(corners=((77., 20.), (97., 35.)), crs=salem.wgs84).T2.isel(time=2)
    shdf = salem.read_shapefile(get_demo_file('world_borders.shp'))
    shdf = shdf.loc[shdf['CNTRY_NAME'].isin(
        ['Nepal', 'Bhutan'])]  # GeoPandas' GeoDataFrame
    t2_sub = t2_sub.salem.subset(shape=shdf, margin=2)  # add 2 grid points
    t2_roi = t2_sub.salem.roi(shape=shdf)
    fig, ax = plt.subplots(1, 1)
    t2_roi.salem.quick_map(ax=ax)
    plt.tight_layout()
    return fig
Example #9
    def test_download_demo_files(self):

        f = utils.get_demo_file('Hintereisferner.shp')
        self.assertTrue(os.path.exists(f))

        sh = salem.read_shapefile(f)
        self.assertTrue(hasattr(sh, 'geometry'))

        # Data files
        cfg.initialize()

        lf, df = utils.get_wgms_files()
        self.assertTrue(os.path.exists(lf))

        lf = utils.get_glathida_file()
        self.assertTrue(os.path.exists(lf))
Example #10
def test_example_docs():
    import salem
    from salem.utils import get_demo_file
    ds = salem.open_xr_dataset(get_demo_file('wrfout_d01.nc'))

    t2 = ds.T2.isel(Time=2)
    t2_sub = t2.salem.subset(corners=((77., 20.), (97., 35.)),
                             crs=salem.wgs84)
    shdf = salem.read_shapefile(get_demo_file('world_borders.shp'))
    shdf = shdf.loc[shdf['CNTRY_NAME'].isin(
        ['Nepal', 'Bhutan'])]  # GeoPandas' GeoDataFrame
    t2_sub = t2_sub.salem.subset(shape=shdf, margin=2)  # add 2 grid points
    t2_roi = t2_sub.salem.roi(shape=shdf)
    smap = t2_roi.salem.get_map(data=t2_roi-273.15, cmap='RdYlBu_r', vmin=-14, vmax=18)
    _ = smap.set_topography(get_demo_file('himalaya.tif'))
    smap.set_shapefile(shape=shdf, color='grey', linewidth=3)
    smap.set_points(91.1, 29.6)
    smap.set_text(91.2, 29.7, 'Lhasa', fontsize=17)
    smap.set_data(ds.T2.isel(Time=1)-273.15, crs=ds.salem.grid)

    fig, ax = plt.subplots(1, 1)
    smap.visualize(ax=ax)
    plt.tight_layout()
    return fig
Example #11
===================

Put some colors and labels on shapefiles

In this script, we use data from the `HydroSHEDS <http://www.hydrosheds.org/>`_
database to illustrate some functionalities of salem Maps. The data shows the
sub-basins of the Nam Co Lake catchment in Tibet. We navigate between the
various tributary catchments of the lake.
"""

import salem
import matplotlib.pyplot as plt

# read the shapefile
shpf = salem.get_demo_file('Lev_09_MAIN_BAS_4099000881.shp')
gdf = salem.read_shapefile(shpf)

# Get the google map which encompasses all geometries
g = salem.GoogleVisibleMap(x=[gdf.min_x.min(),
                              gdf.max_x.max()],
                           y=[gdf.min_y.min(),
                              gdf.max_y.max()],
                           maptype='satellite',
                           scale=2,
                           size_x=400,
                           size_y=400)
ggl_img = g.get_vardata()

# Get each level draining into the lake, then into the last level, and so on
gds = []
prev_id = [gdf.iloc[0].MAIN_BAS]
Example #12
ds18_hist = ds18_hist.sel(lon=slice(coord[0],coord[1]), lat=slice(coord[2],coord[3]))
ds18_present = ds18_present.sel(lon=slice(coord[0],coord[1]), lat=slice(coord[2],coord[3]))
top = top.sel(lon=slice(coord[0],coord[1]), lat=slice(coord[2],coord[3]))
tdummy = tdummy.sel(lon=slice(coord[0],coord[1]), lat=slice(coord[2],coord[3]), month=5)
tdummy2 = tdummy2.sel(lon=slice(coord[0],coord[1]), lat=slice(coord[2],coord[3]), month=5)
t2 = tdummy2['lst']
t = tdummy['lst']

ds18_hist.name = '2004-2008'
ds18_present.name = '2011-2015'

map = ds18_present.salem.get_map(cmap='viridis')
#map.set_shapefile(oceans=True)
map.set_shapefile(rivers=True)
# read the ocean shapefile (data from http://www.naturalearthdata.com)
oceans = salem.read_shapefile(salem.get_demo_file('ne_50m_ocean.shp'),
                              cached=True)

river = salem.read_shapefile('/users/global/cornkle/data/pythonWorkspace/proj_CEH/shapes/rivers/ne_10m_rivers_lake_centerlines.shp', cached=True)
lakes = salem.read_shapefile('/users/global/cornkle/data/pythonWorkspace/proj_CEH/shapes/lakes/ne_10m_lakes.shp', cached=True)
map.set_shapefile(lakes, edgecolor='k', facecolor='none', linewidth=2,)


srtm_on_ds = ds18_present.salem.lookup_transform(top)
t_on_ds = ds18_present.salem.transform(t)
t2_on_ds = ds18_present.salem.transform(t2)

grid = ds18_present.salem.grid
#deforestation
g = GeoTiff(lst)
ex = grid.extent_in_crs(crs=wgs84)  # l, r, b, t
g.set_subset(corners=((ex[0], ex[2]), (ex[1], ex[3])),
Example #13
## runoff and basins

annual_q = pd.read_csv("./data/raw/Q/Qmensual2.csv")
annual_q['Index'] = pd.to_datetime(annual_q['Index'], format="%Y-%m-%d")
annual_q = annual_q.set_index("Index", drop=True). \
               resample('Y'). \
               agg(lambda x: np.round(x.values.mean(), 2))['2000-01-01':'2014-12-31']

q_selected = annual_q.isnull().sum()[annual_q.isnull().sum() <= 6].index.tolist()
q_selected.remove("CON")
len(q_selected)

annual_q = annual_q[q_selected]
annual_q.columns

shp = salem.read_shapefile("./data/raw/Q/shape_basins_piscopV2.shp")
shp_p = salem.read_shapefile("./data/raw/Q/est_t3.shp")

adr = pd.DataFrame(shp)
shp["Area"] = (shp.to_crs({'init': 'epsg:32717'}).area)  # / 1000
shp = shp[shp["Basin"].isin(q_selected)].drop(['min_x', 'max_x', 'min_y', 'max_y'], axis=1)

annual_q = annual_q[shp.Basin.to_list()]
annual_q.shape

# convert discharge (m3/s) to runoff depth (mm/month):
# Q * 1000 mm/m * 24 h * 30 d * 3600 s / basin area (m2)
for i in annual_q.columns:
    #    annual_q[i] = annual_q[i] * 1000*365*24*60*60 / shp[shp.Basin == i].Area.tolist()
    annual_q[i] = annual_q[i] * 1000 * 24 * 30 * 3600 / shp[shp.Basin == i].Area.tolist()

ae_mean = pd.DataFrame(annual_q.apply(lambda x: np.nanmean(x)), columns=["q"]).rename_axis('Basin').reset_index()
Example #14
2003, and the map background is from 2016. This illustrates the retreat of
the Kesselwandferner glacier.

"""

import numpy as np
import pandas as pd
import salem
from salem import get_demo_file, DataLevels, GoogleVisibleMap, Map
import matplotlib.pyplot as plt

# prepare the figure
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))

# read the shapefile and use its extent to define an ideally sized map
shp = salem.read_shapefile(get_demo_file('rgi_kesselwand.shp'))
# If you need to do a lot of maps you might want
# to use an API key and set it here with key='YOUR_API_KEY'
g = GoogleVisibleMap(x=[shp.min_x, shp.max_x], y=[shp.min_y, shp.max_y],
                     maptype='satellite')  # try out also: 'terrain'

# the google static image is a standard rgb image
ggl_img = g.get_vardata()
ax1.imshow(ggl_img)
ax1.set_title('Google static map')

# make a map of the same size as the image (no country borders)
sm = Map(g.grid, factor=1, countries=False)
sm.set_shapefile(shp)  # add the glacier outlines
sm.set_rgb(ggl_img)  # add the background rgb image
sm.visualize(ax=ax2)  # plot it
Example #15
mm1 = ds18_hist.min()
mm2 = ds18_present.min()

percboth = np.max([perc,perc2])
minboth = np.min([mm1,mm2])

ds18_hist = (ds18_hist-mm1)/(perc-mm1)
ds18_present = (ds18_present-mm2)/(perc2-mm2)
# ds18_hist = ds18_hist.where(ds18_hist<=1)
# ds18_present = ds18_present.where(ds18_present<=1)

map = ds18_present.salem.get_map(cmap='viridis')
#map.set_shapefile(oceans=True)
map.set_shapefile(rivers=True)
# read the ocean shapefile (data from http://www.naturalearthdata.com)
oceans = salem.read_shapefile(salem.get_demo_file('ne_50m_ocean.shp'),
                              cached=True)

river = salem.read_shapefile(cnst.ANCILS + 'shapes/rivers/ne_10m_rivers_lake_centerlines.shp', cached=True)
lakes = salem.read_shapefile(cnst.ANCILS + 'shapes/lakes/ne_10m_lakes.shp', cached=True)
map.set_shapefile(lakes, edgecolor='k', facecolor='grey', linewidth=1, linestyle='dotted')


srtm_on_ds = ds18_present.salem.lookup_transform(top)
t_on_ds = ds18_present.salem.transform(t)
t2_on_ds = ds18_present.salem.transform(t2)

grid = ds18_present.salem.grid
#deforestation
g = GeoTiff(lst)
ex = grid.extent_in_crs(crs=wgs84)  # l, r, b, t
g.set_subset(corners=((ex[0], ex[2]), (ex[1], ex[3])),
Example #16
2003, and the map background is from 2016. This illustrates the retreat of
the Kesselwandferner glacier.

"""

import numpy as np
import pandas as pd
import salem
from salem import get_demo_file, DataLevels, GoogleVisibleMap, Map
import matplotlib.pyplot as plt

# prepare the figure
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))

# read the shapefile and use its extent to define an ideally sized map
shp = salem.read_shapefile(get_demo_file('rgi_kesselwand.shp'))
# If you need to do a lot of maps you might want
# to use an API key and set it here with key='YOUR_API_KEY'
g = GoogleVisibleMap(x=[shp.min_x, shp.max_x],
                     y=[shp.min_y, shp.max_y],
                     maptype='satellite')  # try out also: 'terrain'

# the google static image is a standard rgb image
ggl_img = g.get_vardata()
ax1.imshow(ggl_img)
ax1.set_title('Google static map')

# make a map of the same size as the image (no country borders)
sm = Map(g.grid, factor=1, countries=False)
sm.set_shapefile(shp)  # add the glacier outlines
sm.set_rgb(ggl_img)  # add the background rgb image
Example #17
def trend_all():

    #mcs = cnst.GRIDSAT_PERU + 'aggs/gridsat_WA_-40_allClouds_monthly.nc'
    mcs = cnst.GRIDSAT_PERU + 'aggs/gridsat_WA_count_-50_allClouds_monthly.nc'
    chirps = '/media/ck/Elements/SouthAmerica/CHIRPS/chirps-v2.0.monthly.nc'
    enso = '/home/ck/DIR/mymachine/ENSO/ONI.csv'  #'/home/ck/DIR/mymachine/ENSO/meiv2.data'
    fpath = cnst.network_data + 'figs/HUARAZ/'

    fname = '/home/ck/DIR/cornkle/data/HUARAZ/shapes/riosan_sel_one.shp'
    isbuffer = [-79, -74, -12, -7]

    sdf = salem.read_shapefile(fname)
    sdf = salem.transform_geopandas(sdf, to_crs=salem.wgs84)

    da3 = xr.open_dataarray(mcs).sel(lon=slice(isbuffer[0], isbuffer[1]),
                                     lat=slice(isbuffer[2], isbuffer[3]))
    ca = xr.open_dataarray(chirps).sel(longitude=slice(isbuffer[0],
                                                       isbuffer[1]),
                                       latitude=slice(isbuffer[2],
                                                      isbuffer[3]))
    # This masks out the data which is not in the region

    ens = pd.read_csv(enso,
                      sep=',',
                      engine='python',
                      names=np.arange(0, 13),
                      index_col=0)

    ca[0, :, :].salem.roi(shape=sdf).plot.pcolormesh()

    da3 = da3.salem.roi(shape=sdf).mean(['lat', 'lon']) * 100
    ca = ca.salem.roi(shape=sdf).mean(['latitude', 'longitude'])
    months = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]

    dicm = {}
    dicmean = {}

    f = plt.figure(figsize=(15, 7.5), dpi=300)

    #sdf = salem.transform_geopandas(sdf, to_crs=salem.wgs84)

    for ids, m in enumerate(months):
        method = 'mk'

        if type(m) == int:
            m = [m]

        ensmonth = ens[m[0]]

        eens = ensmonth.loc['1985':'2019']

        sig = True

        da = da3[(da3['time.month'] == m[0]) & (da3['time.year'] >= 1985) &
                 (da3['time.year'] <= 2019)]

        ch = ca[(ca['time.month'] == m[0]) & (ca['time.year'] >= 1985) &
                (ca['time.year'] <= 2019)]

        da = da - np.mean(da.values)
        ch = ch - np.mean(ch.values)

        sslope, sint, srval, spval, serr = stats.linregress(
            np.arange(len(da.values)), da.values)
        print('linear regression for cloud cover', m, sslope, srval, spval)

        cslope, cint, crval, cpval, cerr = stats.linregress(
            np.arange(len(ch.values)), ch.values)
        print('linear regression for precipitation', m, cslope, crval, cpval)

        #ipdb.set_trace()

        dslope = sslope * 10
        cdslope = cslope * 10

        if len(m) == 1:
            fp = fpath + 'MCS_only_trendmap_Allmonths_count-50C_lines_Huaraz_1985-2018_' + str(
                m[0]).zfill(2) + '.png'
        else:
            fp = fpath + 'MCS_only_trendmap_1985-2018_' + str(
                m[0]).zfill(2) + '-' + str(m[1]).zfill(2) + '.png'

        ax = f.add_subplot(3, 4, ids + 1)

        x = np.arange(0, len(ch['time.year']))

        ax.plot(ch['time.year'],
                ch,
                marker='o',
                markersize=3,
                label='Trend: ' + str(np.round(cdslope, 2)) +
                'mm / month decade | ' + 'p=' + str(np.round(cpval, 2)),
                color='blue')
        ax.plot(ch['time.year'],
                cint + cslope * np.arange(0, len(ch['time.year'])),
                linestyle='dashed',
                color='blue')
        cc = []
        for enb in eens.values:
            if enb < 0:
                cc.append('lightblue')
            else:
                cc.append('red')

        ax.bar(eens.index.values, eens.values * 30, color=cc)
        plt.title('Month: ' + str(m[0]))
        ax.set_ylabel('mm month$^{-1}$ (blue)')
        ax.set_ylim(-90, 90)
        plt.legend(fontsize=6)
        ax1 = ax.twinx()
        ax1.plot(da['time.year'], da, color='orange', marker='o', markersize=3)
        ax1.plot(da['time.year'],
                 sint + sslope * np.arange(0, len(da['time.year'])),
                 color='orange',
                 linestyle='dashed')
        ax1.set_ylabel('% cloud cover/month (orange)')
        ax1.set_ylim(-20, 20)

    plt.tight_layout()
    plt.savefig(fp)
    plt.close('all')
Example #18
    # Set the file paths
    nc_path = r'..\..\storage\C3S-LC-L4-LCCS-Map-300m-P1Y-2018-v2.1.1.nc'
    import cartopy.crs as ccrs
    import cartopy.feature as cf
    import matplotlib.pyplot as plt
    import numpy as np

    proj = ccrs.LambertConformal(central_latitude=60,
                                 central_longitude=50,
                                 standard_parallels=(25, 25))

    print('subsetting')
    dat = salem.open_xr_dataset(nc_path)
    dat = dat['lccs_class'].isel(time=0)
    #shapes = salem.read_shapefile(r"..\data\regions\szfo\Arkhangelskaya_oblast.shp")
    shapes = salem.read_shapefile(
        r"..\data\regions\szfo\Pskovskaya_oblast.shp")
    dat = dat.salem.subset(shape=shapes, margin=2)

    print('set values')
    lat = dat.coords['lat'].values
    lon = dat.coords['lon'].values
    print(dat)
    #dat=dat.values
    print(dat)

    print('plot')
    ax = plt.axes(projection=proj)
    #ax.pcolormesh(lon, lat, dat, transform=ccrs.PlateCarree())
    #dat.plot(ax=ax, transform=proj, cmap=cmap, norm=norm)

    #dat.plot(ax=ax, transform=ccrs.PlateCarree())
Example #19
mm1 = ds18_hist.min()
mm2 = ds18_present.min()

percboth = np.max([perc, perc2])
minboth = np.min([mm1, mm2])

# ds18_hist = (ds18_hist-mm1)/(perc-mm1)
# ds18_present = (ds18_present-mm2)/(perc2-mm2)
# ds18_hist = ds18_hist.where(ds18_hist<=1)
# ds18_present = ds18_present.where(ds18_present<=1)

map = ds18_present.salem.get_map(cmap='viridis')
#map.set_shapefile(oceans=True)
map.set_shapefile(rivers=True)
# read the ocean shapefile (data from http://www.naturalearthdata.com)
oceans = salem.read_shapefile(salem.get_demo_file('ne_50m_ocean.shp'),
                              cached=True)

river = salem.read_shapefile(
    cnst.ANCILS + 'shapes/rivers/ne_10m_rivers_lake_centerlines.shp',
    cached=True)
lakes = salem.read_shapefile(cnst.ANCILS + 'shapes/lakes/ne_10m_lakes.shp',
                             cached=True)
map.set_shapefile(lakes,
                  edgecolor='k',
                  facecolor='grey',
                  linewidth=1,
                  linestyle='dotted')

srtm_on_ds = ds18_present.salem.lookup_transform(top)
t_on_ds = ds18_present.salem.transform(t)
t2_on_ds = ds18_present.salem.transform(t2)
Example #20
if __name__ == '__main__':
    start_time = time.time()
    cfg.initialize()
    cfg.PATHS['dem_file'] = get_demo_file('srtm_oetztal.tif')
    cfg.PATHS['climate_file'] = get_demo_file('HISTALP_oetztal.nc')
    cfg.PATHS[
        'working_dir'] = '/home/juliaeis/PycharmProjects/find_inital_state/test_HEF'
    #cfg.PATHS['working_dir'] = os.environ.get("S_WORKDIR")
    cfg.PARAMS['border'] = 80
    cfg.PARAMS['prcp_scaling_factor']
    cfg.PARAMS['run_mb_calibration'] = True
    cfg.PARAMS['optimize_inversion_params'] = True
    cfg.PARAMS['use_intersects'] = False
    plt.rcParams['figure.figsize'] = (8, 8)  # Default plot size

    rgi = get_demo_file('rgi_oetztal.shp')
    gdirs = workflow.init_glacier_regions(salem.read_shapefile(rgi))
    workflow.execute_entity_task(tasks.glacier_masks, gdirs)
    '''
    prepare_for_initializing(gdirs)

    pool = mp.Pool()
    pool.map(find_initial_state,gdirs)
    '''
    for gdir in gdirs:
        if gdir.rgi_id == "RGI50-11.00897":
            find_initial_state(gdir)

    print(time.time() - start_time)
Example #21
stn_xlsx_file_name = Path('input/xls/stn/pagasa.xlsx')
stn_info_df = pd.read_excel(stn_xlsx_file_name, sheet_name='sta_info')
stn_info_gdf = gpd.GeoDataFrame(stn_info_df,
                                geometry=gpd.points_from_xy(
                                    stn_info_df['lon'], stn_info_df['lat']),
                                crs=def_crs)

for basin_shp in in_shps:
    buffer = 0.25
    basin_name = basin_shp.parent.name
    stn_info_csv_file_name = out_csv_dir / '{}_stn_info.csv'.format(basin_name)
    out_csv_file_name = out_csv_dir / '{}.csv'.format(basin_name)
    if not out_csv_file_name.is_file():
        print('Processing {}'.format(basin_name))
        basin_gdf = salem.read_shapefile(basin_shp).to_crs(def_crs)  # read shp
        basin_buf_gdf = basin_gdf.copy()
        basin_buf_gdf.geometry = basin_buf_gdf.buffer(buffer)

        stn_info_df = gpd.sjoin(stn_info_gdf, basin_buf_gdf, how='inner')
        out_df = pd.DataFrame()
        if stn_info_df.size > 0:
            stn_info_df.rename(columns={
                'sta_name': 'description',
                'id': 'code'
            },
                               inplace=True)
            stn_info_df = stn_info_df[['lon', 'lat', 'code',
                                       'description']].copy()
            out_df = pd.concat(pd.read_excel(
                stn_xlsx_file_name,
Example #22
in_shp_dir = Path('input/shp/basins')
in_xls_dir = Path('output/xls/rcm')

out_img_dir = Path('output/img/boxplot')
out_img_dir.mkdir(parents=True, exist_ok=True)

out_stat_dir = Path('output/stat/boxplot')
out_stat_dir.mkdir(parents=True, exist_ok=True)

in_shps = list(in_shp_dir.glob('*/*.shp'))
in_shps = in_shps[1:]


stat_df = []
for basin_shp in in_shps:
    basin_gdf = salem.read_shapefile(basin_shp)     # read shp
    # experiment loop
    for exp_name in EXPS:
        # variable loop
        for ivar, var_name in enumerate(VARS):
            if exp_name == 'RF':
                date_range = RF_DATE_RANGE
            else:
                date_range = PROJ_MID_DATE_RANGE

            print('Processing {}: {} - {}'.format(basin_shp.parent.name, exp_name, VARS2[ivar]))
            
            in_xlsx_file_name = '{}/{}_{}_{}.xlsx'.format(str(in_xls_dir), basin_shp.parent.name, exp_name, VARS2[ivar])

            in_df = pd.read_excel(in_xlsx_file_name, sheet_name=list(RCMS.keys()), index_col=[0, 1, 2])
            in_df = pd.concat(in_df, axis=1)
Example #23
cfg.PARAMS['use_multiprocessing'] = False
cfg.PARAMS['border'] = 20
cfg.CONTINUE_ON_ERROR = False

# Read in the RGI file
rgisel = os.path.join(WORKING_DIR, 'rgi_selection.shp')
if not os.path.exists(rgisel):
    rgi_dir = utils.get_rgi_dir()
    regions = ['{:02d}'.format(int(p)) for p in range(1, 20)]
    files = [
        glob.glob(os.path.join(rgi_dir, '*', r + '_rgi50_*.shp'))[0]
        for r in regions
    ]
    rgidf = []
    for fs in files:
        sh = salem.read_shapefile(os.path.join(rgi_dir, fs), cached=True)
        percs = np.asarray([0, 25, 50, 75, 100])
        idppercs = np.round(percs * 0.01 * (len(sh) - 1)).astype(int)

        rgidf.append(sh.sort_values(by='Area').iloc[idppercs])
        rgidf.append(sh.sort_values(by='CenLon').iloc[idppercs])
        rgidf.append(sh.sort_values(by='CenLat').iloc[idppercs])
    rgidf = gpd.GeoDataFrame(pd.concat(rgidf))
    rgidf = rgidf.drop_duplicates('RGIId')
    rgidf.to_file(rgisel)
else:
    rgidf = salem.read_shapefile(rgisel)

rgidf = rgidf.loc[~rgidf.RGIId.isin([
    'RGI50-10.00012', 'RGI50-17.00850', 'RGI50-19.01497', 'RGI50-19.00990',
    'RGI50-19.01440'
Example #24
import salem

shp = "./data/raw/SHP/UH.shp"

shp = salem.read_shapefile(shp).\
    to_crs({"init": "epsg:4326"}).\
    sort_values("ID")

shp["NIVEL1"] = shp["NIVEL1"].apply(lambda x: int(x))
shp["Nivel"] = shp["NIVEL1"].apply(lambda x: "Titicaca" if x == 0.0 else "Pacífico" if x == 1.0 else "Amazonas")
shp = shp[(shp.NOMBRE != "Lago Titicaca")]
shp.groupby("Nivel").count()

shp = shp.drop(['NIVEL1',"AREA_KM2",'OBJECTID', 'NOMB_UH_N1',
      'NIVEL2', 'NIVEL3', 'NIVEL4', 'NIVEL5',
      'NIVEL6', 'NIVEL7','NOMB_UH_N2', 'NOMB_UH_N3',
      'NOMB_UH_N4', 'NOMB_UH_N5', 'NOMB_UH_N6', 'NOMB_UH_N7', 'CODIGO',
      'NOMBRE', 'ORDEN', 'Shape_Leng', 'Shape_Area',
      'min_x', 'max_x', 'min_y', 'max_y'], axis=1)

shp.to_pickle("./data/processed/SHP/UH.pkl")


shdf = salem.read_shapefile("./data/raw/SHP/vertientes.shp").\
    to_crs({"init": "epsg:4326"})

shdf.plot()

shdf.to_pickle("./data/processed/SHP/sph_ver.pkl")
Example #25
utils.mkdir(cfg.PATHS['rgi_dir'])

# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = False
cfg.PARAMS['border'] = 20
cfg.CONTINUE_ON_ERROR = False

# Read in the RGI file
rgisel = os.path.join(WORKING_DIR, 'rgi_selection.shp')
if not os.path.exists(rgisel):
    rgi_dir = utils.get_rgi_dir()
    regions = ['{:02d}'.format(int(p)) for p in range(1, 20)]
    files = [glob.glob(os.path.join(rgi_dir, '*', r + '_rgi50_*.shp'))[0] for r in regions]
    rgidf = []
    for fs in files:
        sh = salem.read_shapefile(os.path.join(rgi_dir, fs), cached=True)
        percs = np.asarray([0, 25, 50, 75, 100])
        idppercs = np.round(percs * 0.01 * (len(sh)-1)).astype(int)

        rgidf.append(sh.sort_values(by='Area').iloc[idppercs])
        rgidf.append(sh.sort_values(by='CenLon').iloc[idppercs])
        rgidf.append(sh.sort_values(by='CenLat').iloc[idppercs])
    rgidf = gpd.GeoDataFrame(pd.concat(rgidf))
    rgidf = rgidf.drop_duplicates('RGIId')
    rgidf.to_file(rgisel)
else:
    rgidf = salem.read_shapefile(rgisel)

rgidf = rgidf.loc[~rgidf.RGIId.isin(['RGI50-10.00012', 'RGI50-17.00850',
                                     'RGI50-19.01497', 'RGI50-19.00990',
                                     'RGI50-19.01440'])]
Example #26
def get_IGP(data_path, shp_path):
    """
     Return only data in IGP adminsitrative domains (based on masking process). 
     Return type is a dictionary containing WRF-Chem outputs datasets with keys:
     
     - IGP :contains all IGP states in BGD, PAK, IND.
     - U_IGP : data for states Sindh, Punjab (PAK), Punjab (IND).
     - M_IGP : data for states Haryana, Delhi NCT, Uttar Pradesh (IND).
     - L_IGP : data for states Bihar, West Bengal (IND), Barisal, 
       Dhaka, Khulna, Rajshahi, Rangpur (BGD).
     - Single states subsets (TO DO)."
    
    WARNING: this division of IGP is arbitrary, given that there is no 
             official IGP administrative domain. 
    
    
    :param data_path:
     path to data files.
    :type data_path: string
    :param shp_path:
     path to IGP shapefiles.
    :type shp_path: string
    :return:
    dictionary of xarray.Dataset.
  :rtype: dict
 """

    import salem

    igp_data = {}  # dictionary holding the output datasets.

    ds = salem.open_mf_wrf_dataset(data_path)  # open data with salem.

    # get IGP states shapefiles.
    shdf = salem.read_shapefile(shp_path)  # IGP shp.
    shdf_UIGP = shdf.loc[(shdf['HASC_1'] == 'PK.SD')
                         | (shdf['HASC_1'] == 'IN.PB')
                         | (shdf['HASC_1'] == 'PK.PB')]
    shdf_MIGP = shdf.loc[(shdf['HASC_1'] == 'IN.DL')
                         | (shdf['HASC_1'] == 'IN.HR')
                         | (shdf['HASC_1'] == 'IN.UP')]
    shdf_LIGP = shdf.loc[(shdf['HASC_1'] == 'IN.WB')
                         | (shdf['HASC_1'] == 'IN.BR')
                         | (shdf['HASC_1'] == 'BD.BA')
                         | (shdf['HASC_1'] == 'BD.KH')
                         | (shdf['HASC_1'] == 'BD.RS')
                         | (shdf['HASC_1'] == 'BD.RP')
                         | (shdf['HASC_1'] == 'BD.DH')]

    # Get data subsets.
    IGP = ds.salem.roi(shape=shdf)
    UIGP = ds.salem.roi(shape=shdf_UIGP)
    MIGP = ds.salem.roi(shape=shdf_MIGP)
    LIGP = ds.salem.roi(shape=shdf_LIGP)

    # Add each subset to the output dictionary.
    igp_data.update({'IGP': IGP})
    igp_data.update({'U_IGP': UIGP})
    igp_data.update({'M_IGP': MIGP})
    igp_data.update({'L_IGP': LIGP})

    return igp_data
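A hedged usage sketch for the function above; the glob pattern, the shapefile path, and the WRF-Chem variable name are placeholders, not values from the original script:

igp_data = get_IGP('wrfout_d01_*', 'IGP_states.shp')  # placeholder paths
upper = igp_data['U_IGP']                    # masked to Sindh + both Punjabs
mean_pm25 = upper['PM2_5_DRY'].mean('time')  # variable name is an assumption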
Example #27
def trend_all():

    #mcs = cnst.GRIDSAT_PERU + 'aggs/gridsat_WA_-40_allClouds_monthly.nc'
    mcs = cnst.GRIDSAT_PERU + 'aggs/gridsat_WA_count_-50_allClouds_monthly.nc'

    fpath = cnst.network_data + 'figs/HUARAZ/'

    # small domain; alternatives tried: [-79, -74, -12, -8],
    # [-79, -65, -17, -3], [-80, -53, -30, -1], [-79, -69, -17, -7]
    box = [-79, -76, -11, -8]

    da3 = xr.open_dataarray(mcs)  #/100
    da3 = da3.sel(lon=slice(box[0], box[1]), lat=slice(box[2], box[3]))

    grid = da3.salem.grid.regrid(factor=1)

    tir = grid.lookup_transform(
        da3, method=np.nanmean)  #t2d.salem.lookup_transform(da3['tir']) #

    grid = grid.to_dataset()
    tir = xr.DataArray(tir,
                       coords=[da3['time'], grid['y'], grid['x']],
                       dims=['time', 'latitude', 'longitude'])

    months = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]

    dicm = {}
    dicmean = {}

    f = plt.figure(figsize=(14, 9), dpi=300)

    fname = '/home/ck/DIR/cornkle/data/HUARAZ/shapes/riosan_sel_one.shp'

    sdf = salem.read_shapefile(fname)
    #sdf = salem.transform_geopandas(sdf, to_crs=salem.wgs84)

    for ids, m in enumerate(months):

        print('Doing', m)
        method = 'mk'

        if type(m) == int:
            m = [m]

        sig = False

        tirtrend, tirmean = calc_trend(tir,
                                       m,
                                       method=method,
                                       sig=sig,
                                       wilks=False)

        tirm_mean = tirmean.mean('year')

        #linemean = tirmean.mean(['year','lat','lon'])

        # plt.plot(np.arange(1985,2018), tirmean.mean(['latitude', 'longitude']))
        # return

        tirtrend_unstacked = ((tirtrend.values) * 10. /
                              tirm_mean.values) * 100.  #/ tirm_mean.values
        #ipdb.set_trace()
        tirtrend_out = xr.DataArray(tirtrend_unstacked,
                                    coords=[grid['y'], grid['x']],
                                    dims=['latitude', 'longitude'])
        tirtrend_out.name = 'tir'
        tirmean_out = xr.DataArray(tirm_mean,
                                   coords=[grid['y'], grid['x']],
                                   dims=['latitude', 'longitude'])

        # dicm[m[0]] = tirtrend_out
        # dicmean[m[0]] = tirm_mean
        #
        ti_da = tirtrend_out

        if len(m) == 1:
            fp = fpath + 'MCS_only_trendmap_Peru_count-50C_allClouds_noSIG_since2000_' + str(
                m[0]).zfill(2) + '.png'
        else:
            fp = fpath + 'MCS_only_trendmap_' + str(m[0]).zfill(2) + '-' + str(
                m[1]).zfill(2) + '.png'

        map = ti_da.salem.get_map()

        ax1 = f.add_subplot(3, 4, ids + 1)
        map.set_contour((tirm_mean) * 100,
                        interp='linear',
                        levels=[10, 20, 40, 60, 80],
                        colors='k',
                        linewidths=0.5)
        #map.set_contour((tirm_mean), interp='linear', levels=[-55,-50,-45], colors='k', linewidths=0.5)
        #.values).astype(np.float64)

        ti_da.values[ti_da.values == 0] = np.nan
        map.set_data(ti_da)  #
        #map.set_data(tirm_mean)
        map.set_shapefile(sdf,
                          facecolor='white',
                          color='red',
                          linewidth=1.5,
                          alpha=0.5)
        #map.set_geometry(sdf, linewidth=2)
        # coord = [18, 25, -28, -20]
        # geom = shpg.box(coord[0], coord[2], coord[1], coord[3])
        #map.set_geometry(geom, zorder=99, color='darkorange', linewidth=3, linestyle='--', alpha=0.3)

        #map.set_plot_params(cmap='RdBu_r', extend='both', levels=np.arange(-1.5,1.6,0.25)) #)  #, levels=np.arange(-7,7,25)  # levels=np.arange(20,101,20)  #np.arange(20,101,20)
        map.set_plot_params(cmap='RdBu',
                            extend='both',
                            levels=np.arange(-30, 31, 10))
        dic = map.visualize(ax=ax1,
                            title=str(m) + ': -50C frequency change',
                            cbar_title='% decade-1',
                            addcbar=True)
        contours = dic['contour'][0]
        plt.clabel(contours, inline=True, fontsize=7, fmt='%1.1f')

    plt.tight_layout()
    plt.savefig(fp)
    plt.close('all')
Example #28
# Make it large if you expect your glaciers to grow large
cfg.PARAMS['border'] = 100

# Set to True for operational runs
cfg.PARAMS['continue_on_error'] = False

# We use intersects
cfg.set_intersects_db(utils.get_rgi_intersects_region_file('11', version='5'))

# Pre-download other files which will be needed later
utils.get_cru_cl_file()
utils.get_cru_file(var='tmp')
utils.get_cru_file(var='pre')

# Use the Hintereisferner RGI file for the run
rgidf = salem.read_shapefile(get_demo_file('Hintereisferner_RGI5.shp'))

# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

log.info('Starting OGGM run')
log.info('Number of glaciers: {}'.format(len(rgidf)))

# Go - initialize working directories
gdirs = workflow.init_glacier_regions(rgidf)

# Preprocessing tasks
task_list = [
    tasks.glacier_masks,
    tasks.compute_centerlines,
    tasks.initialize_flowlines,
Example #29
    def test_shapefile_output(self):

        gdirs = up_to_climate(use_mp=True)

        fpath = os.path.join(_TEST_DIR, 'centerlines.shp')
        write_centerlines_to_shape(gdirs, path=fpath)

        import salem
        shp = salem.read_shapefile(fpath)
        self.assertTrue(shp is not None)
        shp = shp.loc[shp.RGIID == 'RGI60-11.00897']
        self.assertEqual(len(shp), 3)
        self.assertEqual(shp.loc[shp.LE_SEGMENT.idxmax()].MAIN, 1)

        fpath = os.path.join(_TEST_DIR, 'centerlines_ext.shp')
        write_centerlines_to_shape(gdirs, path=fpath,
                                   ensure_exterior_match=True)
        shp_ext = salem.read_shapefile(fpath)
        # We check the length of the segment for a change
        shp_ext = shp_ext.to_crs('EPSG:32632')
        assert_allclose(shp_ext.geometry.length, shp_ext['LE_SEGMENT'],
                        rtol=1e-3)

        fpath = os.path.join(_TEST_DIR, 'centerlines_ext_smooth.shp')
        write_centerlines_to_shape(gdirs, path=fpath,
                                   ensure_exterior_match=True,
                                   simplify_line=0.2,
                                   corner_cutting=3)
        shp_ext_smooth = salem.read_shapefile(fpath)
        # This is a bit different of course
        assert_allclose(shp_ext['LE_SEGMENT'], shp_ext_smooth['LE_SEGMENT'],
                        rtol=2)

        fpath = os.path.join(_TEST_DIR, 'flowlines.shp')
        write_centerlines_to_shape(gdirs, path=fpath, flowlines_output=True)
        shp_f = salem.read_shapefile(fpath)
        self.assertTrue(shp_f is not None)
        shp_f = shp_f.loc[shp_f.RGIID == 'RGI60-11.00897']
        self.assertEqual(len(shp_f), 3)
        self.assertEqual(shp_f.loc[shp_f.LE_SEGMENT.idxmax()].MAIN, 1)
        # The flowline is cut so shorter
        assert shp_f.LE_SEGMENT.max() < shp.LE_SEGMENT.max() * 0.8

        fpath = os.path.join(_TEST_DIR, 'widths_geom.shp')
        write_centerlines_to_shape(gdirs, path=fpath, geometrical_widths_output=True)
        # Salem can't read it
        shp_w = gpd.read_file(fpath)
        self.assertTrue(shp_w is not None)
        shp_w = shp_w.loc[shp_w.RGIID == 'RGI60-11.00897']
        self.assertEqual(len(shp_w), 90)

        fpath = os.path.join(_TEST_DIR, 'widths_corr.shp')
        write_centerlines_to_shape(gdirs, path=fpath, corrected_widths_output=True)
        # Salem can't read it
        shp_w = gpd.read_file(fpath)
        self.assertTrue(shp_w is not None)
        shp_w = shp_w.loc[shp_w.RGIID == 'RGI60-11.00897']
        self.assertEqual(len(shp_w), 90)

        # Test that one wrong glacier still works
        base_dir = os.path.join(cfg.PATHS['working_dir'], 'dummy_pergla')
        utils.mkdir(base_dir, reset=True)
        gdirs = workflow.execute_entity_task(utils.copy_to_basedir, gdirs,
                                             base_dir=base_dir, setup='all')
        os.remove(gdirs[0].get_filepath('centerlines'))
        cfg.PARAMS['continue_on_error'] = True
        write_centerlines_to_shape(gdirs)
Example #30
cfg.PARAMS['invert_with_sliding'] = False
cfg.PARAMS['bed_shape'] = 'parabolic'
cfg.PARAMS['use_compression'] = False

# Some globals for more control on what to run
RUN_GIS_mask = False
RUN_GIS_PREPRO = False  # run GIS pre-processing tasks (before climate)
RUN_CLIMATE_PREPRO = False  # run climate pre-processing tasks
RUN_INVERSION = False  # run bed inversion
RUN_DYNAMICS = False  # run dynamics

Plotting = False
Plotting_after_calving = False

# Read RGI file
rgidf = salem.read_shapefile(RGI_FILE, cached=True)

# TODO: we must sort() glaciers from large to small when we run on the cluster
# Get ref glaciers (all glaciers with MB)
flink, mbdatadir = utils.get_wgms_files()

ids_with_mb = pd.read_csv(flink)['RGI50_ID'].values

if RUN_inPC:
    # Keep id's of glaciers in WGMS and GlathiDa V2
    keep_ids = [
        'RGI50-01.00037', 'RGI50-01.00570', 'RGI50-01.01104', 'RGI50-01.01390',
        'RGI50-01.02228', 'RGI50-01.04591', 'RGI50-01.05211', 'RGI50-01.09162',
        'RGI50-01.16316', 'RGI50-01.22699'
    ]
Example #31
# (e.g. smoothing, dx, prcp factor...) should imply a re-calibration

mbf = 'https://dl.dropboxusercontent.com/u/20930277/ref_tstars_b60_prcpfac_25_defaults.csv'
mbf = utils.file_downloader(mbf)
shutil.copyfile(mbf, os.path.join(WORKING_DIR, 'ref_tstars.csv'))

# Initialize OGGM and set up the run parameters
# ---------------------------------------------

# Download and read in the RGI file
rgif = 'https://dl.dropboxusercontent.com/u/20930277/RGI_example_glaciers.zip'
rgif = utils.file_downloader(rgif)
with zipfile.ZipFile(rgif) as zf:
    zf.extractall(WORKING_DIR)
rgif = os.path.join(WORKING_DIR, 'RGI_example_glaciers.shp')
rgidf = salem.read_shapefile(rgif, cached=True)

# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

# rgidf = rgidf.loc[rgidf.RGIId.isin(['RGI50-01.10299'])]

print('Number of glaciers: {}'.format(len(rgidf)))

# Go - initialize working directories
# -----------------------------------

# you can use the command below to reset your run -- use with caution!
# gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
gdirs = workflow.init_glacier_regions(rgidf)
Example #32
# probably belongs in the draw_beatiful_map method

import cartopy.crs as ccrs
import cartopy

import salem

import numpy as np

import matplotlib.pyplot as plt

# read and open the files
ds = salem.open_xr_dataset(
    r'..\..\storage\C3S-LC-L4-LCCS-Map-300m-P1Y-2018-v2.1.1.nc')
shp_path = r"..\data\regions\szfo\Arkhangelskaya_oblast.shp"
shapes = salem.read_shapefile(shp_path)
ds = ds['lccs_class'].isel(time=0)
# create a new longitude/latitude grid: 3600 x 1800 points, i.e. 10 points
# per degree, roughly 10 km resolution (instead of 300 m)
new_lon = np.linspace(ds.lon.values[0], ds.lon.values[-1], 3600)
new_lat = np.linspace(ds.lat.values[0], ds.lat.values[-1], 1800)
# subset by the shape (no clipping yet)
print('subset')
ds = ds.salem.subset(shape=shapes, margin=2)
# run the reinterpolation (still need to figure out which parameters can be
# passed besides the new grid)
print('interpolation')
dsi = ds.interp(lat=new_lat, lon=new_lon)
# draw the map with the basic method (no idea whether all this machinery
# still works once we start playing with projections; that is for the next
# testing phase)
print('plt')
dsi.salem.quick_map()
print('3')
plt.show()
Example #33
import xarray as xr
import pandas as pd
import numpy as np
import glob
import salem
import pickle

lat_new = np.arange(-18.5, 0.2, 0.01)
lon_new = np.arange(-81.5, -68.25, 0.01)

shp = "./data/raw/SHP/Sudamérica.shp"
shp = salem.read_shapefile(shp).\
    to_crs({"init": "epsg:4326"}).iloc[11:12]

#zhang2015
#P-LSH

ae_zhang = xr.concat([xr.open_dataset(f) for f in glob.glob('./data/raw/AE/Zhang/Peru/*.nc')],
               dim='time'). \
    rename({"longitude": "lon", "latitude": "lat"}). \
    assign_coords(time=list(range(1982, 2014)))

ae_zhang = ae_zhang.where((-81.5 < ae_zhang.lon) &
                          (ae_zhang.lon < -68.25) &
                          (-18.5 < ae_zhang.lat) &
                          (ae_zhang.lat < 0.2), drop=True).\
    reindex(lat=lat_new, lon=lon_new, method="nearest")

#gleam

ae_gleam = xr.open_dataset("./data/raw/AE/GLEAM/E_1980_2018_GLEAM_v3.3a_YR.nc", decode_times=False). \
Example #34
def country_avg(dataset,country,months,ndays_per_month,margin,forecast=False,offset=0,nmonths_per_year=12):
    # Compute a country-wide area average of input dataset for specified months, using Natural Earth shapefile
    shapefile='/gws/nopw/j04/klingaman/datasets/NATURAL_EARTH/ne_110m_admin_0_countries.shp'
    shdf = salem.read_shapefile(shapefile)
    shdf = shdf.loc[shdf['admin'].isin([country])]
    
    try:
        nx = len(dataset.coords['lon'])
        lon_name='lon'
    except:
        nx = len(dataset.coords['longitude'])
        lon_name='longitude'
    try:
        ny = len(dataset.coords['lat'])
        lat_name='lat'
    except:
        ny = len(dataset.coords['latitude'])
        lat_name='latitude'
    try:
        nt = len(dataset.coords['time'])
    except:
        try:
            nt = len(dataset.coords['t'])
        except:
            nt = 1
    if forecast:
        nyr = 1
    else:
        nyr = nt//nmonths_per_year
    
    if np.amax(months) >= nmonths_per_year and nyr > 1:
        my_nyr = nyr - 1
    else:
        my_nyr = nyr
    #print my_nyr
    nmonths = np.size(months)
    data_sub_aavg = np.zeros(my_nyr)
    for month in xrange(nmonths):
        if forecast or my_nyr == 0:
            if nmonths > 1:
                my_t = months[month] - offset
            else:
                my_t = months[0] - offset
            if my_t < 0:
                my_t = my_t + nmonths_per_year
        else:
            if nmonths > 1:
                my_t = np.arange(my_nyr) * nmonths_per_year + months[month] - offset
            else:
                my_t = np.arange(my_nyr) * nmonths_per_year + months - offset
        #print my_t
        try:
            month_sub = dataset.isel(time=my_t)
            threed_flag = True
        except:
            try:
                month_sub = dataset.isel(t=my_t)
                threed_flag = True
            except:
                month_sub = dataset
                threed_flag = False
        try:
            month_sub = month_sub.isel(surface=0)
        except:
            month_sub = month_sub
        try:
            month_sub = month_sub.salem.subset(shape=shdf)
            month_sub = month_sub.salem.roi(shape=shdf)
        except ValueError:
            month_sub = month_sub.salem.subset(corners=((shdf.min_x,shdf.min_y),(shdf.max_x,shdf.max_y)))
        if forecast or nyr == 0:
            threed_flag=False

        cntry_nx = len(month_sub.coords[lon_name])
        cntry_ny = len(month_sub.coords[lat_name])
        weights = np.empty((cntry_ny,cntry_nx))
        for x in xrange(cntry_nx):
            for y in xrange(cntry_ny):
                if threed_flag:
                    if not np.isnan(month_sub.values[0,y,x]):
                        weights[y,x] = np.cos(month_sub.coords[lat_name].values[y]*np.pi/180.)
                    else:
                        weights[y,x] = np.nan
                else:
                    if not np.isnan(month_sub.values[y,x]):
                        weights[y,x] = np.cos(month_sub.coords[lat_name].values[y]*np.pi/180.)
                    else:
                        weights[y,x] = np.nan
        weights = weights/np.nansum(weights)
        if threed_flag: # or nmonths > 1:
            axes=(1,2)
        else:
            axes=(0,1)
        if np.size(months) > 1:
            # note: both of the original branches (threed_flag or not) were identical
            data_sub_aavg = data_sub_aavg + np.nansum(month_sub.values * weights, axis=axes) * ndays_per_month[month]
        else:
            data_sub_aavg = np.nansum(month_sub.values * weights, axis=axes) * ndays_per_month[0]
    
    data_sub_aavg = data_sub_aavg/float(np.sum(ndays_per_month))
    return data_sub_aavg
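A hedged usage sketch for country_avg; the file and variable names are placeholders, and it assumes a regular lat/lon grid that salem can parse and a monthly time series starting in January, so that the month indices are 0-based (note the function above is Python 2 code, so this would run under Python 2 as well):

import xarray as xr

pr = xr.open_dataset('monthly_precip.nc')['pr']  # placeholder dataset
# JJA average over India, weighting each month by its length in days
jja = country_avg(pr, 'India', months=[5, 6, 7],
                  ndays_per_month=[30, 31, 31], margin=0)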
Example #35
cfg.PARAMS['invert_with_sliding'] = False
cfg.PARAMS['bed_shape'] = 'parabolic'
cfg.PARAMS['use_compression'] = False

# Some globals for more control on what to run
RUN_GIS_mask = False
RUN_GIS_PREPRO = False # run GIS pre-processing tasks (before climate)
RUN_CLIMATE_PREPRO = False # run climate pre-processing tasks
RUN_INVERSION = False  # run bed inversion
RUN_DYNAMICS = False  # run dynamics

Plotting = False
Plotting_after_calving = False

# Read RGI file
rgidf = salem.read_shapefile(RGI_FILE, cached=True)

# TODO: we must sort() glaciers from large to small when we run on the cluster
# Get ref glaciers (all glaciers with MB)
flink, mbdatadir = utils.get_wgms_files()

ids_with_mb = pd.read_csv(flink)['RGI50_ID'].values

if RUN_inPC:
    # Keep id's of glaciers in WGMS and GlathiDa V2
    keep_ids = ['RGI50-01.00037', 'RGI50-01.00570','RGI50-01.01104',
                'RGI50-01.01390', 'RGI50-01.02228', 'RGI50-01.04591',
                'RGI50-01.05211', 'RGI50-01.09162', 'RGI50-01.16316',
                'RGI50-01.22699']

    # Glaciers in the McNabb data base
Example #36
import pdb
import numpy as np
from functools import partial
import xarray as xr
import matplotlib.pyplot as plt
import salem
from salem import get_demo_file, open_xr_dataset, GeoTiff, wgs84



dat = xr.open_dataarray('/users/global/cornkle/data/pythonWorkspace/proj_CEH/topo/gtopo_1min.nc')
#dat=dat.sel(lon=slice(-18,120), lat=slice(-30,60))

#dat=dat.sel(lon=slice(-100,-40), lat=slice(-30,60))

grid = dat.salem.grid

grid50 = grid.regrid(factor=0.03)
lakes = salem.read_shapefile(salem.get_demo_file('ne_50m_rivers_lake_centerlines.shp'), cached=True)


top_on_grid50 = grid50.lookup_transform(dat, method=np.std)

sm = dat.salem.get_map(cmap='topo')
lakes = salem.read_shapefile(salem.get_demo_file('ne_50m_rivers_lake_centerlines.shp'), cached=True)
sm.set_shapefile(lakes, edgecolor='k', facecolor='none', linewidth=2,)
mask_lakes = grid.region_of_interest(shape=lakes)
sm.set_data(top_on_grid50, grid50)
sm.set_plot_params(vmin=20, vmax=500)
#sm.set_data(dat, grid)
sm.visualize()

f = plt.figure()
Example #37
    'input_data/05_rgi61_GreenlandPeriphery_bea/05_rgi61_GreenlandPeriphery.shp'
)

mask_file = os.path.join(
    MAIN_PATH, 'input_data/Icemask_Topo_Iceclasses_lon_lat_average_1km.nc')

filename_coastline = os.path.join(
    MAIN_PATH, 'input_data/ne_10m_coastline/ne_10m_coastline.shp')

#Reading RACMO mask
# The mask and geo reference data
ds_geo = xr.open_dataset(mask_file, decode_times=False)
proj = pyproj.Proj('+init=EPSG:3413')
ds_geo.attrs['pyproj_srs'] = proj.srs

coast_line = salem.read_shapefile(filename_coastline)

#RGI v6
df = gpd.read_file(RGI_FILE)

rgi_area_total = df['Area'].sum()

df.set_index('RGIId')
index = df.index.values

# Get the glaciers classified by Terminus type
sub_mar = df[df['TermType'].isin([1])]
sub_lan = df[df['TermType'].isin([0])]

# Classify Marine-terminating by connectivity
sub_no_conect = sub_mar[sub_mar['Connect'].isin([0, 1])]
Example #38
shp_path = r"C:\NETCDF_PROJECT\storage\Belgorod\Adminbndy3.shp"
MC = MaskClipper(nc_path, shp_path)
MC.create_variable('lccs_class', 0)
MC.print_types()
MC.subset()
MC.clip()
MC.draw_map()


print('F**K')
ds = salem.open_xr_dataset(r'..\..\storage\C3S-LC-L4-LCCS-Map-300m-P1Y-2018-v2.1.1.nc')

#ds = salem.open_xr_dataset(get_demo_file('wrfout_d01.nc'))
#shdf = salem.read_shapefile(get_demo_file(r"C:\NETCDF_PROJECT\storage\Belgorod\Adminbndy3.shp"))

shdf_bel = salem.read_shapefile(r"C:\NETCDF_PROJECT\storage\Belgorod\Adminbndy3.shp")

#shdf = salem.read_shapefile(get_demo_file('world_borders.shp'))
#print(type(shdf), shdf)
print(type(shdf_bel), shdf_bel)
#shdf = shdf.loc[shdf['CNTRY_NAME'].isin(['Nepal', 'Bhutan'])]
#print(type(shdf), shdf)

t2 = ds.lccs_class.isel(time=0)
#t2 = ds.T2.isel(Time=2)
#t2 = t2.salem.subset(shape=shdf, margin=2)
t2 = t2.salem.subset(shape=shdf_bel, margin=2)

#t2 = t2.salem.roi(shape=shdf)
t2 = t2.salem.roi(shape=shdf_bel)
print(t2)
Example #39
# coord=[-14,-13,12.5,16,14.8,15.5,-15,-14] #13.6,15.2, 15.4,12.5  # weird anticorrelation
# name = 'east senegal'

ds = ds.sel(lon=slice(coord[0], coord[1]), lat=slice(coord[2], coord[3]))
ds2 = ds2.sel(lon=slice(coord[0], coord[1]), lat=slice(coord[2], coord[3]))
top = top.sel(lon=slice(coord[0], coord[1]), lat=slice(coord[2], coord[3]))
t = t.sel(lon=slice(coord[0], coord[1]), lat=slice(coord[2], coord[3]))

ds.name = '0-3UTC'
ds2.name = '16-18UTC'

map = ds.salem.get_map(cmap='viridis')
#map.set_shapefile(oceans=True)
map.set_shapefile(rivers=True)
# read the ocean shapefile (data from http://www.naturalearthdata.com)
oceans = salem.read_shapefile(salem.get_demo_file('ne_50m_ocean.shp'),
                              cached=True)

river = salem.read_shapefile(
    '/users/global/cornkle/data/pythonWorkspace/proj_CEH/shapes/rivers/ne_10m_rivers_lake_centerlines.shp',
    cached=True)
lakes = salem.read_shapefile(
    '/users/global/cornkle/data/pythonWorkspace/proj_CEH/shapes/lakes/ne_10m_lakes.shp',
    cached=True)
map.set_shapefile(
    lakes,
    edgecolor='k',
    facecolor='none',
    linewidth=2,
)

srtm = open_xr_dataset(get_demo_file('hef_srtm.tif'))
Example #40
    def __init__(self, nc_path, shape_path):
        self.data_set = salem.open_xr_dataset(nc_path)
        self.shape_path = salem.read_shapefile(shape_path)
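Despite its name, shape_path here ends up holding the GeoDataFrame returned by read_shapefile, not a path. A sketch of how the two members are typically combined (mirroring the MaskClipper calls in Example #38; the paths are placeholders):

mc = MaskClipper('data.nc', 'region.shp')                      # placeholder paths
sub = mc.data_set.salem.subset(shape=mc.shape_path, margin=2)  # crop to the shape
roi = sub.salem.roi(shape=mc.shape_path)                       # mask outside it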
Example #41
===================

Put some colors and labels on shapefiles

In this script, we use data from the `HydroSHEDS <http://www.hydrosheds.org/>`_
database to illustrate some functionalities of salem Maps. The data shows the
sub-basins of the Nam Co Lake catchment in Tibet. We navigate between the
various tributary catchments of the lake.
"""

import salem
import matplotlib.pyplot as plt

# read the shapefile
shpf = salem.get_demo_file('Lev_09_MAIN_BAS_4099000881.shp')
gdf = salem.read_shapefile(shpf)

# Get the google map which encompasses all geometries
g = salem.GoogleVisibleMap(x=[gdf.min_x.min(), gdf.max_x.max()],
                           y=[gdf.min_y.min(), gdf.max_y.max()],
                           maptype='satellite', size_x=400, size_y=400)
ggl_img = g.get_vardata()

# Get each level draining into the lake, then into the last level, and so on
gds = []
prev_id = [gdf.iloc[0].MAIN_BAS]
while True:
    gd = gdf.loc[gdf.NEXT_DOWN.isin(prev_id)]
    if len(gd) == 0:
        break
    gds.append(gd)
Example #42
import time

start = time.time()

# Initialize OGGM and set up the run parameters
cfg.initialize()

# Local working directory (where OGGM will write its output)
WORKING_DIR = path.join(path.expanduser('~'), 'tmp', 'OGGM_precalibrated_run')
cfg.PATHS['working_dir'] = WORKING_DIR

# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = True

# Read RGI
rgidf = salem.read_shapefile(
    path.join(WORKING_DIR, 'RGI_example_glaciers', 'RGI_example_glaciers.shp'))
# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

log.info('Starting OGGM run')
log.info('Number of glaciers: {}'.format(len(rgidf)))

# Initialize from existing directories
gdirs = workflow.init_glacier_regions(rgidf)

# We can step directly to a new experiment!
# Random climate representative for the recent climate (1985-2015)
# This is a kind of "commitment" run
execute_entity_task(tasks.random_glacier_evolution,
                    gdirs,
                    nyears=200,
Example #43
Compute a land/sea mask from a shapefile.

This example illustrates the two different methods available to compute a
raster mask from shapefile polygons.
"""

import salem
import matplotlib.pyplot as plt

# make a local grid from which we will compute the mask
# we make it coarse so that we see the grid points
grid = salem.Grid(proj=salem.wgs84, x0y0=(-18, 3), nxny=(25, 15), dxdy=(1, 1))

# read the ocean shapefile (data from http://www.naturalearthdata.com)
oceans = salem.read_shapefile(salem.get_demo_file('ne_50m_ocean.shp'),
                              cached=True)

# read the lake shapefile (data from http://www.naturalearthdata.com)
lakes = salem.read_shapefile(salem.get_demo_file('ne_50m_lakes.shp'),
                              cached=True)

# The default is to keep only the pixels whose center is within the polygon:
mask_default = grid.region_of_interest(shape=oceans)
mask_default = grid.region_of_interest(shape=lakes, roi=mask_default)

# But we can also compute a mask from all touched pixels
mask_all_touched = grid.region_of_interest(shape=oceans, all_touched=True)
mask_all_touched = grid.region_of_interest(shape=lakes, all_touched=True,
                                           roi=mask_all_touched)

# Make a map to check our results
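The snippet is cut off at this point; a plausible completion (a sketch, not the original script) would put both masks on a salem Map for comparison:

sm = salem.Map(grid, countries=False)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
sm.set_data(mask_default)
sm.visualize(ax=ax1, addcbar=False, title='Default (pixel centers)')
sm.set_data(mask_all_touched)
sm.visualize(ax=ax2, addcbar=False, title='All touched')
plt.show()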
Example #44
# the results much (expectedly), so that it's ok to change it. All the rest
# (e.g. smoothing, dx, prcp factor...) should imply a re-calibration

mbf = 'https://dl.dropboxusercontent.com/u/20930277/ref_tstars_no_tidewater.csv'
mbf = utils.file_downloader(mbf)
shutil.copyfile(mbf, os.path.join(WORKING_DIR, 'ref_tstars.csv'))


# Copy the RGI file
# -----------------

# Download RGI files
rgi_dir = utils.get_rgi_dir()
rgi_shp = list(glob(os.path.join(rgi_dir, "*", rgi_reg + '_rgi50_*.shp')))
assert len(rgi_shp) == 1
rgidf = salem.read_shapefile(rgi_shp[0], cached=True)

# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

rgidf = rgidf.loc[rgidf.RGIId.isin(['RGI50-07.01394'])]

log.info('Starting run for RGI reg: ' + rgi_reg)
log.info('Number of glaciers: {}'.format(len(rgidf)))

# Go - initialize working directories
# -----------------------------------
gdirs = workflow.init_glacier_regions(rgidf)

# Prepro tasks
task_list = [
Example #45
def get_rgi_df(reset=False):
    """This function prepares a kind of `fake` RGI file, with the updated
    geometries for ITMIX.
    """

    # This makes an RGI dataframe with all ITMIX + WGMS + GTD glaciers
    RGI_DIR = utils.get_rgi_dir()

    df_rgi_file = os.path.join(DATA_DIR, 'itmix', 'itmix_rgi_shp.pkl')
    if os.path.exists(df_rgi_file) and not reset:
        rgidf = pd.read_pickle(df_rgi_file)
    else:
        linkf = os.path.join(DATA_DIR, 'itmix', 'itmix_rgi_links.pkl')
        df_itmix = pd.read_pickle(linkf)

        f, d = utils.get_wgms_files()
        wgms_df = pd.read_csv(f)

        f = utils.get_glathida_file()
        gtd_df = pd.read_csv(f)

        divides = []
        rgidf = []
        _rgi_ids_for_overwrite = []
        for i, row in df_itmix.iterrows():

            log.info('Prepare RGI df for ' + row.name)

            # read the rgi region
            rgi_shp = find_path(RGI_DIR, row['rgi_reg'] + '_rgi50_*.shp')
            rgi_df = salem.read_shapefile(rgi_shp, cached=True)

            rgi_parts = row.T['rgi_parts_ids']
            sel = rgi_df.loc[rgi_df.RGIId.isin(rgi_parts)].copy()

            # use the ITMIX shape where possible
            if row.name in ['Hellstugubreen', 'Freya', 'Aqqutikitsoq',
                            'Brewster', 'Kesselwandferner', 'NorthGlacier',
                            'SouthGlacier', 'Tasman', 'Unteraar',
                            'Washmawapta', 'Columbia']:
                shf = find_path(SEARCHD, '*_' + row.name + '*.shp')
                shp = salem.read_shapefile(shf)
                if row.name == 'Unteraar':
                    shp = shp.iloc[[-1]]
                if shp.iloc[0].geometry.type == 'LineString':
                    shp.loc[shp.index[0], 'geometry'] = shpg.Polygon(shp.iloc[0].geometry)
                if shp.iloc[0].geometry.type == 'MultiLineString':
                    # Columbia
                    geometry = shp.iloc[0].geometry
                    parts = list(geometry.geoms)  # .geoms: explicit iteration, required by shapely >= 2
                    for p in parts:
                        assert p.type == 'LineString'
                    exterior = shpg.Polygon(parts[0])
                    # let's assume that all other polygons are in fact interiors
                    interiors = []
                    for p in parts[1:]:
                        assert exterior.contains(p)
                        interiors.append(p)
                    geometry = shpg.Polygon(parts[0], interiors)
                    assert 'Polygon' in geometry.type
                    shp.loc[shp.index[0], 'geometry'] = geometry

                assert len(shp) == 1
                area_km2 = shp.iloc[0].geometry.area * 1e-6
                shp = salem.gis.transform_geopandas(shp)
                shp = shp.iloc[0].geometry
                sel = sel.iloc[[0]]
                sel.loc[sel.index[0], 'geometry'] = shp
                sel.loc[sel.index[0], 'Area'] = area_km2
            elif row.name == 'Urumqi':
                # ITMIX Urumqi is in fact two glaciers
                shf = find_path(SEARCHD, '*_' + row.name + '*.shp')
                shp2 = salem.read_shapefile(shf)
                assert len(shp2) == 2
                for k in [0, 1]:
                    shp = shp2.iloc[[k]].copy()
                    area_km2 = shp.iloc[0].geometry.area * 1e-6
                    shp = salem.gis.transform_geopandas(shp)
                    shp = shp.iloc[0].geometry
                    assert sel.loc[sel.index[k], 'geometry'].contains(shp.centroid)
                    sel.loc[sel.index[k], 'geometry'] = shp
                    sel.loc[sel.index[k], 'Area'] = area_km2
                assert len(sel) == 2
            elif len(rgi_parts) > 1:
                # Ice-caps. Make divides
                # First we gather all the parts:
                sel = rgi_df.loc[rgi_df.RGIId.isin(rgi_parts)].copy()
                # Make the multipolygon for the record
                multi = shpg.MultiPolygon([g for g in sel.geometry])
                # update the RGI attributes. We take a dummy rgi ID
                new_area = np.sum(sel.Area)
                found = False
                for k in range(len(sel)):  # 'k' avoids shadowing the outer loop variable 'i'
                    tsel = sel.iloc[[k]].copy()
                    if 'Multi' in tsel.loc[tsel.index[0], 'geometry'].type:
                        continue
                    else:
                        found = True
                        sel = tsel
                        break
                if not found:
                    raise RuntimeError('no single-polygon part found for ' + row.name)

                inif = 0.
                add = 1e-5
                if row.name == 'Devon':
                    inif = 0.001
                    add = 1e-4
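                # Grow the buffer until all parts fuse into a single polygon
                # (a small buffer merges touching or near-touching pieces)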
                while True:
                    buff = multi.buffer(inif)
                    if 'Multi' in buff.type:
                        inif += add
                    else:
                        break
                x, y = multi.centroid.xy
                if 'Multi' in buff.type:
                    raise RuntimeError
                sel.loc[sel.index[0], 'geometry'] = buff
                sel.loc[sel.index[0], 'Area'] = new_area
                sel.loc[sel.index[0], 'CenLon'] = np.asarray(x)[0]
                sel.loc[sel.index[0], 'CenLat'] = np.asarray(y)[0]

                # Divides db
                div_sel = dict()
                for k, v in sel.iloc[0].items():  # items(): iteritems() is removed in recent pandas
                    if k == 'geometry':
                        div_sel[k] = multi
                    elif k == 'RGIId':
                        div_sel['RGIID'] = v
                    else:
                        div_sel[k] = v
                divides.append(div_sel)
            else:
                pass

            # add glacier name to the entity
            name = ['I:' + row.name] * len(sel)
            add_n = sel.RGIId.isin(wgms_df.RGI_ID.values)
            for z, it in enumerate(add_n.values):
                if it:
                    name[z] = 'W-' + name[z]
            add_n = sel.RGIId.isin(gtd_df.RGI_ID.values)
            for z, it in enumerate(add_n.values):
                if it:
                    name[z] = 'G-' + name[z]
            sel.loc[:, 'Name'] = name
            rgidf.append(sel)

        # Add divides to the original one (written once, after the loop)
        adf = pd.DataFrame(divides)
        adf.to_pickle(cfg.PATHS['itmix_divs'])

        log.info('N glaciers ITMIX: {}'.format(len(rgidf)))

        # WGMS glaciers which are not already there
        # Actually we should remove the data of those 7 to be honest...
        f, d = utils.get_wgms_files()
        wgms_df = pd.read_csv(f)
        wgms_df = wgms_df.loc[~ wgms_df.RGI_ID.isin(_rgi_ids_for_overwrite)]

        log.info('N glaciers WGMS: {}'.format(len(wgms_df)))
        for i, row in wgms_df.iterrows():
            rid = row.RGI_ID
            reg = rid.split('-')[1].split('.')[0]
            # read the rgi region
            rgi_shp = find_path(RGI_DIR, reg + '_rgi50_*.shp')
            rgi_df = salem.read_shapefile(rgi_shp, cached=True)

            sel = rgi_df.loc[rgi_df.RGIId.isin([rid])].copy()
            assert len(sel) == 1

            # add glacier name to the entity
            _cor = row.NAME.replace('/', 'or').replace('.', '').replace(' ', '-')
            name = ['W:' + _cor] * len(sel)
            add_n = sel.RGIId.isin(gtd_df.RGI_ID.values)
            for z, it in enumerate(add_n.values):
                if it:
                    name[z] = 'G-' + name[z]
            for n in name:
                if len(n) > 48:
                    raise ValueError('glacier name too long: ' + n)
            sel.loc[:, 'Name'] = name
            rgidf.append(sel)

        _rgi_ids_for_overwrite.extend(wgms_df.RGI_ID.values)

        # GTD glaciers which are not already there
        # Actually we should remove the data of those 2 to be honest...
        gtd_df = gtd_df.loc[~ gtd_df.RGI_ID.isin(_rgi_ids_for_overwrite)]
        log.info('N glaciers GTD: {}'.format(len(gtd_df)))

        for i, row in gtd_df.iterrows():
            rid = row.RGI_ID
            reg = rid.split('-')[1].split('.')[0]
            # read the rgi region
            rgi_shp = find_path(RGI_DIR, reg + '_rgi50_*.shp')
            rgi_df = salem.read_shapefile(rgi_shp, cached=True)

            sel = rgi_df.loc[rgi_df.RGIId.isin([rid])].copy()
            assert len(sel) == 1

            # add glacier name to the entity
            _corname = row.NAME.replace('/', 'or').replace('.', '').replace(' ', '-')
            name = ['G:' + _corname] * len(sel)
            for n in name:
                if len(n) > 48:
                    raise ValueError('glacier name too long: ' + n)
            sel.loc[:, 'Name'] = name
            rgidf.append(sel)

        # Save for not computing each time
        rgidf = pd.concat(rgidf)
        rgidf.to_pickle(df_rgi_file)

    return rgidf
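

# A hedged usage sketch (not part of the original snippet):
rgidf = get_rgi_df(reset=False)
log.info('Total number of entities: {}'.format(len(rgidf)))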
Ejemplo n.º 46
    import cartopy.crs as ccrs
    import cartopy.feature as cf
    import matplotlib.pyplot as plt
    import numpy as np

    proj = ccrs.LambertConformal(central_latitude=60,
                                 central_longitude=50,
                                 standard_parallels=(25, 25))

    print('subsetting')
    # nc_path is defined earlier in the original script; judging from the
    # 'lccs_class' variable it presumably points to an ESA CCI land-cover file
    dat = salem.open_xr_dataset(nc_path)
    dat = dat['lccs_class'].isel(time=0)

    #dat.salem.quick_map()
    #shapes = salem.read_shapefile("..\data\\regions\szfo\Arkhangelskaya_oblast.shp")
    shapes = salem.read_shapefile("..\data\\regions\szfo\Respublica_Komi.shp")
    #shapes = salem.read_shapefile("..\data\\regions\szfo\Leningradskaya_oblast.shp")
    dat = dat.salem.subset(shape=shapes, margin=2)
    p = dat.plot(
        subplot_kws=dict(projection=ccrs.Orthographic(50, 70),
                         facecolor="gray"),
        transform=ccrs.PlateCarree(),
    )
    # p.axes.set_global()
    p.axes.coastlines()
    p.axes.gridlines()

    print('set values')
    lat = dat.coords['lat'].values
    lon = dat.coords['lon'].values
    print(dat)
Ejemplo n.º 47
# (e.g. smoothing, dx, prcp factor...) should imply a re-calibration

mbf = 'https://dl.dropboxusercontent.com/u/20930277/ref_tstars_b60_prcpfac_25_defaults.csv'
mbf = utils.file_downloader(mbf)
shutil.copyfile(mbf, os.path.join(WORKING_DIR, 'ref_tstars.csv'))

# Initialize OGGM and set up the run parameters
# ---------------------------------------------

# Download and read in the RGI file
rgif = 'https://dl.dropboxusercontent.com/u/20930277/RGI_example_glaciers.zip'
rgif = utils.file_downloader(rgif)
with zipfile.ZipFile(rgif) as zf:
    zf.extractall(WORKING_DIR)
rgif = os.path.join(WORKING_DIR, 'RGI_example_glaciers.shp')
rgidf = salem.read_shapefile(rgif, cached=True)

# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

# rgidf = rgidf.loc[rgidf.RGIId.isin(['RGI50-01.10299'])]

print('Number of glaciers: {}'.format(len(rgidf)))


# Go - initialize working directories
# -----------------------------------

# you can use the command below to reset your run -- use with caution!
# gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
gdirs = workflow.init_glacier_regions(rgidf)
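
# A hedged sketch (not part of the original snippet) of the prepro chain
# that typically follows in OGGM scripts of this vintage:
workflow.gis_prepro_tasks(gdirs)
workflow.climate_tasks(gdirs)
workflow.inversion_tasks(gdirs)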
Ejemplo n.º 48
    cfg.PARAMS['run_mb_calibration'] = True
    cfg.PARAMS['optimize_inversion_params'] = False
    cfg.PARAMS['use_intersects'] = False

    # add to BASENAMES
    _doc = 'contains observed and searched glaciers from the synthetic experiment to find the initial state'
    cfg.BASENAMES['synthetic_experiment'] = ('synthetic_experiment.pkl', _doc)
    _doc = 'output of reconstruction'
    cfg.BASENAMES['reconstruction_output'] = ('reconstruction_output.pkl',
                                              _doc)

    plt.rcParams['figure.figsize'] = (8, 8)  # Default plot size

    rgi = get_demo_file('rgi_oetztal.shp')
    rgidf = salem.read_shapefile(rgi)
    gdirs = workflow.init_glacier_regions(
        rgidf[rgidf.RGIId != 'RGI50-11.00945'])
    #gdirs = workflow.init_glacier_regions(rgidf)

    workflow.execute_entity_task(tasks.glacier_masks, gdirs)
    #prepare_for_initializing(gdirs)

    #synthetic_experiments(gdirs)
    #run_optimization(gdirs,synthetic_exp=True)

    for gdir in gdirs[11:]:
        #plot_experiment(gdir,cfg.PATHS['plot_dir'])
        #plot_climate(gdir,cfg.PATHS['plot_dir'])

        #plot_surface(gdir,cfg.PATHS['plot_dir'],-1,synthetic_exp=False)