Example #1
def test_ice_cap():

    testdir = os.path.join(get_test_dir(), 'tmp_icecap')
    utils.mkdir(testdir, reset=True)

    cfg.initialize()
    cfg.PARAMS['use_intersects'] = False
    cfg.PATHS['dem_file'] = get_demo_file('dem_RGI50-05.08389.tif')
    cfg.PARAMS['border'] = 60
    cfg.PATHS['working_dir'] = testdir

    df = gpd.read_file(get_demo_file('divides_RGI50-05.08389.shp'))
    df['Area'] = df.Area * 1e-6  # convert m2 to km2
    df['RGIId'] = ['RGI50-05.08389_d{:02d}'.format(d + 1) for d in df.index]

    gdirs = workflow.init_glacier_regions(df)
    workflow.gis_prepro_tasks(gdirs)

    from salem import mercator_grid, Map
    smap = mercator_grid((gdirs[0].cenlon, gdirs[0].cenlat),
                         extent=[20000, 23000])
    smap = Map(smap)

    fig, ax = plt.subplots()
    graphics.plot_catchment_width(gdirs,
                                  ax=ax,
                                  add_intersects=True,
                                  add_touches=True,
                                  smap=smap)
    fig.tight_layout()
    shutil.rmtree(testdir)
    return fig
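
The salem calls this test leans on can be exercised on their own; a minimal sketch, with illustrative center coordinates:

from salem import mercator_grid, Map
import matplotlib.pyplot as plt

# build a local mercator grid around a point of interest (values illustrative)
grid = mercator_grid((10.76, 46.79), extent=[20000, 23000])
smap = Map(grid)

fig, ax = plt.subplots()
smap.visualize(ax=ax)
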
Example #2
    def _plot_city(self, ds):
        """Plot the results of gyms in a city.

        Parameters
        ----------
        ds : xr.Dataset
            Dataset with coordinate: gyms.

        Returns
        -------
        None
            Creates, saves and shows the city plot.
        """
        # Create extent of map [W, E, S, N]
        extent = [ds['longitude'].values.min(), ds['longitude'].values.max(),
                  ds['latitude'].values.min(), ds['latitude'].values.max()]

        # Setup colors
        colors = cm.nipy_spectral(np.linspace(0,1,len(ds['gyms'])))
        
        # Get google map. Scale is for more details. maptype can be
        # 'terrain' or 'satellite'
        g = GoogleVisibleMap(x=[extent[0], extent[1]], y=[extent[2],
                                extent[3]], scale=4, maptype='terrain')
        ggl_img = g.get_vardata()
        
        # Plot map
        fig, ax = plt.subplots(1, 1, figsize=(20,20))
        sm = Map(g.grid, factor=1, countries=False)
        sm.set_rgb(ggl_img)
        sm.visualize(ax=ax)
        # Plot gym points
        for i in range(0, len(ds['gyms'])):
            # Create label
            self.regcount = i
            self._rank() # Add self.rank
            _label = (self.rank + ' ' + ds['gyms'].values[i] + ': ' +
                      ds['athlete_names'].values[i] +
                      ' (' + str(ds[self.how].values[i]) + ')')
            x, y = sm.grid.transform(ds['longitude'].values[i],
                                     ds['latitude'].values[i])
            ax.scatter(x, y, color=colors[i], s=400, label=_label)
        plt.title(self.fname+' | '+self.city+' | '+self.column+' | '+self.how)

        # Shrink current axis by 20% to make room for legend
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        
        plt.savefig(self.plotdir + self.fname + '_' + self.city + '_' +
                    self.column + '_' + self.how + '.png', bbox_inches='tight')
        #plt.savefig(self.plotdir+self.fname+'_'+self.city+'_'+self.column+\
        #            self.how+'.png', bbox_inches = 'tight', format='eps')
        plt.show()
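
The axis-shrinking trick used above to fit the legend outside the plot is plain Matplotlib; a minimal self-contained sketch:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1], label='demo')
# shrink the axes to 80% of its width so the legend fits to the right
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
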
Example #3
# NOTE: the start of this statement was truncated in the source;
# the target column name 'Maps_URL' below is a guess.
new_df['Maps_URL'] = new_df.apply(
    'https://www.google.com/maps/search/?api=1&query={0[Latitud]},{0[Longitud]}'
    .format,
    axis=1)
new_df.sort_values(by=['Indice'], inplace=True)
new_df.to_csv('Indices.csv', index=False)

g = GoogleVisibleMap(
    x=[-76.533, -76.525],
    y=[3.340, 3.375],
    scale=2,  # scale is for more details
    maptype='roadmap')

f, ax = plt.subplots(1, figsize=(12, 12))
ggl_img = g.get_vardata()

sm = Map(g.grid, factor=1, countries=False)
sm.set_rgb(ggl_img)
sm.visualize(ax=ax)
n = new_df['Indice'].to_numpy()
x = new_df['Longitud'].to_numpy()
y = new_df['Latitud'].to_numpy()

tipo = 'OUTDOOR'

groups = centers.where(centers['Modo'] == tipo).groupby('Indice_Corregido')
for name, group in groups:
    lat, long = group['Latitud'], group['Longitud']
    long, lat = sm.grid.transform(long, lat)
    ax.scatter(long,
               lat,
               s=0.1)  # NOTE: remaining scatter arguments truncated in the source
Example #4
            print('Deposit class CL' + str(i + 1).zfill(2) +
                  ' mass %.1e kg' % mass_on_the_ground)

            # Save the deposit for the single classes on an ESRI raster ASCII file
            output_file = runname+'_'+'CL'+str(i+1)+'_'+day+'_'+time+'.asc'

            np.savetxt(output_file, np.flipud(loading_i),
                       header=header, fmt='%.3E', comments='')

            # Create a new figure
            f = plt.figure(i)
            plt.rcParams["font.size"] = 8.0
            cmap = plt.get_cmap('Spectral')
            norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
            
            sm = Map(g.grid, factor=1, countries=False)
            sm.set_rgb(ggl_img)  # add the background rgb image
            sm.visualize()

            # Zero-loading pixels should not be plotted
            loading_i[loading_i==0.0] = np.nan
            Zm = np.ma.masked_where(np.isnan(loading_i),loading_i)

            x, y = sm.grid.transform(Lon_stag, Lat_stag)
            plt.pcolormesh(x, y, Zm, cmap=cmap, norm=norm, alpha=0.50)

            plt.xlim(np.amin(x), np.amax(x))
            plt.ylim(np.amax(y), np.amin(y))
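
The mask-plus-BoundaryNorm pattern above (hide zero-loading pixels, shade the rest in discrete levels) works on any array; a minimal sketch with synthetic data:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm

# synthetic loading field with zero patches (values illustrative)
loading = np.random.rand(50, 50)
loading[loading < 0.3] = 0.0
masked = np.ma.masked_where(loading == 0.0, loading)

levels = [0.3, 0.5, 0.7, 0.9, 1.0]
cmap = plt.get_cmap('Spectral')
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)

plt.pcolormesh(masked, cmap=cmap, norm=norm, alpha=0.5)
plt.colorbar()
plt.show()
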
Example #5
def Interpolating_models_ims(time='2013-10-19T22:00:00',
                             var='TD',
                             plot=True,
                             gis_path=gis_path,
                             method='okrig',
                             dem_path=work_yuval / 'AW3D30',
                             lapse_rate=5.,
                             cv=None,
                             rms=None,
                             gridsearch=False):
    """main 2d_interpolation from stations to map"""
    # cv usage is {'kfold': 5} or {'rkfold': [2, 3]}
    # TODO: try 1d modeling first, like T=f(lat)
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.neighbors import KNeighborsRegressor
    from pykrige.rk import Krige
    import numpy as np
    from sklearn.svm import SVR
    from sklearn.linear_model import LinearRegression
    from sklearn.ensemble import RandomForestRegressor
    from scipy.spatial import Delaunay
    from scipy.interpolate import griddata
    from sklearn.metrics import mean_squared_error
    from aux_gps import coarse_dem
    import seaborn as sns
    import matplotlib.pyplot as plt
    import pyproj
    from sklearn.utils.estimator_checks import check_estimator
    from pykrige.compat import GridSearchCV
    lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')
    ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')

    def parse_cv(cv):
        """Parse a cv dict, e.g. {'kfold': 5} or {'rkfold': [n_splits, n_repeats]}."""
        from sklearn.model_selection import KFold
        from sklearn.model_selection import RepeatedKFold
        from sklearn.model_selection import LeaveOneOut
        if 'kfold' in cv.keys():
            n_splits = cv['kfold']
            print('CV is KFold with n_splits={}'.format(n_splits))
            return KFold(n_splits=n_splits)
        if 'rkfold' in cv.keys():
            n_splits = cv['rkfold'][0]
            n_repeats = cv['rkfold'][1]
            print('CV is RepeatedKFold with n_splits={},'.format(n_splits) +
                  ' n_repeats={}'.format(n_repeats))
            return RepeatedKFold(n_splits=n_splits,
                                 n_repeats=n_repeats,
                                 random_state=42)
        if 'loo' in cv.keys():
            return LeaveOneOut()

    # from aux_gps import scale_xr
    da = create_lat_lon_mesh(points_per_degree=250)  # 500?
    awd = coarse_dem(da)
    awd = awd.values
    geo_snap = geo_pandas_time_snapshot(var=var, datetime=time, plot=False)
    if var == 'TD':
        [a, b] = np.polyfit(geo_snap['alt'].values, geo_snap['TD'].values, 1)
        if lapse_rate == 'auto':
            lapse_rate = np.abs(a) * 1000
        fig, ax_lapse = plt.subplots(figsize=(10, 6))
        sns.regplot(data=geo_snap,
                    x='alt',
                    y='TD',
                    color='r',
                    scatter_kws={'color': 'b'},
                    ax=ax_lapse)
        suptitle = time.replace('T', ' ')
        ax_lapse.set_xlabel('Altitude [m]')
        ax_lapse.set_ylabel('Temperature [degC]')
        ax_lapse.text(0.5,
                      0.95,
                      'Lapse_rate: {:.2f} degC/km'.format(lapse_rate),
                      horizontalalignment='center',
                      verticalalignment='center',
                      transform=ax_lapse.transAxes,
                      fontsize=12,
                      color='k',
                      fontweight='bold')
        ax_lapse.grid()
        ax_lapse.set_title(suptitle, fontsize=14, fontweight='bold')
#     fig.suptitle(suptitle, fontsize=14, fontweight='bold')
    alts = []
    for i, row in geo_snap.iterrows():
        lat = da.sel(lat=row['lat'], method='nearest').lat.values
        lon = da.sel(lon=row['lon'], method='nearest').lon.values
        alt = row['alt']
        if lapse_rate is not None and var == 'TD':
            da.loc[{'lat': lat, 'lon': lon}] = row[var] + \
                lapse_rate * alt / 1000.0
            alts.append(alt)
        elif lapse_rate is None or var != 'TD':
            da.loc[{'lat': lat, 'lon': lon}] = row[var]
            alts.append(alt)
    # da_scaled = scale_xr(da)
    c = np.linspace(min(da.lat.values), max(da.lat.values), da.shape[0])
    r = np.linspace(min(da.lon.values), max(da.lon.values), da.shape[1])
    rr, cc = np.meshgrid(r, c)
    vals = ~np.isnan(da.values)
    if lapse_rate is None:
        Xrr, Ycc, Z = pyproj.transform(lla,
                                       ecef,
                                       rr[vals],
                                       cc[vals],
                                       np.array(alts),
                                       radians=False)
        X = np.column_stack([Xrr, Ycc, Z])
        XX, YY, ZZ = pyproj.transform(lla,
                                      ecef,
                                      rr,
                                      cc,
                                      awd,
                                      radians=False)
        rr_cc_as_cols = np.column_stack(
            [XX.flatten(), YY.flatten(),
             ZZ.flatten()])
    else:
        X = np.column_stack([rr[vals], cc[vals]])
        rr_cc_as_cols = np.column_stack([rr.flatten(), cc.flatten()])
    # y = da_scaled.values[vals]
    y = da.values[vals]
    if method == 'gp-rbf':
        from sklearn.gaussian_process.kernels import RBF
        from sklearn.gaussian_process.kernels import WhiteKernel
        kernel = 1.0 * RBF(length_scale=0.25, length_scale_bounds=(1e-2, 1e3)) \
            + WhiteKernel(noise_level=0.01, noise_level_bounds=(1e-10, 1e+1))
        #        kernel = None
        model = GaussianProcessRegressor(alpha=0.0,
                                         kernel=kernel,
                                         n_restarts_optimizer=5,
                                         random_state=42,
                                         normalize_y=True)

    elif method == 'gp-qr':
        from sklearn.gaussian_process.kernels import RationalQuadratic
        from sklearn.gaussian_process.kernels import WhiteKernel
        kernel = RationalQuadratic(length_scale=100.0) \
            + WhiteKernel(noise_level=0.01, noise_level_bounds=(1e-10, 1e+1))
        model = GaussianProcessRegressor(alpha=0.0,
                                         kernel=kernel,
                                         n_restarts_optimizer=5,
                                         random_state=42,
                                         normalize_y=True)
    elif method == 'knn':
        model = KNeighborsRegressor(n_neighbors=5, weights='distance')
    elif method == 'svr':
        model = SVR(C=1.0,
                    cache_size=200,
                    coef0=0.0,
                    degree=3,
                    epsilon=0.1,
                    gamma='auto_deprecated',
                    kernel='rbf',
                    max_iter=-1,
                    shrinking=True,
                    tol=0.001,
                    verbose=False)
    elif method == 'okrig':
        model = Krige(method='ordinary',
                      variogram_model='spherical',
                      verbose=True)
    elif method == 'ukrig':
        model = Krige(method='universal',
                      variogram_model='linear',
                      verbose=True)
#    elif method == 'okrig3d':
#        # don't bother - MemoryError...
#        model = OrdinaryKriging3D(rr[vals], cc[vals], np.array(alts),
#                                  da.values[vals], variogram_model='linear',
#                                  verbose=True)
#        awd = coarse_dem(da)
#        interpolated, ss = model.execute('grid', r, c, awd['data'].values)
#    elif method == 'rkrig':
#        # est = LinearRegression()
#        est = RandomForestRegressor()
#        model = RegressionKriging(regression_model=est, n_closest_points=5,
#                                  verbose=True)
#        p = np.array(alts).reshape(-1, 1)
#        model.fit(p, X, y)
#        P = awd.flatten().reshape(-1, 1)
#        interpolated = model.predict(P, rr_cc_as_cols).reshape(da.values.shape)
#    try:
#        u = check_estimator(model)
#    except TypeError:
#        u = False
#        pass
    if cv is not None and not gridsearch:  # and u is None):
        # from sklearn.model_selection import cross_validate
        from sklearn import metrics
        cv = parse_cv(cv)
        ytests = []
        ypreds = []
        for train_idx, test_idx in cv.split(X):
            X_train, X_test = X[train_idx], X[test_idx]  # requires arrays
            y_train, y_test = y[train_idx], y[test_idx]
            model.fit(X_train, y_train)
            y_pred = model.predict(X_test)
            # there is only one y-test and y-pred per iteration over the loo.split,
            # so to get a proper graph, we append them to respective lists.
            ytests += list(y_test)
            ypreds += list(y_pred)
        true_vals = np.array(ytests)
        predicted = np.array(ypreds)
        r2 = metrics.r2_score(ytests, ypreds)
        ms_error = metrics.mean_squared_error(ytests, ypreds)
        print("R^2: {:.5f}%, MSE: {:.5f}".format(r2 * 100, ms_error))
    if gridsearch:
        cv = parse_cv(cv)
        param_dict = {
            "method": ["ordinary", "universal"],
            "variogram_model": ["linear", "power", "gaussian", "spherical"],
            # "nlags": [4, 6, 8],
            # "weight": [True, False]
        }
        estimator = GridSearchCV(Krige(),
                                 param_dict,
                                 verbose=True,
                                 cv=cv,
                                 scoring='neg_mean_absolute_error',
                                 return_train_score=True,
                                 n_jobs=1)
        estimator.fit(X, y)
        if hasattr(estimator, 'best_score_'):
            print('best_score = {:.3f}'.format(estimator.best_score_))
            print('best_params = ', estimator.best_params_)

        return estimator
        #    if (cv is not None and not u):
        #        from sklearn import metrics
        #        cv = parse_cv(cv)
        #        ytests = []
        #        ypreds = []
        #        for train_idx, test_idx in cv.split(X):
        #            X_train, X_test = X[train_idx], X[test_idx]  # requires arrays
        #            y_train, y_test = y[train_idx], y[test_idx]
        ##            model = UniversalKriging(X_train[:, 0], X_train[:, 1], y_train,
        ##                                     variogram_model='linear', verbose=False,
        ##                                     enable_plotting=False)
        #            model.X_ORIG = X_train[:, 0]
        #            model.X_ADJUSTED = model.X_ORIG
        #            model.Y_ORIG = X_train[:, 1]
        #            model.Y_ADJUSTED = model.Y_ORIG
        #            model.Z = y_train
        #            y_pred, ss = model.execute('points', X_test[0, 0],
        #                                             X_test[0, 1])
        #            # there is only one y-test and y-pred per iteration over the loo.split,
        #            # so to get a proper graph, we append them to respective lists.
        #            ytests += list(y_test)
#            ypreds += list(y_pred)
#        true_vals = np.array(ytests)
#        predicted = np.array(ypreds)
#        r2 = metrics.r2_score(ytests, ypreds)
#        ms_error = metrics.mean_squared_error(ytests, ypreds)
#        print("R^2: {:.5f}%, MSE: {:.5f}".format(r2*100, ms_error))
#        cv_results = cross_validate(gp, X, y, cv=cv, scoring='mean_squared_error',
#                                    return_train_score=True, n_jobs=-1)
#        test = xr.DataArray(cv_results['test_score'], dims=['kfold'])
#        train = xr.DataArray(cv_results['train_score'], dims=['kfold'])
#        train.name = 'train'
#        cds = test.to_dataset(name='test')
#        cds['train'] = train
#        cds['kfold'] = np.arange(len(cv_results['test_score'])) + 1
#        cds['mean_train'] = cds.train.mean('kfold')
#        cds['mean_test'] = cds.test.mean('kfold')

# interpolated=griddata(X, y, (rr, cc), method='nearest')
    model.fit(X, y)
    interpolated = model.predict(rr_cc_as_cols).reshape(da.values.shape)
    da_inter = da.copy(data=interpolated)
    if lapse_rate is not None and var == 'TD':
        da_inter -= lapse_rate * awd / 1000.0
    if (rms is not None and cv is None):  # or (rms is not None and not u):
        predicted = []
        true_vals = []
        for i, row in geo_snap.iterrows():
            lat = da.sel(lat=row['lat'], method='nearest').lat.values
            lon = da.sel(lon=row['lon'], method='nearest').lon.values
            pred = da_inter.loc[{'lat': lat, 'lon': lon}].values.item()
            true = row[var]
            predicted.append(pred)
            true_vals.append(true)
        predicted = np.array(predicted)
        true_vals = np.array(true_vals)
        ms_error = mean_squared_error(true_vals, predicted)
        print("MSE: {:.5f}".format(ms_error))
    if plot:
        import salem
        from salem import DataLevels, Map
        import cartopy.crs as ccrs
        # import cartopy.io.shapereader as shpreader
        import matplotlib.pyplot as plt
        # fname = gis_path / 'ne_10m_admin_0_sovereignty.shp'
        # fname = gis_path / 'gadm36_ISR_0.shp'
        # ax = plt.axes(projection=ccrs.PlateCarree())
        f, ax = plt.subplots(figsize=(6, 10))
        # shdf = salem.read_shapefile(salem.get_demo_file('world_borders.shp'))
        shdf = salem.read_shapefile(gis_path / 'Israel_and_Yosh.shp')
        # shdf = shdf.loc[shdf['CNTRY_NAME'] == 'Israel']  # remove other countries
        shdf.crs = {'init': 'epsg:4326'}
        dsr = da_inter.salem.roi(shape=shdf)
        grid = da_inter.salem.grid
        sm = Map(grid)
        # sm.set_shapefile(gis_path / 'Israel_and_Yosh.shp')
        # sm = dsr.salem.quick_map(ax=ax)
        #        sm2 = salem.Map(grid, factor=1)
        #        sm2.set_shapefile(gis_path/'gis_osm_water_a_free_1.shp',
        #                          edgecolor='k')
        sm.set_data(dsr)
        # sm.set_nlevels(7)
        # sm.visualize(ax=ax, title='Israel {} interpolated temperature from IMS'.format(method),
        #             cbar_title='degC')
        sm.set_shapefile(gis_path / 'gis_osm_water_a_free_1.shp',
                         edgecolor='k')  # , facecolor='aqua')
        # sm.set_topography(awd.values, crs=awd.crs)
        # sm.set_rgb(crs=shdf.crs, natural_earth='hr')  # ad
        # lakes = salem.read_shapefile(gis_path/'gis_osm_water_a_free_1.shp')
        sm.set_cmap(cm='rainbow')
        sm.visualize(
            ax=ax,
            title='Israel {} interpolated temperature from IMS'.format(method),
            cbar_title='degC')
        dl = DataLevels(geo_snap[var], levels=sm.levels)
        dl.set_cmap(sm.cmap)
        x, y = sm.grid.transform(geo_snap.lon.values, geo_snap.lat.values)
        ax.scatter(x,
                   y,
                   color=dl.to_rgb(),
                   s=20,
                   edgecolors='k',
                   linewidths=0.5)
        suptitle = time.replace('T', ' ')
        f.suptitle(suptitle, fontsize=14, fontweight='bold')
        if (rms is not None or cv is not None) and (not gridsearch):
            import seaborn as sns
            f, ax = plt.subplots(1, 2, figsize=(12, 6))
            sns.scatterplot(x=true_vals,
                            y=predicted,
                            ax=ax[0],
                            marker='.',
                            s=100)
            resid = predicted - true_vals
            sns.distplot(resid, bins=5, color='c', label='residuals', ax=ax[1])
            rmean = np.mean(resid)
            rstd = np.std(resid)
            rmedian = np.median(resid)
            rmse = np.sqrt(mean_squared_error(true_vals, predicted))
            plt.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
            _, max_ = plt.ylim()
            plt.text(rmean + rmean / 10, max_ - max_ / 10,
                     'Mean: {:.2f}, RMSE: {:.2f}'.format(rmean, rmse))
            f.tight_layout()
        # lakes.plot(ax=ax, color='b', edgecolor='k')
        # lake_borders = gpd.overlay(countries, capitals, how='difference')
        # adm1_shapes = list(shpreader.Reader(fname).geometries())
        # ax = plt.axes(projection=ccrs.PlateCarree())
        # ax.coastlines(resolution='10m')
        # ax.add_geometries(adm1_shapes, ccrs.PlateCarree(),
        #                  edgecolor='black', facecolor='gray', alpha=0.5)
        # da_inter.plot.pcolormesh('lon', 'lat', ax=ax)
        #geo_snap.plot(ax=ax, column=var, cmap='viridis', edgecolor='black',
        #              legend=False)
    return da_inter
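
The manual cross-validation loop above works with any sklearn-style estimator; a minimal sketch using synthetic data and a linear model in place of the kriging estimator:

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
from sklearn import metrics

# synthetic 2-D predictors and a noisy linear target (illustrative)
rng = np.random.default_rng(42)
X = rng.random((100, 2))
y = X @ np.array([2.0, -1.0]) + rng.normal(scale=0.1, size=100)

model = LinearRegression()
ytests, ypreds = [], []
for train_idx, test_idx in KFold(n_splits=5).split(X):
    model.fit(X[train_idx], y[train_idx])
    # collect per-fold predictions so all folds are scored together
    ytests += list(y[test_idx])
    ypreds += list(model.predict(X[test_idx]))

print("R^2: {:.5f}%, MSE: {:.5f}".format(
    metrics.r2_score(ytests, ypreds) * 100,
    metrics.mean_squared_error(ytests, ypreds)))
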
Example #6
def geogrid_simulator(fpath, do_maps=True, map_kwargs=None):
    """Emulates geogrid.exe, which is useful when defining new WRF domains.

    Parameters
    ----------
    fpath: str
       path to a namelist.wps file
    do_maps: bool
       whether the simulator should also return maps of the grids
    map_kwargs: dict
       kwargs to pass to salem.Map()

    Returns
    -------
    (grids, maps) with:
        - grids: a list of Grids corresponding to the domains
          defined in the namelist
        - maps: a list of maps corresponding to the grids (if do_maps==True)
    """

    with open(fpath) as f:
        lines = f.readlines()

    pargs = dict()
    for l in lines:
        s = l.split('=')
        if len(s) < 2:
            continue
        s0 = s[0].strip().upper()
        s1 = list(filter(None, s[1].strip().replace('\n', '').split(',')))

        if s0 == 'PARENT_ID':
            parent_id = [int(s) for s in s1]
        if s0 == 'PARENT_GRID_RATIO':
            parent_ratio = [int(s) for s in s1]
        if s0 == 'I_PARENT_START':
            i_parent_start = [int(s) for s in s1]
        if s0 == 'J_PARENT_START':
            j_parent_start = [int(s) for s in s1]
        if s0 == 'E_WE':
            e_we = [int(s) for s in s1]
        if s0 == 'E_SN':
            e_sn = [int(s) for s in s1]
        if s0 == 'DX':
            dx = float(s1[0])
        if s0 == 'DY':
            dy = float(s1[0])
        if s0 == 'MAP_PROJ':
            map_proj = s1[0].replace("'", '').strip().upper()
        if s0 == 'REF_LAT':
            pargs['lat_0'] = float(s1[0])
        if s0 == 'REF_LON':
            pargs['ref_lon'] = float(s1[0])
        if s0 == 'TRUELAT1':
            pargs['lat_1'] = float(s1[0])
        if s0 == 'TRUELAT2':
            pargs['lat_2'] = float(s1[0])
        if s0 == 'STAND_LON':
            pargs['lon_0'] = float(s1[0])

    # Sometimes files are not complete
    pargs.setdefault('lon_0', pargs['ref_lon'])

    # define projection
    if map_proj == 'LAMBERT':
        pwrf = '+proj=lcc +lat_1={lat_1} +lat_2={lat_2} ' \
               '+lat_0={lat_0} +lon_0={lon_0} ' \
               '+x_0=0 +y_0=0 +a=6370000 +b=6370000'
        pwrf = pwrf.format(**pargs)
    elif map_proj == 'MERCATOR':
        pwrf = '+proj=merc +lat_ts={lat_1} +lon_0={lon_0} ' \
               '+x_0=0 +y_0=0 +a=6370000 +b=6370000'
        pwrf = pwrf.format(**pargs)
    elif map_proj == 'POLAR':
        pwrf = '+proj=stere +lat_ts={lat_1} +lat_0=90.0 +lon_0={lon_0} ' \
               '+x_0=0 +y_0=0 +a=6370000 +b=6370000'
        pwrf = pwrf.format(**pargs)
    else:
        raise NotImplementedError('WRF proj not implemented yet: '
                                  '{}'.format(map_proj))
    pwrf = gis.check_crs(pwrf)

    # get easting and northings from dom center (probably unnecessary here)
    e, n = pyproj.transform(wgs84, pwrf, pargs['ref_lon'], pargs['lat_0'])

    # LL corner
    nx, ny = e_we[0] - 1, e_sn[0] - 1
    x0 = -(nx - 1) / 2. * dx + e  # -2 because of staggered grid
    y0 = -(ny - 1) / 2. * dy + n

    # parent grid
    grid = gis.Grid(nxny=(nx, ny), x0y0=(x0, y0), dxdy=(dx, dy), proj=pwrf)

    # child grids
    out = [grid]
    for ips, jps, pid, ratio, we, sn in zip(i_parent_start, j_parent_start,
                                            parent_id, parent_ratio, e_we,
                                            e_sn):
        if ips == 1:
            continue
        ips -= 1
        jps -= 1
        we -= 1
        sn -= 1
        nx = we / ratio
        ny = sn / ratio
        if nx != int(nx):
            raise RuntimeError('e_we and ratios are incompatible: '
                               '(e_we - 1) / ratio must be integer!')
        if ny != int(ny):
            raise RuntimeError('e_sn and ratios are incompatible: '
                               '(e_sn - 1) / ratio must be integer!')

        prevgrid = out[pid - 1]
        xx, yy = prevgrid.corner_grid.x_coord, prevgrid.corner_grid.y_coord
        dx = prevgrid.dx / ratio
        dy = prevgrid.dy / ratio
        grid = gis.Grid(nxny=(we, sn),
                        x0y0=(xx[ips], yy[jps]),
                        dxdy=(dx, dy),
                        pixel_ref='corner',
                        proj=pwrf)
        out.append(grid.center_grid)

    maps = None
    if do_maps:
        from salem import Map
        import shapely.geometry as shpg

        if map_kwargs is None:
            map_kwargs = {}

        maps = []
        for i, g in enumerate(out):
            m = Map(g, **map_kwargs)

            for j in range(i + 1, len(out)):
                cg = out[j]
                left, right, bottom, top = cg.extent

                s = np.array([(left, bottom), (right, bottom), (right, top),
                              (left, top)])
                l1 = shpg.LinearRing(s)
                m.set_geometry(l1,
                               crs=cg.proj,
                               linewidth=(len(out) - j),
                               zorder=5)

            maps.append(m)

    return out, maps
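
For reference, the &geogrid block this parser consumes looks roughly like the following; all values are illustrative, and only the keys matched in the loop above are used:

&geogrid
 parent_id         = 1, 1,
 parent_grid_ratio = 1, 3,
 i_parent_start    = 1, 31,
 j_parent_start    = 1, 17,
 e_we              = 100, 103,
 e_sn              = 100, 103,
 dx                = 30000,
 dy                = 30000,
 map_proj          = 'lambert',
 ref_lat           = 45.0,
 ref_lon           = 10.0,
 truelat1          = 30.0,
 truelat2          = 60.0,
 stand_lon         = 10.0,
/
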
Example #7
def geogrid_simulator(fpath, do_maps=True):
    """Emulates geogrid.exe, which is useful when defining new WRF domains.

    Parameters
    ----------
    fpath: str
       path to a namelist.wps file
    do_maps: bool
       whether the simulator should also return maps of the grids

    Returns
    -------
    (grids, maps) with:
        - grids: a list of Grids corresponding to the domains
          defined in the namelist
        - maps: a list of maps corresponding to the grids (if do_maps==True)
    """

    with open(fpath) as f:
        lines = f.readlines()

    pargs = dict()
    for l in lines:
        s = l.split('=')
        if len(s) < 2:
            continue
        s0 = s[0].strip().upper()
        s1 = list(filter(None, s[1].strip().replace('\n', '').split(',')))

        if s0 == 'PARENT_ID':
            parent_id = [int(s) for s in s1]
        if s0 == 'PARENT_GRID_RATIO':
            parent_ratio = [int(s) for s in s1]
        if s0 == 'I_PARENT_START':
            i_parent_start = [int(s) for s in s1]
        if s0 == 'J_PARENT_START':
            j_parent_start = [int(s) for s in s1]
        if s0 == 'E_WE':
            e_we = [int(s) for s in s1]
        if s0 == 'E_SN':
            e_sn = [int(s) for s in s1]
        if s0 == 'DX':
            dx = float(s1[0])
        if s0 == 'DY':
            dy = float(s1[0])
        if s0 == 'MAP_PROJ':
            map_proj = s1[0].replace("'", '').strip().upper()
        if s0 == 'REF_LAT':
            pargs['lat_0'] = float(s1[0])
        if s0 == 'REF_LON':
            pargs['lon_0'] = float(s1[0])
        if s0 == 'TRUELAT1':
            pargs['lat_1'] = float(s1[0])
        if s0 == 'TRUELAT2':
            pargs['lat_2'] = float(s1[0])

    # define projection
    if map_proj == 'LAMBERT':
        pwrf = '+proj=lcc +lat_1={lat_1} +lat_2={lat_2} ' \
             '+lat_0={lat_0} +lon_0={lon_0} ' \
             '+x_0=0 +y_0=0 +a=6370000 +b=6370000'
        pwrf = pwrf.format(**pargs)
    elif map_proj == 'MERCATOR':
        pwrf = '+proj=merc +lat_ts={lat_1} ' \
             '+lon_0={lon_0} ' \
             '+x_0=0 +y_0=0 +a=6370000 +b=6370000'
        pwrf = pwrf.format(**pargs)
    else:
        raise NotImplementedError('WRF proj not implemented yet: '
                                  '{}'.format(map_proj))
    pwrf = gis.check_crs(pwrf)

    # get easting and northings from dom center (probably unnecessary here)
    e, n = pyproj.transform(wgs84, pwrf, pargs['lon_0'], pargs['lat_0'])

    # LL corner
    nx, ny = e_we[0]-1, e_sn[0]-1
    x0 = -(nx-1) / 2. * dx + e  # -2 because of staggered grid
    y0 = -(ny-1) / 2. * dy + n

    # parent grid
    grid = gis.Grid(nxny=(nx, ny), ll_corner=(x0, y0), dxdy=(dx, dy),
                    proj=pwrf)

    # child grids
    out = [grid]
    for ips, jps, pid, ratio, we, sn in zip(i_parent_start, j_parent_start,
                                            parent_id, parent_ratio,
                                            e_we, e_sn):
        if ips == 1:
            continue
        ips -= 1
        jps -= 1
        we -= 1
        sn -= 1
        nx = we / ratio
        ny = sn / ratio
        if nx != int(nx):
            raise RuntimeError('e_we and ratios are incompatible: '
                               '(e_we - 1) / ratio must be integer!')
        if ny != int(ny):
            raise RuntimeError('e_sn and ratios are incompatible: '
                               '(e_sn - 1) / ratio must be integer!')

        prevgrid = out[pid - 1]
        xx, yy = prevgrid.corner_grid.x_coord, prevgrid.corner_grid.y_coord
        dx = prevgrid.dx / ratio
        dy = prevgrid.dy / ratio
        grid = gis.Grid(nxny=(we, sn),
                        ll_corner=(xx[ips], yy[jps]),
                        dxdy=(dx, dy),
                        pixel_ref='corner',
                        proj=pwrf)
        out.append(grid.center_grid)

    maps = None
    if do_maps:
        from salem import Map
        import shapely.geometry as shpg

        maps = []
        for i, g in enumerate(out):
            m = Map(g)

            for j in range(i+1, len(out)):
                cg = out[j]
                left, right, bottom, top = cg.extent

                s = np.array([(left, bottom), (right, bottom),
                              (right, top), (left, top)])
                l1 = shpg.LinearRing(s)
                m.set_geometry(l1, crs=cg.proj, linewidth=(len(out)-j))

            maps.append(m)

    return out, maps
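
A minimal driver for the function above; 'namelist.wps' is a placeholder path:

import matplotlib.pyplot as plt

# placeholder path to a WPS namelist file
grids, maps = geogrid_simulator('namelist.wps', do_maps=True)
for i, m in enumerate(maps):
    m.visualize(title='Domain {}'.format(i + 1), addcbar=False)
    plt.show()
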
Example #8
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))

# read the shapefile and use its extent to define an ideally sized map
shp = salem.read_shapefile(get_demo_file('rgi_kesselwand.shp'))
# If you need to do a lot of maps you might want
# to use an API key and set it here with key='YOUR_API_KEY'
g = GoogleVisibleMap(x=[shp.min_x, shp.max_x], y=[shp.min_y, shp.max_y],
                     maptype='satellite')  # try out also: 'terrain'

# the google static image is a standard rgb image
ggl_img = g.get_vardata()
ax1.imshow(ggl_img)
ax1.set_title('Google static map')

# make a map of the same size as the image (no country borders)
sm = Map(g.grid, factor=1, countries=False)
sm.set_shapefile(shp)  # add the glacier outlines
sm.set_rgb(ggl_img)  # add the background rgb image
sm.visualize(ax=ax2)  # plot it
ax2.set_title('GPR measurements')

# read the point GPR data and add them to the plot
df = pd.read_csv(get_demo_file('gtd_ttt_kesselwand.csv'))
dl = DataLevels(df.THICKNESS, levels=np.arange(10, 201, 10), extend='both')
x, y = sm.grid.transform(df.POINT_LON.values, df.POINT_LAT.values)
ax2.scatter(x, y, color=dl.to_rgb(), s=50, edgecolors='k', linewidths=1)
dl.append_colorbar(ax2, label='Ice thickness (m)')

# make it nice
plt.tight_layout()
plt.show()
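
DataLevels also works on its own for mapping scalar values to a discrete color scale; a minimal sketch with illustrative thickness values:

import numpy as np
from salem import DataLevels

# illustrative ice-thickness values in metres
vals = np.array([12., 55., 140., 260.])
dl = DataLevels(vals, levels=np.arange(10, 201, 10), extend='both')
colors = dl.to_rgb()  # one RGBA color per input value
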
Example #9
g, maps = geogrid_simulator(fpath)
print(g)
print(maps[0])
maps[0].set_rgb(natural_earth='lr')
maps[0].visualize(title='Anidamiento Qollpana Marzo 2018', addcbar=False)
plt.savefig('QollpanaMarzo2018.png', dpi=300)

plt.show()
plt.close()
#ALBBCK
ds = open_xr_dataset(
    "/home/opti3040a/Documentos/WRF15-08-19/wrfout_d01_2018-03-05_00:00:00")
grid = mercator_grid(center_ll=(-60.0, -17.9), extent=(75e5, 75e5))
smap = Map(grid, nx=500)
# VAR_SSO
smap.set_data(ds.LANDMASK)
smap.visualize()
plt.show()

# # get the data at the latest time step
# ds = salem.open_wrf_dataset("/home/opti3040a/Documentos/WRF_1s/WRF_OUT/wrfout_d03_2018-06-01_09:00:00").isel(time=-1)
# print(ds.WS)
# # get the wind data at 10000 m a.s.l.
# u = ds.salem.wrf_zlevel('U', 2800.)
# v = ds.salem.wrf_zlevel('V', 2800.)
# ws = ds.salem.wrf_zlevel('WS', 2800.)

# # get the axes ready
# f, ax = plt.subplots()
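
The commented block above hints at salem's WRF accessors; a minimal sketch, with a placeholder output path:

import salem
import matplotlib.pyplot as plt

# placeholder path to a WRF output file
ds = salem.open_wrf_dataset('wrfout_d03.nc').isel(time=-1)
# interpolate wind speed to 2800 m a.s.l. and plot it
ws = ds.salem.wrf_zlevel('WS', 2800.)
ws.salem.quick_map()
plt.show()
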
Example #10
# read the shapefile and use its extent to define an ideally sized map
shp = salem.read_shapefile(get_demo_file('rgi_kesselwand.shp'))
# If you need to do a lot of maps you might want
# to use an API key and set it here with key='YOUR_API_KEY'
g = GoogleVisibleMap(x=[shp.min_x, shp.max_x],
                     y=[shp.min_y, shp.max_y],
                     maptype='satellite')  # try out also: 'terrain'

# the google static image is a standard rgb image
ggl_img = g.get_vardata()
ax1.imshow(ggl_img)
ax1.set_title('Google static map')

# make a map of the same size as the image (no country borders)
sm = Map(g.grid, factor=1, countries=False)
sm.set_shapefile(shp)  # add the glacier outlines
sm.set_rgb(ggl_img)  # add the background rgb image
sm.set_scale_bar(location=(0.88, 0.94))  # add scale
sm.visualize(ax=ax2)  # plot it
ax2.set_title('GPR measurements')

# read the point GPR data and add them to the plot
df = pd.read_csv(get_demo_file('gtd_ttt_kesselwand.csv'))
dl = DataLevels(df.THICKNESS, levels=np.arange(10, 201, 10), extend='both')
x, y = sm.grid.transform(df.POINT_LON.values, df.POINT_LAT.values)
ax2.scatter(x, y, color=dl.to_rgb(), s=50, edgecolors='k', linewidths=1)
dl.append_colorbar(ax2, label='Ice thickness (m)')

# make it nice
plt.tight_layout()
Example #11
"""
Topographic shading
===================

Add topographic shading to a plot

"""

from salem import mercator_grid, Map, get_demo_file
import matplotlib.pyplot as plt

# prepare the figure
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 7))

# map extent
grid = mercator_grid(center_ll=(10.76, 46.79), extent=(18000, 14000))
sm = Map(grid, countries=False)
sm.set_lonlat_contours(interval=0)
sm.set_scale_bar()

# add topography
fpath = get_demo_file('hef_srtm.tif')
sm.set_topography(fpath)
sm.visualize(ax=ax1, addcbar=False, title='relief_factor=0.7 (default)')

# stronger shading
sm.set_topography(fpath, relief_factor=1.4)
sm.visualize(ax=ax2, addcbar=False, title='relief_factor=1.4')

# add color shading
z = sm.set_topography(fpath)
sm.set_data(z)
Example #12
    def spatial_bin_plot(self,
                         category,
                         quantity,
                         bin_step=20,
                         color='viridis'):
        # scale dimension: number of size levels used for the scale legend
        scaleDim = 5

        # Bin the data spatially
        data = self.data
        # filter the data by category
        data = data[data[self.ccategorical].isin(category)]
        # These constants are the coordinate limits supported by the
        # Google Maps static image; values beyond them can produce errors
        maxconst = (-86.82743293, 86.92841107, -176.1111116, 176.4292565)
        minlat = max(data[self.clatitude].min(), maxconst[0])
        maxlat = min(data[self.clatitude].max(), maxconst[1])
        minlong = max(data[self.clongitude].min(), maxconst[2])
        maxlong = min(data[self.clongitude].max(), maxconst[3])
        #print(minlat,maxlat,minlong,maxlong)
        g = GoogleVisibleMap(
            x=[minlong, maxlong], y=[minlat, maxlat],
            maptype='terrain')  # try out also: 'satellite'

        # the google static image is a standard rgb image
        ggl_img = g.get_vardata()
        #ax.imshow(ggl_img)
        # make a map of the same size as the image (no country borders)
        sm = Map(g.grid, factor=1, countries=False)
        sm.set_rgb(ggl_img)  # add the background rgb image

        #print(minlat,maxlat,minlong,maxlong)
        # make range for Latitude
        # set step
        xstep = bin_step
        ystep = bin_step
        latBin = np.linspace(data[self.clatitude].min(),
                             data[self.clatitude].max(), xstep)
        longBin = np.linspace(data[self.clongitude].min(),
                              data[self.clongitude].max(), ystep)
        #logger.debug(latBin)
        #print(longBin)

        quantBinArr = []
        quantmeanArr = []
        quantsumArr = []

        latStepLen = latBin[1] - latBin[0]
        longStepLen = longBin[1] - longBin[0]

        #treesWithoutVacant = trees.filter_ne('Tree Species','vacant site large')
        for x in range(latBin.size):
            if (x < latBin.size - 1):
                latSelMin = min(latBin[x], latBin[x + 1])
                latSelMax = max(latBin[x], latBin[x + 1])

                #latData = data[(data['latitude']>latBin[x])&(data['latitude']<=latBin[x+1])]
                latData = data[(data[self.clatitude] > latSelMin)
                               & (data[self.clatitude] <= latSelMax)]
                latmean = latSelMin + (latStepLen / 2)
                for y in range(longBin.size):
                    if (y < longBin.size - 1):
                        lonSelMin = min(longBin[y], longBin[y + 1])
                        lonSelMax = max(longBin[y], longBin[y + 1])

                        #print(lonSelMin,lonSelMax)
                        #print((latData['longitude']>lonSelMin)&(latData['longitude']<=lonSelMax))

                        #areaData = latData[(latData['longitude']>longBin[y])&latData['longitude']<longBin[y+1]]
                        areaData = latData[
                            (latData[self.clongitude] > lonSelMin)
                            & (latData[self.clongitude] <= lonSelMax)]
                        #print(areaData.shape)
                        # group the areaData by category to get the mean and sum category
                        meanCat = areaData.groupby(
                            self.ccategorical)[quantity].mean().sort_values(
                                ascending=False)
                        sumCat = areaData.groupby(
                            self.ccategorical)[quantity].sum().sort_values(
                                ascending=False)
                        """
                        # get mean for the quantity area bin
                        quantmean = areaData[quantity].mean()
                        quantsum = areaData[quantity].sum()
                        """
                        if areaData.shape[0] > 0:
                            longmean = lonSelMin + (longStepLen / 2)
                            quantmeanArr.append(meanCat.max())
                            quantsumArr.append(sumCat.max())
                            #print(meanCat)
                            #print(sumCat)
                            quantBinArr.append({
                                'lat': latmean,
                                'long': longmean,
                                'mean': meanCat,
                                'sum': sumCat
                            })
                            """
                            quantmeanArr.append(quantmean)
                            quantsumArr.append(quantsum)
                            longmean = (longBin[y]+longBin[y+1])/2
                            quantBinArr.append({'lat': latmean, 'long': longmean, 'quantmean': quantmean, 'quantsum': quantsum})
                            """

        dataFig = plt.figure(figsize=(15, 15))
        loc_ax = dataFig.add_subplot(1, 1, 1)
        sm.visualize(ax=loc_ax)  # plot it

        #    loc_ax.set_title('Distribution of Most Common Trees accross Spatial Binning: {}x{} square'.format(xstep,ystep))
        loc_ax.set_xlabel('Longitude')
        loc_ax.set_ylabel('Latitude')

        minMean = np.array(quantmeanArr).min()
        maxMean = np.array(quantmeanArr).max()
        # calculate the scale
        # we scale it using scaleDim levels
        scale = (maxMean - minMean) / scaleDim

        #define color representation for each category
        cm = plt.get_cmap(color)
        colorArr = {}
        norm = mpl.colors.Normalize(vmin=0, vmax=len(category))

        patch_array = []
        for i in range(len(category)):
            color = cm(norm(i))
            colorArr[category[i]] = color
            patch_array.append(
                mpl.patches.Patch(color=color, label=category[i]))

        for quantBin in quantBinArr:
            x, y = sm.grid.transform(quantBin['long'], quantBin['lat'])
            scatter = loc_ax.scatter(x,
                                     y,
                                     s=(quantBin['mean'].values[0] / scale) *
                                     (longStepLen / 2) * scaleDim,
                                     c=colorArr[quantBin['mean'].index[0]],
                                     alpha=.75,
                                     edgecolors='none')
            #tooltip = plugins.PointHTMLTooltip(scatter, ['test'])
            #plugins.connect(dataFig, tooltip)

        scale_array = []
        scale_label = []
        # Make scale legend
        for i in range(scaleDim):
            #patch_array.append(mpl.patches.Patch(color='none',label=i,))
            label = '{0:.2f} < x <= {1:.2f}'.format(
                minMean + (scale * i), minMean + (scale * (i + 1)))
            scatter = plt.scatter([], [],
                                  s=(i + 1) * (longStepLen / 2) * scaleDim,
                                  marker='o',
                                  label=label,
                                  color='grey')
            #scatter = plt.plot([],[],markersize=(i+1)/scaleDim,marker='o',label=label)
            scale_array.append(scatter)
            scale_label.append(label)
            #patch_array.append(scatter.get_patches())
            #patch_array.append(mpl.lines.Line2D([],[],markersize=(i+1)/scaleDim,marker='o',label=label))

        # Legend and Title
        #legend2 = mpl.pyplot.legend(handles=scale_array, loc=1)

        legend2 = mpl.pyplot.legend(scale_array,
                                    scale_label,
                                    scatterpoints=1,
                                    loc='upper right',
                                    ncol=1,
                                    bbox_to_anchor=(1, 1)
                                    #,fontsize=8
                                    )

        #[ patch_array.append(x) for x in legend2.get_patches() ]

        #legend1 = mpl.pyplot.legend(handles=patch_array, loc=4,bbox_to_anchor=(1, 0.5))
        loc_ax.legend(handles=patch_array,
                      loc='center left',
                      bbox_to_anchor=(1, 0.5))

        mpl.pyplot.gca().add_artist(legend2)

        loc_ax.set_title('Quantity {} across Spatial Binning'.format(quantity))

        #mpld3.enable_notebook()

        return None
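
The nested-loop binning above can also be expressed with pandas; a minimal sketch of the same spatial-binning idea, with illustrative column names:

import numpy as np
import pandas as pd

# illustrative point data
df = pd.DataFrame({'lat': np.random.uniform(40, 41, 200),
                   'lon': np.random.uniform(-74, -73, 200),
                   'value': np.random.rand(200)})

# 20 bin edges per axis, then the mean value per (lat, lon) cell
lat_bins = np.linspace(df['lat'].min(), df['lat'].max(), 20)
lon_bins = np.linspace(df['lon'].min(), df['lon'].max(), 20)
binned = df.groupby([pd.cut(df['lat'], lat_bins),
                     pd.cut(df['lon'], lon_bins)])['value'].mean()
print(binned.dropna().head())
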
Example #13
"""
Topographic shading
===================

Add topographic shading to a plot

"""

from salem import mercator_grid, Map, get_demo_file
import matplotlib.pyplot as plt

# prepare the figure
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 7))

# map extent
grid = mercator_grid(center_ll=(10.76, 46.79), extent=(18000, 14000))
sm = Map(grid, countries=False)
sm.set_lonlat_contours(interval=0)

# add topography
fpath = get_demo_file('hef_srtm.tif')
sm.set_topography(fpath)
sm.visualize(ax=ax1, addcbar=False, title='relief_factor=0.7 (default)')

# stronger shading
sm.set_topography(fpath, relief_factor=1.4)
sm.visualize(ax=ax2, addcbar=False, title='relief_factor=1.4')

# add color shading
z = sm.set_topography(fpath)
sm.set_data(z)
sm.set_cmap('topo')