import cartopy as cy
import cartopy.crs as ccrs
import cartopy.feature  # makes cy.feature (BORDERS) available
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from cmcrameri import cm  # assumed source of the devon_r / bam / romaO colormaps


def plt_spatial_seasonal_mean(variable, variable_id, add_colorbar=None, title=None):
    """Plot the seasonal means of `variable` on a 2x2 grid of PlateCarree maps.

    Plot limits (vmin, vmax, levels) are looked up in the module-level `plt_dict`
    under `variable_id`.
    """
    fig, axsm = plt.subplots(
        2, 2, figsize=[10, 7], subplot_kw={"projection": ccrs.PlateCarree()}
    )
    fig.suptitle(title, fontsize=16, fontweight="bold")
    axs = axsm.flatten()
    for ax, i in zip(axs, variable.season):
        im = variable.sel(season=i,).plot.contourf(
            ax=ax,
            transform=ccrs.PlateCarree(),
            cmap=cm.devon_r,
            robust=True,
            vmin=plt_dict[variable_id][plt_dict["header"].index("vmin")],
            vmax=plt_dict[variable_id][plt_dict["header"].index("vmax")],
            levels=plt_dict[variable_id][plt_dict["header"].index("levels")],
            extend="max",
            add_colorbar=add_colorbar,
        )

        ax.coastlines()
        gl = ax.gridlines()
        ax.add_feature(cy.feature.BORDERS)
        gl.top_labels = False
        ax.set_title("season: {}".format(i.values))

    plt.tight_layout()
    fig.subplots_adjust(top=0.88)

    return (
        fig,
        axs,
        im,
    )
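# --- Usage sketch (added): not part of the original snippet. ---
# It assumes `plt_dict` maps a variable id to a row of plot settings whose column
# names are listed under plt_dict["header"]; the ids, limits, and the synthetic
# DataArray below are hypothetical and only illustrate the expected layout.
plt_dict = {
    "header": ["vmin", "vmax", "levels"],
    "pr": [0.0, 10.0, 11],  # hypothetical limits for a precipitation-like field
}

seasons = ["DJF", "MAM", "JJA", "SON"]
demo = xr.DataArray(
    np.random.rand(4, 45, 90) * 10.0,
    coords={
        "season": seasons,
        "lat": np.linspace(-88, 88, 45),
        "lon": np.linspace(-178, 178, 90),
    },
    dims=("season", "lat", "lon"),
    name="pr",
)

fig, axs, im = plt_spatial_seasonal_mean(
    demo, "pr", add_colorbar=True, title="Seasonal mean (synthetic demo)"
)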
    def process(self, channel_filter):
        ch = self.converter.convert_to(self.channels)

        ones_matrix = np.ones(ch[0].shape, dtype=np.float64)

        # For output image i, keep channel i and replace every other channel j
        # with a constant plane filled with channel_filter[i][j].
        ch_separated = [[
            ch[i] if j == i else ones_matrix * channel_filter[i][j]
            for j in range(self.converter.pixelspace)
        ] for i in range(self.converter.pixelspace)]

        ch_converted_separated = [
            np.array(self.converter.convert_from(c), dtype=np.float64)
            for c in ch_separated
        ]

        fig, axs = plt.subplots(2, 2)
        (ax1, ax2), (ax3, ax4) = axs
        ax = [ax1, ax2, ax3]

        for i, ch_conv in enumerate(ch_converted_separated):
            ax[i].axis('off')
            self.show_image(ax[i], ch_conv)
            extent = ax[i].get_window_extent().transformed(
                fig.dpi_scale_trans.inverted())
            plt.savefig("img" + str(i) + str(self.text) + ".png",
                        dpi=1000,
                        transparent=True,
                        bbox_inches=extent)
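# The `process` method above comes from a larger image-processing class that is
# not shown here. Below is a hedged sketch of the converter interface it appears
# to rely on; every name, signature, and behaviour is an assumption for
# illustration, not the original API.
class _AssumedConverter:
    """Hypothetical colorspace converter used by the `process` fragment."""

    pixelspace = 3  # assumed number of channels, e.g. RGB

    def convert_to(self, channels):
        # assumed: transform the input channels into the working colorspace
        return [np.asarray(c, dtype=np.float64) for c in channels]

    def convert_from(self, channels):
        # assumed: transform working-colorspace channels back for display
        return [np.asarray(c, dtype=np.float64) for c in channels]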
Example #3
def plt_twodhist_season(x,
                        y,
                        starty,
                        endy,
                        bins=None,
                        cmap=None,
                        range=None,
                        norm=None):
    """ Plots a two-dimensional histogram of variable x and y.
    
    Parameters:
    -----------
    x           : variable on x-axis
    y           : variable on y-axis
    starty      : string of analysis begin
    endy        : string of analysis end
    bisn        : None or int or [int, int] or array-like or [array, array]
    cmap        : Colormap or str
    range       : array-like shape(2, 2), optional,
    norm        : Normalize, optional
    """

    f, axs = plt.subplots(2, 2, figsize=[10, 10], sharex=True, sharey=True)

    for sea, ax in zip(
            x.groupby('time.season').sum('time').season, axs.flatten()):

        _pp = np.asarray(x.sel(time=(x['time.season'] == sea), ))
        _cl = np.asarray(y.sel(time=(y['time.season'] == sea), ))
        _p = _pp[~np.isnan(_pp)]
        _c = _cl[~np.isnan(_pp)]

        _p = _p[~np.isnan(_c)]
        _c = _c[~np.isnan(_c)]

        counts, xedges, yedges, im = ax.hist2d(
            _p,  # hist2d expects flat (N,) arrays
            _c,
            bins=bins,
            density=False,  # return raw counts per bin, not a probability density
            cmap=cmap,
            range=range,
            cmin=0.5,  # leave empty bins unfilled
            norm=norm)
        ax.set_title(sea.values)
        ax.set_xlabel('Precipitation')
        ax.set_ylabel('Mass Fraction of Cloud Liquid + Ice Water')

    f.subplots_adjust(top=.85, right=0.8)
    f.suptitle('2D Histogram ' + starty + '-' + endy, fontweight='bold')
    #f.suptitle("Precipitation vs. Cloud Mass")
    cbar_ax = f.add_axes([1.01, 0.15, 0.025, 0.7])
    cbar = f.colorbar(im, cax=cbar_ax)
    cbar.ax.set_ylabel('Counts')
    ax.ticklabel_format(style='sci', axis='both', scilimits=(1, 0))

    plt.tight_layout()
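# Usage sketch (added): the data below are synthetic daily series standing in for
# precipitation and cloud water; the variable names, bin counts, and the log
# colour norm are illustrative choices, not from the original source.
import matplotlib.colors as mcolors
import pandas as pd

_t = pd.date_range("1985-01-01", "2014-12-31", freq="D")
_pr_demo = xr.DataArray(np.random.gamma(1.0, 2.0, _t.size), coords={"time": _t}, dims="time")
_clw_demo = xr.DataArray(np.random.gamma(1.0, 0.05, _t.size), coords={"time": _t}, dims="time")

plt_twodhist_season(_pr_demo, _clw_demo, "1985", "2014",
                    bins=(20, 50), cmap="viridis", norm=mcolors.LogNorm())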
Example #4
def custom_plot(x, y):
    fig, ax = plt.subplots(figsize=(6, 4))
    ax.plot(x, y)
    ax.set_title('Example Plot')
    ax.set_ylabel('y label')
    ax.set_xlabel('x label')
    ax.grid()
    return ax
def plt_scatter_iwp_sf_seasonal(
    ds, linreg, iteration, step, title=None, xlim=None, ylim=None
):

    fig, axsm = plt.subplots(
        2,
        2,
        figsize=[10, 7],
        sharex=True,
        sharey=True,
    )
    fig.suptitle(title, fontsize=16, fontweight="bold")

    axs = axsm.flatten()
    for ax, i in zip(axs, ds.season):
        ax.grid()
        for _lat, c in zip(iteration, cm.romaO(range(0, 256, int(256 / len(iteration))))):
            # plot scatter
            ax.scatter(
                ds["iwp_{}_{}".format(_lat, _lat + step)].sel(season=i),
                ds["sf_{}_{}".format(_lat, _lat + step)].sel(season=i),
                label="{}, {}".format(_lat, _lat + step),
                color=c,
                alpha=0.5,
            )

            # plot regression line
            y = (
                np.linspace(0, 350)
                * linreg["slope_{}_{}".format(_lat, _lat + step)].sel(season=i).values
                + linreg["intercept_{}_{}".format(_lat, _lat + step)]
                .sel(season=i)
                .values
            )

            ax.plot(np.linspace(0, 350), y, color=c, linewidth=2)

        ax.set_ylabel("Snowfall (mm$\,$day$^{-1}$)", fontweight="bold")
        ax.set_xlabel("Ice Water Path (g$\,$m$^{-2}$)", fontweight="bold")
        ax.set_title(
            "season: {}; lat: ({}, {})".format(
                i.values, iteration[0], iteration[-1] + step
            )
        )
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)

    axs[1].legend(
        loc="upper left",
        bbox_to_anchor=(1, 1),
        fontsize="small",
        fancybox=True,
    )

    plt.tight_layout()
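# Usage sketch (added): `ds` is expected to hold scatter data in variables named
# "iwp_{lat}_{lat+step}" / "sf_{lat}_{lat+step}", and `linreg` matching
# "slope_..." / "intercept_..." variables, each with a "season" dimension. The
# latitude bands and synthetic values below are hypothetical.
seasons = ["DJF", "MAM", "JJA", "SON"]
lat_bands, step = [0, 15, 30, 45], 15
ds_demo = xr.Dataset(
    {f"{pfx}_{b}_{b + step}": (("season", "obs"), np.random.rand(4, 50) * scale)
     for b in lat_bands for pfx, scale in [("iwp", 350.0), ("sf", 8.0)]},
    coords={"season": seasons},
)
linreg_demo = xr.Dataset(
    {f"{pfx}_{b}_{b + step}": ("season", np.random.rand(4) * val)
     for b in lat_bands for pfx, val in [("slope", 0.02), ("intercept", 1.0)]},
    coords={"season": seasons},
)
plt_scatter_iwp_sf_seasonal(ds_demo, linreg_demo, lat_bands, step,
                            title="IWP vs. snowfall (synthetic demo)",
                            xlim=(0, 350), ylim=(0, 10))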
def plt_diff_seasonal(
    true_var, estimated_var, cbar_label, vmin=None, vmax=None, levels=None, title=None
):

    fig, axsm = plt.subplots(
        2, 2, figsize=[10, 7], subplot_kw={"projection": ccrs.PlateCarree()}
    )
    fig.suptitle(title, fontsize=16, fontweight="bold")

    axs = axsm.flatten()
    for ax, i in zip(axs, true_var.season):
        im = (
            (true_var - estimated_var)
            .sel(season=i)
            .plot.contourf(
                ax=ax,
                transform=ccrs.PlateCarree(),
                cmap=cm.bam,
                robust=True,
                add_colorbar=False,
                extend="both",
                vmin=vmin,
                vmax=vmax,
                levels=levels,
            )
        )
        # Plot cosmetics
        ax.coastlines()
        gl = ax.gridlines()
        ax.add_feature(cy.feature.BORDERS)
        gl.top_labels = False
        ax.set_title("season: {}".format(i.values))

    # colorbar
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([1, 0.15, 0.025, 0.7])
    cb = fig.colorbar(im, cax=cbar_ax, orientation="vertical", fraction=0.046, pad=0.04)
    # set cbar label
    cb.set_label(label=cbar_label, weight="bold")

    plt.tight_layout()
    fig.subplots_adjust(top=1)

    return axs
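# Usage sketch (added): compare a reference field with an estimate; both
# DataArrays below are synthetic stand-ins with the (season, lat, lon) layout
# the function expects.
coords = {"season": ["DJF", "MAM", "JJA", "SON"],
          "lat": np.linspace(-88, 88, 45),
          "lon": np.linspace(-178, 178, 90)}
truth = xr.DataArray(np.random.rand(4, 45, 90), coords=coords, dims=("season", "lat", "lon"))
estimate = truth + 0.1 * np.random.randn(4, 45, 90)
plt_diff_seasonal(truth, estimate, cbar_label="Difference (arbitrary units)",
                  vmin=-0.3, vmax=0.3, levels=13, title="True minus estimated")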
def plt_zonal_seasonal(variable_model, title=None, label=None):
    fig, axsm = plt.subplots(
        2,
        2,
        figsize=[10, 7],
        sharex=True,
        sharey=True,
    )

    fig.suptitle(title, fontsize=16, fontweight="bold")

    axs = axsm.flatten()
    for ax, i in zip(axs, variable_model.season):
        for k, c in zip(
            variable_model.model.values,
            cm.romaO(range(0, 256, int(256 / len(variable_model.model.values)))),
        ):
            variable_model.sel(season=i, model=k).plot(
                ax=ax,
                label=k,
                color=c,
            )

        ax.set_ylabel(label, fontweight="bold")
        ax.grid()
        ax.set_title("season: {}".format(i.values))

    axs[1].legend(
        loc="upper left",
        bbox_to_anchor=(1.0, 1),
        title=label,
        fontsize="small",
        fancybox=True,
    )

    return axs
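# Usage sketch (added): the function expects a DataArray with "season" and
# "model" dimensions plus one remaining dimension to plot against (latitude
# here); the model names and values are synthetic.
zonal_demo = xr.DataArray(
    np.random.rand(4, 3, 45),
    coords={"season": ["DJF", "MAM", "JJA", "SON"],
            "model": ["MODEL-A", "MODEL-B", "MODEL-C"],
            "lat": np.linspace(-88, 88, 45)},
    dims=("season", "model", "lat"),
)
plt_zonal_seasonal(zonal_demo, title="Zonal means (synthetic demo)", label="value")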
Example #8
def plot_bc_obsvsmodel(_summitbc,_alertbc):
    ## IPSL-CM6A-LR
    path = '~/shared-cmip6-for-ns1000k/historical/IPSL-CM6A-LR/r1i1p1f1/loadbc_Eday_IPSL-CM6A-LR_historical_r1i1p1f1_gr_18500101-20141231.nc'
    ds_ipsl = xr.open_dataset(path)
    ## CESM2
    path = '~/shared-cmip6-for-ns1000k/historical/CESM2/r1i1p1f1/loadbc_Eday_CESM2_historical_r1i1p1f1_gn_20100101-20150101.nc'
    ds_cesm = xr.open_dataset(path)
    ## CESM2-WACCM
    path = '~/shared-cmip6-for-ns1000k/historical/CESM2-WACCM/r1i1p1f1/loadbc_Eday_CESM2-WACCM_historical_r1i1p1f1_gn_20100101-20150101.nc'
    ds_waccm = xr.open_dataset(path)
    ## GFDL-CM4
    path = '~/shared-cmip6-for-ns1000k/historical/GFDL-CM4/r1i1p1f1/loadbc_Eday_GFDL-CM4_historical_r1i1p1f1_gr2_20100101-20141231.nc'
    ds_gfdl = xr.open_dataset(path)
    
    ## Define some variables to make it easier to call
    lat = 'lat'
    lon = 'lon'
    time = 'time'
    bc = 'loadbc' #black carbon loading
    mon = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'] #make a string of months to plot against
    
    ## Define geolocations for the two stations
    ## Summit
    sumlat = 72.58
    sumlon = -38.48
    ## Alert
    alelat = 82.5
    alelon = -62.34
    
    ## Select the two stations' data from the models (squeeze is used to reduce dimensions)
    ## For Summit
    ##Select grid box for the location
    sum_cesm = ds_cesm.sel(lat=sumlat,lon=sumlon,method='nearest').squeeze()
    sum_waccm = ds_waccm.sel(lat=sumlat,lon=sumlon,method='nearest').squeeze()
    sum_ipsl = ds_ipsl.sel(lat=sumlat,lon=sumlon,method='nearest').squeeze()
    sum_gfdl = ds_gfdl.sel(lat=sumlat,lon=sumlon,method='nearest').squeeze()
    ##Select the range 2010-2014
    sum_cesm = sum_cesm.sel(time=slice('2010-01-01','2014-12-31'))
    sum_waccm = sum_waccm.sel(time=slice('2010-01-01','2014-12-31'))
    sum_ipsl = sum_ipsl.sel(time=slice('2010-01-01','2014-12-31'))
    sum_gfdl = sum_gfdl.sel(time=slice('2010-01-01','2014-12-31'))
    ##Calculate the mean and standard deviation
    sum_cesm_mean = sum_cesm.groupby('time.month').mean()
    sum_cesm_std = sum_cesm.groupby('time.month').std()
    sum_waccm_mean = sum_waccm.groupby('time.month').mean()
    sum_waccm_std = sum_waccm.groupby('time.month').std()
    sum_ipsl_mean = sum_ipsl.groupby('time.month').mean()
    sum_ipsl_std = sum_ipsl.groupby('time.month').std()
    sum_gfdl_mean = sum_gfdl.groupby('time.month').mean()
    sum_gfdl_std = sum_gfdl.groupby('time.month').std()
    
    ## For Alert
    ##Select grid box for the location
    al_cesm = ds_cesm.sel(lat=alelat,lon=alelon,method='nearest').squeeze()
    al_waccm = ds_waccm.sel(lat=alelat,lon=alelon,method='nearest').squeeze()
    al_ipsl = ds_ipsl.sel(lat=alelat,lon=alelon,method='nearest').squeeze()
    al_gfdl = ds_gfdl.sel(lat=alelat,lon=alelon,method='nearest').squeeze()
    ##Select the range 2010-2014
    al_cesm = al_cesm.sel(time=slice('2010-01-01','2014-12-31'))
    al_waccm = al_waccm.sel(time=slice('2010-01-01','2014-12-31'))
    al_ipsl = al_ipsl.sel(time=slice('2010-01-01','2014-12-31'))
    al_gfdl = al_gfdl.sel(time=slice('2010-01-01','2014-12-31'))
    ##Calculate the mean and standard deviation
    al_cesm_mean = al_cesm.groupby('time.month').mean()
    al_cesm_std = al_cesm.groupby('time.month').std()
    al_waccm_mean = al_waccm.groupby('time.month').mean()
    al_waccm_std = al_waccm.groupby('time.month').std()
    al_ipsl_mean = al_ipsl.groupby('time.month').mean()
    al_ipsl_std = al_ipsl.groupby('time.month').std()
    al_gfdl_mean = al_gfdl.groupby('time.month').mean()
    al_gfdl_std = al_gfdl.groupby('time.month').std()   
    
    ## Plot the annual cycles together, two axes, with standard deviation bars for model data
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True,figsize=(16,5))

    ax1.plot(mon, _summitbc, linestyle='--', color='black', label='EBAS observed')
    ax1.set_ylabel('equivalent black carbon \n mass concentration [$\\mu g/m^3$]', fontsize=14)
    ax12 = ax1.twinx()
    ax12.errorbar(mon,sum_cesm_mean[bc]*1e9,sum_cesm_std[bc]*1e9,label='CESM2',capsize=5)
    ax12.errorbar(mon,sum_waccm_mean[bc]*1e9,sum_waccm_std[bc]*1e9,label='CESM2-WACCM',capsize=5)
    ax12.errorbar(mon,sum_ipsl_mean[bc]*1e9,sum_ipsl_std[bc]*1e9,label='IPSL-CM6A-LR',capsize=5)
    ax12.errorbar(mon,sum_gfdl_mean[bc]*1e9,sum_gfdl_std[bc]*1e9,label='GFDL-CM4',capsize=5)
    ax12.set_ylabel(r'black carbon loading [$\mu g/m^2$]', fontsize=14)
    ax1.set_title(r'Summit, 2010 ($72.58^{\circ}$ N, $38.48^{\circ}$ W)', fontsize=15)
    
    ax2.plot(mon, _alertbc, linestyle='--', color='black', label='EBAS observed')
    ax2.set_ylabel('equivalent black carbon \n mass concentration [$\\mu g/m^3$]', fontsize=14)
    ax22 = ax2.twinx()
    ax22.errorbar(mon,al_cesm_mean[bc]*1e9,al_cesm_std[bc]*1e9,label='CESM2',capsize=5)
    ax22.errorbar(mon,al_waccm_mean[bc]*1e9,al_waccm_std[bc]*1e9,label='CESM2-WACCM',capsize=5)
    ax22.errorbar(mon,al_ipsl_mean[bc]*1e9,al_ipsl_std[bc]*1e9,label='IPSL-CM6A-LR',capsize=5)
    ax22.errorbar(mon,al_gfdl_mean[bc]*1e9,al_gfdl_std[bc]*1e9,label='GFDL-CM4',capsize=5)
    ax22.set_ylabel(r'black carbon loading [$\mu g/m^2$]', fontsize=14)
    ax22.legend(bbox_to_anchor=(-0.14, 0.6))
    ax2.legend(bbox_to_anchor=(-0.145, 0.7))
    ax2.set_title(r'Alert, 2011-2012 ($82.50^{\circ}$ N, $62.34^{\circ}$ W)', fontsize=15)
    
    f.tight_layout()
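# Usage sketch (added): the function expects two 12-element monthly series of
# observed equivalent black carbon concentrations (January to December). The
# arrays below are synthetic placeholders for the EBAS observations at Summit
# and Alert. Note that the function opens hard-coded CMIP6 files under
# ~/shared-cmip6-for-ns1000k/, so it only runs where that storage is mounted.
summit_obs = 0.02 + 0.01 * np.random.rand(12)  # hypothetical values in ug/m3
alert_obs = 0.05 + 0.02 * np.random.rand(12)   # hypothetical values in ug/m3
plot_bc_obsvsmodel(summit_obs, alert_obs)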
Example #9
# %%
plt.plot([1, 2], [1, 2])

# %% [markdown]
# ## Scripts and Functions

# %% [markdown]
# link to
# [script](04-interaction.py)
# (python) version of this notebook
# (you need jupytext extension for it to work)

# %%
x = [1, 2, 3, 4, 5]
y = [1, 2, 3, 2, 1]
fig, ax = plt.subplots(figsize=(6, 4))
ax.plot(x, y)
ax.set_title('Example Plot')
ax.set_ylabel('y label')
ax.set_xlabel('x label')
ax.grid()

# %% [markdown]
# let's write a function for the code above


# %%
def custom_plot(x, y):
    fig, ax = plt.subplots(figsize=(6, 4))
    ax.plot(x, y)
    ax.set_title('Example Plot')
    ax.set_ylabel('y label')
    ax.set_xlabel('x label')
    ax.grid()
    return ax
Example #10
def sp_map(*nrs, projection=ccrs.PlateCarree(), **kwargs):
    """This creates a plot in PlateCarree"""

    return plt.subplots(*nrs, subplot_kw={'projection': projection}, **kwargs)
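# Usage sketch (added): a 1x2 panel of maps sharing the default PlateCarree projection.
fig, (ax_left, ax_right) = sp_map(1, 2, figsize=(10, 4))
ax_left.coastlines()
ax_right.coastlines()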
# 3. Repeat 1&2 for a realization of Gaussian white noise usingrandn that is set to have the same variance as your data. Inwhat ways, if any, is your data non-Gaussian?
# 4. Experiment with filtering your data and vfilt. Plot the data,the filtered version, and the residual (original minus filtered) fora few choices of filter length. What choice seems most suitablefor your data and why? Note if your data doesn't have noise ormultiple scales of varability, try working with {this one}.
# 5. Re-do the steps 1&2 involving the time-domain statistics, butusing firstly the smoothed, and secondly the residual, versionsof your data. How do the statistics change dependent upon thelowpass or highpass filtering? How do you interpret this?
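# A hedged Python sketch of step 3 (the exercise refers to Matlab's randn; numpy
# is used here instead): generate Gaussian white noise with the same variance as
# a 1-D data series and compare the basic time-domain statistics.
import numpy as np
from scipy import stats

def compare_with_white_noise(data):
    data = np.asarray(data, dtype=float)
    data = data[~np.isnan(data)]
    noise = data.mean() + data.std() * np.random.randn(data.size)
    for name, series in [("data", data), ("white noise", noise)]:
        print(f"{name}: mean={series.mean():.3g}, var={series.var():.3g}, "
              f"skew={stats.skew(series):.3g}, kurtosis={stats.kurtosis(series):.3g}")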

starty = '1985'; endy = '2014'

_clw = fn['clw'].clw.sel(time = slice(starty + '-01', endy + '-12')).sum('lev', keep_attrs= True)

_clw = _clw.where(_clw != 0)              # ignore all values = 0.
_clw = _clw.dropna('time', how = 'all')   # ignore nan
_clw.to_dataframe().describe()

# ### Compute and plot the histogram of your data.

# +
f, ax = plt.subplots(1,1, figsize=[8,5], )

_clw.to_dataframe().hist(ax = ax, bins=11)
ax.set_yscale('log')                      
# -

# There are many values around zero, so I decided on a log scale. This histogram shows the distribution of _CLW_ over 30 years at each model grid point. For cloud water, cloud ice, and precipitation the distributions usually look like this: many values near zero and far fewer at higher values. High _CLW_ values occur less often than low ones.

# ### Mean, Variance, Skewness, Kurtosis
# Compute the sample mean, variance, skewness, and kurtosis using Matlab's mean and std functions. 
# I chose to compute those values along the _time_ axis.

fct.plt_mean_std_var_skew_kurt(_clw)
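# A hedged sketch of how these statistics could be computed along the time axis
# with xarray and scipy; the actual implementation of fct.plt_mean_std_var_skew_kurt
# is not shown here, so this is an illustration rather than the original code.
import xarray as xr
from scipy import stats

_clw_mean = _clw.mean('time')
_clw_var = _clw.var('time')
_clw_skew = xr.apply_ufunc(stats.skew, _clw, input_core_dims=[['time']],
                           kwargs={'axis': -1, 'nan_policy': 'omit'})
_clw_kurt = xr.apply_ufunc(stats.kurtosis, _clw, input_core_dims=[['time']],
                           kwargs={'axis': -1, 'nan_policy': 'omit'})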

# From the mean plot we see that most of the _CLW_ occurs over the Southern Ocean, over China, west of the Andes, and in some spots in the Arctic such as the Labrador Sea and around Svalbard. To check whether this is realistic, I would compare it with reanalysis data such as ERA5.
# The standard deviation describes the variation in the data set. Most of the variation occurs over the Arctic, west of the Andes, and over China. The same is true for the variance.