# In[9]:


# Build an ocean mask from the SIC data and (optionally) plot it as a sanity check.
# NOTE(review): `da_sic` is defined in an earlier cell — presumably a (time, y, x)
# sea-ice-concentration DataArray; confirm against the cell that loads it.

# plot so we are sure this is going right
# Grid cells with a non-null SIC value are treated as ocean; land/missing are False.
ocnmask = da_sic.isel(time=-30).notnull()  # take a value near the end when not likely to have missing values
ocnmask.name = 'oceanmask'
#print(ocnmask)

# Manual toggle: set True to visually verify the mask and a sample SIC field.
PlotTest = False
if PlotTest:
    tmpsic=da_sic.isel(time=30) # save one time at random for plot verification
    #tmpsic=da_sic.mean('time')
    #print(tmpsic)

    # plot one time at random to ensure it is about right Nplots has to be one more than you'd think
    # Two polar-stereographic panels: raw SIC (left) and the derived ocean mask (right).
    (f, axes) = ice_plot.multi_polar_axis(ncols=2, nrows=1, Nplots = 3, sizefcter=3)
    tmpsic.plot.pcolormesh(cmap='Reds',ax=axes[0], x='lon', y='lat',transform=ccrs.PlateCarree())
    ocnmask.plot.pcolormesh(cmap='Reds',ax=axes[1], x='lon', y='lat',transform=ccrs.PlateCarree())


# # Climatology forecast

# In[ ]:


# Manual toggle for ad-hoc testing of the climatology-trend code.
# NOTE(review): flag is named `TestPlot` here but `PlotTest` in the cell above —
# likely an unintentional inconsistency; confirm before unifying.
TestPlot = False
if TestPlot:

    # equal to code in mertics.py put here for testing
    # Imports are only needed for the test path, so they live inside the guard.
    from scipy import stats
    import statsmodels.api as sm
# --- Example #2 (score: 0) --- scraper artifact separating pasted code samples;
# kept as a comment so the file remains valid Python.
def Update_PanArctic_Maps():
    """Generate and save pan-Arctic weekly forecast maps for all models.

    For each recent weekly initialization period and each forecast lead time,
    plots the observed field (when available) plus every predictive model on a
    grid of polar-stereographic subplots, one figure per (metric, init, lead).
    Figures already on disk are skipped unless they fall in the most recent
    ``NweeksUpdate`` periods. Afterwards the JSON index of figure dates is
    rewritten and per-init GIFs are regenerated via an external shell script.

    Relies on module-level imports (``xr``, ``np``, ``pd``, ``plt``,
    ``matplotlib``, ``sns``, ``ccrs``, ``ice_plot``, ``ed``, ``os``,
    ``datetime``, ``timeit``, ``json``, ``subprocess``) and on the file-level
    helpers ``update_status`` and ``get_figure_init_times``.
    """
    # Plotting Info
    runType = 'forecast'
    variables = ['sic']
    metrics_all = {'sic': ['anomaly', 'mean', 'SIP'], 'hi': ['mean']}
    updateAll = False
    # Exclude some models
    # NOTE(review): MME_NO is never read below — dead configuration? confirm.
    MME_NO = ['hcmr']

    # Define Init Periods here, spaced by 7 days (aprox a week)
    # Now
    cd = datetime.datetime.now()
    cd = datetime.datetime(cd.year, cd.month, cd.day)  # Set hour min sec to 0.
    # Hardcoded start date (makes incremental weeks always the same)
    start_t = datetime.datetime(1950, 1, 1)  # datetime.datetime(1950, 1, 1)
    # Params for this plot
    Ndays = 7  # time period to aggregate maps to (default is 7)
    Npers = 5  # 5 number of periods to plot (from current date) (default is 14)
    NweeksUpdate = 3  # 3 Always update the most recent NweeksUpdate periods
    init_slice = np.arange(
        start_t, cd, datetime.timedelta(days=Ndays)).astype('datetime64[ns]')
    init_slice = init_slice[
        -Npers:]  # Select only the last Npers of periods (weeks) since current date

    # Forecast times to plot
    weeks = pd.to_timedelta(np.arange(0, 5, 1), unit='W')
    months = pd.to_timedelta(np.arange(2, 12, 1), unit='M')
    years = pd.to_timedelta(np.arange(1, 2), unit='Y') - np.timedelta64(
        1, 'D')  # need 364 not 365
    slices = weeks.union(months).union(years).round('1d')
    da_slices = xr.DataArray(slices, dims=('fore_time'))
    da_slices.fore_time.values.astype('timedelta64[D]')
    print(da_slices)

    # Help conversion between "week/month" period used for figure naming and the actual forecast time delta value
    int_2_days_dict = dict(zip(np.arange(0, da_slices.size), da_slices.values))
    days_2_int_dict = {v: k for k, v in int_2_days_dict.items()}

    #############################################################
    # Load in Data
    #############################################################

    E = ed.EsioData.load()

    # add missing info for climatology
    E.model_color['climatology'] = (0, 0, 0)
    E.model_linestyle['climatology'] = '--'
    E.model_marker['climatology'] = '*'
    E.model['climatology'] = {'model_label': 'Clim. Trend'}
    E.icePredicted['climatology'] = True

    mod_dir = E.model_dir

    # Get median ice edge by DOY
    # NOTE(review): median_ice_fill, mean_1980_2010_sic and ds_81 are opened but
    # never used below — likely leftovers from an earlier version; confirm.
    median_ice_fill = xr.open_mfdataset(
        os.path.join(E.obs_dir, 'NSIDC_0051', 'agg_nc', 'ice_edge.nc')).sic
    # Get mean sic by DOY
    mean_1980_2010_sic = xr.open_dataset(
        os.path.join(E.obs_dir, 'NSIDC_0051', 'agg_nc',
                     'mean_1980_2010_sic.nc')).sic
    # Get average sip by DOY
    mean_1980_2010_SIP = xr.open_dataset(
        os.path.join(E.obs_dir, 'NSIDC_0051', 'agg_nc',
                     'hist_SIP_1980_2010.nc')).sip

    # Get recent observations
    ds_81 = xr.open_mfdataset(E.obs['NSIDC_0081']['sipn_nc'] + '_yearly/*.nc',
                              concat_dim='time',
                              autoclose=True,
                              parallel=True)  #,

    # Define models to plot
    models_2_plot = list(E.model.keys())
    models_2_plot = [
        x for x in models_2_plot
        if x not in ['piomas', 'MME', 'MME_NEW', 'uclsipn']
    ]  # remove some models
    models_2_plot = [x for x in models_2_plot
                     if E.icePredicted[x]]  # Only predictive models
    models_2_plot = ['MME'] + models_2_plot  # Add models to always plot at top
    models_2_plot.insert(
        1, models_2_plot.pop(-1))  # Move climatology from last to second

    # Get # of models and setup subplot dims
    Nmod = len(models_2_plot) + 1  #(+3 for obs, MME, and clim)
    Nc = int(np.floor(np.sqrt(Nmod)))
    # Max number of columns == 5 (plots get too small otherwise)
    Nc = 5  #np.min([Nc,5])
    Nr = int(np.ceil(Nmod / Nc))
    print(Nr, Nc, Nmod)
    assert Nc * Nr >= Nmod, 'Need more subplots'

    for cvar in variables:

        # Load in dask data from Zarr
        ds_ALL = xr.open_zarr(
            os.path.join(E.data_dir, 'model/zarr', cvar + '.zarr'))

        # Define fig dir and make if doesn't exist
        fig_dir = os.path.join(E.fig_dir, 'model', 'all_model', cvar,
                               'maps_weekly_NEW')
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir)

        # Make requested dataArray as specified above
        # status is NaN until a (init, lead) pair has been plotted.
        # np.nan (not the removed-in-NumPy-2.0 np.NaN alias).
        ds_status = xr.DataArray(np.ones(
            (init_slice.size, da_slices.size)) * np.nan,
                                 dims=('init_time', 'fore_time'),
                                 coords={
                                     'init_time': init_slice,
                                     'fore_time': da_slices
                                 })
        ds_status.name = 'status'
        ds_status = ds_status.to_dataset()

        # Check what plots we already have
        if not updateAll:
            print("Removing figures we have already made")
            ds_status = update_status(ds_status=ds_status,
                                      fig_dir=fig_dir,
                                      int_2_days_dict=int_2_days_dict,
                                      NweeksUpdate=NweeksUpdate)

        print(ds_status.status.values)
        # Drop IC/FT we have already plotted (orthogonal only)
        ds_status = ds_status.where(
            ds_status.status.sum(dim='fore_time') < ds_status.fore_time.size,
            drop=True)

        print("Starting plots...")
        # For each init_time we haven't plotted yet

        for it in ds_status.init_time.values:
            start_time_cmod = timeit.default_timer()
            print(it)
            it_start = it - np.timedelta64(Ndays, 'D') + np.timedelta64(
                1, 'D'
            )  # Start period for init period (it is end of period). Add 1 day because when
            # we select using slice(start,stop) it is inclusive of end points. So here we are defining the start of the init AND the start of the valid time.
            # So we need to add one day, so we don't double count.

            # For each forecast time we haven't plotted yet
            ft_to_plot = ds_status.sel(init_time=it)
            ft_to_plot = ft_to_plot.where(ft_to_plot.isnull(),
                                          drop=True).fore_time

            for ft in ft_to_plot.values:

                print(ft.astype('timedelta64[D]'))
                cs_str = format(days_2_int_dict[ft],
                                '02')  # Get index of current forecast week
                week_str = format(
                    int(ft.astype('timedelta64[D]').astype('int') / Ndays),
                    '02')  # Get string of current week
                cdoy_end = pd.to_datetime(it + ft).timetuple(
                ).tm_yday  # Get current day of year end for valid time
                cdoy_start = pd.to_datetime(it_start + ft).timetuple(
                ).tm_yday  # Get current day of year start for valid time
                it_yr = str(pd.to_datetime(it).year)
                it_m = str(pd.to_datetime(it).month)

                # Get datetime64 of valid time start and end
                valid_start = it_start + ft
                valid_end = it + ft

                # Loop through variable of interest + any metrics (i.e. SIP) based on that
                for metric in metrics_all[cvar]:

                    # Set up plotting info (colormap, colorbar label/limits per metric)
                    if cvar == 'sic':
                        if metric == 'mean':
                            cmap_c = matplotlib.colors.ListedColormap(
                                sns.color_palette("Blues_r", 10))
                            cmap_c.set_bad(color='lightgrey')
                            c_label = 'Sea Ice Concentration (-)'
                            c_vmin = 0
                            c_vmax = 1
                        elif metric == 'SIP':
                            cmap_c = matplotlib.colors.LinearSegmentedColormap.from_list(
                                "", ["white", "orange", "red", "#990000"])
                            cmap_c.set_bad(color='lightgrey')
                            c_label = 'Sea Ice Probability (-)'
                            c_vmin = 0
                            c_vmax = 1
                        elif metric == 'anomaly':
                            #                         cmap_c = matplotlib.colors.ListedColormap(sns.color_palette("coolwarm", 9))
                            cmap_c = matplotlib.colors.LinearSegmentedColormap.from_list(
                                "", ["red", "white", "blue"])
                            cmap_c.set_bad(color='lightgrey')
                            c_label = 'SIC Anomaly to 1980-2010 Mean'
                            c_vmin = -1
                            c_vmax = 1

                    elif cvar == 'hi':
                        if metric == 'mean':
                            cmap_c = matplotlib.colors.ListedColormap(
                                sns.color_palette("Reds_r", 10))
                            cmap_c.set_bad(color='lightgrey')
                            c_label = 'Sea Ice Thickness (m)'
                            c_vmin = 0
                            c_vmax = None
                    else:
                        raise ValueError("cvar not found.")

                    # New Plot
                    #start_time_plot = timeit.default_timer()
                    (f, axes) = ice_plot.multi_polar_axis(ncols=Nc,
                                                          nrows=Nr,
                                                          Nplots=Nmod)

                    ############################################################################
                    #                               OBSERVATIONS                               #
                    ############################################################################

                    # Plot Obs (if available)
                    ax_num = 0
                    axes[ax_num].set_title('Observed')

                    try:
                        da_obs_c = ds_ALL[metric].sel(model='Observed',
                                                      init_end=it,
                                                      fore_time=ft)
                        haveObs = True
                    except KeyError:
                        haveObs = False

                    # If obs then plot
                    if haveObs:

                        da_obs_c.plot.pcolormesh(ax=axes[ax_num],
                                                 x='lon',
                                                 y='lat',
                                                 transform=ccrs.PlateCarree(),
                                                 add_colorbar=False,
                                                 cmap=cmap_c,
                                                 vmin=c_vmin,
                                                 vmax=c_vmax)
                        axes[ax_num].set_title('Observed')
                    else:  # When were in the future (or obs are missing)
                        if metric == 'SIP':  # Plot this historical mean SIP
                            print("plotting hist obs SIP")
                            da_obs_c = mean_1980_2010_SIP.isel(
                                time=slice(cdoy_start, cdoy_end)).mean(
                                    dim='time')
                            da_obs_c.plot.pcolormesh(
                                ax=axes[ax_num],
                                x='lon',
                                y='lat',
                                transform=ccrs.PlateCarree(),
                                add_colorbar=False,
                                cmap=cmap_c,
                                vmin=c_vmin,
                                vmax=c_vmax)
                            axes[ax_num].set_title('Hist. Obs.')

                    ############################################################################
                    #                    Plot all models                                       #
                    ############################################################################
                    p = {}
                    for (i, cmod) in enumerate(models_2_plot):
                        #print(cmod)
                        i = i + 1  # shift for obs
                        axes[i].set_title(E.model[cmod]['model_label'])

                        # Select current model to plot
                        try:
                            ds_model = ds_ALL[metric].sel(model=cmod,
                                                          init_end=it,
                                                          fore_time=ft)
                            haveMod = True
                        except Exception:
                            # Was a bare except; narrowed so Ctrl-C / SystemExit
                            # still propagate while any data-selection failure
                            # (missing model/init/lead) is treated as "no data".
                            haveMod = False

                        # Plot
                        if haveMod:
                            p[i] = ds_model.plot.pcolormesh(
                                ax=axes[i],
                                x='lon',
                                y='lat',
                                transform=ccrs.PlateCarree(),
                                add_colorbar=False,
                                cmap=cmap_c,
                                vmin=c_vmin,
                                vmax=c_vmax)

                            axes[i].set_title(E.model[cmod]['model_label'])

                            # Clean up for current model
                            ds_model = None

                    # Make pretty
                    f.subplots_adjust(right=0.8)
                    cbar_ax = f.add_axes([0.85, 0.15, 0.05, 0.7])
                    if p:  # if its not empty
                        cbar = f.colorbar(
                            next(iter(p.values())), cax=cbar_ax,
                            label=c_label)  # use first plot to gen colorbar
                        if metric == 'anomaly':
                            cbar.set_ticks(np.arange(-1, 1.1, 0.2))
                        else:
                            cbar.set_ticks(np.arange(0, 1.1, 0.1))

                    # Set title of all plots
                    init_time_2 = pd.to_datetime(it).strftime('%Y-%m-%d')
                    init_time_1 = pd.to_datetime(it_start).strftime('%Y-%m-%d')
                    valid_time_2 = pd.to_datetime(it + ft).strftime('%Y-%m-%d')
                    valid_time_1 = pd.to_datetime(it_start +
                                                  ft).strftime('%Y-%m-%d')
                    plt.suptitle('Initialization Time: ' + init_time_1 +
                                 ' to ' + init_time_2 + '\n Valid Time: ' +
                                 valid_time_1 + ' to ' + valid_time_2,
                                 fontsize=15)  # +'\n Week '+week_str
                    plt.subplots_adjust(top=0.85)

                    # Save to file
                    f_out = os.path.join(
                        fig_dir, 'panArctic_' + metric + '_' + runType + '_' +
                        init_time_2 + '_' + cs_str + '.png')
                    f.savefig(f_out, bbox_inches='tight', dpi=200)
                    print("saved ", f_out)
                    #print("Figure took  ", (timeit.default_timer() - start_time_plot)/60, " minutes.")

                    # Mem clean up
                    p = None
                    plt.close(f)
                    da_obs_c = None

            # Done with current it
            print("Took ", (timeit.default_timer() - start_time_cmod) / 60,
                  " minutes.")

    # Update json file
    # NOTE(review): fig_dir here is whatever the LAST iteration of the cvar
    # loop set — fine while variables == ['sic'], but fragile if extended.
    json_format = get_figure_init_times(fig_dir)
    json_dict = [{"date": cd, "label": cd} for cd in json_format]

    json_f = os.path.join(fig_dir, 'plotdates_current.json')
    with open(json_f, 'w') as outfile:
        json.dump(json_dict, outfile)

    # Make into Gifs
    # TODO: make parallel, add &
    for cit in json_format:
        subprocess.call(
            str("/home/disk/sipn/nicway/python/ESIO/scripts/makeGif.sh " +
                fig_dir + " " + cit),
            shell=True)

    print("Finished plotting panArctic Maps.")
# --- Example #3 (score: 0) --- scraper artifact separating pasted code samples;
# kept as a comment so the file remains valid Python.
def Update_Evaluation_Maps(PaperPlots = False):

#     client = Client(n_workers=8)
#     client

    ### Paper Figure Parameters

    # Time period options
    Last_valid_time = np.datetime64('2018-11-30') # last day in valid time space to include in analysis. 
    # Use  np.datetime64('2018-11-30') to reproduce Paper.

    # Domain options to use for calculating metrics over
    customDomainName = 'ArcticSeas' 
    # Options:
    # 'ArcticSeas' will use the Arctic Seas domain shown in Supplemental information
    # 'X' where X is a model name to use as the domain (i.e. X='rasmesrl' because it is the smallest domain)

    ### Code

    metrics_all = ['anomaly','mean','SIP']
    runType = 'forecast'
    variables = ['sic']

    # Only use 'sic' here
    cvar = variables[0]

    # Get path data
    E = ed.EsioData.load()
    grid_dir = E.grid_dir

    # Load in regional data
    ds_region = xr.open_dataset(os.path.join(grid_dir, 'sio_2016_mask_Update.nc'))

    # Define fig dir and make if doesn't exist (Hard coded here)
    if PaperPlots:
        fig_dir = os.path.join('/home/disk/sipn/nicway/Nic/figures', 'model', 'MME', cvar, 'BSS')
    else:
        fig_dir = os.path.join('/home/disk/sipn/nicway/public_html/sipn/figures/model/Eval')

    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    # Load from Zarr
    ds_m = xr.open_zarr(os.path.join(E.data_dir,'model/zarr',cvar+'.zarr'))

    # Select only initlizations from January 2018 to November 2018 (for GRL paper)
    if PaperPlots:
        ds_m = ds_m.where(ds_m.valid_end<=Last_valid_time, drop=True)
    # print("Warning: using ALL data")

    # Drop models we don't want
    ds_ALL = ds_m.where(ds_m.model!='dampedAnomaly', drop=True)

    def remove_list(X, L):
        for cl in L:
            X.remove(cl)

    # Get recent observations
    ds_81 = xr.open_mfdataset(E.obs['NSIDC_0081']['sipn_nc']+'_yearly/*.nc', concat_dim='time', autoclose=True, parallel=True)

    # Hack to decode strings
    ds_ALL['model'] = [s.decode("utf-8") for s in ds_ALL.model.values]

    # # Drop models that we don't evaluate (i.e. monthly means)
    exl_mods = ['awispin','nicosipn','szapirosipn']

    # Order models to plot in
    models_first = ['climatology','dampedAnomalyTrend','MME']
    X = list(ds_ALL.model.values)
    [X.remove(a) for a in models_first]
    model_plot_order = models_first + X
    model_plot_order.remove('Observed')
    # model_plot_order



    # Get custom domain mask if we have defined it (i.e. not None)
    if customDomainName=='ArcticSeas':
        customDomain = None
        fig_ext = '' # Figure extention to separte domain names
    else:
        customDomain = ds_ALL.sel(model=customDomainName).isel(fore_time=0,init_end=40).SIP.notnull()
        # Drop coods
        customDomain = customDomain.drop(['fore_time','init_end','init_start','valid_end','valid_start','model'])
        if customDomain.sum().values==0:
            raise ValueError("Warning: you need to choose a fore_time and init_end that has model data represenative of its extent")
        fig_ext = customDomainName+'_' # Figure extention to separte domain names

    # Calculate the IIEE (Integrated Ice Edge Error)
    l = []
    for cmod in ds_ALL.model.values: 
        c_SIP_IIEE = metrics.IIEE(da_mod=ds_ALL.sel(model=cmod)['mean'], 
                                          da_obs=ds_ALL.sel(model='Observed')['mean'], 
                                          region=ds_region, 
                                          testplots=False,
                                          customDomain=customDomain)
        c_SIP_IIEE.coords['model'] = cmod
        l.append(c_SIP_IIEE)
    SIP_IIEE = xr.concat(l, dim='model')
    SIP_IIEE

    # For SIP, calculate the Brier Skill Score for panArctic 
    # BS = (SIP_model - SIP_observed)^2
    #
    l = []
    for cmod in ds_ALL.model.values: 
        c_SIP_BSS = metrics.BrierSkillScore(da_mod_sip=ds_ALL.sel(model=cmod).SIP, 
                                          da_obs_ip=ds_ALL.sel(model='Observed').SIP, 
                                          region=ds_region, 
                                          testplots=False,
                                          customDomain=customDomain)
        c_SIP_BSS.coords['model'] = cmod
        l.append(c_SIP_BSS)
    SIP_BSS = xr.concat(l, dim='model')





    def add_subplot_title(cmod, E, ax=None, BSS_val=''):
        if cmod in E.model.keys():
            ax.set_title((E.model[cmod]['model_label']).rstrip('*')+'\n('+BSS_val+')')
        else:
            ax.set_title(cmod)

    # add missing info for climatology
    E.model_color['climatology'] = (0,0,0)
    E.model_linestyle['climatology'] = '--'
    E.model_marker['climatology'] = '*'
    E.model['climatology'] = {'model_label':'Climatology\nTrend'}



    # Aggregate over space (x,y), including all pixels in valid Arctic seas (masked above with BrierSkillScore())
    BSS_agg = SIP_BSS.mean(dim=['x','y'])
    BSS_agg.load() # Compute and load result into memory

    ### At what lead time is the MME significantly (95%) better than the Damped Anomaly?

    from scipy import stats

    model_1 = 'MME'
    model_2 = 'dampedAnomalyTrend'
    alphaval = 0.05 # 95%

    t_all=[]
    p_all=[]
    cv_all=[]
    for ft in np.arange(0,BSS_agg.fore_time.size):
        x = BSS_agg.sel(model=model_1).isel(fore_time=ft)
        y = BSS_agg.sel(model=model_2).isel(fore_time=ft)

        x = x.where(x.notnull() & y.notnull(), drop=True)
        y = y.where(x.notnull() & y.notnull(), drop=True)

        df = x.size + y.size -2

        t, p = stats.ttest_ind(x, y, equal_var=False)
        cv = stats.t.ppf(1.0 - alphaval, df)

        t_all.append(t)
        p_all.append(p)
        cv_all.append(cv)


    plt.figure()
    plt.plot(BSS_agg.fore_time.values.astype('timedelta64[D]').astype(int)/7,
            abs(np.array(t_all)),'-k*', label='t-value')
    plt.plot(BSS_agg.fore_time.values.astype('timedelta64[D]').astype(int)/7,
            cv_all,'-r*', label='critical t-value')
    plt.ylabel('t-value')
    plt.legend()


    plt.figure()
    plt.plot(BSS_agg.fore_time.values.astype('timedelta64[D]').astype(int)/7,
            p_all,'-k*',label='p value')
    plt.plot(BSS_agg.fore_time.values.astype('timedelta64[D]').astype(int)/7,
            np.ones(len(p_all))*alphaval,'-r*',label='Alpha p value')
    plt.ylabel('p-value')
    plt.legend()

    # Read off what lead time in weeks where the black line crosses the red line

    # central_extent = [-3850000*0.6, 3725000*0.6, -5325000*0.45, 5850000*0.45] # (x0, x1, y0, y1
    # (f, axes) = ice_plot.multi_polar_axis(ncols=2, nrows=1, Nplots=2, 
    #                                       extent=central_extent, central_longitude=0)
    # f.set_size_inches(18.5, 10.5)

    # # Get Observed mean sea ice edge location mean(SIC in sept) > 0.15
    # # obs_SIP_OLD = ds_81['sic'].sel(time=slice(start_date_map,end_date_map)).mean(dim='time')>=0.15
    # # Fill in pole hole
    # # obs_SIP_OLD = obs_SIP_OLD.where(obs_SIP_OLD.hole_mask==0, other=1)

    # p1 = obs_SIP.plot.pcolormesh(ax=axes[0], x='lon', y='lat', 
    #                                       transform=ccrs.PlateCarree(),
    #                                       add_colorbar=False,
    #                                       cmap=cmap_c,
    #                                       vmin=c_vmin, vmax=c_vmax)
    # # ice_plot.remove_small_contours(po, thres=10**6)

    # obs_SIP_contour = obs_SIP.where((obs_SIP.lon < 179.1))

    # po = obs_SIP_contour.plot.contour(ax=axes[0], x='lon', y='lat',
    #                               transform=ccrs.PlateCarree(), #.NorthPolarStereo(central_longitude=-45),
    #                               colors=('k'),
    #                               linewidths=[1],
    #                               levels=[0.5])
    # # ice_plot.remove_small_contours(po, thres=10**6)

    ### Plot BS spatial plots valid for Summer months (Figure 3)

    # Remove some select models
    enough_init = model_plot_order.copy()
    [enough_init.remove(a) for a in exl_mods]
    print(enough_init)

    # Define lead time to plot
    week_lead_time = 4

    period_dict = {'June':[np.datetime64('2018-06-01'),np.datetime64('2018-07-01')],
                  'July':[np.datetime64('2018-07-01'),np.datetime64('2018-08-01')],
                  'August':[np.datetime64('2018-08-01'),np.datetime64('2018-09-01')],
                  'September':[np.datetime64('2018-09-01'),np.datetime64('2018-10-01')]}

    for period in period_dict:
        print(period)
        start_date_map = period_dict[period][0]
        end_date_map = period_dict[period][1]    

        # Select time slice of valid
        BS_Sept = SIP_BSS.where( (SIP_BSS.valid_start>=start_date_map) & 
                      (SIP_BSS.valid_start<=end_date_map), drop=True)

        # Average over valid time 
        SIP_BSS_init_avg = BS_Sept.sel(model=enough_init).mean(dim='init_end')

        sns.set_context("talk", font_scale=.8, rc={"lines.linewidth": 2.5})

        # Set up color maps
        cmap_c = matplotlib.colors.LinearSegmentedColormap.from_list("", ["white","orange","red","#990000"], N=10)
        cmap_c.set_bad(color = 'lightgrey')
        c_label = 'BS (0=best, 1=worst)'
        c_vmin = 0
        c_vmax = 1

        for ft in [SIP_BSS_init_avg.fore_time.values[week_lead_time]]:  

            # Grab current lead time
            c_ft_ds = SIP_BSS_init_avg.sel(fore_time=ft)

            # Drop models without any data
            c_ft_ds = c_ft_ds.where(c_ft_ds.notnull().sum(dim=['x','y'])>0, drop=True)

            nrows = np.int(np.ceil(np.sqrt(c_ft_ds.model.size))) - 1
            ncols = int(np.ceil(c_ft_ds.model.size/nrows))
            Nplots = c_ft_ds.model.size + 1
            #print(Nplots)

            # New Plot
            central_extent = [-3850000*0.6, 3725000*0.6, -5325000*0.45, 5850000*0.45] # (x0, x1, y0, y1
            (f, axes) = ice_plot.multi_polar_axis(ncols=ncols, nrows=nrows, Nplots=Nplots, 
                                                  extent=central_extent, central_longitude=0)


    #         # Get Observed mean sea ice edge location mean(SIC in sept) > 0.15
    #         obs_SIP = ds_81['sic'].sel(time=slice(start_date_map,end_date_map)).mean(dim='time')>=0.15
    #         # Fill in pole hole
    #         obs_SIP = obs_SIP.where(obs_SIP.hole_mask==0, other=1)

            obs_SIP = ds_ALL.sel(model='Observed').SIP

            # Select time slice of valid
            obs_SIP = obs_SIP.where( (obs_SIP.valid_start>=start_date_map) & 
                          (obs_SIP.valid_start<=end_date_map), drop=True)

            # Average over valid time 
            obs_SIP = obs_SIP.mean(dim='init_end').sel(fore_time=ft)

            for (i, cmod) in enumerate(c_ft_ds.model.values):
                if cmod in c_ft_ds.model.values:
                    # Plot
                    add_subplot_title(cmod, E, ax=axes[i])
                    p = c_ft_ds.sel(model=cmod).plot.pcolormesh(ax=axes[i], x='lon', y='lat', 
                                          transform=ccrs.PlateCarree(),
                                          add_colorbar=False,
                                          cmap=cmap_c,
                                          vmin=c_vmin, vmax=c_vmax)

                    # Need to clip obs so contour handles wrap around 180 correctly
                    obs_SIP_contour = obs_SIP.where((obs_SIP.lon < 179.1))
                    po = obs_SIP_contour.plot.contour(ax=axes[i], x='lon', y='lat',
                                          transform=ccrs.PlateCarree(), #.NorthPolarStereo(central_longitude=-45),
                                          colors=('k'),
                                          linewidths=[1],
                                          levels=[0.5])

                    add_subplot_title(cmod, E, ax=axes[i], BSS_val='{0:.3f}'.format(c_ft_ds.sel(model=cmod).mean(dim=['x','y']).load().item()))

            # Make pretty
            cbar_ax = f.add_axes([0.2, 0.05, .5, 0.04]) #  [left, bottom, width, height] w
            cbar = f.colorbar(p, cax=cbar_ax, label=c_label, orientation='horizontal')
            cbar.set_ticks(np.arange(-1,1.1,0.2))

            # Set title of all plots
            lead_time_days = str(ft.astype('timedelta64[D]').astype(int))
            #print(lead_time_days)

            if not PaperPlots: # only add for website plots
                cbar_ax.text(0.35, 1.1, 'Wayand et al. (2019)', fontsize=12)

            valid_start_str = pd.to_datetime(start_date_map).strftime('%Y-%m-%d')
            valid_end_str = pd.to_datetime(end_date_map).strftime('%Y-%m-%d')

            # Save to file
            f_out = os.path.join(fig_dir,fig_ext+'BSS_Avg_Valid_'+valid_start_str+'_to_'+valid_end_str+'_'+lead_time_days.zfill(3)+'_day_lead_time.png')
            f.savefig(f_out,bbox_inches='tight', dpi=300)



    ### Plot Brier Score vs lead time

    min_N_samples = 10 # Min number of samples to allow for mean
    # Average Brier score over all initialization dates -> skill as a function of lead time only
    BSS_agg_init = BSS_agg.mean(dim='init_end')
    sns.set_style("whitegrid")

    sns.set_context("talk", font_scale=1.5, rc={"lines.linewidth": 2.5})

    # Get sample size for each lead time (count of init dates with a valid MME forecast)
    for_sample = BSS_agg.sel(model='MME').notnull().sum(dim='init_end')

    # Use threshold of sample size to cut off lead times (converted from days to weeks)
    max_lead = for_sample.where(for_sample>=min_N_samples,drop=True).fore_time.max().values.astype('timedelta64[D]').astype(int)/7

    f = plt.figure(figsize=(10,10))
    NM = 10
    # Main BS axis on top; a thin sample-size axis occupies the bottom row
    ax1 = plt.subplot2grid((NM, NM), (0, 0), colspan=NM, rowspan=NM-1)
    ax2 = plt.subplot2grid((NM, NM), (NM-1, 0), colspan=NM, rowspan=1)

    for cmod in model_plot_order:
        if cmod in exl_mods:
            continue
        # Get model plotting specs
        cc = E.model_color[cmod]
        cl = E.model_linestyle[cmod]
        cm = E.model_marker[cmod]
        if cmod=='rasmesrl':
            cflag = '*'  # flag this model in the legend
        else:
            cflag = ''

        if cmod in ['MME','dampedAnomalyTrend','climatology']:
            lw=5  # emphasize benchmark/reference forecasts
        else:
            lw = 2

        ax1.plot(BSS_agg_init.fore_time.values.astype('timedelta64[D]').astype(int)/7,
                BSS_agg_init.sel(model=cmod).values, label=E.model[cmod]['model_label'].rstrip('*')+cflag,
                color=cc,
                linestyle=cl,
                linewidth=lw,
                marker=cm)
    ax1.legend(loc='lower right', bbox_to_anchor=(1.4, 0))
    ax1.set_ylabel('Pan-Arctic BS (-)')
    ax1.set_xlim([-0.5,max_lead])
    ax1.set_xticklabels([''])

    # second axis: sample size per lead time
    ax2.plot(for_sample.fore_time.values.astype('timedelta64[D]').astype(int)/7,
             for_sample.values, '-ko')
    ax2.set_ylabel('#\nweeks')
    ax2.set_xlabel('Lead time (Weeks)')
    ax2.set_xlim(ax1.get_xlim());

    ax2.set_ylim([0,for_sample.max()+5]);
    ax2.set_yticks(np.arange(0,for_sample.max()+5,15));

    if not PaperPlots: # only add for website plots
        ax1.text(17, 0.025, 'Wayand et al. (2019)', fontsize=12)

    # Save to file
    f_out = os.path.join(fig_dir,fig_ext+'BSS_by_lead_time_PanArctic.png')
    f.savefig(f_out,bbox_inches='tight', dpi=300)
    
    
    
    

    ### Plot Brier Score vs lead time (Figure 1)
    print(BSS_agg_init.model.values)
    # Models that will be averaged into the cross-model "BS Mean" curve
    # (benchmarks MME/dampedAnomalyTrend/climatology are excluded below)
    mean_models = []

    min_N_samples = 10 # Min number of samples to allow for mean
    BSS_agg_init = BSS_agg.mean(dim='init_end')
    sns.set_style("whitegrid")

    sns.set_context("talk", font_scale=1.5, rc={"lines.linewidth": 2.5})

    # Get sample size of for each lead time
    for_sample = BSS_agg.sel(model='MME').notnull().sum(dim='init_end')

    # Use threshold of sample size to cut off lead times
    max_lead = for_sample.where(for_sample>=min_N_samples,drop=True).fore_time.max().values.astype('timedelta64[D]').astype(int)/7

    f = plt.figure(figsize=(15,10))
    NM = 10
    # Main BS axis on top; thin sample-size axis on the bottom row
    ax1 = plt.subplot2grid((NM, NM), (0, 0), colspan=NM, rowspan=NM-1)
    ax2 = plt.subplot2grid((NM, NM), (NM-1, 0), colspan=NM, rowspan=1)

    for cmod in model_plot_order:
        if cmod in exl_mods:
            continue
        # Get model plotting specs
        cc = E.model_color[cmod]
        cl = E.model_linestyle[cmod]
        cm = E.model_marker[cmod]
        if cmod=='rasmesrl':
            cflag = '*'
        else:
            cflag = ''

        if cmod in ['MME','dampedAnomalyTrend','climatology']:
            lw=5
        else:
            lw = 2
            # only non-benchmark models enter the cross-model mean
            mean_models.append(cmod)

        ax1.plot(BSS_agg_init.fore_time.values.astype('timedelta64[D]').astype(int)/7,
                BSS_agg_init.sel(model=cmod).values, label=E.model[cmod]['model_label'].rstrip('*')+cflag,
                color=cc,
                linestyle=cl,
                linewidth=lw,
                marker=cm)

    # Plot the mean of BS across models 
    ax1.plot(BSS_agg_init.fore_time.values.astype('timedelta64[D]').astype(int)/7,
                BSS_agg_init.sel(model=mean_models).mean(dim='model').values, label='BS Mean',
                color='r',
                linestyle='-',
                linewidth=2,
                marker='d')

    ax1.legend(loc='lower right', bbox_to_anchor=(1.285, -0.1))
    ax1.set_ylabel('Pan-Arctic BS (-)')
    ax1.set_xlim([-0.5,max_lead])
    ax1.set_xticklabels([''])
    ax1.set_ylim([0.02,0.11])

    # second axis: sample size per lead time
    ax2.plot(for_sample.fore_time.values.astype('timedelta64[D]').astype(int)/7,
             for_sample.values, '-ko')
    ax2.set_ylabel('#\nweeks   ')
    ax2.set_xlabel('Lead time (Weeks)')
    ax2.set_xlim(ax1.get_xlim());

    ax2.set_ylim([0,for_sample.max()+5]);
    ax2.set_yticks(np.arange(0,for_sample.max()+5,15));

    if not PaperPlots: # only add for website plots
        ax1.text(17, 0.025, 'Wayand et al. (2019)', fontsize=12)

    # Save to file
    f_out = os.path.join(fig_dir,fig_ext+'BSS_by_lead_time_PanArctic_New.png')
    f.savefig(f_out,bbox_inches='tight', dpi=200)
    
    
    

    ### Plot the IIEE with lead time (SI)
    # IIEE = Integrated Ice Edge Error; same layout as the BS-vs-lead-time figure.

    # Force computation once up front (avoids recomputing the lazy array per model)
    SIP_IIEE.load()

    min_N_samples = 10 # Min number of samples to allow for mean
    SIP_IIEE_init = SIP_IIEE.mean(dim='init_end')
    sns.set_style("whitegrid")

    sns.set_context("talk", font_scale=1.5, rc={"lines.linewidth": 2.5})

    # Get sample size of for each lead time
    for_sample = SIP_IIEE.sel(model='MME').notnull().sum(dim='init_end')

    # Use threshold of sample size to cut off lead times
    max_lead = for_sample.where(for_sample>=min_N_samples,drop=True).fore_time.max().values.astype('timedelta64[D]').astype(int)/7

    f = plt.figure(figsize=(10,10))
    NM = 10
    ax1 = plt.subplot2grid((NM, NM), (0, 0), colspan=NM, rowspan=NM-1)
    ax2 = plt.subplot2grid((NM, NM), (NM-1, 0), colspan=NM, rowspan=1)

    for cmod in model_plot_order:
        if cmod in exl_mods:
            continue
        # Get model plotting specs
        cc = E.model_color[cmod]
        cl = E.model_linestyle[cmod]
        cm = E.model_marker[cmod]
        if cmod=='rasmesrl':
            cflag = '*'
        else:
            cflag = ''
        if cmod in ['MME','dampedAnomalyTrend','climatology']:
            lw=5
        else:
            lw = 2

        ax1.plot(SIP_IIEE_init.fore_time.values.astype('timedelta64[D]').astype(int)/7,
                SIP_IIEE_init.sel(model=cmod).values, label=E.model[cmod]['model_label'].rstrip('*')+cflag,
                color=cc,
                linestyle=cl,
                linewidth=lw,
                marker=cm)
    ax1.legend(loc='lower right', bbox_to_anchor=(1.4, 0))
    ax1.set_ylabel('IIEE (Millions of km$^2$)')
    ax1.set_xlim([-0.5,max_lead])
    ax1.set_xticklabels([''])

    # second axis: sample size per lead time
    ax2.plot(for_sample.fore_time.values.astype('timedelta64[D]').astype(int)/7,
             for_sample.values, '-ko')
    ax2.set_ylabel('#\nweeks')
    ax2.set_xlabel('Lead time (Weeks)')
    ax2.set_xlim(ax1.get_xlim());

    ax2.set_ylim([0,for_sample.max()+5]);
    ax2.set_yticks(np.arange(0,for_sample.max()+5,15));

    if not PaperPlots: # only add for website plots
#        ax1.text(np.datetime64('2018-01-01'), 0.025, 'Wayand et al. (in review)', fontsize=12)
        ax1.text(7, 0.25, 'Wayand et al. (2019)', fontsize=12)


    # Save to file
    f_out = os.path.join(fig_dir,fig_ext+'IIEE_by_lead_time_PanArctic.png')
    f.savefig(f_out,bbox_inches='tight', dpi=200)







    ### Define DA methods for each model

    # Data-assimilation method per model, copied from Table 1 of the paper.
    # NG = nudging, DI = direct insertion, 3DVAR / EnKF = variational / ensemble DA.
    DA_dict = {
    'modcansipns_3':'SIC (NG)',
    'modcansipns_4':'SIC (NG)',
    'ecmwfsipn':'SIC (3DVAR)',
    'ecmwf':'SIC (3DVAR)',
    'yopp':'SIC (3DVAR)',
    'gfdlsipn':'No Sea Ice DA',
    'metreofr':'SIC (EnKF)',
    'szapirosipn':'No Sea Ice DA',
    'ncep-exp-bias-corr':'SIC (NG)',
    'noaasipn':'SIC (NG)',
    'usnavysipn':'SIC (3DVAR)',
    'usnavyncep':'SIC (3DVAR)',
    'usnavygofs':'SIC (3DVAR)',
    'rasmesrl':'SIC (DI), SIT* (DI)',
    'uclsipn':'No Sea Ice DA',
    'ukmetofficesipn':'SIC (3DVAR)',
    'ukmo':'SIC (3DVAR)',
    'ncep':'SIC (NG)',
    'kma':'SIC (3DVAR)'
    }

    # Add models not listed in Table 1
    DA_dict['climatology'] = 'No Sea Ice DA'
    DA_dict['dampedAnomalyTrend'] = 'SIC (DI)'
    DA_dict['MME'] = 'MME'

    # Unique DA categories, reordered from simple to complex for the x axis.
    # NOTE(review): the reorder uses hard-coded indices into the *sorted* unique
    # list, so it silently produces a wrong order if a DA category is ever
    # added to or removed from DA_dict — revisit if the model set changes.
    DA_options = sorted(list(set(DA_dict.values())))
    DA_options = [DA_options[1],  DA_options[3], DA_options[4], DA_options[6], DA_options[2], DA_options[5],DA_options[0],] # Reorder from simple to complex

    # Map each DA category to its x-axis position
    DA_options_i = np.arange(len(DA_options))
    DA_options_dict = dict(zip(DA_options,DA_options_i))

    ### Plot BS by DA method (Figure 4)

    # In place a multi lead times

    # Lead times to plot (indices into fore_time, i.e. weeks 1-5)
    leads2plot = [0,1,2,3,4] # indices
    sns.set_style("whitegrid")

    sns.set_context("talk", font_scale=1, rc={"lines.linewidth": 2.5})
    f, axes = plt.subplots(1, 1, figsize=(9, 5))

    for cmod in BSS_agg.model.values:
        if cmod in DA_dict.keys():
            # Get model plotting specs
            cc = E.model_color[cmod]
            cl = E.model_linestyle[cmod]
            cm = E.model_marker[cmod]
            if cmod=='MME':
                lw=4
            else:
                lw=2

            # Mean BS per lead time for this model (one value per entry of leads2plot)
            BSS_init = BSS_agg.sel(model=cmod).isel(fore_time=leads2plot).mean(dim='init_end').load()

            #rand_jit = np.random.randint(-100,100)/1000*2
            # Spread the model's lead-time points horizontally within its DA
            # category slot (offset 0..0.75) so overlapping models stay readable
            c_x = np.linspace(DA_options_dict[DA_dict[cmod]],
                              DA_options_dict[DA_dict[cmod]]+0.75,
                              len(leads2plot))
            #print(c_x)

            axes.plot(c_x, 
                    BSS_init.values,
                    color=cc,
                    linestyle='-',
                    linewidth=lw,
                    marker=cm,
                    label=E.model[cmod]['model_label'].rstrip('*'))
        else:
            print(cmod,"not in dict")
    axes.set_xticks(DA_options_i)
    axes.set_xticklabels(DA_options, rotation='45', ha='right')

    plt.legend(loc='lower right', bbox_to_anchor=(1.36, -.25))
    plt.ylabel('Pan-Arctic BS (-)')

    if not PaperPlots: # only add for website plots
        axes.text(4.2, 0.081, 'Based on Wayand et al. (2019)', fontsize=12)

    # Save to file
    f_out = os.path.join(fig_dir,fig_ext+'BSS_week_Multi_by_DA_Type.png')
    f.savefig(f_out,bbox_inches='tight', dpi=200)



    ### Plot BS by initialization time (Figure 2)

    sns.set_style("whitegrid")

    # Only lead-time index 4 is plotted; the loop form is kept for easy extension
    for ft_i in [4]:
        BSS_agg_fore = BSS_agg.isel(fore_time=ft_i)

        sns.set_context("talk", font_scale=1.5, rc={"lines.linewidth": 2.5})
        f = plt.figure(figsize=(10,10))
        for cmod in model_plot_order:
            # Get model plotting specs
            cc = E.model_color[cmod]
            cl = E.model_linestyle[cmod]
            cm = E.model_marker[cmod]
            if cmod in ['MME','dampedAnomalyTrend','climatology']:
                lw=5
            else:
                lw = 2
            # Skip models with no data at this lead time
            if BSS_agg_fore.sel(model=cmod).notnull().sum().values==0:
                continue # Don't plot
            plt.plot(BSS_agg_fore.init_end.values,
                    BSS_agg_fore.sel(model=cmod).values, label=E.model[cmod]['model_label'].rstrip('*'),
                    color=cc,
                    linestyle=cl,
                    linewidth=lw,
                    marker=cm)
        plt.legend(loc='lower right', bbox_to_anchor=(1.4, -0.1))
        plt.ylabel('Pan-Arctic BS (-)')
        plt.xlabel('Initialization date')
        #plt.title(BSS_agg_fore.fore_time.values.astype('timedelta64[D]').astype(int))
        f.autofmt_xdate()


        if not PaperPlots: # only add for website plots
            plt.text(np.datetime64('2018-01-01'), 0.163, 'Based on Wayand et al. (2019)', fontsize=12)
        # Save to file
        f_out = os.path.join(fig_dir,fig_ext+'BSS_by_init_time_'+str(BSS_agg_fore.fore_time.values.astype('timedelta64[D]').astype(int))+'_days.png')
        f.savefig(f_out,bbox_inches='tight', dpi=200)

        # NOTE(review): this runs once per ft_i iteration — confirm it was not
        # intended to sit after the loop.
        print("Finished Eval_Weekly")
            # NOTE(review): this fragment is indented deeper than the code
            # preceding it and appears to be an orphaned snippet from another
            # function (likely a scrape/merge artifact) — as written the file
            # will not parse here; verify against the original repository.
            # For each region
            for cR in ds_region.ocean_regions.values:

                # Get regional extent to plot
                crExt = ds_region.where(ds_region.mask == cR,
                                        drop=True)[['xm', 'ym']]
                crExt = [
                    crExt.xm.min().values,
                    crExt.xm.max().values,
                    crExt.ym.min().values,
                    crExt.ym.max().values
                ]

                # New Plot
                (f, axes) = ice_plot.multi_polar_axis(ncols=Nc,
                                                      nrows=Nr,
                                                      Nplots=Nmod,
                                                      extent=crExt)
                p = None  # initialize to know if we found any data
                for (i, cmod) in enumerate(models_2_plot):
                    i = i + 1  # shift for obs
                    axes[i].set_title(E.model[cmod]['model_label'])

                    # Load in Model
                    # TODO: try filtering file list by year and month
                    model_forecast = os.path.join(
                        E.model[cmod][runType]['sipn_nc'], '*.nc')

                    # Check we have files
                    files = glob.glob(model_forecast)
                    if not files:
                        continue  # Skip this model
# Example #5 (scrape/concatenation artifact — not valid Python; kept as a comment)
# 0
def Update_Regional_Maps():
    """Generate regional forecast maps (observations plus every model) for each
    init/forecast time pair not yet plotted, then refresh the website's JSON
    index of available plot dates.

    Relies on module-level state defined earlier in the file: init_slice,
    da_slices, updateAll, fig_dir, int_2_days_dict, days_2_int_dict,
    NweeksUpdate, Ndays, cvar, metrics_all, models_2_plot_master, reg2plot,
    ds_region, ds_ALL, ds_status helpers, E (EsioData), runType,
    mean_1980_2010_SIP, PaperPlots-style flags, etc.
    """

    # Make requested dataArray as specified above
    # (NaN = not yet plotted; status tracking grid of init_time x fore_time)
    # NOTE(review): np.NaN alias was removed in NumPy 2.0 — use np.nan if upgrading.
    ds_status = xr.DataArray(np.ones(
        (init_slice.size, da_slices.size)) * np.NaN,
                             dims=('init_time', 'fore_time'),
                             coords={
                                 'init_time': init_slice,
                                 'fore_time': da_slices
                             })
    ds_status.name = 'status'
    ds_status = ds_status.to_dataset()

    # Check what plots we already have
    if not updateAll:
        print("Removing figures we have already made")
        ds_status = update_status(ds_status=ds_status,
                                  fig_dir=fig_dir,
                                  int_2_days_dict=int_2_days_dict,
                                  NweeksUpdate=NweeksUpdate)

    print(ds_status.status.values)
    # Drop IC/FT we have already plotted (orthoginal only)
    ds_status = ds_status.where(
        ds_status.status.sum(dim='fore_time') < ds_status.fore_time.size,
        drop=True)

    print("Starting plots...")
    # For each init_time we haven't plotted yet

    for it in ds_status.init_time.values:
        start_time_cmod = timeit.default_timer()
        print(it)
        it_start = it - np.timedelta64(Ndays, 'D') + np.timedelta64(
            1, 'D'
        )  # Start period for init period (it is end of period). Add 1 day because when
        # we select using slice(start,stop) it is inclusive of end points. So here we are defining the start of the init AND the start of the valid time.
        # So we need to add one day, so we don't double count.

        # For each forecast time we haven't plotted yet
        ft_to_plot = ds_status.sel(init_time=it)
        ft_to_plot = ft_to_plot.where(ft_to_plot.isnull(), drop=True).fore_time

        print('all forecast times to be plotted, ft_to_plot ',
              ft_to_plot.values)

        for ft in ft_to_plot.values:

            print('Processing forecast time: ', ft.astype('timedelta64[D]'))

            ift = days_2_int_dict[ft]  # index of ft
            cs_str = format(days_2_int_dict[ft],
                            '02')  # Get index of current forcast week
            # NOTE(review): iweek is not defined anywhere in this function —
            # assumed to be a module-level global; otherwise this line raises
            # NameError. Verify against the full file.
            week_str = format(iweek, '02')  # Get string of current week
            cdoy_end = pd.to_datetime(it + ft).timetuple(
            ).tm_yday  # Get current day of year end for valid time
            cdoy_start = pd.to_datetime(it_start + ft).timetuple(
            ).tm_yday  # Get current day of year start for valid time
            it_yr = str(pd.to_datetime(it).year)
            it_m = str(pd.to_datetime(it).month)

            # Get datetime64 of valid time start and end
            valid_start = it_start + ft
            valid_end = it + ft

            print(ift)
            #if ift<=5:
            #    continue

            models_2_plot = models_2_plot_master[ift]
            print('models to plot ', models_2_plot)
            # Get # of models and setup subplot dims
            Nmod = len(models_2_plot) + 2  #(+3 for obs, MME, and clim)
            Nc = int(np.floor(np.sqrt(Nmod)))
            # Max number of columns == 5 (plots get too small otherwise)
            Nc = 5  #np.min([Nc,5])
            Nr = int(np.ceil((Nmod - 1) / Nc))
            print(Nr, Nc, Nmod)
            assert Nc * Nr >= Nmod - 1, 'Need more subplots'

            # Loop through variable of interest + any metrics (i.e. SIP) based on that
            for metric in metrics_all[cvar]:

                # Set up plotting info (colormap, colorbar label, value range)
                if cvar == 'sic':
                    if metric == 'mean':
                        cmap_c = matplotlib.colors.ListedColormap(
                            sns.color_palette("Blues_r", 10))
                        cmap_c.set_bad(color='lightgrey')
                        c_label = 'Sea Ice Concentration (-)'
                        c_vmin = 0
                        c_vmax = 1
                    elif metric == 'SIP':
                        cmap_c = matplotlib.colors.LinearSegmentedColormap.from_list(
                            "", ["white", "orange", "red", "#990000"])
                        cmap_c.set_bad(color='lightgrey')
                        c_label = 'Sea Ice Probability (-)'
                        c_vmin = 0
                        c_vmax = 1
                    elif metric == 'anomaly':
                        #                         cmap_c = matplotlib.colors.ListedColormap(sns.color_palette("coolwarm", 9))
                        cmap_c = matplotlib.colors.LinearSegmentedColormap.from_list(
                            "", ["red", "white", "blue"])
                        cmap_c.set_bad(color='lightgrey')
                        c_label = 'SIC Anomaly to 1980-2010 Mean'
                        c_vmin = -1
                        c_vmax = 1

                elif cvar == 'hi':
                    if metric == 'mean':
                        cmap_c = matplotlib.colors.ListedColormap(
                            sns.color_palette("Reds_r", 10))
                        cmap_c.set_bad(color='lightgrey')
                        c_label = 'Sea Ice Thickness (m)'
                        c_vmin = 0
                        c_vmax = None
                else:
                    raise ValueError("cvar not found.")

                for cR in np.arange(0, len(reg2plot), 1):

                    # New Plot
                    #start_time_plot = timeit.default_timer()

                    # Get regional extent to plot
                    crExt = ds_region.where(ds_region.mask.isin(reg2plot[cR]),
                                            drop=True)[['xm', 'ym']]
                    crExt = [
                        crExt.xm.min().values,
                        crExt.xm.max().values,
                        crExt.ym.min().values,
                        crExt.ym.max().values
                    ]
                    # Clip southern (y-min) extent so the map doesn't stretch too far south
                    crExt[2] = np.max([crExt[2], -3700000])

                    # New Plot
                    (f, axes) = ice_plot.multi_polar_axis(ncols=Nc,
                                                          nrows=Nr,
                                                          Nplots=Nmod,
                                                          extent=crExt)

                    ############################################################################
                    #                               OBSERVATIONS                               #
                    ############################################################################

                    # Plot Obs (if available)
                    ax_num = 0
                    axes[ax_num].set_title('Observed', fontsize=10)

                    try:
                        da_obs_c = ds_ALL[metric].sel(model=b'Observed',
                                                      init_end=it,
                                                      fore_time=ft)
                        #print('da_obs_c',da_obs_c)
                        haveObs = True  # we think there are obs...
                    except KeyError:
                        haveObs = False

                    rightnow = datetime.datetime.now()
                    if valid_start > np.datetime64(rightnow):
                        haveObs = False  # but we know there are no obs in the future...

                    # If obs then plot
                    if haveObs:
                        #da_obs_c = da_obs_c.where(ds_region.mask.isin(reg2plot[cR]))
                        da_obs_c.plot.pcolormesh(ax=axes[ax_num],
                                                 x='lon',
                                                 y='lat',
                                                 transform=ccrs.PlateCarree(),
                                                 add_colorbar=False,
                                                 cmap=cmap_c,
                                                 vmin=c_vmin,
                                                 vmax=c_vmax)
                        axes[ax_num].set_title('Observed', fontsize=10)
                    else:  # When in the future (or obs are missing)
                        #print('no obs avail yet')
                        if metric == 'SIP':  # Plot this historical mean SIP
                            print("plotting hist obs SIP")
                            da_obs_c = mean_1980_2010_SIP.isel(
                                time=slice(cdoy_start, cdoy_end)).mean(
                                    dim='time')
                            da_obs_c.plot.pcolormesh(
                                ax=axes[ax_num],
                                x='lon',
                                y='lat',
                                transform=ccrs.PlateCarree(),
                                add_colorbar=False,
                                cmap=cmap_c,
                                vmin=c_vmin,
                                vmax=c_vmax)
                            axes[ax_num].set_title('Hist. Obs.', fontsize=10)
                        else:
                            textstr = 'Not Available'
                            # these are matplotlib.patch.Patch properties
                            props = dict(boxstyle='round',
                                         facecolor='white',
                                         alpha=0.5)

                            # place a text box in upper left in axes coords
                            axes[ax_num].text(0.05,
                                              0.55,
                                              textstr,
                                              transform=axes[ax_num].transAxes,
                                              fontsize=8,
                                              verticalalignment='top',
                                              bbox=props)

                    ############################################################################
                    #                    Plot all models                                       #
                    ############################################################################
                    p = {}
                    for (i, cmod) in enumerate(models_2_plot):
                        #print(cmod)
                        i = i + 1  # shift for obs
                        axes[i].set_title(E.model[cmod]['model_label'],
                                          fontsize=10)

                        # Select current model to plot
                        try:
                            ds_model = ds_ALL[metric].sel(
                                model=cmod.encode('utf-8'),
                                init_end=it,
                                fore_time=ft)
                            haveMod = True
                        except:
                            # NOTE(review): bare except hides unrelated errors —
                            # catching KeyError only (as for obs above) would be safer.
                            haveMod = False

                        # Plot
                        if haveMod:
                            # Select region
                            # Lat and Long feilds have round off differences, so set to same here
                            ds_model['lat'] = ds_region.lat
                            ds_model['lon'] = ds_region.lon
                            #ds_model = ds_model.where(ds_region.mask.isin(reg2plot[cR]))

                            p[i] = ds_model.plot.pcolormesh(
                                ax=axes[i],
                                x='lon',
                                y='lat',
                                transform=ccrs.PlateCarree(),
                                add_colorbar=False,
                                cmap=cmap_c,
                                vmin=c_vmin,
                                vmax=c_vmax)

                            axes[i].set_title(E.model[cmod]['model_label'],
                                              fontsize=10)

                            # Clean up for current model
                            ds_model = None

                    # Make pretty
                    f.subplots_adjust(right=0.8)
                    cbar_ax = f.add_axes([0.85, 0.15, 0.05, 0.7])
                    if p:  # if its not empty
                        cbar = f.colorbar(
                            next(iter(p.values())), cax=cbar_ax,
                            label=c_label)  # use first plot to gen colorbar
                        if metric == 'anomaly':
                            cbar.set_ticks(np.arange(-1, 1.1, 0.2))
                        else:
                            cbar.set_ticks(np.arange(0, 1.1, 0.1))

                    # Set title of all plots
                    init_time_2 = pd.to_datetime(it).strftime('%Y-%m-%d')
                    init_time_1 = pd.to_datetime(it_start).strftime('%Y-%m-%d')
                    valid_time_2 = pd.to_datetime(it + ft).strftime('%Y-%m-%d')
                    valid_time_1 = pd.to_datetime(it_start +
                                                  ft).strftime('%Y-%m-%d')
                    # Shrink title font for longer leads (longer suptitle strings crowd the figure)
                    if ift < 3:
                        titlesize = 15
                    elif ift < 5:
                        titlesize = 13
                    else:
                        titlesize = 11
                    plt.suptitle('Initialization Time: ' + init_time_1 +
                                 ' to ' + init_time_2 + '\n Valid Time: ' +
                                 valid_time_1 + ' to ' + valid_time_2,
                                 fontsize=titlesize)  # +'\n Week '+week_str

                    if (ift > 3):
                        plt.subplots_adjust(top=0.75)
                    else:
                        plt.subplots_adjust(top=0.85)

                    # Save to file
                    f_out = os.path.join(
                        fig_dir, 'Region_' + str(cR) + '_' + metric + '_' +
                        runType + '_' + init_time_2 + '_' + cs_str + '.png')
                    f.savefig(f_out, bbox_inches='tight', dpi=200)
                    print("saved ", f_out)
                    #print("Figure took  ", (timeit.default_timer() - start_time_plot)/60, " minutes.")

                    # Mem clean up
                    p = None
                    plt.close(f)
                    da_obs_c = None

                    #diehere

            # loop over regions

        # Done with current it
        print("Took ", (timeit.default_timer() - start_time_cmod) / 60,
              " minutes.")

    # Update json file (list of init dates with figures, consumed by the website)
    json_format = get_figure_init_times(fig_dir)
    json_dict = [{"date": cd, "label": cd} for cd in json_format]

    json_f = os.path.join(fig_dir, 'plotdates_current.json')
    with open(json_f, 'w') as outfile:
        json.dump(json_dict, outfile)

    # Make into Gifs
    # TODO: make parallel, add &
#    for cit in json_format:
#        subprocess.call(str("/home/disk/sipn/nicway/python/ESIO/scripts/makeGif.sh " + fig_dir + " " + cit), shell=True)

    # NOTE(review): message says "panArctic" although this function plots
    # regional maps — likely a copy-paste from Update_PanArctic_Maps.
    print("Finished plotting panArctic Maps.")
# Example #6 (scrape/concatenation artifact — not valid Python; kept as a comment)
# 0
                        # NOTE(review): this fragment is truncated at both ends
                        # (it begins mid if/elif chain and ends mid pcolormesh
                        # call) — a scrape/merge artifact from another copy of
                        # the map-plotting function; verify against the repo.
                        c_vmax = 1

                elif cvar=='hi':
                    if metric=='mean':
                        cmap_c = matplotlib.colors.ListedColormap(sns.color_palette("Reds_r", 10))
                        cmap_c.set_bad(color = 'lightgrey')
                        c_label = 'Sea Ice Thickness (m)'
                        c_vmin = 0
                        c_vmax = None
                else:
                    raise ValueError("cvar not found.") 


                MME_list = []            
                # New Plot
                (f, axes) = ice_plot.multi_polar_axis(ncols=Nc, nrows=Nr, Nplots=Nmod)
                
                # Plot Obs (if available)
                ax_num = 0
                if ((it + ft) in ds_81.time.values):
                    if metric=='mean':
                        da_obs_c = ds_81.sic.sel(time=(it + ft))
                    elif metric=='SIP':
                        # SIP from obs: 1 where SIC >= 15%, masked where obs are missing
                        da_obs_c = (ds_81.sic.sel(time=(it + ft)) >=0.15).astype('int').where(ds_81.sic.sel(time=(it + ft)).notnull())
                    elif metric=='anomaly':
                        # anomaly = observed SIC minus the 1980-2010 climatological mean for this DOY
                        da_obs_VT = ds_81.sic.sel(time=(it + ft))
                        da_obs_mean = mean_1980_2010_sic.isel(time=cdoy)
                        da_obs_c = da_obs_VT - da_obs_mean
                    else:
                        raise ValueError('Not implemented')
                    da_obs_c.plot.pcolormesh(ax=axes[ax_num], x='lon', y='lat', 
# Example #7 (scrape/concatenation artifact — not valid Python; kept as a comment)
# 0
def Update_PanArctic_Maps():
    """Generate weekly pan-Arctic forecast maps and MME output for all models.

    For each weekly init period (and each forecast lead time) that has not
    already been plotted, this builds one multi-panel polar figure per metric
    ('anomaly', 'mean', 'SIP') showing observations, the multi-model ensemble
    (MME), the climatology-trend model, and every predictive model. Figures are
    saved as PNGs under the weekly maps directory, the per-target MME data is
    written to netCDF, and at the end the JSON date index is refreshed and GIFs
    are regenerated via an external shell script.

    NOTE(review): depends on many names defined elsewhere in this file/module
    (ice_plot, import_data, update_status, get_figure_init_times, ed, E, etc.)
    and on module-level imports (xr, np, pd, plt, sns, ccrs, matplotlib,
    datetime, glob, os, json, timeit, subprocess). Side effects: creates
    directories, writes PNG/netCDF/JSON files, and shells out to makeGif.sh.
    """
    # Plotting Info
    runType = 'forecast'
    variables = ['sic']  # only sea-ice concentration is enabled here; 'hi' config exists below
    metrics_all = {'sic': ['anomaly', 'mean', 'SIP'], 'hi': ['mean']}
    #metrics_all = {'sic':['SIP']}
    updateAll = False  # if True, re-plot everything regardless of existing figures
    # Some models are terrible/have serious issues, so don't include in MME
    MME_NO = ['hcmr']

    # Define Init Periods here, spaced by 7 days (aprox a week)
    # Now
    cd = datetime.datetime.now()
    cd = datetime.datetime(cd.year, cd.month, cd.day)  # Set hour min sec to 0.
    # Hardcoded start date (makes incremental weeks always the same)
    start_t = datetime.datetime(1950, 1, 1)  # datetime.datetime(1950, 1, 1)
    # Params for this plot
    Ndays = 7  # time period to aggregate maps to (default is 7)
    Npers = 29  # number of periods to plot (from current date) (default is 14)
    init_slice = np.arange(
        start_t, cd, datetime.timedelta(days=Ndays)).astype('datetime64[ns]')
    init_slice = init_slice[
        -Npers:]  # Select only the last Npers of periods (weeks) since current date

    # Forecast times to plot: weekly leads out to ~1 month, then monthly, plus ~1 year
    weeks = pd.to_timedelta(np.arange(0, 5, 1), unit='W')
    months = pd.to_timedelta(np.arange(2, 12, 1), unit='M')
    years = pd.to_timedelta(np.arange(1, 2), unit='Y') - np.timedelta64(
        1, 'D')  # need 364 not 365
    slices = weeks.union(months).union(years).round('1d')
    da_slices = xr.DataArray(slices, dims=('fore_time'))
    # NOTE(review): result of this astype() is discarded — line has no effect.
    da_slices.fore_time.values.astype('timedelta64[D]')
    print(da_slices)

    # Help conversion between "week/month" period used for figure naming and the actual forecast time delta value
    int_2_days_dict = dict(zip(np.arange(0, da_slices.size), da_slices.values))
    days_2_int_dict = {v: k for k, v in int_2_days_dict.items()}

    #############################################################
    # Load in Data
    #############################################################

    E = ed.EsioData.load()
    mod_dir = E.model_dir

    # Get median ice edge by DOY
    median_ice_fill = xr.open_mfdataset(
        os.path.join(E.obs_dir, 'NSIDC_0051', 'agg_nc', 'ice_edge.nc')).sic
    # Get mean sic by DOY
    mean_1980_2010_sic = xr.open_dataset(
        os.path.join(E.obs_dir, 'NSIDC_0051', 'agg_nc',
                     'mean_1980_2010_sic.nc')).sic
    # Get average sip by DOY
    mean_1980_2010_SIP = xr.open_dataset(
        os.path.join(E.obs_dir, 'NSIDC_0051', 'agg_nc',
                     'hist_SIP_1980_2010.nc')).sip

    # Climatology model: current-year files for the "climatology trend" panel
    cmod = 'climatology'
    all_files = os.path.join(mod_dir, cmod, runType, 'sipn_nc',
                             str(cd.year) + '*.nc')
    files = glob.glob(all_files)

    # NOTE(review): chunk sizes 304x448 assume the NSIDC polar stereographic
    # grid — confirm against the stored files.
    obs_clim_model = xr.open_mfdataset(sorted(files),
                                       chunks={
                                           'time': 30,
                                           'x': 304,
                                           'y': 448
                                       },
                                       concat_dim='time',
                                       autoclose=True,
                                       parallel=True)

    obs_clim_model = obs_clim_model['sic']

    # Get recent observations (NSIDC 0081 near-real-time)
    ds_81 = xr.open_mfdataset(E.obs['NSIDC_0081']['sipn_nc'] + '_yearly/*.nc',
                              concat_dim='time',
                              autoclose=True,
                              parallel=True)  #,

    # Define models to plot
    models_2_plot = list(E.model.keys())
    models_2_plot = [
        x for x in models_2_plot
        if x not in ['piomas', 'MME', 'MME_NEW', 'uclsipn']
    ]  # remove some models
    models_2_plot = [x for x in models_2_plot
                     if E.icePredicted[x]]  # Only predictive models

    # Get # of models and setup subplot dims
    Nmod = len(models_2_plot) + 4  #(+3 for obs, MME, and clim)
    Nc = int(np.floor(np.sqrt(Nmod)))
    # Max number of columns == 5 (plots get too small otherwise)
    Nc = np.min([Nc, 5])
    Nr = int(np.ceil(Nmod / Nc))
    print(Nr, Nc, Nmod)
    assert Nc * Nr >= Nmod, 'Need more subplots'

    for cvar in variables:

        # Define fig dir and make if doesn't exist
        fig_dir = os.path.join(E.fig_dir, 'model', 'all_model', cvar,
                               'maps_weekly')
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir)

        # Status grid: NaN = not plotted yet, used to decide what work remains
        ds_status = xr.DataArray(np.ones(
            (init_slice.size, da_slices.size)) * np.NaN,
                                 dims=('init_time', 'fore_time'),
                                 coords={
                                     'init_time': init_slice,
                                     'fore_time': da_slices
                                 })
        ds_status.name = 'status'
        ds_status = ds_status.to_dataset()

        # Check what plots we already have
        if not updateAll:
            print("Removing figures we have already made")
            ds_status = update_status(ds_status=ds_status,
                                      fig_dir=fig_dir,
                                      int_2_days_dict=int_2_days_dict)

        print(ds_status.status.values)
        # Drop IC/FT we have already plotted (orthoginal only)
        ds_status = ds_status.where(
            ds_status.status.sum(dim='fore_time') < ds_status.fore_time.size,
            drop=True)

        print("Starting plots...")
        # For each init_time we haven't plotted yet
        start_time_cmod = timeit.default_timer()
        for it in ds_status.init_time.values:
            print(it)
            it_start = it - np.timedelta64(Ndays, 'D') + np.timedelta64(
                1, 'D'
            )  # Start period for init period (it is end of period). Add 1 day because when
            # we select using slice(start,stop) it is inclusive of end points. So here we are defining the start of the init AND the start of the valid time.
            # So we need to add one day, so we don't double count.

            # For each forecast time we haven't plotted yet
            ft_to_plot = ds_status.sel(init_time=it)
            ft_to_plot = ft_to_plot.where(ft_to_plot.isnull(),
                                          drop=True).fore_time

            for ft in ft_to_plot.values:

                print(ft.astype('timedelta64[D]'))
                cs_str = format(days_2_int_dict[ft],
                                '02')  # Get index of current forecast week
                week_str = format(
                    int(ft.astype('timedelta64[D]').astype('int') / Ndays),
                    '02')  # Get string of current week
                cdoy_end = pd.to_datetime(it + ft).timetuple(
                ).tm_yday  # Get current day of year end for valid time
                cdoy_start = pd.to_datetime(it_start + ft).timetuple(
                ).tm_yday  # Get current day of year start for valid time
                it_yr = str(pd.to_datetime(it).year)
                it_m = str(pd.to_datetime(it).month)

                # Get datetime64 of valid time start and end
                valid_start = it_start + ft
                valid_end = it + ft

                # Loop through variable of interest + any metrics (i.e. SIP) based on that
                for metric in metrics_all[cvar]:

                    # Set up plotting info (colormap, label, value range per metric)
                    if cvar == 'sic':
                        if metric == 'mean':
                            cmap_c = matplotlib.colors.ListedColormap(
                                sns.color_palette("Blues_r", 10))
                            cmap_c.set_bad(color='lightgrey')
                            c_label = 'Sea Ice Concentration (-)'
                            c_vmin = 0
                            c_vmax = 1
                        elif metric == 'SIP':
                            cmap_c = matplotlib.colors.LinearSegmentedColormap.from_list(
                                "", ["white", "orange", "red", "#990000"])
                            cmap_c.set_bad(color='lightgrey')
                            c_label = 'Sea Ice Probability (-)'
                            c_vmin = 0
                            c_vmax = 1
                        elif metric == 'anomaly':
                            #                         cmap_c = matplotlib.colors.ListedColormap(sns.color_palette("coolwarm", 9))
                            cmap_c = matplotlib.colors.LinearSegmentedColormap.from_list(
                                "", ["red", "white", "blue"])
                            cmap_c.set_bad(color='lightgrey')
                            c_label = 'SIC Anomaly to 1980-2010 Mean'
                            c_vmin = -1
                            c_vmax = 1

                    elif cvar == 'hi':
                        if metric == 'mean':
                            cmap_c = matplotlib.colors.ListedColormap(
                                sns.color_palette("Reds_r", 10))
                            cmap_c.set_bad(color='lightgrey')
                            c_label = 'Sea Ice Thickness (m)'
                            c_vmin = 0
                            c_vmax = None
                    else:
                        raise ValueError("cvar not found.")

                    MME_list = []
                    # New Plot
                    start_time_plot = timeit.default_timer()
                    (f, axes) = ice_plot.multi_polar_axis(ncols=Nc,
                                                          nrows=Nr,
                                                          Nplots=Nmod)

                    ############################################################################
                    #                               OBSERVATIONS                               #
                    ############################################################################

                    # Plot Obs (if available)
                    ax_num = 0
                    axes[ax_num].set_title('Observed')
                    #ds_model = ds_model.sel(init_time=slice(it_start, it))
                    da_obs_c = ds_81.sic.sel(
                        time=slice(valid_start, valid_end))
                    # Check we found any times in target valid time range
                    if da_obs_c.time.size > 0:
                        #if ((it + ft) in ds_81.time.values):

                        if metric == 'mean':
                            da_obs_c = da_obs_c.mean(
                                dim='time')  #ds_81.sic.sel(time=(it + ft))
                        elif metric == 'SIP':
                            # Fraction of days with SIC >= 0.15, masked by ocean
                            da_obs_c = (da_obs_c >= 0.15).mean(
                                dim='time').astype('int').where(
                                    da_obs_c.isel(time=0).notnull())
                        elif metric == 'anomaly':
                            da_obs_VT = da_obs_c.mean(dim='time')
                            da_obs_mean = mean_1980_2010_sic.isel(
                                time=slice(cdoy_start, cdoy_end)).mean(
                                    dim='time')
                            da_obs_c = da_obs_VT - da_obs_mean
                        else:
                            raise ValueError('Not implemented')
                        da_obs_c.plot.pcolormesh(ax=axes[ax_num],
                                                 x='lon',
                                                 y='lat',
                                                 transform=ccrs.PlateCarree(),
                                                 add_colorbar=False,
                                                 cmap=cmap_c,
                                                 vmin=c_vmin,
                                                 vmax=c_vmax)
                        axes[ax_num].set_title('Observed')
                        # Overlay median ice edge
                        #if metric=='mean':
                        #po = median_ice_fill.isel(time=cdoy).plot.contour(ax=axes[ax_num], x='xm', y='ym',
                        #=('#bc0f60'),
                        #linewidths=[0.5],
                        #levels=[0.5])
                        #remove_small_contours(po, thres=10)
                    else:  # When were in the future (or obs are missing)
                        if metric == 'anomaly':  # Still get climatological mean for model difference
                            da_obs_mean = mean_1980_2010_sic.isel(
                                time=slice(cdoy_start, cdoy_end)).mean(
                                    dim='time')
                        elif metric == 'SIP':  # Plot this historical mean SIP
                            da_obs_c = mean_1980_2010_SIP.isel(
                                time=slice(cdoy_start, cdoy_end)).mean(
                                    dim='time')
                            da_obs_c.plot.pcolormesh(
                                ax=axes[ax_num],
                                x='lon',
                                y='lat',
                                transform=ccrs.PlateCarree(),
                                add_colorbar=False,
                                cmap=cmap_c,
                                vmin=c_vmin,
                                vmax=c_vmax)
                            axes[ax_num].set_title('Hist. Obs.')

                    ############################################################################
                    #                    Plot climatology trend                                #
                    ############################################################################

                    i = 2  # panel index reserved for the climatology-trend model
                    cmod = 'climatology'
                    axes[i].set_title('clim. trend')

                    # Check if we have any valid times in range of target dates
                    ds_model = obs_clim_model.where(
                        (obs_clim_model.time >= valid_start) &
                        (obs_clim_model.time <= valid_end),
                        drop=True)
                    if 'time' in ds_model.lat.dims:
                        ds_model.coords['lat'] = ds_model.lat.isel(
                            time=0).drop(
                                'time'
                            )  # Drop time from lat/lon dims (not sure why?)

                    # If we have any time
                    if ds_model.time.size > 0:

                        # Average over time
                        ds_model = ds_model.mean(dim='time')

                        if metric == 'mean':  # Calc ensemble mean
                            ds_model = ds_model
                        elif metric == 'SIP':  # Calc probability
                            # Issue of some ensemble members having missing data
                            ocnmask = ds_model.notnull()
                            ds_model = (ds_model >= 0.15).where(ocnmask)
                        elif metric == 'anomaly':  # Calc anomaly in reference to mean observed 1980-2010
                            # NOTE(review): da_obs_mean is only defined on the
                            # 'anomaly' branches above — relies on metric order.
                            ds_model = ds_model - da_obs_mean
                            # Add back lat/long (get dropped because of round off differences)
                            ds_model['lat'] = da_obs_mean.lat
                            ds_model['lon'] = da_obs_mean.lon
                        else:
                            raise ValueError('metric not implemented')

                        # Drop coords we don't use downstream
                        if 'doy' in ds_model.coords:
                            ds_model = ds_model.drop(['doy'])
                        if 'xm' in ds_model.coords:
                            ds_model = ds_model.drop(['xm'])
                        if 'ym' in ds_model.coords:
                            ds_model = ds_model.drop(['ym'])

                        # Build MME
                        if cmod not in MME_NO:  # Exclude some models (bad) from MME
                            ds_model.coords['model'] = cmod
                            MME_list.append(ds_model)

                        # Plot
                        p = ds_model.plot.pcolormesh(
                            ax=axes[i],
                            x='lon',
                            y='lat',
                            transform=ccrs.PlateCarree(),
                            add_colorbar=False,
                            cmap=cmap_c,
                            vmin=c_vmin,
                            vmax=c_vmax)

                        axes[i].set_title('clim. trend')

                    # Clean up for current model
                    ds_model = None

                    ###########################################################
                    #                     Plot Models in SIPN format          #
                    ###########################################################

                    # Plot all Models
                    p = None  # initlaize to know if we found any data
                    for (i, cmod) in enumerate(models_2_plot):
                        print(cmod)
                        i = i + 3  # shift for obs, MME, and clim
                        axes[i].set_title(E.model[cmod]['model_label'])

                        # Load in Model
                        # Find only files that have current year and month in filename (speeds up loading)
                        all_files = os.path.join(
                            E.model[cmod][runType]['sipn_nc'],
                            '*' + it_yr + '*' + it_m + '*.nc')

                        # Check we have files
                        files = glob.glob(all_files)
                        if not files:
                            continue  # Skip this model

                        # Load in model
                        ds_model = xr.open_mfdataset(sorted(files),
                                                     chunks={
                                                         'fore_time': 1,
                                                         'init_time': 1,
                                                         'nj': 304,
                                                         'ni': 448
                                                     },
                                                     concat_dim='init_time',
                                                     autoclose=True,
                                                     parallel=True)
                        # NOTE(review): inplace rename is deprecated/removed in
                        # newer xarray — confirm the pinned xarray version.
                        ds_model.rename({'nj': 'x', 'ni': 'y'}, inplace=True)

                        # Select init period and fore_time of interest
                        ds_model = ds_model.sel(init_time=slice(it_start, it))
                        # Check we found any init_times in range
                        if ds_model.init_time.size == 0:
                            print('init_time not found.')
                            continue

                        # Select var of interest (if available)
                        if cvar in ds_model.variables:
                            #                     print('found ',cvar)
                            ds_model = ds_model[cvar]
                        else:
                            print('cvar not found.')
                            continue

                        # Get Valid time
                        ds_model = import_data.get_valid_time(ds_model)

                        # Check if we have any valid times in range of target dates
                        ds_model = ds_model.where(
                            (ds_model.valid_time >= valid_start) &
                            (ds_model.valid_time <= valid_end),
                            drop=True)
                        if ds_model.fore_time.size == 0:
                            print("no fore_time found for target period.")
                            continue

                        # Average over for_time and init_times
                        ds_model = ds_model.mean(
                            dim=['fore_time', 'init_time'])

                        if metric == 'mean':  # Calc ensemble mean
                            ds_model = ds_model.mean(dim='ensemble')
                        elif metric == 'SIP':  # Calc probability
                            # Issue of some ensemble members having missing data
                            #                         ds_model = ds_model.where(ds_model>=0.15, other=0).mean(dim='ensemble')
                            ok_ens = (
                                (ds_model.notnull().sum(dim='x').sum(dim='y'))
                                > 0)  # select ensemble members with any data
                            ds_model = ((ds_model.where(ok_ens, drop=True) >=
                                         0.15)).mean(dim='ensemble').where(
                                             ds_model.isel(
                                                 ensemble=0).notnull())
                        elif metric == 'anomaly':  # Calc anomaly in reference to mean observed 1980-2010
                            ds_model = ds_model.mean(
                                dim='ensemble') - da_obs_mean
                            # Add back lat/long (get dropped because of round off differences)
                            ds_model['lat'] = da_obs_mean.lat
                            ds_model['lon'] = da_obs_mean.lon
                        else:
                            raise ValueError('metric not implemented')
                        #print("Calc metric took  ", (timeit.default_timer() - start_time), " seconds.")

                        # drop ensemble if still present
                        if 'ensemble' in ds_model:
                            ds_model = ds_model.drop('ensemble')

                        # Build MME
                        if cmod not in MME_NO:  # Exclude some models (bad) from MME
                            ds_model.coords['model'] = cmod
                            if 'xm' in ds_model:
                                ds_model = ds_model.drop(
                                    ['xm', 'ym'])  #Dump coords we don't use
                            MME_list.append(ds_model)

                        # Plot
                        #start_time = timeit.default_timer()
                        p = ds_model.plot.pcolormesh(
                            ax=axes[i],
                            x='lon',
                            y='lat',
                            transform=ccrs.PlateCarree(),
                            add_colorbar=False,
                            cmap=cmap_c,
                            vmin=c_vmin,
                            vmax=c_vmax)
                        #print("Plotting took  ", (timeit.default_timer() - start_time), " seconds.")

                        # Overlay median ice edge
                        #if metric=='mean':
                        #po = median_ice_fill.isel(time=cdoy).plot.contour(ax=axes[i], x='xm', y='ym',
                        #colors=('#bc0f60'),
                        #linewidths=[0.5],
                        #levels=[0.5]) #, label='Median ice edge 1981-2010')
                        #remove_small_contours(po, thres=10)

                        axes[i].set_title(E.model[cmod]['model_label'])

                        # Clean up for current model
                        ds_model = None

                    # MME (multi-model ensemble) panel
                    ax_num = 1

                    if MME_list:  # If we had any models for this time
                        # Concat over all models
                        ds_MME = xr.concat(MME_list, dim='model')
                        # Set lat/lon to "model" lat/lon (round off differences)
                        if 'model' in ds_MME.lat.dims:
                            ds_MME.coords['lat'] = ds_MME.lat.isel(
                                model=0).drop('model')

                        # Plot average
                        # Don't include some models (i.e. climatology and dampedAnomaly)
                        mod_to_avg = [
                            m for m in ds_MME.model.values
                            if m not in ['climatology', 'dampedAnomaly']
                        ]
                        pmme = ds_MME.sel(model=mod_to_avg).mean(
                            dim='model').plot.pcolormesh(
                                ax=axes[ax_num],
                                x='lon',
                                y='lat',
                                transform=ccrs.PlateCarree(),
                                add_colorbar=False,
                                cmap=cmap_c,
                                vmin=c_vmin,
                                vmax=c_vmax)
                        # Overlay median ice edge
                        #if metric=='mean':
                        #po = median_ice_fill.isel(time=cdoy).plot.contour(ax=axes[ax_num], x='xm', y='ym',
                        #                                                                           colors=('#bc0f60'),
                        #                                                                           linewidths=[0.5],
                        #                                                                           levels=[0.5])
                        #remove_small_contours(po, thres=10)

                        # Save all models for given target valid_time
                        out_metric_dir = os.path.join(
                            E.model['MME'][runType]['sipn_nc'], metric)
                        if not os.path.exists(out_metric_dir):
                            os.makedirs(out_metric_dir)

                        out_init_dir = os.path.join(
                            out_metric_dir,
                            pd.to_datetime(it).strftime('%Y-%m-%d'))
                        if not os.path.exists(out_init_dir):
                            os.makedirs(out_init_dir)

                        out_nc_file = os.path.join(
                            out_init_dir,
                            pd.to_datetime(it + ft).strftime('%Y-%m-%d') +
                            '.nc')

                        # Add observations
                        # Check if exits (if we have any observations for this valid period)
                        # TODO: require ALL observations to be present, otherwise we could only get one day != week avg
                        if ds_81.sic.sel(time=slice(valid_start,
                                                    valid_end)).time.size > 0:
                            da_obs_c.coords['model'] = 'Observed'

                            # Drop coords we don't need
                            da_obs_c = da_obs_c.drop(['hole_mask', 'xm', 'ym'])
                            if 'time' in da_obs_c:
                                da_obs_c = da_obs_c.drop('time')
                            if 'xm' in ds_MME:
                                ds_MME = ds_MME.drop(['xm', 'ym'])

                            # Add obs
                            ds_MME_out = xr.concat([ds_MME, da_obs_c],
                                                   dim='model')
                        else:
                            ds_MME_out = ds_MME

                        # Add init and valid times
                        ds_MME_out.coords['init_start'] = it_start
                        ds_MME_out.coords['init_end'] = it
                        ds_MME_out.coords['valid_start'] = it_start + ft
                        ds_MME_out.coords['valid_end'] = it + ft
                        ds_MME_out.coords['fore_time'] = ft
                        ds_MME_out.name = metric

                        # Save
                        ds_MME_out.to_netcdf(out_nc_file)

                    axes[ax_num].set_title('MME')

                    # Make pretty: shared colorbar on the right
                    f.subplots_adjust(right=0.8)
                    cbar_ax = f.add_axes([0.85, 0.15, 0.05, 0.7])
                    if p:
                        cbar = f.colorbar(p, cax=cbar_ax, label=c_label)
                        if metric == 'anomaly':
                            cbar.set_ticks(np.arange(-1, 1.1, 0.2))
                        else:
                            cbar.set_ticks(np.arange(0, 1.1, 0.1))
                        #cbar.set_ticklabels(np.arange(0,1,0.05))

                    # Set title of all plots
                    init_time_2 = pd.to_datetime(it).strftime('%Y-%m-%d')
                    init_time_1 = pd.to_datetime(it_start).strftime('%Y-%m-%d')
                    valid_time_2 = pd.to_datetime(it + ft).strftime('%Y-%m-%d')
                    valid_time_1 = pd.to_datetime(it_start +
                                                  ft).strftime('%Y-%m-%d')
                    plt.suptitle('Initialization Time: ' + init_time_1 +
                                 ' to ' + init_time_2 + '\n Valid Time: ' +
                                 valid_time_1 + ' to ' + valid_time_2,
                                 fontsize=15)  # +'\n Week '+week_str
                    plt.subplots_adjust(top=0.85)

                    # Save to file
                    f_out = os.path.join(
                        fig_dir, 'panArctic_' + metric + '_' + runType + '_' +
                        init_time_2 + '_' + cs_str + '.png')
                    f.savefig(f_out, bbox_inches='tight', dpi=200)
                    print("saved ", f_out)
                    print("Figure took  ",
                          (timeit.default_timer() - start_time_plot) / 60,
                          " minutes.")

                    # Mem clean up
                    plt.close(f)
                    p = None
                    ds_MME = None
                    da_obs_c = None
                    da_obs_mean = None

            # Done with current it
            print("Took ", (timeit.default_timer() - start_time_cmod) / 60,
                  " minutes.")

    # Update json file so the website knows which init dates have figures
    json_format = get_figure_init_times(fig_dir)
    json_dict = [{"date": cd, "label": cd} for cd in json_format]

    json_f = os.path.join(fig_dir, 'plotdates_current.json')
    with open(json_f, 'w') as outfile:
        json.dump(json_dict, outfile)

    # Make into Gifs (one external call per init date)
    for cit in json_format:
        subprocess.call(
            str("/home/disk/sipn/nicway/python/ESIO/scripts/makeGif.sh " +
                fig_dir + " " + cit),
            shell=True)

    print("Finished plotting panArctic Maps.")