def main():
#===============================================================================
    global wofostdir, sibcasadir, obsdir
#-------------------------------------------------------------------------------
# ================================= USER INPUT =================================

    # read the settings from the rc file (mostly directory paths)
    rcdict     = rc.read('settings.rc')
    sites      = [s.strip(' ') for s in rcdict['sites'].split(',')]
    years      = [int(s.strip(' ')) for s in rcdict['years'].split(',')]
    TER_method = rcdict['TER_method']  # selects the corresponding WOFOST output file
    R10        = rcdict['R10']         # selects the corresponding WOFOST output file
#===============================================================================
#-------------------------------------------------------------------------------
    # extract the needed information: input data directory paths
    rootdir    = rcdict['rootdir']
    sibcasadir = os.path.join(rootdir, 'intercomparison_study/SiBCASA_runs')
    wofostdir  = rcdict['outputdir']
    obsdir     = rcdict['obsdir']
    figdir     = os.path.join(rootdir, 'intercomparison_study/figures')
#-------------------------------------------------------------------------------
    # Start a dictionary to store OBS, SIMC (WOFOST), SIMB (SiBCASA)

    # recover the FluxNet observed data from pickle files
    res_timeseries = dict()
    res_timeseries['OBS'] = dict()
    res_timeseries['SIMB'] = dict()
    res_timeseries['SIMC'] = dict()

    filename = os.path.join(obsdir, 'timeseries_OBS.pickle')
    try:
        res_timeseries['OBS'] = pickle_load(open(filename, 'rb'))
    except IOError:
        print 'could not find the observations output file %s' % filename
        res_timeseries['OBS'] = None

    # recover the SiBCASA runs
    filename = os.path.join(sibcasadir, 'timeseries_SiBCASA.pickle')
    try:
        res_timeseries['SIMB'] = pickle_load(open(filename, 'rb'))
    except IOError:
        print 'could not find the SiBCASA output file %s' % filename
        res_timeseries['SIMB'] = None

    # recover the WOFOST runs
    filename = os.path.join(wofostdir, 'timeseries_%s_R10=%s_' % (TER_method,
                            R10) + 'WOFOST_crop_rotation.pickle')
    try:
        print 'opening the WOFOST output file %s' % filename
        res_timeseries['SIMC'] = pickle_load(open(filename, 'rb'))
    except IOError:
        print 'could not find the WOFOST output file %s' % filename
        res_timeseries['SIMC'] = None
#-------------------------------------------------------------------------------
    # plot the observed and simulated timeseries with the pandas library:
    # with pandas we plot all years one after another, and can zoom in on one
    # particular year

    plt.close('all')

    # create the figure sub-folder if it doesn't already exist
    figsubdir = os.path.join(figdir, 'R10=%s/TER_%s/' % (R10, TER_method))
    if not os.path.exists(figsubdir):
        print 'creating new directory %s' % figsubdir
        os.makedirs(figsubdir)
#-------------------------------------------------------------------------------
    # first, one figure per site and per year
    years = np.arange(2004, 2015, 1)
    for site in sites:
        for year in years:
            timeframe = [year, year]
            print site
            figs, axes = plt.subplots(nrows=4, ncols=1, figsize=(8, 10))
            figs.subplots_adjust(0.1, 0.07, 0.98, 0.95, 0., 0.)
            variables = ['crop_no', 'GPP', 'TER', 'NEE']
            axlabels = ['crop ID', r'GPP (g m$^{-2}$ d$^{-1}$)',
                        r'TER (g m$^{-2}$ d$^{-1}$)',
                        r'NEE (g m$^{-2}$ d$^{-1}$)']
            ylims = [(0., 14.), (-18., 2.), (-1., 12.), (-10., 10.)]
            start = str(int(timeframe[0]))
            end = str(int(timeframe[1]))
            print '[%s:%s]' % (start, end)
            fsz = 14  # fontsize of x and y axis ticks
            for ax, var, axlabel, ylim in zip(axes, variables, axlabels, ylims):
                if (var == 'crop_no'):
                    try:
                        OBS = res_timeseries['OBS'][site][var][start:end].dropna()
                        OBS[~(OBS == -9999.)].plot(ax=ax, lw=2, style='-',
                                                   label='obs', fontsize=fsz)
                        crop_no = OBS[0]
                        minobs = OBS[~(OBS == -9999.)].min()
                        maxobs = OBS[~(OBS == -9999.)].max()
                    except TypeError:
                        minobs = 0.
                        maxobs = 0.
                    # dummy WOFOST bounds for the crop ID panel
                    minwof = 1.
                    maxwof = 1.
                elif (var == 'TER'):
                    # observations
                    try:
                        OBS = res_timeseries['OBS'][site][var][start:end].dropna()
                        OBS[~(OBS == -9999.)].plot(ax=ax, lw=2, c='b',
                                                   style='+', label='obs',
                                                   fontsize=fsz)
                        minobs = OBS[~(OBS == -9999.)].min()
                        maxobs = OBS[~(OBS == -9999.)].max()
                    except TypeError:
                        minobs = 0.
                        maxobs = 0.
                    # SiBCASA sims
                    try:
                        res_timeseries['SIMB'][site][var][start:end].plot(
                            ax=ax, lw=2, c='g', style='--',
                            label='SiBCASA TER', fontsize=fsz)
                    except TypeError:
                        pass
                    # WOFOST sims
                    try:
                        WOF = res_timeseries['SIMC'][site][var][start:end].dropna()
                        WOF.plot(ax=ax, lw=2, c='r', style='x',
                                 label='WOFOST TER', fontsize=fsz)
                        minwof = WOF.min()
                        maxwof = WOF.max()
                    except TypeError:
                        minwof = 0.
                        maxwof = 0.
                        WOF = 0.
                else:
                    # observations
                    try:
                        OBS = res_timeseries['OBS'][site][var][start:end].dropna()
                        OBS[~(OBS == -9999.)].plot(ax=ax, lw=2, c='b',
                                                   style='+', label='obs',
                                                   fontsize=fsz)
                        minobs = OBS[~(OBS == -9999.)].min()
                        maxobs = OBS[~(OBS == -9999.)].max()
                    except TypeError:
                        minobs = 0.
                        maxobs = 0.
                    # SiBCASA sims
                    try:
                        res_timeseries['SIMB'][site][var][start:end].plot(
                            ax=ax, lw=2, c='g', style='--', label='SiBCASA',
                            fontsize=fsz)
                    except TypeError:
                        pass
                    # WOFOST sims
                    try:
                        WOF = res_timeseries['SIMC'][site][var][start:end].dropna()
                        WOF.plot(ax=ax, lw=2, c='r', style='x',
                                 label='WOFOST', fontsize=fsz)
                        minwof = WOF.min()
                        maxwof = WOF.max()
                    except TypeError:
                        minwof = 0.
                        maxwof = 0.
                        WOF = 0.
                ax.axhline(y=0., c='k')
                minvar = math.floor(min(minobs, minwof)) - 1.
                maxvar = math.ceil(max(maxobs, maxwof)) + 1.
                ax.set_ylim(minvar, maxvar)
                if (var == 'GPP'):
                    ax.legend(loc='lower left', prop={'size': 12})
                ax.set_ylabel(axlabel)
                if var != 'NEE':
                    ax.get_xaxis().set_visible(False)
            figs.suptitle(site, fontsize=14)
            figs.savefig(os.path.join(figsubdir, 'crop%i_%s_%i.png' %
                                      (crop_no, site, timeframe[0])))
            plt.close('all')
#-------------------------------------------------------------------------------
    # second, one overview figure per site covering the whole 2004-2014 period
    timeframe = [2004, 2014]
    for site in sites:
        print site
        figs, axes = plt.subplots(nrows=4, ncols=1, figsize=(15, 10))
        figs.subplots_adjust(0.1, 0.07, 0.98, 0.95, 0., 0.)
        variables = ['crop_no', 'GPP', 'TER', 'NEE']
        axlabels = ['crop ID', r'GPP (g m$^{-2}$ d$^{-1}$)',
                    r'TER (g m$^{-2}$ d$^{-1}$)',
                    r'NEE (g m$^{-2}$ d$^{-1}$)']
        ylims = [(0., 14.), (-30., 2.), (-2., 20.), (-20., 10.)]
        start = str(int(timeframe[0]))
        end = str(int(timeframe[1]))
        print '[%s:%s]' % (start, end)
        fsz = 14  # fontsize of x and y axis ticks
        for ax, var, axlabel, ylim in zip(axes, variables, axlabels, ylims):
            if (var == 'crop_no'):
                try:
                    OBS = res_timeseries['OBS'][site][var][start:end].dropna()
                    OBS[~(OBS == -9999.)].plot(ax=ax, lw=2, style='-',
                                               label='obs', fontsize=fsz)
                    crop_no = OBS[0]
                    minobs = OBS[~(OBS == -9999.)].min()
                    maxobs = OBS[~(OBS == -9999.)].max()
                except TypeError:
                    minobs = 0.
                    maxobs = 0.
                # dummy WOFOST bounds for the crop ID panel
                minwof = 1.
                maxwof = 1.
            else:
                # observations
                try:
                    OBS = res_timeseries['OBS'][site][var][start:end].dropna()
                    OBS[~(OBS == -9999.)].plot(ax=ax, lw=2, c='b', style='+',
                                               label='obs', fontsize=fsz)
                    minobs = OBS[~(OBS == -9999.)].min()
                    maxobs = OBS[~(OBS == -9999.)].max()
                except TypeError:
                    minobs = 0.
                    maxobs = 0.
                # SiBCASA sims
                try:
                    res_timeseries['SIMB'][site][var][start:end].plot(
                        ax=ax, lw=2, c='g', style='--', label='SiBCASA',
                        fontsize=fsz)
                except TypeError:
                    pass
                # WOFOST sims
                try:
                    WOF = res_timeseries['SIMC'][site][var][start:end].dropna()
                    WOF.plot(ax=ax, lw=2, c='r', style='x', label='WOFOST',
                             fontsize=fsz)
                    minwof = WOF.min()
                    maxwof = WOF.max()
                except TypeError:
                    minwof = 0.
                    maxwof = 0.
                    WOF = 0.
            ax.axhline(y=0., c='k')
            # here we use the fixed axis limits, not the data-driven ones
            ax.set_ylim(ylim)
            if (var == 'GPP'):
                ax.legend(loc='lower left', prop={'size': 12})
            ax.set_ylabel(axlabel)
            if var != 'NEE':
                ax.get_xaxis().set_visible(False)
        figs.suptitle(site, fontsize=14)
        figs.savefig(os.path.join(figsubdir, 'timeseries_crop%i_%s_%i-%i.png' %
                                  (crop_no, site, timeframe[0], timeframe[1])))
        plt.close('all')
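#-------------------------------------------------------------------------------
# For reference, a minimal sketch of the 'settings.rc' file that the scripts
# in this file read. The keys are the ones actually queried above and further
# down; the values and the exact 'key : value' syntax are illustrative
# assumptions (they depend on the rc module and on the local setup):
#
#   sites              : BE-Lon, FR-Gri, DE-Kli, IT-BCi
#   years              : 2004, 2005, 2006
#   crops              : winter wheat, grain maize
#   crop_nos           : 1, 2
#   TER_method         : grow-only
#   R10                : 0.08
#   resolution         : daily
#   rootdir            : /home/user/project
#   inputdir           : /home/user/project/input
#   outputdir          : /home/user/project/output
#   obsdir             : /home/user/project/obs
#   codedir            : /home/user/project/code
#   force_optimization : False
#   force_forwardsim   : False
#   selec_method       : topn
#   ncells             : 10
#   nsoils             : 10
#   weather            : ECMWF
#   Eact0              : 53.3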
def main():
#===============================================================================
    global inputdir, codedir, outputdir, CGMSdir, ECMWFdir, optimidir,\
           EUROSTATdir, custom_yns
#-------------------------------------------------------------------------------
# ================================= USER INPUT =================================

    # read the settings from the rc file
    rcdict = rc.read('settings.rc')
#===============================================================================
#-------------------------------------------------------------------------------
    # extract the needed information from the rc file
    sites    = [s.strip(' ') for s in rcdict['sites'].split(',')]
    crops    = [s.strip(' ') for s in rcdict['crops'].split(',')]
    crop_nos = [int(s.strip(' ')) for s in rcdict['crop_nos'].split(',')]
    years    = [int(s.strip(' ')) for s in rcdict['years'].split(',')]

    # optimization settings
    force_optimization = str_to_bool(rcdict['force_optimization'])
    selec_method       = rcdict['selec_method']
    ncells             = int(rcdict['ncells'])
    nsoils             = int(rcdict['nsoils'])
    weather            = rcdict['weather']

    # directory paths
    outputdir   = rcdict['outputdir']
    inputdir    = rcdict['inputdir']
    codedir     = rcdict['codedir']
    CGMSdir     = os.path.join(inputdir, 'CGMS')
    ECMWFdir    = os.path.join(inputdir, 'ECMWF')
    EUROSTATdir = os.path.join(inputdir, 'EUROSTATobs')
#-------------------------------------------------------------------------------
    # get the list of NUTS 2 region names associated with the FluxNet sites
    from WOF_00_retrieve_input_data import open_csv
    sitdict = open_csv(inputdir, 'sites_info2.csv', convert_to_float=False)
    NUTS_reg = sitdict['NUTS_reg']
#-------------------------------------------------------------------------------
    # get local yield and sowing date information
    import xlrd
    from xlrd.xldate import xldate_as_datetime
    xl_workbook = xlrd.open_workbook(os.path.join(inputdir, 'site_yields.xlsx'))
    sheet_names = xl_workbook.sheet_names()
    xl_sheet = xl_workbook.sheet_by_name(sheet_names[0])
    xl_sites = xl_sheet.col(0)
    xl_years = xl_sheet.col(1)
    xl_crops = xl_sheet.col(2)
    xl_yield = xl_sheet.col(3)
    xl_sowda = xl_sheet.col(9)
    datemode = xl_workbook.datemode

    custom_yns = []
    for si, ye, cr, so, yi in zip(xl_sites[1:38], xl_years[1:38],
                                  xl_crops[1:38], xl_sowda[1:38],
                                  xl_yield[1:38]):
        sit = str(si.value)
        yea = int(ye.value)
        cro = int(cr.value)
        if int(so.value) != -9999:
            sow = xldate_as_datetime(so.value, datemode)
        else:
            sow = np.nan
        if int(yi.value) != -9999.:
            yie = yi.value
        else:
            yie = np.nan
        custom_yns += [(sit, yea, cro, sow, yie)]
    for row in custom_yns:
        print row
#-------------------------------------------------------------------------------
    # optimize fgap at the locations / years / crops specified by the user
    for s, site in enumerate(sites):
        for c, crop_name in enumerate(crops):
            crop_no = crop_nos[c]
            for year in years:
                # create the output folder if it doesn't already exist
                optimidir = os.path.join(outputdir,
                                         'fgap/%i/c%i/' % (year, crop_no))
                if not os.path.exists(optimidir):
                    print 'creating new directory %s' % optimidir
                    os.makedirs(optimidir)
                # we try to optimize fgap for the NUTS 2, 1 and 0 regions
                for NUTS_level in range(3):
                    NUTS_no = NUTS_reg[s][0:4 - NUTS_level]
                    print '\n', site, NUTS_no, year, crop_name
                    # OPTIMIZATION OF FGAP:
                    yldgapf = optimize_fgap(site, crop_no, crop_name, year,
                                            NUTS_no, selec_method, ncells,
                                            nsoils, weather,
                                            force_optimization)
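#-------------------------------------------------------------------------------
# main() above calls a str_to_bool helper to parse the rc flags; it is not
# defined in this file. A minimal sketch of such a helper, assuming the flags
# are spelled like 'True'/'False' in the rc file (the real implementation may
# accept more spellings):
def str_to_bool(s):
    # normalize and map common boolean spellings
    s = s.strip().lower()
    if s in ('true', 'yes', '1'):
        return True
    if s in ('false', 'no', '0'):
        return False
    raise ValueError('cannot interpret %r as a boolean' % s)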
def main():
#===============================================================================
    global wofostdir, sibcasadir, obsdir
#-------------------------------------------------------------------------------
# ================================= USER INPUT =================================

    # read the settings from the rc file (mostly directory paths)
    rcdict     = rc.read('settings.rc')
    sites      = [s.strip(' ') for s in rcdict['sites'].split(',')]
    years      = [int(s.strip(' ')) for s in rcdict['years'].split(',')]
    TER_method = rcdict['TER_method']  # selects the corresponding WOFOST output file
    R10        = rcdict['R10']         # selects the corresponding WOFOST output file
    resolution = rcdict['resolution']
#===============================================================================
#-------------------------------------------------------------------------------
    # extract the needed information: input data directory paths
    rootdir    = rcdict['rootdir']
    sibcasadir = os.path.join(rootdir, 'intercomparison_study/SiBCASA_runs')
    wofostdir  = rcdict['outputdir']
    obsdir     = rcdict['obsdir']
    figdir     = os.path.join(rootdir, 'intercomparison_study/figures')
#-------------------------------------------------------------------------------
    # Start a dictionary to store OBS, SIMC (WOFOST), SIMB (SiBCASA)

    # recover the FluxNet observed data from pickle files
    res_timeseries = dict()
    res_timeseries['OBS'] = dict()
    res_timeseries['SIMB'] = dict()
    res_timeseries['SIMC'] = dict()

    filename = os.path.join(obsdir, '%s_timeseries_OBS.pickle' % resolution)
    try:
        res_timeseries['OBS'] = pickle_load(open(filename, 'rb'))
    except IOError:
        print 'could not find the observations output file %s' % filename
        res_timeseries['OBS'] = None

    # recover the SiBCASA runs
    filename = os.path.join(sibcasadir,
                            '%s_timeseries_SiBCASA.pickle' % resolution)
    try:
        res_timeseries['SIMB'] = pickle_load(open(filename, 'rb'))
    except IOError:
        print 'could not find the SiBCASA output file %s' % filename
        res_timeseries['SIMB'] = None

    # recover the WOFOST runs
    filename = os.path.join(wofostdir, '%s_timeseries_' % resolution +
               '%s_R10=%s_WOFOST_crop_rotation.pickle' % (TER_method, R10))
    try:
        res_timeseries['SIMC'] = pickle_load(open(filename, 'rb'))
    except IOError:
        print 'could not find the WOFOST output file %s' % filename
        res_timeseries['SIMC'] = None
#-------------------------------------------------------------------------------
    # plot the observed and simulated timeseries with the pandas library:
    # with pandas we plot all years one after another, and can zoom in on one
    # particular year

    plt.close('all')

    # create the figure sub-folder if it doesn't already exist
    figsubdir = os.path.join(figdir, 'R10=%s/TER_%s/' % (R10, TER_method) +
                             '3-hourly_fluxes_perf')
    if not os.path.exists(figsubdir):
        print 'creating new directory %s' % figsubdir
        os.makedirs(figsubdir)
#-------------------------------------------------------------------------------
    # we plot the 3-hourly fluxes of simulations versus observations for the
    # years and sites that perform well at the daily scale: years 2005, 2009
    # and 2013 for site BE-Lon, which showed an extremely good result in the
    # SIM vs OBS daily flux comparison
    years = [2005, 2009, 2013]
    variables = ['GPP', 'TER', 'NEE']
    axlabels = [r'GPP (g m$^{-2}$ d$^{-1}$)',
                r'TER (g m$^{-2}$ d$^{-1}$)',
                r'NEE (g m$^{-2}$ d$^{-1}$)']
    ylims = [(-60., 5.), (0., 20.), (-50., 15.)]
    one_to_one = np.arange(-100, 100, 10)

    for site in ['BE-Lon']:
        if site != 'IT-BCi':
            for year in years:
                timeframe = [year, year]
                start = str(int(timeframe[0])) + '-05-01'
                end = str(int(timeframe[1])) + '-07-01'
                print site
                for var, axlabel, lim in zip(variables, axlabels, ylims):
                    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))
                    fig.subplots_adjust(0.15, 0.15, 0.85, 0.85, 0., 0.)
                    # select every 6th half-hourly flux in the observations,
                    # to get to 3-hourly frequency
                    OBS = res_timeseries['OBS'][site][var][::6][start:end].dropna()
                    # convert the 3-hourly simulated fluxes from gC m-2 s-1
                    # to micromol CO2 m-2 s-1
                    SIM = res_timeseries['SIMC'][site][var][start:end].dropna()
                    SIM = SIM * 1000000. / 12.01
                    # the observed GPP needs a minus sign by convention
                    if var == 'GPP':
                        OBS = -OBS
                    # use the min and max to frame the figure
                    print var, min(min(OBS), min(SIM)), max(max(OBS), max(SIM))
                    varmin = math.floor(min(min(OBS), min(SIM)))
                    varmax = math.ceil(max(max(OBS), max(SIM)))
                    ax.scatter(OBS, SIM, marker='o')
                    # fit a linear regression line through the OBS/SIM
                    # scatter, then plot the line and its r2
                    mask = ~np.isnan(SIM)
                    z = np.polyfit(OBS[mask], SIM[mask], 1)
                    p = np.poly1d(z)
                    ax.plot(one_to_one, p(one_to_one), 'r-')
                    slope, intercept, r_value, p_value, std_err = \
                                               linreg(OBS[mask], SIM[mask])
                    ax.annotate(r'r$^2$ = %.2f' % r_value**2, xy=(0.95, 0.15),
                                xytext=(0.15, 0.9), xycoords='axes fraction',
                                ha='center', va='center', color='r')
                    ax.plot(one_to_one, one_to_one, c='k', lw=1)
                    ax.set_xlabel('obs')
                    ax.set_ylabel('sim')
                    ax.set_xlim(varmin, varmax)
                    ax.set_ylim(varmin, varmax)
                    fig.suptitle(r'%s 3-hourly %s fluxes ($\mu$' % (site, var) +
                                 r'mol m$^{-2}$ s$^{-1}$)' +
                                 '\nfrom %s to %s\n' % (start, end))
                    fig.savefig(os.path.join(figsubdir, '%s_%s_%s.png' %
                                             (site, year, var)), dpi=300)
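#-------------------------------------------------------------------------------
# The linreg call above unpacks five values (slope, intercept, r_value,
# p_value, std_err), which matches the return signature of
# scipy.stats.linregress; presumably the script header aliases it along these
# lines:
from scipy.stats import linregress as linreg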
def main():
#===============================================================================
#-------------------------------------------------------------------------------
# ================================= USER INPUT =================================

    # read the settings from the rc file
    rcdict = rc.read('settings.rc')
#===============================================================================
#-------------------------------------------------------------------------------
    # extract the needed information from the rc file
    sites      = [s.strip(' ') for s in rcdict['sites'].split(',')]
    resolution = rcdict['resolution']  # can be '3-hourly' or 'daily'

    # directory paths
    fluxnetdir = rcdict['obsdir']
    obsdir     = os.path.join(fluxnetdir, 'regrouped_data')
#-------------------------------------------------------------------------------
    if resolution == 'daily':
        filelist = ['BE-Lon_FLUXNET2015_FULLSET_DD_2004-2014.csv',
                    'FR-Gri_FLUXNET2015_FULLSET_DD_2004-2013.csv',
                    'DE-Kli_FLUXNET2015_FULLSET_DD_2004-2014.csv',
                    'IT-BCi_mais_2004-2009_daily.csv']
    elif resolution == '3-hourly':
        filelist = ['BE-Lon_FLUXNET2015_FULLSET_HH_2004-2014.csv',
                    'FR-Gri_FLUXNET2015_FULLSET_HH_2004-2013.csv',
                    'DE-Kli_FLUXNET2015_FULLSET_HH_2004-2014.csv',
                    'IT-BCi_mais_2004-2009_daily.csv']
#-------------------------------------------------------------------------------
    # Extract timeseries for the different sites

    # read the files of the different sites
    f = open_csv(obsdir, filelist, convert_to_float=True)

    series = dict()
    filepath = os.path.join(fluxnetdir, '%s_timeseries_OBS.pickle' % resolution)

    for fnam, site in zip(filelist, sites):
        print site
        # TA_F_DAY:         average daytime Ta from meas and ERA (degC)
        # SW_IN_F:          SWin from meas and ERA (W.m-2)
        # VPD_F:            VPD consolidated from VPD_F_MDS and VPD_F_ERA (hPa)
        # TS_F_MDS_1 to 4:  Tsoil of 4 soil layers (degC)
        # SWC_F_MDS_1 to 4: soil water content (%) of 4 layers (1=shallow)
        # NT  = night-time partitioning method (gC m-2 s-1)
        # VUT = variable reference u* between years
        FLUX_variables = ['TA_F_DAY', 'SW_IN_F', 'VPD_F', 'TS_F_MDS_1',
                          'TS_F_MDS_2', 'TS_F_MDS_3', 'SWC_F_MDS_1',
                          'SWC_F_MDS_2', 'SWC_F_MDS_3', 'GPP_NT_VUT_REF',
                          'RECO_NT_VUT_REF', 'NEE_VUT_REF', 'crop', 'LAI',
                          'AGB', 'C_height']
        FLUX_varnames = ['Ta_day', 'SWin', 'VPD', 'Ts_1', 'Ts_2', 'Ts_3',
                         'SWC_1', 'SWC_2', 'SWC_3', 'GPP', 'TER', 'NEE',
                         'crop_no', 'LAI', 'AGB', 'CHT']
        IT_variables = ['SWC_avg', 'GPP', 'Reco', 'NEE', 'crop', 'GLAI',
                        'AGB', 'C_height']
        IT_varnames = ['SWC', 'GPP', 'TER', 'NEE', 'crop_no', 'LAI', 'AGB',
                       'CHT']

        # timestamps for all daily timeseries
        startyear = str(f[fnam]['TIMESTAMP'][0])[0:4]
        endyear = str(f[fnam]['TIMESTAMP'][-1])[0:4]
        startdate = '%s-01-01 00:00:00' % startyear
        enddate = '%s-12-31 23:30:00' % endyear
        if site == 'DE-Kli':
            enddate = '%s-12-31 23:00:00' % endyear
        series[site] = dict()

        if resolution == '3-hourly':
            tm = pd.date_range(startdate, enddate, freq='30min')
            if (site != 'IT-BCi'):
                for var, varname in zip(FLUX_variables[:12],
                                        FLUX_varnames[:12]):
                    # the fluxes are half-hourly here; they are stored as-is
                    # and subsampled to 3-hourly in the plotting scripts
                    if varname == 'Ta_day':
                        series[site]['Ta'] = pd.Series(f[fnam]['TA_F'],
                                                       index=tm)
                    elif ((varname == 'SWC_2' or varname == 'SWC_3')
                          and site == 'FR-Gri'):
                        series[site][varname] = pd.Series([-9999.] * len(tm),
                                                          index=tm)
                    else:
                        series[site][varname] = pd.Series(f[fnam][var],
                                                          index=tm)
                    print varname
        elif resolution == 'daily':
            tm = pd.date_range(startdate, enddate, freq='1d')
            if (site != 'IT-BCi'):
                for var, varname in zip(FLUX_variables, FLUX_varnames):
                    series[site][varname] = pd.Series(f[fnam][var], index=tm)
                    print varname
            else:
                tm_irreg = [pd.to_datetime('%s-%s-%s' % (str(t)[0:4],
                            str(t)[4:6], str(t)[6:8]))
                            for t in f[fnam]['TIMESTAMP']]
                # since the time record has gaps in the IT-BCi data, we use a
                # special function to fill the gaps with -9999. values and
                # convert it to a pandas timeseries
                for var, varname in zip(IT_variables, IT_varnames):
                    dayvar = f[fnam][var]
                    series[site][varname] = convert2pandas(tm_irreg, dayvar,
                                                           tm)
                    print varname
        else:
            print "Wrong CO2 fluxes temporal resolution: must be either " +\
                  "'daily' or '3-hourly'"
            sys.exit()

    # we store the pandas series in one pickle file
    pickle_dump(series, open(filepath, 'wb'))
#-------------------------------------------------------------------------------
    # plot timeseries
    # Let's plot the available micromet variables that are important for WOFOST
    #plot_fluxnet_micromet(obsdir, sites, [2005, 2005], '-')
    # Let's plot GPP, TER, NEE
    #plot_fluxnet_daily_c_fluxes(obsdir, sites, [2004, 2014], '-')
    #plot_fluxnet_LAI_CHT_AGB(obsdir, sites, [2004, 2014], 'o')
#-------------------------------------------------------------------------------
    return series
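#-------------------------------------------------------------------------------
# convert2pandas is used above to put the gappy IT-BCi record on the regular
# daily time axis, but it is not defined in this file. A minimal sketch that
# is consistent with that usage (irregular timestamps and values reindexed
# onto the regular axis, missing days filled with -9999.):
def convert2pandas(tm_irreg, dayvar, tm_reg):
    # build a series on the irregular dates, then align it on the regular
    # index; days absent from tm_irreg become -9999. fill values
    irreg = pd.Series(dayvar, index=tm_irreg)
    return irreg.reindex(tm_reg, fill_value=-9999.)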
def main():
#===============================================================================
    global inputdir, codedir, outputdir, CGMSdir, obsdir
#-------------------------------------------------------------------------------
    import cx_Oracle
    import sqlalchemy as sa
    from datetime import datetime
#-------------------------------------------------------------------------------
# ================================= USER INPUT =================================

    # read the settings from the rc file
    rcdict = rc.read('settings.rc')
#===============================================================================
#-------------------------------------------------------------------------------
    # extract the needed information from the rc file
    sites    = [s.strip(' ') for s in rcdict['sites'].split(',')]
    crops    = [s.strip(' ') for s in rcdict['crops'].split(',')]
    crop_nos = [int(s.strip(' ')) for s in rcdict['crop_nos'].split(',')]
    years    = [int(s.strip(' ')) for s in rcdict['years'].split(',')]
    obsdir   = rcdict['obsdir']
    inputdir = rcdict['inputdir']
    CGMSdir  = os.path.join(inputdir, 'CGMS')
    codedir  = rcdict['codedir']
#-------------------------------------------------------------------------------
    # get the closest CGMS grid cell id number for each FluxNet site

    # get the sites' longitudes and latitudes
    sitdict = open_csv(os.path.join(obsdir, 'regrouped_data'),
                       'sites_info.txt', convert_to_float=False)
    site_lons = sitdict['site_lons']
    site_lats = sitdict['site_lats']

    # we read the CGMS grid cell coordinates from file
    CGMS_cells = open_csv(CGMSdir, 'CGMS_grid_list.csv', convert_to_float=True)
    all_grids = CGMS_cells['GRID_NO']
    all_lons = CGMS_cells['LONGITUDE']
    all_lats = CGMS_cells['LATITUDE']

    flux_gri = dict()
    for i, site in enumerate(sitdict['sites']):
        lon = float(site_lons[i])
        lat = float(site_lats[i])
        # compute the distance to the site for all CGMS grid cells
        dist_list = list()
        for j, grid_no in enumerate(all_grids):
            distance = ((all_lons[j] - lon)**2. +
                        (all_lats[j] - lat)**2.)**(1. / 2.)
            dist_list += [distance]
        # select the closest grid cell
        indx = np.argmin(np.array(dist_list))
        flux_gri[site] = all_grids[indx]
        print 'FluxNet site %s with lon=%5.2f, lat=%5.2f: closest grid '\
              'cell is %i' % (site, lon, lat, all_grids[indx])
#-------------------------------------------------------------------------------
    # create a new file with the grid cell numbers in it
    filename = os.path.join(inputdir, 'sites_info2.csv')
    newres = open(filename, 'wb')
    oldres = open(os.path.join(obsdir, 'regrouped_data/sites_info.txt'), 'rU')
    reader = oldres.readlines()
    oldres.close()
    for l, line in enumerate(reader):
        site = line.split(',')[0].strip(' ')
        if l == 0:
            line = line.strip('\n') + ', gridcells\n'
        else:
            line = line.strip('\n') + ',%10i' % int(flux_gri[site]) + '\n'
        newres.write(line)
    newres.close()
    print '\nWe successfully created the input file with grid cell IDs:'\
          '\n%s' % filename
#-------------------------------------------------------------------------------
    # retrieve the necessary input data for all sites

    # settings of the database connection
    user = "******"
    password = "******"
    tns = "EURDAS.WORLD"
    dsn = "oracle+cx_oracle://{user}:{pw}@{tns}".format(user=user,
                                                        pw=password, tns=tns)
    engine = sa.create_engine(dsn)
    print engine

    # test the connection:
    try:
        connection = cx_Oracle.connect("cgms12eu_select/[email protected]")
    except cx_Oracle.DatabaseError:
        print '\nBEWARE!! The Oracle database is not responding. Probably, you are'
        print 'not using a computer wired within the Wageningen University network.'
        print '--> Get connected with ethernet cable before trying again!'
        sys.exit()

    for c, crop in enumerate(crops):
        crop_no = crop_nos[c]
        print '\nRetrieving input data for %s (CGMS id=%i)' % (crop, crop_no)
        # We add a timestamp at the start of the retrieval
        start_timestamp = datetime.utcnow()

        # We retrieve the list of suitable soil types for the selected crop
        # species
        filename = os.path.join(CGMSdir, 'soildata_objects/',
                                'suitablesoilsobject_c%d.pickle' % (crop_no))
        if os.path.exists(filename):
            suitable_stu = pickle_load(open(filename, 'rb'))
        else:
            from pcse.db.cgms11 import STU_Suitability
            suitable_stu = list(STU_Suitability(engine, crop_no))
            pickle_dump(suitable_stu, open(filename, 'wb'))
        print 'retrieving suitable soils for %s' % crop

        # WE LOOP OVER ALL YEARS:
        for y, year in enumerate(years):
            print '\n######################## Year %i ########################\n' % year
            # if we do a serial iteration, we loop over the grid cells that
            # contain arable land
            for grid in flux_gri.values():
                retrieve_CGMS_input(grid, year, crop_no, suitable_stu, engine)

        # We add a timestamp at the end of the retrieval, to time the process
        end_timestamp = datetime.utcnow()
        print '\nDuration of the retrieval:', end_timestamp - start_timestamp
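#-------------------------------------------------------------------------------
# The suitable-soils block above follows a load-or-compute caching pattern:
# unpickle the object if the file exists, otherwise query the database and
# pickle the result for next time. A generic sketch of that pattern
# (cached_or_compute is a hypothetical helper name, not in the original code):
def cached_or_compute(filename, compute_func):
    # load a pickled result if it exists, else compute and cache it
    if os.path.exists(filename):
        return pickle_load(open(filename, 'rb'))
    result = compute_func()
    pickle_dump(result, open(filename, 'wb'))
    return result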
def main():
#===============================================================================
    global outputdir, obsdir
#-------------------------------------------------------------------------------
# ================================= USER INPUT =================================

    # read the settings from the rc file
    rcdict = rc.read('settings.rc')
#===============================================================================
#-------------------------------------------------------------------------------
    # extract the needed information for this script
    sites      = [s.strip(' ') for s in rcdict['sites'].split(',')]
    years      = [s.strip(' ') for s in rcdict['years'].split(',')]
    TER_method = rcdict['TER_method']
    R10        = rcdict['R10']
    resolution = rcdict['resolution']  # can be '3-hourly' or 'daily'
    if resolution == 'daily':
        res = '1d'
    elif resolution == '3-hourly':
        res = '3H'

    # directory paths
    outputdir = rcdict['outputdir']
    obsdir    = rcdict['obsdir']
    forwardir = os.path.join(outputdir, 'forward_runs')
#-------------------------------------------------------------------------------
    # load the WOFOST runs of all crops: the pandas series of all sites are
    # stored in one pickle file
    filepath = os.path.join(forwardir, '%s_timeseries_' % resolution +
                            '%s_WOFOST.pickle' % TER_method)
    series = pickle_load(open(filepath, 'rb'))

    filepath = os.path.join(obsdir, 'daily_timeseries_OBS.pickle')
    obs = pickle_load(open(filepath, 'rb'))

    final_series = dict()
    for s, site in enumerate(sites):
        print site
        print obs[site].keys()
        final_series[site] = dict()

        # read the crop rotation from the FluxNet file
        rotation = obs[site]['crop_no']

        # slice each year's required time series, append to the final series
        for varname in ['GPP', 'TER', 'Raut', 'Rhet', 'NEE']:
            print 'variable %s' % varname
            var = []
            for year in years:
                # get the crop number for that year
                if site != 'IT-BCi':
                    try:
                        crop_no = rotation[year:year][0]
                    except IndexError:
                        # an index error occurs when the year is not in the
                        # rotation time series
                        startdate = '%s-01-01 00:00:00' % year
                        enddate = '%s-12-31 23:59:59' % year
                        dtimes = pd.date_range(startdate, enddate, freq=res)
                        na_vals = np.array(len(dtimes) * [np.nan])
                        var += [pd.Series(na_vals, index=dtimes)]
                        print ' ', site, year, 'unknown crop cover: skip.'
                        continue
                elif site == 'IT-BCi':
                    if int(year) not in np.arange(2004, 2010, 1):
                        startdate = '%s-01-01 00:00:00' % year
                        enddate = '%s-12-31 23:59:59' % year
                        dtimes = pd.date_range(startdate, enddate, freq=res)
                        na_vals = np.array(len(dtimes) * [np.nan])
                        var += [pd.Series(na_vals, index=dtimes)]
                        print ' ', site, year, 'unknown crop cover: skip.'
                        continue
                    else:
                        crop_no = 2

                # try slicing and concatenating that year's timeseries from
                # file
                try:
                    # if GPP = 0 (failed growing season), we set TER and NEE
                    # to zero as well
                    if np.mean(series[site]['c%i' % crop_no]['GPP'][year:year]) == 0.:
                        startdate = '%s-01-01 00:00:00' % year
                        enddate = '%s-12-31 23:59:59' % year
                        dtimes = pd.date_range(startdate, enddate, freq=res)
                        zeros = np.array(len(dtimes) * [0.])
                        var += [pd.Series(zeros, index=dtimes)]
                    else:
                        var += [series[site]['c%i' % crop_no][varname][year:year]]
                    print ' ', site, year, '%2i' % crop_no, 'slicing'
                except KeyError:
                    # a key error occurs when we haven't run a crop or a year
                    # with WOFOST
                    startdate = '%s-01-01 00:00:00' % year
                    enddate = '%s-12-31 23:59:59' % year
                    dtimes = pd.date_range(startdate, enddate, freq=res)
                    na_vals = np.array(len(dtimes) * [np.nan])
                    var += [pd.Series(na_vals, index=dtimes)]
                    print ' ', site, year, '%2i' % crop_no, 'skip.'

            final_series[site][varname] = pd.concat(var)

    # store the final WOFOST timeseries
    filepath = os.path.join(outputdir, '%s_timeseries_' % resolution +
                            '%s_R10=%s_WOFOST_crop_rotation.pickle' %
                            (TER_method, R10))
    pickle_dump(final_series, open(filepath, 'wb'))
    print 'successfully dumped %s' % filepath
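#-------------------------------------------------------------------------------
# The year loop above builds the same full-year placeholder series in four
# places; it could be factored into a small helper like this sketch
# (placeholder_year is a hypothetical name, not part of the original code):
def placeholder_year(year, res, fill=np.nan):
    # a full-year pandas Series at frequency res, filled with a constant:
    # NaN for missing crops/runs, 0. for failed growing seasons
    dtimes = pd.date_range('%s-01-01 00:00:00' % year,
                           '%s-12-31 23:59:59' % year, freq=res)
    return pd.Series(np.array(len(dtimes) * [fill]), index=dtimes)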
def main():
#===============================================================================
    global inputdir, codedir, outputdir, CGMSdir, ECMWFdir, optimidir,\
           forwardir, EUROSTATdir, mmC, mmCO2, mmCH2O
#-------------------------------------------------------------------------------
    # fixed molar masses for unit conversion of carbon fluxes
    mmC    = 12.01
    mmCO2  = 44.01
    mmCH2O = 30.03

# ================================= USER INPUT =================================

    # read the settings from the rc file
    rcdict = rc.read('settings.rc')
#===============================================================================
#-------------------------------------------------------------------------------
    # extract the needed information from the rc file
    sites    = [s.strip(' ') for s in rcdict['sites'].split(',')]
    crops    = [s.strip(' ') for s in rcdict['crops'].split(',')]
    crop_nos = [int(s.strip(' ')) for s in rcdict['crop_nos'].split(',')]
    years    = [int(s.strip(' ')) for s in rcdict['years'].split(',')]

    # forward run settings
    force_forwardsim = str_to_bool(rcdict['force_forwardsim'])
    selec_method     = rcdict['selec_method']
    ncells           = int(rcdict['ncells'])
    nsoils           = int(rcdict['nsoils'])
    weather          = rcdict['weather']

    # carbon cycle settings
    TER_method = rcdict['TER_method']  # if grow-only: NEE = GPP + Rgrow + Rsoil
    Eact0      = float(rcdict['Eact0'])
    R10        = float(rcdict['R10'])
    resolution = rcdict['resolution']  # can be '3-hourly' or 'daily'

    # directory paths
    outputdir   = rcdict['outputdir']
    inputdir    = rcdict['inputdir']
    codedir     = rcdict['codedir']
    CGMSdir     = os.path.join(inputdir, 'CGMS')
    ECMWFdir    = os.path.join(inputdir, 'ECMWF')
    EUROSTATdir = os.path.join(inputdir, 'EUROSTATobs')
#-------------------------------------------------------------------------------
    # get the sites' longitudes and latitudes
    from WOF_00_retrieve_input_data import open_csv
    sitdict = open_csv(inputdir, 'sites_info2.csv', convert_to_float=False)
    site_lons = [float(l) for l in sitdict['site_lons']]
    site_lats = [float(l) for l in sitdict['site_lats']]
    gridcells = [int(g) for g in sitdict['gridcells']]
    NUTS_reg  = sitdict['NUTS_reg']
#-------------------------------------------------------------------------------
    # run WOFOST at the locations / years / crops specified by the user
    print '\nYLDGAPF(-), grid_no, year, stu_no, stu_area(ha), '\
         +'TSO(kgDM.ha-1), TLV(kgDM.ha-1), TST(kgDM.ha-1), '\
         +'TRT(kgDM.ha-1), maxLAI(m2.m-2), rootdepth(cm), TAGP(kgDM.ha-1)'

    # we format the time series using the pandas library, for easy plotting
    startdate = '%i-01-01 00:00:00' % years[0]
    enddate = '%i-12-31 23:59:59' % years[-1]
    if resolution == 'daily':
        dtimes = pd.date_range(startdate, enddate, freq='1d')
    elif resolution == '3-hourly':
        dtimes = pd.date_range(startdate, enddate, freq='3H')
    else:
        print "Wrong CO2 fluxes temporal resolution: must be either "\
              "'daily' or '3-hourly'"
        sys.exit()

    series = dict()
    for s, site in enumerate(sites):
        lon = site_lons[s]
        lat = site_lats[s]
        grid_no = gridcells[s]
        NUTS_no = NUTS_reg[s]
        series[site] = dict()
        for c, crop_name in enumerate(crops):
            cpno = crop_nos[c]
            series[site]['c%i' % cpno] = dict()
            list_of_gpp = np.array([])
            list_of_raut = np.array([])
            list_of_rhet = np.array([])
            list_of_ter = np.array([])
            list_of_nee = np.array([])
            for year in years:
                # folder where the optimized fgap is stored
                optimidir = os.path.join(outputdir,
                                         'fgap/%i/c%i/' % (year, cpno))
                # create the output folder if it doesn't already exist
                forwardir = os.path.join(outputdir,
                                         'forward_runs/%i/c%i/' % (year, cpno))
                if not os.path.exists(forwardir):
                    os.makedirs(forwardir)
                print '\n', site, NUTS_no, year, crop_name

                # RETRIEVE OPTIMUM FGAP:
                # either the NUTS2 optimum if it exists
                ygf_path = os.path.join(optimidir,
                                        'fgap_%s_optimized.pickle' % NUTS_no)
                # or the gapfilled version
                if not os.path.exists(ygf_path):
                    ygf_file = [f for f in os.listdir(optimidir)
                                if (NUTS_no in f) and ('_gapfilled' in f)][0]
                    ygf_path = os.path.join(optimidir, ygf_file)
                fgap_info = pickle_load(open(ygf_path, 'rb'))
                yldgapf = fgap_info[2]

                # FORWARD SIMULATIONS:
                perform_yield_sim(cpno, grid_no, int(year), yldgapf,
                                  selec_method, nsoils, force_forwardsim)
                # POST-PROCESSING OF GPP, RAUTO, RHET, NEE:
                SimData = compute_timeseries_fluxes(cpno, grid_no, lon, lat,
                                                    year, R10, Eact0,
                                                    selec_method, nsoils,
                                                    TER_method=TER_method,
                                                    scale=resolution)
                list_of_gpp = np.concatenate([list_of_gpp, SimData[1]], axis=0)
                list_of_raut = np.concatenate([list_of_raut, SimData[2]], axis=0)
                list_of_rhet = np.concatenate([list_of_rhet, SimData[3]], axis=0)
                list_of_ter = np.concatenate([list_of_ter, SimData[4]], axis=0)
                list_of_nee = np.concatenate([list_of_nee, SimData[5]], axis=0)
            print dtimes, list_of_gpp
            series[site]['c%i' % cpno]['GPP'] = pd.Series(list_of_gpp, index=dtimes)
            series[site]['c%i' % cpno]['Raut'] = pd.Series(list_of_raut, index=dtimes)
            series[site]['c%i' % cpno]['Rhet'] = pd.Series(list_of_rhet, index=dtimes)
            series[site]['c%i' % cpno]['TER'] = pd.Series(list_of_ter, index=dtimes)
            series[site]['c%i' % cpno]['NEE'] = pd.Series(list_of_nee, index=dtimes)

    # we store the pandas series of all sites in one pickle file
    filepath = os.path.join(outputdir, 'forward_runs/' +
                            '%s_timeseries_%s_WOFOST.pickle' %
                            (resolution, TER_method))
    pickle_dump(series, open(filepath, 'wb'))
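#-------------------------------------------------------------------------------
# mmC, mmCO2 and mmCH2O above are molar masses used to convert the carbon
# fluxes between mass and molar units; for instance, the plotting script
# converts gC m-2 s-1 to micromol CO2 m-2 s-1 with the literal factor
# 1000000./12.01, i.e. a division by the molar mass of carbon. A sketch of
# that conversion as a function (hypothetical helper, not in the original):
def gC_to_micromol_CO2(flux_gC, mmC=12.01):
    # 1 gC = 1/12.01 mol C = 1/12.01 mol CO2; times 1e6 for micromol
    return flux_gC * 1.e6 / mmC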
#-------------------------------------------------------------------------------
    # plot the full 2004-2014 timeseries of the observations and simulations
    timeframe = [2004, 2014]
    for site in sites:
        print site
        figs, axes = plt.subplots(nrows=4, ncols=1, figsize=(15, 10))
        figs.subplots_adjust(0.1, 0.07, 0.98, 0.95, 0., 0.)
        variables = ['crop_no', 'GPP', 'TER', 'NEE']
        axlabels  = ['crop ID', r'GPP (g m$^{-2}$ d$^{-1}$)',
                     r'TER (g m$^{-2}$ d$^{-1}$)', r'NEE (g m$^{-2}$ d$^{-1}$)']
        ylims     = [(0., 14.), (-30., 2.), (-2., 20.), (-20., 10.)]

        start = str(int(timeframe[0]))
        end   = str(int(timeframe[1]))
        print '[%s:%s]'%(start, end)

        fsz = 14     # fontsize of x and y axis ticks
        crop_no = 0  # default crop ID used in the figure name if obs are missing

        for ax, var, axlabel, ylim in zip(axes, variables, axlabels, ylims):
            if (var == 'crop_no'):
                try:
                    OBS = res_timeseries['OBS'][site][var][start:end].dropna()
                    # mask the -9999. fill values before plotting
                    OBS[~(OBS == -9999.)].plot(ax=ax, lw=2, style='-',
                                               label='obs', fontsize=fsz)
                    crop_no = OBS[0]
                    minobs = OBS[~(OBS == -9999.)].min()
                    maxobs = OBS[~(OBS == -9999.)].max()
                except TypeError:
                    minobs = 0.
                    maxobs = 0.
                    minwof = 1.
                    maxwof = 1.
            else:
                # observations
                try:
                    OBS = res_timeseries['OBS'][site][var][start:end].dropna()
                    OBS[~(OBS == -9999.)].plot(ax=ax, lw=2, c='b', style='+',
                                               label='obs', fontsize=fsz)
                    minobs = OBS[~(OBS == -9999.)].min()
                    maxobs = OBS[~(OBS == -9999.)].max()
                except TypeError:
                    minobs = 0.
                    maxobs = 0.
                # SiBCASA sims
                try:
                    res_timeseries['SIMB'][site][var][start:end].plot(ax=ax,
                        lw=2, c='g', style='--', label='SiBCASA', fontsize=fsz)
                except TypeError:
                    pass
                # WOFOST sims
                try:
                    WOF = res_timeseries['SIMC'][site][var][start:end].dropna()
                    WOF.plot(ax=ax, lw=2, c='r', style='x', label='WOFOST',
                             fontsize=fsz)
                    minwof = WOF.min()
                    maxwof = WOF.max()
                except TypeError:
                    minwof = 0.
                    maxwof = 0.
                    WOF = 0.
            ax.axhline(y=0., c='k')
            minvar = math.floor(min(minobs, minwof)) - 1.
            maxvar = math.ceil(max(maxobs, maxwof)) + 1.
            #ax.set_ylim(minvar, maxvar)
            ax.set_ylim(ylim)
            if (var == 'GPP'):
                ax.legend(loc='lower left', prop={'size': 12})
            ax.set_ylabel(axlabel)
            if var != 'NEE':
                ax.get_xaxis().set_visible(False)

        figs.suptitle(site, fontsize=14)
        figs.savefig(os.path.join(figsubdir, 'timeseries_crop%i_%s_%i-%i.png'%(
                     crop_no, site, timeframe[0], timeframe[1])))
        plt.close('all')
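#-------------------------------------------------------------------------------
# For reference, a hypothetical settings.rc combining the keys that the scripts
# above read through rc.read(). All values are placeholders, and the rc format
# is assumed to be simple 'key : value' lines:
#
# sites       : BE-Lon, FR-Gri
# years       : 2004, 2005, 2006
# crops       : winter wheat
# crop_nos    : 1
# TER_method  : grow-only
# R10         : 0.08
# rootdir     : /path/to/rootdir
# inputdir    : /path/to/inputdir
# outputdir   : /path/to/outputdir
# obsdir      : /path/to/obsdir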
def main():
#===============================================================================
    global inputdir, outputdir, optimidir
#-------------------------------------------------------------------------------
# ================================= USER INPUT =================================

    # read the settings from the rc file
    rcdict   = rc.read('settings.rc')
#===============================================================================
    # extract the needed information from the rc file
    sites    = [s.strip(' ') for s in rcdict['sites'].split(',')]
    #NUTS_reg = [s.strip(' ') for s in rcdict['NUTS_reg'].split(',')]
    crops    = [s.strip(' ') for s in rcdict['crops'].split(',')]
    crop_nos = [int(s.strip(' ')) for s in rcdict['crop_nos'].split(',')]
    years    = [int(s.strip(' ')) for s in rcdict['years'].split(',')]

    # directory paths
    outputdir = rcdict['outputdir']
    inputdir  = rcdict['inputdir']

#-------------------------------------------------------------------------------
    # get the list of NUTS 2 region names associated with the FluxNet sites
    from WOF_00_retrieve_input_data import open_csv
    sitdict  = open_csv(inputdir, 'sites_info2.csv', convert_to_float=False)
    NUTS_reg = sitdict['NUTS_reg']

#-------------------------------------------------------------------------------
    # list the old gap-filled files, and remove them all
    for s, site in enumerate(sites):
        for c, crop_name in enumerate(crops):
            crop_no = crop_nos[c]
            for year in years:
                optimidir = os.path.join(outputdir, 'fgap/%i/c%i/'%(year, crop_no))
                files2remove = [f for f in os.listdir(optimidir)
                                if '_gapfilled' in f]
                for f in files2remove:
                    os.remove(os.path.join(optimidir, f))

#-------------------------------------------------------------------------------
    # gap fill
    for s, site in enumerate(sites):
        NUTS_no = NUTS_reg[s]
        for c, crop_name in enumerate(crops):
            crop_no = crop_nos[c]
            for year in years:
                optimidir = os.path.join(outputdir, 'fgap/%i/c%i/'%(year, crop_no))

                # detect whether this year needs to be gap-filled
                f2gapfill = [f for f in os.listdir(optimidir)
                             if ('_tobegapfilled' in f) and (NUTS_no in f)]
                if len(f2gapfill) == 0:
                    continue

                print '\nWe gap fill:', site, NUTS_no, year, crop_name

                # GAP-FILLING YLDGAPF for NUTS2 level:
                prevyear = os.path.join(optimidir.replace('%04d'%year,
                           '%04d'%(year-1)), 'fgap_%s_optimized.pickle'%NUTS_no)
                nextyear = os.path.join(optimidir.replace('%04d'%year,
                           '%04d'%(year+1)), 'fgap_%s_optimized.pickle'%NUTS_no)
                availfiles = []
                availyears = []
                for yr in range(1995, 2020):
                    searchyear = os.path.join(optimidir.replace('%04d'%year,
                                 '%04d'%yr), 'fgap_%s_optimized.pickle'%NUTS_no)
                    if os.path.exists(searchyear):
                        availfiles.append(searchyear)
                        availyears.append(yr)
                print "%d years found for gap filling:"%len(availfiles), availyears

                # use the average of years y-1 and y+1 if both are available
                if prevyear in availfiles and nextyear in availfiles:
                    optimi_info = pickle_load(open(prevyear, 'rb'))
                    ygf_prev = optimi_info[2]
                    optimi_info = pickle_load(open(nextyear, 'rb'))
                    ygf_next = optimi_info[2]
                    ygf = (ygf_prev + ygf_next) / 2.0  # simple average
                    opt_code = 'gapfilled02'
                    shortlist_cells = optimi_info[3]
                # use the previous year value
                elif prevyear in availfiles:
                    optimi_info = pickle_load(open(prevyear, 'rb'))
                    ygf = optimi_info[2]
                    opt_code = 'gapfilled03a'
                    shortlist_cells = optimi_info[3]
                    print shortlist_cells
                # use the next year value
                elif nextyear in availfiles:
                    optimi_info = pickle_load(open(nextyear, 'rb'))
                    ygf = optimi_info[2]
                    opt_code = 'gapfilled03b'
                    shortlist_cells = optimi_info[3]
                # use the climatological average if more than 2 years are available
                elif len(availfiles) > 2:
                    ygf = 0.0
                    for filename in availfiles:
                        optimi_info = pickle_load(open(filename, 'rb'))
                        ygf += optimi_info[2]
                    ygf = ygf / len(availfiles)
                    opt_code = 'gapfilled04'
                    shortlist_cells = optimi_info[3]
                # use the upper NUTS level optimum (NUTS1, or NUTS0 at worst)
                else:
                    try:
                        nuts1file = os.path.join(optimidir,
                                    'fgap_%s_optimized.pickle'%NUTS_no[0:3])
                        data = pickle_load(open(nuts1file, 'rb'))
                        ygf = data[2]
                        opt_code = 'gapfilled05a'
                        shortlist_cells = data[3]
                    except IOError:
                        try:
                            nuts0file = os.path.join(optimidir,
                                        'fgap_%s_optimized.pickle'%NUTS_no[0:2])
                            data = pickle_load(open(nuts0file, 'rb'))
                            ygf = data[2]
                            opt_code = 'gapfilled05b'
                            shortlist_cells = data[3]
                        # use a default value if all previous methods fail
                        except IOError:
                            ygf = 0.8
                            opt_code = 'gapfilled06'
                            shortlist_cells = []

                print "Using ygf of %5.2f and code of %s"%(ygf, opt_code)
                print "created file fgap_%s_%s.pickle"%(NUTS_no, opt_code)+\
                      " in folder %s"%optimidir
                currentyear = os.path.join(optimidir,
                              'fgap_%s_%s.pickle'%(NUTS_no, opt_code))
                pickle_dump([NUTS_no, opt_code, ygf, shortlist_cells],
                            open(currentyear, 'wb'))
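#-------------------------------------------------------------------------------
# Sanity-check sketch (illustrative, not in the original workflow): reload one
# of the gap-filled pickles written above and unpack it. The year, crop and
# NUTS region in the path are placeholder assumptions; the list layout
# [NUTS_no, opt_code, ygf, shortlist_cells] matches the pickle_dump above.

from cPickle import load as pickle_load
import os

fname = os.path.join('fgap/2006/c1', 'fgap_NL2_gapfilled04.pickle')
NUTS_no, opt_code, ygf, shortlist_cells = pickle_load(open(fname, 'rb'))
# the yield gap factor is expected to be a fraction between 0 and 1
print 'region %s: fgap=%5.2f (%s), %i grid cells'%(
      NUTS_no, ygf, opt_code, len(shortlist_cells))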