def snowCalc(data, stakeNames, temps, times, jdatelist, jdayBw, paramDict, trudbKeys, trudb, outputDir, outputname): # Settings and counters for multiple parameter values counter = 0 # previousModBs = [] modelDiffScore = 1 while modelDiffScore > 0.01 and counter < 20: # RUN MODEL data = runModel(data, stakeNames, temps, times, jdatelist, jdayBw, paramDict, trudbKeys, trudb, counter) dataKeys = data.keys() dataKeys.sort() # # REPORT ON MODEL if len(data["DataSets"]) > 0: report, resNorm = reportCreate(data, paramDict) # if resNorm < best: print "Snow back calculation run {0:d}".format(counter) print "Score: {0:2.3f}\n".format(resNorm) # Output model data to file pfnm = str(counter) outDir = makeDir(outputDir, pfnm) meltDataWrite(data, outDir) # Write report to text file reportWrite(report, outDir) # Plot model results x = [] for stake in stakeNames: x.append(data[stake]["Elevation"]) if len(data["DataSets"]) > 0: setKeys = data["DataSets"].keys() # Order all Mod first, then all Org setKeys.sort() # Plot differences between measured and modelled and test difference to previous run start = 0 end = len(setKeys) middle = end / 2 i = start while i < end / 2: modBs = np.array(data["DataSets"][setKeys[i]]) if len(previousModBs) == 0: previousModBs = copy.copy(modBs) else: if i == 0: diffModBs = modBs - previousModBs modelDiffScore = np.mean(diffModBs) # print "Average difference of modelled ablation to previous estimate: {0:2.3f}".format(modelDiffScore) # print diffModBs previousModBs = copy.copy(modBs) pltnmMosDiff = outputname + setKeys[i] + "_moddiff" xlabel = "Elevation (m.a.s.l.)" ylabel = "Model improvement (m w.e.)" plttitle = "Model improvement on previous run against Elevation" plotDifElev(pltnmMosDiff, outDir, setKeys[i], x, diffModBs, "c", xlabel, ylabel, plttitle) obsBs = np.array(data["DataSets"][setKeys[middle]]) bsDiff = obsBs - modBs pltnmBs = outputname + setKeys[i] + "_measured" xlabel = "Elevation (m.a.s.l.)" ylabel = "Measured - Modelled Melt (m w.e.)" 
plttitle = "Measured - Modelled against Elevation" plotDifElev(pltnmBs, outDir, setKeys[i], x, bsDiff, "r", xlabel, ylabel, plttitle) # pltnmBsmm = outputname + setKeys[i] + '_modmeas' # plotDif(pltnmBsmm, outDir, setKeys[i], obsBs, modBs, 'b') i = i + 1 middle = middle + 1 counter = counter + 1 return resNorm
def main():
    """Drive the degree-day melt model for one or more years.

    Years are taken from command-line arguments (integers) or default to
    2005-2013.  For each year: load shading raster, temperature and stake
    CSV data, optional truthing surveys, then run the model over every
    combination of melt parameters (a single combination when `choice`
    names a pre-derived parameter set), scoring each run by the norm of
    residuals and writing the best runs' data, reports and plots to
    ../Output/<year>/.
    """
    # Set up list of years from command line arguments
    if len(sys.argv) > 1:
        years = []
        for arg in sys.argv[1:]:
            try:
                years.append(int(arg))
            except:
                print sys.argv
                sys.exit("Argument Error")
        print years
    else:
        years = [2005,2006,2007,2008,2009,2010,2011,2012,2013]
    #
    # Get shading data
    # Location of file containing a multiband raster, each band represents the shading on one day. 1 = no shade, 0 = really quite dark
    shadefile = '../InData/Shades/SG_shade.tif'
    # shadefile = '../InData/Shades/Reduced/SG_shade.tif'
    # Read the shade factor raster in to memory
    raster, transf, bandcount = getShadeFile(shadefile)
    writeScores = 'no' # 'yes' Can be ignored, will be set to yes if test set to run over multiple parameters (5)
    #
    # Set plotOn to 1 to plot differences between modelled and measured
    plotOn = 1
    #
    for year in years:
        # Set parameters for the melt model.  `choice` selects one of the
        # pre-derived parameter sets below; any unknown value triggers the
        # full parameter sweep in the else-branch instead.
        choice = 'All_score'
        derivedParameters = {}
        # NOTE(review): no '2012' entry exists — harmless while `choice`
        # is a fixed key like 'All_score', but a per-year choice would
        # fall through to the sweep for 2012.
        derivedParameters['2005'] = {'ddfSnow':0.0046, 'ddfSi':0.0054, 'ddfFirn':0.0058, 'ddfIce':0.0064,
                                     'lapse':0.0044, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
        derivedParameters['2006'] = {'ddfSnow':0.0058, 'ddfSi':0.0056, 'ddfFirn':0.0058, 'ddfIce':0.0064,
                                     'lapse':0.0040, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
        derivedParameters['2007'] = {'ddfSnow':0.0036, 'ddfSi':0.0044, 'ddfFirn':0.0040, 'ddfIce':0.0040,
                                     'lapse':0.0052, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
        derivedParameters['2008'] = {'ddfSnow':0.0036, 'ddfSi':0.0044, 'ddfFirn':0.0058, 'ddfIce':0.0040,
                                     'lapse':0.0044, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
        derivedParameters['2009'] = {'ddfSnow':0.0036, 'ddfSi':0.0056, 'ddfFirn':0.0058, 'ddfIce':0.0064,
                                     'lapse':0.0052, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
        derivedParameters['2010'] = {'ddfSnow':0.0036, 'ddfSi':0.0044, 'ddfFirn':0.0058, 'ddfIce':0.0048,
                                     'lapse':0.0040, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
        derivedParameters['2011'] = {'ddfSnow':0.0058, 'ddfSi':0.0056, 'ddfFirn':0.0058, 'ddfIce':0.0060,
                                     'lapse':0.0040, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
        derivedParameters['2013'] = {'ddfSnow':0.0036, 'ddfSi':0.0044, 'ddfFirn':0.0058, 'ddfIce':0.0060,
                                     'lapse':0.0068, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
        derivedParameters['All_score'] = {'ddfSnow':0.0042, 'ddfSi':0.0056, 'ddfFirn':0.0044, 'ddfIce':0.0056,
                                          'lapse':0.0048, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
        derivedParameters['All_weight'] = {'ddfSnow':0.0047, 'ddfSi':0.0050, 'ddfFirn':0.0049, 'ddfIce':0.0052,
                                           'lapse':0.0058, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
        derivedParameters['b1_score'] = {'ddfSnow':0.0058, 'ddfSi':0.0056, 'ddfFirn':0.0054, 'ddfIce':0.0060,
                                         'lapse':0.0040, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
        derivedParameters['b1_weight'] = {'ddfSnow':0.0047, 'ddfSi':0.0050, 'ddfFirn':0.0049, 'ddfIce':0.0052,
                                          'lapse':0.0058, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
        derivedParameters['b2_score'] = {'ddfSnow':0.0038, 'ddfSi':0.0052, 'ddfFirn':0.0042, 'ddfIce':0.0056,
                                         'lapse':0.0056, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
        derivedParameters['b2_weight'] = {'ddfSnow':0.0047, 'ddfSi':0.0050, 'ddfFirn':0.0049, 'ddfIce':0.0052,
                                          'lapse':0.0058, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
        if choice in derivedParameters.keys():
            # Single pre-derived parameter set: one-element lists so the
            # itertools.product sweep below degenerates to a single run.
            ddfSnow = [derivedParameters[choice]['ddfSnow']]
            ddfSi = [derivedParameters[choice]['ddfSi']]
            ddfFirn = [derivedParameters[choice]['ddfFirn']]
            ddfIce = [derivedParameters[choice]['ddfIce']]
            lapse = [derivedParameters[choice]['lapse']]
            elevLapse = [derivedParameters[choice]['elevLapse']]
            sfe = [derivedParameters[choice]['sfe']]
            ELA = [derivedParameters[choice]['ELA']]
        else:
            # Full parameter sweep: candidate ranges scaled to physical
            # units (x 0.0001), with per-parameter scores written at the end.
            writeScores = 'yes'
            ddfSnow = range(36,60,2)
            ddfSnow = np.array(ddfSnow)*0.0001
            ddfSnow = list(ddfSnow)
            ddfSi = range(44,58,2)
            ddfSi = np.array(ddfSi)*0.0001
            ddfSi = list(ddfSi)
            ddfFirn = range(40,60,2)
            ddfFirn = np.array(ddfFirn)*0.0001
            ddfFirn = list(ddfFirn)
            ddfIce = range(40,66,2)
            ddfIce = np.array(ddfIce)*0.0001
            ddfIce = list(ddfIce)
            lapse = range(40,80,4)
            lapse = np.array(lapse)*0.0001
            lapse = list(lapse)
            rangeZ = (2100 - 1150)
            elevLapse = [rangeZ] # Elevation dependant lapse rate
            sfe = [1.5] # Shading factor exponent (adjusts the shading value at each point)
            ELA = [1500] # Equilibrium line, for firn or ice under snow
        #
        strYear = str(year)
        dataLoc = '../InData/' + strYear
        # Temperature data. Following two lines are example of input format:
        # Date,Temp
        # 2010-01-25,-8.3
        weather = 'weather' + strYear + '.csv'
        TfileName = os.path.join(dataLoc, weather)
        # Read Temperature data from csv file and convert dates to julian days. Date format '%Y-%m-%d' is SMHI's
        TinFile = open(TfileName,'rb')
        dates = []
        times = []
        temps = []
        for line in csv.DictReader(TinFile, delimiter=','):
            dates.append(line['Date'].strip())
            date = datetime.strptime(line['Date'].strip(),'%Y-%m-%d')
            jdate = datetime.strftime(date,'%j')
            times.append(int(jdate))
            temps.append(float(line['Temp'].strip()))
        TinFile.close()
        # Stake data. Following two lines are example of input format:
        # Stake,Easting,Northing,Elevation,Bw,Bs,Bn,Surface
        # 04C,651103.586397,7536381.86553,1219,0.334,2.53,-2.196,ice
        stakeFileName = 'StakeData' + strYear + '.csv'
        SfileName = os.path.join(dataLoc, stakeFileName)
        # Get settings for model: AWS elevation, date of snow probing, dates for model export, first date in shading file (could start this at 1 by default but
        # shading file may be created for limited range of dates to reduce file size)
        refElev, jdayBw, jdatelist, startday = getSettings(dataLoc, times[-1])
        print "For year %s following settings used: " %(strYear)
        print "refElev set to %s" %(refElev)
        print "jdayBw set to %s" %(jdayBw)
        #
        #
        # Settings and counters for testing multiple parameter values
        counter = 0
        # bestBsR2 = -9999 # name refers to R-squared test but maybe replaced by norm of residuals
        # BsR2 = np.nan # name refers to R-squared test but maybe replaced by norm of residuals
        bestResNorm = 9999 # Norm of residuals version
        ResNorm = np.nan # Norm of residuals version
        writeTest = 0
        # Directory for output
        outputDir = os.path.join('../Output/', strYear)
        if not os.path.exists(outputDir):
            os.makedirs(outputDir)
        outputname = strYear +'_DDM_'
        #
        # Truthing of data against field survey data. Each survey stored in sperate csv file.
        # The trudb is used to store both data and assessment of results
        truthDir = os.path.join(dataLoc,"truthing")
        try:
            truthfiles = filelist(truthDir,'csv')
            trudb = {}
            print "Truthing files: "
            for file in truthfiles:
                # File names are julian day numbers; only load those listed in settings
                if int(file.split('.')[0]) not in jdatelist:
                    print "%s does not match date given in settings file: %s" % (file, jdatelist)
                else:
                    print file
                    trudb[file.split('.')[0]] = import2vector(os.path.join(truthDir,file))
            trudbKeys = trudb.keys()
            trudbKeys.sort()
        except:
            # Truthing is optional; downstream code tests 'trudbKeys' in locals()
            print "No truthing data found."
        #
        # Read stake data
        SinFile = open(SfileName,'rb')
        stakeData = {}
        # Read point data file with position and winter balance as of last winter probing (jdayBw) and send to model
        for line in csv.DictReader(SinFile, delimiter=','):
            stakeName = line['Stake'].strip()
            stakeData[stakeName] = {}
            # Coordinates
            stakeData[stakeName]['Easting'] = float(line['Easting'].strip())
            stakeData[stakeName]['Northing'] = float(line['Northing'].strip())
            stakeData[stakeName]['Elevation'] = float(line['Elevation'].strip())
            # Get shading factor for location
            # Create vector for shade values (default 1 = no shade, one per day)
            vals = []
            for d in range(366):
                vals.append(1)
            try:
                stakeData[stakeName]['Shadevals'] = GetShadeVals(stakeData[stakeName]['Easting'], stakeData[stakeName]['Northing'], raster, transf, bandcount, vals, startday)
            except:
                # Fall back to unshaded defaults if the raster lookup fails
                stakeData[stakeName]['Shadevals'] = vals
                print "No shade value obtained for ", stakeName
            # Get the measured winter balance (mandatory — abort stake reading without it)
            try:
                stakeData[stakeName]['Org_Bw'] = float(line['Bw'].strip())
            except:
                print "No winter balance data found (Bw column)"
                break
            # Get the measured summer balance
            try:
                stakeData[stakeName]['Org_Bs'] = float(line['Bs'].strip())
            except:
                pass
            # Get the measured net balance
            try:
                stakeData[stakeName]['Org_Bn'] = float(line['Bn'].strip())
            except:
                pass
        # Iterate over all possible parameter value combinations
        # Scoring for each parameter
        scores = {'ddfSnow':{}, 'ddfSi':{}, 'ddfFirn':{}, 'ddfIce':{}, 'lapse':{}, 'elevLapse':{}, 'sfe':{}, 'ELA':{}, 'refElev':{}}
        parUsage = {'BsScore':[], 'ddfSnow':[], 'ddfSi':[], 'ddfFirn':[], 'ddfIce':[], 'lapse':[], 'elevLapse':[], 'sfe':[], 'ELA':[], 'refElev':[]}
        iterationcount = len(ddfSnow)*len(ddfSi)*len(ddfFirn)*len(ddfIce)*len(lapse)*len(elevLapse)*len(sfe)*len(ELA)
        print "Total number of runs: %s" % (iterationcount)
        for it1, it2, it3, it4, it5, it6, it7, it8 in itertools.product(ddfSnow, ddfSi, ddfFirn, ddfIce, lapse, elevLapse, sfe, ELA):
            paramDict = {}
            paramDict['ddfSnow'] = it1
            paramDict['ddfSi'] = it2
            paramDict['ddfFirn'] = it3
            paramDict['ddfIce'] = it4
            paramDict['lapse'] = it5
            paramDict['elevLapse'] = it6
            paramDict['sfe'] = it7
            paramDict['ELA'] = it8
            paramDict['refElev'] = refElev
            #
            # 'data' is a copy of the original 'stakeData'
            data = copy.deepcopy(stakeData)
            stakeNames = stakeData.keys()
            stakeNames.sort()
            data['DataSets'] = {}
            for stake in stakeNames:
                # For ordered headers/keys
                data[stake]['Headers'] = ['MeltModel', 'Shadevals', 'Easting', 'Northing', 'Elevation', 'Org_Bw']
                if 'Org_Bn' in data[stake].keys():
                    data[stake]['Headers'].append('Org_Bn')
                # Send input data to Degree Day Model object
                data[stake]['MeltModel'] = DdfCell(data[stake]['Easting'], data[stake]['Northing'], data[stake]['Elevation'], data[stake]['Org_Bw'], jdayBw, data[stake]['Shadevals'], paramDict)
                # For each julian day in the "times" vector call the meltInst method for each point object, passing the temperature and the day number.
                # This is what runs the model at each time step in the temperature time series file
                for i in range(len(temps)):
                    data[stake]['MeltModel'].meltInst(temps[i],times[i])
                for day in jdatelist:
                    # Fetch modelled melt and net balance for each julian day specific in settings and create new entry for each
                    loc = data[stake]['MeltModel'].jTimeSeries.index(day)
                    data[stake]['Mod_Bs_' + str(day)] = round(data[stake]['MeltModel'].meltSumSeries[loc],3)
                    data[stake]['Mod_Bn_' + str(day)] = round(data[stake]['MeltModel'].BnSeries[loc],3)
                    data[stake]['Headers'].append('Mod_Bs_' + str(day))
                    data[stake]['Headers'].append('Mod_Bn_' + str(day))
                    # Fetch any truthing data available
                    if 'trudbKeys' in locals():
                        if str(day) in trudbKeys:
                            try:
                                loc = np.where(trudb[str(day)]['Stake']==stake)[0][0]
                                data[stake]['Org_Bs_' + str(day)] = round(trudb[str(day)]['Bs'][loc],3)
                                data[stake]['Org_Bn_' + str(day)] = round(trudb[str(day)]['Bn'][loc],3)
                                data[stake]['Mod_Bw_' + str(day)] = round((data[stake]['Org_Bn_' + str(day)] +data[stake]['Mod_Bs_' + str(day)]), 3)
                            except:
                                # Stake absent from this survey: pad with NaN
                                data[stake]['Org_Bs_' + str(day)] = np.nan
                                data[stake]['Org_Bn_' + str(day)] = np.nan
                                data[stake]['Mod_Bw_' + str(day)] = np.nan
                            data[stake]['Headers'].insert(-2, 'Org_Bs_' + str(day))
                            data[stake]['Headers'].insert(-2, 'Org_Bn_' + str(day))
                            data[stake]['Headers'].insert(-2, 'Mod_Bw_' + str(day))
                            # Add values to lists for calculating R2 later
                            # NOTE(review): both Mod_Bs and Org_Bs lists are filled
                            # here inside the truthing branch so their lengths stay
                            # paired for the Mod/Org comparison below — confirm
                            # against the original (pre-collapse) indentation.
                            if 'Mod_Bs_' + str(day) not in data['DataSets'].keys():
                                data['DataSets']['Mod_Bs_' + str(day)] = []
                            data['DataSets']['Mod_Bs_' + str(day)].append(data[stake]['Mod_Bs_' + str(day)])
                            if 'Org_Bs_' + str(day) not in data['DataSets'].keys():
                                data['DataSets']['Org_Bs_' + str(day)] = []
                            data['DataSets']['Org_Bs_' + str(day)].append(data[stake]['Org_Bs_' + str(day)])
            dataKeys = data.keys()
            dataKeys.sort()
            #
            if len(data['DataSets']) > 0:
                # Score this parameter combination against the truthing data
                report = copy.deepcopy(paramDict)
                setKeys = data['DataSets'].keys()
                # Order all Mod first, then all Org
                setKeys.sort()
                start = 0
                end = len(setKeys)
                middle = end/2
                i = start
                while i < end/2:
                    # Calculate R2
                    modBs = np.array(data['DataSets'][setKeys[i]])
                    obsBs = np.array(data['DataSets'][setKeys[middle]])
                    # Fit regression line through differences. Not good test of model
                    # popt, pcov = curve_fit(func,obsBs, modBs)
                    # variance = np.diagonal(pcov)
                    # SE = np.sqrt(variance)
                    # modBsmean = nanmean(modBs)
                    obsBsmean = nanmean(obsBs)
                    obsBsMinModBs = obsBs - modBs
                    obsBsMinMean = obsBs - obsBsmean
                    SSres = (np.nansum(obsBsMinModBs**2))
                    SStot = (np.nansum(obsBsMinMean**2))
                    ResNorm = SSres**0.5
                    # BsR2 = 1 - (SSres / SStot) # BsR2 version
                    # report[(setKeys[i]+'_R2')] = BsR2 # BsR2 version
                    report[(setKeys[i]+'_RN')] = ResNorm # Norm of residuals version
                    # scores = scoring(scores, paramDict, BsR2) # BsR2 version
                    scores = scoring(scores, paramDict, ResNorm) # Norm of residuals version
                    # parUsage = usageUpdate(parUsage, paramDict, BsR2) #BsR2 version
                    parUsage = usageUpdate(parUsage, paramDict, ResNorm) # Norm of residuals version
                    if i == 0:
                        # if BsR2 >= bestBsR2: # BsR2 version
                        # bestBsR2 = copy.copy(BsR2) # BsR2 version
                        if ResNorm <= bestResNorm: # Norm of residuals version
                            # New best run: remember score and flag output writing
                            bestResNorm = copy.copy(ResNorm) # Norm of residuals version
                            writeTest = 1
                            print "\nRun: {0} of {1}".format(counter+1, iterationcount)
                            reportKeys = report.keys()
                            reportKeys.sort()
                            for k in reportKeys:
                                print k, report[k]
                            # scoreWrite(scores, outputDir) # Write out scores for each parameter after each iteration. No real need
                            # print scores
                    i = i+1
                    middle = middle+1
            if writeTest == 1:
                # Output model data to file
                flnm = str(counter)
                outDir = makeDir(outputDir, flnm)
                meltDataWrite(data, outDir)
                # Write report to text file
                reportWrite(report, outDir)
                # Plot model results
                x = []
                for stake in stakeNames:
                    x.append(data[stake]['Elevation'])
                if len(data['DataSets']) > 0:
                    setKeys = data['DataSets'].keys()
                    # Order all Mod first, then all Org
                    setKeys.sort()
                    # Plot differences between measured and modelled
                    if plotOn == 1:
                        start = 0
                        end = len(setKeys)
                        middle = end/2
                        i = start
                        while i < end/2:
                            modBs = np.array(data['DataSets'][setKeys[i]])
                            obsBs = np.array(data['DataSets'][setKeys[middle]])
                            bsDiff = obsBs - modBs
                            pltnmBs = outputname + setKeys[i] + '_measured'
                            plotDifElev(pltnmBs, outDir, setKeys[i], x, bsDiff, 'r')
                            # pltnmBsmm = outputname + setKeys[i] + '_modmeas'
                            # plotDif(pltnmBsmm, outDir, setKeys[i], obsBs, modBs, 'b')
                            i = i+1
                            middle = middle+1
            # Progress report every 100 runs
            if counter %100 == 0:
                print "%s of %s" % (counter, iterationcount)
            counter = counter+1
            writeTest = 0
        # After the sweep: dump per-parameter scores (sweep mode only) and usage
        if writeScores == 'yes':
            scoreWrite(scores, outputDir)
        parameterCheckWrite(outputDir, year, parUsage)