Example #1
def main():
# Set up list of years from command line arguments
	choice = ''
	if len(sys.argv) > 1:
		argstart = 1
		if sys.argv[1] in ['b1', 'b2', 'All', 'y2005', 'y2006', 'y2007', 'y2008', 'y2009', 'y2010', 'y2011', 'y2013']:
			choice = sys.argv[1]
			argstart = 2
		years = []
		for arg in sys.argv[argstart:]:
			try:
				years.append(int(arg))
			except:
				print sys.argv
				sys.exit("Argument Error")
		print years
	else:
		years = []
		while len(years) <1:
			try:
				yearsFromUser = raw_input("Give years as space separated list: ")
				yearsAsString = yearsFromUser.strip().split(' ')
				for yearString in yearsAsString:
					years.append(int(yearString))
			except:
				years = []
#
# Run main function
	for year in years:
		strYear = str(year)
		dataLoc = '../InData/' + strYear
		# Temperature data. Following two lines are example of input format:
		# Date,Temp
		# 2010-01-25,-8.3
		weather = 'weather' + strYear + '.csv'
		TfileName = os.path.join(dataLoc, weather)
		# Read Temperature data from csv file and convert dates to julian days. Date format '%Y-%m-%d' is SMHI's
		TinFile = open(TfileName,'rb')
		dates = []
		times = []
		temps = []
		for line in csv.DictReader(TinFile, delimiter=','):
			dates.append(line['Date'].strip())
			date = datetime.strptime(line['Date'].strip(),'%Y-%m-%d')
			jdate = datetime.strftime(date,'%j')
			times.append(int(jdate))
			temps.append(float(line['Temp'].strip()))
		TinFile.close()
		# Stake data. Following two lines are example of input format:
		# Stake,Easting,Northing,Elevation,Bw,Bs,Bn,Surface
		# 04C,651103.586397,7536381.86553,1219,0.334,2.53,-2.196,ice
		stakeFileName = 'StakeData' + strYear + '.csv'
		SfileName = os.path.join(dataLoc, stakeFileName)
		# Get settings for model: AWS elevation, date of snow probing, dates for model export, first date in shading file (could start this at 1 by default but
		# shading file may be created for limited range of dates to reduce file size)
		refElev, jdayBw, jdatelist, startday = getSettings(dataLoc, times[-1])
		print "For year %s following settings used: " %(strYear)
		print "refElev set to %s" %(refElev)
		print "jdayBw set to %s" %(jdayBw)
		#
		# Directory for output
		outputDir = os.path.join('../Output/', strYear)
		if not os.path.exists(outputDir):
			os.makedirs(outputDir)
		outputname = strYear +'_DDM_'
		#
		# Truthing of data against field survey data. Each survey is stored in a separate csv file.
		# The trudb is used to store both data and assessment of results
		truthDir = os.path.join(dataLoc,"truthing")
		try:
			truthfiles = filelist(truthDir,'csv')
			trudb = {}
			print "Truthing files: "
			for file in truthfiles:
				if int(file.split('.')[0]) not in jdatelist:
					print "%s does not match date given in settings file: %s" % (file, jdatelist)
				else:
					print file
				trudb[file.split('.')[0]] = import2vector(os.path.join(truthDir,file),	importError = 'no')
			trudbKeys = trudb.keys()
			trudbKeys.sort()
		except:
			print "No truthing data found."
		#
		# Read stake data
		SinFile = open(SfileName,'rb')
		stakeData = {}
		# Read point data file with position and winter balance as of last winter probing (jdayBw) and send to model
		for line in csv.DictReader(SinFile, delimiter=','):
			stakeName = line['Stake'].strip()
			stakeData[stakeName] = {}
			# Coordinates
			stakeData[stakeName]['Easting'] = float(line['Easting'].strip())
			stakeData[stakeName]['Northing'] = float(line['Northing'].strip())
			stakeData[stakeName]['Elevation'] = float(line['Elevation'].strip())
			# Get shading factor for location
			# Create vector for shade values
			vals = []
			for d in range(366):
				vals.append(1)
			try:
				stakeData[stakeName]['Shadevals'] = GetShadeVals(stakeData[stakeName]['Easting'], stakeData[stakeName]['Northing'], raster, transf, bandcount, vals, startday)
			except:
				stakeData[stakeName]['Shadevals'] = vals
				print "No shade value obtained for ", stakeName
			# Get the measured winter balance
			try:
				stakeData[stakeName]['Org_Bw'] = float(line['Bw'].strip())
			except:
				print "No winter balance data found (Bw column)"
				break
			# Get the measured summer balance
			try:
				stakeData[stakeName]['Org_Bs'] = float(line['Bs'].strip())
			except:
				pass
			# Get the measured net balance
			try:
				stakeData[stakeName]['Org_Bn'] = float(line['Bn'].strip())
			except:
				pass
		#
		# Prepare database for passing into loop and model run
		stakeNames = stakeData.keys()
		stakeNames.sort()
		data = copy.deepcopy(stakeData)
		best = 9999
		#
		# Set parameters for the melt model
		if choice in ['b1', 'b2', 'All']:
			choice = choice + '_score'
		elif choice in ['y2005', 'y2006', 'y2007', 'y2008', 'y2009', 'y2010', 'y2011', 'y2013']:
			choice = choice.strip('y')
		print "Choice of parameters: {}".format(choice)
		derivedParameters = {}
		derivedParameters['2005'] =			{'ddfSnow':0.0046, 'ddfSi':0.0054, 'ddfFirn':0.0058, 'ddfIce':0.0064, 'lapse':0.0044, 'elevLapse':(700), 'sfe':1.4, 'ELA':1500}
		derivedParameters['2006'] =			{'ddfSnow':0.0058, 'ddfSi':0.0056, 'ddfFirn':0.0058, 'ddfIce':0.0064, 'lapse':0.0040, 'elevLapse':(800), 'sfe':1.1, 'ELA':1500}
		derivedParameters['2007'] =			{'ddfSnow':0.0036, 'ddfSi':0.0044, 'ddfFirn':0.0040, 'ddfIce':0.0040, 'lapse':0.0052, 'elevLapse':(800), 'sfe':1.3, 'ELA':1500}
		derivedParameters['2008'] =			{'ddfSnow':0.0036, 'ddfSi':0.0044, 'ddfFirn':0.0058, 'ddfIce':0.0040, 'lapse':0.0044, 'elevLapse':(900), 'sfe':1.5, 'ELA':1500}
		derivedParameters['2009'] =			{'ddfSnow':0.0036, 'ddfSi':0.0056, 'ddfFirn':0.0058, 'ddfIce':0.0064, 'lapse':0.0052, 'elevLapse':(800), 'sfe':1.6, 'ELA':1500}
		derivedParameters['2010'] =			{'ddfSnow':0.0036, 'ddfSi':0.0044, 'ddfFirn':0.0058, 'ddfIce':0.0048, 'lapse':0.0040, 'elevLapse':(600), 'sfe':1.2, 'ELA':1500}
		derivedParameters['2011'] =			{'ddfSnow':0.0058, 'ddfSi':0.0056, 'ddfFirn':0.0058, 'ddfIce':0.0060, 'lapse':0.0040, 'elevLapse':(900), 'sfe':1.1, 'ELA':1500}
		derivedParameters['2013'] =			{'ddfSnow':0.0036, 'ddfSi':0.0044, 'ddfFirn':0.0058, 'ddfIce':0.0060, 'lapse':0.0068, 'elevLapse':(800), 'sfe':1.3, 'ELA':1500}
		derivedParameters['All_score'] = {'ddfSnow':0.0042, 'ddfSi':0.0056, 'ddfFirn':0.0044, 'ddfIce':0.0056, 'lapse':0.0048, 'elevLapse':(800), 'sfe':1.4, 'ELA':1500}
		derivedParameters['All_weight'] = {'ddfSnow':0.0047, 'ddfSi':0.0050, 'ddfFirn':0.0049, 'ddfIce':0.0052, 'lapse':0.0058, 'elevLapse':(800), 'sfe':1.4, 'ELA':1500}
		derivedParameters['b1_score'] =	 {'ddfSnow':0.0058, 'ddfSi':0.0056, 'ddfFirn':0.0054, 'ddfIce':0.0060, 'lapse':0.0040, 'elevLapse':(800), 'sfe':1.4, 'ELA':1500}
		derivedParameters['b1_weight'] ={'ddfSnow':0.0047, 'ddfSi':0.0050, 'ddfFirn':0.0049, 'ddfIce':0.0052, 'lapse':0.0058, 'elevLapse':(800), 'sfe':1.4, 'ELA':1500}
		derivedParameters['b2_score'] =	 {'ddfSnow':0.0038, 'ddfSi':0.0052, 'ddfFirn':0.0042, 'ddfIce':0.0056, 'lapse':0.0056, 'elevLapse':(750), 'sfe':1.4, 'ELA':1500}
		derivedParameters['b2_weight'] ={'ddfSnow':0.0047, 'ddfSi':0.0050, 'ddfFirn':0.0049, 'ddfIce':0.0052, 'lapse':0.0058, 'elevLapse':(750), 'sfe':1.4, 'ELA':1500}
		#
		try:
			paramDict = {}
			paramDict['ddfSnow'] =	derivedParameters[choice]['ddfSnow']
			paramDict['ddfSi'] = derivedParameters[choice]['ddfSi']
			paramDict['ddfFirn'] = derivedParameters[choice]['ddfFirn']
			paramDict['ddfIce'] = derivedParameters[choice]['ddfIce']
			paramDict['lapse'] = derivedParameters[choice]['lapse']
			paramDict['elevLapse'] = derivedParameters[choice]['elevLapse']
			paramDict['sfe'] = derivedParameters[choice]['sfe']
			paramDict['ELA'] = derivedParameters[choice]['ELA']
			paramDict['refElev'] = refElev
			best, resNorm = snowCalc(data, stakeNames, temps, times, jdatelist, jdayBw, paramDict, trudbKeys, trudb, outputDir, outputname, best)
		except:
			print "Running {0:d} on all paramater combinations\n".format(year)
			ddfSnow = range(36,59,4)
			ddfSnow = np.array(ddfSnow)*0.0001
			ddfSnow = list(ddfSnow)
			ddfSi = range(48,61,4)
			ddfSi = np.array(ddfSi)*0.0001
			ddfSi = list(ddfSi)
			ddfFirn = range(36,46,4)
			ddfFirn = np.array(ddfFirn)*0.0001
			ddfFirn = list(ddfFirn)
			ddfIce = range(40,65,4)
			ddfIce = np.array(ddfIce)*0.0001
			ddfIce = list(ddfIce)
			lapse = range(36,61,4)
			lapse = np.array(lapse)*0.0001
			lapse = list(lapse)
			elevLapse = range(100,1501,100) # Elevation dependent lapse rate
			sfe = range(8,15) # Shading factor exponent (adjusts the shading value at each point)
			sfe = np.array(sfe)*0.1
			sfe = list(sfe)
			ELA = [1500] # Equilibrium line, for firn or ice under snow
			parUsage = {'BsScore':[], 'ddfSnow':[], 'ddfSi':[], 'ddfFirn':[], 'ddfIce':[], 'lapse':[], 'elevLapse':[], 'sfe':[], 'ELA':[], 'refElev':[]}
			iterationcount = len(ddfSnow)*len(ddfSi)*len(ddfFirn)*len(ddfIce)*len(lapse)*len(elevLapse)*len(sfe)*len(ELA)
			print "Total number of runs: %s" % (iterationcount)
			for it1, it2, it3, it4, it5, it6, it7, it8 in itertools.product(ddfSnow, ddfSi, ddfFirn, ddfIce, lapse, elevLapse, sfe, ELA):
				paramDict = {}
				paramDict['ddfSnow'] = it1
				paramDict['ddfSi'] = it2
				paramDict['ddfFirn'] = it3
				paramDict['ddfIce'] = it4
				paramDict['lapse'] = it5
				paramDict['elevLapse'] = it6
				paramDict['sfe'] = it7
				paramDict['ELA'] = it8
				paramDict['refElev'] = refElev
				check = best
				best, resNorm = snowCalc(data, stakeNames, temps, times, jdatelist, jdayBw, paramDict, trudbKeys, trudb, outputDir, outputname, best, iterationcount)
				parUsage = usageUpdate(parUsage, paramDict, resNorm)
				if best < check:
					print "{0:d} improved score".format(iterationcount)
				iterationcount = iterationcount - 1
				if iterationcount % 100 == 0:
					print iterationcount
			parameterCheckWrite(outputDir, year, parUsage)
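The listing for Example #1 has no docstring, but the argument handling at the top accepts an optional parameter-set token ('b1', 'b2', 'All' or a 'yYYYY' key) followed by one or more years; with no token, the KeyError in the parameter lookup drops the run into the brute-force grid search. The script name is not shown in the listing, so the invocations below are only a hypothetical sketch of how it might be called, mirroring the usage lines in Example #2's docstring:

	$ python DDM_run.py All 2010 2011    # 'All_score' parameter set for 2010 and 2011
	$ python DDM_run.py y2008 2008       # parameters derived for 2008
	$ python DDM_run.py 2005 2006        # no token: run the full parameter grid search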
Example #2
def main():
    """
	Start script with e.g.
	$ python LSASO.py 2005
	$ python LSASO.py p 2005
	$ python LSASO.py 2005 2006 2007
	$ python LSASO.py p 2005 2006 2007
	where 'p' indicates using the parameter values for melt rates specified in the settings.txt file for each year

	To run this you will need the following:
	A shading file where each pixel represents shading on the glacier at each julian day to be modelled
	A temperature data file for each julian day to be modelled
	A stake data file containing winter balance values and coordinates for each stake
	A settings file
	The directory structure I have used here is as follows ('yyyy' should be replaced by the year):
	/InData/yyyy
	/InData/yyyy/weatheryyyy.csv
	/InData/yyyy/StakeReadings.csv
	/InData/settings.txt
	/InData/Shades/SG_shade.tif		This file is here as it is the output of another script
	/Output/
	/Output/yyyy/		These are created by this script as needed
	/Scripts/		Run the script from here.

	Format examples:
		settings.txt (note that if 'ExportDate' is omitted, all dates between bW and the final temperature reading are exported):
			Elevation=1150
			jdayBw=115
			ExportDate=236,256
			ShadeStart=100
			ELA may be left out and the programme will use the optimisation algorithm to guess at a best value (not an estimate of ELA per se)
			ELA=1500
			These parameter settings must ALL be present or those present will be ignored
			Snow=0.0046
			Si=0.0054
			Firn=0.0058
			Ice=0.0064
			lapse=0.0044
			elevLapse=950
			sfe=1.5

		weatheryyyy.csv:
			Date,Temp
			2010-01-25,-8.3

		StakeDatayyyy.csv:
			Stake,Easting,Northing,Elevation,Bw,Bs,Bn,Surface
			04C,651103.586397,7536381.86553,1219,0.334,2.53,-2.196,ice
	"""
    # Set up list of years from command line arguments
    paramChoice = ""
    if len(sys.argv) > 1:
        argstart = 1
        if sys.argv[1] == "p":
            paramChoice = "p"
            argstart = 2
        years = []
        for arg in sys.argv[argstart:]:
            try:
                years.append(int(arg))
            except:
                print sys.argv
                sys.exit("Argument Error")
        print years
    else:
        print main.__doc__
        years = []
        while len(years) < 1:
            try:
                yearsFromUser = raw_input("Give years as space separated list or (q)uit: ")
                yearsAsString = yearsFromUser.strip().split(" ")
                for yearString in yearsAsString:
                    years.append(int(yearString))
            except:
                if yearsFromUser == "q":
                    sys.exit()
                else:
                    years = []
    #
    # Run main function
    for year in years:
        print year
        strYear = str(year)
        dataLoc = "../InData/" + strYear
        # Temperature data. Following two lines are example of input format:
        # Date,Temp
        # 2010-01-25,-8.3
        weather = "weather" + strYear + ".csv"
        TfileName = os.path.join(dataLoc, weather)
        # Read Temperature data from csv file and convert dates to julian days. Date format '%Y-%m-%d' is SMHI's
        TinFile = open(TfileName, "rb")
        dates = []
        times = []
        temps = []
        for line in csv.DictReader(TinFile, delimiter=","):
            dates.append(line["Date"].strip())
            date = datetime.strptime(line["Date"].strip(), "%Y-%m-%d")
            jdate = datetime.strftime(date, "%j")
            times.append(int(jdate))
            temps.append(float(line["Temp"].strip()))
        TinFile.close()
        # Stake data. Following two lines are example of input format:
        # Stake,Easting,Northing,Elevation,Bw,Bs,Bn,Surface
        # 04C,651103.586397,7536381.86553,1219,0.334,2.53,-2.196,ice
        stakeFileName = "StakeData" + strYear + ".csv"
        SfileName = os.path.join(dataLoc, stakeFileName)
        # Get settings for model: AWS elevation, date of snow probing, dates for model export, first date in shading file (could start this at 1 by default but
        # shading file may be created for limited range of dates to reduce file size)
        refElev, jdayBw, jdatelist, startday, paramDict = getSettings(dataLoc, times[-1])
        print "For year %s following settings used: " % (strYear)
        print "refElev set to %s" % (refElev)
        print "jdayBw set to %s" % (jdayBw)
        #
        # Directory for output
        outputDir = os.path.join("../Output/", strYear)
        if not os.path.exists(outputDir):
            os.makedirs(outputDir)
        outputname = strYear + "_DDM_"
        #
        # Truthing of data against field survey data. Each survey is stored in a separate csv file.
        # The trudb is used to store both data and assessment of results
        truthDir = os.path.join(dataLoc, "truthing")
        try:
            truthfiles = filelist(truthDir, "csv")
            trudb = {}
            print "Truthing files: "
            for file in truthfiles:
                if int(file.split(".")[0]) not in jdatelist:
                    print "%s does not match date given in settings file: %s" % (file, jdatelist)
                else:
                    print file
                trudb[file.split(".")[0]] = import2vector(os.path.join(truthDir, file), importError="no")
            trudbKeys = trudb.keys()
            trudbKeys.sort()
        except:
            print "No truthing data found."
        #
        # Read stake data
        SinFile = open(SfileName, "rb")
        stakeData = {}
        # Read point data file with position and winter balance as of last winter probing (jdayBw) and send to model
        for line in csv.DictReader(SinFile, delimiter=","):
            stakeName = line["Stake"].strip()
            stakeData[stakeName] = {}
            # Coordinates
            stakeData[stakeName]["Easting"] = float(line["Easting"].strip())
            stakeData[stakeName]["Northing"] = float(line["Northing"].strip())
            stakeData[stakeName]["Elevation"] = float(line["Elevation"].strip())
            # Get shading factor for location
            # Create vector for shade values
            vals = []
            for d in range(366):
                vals.append(1)
            try:
                stakeData[stakeName]["Shadevals"] = GetShadeVals(
                    stakeData[stakeName]["Easting"],
                    stakeData[stakeName]["Northing"],
                    raster,
                    transf,
                    bandcount,
                    vals,
                    startday,
                )
            except:
                stakeData[stakeName]["Shadevals"] = vals
                print "No shade value obtained for ", stakeName
            # Get the measured winter balance
            try:
                stakeData[stakeName]["Org_Bw"] = float(line["Bw"].strip())
            except:
                print "No winter balance data found (Bw column)"
                break
            # Get the measured summer balance
            try:
                stakeData[stakeName]["Org_Bs"] = float(line["Bs"].strip())
            except:
                pass
            # Get the measured net balance
            try:
                stakeData[stakeName]["Org_Bn"] = float(line["Bn"].strip())
            except:
                pass
        #
        paramDict["refElev"] = refElev

        # Prepare database for passing into loop and model run
        stakeNames = stakeData.keys()
        stakeNames.sort()
        data = copy.deepcopy(stakeData)
        best = 9999
        #

        if len(paramDict.keys()) > 2 and paramChoice == "p":
            resNorm = snowCalc(
                data, stakeNames, temps, times, jdatelist, jdayBw, paramDict, trudbKeys, trudb, outputDir, outputname
            )
        else:
            if len(paramDict.keys()) == 2:
                x0 = [0.0050, 0.0050, 0.0050, 0.0050, 0.0050, 1000, 1.0]
            else:
                x0 = [0.0050, 0.0050, 0.0050, 0.0050, 0.0050, 1000, 1.0, 1500]

            def trigger(x):
                if len(x0) == 7:
                    resNorm = modelOptimiser(
                        data,
                        stakeNames,
                        temps,
                        times,
                        jdatelist,
                        jdayBw,
                        refElev,
                        trudbKeys,
                        trudb,
                        x,
                        ELA=paramDict["ELA"],
                    )
                else:
                    resNorm = modelOptimiser(
                        data, stakeNames, temps, times, jdatelist, jdayBw, refElev, trudbKeys, trudb, x
                    )
                return resNorm

            res = minimize(trigger, x0, method="nelder-mead", options={"xtol": 1e-8, "disp": True})
            print (res.x)
            paramDict["ddfSnow"] = res.x[0]
            paramDict["ddfSi"] = res.x[1]
            paramDict["ddfFirn"] = res.x[2]
            paramDict["ddfIce"] = res.x[3]
            paramDict["lapse"] = res.x[4]
            paramDict["elevLapse"] = res.x[5]
            paramDict["sfe"] = res.x[6]
            if "ELA" not in paramDict.keys():
                paramDict["ELA"] = res.x[7]

            resNorm = snowCalc(
                data, stakeNames, temps, times, jdatelist, jdayBw, paramDict, trudbKeys, trudb, outputDir, outputname
            )
        for key, value in paramDict.iteritems():
            print "{} = {}".format(key, value)
        print "\n{} final score: {:2.4f}\n".format(year, resNorm)
Example #3
def main():
# Set up list of years from command line arguments
	if len(sys.argv) > 1:
		years = []
		for arg in sys.argv[1:]:
			try:
				years.append(int(arg))
			except:
				print sys.argv
				sys.exit("Argument Error")
		print years
	else:
		years = [2005,2006,2007,2008,2009,2010,2011,2012,2013]
	#
	# Get shading data
	# Location of file containing a multiband raster, each band represents the shading on one day. 1 = no shade, 0 = really quite dark
	shadefile = '../InData/Shades/SG_shade.tif'
	# shadefile = '../InData/Shades/Reduced/SG_shade.tif'
	# Read the shade factor raster in to memory
	raster, transf, bandcount = getShadeFile(shadefile)
	writeScores = 'no' # Can be ignored; will be set to 'yes' if the test is set to run over multiple parameters (5)
	#
	# Set plotOn to 1 to plot differences between modelled and measured
	plotOn = 1
	#
	for year in years:
		# Set parameters for the melt model
		choice = 'All_score'
		derivedParameters = {}
		derivedParameters['2005'] =           {'ddfSnow':0.0046, 'ddfSi':0.0054, 'ddfFirn':0.0058, 'ddfIce':0.0064, 'lapse':0.0044, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
		derivedParameters['2006'] =           {'ddfSnow':0.0058, 'ddfSi':0.0056, 'ddfFirn':0.0058, 'ddfIce':0.0064, 'lapse':0.0040, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
		derivedParameters['2007'] =           {'ddfSnow':0.0036, 'ddfSi':0.0044, 'ddfFirn':0.0040, 'ddfIce':0.0040, 'lapse':0.0052, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
		derivedParameters['2008'] =           {'ddfSnow':0.0036, 'ddfSi':0.0044, 'ddfFirn':0.0058, 'ddfIce':0.0040, 'lapse':0.0044, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
		derivedParameters['2009'] =           {'ddfSnow':0.0036, 'ddfSi':0.0056, 'ddfFirn':0.0058, 'ddfIce':0.0064, 'lapse':0.0052, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
		derivedParameters['2010'] =           {'ddfSnow':0.0036, 'ddfSi':0.0044, 'ddfFirn':0.0058, 'ddfIce':0.0048, 'lapse':0.0040, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
		derivedParameters['2011'] =           {'ddfSnow':0.0058, 'ddfSi':0.0056, 'ddfFirn':0.0058, 'ddfIce':0.0060, 'lapse':0.0040, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
		derivedParameters['2013'] =           {'ddfSnow':0.0036, 'ddfSi':0.0044, 'ddfFirn':0.0058, 'ddfIce':0.0060, 'lapse':0.0068, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
		derivedParameters['All_score'] =   {'ddfSnow':0.0042, 'ddfSi':0.0056, 'ddfFirn':0.0044, 'ddfIce':0.0056, 'lapse':0.0048, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
		derivedParameters['All_weight'] = {'ddfSnow':0.0047, 'ddfSi':0.0050, 'ddfFirn':0.0049, 'ddfIce':0.0052, 'lapse':0.0058, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
		derivedParameters['b1_score'] =    {'ddfSnow':0.0058, 'ddfSi':0.0056, 'ddfFirn':0.0054, 'ddfIce':0.0060, 'lapse':0.0040, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
		derivedParameters['b1_weight'] =  {'ddfSnow':0.0047, 'ddfSi':0.0050, 'ddfFirn':0.0049, 'ddfIce':0.0052, 'lapse':0.0058, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
		derivedParameters['b2_score'] =    {'ddfSnow':0.0038, 'ddfSi':0.0052, 'ddfFirn':0.0042, 'ddfIce':0.0056, 'lapse':0.0056, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
		derivedParameters['b2_weight'] =  {'ddfSnow':0.0047, 'ddfSi':0.0050, 'ddfFirn':0.0049, 'ddfIce':0.0052, 'lapse':0.0058, 'elevLapse':(2100 - 1150), 'sfe':1.5, 'ELA':1500}
		if choice in derivedParameters.keys():
			ddfSnow = [derivedParameters[choice]['ddfSnow']]
			ddfSi = [derivedParameters[choice]['ddfSi']]
			ddfFirn = [derivedParameters[choice]['ddfFirn']]
			ddfIce = [derivedParameters[choice]['ddfIce']]
			lapse = [derivedParameters[choice]['lapse']]
			elevLapse = [derivedParameters[choice]['elevLapse']]
			sfe = [derivedParameters[choice]['sfe']]
			ELA = [derivedParameters[choice]['ELA']]
		else:
			writeScores = 'yes'
			ddfSnow = range(36,60,2)
			ddfSnow = np.array(ddfSnow)*0.0001
			ddfSnow = list(ddfSnow)
			ddfSi = range(44,58,2)
			ddfSi = np.array(ddfSi)*0.0001
			ddfSi = list(ddfSi)
			ddfFirn = range(40,60,2)
			ddfFirn = np.array(ddfFirn)*0.0001
			ddfFirn = list(ddfFirn)
			ddfIce = range(40,66,2)
			ddfIce = np.array(ddfIce)*0.0001
			ddfIce = list(ddfIce)
			lapse = range(40,80,4)
			lapse = np.array(lapse)*0.0001
			lapse = list(lapse)
			rangeZ = (2100 - 1150)
			elevLapse = [rangeZ] # Elevation dependent lapse rate
			sfe = [1.5] # Shading factor exponent (adjusts the shading value at each point)
			ELA = [1500] # Equilibrium line, for firn or ice under snow
		#
		strYear = str(year)
		dataLoc = '../InData/' + strYear
		# Temperature data. Following two lines are example of input format:
		# Date,Temp
		# 2010-01-25,-8.3
		weather = 'weather' + strYear + '.csv'
		TfileName = os.path.join(dataLoc, weather)
		# Read Temperature data from csv file and convert dates to julian days. Date format '%Y-%m-%d' is SMHI's
		TinFile = open(TfileName,'rb')
		dates = []
		times = []
		temps = []
		for line in csv.DictReader(TinFile, delimiter=','):
			dates.append(line['Date'].strip())
			date = datetime.strptime(line['Date'].strip(),'%Y-%m-%d')
			jdate = datetime.strftime(date,'%j')
			times.append(int(jdate))
			temps.append(float(line['Temp'].strip()))
		TinFile.close()
		# Stake data. Following two lines are example of input format:
		# Stake,Easting,Northing,Elevation,Bw,Bs,Bn,Surface
		# 04C,651103.586397,7536381.86553,1219,0.334,2.53,-2.196,ice
		stakeFileName = 'StakeData' + strYear + '.csv'
		SfileName = os.path.join(dataLoc, stakeFileName)
		# Get settings for model: AWS elevation, date of snow probing, dates for model export, first date in shading file (could start this at 1 by default but
		# shading file may be created for limited range of dates to reduce file size)
		refElev, jdayBw, jdatelist, startday = getSettings(dataLoc, times[-1])
		print "For year %s following settings used: " %(strYear)
		print "refElev set to %s" %(refElev)
		print "jdayBw set to %s" %(jdayBw)
		#
		#
		# Settings and counters for testing multiple parameter values
		counter = 0
		# bestBsR2 = -9999 # name refers to R-squared test but may be replaced by norm of residuals
		# BsR2 = np.nan # name refers to R-squared test but may be replaced by norm of residuals
		bestResNorm = 9999 # Norm of residuals version
		ResNorm = np.nan # Norm of residuals version
		writeTest = 0
		# Directory for output
		outputDir = os.path.join('../Output/', strYear)
		if not os.path.exists(outputDir):
			os.makedirs(outputDir)
		outputname = strYear +'_DDM_'
		#
		# Truthing of data against field survey data. Each survey is stored in a separate csv file.
		# The trudb is used to store both data and assessment of results
		truthDir = os.path.join(dataLoc,"truthing")
		try:
			truthfiles = filelist(truthDir,'csv')
			trudb = {}
			print "Truthing files: "
			for file in truthfiles:
				if int(file.split('.')[0]) not in jdatelist:
					print "%s does not match date given in settings file: %s" % (file, jdatelist)
				else:
					print file
				trudb[file.split('.')[0]] = import2vector(os.path.join(truthDir,file))
			trudbKeys = trudb.keys()
			trudbKeys.sort()
		except:
			print "No truthing data found."
		#
		# Read stake data
		SinFile = open(SfileName,'rb')
		stakeData = {}
		# Read point data file with position and winter balance as of last winter probing (jdayBw) and send to model
		for line in csv.DictReader(SinFile, delimiter=','):
			stakeName = line['Stake'].strip()
			stakeData[stakeName] = {}
			# Coordinates
			stakeData[stakeName]['Easting'] = float(line['Easting'].strip())
			stakeData[stakeName]['Northing'] = float(line['Northing'].strip())
			stakeData[stakeName]['Elevation'] = float(line['Elevation'].strip())
			# Get shading factor for location
			# Create vector for shade values
			vals = []
			for d in range(366):
				vals.append(1)
			try:
				stakeData[stakeName]['Shadevals'] = GetShadeVals(stakeData[stakeName]['Easting'], stakeData[stakeName]['Northing'], raster, transf, bandcount, vals, startday)
			except:
				stakeData[stakeName]['Shadevals'] = vals
				print "No shade value obtained for ", stakeName
			# Get the measured winter balance
			try:
				stakeData[stakeName]['Org_Bw'] = float(line['Bw'].strip())
			except:
				print "No winter balance data found (Bw column)"
				break
			# Get the measured summer balance
			try:
				stakeData[stakeName]['Org_Bs'] = float(line['Bs'].strip())
			except:
				pass
			# Get the measured net balance
			try:
				stakeData[stakeName]['Org_Bn'] = float(line['Bn'].strip())
			except:
				pass
		# Iterate over all possible parameter value combinations
		# Scoring for each parameter
		scores = {'ddfSnow':{}, 'ddfSi':{}, 'ddfFirn':{}, 'ddfIce':{}, 'lapse':{}, 'elevLapse':{}, 'sfe':{}, 'ELA':{}, 'refElev':{}}
		parUsage = {'BsScore':[], 'ddfSnow':[], 'ddfSi':[], 'ddfFirn':[], 'ddfIce':[], 'lapse':[], 'elevLapse':[], 'sfe':[], 'ELA':[], 'refElev':[]}
		iterationcount = len(ddfSnow)*len(ddfSi)*len(ddfFirn)*len(ddfIce)*len(lapse)*len(elevLapse)*len(sfe)*len(ELA)
		print "Total number of runs: %s" % (iterationcount)
		for it1, it2, it3, it4, it5, it6, it7, it8 in itertools.product(ddfSnow, ddfSi, ddfFirn, ddfIce, lapse, elevLapse, sfe, ELA):
			paramDict = {}
			paramDict['ddfSnow'] = it1
			paramDict['ddfSi'] = it2
			paramDict['ddfFirn'] = it3
			paramDict['ddfIce'] = it4
			paramDict['lapse'] = it5
			paramDict['elevLapse'] = it6
			paramDict['sfe'] = it7
			paramDict['ELA'] = it8
			paramDict['refElev'] = refElev
			#
			# 'data' is a copy of the original 'stakeData'
			data = copy.deepcopy(stakeData)
			stakeNames = stakeData.keys()
			stakeNames.sort()
			data['DataSets'] = {}
			for stake in stakeNames:
				# For ordered headers/keys
				data[stake]['Headers'] = ['MeltModel', 'Shadevals', 'Easting', 'Northing', 'Elevation', 'Org_Bw']
				if 'Org_Bn' in data[stake].keys():
					data[stake]['Headers'].append('Org_Bn')
				# Send input data to Degree Day Model object
				data[stake]['MeltModel'] = DdfCell(data[stake]['Easting'], data[stake]['Northing'], data[stake]['Elevation'], data[stake]['Org_Bw'], jdayBw, data[stake]['Shadevals'], paramDict)
				# For each julian day in the "times" vector call the meltInst method for each point object, passing the temperature and the day number.
				# This is what runs the model at each time step in the temperature time series file
				for i in range(len(temps)):
					data[stake]['MeltModel'].meltInst(temps[i],times[i])
				for day in jdatelist:
					# Fetch modelled melt and net balance for each julian day specified in the settings and create a new entry for each
					loc = data[stake]['MeltModel'].jTimeSeries.index(day)
					data[stake]['Mod_Bs_' + str(day)] =  round(data[stake]['MeltModel'].meltSumSeries[loc],3)
					data[stake]['Mod_Bn_' + str(day)] =  round(data[stake]['MeltModel'].BnSeries[loc],3)
					data[stake]['Headers'].append('Mod_Bs_' + str(day))
					data[stake]['Headers'].append('Mod_Bn_' + str(day))
					# Fetch any truthing data available
					if 'trudbKeys' in locals():
						if str(day) in trudbKeys:
							try:
								loc = np.where(trudb[str(day)]['Stake']==stake)[0][0]
								data[stake]['Org_Bs_' + str(day)] = round(trudb[str(day)]['Bs'][loc],3)
								data[stake]['Org_Bn_' + str(day)] = round(trudb[str(day)]['Bn'][loc],3)
								data[stake]['Mod_Bw_' + str(day)] = round((data[stake]['Org_Bn_' + str(day)] +data[stake]['Mod_Bs_' + str(day)]), 3)
							except:
								data[stake]['Org_Bs_' + str(day)] = np.nan
								data[stake]['Org_Bn_' + str(day)] = np.nan
								data[stake]['Mod_Bw_' + str(day)] = np.nan
							data[stake]['Headers'].insert(-2, 'Org_Bs_' + str(day))
							data[stake]['Headers'].insert(-2, 'Org_Bn_' + str(day))
							data[stake]['Headers'].insert(-2, 'Mod_Bw_' + str(day))
							# Add values to lists for calculating R2 later
							if 'Mod_Bs_' + str(day) not in data['DataSets'].keys():
								data['DataSets']['Mod_Bs_' + str(day)] = []
							data['DataSets']['Mod_Bs_' + str(day)].append(data[stake]['Mod_Bs_' + str(day)])
							if 'Org_Bs_' + str(day) not in data['DataSets'].keys():
								data['DataSets']['Org_Bs_' + str(day)] = []
							data['DataSets']['Org_Bs_' + str(day)].append(data[stake]['Org_Bs_' + str(day)])
				dataKeys = data.keys()
				dataKeys.sort()
			#
			if len(data['DataSets']) > 0:
				report = copy.deepcopy(paramDict)
				setKeys = data['DataSets'].keys()
				# Order all Mod first, then all Org
				setKeys.sort()
				start = 0
				end = len(setKeys)
				middle = end/2
				i = start
				while i < end/2:
					# Calculate R2
					modBs = np.array(data['DataSets'][setKeys[i]])
					obsBs = np.array(data['DataSets'][setKeys[middle]])
					# Fit regression line through differences. Not a good test of the model
					# popt, pcov = curve_fit(func,obsBs, modBs)
					# variance = np.diagonal(pcov)
					# SE = np.sqrt(variance)
					#
					modBsmean = nanmean(modBs)
					obsBsmean = nanmean(obsBs)
					obsBsMinModBs = obsBs - modBs
					obsBsMinMean = obsBs - obsBsmean
					SSres = (np.nansum(obsBsMinModBs**2))
					SStot = (np.nansum(obsBsMinMean**2))
					ResNorm = SSres**0.5
					# BsR2 = 1 - (SSres / SStot) # BsR2 version
					# report[(setKeys[i]+'_R2')] = BsR2 # BsR2 version
					report[(setKeys[i]+'_RN')] = ResNorm # Norm of residuals version
					# scores = scoring(scores, paramDict, BsR2) # BsR2 version
					scores = scoring(scores, paramDict, ResNorm) # Norm of residuals version
					# parUsage = usageUpdate(parUsage, paramDict, BsR2) #BsR2 version
					parUsage = usageUpdate(parUsage, paramDict, ResNorm) # Norm of residuals version
					if i == 0:
						# if BsR2 >= bestBsR2: # BsR2 version
							# bestBsR2 = copy.copy(BsR2) # BsR2 version
						if ResNorm <= bestResNorm: # Norm of residuals version
							bestResNorm = copy.copy(ResNorm) # Norm of residuals version
							writeTest = 1
							print "\nRun: {0} of {1}".format(counter+1, iterationcount)
							reportKeys = report.keys()
							reportKeys.sort()
							for k in reportKeys:
								print k, report[k]
							# scoreWrite(scores, outputDir) # Write out scores for each parameter after each iteration. No real need
							# print scores
					i = i+1
					middle = middle+1
			if writeTest == 1:
				# Output model data to file
				flnm = str(counter)
				outDir = makeDir(outputDir, flnm)
				meltDataWrite(data, outDir)
				# 	Write report to text file
				reportWrite(report, outDir)
				# 	Plot model results
				x = []
				for stake in stakeNames:
					x.append(data[stake]['Elevation'])
				if len(data['DataSets']) > 0:
					setKeys = data['DataSets'].keys()
					# Order all Mod first, then all Org
					setKeys.sort()
					# Plot differences between measured and modelled
					if plotOn == 1:
						start = 0
						end = len(setKeys)
						middle = end/2
						i = start
						while i < end/2:
							modBs = np.array(data['DataSets'][setKeys[i]])
							obsBs = np.array(data['DataSets'][setKeys[middle]])
							bsDiff = obsBs - modBs
							pltnmBs = outputname + setKeys[i] + '_measured'
							plotDifElev(pltnmBs, outDir, setKeys[i], x, bsDiff, 'r')
							# pltnmBsmm = outputname + setKeys[i] + '_modmeas'
							# plotDif(pltnmBsmm, outDir, setKeys[i], obsBs, modBs, 'b')
							i = i+1
							middle = middle+1
			if counter %100 == 0:
				print "%s of %s" % (counter, iterationcount)
			counter = counter+1
			writeTest = 0
		if writeScores == 'yes':
			scoreWrite(scores, outputDir)
		parameterCheckWrite(outputDir, year, parUsage)
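The grid search in Example #3 scores each parameter combination by the norm of residuals: the square root of the NaN-aware sum of squared differences between observed and modelled summer balance (ResNorm = SSres**0.5 above). A minimal sketch of that calculation on its own, assuming plain sequences of observed and modelled values (the example numbers are illustrative only):

import numpy as np

def residual_norm(observed, modelled):
    # sqrt of the sum of squared (observed - modelled) differences, ignoring NaNs,
    # as computed for ResNorm in the loop above
    observed = np.asarray(observed, dtype=float)
    modelled = np.asarray(modelled, dtype=float)
    return float(np.nansum((observed - modelled) ** 2) ** 0.5)

print(residual_norm([2.53, 2.10, np.nan], [2.40, 2.25, 1.90]))  # ~0.198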