Example 1
def main(wdir, start, tz):

	# fixed stuff
	dataset="HRES"
	tz=int(tz)
	g=9.81

	#Set up Log
	logs = wdir+"/logs/"
	if not os.path.exists(logs):
		os.makedirs(logs)
	logfile=logs+"/logfile"+start
	logging.basicConfig(level=logging.DEBUG, filename=logfile, filemode="a+",format="%(asctime)-15s %(levelname)-8s %(message)s")
	
	#start timer
	start_time = time.time()

	# standard inputs
	demfile = wdir+'/predictors/ele.nc'
	surfile=wdir+'/forcing/SURF.nc'
	plevfile=wdir+ '/forcing/PLEV.nc'
	
	# make out path for results
	out = wdir+"/out/"
	if not os.path.exists(out):
		os.makedirs(out)

	# open DEM
	dem  = nc.Dataset(demfile)
	dem_ele = dem.variables['ele'][:]

	# time stuff
	f = nc.Dataset( plevfile)
	nctime = f.variables['time']
	dtime = pd.to_datetime(nc.num2date(nctime[:],nctime.units, calendar="standard"))
	starti = np.where(dtime==start)[0].item() # so we can run on a single timestep
	endi = starti+1

	# compute timestep before we cut timeseries
	a=dtime[2]-dtime[1]
	step = a.seconds

	# extract timestep
	dtime = dtime[starti:endi]

	print("Running timestep "+ start)

	#===============================================================================
	# tscale3d - 3D interpolation of pressure level fields
	#===============================================================================

	t = t3d.main( wdir, 'grid', 't', starti,endi,dataset)
	r = t3d.main( wdir, 'grid', 'r', starti,endi,dataset)
	u = t3d.main( wdir, 'grid', 'u', starti,endi,dataset)
	v = t3d.main( wdir, 'grid', 'v', starti,endi,dataset)
	gtob = hp.Bunch(t=t,r=r,u=u,v=v, dtime=dtime)

	#===============================================================================
	# tscale2d - Generates 2D interpolations from coarse (ERA5) to fine (1km) grid
	#===============================================================================
	t2m = t2d.main( wdir, 'grid', 't2m', starti,endi,dataset)
	tp = t2d.main( wdir, 'grid', 'tp', starti,endi,dataset)
	ssrd = t2d.main( wdir, 'grid', 'ssrd', starti,endi,dataset)
	strd = t2d.main( wdir, 'grid', 'strd', starti,endi,dataset)
	tisr = t2d.main( wdir, 'grid', 'tisr', starti,endi,dataset)
	d2m = t2d.main( wdir, 'grid', 'd2m', starti,endi,dataset)
	z = t2d.main( wdir, 'grid', 'z', starti,endi,dataset) # always true as this is time invariant
	gridEle=z[:,:,0]/g
	gsob = hp.Bunch(t2m=t2m, tp=tp, ssrd=ssrd, strd=strd, tisr=tisr, d2m=d2m, z=z, gridEle=gridEle, dtime=dtime)

	#===============================================================================
	# Precip
	#===============================================================================

	def tp2rate(tp, step):
		""" convert tp from m/timestep (total accumulation over timestep) to rate in mm/h 

				Args:
					step: timestep in seconds (era5=3600, ensemble=10800)

				Note: both EDA (ensemble 3h) and HRES (1h) are accumulated over the timestep
				and therefore treated here the same.
				https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation
		"""
		tp = tp/step*60*60 # convert metres per timestep (in secs) -> m/hour 
		pmmhr = tp	*1000 # m/hour-> mm/hour
		return pmmhr
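	# Worked example of the conversion above (illustrative values): an HRES
	# accumulation of tp = 0.0005 m over a 3600 s step gives 0.0005 m/h = 0.5 mm/h;
	# the same 0.5 mm/h from an EDA 10800 s step corresponds to tp = 0.0015 m.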

	def precipGrid(fineEle, gsob):
		'''
		Apply the Liston and Elder (2006) monthly precipitation lapse rates to scale
		the coarse-grid precipitation rate (gsob.pmmhr) to the fine grid.
		Args: fineEle (fine-grid elevation, m); gsob (contains gridEle, pmmhr, dtime).
		'''
		
		lookups = {
			   1:0.3,
			   2:0.3,
			   3:0.3,
			   4:0.3,
			   5:0.25,
			   6:0.2,
			   7:0.2,
			   8:0.2,
			   9:0.2,
			   10:0.25,
			   11:0.3,
			   12:0.3
		}

		# Precipitation lapse rate, varies by month (Liston and Elder, 2006).
		pfis = gsob.dtime.month.map(lookups)
		

		dz=(fineEle-gsob.gridEle)/1e3  # Elevation difference in kilometers between the fine and coarse surface.
		dz2 =dz.reshape(dz.size) #make grid a vector
		pfis2 = np.repeat(pfis.values[:,None], dz2.size, axis=1)
			   
		lp=(1+pfis2.T*dz2[:,None])/(1-pfis2.T*dz2[:,None])# Precipitation correction factor.
		lp2 = lp.reshape(gsob.pmmhr.shape)
		Pf=gsob.pmmhr*lp2
		
		return Pf


	gsob.pmmhr = tp2rate(tp,step)
	grid_prate = precipGrid(dem_ele,gsob)
	

	#===============================================================================
	# Longwave
	#===============================================================================
	def instRad(sob, step):
		""" Convert SWin from accumulated quantities in J/m2 to 
		instantaneous W/m2 see: 
		https://confluence.ecmwf.int/pages/viewpage.action?pageId=104241513

		Args:
			step: timestep in seconds (era5=3600, ensemble=10800)

		Note: both EDA (ensemble 3h) and HRES (1h) are accumulated over the timestep
		and therefore treated here the same ie step=3600s (1h)
		https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation
				"""
		sob.strd = sob.strd/step  
		sob.ssrd = sob.ssrd/step
		sob.tisr = sob.tisr/step 
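	# Worked example of the conversion above (illustrative): an hourly ssrd
	# accumulation of 3.6e6 J/m2 divided by step = 3600 s is a mean flux of
	# 1000 W/m2 over that hour.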
	
	def lwin(sob,tob):
		"""Convert to RH (should be in a function). Following MG Lawrence 
		DOI 10.1175/BAMS-86-2-225 """
		A1=7.625 
		B1=243.04 
		C1=610.94
		tc=sob.t2m-273.15
		tdc=sob.d2m-273.15
		tf=tob.t-273.15 # fout.T
		c=(A1*tc)/(B1+tc)
		RHc=100*np.exp((tdc*A1-tdc*c-B1*c)/(B1+tdc)) # Inverting eq. 8 in Lawrence.

		""" Calculate saturation vapor pressure at grid and "subgrid" [also
		through function] using the Magnus formula."""
		
		svpf=C1*np.exp(A1*tf/(B1+tf))
		svpc=C1*np.exp(A1*tc/(B1+tc))

		"""Calculate the vapor pressure at grid (c) and subgrid (f)."""
		vpf=tob.r*svpf/1e2 # RHf
		vpc=RHc*svpc/1e2

		"""Use the vapor pressure and temperature to calculate clear sky
		 # emissivity at grid and subgrid. [also function]
		Konzelmann et al. 1994
		Ta in kelvin

		 """
		x1=0.43 
		x2=5.7
		cef=0.23+x1*(vpf/tob.t)**(1/x2) #Pretty sure the use of Kelvin is correct.
		cec=0.23+x1*(vpc/sob.t2m)**(1/x2)

		"""Diagnose the all sky emissivity at grid."""
		sbc=5.67e-8
		aec=sob.strd/(sbc*sob.t2m**4)
		# need to constrain to 1 as original code?

		""" Calculate the "cloud" emissivity at grid, assume this is the same at
	 	subgrid."""
		deltae=aec-cec

		""" Use the former cloud emissivity to compute the all sky emissivity at 
		subgrid. """
		aef=cef+deltae
		LWf=aef*sbc*tob.t**4
		return(LWf)
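	# In summary the routine computes LWf = (cef + (aec - cec)) * sbc * tob.t**4,
	# i.e. fine-grid clear-sky emissivity plus the coarse-grid cloud contribution,
	# applied to the downscaled air temperature.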

	instRad(gsob,3600)
	ts_lwin = lwin(gsob,gtob)


#===============================================================================
# make a pob - required as input to swin routine. This object is data on each 
# pressure level interpolated to the fine grid ie has dimensions xy (finegrid) x plev
#===============================================================================

	f = nc.Dataset(plevfile)
	lev = f.variables['level'][:]
	var='t'
	ds = rc.t3d( pl=plevfile, dem =demfile)
	out_xyz_dem, lats, lons, shape= ds.demGrid()
	xdim=lev.shape[0]

	ydim=shape[0]*shape[1]
	t_interp_out = np.zeros((xdim,ydim))
	z_interp_out = np.zeros((xdim,ydim))
		
	for timestep in range(starti,endi):
		gridT,gridZ,gridLat,gridLon=ds.gridValue(var,timestep)
		t_interp, z_interp = ds.inLevelInterp(gridT,gridZ, gridLat,gridLon,out_xyz_dem)
		t_interp_out = np.dstack((t_interp_out, t_interp))
		z_interp_out = np.dstack((z_interp_out, z_interp))

	# drop init blank layer
	tinterp =t_interp_out[:,:,1:]
	zinterp =z_interp_out[:,:,1:]

	gpob= hp.Bunch(t=tinterp,z=zinterp, levels=lev)

	logging.info("made a pob!")

	dem  = nc.Dataset(demfile)
	lon = dem.variables['longitude'][:]
	lat = dem.variables['latitude'][:]
	demv = dem.variables['ele'][:]
	demlon1 = dem.variables['longitude'][:]
	demlat1 = dem.variables['latitude'][:]
	demlon = np.tile(demlon1,demlat1.size)
	demlat = np.repeat(demlat1,demlon1.size)
	demv=np.reshape(demv,demv.size)

	# why are these masked values generated?
	demv =np.ma.filled(demv, fill_value=1)
	tz = np.repeat(tz,demv.size)

	stat = pd.DataFrame({	"ele":demv, 
					"lon":demlon,
					"lat":demlat,
					"tz":tz					
					})
#===============================================================================
# Compute Shortwave
#===============================================================================	

	def swin2D(pob,sob,tob, stat, dates): 
		'''
		main edit over standard function for points:
		- 3d tob,sob reduce to 2D (reshape)
		'''
		
		timesize=len(dates)
		statsize=len(stat.ele)
		""" toposcale surface pressure using hypsometric equation - move to own 
		class """
		g=9.81
		R=287.05  # Gas constant for dry air.
		#ztemp = pob.z # geopotential height b = np.transpose(a, (2, 0, 1))
		ztemp = np.transpose(pob.z, (2, 0, 1)) # pob is originally ordered levels,stations/cells,time
		#Ttemp = pob.t
		Ttemp = np.transpose(pob.t, (2, 0, 1))# pob is originally ordered levels,stations/cells,time
		statz = np.array(stat.ele)*g
		#dz=ztemp.transpose()-statz[None,:,None] # transpose not needed but now consistent with CGC surface pressure equations
		dz=ztemp-statz # dimensions of dz : time, levels, stations
		
		# set all levels below surface to a very big number so they cannot be found by min
		newdz=dz
		newdz[newdz<0]=999999
		
		psf =np.zeros( (dates.size, statz.shape[0]) )

		# reshape tob.t here
		tob.tr=tob.t.reshape(tob.t.shape[0]*tob.t.shape[1], tob.t.shape[2], order='F')
		tob.trT=tob.tr.T # transpose to get right order


		# loop through timesteps
		for i in range(0,dates.size):
			
			# find overlying layer
			thisp = dz[i,:,:]==np.min(newdz[i,:,:],axis=0) # thisp is a boolean matrix of levels x stations, True marks the pressure level overlying the station surface elevation

			# flatten to 1 dimension, order='F' (Fortran, column-major)
			thispVec =thisp.reshape(thisp.size,order='F')
			TtempVec = Ttemp.reshape(Ttemp.shape[0], Ttemp.shape[1]*Ttemp.shape[2], order='F')
			ztempVec = ztemp.reshape(ztemp.shape[0], ztemp.shape[1]*ztemp.shape[2], order='F')
			
			# boolean indexing to find temp and geopotential that correspond to the lowest overlying layer
			T1=TtempVec[i,thispVec]
			z1=ztempVec[i,thispVec]


			p1=np.tile(pob.levels[::-1],statz.shape[0])[thispVec]*1e2 #Convert to Pa. Reverse levels to ensure low elevation (high pressure) to high elevation (low pressure)
			Tbar=np.mean([T1, tob.trT[i, :]],axis=0) # temperature midway between surface (toposcale T) and lowest overlying level (T1)
			""" Hypsometric equation.""" #P1 is above surface is this correct? Yes!
			psf[i,:]=(p1*np.exp((z1-statz)*(g/(Tbar*R)))) # exponent is positive ie increases pressure as surface is lower than pressure level


		""" Maybe follow Dubayah's approach (as in Rittger and Girotto) instead
		for the shortwave downscaling, other than terrain effects. """

		""" Height of the "grid" (coarse scale)"""
		Zc=sob.z.reshape(sob.z.shape[0]*sob.z.shape[1], sob.z.shape[2]).T # reshape and transpose to remove dimension and make broadcastable

		""" toa """
		SWtoa = sob.tisr.reshape(sob.tisr.shape[0]*sob.tisr.shape[1], sob.tisr.shape[2]).T  

		""" Downwelling shortwave flux of the "grid" using nearest neighbor."""
		SWc=sob.ssrd.reshape(sob.ssrd.shape[0]*sob.ssrd.shape[1], sob.ssrd.shape[2]).T 

		"""Calculate the clearness index."""
		kt=SWc/SWtoa

		#kt[is.na(kt)==T]<-0 # make sure 0/0 =0
		#kt[is.infinite(kt)==T]<-0 # make sure 0/0 =0
		kt[kt<0]=0
		kt[kt>1]=0.8 #upper limit of kt


		"""
		Calculate the diffuse fraction following the regression of Ruiz-Arias 2010 
		
		"""
		kd=0.952-1.041*np.exp(-1*np.exp(2.3-4.702*kt))
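		# e.g. kt = 0.5 gives kd = 0.952 - 1.041*exp(-exp(2.3 - 4.702*0.5)) ~ 0.55 (illustrative)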

		""" Use this to calculate the downwelling diffuse and direct shortwave radiation at grid. """
		SWcdiff=kd*SWc
		SWcdir=(1-kd)*SWc

		""" Use the above with the sky-view fraction to calculate the 
		downwelling diffuse shortwave radiation at subgrid. """
		SWfdiff=SWcdiff
		SWfdiff = np.nan_to_num(SWfdiff) # convert nans (night) to 0

		""" Direct shortwave routine, modified from Joel. 
		Get surface pressure at "grid" (coarse scale). Can remove this
		part once surface pressure field is downloaded, or just check
		for existence. """

		ztemp = np.transpose(pob.z, (0, 2, 1))
		Ttemp = np.transpose(pob.t, (0, 2, 1))
		dz=ztemp-Zc # dimensions of dz : levels, time, stations

		# set all levels below surface to a very big number so they cannot be found by min
		newdz=dz
		newdz[newdz<0]=999999

		psc =np.zeros( (dates.size, statz.shape[0]) )
		for i in range(0,dates.size):
		
			#thisp.append(np.argmin(dz[:,i][dz[:,i]>0]))
			# find overlying layer
			thisp = dz[:,i,:]==np.min(newdz[:,i,:],axis=0) # thisp is a boolean matrix of levels x stations, True marks the pressure level overlying the station surface elevation !! time index in middle this time!!!
			z0 = Zc[i,:]
			T0 = sob.t2m.reshape(sob.t2m.shape[0]*sob.t2m.shape[1], sob.t2m.shape[2]).T[i,:]

			# flatten to 1 dimension, order='F' (Fortran, column-major)
			thispVec =thisp.reshape(thisp.size,order='F')
			TtempVec = Ttemp.reshape(Ttemp.shape[1], Ttemp.shape[0]*Ttemp.shape[2], order='F') # !! order of permutations is different from pressure at finegrid routine (time is middle dimension)
			ztempVec = ztemp.reshape(ztemp.shape[1], ztemp.shape[0]*ztemp.shape[2], order='F')# !! order of permutations is different from pressure at finegrid routine (time is middle dimension)
			
			# boolean indexing to find temp and geopotential that correspond to the lowest overlying layer
			T1=TtempVec[i,thispVec]
			z1=ztempVec[i,thispVec]

			p1=np.tile(pob.levels[::-1],statz.shape[0])[thispVec]*1e2 #Convert to Pa.
			Tbar=np.mean([T0, T1],axis=0)
			""" Hypsometric equation."""
			psc[i,:] = (p1*np.exp((z1-z0)*(g/(Tbar*R))))


		
		"""compute julian dates"""
		jd= sg.to_jd(dates)

		"""
		Calculates a unit vector in the direction of the sun from the observer 
		position.
		"""
		svx,svy,svz =	sg.sunvectorMD(jd, stat.lat, stat.lon, stat.tz,statsize,timesize)
		sp=sg.sunposMD(svx,svy,svz)


		# Cosine of the zenith angle.
		#sp.zen=sp.zen*(sp.zen>0) # Sun might be below the horizon.
		muz=np.cos(sp.zen) 
		# NB! psc must be in Pa (NOT hPA!).
	  
		# Calculate the "broadband" absorption coefficient. Elevation correction
		ka=(g*muz/(psc))*np.log(SWtoa/SWcdir)	
		ka = np.nan_to_num(ka) 

		# Now you can (finally) find the direct component at subgrid. 
		SWfdir=SWtoa*np.exp(-ka*psf/(g*muz))
		SWfdirCor=SWfdir #*dprod
	  
		SWfglob =  SWfdiff+ SWfdirCor
		print(" %f minutes for VECTORISED interpolation %s" % (round((time.time()/60 - start_time/60),2),"swin") )
		return SWfglob

	gtob.swin = swin2D(gpob,gsob,gtob, stat, dtime)
	gtob.swin =gtob.swin.reshape(gtob.swin.shape[0], gsob.ssrd.shape[0], gsob.ssrd.shape[1])

	ntime = len(dtime)
	stephr =a.seconds/60/60
	rtime=np.array(range(len(dtime)))*stephr


#===============================================================================
# write results
#===============================================================================	

	
	## Longwave

	#open
	f = nc.Dataset('lwin.nc','w', format='NETCDF4')

	#make dimensions
	f.createDimension('lon', len(lon))
	f.createDimension('lat', len(lat))
	f.createDimension('time', ntime)

	#make dimension variables
	longitude = f.createVariable('lon',    'f4',('lon',))
	latitude  = f.createVariable('lat',    'f4',('lat',))
	mytime = f.createVariable('time', 'i', ('time',))
	lwin = f.createVariable('lwin',    'f4',('lon','lat','time'))

	#assign dimensions
	longitude[:] = lon
	latitude[:]  = lat
	mytime[:] = rtime

	varT = np.transpose(ts_lwin ,(1, 0, 2))
	lwin[:] = np.flip(varT, 1)
	
	#metadata
	f.history = 'Created by toposcale on '+time.ctime()
	mytime.units = 'hours since '+str(dtime[0])

	f.close()

	## Shortwave

	#open
	f = nc.Dataset('swin.nc','w', format='NETCDF4')

	#make dimensions
	f.createDimension('lon', len(lon))
	f.createDimension('lat', len(lat))
	f.createDimension('time', ntime)

	#make dimension variables
	longitude = f.createVariable('lon',    'f4',('lon',))
	latitude  = f.createVariable('lat',    'f4',('lat',))
	mytime = f.createVariable('time', 'i', ('time',))
	lwin = f.createVariable('swin',    'f4',('lon','lat','time'))

	#assign dimensions
	longitude[:] = lon
	latitude[:]  = lat
	mytime[:] = rtime

	varT = np.transpose(gtob.swin ,(2, 1, 0)) #t,lat,lon -> lon,lat,t
	lwin[:] = varT
	
	#metadata
	f.history = 'Created by toposcale on '+time.ctime()
	mytime.units = 'hours since '+str(dtime[0])

	f.close()



	## ta
	
	#open
	f = nc.Dataset('ta.nc','w', format='NETCDF4')

	#make dimensions
	f.createDimension('lon', len(lon))
	f.createDimension('lat', len(lat))
	f.createDimension('time', ntime)

	#make dimension variables
	longitude = f.createVariable('lon',    'f4',('lon',))
	latitude  = f.createVariable('lat',    'f4',('lat',))
	mytime = f.createVariable('time', 'i', ('time',))
	lwin = f.createVariable('ta',    'f4',('lon','lat','time'))

	#assign dimensions
	longitude[:] = lon
	latitude[:]  = lat
	mytime[:] = rtime

	varT = np.transpose(gtob.t ,(1, 0, 2))
	lwin[:] = np.flip(varT, 1)
	
	#metadata
	f.history = 'Created by toposcale on '+time.ctime()
	mytime.units = 'hours since '+str(dtime[0])

	f.close()

	# rh
	
	#open
	f = nc.Dataset('rh.nc','w', format='NETCDF4')

	#make dimensions
	f.createDimension('lon', len(lon))
	f.createDimension('lat', len(lat))
	f.createDimension('time', ntime)

	#make dimension variables
	longitude = f.createVariable('lon',    'f4',('lon',))
	latitude  = f.createVariable('lat',    'f4',('lat',))
	mytime = f.createVariable('time', 'i', ('time',))
	lwin = f.createVariable('rh',    'f4',('lon','lat','time'))

	#assign dimensions
	longitude[:] = lon
	latitude[:]  = lat
	mytime[:] = rtime

	varT = np.transpose(gtob.r ,(1, 0, 2))
	lwin[:] = np.flip(varT, 1)
	
	#metadata
	f.history = 'Created by toposcale on '+time.ctime()
	mytime.units = 'hours since '+str(dtime[0])

	f.close()

	# u
	
	#open
	f = nc.Dataset('u.nc','w', format='NETCDF4')

	#make dimensions
	f.createDimension('lon', len(lon))
	f.createDimension('lat', len(lat))
	f.createDimension('time', ntime)

	#make dimension variables
	longitude = f.createVariable('lon',    'f4',('lon',))
	latitude  = f.createVariable('lat',    'f4',('lat',))
	mytime = f.createVariable('time', 'i', ('time',))
	lwin = f.createVariable('u',    'f4',('lon','lat','time'))

	#assign dimensions
	longitude[:] = lon
	latitude[:]  = lat
	mytime[:] = rtime

	varT = np.transpose(gtob.u ,(1, 0, 2))
	lwin[:] = np.flip(varT, 1)
	
	#metadata
	f.history = 'Created by toposcale on '+time.ctime()
	mytime.units = 'hours since '+str(dtime[0])

	f.close()

	# v
	
	#open
	f = nc.Dataset('v.nc','w', format='NETCDF4')

	#make dimensions
	f.createDimension('lon', len(lon))
	f.createDimension('lat', len(lat))
	f.createDimension('time', ntime)

	#make dimension variables
	longitude = f.createVariable('lon',    'f4',('lon',))
	latitude  = f.createVariable('lat',    'f4',('lat',))
	mytime = f.createVariable('time', 'i', ('time',))
	lwin = f.createVariable('v',    'f4',('lon','lat','time'))

	#assign dimensions
	longitude[:] = lon
	latitude[:]  = lat
	mytime[:] = rtime

	varT = np.transpose(gtob.v ,(1, 0, 2))
	lwin[:] = np.flip(varT, 1)
	
	#metadata
	f.history = 'Created by toposcale on '+time.ctime()
	mytime.units = 'hours since '+str(dtime[0])

	f.close()

	#open
	f = nc.Dataset('pint.nc','w', format='NETCDF4')

	#make dimensions
	f.createDimension('lon', len(lon))
	f.createDimension('lat', len(lat))
	f.createDimension('time', ntime)

	#make dimension variables
	longitude = f.createVariable('lon',    'f4',('lon',))
	latitude  = f.createVariable('lat',    'f4',('lat',))
	mytime = f.createVariable('time', 'i', ('time',))
	lwin = f.createVariable('pint',    'f4',('lon','lat','time'))

	#assign dimensions
	longitude[:] = lon
	latitude[:]  = lat
	mytime[:] = rtime

	varT = np.transpose(grid_prate ,(1, 0, 2))
	lwin[:] = np.flip(varT, 1)
	
	#metadata
	f.history = 'Created by toposcale on '+time.ctime()
	mytime.units = 'hours since '+str(dtime[0])

	f.close()
	

	logging.info("Toposcale complete!")
	#print("%f minutes" % round((time.time()/60 - start_time/60),2) )
	print("%f seconds for run" % round((time.time() - start_time),2) )
Example 2
def main(wdir, mode, start, end, dataset, member=None):

    #Set up Log
    # make out path for results
    logs = wdir + "/logs/"
    if not os.path.exists(logs):
        os.makedirs(logs)

    logfile = logs + "/logfile" + start
    #if os.path.isfile(logfile) == True:
    #os.remove(logfile)
    logging.basicConfig(level=logging.DEBUG,
                        filename=logfile,
                        filemode="a+",
                        format="%(asctime)-15s %(levelname)-8s %(message)s")
    logging.info("Running member" + str(member))
    # start timer
    start_time = time.time()

    # make these names standard:
    stationsfile = wdir + '/listpoints.txt'
    demfile = wdir + '/predictors/ele.nc'
    surfile = wdir + '/forcing/SURF.nc'
    plevfile = wdir + '/forcing/PLEV.nc'

    # make out path for results
    out = wdir + "/out/"
    if not os.path.exists(out):
        os.makedirs(out)

    # convert to python indexing and int
    if dataset == 'EDA':
        member = int(member) - 1

    if mode == "grid":
        dem = nc.Dataset(demfile)
        dem_ele = dem.variables['Band1'][:]

    # this is main dtime used in code
    f = nc.Dataset(plevfile)
    nctime = f.variables['time']
    dtime = pd.to_datetime(
        nc.num2date(nctime[:], nctime.units, calendar="standard"))
    starti = np.where(dtime == start + ' 00:00:00')[0].item()
    endi = np.where(dtime == end + ' 00:00:00')[0].item()
    dtime = dtime[starti:endi]
    timesteps = len(dtime)

    g = 9.81

    logging.info("Running " + str(timesteps) + " timesteps")

    #===============================================================================
    # Point stuff
    #
    #
    #
    #
    #===============================================================================
    if mode == "points" or mode == "point":
        if os.path.isfile(stationsfile) == False:
            logging.info("No listpoints.txt found!")

        logging.info("Start points run...")

        #open points
        mystations = pd.read_csv(stationsfile)
        #===============================================================================
        # make a pob hack
        #===============================================================================

        import recapp_era5 as rc
        f = nc.Dataset(plevfile)
        lev = f.variables['level'][:]
        #dataImport.plf_get()    # pressure level air temperature
        var = 't'

        # station case
        if dataset == "HRES":
            ds = rc.t3d(pl=plevfile)
        if dataset == "EDA":
            ds = rc.t3d_eda(pl=plevfile)

        #timesteps = ds.pl.variables['time'][:].size
        out_xyz_dem, lats, lons, shape, names = ds.demGrid(stations=mystations)

        # init grid stack
        xdim = lev.shape[0]

        ydim = shape[0]
        t_interp_out = np.zeros((xdim, ydim))
        z_interp_out = np.zeros((xdim, ydim))

        for timestep in range(starti, endi):
            #logging.info(str(round(float(timestep)/float(timesteps)*100,0))+ "% done")

            gridT, gridZ, gridLat, gridLon = ds.gridValue(var, timestep)
            if dataset == "HRES":
                t_interp, z_interp = ds.inLevelInterp(gridT, gridZ, gridLat,
                                                      gridLon, out_xyz_dem)
            if dataset == "EDA":
                t_interp, z_interp = ds.inLevelInterp(gridT, gridZ, gridLat,
                                                      gridLon, out_xyz_dem,
                                                      member)

            t_interp_out = np.dstack((t_interp_out, t_interp))
            z_interp_out = np.dstack((z_interp_out, z_interp))
        # drop init blank layer
        tinterp = t_interp_out[:, :, 1:]
        zinterp = z_interp_out[:, :, 1:]

        pob = hp.Bunch(t=tinterp, z=zinterp, levels=lev)

        logging.info("made a pob!")
        #===============================================================================
        # tscale3d (results and input to radiation routines)
        #===============================================================================
        if dataset == "HRES":
            t = t3d.main(wdir, 'point', 't', starti, endi, dataset)
            r = t3d.main(wdir, 'point', 'r', starti, endi, dataset)
            u = t3d.main(wdir, 'point', 'u', starti, endi, dataset)
            v = t3d.main(wdir, 'point', 'v', starti, endi, dataset)
        if dataset == "EDA":
            t = t3d.main(wdir, 'point', 't', starti, endi, dataset, member)
            r = t3d.main(wdir, 'point', 'r', starti, endi, dataset, member)
            u = t3d.main(wdir, 'point', 'u', starti, endi, dataset, member)
            v = t3d.main(wdir, 'point', 'v', starti, endi, dataset, member)
        # compute wind speed and direction
        ws = np.sqrt(u**2 + v**2)
        wd = (180 / np.pi) * np.arctan(u / v) + np.where(
            v > 0, 180, np.where(u > 0, 360, 0))

        tob = hp.Bunch(t=t, r=r, u=u, v=v, ws=ws, wd=wd, dtime=dtime)

        # Physical filters

        # constrain r (RH) to the interval 5-100, as required by the LWin parameterisation
        tob.r[tob.r < 5] = 5
        tob.r[tob.r > 100] = 100

        tob.ws[tob.ws < 0] = 0

        logging.info("made a tob!")
        #===============================================================================
        # tscale2d
        #===============================================================================
        if dataset == "HRES":
            t2m = t2d.main(wdir, 'point', 't2m', starti, endi, dataset)
            tp = t2d.main(wdir, 'point', 'tp', starti, endi, dataset)
            ssrd = t2d.main(wdir, 'point', 'ssrd', starti, endi, dataset)
            strd = t2d.main(wdir, 'point', 'strd', starti, endi, dataset)
            tisr = t2d.main(wdir, 'point', 'tisr', starti, endi, dataset)
            d2m = t2d.main(wdir, 'point', 'd2m', starti, endi, dataset)
            z = t2d.main(wdir, 'point', 'z', starti, endi,
                         dataset)  # always true as this is time invariant
        if dataset == "EDA":
            t2m = t2d.main(wdir, 'point', 't2m', starti, endi, dataset, member)
            tp = t2d.main(wdir, 'point', 'tp', starti, endi, dataset, member)
            ssrd = t2d.main(wdir, 'point', 'ssrd', starti, endi, dataset,
                            member)
            strd = t2d.main(wdir, 'point', 'strd', starti, endi, dataset,
                            member)
            tisr = t2d.main(wdir, 'point', 'tisr', starti, endi, dataset,
                            member)
            d2m = t2d.main(wdir, 'point', 'd2m', starti, endi, dataset, member)
            z = t2d.main(wdir, 'point', 'z', starti, endi, dataset,
                         member)  # always true as this is time invariant

        gridEle = z[0, :] / g

        sob = hp.Bunch(t2m=t2m,
                       tp=tp,
                       ssrd=ssrd,
                       strd=strd,
                       tisr=tisr,
                       d2m=d2m,
                       z=z,
                       gridEle=gridEle,
                       dtime=dtime)
        logging.info("made a sob!")

        # functions

        def tp2rate(tp, step):
            """ convert tp from m/timestep (total accumulation over timestep) to rate in mm/h 

					Args:
						step: timestep in seconds (era5=3600, ensemble=10800)

					Note: both EDA (ensemble 3h) and HRES (1h) are accumulated over the timestep
					and therefore treated here the same.
					https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation
			"""
            tp = tp / step * 60 * 60  # convert metres per timestep -> m/hour
            pmmhr = tp * 1000  # m/hour-> mm/hour
            return pmmhr

        def precipPoint(fineEle, sob):
            '''
			Args:
			fineEle:ele vector from station dataframe
			sob: contains gridEle, tp and dtime
			'''
            # convert TP to mm/hr

            lookups = {
                1: 0.35,
                2: 0.35,
                3: 0.35,
                4: 0.3,
                5: 0.25,
                6: 0.2,
                7: 0.2,
                8: 0.2,
                9: 0.2,
                10: 0.25,
                11: 0.3,
                12: 0.35
            }

            # Precipitation lapse rate, varies by month (Liston and Elder, 2006).
            pfis = sob.dtime.month.map(lookups)
            pfis = np.repeat(pfis.values[:, None], fineEle.size, axis=1)

            dz = (
                fineEle - sob.gridEle
            ) / 1e3  # Elevation difference in kilometers between the fine and coarse surface.

            lp = (1 + pfis.T * dz[:, None]) / (
                1 - pfis.T * dz[:, None])  # Precipitation correction factor.
            #Pf=sob.pmmhr.T*lp
            prate = sob.pmmhr.T * lp  # mm/hour
            psum = sob.tp.T * 1000 * lp  # mm/timestep
            return prate, psum
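        # Worked example of the correction factor above (illustrative): a station
        # 1000 m above its grid cell in January (pfi = 0.35, dz = 1.0) gives
        # lp = (1 + 0.35)/(1 - 0.35) ~ 2.08, i.e. roughly a doubling of precipitation.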

        #===============================================================================
        # Precip
        #===============================================================================
        a = dtime[2] - dtime[1]
        step = a.seconds
        pmmhr = tp2rate(tp, step)
        sob.pmmhr = pmmhr
        tob.prate, tob.psum = precipPoint(mystations.ele, sob)
        logging.info("made prate!")

        #===============================================================================
        # Longwave
        #===============================================================================
        def instRad(sob, step):
            """ Convert SWin from accumulated quantities in J/m2 to 
			instantaneous W/m2 see: 
			https://confluence.ecmwf.int/pages/viewpage.action?pageId=104241513

			Args:
				step: timestep in seconds (era5=3600, ensemble=10800)

			Note: both EDA (ensemble 3h) and HRES (1h) are accumulated over the timestep
			and therefore treated here the same.
			https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation
					"""
            sob.strd = sob.strd / step
            sob.ssrd = sob.ssrd / step
            sob.tisr = sob.tisr / step

        def lwin(sob, tob):
            """Convert to RH (should be in a function). Following MG Lawrence 
			DOI 10.1175/BAMS-86-2-225 """
            A1 = 7.625
            B1 = 243.04
            C1 = 610.94
            tc = sob.t2m - 273.15
            tdc = sob.d2m - 273.15
            tf = tob.t - 273.15  # fout.T
            c = (A1 * tc) / (B1 + tc)
            RHc = 100 * np.exp((tdc * A1 - tdc * c - B1 * c) /
                               (B1 + tdc))  # Inverting eq. 8 in Lawrence.
            """ Calculate saturation vapor pressure at grid and "subgrid" [also
			through function] using the Magnus formula."""

            svpf = C1 * np.exp(A1 * tf / (B1 + tf))
            svpc = C1 * np.exp(A1 * tc / (B1 + tc))
            """Calculate the vapor pressure at grid (c) and subgrid (f)."""
            vpf = tob.r * svpf / 1e2  # RHf
            vpc = RHc * svpc / 1e2
            """Use the vapor pressure and temperature to calculate clear sky
			 # emissivity at grid and subgrid. [also function]
			Konzelmann et al. 1994
			Ta in kelvin

			 """
            x1 = 0.43
            x2 = 5.7
            cef = 0.23 + x1 * (vpf / tob.t)**(
                1 / x2)  #Pretty sure the use of Kelvin is correct.
            cec = 0.23 + x1 * (vpc / sob.t2m)**(1 / x2)
            """Diagnose the all sky emissivity at grid."""
            sbc = 5.67e-8
            aec = sob.strd / (sbc * sob.t2m**4)
            # need to constrain to 1 as original code?
            """ Calculate the "cloud" emissivity at grid, assume this is the same at
		 	subgrid."""
            deltae = aec - cec
            """ Use the former cloud emissivity to compute the all sky emissivity at 
			subgrid. """
            aef = cef + deltae
            LWf = aef * sbc * tob.t**4
            return (LWf)

        instRad(sob, 3600)
        tob.lwin = lwin(sob, tob)
        logging.info("made lwin")

        def swin1D(pob, sob, tob, stat, dates, index):
            # many arrays transposed
            """ toposcale surface pressure using hypsometric equation - move to own 
				class 

				index: index of station array (numeric)
				"""
            g = 9.81
            R = 287.05  # Gas constant for dry air.
            tz = 0  # ERA5 is always utc0, used to compute sunvector
            ztemp = pob.z[:, index, :].T
            Ttemp = pob.t[:, index, :].T
            statz = stat.ele[index] * g
            dz = ztemp.T - statz  # transpose not needed but now consistent with CGC surface pressure equations

            psf = []
            # loop through timesteps
            #for i in range(starti,endi):
            for i in range(0, dates.size):

                # find overlying layer
                thisp = dz[:, i] == np.min(dz[:, i][dz[:, i] > 0])

                # boolean indexing
                T1 = Ttemp[i, thisp]
                z1 = ztemp[i, thisp]
                p1 = pob.levels[thisp] * 1e2  #Convert to Pa.
                Tbar = np.mean([T1, tob.t[i, index]], axis=0)
                """ Hypsometric equation."""
                psf.append(p1 * np.exp((z1 - statz) * (g / (Tbar * R))))

            psf = np.array(psf).squeeze()

            ## Specific humidity routine.
            # mrvf=0.622.*vpf./(psf-vpf); #Mixing ratio for water vapor at subgrid.
            #  qf=mrvf./(1+mrvf); # Specific humidity at subgrid [kg/kg].
            # fout.q(:,:,n)=qf;
            """ Maybe follow Dubayah's approach (as in Rittger and Girotto) instead
				for the shortwave downscaling, other than terrain effects. """
            """ Height of the "grid" (coarse scale)"""
            Zc = sob.z[:, index]  # should this be a single value or vector?
            """ toa """
            SWtoa = sob.tisr[:, index]
            """ Downwelling shortwave flux of the "grid" using nearest neighbor."""
            SWc = sob.ssrd[:, index]
            """Calculate the clearness index."""
            kt = SWc / SWtoa

            #kt[is.na(kt)==T]<-0 # make sure 0/0 =0
            #kt[is.infinite(kt)==T]<-0 # make sure 0/0 =0
            kt[kt < 0] = 0
            kt[kt > 1] = 0.8  #upper limit of kt
            """
				Calculate the diffuse fraction following the regression of Ruiz-Arias 2010 
				
				"""
            kd = 0.952 - 1.041 * np.exp(-1 * np.exp(2.3 - 4.702 * kt))
            """ Use this to calculate the downwelling diffuse and direct shortwave radiation at grid. """
            SWcdiff = kd * SWc
            SWcdir = (1 - kd) * SWc
            """ Use the above with the sky-view fraction to calculate the 
				downwelling diffuse shortwave radiation at subgrid. """
            SWfdiff = stat.svf[index] * SWcdiff
            SWfdiff = np.nan_to_num(SWfdiff)  # convert nans (night) to 0
            """ Direct shortwave routine, modified from Joel. 
				Get surface pressure at "grid" (coarse scale). Can remove this
				part once surface pressure field is downloaded, or just check
				for existance. """

            ztemp = pob.z[:, index, :].T
            Ttemp = pob.t[:, index, :].T
            dz = ztemp.transpose() - sob.z[:, index]

            psc = []
            for i in range(0, dz.shape[1]):

                #thisp.append(np.argmin(dz[:,i][dz[:,i]>0]))
                thisp = dz[:, i] == np.min(dz[:, i][dz[:, i] > 0])
                z0 = sob.z[i, index]
                T0 = sob.t2m[i, index]
                T1 = Ttemp[i, thisp]
                z1 = ztemp[i, thisp]
                p1 = pob.levels[thisp] * 1e2  #Convert to Pa.
                Tbar = np.mean([T0, T1], axis=0)
                """ Hypsometric equation."""
                psc.append(p1 * np.exp((z1 - z0) * (g / (Tbar * R))))

            psc = np.array(psc).squeeze()

            #T1=Ttemp(thisp)
            #z1=ztemp(thisp)
            #p1=pob.levels(thisp)*1e2 #Convert to Pa.
            #Tbar=mean([T0 T1])
            """compute julian dates"""
            jd = sg.to_jd(dates)
            """
				Calculates a unit vector in the direction of the sun from the observer 
				position.
				"""
            sunv = sg.sunvector(jd=jd,
                                latitude=stat.lat[index],
                                longitude=stat.lon[index],
                                timezone=tz)
            """
				Computes azimuth , zenith  and sun elevation 
				for each timestamp
				"""
            sp = sg.sunpos(sunv)

            # Cosine of the zenith angle.
            #sp.zen=sp.zen*(sp.zen>0) # Sun might be below the horizon.
            muz = np.cos(sp.zen)
            # NB! psc must be in Pa (NOT hPA!).
            #if np.max(psc<1.5e3): # Obviously not in Pa
            #psc=psc*1e2

            # Calculate the "broadband" absorption coefficient. Elevation correction
            # from Kris
            ka = (g * muz / (psc)) * np.log(SWtoa / SWcdir)
            #ka.set_fill_value(0)
            #ka = ka.filled()
            # set inf (from SWtoa/SWcdir at nigh, zero division) to 0 (night)
            ka[ka == -np.inf] = 0
            ka[ka == np.inf] = 0

            # Note this equation is obtained by inverting Beer's law, i.e. use
            # I_0 = I_inf * exp[(-ka/mu) * integral_{z0}^{inf} rho dz]
            # along with the hydrostatic equation to convert to pressure coordinates,
            # then solve for ka using p(z=inf)=0.

            # Now you can (finally) find the direct component at subgrid.
            SWfdir = SWtoa * np.exp(-ka * psf / (g * muz))
            """ Then perform the terrain correction. [Corripio 2003 / rpackage insol port]."""
            """compute mean horizon elevation - why negative hor.el possible??? """
            horel = (((np.arccos(np.sqrt(stat.svf[index])) * 180) / np.pi) *
                     2) - stat.slp[index]
            if horel < 0:
                horel = 0
            meanhorel = horel
            """
				normal vector - Calculates a unit vector normal to a surface defined by 
				slope inclination and slope orientation.
				"""
            nv = sg.normalvector(slope=stat.slp[index], aspect=stat.asp[index])
            """
				Method 1: Computes the intensity according to the position of the sun (sunv) and 
				dotproduct normal vector to slope.
				From corripio r package
				"""
            dotprod = np.dot(sunv, np.transpose(nv))
            dprod = dotprod.squeeze()
            dprod[dprod < 0] = 0  # negative values indicate self-shading
            """Method 2: Illumination angles. Dozier"""
            saz = sp.azi
            cosis = muz * np.cos(stat.slp[index]) + np.sin(
                sp.zen) * np.sin(stat.slp[index]) * np.cos(
                    sp.azi - stat.asp[index]
                )  # cosine of illumination angle at subgrid.
            cosic = muz  # cosine of illumination angle at grid (slope=0).
            """
				SUN ELEVATION below hor.el set to 0 - binary mask
				"""
            selMask = sp.sel
            selMask[selMask < horel] = 0
            selMask[selMask > 0] = 1
            """
				derive incident radiation on slope accounting for self shading and cast 
				shadow and solar geometry
				BOTH formulations seem to be broken
				"""
            #SWfdirCor=selMask*(cosis/cosic)*SWfdir
            SWfdirCor = selMask * dprod * SWfdir

            SWfglob = SWfdiff + SWfdirCor
            return SWfglob
            """ 
				Missing components
				- terrain reflection
				"""
            # init grid stack

        #init first row
        ntimestamps = dtime.shape[0]
        ts_swin = np.zeros((ntimestamps))
        for stationi in range(0, mystations.shape[0]):
            '''here we test for array full of NaNs due to points being 
			outside of grid. The NaN arrays are created in the 
			interpolation but only break the code here. If array is
 			all NaN then we just fill that station's slot 
			with NaN'''
            testNans = np.count_nonzero(~np.isnan(pob.z[:, stationi, :]))
            if testNans != 0:
                ts = swin1D(pob=pob,
                            sob=sob,
                            tob=tob,
                            stat=mystations,
                            dates=dtime,
                            index=stationi)
                ts_swin = np.column_stack((ts_swin, ts))
            if testNans == 0:
                nan_vec = np.empty(ntimestamps) * np.nan
                ts_swin = np.column_stack((ts_swin, nan_vec))

        # drop init row
        tob.swin = ts_swin[:, 1:]

        #===============================================================================
        # make dataframe (write individual files plus netcdf)
        #==============================================================================

        logging.info("Writing toposcale files...")
        for i in range(0, tob.t.shape[1]):
            df = pd.DataFrame(
                {
                    "TA": tob.t[:, i],
                    "RH": tob.r[:, i] * 0.01,  #meteoio 0-1
                    "VW": tob.ws[:, i],
                    "DW": tob.wd[:, i],
                    "ILWR": tob.lwin[:, i],
                    "ISWR": tob.swin[:, i],
                    "PINT": tob.prate[i, :],
                    "PSUM": tob.psum[i, :]
                },
                index=tob.dtime)
            df.index.name = "datetime"

            # fill outstanding nan in SW routine with 0 (night)
            df.ISWR = df.ISWR.fillna(0)

            if dataset == 'EDA':
                fileout = wdir + "/out/meteo" + str(
                    i) + "_" + start + "_" + str(
                        member +
                        1) + "_.csv"  # convert member index back to 1-10

            if dataset == 'HRES':
                fileout = wdir + "/out/meteo" + "c" + str(
                    i + 1) + ".csv"  # convert member index back to 1-10
            column_order = [
                'TA', 'RH', 'VW', 'DW', 'ILWR', 'ISWR', 'PINT', 'PSUM'
            ]
            df[column_order].to_csv(path_or_buf=fileout,
                                    na_rep=-999,
                                    float_format='%.3f')
        #logging.info(fileout + " complete")

        #===============================================================================
        # Grid stuff
        #
        #===============================================================================
    if mode == "grid":
        logging.info("Running TopoSCALE3D grid")
        #===============================================================================
        # tscale3d
        #===============================================================================

        t = t3d.main(wdir, 'grid', 't', starti, endi, dataset)
        r = t3d.main(wdir, 'grid', 'r', starti, endi, dataset)
        gtob = hp.Bunch(t=t, r=r, dtime=dtime)

        #===============================================================================
        # tscale2d
        #===============================================================================
        t2m = t2d.main(wdir, 'grid', 't2m', starti, endi, dataset)
        tp = t2d.main(wdir, 'grid', 'tp', starti, endi, dataset)
        ssrd = t2d.main(wdir, 'grid', 'ssrd', starti, endi, dataset)
        strd = t2d.main(wdir, 'grid', 'strd', starti, endi, dataset)
        tisr = t2d.main(wdir, 'grid', 'tisr', starti, endi, dataset)
        d2m = t2d.main(wdir, 'grid', 'd2m', starti, endi, dataset)
        z = t2d.main(wdir, 'grid', 'z', starti, endi,
                     dataset)  # always true as this is time invariant
        gridEle = z[:, :, 0] / g
        gsob = hp.Bunch(t2m=t2m,
                        tp=tp,
                        ssrd=ssrd,
                        strd=strd,
                        tisr=tisr,
                        d2m=d2m,
                        z=z,
                        gridEle=gridEle,
                        dtime=dtime)

        def precipGrid(fineEle, gsob):
            '''
			Args:
				fineEle: fine-grid elevation field (m)
				gsob: grid surface object containing gridEle, pmmhr and dtime
			'''
            # convert TP to mm/hr

            lookups = {
                1: 0.3,
                2: 0.3,
                3: 0.3,
                4: 0.3,
                5: 0.25,
                6: 0.2,
                7: 0.2,
                8: 0.2,
                9: 0.2,
                10: 0.25,
                11: 0.3,
                12: 0.3
            }

            # Precipitation lapse rate, varies by month (Liston and Elder, 2006).
            pfis = gsob.dtime.month.map(lookups)

            dz = (
                fineEle - gsob.gridEle
            ) / 1e3  # Elevation difference in kilometers between the fine and coarse surface.
            dz2 = dz.reshape(dz.size)  #make grid a vector
            pfis2 = np.repeat(pfis.values[:, None], dz2.size, axis=1)

            lp = (1 + pfis2.T * dz2[:, None]) / (
                1 - pfis2.T * dz2[:, None])  # Precipitation correction factor.
            lp2 = lp.reshape(gsob.pmmhr.shape)
            Pf = gsob.pmmhr * lp2

            return Pf

        def tp2rate(tp, step):
            """ convert tp from m/timestep (total accumulation over timestep) to rate in mm/h 

					Args:
						step: timestep in seconds (era5=3600, ensemble=10800)

					Note: both EDA (ensemble 3h) and HRES (1h) are accumulated over the timestep
					and therefore treated here the same.
					https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation
			"""
            tp = tp / step * 60 * 60  # convert metres per timestep (in secs) -> m/hour
            pmmhr = tp * 1000  # m/hour-> mm/hour
            return pmmhr

        a = dtime[2] - dtime[1]
        step = a.seconds
        gsob.pmmhr = tp2rate(tp, step)
        grid_prate = precipGrid(dem_ele, gsob)

        dem = nc.Dataset(demfile)
        lon = dem.variables['lon'][:]
        lat = dem.variables['lat'][:]
        # These packages can be a problem on the cluster
        from osgeo import gdal
        from osgeo import gdal_array
        from osgeo import osr

        for i in range(0, grid_prate.shape[2]):

            myname = wdir + '/out/prate' + str(i) + '.tif'
            array = grid_prate[::-1, :, i]
            #lat = out_xyz_dem[:,0].reshape(l.shape)
            #lon = out_xyz_dem[:,1].reshape(l.shape)

            xmin, ymin, xmax, ymax = [
                lon.min(), lat.min(),
                lon.max(), lat.max()
            ]
            nrows, ncols = np.shape(array)
            xres = (xmax - xmin) / float(ncols)
            yres = (ymax - ymin) / float(nrows)
            geotransform = (xmin, xres, 0, ymax, 0, -yres)

            output_raster = gdal.GetDriverByName('GTiff').Create(
                myname, ncols, nrows, 1, gdal.GDT_Float32)  # Open the file
            output_raster.GetRasterBand(1).WriteArray(
                array)  # Writes my array to the raster
            output_raster.SetGeoTransform(
                geotransform)  # Specify its coordinates
            srs = osr.SpatialReference()  # Establish its coordinate encoding
            srs.ImportFromEPSG(4326)  # This one specifies WGS84 lat long.
            output_raster.SetProjection(
                srs.ExportToWkt())  # Exports the coordinate system
            output_raster = None

        # for i in range(0, t.shape[2]):

        # 	myname=wdir+'/out/t'+str(i)+'.tif'
        # 	array = t[:,:,i]
        # 	#lat = out_xyz_dem[:,0].reshape(l.shape)
        # 	#lon = out_xyz_dem[:,1].reshape(l.shape)

        # 	xmin,ymin,xmax,ymax = [lon.min(),lat.min(),lon.max(),lat.max()]
        # 	nrows,ncols = np.shape(array)
        # 	xres = (xmax-xmin)/float(ncols)
        # 	yres = (ymax-ymin)/float(nrows)
        # 	geotransform=(xmin,xres,0,ymax,0, -yres)

        # 	output_raster = gdal.GetDriverByName('GTiff').Create(myname,ncols, nrows, 1 ,gdal.GDT_Float32)# Open the file
        # 	output_raster.GetRasterBand(1).WriteArray( array )  # Writes my array to the raster
        # 	output_raster.SetGeoTransform(geotransform)# Specify its coordinates
        # 	srs = osr.SpatialReference()# Establish its coordinate encoding
        # 	srs.ImportFromEPSG(4326)   # This one specifies WGS84 lat long.
        # 	output_raster.SetProjection(srs.ExportToWkt())# Exports the coordinate system
        # 	output_raster = None
        #===============================================================================
        # Longwave
        #===============================================================================
        def instRad(sob, step):
            """ Convert SWin from accumulated quantities in J/m2 to 
			instantaneous W/m2 see: 
			https://confluence.ecmwf.int/pages/viewpage.action?pageId=104241513

			Args:
				step: timestep in seconds (era5=3600, ensemble=10800)

			Note: both EDA (ensemble 3h) and HRES (1h) are accumulated over the timestep
			and therefore treated here the same.
			https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation
					"""
            sob.strd = sob.strd / step
            sob.ssrd = sob.ssrd / step
            sob.tisr = sob.tisr / step

        def lwin(sob, tob):
            """Convert to RH (should be in a function). Following MG Lawrence 
			DOI 10.1175/BAMS-86-2-225 """
            A1 = 7.625
            B1 = 243.04
            C1 = 610.94
            tc = sob.t2m - 273.15
            tdc = sob.d2m - 273.15
            tf = tob.t - 273.15  # fout.T
            c = (A1 * tc) / (B1 + tc)
            RHc = 100 * np.exp((tdc * A1 - tdc * c - B1 * c) /
                               (B1 + tdc))  # Inverting eq. 8 in Lawrence.
            """ Calculate saturation vapor pressure at grid and "subgrid" [also
			through function] using the Magnus formula."""

            svpf = C1 * np.exp(A1 * tf / (B1 + tf))
            svpc = C1 * np.exp(A1 * tc / (B1 + tc))
            """Calculate the vapor pressure at grid (c) and subgrid (f)."""
            vpf = tob.r * svpf / 1e2  # RHf
            vpc = RHc * svpc / 1e2
            """Use the vapor pressure and temperature to calculate clear sky
			 # emissivity at grid and subgrid. [also function]
			Konzelmann et al. 1994
			Ta in kelvin

			 """
            x1 = 0.43
            x2 = 5.7
            cef = 0.23 + x1 * (vpf / tob.t)**(
                1 / x2)  #Pretty sure the use of Kelvin is correct.
            cec = 0.23 + x1 * (vpc / sob.t2m)**(1 / x2)
            """Diagnose the all sky emissivity at grid."""
            sbc = 5.67e-8
            aec = sob.strd / (sbc * sob.t2m**4)
            # need to constrain to 1 as original code?
            """ Calculate the "cloud" emissivity at grid, assume this is the same at
		 	subgrid."""
            deltae = aec - cec
            """ Use the former cloud emissivity to compute the all sky emissivity at 
			subgrid. """
            aef = cef + deltae
            LWf = aef * sbc * tob.t**4
            return (LWf)

        instRad(gsob, 3600)
        ts_lwin = lwin(gsob, gtob)

        dem = nc.Dataset(demfile)
        lon = dem.variables['lon'][:]
        lat = dem.variables['lat'][:]
        # These packages can be a problem on the cluster
        from osgeo import gdal
        from osgeo import gdal_array
        from osgeo import osr

        for i in range(0, t.shape[2]):

            myname = wdir + '/out/lwin' + str(i) + '.tif'
            array = ts_lwin[:, :, i]
            #lat = out_xyz_dem[:,0].reshape(l.shape)
            #lon = out_xyz_dem[:,1].reshape(l.shape)

            xmin, ymin, xmax, ymax = [
                lon.min(), lat.min(),
                lon.max(), lat.max()
            ]
            nrows, ncols = np.shape(array)
            xres = (xmax - xmin) / float(ncols)
            yres = (ymax - ymin) / float(nrows)
            geotransform = (xmin, xres, 0, ymax, 0, -yres)

            output_raster = gdal.GetDriverByName('GTiff').Create(
                myname, ncols, nrows, 1, gdal.GDT_Float32)  # Open the file
            output_raster.GetRasterBand(1).WriteArray(
                array)  # Writes my array to the raster
            output_raster.SetGeoTransform(
                geotransform)  # Specify its coordinates
            srs = osr.SpatialReference()  # Establish its coordinate encoding
            srs.ImportFromEPSG(4326)  # This one specifies WGS84 lat long.
            output_raster.SetProjection(
                srs.ExportToWkt())  # Exports the coordinate system
            output_raster = None
    logging.info("Toposcale complete!")
    logging.info("%f minutes" % round((time.time() / 60 - start_time / 60), 2))
Example 3
def main(coords, eraDir, outDir, startDT, endDT, startIndex):
    print(startDT)
    print(endDT)
    g = 9.81  # geopotential constant
    tz = 0  # timezone always utc0 for era5 data
    myyear = startDT.split('-')[0]

    zp_file = eraDir + "/PLEV_geopotential_" + myyear + ".nc"

    plevDict = {
        eraDir + "/PLEV_temperature_" + myyear + ".nc": "t",
        eraDir + "/PLEV_u_component_of_wind_" + myyear + ".nc": "u",
        eraDir + "/PLEV_v_component_of_wind_" + myyear + ".nc": "v",
        eraDir + "/PLEV_relative_humidity_" + myyear + ".nc": "r"
    }

    surfDict = {
        eraDir + "/SURF_2m_temperature_" + myyear + ".nc": "t2m",
        eraDir + "/SURF_2m_dewpoint_temperature_" + myyear + ".nc": "d2m",
        eraDir + "/SURF_geopotential_" + myyear + ".nc": "z",
        eraDir + "/SURF_surface_solar_radiation_downwards_" + myyear + ".nc":
        "ssrd",
        eraDir + "/SURF_surface_thermal_radiation_downwards_" + myyear + ".nc":
        "strd",
        eraDir + "/SURF_Total precipitation_" + myyear + ".nc":
        "tp",  # removed _
        eraDir + "/SURF_TOA incident solar radiation_" + myyear + ".nc": "tisr"
    }

    # read in listpoints
    lpin = pd.read_csv(coords, header=None)

    # make out path for results
    out = outDir
    if not os.path.exists(out):
        os.makedirs(out)

    # time stuff
    f = nc.Dataset(zp_file)
    nctime = f.variables['time']
    dtime = pd.to_datetime(
        nc.num2date(nctime[:],
                    nctime.units,
                    calendar="standard",
                    only_use_cftime_datetimes=False,
                    only_use_python_datetimes=True))

    if (np.array(np.where(dtime == startDT)).size == 0):
        sys.exit("SYSTEMEXIT:Start date not in netcdf, end of timeseries")

    starti = np.where(
        dtime == startDT)[0].item()  # so can run on a single timestep
    print(endDT)
    if (np.array(np.where(dtime == endDT)).size == 0):
        sys.exit("SYSTEMEXIT: End date not in netcdf, end of timeseries")

    endi = np.where(dtime == endDT)[0].item()
    year = dtime.year[0]

    # compute timestep before we cut timeseries
    a = dtime[2] - dtime[1]
    step = a.seconds
    stephr = step / (60 * 60)
    # extract timestep
    dtime = dtime[starti:endi, ]

    print(("Running timestep " + str(startDT)))

    #===============================================================================
    # tscale3d - 3D interpolation of pressure level fields
    #===============================================================================
    ele = lpin.iloc[:, 2]  #[:,3]
    lats = lpin.iloc[:, 0]  #[:,2] #[s['lat'] for s in stations]
    lons = lpin.iloc[:,
                     1] + 180  #[:,1] #[s['lon'] for s in stations] convert -180-180 to 0-360

    lp = hp.Bunch(ele=ele, lat=lats, lon=lons)

    out_xyz_dem = np.asarray([lats, lons, ele * g], order="F").transpose()
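    # out_xyz_dem columns are [lat, lon, ele*g]; the first two are the horizontal
    # interpolation targets and the third is surface geopotential, used later for
    # the vertical (between-level) interpolation.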

    # init gtob object
    gtob = hp.Bunch(time=dtime)

    for plev in plevDict:

        t = nc.Dataset(plev)  # key is filename
        varname = plevDict[plev]  # value of key is par shortname
        z = nc.Dataset(zp_file)

        # init grid stack
        xdim = out_xyz_dem.shape[0]
        sa_vec = np.zeros(xdim)
        #names=1:n

        for timestep in range(starti, endi):
            """
	        Return original grid temperatures and geopotential of different
	        pressure levels. The function is called by inLevelInterp() to
	        get the input ERA5 values.
	        
	        Args: 
	            variable: Given interpolated climate variable
	            timestep: Time to be interpolated, given as an integer index (e.g.
	            0, 1, 2)
	            
	        Returns:
	            gridT: Grid temperatures of different pressure levels. Returned 
	            temperatures are formatted as [level, lat, lon]
	            gridZ: Grid geopotential of different pressure levels. Returned 
	            geopotential is formatted as [level, lat, lon]
	            gridLon: Grid longitude of pressure level variables
	            gridLat: Grid latitude of pressure level variables
	        
	        Example:
	            gridT,gridZ,gridLat,gridLon=downscaling.gridValue('Temperature',0)
	            
			"""

            gridT = t.variables[varname][timestep, :, :, :]
            gridZ = z.variables['z'][timestep, :, :, :]
            gridLat = t[
                'latitude'][:]  # coords of grid centre https://confluence.ecmwf.int/display/CKB/ERA5%3A+What+is+the+spatial+reference
            gridLat = gridLat[::-1]  # reverse to deal with ERA5 order
            gridLon = t[
                'longitude'][:]  # coords of grid centre https://confluence.ecmwf.int/display/CKB/ERA5%3A+What+is+the+spatial+reference
            gridLev = t.variables['level'][::-1]
            #return gridT,gridZ,gridLat,gridLon
            """
			This is a 2D interpolatation, and returns interpolated temperatures
			of different pressure levels.

			Interpolated domain is smaller than original domain - original (ERA5) domain
			should be one cell larger than expected point or grid domain.

			Args:
			    gridT: Grid temperatures of different pressure levels. Retruned 
			        temperature are formated in [level, lat, lon]
			    gridZ: Grid geopotential of different pressure levels. Retruned 
			        temperature are formated in [level, lat, lon]
			    gridLat: Grid longitude of pressure level variables
			    gridLon: Grid latitude of pressure level variables
			    out_xyz: Given sites, which will be interpolated.
			    
			Returns:
			    t_interp: Interpolated temperatre of different pressure levels. 
			        The returned values are fomrated in [level, lat, lon]
			    z_interp: Interpolated geopotential of different pressure levels. 
			        The returned values are fomrated in [level, lat, lon]

			Examples:
			    downscaling = downscaling(dem, geop, sa, pl)

			    out_xyz_dem, lats, lons, shape = downscaling.demGrid()
			    out_xyz_sur = downscaling.surGrid(lats, lons, None)

			    #interpolate 2-meter temperature
			    surTa = downscaling.surTa(0, out_xyz_sur)
			    #original ERA-I values
			    gridT,gridZ,gridLat,gridLon = downscaling.gridValue(variable,0)
			    #interpolate temperatures and geopotential of different 
			    pressure levels.

			    t_interp, z_interp = downscaling.inLevelInterp(gridT,gridZ,
			                                                   gridLat,gridLon,
			                                                   out_xyz_dem)
			"""

            shape = gridT.shape
            #create array to hold interpolation results
            t_interp = np.zeros([shape[0], len(out_xyz_dem)])
            z_interp = np.zeros([
                shape[0], len(out_xyz_dem)
            ])  # HOW MANY TIMES DO WE REALLY NEED TO COMPUTE THIS?

            #temperature and elevation interpolation 2d
            for i in range(shape[0]):
                ft = RegularGridInterpolator((gridLat, gridLon),
                                             gridT[i, ::-1, :],
                                             'linear',
                                             bounds_error=False)
                fz = RegularGridInterpolator((gridLat, gridLon),
                                             gridZ[i, ::-1, :],
                                             'linear',
                                             bounds_error=False)
                t_interp[i, :] = ft(out_xyz_dem[:, :2])  #temperature

                z_interp[i, :] = fz(out_xyz_dem[:, :2])  #elevation

                # invert pressure levels
                #t_interp = t_interp[::-1,:]
                #z_interp = z_interp[::-1,:]
                """This is a 1D interpoation. The function return interpolated 
				upper air temperature at the given sites by 
				interpolation between different pressure levels.
				"""
            ele = out_xyz_dem[:, 2]
            size = np.arange(out_xyz_dem.shape[0])
            n = [bisect_left(z_interp[:, i], ele[i]) for i in size]
            n = [x + 1 if x == 0 else x for x in n]

            lowN = [l - 1 for l in n]

            upperT = t_interp[n, size]
            upperZ = z_interp[n, size]
            dG = upperT - t_interp[lowN, size]  #<0
            dG /= upperZ - z_interp[lowN, size]  #<0
            dG *= out_xyz_dem[:, 2] - upperZ  #>0
            dG += upperT

            pl_obs = dG

            sa_vec = np.column_stack((sa_vec, pl_obs))

        # drop the initialisation column
        sa_vec = sa_vec[:, 1:]
        # rename to variable
        setattr(gtob, varname, sa_vec)
    print("t,r,u,v done")
    #===============================================================================
    # tscale2d - Generates 2D interpolations from coarse (ERA5) to fine (1km) grid
    #===============================================================================
    gsob = hp.Bunch(dtime=dtime)
    # init grid stack
    xdim = shape[0]
    sa_vec = np.zeros((xdim))

    for surf in surfDict:

        t = nc.Dataset(surf)  # key is filename
        varname = surfDict[surf]  # value of key is par shortname
        z = nc.Dataset(zp_file)  # could be outside loop

        # init grid stack
        xdim = out_xyz_dem.shape[0]
        sa_vec = np.zeros(xdim)

        #names=1:n
        for timestep in range(starti, endi):
            """
			2D interpolated of surface firelds.
				Args:
					timestep: Timestep of interpolation as an interger (index)
					stations: pandas dataframe of input station csv file (id,lon,lat,ele)
					var: surface variarble eg "ssrd"
				Returns:
					t_sp: 2D interpolation of ERA surface field to stations points

				Example:
					surTa = ds.surTaPoint(0, mystations, 't2m')
			"""
            # read in the 2D surface field 'varname' at this timestep
            in_v = t[varname][timestep, :, :]
            in_v = in_v[::-1, :]  # reverse latitude dimension to agree with ascending 'lat'
            lat = t.variables['latitude'][:]
            lat = lat[::-1]  # must be ascending for RegularGridInterpolator
            lon = t.variables['longitude'][:]

            # 2d interpolation
            f_sa = RegularGridInterpolator((lat, lon),
                                           in_v,
                                           'linear',
                                           bounds_error=False)
            out_xy = np.asarray([lats, lons]).T
            sa_t = f_sa(out_xy)

            # stack timepoint to existing
            sa_vec = np.column_stack((sa_vec, sa_t))

        # drop the initialisation column
        sa_vec = sa_vec[:, 1:]

        # Add to gsob
        setattr(gsob, varname, sa_vec)

    print("Made a sob")
    #===============================================================================
    # Conversions
    #===============================================================================
    """ convert tp from m/timestep (total accumulation over timestep) to rate in mm/h 

				Args:
					step: timstep in seconds (era5=3600, ensemble=10800)

				Note: both EDA (ensemble 3h) and HRES (1h) are accumulated over the timestep
				and therefore treated here the same.
			https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation
	"""
    tphrm = gsob.tp  #/step*60*60 # convert metres per timestep (in secs) -> m/hour
    gsob.pmmhr = tphrm * 1000  # m/hour-> mm/hour
    """ Convert SWin from accumulated quantities in J/m2 to 
	instantaneous W/m2 see: 
	https://confluence.ecmwf.int/pages/viewpage.action?pageId=104241513

	Args:
		step: timstep in seconds (era5=3600, ensemble=10800)

	Note: both EDA (ensemble 3h) and HRES (1h) are accumulated over the timestep
	and therefore treated here the same ie step=3600s (1h)
	https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation
	"""
    gsob.strd = gsob.strd / 3600
    gsob.ssrd = gsob.ssrd / 3600
    gsob.tisr = gsob.tisr / 3600

    gsob.gridEle = gsob.z[:, 0] / g
    gtob.prate = gsob.pmmhr  #*pcf # mm/hour
    gtob.psum = gsob.tp * 1000 * stephr  #pcf mm/timestep
    print("conversions done")
    #===============================================================================
    # TopoSCALE
    #=============================================================================

    #===============================================================================
    # Precip
    #===============================================================================
    '''
    Args:
        fineEle: ele vector from the station dataframe
        sob: contains gridEle, tp and dtime
    '''
    # convert TP to mm/hr

    # lookups = {
    # 	   1:0.35,
    # 	   2:0.35,
    # 	   3:0.35,
    # 	   4:0.3,
    # 	   5:0.25,
    # 	   6:0.2,
    # 	   7:0.2,
    # 	   8:0.2,
    # 	   9:0.2,
    # 	   10:0.25,
    # 	   11:0.3,
    # 	   12:0.35
    # }

    # # Precipitation lapse rate, varies by month (Liston and Elder, 2006).
    # pfis = gsob.dtime.month.map(lookups)
    # pfis = np.repeat(pfis.values[:,None], lp.ele.size, axis=1)

    # dz=(lp.ele-gsob.gridEle)/1e3  # Elevation difference in kilometers between the fine and coarse surface.

    # pcf=(1+pfis.T*dz[:,None])/(1-pfis.T*dz[:,None])# Precipitation correction factor.
    #Pf=sob.pmmhr.T*lp

    #===============================================================================
    # Longwave
    #===============================================================================
    """Convert to RH (should be in a function). Following MG Lawrence 
	DOI 10.1175/BAMS-86-2-225 """
    A1 = 7.625
    B1 = 243.04
    C1 = 610.94
    tc = gsob.t2m - 273.15
    tdc = gsob.d2m - 273.15
    tf = gtob.t - 273.15  # fout.T
    c = (A1 * tc) / (B1 + tc)
    RHc = 100 * np.exp((tdc * A1 - tdc * c - B1 * c) /
                       (B1 + tdc))  # Inverting eq. 8 in Lawrence.
    """ Calculate saturation vapor pressure at grid and "subgrid" [also
	through function] using the Magnus formula."""

    svpf = C1 * np.exp(A1 * tf / (B1 + tf))
    svpc = C1 * np.exp(A1 * tc / (B1 + tc))
    """
	Calculate the vapor pressure at grid (c) and subgrid (f).
	"""
    vpf = gtob.r * svpf / 1e2  # RHf
    vpc = RHc * svpc / 1e2
    """
	Use the vapor pressure and temperature to calculate clear sky
	# emssivity at grid and subgrid. [also function]
	Konzelmann et al. 1994
	Ta in kelvin
	"""
    x1 = 0.43
    x2 = 5.7
    cef = 0.23 + x1 * (vpf / gtob.t)**(
        1 / x2)  #Pretty sure the use of Kelvin is correct.
    cec = 0.23 + x1 * (vpc / gsob.t2m)**(1 / x2)
    """Diagnose the all sky emissivity at grid."""
    sbc = 5.67e-8
    aec = gsob.strd / (sbc * gsob.t2m**4)
    # need to constrain to 1 as original code?
    """ 
	Calculate the "cloud" emissivity at grid, assume this is the same at
	subgrid.
	"""
    deltae = aec - cec
    """ 
	Use the former cloud emissivity to compute the all sky emissivity at 
	subgrid. 
	"""
    aef = cef + deltae
    gtob.lwin = aef * sbc * gtob.t**4

    print("Lwin done")
    #===============================================================================
    # make a pob - required as input to the swin routine. This object is the data on each
    # pressure level interpolated to the fine grid, i.e. it has dimensions xy (fine grid) x plev
    #===============================================================================

    f = nc.Dataset(zp_file)
    lev = f.variables['level'][:]
    var = 't'
    #	ds = rc.t3d( pl=plevfile, dem =demfile)
    #	out_xyz_dem, lats, lons, shape= ds.demGrid()
    xdim = lev.shape[0]
    ydim = out_xyz_dem.shape[0]
    t_interp_out = np.zeros((xdim, ydim))
    z_interp_out = np.zeros((xdim, ydim))

    # for timestep in range(starti,endi):
    # 	gridT,gridZ,gridLat,gridLon=ds.gridValue(var,timestep)
    # 	t_interp, z_interp = ds.inLevelInterp(gridT,gridZ, gridLat,gridLon,out_xyz_dem)
    # 	t_interp_out = np.dstack((t_interp_out, t_interp))
    # 	z_interp_out = np.dstack((z_interp_out, z_interp))

    #======================= all this can be done in first instance need to gen t/z_interp_out additionally
    tfile = list(plevDict.keys())[list(plevDict.values()).index('t')]
    t = nc.Dataset(tfile)  # key is filename
    z = nc.Dataset(zp_file)  # could be outside loop

    for timestep in range(starti, endi):

        gridT = t.variables['t'][timestep, :, :, :]
        gridZ = z.variables['z'][timestep, :, :, :]
        gridLat = t['latitude'][:]
        gridLat = gridLat[::-1]  # reverse to deal with ERA5 order
        gridLon = t['longitude'][:]
        gridLev = t.variables['level'][::-1]
        #return gridT,gridZ,gridLat,gridLon

        shape = gridT.shape
        #create array to hold interpolation results
        t_interp = np.zeros([shape[0], len(out_xyz_dem)])
        z_interp = np.zeros([
            shape[0], len(out_xyz_dem)
        ])  # HOW MANY TIMES DO WE REALLY NEED TO COMPUTE THIS?

        #temperature and elevation interpolation 2d
        for i in range(shape[0]):
            ft = RegularGridInterpolator((gridLat, gridLon),
                                         gridT[i, ::-1, :],
                                         'linear',
                                         bounds_error=False)
            fz = RegularGridInterpolator((gridLat, gridLon),
                                         gridZ[i, ::-1, :],
                                         'linear',
                                         bounds_error=False)
            t_interp[i, :] = ft(out_xyz_dem[:, :2])  #temperature
            z_interp[i, :] = fz(out_xyz_dem[:, :2])  #elevation

        t_interp_out = np.dstack((t_interp_out, t_interp))
        z_interp_out = np.dstack((z_interp_out, z_interp))

        # invert pressure levels
        #t_interp = t_interp[::-1,:]
        #z_interp = z_interp[::-1,:]

    # drop init blank layer
    tinterp = t_interp_out[:, :, 1:]
    zinterp = z_interp_out[:, :, 1:]

    gpob = hp.Bunch(t=tinterp, z=zinterp, levels=lev)

    # dem  = nc.Dataset(demfile)
    # lon = dem.variables['longitude'][:]
    # lat = dem.variables['latitude'][:]
    # demv = dem.variables['ele'][:]
    # demlon1 = dem.variables['longitude'][:]
    # demlat1 = dem.variables['latitude'][:]
    # demlon = np.tile(demlon1,demlat1.size)
    # demlat = np.repeat(demlat1,demlon1.size)
    # demv=np.reshape(demv,demv.size)

    # # why are these masked values generated?
    # demv =np.ma.filled(demv, fill_value=1)
    # tz = np.repeat(tz,demv.size)

    # stat = pd.DataFrame({	"ele":demv,
    # 				"lon":demlon,
    # 				"lat":demlat,
    # 				"tz":tz
    # 				})
    #===============================================================================
    # Compute Shortwave
    #===============================================================================

    #def swin2D(pob,sob,tob, stat, dates):
    '''
    main edit over the standard point function:
    - 3D tob and sob reduced to 2D (reshape)
    '''
    """ toposcale surface pressure using the hypsometric equation - move to its own
    class """
    g = 9.81
    R = 287.05  # Gas constant for dry air.
    #ztemp = pob.z # geopotential height b = np.transpose(a, (2, 0, 1))
    ztemp = np.transpose(
        gpob.z,
        (2, 0, 1))  # pob is originally ordered levels,stations/cells,time
    #Ttemp = pob.t
    Ttemp = np.transpose(
        gpob.t,
        (2, 0, 1))  # pob is originally ordered levels,stations/cells,time
    statz = np.array(lp.ele) * g
    #dz=ztemp.transpose()-statz[None,:,None] # transpose not needed but now consistent with CGC surface pressure equations
    dz = ztemp - statz  # dimensions of dz : time, levels, stations

    # set all levels below the surface to a very big number so they cannot be found by min
    newdz = dz
    newdz[newdz < 0] = 999999

    psf = np.zeros((gsob.dtime.size, statz.shape[0]))

    # reshape tob.t here
    #gtob.tr=gtob.t.reshape(gtob.t.shape[0]*gtob.t.shape[1], gtob.t.shape[2], order='F')
    #tob.trT=tob.tr.T # transpose to get right order

    # loop through timesteps
    for i in range(0, gsob.dtime.size):

        # find overlying layer
        thisp = dz[i, :, :] == np.min(
            newdz[i, :, :], axis=0
        )  # thisp is a boolean matrix of levels x stations, True marking the pressure level directly overlying the station surface elevation

        # flatten to 1 dimension with order='F' (Fortran, column-major)
        thispVec = thisp.reshape(thisp.size, order='F')
        TtempVec = Ttemp.reshape(Ttemp.shape[0],
                                 Ttemp.shape[1] * Ttemp.shape[2],
                                 order='F')
        ztempVec = ztemp.reshape(ztemp.shape[0],
                                 ztemp.shape[1] * ztemp.shape[2],
                                 order='F')

        # boolean indexing to find the temperature and geopotential of the lowest overlying layer
        T1 = TtempVec[i, thispVec]
        z1 = ztempVec[i, thispVec]

        p1 = np.tile(
            gpob.levels[::-1], statz.shape[0]
        )[thispVec] * 1e2  # Convert to Pa. Reverse levels so they run from low elevation (high pressure) to high elevation (low pressure)
        Tbar = np.mean(
            [T1, gtob.t[:, i]], axis=0
        )  # temperature midway between the surface (toposcale T) and the lowest overlying level (T1)
        """ Hypsometric equation."""  #P1 is above surface is this correct? Yes!
        psf[i, :] = p1 * np.exp(
            ((z1 / g) - (statz / g)) * (g / (Tbar * R))
        )  # exponent is positive ie increases pressure as surface is lower than pressure level
    """ Maybe follow Dubayah's approach (as in Rittger and Girotto) instead
	for the shortwave downscaling, other than terrain effects. """
    """ Height of the "grid" (coarse scale)"""
    Zc = gsob.z.T  #.reshape(gsob.z.shape[0]*gsob.z.shape[1], gsob.z.shape[2]).T # reshape and transpose to remove dimension and make broadcastable
    """ toa """
    SWtoa = gsob.tisr  #.reshape(gsob.tisr.shape[0]*gsob.tisr.shape[1], gsob.tisr.shape[2]).T
    """ Downwelling shortwave flux of the "grid" using nearest neighbor."""
    SWc = gsob.ssrd  #.reshape(sob.ssrd.shape[0]*sob.ssrd.shape[1], sob.ssrd.shape[2]).T
    """Calculate the clearness index."""
    kt = SWc / SWtoa

    #kt[is.na(kt)==T]<-0 # make sure 0/0 =0
    #kt[is.infinite(kt)==T]<-0 # make sure 0/0 =0
    kt[kt < 0] = 0
    kt[kt > 1] = 0.8  #upper limit of kt
    kt = kt
    """
	Calculate the diffuse fraction following the regression of Ruiz-Arias 2010 

	"""
    kd = 0.952 - 1.041 * np.exp(-1 * np.exp(2.3 - 4.702 * kt))
    """ Use this to calculate the downwelling diffuse and direct shortwave radiation at grid. """
    SWcdiff = kd * SWc
    SWcdir = (1 - kd) * SWc
    SWcdiff = SWcdiff
    SWcdir = SWcdir
    """ Use the above with the sky-view fraction to calculate the 
	downwelling diffuse shortwave radiation at subgrid. """
    SWfdiff = SWcdiff
    SWfdiff = np.nan_to_num(SWfdiff)  # convert nans (night) to 0
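    # Sketch (illustration only, expects numpy arrays) of the split above: clearness
    # index kt = SWc/SWtoa (clamped as in the code: negatives to 0, values > 1 to 0.8),
    # diffuse fraction kd from the Ruiz-Arias (2010) regression, then
    # diffuse = kd*SWc and direct = (1-kd)*SWc.
    def _split_direct_diffuse(sw_grid, sw_toa):
        kt = np.nan_to_num(sw_grid / sw_toa)
        kt[kt < 0] = 0
        kt[kt > 1] = 0.8
        kd = 0.952 - 1.041 * np.exp(-1 * np.exp(2.3 - 4.702 * kt))
        return (1 - kd) * sw_grid, kd * sw_grid  # direct, diffuse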
    """ Direct shortwave routine, modified from Joel. 
	Get surface pressure at "grid" (coarse scale). Can remove this
	part once surface pressure field is downloaded, or just check
	for existance. """

    ztemp = np.transpose(gpob.z, (0, 2, 1))
    Ttemp = np.transpose(gpob.t, (0, 2, 1))
    dz = ztemp - Zc  # dimensions of dz : levels, time, stations

    # set all levels below the surface to a very big number so they cannot be found by min
    newdz = dz
    newdz[newdz < 0] = 999999

    psc = np.zeros((gsob.dtime.size, statz.shape[0]))
    for i in range(0, gsob.dtime.size):

        #thisp.append(np.argmin(dz[:,i][dz[:,i]>0]))
        # find overlying layer
        thisp = dz[:, i, :] == np.min(
            newdz[:, i, :], axis=0
        )  # thisp is a boolean matrix of levels x stations, True marking the overlying pressure level - NB the time index is in the middle this time!
        z0 = Zc[i, :]
        T0 = gsob.t2m.T[i, :]

        # flatten to 1 dimension with order='F' (Fortran, column-major)
        thispVec = thisp.reshape(thisp.size, order='F')
        TtempVec = Ttemp.reshape(
            Ttemp.shape[1], Ttemp.shape[0] * Ttemp.shape[2], order='F'
        )  # !! order of permutations is different from pressure at finegrid routine (time is middle dimension)
        ztempVec = ztemp.reshape(
            ztemp.shape[1], ztemp.shape[0] * ztemp.shape[2], order='F'
        )  # !! order of permutations is different from pressure at finegrid routine (time is middle dimension)

        # boolean indexing to find the temperature and geopotential of the lowest overlying layer
        T1 = TtempVec[i, thispVec]
        z1 = ztempVec[i, thispVec]

        p1 = np.tile(gpob.levels[::-1],
                     statz.shape[0])[thispVec] * 1e2  #Convert to Pa.
        Tbar = np.mean([T0, T1], axis=0)
        """ Hypsometric equation."""
        psc[i, :] = p1 * np.exp(((z1 / g) - (z0 / g)) * (g / (Tbar * R)))
    """compute julian dates"""
    jd = sg.to_jd(gsob.dtime)
    """
	Calculates a unit vector in the direction of the sun from the observer 
	position.
	"""
    svx, svy, svz = sg.sunvectorMD(jd, lp.lat, lp.lon, tz, lp.ele.size,
                                   gsob.dtime.size)
    sp = sg.sunposMD(svx, svy, svz)

    # Cosine of the zenith angle.
    #sp.zen=sp.zen*(sp.zen>0) # Sun might be below the horizon.
    muz = np.cos(sp.zen)
    muz = muz
    # NB! psc must be in Pa (NOT hPA!).

    # Calculate the "broadband" absorption coefficient. Elevation correction
    ka = (g * muz / (psc)) * np.log(SWtoa.T / SWcdir.T)
    ka = np.nan_to_num(ka)

    # Now you can (finally) find the direct component at subgrid.
    SWfdir = SWtoa.T * np.exp(-ka * psf / (g * muz))
    SWfdirCor = SWfdir  #*dprod

    gtob.swin = SWfdiff + SWfdirCor.T
    gtob.psf = psf
    print("Swin done")

    #gtob.swin = swin2D(gpob,gsob,gtob, stat, dtime)
    #gtob.swin =gtob.swin.reshape(gtob.swin.shape[0], gsob.ssrd.shape[0], gsob.ssrd.shape[1])

    ntime = len(gsob.dtime)
    stephr = a.seconds / 60 / 60
    rtime = np.array(list(range(len(gsob.dtime)))) * stephr

    #===============================================================================
    # write results
    #===============================================================================
    ## conversions
    T = np.single(gtob.t - 273.15)
    gtob.r[gtob.r > 100] = 100
    RH = np.single(gtob.r)
    ws = np.single(np.sqrt(gtob.u**2 + gtob.v**2))
    prate = np.single(gtob.prate)

    #===========================================================================
    # Calculate absolute humidity kg/kg
    #===========================================================================
    # Tk=273.15+20
    # RH=80.
    # ah = 13.82g/m3
    pws = calc_Pws(gtob.t)
    pw = calc_Pw(pws, RH)
    ah_gm3 = calc_AH(pw, gtob.t)  # ah in g/m3
    #AH_kgkg = ah_gm3_To_ah_kgkg(ah_gm3,gtob.psf,gtob.t )
    SH = rh2sh(pw, gtob.psf)
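    # Assumption (illustration only, not the rh2sh() helper used above): specific
    # humidity can be derived from vapour pressure pw [Pa] and air pressure p [Pa]
    # via the mixing ratio, w = 0.622*pw/(p - pw) and q = w/(1 + w).
    def _q_from_vapour_pressure(pw_pa, p_pa):
        w = 0.622 * pw_pa / (p_pa - pw_pa)
        return w / (1.0 + w)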

    # Dictionary to loop over
    varDict = {
        "t": T,
        "ws": ws,
        "shum": SH.transpose(),
        "swin": gtob.swin,
        "lwin": gtob.lwin,
        "prate": prate,
        "P": gtob.psf.transpose()
    }

    for var in varDict:

        #open
        f = nc.Dataset(outDir + "/" + var + "_" + str(startIndex + 1) + "_" +
                       str(year) + ".nc",
                       'w',
                       format='NETCDF4')

        # Implement cf H.2.1 http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/build/cf-conventions.html#idp9763584
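        # (H.2.1 is the CF "orthogonal multidimensional array representation of time
        #  series": dimensions (time, station) plus per-station latitude/longitude
        #  coordinate variables, which is what the createDimension/createVariable
        #  calls below set up.)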

        # #make dimensions
        # f.createDimension('time', ntime)
        # f.createDimension('lon', len(lp.lon))
        # f.createDimension('lat', len(lp.lat))

        # #make dimension variables
        # mytime = f.createVariable('time', 'i', ('time',))
        # longitude = f.createVariable('lon',    'f4',('lon',))
        # latitude  = f.createVariable('lat',    'f4',('lat',))

        # myvar = f.createVariable(var,    'f4',('time','lon'))

        # #assign dimensions
        # mytime[:] = rtime
        # longitude = lp.lon
        # latitude  = lp.lat

        # myvar[:] = varDict[var].T

        # #metadata
        # from time import ctime
        # mytime=ctime()
        # f.history = 'Created by toposcale on '+mytime
        # mytime.units = 'hours since '+str(gsob.dtime[0])
        # f.close()

        #make dimensions
        f.createDimension('time', ntime)
        f.createDimension('station', len(lp.ele))

        #make dimension variables
        mytime = f.createVariable('time', 'i', ('time', ))
        station = f.createVariable('station', 'i', ('station', ))

        #make variables
        myvar = f.createVariable(var, 'f4', ('time', 'station'))
        longitude = f.createVariable('longitude', 'f4', ('station'))
        latitude = f.createVariable('latitude', 'f4', ('station'))

        #assign dimensions
        mytime[:] = rtime
        longitude[:] = np.array(lp.lon)
        latitude[:] = np.array(lp.lat)
        station[:] = range(len(lp.ele))
        myvar[:] = varDict[var].T

        #metadata
        from time import ctime
        mycomptime = ctime()
        f.history = 'Created by tscale-cci on ' + mycomptime
        mytime.units = 'hours since ' + str(gsob.dtime[0])
        f.close()

    print("Toposcale complete!")
Example n. 4
0
def tscale3dmain(mymonth):
    '''
    main function that runs tscale3D for a single month of ERA5 input. This
    avoids the massive I/O overhead of e.g. 72 GB PLEV.nc files.

    Args: mymonth (str)

    Example: tscale3dmain("_197909.nc")
	'''
    surfile = wdir + '/forcing/SURF_' + mymonth
    plevfile = wdir + '/forcing/PLEV_' + mymonth

    #===========================================================================
    # Init t3d object
    #===========================================================================
    f = nc.Dataset(plevfile)

    lev = f.variables['level'][:]
    var = 't'

    t3d = rc.t3d(pl=plevfile, sa=surfile)
    t3d.addTime()
    a = t3d.dtime[2] - t3d.dtime[1]
    t3d.step = a.seconds
    t3d.lp = lp
    #timesteps = t3d.pl.variables['time'][:].size
    t3d.out_xyz_dem, t3d.lats, t3d.lons, t3d.shape, t3d.names = t3d.demGrid(
        stations=lp)

    #===========================================================================
    # pob
    #===========================================================================
    xdim = lev.shape[0]
    ydim = t3d.shape[0]

    t_interp_out = np.zeros((xdim, ydim))
    z_interp_out = np.zeros((xdim, ydim))

    for timestep in range(len(t3d.dtime)):
        #print(str(round(float(timestep)/float(timesteps)*100,0))+ "% done")

        gridT, gridZ, gridLat, gridLon = t3d.gridValue(var, timestep)

        t_interp, z_interp = t3d.inLevelInterp(gridT, gridZ, gridLat, gridLon,
                                               t3d.out_xyz_dem)

        t_interp_out = np.dstack((t_interp_out, t_interp))
        z_interp_out = np.dstack((z_interp_out, z_interp))

    # drop init blank layer
    tinterp = t_interp_out[:, :, 1:]
    zinterp = z_interp_out[:, :, 1:]
    pob = hp.Bunch(t=tinterp, z=zinterp, levels=lev)

    print("made a pob!")

    #===========================================================================
    # tob
    #===========================================================================

    starti = 0
    endi = len(t3d.dtime)
    t = t3d.tscale3d('t', starti, endi)
    r = t3d.tscale3d('r', starti, endi)
    u = t3d.tscale3d('u', starti, endi)
    v = t3d.tscale3d('v', starti, endi)

    # compute wind speed and direction
    ws = np.sqrt(u**2 + v**2)
    wd = (180 / np.pi) * np.arctan(u / v) + np.where(v > 0, 180,
                                                     np.where(u > 0, 360, 0))
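    # Note (equivalent formulation, added as a sketch): the meteorological wind
    # direction above can also be written with arctan2, which avoids the v = 0
    # division:  wd = (np.degrees(np.arctan2(u, v)) + 180.0) % 360.0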

    tob = hp.Bunch(t=t, r=r, u=u, v=v, ws=ws, wd=wd, dtime=t3d.dtime)

    # Physical filters

    # constrain RH to the interval 5-100 (the lower bound is required by the LWin
    # parameterisation) and wind speed to >= 0
    tob.r[tob.r < 5] = 5
    tob.r[tob.r > 100] = 100
    tob.ws[tob.ws < 0] = 0

    print("made a tob!")

    #===========================================================================
    # sob
    #===========================================================================

    t2m = t3d.tscale2d('t2m', starti, endi)
    tp = t3d.tscale2d('tp', starti, endi)
    ssrd = t3d.tscale2d('ssrd', starti, endi)
    strd = t3d.tscale2d('strd', starti, endi)
    tisr = t3d.tscale2d('tisr', starti, endi)
    d2m = t3d.tscale2d('d2m', starti, endi)
    z = t3d.tscale2d('z', starti, endi)

    gridEle = z[0, :] / g

    sob = hp.Bunch(t2m=t2m,
                   tp=tp,
                   ssrd=ssrd,
                   strd=strd,
                   tisr=tisr,
                   d2m=d2m,
                   z=z,
                   gridEle=gridEle,
                   dtime=t3d.dtime)
    print("made a sob!")

    #===============================================================================
    # Precip
    #===============================================================================

    pmmhr = tp2rate(tp, t3d.step)
    sob.pmmhr = pmmhr
    tob.prate, tob.psum = precipPoint(lp.ele, sob)
    print("made prate!")

    instRad(
        sob, 3600
    )  # 1 h is used because even though we use 3 h or 6 h data the value is accumulated over 1 h - we do not lose the budget in the same way as precipitation, just resolution.
    tob.lwin = lwin(sob, tob)
    print("made lwin")

    # vector method
    tob.swin, tob.psf = swin2D(pob, sob, tob, lp, dtime=t3d.dtime)

    # loop method
    # init first row
    # ntimestamps=ds.dtime.shape[0]
    # ts_swin = np.zeros((ntimestamps))
    # for stationi in range(0, lp.shape[0]):
    # 	print stationi
    # 	'''here we test for array full of NaNs due to points being
    # 	outside of grid. The NaN arrays are created in the
    # 	interpolation but only break the code here. If array is
    # 		all NaN then we just fill that stations slot
    # 	with NaN'''
    # 	testNans = np.count_nonzero(~np.isnan(pob.z[:,stationi,:]))
    # 	if testNans != 0:
    # 		ts= swin1D(pob=pob,sob=sob,tob=tob, stat=lp, dates=ds.dtime, index=stationi)
    # 		ts_swin=np.column_stack((ts_swin,ts))
    # 	if testNans == 0:
    # 		nan_vec = np.empty(ntimestamps) * np.nan
    # 		ts_swin=np.column_stack((ts_swin,nan_vec))

    # # # drop init row
    # tob.swin =ts_swin[:,1:]
    print("Made Swin")

    #===========================================================================
    # make dataframe (write individual files plus netcdf)
    #===========================================================================
    start = 1
    print("Writing toposcale files...")
    for i in range(0, tob.t.shape[1]):

        # partition
        Sf, Rf = snowPartition(tob.t[:, i], tob.prate[i, :])

        df = pd.DataFrame(
            {
                "TA": tob.t[:, i],
                "RH": tob.r[:, i],
                "P": tob.psf[:, i],
                "VW": tob.ws[:, i],
                "DW": tob.wd[:, i],
                "ILWR": tob.lwin[:, i],
                "ISWR": tob.swin[:, i],
                "PINT": tob.prate[i, :],
                "PSUM": tob.psum[i, :],
                "Snowf": np.array(Sf),
                "Rainf": np.array(Rf)
            },
            index=tob.dtime)
        df.index.name = "datetime"

        # fill outstanding nan in SW routine with 0 (night)
        df.ISWR = df.ISWR.fillna(0)

        fileout = wdir + "/out/meteo" + "c" + str(
            i + 1
        ) + "_" + mymonth + ".csv"  # convert python index back to g* sim dir index
        column_order = [
            'TA', 'RH', 'P', 'VW', 'DW', 'ILWR', 'ISWR', 'PINT', 'PSUM',
            'Snowf', 'Rainf'
        ]
        df[column_order].to_csv(path_or_buf=fileout,
                                na_rep=-999,
                                float_format='%.3f')
        print("written " + fileout)
Example n. 5
0
# read in listpoints
lp = pd.read_csv(lpfile)

#===============================================================================
#	Logging
#===============================================================================
#logging.basicConfig(level=logging.DEBUG, filename=wd+"/sim/"+ simdir+"/logfile", filemode="a+",
#format="%(asctime)-15s %(levelname)-8s %(message)s")

for i in range(lp.id.size):

    # station attribute structure
    stat = hp.Bunch(ele=lp.ele[i],
                    slp=lp.slp[i],
                    asp=lp.asp[i],
                    svf=lp.svf[i],
                    lon=lp.lon[i],
                    lat=lp.lat[i],
                    sro=lp.surfRough[i],
                    tz=lp.tz[i])

    #=== Pressure level object =============================================
    """ preprocess pressure level fields and add to structure """
    p = e5.Plev(fp, stat.lat, stat.lon)
    p.getVarNames()
    """ Datetime structure """
    p.addTime()
    startIndex = int(np.where(p.dtime == startTime)[0])  #"2016-08-01 18:00:00"
    endIndex = int(np.where(p.dtime == endTime)[0])  #"2016-08-01 18:00:00"
    p.dtime = p.dtime[startIndex:endIndex]

    for v in p.varnames:
                    filename=home + "/tscale_logfile",
                    filemode="a+",
                    format="%(asctime)-15s %(levelname)-8s %(message)s")

for i in tqdm(range(lp.id.size)):

    fileout = outDir + "/meteo" + "c" + str(i + 1) + ".csv"
    if os.path.exists(fileout):
        logging.info(fileout + " exists!")
        continue

    # station attribute structure; tz is always 0 for ERA5
    stat = hp.Bunch(ele=lp.ele[i],
                    slp=lp.slp[i],
                    asp=lp.asp[i],
                    svf=lp.svf[i],
                    lon=lp.lon[i],
                    lat=lp.lat[i],
                    sro=lp.surfRough[i],
                    tz=lp.tz[i])

    #===============================================================================
    #	PLEVEL - make a POB
    #===============================================================================
    # Pressure level object
    plev = nc.Dataset(inDir + "/PLEV.nc")

    # Datetime structure
    nctime = plev.variables['time']
    dtime = pd.to_datetime(
        nc.num2date(nctime[:], nctime.units, calendar="standard"))
    startIndex = int(np.where(dtime == startTime)[0])  #"2016-08-01 18:00:00"
Example n. 7
0
def main(wdir, mode, start, end, member=None):

	
	#these pathnames are standard:
	stationsfile= wdir+'/listpoints.csv'
	demfile = wdir+'/forcing/ele.nc'
	surfile=wdir+'/forcing/SURF.nc'
	plevfile=wdir+ '/forcing/PLEV.nc'



	if mode=="grid":
		dem  = nc.Dataset(demfile)
		dem_ele = dem.variables['Band1'][:]

	f = nc.Dataset( surfile)
	nctime = f.variables['time']
	dtime = pd.to_datetime(nc.num2date(nctime[:],nctime.units, calendar="standard"))
	starti = np.asscalar(np.where(dtime==start+' 00:00:00')[0])
	endi = np.asscalar(np.where(dtime==end+' 00:00:00')[0])
	dtime = dtime[starti:endi,]
	timesteps=len(dtime)
	print(timesteps)
	# constants
	g=9.81


	#===============================================================================
	#	Timer
	#===============================================================================
	import time
	start_time = time.time()

	#===============================================================================
	#	Logging
	#===============================================================================

	logging.basicConfig(level=logging.DEBUG, filename=wdir+"/logfile", filemode="a+",
		                format="%(asctime)-15s %(levelname)-8s %(message)s")

	logging.info("Running "+ str(timesteps)+ " timesteps")
	#===============================================================================
	# Point stuff
	#
	#
	#
	#
	#===============================================================================
	if mode=="points" or mode=="point":
		if not os.path.isfile(stationsfile):
			print("No listpoints.csv found!")

		logging.info("Start points run...")
		
		#open points
		mystations=pd.read_csv(stationsfile)
	#===============================================================================
	# make a pob hack
	#===============================================================================

		import recapp_era5 as rc
		f = nc.Dataset(plevfile)
		lev = f.variables['level'][:]
		 #dataImport.plf_get()    # pressure level air temperature
		var='t'

		# station case
		ds = rc.tscale3dPl( pl=plevfile)
		#timesteps = ds.pl.variables['time'][:].size
		out_xyz_dem, lats, lons, shape, names= ds.demGrid(stations=mystations)

		# init grid stack
		xdim=lev.shape[0]

		ydim=shape[0]
		t_interp_out = np.zeros((xdim,ydim))
		z_interp_out = np.zeros((xdim,ydim))
			

		for timestep in range(starti,endi):
			#print(str(round(float(timestep)/float(timesteps)*100,0))+ "% done")
			gridT,gridZ,gridLat,gridLon=ds.gridValue(var,timestep)
			t_interp, z_interp = ds.inLevelInterp(gridT,gridZ, gridLat,gridLon,out_xyz_dem)
			t_interp_out = np.dstack((t_interp_out, t_interp))
			z_interp_out = np.dstack((z_interp_out, z_interp))
		# drop init blank layer
		tinterp =t_interp_out[:,:,1:]
		zinterp =z_interp_out[:,:,1:]

		pob= hp.Bunch(t=tinterp,z=zinterp, levels=lev)

		logging.info("made a pob!")
		#===============================================================================
		# tscale3d (results and input to radiation routines)
		#===============================================================================
		t = t3d.main(wdir, 'point', 't', starti,endi)
		r = t3d.main(wdir, 'point', 'r', starti,endi)
		u = t3d.main(wdir, 'point', 'u', starti,endi)
		v = t3d.main(wdir, 'point', 'v', starti,endi)
		
		# compute wind speed and direction
		ws = np.sqrt(u**2+v**2)
		wd =   (180 / np.pi) * np.arctan(u/v) + np.where(v>0,180,np.where(u>0,360,0))

		tob = hp.Bunch(t=t,r=r,u=u, v=v, ws=ws,wd=wd, dtime=dtime)
		logging.info("made a tob!")
		#===============================================================================
		# tscale2d
		#===============================================================================
		t2m = t2d.main(wdir, 'point', 't2m', starti,endi)
		tp = t2d.main(wdir, 'point', 'tp', starti,endi)
		ssrd = t2d.main(wdir, 'point', 'ssrd', starti,endi)
		strd = t2d.main(wdir, 'point', 'strd', starti,endi)
		tisr = t2d.main(wdir, 'point', 'tisr', starti,endi)
		d2m = t2d.main(wdir, 'point', 'd2m', starti,endi)
		z = t2d.main(wdir, 'point', 'z', starti,endi) # always true as this is time invariant
		gridEle=z[0,:]/g

		sob = hp.Bunch(t2m=t2m, tp=tp, ssrd=ssrd, strd=strd, tisr=tisr, d2m=d2m, z=z, gridEle=gridEle, dtime=dtime)
		logging.info("made a sob!")
		# functions

		def tp2rate(tp, step):
			""" convert tp from m/timestep (total accumulation over timestep) to rate in mm/h 

					Args:
						step: timestep in seconds (era5=3600, ensemble=10800)

					Note: both EDA (ensemble 3h) and HRES (1h) are accumulated over the timestep
					and therefore treated here the same.
					https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation
			"""
			tp = tp/step*60*60 # convert metres per timestep -> m/hour 
			pmmhr = tp	*1000 # m/hour-> mm/hour
			return pmmhr

		def precipPoint(fineEle, sob):
			'''
			Args:
			fineEle:ele vector from station dataframe
			sob: contains gridEle, tp and dtime
			'''
			# convert TP to mm/hr
			
			lookups = {
				   1:0.35,
				   2:0.35,
				   3:0.35,
				   4:0.3,
				   5:0.25,
				   6:0.2,
				   7:0.2,
				   8:0.2,
				   9:0.2,
				   10:0.25,
				   11:0.3,
				   12:0.35
			}

			# Precipitation lapse rate, varies by month (Liston and Elder, 2006).
			pfis = sob.dtime.month.map(lookups)
			pfis = np.repeat(pfis.values[:,None], fineEle.size, axis=1)

			dz=(fineEle-sob.gridEle)/1e3  # Elevation difference in kilometers between the fine and coarse surface.
			
				   
			lp=(1+pfis.T*dz[:,None])/(1-pfis.T*dz[:,None])# Precipitation correction factor.
			Pf=sob.pmmhr.T*lp
			
			return Pf






		#===============================================================================
		# Precip
		#===============================================================================
		pmmhr = tp2rate(tp,3600)
		sob.pmmhr = pmmhr
		tob.prate = precipPoint(mystations.ele, sob)
		logging.info("made prate!")
		#===============================================================================
		# Longwave
		#===============================================================================
		def instRad(sob, step):
			""" Convert SWin from accumulated quantities in J/m2 to 
			instantaneous W/m2 see: 
			https://confluence.ecmwf.int/pages/viewpage.action?pageId=104241513

			Args:
				step: timestep in seconds (era5=3600, ensemble=10800)

			Note: both EDA (ensemble 3h) and HRES (1h) are accumulated over the timestep
			and therefore treated here the same.
			https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation
					"""
			sob.strd = sob.strd/step  
			sob.ssrd = sob.ssrd/step
			sob.tisr = sob.tisr/step 
			
		def lwin(sob,tob):
			"""Convert to RH (should be in a function). Following MG Lawrence 
			DOI 10.1175/BAMS-86-2-225 """
			A1=7.625 
			B1=243.04 
			C1=610.94
			tc=sob.t2m-273.15
			tdc=sob.d2m-273.15
			tf=tob.t-273.15 # fout.T
			c=(A1*tc)/(B1+tc)
			RHc=100*np.exp((tdc*A1-tdc*c-B1*c)/(B1+tdc)) # Inverting eq. 8 in Lawrence.

			""" Calculate saturation vapor pressure at grid and "subgrid" [also
			through function] using the Magnus formula."""
			
			svpf=C1*np.exp(A1*tf/(B1+tf))
			svpc=C1*np.exp(A1*tc/(B1+tc))

			"""Calculate the vapor pressure at grid (c) and subgrid (f)."""
			vpf=tob.r*svpf/1e2 # RHf
			vpc=RHc*svpc/1e2

			"""Use the vapor pressure and temperature to calculate clear sky
			 emissivity at grid and subgrid. [also function]
			Konzelmann et al. 1994
			Ta in kelvin

			 """
			x1=0.43 
			x2=5.7
			cef=0.23+x1*(vpf/tob.t)**(1/x2) #Pretty sure the use of Kelvin is correct.
			cec=0.23+x1*(vpc/sob.t2m)**(1/x2)

			"""Diagnose the all sky emissivity at grid."""
			sbc=5.67e-8
			aec=sob.strd/(sbc*sob.t2m**4)
			# need to constrain to 1 as original code?

			""" Calculate the "cloud" emissivity at grid, assume this is the same at
		 	subgrid."""
			deltae=aec-cec

			""" Use the former cloud emissivity to compute the all sky emissivity at 
			subgrid. """
			aef=cef+deltae
			LWf=aef*sbc*tob.t**4
			return(LWf)

		instRad(sob,3600)
		tob.lwin = lwin(sob,tob)
		logging.info("made lwin")
		#===============================================================================
		# Shortwave
		#===============================================================================
		#def swin2D(pob,sob,tob, stat, dates): # does not work yet (boolean indexing of 2d arrays fails as np.min returns a single value when we need 161)
		#	
		#	""" toposcale surface pressure using hypsometric equation - move to own 
		#	class """
		#	g=9.81
		#	R=287.05  # Gas constant for dry air.
		#	ztemp = pob.z
		#	Ttemp = pob.t
		#	statz = stat.ele*g
		#	dz=ztemp.transpose()-statz[None,:,None] # transpose not needed but now consistent with CGC surface pressure equations

		#	psf=[]
		#	# loop through timesteps
		#	for i in range(0,dz.shape[0]):
		#		
		#		# 	# find overlying layer
		#		thisp = dz[i,0,:]==np.min(dz[i,0,:][dz[i,0,:]>0])
		#		thispVec = thisp.T.reshape(thisp.size)
		#		TtempVec = Ttemp.reshape(16*161, 20)

		#		# booleen indexing
		#		T1=TtempVec[thispVec,i]


		#		T1=Ttemp[thisp.T,i]
		#		z1=ztemp[thisp.T,i]
		#		p1=pob.levels[thisp]*1e2 #Convert to Pa.
		#		Tbar=np.mean([T1, tob.t[i]],axis=0)
		#		""" Hypsometric equation."""
		#		psf.append(p1*np.exp((z1-statz)*(g/(Tbar*R))))

		#	psf=np.array(psf).squeeze()

		#	## Specific humidity routine.
		#	# mrvf=0.622.*vpf./(psf-vpf); #Mixing ratio for water vapor at subgrid.
		#	#  qf=mrvf./(1+mrvf); # Specific humidity at subgrid [kg/kg].
		#	# fout.q(:,:,n)=qf; 


		#	""" Maybe follow Dubayah's approach (as in Rittger and Girotto) instead
		#	for the shortwave downscaling, other than terrain effects. """

		#	""" Height of the "grid" (coarse scale)"""
		#	Zc=sob.z

		#	""" toa """
		#	SWtoa = sob.tisr 

		#	""" Downwelling shortwave flux of the "grid" using nearest neighbor."""
		#	SWc=sob.ssrd

		#	"""Calculate the clearness index."""
		#	kt=SWc/SWtoa

		#	#kt[is.na(kt)==T]<-0 # make sure 0/0 =0
		#	#kt[is.infinite(kt)==T]<-0 # make sure 0/0 =0
		#	kt[kt<0]=0
		#	kt[kt>1]=0.8 #upper limit of kt
		#	kt=kt


		#	"""
		#	Calculate the diffuse fraction following the regression of Ruiz-Arias 2010 
		#	
		#	"""
		#	kd=0.952-1.041*np.exp(-1*np.exp(2.3-4.702*kt))
		#	kd = kd

		#	""" Use this to calculate the downwelling diffuse and direct shortwave radiation at grid. """
		#	SWcdiff=kd*SWc
		#	SWcdir=(1-kd)*SWc
		#	SWcdiff=SWcdiff
		#	SWcdir=SWcdir

		#	""" Use the above with the sky-view fraction to calculate the 
		#	downwelling diffuse shortwave radiation at subgrid. """
		#	SWfdiff=stat.svf*SWcdiff
		#	SWfdiff.set_fill_value(0)
		#	SWfdiff = SWfdiff.filled()

		#	""" Direct shortwave routine, modified from Joel. 
		#	Get surface pressure at "grid" (coarse scale). Can remove this
		#	part once surface pressure field is downloaded, or just check
		#	for existance. """

		#	ztemp = pob.z
		#	Ttemp = pob.t
		#	dz=ztemp.transpose()-sob.z

		#	psc=[]
		#	for i in range(0,dz.shape[1]):
		#	
		#		#thisp.append(np.argmin(dz[:,i][dz[:,i]>0]))
		#		thisp = dz[:,i]==np.min(dz[:,i][dz[:,i]>0])
		#		z0 = sob.z[i]
		#		T0 = sob.t2m[i]
		#		T1=Ttemp[i,thisp]
		#		z1=ztemp[i,thisp]
		#		p1=pob.levels[thisp]*1e2 #Convert to Pa.
		#		Tbar=np.mean([T0, T1],axis=0)
		#		""" Hypsometric equation."""
		#		psc.append(p1*np.exp((z1-z0)*(g/(Tbar*R))))

		#	psc=np.array(psc).squeeze()

		#	#T1=Ttemp(thisp)
		#	#z1=ztemp(thisp)
		#	#p1=pob.levels(thisp)*1e2 #Convert to Pa.
		#	#Tbar=mean([T0 T1])
		#	
		#	"""compute julian dates"""
		#	jd= sg.to_jd(dates)

		#	"""
		#	Calculates a unit vector in the direction of the sun from the observer 
		#	position.
		#	"""
		#	sunv=sg.sunvector(jd=jd, latitude=stat.lat, longitude=stat.lon, timezone=stat.tz)

		#	"""
		#	Computes azimuth , zenith  and sun elevation 
		#	for each timestamp
		#	"""
		#	sp=sg.sunpos(sunv)
		#	sp=sp

		#	# Cosine of the zenith angle.
		#	sp.zen=sp.zen
		#	#sp.zen=sp.zen*(sp.zen>0) # Sun might be below the horizon.
		#	muz=np.cos(sp.zen) 
		#	muz = muz
		#	# NB! psc must be in Pa (NOT hPA!).
		#	#if np.max(psc<1.5e3): # Obviously not in Pa
		#		#psc=psc*1e2
		#   
		#	
		#	# Calculate the "broadband" absorption coefficient. Elevation correction
		#	# from Kris
		#	ka=(g*muz/(psc))*np.log(SWtoa/SWcdir)	
		#	ka.set_fill_value(0)
		#	ka = ka.filled()
		#	# Note this equation is obtained by inverting Beer's law, i.e. use
		#	#I_0=I_inf x exp[(-ka/mu) int_z0**inf rho dz]
		#	# Along with hydrostatic equation to convert to pressure coordinates then
		#	# solve for ka using p(z=inf)=0.
		#	
		#	
		#	# Now you can (finally) find the direct component at subgrid. 
		#	SWfdir=SWtoa*np.exp(-ka*psf/(g*muz))

		#	""" Then perform the terrain correction. [Corripio 2003 / rpackage insol port]."""

		#	"""compute mean horizon elevation - why negative hor.el possible??? """
		#	horel=(((np.arccos(np.sqrt(stat.svf))*180)/np.pi)*2)-stat.slp
		#	if horel < 0:
		#		horel = 0 
		#	meanhorel = horel

		#	"""
		#	normal vector - Calculates a unit vector normal to a surface defined by 
		#	slope inclination and slope orientation.
		#	"""
		#	nv = sg.normalvector(slope=stat.slp, aspect=stat.asp)

		#	"""
		#	Method 1: Computes the intensity according to the position of the sun (sunv) and 
		#	dotproduct normal vector to slope.
		#	From corripio r package
		#	"""
		#	dotprod=np.dot(sunv ,np.transpose(nv)) 
		#	dprod = dotprod.squeeze()
		#	dprod[dprod<0] = 0 #negative indicates selfshading
		#	dprod = dprod

		#	"""Method 2: Illumination angles. Dozier"""
		#	saz=sp.azi
		#	cosis=muz*np.cos(stat.slp)+np.sin(sp.zen)*np.sin(stat.slp)*np.cos(sp.azi-stat.asp)# cosine of illumination angle at subgrid.
		#	cosic=muz # cosine of illumination angle at grid (slope=0).

		#	"""
		#	SUN ELEVATION below hor.el set to 0 - binary mask
		#	"""
		#	selMask = sp.sel
		#	selMask[selMask<horel]=0
		#	selMask[selMask>0]=1
		#	selMask = selMask

		#	"""
		#	derive incident radiation on slope accounting for self shading and cast 
		#	shadow and solar geometry
		#	BOTH formulations seem to be broken
		#	"""
		#	#SWfdirCor=selMask*(cosis/cosic)*SWfdir
		#	SWfdirCor=selMask*dprod*SWfdir
		#   
		#	SWfglob =  SWfdiff+ SWfdirCor
		#	return SWfglob
		#	""" 
		#	Missing components
		#	- terrain reflection
		#	"""
		def swin1D(pob,sob,tob, stat, dates, index):
				# many arrays transposed
				""" toposcale surface pressure using hypsometric equation - move to own 
				class """
				g= 9.81
				R=287.05  # Gas constant for dry air.
				tz=0 # ERA5 is always utc0, used to compute sunvector
				ztemp = pob.z[:,index,:].T
				Ttemp = pob.t[:,index,:].T
				statz = stat.ele[index]*g
				dz=ztemp.T-statz # transpose not needed but now consistent with CGC surface pressure equations

				psf=[]
				# loop through timesteps
				#for i in range(starti,endi):
				for i in range(0,dz.shape[1]):
					
					# 	# find overlying layer
					thisp = dz[:,i]==np.min(dz[:,i][dz[:,i]>0])

					# boolean indexing
					T1=Ttemp[i,thisp]
					z1=ztemp[i,thisp]
					p1=pob.levels[thisp]*1e2 #Convert to Pa.
					Tbar=np.mean([T1, tob.t[i, index]],axis=0)
					""" Hypsometric equation."""
					psf.append(p1*np.exp((z1-statz)*(g/(Tbar*R))))

				psf=np.array(psf).squeeze()

				## Specific humidity routine.
				# mrvf=0.622.*vpf./(psf-vpf); #Mixing ratio for water vapor at subgrid.
				#  qf=mrvf./(1+mrvf); # Specific humidity at subgrid [kg/kg].
				# fout.q(:,:,n)=qf; 


				""" Maybe follow Dubayah's approach (as in Rittger and Girotto) instead
				for the shortwave downscaling, other than terrain effects. """

				""" Height of the "grid" (coarse scale)"""
				Zc=sob.z[:,index] # should this be a single value or vector?

				""" toa """
				SWtoa = sob.tisr[:,index]

				""" Downwelling shortwave flux of the "grid" using nearest neighbor."""
				SWc=sob.ssrd[:,index]

				"""Calculate the clearness index."""
				kt=SWc/SWtoa

				#kt[is.na(kt)==T]<-0 # make sure 0/0 =0
				#kt[is.infinite(kt)==T]<-0 # make sure 0/0 =0
				kt[kt<0]=0
				kt[kt>1]=0.8 #upper limit of kt
				kt=kt


				"""
				Calculate the diffuse fraction following the regression of Ruiz-Arias 2010 
				
				"""
				kd=0.952-1.041*np.exp(-1*np.exp(2.3-4.702*kt))
				kd = kd

				""" Use this to calculate the downwelling diffuse and direct shortwave radiation at grid. """
				SWcdiff=kd*SWc
				SWcdir=(1-kd)*SWc
				SWcdiff=SWcdiff
				SWcdir=SWcdir

				""" Use the above with the sky-view fraction to calculate the 
				downwelling diffuse shortwave radiation at subgrid. """
				SWfdiff=stat.svf[index]*SWcdiff
				SWfdiff = np.nan_to_num(SWfdiff) # convert nans (night) to 0


				""" Direct shortwave routine, modified from Joel. 
				Get surface pressure at "grid" (coarse scale). Can remove this
				part once surface pressure field is downloaded, or just check
				for existence. """

				ztemp = pob.z[:,index,:].T
				Ttemp = pob.t[:,index,:].T
				dz=ztemp.transpose()-sob.z[:,index]

				psc=[]
				for i in range(0,dz.shape[1]):
				
					#thisp.append(np.argmin(dz[:,i][dz[:,i]>0]))
					thisp = dz[:,i]==np.min(dz[:,i][dz[:,i]>0])
					z0 = sob.z[i,index]
					T0 = sob.t2m[i,index]
					T1=Ttemp[i,thisp]
					z1=ztemp[i,thisp]
					p1=pob.levels[thisp]*1e2 #Convert to Pa.
					Tbar=np.mean([T0, T1],axis=0)
					""" Hypsometric equation."""
					psc.append(p1*np.exp((z1-z0)*(g/(Tbar*R))))

				psc=np.array(psc).squeeze()

				#T1=Ttemp(thisp)
				#z1=ztemp(thisp)
				#p1=pob.levels(thisp)*1e2 #Convert to Pa.
				#Tbar=mean([T0 T1])
				
				"""compute julian dates"""
				jd= sg.to_jd(dates)

				"""
				Calculates a unit vector in the direction of the sun from the observer 
				position.
				"""
				sunv=sg.sunvector(jd=jd, latitude=stat.lat[index], longitude=stat.lon[index], timezone=tz)

				"""
				Computes azimuth , zenith  and sun elevation 
				for each timestamp
				"""
				sp=sg.sunpos(sunv)
				sp=sp

				# Cosine of the zenith angle.
				sp.zen=sp.zen
				#sp.zen=sp.zen*(sp.zen>0) # Sun might be below the horizon.
				muz=np.cos(sp.zen) 
				muz = muz
				# NB! psc must be in Pa (NOT hPA!).
				#if np.max(psc<1.5e3): # Obviously not in Pa
					#psc=psc*1e2
			   
				
				# Calculate the "broadband" absorption coefficient. Elevation correction
				# from Kris
				ka=(g*muz/(psc))*np.log(SWtoa/SWcdir)	
				ka = np.nan_to_num(ka)

				# Note this equation is obtained by inverting Beer's law, i.e. use
				#I_0=I_inf x exp[(-ka/mu) int_z0**inf rho dz]
				# Along with hydrostatic equation to convert to pressure coordinates then
				# solve for ka using p(z=inf)=0.
				
				
				# Now you can (finally) find the direct component at subgrid. 
				SWfdir=SWtoa*np.exp(-ka*psf/(g*muz))

				""" Then perform the terrain correction. [Corripio 2003 / rpackage insol port]."""

				"""compute mean horizon elevation - why negative hor.el possible??? """
				horel=(((np.arccos(np.sqrt(stat.svf[index]))*180)/np.pi)*2)-stat.slp[index]
				if horel < 0:
					horel = 0 
				meanhorel = horel

				"""
				normal vector - Calculates a unit vector normal to a surface defined by 
				slope inclination and slope orientation.
				"""
				nv = sg.normalvector(slope=stat.slp[index], aspect=stat.asp[index])

				"""
				Method 1: Computes the intensity according to the position of the sun (sunv) and 
				dotproduct normal vector to slope.
				From corripio r package
				"""
				dotprod=np.dot(sunv ,np.transpose(nv)) 
				dprod = dotprod.squeeze()
				dprod[dprod<0] = 0 #negative indicates selfshading
				dprod = dprod

				"""Method 2: Illumination angles. Dozier"""
				saz=sp.azi
				cosis=muz*np.cos(stat.slp[index])+np.sin(sp.zen)*np.sin(stat.slp[index])*np.cos(sp.azi-stat.asp[index])# cosine of illumination angle at subgrid.
				cosic=muz # cosine of illumination angle at grid (slope=0).

				"""
				SUN ELEVATION below hor.el set to 0 - binary mask
				"""
				selMask = sp.sel
				selMask[selMask<horel]=0
				selMask[selMask>0]=1
				selMask = selMask

				"""
				derive incident radiation on slope accounting for self shading and cast 
				shadow and solar geometry
				BOTH formulations seem to be broken
				"""
				#SWfdirCor=selMask*(cosis/cosic)*SWfdir
				SWfdirCor=selMask*dprod*SWfdir
			   
				SWfglob =  SWfdiff+ SWfdirCor
				return SWfglob
				""" 
				Missing components
				- terrain reflection
				"""
		# init grid stack
		# init first row
		xdim=dtime.shape[0]
		ts_swin = np.zeros((xdim))
		for i in range(0, mystations.shape[0]):
			print(i)
			ts= swin1D(pob=pob,sob=sob,tob=tob, stat=mystations, dates=dtime, index=i)
			ts_swin=np.column_stack((ts_swin,ts))

		# drop the initialisation column
		tob.swin = ts_swin[:,1:]
		logging.info("made swin!")
		#===============================================================================
		# make dataframe (write individual files plus netcdf)
		#===============================================================================
		for i in range(0,tob.t.shape[1]):
			df = pd.DataFrame({	"TA":tob.t[:,i], 
						"RH":tob.r[:,i],
						"WS":tob.ws[:,i],
						"WD":tob.wd[:,i], 
						"LWIN":tob.lwin[:,i], 
						"SWIN":tob.swin[:,i], 
						"PRATE":tob.prate[i,:]
						},index=tob.dtime)
			df.index.name="datetime"

			fileout=wdir+"/meteo"+str(i)+"_"+start+"_.csv"
			column_order = ['TA', 'RH', 'WS', 'WD', 'LWIN', 'SWIN', 'PRATE']
			df[column_order].to_csv(path_or_buf=fileout ,na_rep=-999,float_format='%.3f')
		#logging.info(fileout + " complete")

		#===============================================================================
		# Grid stuff
		#
		#===============================================================================
	if mode=="grid":
		print ("Running TopoSCALE3D grid")
		#===============================================================================
		# tscale3d
		#===============================================================================

		
		t = t3d.main( wdir, 'grid', 't', starti,endi)
		r = t3d.main( wdir, 'grid', 'r', starti,endi)
		gtob = hp.Bunch(t=t,r=r, dtime=dtime)

		#===============================================================================
		# tscale2d
		#===============================================================================
		t2m = t2d.main( wdir, 'grid', 't2m', starti,endi)
		tp = t2d.main( wdir, 'grid', 'tp', starti,endi)
		ssrd = t2d.main( wdir, 'grid', 'ssrd', starti,endi)
		strd = t2d.main( wdir, 'grid', 'strd', starti,endi)
		tisr = t2d.main( wdir, 'grid', 'tisr', starti,endi)
		d2m = t2d.main( wdir, 'grid', 'd2m', starti,endi)
		z = t2d.main( wdir, 'grid', 'z', starti,endi) # always true as this is time invariant
		gridEle=z[:,:,0]/g
		gsob = hp.Bunch(t2m=t2m, tp=tp, ssrd=ssrd, strd=strd, tisr=tisr, d2m=d2m, z=z, gridEle=gridEle, dtime=dtime)


		def precipGrid(fineEle, gsob):
			'''
			Args:
				fineEle
				gridEle:
			'''
			# convert TP to mm/hr
			
			lookups = {
				   1:0.35,
				   2:0.35,
				   3:0.35,
				   4:0.3,
				   5:0.25,
				   6:0.2,
				   7:0.2,
				   8:0.2,
				   9:0.2,
				   10:0.25,
				   11:0.3,
				   12:0.35
			}

			# Precipitation lapse rate, varies by month (Liston and Elder, 2006).
			pfis = gsob.dtime.month.map(lookups)
			

			dz=(fineEle-gsob.gridEle)/1e3  # Elevation difference in kilometers between the fine and coarse surface.
			dz2 =dz.reshape(dz.size) #make grid a vector
			pfis2 = np.repeat(pfis.values[:,None], dz2.size, axis=1)
				   
			lp=(1+pfis2.T*dz2[:,None])/(1-pfis2.T*dz2[:,None])# Precipitation correction factor.
			lp2 = lp.reshape(gsob.pmmhr.shape)
			Pf=gsob.pmmhr*lp2
			
			return Pf



		gsob.pmmhr = tp2rate(tp,3600)
		grid_prate = precipGrid(dem_ele,gsob)

		#===============================================================================
		# Longwave
		#===============================================================================
		def instRad(sob, step):
			""" Convert SWin from accumulated quantities in J/m2 to 
			instantaneous W/m2 see: 
			https://confluence.ecmwf.int/pages/viewpage.action?pageId=104241513

			Args:
				step: timestep in seconds (era5=3600, ensemble=10800)

			Note: both EDA (ensemble 3h) and HRES (1h) are accumulated over the timestep
			and therefore treated here the same.
			https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation
					"""
			sob.strd = sob.strd/step  
			sob.ssrd = sob.ssrd/step
			sob.tisr = sob.tisr/step 
		
		def lwin(sob,tob):
			"""Convert to RH (should be in a function). Following MG Lawrence 
			DOI 10.1175/BAMS-86-2-225 """
			A1=7.625 
			B1=243.04 
			C1=610.94
			tc=sob.t2m-273.15
			tdc=sob.d2m-273.15
			tf=tob.t-273.15 # fout.T
			c=(A1*tc)/(B1+tc)
			RHc=100*np.exp((tdc*A1-tdc*c-B1*c)/(B1+tdc)) # Inverting eq. 8 in Lawrence.

			""" Calculate saturation vapor pressure at grid and "subgrid" [also
			through function] using the Magnus formula."""
			
			svpf=C1*np.exp(A1*tf/(B1+tf))
			svpc=C1*np.exp(A1*tc/(B1+tc))

			"""Calculate the vapor pressure at grid (c) and subgrid (f)."""
			vpf=tob.r*svpf/1e2 # RHf
			vpc=RHc*svpc/1e2

			"""Use the vapor pressure and temperature to calculate clear sky
			 emissivity at grid and subgrid. [also function]
			Konzelmann et al. 1994
			Ta in kelvin

			 """
			x1=0.43 
			x2=5.7
			cef=0.23+x1*(vpf/tob.t)**(1/x2) #Pretty sure the use of Kelvin is correct.
			cec=0.23+x1*(vpc/sob.t2m)**(1/x2)

			"""Diagnose the all sky emissivity at grid."""
			sbc=5.67e-8
			aec=sob.strd/(sbc*sob.t2m**4)
			# need to constrain to 1 as original code?

			""" Calculate the "cloud" emissivity at grid, assume this is the same at
		 	subgrid."""
			deltae=aec-cec

			""" Use the former cloud emissivity to compute the all sky emissivity at 
			subgrid. """
			aef=cef+deltae
			LWf=aef*sbc*tob.t**4
			return(LWf)

		instRad(gsob,3600)
		ts_lwin = lwin(gsob,gtob)

	logging.info("Toposcale complete!")
	logging.info(" %f minutes for setup" % round((time.time()/60 - start_time/60),2) )
	print("Toposcale complete!")