Example No. 1
def getROMSfilenames(self,startdatenum,operMode):
	if operMode=='oper':
		self.roms_parent_file = self.romsdir+'roms_BRIFS_parent_'+self.romsdatestring+'_his.nc'

		romsfile = self.romsdir+'roms_BRIFS_parent_'+self.romsdatestring+'_his.nc'
		romsfileop = self.romsdir+'roms_BRIFS_parent_'+self.romsdatestring+'_op_his.nc'

		if os.path.isfile(romsfile):
			self.roms_parent_file = romsfile
		elif os.path.isfile(romsfileop):
			self.roms_parent_file = romsfileop
		else:
			print "******modelData: getROMSfilenames ERROR: NEITHER FILE EXISTS:"
			print romsfile
			print romsfileop
			print "1. CHECK FILENAME \nOR \n2. MODIFY self.roms_parent_file VARIABLE IN modelData.py!"
			q()

	else:
		romsfile = self.romsdir+'roms_BRIFS_parent_'+self.romsdatestring+'_hind_his.nc'
		if os.path.isfile(romsfile):
			self.roms_parent_file = romsfile
		else:
			print "******modelData: getROMSfilenames ERROR: THE FILE DOES NOT EXIST:"
			print romsfile
			print "1. CHECK FILENAME \nOR \n2. MODIFY self.roms_parent_file VARIABLE IN modelData.py!"
			q()

	self.roms_child_file = self.roms_parent_file.replace('parent','child')

	return 	self.roms_parent_file, self.roms_child_file
Example No. 2
def getROMSfilenames(self, startdatenum, operMode):
    if operMode == 'oper':
        self.roms_parent_file = self.romsdir + 'roms_BRIFS_parent_' + self.romsdatestring + '_his.nc'

        romsfile = self.romsdir + 'roms_BRIFS_parent_' + self.romsdatestring + '_his.nc'
        romsfileop = self.romsdir + 'roms_BRIFS_parent_' + self.romsdatestring + '_op_his.nc'

        if os.path.isfile(romsfile):
            self.roms_parent_file = romsfile
        elif os.path.isfile(romsfileop):
            self.roms_parent_file = romsfileop
        else:
            print "******modelData: getROMSfilenames ERROR: NEITHER FILE EXISTS:"
            print romsfile
            print romsfileop
            print "1. CHECK FILENAME \nOR \n2. MODIFY self.roms_parent_file VARIABLE IN modelData.py!"
            q()

    else:
        romsfile = self.romsdir + 'roms_BRIFS_parent_' + self.romsdatestring + '_hind_his.nc'
        if os.path.isfile(romsfile):
            self.roms_parent_file = romsfile
        else:
            print "******modelData: getROMSfilenames ERROR: THE FILE DOES NOT EXIST:"
            print romsfile
            print "1. CHECK FILENAME \nOR \n2. MODIFY self.roms_parent_file VARIABLE IN modelData.py!"
            q()

    self.roms_child_file = self.roms_parent_file.replace('parent', 'child')

    return self.roms_parent_file, self.roms_child_file
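
A minimal sketch of the filename patterns the method above resolves, assuming hypothetical values for the instance attributes (romsdir matches the directory used in main() further below; the date-string format is an assumption):

# Illustration only: the values below are stand-ins, not taken from the source.
romsdir = '/home/rissaga/new_setup/Archive/Outputs/ROMS/'
romsdatestring = '2017-03-26_12:00:00'   # assumed format of self.romsdatestring
# operMode == 'oper' tries these two candidates in order:
print(romsdir + 'roms_BRIFS_parent_' + romsdatestring + '_his.nc')
print(romsdir + 'roms_BRIFS_parent_' + romsdatestring + '_op_his.nc')
# any other operMode (hindcast) tries:
print(romsdir + 'roms_BRIFS_parent_' + romsdatestring + '_hind_his.nc')
# the child-grid file is the chosen parent filename with 'parent' replaced by 'child'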
Example No. 3
def getModelName(ptm, dateString):
    if ptm().model == 'ADRIPOM' or ptm().model == 'NADRIPOM':
        # here, dateString should be in YYYYMMDD format
        return ptm().ncdir + ptm().model + '_' + dateString + '.nc'
    elif ptm().model == 'sbADRIPOM' or ptm().model == 'sbNADRIPOM':
        dtobjStart = datetime.strptime(dateString, '%Y%m%d')
        dtobjEnd = dtobjStart + timedelta(hours=ptm().modelRuntimeInHours)
        strStart = datetime.strftime(dtobjStart, "%Y%m%d")
        return ptm().ncdir + 'zeta.' + ptm().model + '_' + strStart + '.nc'
    else:
        print("\nERROR in getModelName: unknown model name: " + ptm().model)
        q()
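
A hedged illustration of the filenames getModelName builds, using the sbNADRIPOM defaults from the setup class further below (the date is a placeholder):

# Placeholder values for illustration; ncdir follows the setup class below.
ncdir = '/home/momo/particleTracking/sbNADRIPOM_netcdf/'
dateString = '20170326'
# 'ADRIPOM' / 'NADRIPOM'     -> <ncdir>NADRIPOM_20170326.nc
# 'sbADRIPOM' / 'sbNADRIPOM' -> <ncdir>zeta.sbNADRIPOM_20170326.nc
print(ncdir + 'zeta.sbNADRIPOM_' + dateString + '.nc')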
Example No. 4
def main():

    # check for input date YYYYMMDD from console:
    try:
        strdate = sys.argv[1]
        operMode = sys.argv[2]
    except:
        printHelp()

    # set OPERATIONAL wrf and roms netCDF output directories:

    if operMode == 'oper':
        wrfdir = '/home/rissaga/new_setup/Archive/Outputs/WRF/' + strdate + '_op/'
    # set HINDCAST wrf and roms netCDF output directories:
    else:
        wrfdir = '/home/rissaga/new_setup/Archive/Outputs/WRF/' + strdate + '_hind/'

    # wrfdir = '/home/mlicer/'
    romsdir = '/home/rissaga/new_setup/Archive/Outputs/ROMS/'

    plotdir = '/home/mlicer/BRIFSverif/pyVerif/' + strdate + '_' + operMode + '/'

    os.system('mkdir -p ' + plotdir)

    # determine timeWindow [hours] for comparisons:
    timeWindow = 48

    # parse start date:
    startdatenum = datetime.strptime(strdate, '%Y%m%d')
    enddatenum = startdatenum + timedelta(hours=timeWindow)

    # set WRF files and dates to read:
    today = startdatenum
    yesterday = startdatenum - timedelta(days=1)
    tomorrow = startdatenum + timedelta(days=1)
    wrfdatestring = datetime.strftime(startdatenum, '%Y-%m-%d_12:00:00')
    wrfdatestring_yesterday = datetime.strftime(
        startdatenum - timedelta(days=1), '%Y-%m-%d_12:00:00')
    wrfdatestring_tomorrow = datetime.strftime(
        startdatenum + timedelta(days=1), '%Y-%m-%d_12:00:00')

    wrf_file = wrfdir + 'wrfout_d02_' + wrfdatestring
    wrf_file_yesterday = wrfdir + 'wrfout_d02_' + wrfdatestring_yesterday
    wrf_file_tomorrow = wrfdir + 'wrfout_d02_' + wrfdatestring_tomorrow

    # specify fields for comparisons:
    observationFields = [
        'time', 'LON', 'LAT', 'HEIGHT', 'SLEV', 'QC_SLEV', 'WTR_PRE',
        'QC_WTR_PRE', 'AIR_PRE', 'QC_AIR_PRE'
    ]

    wrfFields = [
        'location', 'Times', 'XLONG', 'XLAT', 'pointLon', 'pointLat',
        'pointPSFC', 'pointMSLP'
    ]
    romsFields = [
        'location', 'ocean_time', 'lon_rho', 'lat_rho', 'h', 'pointLon',
        'pointLat', 'pointSSH'
    ]

    # get a list of all available observations:
    fileList = getAllObservations(strdate, timeWindow)

    # exit if empty:
    if not fileList:
        print('\n No observations found for this date: ' + strdate +
              '! \n Exiting.')
        q()
    # keep working if not empty:
    else:
        print "\nReading observation files:"
        sensorType = []
        for k in range(len(fileList)):
            print(fileList[k], "...")
            # determine the sensorType, if any, from the filename:
            m = re.findall(r'.*_(\w+-\w+.?)_L1', fileList[k])
            if m:
                if 'station' in m[0]:
                    sensorType.append('')
                else:
                    sensorType.append(m[0])
        print ""

    # read the files into a 'stations' data object and merge files (sensor types) from different months (years) if necessary:
    stations, sensorType = obsData(fileList, sensorType,
                                   observationFields).read()

    # extract WRF for available grid points:
    wrf_yesterday = modelData(stations, yesterday, wrfFields, romsFields,
                              wrfdir, romsdir, operMode).readWRF()
    wrf_today = modelData(stations, today, wrfFields, romsFields, wrfdir,
                          romsdir, operMode).readWRF()
    wrf_tomorrow = modelData(stations, tomorrow, wrfFields, romsFields, wrfdir,
                             romsdir, operMode).readWRF()

    # merge WRF times and air pressures from all three days for all stations:
    wrf_t_3days, wrf_p_3days = mergeWRF(stations, wrf_yesterday, wrf_today,
                                        wrf_tomorrow, 'pointMSLP')
    # wrf_t_3days,wrf_p_3days = mergeWRF(stations,wrf_yesterday,wrf_today,wrf_tomorrow,'pointPSFC')

    # extract ROMS
    roms = modelData(stations, startdatenum, wrfFields, romsFields, wrfdir,
                     romsdir, operMode).readROMS(startdatenum, operMode)

    # compute basic statistics (BIAS, RMSE, CORR):
    stats = basicStatistics(strdate, sensorType, stations, wrf_t_3days,
                            wrf_p_3days, roms)

    # plot graphs:
    plotBRIFS(plotdir, strdate, sensorType, stations, wrf_t_3days, wrf_p_3days,
              roms, stats)
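
The entry point expects the date and the run mode on the command line; a hedged invocation sketch (the script filename is an assumption, not given in the source):

# Assumed invocation (the script name below is hypothetical):
#   python brifsVerification.py 20170326 oper
# sys.argv[1] -> strdate  in YYYYMMDD format
# sys.argv[2] -> operMode ('oper' for operational, anything else for hindcast)
if __name__ == '__main__':
    main()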
Example No. 5
    def __init__(self):
        # model name:
        self.model = 'sbNADRIPOM'
        # set home directory:
        self.homedir = '/home/momo/particleTracking/source_sb/'
        # set out directory for namelist:
        self.outdir = '/home/momo/particleTracking/rundir/'
        # set netcdf directory (containing hydrodynamics netcdfs):
        self.ncdir = '/home/momo/particleTracking/' + self.model + '_netcdf/'
        # start date of particle tracking (YYYYMMDDHH):
        startdate = str(2017032600)
        # initial location of spill:
        #self.lat0=44.806286
        #self.lon0=13.143596
        # release type:
        #   'singlePoint'    = point release for releaseDuration from the first releasePoints point
        #   'multiplePoints' = point release for releaseDuration from all releasePoints points
        #   'polygon'        = instantaneous (releaseDuration will be set to 1!!) linear
        #                      release along the SIDES of the releaseShape polygon points.
        #                      Choosing multiple (N) points increases the total number of particles N-fold!
        self.releaseType = 'singlePoint'
        #self.releaseType='multiplePoints'
        # self.releaseType='polygon'
        # releasePoints: [ [lat1,lon1] , [lat2,lon2] , ... , [latN,lonN] ]
        #self.releasePoints=[ [45.7239,13.603] , [45.5042,12.742], [44.9711, 12.609] ]
        self.releasePoints = [[45.5904065, 13.6993]]
        # self.releasePoints=[ [45.1793,12.7857], [44.673,13.0229],[45.1793,13.26],[44.882,12.601],[44.882,13.4447],[45.1793,12.7857] ]

        # depth level of the spill (1=surface)
        self.level = 1
        # perform 3D tracking: (True=3D, False=single depth level)
        self.tracking3D = False
        if self.tracking3D:
            print "WARNING: 3D tracking IS NOT YET SUPPORTED in sbADRIPOM!"
            print "Setting: self.tracking3D=False!"
            self.tracking3D = False
        # perform back-tracking (True) or regular particle tracking (False):
        self.performBacktracking = False
        # number of hours of tracking simulation ('polygon' release type will set this to 1):
        self.numHoursOfTracking = 24
        # perform daily hotstart (initialize particles from previous step) - to be continued:
        # self.performHotstart=False
        # duration of particle release in hours:
        self.releaseDuration = 2
        # number of particles PER RELEASE POINT:
        numberOfParticles = 120
        # total mass of pollutant [kg]:
        self.totalMass = 20000.
        # oil type index:
        # 1 = arabian light
        # 2 = arabian medium
        # 3 = arabian heavy
        self.oilIndex = 2
        # what percentage of particles evaporates completely:
        self.completeEvapPercentage = 0.2
        # evaporation threshold for particles that do not evaporate completely:
        # i.e., self.evapThreshold=0.77 implies that 77 percent of each particle
        # that does not evaporate completely will evaporate.
        self.evapThreshold = 0.77
        # do we read turbulent diffusivity from the ocean model or not:
        self.useOceanModelDiffusivity = False
        if self.useOceanModelDiffusivity:
            print "WARNING: useOceanModelDiffusivity IS NOT YET SUPPORTED in sbADRIPOM!!!"
            print "Setting: self.useOceanModelDiffusivity=False!!!"
            self.useOceanModelDiffusivity = False
        # default turbulent diffusivity
        # (this is overridden by the Dh and Dv from the model, if
        # self.useOceanModelDiffusivity=True)
        self.Dh = 0.001
        # create startdate datetime object:
        self.startdate = datetime.strptime(startdate, "%Y%m%d%H")
        # specify number of hours in each model netcdf:
        if self.model == 'sbADRIPOM':
            self.modelRuntimeInHours = 74  #54
        elif self.model == 'sbNADRIPOM':
            self.modelRuntimeInHours = 72
        else:
            print "INVALID MODEL: " + self.model
            q()
        # specify time increment in hours in each model netcdf:
        self.dt = 1

        #=======END OF USER INPUT===============================================
        #=======================================================================
        #=======DO NOT MODIFY THE CODE BELOW!===================================
        #=======================================================================

        #determine temporal sign and limit duration of spill:
        if self.performBacktracking:
            self.sign_t = int(-1)
            self.releaseDuration = 1
        else:
            self.sign_t = int(+1)

        #determine enddate:
        self.enddate = self.startdate + self.sign_t * timedelta(
            hours=self.numHoursOfTracking)
        self.startingHour = int(datetime.strftime(self.startdate, "%H"))

        # how many dates does the simulation span (we need to include the starting hour because this may
        # bring in an extra day, depending on the hours of tracking):
        self.numDaysOfTracking = int(
            math.ceil((self.startingHour + self.numHoursOfTracking) / 24) + 1)

        self.modelRuntimeInDays = int(self.modelRuntimeInHours / 24)

        # determine release coordinates according to releaseType:
        if self.releaseType == 'singlePoint':
            self.releasePoints = [[
                self.releasePoints[0][0], self.releasePoints[0][1]
            ]]
        # elif self.releaseType=='multiplePoints':
        # 	pass
        elif self.releaseType == 'polygon':
            self.releaseDuration = 1
        else:
            print("SETUP ERROR: unknown releaseType: " + self.releaseType)
            print("Should be 'singlePoint' or 'polygon'!")
            q()

        self.numberOfReleasePoints = len(self.releasePoints)
        self.numberOfParticles = max(
            numberOfParticles,
            self.releaseDuration * self.numberOfReleasePoints)
        # if numberOfParticles < releaseDuration, create at least one particle per step:

        self.releaseDuration = min(self.releaseDuration,
                                   self.numberOfParticles,
                                   self.numHoursOfTracking)

        self.numCreatedParticlesPerStep = max(
            1, int(self.numberOfParticles / self.releaseDuration))
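
With the defaults above (two-hour release, 120 particles, a single release point, 24 hours of tracking), the derived release quantities at the end of __init__ work out as in this sketch:

# Re-running the arithmetic from the end of __init__ with the default values above:
releaseDuration = 2
numberOfParticles = 120
numberOfReleasePoints = 1
numHoursOfTracking = 24

numberOfParticles = max(numberOfParticles, releaseDuration * numberOfReleasePoints)  # 120
releaseDuration = min(releaseDuration, numberOfParticles, numHoursOfTracking)        # 2
numCreatedParticlesPerStep = max(1, int(numberOfParticles / releaseDuration))        # 60 particles per hourly step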
Example No. 6
def checkForNeccessaryFiles(ptm):
    print("-------------------------------------------")
    print("Starting particle tracking preprocessing...")
    print("Checking for neccessary files...")

    # create integers of start date and end date:
    startDate = int(datetime.strftime(ptm().startdate, "%Y%m%d%H"))
    endDate = int(datetime.strftime(ptm().enddate, "%Y%m%d%H"))

    # MAIN LOOP over days of tracking:
    numOfHoursToBeReadFromFiles = []
    neccessaryFiles = []
    days = []
    days_dt = []
    files = []
    hourReadStart = []
    hourReadEnd = []
    daysToRead = []

    for k in range(ptm().numDaysOfTracking):
        # current date datetime object:
        currentDatetime = ptm().startdate + ptm().sign_t * timedelta(days=k)
        # current day string:
        day_str = datetime.strftime(currentDatetime, "%Y%m%d")

        # netcdf file of current day:
        filename = getModelName(ptm, day_str)

        # add day to days if not already there:
        if day_str not in days:
            days = np.append(days, day_str)
            days_dt = np.append(days_dt, currentDatetime)

        # add current file to files if not already there:
        if filename not in files:
            files = np.append(files, filename)

        # filter out only dates for actually existing files:
        existingDates = [
            d for i, d in enumerate(days_dt) if os.path.isfile(files[i])
        ]
        existingFiles = [
            f for i, f in enumerate(files) if os.path.isfile(files[i])
        ]

    # check for first day:
    firstday = datetime.strftime(ptm().startdate, "%Y%m%d")
    # first file name - it's not yet known if it exists:
    firstfile = getModelName(ptm, firstday)
    # this will later be added to how many days we need to read:
    increaseDaysToRead0 = 0

    if not os.path.isfile(firstfile):

        possibleFirstFileExists = False

        # loop over model runtime days at current date:
        for k in range(ptm().modelRuntimeInDays):
            # go back in time as many days as necessary and possible to locate a
            # possible first file:
            currentDatetime = ptm().startdate - timedelta(days=k)
            daynowstr = datetime.strftime(currentDatetime, "%Y%m%d")
            filenow = getModelName(ptm, daynowstr)

            # if current file exists:
            if os.path.isfile(filenow) and not possibleFirstFileExists:
                # set flag to true and increase days to read by 1:
                possibleFirstFileExists = True
                increaseDaysToRead0 = increaseDaysToRead0 + 1
                # update existing dates and files:
                if ptm().performBacktracking:
                    #change nothing
                    existingDates = existingDates
                    existingFiles = existingFiles
                else:
                    # prepend current date and filename:
                    existingDates = np.insert(existingDates, 0,
                                              currentDatetime)
                    existingFiles = np.insert(existingFiles, 0, filenow)
                # stop at the first (most recent) existing file found:
                break
    else:
        possibleFirstFileExists = True

    # overall check if we have the file containing the first day data:
    if not possibleFirstFileExists:
        print("\nERROR: first date " + firstday +
              " data could not be retrieved from existing files:\n")
        print(existingFiles)
        q()

    # check for gaps in the data:
    for i in range(len(existingDates) - 1):
        # difference in days between consecutive dates in existing dates array:
        deltaDays = abs(int((existingDates[i + 1] - existingDates[i]).days))
        daysToRead = np.append(daysToRead, deltaDays)

        # if the gap cannot be bridged by all the hours contained in the
        # last existing file, then abort:
        if deltaDays > ptm().modelRuntimeInDays:
            daynowstr = datetime.strftime(existingDates[i], "%Y%m%d")
            print("ERROR: data gap on date " + daynowstr +
                  " exceeds modelRuntimeInDays!")
            q()

    # check for last day (note that the loop above leaves the last day out):
    deltaEndHours = int(abs((existingDates[-1] - days_dt[-1]).seconds) / 3600.)
    lastday = datetime.strftime(ptm().enddate, "%Y%m%d")
    allowed_days = [
        ptm().enddate - timedelta(days=k)
        for k in range(ptm().modelRuntimeInDays)
    ]
    allowed_lastfiles = [
        getModelName(ptm, datetime.strftime(dy, "%Y%m%d"))
        for dy in allowed_days
    ]

    # check if last file exists:
    existLastFile = False
    for lastfile in allowed_lastfiles:
        if os.path.isfile(lastfile):
            existLastFile = True
            break

    if not existLastFile:
        print("ERROR: last date could not be extracted from existing files:")
        print(lastday)
        print(allowed_lastfiles)
        q()

    # similar condition for the last date:
    if deltaEndHours > ptm().modelRuntimeInHours and not ptm().performBacktracking:
        print(
            "ERROR: end date file missing and could not be recovered from previous dates!"
        )
        q()
    # or in case of back-tracking, the initial date:
    elif ptm().performBacktracking and not os.path.isfile(lastfile):
        print("ERROR: neccessary file " + lastfile +
              " (with earliest dates) missing for backtracking!")
        q()

    # if there were no aborts until now, the final day exists,
    # therefore add the final day to daysToRead:

    if not ptm().performBacktracking:
        daysToRead = np.append(daysToRead, 1)
    else:
        daysToRead = np.insert(daysToRead, 0, 1)

    # correct the reading intervals for a missing first day, if necessary:
    if ptm().performBacktracking:
        daysToRead[0] = daysToRead[0] + increaseDaysToRead0

    # set temporal hourly indices for start and end of netcdf array
    # reading IN FORTRAN 1-BASED NOTATION:
    hourReadStart = []
    hourReadEnd = []
    lastHourLastDay = abs(int(datetime.strftime(ptm().enddate, "%H")))

    for i in range(len(existingDates)):
        print("I,LEN:", i, len(existingDates), range(len(existingDates)))
        # if we read only the first file, read up to numHoursOfTracking
        # (w/o backtracking):
        if i == 0 and not ptm().performBacktracking and len(existingDates) == 1:
            hourReadStart = np.append(
                hourReadStart, int(datetime.strftime(ptm().startdate, "%H")) + 1)
            if ptm().modelRuntimeInHours < ptm().numHoursOfTracking:
                print("ERROR: numHoursOfTracking exceeds modelRuntimeInHours.")
                print("More NetCDF files are needed!")
                q()
            else:
                hourReadEnd = np.append(hourReadEnd, ptm().numHoursOfTracking)

        #if we read more files than just first file:
        # first day w/o backtracking:
        if i == 0 and not ptm().performBacktracking and len(existingDates) > 1:
            hourReadStart = np.append(
                hourReadStart, int(datetime.strftime(ptm().startdate, "%H")) + 1)
            hourReadEnd = np.append(hourReadEnd, daysToRead[i] * 24)
            print('d1 ', hourReadEnd)
        #first day w backtracking:
        elif i == 0 and ptm().performBacktracking:
            hourReadStart = np.append(hourReadStart, 1)
            hourReadEnd = np.append(
                hourReadEnd,
                abs(int(datetime.strftime(ptm().startdate, "%H")) + 1 + (daysToRead[i] - 1) * 24))
            print('d2 ', hourReadEnd)
        elif 0 < i < len(existingDates) - 1:  # other days, but not the last day:
            hourReadStart = np.append(hourReadStart, 1)
            hourReadEnd = np.append(hourReadEnd, abs(daysToRead[i] * 24))
            print('d3 ', hourReadEnd)
            # last day w/o backtracking if last file exists:
        elif (i > 0 and i == len(existingDates) - 1
              and not ptm().performBacktracking and os.path.isfile(lastfile)):
            hourReadStart = np.append(hourReadStart, 1)
            hourReadEnd = np.append(hourReadEnd, lastHourLastDay + 24)
            print('d4', hourReadEnd)
            # last day w/o backtracking if last file does NOT exist:
        elif (i > 0 and i == len(existingDates) - 1
              and not ptm().performBacktracking and not os.path.isfile(lastfile)):
            hourReadStart = np.append(hourReadStart, 1)
            hourReadEnd = np.append(hourReadEnd, lastHourLastDay + 24)
            print('d5', hourReadEnd)
        elif (i > 0 and i == len(existingDates) - 1
              and ptm().performBacktracking):  # last day w/ backtracking:
            hourReadStart = np.append(hourReadStart, 1)
            hourReadEnd = np.append(hourReadEnd, abs(daysToRead[i] * 24))
            print('d6', hourReadEnd)
        else:
            continue

    # return to main:
    vsota = 0
    print('ure:', hourReadStart, hourReadEnd)

    for l, ura in enumerate(hourReadEnd):
        vsota = vsota + abs(hourReadEnd[l] - hourReadStart[l] + 1)
        print('vsota: ', vsota)

    print('led: ', existingDates)
    print('ef:', existingFiles)

    if len(existingDates) > 1:
        sumHours = int(vsota)
    else:
        sumHours = int(vsota) - 24

    existingAtmFiles = [f.replace('zeta', 'atm') for f in existingFiles]
    existingFiles = [
        f.replace('zeta.sbNADRIPOM', 'sbnadripom_uvtide')
        for f in existingFiles
    ]

    return existingFiles, existingAtmFiles, hourReadStart, hourReadEnd, sumHours
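
A hedged usage sketch: the function takes the settings class itself (so that ptm() builds the defaults shown above) and returns the file lists plus 1-based hourly read indices:

# Sketch only: 'ptm' stands for the particle-tracking settings class defined above.
files, atmFiles, hourStart, hourEnd, sumHours = checkForNeccessaryFiles(ptm)
# files     -> ocean NetCDFs to read (zeta.sbNADRIPOM renamed to sbnadripom_uvtide)
# atmFiles  -> matching atmospheric NetCDFs ('zeta' replaced by 'atm')
# hourStart, hourEnd -> per-file start/end hour indices in Fortran 1-based notation
# sumHours  -> total number of hourly records that will be read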
Example No. 8
    'buffer_size': 1,
    'train_steps': None,
    'eval_steps': 100,
    'verbose_shapes': True,
    'pool_stride': 2,
    'n_layers': 4,
    'rnn_layer': True,
    'dense_layer': True,
    'one_output_per_epoch': True,
    'training_hook_n_iter': 50,
    'throttle_secs': 120,
}

if params['n_epoch_samples'] % params['time_steps'] != 0:
    print('Warning: time steps has to divide epoch in equal parts. Quitting.')
    q()

# Setup partitions and batches in the DataHandler-class
DataHandler.setup_partitions(data_folder, params['train_pct'],
                             params['val_pct'], params['batch_size'])
if model_memory:
    with open('partitions.pkl', 'rb') as f:
        DataHandler.set_partitions(pickle.load(f))
else:
    with open('partitions.pkl', 'wb') as f:
        pickle.dump(DataHandler.get_partitions(), f)

config = tf.estimator.RunConfig(
    save_checkpoints_steps=params['save_checkpoint_steps'],
    save_summary_steps=params['save_summary_steps'])
model = Model('CRNN', params)
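
The partitions.pkl handling above lets a warm-started run (model_memory truthy) reuse exactly the same train/validation split that an earlier run saved; a generic, hedged sketch of that caching pattern (DataHandler and model_memory come from earlier, unshown code, and the helper below is illustrative only):

import os
import pickle

# Illustrative re-statement of the caching pattern above; the function name
# and the 'handler' object are hypothetical stand-ins for DataHandler.
def load_or_save_partitions(handler, reuse_previous, cache_path='partitions.pkl'):
    if reuse_previous and os.path.isfile(cache_path):
        with open(cache_path, 'rb') as f:
            handler.set_partitions(pickle.load(f))    # restore the earlier split
    else:
        with open(cache_path, 'wb') as f:
            pickle.dump(handler.get_partitions(), f)  # persist the fresh split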