def startup(): """ Parse command line arguments and call the :func:`main` function. """ parser = argparse.ArgumentParser() parser.add_argument('-c', '--config_file', help='Path to configuration file') parser.add_argument('-v', '--verbose', help='Verbose output', action='store_true') parser.add_argument('-d', '--debug', help='Allow pdb traces', action='store_true') args = parser.parse_args() configFile = args.config_file config = ConfigParser() config.read(configFile) rootdir = pathLocator.getRootDirectory() os.chdir(rootdir) logfile = config.get('Logging', 'LogFile') logdir = dirname(realpath(logfile)) # If log file directory does not exist, create it if not isdir(logdir): try: os.makedirs(logdir) except OSError: logfile = pjoin(os.getcwd(), 'processMultipliers.log') logLevel = config.get('Logging', 'LogLevel') verbose = config.getboolean('Logging', 'Verbose') datestamp = config.getboolean('Logging', 'Datestamp') debug = False if args.verbose: verbose = True if args.debug: debug = True flStartLog(logfile, logLevel, verbose, datestamp) if debug: main(configFile) else: try: modified_main(configFile) except Exception: # pylint: disable=W0703 # Catch any exceptions that occur and log them (nicely): tblines = traceback.format_exc().splitlines() for line in tblines: log.critical(line.lstrip())
def startup(): """ Parse command line arguments and call the :func:`main` function. """ parser = argparse.ArgumentParser() parser.add_argument('-c', '--config_file', help='Path to configuration file') parser.add_argument('-v', '--verbose', help='Verbose output', action='store_true') parser.add_argument('-d', '--debug', help='Allow pdb traces', action='store_true') args = parser.parse_args() config_file = args.config_file config = ConfigParser() config.read(config_file) rootdir = pathLocator.getRootDirectory() os.chdir(rootdir) logfile = config.get('Logging', 'LogFile') logdir = dirname(realpath(logfile)) # If log file directory does not exist, create it if not isdir(logdir): try: os.makedirs(logdir) except OSError: logfile = pjoin(os.getcwd(), 'tsmultipliers.log') logLevel = config.get('Logging', 'LogLevel') verbose = config.getboolean('Logging', 'Verbose') datestamp = config.getboolean('Logging', 'Datestamp') debug = False if args.verbose: verbose = True if args.debug: debug = True flStartLog(logfile, logLevel, verbose, datestamp) if debug: process_timeseries(config_file) else: try: process_timeseries(config_file) except Exception: # pylint: disable=W0703 # Catch any exceptions that occur and log them (nicely): tblines = traceback.format_exc().splitlines() for line in tblines: log.critical(line.lstrip()) log.info("Completed {0}".format(sys.argv[0]))
def __init__(self, configFile):
    """
    Initialise the hazard and location database paths from the
    configuration file and open the hazard database connection.
    """
    config = ConfigParser()
    config.read(configFile)

    self.outputPath = config.get('Output', 'Path')
    self.windfieldPath = pjoin(self.outputPath, 'windfield')
    self.trackPath = pjoin(self.outputPath, 'tracks')
    self.hazardPath = pjoin(self.outputPath, 'hazard')
    self.domain = config.geteval('Region', 'gridLimit')
    self.hazardDB = pjoin(self.outputPath, 'hazard.db')
    self.locationDB = pjoin(self.outputPath, 'locations.db')
    self.datfile = config.get('Process', 'DatFile')
    self.excludePastProcessed = config.getboolean('Process',
                                                  'ExcludePastProcessed')

    pGetProcessedFiles(self.datfile)

    sqlite3.Connection.__init__(self, self.hazardDB,
                                detect_types=PARSE_DECLTYPES | PARSE_COLNAMES)

    self.exists = True

    import atexit
    atexit.register(self.close)
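# Illustrative sketch (not part of the source): the __init__ above uses the
# pattern of subclassing sqlite3.Connection and registering close() to run at
# interpreter exit. A minimal, self-contained version of that pattern, with a
# hypothetical class name, looks like this:
import atexit
import sqlite3
from sqlite3 import PARSE_DECLTYPES, PARSE_COLNAMES


class ExampleHazardDB(sqlite3.Connection):
    """Toy connection subclass that closes itself automatically at exit."""

    def __init__(self, dbfile):
        sqlite3.Connection.__init__(self, dbfile,
                                    detect_types=PARSE_DECLTYPES | PARSE_COLNAMES)
        self.exists = True
        atexit.register(self.close)

# Usage: db = ExampleHazardDB('hazard.db')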
def startup():
    """
    Parse command line arguments, set up logging and run :func:`main`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config_file',
                        help='Path to configuration file')
    parser.add_argument('-v', '--verbose',
                        help='Verbose output',
                        action='store_true')
    parser.add_argument('-d', '--debug',
                        help='Allow pdb traces',
                        action='store_true')
    args = parser.parse_args()

    configFile = args.config_file
    config = ConfigParser()
    config.read(configFile)

    rootdir = pathLocator.getRootDirectory()
    os.chdir(rootdir)

    logfile = config.get('Logging', 'LogFile')
    logdir = dirname(realpath(logfile))

    # If log file directory does not exist, create it
    if not isdir(logdir):
        try:
            os.makedirs(logdir)
        except OSError:
            logfile = pjoin(os.getcwd(), 'tcrm.log')

    logLevel = config.get('Logging', 'LogLevel')
    verbose = config.getboolean('Logging', 'Verbose')
    debug = False

    if args.verbose:
        verbose = True

    if args.debug:
        debug = True

    flStartLog(logfile, logLevel, verbose)

    # Switch off minor warning messages
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("ignore", category=UserWarning, module="pytz")
    warnings.filterwarnings("ignore", category=UserWarning, module="numpy")
    warnings.filterwarnings("ignore", category=UserWarning,
                            module="matplotlib")
    warnings.filterwarnings("ignore", category=RuntimeWarning)

    if debug:
        main(configFile)
    else:
        try:
            main(configFile)
        except Exception:  # pylint: disable=W0703
            # Catch any exceptions that occur and log them (nicely):
            tblines = traceback.format_exc().splitlines()
            for line in tblines:
                log.critical(line.lstrip())
def run(configFile, callback=None):
    """
    Run the hazard calculations.

    This will attempt to run the calculation in parallel by tiling the
    domain, but also provides a sane fallback mechanism to execute
    in serial.

    :param str configFile: path to configuration file
    """
    log.info("Loading hazard calculation settings")

    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')
    inputPath = pjoin(outputPath, 'windfield')
    gridLimit = config.geteval('Region', 'gridLimit')
    numsimulations = config.getint('TrackGenerator', 'NumSimulations')
    yrsPerSim = config.getint('TrackGenerator', 'YearsPerSimulation')
    minRecords = config.getint('Hazard', 'MinimumRecords')
    calculate_confidence = config.getboolean('Hazard', 'CalculateCI')
    extreme_value_distribution = config.get('Hazard',
                                            'ExtremeValueDistribution')

    wf_lon, wf_lat = setDomain(inputPath)

    global MPI, comm
    MPI = attemptParallel()
    comm = MPI.COMM_WORLD

    log.info("Running hazard calculations")
    TG = TileGrid(gridLimit, wf_lon, wf_lat)
    tiles = getTiles(TG)

    #def progress(i):
    #    callback(i, len(tiles))

    comm.barrier()

    hc = HazardCalculator(configFile, TG,
                          numsimulations,
                          minRecords,
                          yrsPerSim,
                          calculate_confidence,
                          extreme_value_distribution)

    hc.dumpHazardFromTiles(tiles)
    log.debug("Finished hazard calculations")

    comm.barrier()

    hc.saveHazard()

    log.info("Completed hazard calculation")
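# Minimal sketch (an assumption about the general pattern, not the project's
# actual Utilities.parallel code): the serial fallback described in the
# docstring above can be achieved by returning a stub that mimics the small
# part of the mpi4py interface used here (COMM_WORLD, rank, size, barrier),
# so the same calling code runs unchanged on a single process.
def attempt_parallel_sketch():
    try:
        from mpi4py import MPI  # real MPI available
        return MPI
    except ImportError:
        class _SerialComm:
            rank = 0
            size = 1

            def barrier(self):
                pass  # nothing to synchronise in serial mode

        class _SerialMPI:
            COMM_WORLD = _SerialComm()

        return _SerialMPI()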
def run(configFile, callback=None):
    """
    Run the hazard calculations.

    This will attempt to run the calculation in parallel by tiling the
    domain, but also provides a sane fallback mechanism to execute
    in serial.

    :param str configFile: path to configuration file
    """
    log.info("Loading hazard calculation settings")

    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')
    inputPath = pjoin(outputPath, 'windfield')
    gridLimit = config.geteval('Region', 'gridLimit')
    numsimulations = config.getint('TrackGenerator', 'NumSimulations')
    yrsPerSim = config.getint('TrackGenerator', 'YearsPerSimulation')
    minRecords = config.getint('Hazard', 'MinimumRecords')
    calculate_confidence = config.getboolean('Hazard', 'CalculateCI')

    wf_lon, wf_lat = setDomain(inputPath)

    global pp
    pp = attemptParallel()

    log.info("Running hazard calculations")
    TG = TileGrid(gridLimit, wf_lon, wf_lat)
    tiles = getTiles(TG)

    #def progress(i):
    #    callback(i, len(tiles))

    pp.barrier()

    hc = HazardCalculator(configFile, TG,
                          numsimulations,
                          minRecords,
                          yrsPerSim,
                          calculate_confidence)

    hc.dumpHazardFromTiles(tiles)

    pp.barrier()

    hc.saveHazard()

    log.info("Completed hazard calculation")
def main(configFile):
    """
    Main function to execute the :mod:`wind` module.

    :param str configFile: Path to configuration file.
    """
    config = ConfigParser()
    config.read(configFile)
    doOutputDirectoryCreation(configFile)

    trackFile = config.get('DataProcess', 'InputFile')
    source = config.get('DataProcess', 'Source')
    delta = 1/12.
    outputPath = pjoin(config.get('Output', 'Path'), 'tracks')
    outputTrackFile = pjoin(outputPath, "tracks.interp.nc")

    # This will save interpolated track data in TCRM format:
    interpTrack = interpolateTracks.parseTracks(configFile, trackFile, source,
                                                delta, outputTrackFile,
                                                interpolation_type='akima')

    showProgressBar = config.get('Logging', 'ProgressBar')
    pbar = ProgressBar('Calculating wind fields: ', showProgressBar)

    def status(done, total):
        pbar.update(float(done)/total)

    import wind
    wind.run(configFile, status)

    import impact
    impact.run_optional(config)

    if config.getboolean('WindfieldInterface', 'PlotOutput'):
        doWindfieldPlotting(configFile)

    if config.getboolean('Timeseries', 'Extract'):
        doTimeseriesPlotting(configFile)
def doStatistics(configFile):
    """
    Calibrate the model with the :mod:`StatInterface` module.

    :param str configFile: Name of configuration file.
    """
    from DataProcess.CalcTrackDomain import CalcTrackDomain

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')
    getRMWDistFromInputData = config.getboolean('RMW',
                                                'GetRMWDistFromInputData')

    log.info('Running StatInterface')
    pbar = ProgressBar('Calibrating model: ', showProgressBar)

    # Auto-calculate track generator domain
    CalcTD = CalcTrackDomain(configFile)
    domain = CalcTD.calcDomainFromFile()

    pbar.update(0.05)

    from StatInterface import StatInterface
    statInterface = StatInterface.StatInterface(configFile,
                                                autoCalc_gridLimit=domain)
    statInterface.kdeGenesisDate()
    pbar.update(0.4)

    statInterface.kdeOrigin()
    pbar.update(0.5)

    statInterface.cdfCellBearing()
    pbar.update(0.6)

    statInterface.cdfCellSpeed()
    pbar.update(0.7)

    statInterface.cdfCellPressure()
    pbar.update(0.8)

    statInterface.calcCellStatistics()

    if getRMWDistFromInputData:
        statInterface.cdfCellSize()

    pbar.update(1.0)
    log.info('Completed StatInterface')
def __init__(self, configFile, progressbar=None):
    """
    Initialise hazard plotting settings from the configuration file.
    """
    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')
    try:
        self.localityID = config.get('Region', 'LocalityID')
    except Exception:
        self.localityID = -999999

    self.inputFile = pjoin(outputPath, 'hazard', 'hazard.nc')
    self.plotPath = pjoin(outputPath, 'plots', 'hazard')
    self.plotUnits = PlotUnits(config.get('Hazard', 'PlotSpeedUnits'))
    self.ciBounds = config.getboolean('Hazard', 'CalculateCI')
    self.fit = config.get('Hazard', 'ExtremeValueDistribution')
    self.numsimulations = config.getint("TrackGenerator", "NumSimulations")
    self.smooth = config.getboolean("Hazard", "SmoothPlots")

    self.progressbar = progressbar
    self.db = database.HazardDatabase(configFile)
def doStatistics(configFile):
    """
    Calibrate the model.

    :param str configFile: Name of configuration file.
    """
    from DataProcess.CalcTrackDomain import CalcTrackDomain

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')
    getRMWDistFromInputData = config.getboolean('RMW',
                                                'GetRMWDistFromInputData')

    log.info('Running StatInterface')
    pbar = ProgressBar('Calibrating model: ', showProgressBar)

    # Auto-calculate track generator domain
    CalcTD = CalcTrackDomain(configFile)
    domain = CalcTD.calcDomainFromFile()

    pbar.update(0.05)

    from StatInterface import StatInterface
    statInterface = StatInterface.StatInterface(configFile,
                                                autoCalc_gridLimit=domain)
    statInterface.kdeGenesisDate()
    pbar.update(0.4)

    statInterface.kdeOrigin()
    pbar.update(0.5)

    statInterface.cdfCellBearing()
    pbar.update(0.6)

    statInterface.cdfCellSpeed()
    pbar.update(0.7)

    statInterface.cdfCellPressure()
    pbar.update(0.8)

    statInterface.calcCellStatistics()

    if getRMWDistFromInputData:
        statInterface.cdfCellSize()

    pbar.update(1.0)
    log.info('Completed StatInterface')
def loadTrackFile(configFile, trackFile, source, missingValue=0,
                  calculateWindSpeed=True):
    """
    Load TC track data from the given input file, from a specified source.
    The configFile is a configuration file that contains a section called
    'source' that describes the data.
    This returns a collection of :class:`Track` objects that contains the
    details of the TC tracks in the input file.

    :param str configFile: Configuration file with a section ``source``.
    :param str trackFile: Path to a csv-formatted file containing TC data.
    :param str source: Name of the source format of the TC data. There
                       *must* be a section in ``configFile`` matching
                       this string, containing the details of the format
                       of the data.
    :param missingValue: Replace all null values in the input data with
                         this value (default=0).
    :param boolean calculateWindSpeed: Calculate maximum wind speed using
                                       a pressure-wind relation described
                                       in :func:`maxWindSpeed`

    :returns: A collection of :class:`Track` objects.
              If any of the variables are not present in the input
              dataset, they are (where possible) calculated
              (date/time/windspeed), sampled from default datasets
              (e.g. environmental pressure) or set to the missing value.

    Example::

      >>> tracks = loadTrackFile('tcrm.ini', 'IBTrACS.csv', 'IBTrACS')

    """
    logger.info("Loading %s" % trackFile)
    inputData = colReadCSV(configFile, trackFile, source) #,
                           #nullValue=missingValue)

    config = ConfigParser()
    config.read(configFile)

    inputSpeedUnits = config.get(source, 'SpeedUnits')
    inputPressureUnits = config.get(source, 'PressureUnits')
    inputLengthUnits = config.get(source, 'LengthUnits')
    inputDateFormat = config.get(source, 'DateFormat')

    if config.getboolean('DataProcess', 'FilterSeasons'):
        startSeason = config.getint('DataProcess', 'StartSeason')
        idx = np.where(inputData['season'] >= startSeason)[0]
        inputData = inputData[idx]

    # Determine the initial TC positions...
    indicator = getInitialPositions(inputData)

    # Sort date/time information
    if 'age' in inputData.dtype.names:
        year, month, day, hour, minute, datetimes = parseAge(inputData,
                                                             indicator)
        timeElapsed = inputData['age']
    else:
        year, month, day, hour, minute, datetimes = parseDates(inputData,
                                                               indicator,
                                                               inputDateFormat)
        timeElapsed = getTimeElapsed(indicator, year, month, day,
                                     hour, minute)

    # Time between observations:
    dt = getTimeDelta(year, month, day, hour, minute)

    # Calculate julian days
    jdays = julianDays(year, month, day, hour, minute)

    lat = np.array(inputData['lat'], 'd')
    lon = np.mod(np.array(inputData['lon'], 'd'), 360)
    delta_lon = np.diff(lon)
    delta_lat = np.diff(lat)

    # Split into separate tracks if large jump occurs (delta_lon > 10 degrees
    # or delta_lat > 5 degrees). This avoids two tracks being accidentally
    # combined when seasons and track numbers match but basins are different,
    # as occurs in the IBTrACS dataset. This problem can also be prevented if
    # the 'tcserialno' column is specified.
    indicator[np.where(delta_lon > 10)[0] + 1] = 1
    indicator[np.where(delta_lat > 5)[0] + 1] = 1

    pressure = filterPressure(np.array(inputData['pressure'], 'd'),
                              inputPressureUnits, missingValue)
    try:
        windspeed = np.array(inputData['vmax'], 'd')
        novalue_index = np.where(windspeed == sys.maxint)
        windspeed = metutils.convert(windspeed, inputSpeedUnits, "mps")
        windspeed[novalue_index] = missingValue
    except (ValueError, KeyError):
        logger.debug("No max wind speed data - all values will be zero")
        windspeed = np.zeros(indicator.size, 'f')

    assert lat.size == indicator.size
    assert lon.size == indicator.size
    assert pressure.size == indicator.size

    try:
        rmax = np.array(inputData['rmax'])
        novalue_index = np.where(rmax == missingValue)
        rmax = metutils.convert(rmax, inputLengthUnits, "km")
        rmax[novalue_index] = missingValue
    except (ValueError, KeyError):
        logger.debug("No radius to max wind data - all values will be zero")
        rmax = np.zeros(indicator.size, 'f')

    if 'penv' in inputData.dtype.names:
        penv = np.array(inputData['penv'], 'd')
    else:
        logger.debug("No ambient MSLP data in this input file")
        logger.debug("Sampling data from MSLP data defined in "
                     "configuration file")
        # Warning: using sampled data will likely lead to some odd behaviour
        # near the boundary of the MSLP grid boundaries - higher resolution
        # MSLP data will decrease this unusual behaviour.
        try:
            ncfile = cnfGetIniValue(configFile, 'Input', 'MSLPFile')
        except:
            logger.exception("No input MSLP file specified in configuration")
            raise
        time = getTime(year, month, day, hour, minute)
        penv = ltmPressure(jdays, time, lon, lat, ncfile)

    speed, bearing = getSpeedBearing(indicator, lon, lat, dt,
                                     missingValue=missingValue)

    if calculateWindSpeed:
        windspeed = maxWindSpeed(indicator, dt, lon, lat, pressure, penv)

    TCID = np.cumsum(indicator)

    data = np.empty(len(indicator),
                    dtype={'names': trackFields,
                           'formats': trackTypes})

    for key, value in zip(trackFields,
                          [indicator, TCID, year, month,
                           day, hour, minute, timeElapsed, datetimes,
                           lon, lat, speed, bearing,
                           pressure, windspeed, rmax, penv]):
        data[key] = value

    tracks = []
    n = np.max(TCID)
    for i in range(1, n + 1):
        track = Track(data[TCID == i])
        track.trackId = (i, n)
        track.trackfile = trackFile
        getMinPressure(track, missingValue)
        getMaxWind(track, missingValue)
        tracks.append(track)

    return tracks
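# Worked example (illustrative only) of the track-splitting step above: a jump
# of more than 10 degrees of longitude (or 5 degrees of latitude) between
# consecutive records flags the following record as the start of a new track.
import numpy as np

lon = np.array([150.0, 151.0, 152.0, 290.0, 291.0])  # hypothetical longitudes
indicator = np.array([1, 0, 0, 0, 0])                 # 1 marks a track start
delta_lon = np.diff(lon)                              # [1., 1., 138., 1.]
indicator[np.where(delta_lon > 10)[0] + 1] = 1        # -> [1, 0, 0, 1, 0]
# np.cumsum(indicator) then assigns track IDs [1, 1, 1, 2, 2]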
def startup(): """ Parse command line arguments, set up logging and attempt to execute the main TCRM functions. """ parser = argparse.ArgumentParser() parser.add_argument('-c', '--config_file', help='The configuration file') parser.add_argument('-v', '--verbose', help='Verbose output', action='store_true') parser.add_argument('-d', '--debug', help='Allow pdb traces', action='store_true') args = parser.parse_args() configFile = args.config_file rootdir = pathLocator.getRootDirectory() os.chdir(rootdir) config = ConfigParser() config.read(configFile) logfile = config.get('Logging', 'LogFile') logdir = dirname(realpath(logfile)) # If log file directory does not exist, create it if not isdir(logdir): try: os.makedirs(logdir) except OSError: logfile = pjoin(os.getcwd(), 'tcrm.log') logLevel = config.get('Logging', 'LogLevel') verbose = config.getboolean('Logging', 'Verbose') datestamp = config.getboolean('Logging', 'Datestamp') debug = False if args.verbose: verbose = True if args.debug: debug = True global MPI, comm MPI = attemptParallel() import atexit atexit.register(MPI.Finalize) comm = MPI.COMM_WORLD if comm.size > 1 and comm.rank > 0: logfile += '-' + str(comm.rank) verbose = False # to stop output to console else: pass #codeStatus = status() #print __doc__ + codeStatus flStartLog(logfile, logLevel, verbose, datestamp) # Switch off minor warning messages import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) warnings.filterwarnings("ignore", category=UserWarning, module="pytz") warnings.filterwarnings("ignore", category=UserWarning, module="numpy") warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib") warnings.filterwarnings("ignore", category=RuntimeWarning) checkModules() if debug: main(configFile) else: try: main(configFile) except ImportError as e: log.critical("Missing module: {0}".format(e)) tblines = traceback.format_exc().splitlines() for line in tblines: log.critical(line.lstrip()) except Exception: # pylint: disable=W0703 # Catch any exceptions that occur and log them (nicely): tblines = traceback.format_exc().splitlines() for line in tblines: log.critical(line.lstrip())
def main(configFile='main.ini'):
    """
    Main interface of TCRM that allows control and interaction with the
    5 interfaces: DataProcess, StatInterface, TrackGenerator,
    WindfieldInterface and HazardInterface

    :param str configFile: Name of file containing configuration settings
                           for running TCRM
    """
    log.info("Starting TCRM")
    log.info("Configuration file: %s", configFile)

    doOutputDirectoryCreation(configFile)

    config = ConfigParser()
    config.read(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'DownloadData'):
        doDataDownload(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'DataProcess'):
        doDataProcessing(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'ExecuteStat'):
        doStatistics(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'ExecuteTrackGenerator'):
        doTrackGeneration(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'ExecuteWindfield'):
        doWindfieldCalculations(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'ExecuteHazard'):
        doHazard(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'PlotData'):
        doDataPlotting(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'CreateDatabase'):
        doDatabaseUpdate(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'ExecuteEvaluate'):
        doEvaluation(config)

    comm.barrier()

    if config.getboolean('Actions', 'PlotHazard'):
        doHazardPlotting(configFile)

    comm.barrier()

    log.info('Completed TCRM')
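# Illustrative only (option names taken from the getboolean calls above,
# values are hypothetical): main() is driven entirely by boolean switches in
# an [Actions] section of the configuration file. The stdlib ConfigParser is
# used here for a self-contained demonstration; the project uses its own
# ConfigParser subclass.
from configparser import ConfigParser as _StdConfigParser

_example_actions = """
[Actions]
DownloadData = True
DataProcess = True
ExecuteStat = True
ExecuteTrackGenerator = True
ExecuteWindfield = True
ExecuteHazard = True
PlotData = False
CreateDatabase = True
ExecuteEvaluate = False
PlotHazard = True
"""

_cfg = _StdConfigParser()
_cfg.read_string(_example_actions)
assert _cfg.getboolean('Actions', 'ExecuteHazard')  # same check main() performs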
def __init__(self): """ Parse command line arguments and call the :func:`main` function. """ parser = argparse.ArgumentParser() parser.add_argument('-c', '--config_file', help='Path to configuration file') parser.add_argument('-v', '--verbose', help='Verbose output', action='store_true') parser.add_argument('-d', '--debug', help='Allow pdb traces', action='store_true') args = parser.parse_args() self.configFile = args.config_file config = ConfigParser() config.read(self.configFile) logfile = config.get('Logging', 'LogFile') logdir = dirname(realpath(logfile)) # If log file directory does not exist, create it if not isdir(logdir): try: os.makedirs(logdir) except OSError: logfile = pjoin(os.getcwd(), 'processMultipliers.log') logLevel = config.get('Logging', 'LogLevel') verbose = config.getboolean('Logging', 'Verbose') datestamp = config.getboolean('Logging', 'Datestamp') if args.verbose: verbose = True flStartLog(logfile, logLevel, verbose, datestamp) # Switch off minor warning messages import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) warnings.filterwarnings("ignore", category=UserWarning, module="pytz") warnings.filterwarnings("ignore", category=UserWarning, module="numpy") warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib") warnings.filterwarnings("ignore", category=RuntimeWarning) self.working_dir = config.get('Output', 'Working_dir') self.gust_file = config.get('Input', 'Gust_file') tiles = config.get('Input', 'Tiles') self.tiles = [item.strip() for item in tiles.split(',')] log.debug('List of tiles to be processed: {0}'.format(self.tiles)) log.info('Multipliers will be written out to %s', self.working_dir) # Get the multipliers, and process them if need be self.type_mapping = { 'shielding': 'Ms', 'terrain': 'Mz', 'topographic': 'Mt' } self.dirns = ['e', 'n', 'ne', 'nw', 's', 'se', 'sw', 'w'] rootdir = pathLocator.getRootDirectory() os.chdir(rootdir) try: self.main() except ImportError as e: log.critical("Missing module: {0}".format(e.strerror)) except Exception: # pylint: disable=W0703 # Catch any exceptions that occur and log them (nicely): tblines = traceback.format_exc().splitlines() for line in tblines: log.critical(line.lstrip())
def main(configFile='main.ini'):
    """
    Main interface of TCRM that allows control and interaction with the
    5 interfaces: DataProcess, StatInterface, TrackGenerator,
    WindfieldInterface and HazardInterface

    :param str configFile: Name of file containing configuration settings
                           for running TCRM
    """
    log.info("Starting TCRM")
    log.info("Configuration file: %s", configFile)

    doOutputDirectoryCreation(configFile)

    config = ConfigParser()
    config.read(configFile)

    pp.barrier()

    if config.getboolean('Actions', 'DownloadData'):
        doDataDownload(configFile)

    pp.barrier()

    if config.getboolean('Actions', 'DataProcess'):
        doDataProcessing(configFile)

    pp.barrier()

    if config.getboolean('Actions', 'ExecuteStat'):
        doStatistics(configFile)

    pp.barrier()

    if config.getboolean('Actions', 'ExecuteTrackGenerator'):
        doTrackGeneration(configFile)

    pp.barrier()

    if config.getboolean('Actions', 'ExecuteWindfield'):
        doWindfieldCalculations(configFile)

    pp.barrier()

    if config.getboolean('Actions', 'ExecuteHazard'):
        doHazard(configFile)

    pp.barrier()

    if config.getboolean('Actions', 'PlotData'):
        doDataPlotting(configFile)

    pp.barrier()

    if config.getboolean('Actions', 'PlotHazard'):
        doHazardPlotting(configFile)

    pp.barrier()

    if config.getboolean('Actions', 'ExecuteEvaluate'):
        doEvaluation(config)

    pp.barrier()

    log.info('Completed TCRM')
def run(configFile, callback=None):
    """
    Run the wind field calculations.

    :param str configFile: path to a configuration file.
    :param func callback: optional callback function to track progress.
    """
    log.info('Loading wind field calculation settings')

    # Get configuration
    config = ConfigParser()
    config.read(configFile)

    profileType = config.get('WindfieldInterface', 'profileType')
    windFieldType = config.get('WindfieldInterface', 'windFieldType')
    beta = config.getfloat('WindfieldInterface', 'beta')
    beta1 = config.getfloat('WindfieldInterface', 'beta1')
    beta2 = config.getfloat('WindfieldInterface', 'beta2')
    thetaMax = config.getfloat('WindfieldInterface', 'thetaMax')
    margin = config.getfloat('WindfieldInterface', 'Margin')
    resolution = config.getfloat('WindfieldInterface', 'Resolution')
    domain = config.get('WindfieldInterface', 'Domain')

    outputPath = config.get('Output', 'Path')
    windfieldPath = pjoin(outputPath, 'windfield')
    trackPath = pjoin(outputPath, 'tracks')

    gridLimit = None
    if config.has_option('Region', 'gridLimit'):
        gridLimit = config.geteval('Region', 'gridLimit')

    if config.has_option('WindfieldInterface', 'gridLimit'):
        gridLimit = config.geteval('WindfieldInterface', 'gridLimit')

    if config.getboolean('Timeseries', 'Extract', fallback=False):
        from Utilities.timeseries import Timeseries
        ts = Timeseries(configFile)
        timestepCallback = ts.extract
    else:
        timestepCallback = None

    multipliers = None
    if config.has_option('Input', 'Multipliers'):
        multipliers = config.get('Input', 'Multipliers')

    thetaMax = math.radians(thetaMax)

    # Attempt to start the track generator in parallel
    global MPI
    MPI = attemptParallel()
    comm = MPI.COMM_WORLD

    log.info('Running windfield generator')

    wfg = WindfieldGenerator(config=config,
                             margin=margin,
                             resolution=resolution,
                             profileType=profileType,
                             windFieldType=windFieldType,
                             beta=beta,
                             beta1=beta1,
                             beta2=beta2,
                             thetaMax=thetaMax,
                             gridLimit=gridLimit,
                             domain=domain,
                             multipliers=multipliers,
                             windfieldPath=windfieldPath)

    log.info(f'Dumping gusts to {windfieldPath}')

    # Get the trackfile names and count
    files = os.listdir(trackPath)
    trackfiles = [pjoin(trackPath, f) for f in files
                  if f.startswith('tracks')]
    nfiles = len(trackfiles)

    log.info('Processing {0} track files in {1}'.format(nfiles, trackPath))

    # Do the work
    comm.barrier()

    wfg.dumpGustsFromTrackfiles(trackfiles, windfieldPath, timestepCallback)
    try:
        ts.shutdown()
    except NameError:
        pass

    comm.barrier()

    log.info('Completed windfield generator')
def startup(): """ Parse command line arguments, set up logging and attempt to execute the main TCRM functions. """ parser = argparse.ArgumentParser() parser.add_argument('-c', '--config_file', help='The configuration file') parser.add_argument('-v', '--verbose', help='Verbose output', action='store_true') parser.add_argument('-d', '--debug', help='Allow pdb traces', action='store_true') args = parser.parse_args() configFile = args.config_file rootdir = pathLocator.getRootDirectory() os.chdir(rootdir) config = ConfigParser() config.read(configFile) logfile = config.get('Logging', 'LogFile') logdir = dirname(realpath(logfile)) # If log file directory does not exist, create it if not isdir(logdir): try: os.makedirs(logdir) except OSError: logfile = pjoin(os.getcwd(), 'tcrm.log') logLevel = config.get('Logging', 'LogLevel') verbose = config.getboolean('Logging', 'Verbose') datestamp = config.getboolean('Logging', 'Datestamp') debug = False if args.verbose: verbose = True #if not verbose: # logLevel = 'ERROR' # verbose = True if args.debug: debug = True global pp pp = attemptParallel() import atexit atexit.register(pp.finalize) if pp.size() > 1 and pp.rank() > 0: logfile += '-' + str(pp.rank()) verbose = False # to stop output to console else: pass #codeStatus = status() #print __doc__ + codeStatus flStartLog(logfile, logLevel, verbose, datestamp) # Switch off minor warning messages import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) warnings.filterwarnings("ignore", category=UserWarning, module="pytz") warnings.filterwarnings("ignore", category=UserWarning, module="numpy") warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib") warnings.filterwarnings("ignore", category=RuntimeWarning) if debug: main(configFile) else: try: main(configFile) except Exception: # pylint: disable=W0703 # Catch any exceptions that occur and log them (nicely): tblines = traceback.format_exc().splitlines() for line in tblines: log.critical(line.lstrip())
def run(configFile, callback=None):
    """
    Run the wind field calculations.

    :param str configFile: path to a configuration file.
    :param func callback: optional callback function to track progress.
    """
    log.info('Loading wind field calculation settings')

    # Get configuration
    config = ConfigParser()
    config.read(configFile)

    profileType = config.get('WindfieldInterface', 'profileType')
    windFieldType = config.get('WindfieldInterface', 'windFieldType')
    beta = config.getfloat('WindfieldInterface', 'beta')
    beta1 = config.getfloat('WindfieldInterface', 'beta1')
    beta2 = config.getfloat('WindfieldInterface', 'beta2')
    thetaMax = config.getfloat('WindfieldInterface', 'thetaMax')
    margin = config.getfloat('WindfieldInterface', 'Margin')
    resolution = config.getfloat('WindfieldInterface', 'Resolution')
    domain = config.get('WindfieldInterface', 'Domain')

    outputPath = config.get('Output', 'Path')
    windfieldPath = pjoin(outputPath, 'windfield')
    trackPath = pjoin(outputPath, 'tracks')

    gridLimit = None
    if config.has_option('Region', 'gridLimit'):
        gridLimit = config.geteval('Region', 'gridLimit')

    if config.has_option('WindfieldInterface', 'gridLimit'):
        gridLimit = config.geteval('WindfieldInterface', 'gridLimit')

    if config.has_section('Timeseries'):
        if config.has_option('Timeseries', 'Extract'):
            if config.getboolean('Timeseries', 'Extract'):
                from Utilities.timeseries import Timeseries
                log.debug("Timeseries data will be extracted")
                ts = Timeseries(configFile)
                timestepCallback = ts.extract
            else:
                def timestepCallback(*args):
                    """Dummy timestepCallback function"""
                    pass
        else:
            def timestepCallback(*args):
                """Dummy timestepCallback function"""
                pass

    thetaMax = math.radians(thetaMax)

    # Attempt to start the track generator in parallel
    global pp
    pp = attemptParallel()

    log.info('Running windfield generator')

    wfg = WindfieldGenerator(config=config,
                             margin=margin,
                             resolution=resolution,
                             profileType=profileType,
                             windFieldType=windFieldType,
                             beta=beta,
                             beta1=beta1,
                             beta2=beta2,
                             thetaMax=thetaMax,
                             gridLimit=gridLimit,
                             domain=domain)

    msg = 'Dumping gusts to %s' % windfieldPath
    log.info(msg)

    # Get the trackfile names and count
    files = os.listdir(trackPath)
    trackfiles = [pjoin(trackPath, f) for f in files
                  if f.startswith('tracks')]
    nfiles = len(trackfiles)

    def progressCallback(i):
        """Define the callback function"""
        callback(i, nfiles)

    msg = 'Processing %d track files in %s' % (nfiles, trackPath)
    log.info(msg)

    # Do the work
    pp.barrier()

    wfg.dumpGustsFromTrackfiles(trackfiles, windfieldPath, timestepCallback)
    try:
        ts.shutdown()
    except NameError:
        pass

    pp.barrier()

    log.info('Completed windfield generator')
config_file = args.config_file
config = ConfigParser()
config.read(config_file)

logfile = config.get('Logging', 'LogFile')
logdir = dirname(realpath(logfile))

# If log file directory does not exist, create it
if not isdir(logdir):
    try:
        os.makedirs(logdir)
    except OSError:
        logfile = pjoin(os.getcwd(), 'tracks2shp.log')

logLevel = config.get('Logging', 'LogLevel')
verbose = config.getboolean('Logging', 'Verbose')
datestamp = config.getboolean('Logging', 'Datestamp')

if args.verbose:
    verbose = True

flStartLog(logfile, logLevel, verbose, datestamp)

if args.file:
    track_file = args.file
else:
    track_file = config.get('DataProcess', 'InputFile')

if args.source:
    source = args.source
else:
def run(configFile, callback=None):
    """
    Run the wind field calculations.

    :param str configFile: path to a configuration file.
    :param func callback: optional callback function to track progress.
    """
    log.info('Loading wind field calculation settings')

    # Get configuration
    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')
    profileType = config.get('WindfieldInterface', 'profileType')
    windFieldType = config.get('WindfieldInterface', 'windFieldType')
    beta = config.getfloat('WindfieldInterface', 'beta')
    beta1 = config.getfloat('WindfieldInterface', 'beta1')
    beta2 = config.getfloat('WindfieldInterface', 'beta2')
    thetaMax = config.getfloat('WindfieldInterface', 'thetaMax')
    margin = config.getfloat('WindfieldInterface', 'Margin')
    resolution = config.getfloat('WindfieldInterface', 'Resolution')
    domain = config.get('WindfieldInterface', 'Domain')

    windfieldPath = pjoin(outputPath, 'windfield')
    trackPath = pjoin(outputPath, 'tracks')
    windfieldFormat = 'gust-%i-%04d.nc'

    gridLimit = None
    if config.has_option('Region', 'gridLimit'):
        gridLimit = config.geteval('Region', 'gridLimit')

    if config.has_option('WindfieldInterface', 'gridLimit'):
        gridLimit = config.geteval('WindfieldInterface', 'gridLimit')

    if config.has_section('Timeseries'):
        if config.has_option('Timeseries', 'Extract'):
            if config.getboolean('Timeseries', 'Extract'):
                from Utilities.timeseries import Timeseries
                log.debug("Timeseries data will be extracted")
                ts = Timeseries(configFile)
                timestepCallback = ts.extract
            else:
                def timestepCallback(*args):
                    """Dummy timestepCallback function"""
                    pass

    thetaMax = math.radians(thetaMax)

    # Attempt to start the track generator in parallel
    global pp
    pp = attemptParallel()

    log.info('Running windfield generator')

    wfg = WindfieldGenerator(config=config,
                             margin=margin,
                             resolution=resolution,
                             profileType=profileType,
                             windFieldType=windFieldType,
                             beta=beta,
                             beta1=beta1,
                             beta2=beta2,
                             thetaMax=thetaMax,
                             gridLimit=gridLimit,
                             domain=domain)

    msg = 'Dumping gusts to %s' % windfieldPath
    log.info(msg)

    # Get the trackfile names and count
    files = os.listdir(trackPath)
    trackfiles = [pjoin(trackPath, f) for f in files
                  if f.startswith('tracks')]
    nfiles = len(trackfiles)

    def progressCallback(i):
        """Define the callback function"""
        callback(i, nfiles)

    msg = 'Processing %d track files in %s' % (nfiles, trackPath)
    log.info(msg)

    # Do the work
    pp.barrier()

    wfg.dumpGustsFromTrackfiles(trackfiles, windfieldPath, windfieldFormat,
                                progressCallback, timestepCallback)
    try:
        ts.shutdown()
    except NameError:
        pass

    pp.barrier()

    log.info('Completed windfield generator')
def startup(): """ Parse the command line arguments and call the :func:`main` function. """ parser = argparse.ArgumentParser() parser.add_argument('-c', '--config_file', help='Path to configuration file') parser.add_argument('-v', '--verbose', help='Verbose output', action='store_true') parser.add_argument('-d', '--debug', help='Allow pdb traces', action='store_true') args = parser.parse_args() configFile = args.config_file config = ConfigParser() config.read(configFile) rootdir = pathLocator.getRootDirectory() os.chdir(rootdir) logfile = config.get('Logging', 'LogFile') logdir = dirname(realpath(logfile)) # If log file directory does not exist, create it if not isdir(logdir): try: os.makedirs(logdir) except OSError: logfile = pjoin(os.getcwd(), 'tcrm.log') logLevel = config.get('Logging', 'LogLevel') verbose = config.getboolean('Logging', 'Verbose') datestamp = config.getboolean('Logging', 'Datestamp') debug = False if args.verbose: verbose = True if args.debug: debug = True flStartLog(logfile, logLevel, verbose, datestamp) # Switch off minor warning messages import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) warnings.filterwarnings("ignore", category=UserWarning, module="pytz") warnings.filterwarnings("ignore", category=UserWarning, module="numpy") warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib") warnings.filterwarnings("ignore", category=RuntimeWarning) if debug: main(configFile) else: try: main(configFile) except ImportError as e: log.critical("Missing module: {0}".format(e.strerror)) except Exception: # pylint: disable=W0703 # Catch any exceptions that occur and log them (nicely): tblines = traceback.format_exc().splitlines() for line in tblines: log.critical(line.lstrip())
def loadTrackFile(configFile, trackFile, source, missingValue=0,
                  calculateWindSpeed=True):
    """
    Load TC track data from the given input file, from a specified source.
    The configFile is a configuration file that contains a section called
    'source' that describes the data.
    This returns a collection of :class:`Track` objects that contains the
    details of the TC tracks in the input file.

    :param str configFile: Configuration file with a section ``source``.
    :param str trackFile: Path to a csv-formatted file containing TC data.
    :param str source: Name of the source format of the TC data. There
                       *must* be a section in ``configFile`` matching
                       this string, containing the details of the format
                       of the data.
    :param missingValue: Replace all null values in the input data with
                         this value (default=0).
    :param boolean calculateWindSpeed: Calculate maximum wind speed using
                                       a pressure-wind relation described
                                       in :func:`maxWindSpeed`

    :returns: A collection of :class:`Track` objects.
              If any of the variables are not present in the input
              dataset, they are (where possible) calculated
              (date/time/windspeed), sampled from default datasets
              (e.g. environmental pressure) or set to the missing value.

    Example::

      >>> tracks = loadTrackFile('tcrm.ini', 'IBTrACS.csv', 'IBTrACS')

    """
    LOG.info("Loading %s" % trackFile)
    inputData = colReadCSV(configFile, trackFile, source) #,
                           #nullValue=missingValue)

    config = ConfigParser()
    config.read(configFile)

    inputSpeedUnits = config.get(source, 'SpeedUnits')
    inputPressureUnits = config.get(source, 'PressureUnits')
    inputLengthUnits = config.get(source, 'LengthUnits')
    inputDateFormat = config.get(source, 'DateFormat')

    if config.getboolean('DataProcess', 'FilterSeasons'):
        startSeason = config.getint('DataProcess', 'StartSeason')
        idx = np.where(inputData['season'] >= startSeason)[0]
        inputData = inputData[idx]

    # Determine the initial TC positions...
    indicator = getInitialPositions(inputData)

    # Sort date/time information
    if 'age' in inputData.dtype.names:
        year, month, day, hour, minute, datetimes = parseAge(
            inputData, indicator)
        timeElapsed = inputData['age']
    else:
        year, month, day, hour, minute, datetimes = parseDates(
            inputData, indicator, inputDateFormat)
        timeElapsed = getTimeElapsed(indicator, year, month, day,
                                     hour, minute)

    # Time between observations:
    dt = getTimeDelta(year, month, day, hour, minute)

    # Calculate julian days
    jdays = julianDays(year, month, day, hour, minute)

    lat = np.array(inputData['lat'], 'd')
    lon = np.mod(np.array(inputData['lon'], 'd'), 360)
    delta_lon = np.diff(lon)
    delta_lat = np.diff(lat)

    # Split into separate tracks if large jump occurs (delta_lon > 10 degrees
    # or delta_lat > 5 degrees). This avoids two tracks being accidentally
    # combined when seasons and track numbers match but basins are different,
    # as occurs in the IBTrACS dataset. This problem can also be prevented if
    # the 'tcserialno' column is specified.
    indicator[np.where(delta_lon > 10)[0] + 1] = 1
    indicator[np.where(delta_lat > 5)[0] + 1] = 1

    pressure = filterPressure(np.array(inputData['pressure'], 'd'),
                              inputPressureUnits, missingValue)
    try:
        windspeed = np.array(inputData['vmax'], 'd')
        novalue_index = np.where(windspeed == sys.maxint)
        windspeed = metutils.convert(windspeed, inputSpeedUnits, "mps")
        windspeed[novalue_index] = missingValue
    except (ValueError, KeyError):
        LOG.debug("No max wind speed data - all values will be zero")
        windspeed = np.zeros(indicator.size, 'f')

    assert lat.size == indicator.size
    assert lon.size == indicator.size
    assert pressure.size == indicator.size

    try:
        rmax = np.array(inputData['rmax'])
        novalue_index = np.where(rmax == missingValue)
        rmax = metutils.convert(rmax, inputLengthUnits, "km")
        rmax[novalue_index] = missingValue
    except (ValueError, KeyError):
        LOG.debug("No radius to max wind data - all values will be zero")
        rmax = np.zeros(indicator.size, 'f')

    if 'penv' in inputData.dtype.names:
        penv = np.array(inputData['penv'], 'd')
    else:
        LOG.debug("No ambient MSLP data in this input file")
        LOG.debug("Sampling data from MSLP data defined in "
                  "configuration file")
        # Warning: using sampled data will likely lead to some odd behaviour
        # near the boundary of the MSLP grid boundaries - higher resolution
        # MSLP data will decrease this unusual behaviour.
        try:
            ncfile = cnfGetIniValue(configFile, 'Input', 'MSLPFile')
        except:
            LOG.exception("No input MSLP file specified in configuration")
            raise
        time = getTime(year, month, day, hour, minute)
        penv = ltmPressure(jdays, time, lon, lat, ncfile)

    if 'poci' in inputData.dtype.names:
        poci = np.array(inputData['poci'], 'd')
    else:
        LOG.debug("Determining poci")
        eps = np.random.normal(0, scale=2.5717)
        poci = getPoci(penv, pressure, lat, jdays, eps)

    speed, bearing = getSpeedBearing(indicator, lon, lat, dt,
                                     missingValue=missingValue)

    if calculateWindSpeed:
        windspeed = maxWindSpeed(indicator, dt, lon, lat, pressure, poci)

    TCID = np.cumsum(indicator)

    data = np.empty(len(indicator),
                    dtype={'names': trackFields,
                           'formats': trackTypes})

    for key, value in zip(trackFields,
                          [indicator, TCID, year, month,
                           day, hour, minute, timeElapsed, datetimes,
                           lon, lat, speed, bearing,
                           pressure, windspeed, rmax, poci]):
        data[key] = value

    tracks = []
    n = np.max(TCID)
    for i in range(1, n + 1):
        track = Track(data[TCID == i])
        track.trackId = (i, n)
        track.trackfile = trackFile
        getMinPressure(track, missingValue)
        getMaxWind(track, missingValue)
        tracks.append(track)

    return tracks