def __init__(self, configFile, kdeType, gridLimit, kdeStep, lonLat=None, progressbar=None):
    """
    Initialise the KDE origin generator.

    :param str configFile: path to a TCRM configuration file.
    :param str kdeType: name of the kernel density estimator to use.
    :param dict gridLimit: domain bounds with keys 'xMin', 'xMax',
        'yMin' and 'yMax'.
    :param float kdeStep: grid resolution (degrees) for the KDE grid.
    :param lonLat: optional array of cyclone origin positions; the first
        two columns are taken as longitude and latitude. If not given,
        positions are loaded from the 'init_lon_lat' file in the process
        directory named in the configuration.
    :param progressbar: optional progress bar instance.
    """
    self.logger = logging.getLogger()
    self.progressbar = progressbar
    if self.progressbar:
        # Let the KPDF extension report progress back to the caller.
        KPDF.set_callback(self.updateProgressBar)
    self.logger.info("Initialising KDEOrigins")
    self.configFile = configFile
    # Grid axes: y runs from yMax downward, so row 0 is the northern edge.
    self.x = numpy.arange(gridLimit['xMin'], gridLimit['xMax'], kdeStep)
    self.y = numpy.arange(gridLimit['yMax'], gridLimit['yMin'], -kdeStep)
    self.kdeType = kdeType
    self.kdeStep = kdeStep
    config = ConfigParser()
    config.read(configFile)
    if lonLat is None:
        # No origins supplied - load them from the process directory.
        self.outputPath = config.get('Output', 'Path')
        self.processPath = os.path.join(self.outputPath, 'process')
        self.logger.debug("Loading "+os.path.join(self.processPath, 'init_lon_lat'))
        ll = flLoadFile(os.path.join(self.processPath, 'init_lon_lat'), '%', ',')
        self.lonLat = ll[:,0:2]
    else:
        self.lonLat = lonLat[:,0:2]
    # Optimal KDE bandwidth for the origin positions.
    self.bw = KPDF.MPDFOptimumBandwidth(self.lonLat)
    self.logger.debug("Optimal bandwidth: %f"%self.bw)
def doOutputDirectoryCreation(configFile):
    """
    Create all the necessary output folders.

    :param str configFile: Name of configuration file.
    :raises OSError: If the directory tree cannot be created.
    """
    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')
    log.info('Output will be stored under %s', outputPath)

    subdirs = ['tracks', 'hazard', 'windfield', 'plots', 'plots/hazard',
               'plots/stats', 'log', 'process', 'process/timeseries',
               'process/dat']

    # os.makedirs(..., exist_ok=True) is a no-op for an existing directory
    # and still raises OSError if a path component exists as a
    # non-directory, so the original isdir()/try/except-reraise dance is
    # unnecessary (FileExistsError is a subclass of OSError, so catching
    # both and re-raising changed nothing).
    os.makedirs(outputPath, exist_ok=True)
    for subdir in subdirs:
        os.makedirs(realpath(pjoin(outputPath, subdir)), exist_ok=True)
def doHazardPlotting(configFile):
    """
    Do the hazard plots.

    :param str configFile: Name of configuration file.
    """
    import matplotlib
    matplotlib.use('Agg')  # non-interactive backend for batch plotting

    cfg = ConfigParser()
    cfg.read(configFile)

    log.info('Plotting Hazard Maps')

    progress = ProgressBar('Plotting hazard maps: ',
                           cfg.get('Logging', 'ProgressBar'))
    progress.update(0.0)

    from PlotInterface.AutoPlotHazard import AutoPlotHazard
    hazardPlotter = AutoPlotHazard(configFile, progressbar=progress)
    hazardPlotter.plotMap()
    hazardPlotter.plotCurves()

    progress.update(1.0)
def historic(self):
    """
    Load the historical track data and calculate the mean annual
    histogram.

    Reads the input track file named in the configuration (resolving a
    bare file name against ``self.inputPath``), loads the tracks and
    stores the per-year histogram in ``self.hist``.

    :raises TypeError, IOError, ValueError: if the historical track
        file cannot be loaded.
    """
    config = ConfigParser()
    config.read(self.configFile)
    inputFile = config.get('DataProcess', 'InputFile')
    if len(os.path.dirname(inputFile)) == 0:
        # Bare file name - assume it lives in the input directory.
        inputFile = pjoin(self.inputPath, inputFile)
    source = config.get('DataProcess', 'Source')

    try:
        tracks = loadTrackFile(self.configFile, inputFile, source)
    except (TypeError, IOError, ValueError):
        log.critical("Cannot load historical track file: {0}".format(inputFile))
        raise
    else:
        startYr = 9999
        endYr = 0
        for t in tracks:
            startYr = min(startYr, min(t.Year))
            endYr = max(endYr, max(t.Year))
        # Guard against division by zero: the original 'endYr - startYr'
        # is 0 when all tracks fall within a single calendar year (and
        # negative when the track list is empty).
        numYears = max(endYr - startYr, 1)
        log.info("Range of years: %d - %d" % (startYr, endYr))
        self.hist = self._calculate(tracks) / numYears
def doTrackGeneration(configFile):
    """
    Do the tropical cyclone track generation.

    The track generation settings are read from *configFile*.

    :param str configFile: Name of configuration file.
    """
    log.info('Starting track generation')

    cfg = ConfigParser()
    cfg.read(configFile)

    progress = ProgressBar('Simulating cyclone tracks: ',
                           cfg.get('Logging', 'ProgressBar'))

    def status(done, total):
        # Report fractional completion to the progress bar.
        progress.update(float(done) / total)

    import TrackGenerator
    TrackGenerator.run(configFile, status)
    progress.update(1.0)

    log.info('Completed track generation')
def doWindfieldCalculations(configFile):
    """
    Do the wind field calculations.

    The wind field settings are read from *configFile*.

    :param str configFile: Name of configuration file.
    """
    log.info('Starting wind field calculations')

    cfg = ConfigParser()
    cfg.read(configFile)

    progress = ProgressBar('Calculating wind fields: ',
                           cfg.get('Logging', 'ProgressBar'))

    def status(done, total):
        # Report fractional completion to the progress bar.
        progress.update(float(done) / total)

    import wind
    wind.run(configFile, status)
    progress.update(1.0)

    log.info('Completed wind field calculations')
def doHazardPlotting(configFile):
    """
    Do the hazard plots (hazard maps and curves for all locations within
    the model domain). Plotting is performed by the
    :mod:`PlotInterface.AutoPlotHazard` module.

    :param str configFile: Name of configuration file.
    """
    import matplotlib
    matplotlib.use('Agg')  # select a non-interactive backend

    conf = ConfigParser()
    conf.read(configFile)

    log.info('Plotting Hazard Maps')

    showBar = conf.get('Logging', 'ProgressBar')
    bar = ProgressBar('Plotting hazard maps: ', showBar)
    bar.update(0.0)

    from PlotInterface.AutoPlotHazard import AutoPlotHazard
    autoPlot = AutoPlotHazard(configFile, progressbar=bar)
    for plotStep in (autoPlot.plotMap, autoPlot.plotCurves):
        plotStep()

    bar.update(1.0)
def doDataDownload(configFile):
    """
    Check and download the data files.

    :param str configFile: Name of configuration file.
    """
    log.info('Checking availability of input data sets')

    conf = ConfigParser()
    conf.read(configFile)
    showBar = conf.get('Logging', 'ProgressBar')

    for dataset in datasets.DATASETS:
        # Skip anything that is already on disk.
        if dataset.isDownloaded():
            continue
        log.info('Input file %s is not available', dataset.filename)
        try:
            log.info('Attempting to download %s', dataset.filename)

            bar = ProgressBar('Downloading file %s: ' % dataset.filename,
                              showBar)

            def status(fn, done, size):
                bar.update(float(done) / size)

            dataset.download(status)
            log.info('Download successful')
        except IOError:
            log.error('Unable to download %s. Maybe a proxy problem?',
                      dataset.filename)
            sys.exit(1)
def doOutputDirectoryCreation(configFile):
    """
    Create all the necessary output folders.

    :param str configFile: Name of configuration file.
    :raises OSError: If the directory tree cannot be created.
    """
    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')
    log.info('Output will be stored under %s', outputPath)

    subdirs = ['tracks', 'windfield', 'plots', 'plots/timeseries',
               'log', 'process', 'process/timeseries']

    # os.makedirs(..., exist_ok=True) is a no-op for an existing
    # directory and still raises OSError when the tree cannot be
    # created, so the explicit isdir() checks and the try/except blocks
    # that merely re-raised are unnecessary.
    os.makedirs(outputPath, exist_ok=True)
    for subdir in subdirs:
        os.makedirs(realpath(pjoin(outputPath, subdir)), exist_ok=True)
def doHazard(configFile):
    """
    Do the hazard calculations (extreme value distribution fitting)
    using the :mod:`hazard` module.

    :param str configFile: Name of configuration file.
    """
    log.info('Running HazardInterface')

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')
    pbar = ProgressBar('Performing hazard calculations: ', showProgressBar)

    # The original defined a 'status' callback here but never passed it
    # to hazard.run(), so it was dead code and has been removed; the
    # progress bar is simply completed once the run finishes.
    import hazard
    hazard.run(configFile)

    log.info('Completed HazardInterface')
    pbar.update(1.0)
def doTrackGeneration(configFile):
    """
    Do the tropical cyclone track generation in :mod:`TrackGenerator`.

    The track generation settings are read from *configFile*.

    :param str configFile: Name of configuration file.
    """
    log.info('Starting track generation')

    conf = ConfigParser()
    conf.read(configFile)
    showBar = conf.get('Logging', 'ProgressBar')

    bar = ProgressBar('Simulating cyclone tracks: ', showBar)

    def report(done, total):
        # Fractional completion for the progress bar.
        bar.update(float(done) / total)

    import TrackGenerator
    TrackGenerator.run(configFile, report)

    bar.update(1.0)
    log.info('Completed track generation')
def doWindfieldCalculations(configFile):
    """
    Do the wind field calculations, using :mod:`wind`.

    The wind field settings are read from *configFile*.

    :param str configFile: Name of configuration file.
    """
    log.info('Starting wind field calculations')

    conf = ConfigParser()
    conf.read(configFile)
    showBar = conf.get('Logging', 'ProgressBar')

    bar = ProgressBar('Calculating wind fields: ', showBar)

    def report(done, total):
        # Fractional completion for the progress bar.
        bar.update(float(done) / total)

    import wind
    wind.run(configFile, report)

    bar.update(1.0)
    log.info('Completed wind field calculations')
def doHazardPlotting(configFile):
    """
    Do the hazard plots (hazard maps and curves for all locations within
    the model domain), via :mod:`PlotInterface.AutoPlotHazard`.

    :param str configFile: Name of configuration file.
    """
    settings = ConfigParser()
    settings.read(configFile)

    log.info('Plotting Hazard Maps')

    bar = ProgressBar('Plotting hazard maps: ',
                      settings.get('Logging', 'ProgressBar'))
    bar.update(0.0)

    from PlotInterface.AutoPlotHazard import AutoPlotHazard
    plotter = AutoPlotHazard(configFile, progressbar=bar)
    plotter.plotMap()
    plotter.plotCurves()

    bar.update(1.0)
def __init__(self, configFile):
    """
    Read the output path and model domain from the configuration.

    :param str configFile: Path to configuration file.
    """
    cfg = ConfigParser()
    cfg.read(configFile)

    self.outputPath = cfg.get('Output', 'Path')
    self.wf_domain = cfg.geteval('Region', 'gridLimit')
def doDataDownload(configFile):
    """
    Check and download the data files listed in the configuration file.

    Datasets are listed in the `Input` section of the configuration
    file, with the option `Datasets`. There must also be a corresponding
    section in the configuration file that includes the url, the path
    where the dataset will be stored and the filename that will be
    stored, e.g.::

        [Input]
        Datasets=IBTRACS

        [IBTRACS]
        URL=ftp://eclipse.ncdc.noaa.gov/pub/ibtracs/v03r05/wmo/csv/Allstorms.ibtracs_wmo.v03r05.csv.gz
        filename=Allstorms.ibtracs_wmo.v03r05.csv
        path=input

    This will attempt to download the gzipped csv file from the given
    URL and save it to the given filename, in the 'input' folder under
    the current directory. Gzipped files are automatically unzipped.

    :param str configFile: Name of configuration file.
    :raises IOError: If the data cannot be downloaded.
    """
    log.info('Checking availability of input data sets')

    conf = ConfigParser()
    conf.read(configFile)
    showBar = conf.get('Logging', 'ProgressBar')

    # Populate datasets.DATASETS from the configuration.
    datasets.loadDatasets(configFile)

    for dataset in datasets.DATASETS:
        # Skip anything that is already on disk.
        if dataset.isDownloaded():
            continue
        log.info('Input file %s is not available', dataset.filename)
        try:
            log.info('Attempting to download %s', dataset.filename)

            bar = ProgressBar('Downloading file %s: ' % dataset.filename,
                              showBar)

            def report(fn, done, size):
                bar.update(float(done) / size)

            dataset.download(report)
            log.info('Download successful')
        except IOError:
            log.error('Unable to download %s. Maybe a proxy problem?',
                      dataset.filename)
            sys.exit(1)
def doTimeseriesPlotting(configFile):
    """Run functions to plot time series output"""
    conf = ConfigParser()
    conf.read(configFile)

    basePath = conf.get('Output', 'Path')
    timeseriesPath = pjoin(basePath, 'process', 'timeseries')
    plotPath = pjoin(basePath, 'plots', 'timeseries')
    log.info("Plotting time series data to %s" % plotPath)

    from PlotInterface.plotTimeseries import plotTimeseries
    plotTimeseries(timeseriesPath, plotPath)
def doStatistics(configFile):
    """
    Calibrate the model.

    :param str configFile: Name of configuration file.
    """
    from DataProcess.CalcTrackDomain import CalcTrackDomain

    conf = ConfigParser()
    conf.read(configFile)
    showBar = conf.get('Logging', 'ProgressBar')
    getRMWDistFromInputData = conf.getboolean('RMW',
                                              'GetRMWDistFromInputData')

    log.info('Running StatInterface')
    bar = ProgressBar('Calibrating model: ', showBar)

    # Auto-calculate the track generator domain from the input data.
    domainCalc = CalcTrackDomain(configFile)
    domain = domainCalc.calcDomainFromFile()
    bar.update(0.05)

    from StatInterface import StatInterface
    stats = StatInterface.StatInterface(configFile,
                                        autoCalc_gridLimit=domain)

    # Run each calibration step, ticking the progress bar as we go.
    stats.kdeGenesisDate()
    bar.update(0.4)
    stats.kdeOrigin()
    bar.update(0.5)
    stats.cdfCellBearing()
    bar.update(0.6)
    stats.cdfCellSpeed()
    bar.update(0.7)
    stats.cdfCellPressure()
    bar.update(0.8)
    stats.calcCellStatistics()
    if getRMWDistFromInputData:
        stats.cdfCellSize()
    bar.update(1.0)

    log.info('Completed StatInterface')
def __init__(self, configFile, dt):
    """
    Initialise required fields.

    :param str configFile: path to a configuration file.
    :param dt: time step used when accumulating time over land.
    """
    self.configFile = configFile

    cfg = ConfigParser()
    cfg.read(configFile)
    maskFile = cfg.get('Input', 'LandMask')

    self.landMask = SampleGrid(maskFile)
    self.tol = 0  # Time over land
    self.dt = dt
def __init__(self, configFile):
    """
    Initialise the hazard database connection.

    Reads the output paths and model domain from the configuration,
    then initialises this object as a :class:`sqlite3.Connection` on
    the hazard database file.

    :param str configFile: path to a configuration file.
    """
    config = ConfigParser()
    config.read(configFile)
    self.outputPath = config.get('Output', 'Path')
    self.windfieldPath = pjoin(self.outputPath, 'windfield')
    self.trackPath = pjoin(self.outputPath, 'tracks')
    self.hazardPath = pjoin(self.outputPath, 'hazard')
    self.domain = config.geteval('Region', 'gridLimit')
    self.hazardDB = pjoin(self.outputPath, 'hazard.db')
    self.locationDB = pjoin(self.outputPath, 'locations.db')
    self.datfile = config.get('Process', 'DatFile')
    self.excludePastProcessed = config.getboolean('Process', 'ExcludePastProcessed')
    # Return value discarded - presumably populates module-level state
    # recording already-processed files; verify against pGetProcessedFiles.
    pGetProcessedFiles(self.datfile)
    # This class subclasses sqlite3.Connection; initialise the base class
    # directly on the hazard database, with declared-type/column-name
    # detection enabled.
    sqlite3.Connection.__init__(self, self.hazardDB, detect_types=PARSE_DECLTYPES | PARSE_COLNAMES)
    self.exists = True
    # Ensure the connection is closed on interpreter exit.
    import atexit
    atexit.register(self.close)
def __init__(self, configFile, autoCalc_gridLimit=None, progressbar=None):
    """
    Initialize the data and variables required for the interface.

    :param str configFile: path to a configuration file.
    :param dict autoCalc_gridLimit: automatically calculated track
        domain, used when no gridLimit is given in the configuration.
    :param progressbar: optional progress bar instance.
    """
    self.configFile = configFile
    config = ConfigParser()
    config.read(configFile)
    self.progressbar = progressbar

    log.info("Initialising StatInterface")

    self.kdeType = config.get('StatInterface', 'kdeType')
    self.kde2DType = config.get('StatInterface', 'kde2DType')
    minSamplesCell = config.getint('StatInterface', 'minSamplesCell')
    self.kdeStep = config.getfloat('StatInterface', 'kdeStep')
    self.outputPath = config.get('Output', 'Path')
    self.processPath = pjoin(self.outputPath, 'process')

    # sys.maxint does not exist in Python 3; use sys.maxsize (as the
    # sibling implementation of this initialiser already does).
    missingValue = cnfGetIniValue(self.configFile, 'StatInterface',
                                  'MissingValue', sys.maxsize)

    gridLimitStr = cnfGetIniValue(self.configFile, 'StatInterface',
                                  'gridLimit', '')

    # Use equality, not identity ('is not'), when comparing with a
    # string literal - identity comparison is implementation-dependent.
    if gridLimitStr != '':
        try:
            # The configuration file is trusted input here.
            self.gridLimit = eval(gridLimitStr)
        except SyntaxError:
            log.exception('Error! gridLimit is not a dictionary')
    else:
        self.gridLimit = autoCalc_gridLimit
        log.info('No gridLimit specified - using automatic' +
                 ' selection: ' + str(self.gridLimit))

    try:
        gridSpace = config.geteval('Region', 'gridSpace')
        gridInc = config.geteval('Region', 'gridInc')
    except SyntaxError:
        log.exception('Error! gridSpace or gridInc not dictionaries')
        raise

    self.generateDist = GenerateDistributions(self.configFile,
                                              self.gridLimit,
                                              gridSpace, gridInc,
                                              self.kdeType,
                                              minSamplesCell,
                                              missingValue)
    self.gridSpace = gridSpace
    self.gridInc = gridInc
def doWindfieldPlotting(configFile):
    """
    Plot the wind field on a map.

    :param str configFile: Path to the configuration file.

    :Note: the file name is assumed to be 'gust.interp.nc'
    """
    from netCDF4 import Dataset
    import numpy as np
    config = ConfigParser()
    config.read(configFile)
    outputPath = config.get('Output', 'Path')
    windfieldPath = pjoin(outputPath, 'windfield')

    inputFile = config.get('DataProcess', 'InputFile')
    if inputFile.endswith(".nc"):
        # We have a netcdf track file. Work under the assumption it is
        # drawn directly from TCRM.
        trackFile = os.path.basename(inputFile)
        trackId = trackFile.split('.')[1]
        gustFile = 'gust.{0}.nc'.format(trackId)
        outputWindFile = pjoin(windfieldPath, gustFile)
    else:
        # Note the assumption about the file name!
        outputWindFile = pjoin(windfieldPath, 'gust.001-00001.nc')
    plotPath = pjoin(outputPath, 'plots', 'maxwind.png')

    # NOTE(review): the Dataset is never closed - consider a context
    # manager or f.close() once plotting is complete.
    f = Dataset(outputWindFile, 'r')
    xdata = f.variables['lon'][:]
    ydata = f.variables['lat'][:]
    vdata = f.variables['vmax'][:]

    gridLimit = None
    if config.has_option('Region', 'gridLimit'):
        # Subset the wind field to the configured domain before plotting.
        gridLimit = config.geteval('Region', 'gridLimit')
        ii = np.where((xdata >= gridLimit['xMin']) & (xdata <= gridLimit['xMax']))
        jj = np.where((ydata >= gridLimit['yMin']) & (ydata <= gridLimit['yMax']))
        [xgrid, ygrid] = np.meshgrid(xdata[ii], ydata[jj])
        # Build 2-d index arrays so vdata is subset on both axes at once.
        ig, jg = np.meshgrid(ii, jj)
        vdata = vdata[jg, ig]
    else:
        [xgrid, ygrid] = np.meshgrid(xdata, ydata)
    # Map bounds follow the (possibly subset) grid extent.
    map_kwargs = dict(llcrnrlon=xgrid.min(),
                      llcrnrlat=ygrid.min(),
                      urcrnrlon=xgrid.max(),
                      urcrnrlat=ygrid.max(),
                      projection='merc',
                      resolution='i')
    title = "Maximum wind speed"
    cbarlabel = "Wind speed ({0})".format(f.variables['vmax'].units)
    # Contour levels from 30 to 100 in steps of 5 (wind speed units from
    # the file's 'vmax' variable).
    levels = np.arange(30, 101., 5.)
    saveWindfieldMap(vdata, xgrid, ygrid, title, levels,
                     cbarlabel, map_kwargs, plotPath)
def run(configFile, callback=None):
    """
    Run the hazard calculations. This will attempt to run the
    calculation in parallel by tiling the domain, but also provides a
    sane fallback mechanism to execute in serial.

    :param str configFile: path to the configuration file.
    :param callback: optional progress callback. Currently unused - see
        the commented-out ``progress`` helper below.
    """
    log.info("Loading hazard calculation settings")
    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')
    inputPath = pjoin(outputPath, 'windfield')
    gridLimit = config.geteval('Region', 'gridLimit')
    numsimulations = config.getint('TrackGenerator', 'NumSimulations')
    yrsPerSim = config.getint('TrackGenerator', 'YearsPerSimulation')
    minRecords = config.getint('Hazard', 'MinimumRecords')
    calculate_confidence = config.getboolean('Hazard', 'CalculateCI')

    # The domain extent is derived from the windfield output files.
    wf_lon, wf_lat = setDomain(inputPath)

    # The parallel context is shared module-wide through the global
    # 'pp'; attemptParallel() presumably falls back to a serial stub
    # when no parallel environment is available - verify in its module.
    global pp
    pp = attemptParallel()

    log.info("Running hazard calculations")
    TG = TileGrid(gridLimit, wf_lon, wf_lat)
    tiles = getTiles(TG)

    #def progress(i):
    #    callback(i, len(tiles))

    # Synchronise all processes before the tiled calculation and again
    # before saving, so results are complete when written.
    pp.barrier()
    hc = HazardCalculator(configFile, TG, numsimulations, minRecords, yrsPerSim, calculate_confidence)
    hc.dumpHazardFromTiles(tiles)
    pp.barrier()
    hc.saveHazard()
    log.info("Completed hazard calculation")
def startup():
    """
    Parse command line arguments, set up logging and call the main
    processing function.

    Command line options:

    * ``-c/--config_file``: path to the configuration file.
    * ``-v/--verbose``: force verbose output (overrides the config).
    * ``-d/--debug``: run without the top-level exception handler so
      that pdb traces are possible.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config_file', help='Path to configuration file')
    parser.add_argument('-v', '--verbose', help='Verbose output', action='store_true')
    parser.add_argument('-d', '--debug', help='Allow pdb traces', action='store_true')
    args = parser.parse_args()

    configFile = args.config_file
    config = ConfigParser()
    config.read(configFile)

    # Run from the package root so relative paths in the config resolve.
    rootdir = pathLocator.getRootDirectory()
    os.chdir(rootdir)

    logfile = config.get('Logging', 'LogFile')
    logdir = dirname(realpath(logfile))

    # If log file directory does not exist, create it
    if not isdir(logdir):
        try:
            os.makedirs(logdir)
        except OSError:
            # Fall back to a log file in the current working directory.
            logfile = pjoin(os.getcwd(), 'processMultipliers.log')

    logLevel = config.get('Logging', 'LogLevel')
    verbose = config.getboolean('Logging', 'Verbose')
    datestamp = config.getboolean('Logging', 'Datestamp')
    debug = False

    # Command line flags override the configuration file settings.
    if args.verbose:
        verbose = True
    if args.debug:
        debug = True

    flStartLog(logfile, logLevel, verbose, datestamp)

    # NOTE(review): the debug path calls main() while the normal path
    # calls modified_main() - confirm this asymmetry is intentional.
    if debug:
        main(configFile)
    else:
        try:
            modified_main(configFile)
        except Exception:  # pylint: disable=W0703
            # Catch any exceptions that occur and log them (nicely):
            tblines = traceback.format_exc().splitlines()
            for line in tblines:
                log.critical(line.lstrip())
def __init__(self, configFile):
    """
    Read configuration settings, load station data and set up output
    recarrays.

    :param str configFile: path to a configuration file.
    """
    config = ConfigParser()
    config.read(configFile)

    self.meta = False

    stnFile = config.get('Timeseries', 'LocationFile')
    outputDir = config.get('Output', 'Path')
    self.outputPath = pjoin(outputDir, 'process', 'timeseries')
    self.maxfile = pjoin(outputDir, 'process', 'maxima.csv')
    self.minfile = pjoin(outputDir, 'process', 'minima.csv')

    log.info(f"Loading timeseries stations from {stnFile}")
    log.debug(f"Timeseries data will be written into {self.outputPath}")

    self.stations = []
    if stnFile.endswith("shp"):
        try:
            key_name = config.get('Timeseries', 'StationID')
        except NoOptionError:
            key_name = None

        vertices = shpGetVertices(stnFile, key_name=key_name)
        for stn in list(vertices.keys()):
            lon = vertices[stn][0][0]
            lat = vertices[stn][0][1]
            # Normalise longitudes to the [0, 360) convention.
            lon = np.where(lon < 0., lon + 360., lon)
            self.stations.append(Station(stn, lon, lat))
    else:
        stndata = flLoadFile(stnFile, delimiter=',')
        # Columns beyond the first three are kept as station metadata.
        if stndata.shape[1] > 3:
            self.metadata = stndata[:, 3:]
            self.meta = True
        stnid = stndata[:, 0]
        stnlon = stndata[:, 1].astype(float)
        stnlat = stndata[:, 2].astype(float)
        for sid, lon, lat in zip(stnid, stnlon, stnlat):
            self.stations.append(Station(sid, lon, lat))
    log.info(f"There are {len(self.stations)} stations that will collect timeseries data")
def run(configFile, callback=None):
    """
    Run the hazard calculations. This will attempt to run the
    calculation in parallel by tiling the domain, but also provides a
    sane fallback mechanism to execute in serial.

    :param str configFile: path to configuration file.
    :param callback: optional progress callback. Currently unused - see
        the commented-out ``progress`` helper below.
    """
    log.info("Loading hazard calculation settings")
    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')
    inputPath = pjoin(outputPath, 'windfield')
    gridLimit = config.geteval('Region', 'gridLimit')
    numsimulations = config.getint('TrackGenerator', 'NumSimulations')
    yrsPerSim = config.getint('TrackGenerator', 'YearsPerSimulation')
    minRecords = config.getint('Hazard', 'MinimumRecords')
    calculate_confidence = config.getboolean('Hazard', 'CalculateCI')

    # The domain extent is derived from the windfield output files.
    wf_lon, wf_lat = setDomain(inputPath)

    # The parallel context is shared module-wide through the global
    # 'pp'; attemptParallel() presumably falls back to a serial stub
    # when no parallel environment is available - verify in its module.
    global pp
    pp = attemptParallel()

    log.info("Running hazard calculations")
    TG = TileGrid(gridLimit, wf_lon, wf_lat)
    tiles = getTiles(TG)

    #def progress(i):
    #    callback(i, len(tiles))

    # Synchronise all processes before the tiled calculation and again
    # before saving, so results are complete when written.
    pp.barrier()
    hc = HazardCalculator(configFile, TG, numsimulations, minRecords, yrsPerSim, calculate_confidence)
    hc.dumpHazardFromTiles(tiles)
    pp.barrier()
    hc.saveHazard()
    log.info("Completed hazard calculation")
def main(configFile):
    """
    Main function for collecting and processing wind multipliers.

    :param str configFile: Path to configuration file.
    """
    conf = ConfigParser()
    conf.read(configFile)
    output_path = conf.get('Output', 'Path')

    # Map multiplier type to the variable-name prefix used in the data.
    type_mapping = {'shielding': 'Ms',
                    'terrain': 'Mz',
                    'topographic': 'Mt'}
    dirns = ['e', 'n', 'ne', 'nw', 's', 'se', 'sw', 'w']

    copyTranslateMultipliers(configFile, type_mapping, output_path)
    mergeWindMultipliers(type_mapping, dirns, output_path)
    combineDirections(dirns, output_path)
def __init__(self, configFile, autoCalc_gridLimit=None, progressbar=None):
    """
    Initialize the data and variables required for the interface.

    :param str configFile: path to a configuration file.
    :param dict autoCalc_gridLimit: automatically calculated track
        domain, used when no gridLimit is given in the configuration.
    :param progressbar: optional progress bar instance.
    """
    self.configFile = configFile
    config = ConfigParser()
    config.read(configFile)
    self.progressbar = progressbar

    log.info("Initialising StatInterface")

    self.kdeType = config.get('StatInterface', 'kdeType')
    minSamplesCell = config.getint('StatInterface', 'minSamplesCell')
    self.kdeStep = config.getfloat('StatInterface', 'kdeStep')
    self.outputPath = config.get('Output', 'Path')
    self.processPath = pjoin(self.outputPath, 'process')

    missingValue = cnfGetIniValue(self.configFile, 'StatInterface',
                                  'MissingValue', sys.maxsize)

    gridLimitStr = cnfGetIniValue(self.configFile, 'StatInterface',
                                  'gridLimit', '')

    # Use equality, not identity ('is not'), when comparing with a
    # string literal - identity comparison is implementation-dependent
    # and raises a SyntaxWarning on modern CPython.
    if gridLimitStr != '':
        try:
            # The configuration file is trusted input here.
            self.gridLimit = eval(gridLimitStr)
        except SyntaxError:
            log.exception('Error! gridLimit is not a dictionary')
    else:
        self.gridLimit = autoCalc_gridLimit
        log.info('No gridLimit specified - using automatic' +
                 ' selection: ' + str(self.gridLimit))

    try:
        gridSpace = config.geteval('Region', 'gridSpace')
        gridInc = config.geteval('Region', 'gridInc')
    except SyntaxError:
        log.exception('Error! gridSpace or gridInc not dictionaries')
        raise

    self.generateDist = GenerateDistributions(self.configFile,
                                              self.gridLimit,
                                              gridSpace, gridInc,
                                              self.kdeType,
                                              minSamplesCell,
                                              missingValue)
    self.gridSpace = gridSpace
    self.gridInc = gridInc
def doTimeseriesPlotting(configFile):
    """
    Run functions to plot time series output.

    :param str configFile: Path to configuration file.
    """
    conf = ConfigParser()
    conf.read(configFile)

    basePath = conf.get('Output', 'Path')
    timeseriesPath = pjoin(basePath, 'process', 'timeseries')
    plotPath = pjoin(basePath, 'plots', 'timeseries')
    log.info("Plotting time series data to {0}".format(plotPath))

    from PlotInterface.plotTimeseries import plotTimeseries
    plotTimeseries(timeseriesPath, plotPath)
def historic(self):
    """Calculate historical rates of longitude crossing"""
    LOG.info("Processing historical tracks for longitude crossings")

    conf = ConfigParser()
    conf.read(self.configFile)
    inputFile = conf.get('DataProcess', 'InputFile')
    source = conf.get('DataProcess', 'Source')
    timestep = conf.getfloat('TrackGenerator', 'Timestep')

    # Bare file names are taken to live in the input directory.
    if not os.path.dirname(inputFile):
        inputFile = pjoin(self.inputPath, inputFile)

    try:
        tracks = interpolateTracks.parseTracks(self.configFile,
                                               inputFile,
                                               source,
                                               timestep,
                                               interpolation_type='linear')
    except (TypeError, IOError, ValueError):
        LOG.critical("Cannot load historical track file: {0}".\
                     format(inputFile))
        raise

    self.lonCrossingHist, self.lonCrossingEWHist, \
        self.lonCrossingWEHist = self.findCrossings(tracks)
    return
def main(configFile):
    """
    Interpolate the input track file and run the wind field
    calculations and timeseries plotting.

    :param str configFile: Path to configuration file.
    """
    conf = ConfigParser()
    conf.read(configFile)

    doOutputDirectoryCreation(configFile)

    trackFile = conf.get('DataProcess', 'InputFile')
    source = conf.get('DataProcess', 'Source')
    delta = 1/12.  # interpolation time step

    trackPath = pjoin(conf.get('Output', 'Path'), 'tracks')
    outputTrackFile = pjoin(trackPath, "tracks.interp.csv")

    # This will save interpolated track data in TCRM format:
    interpTrack = interpolateTracks.parseTracks(configFile, trackFile,
                                                source, delta,
                                                outputTrackFile)

    bar = ProgressBar('Calculating wind fields: ',
                      conf.get('Logging', 'ProgressBar'))

    def status(done, total):
        bar.update(float(done) / total)

    import wind
    wind.run(configFile, status)

    # FIXME: Add wind field and timeseries plotting
    doTimeseriesPlotting(configFile)
def __init__(self, configFile):
    """
    Calculate density of TC genesis positions on a grid.

    :param str configFile: path to a TCRM configuration file.
    """
    config = ConfigParser()
    config.read(configFile)
    self.configFile = configFile

    # Define the grid:
    self.gridLimit = config.geteval('Region', 'gridLimit')
    self.gridSpace = config.geteval('Region', 'GridSpace')

    # NOTE(review): the axes are built with a hard-coded 1-degree step,
    # not self.gridSpace - confirm whether gridSpace should be used here.
    self.lon_range = np.arange(self.gridLimit['xMin'], self.gridLimit['xMax'] + 0.1, 1.)
    self.lat_range = np.arange(self.gridLimit['yMin'], self.gridLimit['yMax'] + 0.1, 1.)
    self.X, self.Y = np.meshgrid(self.lon_range, self.lat_range)

    outputPath = config.get('Output', 'Path')
    self.trackPath = pjoin(outputPath, 'tracks')
    self.plotPath = pjoin(outputPath, 'plots', 'stats')
    self.dataPath = pjoin(outputPath, 'process')

    # Determine TCRM input directory
    tcrm_dir = pathLocator.getRootDirectory()
    self.inputPath = pjoin(tcrm_dir, 'input')

    # Number of years represented by each synthetic simulation.
    self.synNumYears = config.getint('TrackGenerator', 'yearspersimulation')
def __init__(self, configFile):
    """
    Initialise the coastline gate statistics.

    :param str configFile: path to a TCRM configuration file.
    :raises NoOptionError: if no coastline gate file is configured.
    """
    conf = ConfigParser()
    conf.read(configFile)
    self.configFile = configFile

    basePath = conf.get('Output', 'Path')
    self.trackPath = pjoin(basePath, 'tracks')
    self.plotPath = pjoin(basePath, 'plots', 'stats')
    self.dataPath = pjoin(basePath, 'process')

    # Determine TCRM input directory
    self.inputPath = pjoin(pathLocator.getRootDirectory(), 'input')

    self.synNumYears = conf.getint('TrackGenerator',
                                   'yearspersimulation')

    try:
        gateFile = conf.get('Input', 'CoastlineGates')
    except NoOptionError:
        LOG.exception(("No coastline gate file specified "
                       "in configuration file"))
        raise

    gateData = np.genfromtxt(gateFile, delimiter=',')
    self.gates = Int.convert2vertex(gateData[:, 1], gateData[:, 2])
    # Close the loop by repeating the first gate at the end.
    self.coast = list(self.gates)
    self.coast.append(self.gates[0])
def main(configFile):
    """
    Main function to execute the :mod:`wind`.

    :param str configFile: Path to configuration file.
    """
    conf = ConfigParser()
    conf.read(configFile)

    doOutputDirectoryCreation(configFile)

    trackFile = conf.get('DataProcess', 'InputFile')
    source = conf.get('DataProcess', 'Source')
    delta = 1/12.  # interpolation time step

    trackPath = pjoin(conf.get('Output', 'Path'), 'tracks')
    outputTrackFile = pjoin(trackPath, "tracks.interp.csv")

    # This will save interpolated track data in TCRM format:
    interpTrack = interpolateTracks.parseTracks(configFile, trackFile,
                                                source, delta,
                                                outputTrackFile,
                                                interpolation_type='akima')

    bar = ProgressBar('Calculating wind fields: ',
                      conf.get('Logging', 'ProgressBar'))

    def status(done, total):
        bar.update(float(done) / total)

    import wind
    wind.run(configFile, status)

    doWindfieldPlotting(configFile)
    doTimeseriesPlotting(configFile)
def __init__(self, configFile):
    """
    Initialise the longitude-crossing statistics.

    :param str configFile: path to a TCRM configuration file.
    """
    config = ConfigParser()
    config.read(configFile)
    self.configFile = configFile

    # Define the grid:
    gridLimit = config.geteval('Region', 'gridLimit')
    gridSpace = config.geteval('Region', 'GridSpace')

    self.lon_range = np.arange(gridLimit['xMin'], gridLimit['xMax'] + 0.1, gridSpace['x'])
    self.lat_range = np.arange(gridLimit['yMin'], gridLimit['yMax'] + 0.1, gridSpace['y'])

    outputPath = config.get('Output', 'Path')
    self.trackPath = pjoin(outputPath, 'tracks')
    self.plotPath = pjoin(outputPath, 'plots', 'stats')
    self.dataPath = pjoin(outputPath, 'process')

    # Determine TCRM input directory
    tcrm_dir = pathLocator.getRootDirectory()
    self.inputPath = pjoin(tcrm_dir, 'input')

    # Number of years represented by each synthetic simulation.
    self.synNumYears = config.getint('TrackGenerator', 'yearspersimulation')

    # Longitude crossing gates: every 10 degrees of longitude and every
    # 2 degrees of latitude across the model domain.
    self.gateLons = np.arange(self.lon_range.min(), self.lon_range.max() + 0.5, 10.)
    self.gateLats = np.arange(self.lat_range.min(), self.lat_range.max() + 0.5, 2.)

    # Add configuration settings to global attributes:
    self.gatts = {
        'history': "Longitude crossing rates for TCRM simulation",
        'version': flProgramVersion()
    }

    # Record every configuration option as a '<section>_<option>'
    # attribute, for provenance in the output files.
    for section in config.sections():
        for option in config.options(section):
            key = "{0}_{1}".format(section, option)
            value = config.get(section, option)
            self.gatts[key] = value
def __init__(self, configFile, gridLimit, kdeStep, lonLat=None, progressbar=None):
    """
    Initialise the KDE origin generator.

    :param str configFile: path to a TCRM configuration file.
    :param dict gridLimit: domain bounds with keys 'xMin', 'xMax',
        'yMin' and 'yMax'.
    :param float kdeStep: grid resolution (degrees) for the KDE grid.
    :param lonLat: optional array of origin positions; the first two
        columns are taken as longitude and latitude. If not given,
        positions are loaded from the 'init_lon_lat' file in the
        process directory named in the configuration.
    :param progressbar: optional progress bar instance.
    """
    self.progressbar = progressbar
    LOGGER.info("Initialising KDEOrigin")
    # Grid axes: y runs from yMax downward, so row 0 is the northern edge.
    self.x = np.arange(gridLimit['xMin'], gridLimit['xMax'], kdeStep)
    self.y = np.arange(gridLimit['yMax'], gridLimit['yMin'], -kdeStep)

    self.kdeStep = kdeStep
    # Populated later by the KDE generation methods.
    self.kde = None
    self.pdf = None
    self.cz = None

    self.configFile = configFile
    self.config = ConfigParser()
    self.config.read(configFile)

    if lonLat is None:
        # Load the data from file:
        self.outputPath = self.config.get('Output', 'Path')
        self.processPath = pjoin(self.outputPath, 'process')
        LOGGER.debug("Loading " + pjoin(self.processPath, 'init_lon_lat'))
        ll = flLoadFile(pjoin(self.processPath, 'init_lon_lat'), '%', ',')
        self.lonLat = ll[:, 0:2]
    else:
        self.lonLat = lonLat[:, 0:2]

    # Discard origins falling outside the model domain before
    # estimating the bandwidth.
    ii = np.where((self.lonLat[:, 0] >= gridLimit['xMin']) &
                  (self.lonLat[:, 0] <= gridLimit['xMax']) &
                  (self.lonLat[:, 1] >= gridLimit['yMin']) &
                  (self.lonLat[:, 1] <= gridLimit['yMax']))
    self.lonLat = self.lonLat[ii]

    self.bw = getOriginBandwidth(self.lonLat)
    LOGGER.info("Bandwidth: %s", repr(self.bw))
def main(configFile):
    """
    Main function to execute the :mod:`wind`.

    :param str configFile: Path to configuration file.
    """
    config = ConfigParser()
    config.read(configFile)

    doOutputDirectoryCreation(configFile)

    trackFile = config.get('DataProcess', 'InputFile')
    source = config.get('DataProcess', 'Source')
    delta = 1/12.  # interpolation time step

    trackPath = pjoin(config.get('Output', 'Path'), 'tracks')
    outputTrackFile = pjoin(trackPath, "tracks.interp.nc")

    # This will save interpolated track data in TCRM format:
    interpTrack = interpolateTracks.parseTracks(configFile, trackFile,
                                                source, delta,
                                                outputTrackFile,
                                                interpolation_type='akima')

    bar = ProgressBar('Calculating wind fields: ',
                      config.get('Logging', 'ProgressBar'))

    def status(done, total):
        bar.update(float(done) / total)

    import wind
    wind.run(configFile, status)

    import impact
    impact.run_optional(config)

    if config.getboolean('WindfieldInterface', 'PlotOutput'):
        doWindfieldPlotting(configFile)

    if config.getboolean('Timeseries', 'Extract'):
        doTimeseriesPlotting(configFile)
def modified_main(config_file):
    """
    Main function to combine the multipliers with the regional wind
    speed data.

    :param str config_file: Path to configuration file.
    """
    config = ConfigParser()
    config.read(config_file)

    input_path = config.get('Input', 'Path')
    try:
        gust_file = config.get('Input', 'Gust_file')
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit and
        # KeyboardInterrupt are no longer swallowed; fall back to the
        # default windfield output name.
        gust_file = 'gust.001-00001.nc'
    windfield_path = pjoin(input_path, 'windfield')
    ncfile = pjoin(windfield_path, gust_file)

    multiplier_path = config.get('Input', 'Multipliers')

    # Load the wind data:
    log.info("Loading regional wind data from {0}".format(ncfile))
    ncobj = Dataset(ncfile, 'r')

    lat = ncobj.variables['lat'][:]
    lon = ncobj.variables['lon'][:]

    # Shift coordinates by half a cell spacing (assumes a regular grid
    # with equal x/y spacing - TODO confirm).
    delta = lon[1] - lon[0]
    lon = lon - delta / 2.
    lat = lat - delta / 2.

    # Wind speed:
    wspd = ncobj.variables['vmax'][:]

    # Components:
    uu = ncobj.variables['ua'][:]
    vv = ncobj.variables['va'][:]

    # The original aliased gust/Vx/Vy to wspd/uu/vv (flagged with a
    # "THESE COULD BE WRONG" warning), computed an unused bearing and
    # assigned several no-ops (P = None, lon = lon, lat = lat); the
    # dead code has been removed and the values passed on directly.
    log.info("Using M4 data from {0}".format(multiplier_path))
    processMult(wspd, uu, vv, lon, lat, windfield_path, multiplier_path)
def __init__(self, configFile):
    """
    Read configuration settings, load station data and set up
    output recarrays.

    :param str configFile: path to a configuration file.
    """
    config = ConfigParser()
    config.read(configFile)

    self.meta = False

    stnFile = config.get('Timeseries', 'StationFile')
    outDir = config.get('Output', 'Path')
    self.outputPath = pjoin(outDir, 'process', 'timeseries')
    self.maxfile = pjoin(outDir, 'process', 'maxima.csv')
    self.minfile = pjoin(outDir, 'process', 'minima.csv')

    log.debug("Loading stations from %s" % stnFile)
    log.debug("Timeseries data will be written into %s" % self.outputPath)

    self.stations = []
    if stnFile.endswith("shp"):
        # Station locations come from a shapefile; the ID field is
        # optional in the configuration.
        try:
            key_name = config.get('Timeseries', 'StationID')
        except NoOptionError:
            key_name = None

        vertices = shpGetVertices(stnFile, key_name=key_name)
        for stn, verts in vertices.items():
            self.stations.append(Station(stn, verts[0][0], verts[0][1]))
    else:
        stndata = flLoadFile(stnFile, delimiter=',')
        # If there are more than 3 columns, save the additional
        # columns as 'metadata':
        if stndata.shape[1] > 3:
            self.metadata = stndata[:, 3:]
            self.meta = True

        stnid = stndata[:, 0]
        stnlon = stndata[:, 1].astype(float)
        stnlat = stndata[:, 2].astype(float)
        for sid, lon, lat in zip(stnid, stnlon, stnlat):
            self.stations.append(Station(sid, lon, lat))
def historic(self):
    """Load historic data and calculate histogram"""
    log.info("Processing historical pressure distributions")
    config = ConfigParser()
    config.read(self.configFile)
    inputFile = config.get('DataProcess', 'InputFile')
    source = config.get('DataProcess', 'Source')

    # A bare file name is resolved against the model's input directory:
    if not os.path.dirname(inputFile):
        inputFile = pjoin(self.inputPath, inputFile)

    try:
        tracks = loadTrackFile(self.configFile, inputFile, source)
    except (TypeError, IOError, ValueError):
        log.critical("Cannot load historical track file: {0}".format(inputFile))
        raise
    else:
        (self.histMean, self.histMin,
         self.histMax, self.histMed) = self.calculate(tracks)
        self.histMinCPDist, self.histMinCP = self.calcMinPressure(tracks)
def historic(self):
    """Load historic data and calculate histogram"""
    log.info("Processing historic track records")
    config = ConfigParser()
    config.read(self.configFile)
    inputFile = config.get('DataProcess', 'InputFile')

    # A bare file name is resolved against the model's input directory:
    if not os.path.dirname(inputFile):
        inputFile = pjoin(self.inputPath, inputFile)
    source = config.get('DataProcess', 'Source')

    try:
        tracks = loadTrackFile(self.configFile, inputFile, source)
    except (TypeError, IOError, ValueError):
        log.critical("Cannot load historical track file: {0}".format(inputFile))
        raise
    else:
        # Find the range of years spanned by the track set:
        startYr = 9999
        endYr = 0
        for t in tracks:
            startYr = min(startYr, min(t.Year))
            endYr = max(endYr, max(t.Year))
        # Retained for the (currently disabled) per-year normalisation below:
        numYears = endYr - startYr
        log.info("Range of years: %d - %d" % (startYr, endYr))
        try:
            self.hist = self._calculate(tracks)
            # self.hist = self._calculate(tracks) / numYears
        except ValueError:
            log.critical("KDE error: The number of observations must be larger than the number of variables")
            raise
def __init__(self, configFile):
    """
    Calculate density of TC positions on a grid

    :param str configFile: path to a TCRM configuration file.
    """
    config = ConfigParser()
    config.read(configFile)
    self.configFile = configFile

    # Define the grid (extend the upper bound slightly so the last
    # grid point is included):
    limits = config.geteval('Region', 'gridLimit')
    spacing = config.geteval('Region', 'GridSpace')
    self.lon_range = np.arange(limits['xMin'], limits['xMax'] + 0.1,
                               spacing['x'])
    self.lat_range = np.arange(limits['yMin'], limits['yMax'] + 0.1,
                               spacing['y'])

    outPath = config.get('Output', 'Path')
    self.trackPath = pjoin(outPath, 'tracks')
    self.plotPath = pjoin(outPath, 'plots', 'stats')
    self.dataPath = pjoin(outPath, 'process')

    # Determine TCRM input directory
    self.inputPath = pjoin(pathLocator.getRootDirectory(), 'input')

    self.synNumYears = config.getint('TrackGenerator',
                                     'yearspersimulation')
def colReadCSV(configFile, dataFile, source):
    """
    Loads a csv file containing 'column' data into a record (numpy)
    array with columns labelled by 'fields'. There must be a section in
    the ``configFile`` named ``source`` that sets out the format of the
    data file.

    :param str configFile: Path to a configuration file that holds details
                           of the input data.
    :param str dataFile: Path to the input file to load.
    :param str source: Name of the source format. There must be a
                       corresponding section in the ``configFile``.

    :returns: A :class:`numpy.ndarray` that contains the input data.
    """
    config = ConfigParser()
    config.read(configFile)

    sep = config.get(source, 'FieldDelimiter')
    headerLines = config.getint(source, 'NumberOfHeadingLines')
    colNames = config.get(source, 'Columns').split(sep)

    # Columns labelled 'skip' are excluded from the output:
    keep = [i for i, col in enumerate(colNames) if col != 'skip']

    data = np.genfromtxt(dataFile, dtype=None, delimiter=sep,
                         usecols=keep, comments=None,
                         skip_header=headerLines, autostrip=True)
    data.dtype.names = [col for col in colNames if col != 'skip']

    return data
def historic(self):
    """
    Load historic data and calculate histogram.

    Note that the input historical data is filtered by year when it's
    loaded in `interpolateTracks.parseTracks()`. The timestep to
    interpolate to is set to match that of the synthetic event set
    (normally set to 1 hour).
    """
    config = ConfigParser()
    config.read(self.configFile)
    inputFile = config.get('DataProcess', 'InputFile')
    # A bare file name is resolved against the model's input directory:
    if len(os.path.dirname(inputFile)) == 0:
        inputFile = pjoin(self.inputPath, inputFile)

    source = config.get('DataProcess', 'Source')

    try:
        tracks = loadTrackFile(self.configFile, inputFile, source)
    except (TypeError, IOError, ValueError):
        log.critical("Cannot load historical track file: {0}".format(inputFile))
        raise
    else:
        startYr = 9999
        endYr = 0
        for t in tracks:
            startYr = min(startYr, min(t.Year))
            endYr = max(endYr, max(t.Year))
        # Guard against a zero-year span (all tracks in one calendar
        # year), which previously raised ZeroDivisionError.
        # NOTE(review): this is the year *span*; an inclusive count would
        # be endYr - startYr + 1 -- confirm intended normalisation.
        numYears = max(endYr - startYr, 1)
        self.hist = self.calculate(tracks) / numYears
def historic(self):
    """
    Load historic data and calculate histogram.

    Tracks are interpolated to the synthetic event timestep before the
    histogram is calculated, with the interpolated set cached in the
    input directory.
    """
    config = ConfigParser()
    config.read(self.configFile)
    inputFile = config.get('DataProcess', 'InputFile')
    # A bare file name is resolved against the model's input directory:
    if len(os.path.dirname(inputFile)) == 0:
        inputFile = pjoin(self.inputPath, inputFile)

    source = config.get('DataProcess', 'Source')
    timestep = config.getfloat('TrackGenerator', 'Timestep')

    interpHistFile = pjoin(self.inputPath, "interp_tracks.csv")
    try:
        tracks = interpolateTracks.parseTracks(self.configFile,
                                               inputFile,
                                               source,
                                               timestep,
                                               interpHistFile, 'linear')
    except (TypeError, IOError, ValueError):
        log.critical("Cannot load historical track file: {0}".format(inputFile))
        raise
    else:
        startYr = 9999
        endYr = 0
        for t in tracks:
            startYr = min(startYr, min(t.Year))
            endYr = max(endYr, max(t.Year))
        # Guard against a zero-year span (all tracks in one calendar
        # year), which previously raised ZeroDivisionError.
        # NOTE(review): this is the year *span*; an inclusive count would
        # be endYr - startYr + 1 -- confirm intended normalisation.
        numYears = max(endYr - startYr, 1)
        self.hist = self.calculate(tracks) / numYears
def __init__(self, configFile):
    """
    Set up the grid, output paths and longitude-crossing gates from the
    configuration settings.

    :param str configFile: path to a TCRM configuration file.
    """
    config = ConfigParser()
    config.read(configFile)
    self.configFile = configFile

    # Define the grid (extend the upper bound slightly so the last
    # grid point is included):
    limits = config.geteval('Region', 'gridLimit')
    spacing = config.geteval('Region', 'GridSpace')
    self.lon_range = np.arange(limits['xMin'], limits['xMax'] + 0.1,
                               spacing['x'])
    self.lat_range = np.arange(limits['yMin'], limits['yMax'] + 0.1,
                               spacing['y'])

    outPath = config.get('Output', 'Path')
    self.trackPath = pjoin(outPath, 'tracks')
    self.plotPath = pjoin(outPath, 'plots', 'stats')
    self.dataPath = pjoin(outPath, 'process')

    # Determine TCRM input directory
    self.inputPath = pjoin(pathLocator.getRootDirectory(), 'input')

    self.synNumYears = config.getint('TrackGenerator',
                                     'yearspersimulation')

    # Longitude crossing gates, every 10 degrees of longitude and
    # 2 degrees of latitude across the domain:
    self.gateLons = np.arange(self.lon_range.min(),
                              self.lon_range.max() + 0.5, 10.)
    self.gateLats = np.arange(self.lat_range.min(),
                              self.lat_range.max() + 0.5, 2.)

    # Add configuration settings to global attributes:
    self.gatts = {'history': "Longitude crossing rates for TCRM simulation",
                  'version': flProgramVersion()}
    for section in config.sections():
        for option in config.options(section):
            self.gatts["{0}_{1}".format(section, option)] = \
                config.get(section, option)
def __init__(self, configFile):
    """
    Set up output paths and load the coastline gate data from the
    configuration settings.

    :param str configFile: path to a TCRM configuration file.
    :raises NoOptionError: if no coastline gate file is configured.
    """
    config = ConfigParser()
    config.read(configFile)
    self.configFile = configFile

    outPath = config.get('Output', 'Path')
    self.trackPath = pjoin(outPath, 'tracks')
    self.plotPath = pjoin(outPath, 'plots', 'stats')
    self.dataPath = pjoin(outPath, 'process')

    # Determine TCRM input directory
    self.inputPath = pjoin(pathLocator.getRootDirectory(), 'input')

    self.synNumYears = config.getint('TrackGenerator',
                                     'yearspersimulation')

    try:
        gateFile = config.get('Input', 'CoastlineGates')
    except NoOptionError:
        log.exception(("No coastline gate file specified "
                       "in configuration file"))
        raise

    gateData = np.genfromtxt(gateFile, delimiter=',')
    nGates = len(gateData)

    # Gates are built from the second and third columns of the file;
    # the first gate is appended again to close the coast sequence:
    self.gates = Int.convert2vertex(gateData[:, 1], gateData[:, 2])
    self.coast = list(self.gates)
    self.coast.append(self.gates[0])
def historic(self):
    """Calculate historical rates of longitude crossing"""
    log.info("Processing historical tracks for longitude crossings")
    config = ConfigParser()
    config.read(self.configFile)
    inputFile = config.get("DataProcess", "InputFile")
    source = config.get("DataProcess", "Source")
    timestep = config.getfloat("TrackGenerator", "Timestep")

    # A bare file name is resolved against the model's input directory:
    if not os.path.dirname(inputFile):
        inputFile = pjoin(self.inputPath, inputFile)

    try:
        tracks = interpolateTracks.parseTracks(self.configFile,
                                               inputFile,
                                               source,
                                               timestep,
                                               interpolation_type="linear")
    except (TypeError, IOError, ValueError):
        log.critical("Cannot load historical track file: {0}".format(inputFile))
        raise
    else:
        (self.lonCrossingHist,
         self.lonCrossingEWHist,
         self.lonCrossingWEHist) = self.findCrossings(tracks)

    return
def __init__(self, configFile, progressbar=None):
    """
    Set up the hazard plotting interface from configuration settings.

    :param str configFile: path to a TCRM configuration file.
    :param progressbar: optional progress bar instance.
    """
    config = ConfigParser()
    config.read(configFile)

    outDir = config.get('Output', 'Path')

    # Best-effort: fall back to a sentinel value when no locality is
    # configured (any failure to read the option is treated the same):
    try:
        self.localityID = config.get('Region', 'LocalityID')
    except Exception:
        self.localityID = -999999

    self.inputFile = pjoin(outDir, 'hazard', 'hazard.nc')
    self.plotPath = pjoin(outDir, 'plots', 'hazard')
    self.plotUnits = PlotUnits(config.get('Hazard', 'PlotSpeedUnits'))
    self.ciBounds = config.getboolean('Hazard', 'CalculateCI')
    self.fit = config.get('Hazard', 'ExtremeValueDistribution')
    self.numsimulations = config.getint("TrackGenerator", "NumSimulations")

    self.progressbar = progressbar
    self.db = database.HazardDatabase(configFile)
def startup():
    """
    Parse command line arguments and call the :func:`main` function.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config_file',
                        help='Path to configuration file')
    parser.add_argument('-v', '--verbose', help='Verbose output',
                        action='store_true')
    parser.add_argument('-d', '--debug', help='Allow pdb traces',
                        action='store_true')
    args = parser.parse_args()

    config_file = args.config_file
    config = ConfigParser()
    config.read(config_file)

    os.chdir(pathLocator.getRootDirectory())

    logfile = config.get('Logging', 'LogFile')
    logdir = dirname(realpath(logfile))

    # If log file directory does not exist, create it; on failure fall
    # back to logging in the current working directory:
    if not isdir(logdir):
        try:
            os.makedirs(logdir)
        except OSError:
            logfile = pjoin(os.getcwd(), 'tsmultipliers.log')

    logLevel = config.get('Logging', 'LogLevel')
    # Command line flags override the configuration settings:
    verbose = config.getboolean('Logging', 'Verbose') or args.verbose
    datestamp = config.getboolean('Logging', 'Datestamp')
    debug = bool(args.debug)

    flStartLog(logfile, logLevel, verbose, datestamp)

    if debug:
        # Let exceptions propagate so pdb can catch them:
        process_timeseries(config_file)
    else:
        try:
            process_timeseries(config_file)
        except Exception:  # pylint: disable=W0703
            # Catch any exceptions that occur and log them (nicely):
            for line in traceback.format_exc().splitlines():
                log.critical(line.lstrip())

    log.info("Completed {0}".format(sys.argv[0]))
def startup():
    """
    Parse command line arguments, set up logging and run :func:`main`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config_file',
                        help='Path to configuration file')
    parser.add_argument('-v', '--verbose', help='Verbose output',
                        action='store_true')
    parser.add_argument('-d', '--debug', help='Allow pdb traces',
                        action='store_true')
    args = parser.parse_args()

    configFile = args.config_file
    config = ConfigParser()
    config.read(configFile)

    os.chdir(pathLocator.getRootDirectory())

    logfile = config.get('Logging', 'LogFile')
    logdir = dirname(realpath(logfile))

    # If log file directory does not exist, create it; on failure fall
    # back to logging in the current working directory:
    if not isdir(logdir):
        try:
            os.makedirs(logdir)
        except OSError:
            logfile = pjoin(os.getcwd(), 'tcrm.log')

    logLevel = config.get('Logging', 'LogLevel')
    # Command line flags override the configuration settings:
    verbose = config.getboolean('Logging', 'Verbose') or args.verbose
    debug = bool(args.debug)

    flStartLog(logfile, logLevel, verbose)

    # Switch off minor warning messages
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("ignore", category=UserWarning, module="pytz")
    warnings.filterwarnings("ignore", category=UserWarning, module="numpy")
    warnings.filterwarnings("ignore", category=UserWarning,
                            module="matplotlib")
    warnings.filterwarnings("ignore", category=RuntimeWarning)

    if debug:
        # Let exceptions propagate so pdb can catch them:
        main(configFile)
    else:
        try:
            main(configFile)
        except Exception:  # pylint: disable=W0703
            # Catch any exceptions that occur and log them (nicely):
            for line in traceback.format_exc().splitlines():
                log.critical(line.lstrip())
def doDataProcessing(configFile):
    """
    Parse the input data and turn it into the necessary format
    for the model calibration step.

    :param str configFile: Name of configuration file.
    """
    config = ConfigParser()
    config.read(configFile)

    pbar = ProgressBar('Processing data files: ',
                       config.get('Logging', 'ProgressBar'))

    log.info('Running Data Processing')

    from DataProcess.DataProcess import DataProcess
    processor = DataProcess(configFile, progressbar=pbar)
    processor.processData()

    log.info('Completed Data Processing')
    pbar.update(1.0)
def doDataProcessing(configFile):
    """
    Parse the input data and turn it into the necessary format
    for the model calibration step, using the :mod:`DataProcess`
    module.

    :param str configFile: Name of configuration file.
    """
    config = ConfigParser()
    config.read(configFile)
    showProgressBar = config.get('Logging', 'ProgressBar')

    pbar = ProgressBar('Processing data files: ', showProgressBar)

    log.info('Running Data Processing')

    from DataProcess.DataProcess import DataProcess
    dp = DataProcess(configFile, progressbar=pbar)
    dp.processData()

    log.info('Completed Data Processing')
    pbar.update(1.0)
def loadDatasets(configFile):
    """
    Load the details of the datasets to be downloaded from the
    configuration settings. This updates the :data:`DATASETS` list.
    """
    global DATASETS
    config = ConfigParser()
    config.read(configFile)

    for name in config.get('Input', 'Datasets').split(','):
        url = config.get(name, 'URL')
        path = config.get(name, 'path')
        # 'filename' is optional; default to None when absent:
        filename = (config.get(name, 'filename')
                    if config.has_option(name, 'filename') else None)
        DATASETS.append(DataSet(name, url, path, filename))
def main(configFile):
    """
    Plot the historical TC tracks over the model domain and save the
    figure to the output plot directory.

    :param str configFile: Path to configuration file.
    """
    from Utilities.loadData import loadTrackFile
    from Utilities.config import ConfigParser
    from os.path import join as pjoin, normpath, dirname

    baseDir = normpath(pjoin(dirname(__file__), '..'))
    inputPath = pjoin(baseDir, 'input')

    config = ConfigParser()
    config.read(configFile)

    inputFile = config.get('DataProcess', 'InputFile')
    source = config.get('DataProcess', 'Source')

    # Build a 0.1-degree mesh over the model domain:
    gridLimit = config.geteval('Region', 'gridLimit')
    xx = np.arange(gridLimit['xMin'], gridLimit['xMax'] + .1, 0.1)
    yy = np.arange(gridLimit['yMin'], gridLimit['yMax'] + .1, 0.1)
    xgrid, ygrid = np.meshgrid(xx, yy)

    # A bare file name is resolved against the model's input directory:
    if not dirname(inputFile):
        inputFile = pjoin(inputPath, inputFile)

    try:
        tracks = loadTrackFile(configFile, inputFile, source)
    except (TypeError, IOError, ValueError):
        log.critical("Cannot load historical track file: {0}".format(inputFile))
        raise

    title = source
    outputPath = pjoin(config.get('Output', 'Path'), 'plots', 'stats')
    outputFile = pjoin(outputPath, 'tctracks.png')

    map_kwargs = dict(llcrnrlon=xgrid.min(), llcrnrlat=ygrid.min(),
                      urcrnrlon=xgrid.max(), urcrnrlat=ygrid.max(),
                      projection='merc', resolution='i')

    figure = TrackMapFigure()
    figure.add(tracks, xgrid, ygrid, title, map_kwargs)
    figure.plot()
    saveFigure(figure, outputFile)
def __init__(self, configFile, progressbar=None):
    """
    Set up the data-processing step from configuration settings.

    :param str configFile: path to a TCRM configuration file.
    :param progressbar: optional progress bar instance.
    """
    # Retained from original (disabled):
    #CalcTD = CalcTrackDomain(configFile)
    #self.domain = CalcTD.calc()
    self.configFile = configFile
    self.progressbar = progressbar
    self.logger = logging.getLogger(__name__)
    self.logger.info("Initialising DataProcess")

    config = ConfigParser()
    config.read(configFile)

    self.outputPath = config.get('Output', 'Path')
    self.processPath = pjoin(self.outputPath, 'process')

    # Determine TCRM input directory
    self.tcrm_input_dir = pjoin(pathLocator.getRootDirectory(), 'input')

    self.landmask = SampleGrid(config.get('Input', 'LandMask'))

    fmt = config.get('Output', 'Format')
    self.ncflag = fmt.startswith("nc")
    if self.ncflag:
        self.logger.debug("Output format is netcdf")
        self.data = {}
        # Sketch of the intended netCDF structure (from original notes):
        # dimensions: records
        # variables (all per-record): init_index, genesis_index,
        #   non_init_index, lon, lat, year, month, day, hour, minute,
        #   julianday, bearing, speed, pressure, lsflag
        # global attributes: description, source_file,
        #   source_file_moddate, landmask_file, version
    elif fmt.startswith("txt"):
        self.logger.debug("Output format is text")
        self.origin_year = pjoin(self.processPath, 'origin_year')