Example #1
File: tcrm.py Project: jmettes/tcrm
def doHazard(configFile):
    """
    Do the hazard calculations (extreme value distribution fitting)
    using the :mod:`hazard` module.

    :param str configFile: Name of configuration file.

    """

    log.info('Running HazardInterface')

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')
    pbar = ProgressBar('Performing hazard calculations: ', showProgressBar)

    def status(done, total):
        pbar.update(float(done)/total)

    import hazard
    hazard.run(configFile)

    log.info('Completed HazardInterface')
    pbar.update(1.0)
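
The hazard step fits an extreme value distribution to simulated wind speed maxima and converts the fit into return period wind speeds. Below is a minimal, self-contained sketch of that idea using a GEV fit from scipy.stats; the sample data and the choice of genextreme are illustrative assumptions, not the actual fitting code in the :mod:`hazard` module.

import numpy as np
from scipy.stats import genextreme

# Hypothetical annual-maximum gust speeds (m/s) at one grid point:
annual_maxima = np.array([28.1, 33.4, 30.2, 41.7, 36.5, 29.8, 38.0, 32.6])

# Fit a GEV distribution to the annual maxima:
shape, loc, scale = genextreme.fit(annual_maxima)

# The T-year return level is the (1 - 1/T) quantile of the fit:
for T in (50, 100, 500):
    rp = genextreme.ppf(1.0 - 1.0 / T, shape, loc=loc, scale=scale)
    print("{0:d}-year return level: {1:.1f} m/s".format(T, rp))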
Example #2
    def historic(self):
        """Load historic data and calculate histogram"""
        config = ConfigParser()
        config.read(self.configFile)
        inputFile = config.get('DataProcess', 'InputFile')
        if len(os.path.dirname(inputFile)) == 0:
            inputFile = pjoin(self.inputPath, inputFile)

        source = config.get('DataProcess', 'Source')

        try:
            tracks = loadTrackFile(self.configFile, inputFile, source)

        except (TypeError, IOError, ValueError):
            log.critical("Cannot load historical track file: {0}".format(inputFile))
            raise
        else:
            startYr = 9999
            endYr = 0
            for t in tracks:
                startYr = min(startYr, min(t.Year))
                endYr = max(endYr, max(t.Year))
            numYears = endYr - startYr
            log.info("Range of years: %d - %d" % (startYr, endYr))
            self.hist = self._calculate(tracks) / numYears
Example #3
def doOutputDirectoryCreation(configFile):
    """
    Create all the necessary output folders.

    :param str configFile: Name of configuration file.
    :raises OSError: If the directory tree cannot be created.

    """

    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')

    log.info('Output will be stored under %s', outputPath)

    subdirs = ['tracks', 'windfield', 'plots', 'plots/timeseries',
               'log', 'process', 'process/timeseries']

    if not isdir(outputPath):
        os.makedirs(outputPath)

    for subdir in subdirs:
        if not isdir(realpath(pjoin(outputPath, subdir))):
            os.makedirs(realpath(pjoin(outputPath, subdir)))
Example #4
def main(configFile):
    """
    Main function to execute the :mod:`wind`.

    :param str configFile: Path to configuration file.

    """
    config = ConfigParser()
    config.read(configFile)
    doOutputDirectoryCreation(configFile)

    trackFile = config.get('DataProcess', 'InputFile')
    source = config.get('DataProcess', 'Source')
    delta = 1/12.
    outputPath = pjoin(config.get('Output', 'Path'), 'tracks')
    outputTrackFile = pjoin(outputPath, "tracks.interp.csv")

    # This will save interpolated track data in TCRM format:
    interpTrack = interpolateTracks.parseTracks(configFile, trackFile,
                                                source, delta,
                                                outputTrackFile,
                                                interpolation_type='akima')

    showProgressBar = config.get('Logging', 'ProgressBar')

    pbar = ProgressBar('Calculating wind fields: ', showProgressBar)

    def status(done, total):
        pbar.update(float(done)/total)

    import wind
    wind.run(configFile, status)

    doWindfieldPlotting(configFile)
    doTimeseriesPlotting(configFile)
Example #5
    def __init__(self, configFile):
        """
        Calculate density of TC positions on a grid

        :param str configFile: path to a TCRM configuration file.
        """

        config = ConfigParser()
        config.read(configFile)
        self.configFile = configFile

        # Define the grid:
        gridLimit = config.geteval('Region', 'gridLimit')
        gridSpace = config.geteval('Region', 'GridSpace')

        self.lon_range = np.arange(gridLimit['xMin'],
                                   gridLimit['xMax'] + 0.1,
                                   gridSpace['x'])
        self.lat_range = np.arange(gridLimit['yMin'],
                                   gridLimit['yMax'] + 0.1,
                                   gridSpace['y'])

        outputPath = config.get('Output', 'Path')
        self.trackPath = pjoin(outputPath, 'tracks')
        self.plotPath = pjoin(outputPath, 'plots', 'stats')
        self.dataPath = pjoin(outputPath, 'process')

        # Determine TCRM input directory
        tcrm_dir = pathLocator.getRootDirectory()
        self.inputPath = pjoin(tcrm_dir, 'input')

        self.synNumYears = config.getint('TrackGenerator',
                                         'yearspersimulation')
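
This constructor relies on config.geteval, which evaluates the option string as a Python expression (Example 27 does the same with a plain eval). A sketch of the [Region] values it expects and the grid they produce; the domain numbers here are made up for illustration.

import numpy as np

# Illustrative equivalents of what the two geteval() calls return when the
# configuration contains, for example:
#   [Region]
#   gridLimit={'xMin':90.,'xMax':180.,'yMin':-30.,'yMax':-5.}
#   gridSpace={'x':1.0,'y':1.0}
gridLimit = {'xMin': 90., 'xMax': 180., 'yMin': -30., 'yMax': -5.}
gridSpace = {'x': 1.0, 'y': 1.0}

lon_range = np.arange(gridLimit['xMin'], gridLimit['xMax'] + 0.1,
                      gridSpace['x'])
lat_range = np.arange(gridLimit['yMin'], gridLimit['yMax'] + 0.1,
                      gridSpace['y'])
print(lon_range.size, lat_range.size)  # 91 26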
Example #6
File: tcrm.py Project: squireg/tcrm
def doDataDownload(configFile):
    """
    Check and download the data files.

    :param str configFile: Name of configuration file.
    
    """
    
    log.info('Checking availability of input data sets')

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')

    for dataset in datasets.DATASETS:
        if not dataset.isDownloaded():
            log.info('Input file %s is not available', dataset.filename)
            try:
                log.info('Attempting to download %s', dataset.filename)

                pbar = ProgressBar('Downloading file %s: ' % dataset.filename,
                                   showProgressBar)

                def status(fn, done, size):
                    pbar.update(float(done)/size)

                dataset.download(status)
                log.info('Download successful')
            except IOError:
                log.error('Unable to download %s. Maybe a proxy problem?',
                          dataset.filename)
                sys.exit(1)
Example #7
def main(configFile):
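    """
    Main function to execute the :mod:`wind`.

    :param str configFile: Path to configuration file.

    """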

    config = ConfigParser()
    config.read(configFile)
    doOutputDirectoryCreation(configFile)
    
    trackFile = config.get('DataProcess', 'InputFile') 
    source = config.get('DataProcess', 'Source')
    delta = 1/12.
    outputPath = pjoin(config.get('Output', 'Path'), 'tracks')
    outputTrackFile = pjoin(outputPath, "tracks.interp.csv")

    # This will save interpolated track data in TCRM format:
    interpTrack = interpolateTracks.parseTracks(configFile, trackFile, source, delta, 
                                                outputTrackFile)
    showProgressBar = config.get('Logging', 'ProgressBar')

    pbar = ProgressBar('Calculating wind fields: ', showProgressBar)

    def status(done, total):
        pbar.update(float(done)/total)

    import wind
    wind.run(configFile, status)

    # FIXME: Add wind field plotting (timeseries plotting is called below)

    doTimeseriesPlotting(configFile)
Example #8
    def historic(self):
        """Load historic data and calculate histogram"""
        config = ConfigParser()
        config.read(self.configFile)
        inputFile = config.get('DataProcess', 'InputFile')
        if len(os.path.dirname(inputFile)) == 0:
            inputFile = pjoin(self.inputPath, inputFile)

        source = config.get('DataProcess', 'Source')

        timestep = config.getfloat('TrackGenerator', 'Timestep')

        interpHistFile = pjoin(self.inputPath, "interp_tracks.csv")
        try:
            tracks = interpolateTracks.parseTracks(self.configFile,
                                                   inputFile,
                                                   source,
                                                   timestep,
                                                   interpHistFile, 'linear')
        except (TypeError, IOError, ValueError):
            log.critical("Cannot load historical track file: {0}".format(inputFile))
            raise
        else:
            startYr = 9999
            endYr = 0
            for t in tracks:
                startYr = min(startYr, min(t.Year))
                endYr = max(endYr, max(t.Year))
            numYears = endYr - startYr
            self.hist = self.calculate(tracks) / numYears
Example #9
    def __init__(self, configFile):
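        """
        :param str configFile: Path to a TCRM configuration file.
        """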

        config = ConfigParser()
        config.read(configFile)
        self.configFile = configFile
        
        outputPath = config.get('Output', 'Path')
        self.trackPath = pjoin(outputPath, 'tracks')
        self.plotPath = pjoin(outputPath, 'plots', 'stats')
        self.dataPath = pjoin(outputPath, 'process')

        # Determine TCRM input directory
        tcrm_dir = pathLocator.getRootDirectory()
        self.inputPath = pjoin(tcrm_dir, 'input')

        self.synNumYears = config.getint('TrackGenerator',
                                         'yearspersimulation')

        try:
            gateFile = config.get('Input', 'CoastlineGates')
        except NoOptionError:
            log.exception(("No coastline gate file specified "
                          "in configuration file"))
            raise
        
        gateData = np.genfromtxt(gateFile, delimiter=',')
        nGates = len(gateData)
        self.gates = Int.convert2vertex(gateData[:, 1], gateData[:, 2])
        self.coast = list(self.gates)
        self.coast.append(self.gates[0])
Example #10
File: tcrm.py Project: squireg/tcrm
def doTrackGeneration(configFile):
    """
    Do the tropical cyclone track generation.

    The track generation settings are read from *configFile*.
    
    :param str configFile: Name of configuration file.
    
    """

    log.info('Starting track generation')

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')

    pbar = ProgressBar('Simulating cyclone tracks: ', showProgressBar)

    def status(done, total):
        pbar.update(float(done)/total)

    import TrackGenerator
    TrackGenerator.run(configFile, status)

    pbar.update(1.0)
    log.info('Completed track generation')
Example #11
    def historic(self):
        """Calculate historical rates of longitude crossing"""

        log.info("Processing historical tracks for longitude crossings")
        config = ConfigParser()
        config.read(self.configFile)
        inputFile = config.get("DataProcess", "InputFile")
        source = config.get("DataProcess", "Source")

        timestep = config.getfloat("TrackGenerator", "Timestep")

        if len(os.path.dirname(inputFile)) == 0:
            inputFile = pjoin(self.inputPath, inputFile)

        try:
            tracks = interpolateTracks.parseTracks(
                self.configFile, inputFile, source, timestep, interpolation_type="linear"
            )
        except (TypeError, IOError, ValueError):
            log.critical("Cannot load historical track file: {0}".format(inputFile))
            raise
        else:
            self.lonCrossingHist, self.lonCrossingEWHist, self.lonCrossingWEHist = self.findCrossings(tracks)

        return
Example #12
def colReadCSV(configFile, dataFile, source):
    """
    Loads a csv file containing 'column' data into a record (numpy)
    array with columns labelled by 'fields'. There must be a section in
    the ``configFile`` named ``source`` that sets out the format of the
    data file.

    :param str configFile: Path to a configuration file that holds details
                           of the input data.
    :param str dataFile: Path to the input file to load.
    :param str source: Name of the source format. There must be a
                       corresponding section in the ``configFile``.

    :returns: A :class:`numpy.ndarray` that contains the input data.
    
    """
    config = ConfigParser()
    config.read(configFile)
    delimiter = config.get(source, 'FieldDelimiter')
    numHeadingLines = config.getint(source, 'NumberOfHeadingLines')
    cols = config.get(source, 'Columns').split(delimiter)

    usecols = [i for i, c in enumerate(cols) if c != 'skip']

    data = np.genfromtxt(dataFile, dtype=None, delimiter=delimiter,
                         usecols=usecols, comments=None,
                         skip_header=numHeadingLines, autostrip=True)

    data.dtype.names = [c for c in cols if c != 'skip']

    return data
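
A self-contained sketch of the behaviour above: the per-source section maps CSV columns to field names, and columns labelled 'skip' are dropped. The file content and column layout are illustrative assumptions.

import io
import numpy as np

csv_text = u"num,date,lat,lon\n1,2006-01-01,-12.5,130.2\n2,2006-01-02,-12.9,129.8\n"
cols = "num,skip,lat,lon".split(",")  # the 'Columns' option split on the delimiter
usecols = [i for i, c in enumerate(cols) if c != 'skip']
names = [c for c in cols if c != 'skip']

data = np.genfromtxt(io.StringIO(csv_text), dtype=None, delimiter=",",
                     usecols=usecols, names=names, skip_header=1,
                     autostrip=True, encoding=None)
print(data['lat'])  # [-12.5 -12.9]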
Example #13
File: tcrm.py Project: squireg/tcrm
def doWindfieldCalculations(configFile):
    """
    Do the wind field calculations. The wind field settings are read
    from *configFile*.

    :param str configFile: Name of configuration file.

    """

    log.info('Starting wind field calculations')

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')

    pbar = ProgressBar('Calculating wind fields: ', showProgressBar)

    def status(done, total):
        pbar.update(float(done)/total)

    import wind
    wind.run(configFile, status)

    pbar.update(1.0)
    log.info('Completed wind field calculations')
Example #14
File: tcrm.py Project: squireg/tcrm
def doHazardPlotting(configFile):
    """
    Do the hazard plots.

    :param str configFile: Name of configuration file.
    
    """
    
    import matplotlib
    matplotlib.use('Agg')  # Use non-interactive matplotlib backend

    config = ConfigParser()
    config.read(configFile)

    log.info('Plotting Hazard Maps')

    showProgressBar = config.get('Logging', 'ProgressBar')
    pbar = ProgressBar('Plotting hazard maps: ', showProgressBar)
    pbar.update(0.0)

    from PlotInterface.AutoPlotHazard import AutoPlotHazard
    plotter = AutoPlotHazard(configFile, progressbar=pbar)
    plotter.plotMap()
    plotter.plotCurves()

    pbar.update(1.0)
Example #15
    def __init__(self, configFile, kdeType, gridLimit, kdeStep, lonLat=None, progressbar=None):
        """
        
        """
        self.logger = logging.getLogger()
        self.progressbar = progressbar
        if self.progressbar:
            KPDF.set_callback(self.updateProgressBar)
        self.logger.info("Initialising KDEOrigins")
        self.configFile = configFile
        self.x = numpy.arange(gridLimit['xMin'], gridLimit['xMax'], kdeStep)
        self.y = numpy.arange(gridLimit['yMax'], gridLimit['yMin'], -kdeStep)

        self.kdeType = kdeType
        self.kdeStep = kdeStep

        config = ConfigParser()
        config.read(configFile)

        if lonLat is None:
            self.outputPath = config.get('Output', 'Path')
            self.processPath = os.path.join(self.outputPath, 'process')
            self.logger.debug("Loading "+os.path.join(self.processPath,
                                                  'init_lon_lat'))
            ll = flLoadFile(os.path.join(self.processPath, 'init_lon_lat'),
                            '%', ',')
            self.lonLat = ll[:,0:2]
        else:
            self.lonLat = lonLat[:,0:2]

        self.bw = KPDF.MPDFOptimumBandwidth(self.lonLat)
        self.logger.debug("Optimal bandwidth: %f"%self.bw)
Example #16
File: tcrm.py Project: jmettes/tcrm
def doHazardPlotting(configFile):
    """
    Do the hazard plots (hazard maps and curves for all locations within
    the model domain). Plotting is performed by the
    :mod:`PlotInterface.AutoPlotHazard` module.

    :param str configFile: Name of configuration file.

    """

    import matplotlib
    matplotlib.use('Agg')  # Use non-interactive matplotlib backend

    config = ConfigParser()
    config.read(configFile)

    log.info('Plotting Hazard Maps')

    showProgressBar = config.get('Logging', 'ProgressBar')
    pbar = ProgressBar('Plotting hazard maps: ', showProgressBar)
    pbar.update(0.0)

    from PlotInterface.AutoPlotHazard import AutoPlotHazard
    plotter = AutoPlotHazard(configFile, progressbar=pbar)
    plotter.plotMap()
    plotter.plotCurves()

    pbar.update(1.0)
Example #17
def startup():
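    """
    Parse command line arguments and call the :func:`main` function.

    """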
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config_file', 
                        help='Path to configuration file')
    parser.add_argument('-v', '--verbose', help='Verbose output', 
                        action='store_true')
    parser.add_argument('-d', '--debug', help='Allow pdb traces',
                        action='store_true')
    args = parser.parse_args()

    configFile = args.config_file
    config = ConfigParser()
    config.read(configFile)

    rootdir = pathLocator.getRootDirectory()
    os.chdir(rootdir)

    logfile = config.get('Logging', 'LogFile')
    logdir = dirname(realpath(logfile))

    # If log file directory does not exist, create it
    if not isdir(logdir):
        try:
            os.makedirs(logdir)
        except OSError:
            logfile = pjoin(os.getcwd(), 'tcrm.log')

    logLevel = config.get('Logging', 'LogLevel')
    verbose = config.getboolean('Logging', 'Verbose')
    debug = False

    if args.verbose:
        verbose = True

    if args.debug:
        debug = True

    flStartLog(logfile, logLevel, verbose)
    # Switch off minor warning messages
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("ignore", category=UserWarning, module="pytz")
    warnings.filterwarnings("ignore", category=UserWarning, module="numpy")
    warnings.filterwarnings("ignore", category=UserWarning,
                            module="matplotlib")
    
    warnings.filterwarnings("ignore", category=RuntimeWarning)
    
    if debug:
        main(configFile)
    else:
        try:
            main(configFile)
        except Exception:  # pylint: disable=W0703
            # Catch any exceptions that occur and log them (nicely):
            tblines = traceback.format_exc().splitlines()
            for line in tblines:
                log.critical(line.lstrip())
Example #18
    def __init__(self, configFile):
        """
        :param str configFile: Path to configuration file.
        """
        config = ConfigParser()
        config.read(configFile)

        self.outputPath = config.get('Output', 'Path')
        self.wf_domain = config.geteval('Region', 'gridLimit')
Example #19
    def __init__(self, configFile, tilegrid, numSim, minRecords, yrsPerSim,
                 calcCI=False):
        """
        Initialise HazardCalculator object.

        :param str configFile: path to TCRM configuration file.
        :param tilegrid: :class:`TileGrid` instance
        :param int numSim: number of simulations created.
        :param int minRecords: minimum number of valid wind speed values required
                               to do fitting.
        :param int yrsPerSim: number of years each simulation represents.
        """
        config = ConfigParser()
        config.read(configFile)

        self.nodata = -9999.
        self.years = np.array(config.get('Hazard',
                                         'Years').split(',')).astype('f')
        self.outputPath = pjoin(config.get('Output', 'Path'), 'hazard')
        self.inputPath = pjoin(config.get('Output', 'Path'), 'windfield')
        gridLimit = config.geteval('Region', 'gridLimit')

        self.numSim = numSim
        self.minRecords = minRecords
        self.yrsPerSim = yrsPerSim
        self.calcCI = calcCI
        if self.calcCI:
            log.debug("Bootstrap confidence intervals will be calculated")
            self.sample_size = config.getint('Hazard', 'SampleSize')
            self.prange = config.getint('Hazard', 'PercentileRange')

        self.tilegrid = tilegrid
        lon, lat = self.tilegrid.getDomainExtent()

        # Create arrays for storing output data:
        self.loc = np.zeros((len(lat), len(lon)), dtype='f')
        self.shp = np.zeros((len(lat), len(lon)), dtype='f')
        self.scale = np.zeros((len(lat), len(lon)), dtype='f')
        self.Rp = np.zeros((len(self.years), len(lat), len(lon)), dtype='f')

        self.RPupper = np.zeros((len(self.years), len(lat), len(lon)), dtype='f')
        self.RPlower = np.zeros((len(self.years), len(lat), len(lon)), dtype='f')

        self.global_atts = {'history': ('TCRM hazard simulation - '
                            'return period wind speeds'),
                            'version': flProgramVersion(),
                            'Python_ver': sys.version}


        # Add configuration settings to global attributes:
        for section in config.sections():
            for option in config.options(section):
                key = "{0}_{1}".format(section, option)
                value = config.get(section, option)
                self.global_atts[key] = value
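
As a side note on the 'Years' option parsed above: with an illustrative setting of Years=5,10,20,50,100 in the [Hazard] section, the parsing yields an array of return periods.

import numpy as np

years = np.array('5,10,20,50,100'.split(',')).astype('f')
print(years)  # a float array: 5., 10., 20., 50., 100.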
Example #20
def startup():
    """
    Parse command line arguments and call the :func:`main` function.

    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config_file',
                        help='Path to configuration file')
    parser.add_argument('-v', '--verbose', help='Verbose output',
                        action='store_true')
    parser.add_argument('-d', '--debug', help='Allow pdb traces',
                        action='store_true')
    args = parser.parse_args()
    config_file = args.config_file
    config = ConfigParser()
    config.read(config_file)

    rootdir = pathLocator.getRootDirectory()
    os.chdir(rootdir)

    logfile = config.get('Logging', 'LogFile')
    logdir = dirname(realpath(logfile))
    # If log file directory does not exist, create it
    if not isdir(logdir):
        try:
            os.makedirs(logdir)
        except OSError:
            logfile = pjoin(os.getcwd(), 'tsmultipliers.log')

    logLevel = config.get('Logging', 'LogLevel')
    verbose = config.getboolean('Logging', 'Verbose')
    datestamp = config.getboolean('Logging', 'Datestamp')
    debug = False

    if args.verbose:
        verbose = True

    if args.debug:
        debug = True

    flStartLog(logfile, logLevel, verbose, datestamp)
    
    if debug:
        process_timeseries(config_file)
    else:
        try:
            process_timeseries(config_file)
        except Exception:  # pylint: disable=W0703
            # Catch any exceptions that occur and log them (nicely):
            tblines = traceback.format_exc().splitlines()
            for line in tblines:
                log.critical(line.lstrip())
    
    log.info("Completed {0}".format(sys.argv[0]))
Example #21
def run(configFile, callback=None):
    """
    Run the hazard calculations.

    This will attempt to run the calculation in parallel by tiling the
    domain, but also provides a sane fallback mechanism to execute
    in serial.

    :param str configFile: Path to configuration file.

    """

    log.info("Loading hazard calculation settings")

    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')
    inputPath = pjoin(outputPath, 'windfield')
    gridLimit = config.geteval('Region', 'gridLimit')
    numsimulations = config.getint('TrackGenerator', 'NumSimulations')
    yrsPerSim = config.getint('TrackGenerator', 'YearsPerSimulation')
    minRecords = config.getint('Hazard', 'MinimumRecords')
    calculate_confidence = config.getboolean('Hazard', 'CalculateCI')

    wf_lon, wf_lat = setDomain(inputPath)

    global pp
    pp = attemptParallel()

    log.info("Running hazard calculations")
    TG = TileGrid(gridLimit, wf_lon, wf_lat)
    tiles = getTiles(TG)

    #def progress(i):
    #    callback(i, len(tiles))

    pp.barrier()
    hc = HazardCalculator(configFile, TG,
                          numsimulations,
                          minRecords,
                          yrsPerSim,
                          calculate_confidence)

    hc.dumpHazardFromTiles(tiles)

    pp.barrier()

    hc.saveHazard()

    log.info("Completed hazard calculation")
Example #22
def doDataDownload(configFile):
    """
    Check and download the data files listed in the configuration file.
    Datasets are listed in the `Input` section of the configuration
    file, with the option `Datasets`. There must also be a corresponding
    section in the configuration file that includes the URL, the path where
    the dataset will be stored and the filename it will be stored under, e.g.::

        [Input]
        Datasets=IBTRACS

        [IBTRACS]
        URL=ftp://eclipse.ncdc.noaa.gov/pub/ibtracs/v03r05/wmo/csv/Allstorms.ibtracs_wmo.v03r05.csv.gz
        filename=Allstorms.ibtracs_wmo.v03r05.csv
        path=input

    This will attempt to download the gzipped csv file from the given URL
    and save it to the given filename, in the 'input' folder under the current
    directory. Gzipped files are automatically unzipped. 

    
    :param str configFile: Name of configuration file.
    :raises IOError: If the data cannot be downloaded.
    

    """

    log.info('Checking availability of input data sets')

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')

    datasets.loadDatasets(configFile)
    for dataset in datasets.DATASETS:
        if not dataset.isDownloaded():
            log.info('Input file %s is not available', dataset.filename)
            try:
                log.info('Attempting to download %s', dataset.filename)

                pbar = ProgressBar('Downloading file %s: ' % dataset.filename,
                                   showProgressBar)

                def status(fn, done, size):
                    pbar.update(float(done)/size)

                dataset.download(status)
                log.info('Download successful')
            except IOError:
                log.error('Unable to download %s. Maybe a proxy problem?',
                          dataset.filename)
                sys.exit(1)
Example #23
def doTimeseriesPlotting(configFile):
    """
    Run functions to plot time series output
    """
    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')
    timeseriesPath = pjoin(outputPath, 'process', 'timeseries')
    plotPath = pjoin(outputPath, 'plots', 'timeseries')
    log.info("Plotting time series data to %s" % plotPath)
    from PlotInterface.plotTimeseries import plotTimeseries
    plotTimeseries(timeseriesPath, plotPath)
Example #24
def doWindfieldPlotting(configFile):
    """
    Plot the wind field on a map.

    :param str configFile: Path to the configuration file.
    
    :Note: the file name is assumed to be 'gust.interp.nc'

    """
    from netCDF4 import Dataset
    import numpy as np
    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')
    windfieldPath = pjoin(outputPath, 'windfield')

    # Note the assumption about the file name!
    outputWindFile = pjoin(windfieldPath, 'gust.interp.nc')
    plotPath = pjoin(outputPath, 'plots', 'maxwind.png')

    f = Dataset(outputWindFile, 'r')

    xdata = f.variables['lon'][:]
    ydata = f.variables['lat'][:]

    vdata = f.variables['vmax'][:]
    
    gridLimit = None
    if config.has_option('Region', 'gridLimit'):
        gridLimit = config.geteval('Region', 'gridLimit')
        ii = np.where((xdata >= gridLimit['xMin']) &
                      (xdata <= gridLimit['xMax']))
        jj = np.where((ydata >= gridLimit['yMin']) &
                      (ydata <= gridLimit['yMax']))
        [xgrid, ygrid] = np.meshgrid(xdata[ii], ydata[jj])
        ig, jg = np.meshgrid(ii, jj)
        vdata = vdata[jg, ig]
    else:
        [xgrid, ygrid] = np.meshgrid(xdata, ydata)
    map_kwargs = dict(llcrnrlon=xgrid.min(),
                      llcrnrlat=ygrid.min(),
                      urcrnrlon=xgrid.max(),
                      urcrnrlat=ygrid.max(),
                      projection='merc',
                      resolution='i')
    title = "Maximum wind speed"
    cbarlabel = "Wind speed ({0})".format(f.variables['vmax'].units)
    levels = np.arange(30, 101., 5.)
    saveWindfieldMap(vdata, xgrid, ygrid, title, levels, 
                     cbarlabel, map_kwargs, plotPath)
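
The gridLimit branch above subsets the wind field with index meshgrids. A small self-contained illustration of the same pattern, with made-up values:

import numpy as np

xdata = np.arange(100., 160., 10.)  # 100, 110, ..., 150
ydata = np.arange(-30., 0., 10.)    # -30, -20, -10
vdata = np.arange(xdata.size * ydata.size).reshape(ydata.size, xdata.size)

ii = np.where((xdata >= 110.) & (xdata <= 140.))
jj = np.where((ydata >= -20.) & (ydata <= -10.))
ig, jg = np.meshgrid(ii, jj)
print(vdata[jg, ig].shape)  # (2, 4): 2 latitudes by 4 longitudes retained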
Example #25
    def __init__(self, configFile, progressbar=None):
        """
        Initialise the data processing instance, the NaN value and the
        full path names of the files in which data will be stored.
        """
        #CalcTD = CalcTrackDomain(configFile)
        #self.domain = CalcTD.calc()
        self.configFile = configFile
        self.progressbar = progressbar
        self.logger = logging.getLogger(__name__)
        self.logger.info("Initialising DataProcess")

        config = ConfigParser()
        config.read(configFile)

        self.outputPath = config.get('Output', 'Path')
        self.processPath = pjoin(self.outputPath, 'process')

        # Determine TCRM input directory
        tcrm_dir = pathLocator.getRootDirectory()
        self.tcrm_input_dir = pjoin(tcrm_dir, 'input')

        landmask = config.get('Input', 'LandMask')

        self.landmask = SampleGrid(landmask)

        fmt = config.get('Output', 'Format')

        self.ncflag = False
        if fmt.startswith("nc"):
            self.logger.debug("Output format is netcdf")
            self.ncflag = True
            self.data = {}
            #dimensions = {records}
            # variables = {init_index(records),
            #             genesis_index(records),
            #             non_init_index(records),
            #             lon(records), lat(records),
            #             year(records), month(records),
            #             day(records), hour(records),
            #             minute(records), julianday(records),
            #             bearing(records), speed(records),
            #             pressure(records), lsflag(records), }
            # global_attributes = dict(description=
            #                         source_file=,
            #                         source_file_moddate,
            #                         landmask_file=,
            #                         version=,)
        elif fmt.startswith("txt"):
            self.logger.debug("Output format is text")
            self.origin_year = pjoin(self.processPath, 'origin_year')
Example #26
    def __init__(self, configFile):
        """
        Read configuration settings, load station data and set up
        output recarrays.
        
        :param str configFile: path to a configuration file.
        """

        config = ConfigParser()
        config.read(configFile)

        self.meta = False

        stnFile = config.get('Timeseries', 'StationFile')
        self.outputPath = pjoin(config.get('Output', 'Path'),
                                'process', 'timeseries')
        self.maxfile = pjoin(config.get('Output', 'Path'),
                             'process', 'maxima.csv')
        self.minfile = pjoin(config.get('Output', 'Path'),
                             'process', 'minima.csv')

        log.debug("Loading stations from %s"%stnFile)
        log.debug("Timeseries data will be written into %s"%self.outputPath)
        self.stations = []
        if stnFile.endswith("shp"):
            try:
                key_name = config.get('Timeseries', 'StationID')
            except NoOptionError:
                key_name = None

            vertices = shpGetVertices(stnFile, key_name=key_name)

            for stn in vertices.keys():
                self.stations.append(Station(stn, vertices[stn][0][0],
                                             vertices[stn][0][1]))
        else:
            stndata = flLoadFile(stnFile, delimiter=',')
            # If there are more than 3 columns, save the additional 
            # columns as 'metadata'
            if stndata.shape[1] > 3:
                self.metadata = stndata[:, 3:]
                self.meta = True
            stnid = stndata[:, 0]
            stnlon = stndata[:, 1].astype(float)
            stnlat = stndata[:, 2].astype(float)
            for sid, lon, lat in zip(stnid, stnlon, stnlat):
                self.stations.append(Station(sid, lon, lat))
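
The CSV branch above expects the station id, longitude and latitude in the first three columns, with any further columns kept as metadata. An illustrative station file (the names and coordinates are made up):

ST001,130.25,-12.75,Hypothetical Station A
ST002,147.22,-19.25,Hypothetical Station B

Columns 0-2 become each Station's id, lon and lat; columns 3 onwards are stored in self.metadata and set self.meta to True.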
Example #27
    def __init__(self, configFile, autoCalc_gridLimit=None,
                 progressbar=None):
        """
        Initialize the data and variables required for the interface
        """
        self.configFile = configFile
        config = ConfigParser()
        config.read(configFile)
        self.progressbar = progressbar

        log.info("Initialising StatInterface")

        self.kdeType = config.get('StatInterface', 'kdeType')
        self.kde2DType = config.get('StatInterface', 'kde2DType')
        minSamplesCell = config.getint('StatInterface', 'minSamplesCell')
        self.kdeStep = config.getfloat('StatInterface', 'kdeStep')
        self.outputPath = config.get('Output', 'Path')
        self.processPath = pjoin(self.outputPath, 'process')

        missingValue = cnfGetIniValue(self.configFile, 'StatInterface',
                                      'MissingValue', sys.maxint)

        gridLimitStr = cnfGetIniValue(self.configFile, 'StatInterface',
                                      'gridLimit', '')

        if gridLimitStr != '':
            try:
                self.gridLimit = eval(gridLimitStr)
            except SyntaxError:
                log.exception('Error! gridLimit is not a dictionary')
        else:
            self.gridLimit = autoCalc_gridLimit
            log.info('No gridLimit specified - using automatic' +
                     ' selection: ' + str(self.gridLimit))

        try:
            gridSpace = config.geteval('Region', 'gridSpace')
            gridInc = config.geteval('Region', 'gridInc')
        except SyntaxError:
            log.exception('Error! gridSpace or gridInc not dictionaries')
            raise

        self.generateDist = GenerateDistributions(self.configFile,
                                                  self.gridLimit,
                                                  gridSpace, gridInc,
                                                  self.kdeType,
                                                  minSamplesCell,
                                                  missingValue)
        self.gridSpace = gridSpace
        self.gridInc = gridInc
Example #28
File: tcrm.py Project: squireg/tcrm
def doStatistics(configFile):
    """
    Calibrate the model.

    :param str configFile: Name of configuration file.
    
    """
    from DataProcess.CalcTrackDomain import CalcTrackDomain

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')
    getRMWDistFromInputData = config.getboolean('RMW',
                                                'GetRMWDistFromInputData')

    log.info('Running StatInterface')
    pbar = ProgressBar('Calibrating model: ', showProgressBar)
    
    # Auto-calculate track generator domain
    CalcTD = CalcTrackDomain(configFile)
    domain = CalcTD.calcDomainFromFile()

    pbar.update(0.05)

    from StatInterface import StatInterface
    statInterface = StatInterface.StatInterface(configFile,
                                                autoCalc_gridLimit=domain)
    statInterface.kdeGenesisDate()
    pbar.update(0.4)

    statInterface.kdeOrigin()
    pbar.update(0.5)

    statInterface.cdfCellBearing()
    pbar.update(0.6)

    statInterface.cdfCellSpeed()
    pbar.update(0.7)

    statInterface.cdfCellPressure()
    pbar.update(0.8)

    statInterface.calcCellStatistics()

    if getRMWDistFromInputData:
        statInterface.cdfCellSize()

    pbar.update(1.0)
    log.info('Completed StatInterface')
Example #29
    def __init__(self, configFile, dt):
        """
        Initialise required fields
        """
        self.configFile = configFile

        config = ConfigParser()
        config.read(configFile)

        landMaskFile = config.get('Input', 'LandMask')

        self.landMask = SampleGrid(landMaskFile)
        self.tol = 0 # Time over land
        self.dt = dt
Example #30
    def __init__(self, configFile):
        """
        Calculate density of TC genesis positions on a grid

        :param str configFile: path to a TCRM configuration file.
        """

        config = ConfigParser()
        config.read(configFile)
        self.configFile = configFile

        # Define the grid:
        gridLimit = config.geteval('Region', 'gridLimit')
        gridSpace = config.geteval('Region', 'GridSpace')

        self.lon_range = np.arange(gridLimit['xMin'],
                                   gridLimit['xMax'] + 0.1,
                                   0.1)
        self.lat_range = np.arange(gridLimit['yMin'],
                                   gridLimit['yMax'] + 0.1,
                                   0.1)

        self.X, self.Y = np.meshgrid(self.lon_range, self.lat_range)

        outputPath = config.get('Output', 'Path')
        self.trackPath = pjoin(outputPath, 'tracks')
        self.plotPath = pjoin(outputPath, 'plots', 'stats')
        self.dataPath = pjoin(outputPath, 'process')

        # Determine TCRM input directory
        tcrm_dir = pathLocator.getRootDirectory()
        self.inputPath = pjoin(tcrm_dir, 'input')

        self.synNumYears = config.getint('TrackGenerator',
                                         'yearspersimulation')

        cellnumber = 0
        self.gridCells = []
        for k in xrange(len(self.lon_range) - 1):
            for l in xrange(len(self.lat_range) - 1):
                ymin = self.lat_range[l]
                ymax = self.lat_range[l] + gridSpace['y']
                xmin = self.lon_range[k]
                xmax = self.lon_range[k] + gridSpace['x']
                self.gridCells.append(gridCell(xmin, ymin, xmax, ymax,
                                               cellnumber, (k, l)))
                cellnumber += 1
Example #31
    def __init__(self, configFile, progressbar=None):
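        """
        :param str configFile: Path to a TCRM configuration file.
        :param progressbar: Optional progress bar instance.
        """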

        config = ConfigParser()
        config.read(configFile)

        outputPath = config.get('Output', 'Path')

        try:
            self.localityID = config.get('Region', 'LocalityID')
        except Exception:
            self.localityID = -999999

        self.inputFile = pjoin(outputPath, 'hazard', 'hazard.nc')
        self.plotPath = pjoin(outputPath, 'plots', 'hazard')
        self.plotUnits = PlotUnits(config.get('Hazard', 'PlotSpeedUnits'))
        self.ciBounds = config.getboolean('Hazard', 'CalculateCI')
        self.fit = config.get('Hazard', 'ExtremeValueDistribution')
        self.numsimulations = config.getint("TrackGenerator", "NumSimulations")

        self.progressbar = progressbar

        self.db = database.HazardDatabase(configFile)
Example #32
def run(configFile):
    """
    Run database update

    :param str configFile: path to a configuration file.

    """

    log.info('Running database update')

    config = ConfigParser()
    config.read(configFile)
    outputPath = config.get('Output', 'Path')
    location_db = pjoin(outputPath, 'locations.db')
    if not os.path.exists(location_db):
        location_file = config.get('Input', 'LocationFile')
        buildLocationDatabase(location_db, location_file)

    global pp
    pp = attemptParallel()

    db = HazardDatabase(configFile)

    db.createDatabase()
    db.setLocations()

    pp.barrier()
    db.processEvents()
    pp.barrier()

    db.processHazard()

    pp.barrier()
    db.processTracks()
    pp.barrier()

    db.close()
    log.info("Created and populated database")
    log.info("Finished running database creation")
Example #33
    def historic(self):
        """Load historic data and calculate histogram"""
        log.info("Processing historical pressure distributions")
        config = ConfigParser()
        config.read(self.configFile)
        inputFile = config.get('DataProcess', 'InputFile')
        source = config.get('DataProcess', 'Source')

        if len(os.path.dirname(inputFile)) == 0:
            inputFile = pjoin(self.inputPath, inputFile)

        try:
            tracks = loadTrackFile(self.configFile, inputFile, source)
        except (TypeError, IOError, ValueError):
            log.critical(
                "Cannot load historical track file: {0}".format(inputFile))
            raise
        else:
            self.histMean, self.histMin, \
                self.histMax, self.histMed = self.calculate(tracks)

            self.histMinCPDist, self.histMinCP = self.calcMinPressure(tracks)
Example #34
def loadDatasets(configFile):
    """
    Load the details of the datasets to be downloaded from the
    configuration settings. This updates the :data:`DATASETS`
    list.

    """

    config = ConfigParser()
    config.read(configFile)
    datasets = config.get('Input', 'Datasets').split(',')

    global DATASETS
    for dataset in datasets:
        url = config.get(dataset, 'URL')
        path = config.get(dataset, 'path')
        if config.has_option(dataset, 'filename'):
            filename = config.get(dataset, 'filename')
        else:
            filename = None
        data = DataSet(dataset, url, path, filename)
        DATASETS.append(data)
Example #35
def main(configFile):
    """
    Main function to execute the :mod:`wind`.

    :param str configFile: Path to configuration file.

    """
    config = ConfigParser()
    config.read(configFile)
    doOutputDirectoryCreation(configFile)

    trackFile = config.get('DataProcess', 'InputFile')
    source = config.get('DataProcess', 'Source')
    delta = 1 / 12.
    outputPath = pjoin(config.get('Output', 'Path'), 'tracks')
    outputTrackFile = pjoin(outputPath, "tracks.interp.nc")

    # This will save interpolated track data in TCRM format:
    interpTrack = interpolateTracks.parseTracks(configFile,
                                                trackFile,
                                                source,
                                                delta,
                                                outputTrackFile,
                                                interpolation_type='akima')

    showProgressBar = config.get('Logging', 'ProgressBar')

    pbar = ProgressBar('Calculating wind fields: ', showProgressBar)

    def status(done, total):
        pbar.update(float(done) / total)

    import wind
    wind.run(configFile, status)

    doWindfieldPlotting(configFile)
    if config.getboolean('Timeseries', 'Extract'):
        doTimeseriesPlotting(configFile)
Example #36
def colReadCSV(configFile, dataFile, source):
    """
    Loads a csv file containing 'column' data into a record (numpy)
    array with columns labelled by 'fields'. There must be a section in
    the ``configFile`` named ``source`` that sets out the format of the
    data file.

    :param str configFile: Path to a configuration file that holds details
                           of the input data.
    :param str dataFile: Path to the input file to load.
    :param str source: Name of the source format. There must be a
                       corresponding section in the ``configFile``.

    :returns: A :class:`numpy.ndarray` that contains the input data.

    """
    config = ConfigParser()
    config.read(configFile)
    delimiter = config.get(source, 'FieldDelimiter')
    numHeadingLines = config.getint(source, 'NumberOfHeadingLines')
    cols = config.get(source, 'Columns').split(delimiter)

    usecols = [i for i, c in enumerate(cols) if c != 'skip']
    names = [c for c in cols if c != 'skip']
    try:
        data = np.genfromtxt(dataFile, dtype=None, delimiter=delimiter,
                             usecols=usecols, names=names, skip_header=numHeadingLines,
                             autostrip=True, encoding=None)
    except IOError:
        log.exception("File not found: {0}".format(dataFile))
        raise IOError("File not found: {0}".format(dataFile))
    except TypeError:
        log.exception(("{0} is not a string, filehandle "
                       "or generator.").format(dataFile))
        raise TypeError(("{0} is not a string, filehandle "
                         "or generator.").format(dataFile))

    return data
Example #37
    def __init__(self, configFile, auto_calc_grid_limit):
        """
        :type  configFile: string
        :param configFile: Configuration file name

        :type  auto_calc_grid_limit: :class:`dict`
        :param auto_calc_grid_limit: the domain where the frequency will be calculated.
                                     The :class:`dict` should contain the keys
                                     :attr:`xMin`, :attr:`xMax`, :attr:`yMin`
                                     and :attr:`yMax`. The *x*  variable bounds the
                                     longitude and the *y* variable bounds
                                     the latitude.
        """

        config = ConfigParser()
        config.read(configFile)

        if config.has_option('TrackGenerator', 'gridLimit'):
            self.tg_domain = config.geteval('TrackGenerator', 'gridLimit')
        else:
            self.tg_domain = auto_calc_grid_limit

        self.outputPath = config.get('Output', 'Path')
Example #38
    def historic(self):
        """Calculate historical rates of landfall"""

        LOG.info("Processing landfall rates of historical tracks")
        config = ConfigParser()
        config.read(self.configFile)
        inputFile = config.get('DataProcess', 'InputFile')
        source = config.get('DataProcess', 'Source')

        if len(os.path.dirname(inputFile)) == 0:
            inputFile = pjoin(self.inputPath, inputFile)

        try:
            tracks = loadTrackFile(self.configFile, inputFile, source)
        except (TypeError, IOError, ValueError):
            LOG.critical("Cannot load historical track file: {0}".\
                         format(inputFile))
            raise
        else:
            self.historicLandfall, self.historicOffshore = \
                                        self.processTracks(tracks)

        return
Example #39
def doDataProcessing(configFile):
    """
    Parse the input data and turn it into the necessary format
    for the model calibration step, using the :mod:`DataProcess` module.

    :param str configFile: Name of configuration file.

    """

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')

    pbar = ProgressBar('Processing data files: ', showProgressBar)

    log.info('Running Data Processing')

    from DataProcess.DataProcess import DataProcess
    dataProcess = DataProcess(configFile, progressbar=pbar)
    dataProcess.processData()

    log.info('Completed Data Processing')
    pbar.update(1.0)
Example #40
    def processData(self, restrictToWindfieldDomain=False):
        """
        Process raw data into ASCII files that can be read by the main
        components of the system

        :param bool restrictToWindfieldDomain: if True, only process data
            within the wind field domain, otherwise, process data from
            across the track generation domain.

        """
        config = ConfigParser()
        config.read(self.configFile)

        self.logger.info("Running {0}".format(flModuleName()))

        if config.has_option('DataProcess', 'InputFile'):
            inputFile = config.get('DataProcess', 'InputFile')

        if config.has_option('DataProcess', 'Source'):
            source = config.get('DataProcess', 'Source')
            self.logger.info('Loading %s dataset', source)
            fn = config.get(source, 'filename')
            path = config.get(source, 'path')
            inputFile = pjoin(path, fn)

        # If input file has no path information, default to tcrm input folder
        if len(os.path.dirname(inputFile)) == 0:
            inputFile = pjoin(self.tcrm_input_dir, inputFile)

        self.logger.info("Processing {0}".format(inputFile))

        self.source = config.get('DataProcess', 'Source')

        inputData = colReadCSV(self.configFile, inputFile, self.source)

        inputSpeedUnits = config.get(self.source, 'SpeedUnits')
        inputPressureUnits = config.get(self.source, 'PressureUnits')
        inputLengthUnits = config.get(self.source, 'LengthUnits')
        startSeason = config.getint('DataProcess', 'StartSeason')

        indicator = loadData.getInitialPositions(inputData)
        lat = np.array(inputData['lat'], 'd')
        lon = np.mod(np.array(inputData['lon'], 'd'), 360)

        if restrictToWindfieldDomain:
            # Filter the input arrays to only retain the tracks that
            # pass through the windfield domain.
            CD = CalcTrackDomain(self.configFile)
            self.domain = CD.calcDomainFromTracks(indicator, lon, lat)
            domainIndex = self.extractTracks(indicator, lon, lat)
            inputData = inputData[domainIndex]
            indicator = indicator[domainIndex]
            lon = lon[domainIndex]
            lat = lat[domainIndex]

        if self.progressbar is not None:
            self.progressbar.update(0.125)

        # Sort date/time information
        try:
            dt = np.empty(indicator.size, 'f')
            dt[1:] = np.diff(inputData['age'])
        except (ValueError, KeyError):

            try:
                self.logger.info(("Filtering input data by season:"
                                  "season > {0}".format(startSeason)))
                # Find indicies that satisfy minimum season filter
                idx = np.where(inputData['season'] >= startSeason)[0]
                # Filter records:
                inputData = inputData[idx]
                indicator = indicator[idx]
                lon = lon[idx]
                lat = lat[idx]
            except (ValueError, KeyError):
                pass

            year, month, day, hour, minute, datetimes \
                = loadData.parseDates(inputData, indicator)

            # Time between observations:
            dt = loadData.getTimeDelta(year, month, day, hour, minute)

            # Calculate julian days:
            jdays = loadData.julianDays(year, month, day, hour, minute)

        delta_lon = np.diff(lon)
        delta_lat = np.diff(lat)

        # Split into separate tracks if large jump occurs (delta_lon >
        # 15 degrees or delta_lat > 5 degrees) This avoids two tracks
        # being accidentally combined when seasons and track numbers
        # match but basins are different as occurs in the IBTrACS
        # dataset.  This problem can also be prevented if the
        # 'tcserialno' column is specified.
        indicator[np.where(delta_lon > 15)[0] + 1] = 1
        indicator[np.where(delta_lat > 5)[0] + 1] = 1

        # Save information required for frequency auto-calculation
        try:
            origin_seasonOrYear = np.array(inputData['season'],
                                           'i').compress(indicator)
            header = 'Season'
        except (ValueError, KeyError):
            origin_seasonOrYear = year.compress(indicator)
            header = 'Year'

        flSaveFile(self.origin_year,
                   np.transpose(origin_seasonOrYear),
                   header,
                   ',',
                   fmt='%d')

        pressure = np.array(inputData['pressure'], 'd')
        novalue_index = np.where(pressure == sys.maxint)
        pressure = metutils.convert(pressure, inputPressureUnits, "hPa")
        pressure[novalue_index] = sys.maxint

        # Convert any non-physical central pressure values to maximum integer
        # This is required because IBTrACS has a mix of missing value codes
        # (i.e. -999, 0, 9999) in the same global dataset.
        pressure = np.where((pressure < 600) | (pressure > 1100), sys.maxint,
                            pressure)

        if self.progressbar is not None:
            self.progressbar.update(0.25)

        try:
            vmax = np.array(inputData['vmax'], 'd')
        except (ValueError, KeyError):
            self.logger.warning("No max wind speed data")
            vmax = np.empty(indicator.size, 'f')
        else:
            novalue_index = np.where(vmax == sys.maxint)
            vmax = metutils.convert(vmax, inputSpeedUnits, "mps")
            vmax[novalue_index] = sys.maxint

        assert lat.size == indicator.size
        assert lon.size == indicator.size
        assert pressure.size == indicator.size
        #assert vmax.size == indicator.size

        try:
            rmax = np.array(inputData['rmax'])
            novalue_index = np.where(rmax == sys.maxint)
            rmax = metutils.convert(rmax, inputLengthUnits, "km")
            rmax[novalue_index] = sys.maxint

            self._rmax(rmax, indicator)
            self._rmaxRate(rmax, dt, indicator)
        except (ValueError, KeyError):
            self.logger.warning("No rmax data available")

        if self.ncflag:
            self.data['index'] = indicator

        # ieast : parameter used in latLon2Azi
        # FIXME: should be a config setting describing the input data.
        ieast = 1

        # Determine the index of initial cyclone observations, excluding
        # those cyclones that have only one observation. This is used
        # for calculating initial bearing and speed
        indicator2 = np.where(indicator > 0, 1, 0)
        initIndex = np.concatenate(
            [np.where(np.diff(indicator2) == -1, 1, 0), [0]])

        # Calculate the bearing and distance (km) of every two
        # consecutive records using ll2azi
        bear_, dist_ = maputils.latLon2Azi(lat, lon, ieast, azimuth=0)
        assert bear_.size == indicator.size - 1
        assert dist_.size == indicator.size - 1
        bear = np.empty(indicator.size, 'f')
        bear[1:] = bear_
        dist = np.empty(indicator.size, 'f')
        dist[1:] = dist_

        self._lonLat(lon, lat, indicator, initIndex)
        self._bearing(bear, indicator, initIndex)
        self._bearingRate(bear, dt, indicator)
        if self.progressbar is not None:
            self.progressbar.update(0.375)
        self._speed(dist, dt, indicator, initIndex)
        self._speedRate(dist, dt, indicator)
        self._pressure(pressure, indicator)
        self._pressureRate(pressure, dt, indicator)
        self._windSpeed(vmax)

        try:
            self._frequency(year, indicator)
            self._juliandays(jdays, indicator, year)
        except (ValueError, KeyError):
            pass

        self.logger.info("Completed {0}".format(flModuleName()))
        if self.progressbar is not None:
            self.progressbar.update(0.5)
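
A small self-contained illustration of the track-splitting rule above: indicator == 1 marks the start of a track, and a jump of more than 15 degrees of longitude (or 5 degrees of latitude) between consecutive records forces a new track to begin. The coordinates are made up.

import numpy as np

lon = np.array([150.0, 151.2, 152.1, 280.5, 281.0])  # large jump at index 3
lat = np.array([-12.0, -12.5, -13.1, 14.0, 14.4])
indicator = np.array([1, 0, 0, 0, 0])

delta_lon = np.diff(lon)
delta_lat = np.diff(lat)
indicator[np.where(delta_lon > 15)[0] + 1] = 1
indicator[np.where(delta_lat > 5)[0] + 1] = 1
print(indicator)  # [1 0 0 1 0] -- the record after the jump starts a new track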
Example #41
if __name__ == "__main__":
    try:
        configFile = sys.argv[1]
    except IndexError:
        # Try loading config file with same name as python script
        configFile = os.path.splitext(__file__)[0] + '.ini'
        # If no filename is specified and default filename does not exist =>
        # raise error
        if not os.path.exists(configFile):
            error_msg = ("No configuration file specified, please type: "
                         "python tcrm.py -c {config filename}.ini")
            raise IOError(error_msg)
    # If config file does not exist => raise error
    if not os.path.exists(configFile):
        error_msg = "Configuration file '" + configFile + "' not found"
        raise IOError(error_msg)

    config = ConfigParser()
    config.read(configFile)

    logFile = config.get('Logging', 'LogFile')
    logLevel = config.get('Logging', 'LogLevel')
    verbose = config.getboolean('Logging', 'Verbose')

    flStartLog(logFile, logLevel, verbose)

    dp = DataProcess(configFile)
    dp.processData()
    logging.shutdown()
Example #42
def startup():
    """
    Parse command line arguments, set up logging and attempt
    to execute the main TCRM functions.

    """

    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config_file', help='The configuration file')
    parser.add_argument('-v', '--verbose', help='Verbose output',
                        action='store_true')
    parser.add_argument('-d', '--debug', help='Allow pdb traces',
                        action='store_true')
    args = parser.parse_args()

    configFile = args.config_file

    rootdir = pathLocator.getRootDirectory()
    os.chdir(rootdir)

    config = ConfigParser()
    config.read(configFile)

    logfile = config.get('Logging', 'LogFile')
    logdir = dirname(realpath(logfile))

    # If log file directory does not exist, create it
    if not isdir(logdir):
        try:
            os.makedirs(logdir)
        except OSError:
            logfile = pjoin(os.getcwd(), 'tcrm.log')

    logLevel = config.get('Logging', 'LogLevel')
    verbose = config.getboolean('Logging', 'Verbose')
    datestamp = config.getboolean('Logging', 'Datestamp')
    debug = False

    if args.verbose:
        verbose = True

    if args.debug:
        debug = True

    global MPI, comm
    MPI = attemptParallel()
    import atexit
    atexit.register(MPI.Finalize)
    comm = MPI.COMM_WORLD
    if comm.size > 1 and comm.rank > 0:
        logfile += '-' + str(comm.rank)
        verbose = False  # to stop output to console
    else:
        pass

    flStartLog(logfile, logLevel, verbose, datestamp)

    # Switch off minor warning messages
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("ignore", category=UserWarning, module="pytz")
    warnings.filterwarnings("ignore", category=UserWarning, module="numpy")
    warnings.filterwarnings("ignore", category=UserWarning,
                            module="matplotlib")

    warnings.filterwarnings("ignore", category=RuntimeWarning)
    checkModules()

    if debug:
        main(configFile)
    else:
        try:
            main(configFile)
        except ImportError as e:
            log.critical("Missing module: {0}".format(e))
            tblines = traceback.format_exc().splitlines()
            for line in tblines:
                log.critical(line.lstrip())
        except Exception:  # pylint: disable=W0703
            # Catch any exceptions that occur and log them (nicely):
            tblines = traceback.format_exc().splitlines()
            for line in tblines:
                log.critical(line.lstrip())
Example No. 43
def main(configFile='main.ini'):
    """
    Main interface of TCRM that allows control and interaction with the
    5 interfaces: DataProcess, StatInterface, TrackGenerator,
    WindfieldInterface and HazardInterface

    :param str configFile: Name of file containing configuration settings
                           for running TCRM

    """

    log.info("Starting TCRM")
    log.info("Configuration file: %s", configFile)

    doOutputDirectoryCreation(configFile)

    config = ConfigParser()
    config.read(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'DownloadData'):
        doDataDownload(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'DataProcess'):
        doDataProcessing(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'ExecuteStat'):
        doStatistics(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'ExecuteTrackGenerator'):
        doTrackGeneration(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'ExecuteWindfield'):
        doWindfieldCalculations(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'ExecuteHazard'):
        doHazard(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'PlotData'):
        doDataPlotting(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'CreateDatabase'):
        doDatabaseUpdate(configFile)

    comm.barrier()

    if config.getboolean('Actions', 'ExecuteEvaluate'):
        doEvaluation(config)

    comm.barrier()

    if config.getboolean('Actions', 'PlotHazard'):
        doHazardPlotting(configFile)

    comm.barrier()

    log.info('Completed TCRM')
Example No. 44
def doDataPlotting(configFile):
    """
    Plot the pre-processed input data. Requires the data processing step
    to have been executed first (``DataProcess = True`` in the
    ``Actions`` section of the configuration file).

    :param str configFile: Name of configuration file.

    """

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')
    pbar = ProgressBar('Plotting results: ', showProgressBar)

    outputPath = config.get('Output', 'Path')

    statsPlotPath = pjoin(outputPath, 'plots', 'stats')
    processPath = pjoin(outputPath, 'process')

    pRateData = flLoadFile(pjoin(processPath, 'pressure_rate'))
    pAllData = flLoadFile(pjoin(processPath, 'all_pressure'))
    bRateData = flLoadFile(pjoin(processPath, 'bearing_rate'))
    bAllData = flLoadFile(pjoin(processPath, 'all_bearing'))
    sRateData = flLoadFile(pjoin(processPath, 'speed_rate'))
    sAllData = flLoadFile(pjoin(processPath, 'all_speed'))
    freq = flLoadFile(pjoin(processPath, 'frequency'))


    indLonLat = flLoadFile(pjoin(processPath, 'cyclone_tracks'),
                           delimiter=',')
    indicator = indLonLat[:, 0]
    lonData = indLonLat[:, 1]
    latData = indLonLat[:, 2]

    jdayobs = flLoadFile(pjoin(processPath, 'jday_obs'), delimiter=',')
    jdaygenesis = flLoadFile(pjoin(processPath, 'jday_genesis'), delimiter=',')


    from PlotInterface.plotStats import PlotPressure, PlotBearing, \
        PlotSpeed, PlotFrequency, PlotDays, PlotLonLat

    log.info('Plotting pressure data')
    pbar.update(0.05)
    PrsPlot = PlotPressure(statsPlotPath, "png")
    PrsPlot.plotPressure(pAllData)
    PrsPlot.plotPressureRate(pRateData)
    PrsPlot.plotMinPressure(indicator, pAllData)

    #FIXME: To be moved into `PlotPressure` class.
    #plotting.minPressureLat(pAllData, latData)

    log.info('Plotting bearing data')
    pbar.update(0.15)
    BearPlot = PlotBearing(statsPlotPath, "png")
    BearPlot.plotBearing(bAllData)
    BearPlot.plotBearingRate(bRateData)

    log.info('Plotting speed data')
    pbar.update(0.25)
    SpeedPlot = PlotSpeed(statsPlotPath, "png")
    SpeedPlot.plotSpeed(sAllData)
    SpeedPlot.plotSpeedRate(sRateData)

    log.info('Plotting longitude and latitude data')
    pbar.update(0.45)

    # FIXME: To be moved to its own class in PlotStats
    LLPlot = PlotLonLat(statsPlotPath, "png")
    LLPlot.plotLonLat(lonData, latData, indicator)

    pbar.update(0.65)

    log.info('Plotting frequency data')
    pbar.update(0.85)
    FreqPlot = PlotFrequency(statsPlotPath, "png")
    FreqPlot.plotFrequency(freq[:, 0], freq[:, 1])

    DayPlot = PlotDays(statsPlotPath, "png")
    DayPlot.plotJulianDays(jdayobs, jdaygenesis)

    pbar.update(1.0)
Example No. 45
def main(config_file):
    """
    Main function to combine the multipliers with the regional wind
    speed data.

    :param str config_file: Path to configuration file.

    """

    config = ConfigParser()
    config.read(config_file)
    input_path = config.get('Input', 'Path')
    try:
        gust_file = config.get('Input', 'Gust_file')
    except Exception:
        # Fall back to the default gust file name
        gust_file = 'gust.001-00001.nc'
    windfield_path = pjoin(input_path, 'windfield')
    ncfile = pjoin(windfield_path, gust_file)
    multiplier_path = config.get('Input', 'Multipliers')

    # Load the wind data:
    log.info("Loading regional wind data from {0}".format(ncfile))
    ncobj = Dataset(ncfile, 'r')

    lat = ncobj.variables['lat'][:]
    lon = ncobj.variables['lon'][:]

    delta = lon[1] - lon[0]
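    # Shift coordinates by half a grid cell, presumably from cell
    # centres to cell corners, before constructing the rasters below: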
    lon = lon - delta / 2.
    lat = lat - delta / 2.

    # Wind speed:
    wspd = ncobj.variables['vmax'][:]

    # Components:
    uu = ncobj.variables['ua'][:]
    vv = ncobj.variables['va'][:]

    bearing = calculateBearing(uu, vv)

    # Load a multiplier file to determine the projection:
    m4_max_file = pjoin(multiplier_path, 'm4_ne.tif')
    log.info("Using M4 data from {0}".format(m4_max_file))

    # Reproject the wind speed and bearing data:
    wind_raster_file = pjoin(windfield_path, 'region_wind.tif')
    wind_raster = createRaster(wspd,
                               lon,
                               lat,
                               delta,
                               -delta,
                               filename=wind_raster_file)
    bear_raster = createRaster(bearing, lon, lat, delta, -delta)
    uu_raster = createRaster(uu, lon, lat, delta, -delta)
    vv_raster = createRaster(vv, lon, lat, delta, -delta)

    log.info("Reprojecting regional wind data")
    wind_prj_file = pjoin(windfield_path, 'gust_prj.tif')
    bear_prj_file = pjoin(windfield_path, 'bear_prj.tif')
    uu_prj_file = pjoin(windfield_path, 'uu_prj.tif')
    vv_prj_file = pjoin(windfield_path, 'vv_prj.tif')

    reprojectDataset(wind_raster, m4_max_file, wind_prj_file)
    reprojectDataset(bear_raster,
                     m4_max_file,
                     bear_prj_file,
                     resampling_method=GRA_NearestNeighbour)
    reprojectDataset(uu_raster,
                     m4_max_file,
                     uu_prj_file,
                     resampling_method=GRA_NearestNeighbour)
    reprojectDataset(vv_raster,
                     m4_max_file,
                     vv_prj_file,
                     resampling_method=GRA_NearestNeighbour)

    wind_prj_ds = gdal.Open(wind_prj_file, gdal.GA_ReadOnly)
    wind_prj = wind_prj_ds.GetRasterBand(1)
    bear_prj_ds = gdal.Open(bear_prj_file, gdal.GA_ReadOnly)
    bear_prj = bear_prj_ds.GetRasterBand(1)
    uu_prj_ds = gdal.Open(uu_prj_file, gdal.GA_ReadOnly)
    uu_prj = uu_prj_ds.GetRasterBand(1)
    vv_prj_ds = gdal.Open(vv_prj_file, gdal.GA_ReadOnly)
    vv_prj = vv_prj_ds.GetRasterBand(1)
    wind_proj = wind_prj_ds.GetProjection()
    wind_geot = wind_prj_ds.GetGeoTransform()

    wind_data = wind_prj.ReadAsArray()
    bear_data = bear_prj.ReadAsArray()
    uu_data = uu_prj.ReadAsArray()
    vv_data = vv_prj.ReadAsArray()
    bearing = calculateBearing(uu_data, vv_data)

    # The local wind speed array:
    local = np.zeros(wind_data.shape, dtype='float32')

    indices = {
        0: {
            'dir': 'n',
            'min': 0.,
            'max': 22.5
        },
        1: {
            'dir': 'ne',
            'min': 22.5,
            'max': 67.5
        },
        2: {
            'dir': 'e',
            'min': 67.5,
            'max': 112.5
        },
        3: {
            'dir': 'se',
            'min': 112.5,
            'max': 157.5
        },
        4: {
            'dir': 's',
            'min': 157.5,
            'max': 202.5
        },
        5: {
            'dir': 'sw',
            'min': 202.5,
            'max': 247.5
        },
        6: {
            'dir': 'w',
            'min': 247.5,
            'max': 292.5
        },
        7: {
            'dir': 'nw',
            'min': 292.5,
            'max': 337.5
        },
        8: {
            'dir': 'n',
            'min': 337.5,
            'max': 360.
        }
    }
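    # The dictionary above maps bearing ranges (in degrees) to the eight
    # compass directions, with north split across two entries to handle
    # the 0/360 degree wrap; the loop below applies the M4 multiplier
    # for the matching direction to the regional wind speed.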
    log.info("Processing all directions")
    for i in indices:
        dn = indices[i]['dir']
        log.info("Processing {0}".format(dn))
        m4_file = pjoin(multiplier_path, 'm4_{0}.tif'.format(dn.lower()))
        m4 = loadRasterFile(m4_file)
        idx = np.where((bear_data >= indices[i]['min'])
                       & (bear_data < indices[i]['max']))
        local[idx] = wind_data[idx] * m4[idx]

    rows, cols = local.shape
    output_file = pjoin(windfield_path, 'local_wind.tif')
    log.info("Creating output file: {0}".format(output_file))
    # Save the local wind field to a raster file with the SRS of the
    # multipliers
    drv = gdal.GetDriverByName("GTiff")
    dst_ds = drv.Create(output_file, cols, rows, 1, gdal.GDT_Float32,
                        ['BIGTIFF=NO', 'SPARSE_OK=TRUE'])
    dst_ds.SetGeoTransform(wind_geot)
    dst_ds.SetProjection(wind_proj)
    dst_band = dst_ds.GetRasterBand(1)
    dst_band.SetNoDataValue(-9999)
    dst_band.WriteArray(local)

    del dst_ds
    log.info("Completed")
Example No. 46
def startup():
    """
    Parse the command line arguments and call the :func:`main`
    function.

    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c',
                        '--config_file',
                        help='Path to configuration file')
    parser.add_argument('-v',
                        '--verbose',
                        help='Verbose output',
                        action='store_true')
    parser.add_argument('-d',
                        '--debug',
                        help='Allow pdb traces',
                        action='store_true')
    args = parser.parse_args()

    configFile = args.config_file
    config = ConfigParser()
    config.read(configFile)

    rootdir = pathLocator.getRootDirectory()
    os.chdir(rootdir)

    logfile = config.get('Logging', 'LogFile')
    logdir = dirname(realpath(logfile))

    # If log file directory does not exist, create it
    if not isdir(logdir):
        try:
            os.makedirs(logdir)
        except OSError:
            logfile = pjoin(os.getcwd(), 'tcrm.log')

    logLevel = config.get('Logging', 'LogLevel')
    verbose = config.getboolean('Logging', 'Verbose')
    datestamp = config.getboolean('Logging', 'Datestamp')
    debug = False

    if args.verbose:
        verbose = True

    if args.debug:
        debug = True

    flStartLog(logfile, logLevel, verbose, datestamp)
    # Switch off minor warning messages
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("ignore", category=UserWarning, module="pytz")
    warnings.filterwarnings("ignore", category=UserWarning, module="numpy")
    warnings.filterwarnings("ignore",
                            category=UserWarning,
                            module="matplotlib")

    warnings.filterwarnings("ignore", category=RuntimeWarning)

    if debug:
        main(configFile)
    else:
        try:
            main(configFile)
        except ImportError as e:
            log.critical("Missing module: {0}".format(e.strerror))
        except Exception:  # pylint: disable=W0703
            # Catch any exceptions that occur and log them (nicely):
            tblines = traceback.format_exc().splitlines()
            for line in tblines:
                log.critical(line.lstrip())
Example No. 47
def run(configFile, callback=None):
    """
    Run the wind field calculations.

    :param str configFile: path to a configuration file.
    :param func callback: optional callback function to track progress.

    """

    log.info('Loading wind field calculation settings')

    # Get configuration

    config = ConfigParser()
    config.read(configFile)

    profileType = config.get('WindfieldInterface', 'profileType')
    windFieldType = config.get('WindfieldInterface', 'windFieldType')
    beta = config.getfloat('WindfieldInterface', 'beta')
    beta1 = config.getfloat('WindfieldInterface', 'beta1')
    beta2 = config.getfloat('WindfieldInterface', 'beta2')
    thetaMax = config.getfloat('WindfieldInterface', 'thetaMax')
    margin = config.getfloat('WindfieldInterface', 'Margin')
    resolution = config.getfloat('WindfieldInterface', 'Resolution')
    domain = config.get('WindfieldInterface', 'Domain')

    outputPath = config.get('Output', 'Path')
    windfieldPath = pjoin(outputPath, 'windfield')
    trackPath = pjoin(outputPath, 'tracks')

    gridLimit = None
    if config.has_option('Region', 'gridLimit'):
        gridLimit = config.geteval('Region', 'gridLimit')

    if config.has_option('WindfieldInterface', 'gridLimit'):
        gridLimit = config.geteval('WindfieldInterface', 'gridLimit')

    if config.getboolean('Timeseries', 'Extract', fallback=False):
        from Utilities.timeseries import Timeseries
        ts = Timeseries(configFile)
        timestepCallback = ts.extract
    else:
        timestepCallback = None

    multipliers = None
    if config.has_option('Input', 'Multipliers'):
        multipliers = config.get('Input', 'Multipliers')

    thetaMax = math.radians(thetaMax)

    # Attempt to run the wind field calculations in parallel
    global MPI
    MPI = attemptParallel()
    comm = MPI.COMM_WORLD

    log.info('Running windfield generator')

    wfg = WindfieldGenerator(config=config,
                             margin=margin,
                             resolution=resolution,
                             profileType=profileType,
                             windFieldType=windFieldType,
                             beta=beta,
                             beta1=beta1,
                             beta2=beta2,
                             thetaMax=thetaMax,
                             gridLimit=gridLimit,
                             domain=domain,
                             multipliers=multipliers,
                             windfieldPath=windfieldPath)

    log.info(f'Dumping gusts to {windfieldPath}')

    # Get the trackfile names and count

    files = os.listdir(trackPath)
    trackfiles = [pjoin(trackPath, f) for f in files if f.startswith('tracks')]
    nfiles = len(trackfiles)

    log.info('Processing {0} track files in {1}'.format(nfiles, trackPath))

    # Do the work

    comm.barrier()

    wfg.dumpGustsFromTrackfiles(trackfiles, windfieldPath, timestepCallback)
    try:
        ts.shutdown()
    except NameError:
        # ts is only defined when timeseries extraction is enabled
        pass

    comm.barrier()

    log.info('Completed windfield generator')
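
# Typical usage, assuming a valid TCRM configuration file:
#
#     import wind
#     wind.run('tcrm.ini')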
Example No. 48
class KDEOrigin(object):
    """
    Initialise the class for generating the genesis probability distribution.
    Initialisation will load the required data (genesis locations) and
    calculate the optimum bandwidth for the kernel density method.

    :param str configFile: Path to the configuration file.
    :param dict gridLimit: The bounds of the model domain. The
                           :class:`dict` should contain the keys
                           :attr:`xMin`, :attr:`xMax`, :attr:`yMin`
                           and :attr:`yMax`. The *x* variable bounds
                           the longitude and the *y* variable
                           bounds the latitude.
    :param float kdeStep: Increment of the ordinate values at which
                          the distributions will be calculated.
                          Default=`0.1`
    :param lonLat: If given, a 2-d array of the longitude and latitude
                   of genesis locations. If not given, attempt to load
                   an ``init_lon_lat`` file from the processed files.
    :param progressbar: A :meth:`SimpleProgressBar` object to print
                        progress to STDOUT.
    :type  lonLat: :class:`numpy.ndarray`
    :type  progressbar: :class:`Utilities.progressbar` object.


    """
    def __init__(self,
                 configFile,
                 gridLimit,
                 kdeStep,
                 lonLat=None,
                 progressbar=None):
        """

        """
        self.progressbar = progressbar
        LOGGER.info("Initialising KDEOrigin")
        self.x = np.arange(gridLimit['xMin'], gridLimit['xMax'], kdeStep)
        self.y = np.arange(gridLimit['yMax'], gridLimit['yMin'], -kdeStep)

        self.kdeStep = kdeStep
        self.kde = None
        self.pdf = None
        self.cz = None

        self.configFile = configFile
        self.config = ConfigParser()
        self.config.read(configFile)

        if lonLat is None:
            # Load the data from file:
            self.outputPath = self.config.get('Output', 'Path')
            self.processPath = pjoin(self.outputPath, 'process')
            LOGGER.debug("Loading " + pjoin(self.processPath, 'init_lon_lat'))
            ll = flLoadFile(pjoin(self.processPath, 'init_lon_lat'), '%', ',')
            self.lonLat = ll[:, 0:2]
        else:
            self.lonLat = lonLat[:, 0:2]

        ii = np.where((self.lonLat[:, 0] >= gridLimit['xMin'])
                      & (self.lonLat[:, 0] <= gridLimit['xMax'])
                      & (self.lonLat[:, 1] >= gridLimit['yMin'])
                      & (self.lonLat[:, 1] <= gridLimit['yMax']))

        self.lonLat = self.lonLat[ii]

        self.bw = getOriginBandwidth(self.lonLat)
        LOGGER.info("Bandwidth: %s", repr(self.bw))

    def generateKDE(self, save=False, plot=False):
        """
        Generate the PDF for cyclone origins using the kernel density
        estimation technique (the bandwidth is taken from the
        :attr:`bw` attribute), and optionally save the result to file.

        :param boolean save: If ``True``, save the resulting PDF to a
                             netCDF file called 'originPDF.nc'.
        :param boolean plot: If ``True``, plot the resulting PDF.

        :returns: ``x`` and ``y`` grid and the PDF values.

        """

        self.kde = KDEMultivariate(self.lonLat, bw=self.bw, var_type='cc')
        xx, yy = np.meshgrid(self.x, self.y)
        xy = np.vstack([xx.ravel(), yy.ravel()])
        pdf = self.kde.pdf(data_predict=xy)
        pdf = pdf.reshape(xx.shape)

        self.pdf = pdf.transpose()

        if save:
            dimensions = {
                0: {
                    'name': 'lat',
                    'values': self.y,
                    'dtype': 'f',
                    'atts': {
                        'long_name': 'Latitude',
                        'units': 'degrees_north'
                    }
                },
                1: {
                    'name': 'lon',
                    'values': self.x,
                    'dtype': 'f',
                    'atts': {
                        'long_name': 'Longitude',
                        'units': 'degrees_east'
                    }
                }
            }

            variables = {
                0: {
                    'name': 'gpdf',
                    'dims': ('lat', 'lon'),
                    'values': np.array(pdf),
                    'dtype': 'f',
                    'atts': {
                        'long_name': 'TC Genesis probability distribution',
                        'units': ''
                    }
                }
            }

            ncSaveGrid(pjoin(self.processPath, 'originPDF.nc'), dimensions,
                       variables)

        if plot:
            from PlotInterface.maps import FilledContourMapFigure, \
                saveFigure, levels

            lvls, exponent = levels(pdf.max())

            [gx, gy] = np.meshgrid(self.x, self.y)

            map_kwargs = dict(llcrnrlon=self.x.min(),
                              llcrnrlat=self.y.min(),
                              urcrnrlon=self.x.max(),
                              urcrnrlat=self.y.max(),
                              projection='merc',
                              resolution='i')

            cbarlabel = r'Genesis probability ($\times 10^{' + \
                        str(exponent) + '}$)'
            figure = FilledContourMapFigure()
            figure.add(pdf * (10**-exponent), gx, gy, 'TC Genesis probability',
                       lvls * (10**-exponent), cbarlabel, map_kwargs)
            figure.plot()

            outputFile = pjoin(self.outputPath, 'plots', 'stats',
                               'originPDF.png')
            saveFigure(figure, outputFile)

        return self.x, self.y, self.pdf

    def generateCdf(self, save=False):
        """
        Generate the CDF corresponding to the PDF of cyclone origins,
        and optionally save it to file.

        :param boolean save: If ``True``, save the CDF to a netcdf file
                             called 'originCDF.nc'. If ``False``, return
                             the CDF.

        """
        xx, yy = np.meshgrid(self.x, self.y)
        xy = np.vstack([xx.ravel(), yy.ravel()])
        self.cz = self.kde.cdf(data_predict=xy)

        if save:
            outputFile = pjoin(self.processPath, 'originCDF.nc')
            dimensions = {
                0: {
                    'name': 'lat',
                    'values': self.y,
                    'dtype': 'f',
                    'atts': {
                        'long_name': 'Latitude',
                        'units': 'degrees_north'
                    }
                },
                1: {
                    'name': 'lon',
                    'values': self.x,
                    'dtype': 'f',
                    'atts': {
                        'long_name': 'Longitude',
                        'units': 'degrees_east'
                    }
                }
            }

            variables = {
                0: {
                    'name': 'gcdf',
                    'dims': ('lat', 'lon'),
                    'values': np.array(self.cz),
                    'dtype': 'f',
                    'atts': {
                        'long_name': ('TC Genesis cumulative '
                                      'distribution'),
                        'units': ''
                    }
                }
            }

            ncSaveGrid(outputFile, dimensions, variables)
        else:
            return self.cz

    def updateProgressBar(self, step, stepMax):
        """
        Callback function to update the progress bar from C code.

        :param int step: Current step.
        :param int stepMax: Maximum step.

        """
        if self.progressbar:
            self.progressbar.update(step / float(stepMax), 0.0, 0.7)
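
# A minimal usage sketch with hypothetical genesis points and domain
# bounds; in practice the grid limits and KDE step come from the TCRM
# configuration file and genesis locations from the processed data.
if __name__ == '__main__':
    import numpy as np

    genesisPoints = np.array([[160.1, -15.3], [152.4, -18.7],
                              [148.9, -12.2], [155.0, -20.1]])
    gridLimit = {'xMin': 140., 'xMax': 180., 'yMin': -30., 'yMax': -5.}
    kde = KDEOrigin('tcrm.ini', gridLimit, kdeStep=0.5,
                    lonLat=genesisPoints)
    x, y, pdf = kde.generateKDE(save=False, plot=False)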
Example No. 49
    def __init__(self,
                 configFile,
                 tilegrid,
                 numSim,
                 minRecords,
                 yrsPerSim,
                 calcCI=False):
        """
        Initialise HazardCalculator object.

        :param str configFile: path to TCRM configuration file.
        :param tilegrid: :class:`TileGrid` instance
        :param int numSim: number of simulations created.
        :param int minRecords: minimum number of valid wind speed values required
                               to do fitting.
        :param int yrsPerSim: number of years represented by each
                              simulation.
        :param boolean calcCI: If True, calculate bootstrap confidence
                               intervals for the return period wind
                               speeds.
        """
        config = ConfigParser()
        config.read(configFile)

        self.nodata = -9999.
        self.years = np.array(config.get('Hazard',
                                         'Years').split(',')).astype('f')
        self.outputPath = pjoin(config.get('Output', 'Path'), 'hazard')
        self.inputPath = pjoin(config.get('Output', 'Path'), 'windfield')
        gridLimit = config.geteval('Region', 'gridLimit')

        self.numSim = numSim
        self.minRecords = minRecords
        self.yrsPerSim = yrsPerSim
        self.calcCI = calcCI
        if self.calcCI:
            log.debug("Bootstrap confidence intervals will be calculated")
            self.sample_size = config.getint('Hazard', 'SampleSize')
            self.prange = config.getint('Hazard', 'PercentileRange')

        self.tilegrid = tilegrid
        lon, lat = self.tilegrid.getDomainExtent()

        # Create arrays for storing output data:
        self.loc = np.zeros((len(lat), len(lon)), dtype='f')
        self.shp = np.zeros((len(lat), len(lon)), dtype='f')
        self.scale = np.zeros((len(lat), len(lon)), dtype='f')
        self.Rp = np.zeros((len(self.years), len(lat), len(lon)), dtype='f')

        self.RPupper = np.zeros((len(self.years), len(lat), len(lon)),
                                dtype='f')
        self.RPlower = np.zeros((len(self.years), len(lat), len(lon)),
                                dtype='f')

        self.global_atts = {
            'title': ('TCRM hazard simulation - '
                      'return period wind speeds'),
            'tcrm_version': flProgramVersion(),
            'python_version': sys.version
        }

        # Add configuration settings to global attributes:
        for section in config.sections():
            for option in config.options(section):
                key = "{0}_{1}".format(section, option)
                value = config.get(section, option)
                self.global_atts[key] = value
Example No. 50
def doWindfieldPlotting(configFile):
    """
    Plot the wind field on a map.

    :param str configFile: Path to the configuration file.

    :Note: unless the input track file is a TCRM netCDF track file, the
           wind field file name is assumed to be 'gust.001-00001.nc'

    """
    from netCDF4 import Dataset
    import numpy as np
    from PlotInterface.maps import saveWindfieldMap

    config = ConfigParser()
    config.read(configFile)
    outputPath = config.get('Output', 'Path')
    windfieldPath = pjoin(outputPath, 'windfield')

    inputFile = config.get('DataProcess', 'InputFile')
    if inputFile.endswith(".nc"):
        # We have a netcdf track file. Work under the assumption it is
        # drawn directly from TCRM.
        trackFile = os.path.basename(inputFile)
        trackId = trackFile.split('.')[1]
        gustFile = 'gust.{0}.nc'.format(trackId)
        outputWindFile = pjoin(windfieldPath, gustFile)
    else:
        # Note the assumption about the file name!
        outputWindFile = pjoin(windfieldPath, 'gust.001-00001.nc')
    plotPath = pjoin(outputPath, 'plots', 'maxwind.png')

    f = Dataset(outputWindFile, 'r')

    xdata = f.variables['lon'][:]
    ydata = f.variables['lat'][:]

    vdata = f.variables['vmax'][:]

    gridLimit = None
    if config.has_option('Region', 'gridLimit'):
        gridLimit = config.geteval('Region', 'gridLimit')
        ii = np.where((xdata >= gridLimit['xMin']) &
                      (xdata <= gridLimit['xMax']))
        jj = np.where((ydata >= gridLimit['yMin']) &
                      (ydata <= gridLimit['yMax']))
        [xgrid, ygrid] = np.meshgrid(xdata[ii], ydata[jj])
        ig, jg = np.meshgrid(ii, jj)
        vdata = vdata[jg, ig]
    else:
        [xgrid, ygrid] = np.meshgrid(xdata, ydata)
    map_kwargs = dict(llcrnrlon=xgrid.min(),
                      llcrnrlat=ygrid.min(),
                      urcrnrlon=xgrid.max(),
                      urcrnrlat=ygrid.max(),
                      projection='merc',
                      resolution='i')
    title = "Maximum wind speed"
    cbarlabel = "Wind speed ({0})".format(f.variables['vmax'].units)
    levels = np.arange(30, 101., 5.)
    saveWindfieldMap(vdata, xgrid, ygrid, title, levels,
                     cbarlabel, map_kwargs, plotPath)
Example No. 51
def process_timeseries(config_file):
    """
    Process a set of timeseries files to include the multiplier values.

    The combined multiplier values are stored in a shape file as fields,
    and records are keyed by the same code that is used to select
    stations for sampling.

    :param str config_file: Path to a configuration file.

    """

    config = ConfigParser()
    config.read(config_file)

    stnFile = config.get('Input', 'LocationFile')
    key_name = config.get('Timeseries', 'StationID')
    inputPath = pjoin(config.get('Output', 'Path'), 'process', 'timeseries')
    outputPath = pjoin(inputPath, 'local')

    if not isdir(outputPath):
        try:
            os.makedirs(outputPath)
        except OSError:
            raise

    log.info("Loading stations from %s" % stnFile)
    log.info("Timeseries data will be written into %s" % outputPath)

    directions = ['n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw']

    sf = shapefile.Reader(stnFile)
    field_names = [sf.fields[i][0] for i in range(1, len(sf.fields))]
    try:
        key_index = field_names.index(key_name)
    except ValueError:
        log.exception("{0} not a field in {1}".format(key_name, stnFile))
        raise

    min_data = DynamicRecArray(dtype={
        'names': MINMAX_NAMES,
        'formats': MINMAX_TYPES
    })
    max_data = DynamicRecArray(dtype={
        'names': MINMAX_NAMES,
        'formats': MINMAX_TYPES
    })

    records = sf.records()
    indexes = []
    for dn in directions:
        fieldname = 'm4_%s' % dn
        indexes.append(field_names.index(fieldname))

    for record in records:
        stnId = record[key_index]
        inputFile = pjoin(inputPath, 'ts.{0}.csv'.format(stnId))
        outputFile = pjoin(outputPath, 'ts.{0}.csv'.format(stnId))
        if os.path.isfile(inputFile):
            # Load multipliers for this location:
            mvals = [float(record[i]) for i in indexes]
            maxdata, mindata = tsmultiply(inputFile, tuple(mvals), outputFile)
            min_data.append(tuple(mindata))
            max_data.append(tuple(maxdata))
        else:
            log.debug("No timeseries file for {0}".format(stnId))

    # Save local minima/maxima
    maxfile = pjoin(outputPath, 'local_maxima.csv')
    minfile = pjoin(outputPath, 'local_minima.csv')
    maxheader = ('Station,Time,Longitude,Latitude,Speed,'
                 'UU,VV,Bearing,Pressure')
    np.savetxt(maxfile,
               max_data.data,
               fmt=MINMAX_FMT,
               delimiter=',',
               header=maxheader,
               comments='')
    np.savetxt(minfile,
               min_data.data,
               fmt=MINMAX_FMT,
               delimiter=',',
               header=maxheader,
               comments='')
Example No. 52
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config_file', help='Input configuration file')
    parser.add_argument('-f', '--file', help='Input TC track file')
    parser.add_argument('-s',
                        '--source',
                        help='Input TC track file source format')
    parser.add_argument('-v',
                        '--verbose',
                        help='Print log messages to STDOUT',
                        action='store_true')
    args = parser.parse_args()

    config_file = args.config_file
    config = ConfigParser()
    config.read(config_file)

    logfile = config.get('Logging', 'LogFile')
    logdir = dirname(realpath(logfile))

    # If log file directory does not exist, create it
    if not isdir(logdir):
        try:
            os.makedirs(logdir)
        except OSError:
            logfile = pjoin(os.getcwd(), 'tracks2shp.log')

    logLevel = config.get('Logging', 'LogLevel')
    verbose = config.getboolean('Logging', 'Verbose')
    datestamp = config.getboolean('Logging', 'Datestamp')
Example No. 53
    def __init__(self):
        """
        Parse command line arguments and call the :func:`main` function.

        """
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            '--config_file',
                            help='Path to configuration file')
        parser.add_argument('-v',
                            '--verbose',
                            help='Verbose output',
                            action='store_true')
        parser.add_argument('-d',
                            '--debug',
                            help='Allow pdb traces',
                            action='store_true')
        args = parser.parse_args()

        self.configFile = args.config_file
        config = ConfigParser()
        config.read(self.configFile)

        logfile = config.get('Logging', 'LogFile')
        logdir = dirname(realpath(logfile))

        # If log file directory does not exist, create it
        if not isdir(logdir):
            try:
                os.makedirs(logdir)
            except OSError:
                logfile = pjoin(os.getcwd(), 'processMultipliers.log')

        logLevel = config.get('Logging', 'LogLevel')
        verbose = config.getboolean('Logging', 'Verbose')
        datestamp = config.getboolean('Logging', 'Datestamp')

        if args.verbose:
            verbose = True

        flStartLog(logfile, logLevel, verbose, datestamp)
        # Switch off minor warning messages
        import warnings
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        warnings.filterwarnings("ignore", category=UserWarning, module="pytz")
        warnings.filterwarnings("ignore", category=UserWarning, module="numpy")
        warnings.filterwarnings("ignore",
                                category=UserWarning,
                                module="matplotlib")

        warnings.filterwarnings("ignore", category=RuntimeWarning)

        self.working_dir = config.get('Output', 'Working_dir')
        self.gust_file = config.get('Input', 'Gust_file')

        tiles = config.get('Input', 'Tiles')
        self.tiles = [item.strip() for item in tiles.split(',')]
        log.debug('List of tiles to be processed: {0}'.format(self.tiles))
        log.info('Multipliers will be written out to %s', self.working_dir)

        # Get the multipliers, and process them if need be
        self.type_mapping = {
            'shielding': 'Ms',
            'terrain': 'Mz',
            'topographic': 'Mt'
        }
        self.dirns = ['e', 'n', 'ne', 'nw', 's', 'se', 'sw', 'w']

        rootdir = pathLocator.getRootDirectory()
        os.chdir(rootdir)

        try:
            self.main()
        except ImportError as e:
            log.critical("Missing module: {0}".format(e.strerror))
        except Exception:  # pylint: disable=W0703
            # Catch any exceptions that occur and log them (nicely):
            tblines = traceback.format_exc().splitlines()
            for line in tblines:
                log.critical(line.lstrip())
Example No. 54
from extremes import returnLevels, empReturnPeriod, returnPeriodUncertainty, gpdSelectThreshold
from distributions import fittedPDF

import random

import seaborn as sns
sns.set_context("paper")
figsize = (6.5, 4.5)
sns.set_style("whitegrid")

# Load the configuration file from the TCHA18, then open the database
# and get the list of available locations.

configFile = "/home/547/cxa547/tcrmconfig/tcrm2.1.ini"
config = ConfigParser()
config.read(configFile)
outputPath = config.get('Output', 'Path')
plotPath = os.path.join(outputPath, 'plots', 'convergence')
NumSimulations = config.getint('TrackGenerator', 'NumSimulations')

db = database.HazardDatabase(configFile)
locations = db.getLocations()
locNameList = list(locations['locName'])

# The following step performs the calculations. First a helper
# function to add nicely formatted grid lines on a logarithmic axis.
# The second function (`plotConvergenceTest`) loads the data from the
# database, then splits into two separate collections (called `d1` and
# `d2`). For each of these, we then calculate empirical ARI values and
# plot alongside each other. We also plot the annual exceedance
# probability as an alternate view on the likelihood of extreme winds.
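
# A minimal sketch of such a helper, assuming a matplotlib Axes object
# is passed in; the exact styling used in the TCHA18 scripts may differ.
from matplotlib.ticker import LogLocator, FormatStrFormatter

def addLogGrid(ax):
    """Add nicely formatted grid lines to a log-scaled x-axis."""
    ax.set_xscale('log')
    ax.xaxis.set_major_locator(LogLocator(base=10.0))
    ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
    ax.grid(True, which='major', linestyle='-', linewidth=0.5)
    ax.grid(True, which='minor', linestyle=':', linewidth=0.3)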
Example No. 55
def run(configFile, callback=None):
    """
    Run the wind field calculations.

    :param str configFile: path to a configuration file.
    :param func callback: optional callback function to track progress.

    """

    log.info('Loading wind field calculation settings')

    # Get configuration

    config = ConfigParser()
    config.read(configFile)

    profileType = config.get('WindfieldInterface', 'profileType')
    windFieldType = config.get('WindfieldInterface', 'windFieldType')
    beta = config.getfloat('WindfieldInterface', 'beta')
    beta1 = config.getfloat('WindfieldInterface', 'beta1')
    beta2 = config.getfloat('WindfieldInterface', 'beta2')
    thetaMax = config.getfloat('WindfieldInterface', 'thetaMax')
    margin = config.getfloat('WindfieldInterface', 'Margin')
    resolution = config.getfloat('WindfieldInterface', 'Resolution')
    domain = config.get('WindfieldInterface', 'Domain')

    outputPath = config.get('Output', 'Path')
    windfieldPath = pjoin(outputPath, 'windfield')
    trackPath = pjoin(outputPath, 'tracks')

    gridLimit = None
    if config.has_option('Region', 'gridLimit'):
        gridLimit = config.geteval('Region', 'gridLimit')

    if config.has_option('WindfieldInterface', 'gridLimit'):
        gridLimit = config.geteval('WindfieldInterface', 'gridLimit')

    # Default to a no-op callback; override it only when timeseries
    # extraction is enabled in the configuration. (The original nested
    # if/else left timestepCallback undefined when a [Timeseries]
    # section existed without an Extract option.)
    def timestepCallback(*args):
        """Dummy timestepCallback function"""
        pass

    if config.has_section('Timeseries'):
        if config.has_option('Timeseries', 'Extract'):
            if config.getboolean('Timeseries', 'Extract'):
                from Utilities.timeseries import Timeseries
                log.debug("Timeseries data will be extracted")
                ts = Timeseries(configFile)
                timestepCallback = ts.extract

    thetaMax = math.radians(thetaMax)

    # Attempt to run the wind field calculations in parallel
    global pp
    pp = attemptParallel()

    log.info('Running windfield generator')

    wfg = WindfieldGenerator(config=config,
                             margin=margin,
                             resolution=resolution,
                             profileType=profileType,
                             windFieldType=windFieldType,
                             beta=beta,
                             beta1=beta1,
                             beta2=beta2,
                             thetaMax=thetaMax,
                             gridLimit=gridLimit,
                             domain=domain)

    msg = 'Dumping gusts to %s' % windfieldPath
    log.info(msg)

    # Get the trackfile names and count

    files = os.listdir(trackPath)
    trackfiles = [pjoin(trackPath, f) for f in files if f.startswith('tracks')]
    nfiles = len(trackfiles)

    def progressCallback(i):
        """Define the callback function"""
        callback(i, nfiles)

    msg = 'Processing %d track files in %s' % (nfiles, trackPath)
    log.info(msg)

    # Do the work

    pp.barrier()

    wfg.dumpGustsFromTrackfiles(trackfiles, windfieldPath, timestepCallback)

    try:
        ts.shutdown()
    except NameError:
        pass

    pp.barrier()

    log.info('Completed windfield generator')
Example No. 56
def loadTrackFile(configFile,
                  trackFile,
                  source,
                  missingValue=0,
                  calculateWindSpeed=True):
    """
    Load TC track data from the given input file, from a specified source.
    The configFile is a configuration file that contains a section called
    'source' that describes the data.
    This returns a collection of :class:`Track` objects that contains
    the details of the TC tracks in the input file.

    :param str configFile: Configuration file with a section ``source``.
    :param str trackFile: Path to a csv-formatted file containing TC data.
    :param str source: Name of the source format of the TC data. There
                        *must* be a section in ``configFile`` matching
                        this string, containing the details of the format
                        of the data.
    :param missingValue: Replace all null values in the input data with
                         this value (default=0).
    :param boolean calculateWindSpeed: Calculate maximum wind speed using
                                       a pressure-wind relation described
                                       in :func:`maxWindSpeed`

    :returns: A collection of :class:`Track` objects.
              If any of the variables are not present in the input
              dataset, they are (where possible) calculated
              (date/time/windspeed), sampled from default datasets
              (e.g. environmental pressure) or set to the missing value.

    Example::

      >>> tracks = loadTrackFile('tcrm.ini', 'IBTrACS.csv', 'IBTrACS')

    """

    LOG.info("Loading %s" % trackFile)
    inputData = colReadCSV(configFile, trackFile, source)

    config = ConfigParser()
    config.read(configFile)

    inputSpeedUnits = config.get(source, 'SpeedUnits')
    inputPressureUnits = config.get(source, 'PressureUnits')
    inputLengthUnits = config.get(source, 'LengthUnits')
    inputDateFormat = config.get(source, 'DateFormat')

    if config.getboolean('DataProcess', 'FilterSeasons'):
        startSeason = config.getint('DataProcess', 'StartSeason')
        idx = np.where(inputData['season'] >= startSeason)[0]
        inputData = inputData[idx]

    # Determine the initial TC positions...
    indicator = getInitialPositions(inputData)

    # Sort date/time information
    if 'age' in inputData.dtype.names:
        year, month, day, hour, minute, datetimes = parseAge(
            inputData, indicator)
        timeElapsed = inputData['age']
    else:
        year, month, day, hour, minute, datetimes = parseDates(
            inputData, indicator, inputDateFormat)
        timeElapsed = getTimeElapsed(indicator, year, month, day, hour, minute)

    # Time between observations:
    dt = getTimeDelta(year, month, day, hour, minute)

    # Calculate julian days
    jdays = julianDays(year, month, day, hour, minute)

    lat = np.array(inputData['lat'], 'd')
    lon = np.mod(np.array(inputData['lon'], 'd'), 360)
    delta_lon = np.diff(lon)
    delta_lat = np.diff(lat)

    # Split into separate tracks if large jump occurs (delta_lon > 10 degrees
    # or delta_lat > 5 degrees)
    # This avoids two tracks being accidentally combined when seasons and track
    # numbers match but basins are different as occurs in the IBTrACS dataset.
    # This problem can also be prevented if the 'tcserialno' column is
    # specified.
    indicator[np.where(delta_lon > 10)[0] + 1] = 1
    indicator[np.where(delta_lat > 5)[0] + 1] = 1

    pressure = filterPressure(np.array(inputData['pressure'], 'd'),
                              inputPressureUnits, missingValue)
    try:
        windspeed = np.array(inputData['vmax'], 'd')
        novalue_index = np.where(windspeed == sys.maxsize)
        windspeed = metutils.convert(windspeed, inputSpeedUnits, "mps")
        windspeed[novalue_index] = missingValue
    except (ValueError, KeyError):
        LOG.debug("No max wind speed data - all values will be zero")
        windspeed = np.zeros(indicator.size, 'f')
    assert lat.size == indicator.size
    assert lon.size == indicator.size
    assert pressure.size == indicator.size

    try:
        rmax = np.array(inputData['rmax'])
        novalue_index = np.where(rmax == missingValue)
        rmax = metutils.convert(rmax, inputLengthUnits, "km")
        rmax[novalue_index] = missingValue

    except (ValueError, KeyError):
        LOG.debug("No radius to max wind data - all values will be zero")
        rmax = np.zeros(indicator.size, 'f')

    if 'penv' in inputData.dtype.names:
        penv = np.array(inputData['penv'], 'd')
    else:
        LOG.debug("No ambient MSLP data in this input file")
        LOG.debug("Sampling data from MSLP data defined in "
                  "configuration file")
        # Warning: using sampled data will likely lead to some odd behaviour
        # near the boundary of the MSLP grid boundaries - higher resolution
        # MSLP data will decrease this unusual behaviour.

        try:
            ncfile = cnfGetIniValue(configFile, 'Input', 'MSLPFile')
        except Exception:
            LOG.exception("No input MSLP file specified in configuration")
            raise
        time = getTime(year, month, day, hour, minute)
        penv = ltmPressure(jdays, time, lon, lat, ncfile)

    if 'poci' in inputData.dtype.names:
        poci = np.array(inputData['poci'], 'd')
    else:
        LOG.debug("Determining poci")
        eps = np.random.normal(0, scale=2.5717)
        poci = getPoci(penv, pressure, lat, jdays, eps)

    speed, bearing = getSpeedBearing(indicator,
                                     lon,
                                     lat,
                                     dt,
                                     missingValue=missingValue)

    if calculateWindSpeed:
        windspeed = maxWindSpeed(indicator, dt, lon, lat, pressure, poci)

    # The cumulative sum of the start-of-track indicator assigns a
    # unique (1-based) ID to every record of the same track:
    TCID = np.cumsum(indicator)

    data = np.empty(len(indicator),
                    dtype={
                        'names': trackFields,
                        'formats': trackTypes
                    })
    for key, value in zip(trackFields, [
            indicator, TCID, year, month, day, hour, minute, timeElapsed,
            datetimes, lon, lat, speed, bearing, pressure, windspeed, rmax,
            poci
    ]):
        data[key] = value

    tracks = []
    n = np.max(TCID)
    for i in range(1, n + 1):
        track = Track(data[TCID == i])
        track.trackId = (i, n)
        track.trackfile = trackFile
        getMinPressure(track, missingValue)
        getMaxWind(track, missingValue)
        tracks.append(track)

    return tracks