Example #1
    def historic(self):
        """Calculate historical rates of longitude crossing"""

        LOG.info("Processing historical tracks for longitude crossings")
        config = ConfigParser()
        config.read(self.configFile)
        inputFile = config.get('DataProcess', 'InputFile')
        source = config.get('DataProcess', 'Source')

        timestep = config.getfloat('TrackGenerator', 'Timestep')

        if len(os.path.dirname(inputFile)) == 0:
            inputFile = pjoin(self.inputPath, inputFile)

        try:
            tracks = interpolateTracks.parseTracks(self.configFile,
                                                   inputFile,
                                                   source,
                                                   timestep,
                                                   interpolation_type='linear')
        except (TypeError, IOError, ValueError):
            LOG.critical("Cannot load historical track file: {0}".\
                         format(inputFile))
            raise
        else:
            self.lonCrossingHist, self.lonCrossingEWHist, \
                self.lonCrossingWEHist = self.findCrossings(tracks)

        return
Example #2
    def __init__(self, configFile):

        config = ConfigParser()
        config.read(configFile)

        self.outputPath = config.get('Output', 'Path')
        self.windfieldPath = pjoin(self.outputPath, 'windfield')
        self.trackPath = pjoin(self.outputPath, 'tracks')
        self.hazardPath = pjoin(self.outputPath, 'hazard')
        self.domain = config.geteval('Region', 'gridLimit')
        self.hazardDB = pjoin(self.outputPath, 'hazard.db')
        self.locationDB = pjoin(self.outputPath, 'locations.db')
        self.datfile = config.get('Process', 'DatFile')
        self.excludePastProcessed = config.getboolean('Process',
                                                      'ExcludePastProcessed')

        pGetProcessedFiles(self.datfile)

        sqlite3.Connection.__init__(self,
                                    self.hazardDB,
                                    detect_types=PARSE_DECLTYPES
                                    | PARSE_COLNAMES)

        self.exists = True

        import atexit
        atexit.register(self.close)
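
The class above subclasses sqlite3.Connection and hands the database path and detect_types flags straight to the base initialiser. A minimal, self-contained sketch of the same pattern (the class, table and column names below are illustrative, not TCRM's):

import sqlite3
from sqlite3 import PARSE_DECLTYPES, PARSE_COLNAMES

class SimpleDB(sqlite3.Connection):
    """Toy connection subclass mirroring the initialisation shown above."""

    def __init__(self, dbFile):
        # detect_types lets sqlite3 convert declared column types
        # (e.g. TIMESTAMP) back into Python objects on retrieval.
        sqlite3.Connection.__init__(self, dbFile,
                                    detect_types=PARSE_DECLTYPES | PARSE_COLNAMES)

# Direct instantiation works; sqlite3.connect(dbFile, factory=SimpleDB)
# is the equivalent documented route.
db = SimpleDB(":memory:")
db.execute("CREATE TABLE tracks (eventId INTEGER, startTime TIMESTAMP)")
db.close()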
Example #3
    def historic(self):
        """Load historic data and calculate histogram"""
        log.info("Processing historic track records")
        config = ConfigParser()
        config.read(self.configFile)
        inputFile = config.get('DataProcess', 'InputFile')
        if len(os.path.dirname(inputFile)) == 0:
            inputFile = pjoin(self.inputPath, inputFile)

        source = config.get('DataProcess', 'Source')

        try:
            tracks = loadTrackFile(self.configFile, inputFile, source)

        except (TypeError, IOError, ValueError):
            log.critical("Cannot load historical track file: {0}".\
                         format(inputFile))
            raise
        else:
            startYr = 9999
            endYr = 0
            for t in tracks:
                startYr = min(startYr, min(t.Year))
                endYr = max(endYr, max(t.Year))
            numYears = endYr - startYr
            log.info("Range of years: %d - %d" % (startYr, endYr))
            try:
                self.hist = self._calculate(tracks)
                # Normalisation by the number of years is currently disabled:
                # self.hist = self._calculate(tracks) / numYears
            except ValueError:
                log.critical("KDE error: the number of observations must be "
                             "larger than the number of variables")
                raise
Example #4
def doTrackGeneration(configFile):
    """
    Do the tropical cyclone track generation in :mod:`TrackGenerator`.

    The track generation settings are read from *configFile*.

    :param str configFile: Name of configuration file.

    """

    log.info('Starting track generation')

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')

    pbar = ProgressBar('Simulating cyclone tracks: ', showProgressBar)

    def status(done, total):
        pbar.update(float(done)/total)

    import TrackGenerator
    TrackGenerator.run(configFile, status)

    pbar.update(1.0)
    log.info('Completed track generation')
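
The nested status function is a closure that adapts TrackGenerator.run's (done, total) callback to the ProgressBar API. A standalone sketch of the same pattern, using a trivial stand-in for the ProgressBar class (everything below is illustrative, not TCRM code):

class FakeProgressBar:
    """Stand-in for the ProgressBar utility: just prints the fraction done."""

    def __init__(self, prefix, show=True):
        self.prefix = prefix
        self.show = show

    def update(self, fraction):
        if self.show:
            print("{0}{1:.0%}".format(self.prefix, fraction))


def runWithProgress(worker, prefix):
    """Run worker(callback), reporting progress through a closure."""
    pbar = FakeProgressBar(prefix)

    def status(done, total):
        # Same adapter as in doTrackGeneration(): convert counts to a fraction.
        pbar.update(float(done) / total)

    worker(status)
    pbar.update(1.0)


# A worker that reports progress over five steps:
runWithProgress(lambda cb: [cb(i + 1, 5) for i in range(5)],
                'Simulating cyclone tracks: ')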
Example #5
    def __init__(self, configFile):
        """
        Calculate density of TC genesis positions on a grid

        :param str configFile: path to a TCRM configuration file.
        """

        config = ConfigParser()
        config.read(configFile)
        self.configFile = configFile

        # Define the grid:
        self.gridLimit = config.geteval('Region', 'gridLimit')
        self.gridSpace = config.geteval('Region', 'GridSpace')

        self.lon_range = np.arange(self.gridLimit['xMin'],
                                   self.gridLimit['xMax'] + 0.1, 1.)
        self.lat_range = np.arange(self.gridLimit['yMin'],
                                   self.gridLimit['yMax'] + 0.1, 1.)

        self.X, self.Y = np.meshgrid(self.lon_range, self.lat_range)

        outputPath = config.get('Output', 'Path')
        self.trackPath = pjoin(outputPath, 'tracks')
        self.plotPath = pjoin(outputPath, 'plots', 'stats')
        self.dataPath = pjoin(outputPath, 'process')

        # Determine TCRM input directory
        tcrm_dir = pathLocator.getRootDirectory()
        self.inputPath = pjoin(tcrm_dir, 'input')

        self.synNumYears = config.getint('TrackGenerator',
                                         'yearspersimulation')
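
A worked example of the grid construction used above, with an illustrative gridLimit (the numbers are placeholders, not TCRM defaults):

import numpy as np

gridLimit = {'xMin': 90., 'xMax': 95., 'yMin': -15., 'yMax': -12.}

# 1-degree spacing; the +0.1 makes the upper bound inclusive, as above.
lon_range = np.arange(gridLimit['xMin'], gridLimit['xMax'] + 0.1, 1.)
lat_range = np.arange(gridLimit['yMin'], gridLimit['yMax'] + 0.1, 1.)

X, Y = np.meshgrid(lon_range, lat_range)

print(lon_range)  # [90. 91. 92. 93. 94. 95.]
print(lat_range)  # [-15. -14. -13. -12.]
print(X.shape)    # (4, 6): latitude rows by longitude columns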
Example #6
    def historic(self):
        """
        Load historic data and calculate histogram.
        Note that the input historical data is filtered by year
        when it's loaded in `interpolateTracks.parseTracks()`.

        The timestep to interpolate to is set to match that of the
        synthetic event set (normally set to 1 hour).
        """
        config = ConfigParser()
        config.read(self.configFile)
        inputFile = config.get('DataProcess', 'InputFile')
        if len(os.path.dirname(inputFile)) == 0:
            inputFile = pjoin(self.inputPath, inputFile)

        source = config.get('DataProcess', 'Source')

        try:
            tracks = loadTrackFile(self.configFile, inputFile, source)
        except (TypeError, IOError, ValueError):
            log.critical("Cannot load historical track file: {0}".\
                         format(inputFile))
            raise
        else:
            startYr = 9999
            endYr = 0
            for t in tracks:
                startYr = min(startYr, min(t.Year))
                endYr = max(endYr, max(t.Year))
            numYears = endYr - startYr

            self.hist = self.calculate(tracks) / numYears
Example #7
def doWindfieldCalculations(configFile):
    """
    Do the wind field calculations, using :mod:`wind`. The wind
    field settings are read from *configFile*.

    :param str configFile: Name of configuration file.

    """

    log.info('Starting wind field calculations')

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')

    pbar = ProgressBar('Calculating wind fields: ', showProgressBar)

    def status(done, total):
        pbar.update(float(done)/total)

    import wind
    wind.run(configFile, status)

    pbar.update(1.0)
    log.info('Completed wind field calculations')
Example #8
def doHazard(configFile):
    """
    Do the hazard calculations (extreme value distribution fitting)
    using the :mod:`hazard` module.

    :param str configFile: Name of configuration file.

    """

    log.info('Running HazardInterface')

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')
    pbar = ProgressBar('Performing hazard calculations: ', showProgressBar)

    def status(done, total):
        pbar.update(float(done)/total)

    import hazard
    hazard.run(configFile)

    log.info('Completed HazardInterface')
    pbar.update(1.0)
Example #9
def doHazardPlotting(configFile):
    """
    Do the hazard plots (hazard maps and curves for all locations within
    the model domain). Plotting is performed by the
    :mod:`PlotInterface.AutoPlotHazard` module.

    :param str configFile: Name of configuration file.

    """

    config = ConfigParser()
    config.read(configFile)

    log.info('Plotting Hazard Maps')

    showProgressBar = config.get('Logging', 'ProgressBar')
    pbar = ProgressBar('Plotting hazard maps: ', showProgressBar)
    pbar.update(0.0)

    from PlotInterface.AutoPlotHazard import AutoPlotHazard
    plotter = AutoPlotHazard(configFile, progressbar=pbar)
    plotter.plotMap()
    plotter.plotCurves()

    pbar.update(1.0)
Example #10
    def __init__(self, configFile):

        config = ConfigParser()
        config.read(configFile)
        self.configFile = configFile

        outputPath = config.get('Output', 'Path')
        self.trackPath = pjoin(outputPath, 'tracks')
        self.plotPath = pjoin(outputPath, 'plots', 'stats')
        self.dataPath = pjoin(outputPath, 'process')

        # Determine TCRM input directory
        tcrm_dir = pathLocator.getRootDirectory()
        self.inputPath = pjoin(tcrm_dir, 'input')

        self.synNumYears = config.getint('TrackGenerator',
                                         'yearspersimulation')

        try:
            gateFile = config.get('Input', 'CoastlineGates')
        except NoOptionError:
            LOG.exception(("No coastline gate file specified "
                           "in configuration file"))
            raise

        gateData = np.genfromtxt(gateFile, delimiter=',')

        self.gates = Int.convert2vertex(gateData[:, 1], gateData[:, 2])
        self.coast = list(self.gates)
        self.coast.append(self.gates[0])
Example #11
def doOutputDirectoryCreation(configFile):
    """
    Create all the necessary output folders.

    :param str configFile: Name of configuration file.
    :raises OSError: If the directory tree cannot be created.

    """

    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')

    log.info('Output will be stored under %s', outputPath)

    subdirs = ['tracks', 'hazard', 'windfield', 'plots', 'plots/hazard',
               'plots/stats', 'log', 'process', 'process/timeseries',
               'process/dat']

    if not isdir(outputPath):
        try:
            os.makedirs(outputPath)
        except (OSError, FileExistsError):
            raise
    for subdir in subdirs:
        if not isdir(realpath(pjoin(outputPath, subdir))):
            try:
                os.makedirs(realpath(pjoin(outputPath, subdir)))
            except (OSError, FileExistsError):
                raise
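
The isdir() checks and re-raising try/except blocks above can be expressed more compactly on Python 3 with os.makedirs(..., exist_ok=True); a sketch of that alternative (the 'output' path is a placeholder):

import os
from os.path import join as pjoin, realpath

def createOutputDirs(outputPath, subdirs):
    """Sketch: exist_ok=True replaces the isdir() checks and the
    re-raising try/except blocks used above."""
    os.makedirs(outputPath, exist_ok=True)
    for subdir in subdirs:
        os.makedirs(realpath(pjoin(outputPath, subdir)), exist_ok=True)

createOutputDirs('output', ['tracks', 'hazard', 'windfield', 'plots',
                            'plots/hazard', 'plots/stats', 'log', 'process',
                            'process/timeseries', 'process/dat'])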
Example #12
    def __init__(self, configFile, tilegrid, numSim, minRecords, yrsPerSim,
                 calcCI=False, evd='GEV'):
        """
        Initialise HazardCalculator object.

        :param str configFile: path to TCRM configuration file.
        :param tilegrid: :class:`TileGrid` instance
        :param int numSim: number of simulations created.
        :param int minRecords: minimum number of valid wind speed values
                               required to perform the fitting.
        :param int yrsPerSim: number of years represented by each simulated
                              event set.
        :param boolean calcCI: if True, calculate bootstrap confidence
                               intervals for the fitted return levels.
        :param str evd: extreme value distribution to use. Options so far are
                        GEV and GPD.
        """
        config = ConfigParser()
        config.read(configFile)

        self.nodata = -9999.
        self.years = np.array(config.get('Hazard',
                                         'Years').split(',')).astype('f')
        self.outputPath = pjoin(config.get('Output', 'Path'), 'hazard')
        self.inputPath = pjoin(config.get('Output', 'Path'), 'windfield')
        gridLimit = config.geteval('Region', 'gridLimit')

        self.numSim = numSim
        self.minRecords = minRecords
        self.yrsPerSim = yrsPerSim
        self.calcCI = calcCI
        if self.calcCI:
            log.debug("Bootstrap confidence intervals will be calculated")
            self.sample_size = config.getint('Hazard', 'SampleSize')
            self.prange = config.getint('Hazard', 'PercentileRange')
        self.evd = evd

        self.tilegrid = tilegrid
        lon, lat = self.tilegrid.getDomainExtent()

        # Create arrays for storing output data:
        self.loc = np.zeros((len(lat), len(lon)), dtype='f')
        self.shp = np.zeros((len(lat), len(lon)), dtype='f')
        self.scale = np.zeros((len(lat), len(lon)), dtype='f')
        self.Rp = np.zeros((len(self.years), len(lat), len(lon)), dtype='f')

        self.RPupper = np.zeros((len(self.years), len(lat), len(lon)), dtype='f')
        self.RPlower = np.zeros((len(self.years), len(lat), len(lon)), dtype='f')

        self.global_atts = {'title': ('TCRM hazard simulation - '
                            'return period wind speeds'),
                            'tcrm_version': flProgramVersion(),
                            'python_version': sys.version}


        # Add configuration settings to global attributes:
        for section in config.sections():
            for option in config.options(section):
                key = "{0}_{1}".format(section, option)
                value = config.get(section, option)
                self.global_atts[key] = value
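
The final loop flattens every configuration option into a Section_option key for the netCDF global attributes. The same idea shown with the standard-library configparser (which TCRM's ConfigParser appears to extend) on a throwaway config whose values are placeholders:

from configparser import ConfigParser

SAMPLE = """\
[Output]
Path = ./output

[Hazard]
Years = 5,10,20,50
"""

config = ConfigParser()
config.read_string(SAMPLE)

global_atts = {}
for section in config.sections():
    for option in config.options(section):
        # Note: the default optionxform lower-cases option names.
        key = "{0}_{1}".format(section, option)
        global_atts[key] = config.get(section, option)

print(global_atts)
# {'Output_path': './output', 'Hazard_years': '5,10,20,50'}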
Example #13
def startup():
    """
    Parse command line arguments and call the :func:`main` function.

    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c',
                        '--config_file',
                        help='Path to configuration file')
    parser.add_argument('-v',
                        '--verbose',
                        help='Verbose output',
                        action='store_true')
    parser.add_argument('-d',
                        '--debug',
                        help='Allow pdb traces',
                        action='store_true')
    args = parser.parse_args()

    configFile = args.config_file
    config = ConfigParser()
    config.read(configFile)

    rootdir = pathLocator.getRootDirectory()
    os.chdir(rootdir)

    logfile = config.get('Logging', 'LogFile')
    logdir = dirname(realpath(logfile))

    # If log file directory does not exist, create it
    if not isdir(logdir):
        try:
            os.makedirs(logdir)
        except OSError:
            logfile = pjoin(os.getcwd(), 'processMultipliers.log')

    logLevel = config.get('Logging', 'LogLevel')
    verbose = config.getboolean('Logging', 'Verbose')
    datestamp = config.getboolean('Logging', 'Datestamp')
    debug = False

    if args.verbose:
        verbose = True

    if args.debug:
        debug = True

    flStartLog(logfile, logLevel, verbose, datestamp)

    if debug:
        main(configFile)
    else:
        try:
            modified_main(configFile)
        except Exception:  # pylint: disable=W0703
            # Catch any exceptions that occur and log them (nicely):
            tblines = traceback.format_exc().splitlines()
            for line in tblines:
                log.critical(line.lstrip())
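
For reference, the command-line interface defined in startup() can be exercised without a shell by passing an explicit argument list to parse_args(); the configuration file name below is a placeholder:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config_file', help='Path to configuration file')
parser.add_argument('-v', '--verbose', help='Verbose output',
                    action='store_true')
parser.add_argument('-d', '--debug', help='Allow pdb traces',
                    action='store_true')

# Equivalent to running the script with:  -c tcrm.ini -v
args = parser.parse_args(['-c', 'tcrm.ini', '-v'])
print(args.config_file, args.verbose, args.debug)  # tcrm.ini True False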
Example #14
def doWindfieldPlotting(configFile):
    """
    Plot the wind field on a map.

    :param str configFile: Path to the configuration file.

    :Note: for non-netCDF track files, the wind field file name is assumed
           to be 'gust.001-00001.nc'

    """
    from netCDF4 import Dataset
    import numpy as np
    config = ConfigParser()
    config.read(configFile)
    outputPath = config.get('Output', 'Path')
    windfieldPath = pjoin(outputPath, 'windfield')

    inputFile = config.get('DataProcess', 'InputFile')
    if inputFile.endswith(".nc"):
        # We have a netcdf track file. Work under the assumption it is
        # drawn directly from TCRM.
        trackFile = os.path.basename(inputFile)
        trackId = trackFile.split('.')[1]
        gustFile = 'gust.{0}.nc'.format(trackId)
        outputWindFile = pjoin(windfieldPath, gustFile)
    else:
        # Note the assumption about the file name!
        outputWindFile = pjoin(windfieldPath, 'gust.001-00001.nc')
    plotPath = pjoin(outputPath, 'plots', 'maxwind.png')

    f = Dataset(outputWindFile, 'r')

    xdata = f.variables['lon'][:]
    ydata = f.variables['lat'][:]

    vdata = f.variables['vmax'][:]

    gridLimit = None
    if config.has_option('Region', 'gridLimit'):
        gridLimit = config.geteval('Region', 'gridLimit')
        ii = np.where((xdata >= gridLimit['xMin'])
                      & (xdata <= gridLimit['xMax']))
        jj = np.where((ydata >= gridLimit['yMin'])
                      & (ydata <= gridLimit['yMax']))
        [xgrid, ygrid] = np.meshgrid(xdata[ii], ydata[jj])
        ig, jg = np.meshgrid(ii, jj)
        vdata = vdata[jg, ig]
    else:
        [xgrid, ygrid] = np.meshgrid(xdata, ydata)
    map_kwargs = dict(llcrnrlon=xgrid.min(),
                      llcrnrlat=ygrid.min(),
                      urcrnrlon=xgrid.max(),
                      urcrnrlat=ygrid.max(),
                      projection='merc',
                      resolution='i')
    title = "Maximum wind speed"
    cbarlabel = "Wind speed ({0})".format(f.variables['vmax'].units)
    levels = np.arange(30, 101., 5.)
    saveWindfieldMap(vdata, xgrid, ygrid, title, levels, cbarlabel, map_kwargs,
                     plotPath)
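
The gridLimit subsetting above selects longitude and latitude indices separately and then combines them. An equivalent, self-contained sketch on synthetic data, using boolean masks and np.ix_ for the 2-D selection (offered as an alternative to the np.where/meshgrid indexing above; the coordinates and limits are made up):

import numpy as np

# Synthetic coordinates and data standing in for the netCDF variables:
xdata = np.arange(100., 121., 1.)                # lon
ydata = np.arange(-20., -4., 1.)                 # lat
vdata = np.random.rand(len(ydata), len(xdata))   # vmax(lat, lon)

gridLimit = {'xMin': 105., 'xMax': 115., 'yMin': -15., 'yMax': -10.}

ii = (xdata >= gridLimit['xMin']) & (xdata <= gridLimit['xMax'])
jj = (ydata >= gridLimit['yMin']) & (ydata <= gridLimit['yMax'])

xgrid, ygrid = np.meshgrid(xdata[ii], ydata[jj])
subset = vdata[np.ix_(jj, ii)]    # rows = selected lats, cols = selected lons

print(subset.shape, xgrid.shape)  # (6, 11) (6, 11)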
Example #15
    def __init__(self, configFile):
        """
        :param str configFile: Path to configuration file.
        """
        config = ConfigParser()
        config.read(configFile)

        self.outputPath = config.get('Output', 'Path')
        self.wf_domain = config.geteval('Region', 'gridLimit')
Example #16
def run(configFile, callback=None):
    """
    Run the hazard calculations.

    This will attempt to run the calculation in parallel by tiling the
    domain, but also provides a sane fallback mechanism to execute
    in serial.

    :param str configFile: path to configuration file

    """

    log.info("Loading hazard calculation settings")

    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')
    inputPath = pjoin(outputPath, 'windfield')
    gridLimit = config.geteval('Region', 'gridLimit')
    numsimulations = config.getint('TrackGenerator', 'NumSimulations')
    yrsPerSim = config.getint('TrackGenerator', 'YearsPerSimulation')
    minRecords = config.getint('Hazard', 'MinimumRecords')
    calculate_confidence = config.getboolean('Hazard', 'CalculateCI')
    extreme_value_distribution = config.get('Hazard', 'ExtremeValueDistribution')

    wf_lon, wf_lat = setDomain(inputPath)

    global MPI, comm
    MPI = attemptParallel()
    comm = MPI.COMM_WORLD
    log.info("Running hazard calculations")
    TG = TileGrid(gridLimit, wf_lon, wf_lat)
    tiles = getTiles(TG)

    #def progress(i):
    #    callback(i, len(tiles))

    comm.barrier()
    hc = HazardCalculator(configFile, TG,
                          numsimulations,
                          minRecords,
                          yrsPerSim,
                          calculate_confidence,
                          extreme_value_distribution
                          )

    hc.dumpHazardFromTiles(tiles)
    log.debug("Finished hazard calculations")
    comm.barrier()

    hc.saveHazard()

    log.info("Completed hazard calculation")
Example #17
def modified_main(config_file):
    """
    Main function to combine the multipliers with the regional wind
    speed data.

    :param str config_file: Path to configuration file.

    """
    config = ConfigParser()
    config.read(config_file)
    input_path = config.get('Input', 'Path')
    try:
        gust_file = config.get('Input', 'Gust_file')
    except NoOptionError:
        # Fall back to the default gust file name if the option is absent.
        gust_file = 'gust.001-00001.nc'
    windfield_path = pjoin(input_path, 'windfield')
    ncfile = pjoin(windfield_path, gust_file)
    multiplier_path = config.get('Input', 'Multipliers')

    # Load the wind data:
    log.info("Loading regional wind data from {0}".format(ncfile))
    ncobj = Dataset(ncfile, 'r')

    lat = ncobj.variables['lat'][:]
    lon = ncobj.variables['lon'][:]

    delta = lon[1] - lon[0]
    lon = lon - delta / 2.
    lat = lat - delta / 2.

    # Wind speed:
    wspd = ncobj.variables['vmax'][:]

    # Components:
    uu = ncobj.variables['ua'][:]
    vv = ncobj.variables['va'][:]

    bearing = calculateBearing(uu, vv)

    gust = wspd
    Vx = uu
    Vy = vv
    P = None

    # WARNING: these assignments may be wrong and still need to be checked.
    # They are no-ops, kept only to highlight which variables are passed on.
    lon = lon
    lat = lat

    # Load a multiplier file to determine the projection:

    log.info("Using M4 data from {0}".format(multiplier_path))

    processMult(gust, Vx, Vy, lon, lat, windfield_path, multiplier_path)
Example #18
    def __init__(self, configFile):
        """
        Read configuration settings, load station data and set up
        output recarrays.

        :param str configFile: path to a configuration file.
        """

        config = ConfigParser()
        config.read(configFile)

        self.meta = False

        stnFile = config.get('Timeseries', 'LocationFile')
        self.outputPath = pjoin(config.get('Output', 'Path'),
                                'process', 'timeseries')

        self.maxfile = pjoin(config.get('Output', 'Path'),
                             'process', 'maxima.csv')
        self.minfile = pjoin(config.get('Output', 'Path'),
                             'process', 'minima.csv')


        log.info(f"Loading timeseries stations from {stnFile}")
        log.debug(f"Timeseries data will be written into {self.outputPath}")
        self.stations = []
        if stnFile.endswith("shp"):
            try:
                key_name = config.get('Timeseries', 'StationID')
            except NoOptionError:
                key_name = None

            vertices = shpGetVertices(stnFile, key_name=key_name)

            for stn in list(vertices.keys()):
                lat = vertices[stn][0][1]
                lon = vertices[stn][0][0]
                lon = np.where(lon < 0., lon + 360., lon)
                self.stations.append(Station(stn, lon, lat))


        else:
            stndata = flLoadFile(stnFile, delimiter=',')
            # If there are more than 3 columns, save the additional
            # columns as 'metadata'
            if stndata.shape[1] > 3:
                self.metadata = stndata[:, 3:]
                self.meta = True
            stnid = stndata[:, 0]
            stnlon = stndata[:, 1].astype(float)
            stnlat = stndata[:, 2].astype(float)
            for sid, lon, lat in zip(stnid, stnlon, stnlat):
                self.stations.append(Station(sid, lon, lat))
        log.info(f"There are {len(self.stations)} stations that will collect timeseries data")
Example #19
def doDataDownload(configFile):
    """
    Check and download the data files listed in the configuration file.
    Datasets are listed in the `Input` section of the configuration
    file, with the option `Datasets`. There must also be a corresponding
    section in the configuration file that includes the URL, the path where
    the dataset will be stored and the filename it will be saved as, e.g.::

        [Input]
        Datasets=IBTRACS

        [IBTRACS]
        URL=ftp://eclipse.ncdc.noaa.gov/pub/ibtracs/v03r05/wmo/csv/Allstorms.ibtracs_wmo.v03r05.csv.gz
        filename=Allstorms.ibtracs_wmo.v03r05.csv
        path=input

    This will attempt to download the gzipped csv file from the given URL
    and save it to the given filename, in the 'input' folder under the current
    directory. Gzipped files are automatically unzipped.


    :param str configFile: Name of configuration file.
    :raises IOError: If the data cannot be downloaded.


    """

    log.info('Checking availability of input data sets')

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')

    datasets.loadDatasets(configFile)
    for dataset in datasets.DATASETS:
        if not dataset.isDownloaded():
            log.info('Input file %s is not available', dataset.filename)
            try:
                log.info('Attempting to download %s', dataset.filename)

                pbar = ProgressBar('Downloading file %s: ' % dataset.filename,
                                   showProgressBar)

                def status(fn, done, size):
                    pbar.update(float(done)/size)

                dataset.download(status)
                log.info('Download successful')
            except IOError:
                log.error('Unable to download %s. Maybe a proxy problem?',
                          dataset.filename)
                sys.exit(1)
Example #20
    def __init__(self, configFile):

        config = ConfigParser()
        config.read(configFile)

        # Check for wind multiplier file path in config file
        if config.has_option('Input', 'RawMultipliers'):
            self.WMPath = config.get('Input', 'RawMultipliers')
            log.info('Using multiplier files from {0}'.format(self.WMPath))
        else:
            log.info(
                'Using default multiplier files from /g/data/fj6/multipliers/')
            self.WMPath = '/g/data/fj6/multipliers/'
Example #21
def doStatistics(configFile):
    """
    Calibrate the model with the :mod:`StatInterface` module.

    :param str configFile: Name of configuration file.

    """
    from DataProcess.CalcTrackDomain import CalcTrackDomain

    config = ConfigParser()
    config.read(configFile)

    showProgressBar = config.get('Logging', 'ProgressBar')
    getRMWDistFromInputData = config.getboolean('RMW',
                                                'GetRMWDistFromInputData')

    log.info('Running StatInterface')
    pbar = ProgressBar('Calibrating model: ', showProgressBar)

    # Auto-calculate track generator domain
    CalcTD = CalcTrackDomain(configFile)
    domain = CalcTD.calcDomainFromFile()

    pbar.update(0.05)

    from StatInterface import StatInterface
    statInterface = StatInterface.StatInterface(configFile,
                                                autoCalc_gridLimit=domain)
    statInterface.kdeGenesisDate()
    pbar.update(0.4)

    statInterface.kdeOrigin()
    pbar.update(0.5)

    statInterface.cdfCellBearing()
    pbar.update(0.6)

    statInterface.cdfCellSpeed()
    pbar.update(0.7)

    statInterface.cdfCellPressure()
    pbar.update(0.8)

    statInterface.calcCellStatistics()

    if getRMWDistFromInputData:
        statInterface.cdfCellSize()

    pbar.update(1.0)
    log.info('Completed StatInterface')
Example #22
    def __init__(self, configFile, progressbar=None):

        #CalcTD = CalcTrackDomain(configFile)
        #self.domain = CalcTD.calc()
        self.configFile = configFile
        self.progressbar = progressbar
        self.logger = logging.getLogger(__name__)
        self.logger.info("Initialising DataProcess")

        config = ConfigParser()
        config.read(configFile)

        self.outputPath = config.get('Output', 'Path')
        self.processPath = pjoin(self.outputPath, 'process')

        # Determine TCRM input directory
        tcrm_dir = pathLocator.getRootDirectory()
        self.tcrm_input_dir = pjoin(tcrm_dir, 'input')

        landmask = config.get('Input', 'LandMask')

        self.landmask = SampleGrid(landmask)

        fmt = config.get('Output', 'Format')

        self.ncflag = False
        if fmt.startswith("nc"):
            self.logger.debug("Output format is netcdf")
            self.ncflag = True
            self.data = {}
            #dimensions = {records}
            # variables = {init_index(records),
            #             genesis_index(records),
            #             non_init_index(records),
            #             lon(records), lat(records),
            #             year(records), month(records),
            #             day(records), hour(records),
            #             minute(records), julianday(records),
            #             bearing(records), speed(records),
            #             pressure(records), lsflag(records), }
            # global_attributes = dict(description=
            #                         source_file=,
            #                         source_file_moddate,
            #                         landmask_file=,
            #                         version=,)
        elif fmt.startswith("txt"):
            self.logger.debug("Output format is text")
            self.origin_year = pjoin(self.processPath, 'origin_year')
Example #23
    def __init__(self, configFile, dt):
        """
        Initialise required fields

        """

        self.configFile = configFile

        config = ConfigParser()
        config.read(configFile)

        landMaskFile = config.get('Input', 'LandMask')

        self.landMask = SampleGrid(landMaskFile)
        self.tol = 0  # Time over land
        self.dt = dt
Example #24
def main(configFile):
    """
    Main function for collecting and processing wind multipliers.

    :param str configFile: Path to configuration file.
    """

    config = ConfigParser()
    config.read(configFile)
    output_path = config.get('Output', 'Path')

    type_mapping = {'shielding': 'Ms', 'terrain': 'Mz', 'topographic': 'Mt'}
    dirns = ['e', 'n', 'ne', 'nw', 's', 'se', 'sw', 'w']

    copyTranslateMultipliers(configFile, type_mapping, output_path)
    mergeWindMultipliers(type_mapping, dirns, output_path)
    combineDirections(dirns, output_path)
Example #25
    def __init__(self, configFile, progressbar=None):

        config = ConfigParser()
        config.read(configFile)

        outputPath = config.get('Output', 'Path')

        try:
            self.localityID = config.get('Region', 'LocalityID')
        except Exception:
            self.localityID = -999999

        self.inputFile = pjoin(outputPath, 'hazard', 'hazard.nc')
        self.plotPath = pjoin(outputPath, 'plots', 'hazard')
        self.plotUnits = PlotUnits(config.get('Hazard', 'PlotSpeedUnits'))

        self.progressbar = progressbar
Example #26
    def __init__(self, configFile, autoCalc_gridLimit=None, progressbar=None):
        """
        Initialize the data and variables required for the interface
        """
        self.configFile = configFile
        config = ConfigParser()
        config.read(configFile)
        self.progressbar = progressbar

        log.info("Initialising StatInterface")

        self.kdeType = config.get('StatInterface', 'kdeType')
        minSamplesCell = config.getint('StatInterface', 'minSamplesCell')
        self.kdeStep = config.getfloat('StatInterface', 'kdeStep')
        self.outputPath = config.get('Output', 'Path')
        self.processPath = pjoin(self.outputPath, 'process')

        missingValue = cnfGetIniValue(self.configFile, 'StatInterface',
                                      'MissingValue', sys.maxsize)

        gridLimitStr = cnfGetIniValue(self.configFile, 'StatInterface',
                                      'gridLimit', '')

        if gridLimitStr != '':
            try:
                self.gridLimit = eval(gridLimitStr)
            except SyntaxError:
                log.exception('Error! gridLimit is not a dictionary')
        else:
            self.gridLimit = autoCalc_gridLimit
            log.info('No gridLimit specified - using automatic' +
                     ' selection: ' + str(self.gridLimit))

        try:
            gridSpace = config.geteval('Region', 'gridSpace')
            gridInc = config.geteval('Region', 'gridInc')
        except SyntaxError:
            log.exception('Error! gridSpace or gridInc not dictionaries')
            raise

        self.generateDist = GenerateDistributions(self.configFile,
                                                  self.gridLimit, gridSpace,
                                                  gridInc, self.kdeType,
                                                  minSamplesCell, missingValue)
        self.gridSpace = gridSpace
        self.gridInc = gridInc
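
The gridLimit string is parsed with eval() above. For a plain dictionary literal, ast.literal_eval is a safer drop-in that accepts the same syntax without executing arbitrary code; a sketch with an illustrative string:

import ast

gridLimitStr = "{'xMin': 90., 'xMax': 180., 'yMin': -30., 'yMax': -5.}"

try:
    gridLimit = ast.literal_eval(gridLimitStr)
except (ValueError, SyntaxError):
    raise ValueError("gridLimit is not a valid dictionary literal")

print(gridLimit['xMin'], gridLimit['yMax'])  # 90.0 -5.0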
Example #27
def doTimeseriesPlotting(configFile):
    """
    Run functions to plot time series output.

    :param str configFile: Path to configuration file.

    """

    config = ConfigParser()
    config.read(configFile)

    outputPath = config.get('Output', 'Path')
    timeseriesPath = pjoin(outputPath, 'process', 'timeseries')
    plotPath = pjoin(outputPath, 'plots', 'timeseries')
    log.info("Plotting time series data to {0}".format(plotPath))
    from PlotInterface.plotTimeseries import plotTimeseries
    plotTimeseries(timeseriesPath, plotPath)
Example #28
    def __init__(self, configFile):

        config = ConfigParser()
        config.read(configFile)
        self.configFile = configFile

        # Define the grid:
        gridLimit = config.geteval('Region', 'gridLimit')
        gridSpace = config.geteval('Region', 'GridSpace')

        self.lon_range = np.arange(gridLimit['xMin'], gridLimit['xMax'] + 0.1,
                                   gridSpace['x'])
        self.lat_range = np.arange(gridLimit['yMin'], gridLimit['yMax'] + 0.1,
                                   gridSpace['y'])

        outputPath = config.get('Output', 'Path')
        self.trackPath = pjoin(outputPath, 'tracks')
        self.plotPath = pjoin(outputPath, 'plots', 'stats')
        self.dataPath = pjoin(outputPath, 'process')

        # Determine TCRM input directory
        tcrm_dir = pathLocator.getRootDirectory()
        self.inputPath = pjoin(tcrm_dir, 'input')

        self.synNumYears = config.getint('TrackGenerator',
                                         'yearspersimulation')

        # Longitude crossing gates:
        self.gateLons = np.arange(self.lon_range.min(),
                                  self.lon_range.max() + 0.5, 10.)

        self.gateLats = np.arange(self.lat_range.min(),
                                  self.lat_range.max() + 0.5, 2.)

        # Add configuration settings to global attributes:
        self.gatts = {
            'history': "Longitude crossing rates for TCRM simulation",
            'version': flProgramVersion()
        }

        for section in config.sections():
            for option in config.options(section):
                key = "{0}_{1}".format(section, option)
                value = config.get(section, option)
                self.gatts[key] = value
Example #29
    def __init__(self, configFile):
        """
        Calculate central pressure distributions on a grid

        :param str configFile: path to a TCRM configuration file.
        """

        config = ConfigParser()
        config.read(configFile)
        self.configFile = configFile

        # Determine TCRM input directory
        tcrm_dir = pathLocator.getRootDirectory()
        self.inputPath = pjoin(tcrm_dir, 'input')

        # Define the grid:
        gridLimit = config.geteval('Region', 'gridLimit')
        gridSpace = config.geteval('Region', 'GridSpace')

        self.lon_range = np.arange(gridLimit['xMin'],
                                   gridLimit['xMax'] + 0.1,
                                   gridSpace['x'])
        self.lat_range = np.arange(gridLimit['yMin'],
                                   gridLimit['yMax'] + 0.1,
                                   gridSpace['y'])

        self.gridLimit = gridLimit
        outputPath = config.get('Output', 'Path')
        self.trackPath = pjoin(outputPath, 'tracks')
        self.plotPath = pjoin(outputPath, 'plots', 'stats')
        self.dataPath = pjoin(outputPath, 'process')

        self.synNumYears = config.getint('TrackGenerator',
                                         'yearspersimulation')
        cellnumber = 0
        self.gridCells = []
        for k in range(len(self.lon_range) - 1):
            for l in range(len(self.lat_range) - 1):
                ymin = self.lat_range[l]
                ymax = self.lat_range[l] + gridSpace['y']
                xmin = self.lon_range[k]
                xmax = self.lon_range[k] + gridSpace['x']
                self.gridCells.append(GridCell(xmin, ymin, xmax, ymax,
                                               cellnumber, (k, l)))
                cellnumber += 1
Example #30
def main(configFile):
    from Utilities.loadData import loadTrackFile
    from Utilities.config import ConfigParser
    from os.path import join as pjoin, normpath, dirname
    baseDir = normpath(pjoin(dirname(__file__), '..'))
    inputPath = pjoin(baseDir, 'input')
    config = ConfigParser()
    config.read(configFile)

    inputFile = config.get('DataProcess', 'InputFile')
    source = config.get('DataProcess', 'Source')

    gridLimit = config.geteval('Region', 'gridLimit')

    xx = np.arange(gridLimit['xMin'], gridLimit['xMax'] + .1, 0.1)
    yy = np.arange(gridLimit['yMin'], gridLimit['yMax'] + .1, 0.1)

    xgrid, ygrid = np.meshgrid(xx, yy)

    if len(dirname(inputFile)) == 0:
        inputFile = pjoin(inputPath, inputFile)

    try:
        tracks = loadTrackFile(configFile, inputFile, source)
    except (TypeError, IOError, ValueError):
        log.critical("Cannot load historical track file: {0}".format(inputFile))
        raise

    title = source
    outputPath = config.get('Output', 'Path')
    outputPath = pjoin(outputPath, 'plots', 'stats')
    outputFile = pjoin(outputPath, 'tctracks.png')

    map_kwargs = dict(llcrnrlon=xgrid.min(),
                      llcrnrlat=ygrid.min(),
                      urcrnrlon=xgrid.max(),
                      urcrnrlat=ygrid.max(),
                      projection='merc',
                      resolution='i')

    figure = TrackMapFigure()
    figure.add(tracks, xgrid, ygrid, title, map_kwargs)
    figure.plot()
    saveFigure(figure, outputFile)