Example #1
0
    def __init__(self, stationid, longitude, latitude):
        """
        Create a station record at the given location.

        :param stationid: Unique identifier for the station.
        :param longitude: Station longitude.
        :param latitude: Station latitude.
        """
        self.id, self.lon, self.lat = stationid, longitude, latitude
        # Growable per-station timeseries storage, one field per output name.
        self.data = DynamicRecArray(
            dtype={'names': OUTPUT_NAMES, 'formats': OUTPUT_TYPES})
Example #2
0
def process_timeseries(config_file):
    """
    Process a set of timeseries files to include the multiplier values.

    The combined multiplier values are stored in a shape file as fields,
    and records are keyed by the same code that is used to select
    stations for sampling.

    :param str config_file: Path to a configuration file.

    :raises ValueError: If the station ID field named in the configuration
        is not a field of the station shape file.
    :raises OSError: If the output folder cannot be created.
    """

    config = ConfigParser()
    config.read(config_file)

    stnFile = config.get('Input', 'LocationFile')
    key_name = config.get('Timeseries', 'StationID')
    inputPath = pjoin(config.get('Output', 'Path'), 'process', 'timeseries')
    outputPath = pjoin(inputPath, 'local')

    if not isdir(outputPath):
        try:
            os.makedirs(outputPath)
        except OSError:
            # Record the failure before propagating it to the caller.
            log.exception("Cannot create output path: %s", outputPath)
            raise

    # Lazy %-style arguments: the message is only built if it is emitted.
    log.info("Loading stations from %s", stnFile)
    log.info("Timeseries data will be written into %s", outputPath)

    directions = ['n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw']

    sf = shapefile.Reader(stnFile)
    # sf.fields[0] is the shapefile deletion flag, so skip it.
    field_names = [field[0] for field in sf.fields[1:]]
    try:
        key_index = field_names.index(key_name)
    except ValueError:
        log.exception("%s not a field in %s", key_name, stnFile)
        raise

    # Accumulators for the per-station extreme records.
    min_data = DynamicRecArray(dtype={
        'names': MINMAX_NAMES,
        'formats': MINMAX_TYPES
    })
    max_data = DynamicRecArray(dtype={
        'names': MINMAX_NAMES,
        'formats': MINMAX_TYPES
    })

    records = sf.records()
    # Indices of the combined multiplier fields, one per compass direction.
    # 'direction' avoids shadowing the builtin 'dir'.
    indexes = [field_names.index('m4_%s' % direction)
               for direction in directions]

    for record in records:
        stnId = record[key_index]
        inputFile = pjoin(inputPath, 'ts.{0}.csv'.format(stnId))
        outputFile = pjoin(outputPath, 'ts.{0}.csv'.format(stnId))
        if os.path.isfile(inputFile):
            # Load multipliers for this location and apply them:
            mvals = [float(record[i]) for i in indexes]
            maxdata, mindata = tsmultiply(inputFile, tuple(mvals), outputFile)
            min_data.append(tuple(mindata))
            max_data.append(tuple(maxdata))

        else:
            log.debug("No timeseries file for %s", stnId)

    # Save local minima/maxima across all processed stations.
    maxfile = pjoin(outputPath, 'local_maxima.csv')
    minfile = pjoin(outputPath, 'local_minima.csv')
    maxheader = ('Station,Time,Longitude,Latitude,Speed,'
                 'UU,VV,Bearing,Pressure')
    np.savetxt(maxfile,
               max_data.data,
               fmt=MINMAX_FMT,
               delimiter=',',
               header=maxheader,
               comments='')
    np.savetxt(minfile,
               min_data.data,
               fmt=MINMAX_FMT,
               delimiter=',',
               header=maxheader,
               comments='')
Example #3
0
def process_timeseries(config_file):
    """
    Process a set of timeseries files to include the multiplier values.

    The combined multiplier values are stored in a shape file as fields,
    and records are keyed by the same code that is used to select
    stations for sampling.

    :param str config_file: Path to a configuration file.

    :raises ValueError: If the station ID field named in the configuration
        is not a field of the station shape file.
    :raises OSError: If the output folder cannot be created.
    """

    config = ConfigParser()
    config.read(config_file)

    stnFile = config.get('Timeseries', 'StationFile')
    key_name = config.get('Timeseries', 'StationID')
    inputPath = pjoin(config.get('Output', 'Path'),
                      'process', 'timeseries')
    outputPath = pjoin(inputPath, 'local')

    if not isdir(outputPath):
        try:
            os.makedirs(outputPath)
        except OSError:
            # Record the failure before propagating it to the caller.
            log.exception("Cannot create output path: %s", outputPath)
            raise

    # Lazy %-style arguments: the message is only built if it is emitted.
    log.info("Loading stations from %s", stnFile)
    log.info("Timeseries data will be written into %s", outputPath)

    directions = ['n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw']

    sf = shapefile.Reader(stnFile)
    # sf.fields[0] is the shapefile deletion flag, so skip it.
    field_names = [field[0] for field in sf.fields[1:]]
    try:
        key_index = field_names.index(key_name)
    except ValueError:
        log.exception("%s not a field in %s", key_name, stnFile)
        raise

    # Accumulators for the per-station extreme records.
    min_data = DynamicRecArray(dtype={'names': MINMAX_NAMES,
                                      'formats': MINMAX_TYPES})
    max_data = DynamicRecArray(dtype={'names': MINMAX_NAMES,
                                      'formats': MINMAX_TYPES})

    records = sf.records()
    # Indices of the combined multiplier fields, one per compass direction.
    # 'direction' avoids shadowing the builtin 'dir'.
    indexes = [field_names.index('m4_%s' % direction)
               for direction in directions]

    for record in records:
        stnId = record[key_index]
        inputFile = pjoin(inputPath, 'ts.{0}.csv'.format(stnId))
        outputFile = pjoin(outputPath, 'ts.{0}.csv'.format(stnId))
        if os.path.isfile(inputFile):
            # Load multipliers for this location and apply them:
            mvals = [float(record[i]) for i in indexes]
            maxdata, mindata = tsmultiply(inputFile, tuple(mvals), outputFile)
            min_data.append(tuple(mindata))
            max_data.append(tuple(maxdata))

        else:
            log.debug("No timeseries file for %s", stnId)

    # Save local minima/maxima across all processed stations.
    maxfile = pjoin(outputPath, 'local_maxima.csv')
    minfile = pjoin(outputPath, 'local_minima.csv')
    maxheader = ('Station,Time,Longitude,Latitude,Speed,'
                 'UU,VV,Bearing,Pressure')
    np.savetxt(maxfile, max_data.data, fmt=MINMAX_FMT, delimiter=',',
               header=maxheader, comments='')
    np.savetxt(minfile, min_data.data, fmt=MINMAX_FMT, delimiter=',',
               header=maxheader, comments='')
Example #4
0
    def shutdown(self):
        """
        Write the data to file, each station to a separate file.

        Stations whose recorded 'Speed' values are all zero are skipped.
        After the per-station files are written, the record at each
        station's maximum wind speed and the record at its minimum
        pressure are collected and saved to ``self.maxfile`` and
        ``self.minfile`` respectively.
        """

        header = 'Station,Time,Longitude,Latitude,Speed,UU,VV,Bearing,Pressure'
        maxheader = ('Station,Time,Longitude,Latitude,Speed,'
                     'UU,VV,Bearing,Pressure')

        # Accumulators for the per-station extreme records.
        max_data = DynamicRecArray(dtype={
            'names': MINMAX_NAMES,
            'formats': MINMAX_TYPES
        })

        min_data = DynamicRecArray(dtype={
            'names': MINMAX_NAMES,
            'formats': MINMAX_TYPES
        })

        for stn in self.stations:

            # Only write stations that actually recorded some wind.
            if np.any(stn.data.data['Speed'] > 0.0):
                fname = pjoin(self.outputPath, 'ts.%s.csv' % str(stn.id))
                np.savetxt(fname,
                           np.array(stn.data.data),
                           fmt=OUTPUT_FMT,
                           delimiter=',',
                           header=header,
                           comments='')
                # Timestep of the peak gust and of the lowest pressure.
                max_step = np.argmax(stn.data.data['Speed'])
                min_step = np.argmin(stn.data.data['Pressure'])
                max_data.append(tuple(stn.data.data[max_step]))
                min_data.append(tuple(stn.data.data[min_step]))

        np.savetxt(self.maxfile,
                   max_data.data,
                   fmt=MINMAX_FMT,
                   delimiter=',',
                   header=maxheader,
                   comments='')
        np.savetxt(self.minfile,
                   min_data.data,
                   fmt=MINMAX_FMT,
                   delimiter=',',
                   header=maxheader,
                   comments='')
        log.info("Station data written to file")
Example #5
0
    def shutdown(self):
        """
        Write the data to file, each station to a separate file.

        Stations whose recorded 'Speed' values are all zero are skipped.
        After the per-station files are written, the record at each
        station's maximum wind speed and the record at its minimum
        pressure are collected and saved to ``self.maxfile`` and
        ``self.minfile`` respectively.
        """

        header = 'Station,Time,Longitude,Latitude,Speed,UU,VV,Bearing,Pressure'
        maxheader = ('Station,Time,Longitude,Latitude,Speed,'
                     'UU,VV,Bearing,Pressure')

        # Accumulators for the per-station extreme records.
        max_data = DynamicRecArray(dtype={'names': MINMAX_NAMES,
                                          'formats': MINMAX_TYPES})

        min_data = DynamicRecArray(dtype={'names': MINMAX_NAMES,
                                          'formats': MINMAX_TYPES})

        for stn in self.stations:

            # Only write stations that actually recorded some wind.
            if np.any(stn.data.data['Speed'] > 0.0):
                fname = pjoin(self.outputPath, 'ts.%s.csv' % str(stn.id))
                np.savetxt(fname, np.array(stn.data.data), fmt=OUTPUT_FMT,
                           delimiter=',', header=header, comments='')
                # Timestep of the peak gust and of the lowest pressure.
                max_step = np.argmax(stn.data.data['Speed'])
                min_step = np.argmin(stn.data.data['Pressure'])
                max_data.append(tuple(stn.data.data[max_step]))
                min_data.append(tuple(stn.data.data[min_step]))

        np.savetxt(self.maxfile, max_data.data, fmt=MINMAX_FMT, delimiter=',',
                   header=maxheader, comments='')
        np.savetxt(self.minfile, min_data.data, fmt=MINMAX_FMT, delimiter=',',
                   header=maxheader, comments='')
        log.info("Station data written to file")