Example No. 1
import numpy as N

def vaporPressureFromTemp(temp, temp_units, debug=False):
    # temperature must be in kelvins
    k_temp = convertUnits(temp, temp_units, 'K')
    # saturation vapor pressure in hPa (Clausius-Clapeyron approximation)
    vp = 6.11 * N.exp(5420. * ((k_temp - 273.15) / (273.15 * k_temp)))
    if debug: print('vaporPressureFromTemp', vp, k_temp, temp)
    return vp
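Every example on this page delegates to convertUnits(data, from_units, to_units), which is not shown here. For orientation, a minimal temperature-only sketch that matches the 'K'/'C'/'F' codes these examples use; the real helper is presumably far more general:

import numpy as N

def convertUnits(data, from_units, to_units):
    # minimal sketch: temperature units only, the real function is broader
    if from_units == to_units:
        return data
    data = N.asarray(data, dtype=float)
    # normalize the input to kelvins
    if from_units == 'C': kelvins = data + 273.15
    elif from_units == 'F': kelvins = (data - 32.) * 5. / 9. + 273.15
    elif from_units == 'K': kelvins = data
    else: raise ValueError('unsupported units: %s' % from_units)
    # then convert kelvins to the requested units
    if to_units == 'C': return kelvins - 273.15
    if to_units == 'F': return (kelvins - 273.15) * 9. / 5. + 32.
    if to_units == 'K': return kelvins
    raise ValueError('unsupported units: %s' % to_units)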
Example No. 2
    def _postUnpack(self, dataset_path, data, **kwargs):
        # convert unpacked data to the caller's units, when requested
        to_units = kwargs.get('units', None)
        if to_units is not None:
            from_units = self.getDatasetUnits(dataset_path)
            if from_units is not None:
                data = convertUnits(data, from_units, to_units)
        return data
Example No. 3
    def _prePack(self, dataset_path, data, **kwargs):
        # convert incoming data to the dataset's own units before packing
        from_units = kwargs.get('units', None)
        if from_units is not None:
            to_units = self.getDatasetUnits(dataset_path)
            if to_units is not None:
                # use only the part of the units string before any '*'
                if '*' in to_units: to_units = to_units.split('*')[0]
                data = convertUnits(data, from_units, to_units)
        return data
Example No. 4
    def _postUnpack(self, dataset_path, data, **kwargs):
        # check for units conversion
        out_units = kwargs.get('units', None)
        if out_units is None: return data

        units = self.datasetAttribute(dataset_path, 'units', None)
        if units is None:
            errmsg = '"%s" dataset has no attribute named "units"'
            raise AttributeError(errmsg % dataset_path)
        if out_units != units:
            return convertUnits(data, units, out_units)
        return data
Example No. 5
    def _serialize(self, dataset_key, data, data_attrs, **kwargs):
        data_units = self._dataUnits(dataset_key, data_attrs)
        serial_units = kwargs.get('units', data_units)
        if data_units == serial_units:
            return data, data_attrs

        if data_units is None:
            errmsg = "Cannot convert '%s' dataset to '%s' units."
            raise KeyError(errmsg % (dataset_key.name, serial_units))

        per_degree = kwargs.get('per_degree', False)
        try:
            if not per_degree:
                data = convertUnits(data, data_units, serial_units)
            else:
                # the 'd' prefix converts a temperature difference
                # (interval) rather than an absolute temperature
                data = convertUnits(data, 'd' + data_units, 'd' + serial_units)
        except Exception:
            errmsg = "Cannot convert '%s' from '%s' to '%s'."
            raise KeyError(errmsg % (dataset_key.name, data_units, serial_units))

        data_attrs['units'] = serial_units
        return data, data_attrs
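The per_degree branch matters because temperature scales differ by an offset as well as a factor, so intervals and absolute values convert differently. A hedged illustration, assuming convertUnits accepts interval codes such as 'dC' and 'dF' (implied by the example above, not confirmed):

convertUnits(5., 'C', 'F')     # absolute temperature: 5 C is 41.0 F
convertUnits(5., 'dC', 'dF')   # interval: 5 C-degrees is 9.0 F-degrees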
Example No. 6
def dewpointFromHumidityAndTemp(relative_humidity,
                                temp,
                                temp_units,
                                debug=False):
    # saturation vapor pressure (sat_vp)
    sat_vp = vaporPressureFromTemp(temp, temp_units, debug)
    if debug:
        print('dewpointFromHumidityAndTemp', relative_humidity, temp, temp_units)
        print('    saturation vapor pressure', sat_vp)
    # actual vapor pressure (vp) ... zero humidity would put log(0) into
    # the dewpoint equation, so it maps to NaN instead
    if isinstance(relative_humidity, N.ndarray):
        relative_humidity = N.where(relative_humidity == 0, N.nan,
                                    relative_humidity)
        vp = (relative_humidity * sat_vp) / 100.
    else:
        if relative_humidity == 0: vp = N.nan
        else: vp = (relative_humidity * sat_vp) / 100.
    if debug: print('    actual vapor pressure', vp)
    # dewpoint temperature in kelvins (inverts the vaporPressureFromTemp
    # equation)
    k_dew_point = 1. / ((1. / 273.15) - (N.log(vp / 6.11) / 5420.))
    # convert back to the units of the input temperature
    dew_point = convertUnits(k_dew_point, 'K', temp_units)
    if debug: print('    dew_point', k_dew_point, dew_point)
    return dew_point
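A quick sanity check with hypothetical inputs: at 100% relative humidity the dewpoint should equal the air temperature, so the first value below should come back as roughly 50 F:

import numpy as N

temps = N.array([50., 68., 86.])   # air temperatures, degrees F
rh = N.array([100., 50., 30.])     # relative humidity, percent
print(dewpointFromHumidityAndTemp(rh, temps, 'F'))
# expected: first element ~50.0, the others well below their air temperatures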
Example No. 7
            analysis_date = message.analDate
            print('    "analDate" =', analysis_date)
            fcast_time = message.forecastTime
            if verbose: print('        "forecastTime" =', fcast_time)
            if fcast_time > 158:  # forecastTime is in minutes
                fcast_time = analysis_date + relativedelta(minutes=fcast_time)
            else:  # forecastTime is in hours
                fcast_time = analysis_date + relativedelta(hours=fcast_time)
            if verbose: print('        forecast datetime =', fcast_time)
            fcast_date = fcast_time.date()

            if fcast_date > last_obs_date:
                data = message.values[ndfd_indexes].data
                data = data.reshape(source_shape)
                # 9999 is this dataset's missing-value flag
                data[N.where(data == 9999)] = N.nan
                data = convertUnits(data, 'K', 'F')
                print('        forecast date =', fcast_date)
                daily.append((fcast_date, data))
            print(' ')
        gribs.close()

    temps[temp_var] = tuple(sorted(daily, key=lambda x: x[0]))

#print('\n\n', temps['mint'])
#print('\n\n', temps['maxt'])

target_year = fcast_date.year
manager = factory.tempextsFileManager(source, target_year, region, 'temps',
                                      mode='a')
print('\nsaving forecast to', manager.filepath)
Example No. 8
    def applyBias(self,
                  dem_lons,
                  dem_lats,
                  dem_data,
                  dem_data_units,
                  stn_lons,
                  stn_lats,
                  stn_bias,
                  stn_bias_units,
                  report_rate=1000,
                  debug=False,
                  performance=False):
        """ Apply the calculated station temperature bias to the grid nodes. 
        """
        PERF_MSG = 'processed %d grid nodes in'
        PERF_MSG_SUFFIX = ' ... total = %d of %d'
        reporter = self.reporter

        search_radius = self.search_radius
        c_parm = self.c_parm
        vicinity = self.vicinity

        min_count = report_rate - 1

        dem_grid_shape = dem_lons.shape
        dem_grid_size = dem_lons.size

        # create empty in-memory arrays for calculated grids
        biased_data = N.empty(shape=dem_grid_shape, dtype=float)
        dem_data_bias = N.empty(shape=dem_grid_shape, dtype=float)

        num_nodes_processed = 0
        no_change = 0
        start_count = datetime.now()

        # make sure station and dem data are in the same units
        if stn_bias_units != dem_data_units:
            stn_bias = convertUnits(stn_bias, stn_bias_units, dem_data_units)

        # loop thru the nodes of the raw grid and apply the station bias
        for x in range(dem_grid_shape[0]):
            for y in range(dem_grid_shape[1]):
                if performance:
                    # report performance every 'report_rate' passes thru loop
                    if num_nodes_processed > min_count and\
                       num_nodes_processed % report_rate == 0:
                        msg = PERF_MSG % (report_rate)
                        sfx = PERF_MSG_SUFFIX % (num_nodes_processed,
                                                 dem_grid_size)
                        reporter.logPerformance(start_count, msg, sfx)
                        start_count = datetime.now()

                node_lon = dem_lons[x, y]
                node_lat = dem_lats[x, y]
                node_value = dem_data[x, y]
                if not self._passesApplyBiasTest(node_value, node_lon,
                                                 node_lat, stn_bias, stn_lons,
                                                 stn_lats):
                    biased_data[x, y] = dem_data[x, y]
                    dem_data_bias[x, y] = 0.
                    num_nodes_processed += 1
                    no_change += 1
                    continue

                # get indexes of all stations within search radius of grid node
                # bbox will be different for each grid node
                bbox = (node_lon - search_radius, node_lon + search_radius,
                        node_lat - search_radius, node_lat + search_radius)
                indexes = N.where((stn_lons >= bbox[0]) & (stn_lons <= bbox[1])
                                  & (stn_lats >= bbox[2])
                                  & (stn_lats <= bbox[3]))

                # no stations within search radius
                if len(indexes[0]) < 1:
                    # NO ADJUSTMENT CAN BE MADE
                    biased_data[x, y] = dem_data[x, y]
                    dem_data_bias[x, y] = 0.
                    num_nodes_processed += 1
                    no_change += 1
                    continue

                # coordinates of all stations in the search area
                area_lons = stn_lons[indexes]
                area_lats = stn_lats[indexes]

                # test stations for 'nearness' to the grid node
                bbox = (node_lon - vicinity, node_lon + vicinity,
                        node_lat - vicinity, node_lat + vicinity)
                nearby = N.where((area_lons >= bbox[0])
                                 & (area_lons <= bbox[1])
                                 & (area_lats >= bbox[2])
                                 & (area_lats <= bbox[3]))

                # to use MQ there must be at least one 'nearby' station
                # or stations in all four quadrants surrounding the node
                if (len(nearby[0]) < 1 and not allQuadrants(
                        node_lon, node_lat, area_lons, area_lats)):
                    # NO ADJUSTMENT CAN BE MADE
                    biased_data[x, y] = dem_data[x, y]
                    dem_data_bias[x, y] = 0.
                    num_nodes_processed += 1
                    no_change += 1
                    continue

                # run multiquadric interpolation on BIAS
                data_bias = interp.mq(node_lat, node_lon, area_lats, area_lons,
                                      stn_bias[indexes], c_parm)
                if N.isfinite(data_bias):
                    # apply valid bias
                    value = dem_data[x, y] - data_bias
                else:
                    # invalid bias ... NO ADJUSTMENT CAN BE MADE
                    value = dem_data[x, y]
                    data_bias = 0.
                    no_change += 1

                if N.isfinite(value):
                    biased_data[x, y] = value
                    dem_data_bias[x, y] = data_bias
                else:
                    biased_data[x, y] = dem_data[x, y]
                    dem_data_bias[x, y] = 0.
                    no_change += 1

                num_nodes_processed += 1

        # log performance for nodes not yet reported
        unreported = num_nodes_processed % report_rate
        if performance and unreported > 0:
            msg = PERF_MSG % (unreported)
            sfx = PERF_MSG_SUFFIX % (num_nodes_processed, dem_grid_size)
            reporter.logPerformance(start_count, msg, sfx)

        return biased_data, dem_data_bias, (num_nodes_processed, no_change)
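A usage sketch with fabricated data, just to show the call shape; gridder stands in for an already-configured instance of this class (a hypothetical name), and all coordinates and values are invented:

import numpy as N

# tiny 2x2 grid and two stations (hypothetical coordinates and values)
dem_lons = N.array([[-76.0, -75.9], [-76.0, -75.9]])
dem_lats = N.array([[42.0, 42.0], [42.1, 42.1]])
dem_data = N.array([[30.0, 31.0], [29.5, 30.5]])  # degrees F
stn_lons = N.array([-75.95, -76.05])
stn_lats = N.array([42.05, 42.02])
stn_bias = N.array([0.8, -0.3])  # station bias in C; converted to dem units

biased, bias_grid, counts = gridder.applyBias(
    dem_lons, dem_lats, dem_data, 'F',
    stn_lons, stn_lats, stn_bias, 'C')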
Example No. 9
    def calculateBias(self,
                      algorithm,
                      stn_uids,
                      stn_lons,
                      stn_lats,
                      stn_data,
                      stn_data_units,
                      raw_lons,
                      raw_lats,
                      raw_data,
                      raw_data_units,
                      report_rate=100,
                      debug=False,
                      performance=False):
        """ Calculate the weighted difference between the data value at
        each station and the nearby grid nodes. It will use multiquadric
        interpolation except when there are an insufficient number of grid
        nodes nearby, then it will use a simple inverse distance weighted
        average.
        """
        # local references to instance attributes
        reporter = self.reporter
        vicinity = self.vicinity

        min_count = report_rate - 1
        PERF_MSG = 'processed %d stations (%d total) in'

        # initialize station temperature bias arrays
        stn_interp_data = []
        stn_data_bias = []
        num_stations = len(stn_uids)

        # initialize tracking variables
        algorithm_counts = [0, 0, 0]
        station_count = 0
        stations_bad_data = 0
        stations_outside = 0
        insufficient_coverage = 0
        bias_not_calculated = 0
        start_report = datetime.now()

        # make sure station and dem data are in the same units
        if raw_data_units != stn_data_units:
            raw_data = convertUnits(raw_data, raw_data_units, stn_data_units)

        # loop though list of stations making adjustments to both station and
        # grid node temperature extremes
        for indx in range(num_stations):
            # the following is good for a limited test loop
            #for indx in (84,85,278,330,337,345,360,368,444,476):
            # report performance every 'report_rate' passes thru the loop
            if performance and (station_count > min_count
                                and station_count % report_rate == 0):
                reporter.logPerformance(
                    start_report, PERF_MSG % (report_rate, station_count))
                start_report = datetime.now()

            # extract observation data for this station
            stn_id = stn_uids[indx]
            stn_lon = stn_lons[indx]
            stn_lat = stn_lats[indx]
            stn_info = 'station %d (%s) at [%-9.5f, %-9.5f]' % (
                indx, stn_id, stn_lon, stn_lat)

            # station is not within the bounding box for this run
            if not self._pointInBounds(stn_lon, stn_lat):
                stn_interp_data.append(N.inf)
                stn_data_bias.append(N.inf)
                stations_outside += 1
                station_count += 1
                continue

            stn_value = stn_data[indx]
            # check for invalid data value for this station
            # this shouldn't happen if station data prep is done right !!!
            if not N.isfinite(stn_value):
                # set missing values and skip to next iteration
                stn_interp_data.append(N.inf)
                stn_data_bias.append(N.inf)
                stations_bad_data += 1
                station_count += 1
                if debug:
                    print('skipped ', stn_info)
                    print('... bad data value', stn_value)
                continue

            # additional check that may be required by sub-classed data types
            if not self._passesCalcBiasTest(stn_value, stn_lon, stn_lat,
                                            raw_data, raw_lons, raw_lats):
                stn_interp_data.append(stn_value)
                stn_data_bias.append(0.)
                station_count += 1
                bias_not_calculated += 1
                continue

            # apply the appropriate bias calculation algorithm
            if algorithm == 'mq':
                result = self.doMQInterp(stn_lon, stn_lat, stn_info, raw_lons,
                                         raw_lats, raw_data, debug)
            else:
                result = self.doIDWInterp(stn_lon, stn_lat, stn_info, raw_lons,
                                          raw_lats, raw_data, debug)

            if result is None:
                # set missing values and skip to next iteration
                stn_interp_data.append(N.inf)
                stn_data_bias.append(N.inf)
                insufficient_coverage += 1
                station_count += 1
                continue

            interpolated_value = result[1]
            data_bias = interpolated_value - stn_value
            # note: as written, this reduces algebraically to stn_value
            estimated_value = interpolated_value - data_bias

            stn_data_bias.append(data_bias)
            stn_interp_data.append(estimated_value)

            station_count += 1
            algorithm_counts[result[0]] += 1

        if performance:
            unreported = station_count % report_rate
            if unreported > 0:
                reporter.logPerformance(start_report,
                                        PERF_MSG % (unreported, station_count))

        # convert the interpolated data and bias to numpy arrays
        stn_interp_data = N.array(stn_interp_data, dtype=float)
        stn_data_bias = N.array(stn_data_bias, dtype=float)
        indexes = N.where(N.isnan(stn_data_bias) | N.isinf(stn_data_bias))
        bad_bias_count = len(indexes[0])

        statistics = (station_count, algorithm_counts[2], algorithm_counts[1],
                      bad_bias_count, stations_bad_data, stations_outside,
                      insufficient_coverage, bias_not_calculated)
        return stn_interp_data, stn_data_bias, statistics
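Both bias examples lean on interp.mq, a multiquadric interpolator that is not shown on this page. For orientation, a minimal sketch of classic Hardy multiquadric RBF interpolation at a single point; the real interp.mq almost certainly differs in its details and return convention:

import numpy as N

def mq_interp(node_lat, node_lon, stn_lats, stn_lons, stn_values, c_parm):
    # Hardy multiquadric basis: phi(r) = sqrt(r**2 + c**2)
    def phi(dist):
        return N.sqrt(dist ** 2 + c_parm ** 2)
    # pairwise station distances (crude planar lat/lon approximation)
    dlat = stn_lats[:, None] - stn_lats[None, :]
    dlon = stn_lons[:, None] - stn_lons[None, :]
    A = phi(N.sqrt(dlat ** 2 + dlon ** 2))
    # solve A @ weights = stn_values for the RBF weights
    weights = N.linalg.solve(A, stn_values)
    # evaluate the interpolant at the grid node
    dist = N.sqrt((stn_lats - node_lat) ** 2 + (stn_lons - node_lon) ** 2)
    return N.dot(weights, phi(dist))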