def test_pet(self):
    """
    Tests for indices.pet(): all-NaN input propagation, masked-array
    handling, and latitude argument validation.
    """

    # confirm that an input array of all NaNs for temperature results in the same array returned
    # (np.nan is used instead of the deprecated np.NaN alias, which was removed in NumPy 2.0)
    all_nan_temps = np.full(self.fixture_temps_celsius.shape, np.nan)
    computed_pet = indices.pet(all_nan_temps,
                               self.fixture_latitude_degrees,
                               self.fixture_data_year_start_monthly)
    np.testing.assert_equal(
        computed_pet, all_nan_temps,
        'All-NaN input array does not result in the expected all-NaN result'
    )

    # confirm that a masked input array of all NaNs for temperature results in the same masked array returned
    masked_all_nan_temps = np.ma.array(all_nan_temps)
    computed_pet = indices.pet(masked_all_nan_temps,
                               self.fixture_latitude_degrees,
                               self.fixture_data_year_start_monthly)
    np.testing.assert_equal(
        computed_pet, masked_all_nan_temps,
        'All-NaN masked input array does not result in the expected all-NaN masked result'
    )

    # confirm that a missing/None latitude value raises an error
    # (use the unittest-native self.assertRaises consistently, rather than
    # mixing it with np.testing.assert_raises as before)
    self.assertRaises(ValueError, indices.pet,
                      self.fixture_temps_celsius, None,
                      self.fixture_data_year_start_monthly)

    # confirm that a NaN latitude value raises an error
    self.assertRaises(ValueError, indices.pet,
                      self.fixture_temps_celsius, np.nan,
                      self.fixture_data_year_start_monthly)

    # confirm that an invalid latitude value raises an error
    self.assertRaises(
        ValueError,
        indices.pet,
        self.fixture_temps_celsius,
        91.0,  # latitude > 90 is invalid
        self.fixture_data_year_start_monthly)

    # confirm that an invalid latitude value raises an error
    self.assertRaises(
        ValueError,
        indices.pet,
        self.fixture_temps_celsius,
        -91.0,  # latitude < -90 is invalid
        self.fixture_data_year_start_monthly)

    # compute PET from the monthly temperatures, latitude, and initial years -- if this runs
    # without error then this test passes, as the underlying method(s) being used to compute
    # PET are tested in the relevant test_compute.py or test_thornthwaite.py codes
    # (the previous unused assignment of the result has been dropped)
    indices.pet(self.fixture_temps_celsius,
                self.fixture_latitude_degrees,
                self.fixture_data_year_start_monthly)
# Example #2
def test_pet(temps_celsius, latitude_degrees, data_year_start_monthly):
    """
    Tests for indices.pet(): all-NaN input propagation, masked-array
    handling, latitude validation, and latitude-as-array support.

    :param temps_celsius: fixture array of monthly temperatures, in Celsius
    :param latitude_degrees: fixture latitude value, in degrees north
    :param data_year_start_monthly: fixture initial year of the monthly data
    """

    # confirm that an input temperature array of only NaNs
    # results in the same all NaNs array being returned
    # (np.nan is used instead of the deprecated np.NaN alias, removed in NumPy 2.0)
    all_nan_temps = np.full(temps_celsius.shape, np.nan)
    computed_pet = indices.pet(all_nan_temps, latitude_degrees,
                               data_year_start_monthly)
    np.testing.assert_equal(
        computed_pet, all_nan_temps, "All-NaN input array does not result in "
        "the expected all-NaN result")

    # confirm that a masked input temperature array of
    # only NaNs results in the same masked array being returned
    masked_all_nan_temps = np.ma.array(all_nan_temps)
    computed_pet = indices.pet(masked_all_nan_temps, latitude_degrees,
                               data_year_start_monthly)
    np.testing.assert_equal(
        computed_pet, masked_all_nan_temps,
        "All-NaN masked input array does not result in "
        "the expected all-NaN masked result")

    # confirm that a missing/None latitude value raises an error
    # (use pytest.raises consistently rather than mixing it with
    # np.testing.assert_raises as before)
    pytest.raises(ValueError, indices.pet, temps_celsius, None,
                  data_year_start_monthly)

    # confirm that a NaN latitude value raises an error
    pytest.raises(ValueError, indices.pet, temps_celsius, np.nan,
                  data_year_start_monthly)

    # confirm that an invalid latitude value raises an error
    pytest.raises(
        ValueError,
        indices.pet,
        temps_celsius,
        91.0,  # latitude > 90 is invalid
        data_year_start_monthly)

    # confirm that an invalid latitude value raises an error
    pytest.raises(
        ValueError,
        indices.pet,
        temps_celsius,
        -91.0,  # latitude < -90 is invalid
        data_year_start_monthly)

    # compute PET from the monthly temperatures, latitude, and initial years -- if this runs without
    # error then this test passes, as the underlying method(s) being used to compute PET will be tested
    # in the relevant test_compute.py or test_eto.py codes
    indices.pet(temps_celsius, latitude_degrees, data_year_start_monthly)

    # compute PET from the monthly temperatures, latitude (as an array), and initial years -- if this runs without
    # error then this test passes, as the underlying method(s) being used to compute PET will be tested
    # in the relevant test_compute.py or test_eto.py codes
    indices.pet(temps_celsius, np.array([latitude_degrees]),
                data_year_start_monthly)
# Example #3
    def _compute_and_write_division(self, div_index):
        """
        Computes indices (PET, Palmer, SPI, SPEI, PNP) for a single climate
        division, writing the output into the divisions NetCDF file.

        :param div_index: index of the division within the dataset's
            division dimension
        :raise ValueError: if the input temperature variable's units are
            neither Fahrenheit nor Celsius
        """

        # only process specified divisions
        if self.divisions is not None and div_index not in self.divisions:
            return

        # open the NetCDF file in append mode so computed values can be written back
        with netCDF4.Dataset(self.divisions_file, 'a') as divisions_dataset:

            climdiv_id = divisions_dataset['division'][div_index]

            # only process divisions within CONUS, 101 - 4811
            if climdiv_id > 4811:
                return

            logger.info('Processing indices for division %s', climdiv_id)

            # read the division of input temperature values
            temperature = divisions_dataset[self.var_name_temperature][
                div_index, :]  # assuming (divisions, time) orientation

            # initialize the latitude outside of the valid range, in order to use
            # this within a conditional below to verify a valid latitude
            latitude = -100.0

            # latitudes are only available for certain divisions, make sure we
            # have one for this division index
            if div_index < divisions_dataset['lat'][:].size:

                # get the actual latitude value (assumed to be in degrees north)
                # for the latitude slice specified by the index
                latitude = divisions_dataset['lat'][div_index]

            # only proceed if the latitude value is within the valid range
            if not np.isnan(latitude) and (latitude < 90.0) and (latitude >
                                                                 -90.0):

                # convert temperatures from Fahrenheit to Celsius, if necessary
                temperature_units = divisions_dataset[
                    self.var_name_temperature].units
                if temperature_units in [
                        'degree_Fahrenheit', 'degrees Fahrenheit', 'degrees F',
                        'fahrenheit', 'Fahrenheit', 'F'
                ]:

                    # TODO make sure this application of the ufunc is any faster  pylint: disable=fixme
                    temperature = scipy.constants.convert_temperature(
                        temperature, 'F', 'C')

                elif temperature_units not in [
                        'degree_Celsius', 'degrees Celsius', 'degrees C',
                        'celsius', 'Celsius', 'C'
                ]:

                    raise ValueError(
                        'Unsupported temperature units: \'{0}\''.format(
                            temperature_units))

                logger.info('\tComputing PET for division %s', climdiv_id)

                logger.info('\t\tCalculating PET using Thornthwaite method')

                # compute PET (Thornthwaite) across all longitudes of the latitude slice
                pet_time_series = indices.pet(
                    temperature,
                    latitude_degrees=latitude,
                    data_start_year=self.data_start_year)

                # the above returns PET in millimeters, note this for further consideration
                pet_units = 'millimeter'

                # write the PET values to NetCDF -- the lock is used as a
                # context manager so it is released even if the write raises
                with lock:
                    divisions_dataset['pet'][div_index, :] = np.reshape(
                        pet_time_series, (1, pet_time_series.size))
                    divisions_dataset.sync()

            else:

                # no valid latitude, so PET cannot be computed for this division
                # (np.nan replaces the deprecated np.NaN alias, removed in NumPy 2.0)
                pet_time_series = np.full(temperature.shape, np.nan)
                pet_units = None

            # read the division's input precipitation and available water capacity values
            precip_time_series = divisions_dataset[self.var_name_precip][
                div_index, :]  # assuming (divisions, time) orientation

            if div_index < divisions_dataset[self.var_name_soil][:].size:
                awc = divisions_dataset[self.var_name_soil][
                    div_index]  # assuming (divisions) orientation
                awc += 1  # AWC values need to include top inch, values from the soil file do not, so we add top inch here
            else:
                awc = np.nan

            # allocate arrays to contain a latitude slice of Palmer values
            time_size = divisions_dataset['time'].size
            division_shape = (time_size)
            pdsi = np.full(division_shape, np.nan)
            phdi = np.full(division_shape, np.nan)
            pmdi = np.full(division_shape, np.nan)
            scpdsi = np.full(division_shape, np.nan)
            zindex = np.full(division_shape, np.nan)

            # compute SPI and SPEI for the current division only if we have valid inputs
            if not np.isnan(precip_time_series).all():

                # put precipitation into inches if not already
                mm_to_inches_multiplier = 0.0393701
                possible_mm_units = ['millimeters', 'millimeter', 'mm']
                if divisions_dataset[
                        self.var_name_precip].units in possible_mm_units:
                    precip_time_series = precip_time_series * mm_to_inches_multiplier

                if not np.isnan(pet_time_series).all():

                    # compute Palmer indices if we have valid inputs
                    if not np.isnan(awc):

                        # if PET is in mm, convert to inches since the Palmer
                        # codes use imperial units -- NOTE the original code
                        # additionally applied an unconditional second mm-to-inches
                        # conversion here, corrupting the PET values; only the
                        # single conditional conversion is correct
                        if pet_units in possible_mm_units:
                            pet_time_series = pet_time_series * mm_to_inches_multiplier

                        logger.info('\tComputing PDSI for division %s',
                                    climdiv_id)

                        # compute Palmer indices
                        palmer_values = indices.scpdsi(
                            precip_time_series, pet_time_series, awc,
                            self.data_start_year, self.calibration_start_year,
                            self.calibration_end_year)

                        scpdsi = palmer_values[0]
                        pdsi = palmer_values[1]
                        phdi = palmer_values[2]
                        pmdi = palmer_values[3]
                        zindex = palmer_values[4]

                        # write the Palmer values to NetCDF
                        with lock:
                            divisions_dataset['pdsi'][div_index, :] = np.reshape(
                                pdsi, (1, pdsi.size))
                            divisions_dataset['phdi'][div_index, :] = np.reshape(
                                phdi, (1, phdi.size))
                            divisions_dataset['pmdi'][div_index, :] = np.reshape(
                                pmdi, (1, pmdi.size))
                            # BUG FIX: the original wrote the pdsi array into
                            # the 'scpdsi' variable; write scpdsi instead
                            divisions_dataset['scpdsi'][div_index, :] = np.reshape(
                                scpdsi, (1, scpdsi.size))
                            divisions_dataset['zindex'][div_index, :] = np.reshape(
                                zindex, (1, zindex.size))
                            divisions_dataset.sync()

                    # process the SPI and SPEI at the specified month scales
                    for months in self.scale_months:

                        logger.info(
                            '\tComputing SPI/SPEI/PNP at %s-month scale for division %s',
                            months, climdiv_id)

                        #TODO ensure that the precipitation and PET values are using the same units  pylint: disable=fixme

                        # compute SPEI/Gamma
                        spei_gamma = indices.spei_gamma(months,
                                                        precip_time_series,
                                                        pet_mm=pet_time_series)

                        # compute SPEI/Pearson
                        spei_pearson = indices.spei_pearson(
                            months,
                            self.data_start_year,
                            precip_time_series,
                            pet_mm=pet_time_series,
                            calibration_year_initial=self.calibration_start_year,
                            calibration_year_final=self.calibration_end_year)

                        # compute SPI/Gamma
                        spi_gamma = indices.spi_gamma(precip_time_series,
                                                      months)

                        # compute SPI/Pearson
                        spi_pearson = indices.spi_pearson(
                            precip_time_series, months, self.data_start_year,
                            self.calibration_start_year,
                            self.calibration_end_year)

                        # compute PNP
                        pnp = indices.percentage_of_normal(
                            precip_time_series, months, self.data_start_year,
                            self.calibration_start_year,
                            self.calibration_end_year)

                        # create variable names which should correspond to the appropriate scaled index output variables
                        scaled_name_suffix = str(months).zfill(2)
                        spei_gamma_variable_name = 'spei_gamma_' + scaled_name_suffix
                        spei_pearson_variable_name = 'spei_pearson_' + scaled_name_suffix
                        spi_gamma_variable_name = 'spi_gamma_' + scaled_name_suffix
                        spi_pearson_variable_name = 'spi_pearson_' + scaled_name_suffix
                        pnp_variable_name = 'pnp_' + scaled_name_suffix

                        # write the SPI, SPEI, and PNP values to NetCDF
                        with lock:
                            divisions_dataset[spei_gamma_variable_name][
                                div_index, :] = np.reshape(spei_gamma,
                                                           (1, spei_gamma.size))
                            divisions_dataset[spei_pearson_variable_name][
                                div_index, :] = np.reshape(spei_pearson,
                                                           (1, spei_pearson.size))
                            divisions_dataset[spi_gamma_variable_name][
                                div_index, :] = np.reshape(spi_gamma,
                                                           (1, spi_gamma.size))
                            divisions_dataset[spi_pearson_variable_name][
                                div_index, :] = np.reshape(spi_pearson,
                                                           (1, spi_pearson.size))
                            divisions_dataset[pnp_variable_name][
                                div_index, :] = np.reshape(pnp, (1, pnp.size))
                            divisions_dataset.sync()