def test_depth_exceeding_model_depth(self):
        """Verify that a depth (z axis) outside the limits
        of the model raises an AssertionError.
        """
        # Probe both ends of the range: too deep (z_min_m) and
        # too shallow (z_max_m).
        for depth_kwargs in ({'z_min_m': -40000.0}, {'z_max_m': 4.0}):
            reservoir = self.create_reservoir(
                x_min=19.0, x_max=19.1, y_min=40.0, y_max=40.1,
                **depth_kwargs)
            with self.assertRaises(AssertionError):
                e.exec_model(reservoir)
    def test_forecast_small_area(self):
        """Check the dataframe output of a model forecast for a
        small area inside the model forecasting region.
        The grid resolution matches the model resolution and the
        returned result overlaps a single model square.
        """
        reservoir = self.create_reservoir(
            x_min=5.7, x_max=5.8, y_min=44.8, y_max=44.9)
        comparison_filename = os.path.join(
            DIRPATH, 'data/forecast_area_small_area.csv')
        forecast_values, magnitude_list, mc, depth = e.exec_model(reservoir)

        # Optionally (re)generate the on-disk comparison fixture.
        if self.write_csv_comparison_files:
            forecast_values.to_csv(comparison_filename, index_label='Index')

        forecast_values.index.name = 'Index'

        expected_values = read_csv(comparison_filename, index_col='Index')
        assert_frame_equal(forecast_values, expected_values)
        self.assertEqual(mc, 1.0)
        self.assertEqual(depth, 30.0)
        # Magnitude bins run from '5.0' to '9.0' in 0.1 steps.
        self.assertEqual(
            magnitude_list,
            ['{:.1f}'.format(m / 10.0) for m in range(50, 91)])
 def test_depth_less_than_model_depth(self):
     """Test that the depth returned from the model is
     always 30km, even if the depth given is smaller.
     """
     reservoir = self.create_reservoir(
         x_min=19.0, x_max=19.1, y_min=40.0, y_max=40.1, z_min_m=-400.0,
         z_max_m=0.0)
     forecast_values, magnitude_list, mc, depth = e.exec_model(reservoir)
     self.assertEqual(depth, 30.0)
    def test_forecast_no_result(self):
        """Check the dataframe output of a model forecast where
        the requested area does not overlap with the model area.
        No rows are expected to be produced.
        """
        reservoir = self.create_reservoir(
            x_min=15.7, x_max=15.8, y_min=44.8, y_max=44.9)
        comparison_filename = os.path.join(
            DIRPATH, 'data/forecast_area_no_result.csv')
        forecast_values, _magnitudes, _mc, _depth = e.exec_model(reservoir)

        # Optionally (re)generate the on-disk comparison fixture.
        if self.write_csv_comparison_files:
            forecast_values.to_csv(comparison_filename, index_label='Index')

        forecast_values.index.name = 'Index'

        expected_values = read_csv(comparison_filename, index_col='Index')
        assert_frame_equal(forecast_values, expected_values)
    def test_forecast_fine_resolution(self):
        """Check the dataframe output of a model forecast where
        the area being forecast for is smaller than the model's
        resolution and lies within the model forecasting region.
        """
        reservoir = self.create_reservoir(
            resolution_cartesian=0.05,
            x_min=5.70, x_max=5.75, y_min=44.75, y_max=44.80)
        comparison_filename = os.path.join(
            DIRPATH, 'data/forecast_area_fine_resolution.csv')
        forecast_values, _magnitudes, _mc, _depth_km = e.exec_model(reservoir)

        # Optionally (re)generate the on-disk comparison fixture.
        if self.write_csv_comparison_files:
            forecast_values.to_csv(comparison_filename, index_label='Index')

        forecast_values.index.name = 'Index'

        expected_values = read_csv(comparison_filename, index_col='Index')
        assert_frame_equal(forecast_values, expected_values)
    def test_forecast_small_overlap(self):
        """Check the dataframe output of a model forecast for a
        small area inside the model forecasting region.
        The grid resolution matches the model resolution but the
        returned result overlaps 4 model values.
        """
        reservoir = self.create_reservoir(
            x_min=5.65, x_max=5.75, y_min=44.75, y_max=44.85)
        comparison_filename = os.path.join(
            DIRPATH, 'data/forecast_area_small_overlap.csv')
        forecast_values, _magnitudes, _mc, _depth = e.exec_model(reservoir)

        # Optionally (re)generate the on-disk comparison fixture.
        if self.write_csv_comparison_files:
            forecast_values.to_csv(comparison_filename, index_label='Index')

        forecast_values.index.name = 'Index'

        expected_values = read_csv(comparison_filename, index_col='Index')
        assert_frame_equal(forecast_values, expected_values)
# Example #7
    def _run(self, **kwargs):
        """Execute the WerHiResSmoM1Italy5y forecast model.

        Combines default and caller-supplied model parameters, validates
        the epoch/forecast durations, runs the model over the requested
        reservoir geometry and maps the returned per-cell rates onto ORM
        result objects.

        :param kwargs: Model specific keyword value parameters. Reads
            ``model_parameters`` (flat dict of overrides) and
            ``reservoir['geom']`` (dict with ``x``, ``y`` and ``z``
            coordinate lists).
        :returns: ``ModelResult.ok`` wrapping the top-level reservoir.
        :raises WerHiResSmoM1Italy5yError: if no reservoir geometry is
            provided, or the underlying model raises.
        """
        self.logger.debug('Importing model specific configuration ...')
        # Simple chain map can combine parameters if they are a flat hierarchy
        # and in-place modification not required. otherwise see ChainMapTree
        # in parser.py.
        model_config = ChainMap(kwargs.get('model_parameters', {}),
                                self._default_model_parameters)

        self.logger.debug(
            'Received model configuration: {!r}'.format(model_config))

        # Validations on data
        epoch_duration = model_config['epoch_duration']
        forecast_duration = (
            model_config['datetime_end'] -  # noqa
            model_config['datetime_start']).total_seconds()

        if not epoch_duration:
            # No epoch configured: treat the whole forecast window as one
            # epoch.
            epoch_duration = forecast_duration
        elif epoch_duration > forecast_duration:
            # Clamp: an epoch cannot be longer than the forecast window.
            # (Message fixed: this branch fires when the epoch duration is
            # GREATER than the forecast duration, not less.)
            self.logger.info("The epoch duration is greater than the "
                             "total time of forecast")
            epoch_duration = forecast_duration
            self.logger.info("The epoch duration has been set to the "
                             f"forecast duration: {forecast_duration} (s)")
        # Epoch boundary datetimes covering the forecast window.
        datetime_list = [
            model_config['datetime_start'] +  # noqa
            timedelta(seconds=int(epoch_duration * i))
            for i in range(int(forecast_duration // epoch_duration) + 1)
        ]

        self.logger.debug('Importing reservoir geometry ...')
        try:
            reservoir_geom = kwargs['reservoir']['geom']
        except KeyError:
            self.logger.info('No reservoir exists.')
            raise WerHiResSmoM1Italy5yError("No reservoir provided.")

        self.logger.info("Calling the WerHiResSmoM1Italy5y model...")

        # Return arrays for each result attribute. Any model failure is
        # logged with its full traceback and re-raised as the model-specific
        # error, chained so the original cause stays in the log trail.
        # (Replaces the previous dead `err = traceback.print_exc()` /
        # `if err: raise` pattern: print_exc() always returns None, so that
        # branch could never fire.)
        try:
            (forecast_values, mag_list, mc,
             depth_km) = werner_model.exec_model(reservoir_geom)
        except Exception as exc:
            self.logger.error(traceback.format_exc())
            raise WerHiResSmoM1Italy5yError(
                'Error raised in WerHiResSmoM1Italy5y model') from exc

        self.logger.debug("Result received from WerHiResSmoM1Italy5y model.")

        # Read values into database
        min_mag = min(mag_list)
        max_mag = max(mag_list)
        # Assume that the increment between bins is static and positive
        mag_increment = round(float(mag_list[1]) - float(mag_list[0]), 1)
        subgeoms = []
        samples = []
        for index, row in forecast_values.iterrows():
            # Validate the depths list in the parsing stage.
            # Consecutive z values define the depth layers of the request.
            for min_depth, max_depth in \
                    zip(reservoir_geom['z'], reservoir_geom['z'][1:]):
                # Fraction of the model depth covered by this layer;
                # depth_km is in km while the z coordinates are in metres.
                depth_fraction = (max_depth - min_depth) / (depth_km * 1000.0)
                samples = []
                for start_date, end_date in zip(datetime_list,
                                                datetime_list[1:]):
                    result_bins = []
                    for mag_bin in mag_list:
                        # Scale the cell's event count down to this layer.
                        event_number = row[mag_bin] / depth_fraction
                        result_bins.append(
                            orm.MFDBin(
                                referencemagnitude=mag_bin,
                                eventnumber_value=event_number,
                                # Question: a variance/uncertainty at this level
                                # won't propagate to OQ hazard, so what would
                                # be preferential to store, given the
                                # choice between:
                                # uncertainty/variance/confidencelevel/any?
                                eventnumber_uncertainty=np.sqrt(event_number)))

                    mfd_curve = orm.DiscreteMFD(minmag=min_mag,
                                                maxmag=max_mag,
                                                binwidth=mag_increment,
                                                magbins=result_bins)

                    samples.append(
                        orm.ModelResultSample(starttime=start_date,
                                              endtime=end_date,
                                              mc_value=mc,
                                              discretemfd=mfd_curve))

                subgeom = orm.Reservoir(x_min=row['min_lon'],
                                        x_max=row['max_lon'],
                                        y_min=row['min_lat'],
                                        y_max=row['max_lat'],
                                        z_min=min_depth,
                                        z_max=max_depth,
                                        samples=samples)

                subgeoms.append(subgeom)

        # Top level reservoir contains the total dimensions of the
        # requested search area.
        reservoir = orm.Reservoir(x_min=min(reservoir_geom['x']),
                                  x_max=max(reservoir_geom['x']),
                                  y_min=min(reservoir_geom['y']),
                                  y_max=max(reservoir_geom['y']),
                                  z_min=min(reservoir_geom['z']),
                                  z_max=max(reservoir_geom['z']),
                                  subgeometries=subgeoms)
        # NOTE(review): `samples` here refers only to the last subgeometry's
        # sample list, so this logs that subgeometry's count, not the total —
        # confirm whether the total was intended.
        self.logger.info(f"{len(samples)} valid forecast samples")

        return ModelResult.ok(
            data={"reservoir": reservoir},
            warning=self.stderr if self.stderr else self.stdout)