def test_get_even_magnitude_completeness(self):
        '''
        Tests the function that renders an evenly spaced completeness table
        at 0.1 magnitude interval spacing
        '''
        # Common case - many rows
        self.catalogue = Catalogue()
        self.catalogue.data['magnitude'] = np.array([4.5, 5.0])
        comp_table = np.array([[1990., 4.0],
                               [1960., 4.5],
                               [1900., 4.8]])
        expected_table = np.array([[1990., 4.0],
                                   [1990., 4.1],
                                   [1990., 4.2],
                                   [1990., 4.3],
                                   [1990., 4.4],
                                   [1960., 4.5],
                                   [1960., 4.6],
                                   [1960., 4.7],
                                   [1900., 4.8],
                                   [1900., 4.9],
                                   [1900., 5.0]])
        np.testing.assert_array_almost_equal(expected_table,
            utils.get_even_magnitude_completeness(comp_table,
                                                  self.catalogue)[0])

        # Common case - only one value
        comp_table = np.array([[1990., 4.0]])
        np.testing.assert_array_almost_equal(np.array([[1990., 4.0]]),
            utils.get_even_magnitude_completeness(comp_table,
                                                  self.catalogue)[0])
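
# A minimal sketch of the behaviour the test above expects from
# utils.get_even_magnitude_completeness: densify a coarse [year, magnitude]
# completeness table to 0.1-magnitude spacing, with each fine bin inheriting
# the year of the coarse row it falls in, out to the largest catalogue
# magnitude. This is an illustrative re-implementation, not the hmtk source;
# the helper name is hypothetical.
import numpy as np

def densify_completeness(comp_table, mag_max):
    mags = np.arange(comp_table[0, 1], mag_max + 0.05, 0.1)
    # Index of the last coarse row whose magnitude does not exceed the fine
    # bin (the 1e-9 guards against floating-point jitter from arange)
    idx = np.searchsorted(comp_table[:, 1], mags + 1e-9) - 1
    return np.column_stack([comp_table[idx, 0], mags])

# Reproduces expected_table from the test above:
# densify_completeness(np.array([[1990., 4.0], [1960., 4.5], [1900., 4.8]]),
#                      5.0)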
    def test_get_adjustment_factor(self):
        '''
        Tests the function that should return the input adjustment factor if
        the magnitude is from a "complete" event, and zero otherwise
        '''
        # Good case - when event is in the first completeness period
        comp_table = np.array([[1990., 4.0], [1960., 4.5], [1900., 5.5]])
        self.catalogue.data['magnitude'] = np.array([4.5, 5.7])
        comp_table = utils.get_even_magnitude_completeness(
            comp_table, self.catalogue)[0]

        tfact = 0.5
        self.assertAlmostEqual(
            tfact,
            _get_adjustment(4.2, 1995., comp_table[0, 1], comp_table[:, 0],
                            tfact))

        # Good case - when event is in a previous completeness period
        self.assertAlmostEqual(
            tfact,
            _get_adjustment(4.7, 1985., comp_table[0, 1], comp_table[:, 0],
                            tfact))

        # Bad case - below minimum completeness in file
        self.assertFalse(
            _get_adjustment(3.8, 1990., comp_table[0, 1], comp_table[:, 0],
                            tfact))

        # Bad case - below an earlier completeness threshold
        self.assertFalse(
            _get_adjustment(4.8, 1950., comp_table[0, 1], comp_table[:, 0],
                            tfact))

        # Good case when only one completeness period is needed
        comp_table = np.array([[1960., 4.5]])
        self.assertAlmostEqual(
            1.0,
            _get_adjustment(5.0, 1990., comp_table[0, 1], comp_table[:, 0],
                            tfact))
        # Bad case when only one completeness period is needed
        self.assertFalse(
            _get_adjustment(4.0, 1990., comp_table[0, 1], comp_table[:, 0],
                            tfact))
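
# A sketch of the adjustment logic the test above asserts (illustrative, not
# the hmtk source): with a single completeness period the rate is returned
# unscaled (1.0); with several periods, an event that is complete for its
# 0.1-spaced magnitude bin receives the Weichert time factor t_f, while an
# incomplete event returns False (treated as zero).
import numpy as np

def get_adjustment_sketch(mag, year, mmin, comp_years, t_f, mag_inc=0.1):
    if mag < mmin - mag_inc / 2.0:
        return False  # below the minimum magnitude of completeness
    if len(comp_years) == 1:
        return 1.0 if year >= comp_years[0] else False
    # Bin index of the event magnitude in the densified completeness table
    kval = min(int(round((mag - mmin) / mag_inc)), len(comp_years) - 1)
    return t_f if year >= comp_years[kval] else False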
    def run_analysis(self,
                     catalogue,
                     config,
                     completeness_table=None,
                     smoothing_kernel=None):
        '''
        Runs an analysis of smoothed seismicity in the manner
        originally implemented by Frankel (1995)

        :param catalogue:
            Instance of the openquake.hmtk.seismicity.catalogue.Catalogue
            class, whose catalogue.data dictionary contains the following:
            'year' - numpy.ndarray vector of years
            'longitude' - numpy.ndarray vector of longitudes
            'latitude' - numpy.ndarray vector of latitudes
            'depth' - numpy.ndarray vector of depths

        :param dict config:
            Configuration settings of the algorithm:
            * 'Length_Limit' - Maximum number of bandwidths for use in
              smoothing (float)
            * 'BandWidth' - Bandwidth (km) of the smoothing kernel (float)
            * 'increment' - Output incremental (True) or cumulative (False)
              a-value

        :param np.ndarray completeness_table:
            Completeness of the catalogue assuming evenly spaced magnitudes
            from most recent bin to oldest bin [year, magnitude]

        :param smoothing_kernel:
            Smoothing kernel as instance of
            :class:`openquake.hmtk.seismicity.smoothing.kernels.base.BaseSmoothingKernel`

        :returns:
            Full smoothed seismicity data as np.ndarray, of the form
            [Longitude, Latitude, Depth, Observed, Smoothed]
        '''

        self.catalogue = catalogue
        if smoothing_kernel:
            self.kernel = smoothing_kernel
        else:
            self.kernel = IsotropicGaussian()

        # Grid limits: build from a bounding-box list if given as a list,
        # otherwise (a float grid spacing) derive the extent from the
        # catalogue, padded by the smoothing length
        if isinstance(self.grid_limits, list):
            self.grid_limits = Grid.make_from_list(self.grid_limits)
            assert self.grid_limits['xmax'] >= self.grid_limits['xmin']
            assert self.grid_limits['xspc'] > 0.0
            assert self.grid_limits['ymax'] >= self.grid_limits['ymin']
            assert self.grid_limits['yspc'] > 0.0
        elif isinstance(self.grid_limits, float):
            self.grid_limits = Grid.make_from_catalogue(
                self.catalogue, self.grid_limits,
                config['Length_Limit'] * config['BandWidth'])

        completeness_table, mag_inc = utils.get_even_magnitude_completeness(
            completeness_table, self.catalogue)

        end_year = self.catalogue.end_year

        # Get Weichert factor
        t_f, _ = utils.get_weichert_factor(self.beta, completeness_table[:, 1],
                                           completeness_table[:, 0], end_year)
        # Get the grid
        self.create_3D_grid(self.catalogue, completeness_table, t_f, mag_inc)
        if config['increment']:
            # Get Hermann adjustment factors
            fval, fival = utils.hermann_adjustment_factors(
                self.bval, completeness_table[0, 1], config['increment'])
            self.data[:, -1] = fval * fival * self.data[:, -1]

        # Apply smoothing
        smoothed_data, sum_data, sum_smooth = self.kernel.smooth_data(
            self.data, config, self.use_3d)
        print('Smoothing Total Rate Comparison - '
              'Observed: %.6g, Smoothed: %.6g' % (sum_data, sum_smooth))
        self.data = np.column_stack([self.data, smoothed_data])
        return self.data
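
# A hypothetical end-to-end usage sketch for run_analysis. The class name,
# import path, and constructor arguments are assumptions based on the hmtk
# API rather than taken from this snippet, and `catalogue` is presumed to be
# a pre-loaded Catalogue instance, so the calls are left commented out:
#
# from openquake.hmtk.seismicity.smoothing.smoothed_seismicity import (
#     SmoothedSeismicity)
#
# config = {'Length_Limit': 3.0, 'BandWidth': 50.0, 'increment': False}
# completeness = np.array([[1990., 4.0], [1960., 4.5], [1900., 5.5]])
# # grid limits as [xmin, xmax, xspc, ymin, ymax, yspc, zmin, zmax, zspc]
# smoother = SmoothedSeismicity(
#     [-120., -110., 0.5, 30., 40., 0.5, 0., 30., 30.], bvalue=1.0)
# output = smoother.run_analysis(catalogue, config,
#                                completeness_table=completeness)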