    def test_get_even_magnitude_completeness(self):
        '''
        Tests the function to render an evenly spaced completeness table at
        0.1 interval spacing
        '''
        # Common case - many rows
        self.catalogue = Catalogue()
        self.catalogue.data['magnitude'] = np.array([4.5, 5.0])
        comp_table = np.array([[1990., 4.0],
                               [1960., 4.5],
                               [1900., 4.8]])
        expected_table = np.array([[1990., 4.0],
                                   [1990., 4.1],
                                   [1990., 4.2],
                                   [1990., 4.3],
                                   [1990., 4.4],
                                   [1960., 4.5],
                                   [1960., 4.6],
                                   [1960., 4.7],
                                   [1900., 4.8],
                                   [1900., 4.9],
                                   [1900., 5.0]])
        np.testing.assert_array_almost_equal(expected_table,
            utils.get_even_magnitude_completeness(comp_table,
                                                  self.catalogue)[0])

        # Common case - only one value
        comp_table = np.array([[1990., 4.0]])
        np.testing.assert_array_almost_equal(np.array([[1990., 4.0]]),
            utils.get_even_magnitude_completeness(comp_table,
                                                  self.catalogue)[0])
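
To make the expected behaviour concrete, below is a minimal sketch of the expansion these tests exercise. The function name, the explicit max_mag argument and the single return value are illustrative only; the real hmtk utility derives the maximum magnitude from the catalogue and also returns the magnitude increment.

import numpy as np

def even_completeness_sketch(comp_table, max_mag, dmag=0.1):
    # comp_table rows are [year, magnitude], ordered from the most recent
    # (lowest magnitude) bin to the oldest (highest magnitude) bin
    mags = np.arange(comp_table[0, 1], max_mag + dmag / 2., dmag)
    # Each 0.1-spaced magnitude inherits the year of the largest tabulated
    # completeness magnitude that does not exceed it
    years = [comp_table[comp_table[:, 1] <= (m + 1E-7), 0][-1]
             for m in mags]
    return np.column_stack([years, mags])

Called as even_completeness_sketch(comp_table, 5.0), this reproduces expected_table from the first case above.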
Example 2
    def test_get_even_magnitude_completeness(self):
        '''
        Tests the function to render an evenly spaced completeness table at
        0.1 interval spacing
        '''
        # Common case - many rows
        self.catalogue = Catalogue()
        self.catalogue.data['magnitude'] = np.array([4.5, 5.0])
        comp_table = np.array([[1990., 4.0], [1960., 4.5], [1900., 4.8]])
        expected_table = np.array([[1990., 4.0], [1990., 4.1], [1990., 4.2],
                                   [1990., 4.3], [1990., 4.4], [1960., 4.5],
                                   [1960., 4.6], [1960., 4.7], [1900., 4.8],
                                   [1900., 4.9], [1900., 5.0]])
        np.testing.assert_array_almost_equal(
            expected_table,
            utils.get_even_magnitude_completeness(comp_table,
                                                  self.catalogue)[0])

        # Common case - only one value
        comp_table = np.array([[1990., 4.0]])
        np.testing.assert_array_almost_equal(
            np.array([[1990., 4.0]]),
            utils.get_even_magnitude_completeness(comp_table,
                                                  self.catalogue)[0])
Example 3
    def test_get_adjustment_factor(self):
        '''
        Tests the function that should return an input adjustment factor if
        the magnitude is from a "complete" event - and a zero otherwise
        '''
        # Good case - when event is in the first completeness period
        comp_table = np.array([[1990., 4.0], [1960., 4.5], [1900., 5.5]])
        self.catalogue.data['magnitude'] = np.array([4.5, 5.7])
        comp_table = utils.get_even_magnitude_completeness(
            comp_table, self.catalogue)[0]

        tfact = 0.5
        self.assertAlmostEqual(
            tfact,
            _get_adjustment(4.2, 1995., comp_table[0, 1], comp_table[:, 0],
                            tfact))

        # Good case - when event is in a previous completeness period
        self.assertAlmostEqual(
            tfact,
            _get_adjustment(4.7, 1985., comp_table[0, 1], comp_table[:, 0],
                            tfact))

        # Bad case - below minimum completeness in file
        self.assertFalse(
            _get_adjustment(3.8, 1990., comp_table[0, 1], comp_table[:, 0],
                            tfact))

        # Bad case - below an earlier completeness threshold
        self.assertFalse(
            _get_adjustment(4.8, 1950., comp_table[0, 1], comp_table[:, 0],
                            tfact))

        # Good case when only one completeness period is needed
        comp_table = np.array([[1960., 4.5]])
        self.assertAlmostEqual(
            1.0,
            _get_adjustment(5.0, 1990., comp_table[0, 1], comp_table[:, 0],
                            tfact))
        # Bad case when only one completeness period is needed
        self.assertFalse(
            _get_adjustment(4.0, 1990., comp_table[0, 1], comp_table[:, 0],
                            tfact))

Example 4
    def test_get_adjustment_factor(self):
        '''
        Tests the function that should return an input adjustment factor if
        the magnitude is from a "complete" event - and a zero otherwise
        '''
        # Good case - when event is in the first completeness period
        comp_table = np.array([[1990., 4.0],
                               [1960., 4.5],
                               [1900., 5.5]])
        self.catalogue.data['magnitude'] = np.array([4.5, 5.7])
        comp_table = utils.get_even_magnitude_completeness(comp_table,
                                                           self.catalogue)[0]

        tfact = 0.5
        self.assertAlmostEqual(tfact,
            _get_adjustment(4.2, 1995., comp_table[0, 1], comp_table[:, 0],
                            tfact))

        # Good case - when event is in a previous completeness period
        self.assertAlmostEqual(tfact,
            _get_adjustment(4.7, 1985., comp_table[0, 1], comp_table[:, 0],
                            tfact))

        # Bad case - below minimum completeness in file
        self.assertFalse(_get_adjustment(3.8, 1990., comp_table[0, 1],
                                         comp_table[:, 0], tfact))

        # Bad case - below an earlier completeness threshold
        self.assertFalse(_get_adjustment(4.8, 1950., comp_table[0, 1],
                                         comp_table[:, 0], tfact))

        # Good case when only one completeness period is needed
        comp_table = np.array([[1960., 4.5]])
        self.assertAlmostEqual(1.0,
            _get_adjustment(5.0, 1990., comp_table[0, 1], comp_table[:, 0],
                            tfact))
        # Bad case when only one completeness period is needed
        self.assertFalse(_get_adjustment(4.0, 1990., comp_table[0, 1],
                                         comp_table[:, 0], tfact))
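
For orientation, here is a minimal sketch of the branching behaviour these tests pin down. It is a hypothetical standalone version, not the hmtk implementation: the argument order mirrors the _get_adjustment calls above, and dmag is assumed to be the 0.1 spacing of the expanded completeness table.

def adjustment_sketch(mag, year, mmin, comp_years, t_f, dmag=0.1):
    # Reject events below the minimum magnitude of the completeness table
    if mag < (mmin - dmag / 2.):
        return False
    if len(comp_years) == 1:
        # Single completeness period: complete events carry full weight
        return 1.0 if year >= comp_years[0] else False
    # Index of the 0.1-spaced completeness bin containing this magnitude
    idx = min(int(round((mag - mmin) / dmag)), len(comp_years) - 1)
    # Complete events get the Weichert time factor; incomplete events drop out
    return t_f if year >= comp_years[idx] else False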
Example 5
    def run_analysis(self,
                     catalogue,
                     config,
                     completeness_table=None,
                     smoothing_kernel=None):
        '''
        Runs an analysis of smoothed seismicity in the manner
        originally implemented by Frankel (1995)

        :param catalogue:
            Instance of the hmtk.seismicity.catalogue.Catalogue class
            catalogue.data dictionary containing the following -
            'year' - numpy.ndarray vector of years
            'longitude' - numpy.ndarray vector of longitudes
            'latitude' - numpy.ndarray vector of latitudes
            'depth' - numpy.ndarray vector of depths

        :param dict config:
            Configuration settings of the algorithm:
            * 'Length_Limit' - Maximum number of bandwidths for use in
                               smoothing (Float)
            * 'BandWidth' - Bandwidth (km) of the Smoothing Kernel (Float)
            * 'increment' - Output incremental (True) or cumulative a-value
                            (False)

        :param np.ndarray completeness_table:
            Completeness of the catalogue assuming evenly spaced magnitudes
            from most recent bin to oldest bin [year, magnitude]

        :param smoothing_kernel:
            Smoothing kernel as instance of
            :class:`hmtk.seismicity.smoothing.kernels.base.BaseSmoothingKernel`

        :returns:
            Full smoothed seismicity data as np.ndarray, of the form
            [Longitude, Latitude, Depth, Observed, Smoothed]
        '''

        self.catalogue = catalogue
        if smoothing_kernel:
            self.kernel = smoothing_kernel
        else:
            self.kernel = IsotropicGaussian()

        # If no grid limits are specified then take from catalogue
        if isinstance(self.grid_limits, list):
            self.grid_limits = Grid.make_from_list(self.grid_limits)
            assert self.grid_limits['xmax'] >= self.grid_limits['xmin']
            assert self.grid_limits['xspc'] > 0.0
            assert self.grid_limits['ymax'] >= self.grid_limits['ymin']
            assert self.grid_limits['yspc'] > 0.0
        elif isinstance(self.grid_limits, float):
            self.grid_limits = Grid.make_from_catalogue(
                self.catalogue, self.grid_limits,
                config['Length_Limit'] * config['BandWidth'])

        completeness_table, mag_inc = utils.get_even_magnitude_completeness(
            completeness_table, self.catalogue)

        end_year = self.catalogue.end_year

        # Get Weichert factor
        t_f, _ = utils.get_weichert_factor(self.beta, completeness_table[:, 1],
                                           completeness_table[:, 0], end_year)
        # Get the grid
        self.create_3D_grid(self.catalogue, completeness_table, t_f, mag_inc)
        if config['increment']:
            # Get Hermann adjustment factors
            fval, fival = utils.hermann_adjustment_factors(
                self.bval, completeness_table[0, 1], config['increment'])
            self.data[:, -1] = fval * fival * self.data[:, -1]

        # Apply smoothing
        smoothed_data, sum_data, sum_smooth = self.kernel.smooth_data(
            self.data, config, self.use_3d)
        print('Smoothing Total Rate Comparison - '
              'Observed: %.6e, Smoothed: %.6e' % (sum_data, sum_smooth))
        self.data = np.column_stack([self.data, smoothed_data])
        return self.data
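
A hedged usage sketch for this variant. It assumes the method lives on the hmtk SmoothedSeismicity class, constructed from a grid-limits list and a b-value as the method body implies; the import path is inferred from the docstrings above, the catalogue is loaded elsewhere, and all numeric values are illustrative.

import numpy as np
# Assumed import path, inferred from the hmtk package layout above
from hmtk.seismicity.smoothing.smoothed_seismicity import SmoothedSeismicity

config = {'Length_Limit': 3.0,   # max number of bandwidths in the smoothing
          'BandWidth': 50.0,     # kernel bandwidth in km
          'increment': False}    # cumulative, not incremental, a-values
completeness = np.array([[1990., 4.0],
                         [1960., 4.5],
                         [1900., 5.5]])
# Grid limits as [xmin, xmax, xspc, ymin, ymax, yspc, zmin, zmax, zspc]
smoother = SmoothedSeismicity([105.0, 120.0, 0.5, -10.0, 5.0, 0.5,
                               0.0, 50.0, 50.0], bvalue=1.0)
# catalogue: an hmtk Catalogue instance loaded elsewhere
rates = smoother.run_analysis(catalogue, config,
                              completeness_table=completeness)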
Example 6
    def run_analysis(self, catalogue, config, completeness_table=None,
                     smoothing_kernel=None, end_year=None):
        '''
        Runs an analysis of smoothed seismicity in the manner
        originally implemented by Frankel (1995)

        :param catalogue: 
            Instance of the hmtk.seismicity.catalogue.Catalogue class
            catalogue.data dictionary containing the following - 
            'year' - numpy.ndarray vector of years
            'longitude' - numpy.ndarray vector of longitudes
            'latitude' - numpy.ndarray vector of latitudes
            'depth' - numpy.ndarray vector of depths
        
        :param dict config:
            Configuration settings of the algorithm:
            * 'Length_Limit' - Maximum number of bandwidths for use in 
                               smoothing (Float)
            * 'BandWidth' - Bandwidth (km) of the Smoothing Kernel (Float)
            * 'increment' - Output incremental (True) or cumulative a-value
                            (False)

        :param np.ndarray completeness_table: 
            Completeness of the catalogue assuming evenly spaced magnitudes 
            from most recent bin to oldest bin [year, magnitude]

        :param smoothing_kernel:
            Smoothing kernel as instance of
            :class:`hmtk.seismicity.smoothing.kernels.base.BaseSmoothingKernel`

        :param float end_year:
            Year considered as the final year for the analysis. If not given
            the program will automatically take the last year in the catalogue.

        :returns:
            Full smoothed seismicity data as np.ndarray, of the form 
            [Longitude, Latitude, Depth, Observed, Smoothed]
        '''
        
        self.catalogue = catalogue
        if smoothing_kernel:
            self.kernel = smoothing_kernel
        else:
            self.kernel = IsotropicGaussian()
        

        # If no grid limits are specified then take from catalogue
        if not isinstance(self.grid_limits, dict):
            self.get_grid_from_catalogue(config)

        completeness_table, mag_inc = utils.get_even_magnitude_completeness(
            completeness_table,
            self.catalogue)
        
        if not end_year:
            end_year = np.max(self.catalogue.data['year'])

        # Get Weichert factor
        t_f, _ = utils.get_weichert_factor(self.beta,
                                           completeness_table[:, 1],
                                           completeness_table[:, 0],
                                           end_year)
        # Get the grid
        self.create_3D_grid(self.catalogue, completeness_table, t_f, mag_inc)           
        if config['increment']:
            # Get Hermann adjustment factors
            fval, fival = utils.hermann_adjustment_factors(
                self.bval, completeness_table[0, 1], config['increment'])
            self.data[:, -1] = fval * fival * self.data[:, -1]

        # Apply smoothing
        smoothed_data, sum_data, sum_smooth = self.kernel.smooth_data(
            self.data, config, self.use_3d)
        print('Smoothing Total Rate Comparison - '
              'Observed: %.6e, Smoothed: %.6e' % (sum_data, sum_smooth))
        self.data = np.column_stack([self.data, smoothed_data])
        return self.data
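
The only call-site difference from the previous variant is the optional end_year; reusing the config and completeness table from the sketch above:

rates = smoother.run_analysis(catalogue, config,
                              completeness_table=completeness,
                              end_year=2010)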
Example 7
    def run_analysis(self, catalogue, config, completeness_table=None,
                     smoothing_kernel=IsotropicGaussianWoo):
        '''
        Runs an analysis of smoothed seismicity in the manner
        originally implemented by Frankel (1995)
 
        :param catalogue:
            Instance of the hmtk.seismicity.catalogue.Catalogue class
            catalogue.data dictionary containing the following -
            'year' - numpy.ndarray vector of years
            'longitude' - numpy.ndarray vector of longitudes
            'latitude' - numpy.ndarray vector of latitudes
            'depth' - numpy.ndarray vector of depths
            'magnitude' - numpy.ndarray vector of magnitudes
 
        :param dict config:
            Configuration settings of the algorithm:
            * 'Length_Limit' - Maximum number of bandwidths for use in
                               smoothing (Float)
            * 'BandWidth' - Bandwidth (km) of the Smoothing Kernel (Float)
            * 'increment' - Output incremental (True) or cumulative a-value
                            (False)
 
        :param np.ndarray completeness_table:
            Completeness of the catalogue assuming evenly spaced magnitudes
            from most recent bin to oldest bin [year, magnitude]
 
        :param smoothing_kernel:
            Smoothing kernel as instance of
            :class:`hmtk.seismicity.smoothing.kernels.base.BaseSmoothingKernel`
 
        :returns:
            Full smoothed seismicity data as np.ndarray, of the form
            [Longitude, Latitude, Depth, Observed, Smoothed]
        '''
        # Fail fast if the configuration lacks a minimum magnitude entry
        config['min_magnitude']

        self.catalogue = catalogue
        self.completeness_table = completeness_table
        self.config = config
        self.add_bandwith_values()
 
        use3d = config['use3d']
        cells, spacing = self._create_grid(use3d=use3d)
        k = Frankel_1995(self.c, self.d)
        
        ct, dm = utils.get_even_magnitude_completeness(completeness_table, 
                                                       catalogue, 
                                                       magnitude_increment=0.5)

        last_year = catalogue.end_year
                
        # get data
        X = self.catalogue.data['magnitude']
        magnitude_bin = dm
        
        min_magnitude = (self.config['min_magnitude']
                         if self.config['min_magnitude'] else min(X))
        max_magnitude = (self.config['max_magnitude']
                         if self.config['max_magnitude'] else max(X))
        
        # divide bins catalog_bins
        bins = np.arange(min_magnitude, max_magnitude + magnitude_bin, magnitude_bin)

        h, m = [], []
        for b in bins:
            _h = k.H(b + magnitude_bin/2.) * self.config['bandwidth_h_limit']
            _m = b + magnitude_bin/2.

            observation_time = self._get_observation_time(_m, ct, last_year)
  
            # Write the rates for this magnitude bin to a scratch CSV file
            fid = open('/Users/pirchiner/Desktop/tmp_woo.%s.csv' % _m, 'wt')
            # Create header list
            header_info = ['Longitude', 'Latitude', 'Depth', 'Magnitude',
                           'Rate']
            writer = csv.DictWriter(fid, fieldnames=header_info)
            headers = dict((name0, name0) for name0 in header_info)
            # Write to file
            writer.writerow(headers)
            
            _i = np.logical_and(X >= _m - magnitude_bin / 2.,
                                X < _m + magnitude_bin / 2.)
            
            print(_m, observation_time)

            for c in cells:
                x0 = c[0]
                y0 = c[1]
                z0 = 0 if not use3d else c[2]  
    
                r = haversine(np.array(x0), 
                              np.array(y0), 
                              catalogue.data['longitude'][_i],
                              catalogue.data['latitude'][_i])
                # Keep only events within the bandwidth-limited distance
                _j = np.logical_and(0 < r, r <= _h)
                r = r[_j]

                # Kernel-weighted contribution of the nearby events, scaled
                # to an annual rate by the observation time for this bin
                _k = k.kernel(_m, r) / observation_time

                rate = _k.sum()

                row_dict = {'Longitude': '%.5f' % x0,
                            'Latitude': '%.5f' % y0,
                            'Depth': '%.3f' % 0,
                            'Magnitude': '%.5e' % _m,
                            'Rate': '%.5e' % rate}
                writer.writerow(row_dict)

            fid.close()
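
Note that while the docstring repeats the Frankel-style keys, the body of this variant actually reads 'min_magnitude', 'max_magnitude', 'use3d' and 'bandwidth_h_limit' from config. A hedged call sketch with those keys follows; woo_smoother stands for whatever class hosts this method, and the values are illustrative.

# Hypothetical config holding the keys this variant actually reads
config = {'min_magnitude': 4.0,      # falsy value falls back to min(magnitudes)
          'max_magnitude': 7.0,      # falsy value falls back to max(magnitudes)
          'use3d': False,
          'bandwidth_h_limit': 3.0}  # multiplier on the kernel bandwidth H
woo_smoother.run_analysis(catalogue, config,
                          completeness_table=completeness)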