# Imports required by the functions below. The module paths assume the
# OpenQuake Hazard Modeller's Toolkit (openquake.hmtk).
import os
import pickle

import h5py
import numpy as np

from openquake.hmtk.seismicity.catalogue import Catalogue
from openquake.hmtk.seismicity.selector import CatalogueSelector
from openquake.hmtk.parsers.catalogue.csv_catalogue_parser import (
    CsvCatalogueParser, CsvCatalogueWriter)


def get_unclassified(tot_lab, pck_fname, out_cata, path_out):
    """
    Create a text file (.csv formatted) with the unclassified earthquakes

    :param tot_lab:
        Vector of length N (the number of earthquakes in the catalogue)
        with 1 for events assigned to a tectonic region and 0 otherwise
    :param pck_fname:
        Name of the pickle file with the catalogue
    :param out_cata:
        Prefix for the name of the .csv output catalogue
    :param path_out:
        Path to the output folder
    """
    # Flag vector: abs(tot_lab - 1) equals 1 for the events not assigned to
    # any tectonic region, i.e. the unclassified earthquakes
    tr_undef = abs(tot_lab - 1)

    # Load the catalogue
    with open(pck_fname, 'rb') as fle:
        catalogue = pickle.load(fle)

    # Select the unclassified earthquakes
    selector = CatalogueSelector(catalogue, create_copy=False)
    catalogue = selector.select_catalogue(tr_undef)
    print('')
    print('# earthquakes: {:d}'.format(len(catalogue.data['longitude'])))

    # Sub-catalogue file name
    csv_filename = out_cata + "_TR_unclassified.csv"
    csv_filename = os.path.join(path_out, csv_filename)

    # Write the purged catalogue
    csvcat = CsvCatalogueWriter(csv_filename)
    csvcat.write_file(catalogue)
    print("Catalogue successfully written to %s" % csv_filename)
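# The test methods below are assumed to belong to a unittest.TestCase whose
# setUp defines the fixtures they reference: self.output_filename,
# self.catalogue (five events with eventIDs '1'-'5'), self.magnitude_table,
# self.flag and the helper check_catalogues_are_equal.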
def test_catalogue_writer_no_purging(self):
    '''
    Tests the writer without any purging
    '''
    # Write to file
    writer = CsvCatalogueWriter(self.output_filename)
    writer.write_file(self.catalogue)
    parser = CsvCatalogueParser(self.output_filename)
    cat2 = parser.read_file()
    self.check_catalogues_are_equal(self.catalogue, cat2)
def test_catalogue_writer_only_mag_table_purging(self):
    '''
    Tests the writer when purging only according to the magnitude table
    '''
    # Write to file
    writer = CsvCatalogueWriter(self.output_filename)
    writer.write_file(self.catalogue, magnitude_table=self.magnitude_table)
    parser = CsvCatalogueParser(self.output_filename)
    cat2 = parser.read_file()

    expected_catalogue = Catalogue()
    expected_catalogue.data['eventID'] = ['1', '3', '5']
    expected_catalogue.data['magnitude'] = np.array([5.6, 4.8, 5.0])
    expected_catalogue.data['year'] = np.array([1960, 1970, 1990])
    expected_catalogue.data['ErrorStrike'] = np.array(
        [np.nan, np.nan, np.nan])
    self.check_catalogues_are_equal(expected_catalogue, cat2)
def test_catalogue_writer_only_flag_purging(self):
    '''
    Tests the writer when purging only according to the flag vector
    '''
    # Write to file
    writer = CsvCatalogueWriter(self.output_filename)
    writer.write_file(self.catalogue, flag_vector=self.flag)
    parser = CsvCatalogueParser(self.output_filename)
    cat2 = parser.read_file()

    expected_catalogue = Catalogue()
    expected_catalogue.data['eventID'] = ['1', '2', '3', '4']
    expected_catalogue.data['magnitude'] = np.array([5.6, 5.4, 4.8, 4.3])
    expected_catalogue.data['year'] = np.array([1960, 1965, 1970, 1980])
    expected_catalogue.data['ErrorStrike'] = np.array(
        [np.nan, np.nan, np.nan, np.nan])
    self.check_catalogues_are_equal(expected_catalogue, cat2)
def create_sub_catalogue(alen, aaa, pck_fname, treg_fname, out_cata, out_path):
    """
    Creates .csv files with the subcatalogues

    :param alen:
        Number of earthquakes in the original catalogue
    :param aaa:
        List of the labels used to define the various tectonic regions
    :param pck_fname:
        Name of the file with the pickled catalogue
    :param treg_fname:
        Name of the .hdf5 file with the classification of the catalogue
    :param out_cata:
        Prefix for the names of the output .csv catalogues
    :param out_path:
        Path to the output folder
    :returns:
        A :class:`numpy.ndarray` vector of length N where N is the number
        of earthquakes in the original catalogue.
    """
    # The output vector
    tot_lab = np.zeros(alen)

    print(' ')
    fmt = '# earthquakes in the catalogue: {:d}'
    print(fmt.format(len(tot_lab)))

    # Loop over the tectonic regions
    for label in aaa:

        # Output file name
        csv_filename = out_cata + "_TR_{:s}.csv".format(label)
        csv_filename = os.path.join(out_path, csv_filename)

        # Read the TR classification
        with h5py.File(treg_fname, 'r') as f:
            tr = f[label][:]

        if sum(tr) > 0:
            # Accumulate the classification flags into the output vector
            tot_lab = tot_lab + tr * 1

            # Load the catalogue
            with open(pck_fname, 'rb') as fle:
                catalogue = pickle.load(fle)

            # Replace NaN values in the date/time fields with defaults
            for lab in ['month', 'day', 'hour', 'minute', 'second']:
                idx = np.isnan(catalogue.data[lab])
                if lab == 'day' or lab == 'month':
                    catalogue.data[lab][idx] = 1
                elif lab == 'second':
                    catalogue.data[lab][idx] = 0.0
                else:
                    catalogue.data[lab][idx] = 0

            # Select the earthquakes belonging to this tectonic region
            selector = CatalogueSelector(catalogue, create_copy=False)
            catalogue = selector.select_catalogue(tr)
            catalogue.data['hour'] = catalogue.data['hour'].astype(int)
            catalogue.data['minute'] = catalogue.data['minute'].astype(int)

            print(' ')
            fmt = '# earthquakes in this TR: {:d}'
            print(fmt.format(len(catalogue.data['longitude'])))

            # Write the sub-catalogue
            print(csv_filename)
            csvcat = CsvCatalogueWriter(csv_filename)
            csvcat.write_file(catalogue)
            print("Catalogue successfully written to %s" % csv_filename)

    return tot_lab
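# Minimal usage sketch (not part of the original module) driving the two
# functions above end to end. The file names, output prefix and
# tectonic-region labels below are hypothetical placeholders.
if __name__ == '__main__':
    labels = ['crustal', 'interface', 'slab']  # hypothetical TR labels
    # Size of the original catalogue, read from the hypothetical pickle file
    with open('catalogue.pkl', 'rb') as fle:
        alen = len(pickle.load(fle).data['longitude'])
    # One .csv sub-catalogue per tectonic region
    tot_lab = create_sub_catalogue(alen, labels, 'catalogue.pkl',
                                   'classification.hdf5', 'model', './out')
    # Events never assigned to a TR end up in "model_TR_unclassified.csv"
    get_unclassified(tot_lab, 'catalogue.pkl', 'model', './out')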