def test_hyp_output_dicts(self):
    """Check hyp_output_dicts for MT and DC samples, with and without a hyp_file."""

    def mt_two_col():
        # Fresh two-sample (2-column) moment tensor test input.
        return {'moment_tensor_space': np.matrix([[1., 2.], [2., 1.], [1., 2.],
                                                  [2., 1.], [1., 2.], [2., 1.]]),
                'ln_pdf': np.matrix([0, 0.7]),
                'probability': np.matrix([[1., 2.]]),
                'total_number_samples': 400}

    def mt_three_col():
        # Fresh three-sample (3-column) moment tensor test input.
        return {'moment_tensor_space': np.matrix([[1., 2., 3.], [2., 1., 3.],
                                                  [1., 2., 3.], [2., 1., 3.],
                                                  [1., 2., 3.], [2., 1., 3.]]),
                'ln_pdf': np.matrix([0, 0.7, 0]),
                'probability': np.matrix([[1., 2., 1.]]),
                'total_number_samples': 400}

    def dc_space():
        # Moment tensor space that hyp_output_dicts treats as double-couple
        # (FOCALMECH output rather than MOMENTTENSOR).
        return np.matrix([[1., -0.51969334, 3.], [2., 0.22610635, 3.],
                          [1., 0.29358698, 3.], [2., 0.58532165, 3.],
                          [1., -0.27015115, 3.], [2., -0.42073549, 3.]])

    self.hyp_file()
    # Two MT samples, event read straight from the hyp file.
    event = parse_hyp('hyptest.hyp')[0]
    a, b, c = hyp_output_dicts(event, False, mt_two_col())
    self.assertEqual(len(a.split('\n')), len(event['hyp_file']) + 1)
    self.assertTrue('MOMENTTENSOR' in a)
    self.assertEqual(float(a.split()[a.split().index('MTNN') + 1]), 2.0)
    self.assertEqual(len(b), 169)
    # Three MT samples.
    event = parse_hyp('hyptest.hyp')[0]
    a, b, c = hyp_output_dicts(event, False, mt_three_col())
    self.assertEqual(len(a.split('\n')), len(event['hyp_file']) + 1)
    self.assertTrue('MOMENTTENSOR' in a)
    self.assertEqual(float(a.split()[a.split().index('MTNN') + 1]), 2.0)
    self.assertEqual(len(b), 233)
    # Three MT samples with one line removed from the stored hyp file.
    event = parse_hyp('hyptest.hyp')[0]
    event['hyp_file'].pop(14)
    a, b, c = hyp_output_dicts(event, False, mt_three_col())
    self.assertEqual(len(a.split('\n')), len(event['hyp_file']) + 2)
    self.assertTrue('MOMENTTENSOR' in a)
    self.assertEqual(float(a.split()[a.split().index('MTNN') + 1]), 2.0)
    self.assertEqual(len(b), 233)
    # No hyp_file key at all - the output is built from scratch.
    event.pop('hyp_file')
    a, b, c = hyp_output_dicts(event, False, mt_three_col())
    self.assertEqual(len(a.split('\n')), 36)
    self.assertTrue('MOMENTTENSOR' in a)
    self.assertEqual(float(a.split()[a.split().index('MTNN') + 1]), 2.0)
    self.assertEqual(len(b), 233)
    # Double-couple samples produce FOCALMECH rather than MOMENTTENSOR lines.
    event = parse_hyp('hyptest.hyp')[0]
    event['hyp_file'].pop(14)
    dc_args = mt_three_col()
    dc_args['moment_tensor_space'] = dc_space()
    a, b, c = hyp_output_dicts(event, False, dc_args)
    self.assertEqual(len(a.split('\n')), len(event['hyp_file']) + 2)
    self.assertFalse('MOMENTTENSOR' in a)
    self.assertTrue('FOCALMECH' in a)
    self.assertAlmostEqual(
        float(a.split()[a.split().index('Mech') + 1]), 0.5 * 180 / np.pi, 5)
    self.assertEqual(len(b), 233)
    # Extended output with source-type parameters and Bayesian evidence
    # increases the binary output length.
    extended = mt_three_col()
    extended['moment_tensor_space'] = dc_space()
    for key in ('g', 'd', 'k', 'h', 's', 'S1', 'D1', 'R1', 'u', 'v',
                'S2', 'D2', 'R2'):
        extended[key] = np.array([0.2, 0.2, 0.2])
    extended['ln_bayesian_evidence'] = (1, 10)
    a, b, c = hyp_output_dicts(event, False, extended)
    self.assertEqual(len(a.split('\n')), len(event['hyp_file']) + 2)
    self.assertFalse('MOMENTTENSOR' in a)
    self.assertTrue('FOCALMECH' in a)
    self.assertAlmostEqual(
        float(a.split()[a.split().index('Mech') + 1]), 0.5 * 180 / np.pi, 5)
    self.assertEqual(len(b), 545)
def test_parse_hyp(self):
    """Check parse_hyp returns a single event dict with the expected keys."""
    self.hyp_file()
    events = parse_hyp('hyptest.hyp')
    self.assertEqual(len(events), 1)
    first = events[0]
    # The parsed event carries exactly these three entries.
    for key in ('PPolarity', 'UID', 'hyp_file'):
        self.assertTrue(key in first.keys())
    self.assertEqual(first['UID'], "20150126222324275")
    self.assertEqual(len(first.keys()), 3)
def test_hyp_output(self):
    """Check hyp_output writes both the .hyp and binary .mt output files."""

    def clear_outputs():
        # Best-effort removal of any leftover output files.
        for path in ('mtfitOUTPUTTEST.hyp', 'mtfitOUTPUTTEST.mt'):
            try:
                os.remove(path)
            except Exception:
                pass

    self.hyp_file()
    event = parse_hyp('hyptest.hyp')[0]
    a, b, c = hyp_output_dicts(event, False, {
        'moment_tensor_space': np.matrix([[1., 2.], [2., 1.], [1., 2.],
                                          [2., 1.], [1., 2.], [2., 1.]]),
        'ln_pdf': np.matrix([0, 0.7, 0]),
        'probability': np.matrix([[1., 2.]]),
        'total_number_samples': 400})
    clear_outputs()
    self.assertFalse(os.path.exists('mtfitOUTPUTTEST.hyp'))
    self.assertFalse(os.path.exists('mtfitOUTPUTTEST.mt'))
    fid, out_str = hyp_output([a, b], fid='mtfitOUTPUTTEST.hyp')
    self.assertTrue(os.path.exists('mtfitOUTPUTTEST.hyp'))
    self.assertTrue(os.path.exists('mtfitOUTPUTTEST.mt'))
    clear_outputs()
    # Repeat for a double-couple result with the extended output keys.
    event['hyp_file'].pop(14)
    a, b, c = hyp_output_dicts(event, False, {
        'moment_tensor_space': np.matrix([[1., -0.51969334, 3.],
                                          [2., 0.22610635, 3.],
                                          [1., 0.29358698, 3.],
                                          [2., 0.58532165, 3.],
                                          [1., -0.27015115, 3.],
                                          [2., -0.42073549, 3.]]),
        'probability': np.matrix([[1., 2., 1.]]),
        'ln_pdf': np.matrix([0, 0.7, 0]),
        'total_number_samples': 400,
        'g': np.array([0.2, 0.2, 0.2]), 'd': np.array([0.2, 0.2, 0.2]),
        'k': np.array([0.2, 0.2, 0.2]), 'h': np.array([0.2, 0.2, 0.2]),
        's': np.array([0.2, 0.2, 0.2]), 'S1': np.array([0.2, 0.2, 0.2]),
        'D1': np.array([0.2, 0.2, 0.2]), 'R1': np.array([0.2, 0.2, 0.2]),
        'u': np.array([0.2, 0.2, 0.2]), 'v': np.array([0.2, 0.2, 0.2]),
        'S2': np.array([0.2, 0.2, 0.2]), 'D2': np.array([0.2, 0.2, 0.2]),
        'R2': np.array([0.2, 0.2, 0.2]),
        'ln_bayesian_evidence': 1. + 10})
    clear_outputs()
    self.assertFalse(os.path.exists('mtfitOUTPUTTEST.hyp'))
    self.assertFalse(os.path.exists('mtfitOUTPUTTEST.mt'))
    fid, out_str = hyp_output([a, b], fid='mtfitOUTPUTTEST.hyp')
    self.assertTrue(os.path.exists('mtfitOUTPUTTEST.hyp'))
    self.assertTrue(os.path.exists('mtfitOUTPUTTEST.mt'))
    clear_outputs()
def test_read_matlab_output(self):
    """Round-trip an event through MATLAB_output and read_matlab_output."""
    self.hyp_file()
    event = parse_hyp('hyptest.hyp')[0]
    mdict, sdict = full_pdf_output_dicts(event, False, {
        'moment_tensor_space': np.matrix([[1., 2.], [2., 1.], [1., 2.],
                                          [2., 1.], [1., 2.], [2., 1.]]),
        'ln_pdf': np.matrix([0, 0.7]),
        'dV': 1,
        'probability': np.matrix([[1., 2.]]),
        'total_number_samples': 400})
    MATLAB_output([mdict, sdict], 'hyptest.mat')
    event, stations = read_matlab_output('hyptest.mat')
    self.assertAlmostEqual(event, mdict['Events'], 10)
    # Each station field read back should match its column in the
    # original Stations array.
    for key, column in (('azimuth', 1), ('takeoff_angle', 2),
                        ('polarity', 3)):
        self.assertAlmostEqual(stations[key], mdict['Stations'][:, column], 10)
def test_read_binary_output(self):
    """Round-trip inversion results through hyp_output and read_binary_output."""

    def clear_outputs():
        # Best-effort removal of any leftover output files.
        for path in ('mtfitOUTPUTTEST.hyp', 'mtfitOUTPUTTEST.mt'):
            try:
                os.remove(path)
            except Exception:
                pass

    self.hyp_file()
    event = parse_hyp('hyptest.hyp')[0]
    # Simple two-sample MT result including a dkl scalar.
    x = {'moment_tensor_space': np.matrix([[1., 2.], [2., 1.], [1., 2.],
                                           [2., 1.], [1., 2.], [2., 1.]]),
         'dkl': 1.2,
         'ln_pdf': np.matrix([0, 0.7]),
         'probability': np.matrix([[1., 2.]]),
         'total_number_samples': 400}
    a, b, c = hyp_output_dicts(event, False, x)
    clear_outputs()
    self.assertFalse(os.path.exists('mtfitOUTPUTTEST.hyp'))
    self.assertFalse(os.path.exists('mtfitOUTPUTTEST.mt'))
    fid, out_str = hyp_output([a, b], fid='mtfitOUTPUTTEST.hyp')
    y = read_binary_output('mtfitOUTPUTTEST.mt')
    self.assertTrue(os.path.exists('mtfitOUTPUTTEST.hyp'))
    self.assertTrue(os.path.exists('mtfitOUTPUTTEST.mt'))
    # All keys and the sampled values survive the round trip.
    self.assertEqual(sorted(y[0].keys()), sorted(x.keys()))
    self.assertTrue(
        (y[0]['moment_tensor_space'] == x['moment_tensor_space']).all())
    self.assertEqual(y[0]['dkl'], x['dkl'])
    clear_outputs()
    # Extended result with source-type parameters and Bayesian evidence.
    x = {'moment_tensor_space': np.matrix([[1., -0.51969334, 3.],
                                           [2., 0.22610635, 3.],
                                           [1., 0.29358698, 3.],
                                           [2., 0.58532165, 3.],
                                           [1., -0.27015115, 3.],
                                           [2., -0.42073549, 3.]]),
         'probability': np.matrix([[1., 2., 1.]]),
         'dkl': 2.4,
         'ln_pdf': np.matrix([0, 0.7, 0]),
         'total_number_samples': 400,
         'g': np.array([0.1, 0.2, 0.3]), 'd': np.array([0.2, 0.2, 0.2]),
         'k': np.array([0.2, 0.2, 0.2]), 'h': np.array([0.2, 0.2, 0.2]),
         's': np.array([0.2, 0.2, 0.2]), 'S1': np.array([0.2, 0.2, 0.2]),
         'D1': np.array([0.2, 0.2, 0.2]), 'R1': np.array([0.2, 0.2, 0.2]),
         'u': np.array([0.2, 0.2, 0.2]), 'v': np.array([0.2, 0.2, 0.2]),
         'S2': np.array([0.2, 0.2, 0.2]), 'D2': np.array([0.2, 0.2, 0.2]),
         'R2': np.array([0.2, 0.2, 0.2]),
         'ln_bayesian_evidence': 1. + 10.}
    a, b, c = hyp_output_dicts(event, False, x)
    clear_outputs()
    self.assertFalse(os.path.exists('mtfitOUTPUTTEST.hyp'))
    self.assertFalse(os.path.exists('mtfitOUTPUTTEST.mt'))
    fid, out_str = hyp_output([a, b], fid='mtfitOUTPUTTEST.hyp')
    y = read_binary_output('mtfitOUTPUTTEST.mt')
    self.assertTrue(os.path.exists('mtfitOUTPUTTEST.hyp'))
    self.assertTrue(os.path.exists('mtfitOUTPUTTEST.mt'))
    self.assertEqual(sorted(y[0].keys()), sorted(x.keys()))
    self.assertTrue((y[0]['g'] == x['g']).all())
    self.assertEqual(
        y[0]['ln_bayesian_evidence'], x['ln_bayesian_evidence'])
    self.assertEqual(y[0]['dkl'], x['dkl'])
    clear_outputs()
def test_full_pdf_output_dicts(self):
    """Check the MATLAB-style dictionaries built by full_pdf_output_dicts."""

    def sample_args():
        # Fresh copy of a simple two-sample inversion result.
        return {'moment_tensor_space': np.matrix([[1., 2.], [2., 1.], [1., 2.],
                                                  [2., 1.], [1., 2.], [2., 1.]]),
                'ln_pdf': np.matrix([0, 0.7]),
                'dV': 1,
                'probability': np.matrix([[1., 2.]]),
                'total_number_samples': 400}

    def check_mdict(mdict):
        # Common structural checks on the MATLAB output dictionary.
        for key in ('Other', 'Events', 'Stations'):
            self.assertTrue(key in mdict.keys())
        for key in ('MTSpace', 'Probability', 'ln_pdf', 'UID', 'NSamples'):
            self.assertTrue(key in mdict['Events'].keys())
        self.assertEqual(mdict['Stations'].shape, (20, 4))

    self.hyp_file()
    event = parse_hyp('hyptest.hyp')[0]
    mdict, sdict = full_pdf_output_dicts(event, False, sample_args())
    check_mdict(mdict)
    # Swap the polarity observations for polarity probabilities.
    event['PPolarityProbability'] = event['PPolarity'].copy()
    event['PPolarityProbability']['Measured'] = np.matrix(
        [[0.6, 0.4], [0.7, 0.3], [0.8, 0.2], [0.67, 0.33], [0.94, 0.06],
         [0.32, 0.68], [0.96, 0.04], [0.76, 0.24], [0.82, 0.18], [0.12, 0.88],
         [0.57, 0.43], [0.68, 0.32], [0.51, 0.49], [0.68, 0.32], [0.50, 0.50],
         [0.02, 0.98], [0.6, 0.4], [0.7, 0.3], [0.8, 0.2], [0.67, 0.33]])
    event.pop('PPolarity')
    mdict, sdict = full_pdf_output_dicts(
        event, ['PPolarityProbability'], sample_args())
    check_mdict(mdict)
    # Station polarity column is the dominant probability, signed by
    # whichever of the two columns is larger.
    self.assertTrue(all(mdict['Stations'][:, 3] == np.array(
        [0.6, 0.7, 0.8, 0.67, 0.94, -0.68, 0.96, 0.76, 0.82, -0.88, 0.57,
         0.68, 0.51, 0.68, 0.5, -0.98, 0.6, 0.7, 0.8, 0.67])))
    # Re-run without explicit inversion options.
    mdict, sdict = full_pdf_output_dicts(event, False, sample_args())
    check_mdict(mdict)
def run_mtfit(catalog, nlloc_dir, parallel=True, n=8, algorithm='iterate',
              phy_mem=1, inversion_options='PPolarity',
              number_location_samples=5000, MT=True, DC=True):
    """
    Wrapper on mtfit to run over a catalog for which there are already
    polarity picks and .scatangle nlloc files in the specified dir

    :param catalog: Catalog of events
    :param nlloc_dir: Directory with the necessary nlloc and .scatangle files
    :param parallel: Run in parallel?
    :param n: Number of cores
    :param algorithm: MTfit inversion algorithm
    :param phy_mem: A soft memory limit of 1Gb of RAM for estimating the
        sample sizes. This is only a soft limit, so no errors are thrown
        if the memory usage increases above this.
    :param inversion_options: What data to include in the inversion
    :param number_location_samples: How many random samples to draw from the
        NLLoc location PDF
    :param MT: Run the full moment tensor inversion?
    :param DC: Run the double-couple-constrained inversion?
    :return:
    """
    for ev in catalog:
        eid = str(ev.resource_id).split('/')[-1]
        print('Running mtfit for {}'.format(eid))
        nlloc_fs = glob('{}/{}*'.format(
            nlloc_dir, str(ev.resource_id).split('/')[-1].split('_')[0]))
        # Find the hyp file with update pol information
        print(nlloc_fs)
        try:
            hyp_path = [path for path in nlloc_fs
                        if path.endswith('.hyp')
                        and 'sum' not in path.split('.')
                        and path.split('_')[-1].startswith('pol')][0]
        except IndexError:
            print('No NLLoc location for this event. Probably low SNR?')
            continue
        print(hyp_path)
        # Read in data dict
        data = parse_hyp(hyp_path)
        # BUG FIX: parse_hyp returns a list of event dictionaries (one per
        # event in the file); unwrap the single event so the string-keyed
        # accesses below do not raise TypeError on a list.
        if isinstance(data, list):
            data = data[0]
        print(data)
        print(type(data))
        print(data['PPolarity'])
        data['UID'] = '{}_ppolarity'.format(eid)
        # Set the convert flag to convert the output to other source
        # parameterisations
        convert = True
        # Set location uncertainty file path.
        # NOTE(review): this raises IndexError if no .scatangle file matches;
        # presumably the pipeline guarantees one exists - confirm.
        location_pdf_file_path = [path for path in nlloc_fs
                                  if path.endswith('.scatangle')][0]
        # Handle location uncertainty
        # Set number of location samples to use (randomly sampled from PDF)
        # as this reduces calculation time (each location sample is
        # equivalent to running an additional event)
        bin_scatangle = True
        if DC:
            # First run for DC constrained solution
            max_samples = 100000
            dc = True
            print('Running DC for {}'.format(eid))
            mtfit(data, location_pdf_file_path=location_pdf_file_path,
                  algorithm=algorithm, parallel=parallel,
                  inversion_options=inversion_options, phy_mem=phy_mem,
                  dc=dc, max_samples=max_samples, convert=convert,
                  bin_scatangle=bin_scatangle,
                  number_location_samples=number_location_samples, n=n)
        if MT:
            # Now for full MT
            # Change max_samples for MT inversion
            max_samples = 1000000
            dc = False
            print('Running full MT for {}'.format(eid))
            # Create the inversion object with the set parameters.
            mtfit(data, location_pdf_file_path=location_pdf_file_path,
                  algorithm=algorithm, parallel=parallel,
                  inversion_options=inversion_options, phy_mem=phy_mem,
                  max_samples=max_samples, convert=convert, dc=dc,
                  bin_scatangle=bin_scatangle,
                  number_location_samples=number_location_samples, n=n)
    return