def run(test=False):
    """Run the double-couple example inversion.

    Parameters
    ----------
    test : bool
        If True, reduce the number of samples so the example runs
        quickly (used by the build tests).
    """
    # Get data:
    from example_data import double_couple_data
    from MTfit.inversion import Inversion
    import pickle  # BUG FIX: pickle was used below but never imported

    data = double_couple_data()
    print("Running Double-Couple example\n\n\tInput data dictionary:")
    # Print data
    print(data)
    print('Data is pickled to Double_Couple_Example.inv')
    with open('Double_Couple_Example.inv', 'wb') as f:
        pickle.dump(data, f)
    # Set parameters
    algorithm = 'iterate'  # uses an iterative random sampling approach
    parallel = False  # runs on a single thread
    # uses a soft limit of 1Gb of RAM for estimating the sample sizes
    # (this is only a soft limit, so no errors are thrown if the memory
    # usage increases above it)
    phy_mem = 1
    dc = True  # runs the inversion in the double-couple space
    max_samples = 100000  # runs the inversion for 100,000 samples
    if test:
        max_samples = 1000
    # Inversion
    # Create the inversion object with the set parameters.
    inversion_object = Inversion(data_file='Double_Couple_Example.inv',
                                 algorithm=algorithm, parallel=parallel,
                                 phy_mem=phy_mem, dc=dc,
                                 max_samples=max_samples, convert=True)
    # Run the forward model based inversion
    inversion_object.forward()
def run(test=False):
    """Run the P polarity example; *test* shrinks the sample counts for quick test runs."""
    # Load the example data set
    from example_data import p_polarity_data
    data = p_polarity_data()
    print("Running P Polarity example\n\n\tInput data dictionary:")
    print(data)
    # Set-up inversion object:
    from MTfit.inversion import Inversion
    # Shared inversion settings:
    #   'iterate'  -> iterative random sampling approach
    #   parallel   -> tries to run in parallel using multiprocessing
    #   phy_mem    -> ~500Mb soft memory limit for estimating sample sizes
    #                 (soft limit only: no errors if usage rises above it)
    #   dc=False   -> full moment tensor inversion
    settings = dict(algorithm='iterate', parallel=True, phy_mem=0.5,
                    dc=False, convert=True)
    # Run 1: 1,000,000 samples
    n_samples = 1000 if test else 1000000
    Inversion(data, max_samples=n_samples, **settings).forward()
    # Run 2: denser sampling (10,000,000 samples) under a new UID
    # (which also changes the output file name)
    data['UID'] = 'P_Polarity_Example_Dense_Output'
    n_samples = 1000 if test else 10000000
    Inversion(data, max_samples=n_samples, **settings).forward()
def run(test=False):
    """Run the location uncertainty example; *test* shortens the run time."""
    # Load the example data and the location scatter angles
    from example_data import location_uncertainty_data, location_uncertainty_angles
    from MTfit.inversion import Inversion
    data = location_uncertainty_data()
    print("Running Location Uncertainty example\n\n\tInput data dictionary:")
    print(data)
    # Write the scatter-angle file the inversion will read
    scatangle_file = 'Location_Uncertainty.scatangle'
    with open(scatangle_file, 'w') as f:
        f.write(location_uncertainty_angles())
    # Inversion settings: time-limited random sampling, single thread,
    # full moment tensor space, P polarity observations only.
    # phy_mem is a soft Gb limit used when estimating sample sizes
    # (no errors are thrown if usage rises above it).
    max_time = 60  # seconds
    phy_mem = 1
    if test:
        max_time = 10
        phy_mem = 0.01
    kwargs = dict(location_pdf_file_path=scatangle_file, algorithm='time',
                  parallel=False, phy_mem=phy_mem, dc=False,
                  inversion_options='PPolarity', max_time=max_time,
                  convert=True)
    # Run 1: default station sampling
    Inversion(data, **kwargs).forward()
    # Run 2: fewer station samples, which increases the number of
    # moment tensor samples tried in the same time
    Inversion(data, number_station_samples=10000, **kwargs).forward()
def run(test=False):
    """Run the time-limited inversion example; *test* shortens the run to 10 s."""
    # Get data:
    from example_data import time_inversion_data
    from MTfit.inversion import Inversion
    data = time_inversion_data()
    print("Running Time limited example\n\n\tInput data dictionary:")
    print(data)
    # Time-limited random sampling on a single thread, using only the
    # P polarity and P/SH RMS amplitude-ratio observations from the data
    # dictionary. phy_mem is a soft Gb limit used when estimating sample
    # sizes (no errors are thrown if usage rises above it).
    max_time = 10 if test else 120  # seconds
    common = dict(algorithm='time', parallel=False, phy_mem=1,
                  inversion_options='PPolarity,P/SHAmplitudeRatio',
                  max_time=max_time, convert=True)
    # Run 1: full moment tensor inversion
    Inversion(data, dc=False, **common).forward()
    # Run 2: repeat constrained to the double-couple space
    Inversion(data, dc=True, **common).forward()
data = synthetic_event() if test: # Identical code for running build test if case.lower() == 'ppolarity': data['UID'] += '_ppolarity' algorithm = 'iterate' parallel = parallel phy_mem = 1 dc = True max_samples = 100 inversion_options = 'PPolarity' convert = True inversion_object = Inversion(data, algorithm=algorithm, parallel=parallel, inversion_options=inversion_options, phy_mem=phy_mem, dc=dc, max_samples=max_samples, convert=convert) inversion_object.forward() max_samples = 100 inversion_object = Inversion(data, algorithm=algorithm, parallel=parallel, inversion_options=inversion_options, phy_mem=phy_mem, dc=not dc, max_samples=max_samples, convert=convert) inversion_object.forward() elif case.lower() == 'ar':
def run(test=False):
    """Run the P/SH amplitude ratio example; *test* shrinks the runs for build tests."""
    # Get data:
    from example_data import p_sh_amplitude_ratio_data
    from MTfit.inversion import Inversion
    data = p_sh_amplitude_ratio_data()
    print("Running P/SH Amplitude Ratio example\n\n\tInput data dictionary:")
    print(data)
    # Shared settings: tries to run in parallel using multiprocessing,
    # ~1Gb soft memory limit for estimating sample sizes (soft limit only),
    # full moment tensor inversion (dc=False).
    shared = dict(parallel=True, phy_mem=1, dc=False, convert=True)
    # Run 1: iterative random sampling for a fixed number of samples
    n_samples = 1000 if test else 1000000
    Inversion(data, algorithm='iterate', max_samples=n_samples,
              **shared).forward()
    # Run 2: time-limited sampling under a new UID
    # (which also changes the output file name)
    data['UID'] = 'P_SH_Amplitude_Ratio_Example_Time_Output'
    time_limit = 10 if test else 300  # seconds
    Inversion(data, algorithm='time', max_time=time_limit,
              **shared).forward()
def run(parallel=True, test=False):
    """Run the relative amplitude inversion example for two events.

    Parameters
    ----------
    parallel : bool
        Run the iterative sampling in parallel using multiprocessing.
    test : bool
        If True, run tiny versions of both inversions (build tests).
    """
    # Import inversion
    from MTfit.inversion import Inversion
    # Get Data
    from example_data import relative_data
    data = relative_data()
    # Joint inversion of P polarities and relative P amplitudes
    inversion_options = ['PPolarity', 'PAmplitude']
    # Soft memory limit of 1Gb for estimating sample sizes (soft limit
    # only: no errors are thrown if usage rises above it)
    phy_mem = 1
    # Convert the output to other source parameterisations
    convert = True
    if test:
        # Small runs mirroring the two full inversions below.
        # BUG FIX: these calls previously passed relative=True instead of
        # relative_amplitude=True (the keyword used by the full runs
        # below), so the test path was inconsistent with the real runs.
        inversion_object = Inversion(
            data, algorithm='iterate', parallel=parallel,
            inversion_options=inversion_options, phy_mem=phy_mem, dc=True,
            max_samples=100, convert=convert, multiple_events=True,
            relative_amplitude=True)
        inversion_object.forward()
        inversion_object = Inversion(
            data, algorithm='mcmc', parallel=False,
            inversion_options=inversion_options, phy_mem=phy_mem, dc=False,
            max_acceptance_rate=0.3, min_acceptance_rate=0.1,
            chain_length=100, burn_length=100, convert=convert,
            multiple_events=True, relative_amplitude=True)
        inversion_object.forward()
        return
    # P polarity and relative P amplitude inversion, double-couple space,
    # using iterative random sampling. 10 million samples is quite coarse
    # for relative inversion of two events.
    inversion_object = Inversion(
        data, algorithm='iterate', parallel=parallel,
        inversion_options=inversion_options, phy_mem=phy_mem, dc=True,
        max_samples=10000000, convert=convert, multiple_events=True,
        relative_amplitude=True)
    # Run the forward model
    inversion_object.forward()
    # Full moment tensor inversion: use the McMC algorithm, as random
    # sampling of the full space can require a prohibitive number of
    # samples.
    inversion_object = Inversion(
        data, algorithm='mcmc', parallel=False,
        inversion_options=inversion_options, phy_mem=phy_mem, dc=False,
        chain_length=100000, max_acceptance_rate=0.3,
        min_acceptance_rate=0.1, burn_length=30000, convert=convert,
        multiple_events=True, relative_amplitude=True)
    # Run the forward model
    inversion_object.forward()