Example #1
    def precompute(self):
        stations = list(self.stations.iterrows())
        channel = self.config['wavefield_channel']
        if channel == "all":
            channels = ['E', 'N', 'Z']
        else:
            channels = [channel]
        # distribute the stations round-robin over MPI ranks
        for i, station in stations[self.rank::self.size]:
            for cha in channels:
                self.function(station, channel=cha)

        if self.rank == 0:
            # plot an example wavefield for the last station handled above
            wfile = glob(
                os.path.join(self.args.project_path, 'greens',
                             '*' + station['sta'] + '.*.h5'))[0]
            ofile = os.path.join(self.args.project_path,
                                 'wavefield_example.png')
            with WaveField(wfile) as wf:
                wf.plot_snapshot(self.npts / self.Fs * 0.3,
                                 outfile=ofile,
                                 stations=[(station['lat'], station['lon'])])
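
The slice stations[self.rank::self.size] used above is a round-robin split
of the work over MPI ranks. A minimal self-contained sketch of that pattern,
assuming mpi4py is available (the station names are illustrative):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()

stations = ['STA%02d' % k for k in range(10)]
for station in stations[rank::size]:
    # each rank handles every size-th station, offset by its rank
    print("rank %d handles %s" % (rank, station))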
Example #2
def get_ns(wf1, source_conf, insta):

    # Nr of time steps in traces
    if insta:
        # get path to instaseis db
        #ToDo: ugly.
        dbpath = json.load(
            open(os.path.join(source_conf['project_path'],
                              'config.json')))['wavefield_path']
        # open
        db = instaseis.open_db(dbpath)
        # get a test seismogram to determine nt and sampling rate
        stest = db.get_seismograms(source=instaseis.ForceSource(latitude=0.0,
                                                                longitude=0.0),
                                   receiver=instaseis.Receiver(latitude=10.,
                                                               longitude=0.0),
                                   dt=1. / source_conf['sampling_rate'])[0]

        nt = stest.stats.npts
        Fs = stest.stats.sampling_rate
    else:
        with WaveField(wf1) as wf1:
            nt = int(wf1.stats['nt'])
            Fs = round(wf1.stats['Fs'], 8)

    # Necessary length of zero padding for carrying out
    # frequency domain correlations/convolutions
    n = next_fast_len(2 * nt - 1)

    # Number of time steps for synthetic correlation
    n_lag = int(source_conf['max_lag'] * Fs)
    if nt - 2 * n_lag <= 0:
        click.secho('Resetting maximum lag to %g seconds: Synthetics are '
                    'too short for a maximum lag of %g seconds.'
                    % (nt // 2 / Fs, n_lag / Fs))
        n_lag = nt // 2

    n_corr = 2 * n_lag + 1

    return nt, n, n_corr, Fs
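
The padded length n = next_fast_len(2 * nt - 1) ensures that the
frequency-domain products computed in these examples correspond to a linear
rather than circular cross-correlation. A self-contained sketch of that fact,
assuming next_fast_len comes from scipy.fft (all other names are
illustrative):

import numpy as np
from scipy.fft import next_fast_len

nt = 100
rng = np.random.default_rng(0)
s1 = rng.standard_normal(nt)
s2 = rng.standard_normal(nt)

n = next_fast_len(2 * nt - 1)
spec1 = np.fft.rfft(s1, n)
spec2 = np.fft.rfft(s2, n)
xcorr = np.fft.irfft(np.conjugate(spec1) * spec2, n)
# reorder to lags -(nt - 1) .. (nt - 1) with zero lag in the middle
xcorr = np.concatenate((xcorr[-(nt - 1):], xcorr[:nt]))
assert np.allclose(xcorr, np.correlate(s2, s1, mode='full'))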
Example #3
def get_ns(all_conf, insta=False):
    # Nr of time steps in traces
    if insta:
        # get path to instaseis db
        dbpath = all_conf.config['wavefield_path']

        # open
        db = instaseis.open_db(dbpath)
        # get a test seismogram to determine nt and sampling rate
        dt = 1. / all_conf.config['wavefield_sampling_rate']
        stest = db.get_seismograms(source=instaseis.ForceSource(latitude=0.0,
                                                                longitude=0.),
                                   receiver=instaseis.Receiver(latitude=10.,
                                                               longitude=0.),
                                   dt=dt)[0]
        nt = stest.stats.npts
        Fs = stest.stats.sampling_rate
        # zero padding length for frequency domain correlations; without
        # this, n would stay undefined in the instaseis branch
        n = next_fast_len(2 * nt - 1)
    else:
        any_wavefield = glob(os.path.join(all_conf.config['project_path'],
                                          'greens', '*.h5'))[-1]
        with WaveField(any_wavefield) as wf1:
            nt = int(wf1.stats['nt'])
            Fs = round(wf1.stats['Fs'], 8)
            n = wf1.stats['npad']

    # Number of time steps for synthetic correlation
    n_lag = int(all_conf.source_config['max_lag'] * Fs)
    if nt - 2 * n_lag <= 0:
        n_lag_old = n_lag
        n_lag = nt // 2
        warn('Resetting maximum lag to %g seconds: Synthetics are too '
             'short for %g seconds.' % (n_lag / Fs, n_lag_old / Fs))

    n_corr = 2 * n_lag + 1

    return nt, n, n_corr, Fs
Example #4
def get_ns(wf1, source_conf):

    # Nr of time steps in traces
    with WaveField(wf1) as wf1:
        nt = int(wf1.stats['nt'])
        Fs = round(wf1.stats['Fs'], 8)

    print('Sampling rate (Hz) %g' % Fs)

    # Necessary length of zero padding for carrying out
    # frequency domain correlations/convolutions
    n = next_fast_len(2 * nt - 1)

    # Number of time steps for synthetic correlation
    n_lag = int(source_conf['max_lag'] * Fs)
    if nt - 2 * n_lag <= 0:
        click.secho('Resetting maximum lag to %g seconds: Synthetics are '
                    'too short for a maximum lag of %g seconds.'
                    % (nt // 2 / Fs, n_lag / Fs))
        n_lag = nt // 2

    n_corr = 2 * n_lag + 1

    return nt, n, n_corr
Example #5
def g1g2_kern(wf1str, wf2str, kernel, adjt, src, source_conf, insta):

    measr_conf = json.load(
        open(os.path.join(source_conf['source_path'], 'measr_config.json')))

    bandpass = measr_conf['bandpass']

    if bandpass is None:
        filtcnt = 1
    elif isinstance(bandpass, list):
        if not isinstance(bandpass[0], list):
            filtcnt = 1
        else:
            filtcnt = len(bandpass)

    ntime, n, n_corr, Fs = get_ns(wf1str, source_conf, insta)
    # use a one-sided taper: The seismogram probably has a non-zero end,
    # being cut off wherever the solver stopped running.
    taper = cosine_taper(ntime, p=0.01)
    taper[0:ntime // 2] = 1.0

    ########################################################################
    # Prepare filenames and adjoint sources
    ########################################################################

    filenames = []
    adjt_srcs = []
    adjt_srcs_cnt = 0

    for ix_f in range(filtcnt):

        filename = kernel + '.{}.npy'.format(ix_f)
        filenames.append(filename)
        #if os.path.exists(filename):
        #   continue

        f = Stream()
        for a in adjt:
            adjtfile = a + '*.{}.sac'.format(ix_f)
            adjtfile = glob(adjtfile)
            try:
                f += read(adjtfile[0])[0]
                f[-1].data = my_centered(f[-1].data, n_corr)
                adjt_srcs_cnt += 1
            except IndexError:
                print('No adjoint source found: {}\n'.format(a))
                break

        adjt_srcs.append(f)


    ########################################################################
    # Compute the kernels
    ########################################################################

    with NoiseSource(src) as nsrc:

        ntraces = nsrc.src_loc[0].shape[0]

        if insta:
            # open database
            dbpath = json.load(
                open(os.path.join(source_conf['project_path'],
                                  'config.json')))['wavefield_path']
            # open and determine Fs, nt
            db = instaseis.open_db(dbpath)
            # get receiver locations; in instaseis mode the wf1str / wf2str
            # arguments are expected to carry station metadata (cf. the
            # other examples), not file paths
            lat1 = geograph_to_geocent(float(wf1str[2]))
            lon1 = float(wf1str[3])
            rec1 = instaseis.Receiver(latitude=lat1, longitude=lon1)
            lat2 = geograph_to_geocent(float(wf2str[2]))
            lon2 = float(wf2str[3])
            rec2 = instaseis.Receiver(latitude=lat2, longitude=lon2)

        else:
            wf1 = WaveField(wf1str)
            wf2 = WaveField(wf2str)

        kern = np.zeros((filtcnt, ntraces, len(adjt)))

        ########################################################################
        # Loop over locations
        ########################################################################
        for i in range(ntraces):

            # noise source spectrum at this location
            # For the kernel, this contains only the basis functions of the
            # spectrum without weights; might still be location-dependent,
            # for example when constraining sensitivity to the ocean
            S = nsrc.get_spect(i)

            if S.sum() == 0.:
                # The spectrum has 0 phase so only checking absolute value here
                continue

            ####################################################################
            # Get synthetics
            ####################################################################
            if insta:
                # get source locations
                lat_src = geograph_to_geocent(nsrc.src_loc[1, i])
                lon_src = nsrc.src_loc[0, i]
                fsrc = instaseis.ForceSource(latitude=lat_src,
                                             longitude=lon_src,
                                             f_r=1.e12)

                s1 = np.ascontiguousarray(
                    db.get_seismograms(
                        source=fsrc,
                        receiver=rec1,
                        dt=1. / source_conf['sampling_rate'])[0].data * taper)
                s2 = np.ascontiguousarray(
                    db.get_seismograms(
                        source=fsrc,
                        receiver=rec2,
                        dt=1. / source_conf['sampling_rate'])[0].data * taper)

            else:
                s1 = np.ascontiguousarray(wf1.data[i, :] * taper)
                s2 = np.ascontiguousarray(wf2.data[i, :] * taper)

            spec1 = np.fft.rfft(s1, n)
            spec2 = np.fft.rfft(s2, n)

            g1g2_tr = np.multiply(np.conjugate(spec1), spec2)
            c = np.multiply(g1g2_tr, S)

            #######################################################################
            # Get Kernel at that location
            #######################################################################
            corr_temp = my_centered(np.fft.ifftshift(np.fft.irfft(c, n)),
                                    n_corr)

            #######################################################################
            # Apply the 'adjoint source'
            #######################################################################
            for ix_f in range(filtcnt):
                f = adjt_srcs[ix_f]

                if f is None:
                    continue
                for j in range(len(f)):
                    delta = f[j].stats.delta

                    kern[ix_f, i, j] = np.dot(corr_temp, f[j].data) * delta

                    #elif measr_conf['mtype'] in ['envelope']:
                    #    if j == 0:
                    #        corr_temp_h = corr_temp
                    #        print(corr_temp_h)
                    #    if j == 1:
                    #        corr_temp_h = hilbert(corr_temp)
                    #        print(corr_temp_h)
                    #
                    #    kern[ix_f,i,j] = np.dot(corr_temp,f[j].data) * delta

            if i % 50000 == 0:
                print("Finished {} source locations.".format(i))

    if not insta:
        wf1.file.close()
        wf2.file.close()

    for ix_f in range(filtcnt):
        filename = filenames[ix_f]
        if kern[ix_f, :, :].sum() != 0:
            np.save(filename, kern[ix_f, :, :])
    return
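
my_centered is used throughout these examples to cut the central n_corr
samples out of a longer correlation trace. A hedged sketch of what such a
helper plausibly does (an assumption for illustration, not the project's
actual implementation):

import numpy as np

def my_centered_sketch(arr, newsize):
    # return the central `newsize` samples, zero-padding if arr is shorter
    out = np.zeros(newsize, dtype=arr.dtype)
    if len(arr) >= newsize:
        start = len(arr) // 2 - newsize // 2
        out[:] = arr[start:start + newsize]
    else:
        start = newsize // 2 - len(arr) // 2
        out[start:start + len(arr)] = arr
    return out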
Example #6
def run_preprocessing(source_config):

    configfile = os.path.join(source_config['project_path'], 'config.json')
    config = json.load(open(configfile))

    files = glob(os.path.join(config['wavefield_path'], '*.h5'))
    processed_path = os.path.join(source_config['source_path'],
                                  'wavefield_processed')

    if not os.path.exists(processed_path):
        os.mkdir(processed_path)

    # very simple embarrassingly parallel loop
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()

    files = files[rank::size]
    
    for file in files:

        newfile = os.path.join(processed_path, os.path.basename(file))

        if os.path.exists(newfile):
            print("File {} was already processed, skipping.".format(
                os.path.basename(file)))
            continue
        else:
            print("Preprocessing {}".format(os.path.basename(file)))

        if source_config['preprocess_truncate_sec'] is not None:
            # truncate the wavefield to the configured duration
            with WaveField(file) as wf:
                wf.truncate(newfile,
                            float(source_config['preprocess_truncate_sec']))

        if source_config['preprocess_decimation_factor'] is not None:

            # has a truncated file been written already?
            if os.path.exists(newfile):
                newfile_temp = newfile + '.temp'
                with WaveField(newfile) as wf:
                    wf.decimate(decimation_factor=source_config['preprocess_decimation_factor'],
                                outfile=newfile_temp,
                                taper_width=0.005)
                # atomic rename instead of shelling out to "mv"
                os.replace(newfile_temp, newfile)
            else:
                with WaveField(file) as wf:
                    wf.decimate(decimation_factor=source_config['preprocess_decimation_factor'],
                                outfile=newfile,
                                taper_width=0.005)

        if source_config['preprocess_filter_kind'] == 'bandpass':

            # The file has been written previously by truncate / decimate
            if os.path.exists(newfile):
                with WaveField(newfile, w='a') as wf:
                    wf.filter_all(
                        source_config['preprocess_filter_kind'],
                        overwrite=True,
                        freqmin=source_config['preprocess_filter_params'][0],
                        freqmax=source_config['preprocess_filter_params'][1],
                        corners=source_config['preprocess_filter_params'][2],
                        zerophase=source_config['preprocess_filter_params'][3])

            else:
                # The file still has to be written
                with WaveField(file) as wf:
                    wf.filter_all(
                        source_config['preprocess_filter_kind'],
                        overwrite=False,
                        freqmin=source_config['preprocess_filter_params'][0],
                        freqmax=source_config['preprocess_filter_params'][1],
                        corners=source_config['preprocess_filter_params'][2],
                        zerophase=source_config['preprocess_filter_params'][3],
                        outfile=newfile)
Example #7
def compute_correlation(input_files, all_conf, nsrc, all_ns, taper,
                        insta=False):
    """
    Compute noise cross-correlations from two .h5 'wavefield' files.
    Noise source distribution and spectrum are given by starting_model.h5.
    It is assumed that noise sources are delta-correlated in space.

    Metainformation: Include the reference station names for both stations
    from wavefield files, if possible. Do not include geographic information
    from .csv file as this might be error-prone. Just add the geographic
    info later if needed.
    """

    wf1, wf2 = input_files
    ntime, n, n_corr, Fs = all_ns
    ntraces = nsrc.src_loc[0].shape[0]
    correlation = np.zeros(n_corr)

    if insta:
        # open database
        dbpath = all_conf.config['wavefield_path']

        # open
        db = instaseis.open_db(dbpath)
        # get receiver locations
        station1 = wf1[0]
        station2 = wf2[0]
        lat1 = geograph_to_geocent(float(wf1[2]))
        lon1 = float(wf1[3])
        rec1 = instaseis.Receiver(latitude=lat1, longitude=lon1)
        lat2 = geograph_to_geocent(float(wf2[2]))
        lon2 = float(wf2[3])
        rec2 = instaseis.Receiver(latitude=lat2, longitude=lon2)
    else:
        wf1 = WaveField(wf1)
        wf2 = WaveField(wf2)
        station1 = wf1.stats['reference_station']
        station2 = wf2.stats['reference_station']

        # Make sure all is consistent
        if False in (wf1.sourcegrid[1, 0:10] == wf2.sourcegrid[1, 0:10]):
            raise ValueError("Wave fields not consistent.")

        if False in (wf1.sourcegrid[1, -10:] == wf2.sourcegrid[1, -10:]):
            raise ValueError("Wave fields not consistent.")

        if False in (wf1.sourcegrid[0, -10:] == nsrc.src_loc[0, -10:]):
            raise ValueError("Wave field and source not consistent.")

    # Loop over source locations
    print_each_n = max(5, round(max(ntraces // 5, 1), -1))
    for i in range(ntraces):

        # noise source spectrum at this location
        S = nsrc.get_spect(i)

        if S.sum() == 0.:
            # If amplitude is 0, continue. (Spectrum has 0 phase anyway.)
            continue

        if insta:
            # get source locations
            lat_src = geograph_to_geocent(nsrc.src_loc[1, i])
            lon_src = nsrc.src_loc[0, i]
            fsrc = instaseis.ForceSource(latitude=lat_src,
                                         longitude=lon_src,
                                         f_r=1.e12)
            Fs = all_conf.config['wavefield_sampling_rate']
            s1 = db.get_seismograms(source=fsrc, receiver=rec1,
                                    dt=1. / Fs)[0].data * taper
            s2 = db.get_seismograms(source=fsrc, receiver=rec2,
                                    dt=1. / Fs)[0].data * taper
            s1 = np.ascontiguousarray(s1)
            s2 = np.ascontiguousarray(s2)
            spec1 = np.fft.rfft(s1, n)
            spec2 = np.fft.rfft(s2, n)

        else:
            if not wf1.fdomain:
                # read Green's functions
                s1 = np.ascontiguousarray(wf1.data[i, :] * taper)
                s2 = np.ascontiguousarray(wf2.data[i, :] * taper)
                # Fourier transform for greater ease of convolution
                spec1 = np.fft.rfft(s1, n)
                spec2 = np.fft.rfft(s2, n)
            else:
                spec1 = np.ascontiguousarray(wf1.data[i, :])
                spec2 = np.ascontiguousarray(wf2.data[i, :])

        # convolve G1G2
        g1g2_tr = np.multiply(np.conjugate(spec1), spec2)

        # convolve noise source
        c = np.multiply(g1g2_tr, S)

        # transform back
        correlation += my_centered(np.fft.fftshift(np.fft.irfft(c, n)),
                                   n_corr) * nsrc.surf_area[i]
        # occasional info
        if i % print_each_n == 0 and all_conf.config['verbose']:
            print("Finished {} of {} source locations.".format(i, ntraces))
    # end of loop over all source locations
    return correlation, station1, station2
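
Since get_ns sets n_corr = 2 * n_lag + 1, the correlation returned here is
symmetric around zero lag. A small sketch of how a matching lag axis can be
built (the sampling rate and maximum lag are illustrative):

import numpy as np

Fs = 1.0                          # assumed sampling rate in Hz
n_lag = int(25 * Fs)
n_corr = 2 * n_lag + 1
lags = np.arange(-n_lag, n_lag + 1) / Fs
# zero lag sits exactly at the centre: lags[n_corr // 2] == 0.0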
Example #8
# note: matplotlib.mlab.griddata was removed in matplotlib 3.1;
# scipy.interpolate.griddata is the usual replacement
import sys

import matplotlib.pyplot as plt
import matplotlib.animation as manimation
import matplotlib.tri as tri
from scipy.interpolate import griddata

from noisi import WaveField  # assumed project import

#################################
v = 1.
stations = [(0., 0.)]
lonmin = -120.
lonmax = 120.
latmin = -60.
latmax = 60.
latc = 0.0
lonc = 0.0
resolution = 4
fps = 0.5

wf = WaveField(sys.argv[1])
t_min = float(sys.argv[2])
t_max = float(sys.argv[3])
t_step = float(sys.argv[4])
filename = sys.argv[5]
#################################

FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title='Wavefield',
                artist='Matplotlib',
                comment='Movie support!')
writer = FFMpegWriter(fps=fps, metadata=metadata)

fig = plt.figure()
plt.subplot(111)
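
The snippet stops after creating the figure. A hedged continuation showing
the canonical matplotlib pattern for driving the FFMpegWriter set up above;
the actual drawing call is left as a placeholder, since the WaveField
plotting API is not shown in this example:

with writer.saving(fig, filename, dpi=100):
    t = t_min
    while t <= t_max:
        plt.clf()
        # ... draw the wavefield snapshot for time t here ...
        writer.grab_frame()
        t += t_step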
Example #9
config = json.load(open(os.path.join(projectpath, 'config.json')))
source_config = json.load(open(os.path.join(sourcepath, 'source_config.json')))
print('Loaded config files.')

if source_config['preprocess_do']:
    ext = '*.h5'
    wavefield_path = os.path.join(sourcepath, 'wavefield_processed')
else:
    ext = '*.h5'
    wavefield_path = config['wavefield_path']

wfs = glob(os.path.join(wavefield_path, ext))
if wfs != []:
    print('Found wavefield.')

with WaveField(wfs[0]) as wf:
    df = wf.stats['Fs']
    nt = wf.stats['nt']
    # The number of points for the fft is larger due to zero-padding
    # --> apparent higher frequency sampling
    n = next_fast_len(2 * nt - 1)

    freq = np.fft.rfftfreq(n, d=1. / df)

    taper = cosine_taper(len(freq), 0.01)
    print('Determined frequency axis.')


def get_distance(grid, location):
    def f(lat, lon, location):
        return abs(gps2dist_azimuth(lat, lon, location[0], location[1])[0])

    # assumed completion (the original snippet breaks off here): evaluate f
    # on every grid point, with grid[0] = lon and grid[1] = lat
    return np.array([f(lat, lon, location)
                     for lon, lat in zip(grid[0], grid[1])])
Example #10
def compute_kernel(input_files,
                   output_file,
                   all_conf,
                   nsrc,
                   all_ns,
                   taper,
                   insta=False):

    ntime, n, n_corr, Fs = all_ns
    wf1, wf2, adjt = input_files
    ########################################################################
    # Prepare filenames and adjoint sources
    ########################################################################
    adjt_srcs = open_adjoint_sources(all_conf, adjt, n_corr)
    if None in adjt_srcs:
        return None
    else:
        if all_conf.config["verbose"]:
            print("========================================")
            print("Computing: " + output_file)
    # Uniform spatial weights. (current model is in the adjoint source)
    nsrc.distr_basis = np.ones(nsrc.distr_basis.shape)
    ntraces = nsrc.src_loc[0].shape[0]
    # [comp1, comp2] = [wf1, wf2] # keep these strings in case need to be rotated

    if insta:
        # open database
        dbpath = all_conf.config['wavefield_path']
        # open and determine Fs, nt
        db = instaseis.open_db(dbpath)
        # get receiver locations
        lat1 = geograph_to_geocent(float(wf1[2]))
        lon1 = float(wf1[3])
        rec1 = instaseis.Receiver(latitude=lat1, longitude=lon1)
        lat2 = geograph_to_geocent(float(wf2[2]))
        lon2 = float(wf2[3])
        rec2 = instaseis.Receiver(latitude=lat2, longitude=lon2)

    else:
        wf1 = WaveField(wf1)
        wf2 = WaveField(wf2)
        # Make sure all is consistent
        if False in (wf1.sourcegrid[1, 0:10] == wf2.sourcegrid[1, 0:10]):
            raise ValueError("Wave fields not consistent.")

        if False in (wf1.sourcegrid[1, -10:] == wf2.sourcegrid[1, -10:]):
            raise ValueError("Wave fields not consistent.")

        if False in (wf1.sourcegrid[0, -10:] == nsrc.src_loc[0, -10:]):
            raise ValueError("Wave field and source not consistent.")

    kern = np.zeros(
        (nsrc.spect_basis.shape[0], all_conf.filtcnt, ntraces, len(adjt)))
    if all_conf.source_config["rotate_horizontal_components"]:
        tempfile = output_file + ".h5_temp"
        temp = wf1.copy_setup(tempfile, ntraces=ntraces, nt=n_corr)
        map_temp_datasets = {0: temp.data}
        for ix_spec in range(1, nsrc.spect_basis.shape[0]):
            dtmp = temp.file.create_dataset('data{}'.format(ix_spec),
                                            temp.data.shape,
                                            dtype=np.float32)
            map_temp_datasets[ix_spec] = dtmp

    # Loop over locations

    print_each_n = max(5, round(max(ntraces // 5, 1), -1))

    # preload wavefield and spectrum
    S_all = nsrc.get_spect_all()
    wf1_data = np.asarray(wf1.data)
    wf2_data = np.asarray(wf2.data)

    for i in range(ntraces):

        # noise source spectrum at this location
        # For the kernel, this contains only the basis functions of the
        # spectrum without weights; might still be location-dependent,
        # for example when constraining sensitivity to the ocean
        #S = nsrc.get_spect(i)
        S = S_all[i, :]

        if S.sum() == 0.:
            # The spectrum has 0 phase so only checking
            # absolute value here
            continue
        if insta:
            # get source locations
            lat_src = geograph_to_geocent(nsrc.src_loc[1, i])
            lon_src = nsrc.src_loc[0, i]
            fsrc = instaseis.ForceSource(latitude=lat_src,
                                         longitude=lon_src,
                                         f_r=1.e12)
            dt = 1. / all_conf.source_config['sampling_rate']
            s1 = db.get_seismograms(source=fsrc, receiver=rec1,
                                    dt=dt)[0].data * taper
            s1 = np.ascontiguousarray(s1)
            s2 = db.get_seismograms(source=fsrc, receiver=rec2,
                                    dt=dt)[0].data * taper
            s2 = np.ascontiguousarray(s2)
            spec1 = np.fft.rfft(s1, n)
            spec2 = np.fft.rfft(s2, n)

        else:
            if not wf1.fdomain:
                s1 = np.ascontiguousarray(wf1_data[i, :] * taper)
                s2 = np.ascontiguousarray(wf2_data[i, :] * taper)
                # if horizontal component rotation: perform it here
                # more convenient before FFT to avoid additional FFTs
                spec1 = np.fft.rfft(s1, n)
                spec2 = np.fft.rfft(s2, n)
            else:
                spec1 = wf1_data[i, :]
                spec2 = wf2_data[i, :]

        g1g2_tr = np.multiply(np.conjugate(spec1), spec2)
        # spectrum
        for ix_spec in range(nsrc.spect_basis.shape[0]):
            c = np.multiply(g1g2_tr, nsrc.spect_basis[ix_spec, :])
            ###################################################################
            # Get Kernel at that location
            ###################################################################
            ctemp = np.fft.fftshift(np.fft.irfft(c, n))
            corr_temp = my_centered(ctemp, n_corr)
            if all_conf.source_config["rotate_horizontal_components"]:
                map_temp_datasets[ix_spec][i, :] = corr_temp

            ###################################################################
            # Apply the 'adjoint source'
            ###################################################################
            for ix_f in range(all_conf.filtcnt):
                f = adjt_srcs[ix_f]

                if f is None:
                    continue

                for j in range(len(f)):
                    delta = f[j].stats.delta
                    kern[ix_spec, ix_f, i, j] = np.dot(corr_temp,
                                                       f[j].data) * delta

            if i % print_each_n == 0 and all_conf.config['verbose']:
                print("Finished {} of {} source locations.".format(i, ntraces))
    if not insta:
        wf1.file.close()
        wf2.file.close()

    if all_conf.source_config["rotate_horizontal_components"]:
        temp.file.close()
    return kern
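
One detail worth noting above: wf.data is an HDF5 dataset, so the preload
wf1_data = np.asarray(wf1.data) reads it into memory once instead of hitting
the disk for every source location. A generic h5py sketch of the same idea
(file name and shapes are illustrative):

import numpy as np
import h5py

with h5py.File('example.h5', 'w') as f:
    f.create_dataset('data', data=np.random.rand(1000, 256))

with h5py.File('example.h5', 'r') as f:
    data = np.asarray(f['data'])      # one bulk read
    total = sum(data[i, :].sum() for i in range(data.shape[0]))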
Example #11
# preparations:
os.mkdir('test/testdata/testsrc/step_0/corr')
os.system('cp -R test/testdata/testsrc/wavefield_processed_archived \
test/testdata/testsrc/wavefield_processed')
os.system('cp test/testdata/config_archived.json \
test/testdata/config.json')
os.system('cp test/testdata/testsrc/measr_config_archived.json \
test/testdata/testsrc/measr_config.json')
os.system('cp test/testdata/testsrc/source_config_archived.json \
test/testdata/testsrc/source_config.json')

m_a_options = {'g_speed': g_speed, 'window_params': window_params}
m_func = rm.get_measure_func(mtype)

wf = WaveField('test/testdata/wavefield_vel/NET.STA1..CHA.h5')
nlocs = wf.stats['ntraces']

# create perturbation
d_q = 2 * (np.random.rand(nlocs, ) - 0.5)

# evaluate original misfit and load original gradient
# (m_a_options and m_func are the same as defined above)

# open the files....
obs = read('test/testdata/testsrc/observed_correlations/*.sac')[0]
syn = read('test/testdata/testsrc/step_0/corr_archived/*.sac')[0]
syn.stats.sac = {}
syn.stats.sac['dist'] = obs.stats.sac.dist
msr_o = m_func(obs, **m_a_options)
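
This test script assembles the ingredients of a finite-difference gradient
check: a random model perturbation d_q, a reference misfit, and a stored
gradient. A generic sketch of that check in plain numpy (toy misfit, all
names illustrative): J(m + h * dq) - J(m) should approach h * dot(grad, dq)
as h shrinks.

import numpy as np

def J(m):
    # toy misfit for illustration
    return 0.5 * np.sum(m ** 2)

m = np.random.rand(100)
grad = m                   # analytic gradient of the toy misfit
dq = 2 * (np.random.rand(100) - 0.5)

for h in [1e-2, 1e-3, 1e-4]:
    fd = (J(m + h * dq) - J(m)) / h
    print(h, fd, np.dot(grad, dq))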
Example #12
def g1g2_kern(wf1, wf2, corr_file, kernel, adjt, src, source_conf, scale=1.0):

    # ToDo: Take care of saving metainformation
    # ToDo: Think about how to manage different types of sources
    #       (numpy array vs. read source from file as a config option)
    # ToDo: check whether to include autocorrs from user (now hardcoded off)
    # ToDo: Parallel loop(s)
    # ToDo: tests

    ntime, n, n_corr = get_ns(wf1, source_conf)

    taper = cosine_taper(ntime, p=0.05)

    with WaveField(wf1) as wf1, WaveField(wf2) as wf2:

        if wf1.stats['Fs'] != wf2.stats['Fs']:
            msg = 'Sampling rates of synthetic green\'s functions must match.'
            raise ValueError(msg)

        with NoiseSource(src) as nsrc:

            correlation = np.zeros(n_corr)

            kern = np.zeros(wf1.stats['ntraces'])

            # Try to use a trick: Use causal and acausal part of f separately.
            f = read(adjt)[0]
            f.data = my_centered(f.data, n_corr)

            n_acausal_samples = (f.stats.npts - 1) // 2
            specf = np.fft.rfft(f.data[n_acausal_samples:], n)

            # Loop over source locations
            for i in range(wf1.stats['ntraces']):

                s1 = np.ascontiguousarray(wf1.data[i, :] * taper) * scale
                spec1 = np.fft.rfft(s1, n)
                T = np.multiply(np.conj(spec1), nsrc.get_spect(i))

                # it would be cleaner to use ifftshift here!
                T = np.fft.ifftshift(np.fft.irfft(T, n))[0:len(s1)]

                # Get s2 in the shape of f:
                # we need to add half of the length of f before G to replace
                # the acausal part that G does not have to start with
                s2 = np.ascontiguousarray(wf2.data[i, :] * taper) * scale
                spec2 = np.fft.rfft(s2, n)

                # transform both f and s2 to the frequency domain
                # (zero-padding again, just to avoid circular convolution)
                g2f_tr = np.multiply(np.conj(spec2), specf)

                # it would be cleaner to use ifftshift here!
                u_dagger = np.fft.ifftshift(np.fft.irfft(g2f_tr,
                                                         n))[0:len(s1)]

                # The frequency spectrum of the noise source is included here.
                # A Riemann sum -- one could actually build in a more fancy
                # integration here.
                kern[i] = np.dot(u_dagger, T) * f.stats.delta

                if i % 50000 == 0:
                    print("Finished {} source locations.".format(i))

            return kern
Example #13
def g1g2_corr(wf1, wf2, corr_file, kernel, adjt, src, source_conf, kernelrun):

    # ToDo: Take care of saving metainformation
    # ToDo: Think about how to manage different types of sources
    #       (numpy array vs. read source from file as a config option)
    # ToDo: check whether to include autocorrs from user (now hardcoded off)
    # ToDo: Parallel loop(s)
    # ToDo: tests

    ntime, n, n_corr = get_ns(wf1, source_conf)

    taper = cosine_taper(ntime, p=0.05)

    with WaveField(wf1) as wf1, WaveField(wf2) as wf2:

        if wf1.stats['Fs'] != wf2.stats['Fs']:
            msg = 'Sampling rates of synthetic green\'s functions must match.'
            raise ValueError(msg)

        with NoiseSource(src) as nsrc:

            correlation = np.zeros(n_corr)

            if kernelrun:

                #if not os.path.exists(adjt):
                #    print('Adjoint source %s not found, skipping kernel.')
                #    return()

                kern = np.zeros((wf1.stats['ntraces'], len(adjt)))

                f = Stream()
                for adjtfile in adjt:
                    if adjtfile == '-':
                        return
                    f += read(adjtfile)[0]
                    f[-1].data = my_centered(f[-1].data, n_corr)

            # Loop over source locations
            for i in range(wf1.stats['ntraces']):

                # noise source spectrum at this location
                # if calculating kernel, the spectrum is location independent.
                S = nsrc.get_spect(i)

                if S.sum() == 0.:  # The spectrum has 0 phase anyway
                    continue

                s1 = np.ascontiguousarray(wf1.data[i, :] * taper)
                s2 = np.ascontiguousarray(wf2.data[i, :] * taper)

                spec1 = np.fft.rfft(s1, n)
                spec2 = np.fft.rfft(s2, n)

                g1g2_tr = np.multiply(np.conjugate(spec1), spec2)

                c = np.multiply(g1g2_tr, S)

                if kernelrun:

                    corr_temp = my_centered(
                        np.fft.ifftshift(np.fft.irfft(c, n)), n_corr)
                    # A Riemann sum
                    for j in range(len(adjt)):
                        kern[i, j] = np.dot(corr_temp,
                                            f[j].data) * f[j].stats.delta

                else:

                    correlation += my_centered(
                        np.fft.ifftshift(np.fft.irfft(c, n)), n_corr)

                if i % 50000 == 0:
                    print("Finished {} source locations.".format(i))

        if kernelrun:
            np.save(kernel, kern)

        else:
            trace = Trace()
            trace.stats.sampling_rate = wf1.stats['Fs']
            trace.data = correlation
            trace.write(filename=corr_file, format='SAC')
Example #14
    def setup_source_startingmodel(self, args):

        # plotting:
        colors = ['purple', 'g', 'b', 'orange']
        colors_cmaps = [
            plt.cm.Purples, plt.cm.Greens, plt.cm.Blues, plt.cm.Oranges
        ]
        print("Setting up source starting model.", end="\n")
        with io.open(os.path.join(args.source_model, 'source_config.yml'),
                     'r') as fh:
            source_conf = yaml.safe_load(fh)

        with io.open(os.path.join(source_conf['project_path'], 'config.yml'),
                     'r') as fh:
            conf = yaml.safe_load(fh)

        with io.open(source_conf['source_setup_file'], 'r') as fh:
            parameter_sets = yaml.safe_load(fh)
            if conf['verbose']:
                print("The following input parameters are used:", end="\n")
                pp = pprint.PrettyPrinter()
                pp.pprint(parameter_sets)

        # load the source locations of the grid
        grd = np.load(os.path.join(conf['project_path'], 'sourcegrid.npy'))

        # add the approximate spherical surface elements
        if grd.shape[-1] < 50000:
            surf_el = get_spherical_surface_elements(grd[0], grd[1])
        else:
            warn('Large grid; surface element computation slow. Using '
                 'approximate surface elements.')
            surf_el = np.ones(grd.shape[-1]) * conf['grid_dx_in_m']**2

        # get the relevant array sizes
        wfs = glob(os.path.join(conf['project_path'], 'greens', '*.h5'))
        if wfs != []:
            if conf['verbose']:
                print('Found wavefield stats.')
        else:
            raise FileNotFoundError('No wavefield database found. Run '
                                    'precompute_wavefield first.')
        with WaveField(wfs[0]) as wf:
            df = wf.stats['Fs']
            n = wf.stats['npad']
        freq = np.fft.rfftfreq(n, d=1. / df)
        n_distr = len(parameter_sets)
        coeffs = np.zeros((grd.shape[-1], n_distr))
        spectra = np.zeros((n_distr, len(freq)))

        # fill in the distributions and the spectra
        for i in range(n_distr):
            coeffs[:, i] = self.distribution_from_parameters(
                grd, parameter_sets[i], conf['verbose'])

            # plot
            outfile = os.path.join(args.source_model,
                                   'source_starting_model_distr%g.png' % i)
            if create_plot:
                plot_grid(grd[0],
                          grd[1],
                          coeffs[:, i],
                          outfile=outfile,
                          cmap=colors_cmaps[i % len(colors_cmaps)],
                          sequential=True,
                          normalize=False,
                          quant_unit='Spatial weight (-)',
                          axislabelpad=-0.1,
                          size=10)

            spectra[i, :] = self.spectrum_from_parameters(
                freq, parameter_sets[i])

        # plotting the spectra
        # plotting is not necessarily done to make sure code runs on clusters
        if create_plot:
            fig1 = plt.figure()
            ax = fig1.add_subplot(111)
            for i in range(n_distr):
                ax.plot(freq,
                        spectra[i, :] / spectra.max(),
                        color=colors[i % len(colors)])

            ax.set_xlabel('Frequency / Nyquist Frequency')
            plt.xticks([
                0,
                freq.max() * 0.25,
                freq.max() * 0.5,
                freq.max() * 0.75,
                freq.max()
            ], ['0', '0.25', '0.5', '0.75', '1'])
            ax.set_ylabel('Rel. PSD norm. to strongest spectrum (-)')
            fig1.savefig(
                os.path.join(args.source_model,
                             'source_starting_model_spectra.png'))

        # Save to an hdf5 file
        with h5py.File(
                os.path.join(args.source_model, 'iteration_0',
                             'starting_model.h5'), 'w') as fh:
            fh.create_dataset('coordinates', data=grd)
            fh.create_dataset('frequencies', data=freq)
            fh.create_dataset('model', data=coeffs.astype(np.float64))
            fh.create_dataset('spectral_basis', data=spectra.astype(np.float64))
            fh.create_dataset('surface_areas', data=surf_el.astype(np.float64))

        # Save a second hdf5 file with uniform spatial weights
        with h5py.File(os.path.join(args.source_model, 'spectral_model.h5'),
                       'w') as fh:
            uniform_spatial = np.ones(coeffs.shape)
            fh.create_dataset('coordinates', data=grd)
            fh.create_dataset('frequencies', data=freq)
            fh.create_dataset('model', data=uniform_spatial.astype(np.float64))
            fh.create_dataset('spectral_basis', data=spectra.astype(np.float64))
            fh.create_dataset('surface_areas', data=surf_el.astype(np.float64))
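
The frequency axis built above with np.fft.rfftfreq pairs with the padded
FFT length n stored in the wavefield stats. A small numpy sketch of its
properties (the values are illustrative):

import numpy as np

Fs, n = 1.0, 256
freq = np.fft.rfftfreq(n, d=1. / Fs)
assert len(freq) == n // 2 + 1
assert freq[0] == 0.0 and freq[-1] == Fs / 2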
Example #15
import sys

import matplotlib.pyplot as plt
import matplotlib.animation as manimation
import matplotlib.tri as tri

from noisi import WaveField  # assumed project import


#################################
v = 1.
stations = [(0., 0.)]
lonmin = -120.
lonmax = 120.
latmin = -60.
latmax = 60.
latc = 0.0
lonc = 0.0
resolution = 4
fps = 0.5

wf = WaveField(sys.argv[1])
t_min = float(sys.argv[2])
t_max = float(sys.argv[3])
t_step = float(sys.argv[4])
filename = sys.argv[5]
#################################

FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title='Wavefield', artist='Matplotlib',
                comment='Movie support!')
writer = FFMpegWriter(fps=fps, metadata=metadata)

fig = plt.figure()
plt.subplot(111)

map_x = wf.sourcegrid[0]
Example #16
def g1g2_corr(wf1, wf2, corr_file, src, source_conf, insta):
    """
    Compute noise cross-correlations from two .h5 'wavefield' files.
    Noise source distribution and spectrum is given by starting_model.h5
    It is assumed that noise sources are delta-correlated in space.
    """

    # ToDo: check whether to include autocorrs from user (now hardcoded off)
    # ToDo: Parallel loop(s)
    # ToDo: tests

    # Metainformation: Include the reference station names for both stations
    # from wavefield files, if possible. Do not include geographic information
    # from .csv file as this might be error-prone. Just add the geographic
    # info later if needed.

    with NoiseSource(src) as nsrc:

        ntime, n, n_corr, Fs = get_ns(wf1, source_conf, insta)

        # use a one-sided taper: The seismogram probably has a non-zero end,
        # being cut off wherever the solver stopped running.
        taper = cosine_taper(ntime, p=0.01)
        taper[0:ntime // 2] = 1.0
        ntraces = nsrc.src_loc[0].shape[0]
        correlation = np.zeros(n_corr)

        if insta:
            # open database
            dbpath = json.load(
                open(os.path.join(source_conf['project_path'],
                                  'config.json')))['wavefield_path']
            # open and determine Fs, nt
            db = instaseis.open_db(dbpath)
            # get receiver locations
            lat1 = geograph_to_geocent(float(wf1[2]))
            lon1 = float(wf1[3])
            rec1 = instaseis.Receiver(latitude=lat1, longitude=lon1)
            lat2 = geograph_to_geocent(float(wf2[2]))
            lon2 = float(wf2[3])
            rec2 = instaseis.Receiver(latitude=lat2, longitude=lon2)

        else:
            wf1 = WaveField(wf1)
            wf2 = WaveField(wf2)

        # Loop over source locations
        for i in range(ntraces):

            # noise source spectrum at this location
            S = nsrc.get_spect(i)

            if S.sum() == 0.:
                # If amplitude is 0, continue. (Spectrum has 0 phase anyway.)
                continue

            if insta:
                # get source locations
                lat_src = geograph_to_geocent(nsrc.src_loc[1, i])
                lon_src = nsrc.src_loc[0, i]
                fsrc = instaseis.ForceSource(latitude=lat_src,
                                             longitude=lon_src,
                                             f_r=1.e12)

                s1 = np.ascontiguousarray(
                    db.get_seismograms(
                        source=fsrc,
                        receiver=rec1,
                        dt=1. / source_conf['sampling_rate'])[0].data * taper)
                s2 = np.ascontiguousarray(
                    db.get_seismograms(
                        source=fsrc,
                        receiver=rec2,
                        dt=1. / source_conf['sampling_rate'])[0].data * taper)

            else:
                # read Green's functions
                s1 = np.ascontiguousarray(wf1.data[i, :] * taper)
                s2 = np.ascontiguousarray(wf2.data[i, :] * taper)

            # Fourier transform for greater ease of convolution
            spec1 = np.fft.rfft(s1, n)
            spec2 = np.fft.rfft(s2, n)

            # convolve G1G2
            g1g2_tr = np.multiply(np.conjugate(spec1), spec2)

            # convolve noise source
            c = np.multiply(g1g2_tr, S)

            # transform back
            correlation += my_centered(np.fft.ifftshift(np.fft.irfft(c, n)),
                                       n_corr) * nsrc.surf_area[i]

            # occasional info
            if i % 50000 == 0:
                print("Finished {} source locations.".format(i))


        # end of loop over all source locations

        if not insta:
            wf1.file.close()
            wf2.file.close()

        # save output
        trace = Trace()
        trace.stats.sampling_rate = Fs
        trace.data = correlation
        # try to add some meta data
        try:
            sta1 = wf1.stats['reference_station']
            sta2 = wf2.stats['reference_station']
            trace.stats.station = sta1.split('.')[1]
            trace.stats.network = sta1.split('.')[0]
            trace.stats.location = sta1.split('.')[2]
            trace.stats.channel = sta1.split('.')[3]
            trace.stats.sac = {}
            trace.stats.sac['kuser0'] = sta2.split('.')[1]
            trace.stats.sac['kuser1'] = sta2.split('.')[0]
            trace.stats.sac['kuser2'] = sta2.split('.')[2]
            trace.stats.sac['kevnm'] = sta2.split('.')[3]
        except Exception:
            pass

        trace.write(filename=corr_file, format='SAC')
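
The SAC header fields written above let the station pair be reconstructed
when the correlation is read back. A hedged sketch (the file name is
illustrative):

from obspy import read

tr = read('example_correlation.sac')[0]
sta1 = '.'.join([tr.stats.network, tr.stats.station,
                 tr.stats.location, tr.stats.channel])
sta2 = '.'.join([tr.stats.sac['kuser1'], tr.stats.sac['kuser0'],
                 tr.stats.sac['kuser2'], tr.stats.sac['kevnm']])
print(sta1, sta2)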