Code Example #1
def read_ascii(path, NR, nt):
    import os
    import numpy as np
    from obspy.core import Stream, Stats, Trace
    dat_type = 'semd'
    comp1 = 'FXX'
    comp2 = 'FXY'
    stream = Stream()
    for rec_x in range(0,NR):
        file_name_in1 = path + 'P.R' + str(int(rec_x+1)) + '.' + comp1 + '.' + dat_type
        file_name_in2 = path + 'P.R' + str(int(rec_x+1)) + '.' + comp2 + '.' + dat_type
        xz1 = np.genfromtxt(file_name_in1)
        xz2 = np.genfromtxt(file_name_in2)
        deg = 0.0
        alpha = np.arctan(xz2[:nt,1]/(1.0e-40 + xz1[:nt,1])) # angle of projection
        direction = np.sign(np.cos(deg*np.pi/180.0)*xz1[:nt,1]*np.cos(alpha) + np.sin(deg*np.pi/180.0)*xz2[:nt,1]*np.cos(alpha))    
        data = direction*np.sqrt(xz1[:nt,1]**2 + xz2[:nt,1]**2)*np.cos(alpha) # scalar radial component

        stats = Stats()
        stats.filename = path + 'P.R' + str(int(rec_x+1))
        stats.starttime = xz1[0,0]
        stats.delta = xz1[1,0] - xz1[0,0]
        stats.npts = len(xz1[:nt,0])

        try:
            parts = os.path.basename(file_name_in1).split('.')
            stats.network = parts[0]
            stats.station = parts[1]
            stats.channel = parts[2]
        except IndexError:
            pass

        stream.append(Trace(data=data[:], header=stats))

    return stream
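A minimal usage sketch for the reader above. The path and receiver count are hypothetical; the function expects SPECFEM-style two-component ASCII files named P.R<n>.FXX.semd and P.R<n>.FXY.semd under the given path.

# Hypothetical example: combine the horizontal components of 10 receivers,
# keeping the first 5000 samples of each trace.
st = read_ascii('OUTPUT_FILES/', NR=10, nt=5000)
print(st)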
Code Example #2
def processData(name):
    # Data is assumed to be in PICKLE format in Data/<eventname>/Originals/
    dir = 'Data/' + name + '/'
    stalist = glob.glob(dir + '/Originals/*PICKLE')

    for s in range(len(stalist)):
        try:
            onestation = read(stalist[s], format='PICKLE')

            # Cut components to the same length
            onestation.trim(starttime=onestation[0].stats.starttime + 300,
                            endtime=onestation[0].stats.endtime - 300)

            # Find if component names are different
            seisZ = onestation.select(channel='BHZ')
            if len(seisZ) > 1:
                seisZ = seisZ.select(location='10')

            seisN = onestation.select(channel='BHN')
            if len(seisN) == 0:
                seisN = onestation.select(channel='BH2')
            if len(seisN) > 1:
                seisN = seisN.select(location='10')

            seisE = onestation.select(channel='BHE')
            if len(seisE) == 0:
                seisE = onestation.select(channel='BH1')
            if len(seisE) > 1:
                seisE = seisE.select(location='10')

            print(seisZ, seisN, seisE)
            seisZ[0].stats = onestation[0].stats

            # Rotate components from North and East to Radial and Transverse
            seisRtmp, seisTtmp = obspy.signal.rotate.rotate_ne_rt(
                seisN[0].data, seisE[0].data, seisZ[0].stats['baz'])
            seisR = seisN[0].copy()
            seisR.stats['channel'] = 'BHR'
            seisR.data = seisRtmp
            seisT = seisN[0].copy()
            seisT.stats['channel'] = 'BHT'
            seisT.data = seisTtmp

            # Copy values into stats for vertical component
            seisZ[0].stats = onestation[0].stats

            # Produce new stream with Vertical, Radial and Transverse
            seisnew = Stream()
            seisnew.append(seisZ[0])
            seisnew.append(seisR)
            seisnew.append(seisT)
            # Write out in PICKLE format
            filename = dir + seisZ[0].stats.network + '.' + seisZ[
                0].stats.station + '.PICKLE'
            seisnew.write(filename, 'PICKLE')
        except Exception:
            print('FAILED for', stalist[s])

    print('Data process complete')
Code Example #3
File: readers.py Project: gregdavies/seisflowsSF
def ascii(path, filenames):
    from numpy import loadtxt
    from obspy.core import Stream, Stats, Trace

    stream = Stream()
    for filename in filenames:
        stats = Stats()
        data = loadtxt(path + '/' + filename)

        stats.filename = filename
        stats.starttime = data[0, 0]
        stats.delta = data[1, 0] - data[0, 0]
        stats.npts = len(data[:, 0])

        try:
            parts = filename.split('.')
            stats.network = parts[0]
            stats.station = parts[1]
            stats.channel = parts[2]
        except IndexError:
            pass

        stream.append(Trace(data=data[:, 1], header=stats))

    return stream
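A quick usage sketch (the file names are hypothetical examples of the NET.STA.CHAN.suffix convention that the try block parses):

st = ascii('traces/obs', ['AA.S0001.BXZ.semd', 'AA.S0002.BXZ.semd'])
for tr in st:
    print(tr.stats.station, tr.stats.npts)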
Code Example #4
File: processData.py Project: Fran89/pydmt
def selectUniqueTraces(tr, args):

    # Test on the horizontal component, since if only the vertical component
    # exists, no xcorr on horizontals is allowed --> crash

    st = Stream()
    List = []
    ST = []
    CleanList = []

    # Collect stations that have a north component
    for i in range(len(tr)):
        if tr[i].stats.channel[2:3] == "N":
            List.append(tr[i].stats.station)

    # Stations appearing more than once have duplicate traces
    for i in range(len(tr)):
        a = List.count(tr[i].stats.station)
        if a > 1:
            ST.append(tr[i].stats.station)

    d = Counter(ST)
    for key in d:
        CleanList.append(key)

    # Keep only traces from stations without duplicates
    for i in range(len(tr)):
        if CleanList.count(tr[i].stats.station) == 0:
            st.append(tr[i])

    return st
Code Example #5
File: processing.py Project: junxie01/ANTS
def split_traces(s, length_in_sec, min_len, verbose, ofid):
    """
    Split an ObsPy stream object with multiple traces into a stream with traces of a predefined
    maximum length.
    """

    s_new = Stream()

    #- loop through traces ------------------------------------------------------------------------

    for k in np.arange(len(s)):

        #- set initial start time
        start = s[k].stats.starttime

        #- march through the trace until the endtime is reached
        while start < s[k].stats.endtime - min_len:
            s_copy = s[k].copy()
            s_copy.trim(start,
                        start + length_in_sec - 1 / (s[k].stats.sampling_rate))
            s_new.append(s_copy)
            del s_copy
            collect()
            start += length_in_sec

    return s_new
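For example, cutting a day-long stream into hour-long segments, discarding any leftover shorter than 60 seconds (a sketch; note that verbose and ofid are not used by the body above):

hourly = split_traces(st, length_in_sec=3600, min_len=60,
                      verbose=False, ofid=None)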
Code Example #6
    def write_adjoint_traces(self, path, syn, dat, channel):
        """ Computes adjoint traces from observed and synthetic traces
        """
        nt, dt, _ = self.get_time_scheme(syn)
        nr, _ = self.get_network_size(syn)

        Del = np.loadtxt(path +'/'+ '../../delta_syn_ij')
        rsd = np.loadtxt(path +'/'+ '../../rsd_ij')

        # initialize trace arrays
        adj = Stream()
        for i in range(nr):
            adj.append(Trace(
                data=np.zeros(nt, dtype='float32'),
                header=syn[i].stats))

        # generate adjoint traces
        for i in range(nr):
            for j in range(i):
                si = syn[i].data
                sj = syn[j].data

                adj[i].data += rsd[i,j] * \
                               self.adjoint_dd(si, sj, +Del[i,j], nt, dt)
                adj[j].data -= rsd[i,j] * \
                               self.adjoint_dd(sj, si, -Del[i,j], nt, dt)


        # optional weighting
        adj = self.apply_weights(adj)

        # write adjoint traces
        self.writer(adj, path, channel)
Code Example #7
def spectral_residuals(config, spec_st, sourcepar):
    """
    Compute spectral residuals with respect to an average spectral model.

    Saves a stream of residuals to disk using pickle.
    """
    # Use weighted means
    means = sourcepar.means_weight
    params_name = ('Mw', 'fc', 't_star')
    sourcepar_mean = dict(
        zip(params_name, [means['Mw'], means['fc'], means['t_star']]))
    residuals = Stream()
    for station in set(x.stats.station for x in spec_st.traces):
        spec_st_sel = spec_st.select(station=station)
        for spec in spec_st_sel.traces:
            if spec.stats.channel[-1] != 'H':
                continue

            xdata = spec.get_freq()
            synth_mean_mag = spectral_model(xdata, **sourcepar_mean)

            res = spec.copy()
            res.data_mag = spec.data_mag - synth_mean_mag
            res.data = mag_to_moment(res.data_mag)
            residuals.append(res)

    # Save residuals as pickle file
    evid = config.hypo.evid
    res_file = os.path.join(config.options.outdir, evid + '-residuals.pickle')
    logger.info('Spectral residuals saved to: %s' % res_file)
    with open(res_file, 'wb') as fp:
        pickle.dump(residuals, fp)
Code Example #8
def process_syn(stream,
                starttime,
                endtime,
                sampling_rate,
                npts,
                filt_freq,
                max_percentage=0.05):

    stream_process = Stream()
    for tr in stream:
        new_tr = tr.copy()
        cut_func(new_tr, starttime, endtime)

        # detrend, demean, taper
        new_tr.detrend("linear")
        new_tr.detrend("demean")
        new_tr.taper(max_percentage=max_percentage, type="hann")

        # geometric compensation
        # filter and interpolation
        filter_synt(new_tr, filt_freq)
        new_tr.interpolate(sampling_rate=sampling_rate,\
                starttime=new_tr.stats.starttime,npts=npts)

        # detrend, demean, taper
        new_tr.detrend("linear")
        new_tr.detrend("demean")
        new_tr.taper(max_percentage=max_percentage, type="hann")

        new_tr.data = np.require(new_tr.data, dtype=np.float32)
        stream_process.append(new_tr)

    return stream_process
Code Example #9
File: DP_daily.py Project: chengyuxuannju/DPratio
def get_data(filedir, year, julday):
    """
    Function to read all available receiver functions that meet SNR threshold
    :param filedir: String()
    :param julday: Integer
    :param year: Integer
    :return: Stream() objects
    """
    # Define empty streams
    trZ = Stream()
    trP = Stream()

    filename = str(year) + "." + str(julday)

    # Loop through directory and load files
    for file in os.listdir(filedir):
        if fnmatch.fnmatch(file, filename + '.BHZ'):
            tr = read(filedir + file)
            trZ.append(tr[0])
        elif fnmatch.fnmatch(file, filename + '.P'):
            tr = read(filedir + file)
            trP.append(tr[0])

    return trZ, trP
Code Example #10
def process_syn(stream, starttime, endtime, sampling_rate, npts, filt_freq,
                max_percentage=0.05):

    stream_process = Stream()
    for tr in stream:
        new_tr = tr.copy()
        cut_func(new_tr, starttime, endtime)

        # detrend, demean, taper
        new_tr.detrend("linear")
        new_tr.detrend("demean")
        new_tr.taper(max_percentage=max_percentage, type="hann")

        # geometric compensation
        # filter and interpolation
        filter_synt(new_tr, filt_freq)
        new_tr.interpolate(sampling_rate=sampling_rate,\
                starttime=new_tr.stats.starttime,npts=npts)

        # detrend, demean, taper
        new_tr.detrend("linear")
        new_tr.detrend("demean")
        new_tr.taper(max_percentage=max_percentage, type="hann")

        new_tr.data = np.require(new_tr.data, dtype=np.float32)
        stream_process.append(new_tr)

    return stream_process
Code Example #11
File: readers.py Project: bch0w/seisflows
def ascii(path, filename):
    """
    Reads SPECFEM3D-style ASCII data

    :type path: str
    :param path: path to datasets
    :type filename: str
    :param filename: file to read
    """
    st = Stream()
    stats = Stats()

    time, data = loadtxt(os.path.join(path, filename)).T

    stats.filename = filename
    stats.starttime = time[0]
    stats.delta = time[1] - time[0]
    stats.npts = len(data)

    try:
        parts = filename.split(".")
        stats.network = parts[0]
        stats.station = parts[1]
        stats.channel = parts[2]
    except IndexError:
        pass

    st.append(Trace(data=data, header=stats))

    return st
Code Example #12
def ascii(path, filenames):
    """ Reads SPECFEM3D-style ascii data
    """
    from numpy import loadtxt
    from obspy.core import Stream, Stats, Trace

    stream = Stream()
    for filename in filenames:
        stats = Stats()
        data = loadtxt(path +'/'+ filename)

        stats.filename = filename
        stats.starttime = data[0,0]
        stats.delta = data[1,0] - data[0,0]
        stats.npts = len(data[:,0])

        try:
            parts = filename.split('.')
            stats.network = parts[0]
            stats.station = parts[1]
            stats.channel = parts[2]
        except IndexError:
            pass

        stream.append(Trace(data=data[:,1], header=stats))

    return stream
Code Example #13
def selectUniqueTraces(tr, args):

    # Test on the horizontal component, since if only the vertical component
    # exists, no xcorr on horizontals is allowed --> crash

    st = Stream()
    List = []
    ST = []
    CleanList = []

    for i in range(len(tr)):
        if (tr[i].stats.channel[2:3] == "N"):
            List.append(tr[i].stats.station)

    for i in range(len(tr)):
        a = List.count(tr[i].stats.station)
        if (a > 1):
            ST.append(tr[i].stats.station)

    d = Counter(ST)
    for key in d:
        CleanList.append(key)

    for i in range(len(tr)):
        if CleanList.count(tr[i].stats.station) == 0:
            st.append(tr[i])

    return st
Code Example #14
File: bs_lsr.py Project: alanfbaird/ms_attenuation
def noise_window_trace(st, window_len_time):
    """Get a noise window from the start of each trace in st"""
    noise_st = Stream()
    for tr in st:
        noise_tr = tr.slice(tr.stats.starttime,
                            tr.stats.starttime + window_len_time)
        noise_st.append(noise_tr)
    return noise_st
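A self-contained sketch of the helper above, taking the first 5 seconds of a synthetic trace as the noise window:

import numpy as np
from obspy.core import Stream, Trace

st = Stream([Trace(data=np.random.randn(6000),
                   header={'sampling_rate': 100.0})])
noise = noise_window_trace(st, window_len_time=5.0)
print(noise[0].stats.endtime - noise[0].stats.starttime)  # ~5.0 s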
Code Example #15
File: read_gns_sm_data.py Project: obspy/branches
 def set_dummy(self):
     """
     Create a dummy stream object. This is used by the sm_gui.py script 
     in case no data is found.
     """
     smdict = {}
     smdict['lat'] = 0.
     smdict['lon'] = 0.
     smdict['site'] = 'None'
     smdict['site-name'] = 'None'
     smdict['instrument'] = 'None'
     smdict['eventtime'] = UTCDateTime(1970, 1, 1, 0, 0, 0, 0) 
     smdict['hypodep'] = 0.
     smdict['centdep'] = 0.
     smdict['lilax'] = 0.
     smdict['compdir'] = 0.
     smdict['epicdist'] = 0.
     smdict['prepend'] = 0.
     smdict['append'] = 0.
     smdict['Ml'] = 0.
     smdict['Ms'] = 0.
     smdict['Mw'] = 0.
     smdict['Mb'] = 0.
     st = Stream()
     stats = {'network': '', 'delta': 1.0,
              'station': 'No data available', 'location': '',
              'starttime': UTCDateTime(1970, 1, 1, 0, 0, 0, 0),
              'npts': 100, 'calib': 1.0,
              'sampling_rate': 1.0, 'channel': 'None','smdict':smdict}
     data = np.zeros(100)
     for i in range(9):
         st.append(Trace(data, stats))
     return st
Code Example #16
    def write_adjoint_traces(self, path, syn, dat, channel):
        """ Computes adjoint traces from observed and synthetic traces
        """
        nt, dt, _ = self.get_time_scheme(syn)
        nr, _ = self.get_network_size(syn)

        Del = np.loadtxt(path + '/' + '../../delta_syn_ij')
        rsd = np.loadtxt(path + '/' + '../../rsd_ij')

        # initialize trace arrays
        adj = Stream()
        for i in range(nr):
            adj.append(
                Trace(data=np.zeros(nt, dtype='float32'), header=syn[i].stats))

        # generate adjoint traces
        for i in range(nr):
            for j in range(i):
                si = syn[i].data
                sj = syn[j].data

                adj[i].data += rsd[i,j] * \
                               self.adjoint_dd(si, sj, +Del[i,j], nt, dt)
                adj[j].data -= rsd[i,j] * \
                               self.adjoint_dd(sj, si, -Del[i,j], nt, dt)

        # optional weighting
        adj = self.apply_weights(adj)

        # write adjoint traces
        self.writer(adj, path, channel)
Code Example #17
    def combine(self, stats, tag='obs'):
        """ Combines data from multiple sources
        """
        dirnames = self.dirnames
        filenames = self.filenames

        nt = PAR.NT_PADDED
        dt = PAR.DT
        nr = PAR.NREC

        for ii, filename in enumerate(filenames):
            # create object to hold summed data
            data_sum = Stream()
            for ir in range(nr):
                data_sum.append(Trace(
                    data=np.zeros(nt, dtype='float32'),
                    header=globals()[tag][dirnames[0]][filename][ir].stats))

            # linear combination over sources
            for jj, dirname in enumerate(dirnames):
                data = self.copy_data(dirname, filename)
                imin = int(stats['ts'][jj]/dt)
                imax = imin + nt
                for ir in range(nr):
                    data[ir].data *= stats['wr'][ir,jj]
                    data[ir].data *= stats['ws'][jj]
                    data_sum[ir].data[imin:imax] += data[ir].data[imin:imax]

            # save to disk
            fullname = solver.cwd +'/'+ 'traces/' + tag
            preprocess.writer(data_sum, fullname, filename)
Code Example #18
def build_spectra(config, st):
    """
    Build spectra and the spec_st object.

    Computes S-wave (displacement) spectra from
    accelerometers and velocimeters, uncorrected for attenuation,
    corrected for instrumental constants, normalized by
    hypocentral distance.
    """
    logger.info('Building spectra...')
    spec_st = Stream()
    specnoise_st = Stream()

    # sort by trace id
    for trace in sorted(st, key=lambda tr: tr.id):
        try:
            _check_data_len(config, trace)
            trace_signal, trace_noise = _cut_signal_noise(config, trace)
            _check_noise_level(trace_signal, trace_noise)
            spec = _build_spectrum(config, trace_signal)
            specnoise = _build_spectrum(config, trace_noise)
            _check_spectral_sn_ratio(config, spec, specnoise)
        except RuntimeError as msg:
            # RuntimeError is for skipped spectra
            logger.warning(msg)
            continue
        except ValueError as msg:
            # ValueError is for ignored spectra, which are still stored
            logger.warning(msg)
            trace.stats.ignore = True
            spec.stats.ignore = True
            specnoise.stats.ignore = True
        spec_st.append(spec)
        specnoise_st.append(specnoise)

    if not spec_st:
        logger.error('No spectra left! Exiting.')
        ssp_exit()

    # build H component
    _build_H(spec_st, specnoise_st, config.wave_type)

    # convert the spectral amplitudes to moment magnitude
    for spec in spec_st:
        spec.data_mag = moment_to_mag(spec.data)
        spec.data_log_mag = moment_to_mag(spec.data_log)

    # apply station correction if a residual file is specified in config
    spec_st = station_correction(spec_st, config)

    # build the weight spectrum
    weight_st = _build_weight_st(config, spec_st, specnoise_st)

    logger.info('Building spectra: done')
    if config.weighting == 'noise':
        for specnoise in specnoise_st:
            specnoise.data_mag = moment_to_mag(specnoise.data)
        return spec_st, specnoise_st, weight_st
    else:
        return spec_st
Code Example #19
File: request.py Project: mitchburnett/pisces
def get_waveforms(session, wfdisc, station=None, channel=None, starttime=None, 
        endtime=None, wfids=None):
    """
    Get waveforms.

    Parameters
    ----------
    session : sqlalchemy.orm.Session instance
        Must be bound.
    wfdisc : mapped Wfdisc table class
    station, channel : str, optional
        Desired station, channel code strings
    starttime, endtime : float, optional
        Epoch start and end times.
        Traces will be cut to these times.
    wfids : iterable of int, optional
        Wfdisc wfids.  Obviates the above arguments and just returns full Wfdisc
        row waveforms.

    Returns
    -------
    obspy.Stream
        Traces are merged and cut to requested times.

    """
    #TODO: add evids= option?, use with stawin= option in .execute method?
    #TODO: implement get_arrivals if arrivals=True
    Wfdisc = wfdisc

    st = Stream()
    if not wfids:
        t1 = float(starttime)
        t2 = float(endtime)
        sta = station
        chan = channel

        t1_utc = UTCDateTime(float(t1))
        t2_utc = UTCDateTime(float(t2))

        wfs = get_wfdisc_rows(session, Wfdisc, sta, chan, t1, t2)

        #TODO: do arrival stuff here
        for wf in wfs:
            try:
                tr = wfdisc2trace(wf)
                tr.trim(t1_utc, t2_utc)
                st.append(tr)
            except AttributeError:
                #tr is None b/c data couldn't be read
                pass
    else:
        wfs = get_wfdisc_rows(session, Wfdisc, wfids=wfids)
        for wf in wfs:
            try:
                tr = wfdisc2trace(wf)
                st.append(tr)
            except AttributeError:
                pass

    return st
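A hypothetical call, assuming a bound SQLAlchemy session and a mapped Wfdisc table class (the station/channel values and epoch times below are made-up examples):

st = get_waveforms(session, Wfdisc, station='ANMO', channel='BHZ',
                   starttime=1262304000.0, endtime=1262307600.0)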
Code Example #20
def ReadData(wildcast, origintime, window_start, window_length):
    """
    Read in coda waveforms in the time window of interest.
    """
    fname_list = glob(wildcast)
    fname_list.sort()
    data_stream = Stream()
    for fname in fname_list:
        try:
            tr = read(fname, format='SAC')[0]
            ##########
            if origintime is None:
                origintime = tr.stats.starttime
            starttime = origintime + window_start
            endtime = starttime + window_length
            tr.trim(starttime, endtime)
            tr.detrend('linear')
            tr.detrend('demean')
            data_stream.append(tr)
        except Exception as ex:
            print(ex)
            continue
    if len(data_stream) == 0:
        print('There are NO seismograms corresponding to %s!' % wildcast)
        print('STOP')
        exit()
    return data_stream
Code Example #21
File: processing.py Project: echolite/ANTS
def split_traces(s, length_in_sec, min_len, verbose, ofid):
    """
    Split an ObsPy stream object with multiple traces into a stream with traces of a predefined
    maximum length.
    """

    s_new = Stream()

    # - loop through traces ------------------------------------------------------------------------

    for k in np.arange(len(s)):

        # - set initial start time
        start = s[k].stats.starttime

        # - march through the trace until the endtime is reached
        while start < s[k].stats.endtime - min_len:
            s_copy = s[k].copy()
            s_copy.trim(start, start + length_in_sec - 1 / (s[k].stats.sampling_rate))
            s_new.append(s_copy)
            del s_copy
            collect()
            start += length_in_sec

    return s_new
Code Example #22
 def test_SavingSmallASCII(self):
     """
     Tests writing small ASCII strings.
     """
     tempfile = NamedTemporaryFile().name
     st = Stream()
     st.append(Trace(data=np.fromstring("A" * 8, "|S1")))
     st.write(tempfile, format="MSEED")
     os.remove(tempfile)
Code Example #23
def bin_all(stream1, stream2=None, pws=False):
    """ 
    Function to bin all streams into a single trace.
    This can be done using a linear stack (i.e., simple
    mean), or using phase-weighted stacking.

    Parameters
    ----------
    stream1 : :class:`~obspy.core.Stream`
        Stream of equal-length seismograms to be stacked into
        a single trace.
    stream2 : :class:`~obspy.core.Stream`
        Optionally stack a second stream in the same operation.
    pws : bool
        Whether or not to perform phase-weighted stacking

    Returns
    -------
    stack : :class:`~obspy.core.Stream`
        Stream containing one or two stacked traces,
        depending on the number of input streams

    """

    # Initialize empty stack stream
    stack = Stream()
    for stream in [stream1, stream2]:
        try:
            # Copy stats from stream1
            stats = stream[0].stats

            # Initialize arrays
            array = np.zeros(len(stream[0].data))
            pweight = np.zeros(len(stream[0].data), dtype=complex)

            # Get phase weights
            for tr in stream:
                array += tr.data
                hilb = hilbert(tr.data)
                phase = np.arctan2(hilb.imag, hilb.real)
                pweight += np.exp(1j * phase)

            # Normalize
            array = array / len(stream)
            weight = np.real(abs(pweight / len(stream)))
            # Regular linear stack
            if not pws:
                weight = np.ones(len(stream[0].data))

            # Put back into traces
            stack.append(Trace(data=weight * array, header=stats))

        except Exception:
            continue

    return stack
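In the phase-weighted stack above, each sample of the linear stack is scaled by the coherence of the instantaneous phases, |(1/N) * sum_k exp(i*phi_k)|, so samples where the traces agree in phase are preserved and incoherent noise is suppressed. A usage sketch (trR and trT are hypothetical streams of equal-length traces):

stack = bin_all(trR, trT, pws=True)
radial_stack, transverse_stack = stack[0], stack[1]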
Code Example #24
 def _read_data(self):
     # Read data
     data = [None for _ in self.station.name]
     for i in range(self.station.nsta):
         st = Stream()
         for c in self.header.component:
             fname = '%s/%s.%c.dat' % (self.header.datadir,
                                       self.station.name[i], c)
             st.append(read(fname, format='SAC')[0])
         data[i] = st
     
     return data
Code Example #25
def process_traces(config, st):
    """Remove mean, deconvolve and ignore unwanted components."""
    out_st = Stream()
    for id in sorted(set(tr.id for tr in st)):
        # We still use a stream, since the trace can have
        # gaps or overlaps
        st_sel = st.select(id=id)
        network, station, location, channel = id.split('.')
        # build a list of all possible ids, from station only
        # to full net.sta.loc.chan
        ss = [
            station,
        ]
        ss.append('.'.join((network, station)))
        ss.append('.'.join((network, station, location)))
        ss.append('.'.join((network, station, location, channel)))
        if config.use_stations is not None:
            combined = ("(" + ")|(".join(config.use_stations) + ")").replace(
                '.', r'\.')
            if not any(re.match(combined, s) for s in ss):
                logger.warning('%s: ignored from config file' % id)
                continue
        if config.ignore_stations is not None:
            combined = ("(" + ")|(".join(config.ignore_stations) +
                        ")").replace('.', '\.')
            if any(re.match(combined, s) for s in ss):
                logger.warning('%s: ignored from config file' % id)
                continue
        try:
            _add_hypo_dist_and_arrivals(config, st_sel)
            trace = _merge_stream(config, st_sel)
            trace.stats.ignore = False
            trace_process = _process_trace(config, trace)
            out_st.append(trace_process)
        except (ValueError, RuntimeError):
            continue

    if len(out_st) == 0:
        logger.error('No traces left! Exiting.')
        ssp_exit()

    # Rotate traces, if SH or SV is requested
    if config.wave_type in ['SH', 'SV']:
        for id in sorted(set(tr.id[:-1] for tr in out_st)):
            net, sta, loc, chan = id.split('.')
            st_sel = out_st.select(network=net,
                                   station=sta,
                                   location=loc,
                                   channel=chan + '?')
            t0 = max(tr.stats.starttime for tr in st_sel)
            t1 = min(tr.stats.endtime for tr in st_sel)
            st_sel.trim(t0, t1)
            st_sel.rotate('NE->RT')

    return out_st
Code Example #26
def _build_weight_st(config, spec_st, specnoise_st):
    """Build the weight spectrum."""
    weight_st = Stream()
    spec_ids = set(sp.id[:-1] for sp in spec_st if not sp.stats.ignore)
    for specid in spec_ids:
        try:
            spec_h = _select_spectra(spec_st, specid + 'H')[0]
            specnoise_h = _select_spectra(specnoise_st, specid + 'H')[0]
        except Exception:
            continue
        weight = _build_weight(config, spec_h, specnoise_h)
        weight_st.append(weight)
    return weight_st
Code Example #27
File: bs_lsr.py Project: alanfbaird/ms_attenuation
def window_trace(st, before_p, after_p):
    """Cut a window around each trace using before and after P pick times (sec)"""
    p_st = Stream()
    for tr in st:
        # Remove traces without P picks (SAC null value is -12345.0)
        if tr.stats.sac['t0'] != -12345.0:
            p_pick = tr.stats.starttime + tr.stats.sac['t0']
            p_tr = tr.slice(p_pick - before_p, p_pick + after_p)
            p_st.append(p_tr)

    return p_st
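Usage sketch, assuming each trace carries a SAC header whose 't0' field holds the P pick in seconds after the trace start:

p_windows = window_trace(st, before_p=1.0, after_p=4.0)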
Code Example #28
def test_SKS():
    import matplotlib
    matplotlib.use('Agg')
    import numpy as np
    from obspy.core import Stream
    from obspy.signal.rotate import rotate_ne_rt
    from telewavesim import utils as ut
    from telewavesim import wiggle as wg

    modfile = resource_filename('telewavesim',
                                'examples/models/model_SKS.txt')
    wvtype = 'SV'
    npts = 3000  # Number of samples
    dt = 0.05  # Sample distance in seconds
    slow = 0.04  # Horizontal slowness (or ray parameter) in s/km
    baz = np.arange(0., 360., 10.)
    model = ut.read_model(modfile)
    t1 = ut.calc_ttime(model, slow, wvtype=wvtype)
    assert round(t1, 1) == 21.6
    trR = Stream()
    trT = Stream()
    # Loop over range of data
    for bb in baz:
        # Calculate the plane wave seismograms
        trxyz = ut.run_plane(model, slow, npts, dt, bb, wvtype=wvtype)
        # Extract East, North and Vertical
        ntr = trxyz[0]
        etr = trxyz[1]
        ztr = trxyz[2]
        # Copy to radial and transverse
        rtr = ntr.copy()
        ttr = etr.copy()
        # Rotate to radial and transverse
        rtr.data, ttr.data = rotate_ne_rt(ntr.data, etr.data, bb)
        # Append to streams
        trR.append(rtr)
        trT.append(ttr)

    # Set frequency corners in Hz
    f1 = 0.01
    f2 = 0.2
    # Filter to get wave-like traces
    trR.filter('bandpass', freqmin=f1, freqmax=f2, corners=2, zerophase=True)
    trT.filter('bandpass', freqmin=f1, freqmax=f2, corners=2, zerophase=True)
    # Plot as wiggles
    with tempfile.TemporaryDirectory() as tempdir:
        wg.pw_wiggles_baz(trR, trT, 'test', btyp='baz', scale=0.05,
                          t1=t1, tmin=0., tmax=40, save=True,
                          ftitle=join(tempdir, 'sks'),
                          wvtype='SV')
Code Example #29
File: util_helpers.py Project: uafgeotools/pysep
def get_streams_from_dir(ddir):
    '''
    Get streams from dir (created by mass downloader)
    mseed to stream
    '''
    import glob
    import obspy
    from obspy.core import Stream

    st = Stream()

    for mseed_trace in glob.iglob(ddir + '/*.mseed'):
        tr = obspy.read(mseed_trace)
        st.append(tr[0])

    return st
Code Example #30
def rad2segy(fname, outfile):

    file_header, trace_headers, arr = read_rad(fname)

    dt = int(file_header.get('SPR_SAMPLING_INTERVAL', 0))

    out = Stream()
    out.stats = Stats()

    # Text header.
    header = [
        'Created by seg22segy.',
        'More info to come.',
    ]
    out.stats.textual_file_header = ''.encode()
    for line in header:
        out.stats.textual_file_header += '{:80s}'.format(line).encode()

    # Binary header.
    out.stats.binary_file_header = SEGYBinaryFileHeader()
    out.stats.binary_file_header.trace_sorting_code = 4
    out.stats.binary_file_header.sample_interval_in_microseconds_of_original_field_recording = dt
    out.stats.binary_file_header.seg_y_format_revision_number = 0x0100

    # Trace data.
    for i, trace in enumerate(arr):

        # Make the trace.
        tr = Trace(trace)

        # Add required data.
        tr.stats.delta = dt / 1e6  # Convert microseconds to seconds.
        tr.stats.starttime = 0  # Not strictly required.

        # Add yet more to the header (optional).
        tr.stats.segy = {'trace_header': SEGYTraceHeader()}
        tr.stats.segy.trace_header.trace_sequence_number_within_line = i + 1
        tr.stats.segy.trace_header.receiver_group_elevation = 0
        tr.stats.segy.trace_header.sampling_rate = 1 / dt

        # Append the trace to the stream.
        out.append(tr)

    outbase, ext = os.path.splitext(fname)
    outfile = outfile or '{}_{}.sgy'.format(outbase, BANDS[ext])
    out.write(outfile, format='SEGY', data_encoding=3)  # 3:int16, 5:float32

    return outfile
Code Example #31
    def _format_data(self, timeseries, channels, stats):
        """Format all data lines.

        Parameters
        ----------
            timeseries : obspy.core.Stream
                Stream containing traces with channel listed in channels
            channels : sequence
                List and order of channel values to output.

        Returns
        -------
        str
            A string formatted to be the data lines in a PCDCP file.
        """
        buf = []

        # create new stream
        timeseriesLocal = Stream()
        # Use a copy of the trace so that we don't modify the original.
        for trace in timeseries:
            traceLocal = trace.copy()
            if traceLocal.stats.channel == "D":
                traceLocal.data = ChannelConverter.get_minutes_from_radians(
                    traceLocal.data
                )

            # TODO - we should look into multiplying the trace all at once
            # like this, but this gives an error on Windows at the moment.
            # traceLocal.data = \
            #     numpy.round(numpy.multiply(traceLocal.data, 100)).astype(int)

            timeseriesLocal.append(traceLocal)

        traces = [timeseriesLocal.select(channel=c)[0] for c in channels]
        starttime = float(traces[0].stats.starttime)
        delta = traces[0].stats.delta

        for i in range(len(traces[0].data)):
            buf.append(
                self._format_values(
                    datetime.utcfromtimestamp(starttime + i * delta),
                    (t.data[i] for t in traces),
                    stats,
                )
            )

        return "".join(buf)
Code Example #32
def time_difference(isource, j):
    """Compute the time difference between data and synthetics

	Input:
	isource = index of the source
	j = scale at which we run the inversion process"""

    namedir1 = 'Source_' + str(isource + 1)
    os.chdir(namedir1)

    filename_d = 'OUTPUT_FILES/data_process.su'
    filename_s = 'OUTPUT_FILES/synthetics_process.su'
    filename_i = 'OUTPUT_FILES/Up_file_single.su'
    stream_d = read(filename_d, format='SU', byteorder='<')
    stream_s = read(filename_s, format='SU', byteorder='<')
    stream_i = read(filename_i, format='SU')

    misfit = 0.0
    stream_adj = Stream()
    for irec in range(0, nrec):
        adj = numpy.zeros(nt_s)
        trace_i = stream_i[irec].copy()
        if irec >= rstart - 1 and irec <= rend - 1:
            trace_d = stream_d[irec].copy()
            trace_s = stream_s[irec].copy()
            if trace_d.data.size != trace_s.data.size:
                raise ValueError(
                    "Data and synthetic signals should have the same length")
            nstep = trace_s.data.size
            adj_temp = numpy.zeros(nt_ref)
            starttime = tstart[j - 1] + irec * 25.0 * sstart[j - 1]
            istart = int(starttime / dt_ref)
            for it in range(0, nstep):
                misfit += 0.5 * numpy.power(
                    f * trace_s.data[it] - trace_d.data[it], 2.0)
                adj_temp[istart + it] = f * trace_s.data[it] - trace_d.data[it]
            trace_adj = Trace(data=adj_temp, header=trace_s.stats)
            trace_adj.interpolate(sampling_rate=1.0 / dt_s,
                                  starttime=trace_adj.stats.starttime,
                                  npts=nt_s)
        else:
            trace_adj = Trace(data=adj, header=trace_i.stats)
        trace_adj.data = numpy.require(trace_adj.data, dtype=numpy.float32)
        stream_adj.append(trace_adj)
    stream_adj.write('SEM/Up_file_single.su.adj', format='SU')
    os.chdir('..')

    return misfit
Code Example #33
def process(isource, j):
    """Read the DWT results in SU file
	and apply preprocessing

	Input:
	isource = index of the source
	j = scale at which we run the inversion process"""

    namedir1 = 'Source_' + str(isource + 1)
    os.chdir(namedir1)

    stream_d = read('OUTPUT_FILES/data_DWT.su', format='SU', byteorder='<')
    stream_s = read('OUTPUT_FILES/synthetics_DWT.su',
                    format='SU',
                    byteorder='<')

    stream_d_new = Stream()
    stream_s_new = Stream()
    for irec in range(0, nrec):
        trace_d = stream_d[irec].copy()
        trace_s = stream_s[irec].copy()
        # Window
        starttime = tstart[j - 1] + irec * 25.0 * sstart[j - 1]
        endtime = tend[j - 1] + irec * 25.0 * send[j - 1]
        cut_func(trace_d, starttime, endtime)
        cut_func(trace_s, starttime, endtime)
        # Tapering
        trace_d.taper(max_percentage=0.8, type="hann")
        trace_s.taper(max_percentage=0.8, type="hann")
        # Filtering
        filter_synt(trace_d, filt_freq)
        filter_synt(trace_s, filt_freq)
        # Tapering
        trace_d.taper(max_percentage=0.8, type="hann")
        trace_s.taper(max_percentage=0.8, type="hann")
        trace_d.data = numpy.require(trace_d.data, dtype=numpy.float32)
        trace_s.data = numpy.require(trace_s.data, dtype=numpy.float32)
        stream_d_new.append(trace_d)
        stream_s_new.append(trace_s)
    stream_d_new.write('OUTPUT_FILES/data_process.su',
                       format='SU',
                       byteorder='<')
    stream_s_new.write('OUTPUT_FILES/synthetics_process.su',
                       format='SU',
                       byteorder='<')
    os.chdir('..')
Code Example #34
def write_segy(f, data):
    """
    Write a 2D NumPY array to an open file handle f.
    """
    stream = Stream()

    # Data is in [0, 1] so rescale to 8-bit.
    # USING 16-bit because can't save as 8-bit int in ObsPy.
    data = np.int16((data - 0.5) * 255)

    for i, trace in enumerate(data):

        # Make the trace.
        tr = Trace(trace)

        # Add required data.
        tr.stats.delta = 0.004

        # Add yet more to the header (optional).
        tr.stats.segy = {'trace_header': SEGYTraceHeader()}
        tr.stats.segy.trace_header.trace_sequence_number_within_line = i + 1
        tr.stats.segy.trace_header.receiver_group_elevation = 0

        # Append the trace to the stream.
        stream.append(tr)

    # Text header.
    stream.stats = AttribDict()
    stream.stats.textual_file_header = '{:80s}'.format(
        'Generated by Keats.').encode()
    stream.stats.textual_file_header += '{:80s}'.format(
        'Sample interval unknown.').encode()
    stream.stats.textual_file_header += '{:80s}'.format(
        'IEEE floats.').encode()

    # Binary header.
    stream.stats.binary_file_header = SEGYBinaryFileHeader()
    stream.stats.binary_file_header.trace_sorting_code = 4
    stream.stats.binary_file_header.seg_y_format_revision_number = 0x0100

    # Write the data.
    # Encoding should be 8, but that doesn't work.
    stream.write(f, format='SEGY', data_encoding=3, byteorder=sys.byteorder)

    return f
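A usage sketch for the writer above (the input array must already be scaled to [0, 1], as the comment in the body assumes; the file name is hypothetical):

import numpy as np

data = np.random.rand(100, 500)  # 100 traces x 500 samples in [0, 1]
with open('random.sgy', 'wb') as f:
    write_segy(f, data)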
Code Example #35
File: cleanSetMT.py Project: Fran89/pydmt
def purgeStream(st, l):

    # Parameters: st: Stream, l: list with station numbers [0 to nr stations]

    new = Stream()

    ls_Sta = getSTationslist(st)
    for i in range(len(l)):
        a = l[i] - i
        ls_Sta.pop(a)

    for i in range(len(st)):
        sta = st[i].stats.station
        rem = [y for y in ls_Sta if sta == y]
        if len(rem) == 1:
            new.append(st[i])

    return new
Code Example #36
File: cleanSetMT.py Project: pattylin/pydmt
def purgeStream(st, l):

    # Parameters: st: Stream, l: list with station numbers [0 to nr stations]

    new = Stream()

    ls_Sta = getSTationslist(st)
    for i in range(len(l)):
        a = l[i] - i
        ls_Sta.pop(a)

    for i in range(len(st)):
        sta = st[i].stats.station
        rem = [y for y in ls_Sta if sta == y]
        if (len(rem) == 1):
            new.append(st[i])

    return new
Code Example #37
    def _format_data(self, timeseries, channels, stats):
        """Format all data lines.

        Parameters
        ----------
            timeseries : obspy.core.Stream
                Stream containing traces with channel listed in channels
            channels : sequence
                List and order of channel values to output.

        Returns
        -------
        str
            A string formatted to be the data lines in a PCDCP file.
        """
        buf = []

        # create new stream
        timeseriesLocal = Stream()
        # Use a copy of the trace so that we don't modify the original.
        for trace in timeseries:
            traceLocal = trace.copy()
            if traceLocal.stats.channel == "D":
                traceLocal.data = ChannelConverter.get_minutes_from_radians(traceLocal.data)

            # TODO - we should look into multiplying the trace all at once
            # like this, but this gives an error on Windows at the moment.
            # traceLocal.data = \
            #     numpy.round(numpy.multiply(traceLocal.data, 100)).astype(int)

            timeseriesLocal.append(traceLocal)

        traces = [timeseriesLocal.select(channel=c)[0] for c in channels]
        starttime = float(traces[0].stats.starttime)
        delta = traces[0].stats.delta

        for i in range(len(traces[0].data)):
            buf.append(
                self._format_values(
                    datetime.utcfromtimestamp(starttime + i * delta), (t.data[i] for t in traces), stats
                )
            )

        return "".join(buf)
Code Example #38
def time_difference(isource, j):
	"""Compute the time difference between data and synthetics

	Input:
	isource = index of the source
	j = scale at which we run the inversion process"""

	namedir1 = 'Source_' + str(isource + 1)
	os.chdir(namedir1)

	filename_d = 'OUTPUT_FILES/data_process.su'
	filename_s = 'OUTPUT_FILES/synthetics_process.su'
	filename_i = 'OUTPUT_FILES/Up_file_single.su'
	stream_d = read(filename_d, format='SU', byteorder='<')
	stream_s = read(filename_s, format='SU', byteorder='<')
	stream_i = read(filename_i, format='SU')

	misfit = 0.0
	stream_adj = Stream()
	for irec in range(0, nrec):
		adj = numpy.zeros(nt_s)
		trace_i = stream_i[irec].copy()
		if irec >= rstart - 1 and irec <= rend - 1:
			trace_d = stream_d[irec].copy()
			trace_s = stream_s[irec].copy()
			if trace_d.data.size != trace_s.data.size:
				raise ValueError("Data and synthetic signals should have the same length")
			nstep = trace_s.data.size
			adj_temp = numpy.zeros(nt_ref)
			starttime = tstart[j - 1] + irec * 25.0 * sstart[j - 1]
			istart = int(starttime / dt_ref)
			for it in range(0, nstep):
				misfit += 0.5 * numpy.power(f * trace_s.data[it] - trace_d.data[it], 2.0)
				adj_temp[istart + it] = f * trace_s.data[it] - trace_d.data[it]
			trace_adj = Trace(data=adj_temp, header=trace_s.stats)
			trace_adj.interpolate(sampling_rate=1.0 / dt_s, starttime=trace_adj.stats.starttime, npts=nt_s)
		else:
			trace_adj = Trace(data=adj, header=trace_i.stats)
		trace_adj.data = numpy.require(trace_adj.data, dtype=numpy.float32)
		stream_adj.append(trace_adj)
	stream_adj.write('SEM/Up_file_single.su.adj', format='SU')
	os.chdir('..')

	return misfit
Code Example #39
File: DWT.py Project: ArianeDucellier/multiscale
def compute_DWT(isource, j):
    """Read the results of the simulation in SU file
	and compute the wavelet transform

	Input:
	isource = index of the source
	j = scale at which we run the inversion process"""

    namedir1 = 'Source_' + str(isource + 1)
    os.chdir(namedir1)

    filename_d = '../../Data/data_shot' + str(isource + 1) + '.su'
    filename_s = 'OUTPUT_FILES/Up_file_single.su'
    stream_d = read(filename_d, format='SU')
    stream_s = read(filename_s, format='SU')

    stream_d_DWT = Stream()
    stream_s_DWT = Stream()
    for irec in range(0, nrec):
        trace_d = stream_d[irec].copy()
        trace_s = stream_s[irec].copy()
        # Interpolation: We need the same sampling rate to carry out the DWT
        trace_d.interpolate(sampling_rate=1.0 / dt_ref,
                            starttime=trace_d.stats.starttime,
                            npts=nt_ref)
        trace_s.interpolate(sampling_rate=1.0 / dt_ref,
                            starttime=trace_s.stats.starttime,
                            npts=nt_ref)
        # Discrete Wavelet Transform
        data = trace_d.data
        synthetics = trace_s.data
        (data_DWT, NA_d) = WT(data, nt_ref, j)
        (synthetics_DWT, NA_s) = WT(synthetics, nt_ref, j)
        trace_d_DWT = Trace(data=data_DWT, header=trace_d.stats)
        trace_s_DWT = Trace(data=synthetics_DWT, header=trace_s.stats)
        trace_d_DWT.data = numpy.require(trace_d_DWT.data, dtype=numpy.float32)
        trace_s_DWT.data = numpy.require(trace_s_DWT.data, dtype=numpy.float32)
        stream_d_DWT.append(trace_d_DWT)
        stream_s_DWT.append(trace_s_DWT)
    stream_d_DWT.write('OUTPUT_FILES/data_DWT.su', format='SU', byteorder='<')
    stream_s_DWT.write('OUTPUT_FILES/synthetics_DWT.su',
                       format='SU',
                       byteorder='<')
    os.chdir('..')
Code Example #40
def _build_H_and_weight(spec_st, specnoise_st, wave_type='S'):
    """
    Add to spec_st the "H" component.

    H component is obtained from the modulus of all the available components.

    The same for noise, if requested. In this case we compute
    weighting function as well.
    """
    if specnoise_st:
        noise_weight = True
    else:
        noise_weight = False
    weight_st = Stream()
    stalist = set(sp.id[:-1] for sp in spec_st if not sp.stats.ignore)
    for specid in stalist:
        network, station, location, code = specid.split('.')
        spec_st_sel = spec_st.select(
            network=network, station=station, location=location)
        spec_st_sel = Stream(sp for sp in spec_st_sel if not sp.stats.ignore)
        if noise_weight:
            specnoise_st_sel = specnoise_st.select(
                network=network, station=station, location=location)
            specnoise_st_sel = Stream(
                sp for sp in specnoise_st_sel if not sp.stats.ignore)
        # 'code' is band+instrument code
        for code in set(x.stats.channel[:-1] for x in spec_st_sel):
            spec_h = _compute_h(spec_st_sel, code, wave_type)
            if spec_h is None:
                continue
            spec_st.append(spec_h)

            # Compute "H" component for noise, if requested,
            # and weighting function.
            if noise_weight:
                specnoise_h = _compute_h(specnoise_st_sel, code, wave_type)
                if specnoise_h is not None:
                    specnoise_st.append(specnoise_h)

                # Weighting function is the ratio between "H" components
                # of signal and noise
                weight = _build_weight(spec_h, specnoise_h)
                weight_st.append(weight)
    return weight_st
Code Example #41
def test_Porter2011():
    import matplotlib
    matplotlib.use('Agg')
    import numpy as np
    from obspy.core import Stream
    from telewavesim import utils as ut
    from telewavesim import wiggle as wg

    modfile = resource_filename('telewavesim',
                                'examples/models/model_Porter2011.txt')
    wvtype = 'P'
    npts = 3000  # Number of samples
    dt = 0.01  # Sample distance in seconds
    slow = 0.06  # Horizontal slowness (or ray parameter) in s/km
    baz = np.arange(0., 360., 10.)
    model = ut.read_model(modfile)
    trR = Stream()
    trT = Stream()
    # Loop over range of data
    for bb in baz:
        # Calculate the plane waves seismograms
        trxyz = ut.run_plane(model, slow, npts, dt, bb, wvtype=wvtype,
                             obs=False)
        # Then the transfer functions in Z-R-T coordinate system
        tfs = ut.tf_from_xyz(trxyz, pvh=False)
        # Append to streams
        trR.append(tfs[0])
        trT.append(tfs[1])
    # Set frequency corners in Hz
    f1 = 0.01
    f2 = 1.0
    # Filter to get wave-like traces
    trR.filter('bandpass', freqmin=f1, freqmax=f2, corners=2, zerophase=True)
    trT.filter('bandpass', freqmin=f1, freqmax=f2, corners=2, zerophase=True)
    # Stack over all traces
    trR_stack, trT_stack = ut.stack_all(trR, trT, pws=True)
    # Plot as wiggles
    with tempfile.TemporaryDirectory() as tempdir:
        wg.rf_wiggles_baz(trR, trT, trR_stack, trT_stack, 'test', btyp='baz',
                          scale=1.e3, tmin=-5., tmax=8., save=True,
                          ftitle=join(tempdir, 'porter2011.png'),
                          wvtype='P')
Code Example #42
def remove21Comp(st):

    nn = Stream()
    StazOb = getStationslist(st)
    lis = []

    for i in range(len(StazOb)):
        c = 0
        for j in range(len(st)):
            if (st[j].stats.station == StazOb[i]):
                c = c + 1
            if (c == 3):
                lis.append(st[j].stats.station)
                break

    for i in range(len(lis)):
        for j in range(len(st)):
            if (st[j].stats.station == lis[i]):
                nn.append(st[j])

    return nn
Code Example #43
File: alpha_mod.py Project: ftilmann/miic
def dir_read_stream(base_dir='', pattern='*.raw', sort_flag=True, \
                    format='None (Automatic)'):
    """ Read all files in specified directory into one single stream object.

    Reads all files in the directory assuming one trace per file and stores it
    in one stream.
    """

    import glob

    if sort_flag:
        file_list = sorted(glob.glob(os.path.join(base_dir, pattern)))
    else:
        file_list = glob.glob(os.path.join(base_dir, pattern))
    stack_st = Stream()
    for this_file in file_list:
        st, _, _, _ = stream_read(filename=this_file, format=format)
        for tr in st:
            stack_st.append(tr)

    return stack_st
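Since ObsPy streams support in-place addition, the inner trace loop above could equivalently be written as:

for this_file in file_list:
    st, _, _, _ = stream_read(filename=this_file, format=format)
    stack_st += st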
Code Example #44
def process(isource, j):
	"""Read the DWT results in SU file
	and apply preprocessing

	Input:
	isource = index of the source
	j = scale at which we run the inversion process"""

	namedir1 = 'Source_' + str(isource + 1)
	os.chdir(namedir1)

	stream_d = read('OUTPUT_FILES/data_DWT.su', format='SU', byteorder='<')
	stream_s = read('OUTPUT_FILES/synthetics_DWT.su', format='SU', byteorder='<')

	stream_d_new = Stream()
	stream_s_new = Stream()
	for irec in range(0, nrec):
		trace_d = stream_d[irec].copy()
		trace_s = stream_s[irec].copy()
		# Window
		starttime = tstart[j - 1] + irec * 25.0 * sstart[j - 1]
		endtime = tend[j - 1] + irec * 25.0 * send[j - 1]
		cut_func(trace_d, starttime, endtime)
		cut_func(trace_s, starttime, endtime)
		# Tapering
		trace_d.taper(max_percentage=0.8, type="hann")
		trace_s.taper(max_percentage=0.8, type="hann")
		# Filtering
		filter_synt(trace_d, filt_freq)
		filter_synt(trace_s, filt_freq)
		# Tapering
		trace_d.taper(max_percentage=0.8, type="hann")
		trace_s.taper(max_percentage=0.8, type="hann")
		trace_d.data = numpy.require(trace_d.data, dtype=numpy.float32)
		trace_s.data = numpy.require(trace_s.data, dtype=numpy.float32)
		stream_d_new.append(trace_d)
		stream_s_new.append(trace_s)
	stream_d_new.write('OUTPUT_FILES/data_process.su', format='SU', byteorder='<')
	stream_s_new.write('OUTPUT_FILES/synthetics_process.su', format='SU', byteorder='<')
	os.chdir('..')
Code Example #45
File: processData.py Project: Fran89/pydmt
def remove21Comp(st):

    nn = Stream()
    StazOb = getStationslist(st)
    lis = []

    # Keep only stations for which all three components are present
    for i in range(len(StazOb)):
        c = 0
        for j in range(len(st)):
            if st[j].stats.station == StazOb[i]:
                c = c + 1
            if c == 3:
                lis.append(st[j].stats.station)
                break

    for i in range(len(lis)):
        for j in range(len(st)):
            if st[j].stats.station == lis[i]:
                nn.append(st[j])

    return nn
Code Example #46
File: DWT.py Project: ArianeDucellier/multiscale
def compute_DWT(isource, j):
	"""Read the results of the simulation in SU file
	and compute the wavelet transform

	Input:
	isource = index of the source
	j = scale at which we run the inversion process"""

	namedir1 = 'Source_' + str(isource + 1)
	os.chdir(namedir1)

	filename_d = '../../Data/data_shot' + str(isource + 1) + '.su'
	filename_s = 'OUTPUT_FILES/Up_file_single.su'
	stream_d = read(filename_d, format='SU')
	stream_s = read(filename_s, format='SU')

	stream_d_DWT = Stream()
	stream_s_DWT = Stream()
	for irec in range(0, nrec):
		trace_d = stream_d[irec].copy()
		trace_s = stream_s[irec].copy()
		# Interpolation: We need the same sampling rate to carry out the DWT
		trace_d.interpolate(sampling_rate=1.0 / dt_ref, starttime=trace_d.stats.starttime, npts=nt_ref)
		trace_s.interpolate(sampling_rate=1.0 / dt_ref, starttime=trace_s.stats.starttime, npts=nt_ref)
		# Discrete Wavelet Transform
		data = trace_d.data
		synthetics = trace_s.data
		(data_DWT, NA_d) = WT(data, nt_ref, j)
		(synthetics_DWT, NA_s) = WT(synthetics, nt_ref, j)
		trace_d_DWT = Trace(data=data_DWT, header=trace_d.stats)
		trace_s_DWT = Trace(data=synthetics_DWT, header=trace_s.stats)
		trace_d_DWT.data = numpy.require(trace_d_DWT.data, dtype=numpy.float32)
		trace_s_DWT.data = numpy.require(trace_s_DWT.data, dtype=numpy.float32)
		stream_d_DWT.append(trace_d_DWT)
		stream_s_DWT.append(trace_s_DWT)
	stream_d_DWT.write('OUTPUT_FILES/data_DWT.su', format='SU', byteorder='<')
	stream_s_DWT.write('OUTPUT_FILES/synthetics_DWT.su', format='SU', byteorder='<')
	os.chdir('..')
Code Example #47
File: processing.py Project: echolite/ANTS
def slice_traces(s, length_in_sec, min_len, verbose, ofid):
    """
    Slice an ObsPy stream object with multiple traces; The stream of new (sliced) traces merely contains
    references to the original trace.
    """
    s_new = Stream()

    # - loop through traces ------------------------------------------------------------------------

    for k in np.arange(len(s)):

        # - set initial start time
        start = s[k].stats.starttime

        # - march through the trace until the endtime is reached
        while start < s[k].stats.endtime - min_len:
            s_part = s[k].slice(start, start + length_in_sec - 1 / (s[k].stats.sampling_rate))

            s_new.append(s_part)

            start += length_in_sec

    return s_new
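Unlike split_traces (Code Example #21), which copies and trims each segment, Trace.slice passes references to the original data instead of copying, so this variant is much lighter on memory. A quick check of that behavior (a sketch; s is the input stream from the function above):

seg = s[0].slice(s[0].stats.starttime, s[0].stats.starttime + 10.0)
print(seg.data.base is not None)  # usually True: a NumPy view, not a copy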
Code Example #48
def getDataViaArcLink(t1, t2, inv, args):

    client = Client(user='******')

    data = Stream()

    a = list(inv.keys())  # keys() must be materialized to be indexed in Python 3
    for i in range(len(a)):
        l = a[i].split('.')
        if len(l) <= 2:
           pass
        else:
           Net = l[0]
           Sta = l[1]
           Cha = l[3]

           try: 
             st = client.getWaveform(Net, Sta, "", Cha, UTCDateTime(t1), UTCDateTime(t2), metadata="TRUE")
             data.append(st[0])
           except Exception:
             pass
   
    return data
Code Example #49
 def test_allDataTypesAndEndiansInSingleFile(self):
     """
     Tests all data and endian types into a single file.
     """
     tempfile = NamedTemporaryFile().name
     st1 = Stream()
     data = np.random.randint(-1000, 1000, 500)
     for dtype in ["i2", "i4", "f4", "f8", "S1"]:
         for enc in ["<", ">", "="]:
             st1.append(Trace(data=data.astype(np.dtype(enc + dtype))))
     # this will raise a UserWarning - ignoring for test
     with warnings.catch_warnings(record=True):
         warnings.simplefilter('ignore', UserWarning)
         st1.write(tempfile, format="MSEED")
         # read everything back (int16 gets converted into int32)
         st2 = read(tempfile)
         for dtype in ["i4", "i4", "f4", "f8", "S1"]:
             for enc in ["<", ">", "="]:
                 tr = st2.pop(0).data
                 self.assertEqual(tr.dtype.kind + str(tr.dtype.itemsize),
                                  dtype)
                 # byte order is always native (=)
                 np.testing.assert_array_equal(tr, data.astype("=" + dtype))
         os.remove(tempfile)
Code Example #50
File: cleanSetMT.py Project: calum-chamberlain/pydmt
def purgeStream(st,l):
    """

    :param st: Stream to analyse
    :type st: obspy.core.stream.Stream
    :param l: list of station numbers
    :type l: list
    :return: Stream with the traces of the listed stations removed
    """

    new = Stream()

    ls_Sta = getSTationslist(st)
    for i in range(len(l)):
        a = l[i]-i
        ls_Sta.pop(a)

    for i in range(len(st)):
        sta = st[i].stats.station
        rem = [y for y in ls_Sta if sta == y]
        if len(rem) == 1:
            new.append(st[i])
    
    return new
Code Example #51
def trim_tails(tr):
	day = timedelta(days=1)
	second = timedelta(seconds=1)
	t1 = tr.stats.starttime
	t2 = tr.stats.endtime
	t_in =  UTCDateTime((t1+day).year, (t1+day).month, (t1+day).day, 00, 00, 00)
	t_out = UTCDateTime((t2-day).year, (t2-day).month, (t2-day).day, 23, 59, 59.99)
	tr1 = tr.copy()
	tr2 = tr.copy()
	tr3 = tr.copy()
	ST = Stream()
	# check the exact-boundary case first, otherwise it is shadowed
	# by the two single-boundary cases below
	if t_in == t1 and t_out == t2:
		tr1.trim(t1, t2)
		#ST.append(tr1)
	elif t_in == t1:
		tr1.trim(t1, t_out)
		tr2.trim(t_out + second, t2)
		#ST.append(tr1)
		ST.append(tr2)
	elif t_out == t2:
		tr2.trim(t1, t_in)
		tr1.trim(t_in, t2)
		ST.append(tr2)
		#ST.append(tr1)
	else:
		tr2.trim(t1, t_in)
		tr3.trim(t_out + second, t2)
		tr1.trim(t_in, t_out)
		#ST.append(tr1)
		ST.append(tr2)
		ST.append(tr3)
	print t1, t2
	print t_in, t_out
	while t_out > t_in:
		tr_cut = tr1.copy()
		tr_cut.trim(t_in + second, t_in + day)
		print tr_cut
		ST.append(tr_cut)
		t_in += day
	return ST
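A minimal usage sketch (the file name is hypothetical): the function returns one trace per full UTC day plus the partial-day tails at either end.

from obspy import read

tr = read("multiday.mseed")[0]  # hypothetical multi-day recording
for piece in trim_tails(tr):
    print(piece.id)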
Code example #53
File: readGreens.py  Project: Fran89/pydmt
def aquireGreens(greenFile,args):


   # define stream to load the Green's functions in time
   green = Stream()

   # displacement or velocity
   ivel = args.dva

   # max number of stations (i.e. distances) allowed.
   # When modifying this value, also modify the corresponding
   # range, vred and t0 values in FKRPROG and recompile.
   Max_nr_dists = 100
   # dim(n2) (4097)

   # read the Green.1 file twice: once to access integers and once for reals
   F = FortranFile(greenFile)
   I = FortranFile(greenFile)

   # First line of header. This line is a mixed float-int array
   h_1f   = F.readReals()
   h_1i   = I.readInts()
   alpha  = h_1f[0]
   depth  = h_1f[1]
   fl     = h_1f[2]
   fu     = h_1f[3]
   dt     = h_1f[4]
   n1     = h_1i[5]
   n2     = h_1i[6]
   df     = h_1f[7]
   nyq    = h_1i[8]
   nrange = h_1i[9]
   nskip  = h_1i[10]

   # Second line of header. This line is a 10-integer vector (isrc in code).
   # If h_2i[j] == 1 then compute the Green's function for this source
   # element, else do not.
   h_2f   = F.readReals()  # Useless for this line but needed to place the
                           # file pointer at the end of this array for the
                           # next header line
   h_2i   = I.readInts()
   isrc   = h_2i

   # Third line of header. Mixed float-int array: all floats and one int.
   # This header includes 6 float arrays with 70 elements each and one
   # integer (nmax) after the first 4 vectors.
   # Names of the vectors and of the constant:
   # d(70),a(70),b(70),rho(70),nmax(1),qa(70),qb(70)
   h_3f   = F.readReals()
   h_3i   = I.readInts()
   beg    = 0
   inc    = 70
   d      = h_3f[beg:beg+inc]   
   beg    = beg+inc
   a      = h_3f[beg:beg+inc]
   beg    = beg+inc
   b      = h_3f[beg:beg+inc]
   beg    = beg+inc
   rho    = h_3f[beg:beg+inc]
   beg    = beg+inc
   mmax   = h_3i[beg:beg+1] 
   beg    = beg+1
   qa     = h_3f[beg:beg+inc]
   beg    = beg+inc
   qb     = h_3f[beg:beg+inc]
   
   # 4th line: range of distances, vred and t0.
   # This refers to the station lines in the earth model.
   # The maximum number of stations allowed is 100 and is hardcoded in
   # FKRPROG.f. To extend the number of stations allowed, modify the code,
   # recompile, and update the parameters at the beginning of this procedure.
   # Vectors of floats.
   h_4f   = F.readReals()
   h_4i   = I.readInts()
   beg    = 0
   inc    = Max_nr_dists
   Range  = h_4f[beg:beg+inc]
   beg    = beg+inc
   vred   = h_4f[beg:beg+inc]
   beg    = beg+inc
   t0     = h_4f[beg:beg+inc]

   # 5th line --> loop over distances to access complex spectral values
   # 3 loops:
   # 1 i: 1->n2 (n2=npts/2) -  omega(float),nkk(int)
   #   2  j: 1->nrange
   #      3  k: 1->10
   #           read(aa),(bb) --> gg(j,i,k)=cmplx(sngl(aa),sngl(bb))
   omega = [0.0 for x in range(0,n2+0)]
   freq  = [0.0 for x in range(0,n2+0)]
   # (complex spectral matrix)
   # z --- k: fundamental MTs
   # y --- i: (npts/2). nyq = npts/2 + 1
   # x --- j: distances
   gg    = [ [ [ 0.0 for z in range(0,10+0)] \
                   for y in range(0,n2+0)]     \
                   for x in range(0,nrange+0)] 

   for i in range(0,n2+0):
       h_5f   = F.readReals()
       omega[i] = h_5f[0]
       
       for j in range(0,nrange+0):
           
           for k in range(0,10+0):
               
               FF = F.readDouble()
               aa = complex(FF[0],FF[1])
               gg[j][i][k] = aa
 
       
   # Here we have the resulting matrix gg with complex spectral values.
   # For each distance and each fundamental MT, take the vector of complex
   # values (generate data(j)), build the conjugate half, add the complex
   # value for nyq, transform spectra->time, apply damping, and integrate
   # if displacement is required.
   pi   = acos(-1.0)
   twpi = 2.*pi
   n    = 2 * (nyq-1)
   nm     = n2
   npoint = n
   rep    = 'n'
   tau = dt
   fmax = fu
   inst = 0

   for j in range(0,nrange+0):
       t0x = (Range[j])/(vred[j])
       yr  = 0.0

       for k in range(0,10+0):

           if (isrc[k] ==1):

              # initialize data
              data  = numpy.array(numpy.zeros(n2+0+n2),dtype=complex)
#             data  = [0.0 for x in range(0,n2+0+n2)]
              for i in range(0,n2+0):
                  # arrange data
                  data[i] = gg[j][i][k]
                  # arrange frequency
                  freq = i*df
                  if(freq < df):
                     freq = 0.01*df
                  om = twpi * freq
              for i in range(n2+0,nyq):
                  data[i] = complex(0.0,0.0)
 
              # build the conjugate half of the spectrum
              for i in range(1,n2+0):
                  data[n+0-i] = data[i].conjugate()

              data[0]     = complex(0.0,0.0)
              data[nyq-1] = complex(0.0,0.0)


              # From spectraToTime
              data = four1(data,n,1,dt,df)

              # Apply damping factor
              fac = exp(alpha*t0x)
              dfac = exp(alpha*dt)
              for i in range(len(data)):
                data[i]=(data[i])*fac
                fac = fac * dfac
              
              # velocity to displacement if required
              if(ivel=='1'):
                 data=velTodisData(data,dt)

              # put data into a trace, prepending prel pre-event zero samples
              prel = int(eval(args.pre) / eval(args.delta))
              length = numpy.arange(len(data)+prel)*0.0
              t = Trace(length)
              for i in range(len(data)):
                  t.data[i+prel] = data[i]
              t.stats['delta']   = dt
              t.stats['dist']    = Range[j]
              name = 'GREEN_' + str(t.stats['dist'])
              t.stats['station'] = name


              # update stats; tss, xds, zss and zdd are multiplied by -1,
              # ex1/ex2 (explosion components) are left unscaled
              # (channel name, sign) for each of the 10 fundamental sources
              chan_sign = {7: ('tss', -1), 4: ('tds', +1), 6: ('xss', +1),
                           3: ('xds', -1), 1: ('xdd', +1), 5: ('zss', -1),
                           2: ('zds', +1), 0: ('zdd', -1), 8: ('ex1', +1),
                           9: ('ex2', +1)}
              name, sign = chan_sign[k]
              t.stats['channel'] = name
              if sign == -1:
                  t.data = t.data*(-1)

              green.append(t)


   return green
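For reference, the per-sample damping loop above scales sample i by exp(alpha*(t0x + i*dt)); a vectorized NumPy equivalent (a sketch, not part of the original code):

import numpy as np

def undamp(data, alpha, t0x, dt):
    # fac starts at exp(alpha*t0x) and is multiplied by exp(alpha*dt)
    # once per sample, so sample i is scaled by exp(alpha*(t0x + i*dt)).
    i = np.arange(len(data))
    return data * np.exp(alpha * (t0x + i * dt))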
Code example #54
File: readGreens.py  Project: Fran89/pydmt
def reorderGreen(gr,Sta):

    temp = Stream()
    out  = Stream()

    for i in range(len(Sta)):
        for n in range(len(gr)):
            if(gr[n].stats.station == Sta[i]):
               temp.append(gr[n])

        out.append(temp[7])
        out.append(temp[4])
        out.append(temp[6])
        out.append(temp[3])
        out.append(temp[1])
        out.append(temp[5])
        out.append(temp[2])
        out.append(temp[0])
        out.append(temp[8])
        out.append(temp[9])
        temp = Stream()

    return out
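Given the k -> channel mapping in aquireGreens above, and assuming every fundamental source was computed (all isrc[k] == 1, so temp[k] carries the trace for source k), the index order 7, 4, 6, 3, 1, 5, 2, 0, 8, 9 yields a fixed channel sequence per station:

channels = {7: 'tss', 4: 'tds', 6: 'xss', 3: 'xds', 1: 'xdd',
            5: 'zss', 2: 'zds', 0: 'zdd', 8: 'ex1', 9: 'ex2'}
order = [7, 4, 6, 3, 1, 5, 2, 0, 8, 9]
print([channels[k] for k in order])
# ['tss', 'tds', 'xss', 'xds', 'xdd', 'zss', 'zds', 'zdd', 'ex1', 'ex2']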
Code example #55
def _getPreview(session, **kwargs):
    # build up query
    query = session.query(WaveformChannel)
    # start and end time
    try:
        start = kwargs.get('start_datetime')
        start = UTCDateTime(start)
    except:
        # default: 20 minutes ago
        start = UTCDateTime() - 60 * 20
    finally:
        query = query.filter(WaveformChannel.endtime > start.datetime)
    try:
        end = kwargs.get('end_datetime')
        end = UTCDateTime(end)
    except:
        # default: now
        end = UTCDateTime()
    finally:
        query = query.filter(WaveformChannel.starttime < end.datetime)
    # process arguments
    if 'trace_ids' in kwargs:
        # filter over trace id list
        trace_ids = kwargs.get('trace_ids', '')
        trace_filter = or_()
        for trace_id in trace_ids.split(','):
            temp = trace_id.split('.')
            if len(temp) != 4:
                continue
            trace_filter.append(and_(
                WaveformChannel.network == temp[0],
                WaveformChannel.station == temp[1],
                WaveformChannel.location == temp[2],
                WaveformChannel.channel == temp[3]))
        if trace_filter.clauses:
            query = query.filter(trace_filter)
    else:
        # filter over network/station/location/channel id
        for key in ['network_id', 'station_id', 'location_id',
                    'channel_id']:
            text = kwargs.get(key, None)
            if text is None:
                continue
            col = getattr(WaveformChannel, key[:-3])
            if text == "":
                query = query.filter(col == None)
            elif '*' in text or '?' in text:
                text = text.replace('?', '_')
                text = text.replace('*', '%')
                query = query.filter(col.like(text))
            else:
                query = query.filter(col == text)
    # execute query
    results = query.all()
    session.close()
    # create Stream
    st = Stream()
    for result in results:
        preview = result.getPreview()
        st.append(preview)
    # merge and trim
    st = mergePreviews(st)
    st.trim(start, end)
    return st, start, end
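The wildcard handling above maps SEED-style patterns onto SQL LIKE syntax; the translation in isolation:

# '?' matches exactly one character -> SQL '_'
# '*' matches any run of characters -> SQL '%'
def wildcard_to_like(text):
    return text.replace('?', '_').replace('*', '%')

print(wildcard_to_like('BH?'))  # BH_
print(wildcard_to_like('B*'))   # B%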
Code example #56
File: core.py  Project: egdorf/obspy
def readASC(filename, headonly=False, skip=0, delta=None, length=None,
                                                  **kwargs):  # @UnusedVariable
    """
    Reads a Seismic Handler ASCII file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :type skip: int, optional
    :param skip: Number of lines to be skipped from top of file. If defined
        only one trace is read from file.
    :type delta: float, optional
    :param delta: If "skip" is used, "delta" defines sample offset in seconds.
    :type length: int, optional
    :param length: If "skip" is used, "length" defines the number of values to
        be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy.core import read
    >>> st = read("/path/to/QFILE-TEST-ASC.ASC")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    3 Trace(s) in Stream:
    .TEST..BHN | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .TEST..BHE | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .WET..HHZ  | 2010-01-01T01:01:05.999000Z - ... | 100.0 Hz, 4001 samples
    """
    fh = open(filename, 'rt')
    # read file and split text into channels
    channels = []
    headers = {}
    data = StringIO()
    for line in fh.readlines()[skip:]:
        if line.isspace():
            # blank line
            # check if any data fetched yet
            if len(headers) == 0 and data.len == 0:
                continue
            # append current channel
            data.seek(0)
            channels.append((headers, data))
            # create new channel
            headers = {}
            data = StringIO()
            if skip:
                # if skip is set only one trace is read, everything else makes
                # no sense.
                break
            continue
        elif line[0].isalpha():
            # header entry
            key, value = line.split(':', 1)
            key = key.strip()
            value = value.strip()
            headers[key] = value
        elif not headonly:
            # data entry - may be written in multiple columns
            data.write(line.strip() + ' ')
    fh.close()
    # create ObsPy stream object
    stream = Stream()
    # custom header
    custom_header = {}
    if delta:
        custom_header["delta"] = delta
    if length:
        custom_header["npts"] = length

    for headers, data in channels:
        # create Stats
        header = Stats(custom_header)
        header['sh'] = {}
        channel = [' ', ' ', ' ']
        # generate headers
        for key, value in headers.iteritems():
            if key == 'DELTA':
                header['delta'] = float(value)
            elif key == 'LENGTH':
                header['npts'] = int(value)
            elif key == 'CALIB':
                header['calib'] = float(value)
            elif key == 'STATION':
                header['station'] = value
            elif key == 'COMP':
                channel[2] = value[0]
            elif key == 'CHAN1':
                channel[0] = value[0]
            elif key == 'CHAN2':
                channel[1] = value[0]
            elif key == 'START':
                # 01-JAN-2009_01:01:01.0
                # 1-OCT-2009_12:46:01.000
                header['starttime'] = toUTCDateTime(value)
            else:
                # everything else gets stored into sh entry
                if key in SH_KEYS_INT:
                    header['sh'][key] = int(value)
                elif key in SH_KEYS_FLOAT:
                    header['sh'][key] = float(value)
                else:
                    header['sh'][key] = value
        # set channel code
        header['channel'] = ''.join(channel)
        if headonly:
            # skip data
            stream.append(Trace(header=header))
        else:
            # read data; ndmin=1 guarantees an array even for a single value
            data = loadtxt(data, dtype='float32', ndmin=1)

            # cut data if requested
            if skip and length:
                data = data[:length]

            # use correct value in any case
            header["npts"] = len(data)

            stream.append(Trace(data=data, header=header))
    return stream
Code example #57
File: core.py  Project: egdorf/obspy
def readQ(filename, headonly=False, data_directory=None, byteorder='=',
          **kwargs):  # @UnusedVariable
    """
    Reads a Seismic Handler Q file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: Q header file to be read. Must have a `QHD` file
        extension.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :type data_directory: str, optional
    :param data_directory: Data directory where the corresponding QBN file can
        be found.
    :type byteorder: ``'<'``, ``'>'``, or ``'='``, optional
    :param byteorder: Enforce byte order for data file. This is important for
        Q files written in older versions of Seismic Handler, which don't
        explicitly state the `BYTEORDER` flag within the header file. Defaults
        to ``'='`` (local byte order).
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    Q files consist of two files per data set:

     * an ASCII header file with file extension `QHD` and
     * a binary data file with file extension `QBN`.

    The read method only accepts header files for the ``filename`` parameter.
    ObsPy assumes that the corresponding data file is within the same directory
    if the ``data_directory`` parameter is not set. Otherwise it will search
    in the given ``data_directory`` for a file with the `QBN` file extension.
    This function should NOT be called directly, it registers via the
    ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    .. rubric:: Example

    >>> from obspy.core import read
    >>> st = read("/path/to/QFILE-TEST.QHD")
    >>> st    #doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    3 Trace(s) in Stream:
    .TEST..BHN | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .TEST..BHE | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .WET..HHZ  | 2010-01-01T01:01:05.999000Z - ... | 100.0 Hz, 4001 samples
    """
    if not headonly:
        if not data_directory:
            data_file = os.path.splitext(filename)[0] + '.QBN'
        else:
            data_file = os.path.basename(os.path.splitext(filename)[0])
            data_file = os.path.join(data_directory, data_file + '.QBN')
        if not os.path.isfile(data_file):
            msg = "Can't find corresponding QBN file at %s."
            raise IOError(msg % data_file)
        fh_data = open(data_file, 'rb')
    # loop through read header file
    fh = open(filename, 'rt')
    line = fh.readline()
    cmtlines = int(line[5:7]) - 1
    # comment lines
    comments = []
    for _i in xrange(0, cmtlines):
        comments += [fh.readline()]
    # trace lines
    traces = {}
    i = -1
    id = ''
    for line in fh:
        cid = int(line[0:2])
        if cid != id:
            id = cid
            i += 1
        traces.setdefault(i, '')
        traces[i] += line[3:].strip()
    # create stream object
    stream = Stream()
    for id in sorted(traces.keys()):
        # fetch headers
        header = {}
        header['sh'] = {
            "FROMQ": True,
            "FILE": os.path.splitext(os.path.split(filename)[1])[0],
        }
        channel = ['', '', '']
        npts = 0
        for item in traces[id].split('~'):
            key = item.strip()[0:4]
            value = item.strip()[5:].strip()
            if key == 'L001':
                npts = header['npts'] = int(value)
            elif key == 'L000':
                continue
            elif key == 'R000':
                header['delta'] = float(value)
            elif key == 'R026':
                header['calib'] = float(value)
            elif key == 'S001':
                header['station'] = value
            elif key == 'C000' and value:
                channel[2] = value[0]
            elif key == 'C001' and value:
                channel[0] = value[0]
            elif key == 'C002' and value:
                channel[1] = value[0]
            elif key == 'C003':
                if value == '<' or value == '>':
                    byteorder = header['sh']['BYTEORDER'] = value
            elif key == 'S021':
                # 01-JAN-2009_01:01:01.0
                # 1-OCT-2009_12:46:01.000
                header['starttime'] = toUTCDateTime(value)
            elif key == 'S022':
                header['sh']['P-ONSET'] = toUTCDateTime(value)
            elif key == 'S023':
                header['sh']['S-ONSET'] = toUTCDateTime(value)
            elif key == 'S024':
                header['sh']['ORIGIN'] = toUTCDateTime(value)
            elif key:
                key = INVERTED_SH_IDX.get(key, key)
                if key in SH_KEYS_INT:
                    header['sh'][key] = int(value)
                elif key in SH_KEYS_FLOAT:
                    header['sh'][key] = float(value)
                else:
                    header['sh'][key] = value
        # set channel code
        header['channel'] = ''.join(channel)
        # remember record number
        header['sh']['RECNO'] = len(stream) + 1
        if headonly:
            # skip data
            stream.append(Trace(header=header))
        else:
            if not npts:
                stream.append(Trace(header=header))
                continue
            # read data
            data = fh_data.read(npts * 4)
            dtype = byteorder + 'f4'
            data = np.fromstring(data, dtype=dtype)
            # convert to system byte order
            data = np.require(data, '=f4')
            stream.append(Trace(data=data, header=header))
    if not headonly:
        fh_data.close()
    return stream
Code example #58
File: core.py  Project: INGV-Milano/obspy_dyna
def readDYNA(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads a DYNA 1.0 ASCII file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy.core import read
    >>> st = read("/path/to/IT.ARL..HGE.D.20140120.071240.X.ACC.ASC")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    1 Trace(s) in Stream:
    IT.ARL..E | 2014-01-20T07:12:30.000000Z - 2014-01-20T07:13:14.980000Z | 200.0 Hz, 8997 samples
    """
    headers = {}
    data = StringIO()

    # read the 55 header lines
    fh = open(filename, 'rt')
    for i in xrange(55):
        key, value = fh.readline().strip().split(':', 1)
        headers[key.strip()] = value.strip()

    # create ObsPy stream object
    stream = Stream()
    header = Stats()
    header['dyna'] = {}

    header['network'] = headers['NETWORK']
    header['station'] = headers['STATION_CODE']
    header['location'] = headers['LOCATION'] 
    header['channel'] = headers['STREAM']
    try:
        header['starttime'] = toUTCDateTime(headers['DATE_TIME_FIRST_SAMPLE_YYYYMMDD_HHMMSS']) # use toUTCDateTime to convert from DYNA format
    except:
        header['starttime'] = toUTCDateTime('19700101_000000')
    header['sampling_rate'] = 1/float(headers['SAMPLING_INTERVAL_S'])
    header['delta'] = float(headers['SAMPLING_INTERVAL_S'])
    header['npts'] = int(headers['NDATA'])
    header['calib'] = 1 # not in file header

    ##DYNA dict float data
    header['dyna']['EVENT_LATITUDE_DEGREE'] = strtofloat(headers['EVENT_LATITUDE_DEGREE'])
    header['dyna']['EVENT_LONGITUDE_DEGREE'] = strtofloat(headers['EVENT_LONGITUDE_DEGREE'])
    header['dyna']['EVENT_DEPTH_KM'] = strtofloat(headers['EVENT_DEPTH_KM'])
    header['dyna']['HYPOCENTER_REFERENCE'] = headers['HYPOCENTER_REFERENCE']
    header['dyna']['MAGNITUDE_W'] = strtofloat(headers['MAGNITUDE_W'])
    header['dyna']['MAGNITUDE_L'] = strtofloat(headers['MAGNITUDE_L'])
    header['dyna']['STATION_LATITUDE_DEGREE'] = strtofloat(headers['STATION_LATITUDE_DEGREE'])
    header['dyna']['STATION_LONGITUDE_DEGREE'] = strtofloat(headers['STATION_LONGITUDE_DEGREE'])
    header['dyna']['VS30_M_S'] = strtofloat(headers['VS30_M/S']) 
    header['dyna']['EPICENTRAL_DISTANCE_KM'] = strtofloat(headers['EPICENTRAL_DISTANCE_KM'])
    header['dyna']['EARTHQUAKE_BACKAZIMUTH_DEGREE'] = strtofloat(headers['EARTHQUAKE_BACKAZIMUTH_DEGREE'])
    header['dyna']['DURATION_S'] = strtofloat(headers['DURATION_S'])
    header['dyna']['INSTRUMENTAL_FREQUENCY_HZ'] = strtofloat(headers['INSTRUMENTAL_FREQUENCY_HZ'])
    header['dyna']['INSTRUMENTAL_DAMPING'] = strtofloat(headers['INSTRUMENTAL_DAMPING'])
    header['dyna']['FULL_SCALE_G'] = strtofloat(headers['FULL_SCALE_G'])
    
    # data type is acceleration
    if headers['DATA_TYPE'] == "ACCELERATION" \
    or headers['DATA_TYPE'] == "ACCELERATION RESPONSE SPECTRUM":
        header['dyna']['PGA_CM_S_2'] = strtofloat(headers['PGA_CM/S^2'])
        header['dyna']['TIME_PGA_S'] = strtofloat(headers['TIME_PGA_S'])
    # data type is velocity
    if headers['DATA_TYPE'] == "VELOCITY" \
    or headers['DATA_TYPE'] == "PSEUDO-VELOCITY RESPONSE SPECTRUM":
        header['dyna']['PGV_CM_S'] = strtofloat(headers['PGV_CM/S'])
        header['dyna']['TIME_PGV_S'] = strtofloat(headers['TIME_PGV_S'])
    # data type is displacement
    if headers['DATA_TYPE'] == "DISPLACEMENT" \
    or headers['DATA_TYPE'] == "DISPLACEMENT RESPONSE SPECTRUM":
        header['dyna']['PGD_CM'] = strtofloat(headers['PGD_CM'])
        header['dyna']['TIME_PGD_S'] = strtofloat(headers['TIME_PGD_S'])
            
    header['dyna']['LOW_CUT_FREQUENCY_HZ'] = strtofloat(headers['LOW_CUT_FREQUENCY_HZ'])
    header['dyna']['HIGH_CUT_FREQUENCY_HZ'] = strtofloat(headers['HIGH_CUT_FREQUENCY_HZ'])

    ##DYNA dict int data
    header['dyna']['STATION_ELEVATION_M'] = strtoint(headers['STATION_ELEVATION_M'])
    header['dyna']['N_BIT_DIGITAL_CONVERTER'] =  strtoint(headers['N_BIT_DIGITAL_CONVERTER'])
    header['dyna']['FILTER_ORDER'] = strtoint(headers['FILTER_ORDER'])

    ##DYNA dict string data
    header['dyna']['EVENT_NAME'] = headers['EVENT_NAME']
    header['dyna']['EVENT_ID'] = headers['EVENT_ID']
    header['dyna']['EVENT_DATE_YYYYMMDD'] = headers['EVENT_DATE_YYYYMMDD']
    header['dyna']['EVENT_TIME_HHMMSS'] = headers['EVENT_TIME_HHMMSS']
    header['dyna']['MAGNITUDE_W_REFERENCE'] = headers['MAGNITUDE_W_REFERENCE']
    header['dyna']['MAGNITUDE_L_REFERENCE'] = headers['MAGNITUDE_L_REFERENCE']
    header['dyna']['FOCAL_MECHANISM'] = headers['FOCAL_MECHANISM']
    header['dyna']['STATION_NAME'] = headers['STATION_NAME']
    header['dyna']['SITE_CLASSIFICATION_EC8'] = headers['SITE_CLASSIFICATION_EC8']
    header['dyna']['MORPHOLOGIC_CLASSIFICATION'] = headers['MORPHOLOGIC_CLASSIFICATION']
    header['dyna']['DATE_TIME_FIRST_SAMPLE_PRECISION'] = headers['DATE_TIME_FIRST_SAMPLE_PRECISION']
    header['dyna']['UNITS'] = headers['UNITS']
    header['dyna']['INSTRUMENT'] = headers['INSTRUMENT']
    header['dyna']['INSTRUMENT_ANALOG_DIGITAL'] = headers['INSTRUMENT_ANALOG/DIGITAL']
    header['dyna']['BASELINE_CORRECTION'] = headers['BASELINE_CORRECTION']
    header['dyna']['FILTER_TYPE'] = headers['FILTER_TYPE']
    header['dyna']['LATE_NORMAL_TRIGGERED'] = headers['LATE/NORMAL_TRIGGERED']
    header['dyna']['HEADER_FORMAT'] = headers['HEADER_FORMAT']
    header['dyna']['DATABASE_VERSION'] = headers['DATABASE_VERSION']
    header['dyna']['DATA_TYPE'] = headers['DATA_TYPE']
    header['dyna']['PROCESSING'] = headers['PROCESSING']
    header['dyna']['DATA_TIMESTAMP_YYYYMMDD_HHMMSS'] = headers['DATA_TIMESTAMP_YYYYMMDD_HHMMSS']
    header['dyna']['USER1'] = headers['USER1']
    header['dyna']['USER2'] = headers['USER2']
    header['dyna']['USER3'] = headers['USER3']
    header['dyna']['USER4'] = headers['USER4']
    
    if headonly:
        # skip data
        stream.append(Trace(header=header))
    else:
        # read data
        data = np.loadtxt(fh, dtype='float32')
        if headers['DATA_TYPE'][-8:] == "SPECTRUM":
            # two-column file: split the columns into separate traces
            data_1 = np.ascontiguousarray(data[:, 0])
            data_2 = np.ascontiguousarray(data[:, 1])
            stream.append(Trace(data=data_1, header=header))
            stream.append(Trace(data=data_2, header=header))
        else:
            stream.append(Trace(data=data, header=header))
    
    fh.close()
    return stream
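The strtofloat/strtoint helpers used throughout this reader are not shown in this excerpt; a plausible minimal sketch (an assumption, not the project's actual implementation) that tolerates empty or non-numeric header fields:

# Hypothetical reimplementations of strtofloat/strtoint; the real
# obspy_dyna code may differ (e.g. in the value returned on failure).
def strtofloat(value):
    try:
        return float(value)
    except (TypeError, ValueError):
        return None

def strtoint(value):
    try:
        return int(value)
    except (TypeError, ValueError):
        return None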
Code example #59
File: core.py  Project: INGV-Milano/obspy_dyna
def readITACA(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads a ITACA ASCII file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy.core import read
    >>> st = read("/path/to/19971014_152309ITDPC_NCR__WEX.DAT")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    1 Trace(s) in Stream:
    IT.NCR..HNE | 1970-01-01T00:00:00.000000Z - 1970-01-01T00:00:32.795000Z | 200.0 Hz, 6560 samples
    """
    headers = {}
    data = StringIO()

    # read the 43 header lines
    fh = open(filename, 'rt')
    for i in xrange(43):
        key, value = fh.readline().strip().split(':')
        headers[key.strip()] = value.strip()

    # create ObsPy stream object
    stream = Stream()
    header = Stats()
    header['itaca'] = {}

    header['network'] = filename[-18:-16]
    header['station'] = headers['STATION_CODE']
    header['location'] = ''
    if headers['COMPONENT'] == 'WE': header['channel'] = 'HNE'
    # EW should *NEVER* appear, but we handle it anyway
    if headers['COMPONENT'] == 'EW': header['channel'] = 'HNE'
    #  -just in case ;)
    if headers['COMPONENT'] == 'NS': header['channel'] = 'HNN'
    if headers['COMPONENT'] == 'UP': header['channel'] = 'HNZ'
    try:
        tfs = headers['EVENT_DATE_YYYYMMDD'] + '_' + headers['TIME_FIRST_SAMPLE_S']
        header['starttime'] = toUTCDateTime(tfs) # use toUTCDateTime to convert from DYNA format
        if re.match('^00', headers['TIME_FIRST_SAMPLE_S']) and re.match('^23', headers['EVENT_TIME_HHMMSS']):
            header['starttime'] = header['starttime'] + 86400
        if re.match('^23', headers['TIME_FIRST_SAMPLE_S']) and re.match('^00', headers['EVENT_TIME_HHMMSS']):
            header['starttime'] = header['starttime'] - 86400
    except:
        header['starttime'] = toUTCDateTime('19700101_000000')
    header['sampling_rate'] = 1/float(headers['SAMPLING_INTERVAL_S'])
    header['delta'] = float(headers['SAMPLING_INTERVAL_S'])
    header['npts'] = int(headers['NDATA'])
    header['calib'] = 1 # not in file header

    ##ITACA dict float data
    header['itaca']['EVENT_LATITUDE_DEGREE'] = strtofloat(headers['EVENT_LATITUDE_DEGREE'])
    header['itaca']['EVENT_LONGITUDE_DEGREE'] = strtofloat(headers['EVENT_LONGITUDE_DEGREE'])
    header['itaca']['EVENT_DEPTH_KM'] = strtofloat(headers['EVENT_DEPTH_KM'])
    header['itaca']['MAGNITUDE_L'] = strtofloat(headers['MAGNITUDE_L'])
    header['itaca']['MAGNITUDE_S'] = strtofloat(headers['MAGNITUDE_S'])
    header['itaca']['MAGNITUDE_W'] = strtofloat(headers['MAGNITUDE_W'])
    header['itaca']['STATION_LATITUDE_DEGREE'] = strtofloat(headers['STATION_LATITUDE_DEGREE'])
    header['itaca']['STATION_LONGITUDE_DEGREE'] = strtofloat(headers['STATION_LONGITUDE_DEGREE'])
    header['itaca']['EPICENTRAL_DISTANCE_KM'] = strtofloat(headers['EPICENTRAL_DISTANCE_KM'])
    header['itaca']['EARTHQUAKE_BACKAZIMUTH_DEGREE'] = strtofloat(headers['EARTHQUAKE_BACKAZIMUTH_DEGREE'])
    header['itaca']['DURATION_S'] = strtofloat(headers['DURATION_S'])
    header['itaca']['INSTRUMENTAL_FREQUENCY_HZ'] = strtofloat(headers['INSTRUMENTAL_FREQUENCY_HZ'])
    header['itaca']['INSTRUMENTAL_DAMPING'] = strtofloat(headers['INSTRUMENTAL_DAMPING'])
    header['itaca']['FULL_SCALE_G'] = strtofloat(headers['FULL_SCALE_G'])
    
    # data type is acceleration
    if headers['DATA_TYPE'] == "UNPROCESSED ACCELERATION" \
    or headers['DATA_TYPE'] == "PROCESSED ACCELERATION" \
    or headers['DATA_TYPE'][-8:] == "SPECTRUM":
        header['itaca']['PGA_CM_S_2'] = strtofloat(headers['PGA_CM/S^2'])
        header['itaca']['TIME_PGA_S'] = strtofloat(headers['TIME_PGA_S'])
    # data type is velocity
    if headers['DATA_TYPE'] == "VELOCITY":
        header['itaca']['PGV_CM_S'] = strtofloat(headers['PGV_CM/S'])
        header['itaca']['TIME_PGV_S'] = strtofloat(headers['TIME_PGV_S'])
    # data type is displacement
    if headers['DATA_TYPE'] == "DISPLACEMENT":
        header['itaca']['PGD_CM'] = strtofloat(headers['PGD_CM'])
        header['itaca']['TIME_PGD_S'] = strtofloat(headers['TIME_PGD_S'])
    
    header['itaca']['LOW_CUT_FREQUENCY_HZ'] = strtofloat(headers['LOW_CUT_FREQUENCY_HZ'])
    header['itaca']['HIGH_CUT_FREQUENCY_HZ'] = strtofloat(headers['HIGH_CUT_FREQUENCY_HZ'])

    ##ITACA dict int data
    header['itaca']['STATION_ELEVATION_M'] = strtoint(headers['STATION_ELEVATION_M'])
    header['itaca']['N_BIT_DIGITAL_CONVERTER'] =  strtoint(headers['N_BIT_DIGITAL_CONVERTER'])
    header['itaca']['FILTER_ORDER'] = strtoint(headers['FILTER_ORDER'])

    ##ITACA dict string data
    header['itaca']['EVENT_NAME'] = headers['EVENT_NAME']
    header['itaca']['EVENT_DATE_YYYYMMDD'] = headers['EVENT_DATE_YYYYMMDD']
    header['itaca']['EVENT_TIME_HHMMSS'] = headers['EVENT_TIME_HHMMSS']
    header['itaca']['FOCAL_MECHANISM'] = headers['FOCAL_MECHANISM']
    header['itaca']['STATION_NAME'] = headers['STATION_NAME']
    header['itaca']['SITE_CLASSIFICATION_EC8'] = headers['SITE_CLASSIFICATION_EC8']
    header['itaca']['MORPHOLOGIC_CLASSIFICATION'] = headers['MORPHOLOGIC_CLASSIFICATION']
    header['itaca']['COMPONENT'] = headers['COMPONENT']
    header['itaca']['UNITS'] = headers['UNITS']
    header['itaca']['INSTRUMENT'] = headers['INSTRUMENT']
    header['itaca']['INSTRUMENT_ANALOG_DIGITAL'] = headers['INSTRUMENT_ANALOG/DIGITAL']
    header['itaca']['BASELINE_CORRECTION'] = headers['BASELINE_CORRECTION']
    header['itaca']['FILTER_TYPE'] = headers['FILTER_TYPE']
    header['itaca']['LATE_NORMAL_TRIGGERED'] = headers['LATE/NORMAL_TRIGGERED']
    header['itaca']['DATA_VERSION'] = headers['DATA_VERSION']
    header['itaca']['DATA_TYPE'] = headers['DATA_TYPE']
    
    if headonly:
        # skip data
        stream.append(Trace(header=header))
    else:
        # read data
        data = np.loadtxt(fh, dtype='float32')
        if headers['DATA_TYPE'][-8:] == "SPECTRUM":
            # two-column file: split the columns into separate traces
            data_1 = np.ascontiguousarray(data[:, 0])
            data_2 = np.ascontiguousarray(data[:, 1])
            stream.append(Trace(data=data_1, header=header))
            stream.append(Trace(data=data_2, header=header))
        else:
            stream.append(Trace(data=data, header=header))
    
    fh.close()
    return stream
Code example #60
File: ffisynTEST.py  Project: philcummins/ffipy
def main(argv=sys.argv): 
    
    #Earth's parameters 
    #~ beta = 4.e3 #m/s 
    #~ rho = 3.e3 #kg/m^3 
    #~ mu = rho*beta*beta
    
    PLotSt = ["IU.TRQA.00.LHZ",
             "IU.LVC.00.LHZ",
             "II.NNA.00.LHZ",
              "IU.RAR.00.LHZ"]
             
             
    #PlotSubf = [143, 133, 123, 113, 103, 93,
     #           83, 73, 63, 53]
    PlotSubf = [6,3]

    
    
    # Set RupVel = 0 to have a point-source solution
    RupVel = 2.1  # Chilean eq. from Lay et al.
    t_h = 10.  # half duration for each subfault
    noiselevel = 0.0  # L1-norm level of noise
    mu = 40e9
    # W-phase filter
    corners = 4.
    fmin = 0.001
    fmax = 0.005
    
    ### Data from Chilean 2010 EQ (Same as W phase inv.) 
    strike = 18.
    dip    = 18.
    rake   = 104. # 109.
    
    rakeA = rake + 45.
    rakeB = rake - 45.
    
    
    ### Fault grid parameters
    nsx   = 21   # number of subfaults along strike
    nsy   = 11   # number of subfaults along dip
    flen  = 600. # fault length [km] along strike
    fwid  = 300. # fault width [km] along dip
    direc = 0    # directivity, 0 = bilateral
    Min_h = 10.  # minimum depth of the fault


    ### Derived parameters:
    nsf = nsx*nsy
    sflen = flen/float(nsx)
    sfwid = fwid/float(nsy)
    swp = [1, 0, 2]     # useful to swap (lat, lon, depth)
    mindist = flen*fwid # minimum distance to the hypocentre (initialising)
    
    ###Chessboard
    #weight = np.load("RealSol.npy") 
    weight = np.zeros(nsf)
    weight[::2] = 1 
    #weight[::2] = 1 
    #~ weight[10]=15
    #~ weight[5001]=10
    #~ weight[3201]=2
    
    
    
    ## Setting dirs and reading files.
    GFdir = "/home/roberto/data/GFS/"
    workdir = os.path.abspath(".")+"/"
    datadir = workdir + "DATA/"
    tracesfilename = workdir + "goodtraces.dat"
    tracesdir = workdir + "WPtraces/"
    
    try:
        reqfilename    = glob.glob(workdir + '*.syn.req')[0]
    except IndexError:
        print "There is no *.syn.req file in the dir"
        sys.exit()
    
    basename = reqfilename.split("/")[-1][:-4]
    
    if not os.path.exists(tracesfilename): 
        print tracesfilename, "does not exist."
        exit()
    
    if not os.path.exists(datadir):
            os.makedirs(datadir)
    
    if not os.path.exists(tracesdir):
            os.makedirs(tracesdir)
 
    tracesfile = open(tracesfilename)    
    reqfile =  open(reqfilename)    
    
    trlist = readtraces(tracesfile)
    eqdata = readreq(reqfile)    

    tracesfile.close()
    reqfile.close()   
    
    ####Hypocentre from
    ### http://earthquake.usgs.gov/earthquakes/eqinthenews/2010/us2010tfan/    
    cmteplat = -35.91#-35.85#-36.03#-35.83
    cmteplon = -72.73#-72.72#-72.83# -72.67
    cmtepdepth= 35.
    eq_hyp = (cmteplat,cmteplon,cmtepdepth)
    
    
      ############
    

    # Defining the sf system
    grid, sblt = fault_grid('CL-2010',cmteplat,cmteplon,
                            cmtepdepth, direc,
                            Min_h, strike, dip, rake, flen,fwid ,nsx,nsy,
                            Verbose=False,ffi_io=True,gmt_io=True)
    
    print ('CL-2010',cmteplat,cmteplon,
                            cmtepdepth, direc,
                            Min_h, strike, dip, rake, flen,fwid ,nsx,nsy)
    print grid[0][1]
    #sys.exit()
    #This calculation is inside of the loop
    #~ NP = [strike, dip, rake]
    #~ M = np.array(NodalPlanetoMT(NP))  
    #~ Mp = np.sum(M**2)/np.sqrt(2)    
     
    #############################################################################
    ######Determining the sf closest to the hypocentre:    
    min_Dist_hyp_subf = flen *fwid
    for subf in range(nsf):
        sblat   = grid[subf][1]
        sblon   = grid[subf][0]
        sbdepth = grid[subf][2]              
        sf_hyp =  (sblat,sblon, sbdepth)        
        Dist_hyp_subf = hypo2dist(eq_hyp,sf_hyp)
        if Dist_hyp_subf < min_Dist_hyp_subf:
            min_Dist_hyp_subf = Dist_hyp_subf
            min_sb_hyp = sf_hyp
            hyp_subf = subf
    ####Determining trimming times:    
    test_tr = read(GFdir + "H003.5/PP/GF.0001.SY.LHZ.SAC")[0]
    t0 = test_tr.stats.starttime
    TrimmingTimes = {}  # trimming window (ta, tb) for each station
    A = 0
    for trid in trlist:     
        metafile = workdir + "DATA/" + "META." + trid + ".xml"
        META = DU.getMetadataFromXML(metafile)[trid]
        stlat = META['latitude']
        stlon = META['longitude'] 
        dist =   locations2degrees(min_sb_hyp[0],min_sb_hyp[1],\
                                   stlat,stlon) 
        parrivaltime = getTravelTimes(dist,min_sb_hyp[2])[0]['time']        
        ta = t0 + parrivaltime
        tb = ta + round(15.*dist) 
        TrimmingTimes[trid] = (ta, tb)
        
    
    ###########################

      
    
    DIST = []
    # Ordering the stations in terms of distance
    for trid in trlist: 
        metafile = workdir + "DATA/" + "META." + trid + ".xml"
        META = DU.getMetadataFromXML(metafile)[trid]
        lat = META['latitude']
        lon = META['longitude']
        trdist = locations2degrees(cmteplat,
                                   cmteplon,lat,lon) 
        DIST.append(trdist)   

    DistIndex = lstargsort(DIST)
    trlist = [trlist[i] for i in DistIndex]
  
    stdistribution = StDistandAzi(trlist, eq_hyp , workdir + "DATA/")
    StDistributionPlot(stdistribution)
    #exit()
    #Main loop
   

 

        
    for subf in range(nsf):
        print subf
        sflat   = grid[subf][1]
        sflon   = grid[subf][0]           
        sfdepth = grid[subf][2]
        #~ strike = grid[subf][3] #+ 360.
        #~ dip    = grid[subf][4]
        #~ rake   = grid[subf][5] #     
        NP = [strike, dip, rake]  
        NPA = [strike, dip, rakeA]
        NPB = [strike, dip, rakeB]        


        
        M = np.array(NodalPlanetoMT(NP))   
        MA = np.array(NodalPlanetoMT(NPA)) 
        MB = np.array(NodalPlanetoMT(NPB)) 
        #Time delay is calculated as the time in which 
        #the rupture reach the subfault
            
        sf_hyp = (sflat, sflon, sfdepth) 
        Dist_ep_subf = hypo2dist(eq_hyp,sf_hyp)
        
        if Dist_ep_subf < mindist:
            mindist = Dist_ep_subf
            minsubf = subf
        
                
        if RupVel == 0:
            t_d = eqdata['time_shift']
        else:
            t_d = round(Dist_ep_subf/RupVel) #-59.
       
        print sflat, sflon, sfdepth
        # Looking for the best depth dir:
        depth = []
        depthdir = []
        for file in os.listdir(GFdir):
            if file[-2:] == ".5":
                depthdir.append(file)
                depth.append(float(file[1:-2]))            
        BestDirIndex = np.argsort(abs(sfdepth\
                                  - np.array(depth)))[0]      
        hdir = GFdir + depthdir[BestDirIndex] + "/"     
        
        ###

        SYN = np.array([])
        SYNA = np.array([])
        SYNB = np.array([])
        for trid in trlist:     
            
            metafile = workdir + "DATA/" + "META." + trid + ".xml"
            META = DU.getMetadataFromXML(metafile)[trid]
            lat = META['latitude']
            lon = META['longitude']  
            
            # Subfault loop
            # GFs selection:
            ## Change to the following loop
            
            dist = locations2degrees(sflat,sflon,lat,lon)                                
            azi =  -np.pi/180.*gps2DistAzimuth(lat,lon,
                       sflat,sflon)[2] 
            trPPsy,  trRRsy, trRTsy,  trTTsy = \
                                       GFSelectZ(hdir,dist)          
            
            
 
            
            trROT =  MTrotationZ(azi, trPPsy,  trRRsy, trRTsy,  trTTsy) 
            orig = trROT[0].stats.starttime  
            dt = trROT[0].stats.delta                       

            trianglen = 2.*int(t_h/dt)-1.
            FirstValid = int(trianglen/2.) + 1 # to delete
            window = triang(trianglen)
            window /= np.sum(window)
            #window = np.array([1.])
            
      
            
            
            parrivaltime = getTravelTimes(dist,sfdepth)[0]['time']
            
            t1 = TrimmingTimes[trid][0] - t_d
            t2 = TrimmingTimes[trid][1] - t_d
            
            
            
            for trR in trROT:
                trR.data *= 10.**-21 ## To get M in Nm                   
                trR.data -= trR.data[0]
                AUX1 = len(trR)
                trR.data = convolve(trR.data,window,mode='valid') 
                AUX2 = len(trR)
                mean = np.mean(np.hstack((trR.data[0]*np.ones(FirstValid),\
                               trR.data[:60./trR.stats.delta*1.-FirstValid+1])))
                #mean = np.mean(trR.data[:60])
                trR.data -= mean      
                trR.data = bp.bandpassfilter(trR.data,len(trR), trR.stats.delta,\
                                             corners , 1 , fmin, fmax)  
                t_l = dt*0.5*(AUX1 - AUX2)                             
                trR.trim(t1-t_l,t2-t_l, pad=True, fill_value=trR.data[0])  #We lost t_h due to the convolution        
            


                   
            #~ for trR in trROT:
                #~ trR.data *= 10.**-23 ## To get M in Nm                   
                #~ trR.data -= trR.data[0]
 
                #~ trR.data = convolve(trR.data,window,mode='same') 

                #~ #mean = np.mean(np.hstack((trR.data[0]*np.ones(FirstValid),\
                               #~ #trR.data[:60./trR.stats.delta*1.-FirstValid+1])))
                #~ mean = np.mean(trR.data[:60])
                #~ trR.data -= mean      
                #~ trR.data = bp.bandpassfilter(trR.data,len(trR), trR.stats.delta,\
                                             #~ corners , 1 , fmin, fmax)  
                            
                #~ trR.trim(t1,t2,pad=True, fill_value=trR.data[0])     
           
            trROT = np.array(trROT)  
            syn  =  np.dot(trROT.T,M) 
            synA =  np.dot(trROT.T,MA)
            synB =  np.dot(trROT.T,MB)
            
            SYN = np.append(SYN,syn)  
            SYNA = np.append(SYNA,synA)
            SYNB = np.append(SYNB,synB)
            
            
        print np.shape(A), np.shape(np.array([SYN]))    
        if subf == 0: 
            A = np.array([SYN])
            AA = np.array([SYNA])
            AB = np.array([SYNB])
        else:
            A = np.append(A,np.array([SYN]),0)    
            AA = np.append(AA,np.array([SYNA]),0)
            AB = np.append(AB,np.array([SYNB]),0)
            
            
            
    AC = np.vstack((AA,AB))
    print np.shape(AC)
    print np.shape(weight)
    B = np.dot(A.T,weight)
    stsyn = Stream()
    n = 0
    Ntraces= {}
    for trid in trlist: 
        spid = trid.split(".")        
        print trid
        NMIN = int(1. + (TrimmingTimes[trid][1] - TrimmingTimes[trid][0]) / dt)
        Ntraces[trid] = (n, NMIN + n)
        trsyn = Trace(B[n:NMIN + n])
        n += NMIN
        trsyn.stats.network = spid[0]
        trsyn.stats.station = spid[1]
        trsyn.stats.location = spid[2]
        trsyn.stats.channel = spid[3] 
        trsyn = AddNoise(trsyn,level = noiselevel)
        #trsyn.stats.starttime = 
        stsyn.append(trsyn)
        
       
    stsyn.write(workdir+"WPtraces/" + basename + ".decov.trim.mseed",
                 format="MSEED")           
                
    #####################################################    
    # Plotting:
    #####################################################
    #we are going to reflect the y axis later, so:
    print minsubf
    hypsbloc = [minsubf / nsy , -(minsubf % nsy) - 2]

    #Creating the strike and dip axis:
    StrikeAx= np.linspace(0,flen,nsx+1)
    DipAx= np.linspace(0,fwid,nsy+1)
    DepthAx = DipAx*np.sin(np.pi/180.*dip) + Min_h    
    hlstrike = StrikeAx[hypsbloc[0]] + sflen*0.5
        
    hldip = DipAx[hypsbloc[1]] + sfwid*0.5 
    hldepth = DepthAx[hypsbloc[1]] + sfwid*0.5*np.sin(np.pi/180.*dip)
       
    StrikeAx = StrikeAx - hlstrike
    DipAx =     DipAx   - hldip
 

    
    XX, YY = np.meshgrid(StrikeAx, DepthAx)
    XX, ZZ = np.meshgrid(StrikeAx, DipAx )

   
    sbarea = sflen*sfwid
    
    SLIPS = weight.reshape(nsx,nsy).T#[::-1,:]
    SLIPS /= mu*1.e6*sbarea
    
    ######Plot:#####################
    plt.figure()
    ax = host_subplot(111)
    im = ax.pcolor(XX, YY, SLIPS, cmap="jet")    
    ax.set_ylabel('Depth [km]')       
    ax.set_ylim(DepthAx[-1],DepthAx[0])  
    
    # Creating a twin plot 
    ax2 = ax.twinx()
    #im2 = ax2.pcolor(XX, ZZ, SLIPS[::-1,:], cmap="Greys") 
    im2 = ax2.pcolor(XX, ZZ, SLIPS[::-1,:], cmap="jet")    
    
    ax2.set_ylabel('Distance along the dip [km]')
    ax2.set_xlabel('Distance along the strike [km]')    
    ax2.set_ylim(DipAx[0],DipAx[-1])
    ax2.set_xlim(StrikeAx[0],StrikeAx[-1])       
                         
                         
    ax.axis["bottom"].major_ticklabels.set_visible(False) 
    ax2.axis["bottom"].major_ticklabels.set_visible(False)
    ax2.axis["top"].set_visible(True)
    ax2.axis["top"].label.set_visible(True)
    
    
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("bottom", size="5%", pad=0.1)
    cb = plt.colorbar(im, cax=cax, orientation="horizontal")
    cb.set_label("Slip [m]") 
    ax2.plot([0], [0], '*', ms=225./(nsy+4))
    ax2.set_xticks(ax2.get_xticks()[1:-1])
    #ax.set_yticks(ax.get_yticks()[1:])
    #ax2.set_yticks(ax2.get_yticks()[:-1])
    

    
    #########Plotting the selected traces:
    nsp = len(PLotSt) * len(PlotSubf)
    plt.figure(figsize=(13,11))
    plt.title("Synthetics for rake = " + str(round(rake)))
    mindis = []
    maxdis = []
    for i, trid in enumerate(PLotSt):   
        x = np.arange(0,Ntraces[trid][1]-Ntraces[trid][0],
                      dt)
        for j, subf in enumerate(PlotSubf):
            y = A[subf, Ntraces[trid][0]:Ntraces[trid][1]]
            if j == 0:
                yy = y
            else:
                yy = np.vstack((yy,y))        
        maxdis.append(np.max(yy))
        mindis.append(np.min(yy))
        
    

    for i, trid in enumerate(PLotSt):   
        x = np.arange(0,Ntraces[trid][1]-Ntraces[trid][0],
                      dt)

        for j, subf in enumerate(PlotSubf):
            y = A[subf, Ntraces[trid][0]:Ntraces[trid][1]]
            plt.subplot2grid((len(PlotSubf), len(PLotSt)),
                              (j, i))                                
            plt.plot(x,y, linewidth=2.5)
            if j == 0:
                plt.title(trid)
            fig = plt.gca()            
            fig.axes.get_yaxis().set_ticks([])
            fig.set_ylabel(str(subf),rotation=0)
            fig.set_xlim((x[0],x[-1]))
            fig.set_ylim((mindis[i],maxdis[i]))
            if subf != PlotSubf[-1]:
                fig.axes.get_xaxis().set_ticks([])

    
    plt.show()