Example #1
def sum_adjoint_with_weighting(adj_stream, meta_info, weight_dict):
    new_stream = Stream()
    new_meta = {}
    done_comps = []
    # sum using components weight
    for comp, comp_weights in weight_dict.items():
        for chan_id, chan_weight in comp_weights.items():
            if comp not in done_comps:
                done_comps.append(comp)
                adj_tr = adj_stream.select(id=chan_id)[0]
                comp_tr = adj_tr.copy()
                comp_tr.data *= chan_weight
                comp_tr.stats.location = ""
                comp_tr.stats.channel = comp
                new_stream.append(comp_tr)
                new_meta[comp_tr.id] = meta_info[adj_tr.id].copy()
                new_meta[comp_tr.id]["misfit"] = \
                    chan_weight * meta_info[adj_tr.id]["misfit"]
            else:
                adj_tr = adj_stream.select(id=chan_id)[0]
                comp_tr = new_stream.select(channel="*%s" % comp)[0]
                comp_tr.data += chan_weight * adj_tr.data
                new_meta[comp_tr.id]["misfit"] += \
                    chan_weight * meta_info[adj_tr.id]["misfit"]
    return new_stream, new_meta
Example #2
def sum_adj_on_component(adj_stream, weight_dict):
    """
    Sum adjoint sources on different channels of the same component
    together, e.g. "II.AAK.00.BHZ" and "II.AAK.10.BHZ" are combined
    to form "II.AAK.BHZ".

    :param adj_stream: adjoint source stream
    :param weight_dict: weight dictionary, should be something like
        {"Z":{"II.AAK.00.BHZ": 0.5, "II.AAK.10.BHZ": 0.5},
         "R":{"II.AAK.00.BHR": 0.3, "II.AAK.10.BHR": 0.7},
         "T":{"II.AAK..BHT": 1.0}}
    :return: summed adjoint source stream
    """
    new_stream = Stream()
    done_comps = []
    for comp, comp_weights in weight_dict.items():
        for chan_id, chan_weight in comp_weights.items():
            if comp not in done_comps:
                done_comps.append(comp)
                comp_tr = adj_stream.select(id=chan_id)[0]
                comp_tr.data *= chan_weight
                comp_tr.stats.location = ""
            else:
                comp_tr.data += \
                    chan_weight * adj_stream.select(id=chan_id)[0].data
        new_stream.append(comp_tr)
    return new_stream
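A minimal usage sketch for sum_adj_on_component above; the two synthetic traces and the weights are made-up stand-ins that mirror the docstring, not data from the original project:

import numpy as np
from obspy import Stream, Trace

# Two adjoint traces on the same component but different location codes.
tr1 = Trace(data=np.ones(100), header={"network": "II", "station": "AAK",
                                       "location": "00", "channel": "BHZ"})
tr2 = Trace(data=np.ones(100), header={"network": "II", "station": "AAK",
                                       "location": "10", "channel": "BHZ"})
weights = {"Z": {"II.AAK.00.BHZ": 0.5, "II.AAK.10.BHZ": 0.5}}
summed = sum_adj_on_component(Stream([tr1, tr2]), weights)
print(summed[0].id, summed[0].data[0])  # II.AAK..BHZ 1.0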
Example #3
def split2stations(stream, min_len=None, merge_traces=None, keep_masked=False):
    """
    Splits a stream into a list of streams, sorted by the stations inside
    the stream object. Merges traces with the same ID into one trace.

    :param stream:
    :type  stream:
    :param merge_traces: defines if traces should be merged,
                         or if just the longest continuous record is kept.
    :type  merge_traces: bool or None
    """
    stream.sort(['station'])

    stream_list = []
    st_tmp = Stream()

    statname = stream[0].stats.station
    for trace in stream:
        # Collect traces from same station
        if trace.stats.station == statname:
            st_tmp.append(trace)

        else:

            if merge_traces is True:
                try:
                    st_tmp.merge()
                except:
                    st_tmp = keep_longest(st_tmp)
            elif merge_traces is False:
                st_tmp = keep_longest(st_tmp)

            stream_list.append(st_tmp)
            statname = trace.stats.station
            st_tmp = Stream()
            st_tmp.append(trace)

    if merge_traces is True:
        try:
            st_tmp.merge()
        except:
            st_tmp = keep_longest(st_tmp)
    elif merge_traces is False:
        st_tmp = keep_longest(st_tmp)

    stream_list.append(st_tmp)

    if not keep_masked or min_len:
        for station in stream_list:
            station.sort(['channel'])
            for trace in station:
                if type(trace.data) == np.ma.core.MaskedArray:
                    stream_list.remove(station)
                    break

                elif min_len and trace.stats.npts < min_len:
                    stream_list.remove(station)
                    break

    return (stream_list)
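A hedged usage sketch for split2stations, assuming the module-level imports it relies on (numpy as np, obspy's Stream) and the keep_longest helper (shown later in Example #38) are available:

from obspy import read

# ObsPy's bundled example stream holds three channels of a single station,
# so the result is a list containing one merged stream.
station_streams = split2stations(read(), merge_traces=True)
print(len(station_streams), station_streams[0])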
 def timeseries(self, instrcode="NV.ENWF..MH?"):
     network = instrcode.split('.')[0]
     station = instrcode.split('.')[1]
     location = instrcode.split('.')[2]
     channels = instrcode.split('.')[3]
     self.inv = self._inv.select(station=station, location=location, channel=channels)
     chans = []
     for c in ['E', 'N', 'Z']:
         chans.append(channels[:2] + c)
     # An anti-alias (low-pass) filter is applied when decimation is called.
     print("---\nDownloading Seismic Data.")
     st_obs = Stream()
     for channel in chans:
         st = client.timeseries(network=network, 
                                station=station, 
                                location=location, 
                                channel=channel, 
                                starttime=str(st_bpr[0].stats.starttime), 
                                endtime=str(st_bpr[0].stats.endtime + 1.0),
                                filter=["decimate=1.0"])
         for tr in st:
             st_obs.append(tr)
             
     print("---")
     return st_obs
Example #5
def flex_cut_stream(st, cut_start, cut_end, dynamic_npts=0):
    """
    Flexibly cut a stream, while also checking the cut times.

    :param st: input stream
    :param cut_start: cut starttime
    :param cut_end: cut endtime
    :param dynamic_npts: the dynamic number of points before cut_start
        and after cut_end
    :return: the cut stream
    """
    if not isinstance(st, Stream):
        raise TypeError("flex_cut_stream method only accepts obspy.Stream "
                        "the first Argument")
    new_st = Stream()
    count = 0
    for tr in st:
        flex_cut_trace(tr, cut_start, cut_end, dynamic_npts=dynamic_npts)
        # throw out small piece of data at this step
        if tr.stats.starttime <= cut_start and tr.stats.endtime >= cut_end:
            new_st.append(tr)
            count += 1
    if count == 0:
        raise ValueError("None of traces in Stream satisfy the "
                         "cut time length")
    return new_st
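A usage sketch for flex_cut_stream; it assumes the companion flex_cut_trace helper referenced above (not shown here) is defined in the same module:

from obspy import read

# Cut a 10 s window out of ObsPy's bundled 30 s example stream.
st = read()
t0 = st[0].stats.starttime
cut = flex_cut_stream(st, t0 + 5, t0 + 15, dynamic_npts=20)
print(cut)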
Example #6
def order_stream(stream, components=['X', 'Y', 'Z', 'F']):
    '''
    Order all traces in the stream by the orientation
    '''
    from obspy import Stream

    if len(stream) == 0:
        raise ValueError(
            "We cannot reorder the components of an empty stream object")

    nstream = Stream()
    for component in components:
        tstream = stream.select(component=component)
        if not tstream:
            # Copy the header of another trace and change the component
            # The following assumes the channel is always three characters
            stats = stream[0].stats.copy()
            stats.channel = stats.channel[:-1] + component
            stats.npts = 0
            nstream.append(Trace(np.array([]), header=stats))
        elif len(tstream) != 1:
            raise ValueError(
                "The obspy Stream cannot have multiple identical components. "
                "Recommend merging by component."
            )
        else:
            nstream += tstream
    return nstream
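A usage sketch for order_stream, assuming numpy (as np) and obspy's Trace are importable at module level as the function expects; the traces are synthetic:

import numpy as np
from obspy import Stream, Trace

# Two components given out of order; 'Z' and 'F' are missing and get padded
# with empty traces.
tr_y = Trace(data=np.zeros(10), header={"channel": "HHY"})
tr_x = Trace(data=np.zeros(10), header={"channel": "HHX"})
ordered = order_stream(Stream([tr_y, tr_x]))
print([tr.stats.channel for tr in ordered])  # ['HHX', 'HHY', 'HHZ', 'HHF']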
Example #7
    def test_bp_filterbank(self):
        from obspy.signal.freqattributes import cfrequency

        st = Stream()
        fst = Stream()
        data_trace = self.data_trace.copy()
        dt = data_trace.stats.delta
        data_trace.data = np.zeros(400)
        data_trace.data[200] = 1 * 2 * np.pi
        freqmin = 1.0
        freqmax = 4.0
        freq_step = (freqmax - freqmin) / 2.0
        n_bank = 5
        for i in range(n_bank):
            dtrace = data_trace.copy()
            fr_min = freqmin + i * freq_step
            fr_max = freqmax + i * freq_step
            dtrace.filter('bandpass',
                          freqmin=fr_min,
                          freqmax=fr_max,
                          zerophase=True)
            st.append(dtrace)
            ftrace = self.data_trace.copy()
            ftrace.data = np.real(np.convolve(ftrace.data, dtrace.data,
                                              'same'))
            cf = cfrequency(ftrace.data, 1 / dt, 0, 0)
            self.assertTrue(cf > fr_min and cf < fr_max)
 def test_allDataTypesAndEndiansInSingleFile(self):
     """
     Tests all data and endian types into a single file.
     """
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         st1 = Stream()
         data = np.random.randint(-1000, 1000, 500)
         for dtype in ["i2", "i4", "f4", "f8", "S1"]:
             for enc in ["<", ">", "="]:
                 st1.append(Trace(data=data.astype(np.dtype(enc + dtype))))
         # this will raise a UserWarning - ignoring for test
         with warnings.catch_warnings(record=True):
             warnings.simplefilter('ignore', UserWarning)
             st1.write(tempfile, format="MSEED")
             # read everything back (int16 gets converted into int32)
             st2 = read(tempfile)
             for dtype in ["i4", "i4", "f4", "f8", "S1"]:
                 for enc in ["<", ">", "="]:
                     tr = st2.pop(0).data
                     self.assertEqual(tr.dtype.kind +
                                      str(tr.dtype.itemsize),
                                      dtype)
                     # byte order is always native (=)
                     np.testing.assert_array_equal(tr,
                                                   data.astype("=" + dtype))
Example #9
def flex_cut_stream(st, cut_start, cut_end, dynamic_npts=0):
    """
    Flexibly cut a stream, while also checking the cut times.

    :param st: input stream
    :param cut_start: cut starttime
    :param cut_end: cut endtime
    :param dynamic_npts: the dynamic number of points before cut_start
        and after cut_end
    :return: the cut stream
    """
    if not isinstance(st, Stream):
        raise TypeError("flex_cut_stream method only accepts obspy.Stream "
                        "the first Argument")
    new_st = Stream()
    count = 0
    for tr in st:
        flex_cut_trace(tr, cut_start, cut_end, dynamic_npts=dynamic_npts)
        # throw out small piece of data at this step
        if tr.stats.starttime <= cut_start and tr.stats.endtime >= cut_end:
            new_st.append(tr)
            count += 1
    if count == 0:
        raise ValueError("None of traces in Stream satisfy the "
                         "cut time length")
    return new_st
Example #10
def empty_cf(config, st):
    # creates an empty CF stream between starttime and start_t
    st_CF = Stream()
    for station in config.stations:
        for wave_type in config.wave_type:
            tr_CF = st.select(station=station).copy()
            tr_CF = tr_CF.trim(config.starttime,
                               config.starttime + config.start_t)
            tr_CF = tr_CF[0]
            tr_CF.data = np.zeros(tr_CF.data.shape)
            tr_CF.stats.channel = wave_type
            st_CF.append(tr_CF)

    #-----resampling CF if wanted-------------------------------------------
    fs_data = st[0].stats.sampling_rate
    if config.sampl_rate_cf:
        if config.sampl_rate_cf < fs_data:
            # we don't need to resample if there are less than 2 points
            if len(st_CF[0]) < 2:
                for tr_CF in st_CF:
                    tr_CF.stats.sampling_rate = config.sampl_rate_cf
            else:
                st_CF.resample(config.sampl_rate_cf)
    else:
        config.sampl_rate_cf = fs_data

    return st_CF
Example #11
def sum_adjoint_no_weighting(adj_stream, meta_info):
    """
    Add traces of the same component in the adjoint source together
    without extra weighting, i.e., with equal weights.

    :param adj_stream:
    :param meta_info:
    :return:
    """
    new_stream = Stream()
    new_meta = {}
    done_comps = []
    for tr in adj_stream:
        comp = tr.stats.channel[-1]
        # print(comp, done_comps)
        if comp not in done_comps:
            done_comps.append(comp)
            comp_tr = tr.copy()
            comp_tr.stats.location = ""
            comp_tr.stats.channel = "MX" + comp
            new_stream.append(comp_tr)
            new_meta[comp_tr.id] = deepcopy(meta_info[tr.id])
        else:
            comp_tr = new_stream.select(component=comp)[0]
            comp_tr.data += tr.data
            new_meta[comp_tr.id]["misfit"] += meta_info[tr.id]["misfit"]

    return new_stream, new_meta
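A usage sketch, assuming the module-level imports the function needs (obspy's Stream and deepcopy from copy) are in place; the misfit values are made up for illustration:

import numpy as np
from obspy import Stream, Trace

tr1 = Trace(data=np.ones(50), header={"network": "II", "station": "AAK",
                                      "location": "00", "channel": "BHZ"})
tr2 = Trace(data=np.ones(50), header={"network": "II", "station": "AAK",
                                      "location": "10", "channel": "BHZ"})
meta = {tr1.id: {"misfit": 1.0}, tr2.id: {"misfit": 2.0}}
summed, summed_meta = sum_adjoint_no_weighting(Stream([tr1, tr2]), meta)
print(summed[0].id, summed_meta[summed[0].id]["misfit"])  # II.AAK..MXZ 3.0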
Example #12
def sum_adjoint_with_weighting(adj_stream, meta_info, weight_dict):
    new_stream = Stream()
    new_meta = {}
    done_comps = []
    # sum using components weight
    for comp, comp_weights in weight_dict.items():
        for chan_id, chan_weight in comp_weights.items():
            if comp not in done_comps:
                done_comps.append(comp)
                adj_tr = adj_stream.select(id=chan_id)[0]
                comp_tr = adj_tr.copy()
                comp_tr.data *= chan_weight
                comp_tr.stats.location = ""
                comp_tr.stats.channel = comp
                new_stream.append(comp_tr)
                new_meta[comp_tr.id] = meta_info[adj_tr.id].copy()
                new_meta[comp_tr.id]["misfit"] = \
                    chan_weight * meta_info[adj_tr.id]["misfit"]
            else:
                adj_tr = adj_stream.select(id=chan_id)[0]
                comp_tr = new_stream.select(channel="*%s" % comp)[0]
                comp_tr.data += chan_weight * adj_tr.data
                new_meta[comp_tr.id]["misfit"] += \
                    chan_weight * meta_info[adj_tr.id]["misfit"]
    return new_stream, new_meta
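A usage sketch for the weighted variant above; traces, misfits and weights are synthetic stand-ins:

import numpy as np
from obspy import Stream, Trace

tr1 = Trace(data=np.ones(100), header={"network": "II", "station": "AAK",
                                       "location": "00", "channel": "BHZ"})
tr2 = Trace(data=np.ones(100), header={"network": "II", "station": "AAK",
                                       "location": "10", "channel": "BHZ"})
meta = {tr1.id: {"misfit": 1.0}, tr2.id: {"misfit": 3.0}}
weights = {"Z": {tr1.id: 0.25, tr2.id: 0.75}}
new_st, new_meta = sum_adjoint_with_weighting(Stream([tr1, tr2]), meta, weights)
print(new_st[0].id, new_meta[new_st[0].id]["misfit"])  # II.AAK..Z 2.5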
Example #13
def _simple_proc(st, sampling_rate=10, njobs=1):
    """
    A parallel version of `_proc`, i.e. basic processing including
    downsampling, detrending and demeaning.

    :param st: an obspy stream
    :param sampling_rate: expected sampling rate
    :param njobs: number of jobs or CPU to use
    :return st: stream after processing
    """

    # downsampling, detrend, demean
    do_work = partial(_proc, sampling_rate=sampling_rate)

    # trace_list = []
    # for tr in st:
    #     trace_list.append(tr)
    #
    st2 = Stream()
    logging.info("simple processing for full event correlogram.")
    print("simple processing for full event correlogram.")
    if njobs == 1:
        logging.info('do work sequential (%d cores)', njobs)
        for tr in tqdm(st, total=len(st)):
            tr2 = do_work(tr)
            st2.append(tr2)
    else:
        logging.info('do work parallel (%d cores)', njobs)
        pool = multiprocessing.Pool(njobs)
        for tr2 in tqdm(pool.imap_unordered(do_work, st), total=len(st)):
            st2.append(tr2)
        pool.close()
        pool.join()

    return st2
Example #14
def sum_adjoint_no_weighting(adj_stream, meta_info):
    """
    Add traces of the same component in the adjoint source together
    without extra weighting, i.e., with equal weights.

    :param adj_stream:
    :param meta_info:
    :return:
    """
    new_stream = Stream()
    new_meta = {}
    done_comps = []
    for tr in adj_stream:
        comp = tr.stats.channel[-1]
        # print(comp, done_comps)
        if comp not in done_comps:
            done_comps.append(comp)
            comp_tr = tr.copy()
            comp_tr.stats.location = ""
            comp_tr.stats.channel = "MX" + comp
            new_stream.append(comp_tr)
            new_meta[comp_tr.id] = deepcopy(meta_info[tr.id])
        else:
            comp_tr = new_stream.select(component=comp)[0]
            comp_tr.data += tr.data
            new_meta[comp_tr.id]["misfit"] += meta_info[tr.id]["misfit"]

    return new_stream, new_meta
Example #15
def summary_cf(config, st, rec_memory=None):

    # Compute decay constants
    if config.rosenberger_decay_const is not None:
        rosenberger_decay_const = config.rosenberger_decay_const
    else:
        rosenberger_decay_const = config.decay_const
    #
    if config.sigma_gauss:
        sigma_gauss = int(config.sigma_gauss / config.delta)
    else:
        sigma_gauss = int(config.decay_const / config.delta / 2)
    #
    st_CF = Stream()
    for station in config.stations:
        for wave_type in config.wave_type:
            st_select = st.select(station=station)
            tr_CF = st.select(station=station)[0].copy()
            tr_CF.stats.channel = wave_type
            st_CF.append(tr_CF)
            hos_sigma = config['hos_sigma_' + wave_type]
            MBfilter_CF_kwargs = {
                'st': st_select,
                'frequencies': config.frequencies,
                'CN_HP': config.CN_HP,
                'CN_LP': config.CN_LP,
                'filter_norm': config.filter_norm,
                'filter_npoles': config.filter_npoles,
                'var_w': config.win_type,
                'CF_type': config.ch_function,
                'CF_decay_win': config.decay_const,
                'hos_order': config.hos_order,
                'rosenberger_decay_win': rosenberger_decay_const,
                'rosenberger_filter_power': config.rosenberger_filter_power,
                'rosenberger_filter_threshold':
                config.rosenberger_filter_threshold,
                'rosenberger_normalize_each':
                config.rosenberger_normalize_each,
                'wave_type': wave_type,
                'hos_sigma': hos_sigma[station],
                'rec_memory': rec_memory
            }
            HP2, CF, Tn2, Nb2 = MBfilter_CF(**MBfilter_CF_kwargs)

            if config.ch_function == 'envelope':
                tr_CF.data = np.sqrt(np.power(CF, 2).mean(axis=0))
            if config.ch_function == 'kurtosis':
                kurt_argmax = np.amax(CF, axis=0)
                tr_CF.data = GaussConv(kurt_argmax, sigma_gauss)

    #-----resampling CF if wanted-------------------------------------------
    fs_data = st[0].stats.sampling_rate
    if config.sampl_rate_cf:
        if config.sampl_rate_cf < fs_data:
            st_CF.resample(config.sampl_rate_cf)
    else:
        config.sampl_rate_cf = fs_data

    return st_CF
Example #16
class TemporarySDSDirectory(object):
    """
    Handles creation and deletion of a temporary SDS directory structure.
    To be used with "with" statement.
    """
    sampling_rate = 0.1
    networks = ("AB", "CD")
    stations = ("XYZ", "ZZZ3")
    locations = ("", "00")
    channels = ("HHZ", "HHN", "HHE", "BHZ", "BHN", "BHE")

    def __init__(self, year, doy, time=None):
        """
        Set which day's midnight (00:00 hours) is used as a day break in the
        testing (to split the test data into two files).

        If `time` is specified it overrides `year` and `doy`.
        """
        if time:
            self.time = time
        else:
            self.time = UTCDateTime("%d-%03dT00:00:00" % (year, doy))
        delta = 1.0 / self.sampling_rate

        self.stream = Stream()
        for net in self.networks:
            for sta in self.stations:
                for loc in self.locations:
                    for cha in self.channels:
                        tr = Trace(
                            data=np.arange(100, dtype=np.int32),
                            header=dict(
                                network=net, station=sta, location=loc,
                                channel=cha, sampling_rate=self.sampling_rate,
                                starttime=self.time - 30 * delta))

                        # cut into two seamless traces
                        tr1 = tr.slice(endtime=self.time + 5 * delta)
                        tr2 = tr.slice(starttime=self.time + 6 * delta)
                        self.stream.append(tr1)
                        self.stream.append(tr2)

    def __enter__(self):
        self.old_dir = os.getcwd()
        self.tempdir = tempfile.mkdtemp(prefix='obspy-sdstest-')
        for tr_ in self.stream:
            t_ = tr_.stats.starttime
            full_path = SDS_FMTSTR.format(year=t_.year, doy=t_.julday,
                                          sds_type="D", **tr_.stats)
            full_path = os.path.join(self.tempdir, full_path)
            dirname, filename = os.path.split(full_path)
            if not os.path.isdir(dirname):
                os.makedirs(dirname)
            tr_.write(full_path, format="MSEED")
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):  # @UnusedVariable
        os.chdir(self.old_dir)
        shutil.rmtree(self.tempdir)
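A usage sketch for the context manager above, assuming its module-level dependencies (os, shutil, tempfile, numpy, obspy, and SDS_FMTSTR, e.g. from obspy.clients.filesystem.sds) are present:

import os

with TemporarySDSDirectory(year=2015, doy=247) as temp_sds:
    # Count the MiniSEED files written into the temporary SDS tree.
    n_files = sum(len(files) for _, _, files in os.walk(temp_sds.tempdir))
    print("MiniSEED files written:", n_files)
# the temporary SDS tree is removed again on exit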
Example #17
def list2stream(list):

    stream = Stream()
    for station in list:
        for trace in station:
            stream.append(trace)

    return stream
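A round-trip sketch, reusing split2stations from Example #3 above to build the input list:

from obspy import read

stations = split2stations(read(), merge_traces=True)
st = list2stream(stations)
print(st)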
Example #18
def get_stream_days(station, channel, first_day, num_days, fs, gain=1e18, wf_dir=None):
    Logger.info("Getting data stream for station %s, channel %s..." % (station, channel))

    if not wf_dir:
        wf_dir = DEFAULT_WF_DIR
    Logger.info("Getting data stream from directory %s..." % (wf_dir))
    
    days = [first_day + n*24*3600 for n in range(num_days)]
    
    st = Stream()
    for day in days:
        #daystr = day.strftime("%Y%m%d")
        #path_search = os.path.join(WF_DIR, daystr, "*.%s*%s*" % (station, channel))
        daystr = day.strftime("%j")
        year = day.strftime("%Y")
        path_search = os.path.join(wf_dir, year, daystr, "*.%s..%s*" % (station, channel))

        Logger.info("Looking for data for day %s" % daystr)
        file_list = glob(path_search)
        
        if len(file_list) > 0:
            for file in file_list:
                Logger.info("Reading file %s" % file)
                tmp = read(file)
                tmp.merge(method=1)
                if len(tmp) > 1:
                    raise ValueError("More than one trace read from file, that's weird...")
                if tmp[0].stats.sampling_rate != fs:
                    tmp.resample(fs)
                st.append(tmp[0])
        else:
            Logger.info("No data found for day %s" % day)
            Logger.info("\t\tSearch string was: %s" % path_search)
    
    # Merge
    Logger.info("Merging stream")
    st.merge(method=1, fill_value=0)
    Logger.info(st)
    
    # Fill gaps with noise
    st = fill_time_gaps_noise(st)

    # Convert to nm/s
    trace = st[0]
    trace.data *= gain

    # High-pass at 2 Hz
    trace.filter("highpass", freq=2)
    
    Logger.info("Final trace: ")
    Logger.info(trace)
    
    Logger.info("\tFinal Stream:")
    Logger.info("\tSampling rate: %f" % fs)
    Logger.info("\tStart time: %s" % trace.stats.starttime.strftime("%Y-%m-%d %H:%M:%S"))
    Logger.info("\tEnd time: %s" % trace.stats.endtime.strftime("%Y-%m-%d %H:%M:%S"))

    return trace
 def test_SavingSmallASCII(self):
     """
     Tests writing small ASCII strings.
     """
     st = Stream()
     st.append(Trace(data=np.frombuffer(b"A" * 8, dtype="|S1")))
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         st.write(tempfile, format="MSEED")
Example #20
    def write(self, filename):
        """
            writes to segy file

            :param filename: file name
            :note: the function uses the `obspy` module.
            """

        if not OBSPY_AVAILABLE:
            raise RuntimeError("This feature (SimpleSEGYWriter.write())"+\
                    " depends on obspy, which is not installed, see "+\
                    "https://github.com/obspy/obspy for install guide")

        if getMPISizeWorld() > 1:
            raise RuntimeError("Writing segy files with multiple ranks is"+\
                    " not yet supported.")

        stream = Stream()

        for i in range(len(self.__receiver_group)):
            trace = Trace(data=np.array(self.__trace[i], dtype='float32'))
            # Attributes in trace.stats will overwrite everything in
            # trace.stats.segy.trace_header (in Hz)
            trace.stats.sampling_rate = 1. / self.getSamplingInterval()
            #trace.stats.starttime = UTCDateTime(2011,11,11,0,0,0)
            if not hasattr(trace.stats, 'segy.trace_header'):
                trace.stats.segy = {}
            trace.stats.segy.trace_header = SEGYTraceHeader()
            trace.stats.segy.trace_header.trace_identification_code = 1
            trace.stats.segy.trace_header.trace_sequence_number_within_line = i + 1
            trace.stats.segy.trace_header.scalar_to_be_applied_to_all_coordinates = -int(
                self.COORDINATE_SCALE)
            trace.stats.segy.trace_header.coordinate_units = 1
            trace.stats.segy.trace_header.source_coordinate_x = int(
                self.__source[0] * self.COORDINATE_SCALE)
            trace.stats.segy.trace_header.source_coordinate_y = int(
                self.__source[1] * self.COORDINATE_SCALE)
            trace.stats.segy.trace_header.group_coordinate_x = int(
                self.__receiver_group[i][0] * self.COORDINATE_SCALE)
            trace.stats.segy.trace_header.group_coordinate_y = int(
                self.__receiver_group[i][1] * self.COORDINATE_SCALE)

            # Add trace to stream
            stream.append(trace)
        # A SEGY file has file wide headers. This can be attached to the stream
        # object.  If these are not set, they will be autocreated with default
        # values.
        stream.stats = AttribDict()
        stream.stats.textual_file_header = 'C.. ' + self.__text + '\nC.. with esys.escript.downunder r%s\nC.. %s' % (
            getVersion(), time.asctime())
        stream.stats.binary_file_header = SEGYBinaryFileHeader()

        if getMPIRankWorld() < 1:
            stream.write(filename,
                         format="SEGY",
                         data_encoding=1,
                         byteorder=sys.byteorder)
Example #21
def process_wf_eve(eve, inv, st, proc_params):
    st_proc = Stream()
    for i, net in enumerate(inv):
        for sta in inv[i].stations:
            st_sta = st.copy().select(network=net.code, station=sta.code)
            st_sta.merge(method=0, fill_value='latest')
            for tr in st_sta:
                tr_proc = process_wf_tr(tr, eve, sta, proc_params)
                st_proc.append(tr_proc)
    return st_proc
Example #22
def savesegy(DASdata, filename):
    stream = Stream()
    for i in range(DASdata.data.shape[0]):
        data = DASdata.data[i, :]
        DASdt = np.median(np.diff(DASdata.taxis))
        if DASdt < 0.001:
            dt = 0.001
            data = gjsignal.lpfilter(data, DASdt, 500)
            oldtaxis = np.arange(len(data)) * DASdt
            newtaxis = np.arange(0, oldtaxis[-1], dt)
            data = np.interp(newtaxis, oldtaxis, data)
        else:
            dt = DASdt

        data = np.require(data, dtype=np.float32)
        trace = Trace(data=data)

        # Attributes in trace.stats will overwrite everything in
        # trace.stats.segy.trace_header
        trace.stats.delta = dt
        # SEGY does not support microsecond precision! Any microseconds will
        # be discarded.
        trace.stats.starttime = UTCDateTime(DASdata.start_time)

        # If you want to set some additional attributes in the trace header,
        # add one and only set the attributes you want to be set. Otherwise the
        # header will be created for you with default values.
        if not hasattr(trace.stats, 'segy.trace_header'):
            trace.stats.segy = {}
        trace.stats.segy.trace_header = SEGYTraceHeader()
        trace.stats.segy.trace_header.trace_sequence_number_within_line = DASdata.chans[
            i]
        trace.stats.segy.trace_header.x_coordinate_of_ensemble_position_of_this_trace = int(
            DASdata.mds[i] * 1000)  # in millimeter
        trace.stats.segy.trace_header.lag_time_A = DASdata.start_time.microsecond

        # Add trace to stream
        stream.append(trace)

    # A SEGY file has file wide headers. This can be attached to the stream
    # object.  If these are not set, they will be autocreated with default
    # values.
    stream.stats = AttribDict()
    stream.stats.textual_file_header = 'Textual Header!'
    stream.stats.binary_file_header = SEGYBinaryFileHeader()
    stream.stats.binary_file_header.trace_sorting_code = 5

    print(stream)
    stream.write(filename,
                 format='SEGY',
                 data_encoding=1,
                 byteorder=sys.byteorder)
Example #23
def convert_adjs_to_stream(adjsrcs):
    """
    Convert adjoint sources into a stream, so that all ObsPy
    tools are available for processing. Returns the adjoint
    stream and the associated metadata.
    """
    meta_info = {}
    adj_stream = Stream()
    for adj in adjsrcs:
        _tr, _meta = _convert_adj_to_trace(adj)
        adj_stream.append(_tr)
        meta_info[_tr.id] = _meta
    return adj_stream, meta_info
Example #24
def convert_adjs_to_stream(adjsrcs):
    """
    Convert adjoint sources into a stream, so that all ObsPy
    tools are available for processing. Returns the adjoint
    stream and the associated metadata.
    """
    meta_info = {}
    adj_stream = Stream()
    for adj in adjsrcs:
        _tr, _meta = convert_adj_to_trace(adj)
        adj_stream.append(_tr)
        meta_info[_tr.id] = _meta
    return adj_stream, meta_info
Example #25
def _stack(path, **kwargs):

    files = glob.glob(path + "/*")
    # stations = glob.glob(path + "/*")
    # print(stations)

    # for sta in stations:
    #     files = glob.glob(sta + "/*")

    st = Stream()
    for file in files:
        try:
            tr = read(file)[0]
        except:
            continue
        st.append(tr)

    # get channels
    channels = []
    for tr in st:
        chn = tr.stats.channel
        if chn not in channels:
            channels.append(chn)

    method = kwargs["stack"]["method"]
    power = kwargs["stack"]["power"]
    outpath = kwargs["io"]["outpath"] + "/1a_stack"
    try:
        os.makedirs(outpath)
    except:
        pass

    stream = st.copy()
    for chn in channels:

        st = stream.select(channel=chn)

        if method == "linear":
            tr = linear_stack(st, normalize=True)
        elif method == "PWS":
            tr = pws_stack(st, power, normalize=True)
        elif method == "bootstrap_linear" or method == "bootstrap_PWS":
            # note tr here is a stream containing two traces, mean and std
            tr = _bootstrap(st, normalize=True, **kwargs)

        filen = outpath + "/" + tr.id + "_%s.pkl" % method
        tr.write(filename=filen, format="PICKLE")

    return 0
Example #26
def sum_adj_on_component(adj_stream, weight_flag, weight_dict=None):
    """
    Sum adjoint sources on different channels of the same component
    together, e.g. "II.AAK.00.BHZ" and "II.AAK.10.BHZ" are combined
    to form "II.AAK.BHZ".

    :param adj_stream: adjoint source stream
    :param weight_flag: whether to weight each channel using weight_dict
    :param weight_dict: weight dictionary, should be something like
        {"Z":{"II.AAK.00.BHZ": 0.5, "II.AAK.10.BHZ": 0.5},
         "R":{"II.AAK.00.BHR": 0.3, "II.AAK.10.BHR": 0.7},
         "T":{"II.AAK..BHT": 1.0}}
    :return: summed adjoint source stream
    """
    if weight_dict is None:
        raise ValueError("weight_dict should be assigned if you want"
                         "to add")

    new_stream = Stream()
    done_comps = []

    if not weight_flag:
        # just add same components without weight
        for tr in adj_stream:
            comp = tr.stats.channel[-1]
            if comp not in done_comps:
                done_comps.append(comp)
                comp_tr = tr
                comp_tr.stats.location = ""
                new_stream.append(comp_tr)
            else:
                comp_tr = new_stream.select(channel="*%s" % comp)[0]
                comp_tr.data += tr.data
    else:
        # sum using components weight
        for comp, comp_weights in weight_dict.items():
            for chan_id, chan_weight in comp_weights.items():
                if comp not in done_comps:
                    done_comps.append(comp)
                    comp_tr = adj_stream.select(id=chan_id)[0]
                    comp_tr.data *= chan_weight
                    comp_tr.stats.location = ""
                    comp_tr.stats.channel = comp
                    new_stream.append(comp_tr)
                else:
                    comp_tr = new_stream.select(channel="*%s" % comp)[0]
                    comp_tr.data += \
                        chan_weight * adj_stream.select(id=chan_id)[0].data

    return new_stream
Example #27
        def write(self, filename):
            """
            writes to segy file

            :param filename: file name
            :note: the function uses the `obspy` module.
            """

            if not OBSPY_AVAILABLE:
                raise RuntimeError("This feature (SimpleSEGYWriter.write())"+\
                        " depends on obspy, which is not installed, see "+\
                        "https://github.com/obspy/obspy for install guide")

            if getMPISizeWorld() > 1:
                raise RuntimeError("Writing segy files with multiple ranks is"+\
                        " not yet supported.")

            stream=Stream()

            for i in range(len(self.__receiver_group)):
                    trace = Trace(data=np.array(self.__trace[i], dtype='float32'))
                    # Attributes in trace.stats will overwrite everything in
                    # trace.stats.segy.trace_header (in Hz)
                    trace.stats.sampling_rate = 1./self.getSamplingInterval()
                    #trace.stats.starttime = UTCDateTime(2011,11,11,0,0,0)
                    if not hasattr(trace.stats, 'segy.trace_header'):
                        trace.stats.segy = {}
                    trace.stats.segy.trace_header = SEGYTraceHeader()
                    trace.stats.segy.trace_header.trace_identification_code=1
                    trace.stats.segy.trace_header.trace_sequence_number_within_line = i + 1
                    trace.stats.segy.trace_header.scalar_to_be_applied_to_all_coordinates = -int(self.COORDINATE_SCALE)
                    trace.stats.segy.trace_header.coordinate_units=1
                    trace.stats.segy.trace_header.source_coordinate_x=int(self.__source[0] * self.COORDINATE_SCALE)
                    trace.stats.segy.trace_header.source_coordinate_y=int(self.__source[1] * self.COORDINATE_SCALE)
                    trace.stats.segy.trace_header.group_coordinate_x=int(self.__receiver_group[i][0] * self.COORDINATE_SCALE)
                    trace.stats.segy.trace_header.group_coordinate_y=int(self.__receiver_group[i][1] * self.COORDINATE_SCALE)

                    # Add trace to stream
                    stream.append(trace)
            # A SEGY file has file wide headers. This can be attached to the stream
            # object.  If these are not set, they will be autocreated with default
            # values.
            stream.stats = AttribDict()
            stream.stats.textual_file_header = 'C.. '+self.__text+'\nC.. with esys.escript.downunder r%s\nC.. %s'%(getVersion(),time.asctime())
            stream.stats.binary_file_header = SEGYBinaryFileHeader()

            if getMPIRankWorld()<1:
                stream.write(filename, format="SEGY", data_encoding=1,byteorder=sys.byteorder)
Example #28
def ReadGcfFile(gcf_file):
    # read blocks of 1024 bytes into separate traces, then join them
    gcf_block = GCFBlock()
    st = Stream()
    max_size = 1024
    with open(gcf_file, 'rb') as f:
        while True:
            buf = f.read(_gcf_block_size)
            if not buf:
                break
            gcf_block.ParseBlockMemory(buf)
            st.append(MakeGcfTrace(gcf_block))
            if gcf_block.header['is_status']:
                print("status")
            
    st.merge()
    return st
Example #29
def read_segd(filename):
    fp = open(filename, 'rb')
    generalh = _read_ghb1(fp)
    generalh.update(_read_ghb2(fp))
    generalh.update(_read_ghb3(fp))
    sch = {}
    for n in range(generalh['n_channel_sets_per_record']):
        try:
            _sch = _read_sch(fp)
        except SEGDScanTypeError:
            continue
        sch[_sch['channel_set_number']] = _sch
    size = generalh['extended_header_length']*32
    extdh = _read_extdh(fp, size)
    ext_hdr_lng = generalh['external_header_length']
    if ext_hdr_lng == 0xFF:
        ext_hdr_lng = generalh['external_header_blocks']
    size = ext_hdr_lng*32
    extrh = _read_extrh(fp, size)
    sample_rate = extdh['sample_rate_in_us']/1e6
    npts = extdh['number_of_samples_in_trace']
    size = npts
    st = Stream()
    convert_to_int = True
    for n in range(extdh['total_number_of_traces']):
        traceh, data = _read_trace_data_block(fp, size)
        # check if all traces can be converted to int
        convert_to_int = convert_to_int and np.all(np.mod(data, 1) == 0)
        # _print_dict(traceh, '***TRACEH:')
        tr = Trace(data)
        tr.stats.station = str(traceh['unit_serial_number'])
        tr.stats.channel = _band_code(1./sample_rate)
        tr.stats.channel += _instrument_orientation_code[traceh['sensor_code']]
        tr.stats.delta = sample_rate
        tr.stats.starttime = generalh['time']
        tr.stats.segd = _build_segd_header(generalh, sch, extdh, extrh, traceh)
        st.append(tr)
    fp.close()
    # for n, _sch in sch.iteritems():
    #     _print_dict(_sch, '***SCH %d:' % n)
    # _print_dict(extdh, '***EXTDH:')
    # print('***EXTRH:\n %s' % extrh)
    # _print_dict(generalh, '***GENERALH:')
    if convert_to_int:
        for tr in st:
            tr.data = tr.data.astype(np.int32)
    return st
Example #30
def sum_adj_on_component(adj_stream, weight_flag, weight_dict=None):
    """
    Sum adjoint sources on different channels of the same component
    together, e.g. "II.AAK.00.BHZ" and "II.AAK.10.BHZ" are combined
    to form "II.AAK.BHZ".

    :param adj_stream: adjoint source stream
    :param weight_flag: whether to weight each channel using weight_dict
    :param weight_dict: weight dictionary, should be something like
        {"Z":{"II.AAK.00.BHZ": 0.5, "II.AAK.10.BHZ": 0.5},
         "R":{"II.AAK.00.BHR": 0.3, "II.AAK.10.BHR": 0.7},
         "T":{"II.AAK..BHT": 1.0}}
    :return: summed adjoint source stream
    """
    if weight_dict is None:
        raise ValueError("weight_dict should be assigned if you want" "to add")

    new_stream = Stream()
    done_comps = []

    if not weight_flag:
        # just add same components without weight
        for tr in adj_stream:
            comp = tr.stats.channel[-1]
            if comp not in done_comps:
                done_comps.append(comp)
                comp_tr = tr
                comp_tr.stats.location = ""
                new_stream.append(comp_tr)
            else:
                comp_tr = new_stream.select(channel="*%s" % comp)[0]
                comp_tr.data += tr.data
    else:
        # sum using components weight
        for comp, comp_weights in weight_dict.items():
            for chan_id, chan_weight in comp_weights.items():
                if comp not in done_comps:
                    done_comps.append(comp)
                    comp_tr = adj_stream.select(id=chan_id)[0]
                    comp_tr.data *= chan_weight
                    comp_tr.stats.location = ""
                    comp_tr.stats.channel = comp
                    new_stream.append(comp_tr)
                else:
                    comp_tr = new_stream.select(channel="*%s" % comp)[0]
                    comp_tr.data += \
                        chan_weight * adj_stream.select(id=chan_id)[0].data

    return new_stream
Example #31
def interpolate_stream(stream, sampling_rate, starttime=None, npts=None):
    """
    For a fairly large stream, using stream.interpolate() is not a wise
    choice, since if one trace fails the whole interpolation stops. So
    it is better to operate the interpolation on the trace level.
    """
    st_new = Stream()
    if not isinstance(stream, Stream):
        raise TypeError("Input stream must be type of obspy.Stream")
    for tr in stream:
        try:
            tr.interpolate(sampling_rate, starttime=starttime, npts=npts)
            st_new.append(tr)
        except ValueError as err:
            print("Error in interpolation on '%s':%s" % (tr.id, err))
    return st_new
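A usage sketch; ObsPy's bundled 100 Hz example stream is downsampled trace by trace:

from obspy import read

st_new = interpolate_stream(read(), sampling_rate=20.0)
print(st_new)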
Example #32
def interpolate_stream(stream, sampling_rate, starttime=None, npts=None):
    """
    For a fairly large stream, using stream.interpolate() is not a wise
    choice, since if one trace fails the whole interpolation stops. So
    it is better to operate the interpolation on the trace level.
    """
    st_new = Stream()
    if not isinstance(stream, Stream):
        raise TypeError("Input stream must be type of obspy.Stream")
    for tr in stream:
        try:
            tr.interpolate(sampling_rate, starttime=starttime, npts=npts)
            st_new.append(tr)
        except ValueError as err:
            print("Error in interpolation on '%s':%s" % (tr.id, err))
    return st_new
Example #33
def get_waveforms(st, event, waveform_type, units):
    """
    Get real or simulated waveforms for a Stream.

    Parameters
    ----------
    st : `obspy.Stream` object
        Stream for which to get real or simulated waveforms.
    event : :class:`~quakemigrate.io.event.Event` object
        Light class encapsulating waveforms, coalescence information, picks and
        location information for a given event.
    waveform_type : {"real", "wa"}
        Whether to get real or Wood-Anderson simulated waveforms.
    units : {"displacement", "velocity"}
        Units to return waveforms in.

    Returns
    -------
    st_out : `obspy.Stream` object
        Stream of real or Wood-Anderson simulated waveforms in the requested
        units.

    """

    # Work on a copy
    st = st.copy()
    st_out = Stream()

    velocity = True if units == "velocity" else False

    for tr in st:
        # Check there is data present
        if bool(tr) and tr.data.max() != tr.data.min():
            try:
                if waveform_type == "real":
                    tr = event.data.get_real_waveform(tr, velocity)
                else:
                    tr = event.data.get_wa_waveform(tr, velocity)
                st_out.append(tr)
            except (util.ResponseNotFoundError,
                    util.ResponseRemovalError) as e:
                logging.warning(e)

    return st_out
Example #34
def _sliding_autocorrelation(tr, length=3600, overlap=1800,
                             filter=[0.5, 4], corners=2, zerophase=True):
    """
    Sliding autocorrelation for noise data.

    :param tr:
    :param length:
    :param overlap:
    :param filter:
    :param corners:
    :param zerophase:
    :return:
    """
    trace = tr.copy()
    time_series = iter_time(tr=trace, length=length, overlap=overlap)
    # print(time_series)
    if len(time_series) < 1:
        return 0

    st = Stream()
    for t1, t2 in time_series:
        tr2 = trace.copy()
        tr2.trim(starttime=t1, endtime=t2)

        # get gaps
        gap_st = Stream([tr2])
        gaps = gap_st.get_gaps()
        if len(gaps) > 0:
            continue

        npts = tr2.stats.npts
        data = tr2.data
        cc = correlate(in1=data, in2=data, mode="full")
        tr2.data = np.copy(cc)
        tr2.filter(type="bandpass", freqmin=filter[0], freqmax=filter[1],
                   corners=corners, zerophase=zerophase)
        tr2.data = tr2.data[npts:]
        st.append(tr2)

    if len(st) < 1:
        return 0
    else:
        return st
Example #35
def get_ref_data(stream,
                 inv,
                 model='ak135f_1s',
                 eventid=None,
                 origins=None,
                 m_tensor=None,
                 source_dc=None):

    ref_stream = Stream()

    rlats = []
    rlons = []
    geom = geometrical_center(inv)
    d, az, baz = gps2dist_azimuth(origins.latitude, origins.longitude,
                                  geom.latitude, geom.longitude)
    for i, trace in enumerate(stream):
        dist = degrees2kilometers(trace.stats.distance) * 1000.

        rlat, rlon = dist_azimuth2gps(origins.latitude, origins.longitude, az,
                                      dist)
        if rlon > 180:
            rlon = 180. - rlon

        print(rlat, rlon)
        rlats.append(rlat)
        rlons.append(rlon)
        print('Receiving trace %i of %i.' % (i + 1, len(stream)))
        stream_tmp, cat_void = get_syngine_data(model,
                                                reclat=rlats,
                                                reclon=rlons,
                                                eventid=eventid,
                                                origins=origins,
                                                m_tensor=m_tensor,
                                                source_dc=source_dc)

        trace_tmp = stream_tmp[0].copy()
        trace_tmp.stats.station = trace.stats.station
        trace_tmp.stats.starttime = trace.stats.starttime
        trace_tmp.stats.distance = trace.stats.distance
        ref_stream.append(trace_tmp)

    return ref_stream
def eqt_continue_sac2mseed(input_dir, output_dir):
    if os.path.isdir(output_dir):
        print('============================================================================')
        print(f' *** {output_dir} already exists!')
        inp = input(" --> Type (Yes or y) to create a new empty directory! otherwise it will overwrite!   ")
        if inp.lower() == "yes" or inp.lower() == "y":
            shutil.rmtree(output_dir)
    os.makedirs(os.path.join(output_dir, 'mseed_xfj'), exist_ok=True)
    trn = 0
    csv_file = open(os.path.join(output_dir, "fname.csv"), 'w', newline='')
    output_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    output_writer.writerow(['fname', 'E', 'N', 'Z'])
    for root, dirs, files in os.walk(input_dir, topdown=True):
        dirs.sort()
        if len(files) != 0:
            st = Stream()
            datastr = root.split('/')[-1].split('.')[0]
            utc_start = UTCDateTime(datastr) - 8 * 3600
            if utc_start < UTCDateTime('2021/05/21/') - 8 * 3600:
                continue
        for f in sorted(files):
            tr = read(os.path.join(root, f))
            st.append(tr[0])
        if len(files) != 0:
            nets = []
            stas = []
            st.merge()
            st.trim(utc_start, utc_start + 3600, pad=True, fill_value=0)
            for i in range(len(st)):
                net = st[i].stats.network
                sta = st[i].stats.station
                receiver_type = st[i].stats.channel[:-1]
                if net in nets and sta in stas:
                    continue
                else:
                    nets.append(net)
                    stas.append(sta)
                    fname = datastr + f'.{net}.{sta}.mseed'
                    now_st = st.select(network=net, station=sta)
                    now_st.write(os.path.join(output_dir, 'mseed_xfj', fname), format='MSEED')
                    output_writer.writerow([fname, receiver_type + 'E', receiver_type + 'N', receiver_type + 'Z'])
                    csv_file.flush()
Example #37
def get_stream_1day(station, channel, starttime, endtime, fs, gain=1e18, wf_dir=None):
    Logger.info("Getting data stream for station %s, channel %s..." % (station, channel))

    if not wf_dir:
        wf_dir = DEFAULT_WF_DIR
    Logger.info("Getting data stream from directory %s..." % (wf_dir))
    
    #day = starttime.strftime("%Y%m%d")
    #path_search = os.path.join(WF_DIR, day, "*.%s*%s*" % (station, channel))
    day = starttime.strftime("%j")
    year = starttime.strftime("%Y")
    path_search = os.path.join(wf_dir, year, day, "*.%s..%s*" % (station, channel))
    
    file_list = glob(path_search)
    st = Stream()
    if len(file_list) > 0:
        for file in file_list:
            Logger.info("Reading file %s" % file)
            tmp = read(file, starttime=starttime, endtime=endtime)
            if len(tmp) > 1:
                raise ValueError("More than one trace read from file, that's weird...")
            if tmp[0].stats.sampling_rate != fs:
                tmp.resample(fs)
            st.append(tmp[0])
    else:
        Logger.info("No data found for day %s" % day)
        Logger.info("\t\tSearch string was: %s" % path_search)

    # Fill gaps with noise
    st = fill_time_gaps_noise(st)

    # Convert to nm/s
    trace = st[0]
    trace.data *= gain

    Logger.info("\tFinal Stream:")
    Logger.info("\tSampling rate: %f" % fs)
    Logger.info("\tStart time: %s" % trace.stats.starttime.strftime("%Y-%m-%d %H:%M:%S"))
    Logger.info("\tEnd time: %s" % trace.stats.endtime.strftime("%Y-%m-%d %H:%M:%S"))

    return trace
Example #38
def keep_longest(stream):
    """
    keeps the longest record of each channel
    """

    st_tmp = Stream()
    # sort so that the longest record of each channel comes first
    stream.sort(['npts'], reverse=True)
    channels = AttribDict()

    for i, tr in enumerate(stream):

        if tr.stats.channel in channels:
            continue
        else:
            # Record the channel name, number of samples and trace index
            channels[tr.stats.channel] = [tr.stats.npts, i]
            st_tmp.append(stream[i])

    stream = st_tmp

    return stream
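A usage sketch, assuming AttribDict (e.g. from obspy.core) is importable as the function expects; of two records on the same channel, only the longer one survives:

import numpy as np
from obspy import Stream, Trace

short = Trace(data=np.zeros(100), header={"channel": "HHZ"})
longer = Trace(data=np.zeros(500), header={"channel": "HHZ"})
st = keep_longest(Stream([short, longer]))
print(len(st), st[0].stats.npts)  # 1 500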
Example #39
    def test_bp_filterbank(self):
        from obspy.signal.freqattributes import cfrequency

        st = Stream()
        fst = Stream()
        data_trace = self.data_trace.copy()
        dt = data_trace.stats.delta
        data_trace.data = np.zeros(400)
        data_trace.data[200] = 1 * 2 * np.pi
        freqmin = 1.0
        freqmax = 4.0
        freq_step = (freqmax - freqmin) / 2.0
        n_bank = 5
        for i in range(n_bank):
            dtrace = data_trace.copy()
            fr_min = freqmin + i * freq_step
            fr_max = freqmax + i * freq_step
            dtrace.filter('bandpass', freqmin=fr_min, freqmax=fr_max,
                          zerophase=True)
            st.append(dtrace)
            ftrace = self.data_trace.copy()
            ftrace.data = np.real(np.convolve(ftrace.data, dtrace.data, 'same'))
            cf = cfrequency(ftrace.data, 1 / dt, 0, 0)
            self.assertTrue(cf > fr_min and cf < fr_max)
Example #40
def _read_asc(filename, headonly=False, skip=0, delta=None, length=None,
              **kwargs):  # @UnusedVariable
    """
    Reads a Seismic Handler ASCII file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :type skip: int, optional
    :param skip: Number of lines to be skipped from top of file. If defined
        only one trace is read from file.
    :type delta: float, optional
    :param delta: If ``skip`` is used, ``delta`` defines sample offset in
        seconds.
    :type length: int, optional
    :param length: If ``skip`` is used, ``length`` defines the number of values
        to be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/QFILE-TEST-ASC.ASC")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    3 Trace(s) in Stream:
    .TEST..BHN | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .TEST..BHE | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .WET..HHZ  | 2010-01-01T01:01:05.999000Z - ... | 100.0 Hz, 4001 samples
    """
    fh = open(filename, 'rt')
    # read file and split text into channels
    channels = []
    headers = {}
    data = io.StringIO()
    for line in fh.readlines()[skip:]:
        if line.isspace():
            # blank line
            # check if any data fetched yet
            if len(headers) == 0 and data.tell() == 0:
                continue
            # append current channel
            data.seek(0)
            channels.append((headers, data))
            # create new channel
            headers = {}
            data = io.StringIO()
            if skip:
                # if skip is set only one trace is read, everything else makes
                # no sense.
                break
            continue
        elif line[0].isalpha():
            # header entry
            key, value = line.split(':', 1)
            key = key.strip()
            value = value.strip()
            headers[key] = value
        elif not headonly:
            # data entry - may be written in multiple columns
            data.write(line.strip() + ' ')
    fh.close()
    # create ObsPy stream object
    stream = Stream()
    # custom header
    custom_header = {}
    if delta:
        custom_header["delta"] = delta
    if length:
        custom_header["npts"] = length

    for headers, data in channels:
        # create Stats
        header = Stats(custom_header)
        header['sh'] = {}
        channel = [' ', ' ', ' ']
        # generate headers
        for key, value in headers.items():
            if key == 'DELTA':
                header['delta'] = float(value)
            elif key == 'LENGTH':
                header['npts'] = int(value)
            elif key == 'CALIB':
                header['calib'] = float(value)
            elif key == 'STATION':
                header['station'] = value
            elif key == 'COMP':
                channel[2] = value[0]
            elif key == 'CHAN1':
                channel[0] = value[0]
            elif key == 'CHAN2':
                channel[1] = value[0]
            elif key == 'START':
                # 01-JAN-2009_01:01:01.0
                # 1-OCT-2009_12:46:01.000
                header['starttime'] = to_utcdatetime(value)
            else:
                # everything else gets stored into sh entry
                if key in SH_KEYS_INT:
                    header['sh'][key] = int(value)
                elif key in SH_KEYS_FLOAT:
                    header['sh'][key] = float(value)
                else:
                    header['sh'][key] = value
        # set channel code
        header['channel'] = ''.join(channel)
        if headonly:
            # skip data
            stream.append(Trace(header=header))
        else:
            # read data
            data = loadtxt(data, dtype=np.float32, ndmin=1)

            # cut data if requested
            if skip and length:
                data = data[:length]

            # use correct value in any case
            header["npts"] = len(data)

            stream.append(Trace(data=data, header=header))
    return stream
Example #41
def _read_q(filename, headonly=False, data_directory=None, byteorder='=',
            **kwargs):  # @UnusedVariable
    """
    Reads a Seismic Handler Q file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: Q header file to be read. Must have a `QHD` file
        extension.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :type data_directory: str, optional
    :param data_directory: Data directory where the corresponding QBN file can
        be found.
    :type byteorder: str, optional
    :param byteorder: Enforce byte order for data file. This is important for
        Q files written in older versions of Seismic Handler, which don't
        explicit state the `BYTEORDER` flag within the header file. Can be
        little endian (``'<'``), big endian (``'>'``), or native byte order
        (``'='``). Defaults to ``'='``.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    Q files consist of two files per data set:

    * an ASCII header file with file extension `QHD` and the
    * binary data file with file extension `QBN`.

    The read method only accepts header files for the ``filename`` parameter.
    ObsPy assumes that the corresponding data file is within the same directory
    if the ``data_directory`` parameter is not set. Otherwise it will search
    in the given ``data_directory`` for a file with the `QBN` file extension.
    This function should NOT be called directly, it registers via the
    ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/QFILE-TEST.QHD")
    >>> st    #doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    3 Trace(s) in Stream:
    .TEST..BHN | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .TEST..BHE | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .WET..HHZ  | 2010-01-01T01:01:05.999000Z - ... | 100.0 Hz, 4001 samples
    """
    if not headonly:
        if not data_directory:
            data_file = os.path.splitext(filename)[0] + '.QBN'
        else:
            data_file = os.path.basename(os.path.splitext(filename)[0])
            data_file = os.path.join(data_directory, data_file + '.QBN')
        if not os.path.isfile(data_file):
            msg = "Can't find corresponding QBN file at %s."
            raise IOError(msg % data_file)
        fh_data = open(data_file, 'rb')
    # loop through read header file
    with open(filename, 'rt') as fh:
        lines = fh.read().splitlines()
    # number of comment lines
    cmtlines = int(lines[0][5:7])
    # trace lines
    traces = {}
    i = -1
    id = ''
    for line in lines[cmtlines:]:
        cid = int(line[0:2])
        if cid != id:
            id = cid
            i += 1
        traces.setdefault(i, '')
        traces[i] += line[3:]
    # create stream object
    stream = Stream()
    for id in sorted(traces.keys()):
        # fetch headers
        header = {}
        header['sh'] = {
            "FROMQ": True,
            "FILE": os.path.splitext(os.path.split(filename)[1])[0],
        }
        channel = ['', '', '']
        npts = 0
        for item in traces[id].split('~'):
            key = item.lstrip()[0:4]
            value = item.lstrip()[5:]
            if key == 'L001':
                npts = header['npts'] = int(value)
            elif key == 'L000':
                continue
            elif key == 'R000':
                header['delta'] = float(value)
            elif key == 'R026':
                header['calib'] = float(value)
            elif key == 'S001':
                header['station'] = value
            elif key == 'C000' and value:
                channel[2] = value[0]
            elif key == 'C001' and value:
                channel[0] = value[0]
            elif key == 'C002' and value:
                channel[1] = value[0]
            elif key == 'C003':
                if value == '<' or value == '>':
                    byteorder = header['sh']['BYTEORDER'] = value
            elif key == 'S021':
                # 01-JAN-2009_01:01:01.0
                # 1-OCT-2009_12:46:01.000
                header['starttime'] = to_utcdatetime(value)
            elif key == 'S022':
                header['sh']['P-ONSET'] = to_utcdatetime(value)
            elif key == 'S023':
                header['sh']['S-ONSET'] = to_utcdatetime(value)
            elif key == 'S024':
                header['sh']['ORIGIN'] = to_utcdatetime(value)
            elif key:
                key = INVERTED_SH_IDX.get(key, key)
                if key in SH_KEYS_INT:
                    header['sh'][key] = int(value)
                elif key in SH_KEYS_FLOAT:
                    header['sh'][key] = float(value)
                else:
                    header['sh'][key] = value
        # set channel code
        header['channel'] = ''.join(channel)
        # remember record number
        header['sh']['RECNO'] = len(stream) + 1
        if headonly:
            # skip data
            stream.append(Trace(header=header))
        else:
            if not npts:
                stream.append(Trace(header=header))
                continue
            # read data
            data = fh_data.read(npts * 4)
            dtype = native_str(byteorder + 'f4')
            data = np.fromstring(data, dtype=dtype)
            # convert to system byte order
            data = np.require(data, native_str('=f4'))
            stream.append(Trace(data=data, header=header))
    if not headonly:
        fh_data.close()
    return stream
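A hedged usage sketch for the reader above: Q files are opened through obspy.read(), which dispatches to _read_q(). The QHD path is the one used in the doctest; the data_directory value is hypothetical.

from obspy import read

# Scan only the QHD header file; the QBN data file is not touched.
st = read("/path/to/QFILE-TEST.QHD", headonly=True)
for tr in st:
    print(tr.id, tr.stats.npts)

# If the QBN file lives in a different directory, point data_directory at it;
# for files from old Seismic Handler versions the byte order can be forced.
st = read("/path/to/QFILE-TEST.QHD", data_directory="/path/to/qbn",
          byteorder="<")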
Пример #42
0
def _read_segy(filename, headonly=False, byteorder=None,
               textual_header_encoding=None, unpack_trace_headers=False,
               **kwargs):  # @UnusedVariable
    """
    Reads a SEG Y file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: SEG Y rev1 file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the header and omit the waveform
        data.
    :type byteorder: str or ``None``
    :param byteorder: Determines the endianness of the file. Either ``'>'`` for
        big endian or ``'<'`` for little endian. If it is ``None``, it will try
        to autodetect the endianness. The endianness is always valid for the
        whole file. Defaults to ``None``.
    :type textual_header_encoding: str or ``None``
    :param textual_header_encoding: The encoding of the textual header. Can be
        ``'EBCDIC'``, ``'ASCII'`` or ``None``. If it is ``None``, autodetection
        will be attempted. Defaults to ``None``.
    :type unpack_trace_headers: bool, optional
    :param unpack_trace_headers: Determines whether or not all trace header
        values will be unpacked during reading. If ``False`` it will greatly
        improve performance and especially reduce memory usage with large
        files. The header values can still be accessed and will be calculated
        on the fly, but tab completion will no longer work. See headers.py for
        a list of all possible trace header values. Defaults to ``False``.
    :returns: An ObsPy :class:`~obspy.core.stream.Stream` object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/00001034.sgy_first_trace")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    1 Trace(s) in Stream:
    Seq. No. in line:    1 | 2009-06-22T14:47:37.000000Z - ... 2001 samples
    """
    # Read file to the internal segy representation.
    segy_object = _read_segyrev1(
        filename, endian=byteorder,
        textual_header_encoding=textual_header_encoding,
        unpack_headers=unpack_trace_headers)
    # Create the stream object.
    stream = Stream()
    # SEGY has several file headers that apply to all traces. They will be
    # stored in Stream.stats.
    stream.stats = AttribDict()
    # Get the textual file header.
    textual_file_header = segy_object.textual_file_header
    # The binary file header will be a new AttribDict
    binary_file_header = AttribDict()
    for key, value in segy_object.binary_file_header.__dict__.items():
        setattr(binary_file_header, key, value)
    # Get the data encoding and the endianness from the first trace.
    data_encoding = segy_object.traces[0].data_encoding
    endian = segy_object.traces[0].endian
    textual_file_header_encoding = segy_object.textual_header_encoding.upper()
    # Add the file wide headers.
    stream.stats.textual_file_header = textual_file_header
    stream.stats.binary_file_header = binary_file_header
    # Also set the data encoding, endianness and the encoding of the
    # textual_file_header.
    stream.stats.data_encoding = data_encoding
    stream.stats.endian = endian
    stream.stats.textual_file_header_encoding = \
        textual_file_header_encoding

    # Convert traces to ObsPy Trace objects.
    for tr in segy_object.traces:
        stream.append(tr.to_obspy_trace(
            headonly=headonly,
            unpack_trace_headers=unpack_trace_headers))

    return stream
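A short, hedged sketch of the file-wide headers this reader attaches to Stream.stats (the file name is the one used in the doctest above); since the exact binary header fields depend on the file, they are only iterated here.

from obspy import read

st = read("/path/to/00001034.sgy_first_trace", unpack_trace_headers=True)
# Textual and binary file headers apply to the whole file, not to a trace.
print(st.stats.textual_file_header_encoding, st.stats.endian)
print(st.stats.textual_file_header[:80])
for key, value in st.stats.binary_file_header.items():
    print(key, value)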
Пример #43
0
def _read_seisan(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads a SEISAN file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: SEISAN file to be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/2001-01-13-1742-24S.KONO__004")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    4 Trace(s) in Stream:
    .KONO.0.B0Z | 2001-01-13T17:45:01.999000Z - ... | 20.0 Hz, 6000 samples
    .KONO.0.L0Z | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    .KONO.0.L0N | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    .KONO.0.L0E | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    """
    # get version info from event file header (at least 12*80 bytes)
    fh = open(filename, 'rb')
    data = fh.read(80 * 12)
    (byteorder, arch, version) = _get_version(data)
    dlen = arch // 8
    dtype = np.dtype(native_str(byteorder + 'i' + str(dlen)))
    stype = native_str('=i' + str(dlen))

    def _readline(fh, version=version, dtype=dtype):
        if version >= 7:
            # On Sun, Linux, MacOSX and PC from version 7.0 (using Digital
            # Fortran), every write is preceded and terminated with 4
            # additional bytes giving the number of bytes in the write.
            # With 64 bit systems, 8 bytes is used to define number of bytes
            # written.
            start_bytes = fh.read(dtype.itemsize)
            # convert to int32/int64
            length = np.fromstring(start_bytes, dtype=dtype)[0]
            data = fh.read(length)
            end_bytes = fh.read(dtype.itemsize)
            assert start_bytes == end_bytes
            return data
        else:  # version <= 6
            # Every write is preceded and terminated with one byte giving the
            # number of bytes in the write. If the write contains more than 128
            # bytes, it is blocked in records of 128 bytes, each with the start
            # and end byte which in this case is the number 128. Each record is
            # thus 130 bytes long.
            data = b''
            while True:
                start_byte = fh.read(1)
                if not start_byte:
                    # end of file
                    break
                # convert to unsigned int8
                length = np.fromstring(start_byte, np.uint8)[0]
                data += fh.read(length)
                end_byte = fh.read(1)
                assert start_byte == end_byte
                if length == 128:
                    # blocked data - repeat loop
                    continue
                # end of blocked data
                break
            return data

    # reset file pointer
    if version >= 7:
        fh.seek(0)
    else:
        # version <= 6 starts with first byte K
        fh.seek(1)
    # event file header
    # line 1
    data = _readline(fh)
    number_of_channels = int(data[30:33])
    # calculate number of lines with channels
    number_of_lines = number_of_channels // 3 + (number_of_channels % 3 and 1)
    if number_of_lines < 10:
        number_of_lines = 10
    # line 2 - always empty
    data = _readline(fh)
    # line 3
    for _i in range(0, number_of_lines):
        data = _readline(fh)
    # now parse each event file channel header + data
    stream = Stream()
    for _i in range(number_of_channels):
        # get channel header
        temp = _readline(fh).decode()
        # create Stats
        header = Stats()
        header['network'] = (temp[16] + temp[19]).strip()
        header['station'] = temp[0:5].strip()
        header['location'] = (temp[7] + temp[12]).strip()
        header['channel'] = (temp[5:7] + temp[8]).strip()
        header['sampling_rate'] = float(temp[36:43])
        header['npts'] = int(temp[43:50])
        # create start and end times
        year = int(temp[9:12]) + 1900
        month = int(temp[17:19])
        day = int(temp[20:22])
        hour = int(temp[23:25])
        mins = int(temp[26:28])
        secs = float(temp[29:35])
        header['starttime'] = UTCDateTime(year, month, day, hour, mins) + secs
        if headonly:
            # skip data
            from_buffer(_readline(fh), dtype=dtype)
            stream.append(Trace(header=header))
        else:
            # fetch data
            data = from_buffer(_readline(fh), dtype=dtype)
            # convert to system byte order
            data = np.require(data, stype)
            if header['npts'] != len(data):
                msg = "Mismatching byte size %d != %d"
                warnings.warn(msg % (header['npts'], len(data)))
            stream.append(Trace(data=data, header=header))
    fh.close()
    return stream
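The version >= 7 branch of _readline() above deals with Fortran unformatted records, where every write is wrapped in a leading and trailing byte count. A minimal, self-contained sketch of that framing (illustrative only, not part of ObsPy):

import io
import numpy as np

def frame_record(payload, dtype=np.dtype("<i4")):
    # prepend and append the record length, as Fortran unformatted I/O does
    length = np.array([len(payload)], dtype=dtype).tobytes()
    return length + payload + length

buf = io.BytesIO(frame_record(b"KONO  channel header ...") +
                 frame_record(b"data block ..."))

def read_record(fh, dtype=np.dtype("<i4")):
    start = fh.read(dtype.itemsize)
    length = int(np.frombuffer(start, dtype=dtype)[0])
    data = fh.read(length)
    assert fh.read(dtype.itemsize) == start  # trailing length must match
    return data

print(read_record(buf))  # b'KONO  channel header ...'
print(read_record(buf))  # b'data block ...'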
Пример #44
0
def postprocess_adjsrc(adjsrcs, interp_starttime, interp_delta,
                       interp_npts, rotate_flag=False, inventory=None,
                       event=None, sum_over_comp_flag=False,
                       weight_flag=False, weight_dict=None,
                       filter_flag=False, pre_filt=None):
    """
    Postprocess adjoint sources to fit SPECFEM input (same as raw_synthetic):
    1) zero padding the adjoint sources
    2) interpolation
    3) sum adjoint sources from multiple instruments together if there are any
    4) rotate from (R, T) to (N, E)

    :param adjsrcs: adjoint sources list from the same station
    :type adjsrcs: list
    :param adj_starttime: starttime of adjoint sources
    :type adj_starttime: obspy.UTCDateTime
    :param raw_synthetic: raw synthetic from SPECFEM output, as reference
    :type raw_synthetic: obspy.Stream or obspy.Trace
    :param inventory: station inventory
    :type inventory: obspy.Inventory
    :param event: event information
    :type event: obspy.Event
    :param sum_over_comp_flag: sum over component flag
    :param weight_dict: weight dictionary
    """

    if not isinstance(adjsrcs, list):
        raise ValueError("Input adjsrcs should be type of list of adjoint "
                         "sources")

    # transfer AdjointSource type to stream for easy processing
    adj_stream = Stream()
    for adj in adjsrcs:
        _tr = _convert_adj_to_trace(adj)
        adj_stream.append(_tr)

    # zero padding
    interp_endtime = interp_starttime + interp_delta * interp_npts
    zero_padding_stream(adj_stream, interp_starttime, interp_endtime)

    # interpolate
    adj_stream.interpolate(sampling_rate=1.0/interp_delta,
                           starttime=interp_starttime,
                           npts=interp_npts)

    # sum multiple instruments
    if sum_over_comp_flag:
        adj_stream = sum_adj_on_component(adj_stream, weight_flag,
                                          weight_dict)

    # add zero trace for missing components
    missinglist = ["Z", "R", "T"]
    tr_template = adj_stream[0]
    for tr in adj_stream:
        missinglist.remove(tr.stats.channel[-1])

    for component in missinglist:
        zero_adj = tr_template.copy()
        zero_adj.data.fill(0.0)
        zero_adj.stats.channel = "%s%s" % (tr_template.stats.channel[0:2],
                                           component)
        adj_stream.append(zero_adj)

    if rotate_flag:
        rotate_adj(adj_stream, event, inventory)

    if filter_flag:
        # filter the adjoint source
        if pre_filt is None or len(pre_filt) != 4:
            raise ValueError("Input pre_filt should be a list or tuple with "
                             "length of 4")
        if not check_array_order(pre_filt, order="ascending"):
            raise ValueError("Input pre_filt must a in ascending order. The "
                             "unit is Hz")
        for tr in adj_stream:
            filter_trace(tr, pre_filt)

    # convert the stream to pyadjoint.AdjointSource
    final_adjsrcs = []
    adj_src_type = adjsrcs[0].adj_src_type
    minp = adjsrcs[0].min_period
    maxp = adjsrcs[0].max_period
    for tr in adj_stream:
        final_adjsrcs.append(_convert_trace_to_adj(tr, adj_src_type,
                                                   minp, maxp))

    return final_adjsrcs
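The "add zero trace for missing components" step above can be illustrated in isolation with plain ObsPy objects. This is a hedged, self-contained sketch, not a call into postprocess_adjsrc() itself:

import numpy as np
from obspy import Stream, Trace

adj_stream = Stream([Trace(np.random.randn(100), header={"channel": "BHZ"}),
                     Trace(np.random.randn(100), header={"channel": "BHR"})])

missing = ["Z", "R", "T"]
for tr in adj_stream:
    missing.remove(tr.stats.channel[-1])

template = adj_stream[0]
for component in missing:
    zero_adj = template.copy()
    zero_adj.data.fill(0.0)
    zero_adj.stats.channel = template.stats.channel[0:2] + component
    adj_stream.append(zero_adj)

print([tr.stats.channel for tr in adj_stream])  # ['BHZ', 'BHR', 'BHT']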
Пример #45
0
    def getWaveform(self, network, station, location, channel, starttime,
                    endtime, cleanup=True):
        """
        Retrieves waveform data from Earthworm Wave Server and returns an ObsPy
        Stream object.

        :type network: str
        :param network: Network code, e.g. ``'UW'``.
        :type station: str
        :param station: Station code, e.g. ``'TUCA'``.
        :type location: str
        :param location: Location code, e.g. ``'--'``.
        :type channel: str
        :param channel: Channel code, e.g. ``'BHZ'``. Last character (i.e.
            component) can be a wildcard ('?' or '*') to fetch `Z`, `N` and
            `E` components.
        :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param starttime: Start date and time.
        :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param endtime: End date and time.
        :return: ObsPy :class:`~obspy.core.stream.Stream` object.
        :type cleanup: bool
        :param cleanup: Specifies whether perfectly aligned traces should be
            merged or not. See :meth:`obspy.core.stream.Stream.merge` for
            ``method=-1``.

        .. rubric:: Example

        >>> from obspy.earthworm import Client
        >>> client = Client("pele.ess.washington.edu", 16017)
        >>> dt = UTCDateTime(2013, 1, 17) - 2000  # now - 2000 seconds
        >>> st = client.getWaveform('UW', 'TUCA', '', 'BHZ', dt, dt + 10)
        >>> st.plot()  # doctest: +SKIP
        >>> st = client.getWaveform('UW', 'TUCA', '', 'BH*', dt, dt + 10)
        >>> st.plot()  # doctest: +SKIP

        .. plot::

            from obspy.earthworm import Client
            from obspy import UTCDateTime
            client = Client("pele.ess.washington.edu", 16017, timeout=5)
            dt = UTCDateTime(2013, 1, 17) - 2000  # now - 2000 seconds
            st = client.getWaveform('UW', 'TUCA', '', 'BHZ', dt, dt + 10)
            st.plot()
            st = client.getWaveform('UW', 'TUCA', '', 'BH*', dt, dt + 10)
            st.plot()
        """
        # replace wildcards in last char of channel and fetch all 3 components
        if channel[-1] in "?*":
            st = Stream()
            for comp in ("Z", "N", "E"):
                channel_new = channel[:-1] + comp
                st += self.getWaveform(network, station, location,
                                       channel_new, starttime, endtime,
                                       cleanup=cleanup)
            return st
        if location == '':
            location = '--'
        scnl = (station, channel, network, location)
        # fetch waveform
        tbl = readWaveServerV(self.host, self.port, scnl, starttime, endtime,
                              timeout=self.timeout)
        # create new stream
        st = Stream()
        for tb in tbl:
            st.append(tb.getObspyTrace())
        if cleanup:
            st._cleanup()
        st.trim(starttime, endtime)
        return st
Пример #46
0
class SEG2(object):
    """
    Class to read and write SEG 2 formatted files. The main reason this is
    realized as a class is for the ease of passing the various parameters from
    one function to the next.

    Do not change the file_pointer attribute while using this class. It will
    be used to keep track of which parts have been read yet and which not.
    """
    def __init__(self):
        pass

    def readFile(self, file_object):
        """
        Reads the given file and returns a Stream object. If
        file_object is a string it will be treated as a filename, otherwise it
        will be expected to be a file like object with read(), seek() and
        tell() methods.

        If it is a file_like object, file.seek(0, 0) is expected to be the
        beginning of the SEG-2 file.
        """
        # Read the file if it is a filename.
        if isinstance(file_object, basestring):
            self.file_pointer = open(file_object, 'rb')
        else:
            self.file_pointer = file_object
            self.file_pointer.seek(0, 0)

        self.stream = Stream()

        # Read the file descriptor block. This will also determine the
        # endianness.
        self.readFileDescriptorBlock()

        # Loop over every trace, read it and append it to the Stream.
        for tr_pointer in self.trace_pointers:
            self.file_pointer.seek(tr_pointer, 0)
            self.stream.append(self.parseNextTrace())

        return self.stream

    def readFileDescriptorBlock(self):
        """
        Handles the reading of the file descriptor block and the free form
        section following it.
        """
        file_descriptor_block = self.file_pointer.read(32)

        # Determine the endianness and check if the block id is valid.
        if (unpack('B', file_descriptor_block[0])[0] == 0x55) and \
           (unpack('B', file_descriptor_block[1])[0] == 0x3a):
            self.endian = '<'
        elif (unpack('B', file_descriptor_block[0])[0] == 0x3a) and \
            (unpack('B', file_descriptor_block[1])[0] == 0x55):
            self.endian = '>'
        else:
            msg = 'Wrong File Descriptor Block ID'
            raise SEG2InvalidFileError(msg)

        # Check the revision number.
        revision_number = unpack('%sH' % self.endian,
                                file_descriptor_block[2:4])[0]
        if revision_number != 1:
            msg = '\nOnly SEG 2 revision 1 is officially supported. This file '
            msg += 'has revision %i. Reading it might fail.' % revision_number
            msg += '\nPlease contact the ObsPy developers with a sample file.'
            warnings.warn(msg)
        size_of_trace_pointer_sub_block = unpack('%sH' % self.endian,
                                                 file_descriptor_block[4:6])[0]
        number_of_traces = unpack('%sH' % self.endian,
                                  file_descriptor_block[6:8])[0]

        # Define the string and line terminators.
        size_of_string_terminator = unpack('B', file_descriptor_block[8])[0]
        first_string_terminator_char = unpack('c', file_descriptor_block[9])[0]
        second_string_terminator_char = unpack('c',
                                               file_descriptor_block[10])[0]
        size_of_line_terminator = unpack('B', file_descriptor_block[11])[0]
        first_line_terminator_char = unpack('c', file_descriptor_block[12])[0]
        second_line_terminator_char = unpack('c', file_descriptor_block[13])[0]

        # Assemble the string terminator.
        if size_of_string_terminator == 1:
            self.string_terminator = first_string_terminator_char
        elif size_of_string_terminator == 2:
            self.string_terminator = first_string_terminator_char + \
                                     second_string_terminator_char
        else:
            msg = 'Wrong size of string terminator.'
            raise SEG2InvalidFileError(msg)
        # Assemble the line terminator.
        if size_of_line_terminator == 1:
            self.line_terminator = first_line_terminator_char
        elif size_of_line_terminator == 2:
            self.line_terminator = first_line_terminator_char + \
                                     second_line_terminator_char
        else:
            msg = 'Wrong size of line terminator.'
            raise SEG2InvalidFileError(msg)

        # Read the trace pointer sub-block and retrieve all the pointers.
        trace_pointer_sub_block = \
                self.file_pointer.read(size_of_trace_pointer_sub_block)
        self.trace_pointers = []
        for _i in xrange(number_of_traces):
            index = _i * 4
            self.trace_pointers.append(
                unpack('%sL' % self.endian,
                       trace_pointer_sub_block[index:index + 4])[0])

        # The rest of the header up to where the first trace pointer points is
        # a free form section.
        self.stream.stats = AttribDict()
        self.stream.stats.seg2 = AttribDict()
        self.parseFreeForm(self.file_pointer.read(\
                           self.trace_pointers[0] - self.file_pointer.tell()),
                           self.stream.stats.seg2)

        # Get the time information from the file header.
        # XXX: Need some more generic date/time parsers.
        time = self.stream.stats.seg2.ACQUISITION_TIME
        date = self.stream.stats.seg2.ACQUISITION_DATE
        time = time.strip().split(':')
        date = date.strip().split('/')
        hour, minute, second = int(time[0]), int(time[1]), float(time[2])
        day, month, year = int(date[0]), MONTHS[date[1].lower()], int(date[2])
        self.starttime = UTCDateTime(year, month, day, hour, minute, second)

    def parseNextTrace(self):
        """
        Parse the next trace in the trace pointer list and return a Trace
        object.
        """
        trace_descriptor_block = self.file_pointer.read(32)
        # Check if the trace descriptor block id is valid.
        if unpack('%sH' % self.endian, trace_descriptor_block[0:2])[0] != \
           0x4422:
            msg = 'Invalid trace descriptor block id.'
            raise SEG2InvalidFileError(msg)
        size_of_this_block = unpack('%sH' % self.endian,
                                    trace_descriptor_block[2:4])[0]
        _size_of_corresponding_data_block = \
                unpack('%sL' % self.endian, trace_descriptor_block[4:8])[0]
        number_of_samples_in_data_block = \
                unpack('%sL' % self.endian, trace_descriptor_block[8:12])[0]
        data_format_code = unpack('B', trace_descriptor_block[12])[0]

        # Parse the data format code.
        if data_format_code == 4:
            dtype = 'float32'
            sample_size = 4
        elif data_format_code == 5:
            dtype = 'float64'
            sample_size = 8
        elif (data_format_code == 1) or \
             (data_format_code == 2) or \
             (data_format_code == 3):
            msg = '\nData format code %i not supported yet.\n' \
                    % data_format_code
            msg += 'Please contact the ObsPy developers with a sample file.'
            raise NotImplementedError(msg)
        else:
            msg = 'Unrecognized data format code'
            raise SEG2InvalidFileError(msg)

        # The rest of the trace block is free form.
        header = {}
        header['seg2'] = AttribDict()
        self.parseFreeForm(\
                         self.file_pointer.read(size_of_this_block - 32),
                          header['seg2'])
        header['delta'] = float(header['seg2']['SAMPLE_INTERVAL'])
        # Set to the file's starttime.
        header['starttime'] = deepcopy(self.starttime)
        # Unpack the data.
        data = np.fromstring(self.file_pointer.read(\
                number_of_samples_in_data_block * sample_size), dtype=dtype)
        return Trace(data=data, header=header)

    def parseFreeForm(self, free_form_str, attrib_dict):
        """
        Parse the free form section stored in free_form_str and save it in
        attrib_dict.
        """
        # Separate the strings.
        strings = free_form_str.split(self.string_terminator)
        # This is not fully according to the SEG-2 format specification (or
        # rather the specification only speaks about an offset of 2 bytes
        # between strings and a string_terminator between two free form
        # strings). The files I have show the following separation between two
        # strings: 'random offset byte', 'string_terminator',
        # 'random offset byte'
        # Therefore every string has to be at least 3 bytes wide to be
        # acceptable after being split at the string terminator.
        strings = [_i for _i in strings if len(_i) >= 3]
        # Every string has the structure OPTION<SPACE>VALUE. Write to
        # stream.stats attribute.
        for string in strings:
            string = string.strip()
            string = string.split(' ')
            key = string[0].strip()
            value = ' '.join(string[1:]).strip()
            setattr(attrib_dict, key, value)
        # Parse the notes string again.
        if hasattr(attrib_dict, 'NOTE'):
            notes = attrib_dict.NOTE.split(self.line_terminator)
            attrib_dict.NOTE = AttribDict()
            for note in notes:
                note = note.strip()
                note = note.split(' ')
                key = note[0].strip()
                value = ' '.join(note[1:]).strip()
                setattr(attrib_dict.NOTE, key, value)
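A tiny, self-contained sketch of the endianness detection performed in readFileDescriptorBlock() above (illustrative only):

from struct import unpack

def detect_endian(first_two_bytes):
    # SEG-2 file descriptor blocks start with 0x55 0x3a (little endian)
    # or 0x3a 0x55 (big endian).
    b0, b1 = unpack('BB', first_two_bytes)
    if (b0, b1) == (0x55, 0x3a):
        return '<'
    if (b0, b1) == (0x3a, 0x55):
        return '>'
    raise ValueError('Wrong File Descriptor Block ID')

print(detect_endian(b'\x55\x3a'))  # '<'
print(detect_endian(b'\x3a\x55'))  # '>'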
Пример #47
0
def _read_ah2(filename):
    """
    Reads an AH v2 waveform file and returns a Stream object.

    :type filename: str
    :param filename: AH v2 file to be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :returns: Stream with Traces specified by given file.
    """

    def _unpack_trace(data):
        ah_stats = AttribDict({
            'version': '2.0',
            'event': AttribDict(),
            'station': AttribDict(),
            'record': AttribDict(),
            'extras': []
        })

        # station info
        data.unpack_int()  # undocumented extra int?
        ah_stats.station.code = _unpack_string(data)
        data.unpack_int()  # here too?
        ah_stats.station.channel = _unpack_string(data)
        data.unpack_int()  # and again?
        ah_stats.station.type = _unpack_string(data)
        ah_stats.station.recorder = _unpack_string(data)
        ah_stats.station.sensor = _unpack_string(data)
        ah_stats.station.azimuth = data.unpack_float()  # degrees E from N
        ah_stats.station.dip = data.unpack_float()  # up = -90, down = +90
        ah_stats.station.latitude = data.unpack_double()
        ah_stats.station.longitude = data.unpack_double()
        ah_stats.station.elevation = data.unpack_float()
        ah_stats.station.gain = data.unpack_float()
        ah_stats.station.normalization = data.unpack_float()  # A0

        npoles = data.unpack_int()
        ah_stats.station.poles = []
        for _i in range(npoles):
            r = data.unpack_float()
            i = data.unpack_float()
            ah_stats.station.poles.append(complex(r, i))

        nzeros = data.unpack_int()
        ah_stats.station.zeros = []
        for _i in range(nzeros):
            r = data.unpack_float()
            i = data.unpack_float()
            ah_stats.station.zeros.append(complex(r, i))
        ah_stats.station.comment = _unpack_string(data)

        # event info
        ah_stats.event.latitude = data.unpack_double()
        ah_stats.event.longitude = data.unpack_double()
        ah_stats.event.depth = data.unpack_float()
        ot_year = data.unpack_int()
        ot_mon = data.unpack_int()
        ot_day = data.unpack_int()
        ot_hour = data.unpack_int()
        ot_min = data.unpack_int()
        ot_sec = data.unpack_float()
        try:
            ot = UTCDateTime(ot_year, ot_mon, ot_day, ot_hour, ot_min, ot_sec)
        except Exception:
            ot = None
        ah_stats.event.origin_time = ot
        data.unpack_int()  # and again?
        ah_stats.event.comment = _unpack_string(data)

        # record info
        ah_stats.record.type = dtype = data.unpack_int()  # data type
        ah_stats.record.ndata = ndata = data.unpack_uint()  # number of samples
        ah_stats.record.delta = data.unpack_float()  # sampling interval
        ah_stats.record.max_amplitude = data.unpack_float()
        at_year = data.unpack_int()
        at_mon = data.unpack_int()
        at_day = data.unpack_int()
        at_hour = data.unpack_int()
        at_min = data.unpack_int()
        at_sec = data.unpack_float()
        at = UTCDateTime(at_year, at_mon, at_day, at_hour, at_min, at_sec)
        ah_stats.record.start_time = at
        ah_stats.record.units = _unpack_string(data)
        ah_stats.record.inunits = _unpack_string(data)
        ah_stats.record.outunits = _unpack_string(data)
        data.unpack_int()  # and again?
        ah_stats.record.comment = _unpack_string(data)
        data.unpack_int()  # and again?
        ah_stats.record.log = _unpack_string(data)

        # user attributes
        nusrattr = data.unpack_int()
        ah_stats.usrattr = {}
        for _i in range(nusrattr):
            key = _unpack_string(data)
            value = _unpack_string(data)
            ah_stats.usrattr[key] = value

        # unpack data using dtype from record info
        if dtype == 1:
            # float
            temp = data.unpack_farray(ndata, data.unpack_float)
        elif dtype == 6:
            # double
            temp = data.unpack_farray(ndata, data.unpack_double)
        else:
            # e.g. 3 (vector), 2 (complex), 4 (tensor)
            msg = 'Unsupported AH v2 record type %d'
            raise NotImplementedError(msg % (dtype))

        tr = Trace(np.array(temp))
        tr.stats.ah = ah_stats
        tr.stats.delta = ah_stats.record.delta
        tr.stats.starttime = ah_stats.record.start_time
        tr.stats.station = ah_stats.station.code
        tr.stats.channel = ah_stats.station.channel
        return tr

    st = Stream()
    with open(filename, "rb") as fh:
        # loop as long we can read records
        while True:
            try:
                # read first 8 bytes with XDR library
                data = xdrlib.Unpacker(fh.read(8))
                # check magic version number
                magic = data.unpack_int()
            except EOFError:
                break
            if magic != 1100:
                raise Exception('Not an AH v2 file')
            try:
                # get record length
                length = data.unpack_uint()
                # read rest of record into XDR unpacker
                data = xdrlib.Unpacker(fh.read(length))
                tr = _unpack_trace(data)
                st.append(tr)
            except EOFError:
                break
        return st
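A hedged usage sketch: AH files are normally opened through obspy.read() with format "AH", and for a version 2 file the header hierarchy built above ends up under Trace.stats.ah. The file name below is hypothetical.

from obspy import read

st = read("/path/to/ah2_file", format="AH")  # hypothetical file name
tr = st[0]
# The full AH header hierarchy assembled above lives under tr.stats.ah.
print(tr.stats.ah.version)                                   # '2.0'
print(tr.stats.ah.station.code, tr.stats.ah.station.channel)
print(tr.stats.ah.record.delta, tr.stats.ah.record.ndata)
print(tr.stats.ah.event.origin_time)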
Пример #48
0
class SEG2(object):
    """
    Class to read and write SEG 2 formatted files. The main reason this is
    realized as a class is for the ease of passing the various parameters from
    one function to the next.

    Do not change the file_pointer attribute while using this class. It will
    be used to keep track of which parts have been read yet and which not.
    """
    def __init__(self):
        pass

    def readFile(self, file_object):
        """
        Reads the given file and returns a Stream object. If
        file_object is a string it will be treated as a filename, otherwise it
        will be expected to be a file like object with read(), seek() and
        tell() methods.

        If it is a file_like object, file.seek(0, 0) is expected to be the
        beginning of the SEG-2 file.
        """
        # Read the file if it is a filename.
        if not hasattr(file_object, 'write'):
            self.file_pointer = open(file_object, 'rb')
        else:
            self.file_pointer = file_object
            self.file_pointer.seek(0, 0)

        self.stream = Stream()

        # Read the file descriptor block. This will also determine the
        # endianness.
        self.readFileDescriptorBlock()

        # Loop over every trace, read it and append it to the Stream.
        for tr_pointer in self.trace_pointers:
            self.file_pointer.seek(tr_pointer, 0)
            self.stream.append(self.parseNextTrace())

        if not hasattr(file_object, 'write'):
            self.file_pointer.close()
        return self.stream

    def readFileDescriptorBlock(self):
        """
        Handles the reading of the file descriptor block and the free form
        section following it.
        """
        file_descriptor_block = self.file_pointer.read(32)

        # Determine the endianness and check if the block id is valid.
        if unpack(b'B', file_descriptor_block[0:1])[0] == 0x55 and \
           unpack(b'B', file_descriptor_block[1:2])[0] == 0x3a:
            self.endian = b'<'
        elif unpack(b'B', file_descriptor_block[0:1])[0] == 0x3a and \
                unpack(b'B', file_descriptor_block[1:2])[0] == 0x55:
            self.endian = b'>'
        else:
            msg = 'Wrong File Descriptor Block ID'
            raise SEG2InvalidFileError(msg)

        # Check the revision number.
        revision_number = unpack(self.endian + b'H',
                                 file_descriptor_block[2:4])[0]
        if revision_number != 1:
            msg = '\nOnly SEG 2 revision 1 is officially supported. This file '
            msg += 'has revision %i. Reading it might fail.' % revision_number
            msg += '\nPlease contact the ObsPy developers with a sample file.'
            warnings.warn(msg)
        size_of_trace_pointer_sub_block = unpack(
            self.endian + b'H', file_descriptor_block[4:6])[0]
        number_of_traces = unpack(
            self.endian + b'H', file_descriptor_block[6:8])[0]

        # Define the string and line terminators.
        (size_of_string_terminator,
         first_string_terminator_char,
         second_string_terminator_char,
         size_of_line_terminator,
         first_line_terminator_char,
         second_line_terminator_char
         ) = unpack(b'BccBcc', file_descriptor_block[8:14])

        # Assemble the string terminator.
        if size_of_string_terminator == 1:
            self.string_terminator = first_string_terminator_char
        elif size_of_string_terminator == 2:
            self.string_terminator = first_string_terminator_char + \
                second_string_terminator_char
        else:
            msg = 'Wrong size of string terminator.'
            raise SEG2InvalidFileError(msg)
        # Assemble the line terminator.
        if size_of_line_terminator == 1:
            self.line_terminator = first_line_terminator_char
        elif size_of_line_terminator == 2:
            self.line_terminator = first_line_terminator_char + \
                second_line_terminator_char
        else:
            msg = 'Wrong size of line terminator.'
            raise SEG2InvalidFileError(msg)

        # Read the trace pointer sub-block and retrieve all the pointers.
        trace_pointer_sub_block = \
            self.file_pointer.read(size_of_trace_pointer_sub_block)
        self.trace_pointers = []
        for _i in range(number_of_traces):
            index = _i * 4
            self.trace_pointers.append(
                unpack(self.endian + b'L',
                       trace_pointer_sub_block[index:index + 4])[0])

        # The rest of the header up to where the first trace pointer points is
        # a free form section.
        self.stream.stats = AttribDict()
        self.stream.stats.seg2 = AttribDict()
        self.parseFreeForm(self.file_pointer.read(
                           self.trace_pointers[0] - self.file_pointer.tell()),
                           self.stream.stats.seg2)

        # Get the time information from the file header.
        # XXX: Need some more generic date/time parsers.
        time = self.stream.stats.seg2.ACQUISITION_TIME
        date = self.stream.stats.seg2.ACQUISITION_DATE
        time = time.strip().split(':')
        date = date.strip().split('/')
        hour, minute, second = int(time[0]), int(time[1]), float(time[2])
        day, month, year = int(date[0]), MONTHS[date[1].lower()], int(date[2])
        self.starttime = UTCDateTime(year, month, day, hour, minute, second)

    def parseNextTrace(self):
        """
        Parse the next trace in the trace pointer list and return a Trace
        object.
        """
        trace_descriptor_block = self.file_pointer.read(32)
        # Check if the trace descriptor block id is valid.
        if unpack(self.endian + b'H', trace_descriptor_block[0:2])[0] != \
           0x4422:
            msg = 'Invalid trace descriptor block id.'
            raise SEG2InvalidFileError(msg)
        size_of_this_block = unpack(self.endian + b'H',
                                    trace_descriptor_block[2:4])[0]
        number_of_samples_in_data_block = \
            unpack(self.endian + b'L', trace_descriptor_block[8:12])[0]
        data_format_code = unpack(b'B', trace_descriptor_block[12:13])[0]

        # Parse the data format code.
        if data_format_code == 4:
            dtype = np.float32
            sample_size = 4
        elif data_format_code == 5:
            dtype = np.float64
            sample_size = 8
        elif data_format_code == 1:
            dtype = np.int16
            sample_size = 2
        elif data_format_code == 2:
            dtype = np.int32
            sample_size = 4
        elif data_format_code == 3:
            msg = ('\nData format code 3 (20-bit SEG-D floating point) not '
                   'supported yet.\nPlease contact the ObsPy developers with '
                   'a sample file.')
            raise NotImplementedError(msg)
        else:
            msg = 'Unrecognized data format code'
            raise SEG2InvalidFileError(msg)

        # The rest of the trace block is free form.
        header = {}
        header['seg2'] = AttribDict()
        self.parseFreeForm(self.file_pointer.read(size_of_this_block - 32),
                           header['seg2'])
        header['delta'] = float(header['seg2']['SAMPLE_INTERVAL'])
        # Set to the file's starttime.
        header['starttime'] = deepcopy(self.starttime)
        if 'DELAY' in header['seg2']:
            if float(header['seg2']['DELAY']) != 0:
                msg = "Non-zero value found in Trace's 'DELAY' field. " + \
                      "This is not supported/tested yet and might lead " + \
                      "to a wrong starttime of the Trace. Please contact " + \
                      "the ObsPy developers with a sample file."
                warnings.warn(msg)
        header['calib'] = float(header['seg2']['DESCALING_FACTOR'])
        # Unpack the data.
        data = np.fromstring(
            self.file_pointer.read(number_of_samples_in_data_block *
                                   sample_size),
            dtype=dtype)
        # Integrate SEG2 file header into each trace header
        tmp = self.stream.stats.seg2.copy()
        tmp.update(header['seg2'])
        header['seg2'] = tmp
        return Trace(data=data, header=header)

    def parseFreeForm(self, free_form_str, attrib_dict):
        """
        Parse the free form section stored in free_form_str and save it in
        attrib_dict.
        """
        # Separate the strings.
        strings = free_form_str.split(self.string_terminator)
        # This is not fully according to the SEG-2 format specification (or
        # rather the specification only speaks about an offset of 2 bytes
        # between strings and a string_terminator between two free form
        # strings). The files I have show the following separation between two
        # strings: 'random offset byte', 'string_terminator',
        # 'random offset byte'
        # Therefore every string has to be at least 3 bytes wide to be
        # acceptable after being split at the string terminator.

        def is_good_char(c):
            return c in (b'0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN'
                         b'OPQRSTUVWXYZ!"#$%&\'()*+,-./:; <=>?@[\\]^_`{|}~ ')

        # A loop over a bytestring in Python 3 returns integers. This can be
        # solved with a number of imports from the python-future module and
        # all kinds of subtle changes throughout this file. Separating the
        # handling for Python 2 and 3 seems the cleaner and simpler approach.
        if PY2:
            strings = ["".join(filter(is_good_char, _i))
                       for _i in strings
                       if len(_i) >= 3]
        else:
            strings = ["".join(map(chr, filter(is_good_char, _i)))
                       for _i in strings
                       if len(_i) >= 3]

        # Every string has the structure OPTION<SPACE>VALUE. Write to
        # stream.stats attribute.
        for string in strings:
            string = string.strip()
            string = string.split(' ')
            key = string[0].strip()
            value = ' '.join(string[1:]).strip()
            setattr(attrib_dict, key, value)
        # Parse the notes string again.
        if hasattr(attrib_dict, 'NOTE'):
            notes = attrib_dict.NOTE.split(self.line_terminator.decode())
            attrib_dict.NOTE = AttribDict()
            for note in notes:
                note = note.strip()
                note = note.split(' ')
                key = note[0].strip()
                value = ' '.join(note[1:]).strip()
                setattr(attrib_dict.NOTE, key, value)
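A hedged usage sketch for this newer SEG2 reader: files are opened through obspy.read() and, as the code above shows, the file-level free-form header is merged into each trace's stats.seg2. The file name is hypothetical.

from obspy import read

st = read("/path/to/file.seg2", format="SEG2")  # hypothetical file name
# File-level keys such as ACQUISITION_TIME sit next to the trace's own
# free-form keys such as SAMPLE_INTERVAL after the merge above.
print(st[0].stats.seg2.ACQUISITION_TIME)
print(st[0].stats.seg2.SAMPLE_INTERVAL)
print(st[0].stats.delta, st[0].stats.starttime)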
Пример #49
0
def readTSPAIR(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads a ASCII TSPAIR file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the headers. This is most useful
        for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read('/path/to/tspair.ascii')
    """
    fh = open(filename, "rt")
    # read file and split text into channels
    headers = {}
    key = None
    for line in fh:
        if line.isspace():
            # blank line
            continue
        elif line.startswith("TIMESERIES"):
            # new header line
            key = line
            headers[key] = StringIO()
        elif headonly:
            # skip data for option headonly
            continue
        elif key:
            # data entry - may be written in multiple columns
            headers[key].write(line.strip().split()[-1] + " ")
    fh.close()
    # create ObsPy stream object
    stream = Stream()
    for header, data in headers.iteritems():
        # create Stats
        stats = Stats()
        parts = header.replace(",", "").split()
        temp = parts[1].split("_")
        stats.network = temp[0]
        stats.station = temp[1]
        stats.location = temp[2]
        stats.channel = temp[3]
        stats.sampling_rate = parts[4]
        # quality only used in MSEED
        stats.mseed = AttribDict({"dataquality": temp[4]})
        stats.ascii = AttribDict({"unit": parts[-1]})
        stats.starttime = UTCDateTime(parts[6])
        stats.npts = parts[2]
        if headonly:
            # skip data
            stream.append(Trace(header=stats))
        else:
            data = _parse_data(data, parts[8])
            stream.append(Trace(data=data, header=stats))
    return stream
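A self-contained sketch of how the TIMESERIES header line is split by the reader above, using a header line in the standard TSPAIR layout:

line = ("TIMESERIES XX_TEST__BHZ_R, 12 samples, 40 sps, "
        "2008-01-15T00:00:00.025000, TSPAIR, INTEGER, Counts")
parts = line.replace(",", "").split()
net, sta, loc, cha, quality = parts[1].split("_")
print(net, sta, repr(loc), cha, quality)     # XX TEST '' BHZ R
print(parts[2], parts[4], parts[6])          # npts, sampling rate, start time
print(parts[8], parts[-1])                   # sample format, unit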
Пример #50
0
def readSEISAN(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads a SEISAN file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: SEISAN file to be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/2001-01-13-1742-24S.KONO__004")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    4 Trace(s) in Stream:
    .KONO.0.B0Z | 2001-01-13T17:45:01.999000Z - ... | 20.0 Hz, 6000 samples
    .KONO.0.L0Z | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    .KONO.0.L0N | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    .KONO.0.L0E | 2001-01-13T17:42:24.924000Z - ... | 1.0 Hz, 3542 samples
    """
    def _readline(fh, length=80):
        data = fh.read(length + 8)
        end = length + 4
        start = 4
        return data[start:end]
    # read data chunk from given file
    fh = open(filename, 'rb')
    data = fh.read(80 * 12)
    # get version info from file
    (byteorder, arch, _version) = _getVersion(data)
    # fetch lines
    fh.seek(0)
    # start with event file header
    # line 1
    data = _readline(fh)
    number_of_channels = int(data[30:33])
    # calculate number of lines with channels
    number_of_lines = number_of_channels // 3 + (number_of_channels % 3 and 1)
    if number_of_lines < 10:
        number_of_lines = 10
    # line 2
    data = _readline(fh)
    # line 3
    for _i in range(0, number_of_lines):
        data = _readline(fh)
    # now parse each event file channel header + data
    stream = Stream()
    dlen = arch // 8
    dtype = byteorder + 'i' + str(dlen)
    stype = '=i' + str(dlen)
    for _i in range(number_of_channels):
        # get channel header
        temp = _readline(fh, 1040).decode()
        # create Stats
        header = Stats()
        header['network'] = (temp[16] + temp[19]).strip()
        header['station'] = temp[0:5].strip()
        header['location'] = (temp[7] + temp[12]).strip()
        header['channel'] = (temp[5:7] + temp[8]).strip()
        header['sampling_rate'] = float(temp[36:43])
        header['npts'] = int(temp[43:50])
        # create start and end times
        year = int(temp[9:12]) + 1900
        month = int(temp[17:19])
        day = int(temp[20:22])
        hour = int(temp[23:25])
        mins = int(temp[26:28])
        secs = float(temp[29:35])
        header['starttime'] = UTCDateTime(year, month, day, hour, mins) + secs
        if headonly:
            # skip data
            fh.seek(dlen * (header['npts'] + 2), 1)
            stream.append(Trace(header=header))
        else:
            # fetch data
            data = np.fromfile(fh, dtype=dtype, count=header['npts'] + 2)
            # convert to system byte order
            data = np.require(data, stype)
            stream.append(Trace(data=data[2:], header=header))
    fh.close()
    return stream
Пример #51
0
def _read_su(filename, headonly=False, byteorder=None,
             unpack_trace_headers=False, **kwargs):  # @UnusedVariable
    """
    Reads a Seismic Unix (SU) file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: SU file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the header and omit the waveform
        data.
    :type byteorder: str or ``None``
    :param byteorder: Determines the endianness of the file. Either ``'>'`` for
        big endian or ``'<'`` for little endian. If it is ``None``, it will try
        to autodetect the endianness. The endianness is always valid for the
        whole file. Defaults to ``None``.
    :type unpack_trace_headers: bool, optional
    :param unpack_trace_headers: Determines whether or not all trace header
        values will be unpacked during reading. If ``False`` it will greatly
        improve performance and especially reduce memory usage with large
        files. The header values can still be accessed and will be calculated
        on the fly, but tab completion will no longer work. See headers.py for
        a list of all possible trace header values. Defaults to ``False``.
    :returns: An ObsPy :class:`~obspy.core.stream.Stream` object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/1.su_first_trace")
    >>> st #doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  #doctest: +ELLIPSIS
    1 Trace(s) in Stream:
    ... | 2005-12-19T15:07:54.000000Z - ... | 4000.0 Hz, 8000 samples
    """
    # Read file to the internal segy representation.
    su_object = _read_suFile(filename, endian=byteorder,
                             unpack_headers=unpack_trace_headers)

    # Create the stream object.
    stream = Stream()

    # Get the endianness from the first trace.
    endian = su_object.traces[0].endian
    # Loop over all traces.
    for tr in su_object.traces:
        # Create new Trace object for every segy trace and append to the Stream
        # object.
        trace = Trace()
        stream.append(trace)
        # skip data if headonly is set
        if headonly:
            trace.stats.npts = tr.npts
        else:
            trace.data = tr.data
        trace.stats.su = AttribDict()
        # If all values will be unpacked create a normal dictionary.
        if unpack_trace_headers:
            # Add the trace header as a new attrib dictionary.
            header = AttribDict()
            for key, value in tr.header.__dict__.items():
                setattr(header, key, value)
        # Otherwise use the LazyTraceHeaderAttribDict.
        else:
            # Add the trace header as a new lazy attrib dictionary.
            header = LazyTraceHeaderAttribDict(tr.header.unpacked_header,
                                               tr.header.endian)
        trace.stats.su.trace_header = header
        # Also set the endianness.
        trace.stats.su.endian = endian
        # The sampling rate should be set for every trace. It is a sample
        # interval in microseconds. The only sanity check is that it should be
        # larger than 0.
        tr_header = trace.stats.su.trace_header
        if tr_header.sample_interval_in_ms_for_this_trace > 0:
            trace.stats.delta = \
                float(tr.header.sample_interval_in_ms_for_this_trace) / \
                1E6
        # If the year is not zero, calculate the start time. The end time is
        # then calculated from the start time and the sampling rate.
        # 99 is often used as a placeholder.
        if tr_header.year_data_recorded > 0:
            year = tr_header.year_data_recorded
            # The SEG Y rev 0 standard specifies the year to be a 4 digit
            # number.  Before that it was unclear if it should be a 2 or 4
            # digit number. Old or wrong software might still write 2 digit
            # years. Every number <30 will be mapped to 2000-2029 and every
            # number between 30 and 99 will be mapped to 1930-1999.
            if year < 100:
                if year < 30:
                    year += 2000
                else:
                    year += 1900
            julday = tr_header.day_of_year
            hour = tr_header.hour_of_day
            minute = tr_header.minute_of_hour
            second = tr_header.second_of_minute
            trace.stats.starttime = UTCDateTime(
                year=year, julday=julday, hour=hour, minute=minute,
                second=second)
    return stream
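The two-digit year handling described in the comments above, extracted into a minimal sketch (illustrative only):

def normalize_year(year):
    # Two-digit years: < 30 maps to 2000-2029, 30-99 maps to 1930-1999.
    if year < 100:
        year += 2000 if year < 30 else 1900
    return year

print(normalize_year(5), normalize_year(75), normalize_year(2005))
# -> 2005 1975 2005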
Пример #52
0
def paracorr(par):
    """Computation of noise correlation functions
    
    Compute noise correlation functions according to the specifications in the
    parameter dictionary ``par``. This function is most conveniently used as a
    Python program, passing the parameter file as argument. This use is
    explained in the tutorial on correlation.

    The processing is performed in the following sequence:

     * Data is read in typically day-long chunks, ideally contained in a single
       file to speed up reading
     * Preprocessing of the long sequences to avoid the dominating influence of
       perturbing signals that would arise if shorter chunks were processed
     * dividing these long sequences into shorter ones (typically an hour)
     * time domain preprocessing
     * frequency domain preprocessing
     * correlation
     * if ``direct_output`` is present, data is directly written by individual
       processes
     * optionally rotation of the correlation tensor into the ZRT system (not
       possible in combination with direct output)
     * combination of correlation traces of subsequent time segments into
       correlation matrices
     * optionally deletion of traces of individual time segments
    
    :type par: dict
    :param par: processing parameters
    """


    # initialize MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    psize = comm.Get_size()
    
    # set up the logger
    logger = logging.getLogger('paracorr')
    hdlr = logging.FileHandler(os.path.join(par['log_dir'],'%s_paracorr_%d.log' % (
                        par['execution_start'],rank)))
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr) 
    logger.setLevel(logging.DEBUG)

    comb_list = combine_station_channels(par['net']['stations'],
                            par['net']['channels'],par['co']['combination_method'])

    sttimes = datetime_list(par['co']['read_start'], par['co']['read_end'], inc=par['co']['read_inc'])	## loop over 24hrs/whole days
    time_inc = datetime.timedelta(seconds=par['co']['read_len'])
    lle_df = lat_lon_ele_load(par['net']['coordinate_file'])
    res_dir = par['co']['res_dir']
    station_list = par['net']['stations']
    channel_list = par['net']['channels']


    program_start = UTCDateTime()

    # build a dictionary that caches the streams that reside in the same file
    stream_cache = {}
    for station in par['net']['stations']:
        stream_cache.update({station:{}})
        for channel in par['net']['channels']:
            stream_cache[station].update({channel:Stream().append(Trace())})


    # mapping of stations to processes
    pmap = (np.arange(len(station_list))*psize)/len(station_list)
    # indices of the stations to be worked on by each process
    st_ind = np.where(pmap == rank)[0]

    # number of subdivision of read length
    if 'subdivision' in par['co'].keys():
        nsub = int(np.ceil((float(par['co']['read_len']) - par['co']['subdivision']['corr_len'])
                                      /par['co']['subdivision']['corr_inc'])+1)
    else:
        nsub = 1
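    # Illustrative (assumed) numbers: read_len = 86400 s, corr_len = 3600 s
    # and corr_inc = 3600 s give nsub = ceil((86400 - 3600) / 3600) + 1 = 24
    # hour-long correlation windows per day-long read chunk.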

    # loop over times
    pathname = os.path.join(res_dir, correlation_subdir_name(sttimes[0]))
    print '\nrank %d of %d'  % (rank,psize)
    logger.debug('Rank %d of %d Beginning execution.'  % (rank,psize))
    for sttime in sttimes:
        if rank == 0:
            print "\n>>> Working on %s at %s:" % (sttime,UTCDateTime())
            logger.debug("\n>>> Working on %s at %s:" % (sttime,UTCDateTime()))
        usttime = UTCDateTime(sttime)
        # fill cache and extract current stream: only done by process 0
        cst = Stream()
        # loop over stations different stations for every process
        for this_ind in st_ind:
            station = station_list[this_ind]
            tst = Stream()
            for channel in channel_list:
                print channel, station
                try:
                    if ((len(stream_cache[station][channel])==0) or 
                            (not ((stream_cache[station][channel][0].stats['starttime']<=usttime) &
                            (stream_cache[station][channel][0].stats['endtime']>=(usttime+par['co']['read_len']))))):
                        stream_cache[station][channel] = read_from_filesystem('%s.*.%s' %(station, channel), sttime, sttime+time_inc, par['net']['fss'], trim=False)
                        if not stream_cache[station][channel]:
                            logger.warning("%s %s at %s: No trace read." % (station, channel, sttime))
                            continue
                        samp_flag = False
                        for tr in stream_cache[station][channel]:
                            if tr.stats.sampling_rate != par['co']['sampling_rate']:
                                samp_flag = True
                        if samp_flag:
                            logger.warning("%s %s at %s: Mismatching sampling rate." % (station, channel, sttime))
                            stream_cache[station][channel] = Stream()
                            continue
                        if par['co']['decimation'] > 1:
                            sst = stream_cache[station][channel].split()
                            sst.decimate(par['co']['decimation'])
                            stream_cache[station][channel] = deepcopy(sst.merge())
                    ttst = stream_cache[station][channel].copy().trim(starttime=usttime,
                                                 endtime=usttime+par['co']['read_len'])
                    get_valid_traces(ttst)
                    tst += ttst
                except:
                    logger.warning("%s %s at %s: %s" % (station, channel, sttime, sys.exc_info()[0]))
            cst += tst
        cst = stream_add_lat_lon_ele(cst,lle_df)
        # initial preprocessing on long time series
        if 'preProcessing' in par['co'].keys():
            for procStep in par['co']['preProcessing']:
                cst = procStep['function'](cst,**procStep['args'])

        # create output path
        pathname = os.path.join(par['co']['res_dir'],correlation_subdir_name(sttime))
        if rank == 0:
            create_path(pathname)
                
        # broadcast every station to every process    
        st = Stream()
        for pind in range(psize):
            pst = Stream()
            if rank == pind:
                pst = deepcopy(cst)
            pst = comm.bcast(pst, root=pind)
            st += pst

        ## do correlations
        if len(st) == 0:
            logger.warning("%s: No traces to correlate." % (sttime))
        else:
            targs = deepcopy(par['co']['corr_args'])
            if 'direct_output' in targs.keys():
                targs['direct_output']['base_dir'] = pathname
            # loop over subdivisions
            for subn in range(nsub):
                if nsub > 1:
                    sub_st = st.copy().trim(starttime=UTCDateTime(sttime)+
                                    subn*par['co']['subdivision']['corr_inc'],
                                    endtime=UTCDateTime(sttime)+subn*par['co']['subdivision']['corr_inc']+
                                    par['co']['subdivision']['corr_len'])
                    get_valid_traces(sub_st)
                else:
                    sub_st = st
                targs['combinations'] = select_available_combinations(sub_st,comb_list,targs)
                if len(targs['combinations']) == 0:
                    continue
                cst = px.stream_pxcorr(sub_st,targs,comm=comm)
                # if 'direct_output' is in targs, cst is empty and the
                # following will not be executed
                if cst:
                    if par['co']['rotation']:
                        rcst = px.rotate_multi_corr_stream(cst)
                    else:
                        rcst = cst
                
                    # distributed writing
                    # mapping of stations to processes
                    pmap = (np.arange(len(rcst))*psize)/len(rcst)
                    # indices of the traces to be written by each process
                    tr_ind = np.where(pmap == rank)[0]
                    logger.debug('Process %d starting to write %d traces to %s.' % (rank,len(tr_ind),pathname))
                    this_st = Stream()
                    for this_ind in tr_ind:
                        this_st.append(rcst[this_ind])
                    convert_to_matlab(this_st,'trace',pathname)
            
        # if there is a subdivision of read traces
        comm.barrier()
        if ('subdivision' in par['co']) and (rank == 0):
            logger.debug('combining subdivisions')
            # combine traces to matrix
            corr_mat_create_from_traces(pathname, pathname, delete_trace_files=True)
            if par['co']['subdivision']['recombine_subdivision']:
                flist = dir_read(pathname,'mat__*.mat')
                for fl in flist:
                    try:
                        mat = mat_to_ndarray(fl)
                        tr = corr_mat_extract_trace(mat,method='norm_mean')
                        save_dict_to_matlab_file(fl.replace('mat__','tr__'),tr)
                        if par['co']['subdivision']['delete_subdivisions']:
                            os.remove(fl)
                    except:
                        pass



    program_end = UTCDateTime()

    print 'rank %d execution time' % rank, program_end-program_start
    logger.debug('Rank %d of %d End execution.'  % (rank,psize))
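A minimal sketch of how paracorr might be driven. The keys below are the ones accessed in the function body; every concrete value is a placeholder rather than a working configuration, and the script would typically be launched under mpirun.

# Hypothetical driver script for paracorr (all values are placeholders).
par = {
    'execution_start': '2024-01-01T00:00:00',
    'log_dir': 'logs',
    'net': {'stations': ['XX.STA1', 'XX.STA2'],
            'channels': ['HHZ', 'HHN', 'HHE'],
            'coordinate_file': 'coordinates.txt',
            'fss': None},  # file system structure for read_from_filesystem
    'co': {'combination_method': 'betweenStations',
           'read_start': '2024-01-01 00:00:00.0',
           'read_end': '2024-01-02 00:00:00.0',
           'read_inc': 86400, 'read_len': 86400,
           'sampling_rate': 25, 'decimation': 1,
           'res_dir': 'correlations',
           'rotation': False,
           'corr_args': {}},
}
paracorr(par)    # e.g. run as: mpirun -n 4 python run_paracorr.py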
Example #53
def _read_ah1(filename):
    """
    Reads an AH v1 waveform file and returns a Stream object.

    :type filename: str
    :param filename: AH v1 file to be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :returns: Stream with Traces specified by given file.
    """

    def _unpack_trace(data):
        ah_stats = AttribDict({
            'version': '1.0',
            'event': AttribDict(),
            'station': AttribDict(),
            'record': AttribDict(),
            'extras': []
        })

        # station info
        ah_stats.station.code = _unpack_string(data)
        ah_stats.station.channel = _unpack_string(data)
        ah_stats.station.type = _unpack_string(data)
        ah_stats.station.latitude = data.unpack_float()
        ah_stats.station.longitude = data.unpack_float()
        ah_stats.station.elevation = data.unpack_float()
        ah_stats.station.gain = data.unpack_float()
        ah_stats.station.normalization = data.unpack_float()  # A0
        poles = []
        zeros = []
        for _i in range(0, 30):
            r = data.unpack_float()
            i = data.unpack_float()
            poles.append(complex(r, i))
            r = data.unpack_float()
            i = data.unpack_float()
            zeros.append(complex(r, i))
        # first value describes number of poles/zeros
        npoles = int(poles[0].real) + 1
        nzeros = int(zeros[0].real) + 1
        ah_stats.station.poles = poles[1:npoles]
        ah_stats.station.zeros = zeros[1:nzeros]
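        # worked example of the convention above: if poles[0].real == 4.0,
        # npoles == 5 and poles[1:5] keeps the four actual poles; the rest of
        # the fixed 30-element arrays is padding and is discarded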

        # event info
        ah_stats.event.latitude = data.unpack_float()
        ah_stats.event.longitude = data.unpack_float()
        ah_stats.event.depth = data.unpack_float()
        ot_year = data.unpack_int()
        ot_mon = data.unpack_int()
        ot_day = data.unpack_int()
        ot_hour = data.unpack_int()
        ot_min = data.unpack_int()
        ot_sec = data.unpack_float()
        try:
            ot = UTCDateTime(ot_year, ot_mon, ot_day, ot_hour, ot_min, ot_sec)
        except Exception:
            ot = None
        ah_stats.event.origin_time = ot
        ah_stats.event.comment = _unpack_string(data)

        # record info
        ah_stats.record.type = dtype = data.unpack_int()  # data type
        ah_stats.record.ndata = ndata = data.unpack_uint()  # number of samples
        ah_stats.record.delta = data.unpack_float()  # sampling interval
        ah_stats.record.max_amplitude = data.unpack_float()
        at_year = data.unpack_int()
        at_mon = data.unpack_int()
        at_day = data.unpack_int()
        at_hour = data.unpack_int()
        at_min = data.unpack_int()
        at_sec = data.unpack_float()
        at = UTCDateTime(at_year, at_mon, at_day, at_hour, at_min, at_sec)
        ah_stats.record.start_time = at
        ah_stats.record.abscissa_min = data.unpack_float()
        ah_stats.record.comment = _unpack_string(data)
        ah_stats.record.log = _unpack_string(data)

        # extras
        ah_stats.extras = data.unpack_array(data.unpack_float)

        # unpack data using dtype from record info
        if dtype == 1:
            # float
            temp = data.unpack_farray(ndata, data.unpack_float)
        elif dtype == 6:
            # double
            temp = data.unpack_farray(ndata, data.unpack_double)
        else:
            # e.g. 3 (vector), 2 (complex), 4 (tensor)
            msg = 'Unsupported AH v1 record type %d'
            raise NotImplementedError(msg % (dtype))
        tr = Trace(np.array(temp))
        tr.stats.ah = ah_stats
        tr.stats.delta = ah_stats.record.delta
        tr.stats.starttime = ah_stats.record.start_time
        tr.stats.station = ah_stats.station.code
        tr.stats.channel = ah_stats.station.channel
        return tr

    st = Stream()
    with open(filename, "rb") as fh:
        # read with XDR library
        data = xdrlib.Unpacker(fh.read())
        # loop as long we can read records
        while True:
            try:
                tr = _unpack_trace(data)
                st.append(tr)
            except EOFError:
                break
        return st
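A hedged usage sketch for the reader above; the file name is a placeholder, and in ObsPy AH files are normally read through the generic read() entry point rather than by calling the helper directly.

# Hypothetical direct call; "example.ah" stands in for a real AH v1 file.
st = _read_ah1("example.ah")
tr = st[0]
delta = tr.stats.ah.record.delta        # sampling interval from the record info
origin_time = tr.stats.ah.event.origin_time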
Example #54
def postprocess_adjsrc(adjsrcs, adj_starttime, raw_synthetic, inventory, event,
                       sum_over_comp_flag=False, weight_dict=None):
    """
    Postprocess adjoint sources to fit the SPECFEM input (same as raw_synthetic):
    1) zero padding the adjoint sources
    2) interpolation
    3) summing multiple instruments together if there are any
    4) rotation from (R, T) to (N, E)

    :param adjsrcs: adjoint sources list from the same station
    :type adjsrcs: list
    :param adj_starttime: starttime of adjoint sources
    :type adj_starttime: obspy.UTCDateTime
    :param raw_synthetic: raw synthetic from SPECFEM output, as reference
    :type raw_synthetic: obspy.Stream or obspy.Trace
    :param inventory: station inventory
    :type inventory: obspy.Inventory
    :param event: event information
    :type event: obspy.Event
    :param sum_over_comp_flag: sum over component flag
    :param weight_dict: weight dictionary
    """

    # extract event information
    origin = event.preferred_origin() or event.origins[0]
    elat = origin.latitude
    elon = origin.longitude
    event_time = origin.time

    # extract station information
    slat = float(inventory[0][0].latitude)
    slon = float(inventory[0][0].longitude)

    # transfer AdjointSource type to stream
    adj_stream = Stream()
    for chan_id, adj in adjsrcs.iteritems():
        _tr = _convert_adj_to_trace(adj, adj_starttime, chan_id)
        adj_stream.append(_tr)

    interp_starttime = raw_synthetic[0].stats.starttime
    interp_delta = raw_synthetic[0].stats.delta
    interp_npts = raw_synthetic[0].stats.npts
    interp_endtime = interp_starttime + interp_delta * interp_npts
    time_offset = interp_starttime - event_time

    # zero padding
    zero_padding_stream(adj_stream, interp_starttime, interp_endtime)

    # interpolate
    adj_stream.interpolate(sampling_rate=1.0/interp_delta,
                           starttime=interp_starttime,
                           npts=interp_npts)

    # sum multiple instruments
    if sum_over_comp_flag:
        if weight_dict is None:
            raise ValueError("weight_dict should be assigned if you want "
                             "to sum over components")
        adj_stream = sum_adj_on_component(adj_stream, weight_dict)

    # add zero trace for missing components
    missinglist = ["Z", "R", "T"]
    tr_template = adj_stream[0]
    for tr in adj_stream:
        missinglist.remove(tr.stats.channel[-1])
    for component in missinglist:
        zero_adj = tr_template.copy()
        zero_adj.data.fill(0.0)
        zero_adj.stats.channel = "%s%s" % (tr_template.stats.channel[0:2],
                                           component)
        adj_stream.append(zero_adj)

    # rotate
    baz = calculate_baz(elat, elon, slat, slon)
    components = [tr.stats.channel[-1] for tr in adj_stream]

    if "R" in components and "T" in components:
        try:
            adj_stream.rotate(method="RT->NE", back_azimuth=baz)
        except Exception as e:
            print e

    # prepare the final results
    final_adjsrcs = []
    _temp_id = adjsrcs.keys()[0]
    adj_src_type = adjsrcs[_temp_id].adj_src_type
    minp = adjsrcs[_temp_id].min_period
    maxp = adjsrcs[_temp_id].max_period
    for tr in adj_stream:
        _adj = AdjointSource(adj_src_type, 0.0, 0.0, minp, maxp, "")
        final_adjsrcs.append(_convert_trace_to_adj(tr, _adj))

    return final_adjsrcs, time_offset
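A hedged sketch of a call to postprocess_adjsrc. The channel ids and weights are hypothetical and only illustrate the expected argument shapes; adjsrcs, adj_starttime, raw_synthetic, inventory and event are assumed to come from the surrounding adjoint-source workflow.

# Hypothetical weights: one entry per component, summing channels of the
# same component from different instruments (locations "00" and "10").
weights = {"Z": {"IU.ANMO.00.BHZ": 0.4, "IU.ANMO.10.BHZ": 0.6},
           "R": {"IU.ANMO.00.BHR": 1.0},
           "T": {"IU.ANMO.00.BHT": 1.0}}
final_adjsrcs, time_offset = postprocess_adjsrc(
    adjsrcs, adj_starttime, raw_synthetic, inventory, event,
    sum_over_comp_flag=True, weight_dict=weights)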
Example #55
def readSLIST(filename, headonly=False, **kwargs):  # @UnusedVariable
    """
    Reads an ASCII SLIST file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read('/path/to/slist.ascii')
    """
    with open(filename, 'rt') as fh:
        # read file and split text into channels
        buf = []
        key = False
        for line in fh:
            if line.isspace():
                # blank line
                continue
            elif line.startswith('TIMESERIES'):
                # new header line
                key = True
                buf.append((line, StringIO()))
            elif headonly:
                # skip data for option headonly
                continue
            elif key:
                # data entry - may be written in multiple columns
                buf[-1][1].write(line.strip() + ' ')
    # create ObsPy stream object
    stream = Stream()
    for header, data in buf:
        # create Stats
        stats = Stats()
        parts = header.replace(',', '').split()
        temp = parts[1].split('_')
        stats.network = temp[0]
        stats.station = temp[1]
        stats.location = temp[2]
        stats.channel = temp[3]
        stats.sampling_rate = parts[4]
        # quality only used in MSEED
        stats.mseed = AttribDict({'dataquality': temp[4]})
        stats.ascii = AttribDict({'unit': parts[-1]})
        stats.starttime = UTCDateTime(parts[6])
        stats.npts = parts[2]
        if headonly:
            # skip data
            stream.append(Trace(header=stats))
        else:
            data = _parse_data(data, parts[8])
            stream.append(Trace(data=data, header=stats))
    return stream
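For orientation, a header line of the kind the parser above expects looks roughly like the following (the values are made up); splitting it the same way the function does shows which fields end up where.

# Assumed SLIST header line (placeholder values) split as in readSLIST.
header = ("TIMESERIES XX_TEST__BHZ_R, 1000 samples, 40 sps, "
          "2024-01-01T00:00:00.000000, SLIST, INTEGER, Counts")
parts = header.replace(',', '').split()
# parts[1] -> 'XX_TEST__BHZ_R': network, station, location, channel, quality
# parts[2] -> '1000' (npts), parts[4] -> '40' (sampling rate)
# parts[6] -> start time, parts[8] -> 'INTEGER' (data type), parts[-1] -> unit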
Example #56
def vespagram(stream, ev, inv, method, frqlow, frqhigh, baz, scale, nthroot=4,
              filter=True, static3D=False, vel_corr=4.8, sl=(0.0, 10.0, 0.5),
              align=False, align_phase=['P', 'Pdiff'], plot_trace=True):
    """
    vespagram wrapper routine for MESS 2014.

    :param stream: Waveforms for the array processing.
    :type stream: :class:`obspy.core.stream.Stream`
    :param ev: Event whose (preferred) origin is used for travel times and
        alignment.
    :type ev: :class:`obspy.core.event.Event`
    :param inv: Station metadata for the waveforms.
    :type inv: :class:`obspy.station.inventory.Inventory`
    :param method: Method used for the array analysis
        (one of "DLS": Delay and Sum, "PWS": Phase Weighted Stack).
    :type method: str
    :param frqlow: Low corner of frequency range for array analysis
    :type frqlow: float
    :param frqhigh: High corner of frequency range for array analysis
    :type frqhigh: float
    :param baz: pre-defined (theoretical or calculated) backazimuth used for
        the calculation
    :type baz: float
    :param scale: scale for plotting
    :type scale: float
    :param nthroot: nth root used in the computation of the beam
    :type nthroot: int
    :param filter: Whether to bandpass data to selected frequency range
    :type filter: bool
    :param static3D: static correction of topography using `vel_corr` as
        velocity (slow!)
    :type static3D: bool
    :param vel_corr: Correction velocity for static topography correction in
        km/s.
    :type vel_corr: float
    :param sl: Min/max slowness and slowness step width for the analysis
    :type sl: (float, float, float)
    :param align: whether to align the vespagram to a certain phase
    :type align: bool
    :param align_phase: phase to align on (may be a list if simultaneous
        arrivals are expected, e.g. P, PcP, Pdiff)
    :type align_phase: str or list of str
    :param plot_trace: if True plot the vespagram as a wiggle plot, if False
        as a density map
    :type plot_trace: bool
    """

    starttime = max([tr.stats.starttime for tr in stream])
    endtime = min([tr.stats.endtime for tr in stream])
    stream.trim(starttime, endtime)

    org = ev.preferred_origin() or ev.origins[0]
    ev_lat = org.latitude
    ev_lon = org.longitude
    ev_depth = org.depth/1000.  # in km
    ev_otime = org.time

    sll, slm, sls = sl
    sll /= KM_PER_DEG
    slm /= KM_PER_DEG
    sls /= KM_PER_DEG
    center_lon = 0.
    center_lat = 0.
    center_elv = 0.
    seismo = stream
    seismo.attach_response(inv)
    seismo.merge()
    sz = Stream()
    i = 0
    for tr in seismo:
        for station in inv[0].stations:
            if tr.stats.station == station.code:
                tr.stats.coordinates = \
                    AttribDict({'latitude': station.latitude,
                                'longitude': station.longitude,
                                'elevation': station.elevation})
                center_lon += station.longitude
                center_lat += station.latitude
                center_elv += station.elevation
                i += 1
        sz.append(tr)

    center_lon /= float(i)
    center_lat /= float(i)
    center_elv /= float(i)

    starttime = max([tr.stats.starttime for tr in stream])
    stt = starttime
    endtime = min([tr.stats.endtime for tr in stream])
    e = endtime
    stream.trim(starttime, endtime)

    #nut = 0
    max_amp = 0.
    sz.trim(stt, e)
    sz.detrend('simple')

    print sz
    fl, fh = frqlow, frqhigh
    if filter:
        sz.filter('bandpass', freqmin=fl, freqmax=fh, zerophase=True)

    if align:
        deg = []
        shift = []
        res = gps2DistAzimuth(center_lat, center_lon, ev_lat, ev_lon)
        deg.append(kilometer2degrees(res[0]/1000.))
        tt = getTravelTimes(deg[0], ev_depth, model='ak135')
        for item in tt:
            phase = item['phase_name']
            if phase in align_phase:
                try:
                    travel = item['time']
                    travel = ev_otime.timestamp + travel
                    dtime = travel - stt.timestamp
                    shift.append(dtime)
                except:
                    break
        for i, tr in enumerate(sz):
            res = gps2DistAzimuth(tr.stats.coordinates['latitude'],
                                  tr.stats.coordinates['longitude'],
                                  ev_lat, ev_lon)
            deg.append(kilometer2degrees(res[0]/1000.))
            tt = getTravelTimes(deg[i+1], ev_depth, model='ak135')
            for item in tt:
                phase = item['phase_name']
                if phase in align_phase:
                    try:
                        travel = item['time']
                        travel = ev_otime.timestamp + travel
                        dtime = travel - stt.timestamp
                        shift.append(dtime)
                    except:
                        break
        shift = np.asarray(shift)
        shift -= shift[0]
        AA.shifttrace_freq(sz, -shift)

    baz += 180.
    nbeam = int((slm - sll)/sls + 0.5) + 1
    kwargs = dict(
        # slowness grid: X min, X max, Y min, Y max, Slow Step
        sll=sll, slm=slm, sls=sls, baz=baz, stime=stt, method=method,
        nthroot=nthroot, etime=e, correct_3dplane=False, static_3D=static3D,
        vel_cor=vel_corr)

    start = UTCDateTime()
    slow, beams, max_beam, beam_max = AA.vespagram_baz(sz, **kwargs)
    print "Total time in routine: %f\n" % (UTCDateTime() - start)

    df = sz[0].stats.sampling_rate
    # Plot the seismograms
    npts = len(beams[0])
    print npts
    T = np.arange(0, npts/df, 1/df)
    sll *= KM_PER_DEG
    slm *= KM_PER_DEG
    sls *= KM_PER_DEG
    slow = np.arange(sll, slm, sls)
    max_amp = np.max(beams[:, :])
    #min_amp = np.min(beams[:, :])
    scale *= sls

    fig = plt.figure(figsize=(12, 8))

    if plot_trace:
        ax1 = fig.add_axes([0.1, 0.1, 0.85, 0.85])
        for i in xrange(nbeam):
            if i == max_beam:
                ax1.plot(T, sll + scale*beams[i]/max_amp + i*sls, 'r',
                         zorder=1)
            else:
                ax1.plot(T, sll + scale*beams[i]/max_amp + i*sls, 'k',
                         zorder=-1)
        ax1.set_xlabel('Time [s]')
        ax1.set_ylabel('slowness [s/deg]')
        ax1.set_xlim(T[0], T[-1])
        data_minmax = ax1.yaxis.get_data_interval()
        minmax = [min(slow[0], data_minmax[0]), max(slow[-1], data_minmax[1])]
        ax1.set_ylim(*minmax)
    #####
    else:
        #step = (max_amp - min_amp)/100.
        #level = np.arange(min_amp, max_amp, step)
        #beams = beams.transpose()
        #cmap = cm.hot_r
        cmap = cm.rainbow

        ax1 = fig.add_axes([0.1, 0.1, 0.85, 0.85])
        #ax1.contour(slow,T,beams,level)
        #extent = (slow[0], slow[-1], \
        #               T[0], T[-1])
        extent = (T[0], T[-1], slow[0] - sls * 0.5, slow[-1] + sls * 0.5)

        ax1.set_ylabel('slowness [s/deg]')
        ax1.set_xlabel('T [s]')
        beams = np.flipud(beams)
        ax1.imshow(beams, cmap=cmap, interpolation="nearest",
                   extent=extent, aspect='auto')

    ####
    result = "BAZ: %.2f Time %s" % (baz-180., stt)
    ax1.set_title(result)

    plt.show()
    return slow, beams, max_beam, beam_max
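A hedged usage sketch for the wrapper above; stream, event and inventory are assumed to be an ObsPy Stream, Event and Inventory for an array, and the numeric values are illustrative only.

# Hypothetical call: delay-and-sum vespagram for slownesses 0-10 s/deg in
# 0.5 s/deg steps, band-passed 0.03-0.1 Hz, at a back azimuth of 72 degrees.
slow, beams, max_beam, beam_max = vespagram(
    stream, event, inventory, method="DLS", frqlow=0.03, frqhigh=0.1,
    baz=72.0, scale=2.0, nthroot=4, filter=True, static3D=False,
    vel_corr=4.8, sl=(0.0, 10.0, 0.5), align=False, plot_trace=True)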