Example no. 1
def get_waveforms():
    events = get_events()
    client = ArcClient()
    wforms = Stream()
    for event in events:
        t = event.preferred_origin().time
        args = seed_id.split('.') + [t + 5 * 60, t + 14 * 60]
        wforms.extend(client.getWaveform(*args))
    wforms.decimate(int(round(wforms[0].stats.sampling_rate)) // 5,
                    no_filter=True)
    wforms.write(wavname, wavformat)
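This function relies on module-level names (`get_events`, `ArcClient`, `seed_id`, `wavname`, `wavformat`) defined elsewhere in the source project. A minimal sketch of what those definitions might look like, purely as an assumption so the example can be read standalone:

from obspy import Stream
# ArcClient presumably aliases the legacy ArcLink client, which was removed
# from recent ObsPy releases; this import path is an assumption.
from obspy.clients.arclink import Client as ArcClient

seed_id = 'GE.APE..BHZ'      # illustrative NET.STA.LOC.CHA id, not from the source
wavname = 'waveforms.mseed'  # hypothetical output file name
wavformat = 'MSEED'          # hypothetical output format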
Example no. 2
def get_waveforms():
    events = get_events()
    client = ArcClient(**client_kwargs)
    wforms = Stream()
    for i, event in enumerate(events):
        print('Fetch data for event no. %d' % (i + 1))
        t = event.preferred_origin().time
        for sta in stations:
            args = (net, sta, loc, cha, t - 10, t + 220)
            try:
                stream = client.getWaveform(*args)
            except Exception:
                print('no data for %s' % (args,))
                continue
            sr = stream[0].stats.sampling_rate
            stream.decimate(int(sr) // 20, no_filter=True)
            for tr in stream:
                del tr.stats.mseed
            stream.merge()
            wforms.extend(stream)
    wforms.write(wavname, wavformat)
    return wforms
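As in Example no. 1, the function depends on module-level configuration. A hedged sketch of the assumed globals (all values are placeholders, not taken from the source):

client_kwargs = {}               # e.g. host/user for the ArcLink client
net, loc, cha = 'GE', '', 'BHZ'  # illustrative SEED codes
stations = ['APE', 'KARP']       # illustrative station list
wavname, wavformat = 'events.mseed', 'MSEED'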
Example no. 3
client = Client("http://10.153.82.3:8080", timeout=60)

st = Stream()
num_stations = 0
exceptions = []
for station in STATIONS:
    try:
        # we request 180 s more at start and end and cut them off later to
        # avoid a false trigger due to the tapering during instrument correction
        tmp = client.waveform.getWaveform(NET, station, "", CHANNEL, T1 - 180,
                                          T2 + 180, getPAZ=True,
                                          getCoordinates=True)
    except Exception as e:
        exceptions.append("%s: %s" % (e.__class__.__name__, e))
        continue
    st.extend(tmp)
    num_stations += 1
st.merge(-1)
st.sort()

summary = []
summary.append("#" * 79)
summary.append("######## %s  ---  %s ########" % (T1, T2))
summary.append("#" * 79)
summary.append(st.__str__(extended=True))
if exceptions:
    summary.append("#" * 33 + " Exceptions  " + "#" * 33)
    summary += exceptions
summary.append("#" * 79)

trig = []
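The snippet ends right after initializing `trig`. In ObsPy such a list is typically filled by a network coincidence trigger; a plausible continuation sketch (trigger type and thresholds are assumptions, not taken from the source):

from obspy.signal.trigger import coincidence_trigger

# Hypothetical continuation: recursive STA/LTA coincidence trigger on the
# merged stream; all numeric thresholds below are illustrative only.
trig = coincidence_trigger("recstalta", 3.5, 1.0, st, 3, sta=0.5, lta=10)
for event in trig:
    summary.append(str(event))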
Example no. 4
def corr_trace_fun(signals, comb=[], normal=True,
                   parallel=True, processes=None):
    """ Correlate Traces according to the given combinations

    The `corr_trace_fun` correlates the Traces contained in the passed
    :class:`~obspy.core.stream.Stream` object according to the list of
    combination tuples given as input. It does the job asynchronously,
    spawning as many processes as there are cores available on the host
    machine. If the traces do not share the same starttime, the correlation
    trace is shifted by fractions of a sample such that time alignment is
    obtained precisely at the sample 1971-01-01T00:00:00Z. If there is no
    overlap between the traces, this time might not be in the stream.

    :type signals: :class:`~obspy.core.stream.Stream`
    :param signals: The container for the Traces that we want to correlate
    :type comb: list, optional
    :param comb: List of combinations that must be calculated
    :type normal: bool, optional
    :param normal: Normalization flag (see
        :func:`~miic.core.corr_fun.conv_traces` for details)
    :type parallel: bool (default: True)
    :param parallel: Whether the correlation is run in parallel or not
    :type processes: int
    :param processes: Number of processes to start (if None it will be equal
        to the number of cores available on the host machine)

    :rtype: :class:`~obspy.core.stream.Stream`
    :return: **corrData**: The resulting object containing the correlation data
        and their metadata, obtained as described
        in the function :func:`~miic.core.corr_fun.conv_traces`
    """

    if not isinstance(signals, Stream):
        raise TypeError("signals must be an obspy Stream object.")

    corrData = Stream()

    nSignal = signals.count()

    if nSignal == 0:
        print "Empty stream!!"
        return corrData

    if (nSignal == 1) and not (comb == [(1, 1)]):
        print "Single trace. No cross correlation"
        return corrData

    if comb == []:
        comb = [(k, i) for k in range(nSignal) for i in range(k + 1, nSignal)]

    if not parallel:
        dc = _doCorr(signals, normal)
        # Stream.extend expects a list, so materialize the map object first
        corrData.extend(list(map(dc, comb)))
    else:
        if processes == 0:
            processes = None

        p = Pool(processes=processes)

        p.map_async(_doCorr(signals, normal),
                    comb,
                    callback=_AppendST(corrData))

        p.close()
        p.join()

    return corrData
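`_doCorr` and `_AppendST` are private helpers of miic.core.corr_fun and are not shown here. A minimal usage sketch of the public entry point, assuming the miic package is importable:

import numpy as np
from obspy import Stream, Trace

# Two illustrative random traces; serial mode avoids the multiprocessing path.
st = Stream([Trace(data=np.random.randn(1000)),
             Trace(data=np.random.randn(1000))])
corr = corr_trace_fun(st, parallel=False)  # default comb: all unique pairs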
Example no. 5
def _three_station_interferometry_pair(rec1, rec2):
    """
    Calculate source-specific interferograms (:math:`C_3`)
    and stack them into :math:`I_3` for a station pair.
    """
    dest = get_fnm('I3', rec1, sta2=rec2)
    if PARAM['skip']['I3'] and exists(dest):
        logger.debug(f'Skip {dest}')
        return dest

    phprper = None
    phprvel = None
    if USE_DW and PARAM['interferometry']['phase_shift']:
        phprper, phprvel = get_pred_pv(STA2NET[rec1], rec1, STA2NET[rec2], rec2)

    dir_src = PARAM['interferometry'].get('dir_src', True)
    dest_I3s = []
    I3s = Stream()
    nsrc = 0
    n1 = 0
    n2 = 0
    for src in PAIR2SRC[f'{rec1}_{rec2}']:
        dest_I3, I3 = _source_specifc_interferogram_pair(
            rec1, rec2, src,
            phprper=phprper, phprvel=phprvel,
        )
        if I3:
            dest_I3s.extend(dest_I3)
            I3s.extend(I3)
            nsrc += 1
            if dir_src:
                if I3[0].stats.sac[KEY2SHD['dir_src']] == 1:
                    n1 += 1
                else:
                    n2 += 1

    # Do stack
    if PARAM['write']['stack']:
        my.sys_tool.mkdir(join(DIROUT, PARAM['dir']['I3'], rec1))
        if nsrc < PARAM['stack']['min_src']:
            logger.debug(f'Insufficient ({nsrc}) source-stations for {rec1}-{rec2}')
            return

        logger.debug(f'{nsrc} source-stations for {rec1}-{rec2}')

        stats = I3s[0].stats.copy()
        stats.sac[KEY2SHD['nsrc']] = nsrc
        if dir_src:
            stats.sac[KEY2SHD['nsrc_dir1']] = n1
            stats.sac[KEY2SHD['nsrc_dir2']] = n2
        for key in [
            'src_net',
            'src_sta',
            'dir_src',
            'dr',
            'theta',
        ]:
            stats.sac.pop(KEY2SHD[key])

        sk = my.seis.stack(
            I3s,
            stats=stats,
            kw_stack={**PARAM['stack'], **KEY2SHD},
            kw_snr=PARAM['snr'],
        )
        if sk is None:
            logger.warning(f'{rec1}-{rec2}: Failed for SNR')
            return
        else:
            sk.write(dest, format='SAC')

        if PARAM['stack']['rand']:
            my.sys_tool.mkdir(join(
                DIROUT,
                PARAM['dir']['I3_rand'],
                rec1,
                rec2,
            ))
            dest_rand = get_fnm('I3_rand', rec1, sta2=rec2)
            for i, _sk in enumerate(my.seis.rand_stack(
                I3s,
                stats=stats,
                kw_stack={**PARAM['stack'], **KEY2SHD},
                kw_snr=PARAM['snr'],
            )):
                _sk.write(f'{dest_rand}.{i}', format='SAC')

    # To save SNR in header after stack
    if PARAM['write']['C3']:
        my.sys_tool.mkdir(join(DIROUT, rec1, rec2))
        for nm, tr in zip(dest_I3s, I3s):
            tr.write(nm, format='SAC')

    return dest
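The function reads its configuration from a module-level PARAM dictionary. Restricting it to the keys this function actually touches gives the following skeleton; every value is an illustrative placeholder, not taken from the source project:

PARAM = {
    'skip': {'I3': True},
    'interferometry': {'phase_shift': False, 'dir_src': True},
    'write': {'stack': True, 'C3': False},
    'stack': {'min_src': 5, 'rand': False},  # plus kwargs consumed by my.seis.stack
    'snr': {},
    'dir': {'I3': 'I3', 'I3_rand': 'I3_rand'},
}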
Example no. 6
def correlate(io,
              day,
              outkey,
              edge=60,
              length=3600,
              overlap=1800,
              demean_window=True,
              discard=None,
              only_auto_correlation=False,
              station_combinations=None,
              component_combinations=('ZZ', ),
              max_lag=100,
              keep_correlations=False,
              stack='1d',
              njobs=0,
              **preprocessing_kwargs):
    """
    Correlate data of one day

    :param io: io config dictionary
    :param day: |UTC| object with day
    :param outkey: the output key for the HDF5 index
    :param edge: additional time span requested from day before and after
        in seconds
    :param length: length of correlation in seconds (string possible)
    :param overlap: length of overlap in seconds (string possible)
    :param demean_window: demean each window individually before correlating
    :param discard: discard correlations whose data coverage is below this
        value (float from the interval [0, 1])
    :param only_auto_correlation: only correlate stations with themselves
        (different components possible)
    :param station_combinations: specify station combinations
        (e.g. ``'CX.PATCX-CX.PB01'``, network code can be
        omitted, e.g. ``'PATCX-PB01'``, default: all)
    :param component_combinations: component combinations to calculate,
        tuple of strings with length two, e.g. ``('ZZ', 'ZN', 'RR')``,
        if ``'R'`` or ``'T'`` is specified, components will be rotated after
        preprocessing, default: only ZZ components
    :param max_lag: max time lag in correlations in seconds
    :param keep_correlations: write correlations into HDF5 file (default: False)
    :param stack: stack correlations and write stacks into HDF5 file
        (default: ``'1d'``, must be one day or shorter)

        .. note::

            If you want to stack larger time spans
            use the separate stack command on correlations or stacked
            correlations.

    :param njobs: number of jobs used. Some tasks will run in parallel
        (preprocessing and correlation).
    :param \*\*preprocessing_kwargs: all other kwargs are passed to
        `preprocess`

    """
    inventory = io['inventory']
    length = _time2sec(length)
    overlap = _time2sec(overlap)
    if not keep_correlations and stack is None:
        msg = ('keep_correlations is False and stack is None -> correlations '
               'would not be saved')
        raise ValueError(msg)
    components = set(''.join(component_combinations))
    if 'R' in components or 'T' in components:
        load_components = components - {'R', 'T'} | {'N', 'E'}
    else:
        load_components = components
    if station_combinations is not None:
        load_stations = set(sta for comb in station_combinations
                            for sta in comb.split('-'))
    else:
        load_stations = None
    # load data
    stream = obspy.Stream()
    for smeta in _iter_station_meta(inventory, load_components):
        if (load_stations is not None and smeta['station'] not in load_stations
                and '.'.join((smeta['network'], smeta['station']))
                not in load_stations):
            continue
        stream2 = get_data(smeta,
                           io['data'],
                           io['data_format'],
                           day,
                           overlap=overlap,
                           edge=edge)
        if stream2:
            stream += stream2
    if len(stream) == 0:
        log.warning('empty stream for day %s', str(day)[:10])
        return
    preprocess(stream,
               day,
               inventory,
               overlap=overlap,
               njobs=njobs,
               **preprocessing_kwargs)
    # collect trace pairs for correlation
    next_day = day + 24 * 3600
    stations = sorted({tr.id[:-1] for tr in stream})
    tasks = []
    for station1, station2 in itertools.combinations_with_replacement(
            stations, 2):
        if only_auto_correlation and station1 != station2:
            continue
        if station_combinations and not any(
                set(station_comb.split('-')) ==
                ({station1.rsplit('.', 2)[0], station2.rsplit('.', 2)[0]}
                 if '.' in station_comb else
                 {station1.split('.')[1], station2.split('.')[1]})
                for station_comb in station_combinations):
            continue
        stream1 = Stream([tr for tr in stream if tr.id[:-1] == station1])
        stream2 = Stream([tr for tr in stream if tr.id[:-1] == station2])
        datetime1 = _midtime(stream1[0].stats)
        datetime2 = _midtime(stream2[0].stats)
        msg = 'Cannot get coordinates for channel %s datetime %s'
        try:
            c1 = inventory.get_coordinates(stream1[0].id, datetime=datetime1)
        except Exception as ex:
            raise RuntimeError(msg % (stream1[0].id, datetime1)) from ex
        try:
            c2 = inventory.get_coordinates(stream2[0].id, datetime=datetime2)
        except Exception as ex:
            raise RuntimeError(msg % (stream2[0].id, datetime2)) from ex
        args = (c1['latitude'], c1['longitude'], c2['latitude'],
                c2['longitude'])
        dist, azi, baz = gps2dist_azimuth(*args)
        if ('R' in components or 'T' in components) and station1 != station2:
            stream1 = stream1.copy()
            stream1b = stream1.copy().rotate('NE->RT', azi)
            stream1.extend(stream1b.select(component='R'))
            stream1.extend(stream1b.select(component='T'))
            stream2 = stream2.copy()
            stream2b = stream2.copy().rotate('NE->RT', azi)
            stream2.extend(stream2b.select(component='R'))
            stream2.extend(stream2b.select(component='T'))
        it_ = (itertools.product(stream1, stream2) if station1 != station2 else
               itertools.combinations_with_replacement(stream1, 2))
        for tr1, tr2 in it_:
            comps = tr1.stats.channel[-1] + tr2.stats.channel[-1]
            if component_combinations and (comps not in component_combinations
                                           and comps[::-1]
                                           not in component_combinations):
                continue
            tasks.append((tr1, tr2, dist, azi, baz))
    # start correlation
    do_work = partial(_slide_and_correlate_traces, day, next_day, length,
                      overlap, discard, max_lag, outkey, demean_window)
    streams = start_parallel_jobs_inner_loop(tasks, do_work, njobs)
    xstream = Stream()
    xstream.traces = [tr for s_ in streams for tr in s_]
    if len(xstream) > 0:
        res = {}
        if keep_correlations:
            res['corr'] = xstream
        if stack:
            res['stack'] = yam.stack.stack(xstream, stack)
        return res
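A hedged call sketch: `io` must at least provide the keys the function reads ('inventory', 'data', 'data_format'); the paths and values below are assumptions:

import obspy

io = {'inventory': obspy.read_inventory('stations.xml'),  # placeholder file
      'data': 'data',      # passed through to get_data(); exact form unknown here
      'data_format': 'MSEED'}
result = correlate(io, obspy.UTCDateTime('2018-01-02'), 'corr',
                   stack='1d', njobs=4)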
Example no. 7
class sdschunk(chunk):
	"""
	This class walks a SDS archive by incremental time and
	handle the synchronization of GAPS between traces supplied.
	
	One class handle one stream, but, while the get() method is called
	a list of streams can be passed what allows for getting a syncronized
	list of traces and streams.
	
	The return of the get() method is True or False. True when data was
	added to others and False when no data was added to others.
	"""
	def __init__(self, path, N, S, L, C, verbose = False):
		super().__init__(N, S, L, C, verbose)
		
		if not os.path.isdir(path):
			log(" Bad path to SDS", level=2, verbose=verbose)
		
		self.path = path
		self.N = N
		self.S = S
		self.L = L
		self.C = C
		self._verbose = verbose
		
		self._S = Stream()
		self._last_time = None
		self._visited = []

		log(f" I:> New SDSCHUNK {N}.{S}.{L}.{C} @ {self.path}", level=0,
					verbose=verbose)

	def _make_path(self, d):
		path = "%s/%04d/%s/%s/%s.D/%s.%s.%s.%s.D.%04d.%03d" % (self.path,
					d.year, self.N, self.S, self.C,self.N, self.S, self.L,
					self.C, d.year, d.julday)

		if path in self._visited: return None
		self._visited.append(path)

		return path

	def _update(self, s, e):
		s2 = (UTCDateTime(s.date) - 86400 / 2) if UTCDateTime(s).hour == 0 and\
					UTCDateTime(s).minute < 30 else UTCDateTime(s.date)
		e2 = (UTCDateTime(e.date) + 2 * 86400) if UTCDateTime(e).hour == 23 and\
					UTCDateTime(e).minute > 30 else (UTCDateTime(e.date) + 1*86400)

		while s2 < e2:
			self._extend(self._make_path(s2))
			s2 += 86400

		self._clean(s)

		return self._last_time

	def _extend(self, filename):
		if filename is None or not os.path.isfile(filename): return False

		self._S.extend(read(filename))
		self._S.merge(method = -1)
		self._S.sort()

		log(f" W:> READ: {filename}", level=0, verbose=self._verbose)

		self._last_time = self._S[-1].stats.endtime

		return True
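The get() method mentioned in the docstring is inherited from the parent chunk class and is not shown. A minimal instantiation sketch under that assumption (path and SEED codes are placeholders):

ck = sdschunk("/data/SDS", "GE", "APE", "", "BHZ", verbose=True)
others = []
# get() (defined on chunk, not shown here) is expected to append synchronized
# traces to `others` and return True when data was added.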
Example no. 8
	def download(self, location, channel, solstart, solend, replace=False):
		"""
		Download and store data

		location [str]: location code
		channel  [str]: channel code
		solstart [int]: initial download time in Sols
		solend   [int]: final download time in Sols
		replace [bool]: if True, will replace existing files with new ones
		"""

		if solstart > solend or solstart < 0 or solend < 0:
			log(f"Wrong Sol: {solstart} -> {solend}")

		# Starttime and Endtime converted from Sol to UTC
		time0, _ = sol_span_in_utc(solstart)
		_, time1 = sol_span_in_utc(solend)

		# Fix continuity
		ST = Stream()

		for year in range(time0.year, time1.year + 1, 1):

			start = max(UTCDateTime(f"{year}-01-01"), time0)
			end   = min(UTCDateTime(f"{year}-12-31T23:59:59.999999"), time1)

			for julday in range(start.julday, end.julday + 1, 1):

				# Starttime and Endtime for the entire julian day
				julday_t0, julday_t1 = julday_in_utc(year, julday)

				# Active channels
				loc_cha_running = self._get_cha(location, channel, julday_t0,
												julday_t1)

				for lc_id in loc_cha_running:

					loc, cha = lc_id.split(".")
					filename = self._gen_path(loc, cha, year, julday)
					code = f"{self.net}.{self.sta}.{loc}.{cha}"

					# File already exists
					if os.path.exists(filename) and not replace:
						log(f"FILE: {os.path.basename(filename)} already " +\
									"exists. Download will skip this day",
									level=1, verbose=self.verbose)
						continue
						
					else:
						try:

							st = CLIENT.get_waveforms(network=self.net,
											station=self.sta, location=loc,
											channel=cha, starttime=julday_t0-10,
											endtime=julday_t1+10)
							ST.extend([st[0]]).merge(method=1)
							ST.trim(julday_t0-10, julday_t1+10)

							st = ST.slice(julday_t0, julday_t1,
								nearest_sample=False).select(location=loc,
								channel=cha)

							if len(st) == 0:
								continue

						except header.FDSNNoDataException as E:
							log(f"{E}", level=1, verbose=self.verbose)
							continue

						except Exception as E:
							log(f"{E}", level=2, verbose=self.verbose)
							# bail out, otherwise the code below would reuse a
							# stale (or undefined) `st` from a prior iteration
							continue

						log(f"DOWNLOAD: {code} Julday: {julday}",
											level=0, verbose=self.verbose)

						# SDS Creation/Update
						self._make_dir(year, cha)

						if any(np.ma.is_masked(tr.data) for tr in st):
							print("MASKED")
						# 	for i in range(len(st)):
						# 		st[i].data = st[i].data.filled()
						st.split().write(filename, format="MSEED")
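This method leans on module-level helpers (CLIENT, sol_span_in_utc, julday_in_utc, log) and instance attributes such as self.net and self.sta. A hedged sketch of how CLIENT might be wired up, purely as an assumption suggested by the FDSN exception handling above:

from obspy.clients.fdsn import Client

# Hypothetical module-level FDSN client used by download(); the data centre
# is a placeholder, not taken from the source.
CLIENT = Client("IRIS")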
Example no. 9
def usarray_read(fname):
    """ Read the BAM US-Array lbv data format used on Mike-2 test specimen.

    Read the BAM US-Array lbv data format used on Mike-2 test specimen into a
    stream object.
    As there is no obvious station (or any other) information in the data file.
    As the parameters are not supposed to change, they are hardcoded here.

    :parameters:
    ------------
    :type fname: string
    :param fname: Path to the file containing the data to be read
        (WITHOUT EXTENSION); the extensions .lbv and .hdr will be added
        automatically
    :rtype: :class:`~obspy.core.Stream` object
    :return: **st**: obspy.core.Stream object
        Obspy stream object containing the data
    """

    # filenames
    lbvfilename = fname + '.lbv'
    hdrfilename = fname + '.hdr'

    # initialise
    st = Stream()
    tr = Trace()
    # tr = SacIO()

    # static parameters
    t = os.path.getmtime(hdrfilename)
    tt = datetime.datetime.fromtimestamp(t)

    tr.stats['starttime'] = UTCDateTime(tt.year, tt.month, tt.day, tt.hour,
                                        tt.minute, tt.second, tt.microsecond)
    tr.stats['network'] = 'BAM-USArray'
    tr.stats['channel'] = 'z'

    # reading header from file
    fh = open(hdrfilename, 'r')
    while True:
        line = fh.readline()
        if len(line) < 1:
            break
        line = line.rstrip()
        if line.find('PK') > -1:
            parts = re.split(':', line)
            tr.stats['location'] = parts[1].lstrip()
        if line.find('transceivers') > -1:
            parts = re.split(':', line)
            ntra = int(parts[1].lstrip())
            traco = np.zeros((ntra, 3), float)
            for i in range(ntra):
                coordstr = fh.readline().split()
                for j in range(3):
                    traco[i, j] = float(coordstr[j])
        if line.find('measurements') > -1:
            parts = re.split(':', line)
            nmeas = int(parts[1].lstrip())
            measco = np.zeros((nmeas, 2), int)
            for i in range(nmeas):
                configstr = fh.readline().split()
                for j in range(2):
                    measco[i, j] = float(configstr[j])
        if line.find('samples') > -1:
            parts = re.split(':', line)
            tr.stats['npts'] = int(parts[1].lstrip())
        if line.find('samplefreq') > -1:
            parts = re.split(':', line)
            tr.stats['sampling_rate'] = int(parts[1].lstrip())

    fh.close()

    # reading data from file
    fd = open(lbvfilename, 'rb')
    datatype = '>i2'
    read_data = np.fromfile(file=fd, dtype=datatype)
    fd.close()

    # sort and store traces
    for i in range(nmeas):
        # receiver number stored as station name
        tr.stats['station'] = str(measco[i, 1])
        # receiver coords (storing not yet implemented); use distinct columns
        # for x/y/z and the transmitter row for the event coordinates
        stla = traco[measco[i, 1] - 1, 0]  # x
        stlo = traco[measco[i, 1] - 1, 1]  # y
        stel = traco[measco[i, 1] - 1, 2]  # z
        # transmitter number stored as event name (storing not yet implemented)
        kevnm = str(measco[i, 0])
        # transmitter coords (storing not yet implemented)
        evla = traco[measco[i, 0] - 1, 0]  # x
        evlo = traco[measco[i, 0] - 1, 1]  # y
        evdp = traco[measco[i, 0] - 1, 2]  # z
        tr.data = read_data[i * tr.stats.npts:(i + 1) * tr.stats.npts]
        # append a copy, otherwise every element of st would reference the
        # same Trace object mutated in the next iteration
        st.append(tr.copy())
        # plot 1 trace for test purposes
        # if i==20:
        #    tr.plot()
        #    print ('plot done')

    return st
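A hedged usage sketch; the path is a placeholder and, as documented, .lbv and .hdr are appended by the function:

# 'measurement01' stands for measurement01.lbv / measurement01.hdr on disk.
st = usarray_read('measurement01')
print(st)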
Example no. 10
def kutec_read(fname):
    """ Read the K-UTec proprietary file format.

    Read data in the K-UTec specific IMC FAMOS format into a stream object.
    As there is no obvious station information in the data file
    Network is set to KU and Station is set to the first five letters of the
    filename.

    :parameters:
    ------------
    fname : string
        path to the file containing the data to be read

    .. rubric:: Returns

    st : obspy.core.Stream object
        Obspy stream object containing the data

    """
    tr = Trace()

    line = []
    keys = {}
    f = open(fname, 'r')
    char = f.read(1)  # read leading '|'
    while char == '|':
        key = []
        cnt = 0
        while 1:
            key.append(f.read(1))
            if key[-1] == ',':
                cnt += 1
            if cnt == 3:
                break
        tkeys = ''.join(key).split(',')
        key.append(f.read(int(tkeys[2])))
        keyline = ''.join(key)
        f.read(1)  # read terminating ';'
        char = f.read(1)  # read leading '|'
        # print char
        while (char == '\r') or (char == '\n'):
            char = f.read(1)  # read leading '|'
        #    print char
        keyval = keyline.split(',')
        # ######
        # # in the post 20120619 version files there are leading
        # linefeed in the key (\n), remove them here
        if keyval[0].startswith('\n|'):
            print "does this happen", keyval
            keyval[0] = keyval[0][2:]

        if keyval[0] == 'CF':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Dateiformat'] = int(keyval[1])
            keys[keyval[0]]['Keylaenge'] = int(keyval[2])
            keys[keyval[0]]['Prozessor'] = int(keyval[3])
        elif keyval[0] == 'CK':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['Dump'] = keyval[3]
            keys[keyval[0]]['Abgeschlossen'] = int(keyval[3])
            if keys[keyval[0]]['Abgeschlossen'] != 1:
                print "%s %s = %s not implemented." % (keyval[0], \
                        'Abgeschlossen', keys[keyval[0]]['DirekteFolgeAnzahl'])
        elif keyval[0] == 'NO':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['Ursprung'] = int(keyval[3])
            keys[keyval[0]]['NameLang'] = int(keyval[4])
            keys[keyval[0]]['Name'] = keyval[5]
            keys[keyval[0]]['KommLang'] = int(keyval[6])
            if keys[keyval[0]]['KommLang']:
                keys[keyval[0]]['Kommentar'] = keyval[7]
        elif keyval[0] == 'CP':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['BufferReferenz'] = int(keyval[3])
            keys[keyval[0]]['Bytes'] = int(keyval[4])  # bytes per sample value
            keys[keyval[0]]['ZahlenFormat'] = int(keyval[5])
            keys[keyval[0]]['SignBits'] = int(keyval[6])
            keys[keyval[0]]['Maske'] = int(keyval[7])
            keys[keyval[0]]['Offset'] = int(keyval[8])
            keys[keyval[0]]['DirekteFolgeAnzahl'] = int(keyval[9])
            keys[keyval[0]]['AbstandBytes'] = int(keyval[10])
            if keys[keyval[0]]['DirekteFolgeAnzahl'] != 1:
                print "%s %s = %s not implemented." % (keyval[0], \
                   'DirekteFolgeAnzahl', keys[keyval[0]]['DirekteFolgeAnzahl'])
                break

        elif keyval[0] == 'Cb':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['AnzahlBufferInKey'] = int(keyval[3])
            if keys[keyval[0]]['AnzahlBufferInKey'] != 1:
                print "%s %s = %d not implemented." % (keyval[0], \
                    'AnzahlBufferInKey', keys[keyval[0]]['AnzahlBufferInKey'])
                break
            keys[keyval[0]]['BytesInUserInfo'] = int(keyval[4])
            keys[keyval[0]]['BufferReferenz'] = int(keyval[5])
            keys[keyval[0]]['IndexSampleKey'] = int(keyval[6])
            keys[keyval[0]]['OffsetBufferInSampleKey'] = int(keyval[7])
            if keys[keyval[0]]['OffsetBufferInSampleKey'] != 0:
                print "%s %s = %d not implemented." % (keyval[0], \
                                    'OffsetBufferInSampleKey', \
                                    keys[keyval[0]]['OffsetBufferInSampleKey'])
                break
            keys[keyval[0]]['BufferLangBytes'] = int(keyval[8])
            keys[keyval[0]]['OffsetFirstSampleInBuffer'] = int(keyval[9])
            if keys[keyval[0]]['OffsetFirstSampleInBuffer'] != 0:
                print "%s %s = %d not implemented." % (keyval[0], \
                                'OffsetFirstSampleInBuffer', \
                                keys[keyval[0]]['OffsetFirstSampleInBuffer'])
                break
            keys[keyval[0]]['BufferFilledBytes'] = int(keyval[10])
            keys[keyval[0]]['x0'] = float(keyval[12])
            keys[keyval[0]]['Addzeit'] = float(keyval[13])
            if keys[keyval[0]]['BytesInUserInfo']:
                keys[keyval[0]]['UserInfo'] = int(keyval[14])
        elif keyval[0] == 'CS':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['AnzahlBufferInKey'] = int(keyval[3])
            tmp = ','.join(keyval[4:])
            keys[keyval[0]]['Rohdaten'] = tmp

            npts = keys['Cb']['BufferFilledBytes'] // keys['CP']['Bytes']
            tr.stats['npts'] = npts
            # allocate array
            tr.data = np.ndarray(npts, dtype=float)
            # treat different number formats
            if keys['CP']['ZahlenFormat'] == 4:
                tmp = np.frombuffer(keys['CS']['Rohdaten'], dtype='uint8',
                                    count=npts * 2)
                tr.data = (tmp[0::2].astype(float) + \
                       (tmp[1::2].astype(float) * 256))
                tr.data[np.nonzero(tr.data > 32767)] -= 65536
            elif keys['CP']['ZahlenFormat'] == 8:
                # copy, because frombuffer returns a read-only array
                tr.data = np.frombuffer(keys['CS']['Rohdaten'],
                                        dtype='float64',
                                        count=npts).copy()
            else:
                print "%s %s = %d not implemented." % (keyval[0], \
                             'ZahlenFormat', keys[keyval[0]]['ZahlenFormat'])
                break

        elif keyval[0] == 'NT':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['Tag'] = int(keyval[3])
            keys[keyval[0]]['Monat'] = int(keyval[4])
            keys[keyval[0]]['Jahr'] = int(keyval[5])
            keys[keyval[0]]['Stunden'] = int(keyval[6])
            keys[keyval[0]]['Minuten'] = int(keyval[7])
            keys[keyval[0]]['Sekunden'] = float(keyval[8])
            tr.stats['starttime'] = UTCDateTime(keys[keyval[0]]['Jahr'], \
                                                keys[keyval[0]]['Monat'], \
                                                keys[keyval[0]]['Tag'], \
                                                keys[keyval[0]]['Stunden'], \
                                                keys[keyval[0]]['Minuten'], \
                                                keys[keyval[0]]['Sekunden'])
        elif keyval[0] == 'CD':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['dx'] = float(keyval[3])
            tr.stats['delta'] = keys[keyval[0]]['dx']
            keys[keyval[0]]['kalibiert'] = int(keyval[4])
            if keys[keyval[0]]['kalibiert'] != 1:
                print "%s %s = %d not implemented." % \
                    (keyval[0], 'kalibiert',
                     keys[keyval[0]]['kalibiert'])
                break
            keys[keyval[0]]['EinheitLang'] = int(keyval[5])
            keys[keyval[0]]['Einheit'] = keyval[6]

            if keys[keyval[0]]['Version'] == 2:
                keys[keyval[0]]['Reduktion'] = int(keyval[7])
                keys[keyval[0]]['InMultiEvents'] = int(keyval[8])
                keys[keyval[0]]['SortiereBuffer'] = int(keyval[9])
                keys[keyval[0]]['x0'] = float(keyval[10])
                keys[keyval[0]]['PretriggerVerwendung'] = int(keyval[11])
            if keys[keyval[0]]['Version'] == 1:
                keys[keyval[0]]['Reduktion'] = ''
                keys[keyval[0]]['InMultiEvents'] = ''
                keys[keyval[0]]['SortiereBuffer'] = ''
                keys[keyval[0]]['x0'] = ''
                keys[keyval[0]]['PretriggerVerwendung'] = 0

        elif keyval[0] == 'CR':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['Transformieren'] = int(keyval[3])
            keys[keyval[0]]['Faktor'] = float(keyval[4])
            keys[keyval[0]]['Offset'] = float(keyval[5])
            keys[keyval[0]]['Kalibriert'] = int(keyval[6])
            keys[keyval[0]]['EinheitLang'] = int(keyval[7])
            keys[keyval[0]]['Einheit'] = keyval[8]
        elif keyval[0] == 'CN':  # station names
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['IndexGruppe'] = int(keyval[3])
            keys[keyval[0]]['IndexBit'] = int(keyval[5])
            keys[keyval[0]]['NameLang'] = int(keyval[6])
            keys[keyval[0]]['Name'] = keyval[7]
            keys[keyval[0]]['KommLang'] = int(keyval[8])
            keys[keyval[0]]['Kommentar'] = keyval[9]
        else:
            keys[keyval[0]] = {}
            keys[keyval[0]]['KeyString'] = keyval[1:]

    # NT key is beginning of measurement (starting of measurement unit)
    # keys['Cb']['Addzeit'] needs to be added to obtain the absolute trigger
    # time

    tr.stats['starttime'] += keys['Cb']['Addzeit']

    # Adjust starttime according to pretrigger (There is some uncertainty
    # about the CD key) to get relative trigger time
    # for CD:Version == 1 always use Cb:x0
    # for CD:Version == 2 only use Cb:x0 if CD:PretriggerVerwendung == 1
    if keys['CD']['Version'] == 1 or \
        (keys['CD']['Version'] == 2 and
         keys['CD']['PretriggerVerwendung'] == 1):
        tr.stats['starttime'] += keys['Cb']['x0']

    if 'CR' in keys:
        if keys['CR']['Transformieren']:
            tr.data = tr.data * keys['CR']['Faktor'] + keys['CR']['Offset']

    f.close()
    # ### Channel naming
    tr.stats['network'] = 'KU'
    tr.stats['location'] = ''
    # ### Pre-20120619 naming convention to extract the station name from the
    # filename
    # tr.stats['station'] = fname[-12:-7]
    # ### Now take the station name from the CN key
    tr.stats['station'] = keys['CN']['Name'].replace('_', '')
    # ### or construct a name from the key that is consistent with the old
    # filename-generated one
    # ### This is very likely to cause a problem sooner or later.
    # tr.stats['station'] = 'MK%03d' % int(keys['CN']['Name'].split('_')[-1])

    st = Stream()
    st.extend([tr])

    return st
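A hedged usage sketch; the file name is a placeholder for a K-UTec IMC FAMOS file:

# Illustrative call only; the format details are handled inside kutec_read().
st = kutec_read('MK001_01')
print(st[0].stats)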
Example no. 11
# search given timespan one hour at a time, set initial T1 one hour earlier
T1 = START - (60 * 60 * 1)
while T1 < END:
    T1 += (60 * 60 * 1)
    T2 = T1 + (60 * 60 * 1)

    st = Stream()
    num_stations = 0
    for station in STATIONS:
        try:
            # we request 60s more at start and end and cut them off later to avoid
            # a false trigger due to the tapering during instrument correction
            tmp = client.waveform.getWaveform(NET, station, "", CHANNEL, T1 - 60,
                                              T2 + 60, getPAZ=True,
                                              getCoordinates=True)
            st.extend(tmp)
            num_stations += 1
        except Exception as e:
            if "No waveform data available" in str(e):
                continue
            raise
    st.merge(-1)
    st.sort()

    summary = []
    summary.append("#" * 79)
    summary.append("######## %s  ---  %s ########" % (T1, T2))
    summary.append("#" * 79)
    summary.append(str(st))

    if not st:
Example no. 12
    st = Stream()
    num_stations = 0
    for station in STATIONS:
        try:
            # we request 60s more at start and end and cut them off later to avoid
            # a false trigger due to the tapering during instrument correction
            tmp = client.waveform.getWaveform(NET,
                                              station,
                                              "",
                                              CHANNEL,
                                              T1 - 60,
                                              T2 + 60,
                                              getPAZ=True,
                                              getCoordinates=True)
            st.extend(tmp)
            num_stations += 1
        except Exception as e:
            if "No waveform data available" in str(e):
                continue
            raise
    st.merge(-1)
    st.sort()

    summary = []
    summary.append("#" * 79)
    summary.append("######## %s  ---  %s ########" % (T1, T2))
    summary.append("#" * 79)
    summary.append(str(st))

    if not st: