Example 1
def read_pick(line):
    """
    Convert REST pick string to ObsPy Pick object

    :param line: string containing pick information
    :type line: str

    :returns:
        :class:`obspy.core.event.Pick` and
        :class:`obspy.core.event.origin.Arrival`
    """
    # line = line.split()  # Cannot just split the line :(
    splits = [0, 6, 10, 15, 18, 22, 28, 29, 41, 49, -1]
    _line = []
    for split in range(len(splits) - 1):
        _line.append(line[splits[split]: splits[split + 1]].strip())
    line = _line
    pick = Pick(time=UTCDateTime(
        year=int(line[1]), julday=int(line[2]), hour=int(line[3]),
        minute=int(line[4])) + float(line[5]), phase_hint=line[7],
        evaluation_mode="automatic",
        method_id=ResourceIdentifier("smi:local/REST"),
        waveform_id=WaveformStreamID(station_code=line[0]),
        time_errors=QuantityError(uncertainty=float(line[8])))
    arrival = Arrival(
        pick_id=pick.resource_id, time_residual=float(line[9]))
    return pick, arrival
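Like most snippets in this collection, this one relies on names imported elsewhere in its module. A minimal import header that would make it self-contained, assuming ObsPy's public API:

from obspy import UTCDateTime
from obspy.core.event import (
    Arrival, Pick, QuantityError, ResourceIdentifier, WaveformStreamID)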
Example 2
def read_header_line(string_line):

    new_event = Event()
    line = string_line

    param_event = line.split()[1:]

    ### check if line has the required number of arguments

    if len(param_event) != 14:
        return new_event

    ### Get parameters

    year, month, day = [int(x) for x in param_event[0:3]]
    hour, minu = [int(x) for x in param_event[3:5]]
    sec = float(param_event[5])
    if sec >= 60:
        sec = 59.999
    lat, lon, z = [float(x) for x in param_event[6:9]]
    mag = float(param_event[9])
    errh, errz, rms = [float(x) for x in param_event[10:13]]

    _time = UTCDateTime(year, month, day, hour, minu, sec)
    _origin_quality = OriginQuality(standard_error=rms)

    # change what's next to handle origins with no error estimates

    origin = Origin(time=_time,
                    longitude=lon,
                    latitude=lat,
                    depth=z,
                    longitude_errors=QuantityError(uncertainty=errh),
                    latitude_errors=QuantityError(uncertainty=errh),
                    depth_errors=QuantityError(uncertainty=errz),
                    quality=_origin_quality)

    magnitude = Magnitude(mag=mag, origin_id=origin.resource_id)

    ### Return

    new_event.origins.append(origin)
    new_event.magnitudes.append(magnitude)
    new_event.preferred_origin_id = origin.resource_id
    new_event.preferred_magnitude_id = magnitude.resource_id

    return new_event
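A hypothetical call, with a header line whose field order matches the parsing above (the leading keyword and the concrete values are made up for illustration):

line = "HEADER 2021 01 15 12 30 45.1 45.00 25.00 10.0 2.3 0.5 0.8 0.12 0"
ev = read_header_line(line)
print(ev.origins[0].time, ev.magnitudes[0].mag)
# 2021-01-15T12:30:45.100000Z 2.3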
Example 3
def ORNL_events_to_cat(ornl_file):
    """Make Catalog from ORNL locations"""
    cat = Catalog()
    loc_df = pd.read_csv(ornl_file, infer_datetime_format=True)
    loc_df = loc_df.set_index('event_datetime')
    eid = 0
    for dt, row in loc_df.iterrows():
        ot = UTCDateTime(dt)
        hmc_east = row['x(m)']
        hmc_north = row['y(m)']
        hmc_elev = row['z(m)']
        errX = row['error_x (m)']
        errY = row['error_y (m)']
        errZ = row['error_z (m)']
        rms = row['rms (millisecond)']
        converter = SURF_converter()
        lon, lat, elev = converter.to_lonlat((hmc_east, hmc_north,
                                              hmc_elev))
        o = Origin(time=ot, latitude=lat, longitude=lon, depth=130 - elev)
        o.origin_uncertainty = OriginUncertainty()
        o.quality = OriginQuality()
        ou = o.origin_uncertainty
        oq = o.quality
        ou.max_horizontal_uncertainty = np.max([errX, errY])
        ou.min_horizontal_uncertainty = np.min([errX, errY])
        o.depth_errors.uncertainty = errZ
        oq.standard_error = rms * 1e-3  # rms is in ms; QuakeML wants seconds
        extra = AttribDict({
            'hmc_east': {
                'value': hmc_east,
                'namespace': 'smi:local/hmc'
            },
            'hmc_north': {
                'value': hmc_north,
                'namespace': 'smi:local/hmc'
            },
            'hmc_elev': {
                'value': hmc_elev,
                'namespace': 'smi:local/hmc'
            },
            'hmc_eid': {
                'value': eid,
                'namespace': 'smi:local/hmc'
            }
        })
        o.extra = extra
        rid = ResourceIdentifier(id=ot.strftime('%Y%m%d%H%M%S%f'))
        # Dummy magnitude of 1. for all events until further notice
        mag = Magnitude(mag=1., mag_errors=QuantityError(uncertainty=1.))
        ev = Event(origins=[o], magnitudes=[mag], resource_id=rid)
        ev.preferred_origin_id = o.resource_id.id
        cat.events.append(ev)
        eid += 1
    return cat
Example 4
def read_origin(event_str):
    """
    Read the origin information from the REST file string

    :param event_str: Contents of file as list of str
    :type event_str: list

    :returns: :class:`obspy.core.event.Event`
    """
    event = Event()

    head = event_str[0].split()
    # the REST header may be truncated; guard both fields read from column 17
    try:
        gap = float(head[17])
        used_phase_count = int(head[17])
    except IndexError:
        gap = None
        used_phase_count = None
    origin = Origin(
        time=UTCDateTime(
            year=int(head[0]), julday=int(head[1]), hour=int(head[2]),
            minute=int(head[3])) + float(head[4]),
        latitude=float(head[5]), longitude=float(head[6]),
        depth=float(head[7]) * 1000, origin_quality=OriginQuality(
            standard_error=float(head[9]),
            azimuthal_gap=gap,
            used_phase_count=used_phase_count),
        longitude_errors=QuantityError(
            uncertainty=kilometer2degrees(float(head[12]))),
        latitude_errors=QuantityError(
            uncertainty=kilometer2degrees(float(head[11]))),
        depth_errors=QuantityError(uncertainty=float(head[13]) * 1000),
        method_id=ResourceIdentifier("smi:local/REST"),
        evaluation_mode="automatic")
    event.origins.append(origin)
    try:
        event.magnitudes.append(Magnitude(
            mag=float(head[19]), magnitude_type="M"))
    except IndexError:
        pass
    return event
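A sketch of the imports this snippet assumes (all from ObsPy's public API):

from obspy import UTCDateTime
from obspy.core.event import (
    Event, Magnitude, Origin, OriginQuality, QuantityError,
    ResourceIdentifier)
from obspy.geodetics import kilometer2degrees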
Example 5
File: core.py Project: znamy/obspy
    def _parse_magnitude(self, line):
        #    1-5  a5   magnitude type (mb, Ms, ML, mbmle, msmle)
        magnitude_type = line[0:5].strip()
        #      6  a1   min max indicator (<, >, or blank)
        # TODO figure out the meaning of this min max indicator
        min_max_indicator = line[5:6].strip()
        #   7-10  f4.1 magnitude value
        mag = float_or_none(line[6:10])
        #  12-14  f3.1 standard magnitude error
        mag_errors = float_or_none(line[11:14])
        #  16-19  i4   number of stations used to calculate magnitude
        station_count = int_or_none(line[15:19])
        #  21-29  a9   author of the origin
        author = line[20:29].strip()
        #  31-38  a8   origin identification
        origin_id = line[30:38].strip()

        # process items
        if author:
            creation_info = CreationInfo(author=author)
        else:
            creation_info = None
        mag_errors = mag_errors and QuantityError(uncertainty=mag_errors)
        if origin_id:
            origin_id = self._construct_id(['origin', origin_id])
        else:
            origin_id = None
        if not magnitude_type:
            magnitude_type = None
        # magnitudes have no id field, so construct a unique one at least
        resource_id = self._construct_id(['magnitude'], add_hash=True)

        if min_max_indicator:
            msg = 'Magnitude min/max indicator field not yet implemented'
            warnings.warn(msg)

        # combine and return
        mag = Magnitude(magnitude_type=magnitude_type,
                        mag=mag,
                        station_count=station_count,
                        creation_info=creation_info,
                        mag_errors=mag_errors,
                        origin_id=origin_id,
                        resource_id=resource_id)
        # event init always sets an empty QuantityError, even when specifying
        # None, which is strange
        for key in ['mag_errors']:
            setattr(mag, key, None)
        return mag
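The setattr workaround in the last three lines addresses the quirk described in the comment, which is easy to demonstrate (a minimal sketch):

from obspy.core.event import Magnitude

m = Magnitude(mag=1.0, mag_errors=None)
print(m.mag_errors)  # an empty QuantityError, not None, despite what was passed
m.mag_errors = None  # plain attribute assignment after init does clear it
print(m.mag_errors)  # None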
Example 6
def reweight_picks(cat):
    """
    Function to change pick uncertainties based upon correlation values (saved in pick Comment).
    This works in-place on the catalog.

    :type cat: obspy.core.Catalog
    :param cat: catalog of events with ccvals against detecting template saved in Comment
    :return: obspy.core.Catalog
    """
    from obspy.core.event import QuantityError
    for ev in cat:
        for pk in ev.picks:
            if pk.phase_hint == 'P':
                ccval = float(pk.comments[0].text.split('=')[-1])
                # Re-weight based on some scheme (less down-weighting)
                if ccval > 0.3:
                    pk.time_errors = QuantityError(uncertainty=0.05)
    return cat
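The parsing above assumes each P pick carries a Comment whose text ends in "=<ccval>". A self-contained sketch (the "cc_max" label is hypothetical):

from obspy.core.event import Comment, Pick

pk = Pick(phase_hint='P', comments=[Comment(text='cc_max=0.42')])
ccval = float(pk.comments[0].text.split('=')[-1])
print(ccval)  # 0.42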
Example 7
def moment_magnitude(stream,
                     cat,
                     inventory,
                     vp,
                     vs,
                     only_triaxial=True,
                     density=2700,
                     min_dist=20,
                     win_length=0.04,
                     len_spectrum=2**12,
                     clipped_fraction=0.1,
                     max_frequency=600,
                     preferred_origin_only=True):
    """
    WARNING
    Calculate the moment magnitude for an event.
    :param stream: seismogram
    :type stream: uquake.Stream # does not exist yet
    :param cat: catalog object
    :type cat: uquake.core.event.Catalog
    :param inventory: network information (contains stations information)
    :type inventory: uquake.station.Site
    :param vp: P-wave velocity
    :type vp: float or uquake.core.data.Grid
    :param vs: S-wave velocity
    :type vs: float or uquake.core.data.Grid
    :param only_triaxial: whether only triaxial sensors are used in the
    magnitude calculation (optional) (not yet implemented)
    :type only_triaxial: bool
    :param density: density in kg / m**3 (assuming homogeneous for now)
    :type density: float
    :param win_length: length of the window, in seconds, over which the
    magnitude is calculated
    :type win_length: float
    :param min_dist: minimum distance between sensor and event to allow
    magnitude calculation
    :param len_spectrum: length of the spectrum
    :param clipped_fraction: allowed clipped fraction (fraction of the
    signal equal to the min or the max)
    :param max_frequency: maximum frequency used in the magnitude
    calculation. Above a certain frequency, noise starts to dominate the
    signal and biases the calculation of the magnitude and corner
    frequency.
    :param preferred_origin_only: calculate the magnitude for the
    preferred origin only
    :rtype: uquake.core.event.Catalog
    """

    # rigidity in Pa (shear-wave modulus)

    if only_triaxial:
        logger.info(
            'only triaxial sensor will be used in magnitude calculation')

    fcs = []

    quality = {'station_code': [], 'phase': [], 'origin_id': [], 'quality': []}

    if preferred_origin_only:
        origins = [cat[0].preferred_origin()]

    else:
        origins = cat[0].origins

    for origin in origins:
        ev_loc = np.array([origin.x, origin.y, origin.z])

        # np.float and np.int were removed in NumPy >= 1.24; use the builtins
        if not isinstance(vp, (float, int)):
            vp_src = vp.interpolate(ev_loc, grid_space=False)
            vs_src = vs.interpolate(ev_loc, grid_space=False)
        else:
            vp_src = vp
            vs_src = vs

        moment_magnitudes = []
        corner_frequencies = []
        stations = []

        spectrum_norm_matrix = []
        frequencies = []
        indices = []
        for k, arr in enumerate(origin.arrivals):
            pick = arr.get_pick()
            # ensuring backward compatibility; fall back before dereferencing
            if not pick:
                pick = cat[0].picks[k]
            network_code = pick.waveform_id.network_code
            station_code = pick.waveform_id.station_code
            location_code = pick.waveform_id.location_code
            travel_time = pick.time - origin.time
            at = pick.time
            phase = pick.phase_hint

            sensor_response = inventory.select(network=network_code,
                                               station=station_code,
                                               location=location_code)

            if not sensor_response:
                logger.warning(f'no response was found in the inventory for '
                               f'sensor '
                               f'{network_code}.{station_code}.'
                               f'{location_code}')
                continue

            if sensor_response[0][0][0].response is None:
                logger.warning(f'no response was found in the inventory for '
                               f'sensor '
                               f'{network_code}.{station_code}.'
                               f'{location_code}')
                continue

            st_loc = sensor_response[0][0][0].loc

            poles = np.abs(sensor_response[0][0][0].response.get_paz().poles)
            st_trs = stream.select(network=network_code,
                                   station=station_code,
                                   location=location_code)

            if len(st_trs) == 0:
                continue

            if only_triaxial and (len(st_trs) < 3):
                continue

            # creating displacement pulse
            st_trs.detrend('demean').detrend('linear')
            st_trs.taper(max_percentage=0.05, type='cosine')

            data = st_trs.composite()[0].data

            len_max = len(data[data == np.max(data)]) + \
                      len(data[data == np.min(data)])

            if len_max / len(data) > clipped_fraction:
                logger.info('Clipped waveform detected: station %s '
                            'will not be used for magnitude calculation' %
                            sensor_response.code)
                continue

            pulse = st_trs.copy()
            pulse.attach_response(inventory)

            # filter the pulse using the corner frequency of the sensor

            sensor_min_freq = np.min(poles) / (2 * np.pi)
            window_min_freq = 1 / win_length

            low_bp_freq = np.max([sensor_min_freq, window_min_freq])
            high_bp_freq = np.max(poles) / (2 * np.pi)
            if high_bp_freq > pulse[0].stats.sampling_rate / 2:
                high_bp_freq = pulse[0].stats.sampling_rate / 2.5

            # cap at max_frequency rather than discarding the limits above
            high_bp_freq = np.min([high_bp_freq, max_frequency])
            pulse = pulse.taper(max_percentage=0.05, type='cosine')
            pulse.filter('bandpass', freqmin=low_bp_freq, freqmax=high_bp_freq)
            low_bp_freq1 = 10**(np.log10(low_bp_freq) - 0.2)
            low_bp_freq2 = low_bp_freq
            high_bp_freq1 = high_bp_freq
            high_bp_freq2 = 10**(np.log10(high_bp_freq) + 0.2)
            pre_filt = [
                low_bp_freq1, low_bp_freq2, high_bp_freq1, high_bp_freq2
            ]
            pulse.attach_response(inventory)
            dp_trs = []
            for tr in pulse:
                try:
                    dp_trs.append(tr.remove_response(inventory=inventory,
                                                     output='DISP',
                                                     pre_filt=pre_filt))
                except Exception as e:
                    logger.error(e)

            dp = Stream(traces=dp_trs)

            # creating a signal containing only one for comparison
            tr_one = Trace(data=np.ones(len(pulse[0].data)))
            tr_one.stats = pulse[0].stats
            st_one = Stream(traces=[tr_one])

            dp = dp.trim(starttime=at - 0.01, endtime=at + 2 * win_length)
            dp = dp.taper(type='cosine',
                          max_percentage=0.5,
                          max_length=0.08,
                          side='left')

            # applying the same operation to the one signal
            # st_one_trimmed = st_one.trim(starttime=at - 0.01,
            #                              endtime=at + 2 * win_length)
            # st_one_taper = st_one_trimmed.taper(type='cosine',
            #                                     max_percentage=0.5,
            #                                     max_length=0.08,
            #                                     side='left')
            #
            dp_spectrum = np.zeros(len_spectrum)
            water_level = 1e-15
            for tr in dp:
                dp_spectrum += np.abs(np.fft.fft(tr.data, n=len_spectrum))
            # one_spectrum = np.fft.fft(st_one_taper[0].data, n=len_spectrum)

            # dp_spectrum_scaled = dp_spectrum # / (one_spectrum + water_level)

            if arr.distance is not None:
                hypo_dist = arr.distance
            else:
                hypo_dist = np.linalg.norm(st_loc - ev_loc)

            if hypo_dist < min_dist:
                continue

            radiation = radiation_pattern_attenuation()
            if phase.upper() == 'P':
                radiation = radiation[0]
                v_src = vp_src
            else:
                radiation = radiation[1]
                v_src = vs_src

            sr = dp[0].stats.sampling_rate

            f = np.fft.fftfreq(len_spectrum, 1 / sr)

            anelastic = np.exp(travel_time)

            spectrum_norm = dp_spectrum / radiation * hypo_dist * 4 * \
                            np.pi * density * v_src ** 3 / sr * anelastic

            fi = np.nonzero((f >= low_bp_freq) & (f <= high_bp_freq))[0]
            # fr = np.nonzero((f < low_bp_freq) | (f > high_bp_freq))[0]
            # spectrum_norm[fr] = np.nan
            spectrum_norm_matrix.append(np.abs(spectrum_norm[fi]))
            frequencies.append(f[fi])
            indices.append(np.ones(len(fi)) * k)

        if not spectrum_norm_matrix:
            continue

        st_count = len(spectrum_norm_matrix)
        spectrum_norm = np.nanmedian(spectrum_norm_matrix, axis=0)
        f = np.median(frequencies, axis=0)
        fi = np.nonzero(~np.isnan(spectrum_norm) & (f > 0))[0]

        p_opt, p_cov = curve_fit(spectral_function,
                                 f[fi],
                                 np.log10(spectrum_norm[fi]), (10, 100, 100),
                                 bounds=((1, 0, 10), (100, 1000, 5000)))

        mw = 2 / 3.0 * p_opt[0] - 6.02
        mu = 29.5e9
        dnw1 = 2 / 3.0 * (p_opt[0] - p_cov[0, 0] / 2) - 6.02
        dnw2 = 2 / 3.0 * (p_opt[0] + p_cov[0, 0] / 2) - 6.02
        dmw = dnw2 - dnw1
        fc = p_opt[1]

        mag = event.Magnitude(mag=mw,
                              station_count=st_count,
                              magnitude_type='Mw',
                              evaluation_mode=origin.evaluation_mode,
                              evaluation_status=origin.evaluation_status,
                              origin_id=origin.resource_id)

        mag.corner_frequency_hz = fc

        from obspy.core.event import QuantityError
        mag.mag_errors = QuantityError(uncertainty=dmw)
        # mag.fc_errors = QuantityError(uncertainty=dfc)
        cat[0].magnitudes.append(mag)
        if origin.resource_id == cat[0].preferred_origin().resource_id:
            cat[0].preferred_magnitude_id = mag.resource_id

    return cat
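spectral_function is defined elsewhere in this project. Given how the fit is used above (mw = 2/3 * p_opt[0] - 6.02 is the Hanks-Kanamori relation, and fc = p_opt[1]), the first parameter must be log10 of the seismic moment and the second the corner frequency. A plausible Brune-type model consistent with that parameterization (an assumption, not the project's actual function):

import numpy as np

def spectral_function(f, log_m0, fc, q):
    """log10 of a Brune omega-squared displacement spectrum, with a simple
    q-controlled high-frequency attenuation term (sketch only)."""
    return (log_m0 - np.log10(1.0 + (f / fc) ** 2)
            - (np.pi * f / q) / np.log(10.0))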
Example 8
def weight_corr_picks(cat, temp_dict=None, stream_dict=None, method='SNR_temp', temp_cat=False, show_ccs=False):
    """
    Implementing pick weighting by SNR of cc function
    :param cat: catalog of detections
    :param template_dir: directory where the templates live
    :param stream: directory where the longer event waveforms
    :param method: 'SNR_temp', 'SNR_ccval', 'doub_diff_ccval'
    :return: obspy.core.event.Catalog

    ** Note: Hypoellipse default quality mapping: 0.05, 0.1, 0.2, 0.4 sec (0, 1, 2, 3?)
             We can go with these values based on SNR of cc function, I suppose.
    """
    import warnings
    import numpy as np
    from eqcorrscan.core.match_filter import normxcorr2
    import matplotlib.pyplot as plt
    from obspy.core.event import QuantityError

    SNRs = []
    if temp_cat:
        temp_SNRs = {str(ev.resource_id).split('/')[-1].split('_')[0]:
                         {amp.waveform_id.station_code + '.EZ':
                              amp.snr for amp in ev.amplitudes} for ev in temp_cat}
    for ev in cat:
        det_rid = str(ev.resource_id).split('/')[-1]
        temp_rid = str(ev.resource_id).split('/')[-1].split('_')[0]
        picks = list(ev.picks)
        for i, pk in enumerate(picks):
            if pk.phase_hint == 'P':
                sta = pk.waveform_id.station_code
                chan = pk.waveform_id.channel_code
                if method == 'SNR_ccval':
                    temp = temp_dict[temp_rid + '_1sec']
                    stream = stream_dict[det_rid]
                    tr = temp.select(station=sta,
                                     channel=chan)[0]
                    st_tr = stream.select(station=sta,
                                          channel=chan)[0]
                    ccc = normxcorr2(tr.data, st_tr.data)[0]
                    pk_samp = int(tr.stats.sampling_rate * (pk.time - st_tr.stats.starttime) - 5)
                    sta_start = pk_samp - 5
                    sta_end = pk_samp + 5
                    LTA = np.std(ccc)
                    STA = np.std(ccc[sta_start:sta_end])
                    # STA = abs(ccc[pk_samp])
                    SNR = STA / LTA
                    # Here we map the ccval SNR to time uncertainty in the original catalog
                    orig_pk = ev.picks[i]
                    if SNR < 0.75:
                        orig_pk.time_errors = QuantityError(uncertainty=0.40)
                    elif SNR < 1.25:
                        orig_pk.time_errors = QuantityError(uncertainty=0.20)
                    elif SNR < 1.75:
                        orig_pk.time_errors = QuantityError(uncertainty=0.10)
                    elif SNR < 2.25:
                        orig_pk.time_errors = QuantityError(uncertainty=0.05)
                    else:
                        orig_pk.time_errors = QuantityError(uncertainty=0.01)
                    SNRs.append(SNR)
                    if show_ccs:
                        fig, ax = plt.subplots()
                        ax.plot(ccc)
                        ax.set_title('%s.%s: %f' % (sta, chan, SNR))
                        fig.show()
                elif method == 'SNR_temp':
                    orig_pk = ev.picks[i]
                    try:
                        SNR = temp_SNRs[temp_rid]['%s.%s' % (sta, chan)]
                    except KeyError:
                        warnings.warn('%s.%s has no amplitude pick' % (sta, chan))
                        orig_pk.time_errors = QuantityError(uncertainty=0.10)
                        continue
                    if SNR < 1.0:
                        orig_pk.time_errors = QuantityError(uncertainty=0.20)
                    elif SNR < 2.:
                        orig_pk.time_errors = QuantityError(uncertainty=0.10)
                    elif SNR < 5.:
                        orig_pk.time_errors = QuantityError(uncertainty=0.05)
                    else:
                        orig_pk.time_errors = QuantityError(uncertainty=0.01)
    return cat
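The repeated if/elif ladders map a cc-function SNR to a pick-time uncertainty, following the Hypoellipse-style weights noted in the docstring. The same mapping written as data (a sketch; thresholds taken from the 'SNR_ccval' branch above):

def snr_to_uncertainty(snr, default=0.01):
    """Map cc-function SNR to pick time uncertainty in seconds."""
    for threshold, uncertainty in [(0.75, 0.40), (1.25, 0.20),
                                   (1.75, 0.10), (2.25, 0.05)]:
        if snr < threshold:
            return uncertainty
    return default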
Example 9
File: core.py Project: znamy/obspy
    def _parse_origin(self, line):
        # 1-10    i4,a1,i2,a1,i2    epicenter date (yyyy/mm/dd)
        # 12-22   i2,a1,i2,a1,f5.2  epicenter time (hh:mm:ss.ss)
        time = UTCDateTime.strptime(line[:17], '%Y/%m/%d %H:%M:')
        time += float(line[17:22])
        # 23      a1    fixed flag (f = fixed origin time solution, blank if
        #                           not a fixed origin time)
        time_fixed = fixed_flag(line[22])
        # 25-29   f5.2  origin time error (seconds; blank if fixed origin time)
        time_error = float_or_none(line[24:29])
        time_error = time_error and QuantityError(uncertainty=time_error)
        # 31-35   f5.2  root mean square of time residuals (seconds)
        rms = float_or_none(line[30:35])
        # 37-44   f8.4  latitude (negative for South)
        latitude = float_or_none(line[36:44])
        # 46-54   f9.4  longitude (negative for West)
        longitude = float_or_none(line[45:54])
        # 55      a1    fixed flag (f = fixed epicenter solution, blank if not
        #                           a fixed epicenter solution)
        epicenter_fixed = fixed_flag(line[54])
        # 56-60   f5.1  semi-major axis of 90% ellipse or its estimate
        #               (km, blank if fixed epicenter)
        _uncertainty_major_m = float_or_none(line[55:60], multiplier=1e3)
        # 62-66   f5.1  semi-minor axis of 90% ellipse or its estimate
        #               (km, blank if fixed epicenter)
        _uncertainty_minor_m = float_or_none(line[61:66], multiplier=1e3)
        # 68-70   i3    strike (0 <= x <= 360) of error ellipse clock-wise from
        #                       North (degrees)
        _uncertainty_major_azimuth = float_or_none(line[67:70])
        # 72-76   f5.1  depth (km)
        depth = float_or_none(line[71:76], multiplier=1e3)
        # 77      a1    fixed flag (f = fixed depth station, d = depth phases,
        #                           blank if not a fixed depth)
        # parsed but not stored: ObsPy's Origin has no depth-fixed field
        depth_fixed = fixed_flag(line[76])
        # 79-82   f4.1  depth error 90% (km; blank if fixed depth)
        depth_error = float_or_none(line[78:82], multiplier=1e3)
        # 84-87   i4    number of defining phases
        used_phase_count = int_or_none(line[83:87])
        # 89-92   i4    number of defining stations
        used_station_count = int_or_none(line[88:92])
        # 94-96   i3    gap in azimuth coverage (degrees)
        azimuthal_gap = float_or_none(line[93:96])
        # 98-103  f6.2  distance to closest station (degrees)
        minimum_distance = float_or_none(line[97:103])
        # 105-110 f6.2  distance to furthest station (degrees)
        maximum_distance = float_or_none(line[104:110])
        # 112     a1    analysis type: (a = automatic, m = manual, g = guess)
        evaluation_mode, evaluation_status = \
            evaluation_mode_and_status(line[111])
        # 114     a1    location method: (i = inversion, p = pattern
        #                                 recognition, g = ground truth, o =
        #                                 other)
        location_method = LOCATION_METHODS[line[113].strip().lower()]
        # 116-117 a2    event type:
        # XXX event type and event type certainty is specified per origin,
        # XXX not sure how to best handle this, for now only use it if
        # XXX information on the individual origins do not clash.. not sure yet
        # XXX how to identify the preferred origin..
        event_type, event_type_certainty = \
            EVENT_TYPE_CERTAINTY[line[115:117].strip().lower()]
        # 119-127 a9    author of the origin
        author = line[118:127].strip()
        # 129-136 a8    origin identification
        origin_id = self._construct_id(['origin', line[128:136].strip()])

        # do some combinations
        depth_error = depth_error and dict(uncertainty=depth_error,
                                           confidence_level=90)
        if all(v is not None
               for v in (_uncertainty_major_m, _uncertainty_minor_m,
                         _uncertainty_major_azimuth)):
            origin_uncertainty = OriginUncertainty(
                min_horizontal_uncertainty=_uncertainty_minor_m,
                max_horizontal_uncertainty=_uncertainty_major_m,
                azimuth_max_horizontal_uncertainty=_uncertainty_major_azimuth,
                preferred_description='uncertainty ellipse',
                confidence_level=90)
            # event init always sets an empty QuantityError, even when
            # specifying None, which is strange
            for key in ['confidence_ellipsoid']:
                setattr(origin_uncertainty, key, None)
        else:
            origin_uncertainty = None
        origin_quality = OriginQuality(standard_error=rms,
                                       used_phase_count=used_phase_count,
                                       used_station_count=used_station_count,
                                       azimuthal_gap=azimuthal_gap,
                                       minimum_distance=minimum_distance,
                                       maximum_distance=maximum_distance)
        comments = []
        if location_method:
            comments.append(
                self._make_comment('location method: ' + location_method))
        if author:
            creation_info = CreationInfo(author=author)
        else:
            creation_info = None
        # assemble whole event
        origin = Origin(time=time,
                        resource_id=origin_id,
                        longitude=longitude,
                        latitude=latitude,
                        depth=depth,
                        depth_errors=depth_error,
                        origin_uncertainty=origin_uncertainty,
                        time_fixed=time_fixed,
                        epicenter_fixed=epicenter_fixed,
                        origin_quality=origin_quality,
                        comments=comments,
                        creation_info=creation_info)
        # event init always sets an empty QuantityError, even when specifying
        # None, which is strange
        for key in ('time_errors', 'longitude_errors', 'latitude_errors',
                    'depth_errors'):
            setattr(origin, key, None)
        return origin, event_type, event_type_certainty
Example 10
def full_test_event():
    """
    Function to generate a basic, full test event
    """
    test_event = Event()
    test_event.origins.append(
        Origin(time=UTCDateTime("2012-03-26") + 1.2,
               latitude=45.0,
               longitude=25.0,
               depth=15000))
    test_event.event_descriptions.append(EventDescription())
    test_event.event_descriptions[0].text = 'LE'
    test_event.creation_info = CreationInfo(agency_id='TES')
    test_event.magnitudes.append(
        Magnitude(mag=0.1,
                  magnitude_type='ML',
                  creation_info=CreationInfo('TES'),
                  origin_id=test_event.origins[0].resource_id))
    test_event.magnitudes.append(
        Magnitude(mag=0.5,
                  magnitude_type='Mc',
                  creation_info=CreationInfo('TES'),
                  origin_id=test_event.origins[0].resource_id))
    test_event.magnitudes.append(
        Magnitude(mag=1.3,
                  magnitude_type='Ms',
                  creation_info=CreationInfo('TES'),
                  origin_id=test_event.origins[0].resource_id))

    # Define the test pick
    _waveform_id_1 = WaveformStreamID(station_code='FOZ',
                                      channel_code='SHZ',
                                      network_code='NZ')
    _waveform_id_2 = WaveformStreamID(station_code='WTSZ',
                                      channel_code='BH1',
                                      network_code=' ')
    # Pick to associate with amplitude
    test_event.picks.append(
        Pick(waveform_id=_waveform_id_1,
             phase_hint='IAML',
             polarity='undecidable',
             time=UTCDateTime("2012-03-26") + 1.68,
             evaluation_mode="manual"))
    # Need a second pick for coda
    test_event.picks.append(
        Pick(waveform_id=_waveform_id_1,
             onset='impulsive',
             phase_hint='PN',
             polarity='positive',
             time=UTCDateTime("2012-03-26") + 1.68,
             evaluation_mode="manual"))
    # Unassociated pick
    test_event.picks.append(
        Pick(waveform_id=_waveform_id_2,
             onset='impulsive',
             phase_hint='SG',
             polarity='undecidable',
             time=UTCDateTime("2012-03-26") + 1.72,
             evaluation_mode="manual"))
    # Unassociated pick
    test_event.picks.append(
        Pick(waveform_id=_waveform_id_2,
             onset='impulsive',
             phase_hint='PN',
             polarity='undecidable',
             time=UTCDateTime("2012-03-26") + 1.62,
             evaluation_mode="automatic"))
    # Test a generic local magnitude amplitude pick
    test_event.amplitudes.append(
        Amplitude(generic_amplitude=2.0,
                  period=0.4,
                  pick_id=test_event.picks[0].resource_id,
                  waveform_id=test_event.picks[0].waveform_id,
                  unit='m',
                  magnitude_hint='ML',
                  category='point',
                  type='AML'))
    # Test a coda magnitude pick
    test_event.amplitudes.append(
        Amplitude(generic_amplitude=10,
                  pick_id=test_event.picks[1].resource_id,
                  waveform_id=test_event.picks[1].waveform_id,
                  type='END',
                  category='duration',
                  unit='s',
                  magnitude_hint='Mc',
                  snr=2.3))
    test_event.origins[0].arrivals.append(
        Arrival(time_weight=0,
                phase=test_event.picks[1].phase_hint,
                pick_id=test_event.picks[1].resource_id))
    test_event.origins[0].arrivals.append(
        Arrival(time_weight=2,
                phase=test_event.picks[2].phase_hint,
                pick_id=test_event.picks[2].resource_id,
                backazimuth_residual=5,
                time_residual=0.2,
                distance=15,
                azimuth=25))
    test_event.origins[0].arrivals.append(
        Arrival(time_weight=2,
                phase=test_event.picks[3].phase_hint,
                pick_id=test_event.picks[3].resource_id,
                backazimuth_residual=5,
                time_residual=0.2,
                distance=15,
                azimuth=25))
    # Add in error info (line E)
    test_event.origins[0].quality = OriginQuality(standard_error=0.01,
                                                  azimuthal_gap=36)
    # Origin uncertainty in Seisan is output as long-lat-depth, quakeML has
    # semi-major and semi-minor
    test_event.origins[0].origin_uncertainty = OriginUncertainty(
        confidence_ellipsoid=ConfidenceEllipsoid(
            semi_major_axis_length=3000,
            semi_minor_axis_length=1000,
            semi_intermediate_axis_length=2000,
            major_axis_plunge=20,
            major_axis_azimuth=100,
            major_axis_rotation=4))
    test_event.origins[0].time_errors = QuantityError(uncertainty=0.5)
    # Add in fault-plane solution info (line F) - Note have to check program
    # used to determine which fields are filled....
    test_event.focal_mechanisms.append(
        FocalMechanism(nodal_planes=NodalPlanes(
            nodal_plane_1=NodalPlane(strike=180,
                                     dip=20,
                                     rake=30,
                                     strike_errors=QuantityError(10),
                                     dip_errors=QuantityError(10),
                                     rake_errors=QuantityError(20))),
                       method_id=ResourceIdentifier(
                           "smi:nc.anss.org/focalMechanism/FPFIT"),
                       creation_info=CreationInfo(agency_id="NC"),
                       misfit=0.5,
                       station_distribution_ratio=0.8))
    # Need to test high-precision origin and that it is preferred origin.
    # Moment tensor includes another origin
    test_event.origins.append(
        Origin(time=UTCDateTime("2012-03-26") + 1.2,
               latitude=45.1,
               longitude=25.2,
               depth=14500))
    test_event.magnitudes.append(
        Magnitude(mag=0.1,
                  magnitude_type='MW',
                  creation_info=CreationInfo('TES'),
                  origin_id=test_event.origins[-1].resource_id))
    # Moment tensors go with focal-mechanisms
    test_event.focal_mechanisms.append(
        FocalMechanism(moment_tensor=MomentTensor(
            derived_origin_id=test_event.origins[-1].resource_id,
            moment_magnitude_id=test_event.magnitudes[-1].resource_id,
            scalar_moment=100,
            tensor=Tensor(
                m_rr=100, m_tt=100, m_pp=10, m_rt=1, m_rp=20, m_tp=15),
            method_id=ResourceIdentifier(
                'smi:nc.anss.org/momentTensor/BLAH'))))
    return test_event
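A quick way to exercise the generated event is to wrap it in a Catalog and round-trip it through QuakeML (a sketch):

from obspy import read_events
from obspy.core.event import Catalog

cat = Catalog(events=[full_test_event()])
cat.write('full_test_event.xml', format='QUAKEML')
print(read_events('full_test_event.xml')[0].origins[0].time_errors)
# QuantityError(uncertainty=0.5)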
Example 11
def computeOriginErrors(org):
    """
    Given a NLL's event build the Confidence Ellipsoid from Covariance Matrix
    :param evt: NLL's QML Event
    :return: Dictionary containing computed errors
    """

    # WARNING: QuakeML uses meters for origin depth, origin uncertainty and confidence ellipsoid; SC3ML uses kilometers.

    d = {}

    confidenceLevel = 0.90  # Confidence level

    kp1 = np.sqrt(chi2.ppf(confidenceLevel, 1))  # 1D confidence coefficient
    kp2 = np.sqrt(chi2.ppf(confidenceLevel, 2))  # 2D confidence coefficient
    kp3 = np.sqrt(chi2.ppf(confidenceLevel, 3))  # 3D confidence coefficient

    # Covariance matrix is given in the NLL's "STATISTICS" line of *.grid0.loc.hyp file and in the Origin's comments parsed by ObsPy
    comments = org['comments'][0].text
    stats = comments.split('STATISTICS')[-1].split()
    cvm = [float(i) for i in stats[1::2]][3:9]  # Covariance matrix

    # Code adapted from IGN's computation of ConfidenceEllipsoid in "locsat.cpp" program
    cvxx = cvm[0]
    cvxy = cvm[1]
    cvxz = cvm[2]
    cvyy = cvm[3]
    cvyz = cvm[4]
    cvzz = cvm[5]

    nll3d = np.array([[cvxx, cvxy, cvxz],
                      [cvxy, cvyy, cvyz],
                      [cvxz, cvyz, cvzz]
                      ])

    # 1D confidence intervals at confidenceLevel
    errx = kp1 * np.sqrt(cvxx)
    qe = QuantityError(uncertainty=errx, confidence_level=confidenceLevel * 100.0)
    d['longitude_errors'] = qe

    erry = kp1 * np.sqrt(cvyy)
    qe = QuantityError(uncertainty=erry, confidence_level=confidenceLevel * 100.0)
    d['latitude_errors'] = qe

    errz = kp1 * np.sqrt(cvzz)
    qe = QuantityError(uncertainty=errz, confidence_level=confidenceLevel * 100.0)
    d['depth_errors'] = qe
    
    # NLL uses kp1 = 1 because it goes up to 1 sigma (68.3%); LocSAT uses
    # kp1 = 2.71 because it goes up to 90% (one dimension):
    # LocSAT np.sqrt(cvzz) / 2.71 = NLL np.sqrt(cvzz)

    # 2D confidence intervals at confidenceLevel
    nll2d = np.array(nll3d[:2, :2])
    eigval2d, eigvec2d = np.linalg.eig(nll2d)  # XY (horizontal) plane

    # indexes are not necessarily ordered. Sort them by eigenvalues
    idx = eigval2d.argsort()
    eigval2d = eigval2d[idx]
    eigvec2d = eigvec2d[:, idx]

    # sminax = kp2 * np.sqrt(eigval2d[0]) * 1.0e3  # QML in meters
    # smajax = kp2 * np.sqrt(eigval2d[1]) * 1.0e3  # QML in meters
    sminax = kp2 * np.sqrt(eigval2d[0])  # SC3ML in kilometers
    smajax = kp2 * np.sqrt(eigval2d[1])  # SC3ML in kilometers
    strike = 90.0 - np.rad2deg(np.arctan(eigvec2d[1, 1] / eigvec2d[0, 1]))  # calculate and refer it to North
    # horizontalUncertainty = np.sqrt((errx ** 2) + (erry ** 2)) * 1.0e3   # QML in meters
    horizontalUncertainty = np.sqrt((errx ** 2) + (erry ** 2))   # SC3ML in kilometers

    # 3D confidence intervals at confidenceLevel
    eigval3d, eigvec3d = np.linalg.eig(nll3d)
    idx = eigval3d.argsort()
    eigval3d = eigval3d[idx]
    eigvec3d = eigvec3d[:, idx]

    # s3dminax = kp3 * np.sqrt(eigval3d[0]) * 1.0e3   # QML in meters
    # s3dintax = kp3 * np.sqrt(eigval3d[1]) * 1.0e3   # QML in meters
    # s3dmaxax = kp3 * np.sqrt(eigval3d[2]) * 1.0e3   # QML in meters
    s3dminax = kp3 * np.sqrt(eigval3d[0])   # SC3ML in kilometers
    s3dintax = kp3 * np.sqrt(eigval3d[1])   # SC3ML in kilometers
    s3dmaxax = kp3 * np.sqrt(eigval3d[2])   # SC3ML in kilometers

    majaxplunge = normalizeAngle(
        np.rad2deg(np.arctan(eigvec3d[2, 2] / np.sqrt((eigvec3d[2, 0] ** 2) + (eigvec3d[2, 1] ** 2)))))
    majaxazimuth = normalizeAngle(np.rad2deg(np.arctan(eigvec3d[2, 1] / eigvec3d[2, 0])))
    majaxrotation = normalizeAngle(
        np.rad2deg(np.arctan(eigvec3d[0, 2] / np.sqrt((eigvec3d[0, 0] ** 2) + (eigvec3d[0, 1] ** 2)))))

    # print('2D sminax:\t{}\tsmajax:\t{}\tstrike:\t{}'.format(sminax, smajax, strike))
    # print('3D sminax:\t{}\tsmajax:\t{}\tsintax:\t{}'.format(s3dminax, s3dmaxax, s3dintax))
    # print('   plunge:\t{}\tazim:\t{}\trotat:\t{}'.format(majaxplunge, majaxazimuth, majaxrotation))
    # print('-' * 144)

    ce = ConfidenceEllipsoid(semi_major_axis_length=s3dmaxax,
                             semi_minor_axis_length=s3dminax,
                             semi_intermediate_axis_length=s3dintax,
                             major_axis_plunge=majaxplunge,
                             major_axis_azimuth=majaxazimuth,
                             major_axis_rotation=majaxrotation)

    ou = OriginUncertainty(horizontal_uncertainty=horizontalUncertainty,
                           min_horizontal_uncertainty=sminax,
                           max_horizontal_uncertainty=smajax,
                           azimuth_max_horizontal_uncertainty=strike,
                           confidence_ellipsoid=ce,
                           preferred_description='confidence ellipsoid',
                           confidence_level=confidenceLevel * 100.0)

    d['origin_uncertainty'] = ou

    return d
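The confidence coefficients are standard chi-square quantiles; a quick numeric check of the values used above:

import numpy as np
from scipy.stats import chi2

for dim in (1, 2, 3):
    print(dim, np.sqrt(chi2.ppf(0.90, dim)))
# 1 1.6449  (kp1)
# 2 2.1460  (kp2)
# 3 2.5003  (kp3)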
Example 12
def surf_events_to_cat(loc_file, pick_file):
    """
    Take location files (hypoinverse formatted) and picks (format TBD)
    and creates a single obspy catalog for later use and dissemination.

    :param loc_file: File path
    :param pick_file: File path
    :return: obspy.core.Catalog
    """
    # Read/parse location file and create Events for each
    surf_cat = Catalog()
    # Parse the pick file to a dictionary
    pick_dict = parse_picks(pick_file)
    with open(loc_file, 'r') as f:
        next(f)
        for ln in f:
            ln = ln.strip('\n')
            line = ln.split(',')
            eid = line[0]
            if eid not in pick_dict:
                print('No picks for this location, skipping for now.')
                continue
            ot = UTCDateTime(line[1])
            hmc_east = float(line[2])
            hmc_north = float(line[3])
            hmc_elev = float(line[4])
            gap = float(line[-5])
            rms = float(line[-3])
            errXY = float(line[-2])
            errZ = float(line[-1])
            converter = SURF_converter()
            lon, lat, elev = converter.to_lonlat((hmc_east, hmc_north,
                                                  hmc_elev))
            o = Origin(time=ot, longitude=lon, latitude=lat, depth=130 - elev)
            o.origin_uncertainty = OriginUncertainty()
            o.quality = OriginQuality()
            ou = o.origin_uncertainty
            oq = o.quality
            ou.horizontal_uncertainty = errXY * 1e3
            ou.preferred_description = "horizontal uncertainty"
            o.depth_errors.uncertainty = errZ * 1e3
            oq.standard_error = rms
            oq.azimuthal_gap = gap
            extra = AttribDict({
                'hmc_east': {
                    'value': hmc_east,
                    'namespace': 'smi:local/hmc'
                },
                'hmc_north': {
                    'value': hmc_north,
                    'namespace': 'smi:local/hmc'
                },
                'hmc_elev': {
                    'value': hmc_elev,
                    'namespace': 'smi:local/hmc'
                },
                'hmc_eid': {
                    'value': eid,
                    'namespace': 'smi:local/hmc'
                }
            })
            o.extra = extra
            rid = ResourceIdentifier(id=ot.strftime('%Y%m%d%H%M%S%f'))
            # Dummy magnitude of 1. for all events until further notice
            mag = Magnitude(mag=1., mag_errors=QuantityError(uncertainty=1.))
            ev = Event(origins=[o], magnitudes=[mag],
                       picks=pick_dict[eid], resource_id=rid)
            surf_cat.append(ev)
    return surf_cat
Example 13
def write_qml(config, sourcepar):
    if not config.options.qml_file:
        return
    qml_file = config.options.qml_file
    cat = read_events(qml_file)
    evid = config.hypo.evid
    try:
        ev = [e for e in cat if evid in str(e.resource_id)][0]
    except Exception:
        logging.warning('Unable to find evid "{}" in QuakeML file. '
                        'QuakeML output will not be written.'.format(evid))
        return

    origin = ev.preferred_origin()
    if origin is None:
        origin = ev.origins[0]
    origin_id = origin.resource_id
    origin_id_strip = origin_id.id.split('/')[-1]
    origin_id_strip = origin_id_strip.replace(config.smi_strip_from_origin_id,
                                              '')

    # Common parameters
    ssp_version = get_versions()['version']
    method_id = config.smi_base + '/sourcespec/' + ssp_version
    cr_info = CreationInfo()
    cr_info.agency_id = config.agency_id
    if config.author is None:
        author = '{}@{}'.format(getuser(), gethostname())
    else:
        author = config.author
    cr_info.author = author
    cr_info.creation_time = UTCDateTime()

    means = sourcepar.means_weight
    errors = sourcepar.errors_weight
    stationpar = sourcepar.station_parameters

    # Magnitude
    mag = Magnitude()
    _id = config.smi_magnitude_template.replace('$SMI_BASE', config.smi_base)
    _id = _id.replace('$ORIGIN_ID', origin_id_strip)
    mag.resource_id = ResourceIdentifier(id=_id)
    mag.method_id = ResourceIdentifier(id=method_id)
    mag.origin_id = origin_id
    mag.magnitude_type = 'Mw'
    mag.mag = means['Mw']
    mag_err = QuantityError()
    mag_err.uncertainty = errors['Mw']
    mag_err.confidence_level = 68.2
    mag.mag_errors = mag_err
    mag.station_count = len(stationpar)
    mag.evaluation_mode = 'automatic'
    mag.creation_info = cr_info

    # Seismic moment -- It has to be stored in a MomentTensor object
    # which, in turn, is part of a FocalMechanism object
    mt = MomentTensor()
    _id = config.smi_moment_tensor_template.replace('$SMI_BASE',
                                                    config.smi_base)
    _id = _id.replace('$ORIGIN_ID', origin_id_strip)
    mt.resource_id = ResourceIdentifier(id=_id)
    mt.derived_origin_id = origin_id
    mt.moment_magnitude_id = mag.resource_id
    mt.scalar_moment = means['Mo']
    mt_err = QuantityError()
    mt_err.lower_uncertainty = errors['Mo'][0]
    mt_err.upper_uncertainty = errors['Mo'][1]
    mt_err.confidence_level = 68.2
    mt.scalar_moment_errors = mt_err
    mt.method_id = method_id
    mt.creation_info = cr_info
    # And here is the FocalMechanism object
    fm = FocalMechanism()
    _id = config.smi_focal_mechanism_template.replace('$SMI_BASE',
                                                      config.smi_base)
    _id = _id.replace('$ORIGIN_ID', origin_id_strip)
    fm.resource_id = ResourceIdentifier(id=_id)
    fm.triggering_origin_id = origin_id
    fm.method_id = ResourceIdentifier(id=method_id)
    fm.moment_tensor = mt
    fm.creation_info = cr_info
    ev.focal_mechanisms.append(fm)

    # Station magnitudes
    for statId in sorted(stationpar.keys()):
        par = stationpar[statId]
        st_mag = StationMagnitude()
        seed_id = statId.split()[0]
        _id = config.smi_station_magnitude_template.replace(
            '$SMI_MAGNITUDE_TEMPLATE', config.smi_magnitude_template)
        _id = _id.replace('$ORIGIN_ID', origin_id_strip)
        _id = _id.replace('$SMI_BASE', config.smi_base)
        _id = _id.replace('$WAVEFORM_ID', seed_id)
        st_mag.resource_id = ResourceIdentifier(id=_id)
        st_mag.origin_id = origin_id
        st_mag.mag = par['Mw']
        st_mag.station_magnitude_type = 'Mw'
        st_mag.method_id = mag.method_id
        st_mag.creation_info = cr_info
        st_mag.waveform_id = WaveformStreamID(seed_string=seed_id)
        st_mag.extra = SSPExtra()
        st_mag.extra.moment = SSPTag(par['Mo'])
        st_mag.extra.corner_frequency = SSPTag(par['fc'])
        st_mag.extra.t_star = SSPTag(par['t_star'])
        ev.station_magnitudes.append(st_mag)
        st_mag_contrib = StationMagnitudeContribution()
        st_mag_contrib.station_magnitude_id = st_mag.resource_id
        mag.station_magnitude_contributions.append(st_mag_contrib)
    ev.magnitudes.append(mag)

    # Write other average parameters as custom tags
    ev.extra = SSPExtra()
    ev.extra.corner_frequency = SSPContainerTag()
    ev.extra.corner_frequency.value.value = SSPTag(means['fc'])
    ev.extra.corner_frequency.value.lower_uncertainty =\
        SSPTag(errors['fc'][0])
    ev.extra.corner_frequency.value.upper_uncertainty =\
        SSPTag(errors['fc'][1])
    ev.extra.corner_frequency.value.confidence_level = SSPTag(68.2)
    ev.extra.t_star = SSPContainerTag()
    ev.extra.t_star.value.value = SSPTag(means['t_star'])
    ev.extra.t_star.value.uncertainty = SSPTag(errors['t_star'])
    ev.extra.t_star.value.confidence_level = SSPTag(68.2)
    ev.extra.source_radius = SSPContainerTag()
    ev.extra.source_radius.value.value = SSPTag(means['ra'])
    ev.extra.source_radius.value.lower_uncertainty =\
        SSPTag(errors['ra'][0])
    ev.extra.source_radius.value.upper_uncertainty =\
        SSPTag(errors['ra'][1])
    ev.extra.source_radius.value.confidence_level = SSPTag(68.2)
    ev.extra.stress_drop = SSPContainerTag()
    ev.extra.stress_drop.value.value = SSPTag(means['bsd'])
    ev.extra.stress_drop.value.lower_uncertainty =\
        SSPTag(errors['bsd'][0])
    ev.extra.stress_drop.value.upper_uncertainty =\
        SSPTag(errors['bsd'][1])
    ev.extra.stress_drop.value.confidence_level = SSPTag(68.2)

    if config.set_preferred_magnitude:
        ev.preferred_magnitude_id = mag.resource_id.id

    qml_file_out = os.path.join(config.options.outdir, evid + '.xml')
    ev.write(qml_file_out, format='QUAKEML')
    logging.info('QuakeML file written to: ' + qml_file_out)
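One detail worth highlighting from this example: QuantityError also supports asymmetric bounds, used above for the scalar moment (a minimal sketch):

from obspy.core.event import QuantityError

qe = QuantityError(lower_uncertainty=0.1, upper_uncertainty=0.3,
                   confidence_level=68.2)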