Code example #1
0
File: dataframes.py  Project: usgs/libcomcat
def get_phase_dataframe(detail, catalog='preferred'):
    """Return a Pandas DataFrame consisting of Phase arrival data.

    Args:
        detail (DetailEvent): DetailEvent object.
        catalog (str): Source network ('us','ak', etc. ,or 'preferred'.)

    Returns:
        DataFrame: Pandas DataFrame containing columns:
            - Channel: Network.Station.Channel.Location (NSCL) style station
                       description. ("--" indicates missing information)
            - Distance: Distance (kilometers) from epicenter to station.
            - Azimuth: Azimuth (degrees) from epicenter to station.
            - Phase: Name of the phase (Pn,Pg, etc.)
            - Arrival Time: Pick arrival time (UTC).
            - Status: "manual" or "automatic".
            - Residual: Arrival time residual.
            - Weight: Arrival weight.
            - Agency: Agency ID.

        Returns None if the QuakeML content cannot be downloaded.
    Raises:
        AttributeError: If input DetailEvent does not have a phase-data product
            for the input catalog.
    """
    if catalog is None:
        catalog = 'preferred'
    columns = ['Channel', 'Distance', 'Azimuth',
               'Phase', 'Arrival Time', 'Status',
               'Residual', 'Weight', 'Agency']

    phasedata = detail.getProducts('phase-data', source=catalog)[0]
    quakeurl = phasedata.getContentURL('quakeml.xml')
    try:
        fh = urlopen(quakeurl, timeout=TIMEOUT)
        data = fh.read()
        fh.close()
    except Exception:
        # Best-effort download: callers treat None as "no data available".
        return None
    unpickler = Unpickler()
    rows = []
    with warnings.catch_warnings():
        # The QuakeML parser can emit noisy UserWarnings for some catalogs.
        warnings.filterwarnings("ignore", category=UserWarning)
        # NOTE: renamed from `catalog` — the original shadowed the string
        # parameter with the parsed obspy Catalog object.
        quake_catalog = unpickler.loads(data)
        catevent = quake_catalog.events[0]
        for pick in catevent.picks:
            phaserow = _get_phaserow(pick, catevent)
            if phaserow is None:
                continue
            rows.append(phaserow)
    # Build the frame once from the accumulated rows; DataFrame.append
    # was deprecated in pandas 1.4 and removed in pandas 2.0.
    df = pd.DataFrame(rows, columns=columns)
    return df
Code example #2
0
File: event.py  Project: znamy/obspy
def _read_sc3ml(filename, id_prefix='smi:org.gfz-potsdam.de/geofon/'):
    """
    Read a SC3ML file and returns a :class:`~obspy.core.event.Catalog`.

    An XSLT stylesheet (matched to the detected schema version) converts the
    SC3ML document to QuakeML, which is then parsed by the QuakeML reader.

    .. warning::
        This function should NOT be called directly; it is registered with
        ObsPy's event-reading machinery, so use
        :func:`~obspy.core.event.read_events` instead.

    :type filename: str
    :param filename: SC3ML file to be read.
    :type id_prefix: str
    :param id_prefix: ID prefix. SC3ML does not enforce any particular ID
        restriction, this ID prefix allows to convert the IDs to a well
        formatted QuakeML ID. You can modify the default ID prefix with the
        reverse DNS name of your institute.
    :rtype: :class:`~obspy.core.event.Catalog`
    :return: An ObsPy Catalog object.

    .. rubric:: Example

    >>> from obspy import read_events
    >>> cat = read_events('/path/to/iris_events.sc3ml')
    >>> print(cat)
    2 Event(s) in Catalog:
    2011-03-11T05:46:24.120000Z | +38.297, +142.373
    2006-09-10T04:26:33.610000Z |  +9.614, +121.961
    """
    document = _xml_doc_from_anything(filename)

    # The root tag's namespace carries the schema version, e.g.
    # {http://geofon.gfz-potsdam.de/ns/seiscomp3-schema/0.9}seiscomp
    version_match = re.match(
        r'{http://geofon\.gfz-potsdam\.de/ns/seiscomp3-schema/'
        r'([-+]?[0-9]*\.?[0-9]+)}', document.tag)

    try:
        schema_version = version_match.group(1)
    except AttributeError:
        # No namespace match -> version_match is None.
        raise ValueError("Not a SC3ML compatible file or string.")
    else:
        if schema_version not in SCHEMA_VERSION:
            raise ValueError(
                ("Can't read SC3ML version %s, ObsPy can deal with "
                 "versions [%s].") % (schema_version,
                                      ', '.join(SCHEMA_VERSION)))

    stylesheet = os.path.join(
        os.path.dirname(__file__), 'data',
        'sc3ml_%s__quakeml_1.2.xsl' % schema_version)
    xslt = etree.XSLT(etree.parse(stylesheet))
    quakeml_doc = xslt(document, ID_PREFIX=etree.XSLT.strparam(id_prefix))

    return Unpickler().load(io.BytesIO(quakeml_doc))
Code example #3
0
File: event.py  Project: meqash/obspy
def _read_sc3ml(filename, id_prefix='smi:org.gfz-potsdam.de/geofon/'):
    """
    Read a 0.9 SC3ML file and returns a :class:`~obspy.core.event.Catalog`.

    A bundled XSLT stylesheet converts the SC3ML document to QuakeML, which
    is then parsed by the QuakeML reader.

    .. warning::
        This function should NOT be called directly; it is registered with
        ObsPy's event-reading machinery, so use
        :func:`~obspy.core.event.read_events` instead.

    :type filename: str
    :param filename: SC3ML file to be read.
    :type id_prefix: str
    :param id_prefix: ID prefix. SC3ML does not enforce any particular ID
        restriction, this ID prefix allows to convert the IDs to a well
        formatted QuakeML ID. You can modify the default ID prefix with the
        reverse DNS name of your institute.
    :rtype: :class:`~obspy.core.event.Catalog`
    :return: An ObsPy Catalog object.

    .. rubric:: Example

    >>> from obspy import read_events
    >>> cat = read_events('/path/to/iris_events.sc3ml')
    >>> print(cat)
    2 Event(s) in Catalog:
    2011-03-11T05:46:24.120000Z | +38.297, +142.373
    2006-09-10T04:26:33.610000Z |  +9.614, +121.961
    """
    # This variant is hard-wired to schema version 0.9.
    stylesheet = os.path.join(os.path.dirname(__file__), 'data',
                              'sc3ml_0.9__quakeml_1.2.xsl')
    xslt = etree.XSLT(etree.parse(stylesheet))
    document = _xml_doc_from_anything(filename)
    converted = xslt(document, ID_PREFIX=etree.XSLT.strparam(id_prefix))
    return Unpickler().load(io.BytesIO(converted))
Code example #4
0
# -*- coding: utf-8 -*-
"""
Parse data from GHA Kermadec Spreadsheet or csv of eventIDs and produce plots of data/analysis
"""

import argparse
import datetime
from io import BytesIO
import math
import matplotlib
import matplotlib.pyplot as plt
from obspy.io.quakeml.core import Unpickler
import pycurl
import subprocess

quakeml_reader = Unpickler()

def get_event(eventID):

    """
    Get a single event from the GeoNet FDSN service.

    :param eventID: GeoNet eventID string
    :return: obspy Catalog containing the event, parsed from QuakeML
    """

    # Use the FDSN *event* web service, which returns QuakeML. The previous
    # URL pointed at the dataselect (waveform) service and appended eventID
    # without a parameter name, so the response could never be parsed by the
    # QuakeML reader below.
    query = "https://service.geonet.org.nz/fdsnws/event/1/query?eventid=" + eventID
    queryresult = curl(query)
    event = quakeml_reader.loads(queryresult)
    return event

Code example #5
0
def get_magnitude_data_frame(detail, catalog, magtype):
    """Return a Pandas DataFrame consisting of magnitude data.

    Args:
        detail (DetailEvent): DetailEvent object.
        catalog (str): Source catalog ('us','ak', etc. ,or 'preferred'.)
        magtype (str): Magnitude type (mb, ml, etc.)

    Returns:
        DataFrame: Pandas DataFrame containing columns:
            - Channel: Network.Station.Channel.Location (NSCL) style station
                       description. ("--" indicates missing information)
            - Type: Magnitude type.
            - Amplitude: Amplitude of seismic wave at each station (m).
            - Period: Period of seismic wave at each station (s).
            - Status: "manual" or "automatic".
            - Magnitude: Locally determined magnitude.
            - Weight: Magnitude weight.

        Returns None if the QuakeML content cannot be downloaded.
    Raises:
        AttributeError: If input DetailEvent does not have a phase-data
            product for the input catalog.
    """
    # Fixed duplicated assignment (was `columns = columns = [...]`).
    columns = [
        'Channel', 'Type', 'Amplitude', 'Period', 'Status', 'Magnitude',
        'Weight'
    ]
    phasedata = detail.getProducts('phase-data', source=catalog)[0]
    quakeurl = phasedata.getContentURL('quakeml.xml')
    try:
        fh = urlopen(quakeurl, timeout=TIMEOUT)
        data = fh.read()
        fh.close()
    except Exception:
        # Best-effort download: callers treat None as "no data available".
        return None
    fmt = '%s.%s.%s.%s'
    unpickler = Unpickler()
    rows = []
    with warnings.catch_warnings():
        # The QuakeML parser can emit noisy UserWarnings for some catalogs.
        warnings.filterwarnings("ignore", category=UserWarning)
        # NOTE: renamed from `catalog` — the original shadowed the string
        # parameter with the parsed obspy Catalog object.
        quake_catalog = unpickler.loads(data)
        catevent = quake_catalog.events[0]  # match this to input catalog
        for magnitude in catevent.magnitudes:
            if magnitude.magnitude_type.lower() != magtype.lower():
                continue
            for contribution in magnitude.station_magnitude_contributions:
                row = {}
                smag = contribution.station_magnitude_id.get_referred_object()
                ampid = smag.amplitude_id
                amp = None
                if ampid is None:
                    waveid = smag.waveform_id
                    tpl = (waveid.network_code, waveid.station_code, '--',
                           '--')
                else:
                    amp = ampid.get_referred_object()
                    if amp is None:
                        # Dangling amplitude reference; fall back to the
                        # station magnitude's waveform id.
                        waveid = smag.waveform_id
                        tpl = (waveid.network_code, waveid.station_code, '--',
                               '--')
                    else:
                        waveid = amp.waveform_id
                        tpl = (waveid.network_code, waveid.station_code,
                               waveid.channel_code, waveid.location_code)

                row['Channel'] = fmt % tpl
                row['Type'] = smag.station_magnitude_type
                if amp is not None:
                    row['Amplitude'] = amp.generic_amplitude
                    row['Period'] = amp.period
                    row['Status'] = amp.evaluation_mode
                else:
                    row['Amplitude'] = np.nan
                    row['Period'] = np.nan
                    row['Status'] = 'automatic'
                row['Magnitude'] = smag.mag
                row['Weight'] = contribution.weight
                rows.append(row)
    # Build the frame once; DataFrame.append was deprecated in pandas 1.4
    # and removed in pandas 2.0. Passing columns= preserves column order.
    df = pd.DataFrame(rows, columns=columns)
    return df
Code example #6
0
File: dataframes.py  Project: usgs/libcomcat
def get_magnitude_data_frame(detail, catalog, magtype):
    """Return a Pandas DataFrame consisting of magnitude data.

    Args:
        detail (DetailEvent): DetailEvent object.
        catalog (str): Source catalog ('us','ak', etc. ,or 'preferred'.)
        magtype (str): Magnitude type (mb, ml, etc.)

    Returns:
        DataFrame: Pandas DataFrame containing columns:
            - Channel: Network.Station.Channel.Location (NSCL) style station
                       description. ("--" indicates missing information)
            - Type: Magnitude type.
            - Amplitude: Amplitude of seismic wave at each station (m).
            - Period: Period of seismic wave at each station (s).
            - Status: "manual" or "automatic".
            - Magnitude: Locally determined magnitude.
            - Weight: Magnitude weight.

        Returns None if the QuakeML content cannot be downloaded.
    Raises:
        AttributeError: If input DetailEvent does not have a phase-data
            product for the input catalog.
    """
    # Fixed duplicated assignment (was `columns = columns = [...]`).
    columns = ['Channel', 'Type', 'Amplitude',
               'Period', 'Status', 'Magnitude',
               'Weight']
    phasedata = detail.getProducts('phase-data', source=catalog)[0]
    quakeurl = phasedata.getContentURL('quakeml.xml')
    try:
        fh = urlopen(quakeurl, timeout=TIMEOUT)
        data = fh.read()
        fh.close()
    except Exception:
        # Best-effort download: callers treat None as "no data available".
        return None
    fmt = '%s.%s.%s.%s'
    unpickler = Unpickler()
    rows = []
    with warnings.catch_warnings():
        # The QuakeML parser can emit noisy UserWarnings for some catalogs.
        warnings.filterwarnings("ignore", category=UserWarning)
        # NOTE: renamed from `catalog` — the original shadowed the string
        # parameter with the parsed obspy Catalog object.
        quake_catalog = unpickler.loads(data)
        catevent = quake_catalog.events[0]  # match this to input catalog
        for magnitude in catevent.magnitudes:
            if magnitude.magnitude_type.lower() != magtype.lower():
                continue
            for contribution in magnitude.station_magnitude_contributions:
                row = {}
                smag = contribution.station_magnitude_id.get_referred_object()
                ampid = smag.amplitude_id
                amp = None
                if ampid is None:
                    waveid = smag.waveform_id
                    tpl = (waveid.network_code,
                           waveid.station_code,
                           '--',
                           '--')
                else:
                    amp = ampid.get_referred_object()
                    if amp is None:
                        # Dangling amplitude reference; fall back to the
                        # station magnitude's waveform id.
                        waveid = smag.waveform_id
                        tpl = (waveid.network_code,
                               waveid.station_code,
                               '--',
                               '--')
                    else:
                        waveid = amp.waveform_id
                        tpl = (waveid.network_code,
                               waveid.station_code,
                               waveid.channel_code,
                               waveid.location_code)

                row['Channel'] = fmt % tpl
                row['Type'] = smag.station_magnitude_type
                if amp is not None:
                    row['Amplitude'] = amp.generic_amplitude
                    row['Period'] = amp.period
                    row['Status'] = amp.evaluation_mode
                else:
                    row['Amplitude'] = np.nan
                    row['Period'] = np.nan
                    row['Status'] = 'automatic'
                row['Magnitude'] = smag.mag
                row['Weight'] = contribution.weight
                rows.append(row)
    # Build the frame once; DataFrame.append was deprecated in pandas 1.4
    # and removed in pandas 2.0. Passing columns= preserves column order.
    df = pd.DataFrame(rows, columns=columns)
    return df