Exemplo n.º 1
0
    def catalog(self):
        """
        Wrap the existing Event in a new single-event Catalog.

        :rtype: :class:`~obspy.core.event.Catalog`
        :return: Catalog containing ``self.event``, stamped with fresh
            creation info (current time, this agency, the event's version)
            and a resource id from :meth:`_rid`.
        """
        cat = Catalog(events=[self.event])
        # Record provenance of the catalog itself; the version is carried
        # over from the wrapped event's creation info.
        cat.creation_info = CreationInfo(
            creation_time=UTCDateTime(),
            agency_id=self.agency,
            version=self.event.creation_info.version,
        )
        cat.resource_id = self._rid(cat)
        return cat
Exemplo n.º 2
0
 def _deserialize(self):
     """
     Parse the NEIC PDE 'mchedr' bulletin held in ``self.fh`` into an
     ObsPy Catalog.

     The first two characters of every line identify the record type.
     'HY' opens a new event; all following records attach data to the
     most recently created event / pick / focal-mechanism objects.
     Closes ``self.fh`` when done.

     :rtype: Catalog
     """
     catalog = Catalog()
     # Derive a resource id from the filename, normalising path separators.
     res_id = '/'.join((res_id_prefix,
                        self.filename.replace(':', '/')))\
         .replace('\\', '/').replace('//', '/')
     catalog.resource_id = ResourceIdentifier(id=res_id)
     catalog.description = 'Created from NEIC PDE mchedr format'
     catalog.comments = ''
     catalog.creation_info = CreationInfo(creation_time=UTCDateTime())
     for line in self.fh.readlines():
         # XXX: ugly, probably we should do everything in byte strings
         # here? Is the pde / mchedr format unicode aware?
         line = line.decode()
         record_id = line[0:2]
         # NOTE(review): 'event', 'pick', 'arrival' and 'focal_mechanism'
         # are bound by earlier records; a malformed file starting with
         # e.g. 'P ' would raise NameError -- well-formed input assumed.
         if record_id == 'HY':
             event = self._parse_record_hy(line)
             catalog.append(event)
         elif record_id == 'P ':
             pick, arrival = self._parse_record_p(line, event)
         elif record_id == 'E ':
             self._parse_record_e(line, event)
         elif record_id == 'L ':
             self._parse_record_l(line, event)
         elif record_id == 'A ':
             self._parse_record_a(line, event)
         elif record_id == 'C ':
             self._parse_record_c(line, event)
         elif record_id == 'AH':
             self._parse_record_ah(line, event)
         elif record_id == 'AE':
             self._parse_record_ae(line, event)
         elif record_id == 'Dp':
             focal_mechanism = self._parse_record_dp(line, event)
         elif record_id == 'Dt':
             self._parse_record_dt(line, focal_mechanism)
         elif record_id == 'Da':
             self._parse_record_da(line, focal_mechanism)
         elif record_id == 'Dc':
             self._parse_record_dc(line, focal_mechanism)
         elif record_id == 'M ':
             self._parse_record_m(line, event, pick)
         elif record_id == 'S ':
             self._parse_record_s(line, event, pick, arrival)
     self.fh.close()
     # strip extra whitespaces from event comments
     for event in catalog:
         for comment in event.comments:
             comment.text = comment.text.strip()
         event.scope_resource_ids()
     return catalog
Exemplo n.º 3
0
 def _deserialize(self):
     """
     Convert the mchedr (NEIC PDE) records in ``self.fh`` into a Catalog.

     The two leading characters of every line select the record parser;
     'HY' opens a new event and subsequent records are attached to the
     objects parsed most recently.  ``self.fh`` is closed afterwards.
     """
     cat = Catalog()
     # Resource id derived from the filename with separators normalised.
     rid = '/'.join((res_id_prefix, self.filename.replace(':', '/')))
     rid = rid.replace('\\', '/').replace('//', '/')
     cat.resource_id = ResourceIdentifier(id=rid)
     cat.description = 'Created from NEIC PDE mchedr format'
     cat.comments = ''
     cat.creation_info = CreationInfo(creation_time=UTCDateTime())
     for raw_line in self.fh.readlines():
         # XXX: ugly -- should this rather operate on byte strings?
         # Is the pde / mchedr format unicode aware?
         text = raw_line.decode()
         rec_type = text[:2]
         if rec_type == 'HY':
             cur_event = self._parse_record_hy(text)
             cat.append(cur_event)
         elif rec_type == 'P ':
             cur_pick, cur_arrival = self._parse_record_p(text, cur_event)
         elif rec_type == 'E ':
             self._parse_record_e(text, cur_event)
         elif rec_type == 'L ':
             self._parse_record_l(text, cur_event)
         elif rec_type == 'A ':
             self._parse_record_a(text, cur_event)
         elif rec_type == 'C ':
             self._parse_record_c(text, cur_event)
         elif rec_type == 'AH':
             self._parse_record_ah(text, cur_event)
         elif rec_type == 'AE':
             self._parse_record_ae(text, cur_event)
         elif rec_type == 'Dp':
             cur_fm = self._parse_record_dp(text, cur_event)
         elif rec_type == 'Dt':
             self._parse_record_dt(text, cur_fm)
         elif rec_type == 'Da':
             self._parse_record_da(text, cur_fm)
         elif rec_type == 'Dc':
             self._parse_record_dc(text, cur_fm)
         elif rec_type == 'M ':
             self._parse_record_m(text, cur_event, cur_pick)
         elif rec_type == 'S ':
             self._parse_record_s(text, cur_event, cur_pick, cur_arrival)
     self.fh.close()
     # Trim stray whitespace from every event comment.
     for ev in cat:
         for note in ev.comments:
             note.text = note.text.strip()
         ev.scope_resource_ids()
     return cat
Exemplo n.º 4
0
 def _deserialize(self):
     """
     Parse the NEIC PDE 'mchedr' file in ``self.fh`` into a Catalog.

     Older camelCase variant of the reader: the first two characters of
     each line select the ``_parseRecord*`` handler; 'HY' creates a new
     event and following records attach to the most recent objects.
     Closes ``self.fh`` when done.

     :rtype: Catalog
     """
     catalog = Catalog()
     res_id = "/".join((res_id_prefix, self.filename))
     catalog.resource_id = ResourceIdentifier(id=res_id)
     catalog.description = "Created from NEIC PDE mchedr format"
     catalog.comments = ""
     catalog.creation_info = CreationInfo(creation_time=UTCDateTime())
     for line in self.fh.readlines():
         # XXX: ugly, probably we should do everything in byte strings
         # here? Is the pde / mchedr format unicode aware?
         line = line.decode()
         record_id = line[0:2]
         # NOTE(review): 'event', 'pick', 'arrival' and 'focal_mechanism'
         # are bound by earlier records; well-formed input is assumed.
         if record_id == "HY":
             event = self._parseRecordHY(line)
             catalog.append(event)
         elif record_id == "P ":
             pick, arrival = self._parseRecordP(line, event)
         elif record_id == "E ":
             self._parseRecordE(line, event)
         elif record_id == "L ":
             self._parseRecordL(line, event)
         elif record_id == "A ":
             self._parseRecordA(line, event)
         elif record_id == "C ":
             self._parseRecordC(line, event)
         elif record_id == "AH":
             self._parseRecordAH(line, event)
         elif record_id == "AE":
             self._parseRecordAE(line, event)
         elif record_id == "Dp":
             focal_mechanism = self._parseRecordDp(line, event)
         elif record_id == "Dt":
             self._parseRecordDt(line, focal_mechanism)
         elif record_id == "Da":
             self._parseRecordDa(line, focal_mechanism)
         elif record_id == "Dc":
             self._parseRecordDc(line, focal_mechanism)
         elif record_id == "M ":
             self._parseRecordM(line, event, pick)
         elif record_id == "S ":
             self._parseRecordS(line, event, pick, arrival)
     self.fh.close()
     # strip extra whitespaces from event comments
     for event in catalog:
         for comment in event.comments:
             comment.text = comment.text.strip()
     return catalog
Exemplo n.º 5
0
 def _deserialize(self):
     """
     Parse the NEIC PDE 'mchedr' file in ``self.fh`` into a Catalog.

     Variant without a ``decode()`` call, so lines are used as read --
     presumably ``self.fh`` is opened in text mode here; TODO confirm.
     The two leading characters of a line select the record handler;
     'HY' creates a new event.  Closes ``self.fh`` when done.

     :rtype: Catalog
     """
     catalog = Catalog()
     res_id = '/'.join((res_id_prefix, self.filename))
     catalog.resource_id = ResourceIdentifier(id=res_id)
     catalog.description = 'Created from NEIC PDE mchedr format'
     catalog.comments = ''
     catalog.creation_info = CreationInfo(creation_time=UTCDateTime())
     for line in self.fh.readlines():
         record_id = line[0:2]
         # NOTE(review): 'event', 'pick', 'arrival' and 'focal_mechanism'
         # are bound by earlier records; well-formed input is assumed.
         if record_id == 'HY':
             event = self._parseRecordHY(line)
             catalog.append(event)
         elif record_id == 'P ':
             pick, arrival = self._parseRecordP(line, event)
         elif record_id == 'E ':
             self._parseRecordE(line, event)
         elif record_id == 'L ':
             self._parseRecordL(line, event)
         elif record_id == 'A ':
             self._parseRecordA(line, event)
         elif record_id == 'C ':
             self._parseRecordC(line, event)
         elif record_id == 'AH':
             self._parseRecordAH(line, event)
         elif record_id == 'AE':
             self._parseRecordAE(line, event)
         elif record_id == 'Dp':
             focal_mechanism = self._parseRecordDp(line, event)
         elif record_id == 'Dt':
             self._parseRecordDt(line, focal_mechanism)
         elif record_id == 'Da':
             self._parseRecordDa(line, focal_mechanism)
         elif record_id == 'Dc':
             self._parseRecordDc(line, focal_mechanism)
         elif record_id == 'M ':
             self._parseRecordM(line, event, pick)
         elif record_id == 'S ':
             self._parseRecordS(line, event, pick, arrival)
     self.fh.close()
     # strip extra whitespaces from event comments
     for event in catalog:
         for comment in event.comments:
             comment.text = comment.text.strip()
     return catalog
Exemplo n.º 6
0
    def _deserialize(self):
        """
        Parse ``self.lines`` (GSE2 bulletin) into an ObsPy Catalog.

        Only lines inside a BEGIN-STOP block whose DATA_TYPE is BULLETIN
        are considered.  'Reviewed (Event) Bulletin' lines become catalog
        comments; 'EVENT' lines start a new event.

        :rtype: Catalog
        :raises GSE2BulletinSyntaxError: on unbalanced BEGIN/STOP tags or
            unexpected end of input.
        """
        catalog = Catalog()
        catalog.description = 'Created from GSE2 format'
        catalog.creation_info = self._get_creation_info()

        # Flag used to ignore line which aren't in a BEGIN-STOP block
        begin_block = False
        # Flag used to ignore line which aren't in a BULLETIN block
        bulletin_block = False

        try:
            for line in self.lines:
                if line.startswith('BEGIN'):
                    if begin_block:
                        # 2 BEGIN without STOP
                        message = self._add_line_nb('Missing STOP tag')
                        raise GSE2BulletinSyntaxError(message)
                    else:
                        # Enter a BEGIN block
                        begin_block = True

                    self._check_header(line)
                elif line.startswith('STOP'):
                    if begin_block:
                        # Exit a BEGIN-STOP block
                        begin_block = False
                    else:
                        # STOP without BEGIN
                        message = self._add_line_nb('Missing BEGIN tag')
                        raise GSE2BulletinSyntaxError(message)
                elif line.startswith('DATA_TYPE'):
                    bulletin_block = line[10:18] == 'BULLETIN'

                if not begin_block or not bulletin_block:
                    # Not in a BEGIN-STOP block, nor a DATA_TYPE BULLETIN
                    # block.
                    continue

                # If a "Reviewed Event Bulletin" or "Reviewed Bulletin"
                # line exists, put it in comment
                if 'Reviewed Event Bulletin' in line \
                        or 'Reviewed Bulletin' in line:
                    comment = self._comment(line.strip())
                    if comment.text:
                        catalog.comments.append(comment)
                # Detect start of an event
                elif line.startswith('EVENT'):
                    event = self._parse_event(line)
                    if event:
                        catalog.append(event)

        except StopIteration:
            # self.lines is apparently an iterator that may raise on
            # exhaustion mid-parse.
            message = self._add_line_nb('Unexpected EOF while parsing')
            raise GSE2BulletinSyntaxError(message)
        except Exception:
            self._warn('Unexpected error')
            raise

        if begin_block:
            # BEGIN-STOP block not closed
            text = 'Unexpected EOF while parsing, BEGIN-STOP block not closed'
            message = self._add_line_nb(text)
            raise GSE2BulletinSyntaxError(message)

        catalog.resource_id = self._get_res_id('event/evid')

        return catalog
def readSeishubEventFile(filename):
    """
    Reads a Seishub event file and returns a ObsPy Catalog object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.event.readEvents` function, call this instead.

    :type filename: str
    :param filename: Seishub event file to be read.
    :rtype: :class:`~obspy.core.event.Catalog`
    :return: A ObsPy Catalog object.

    .. rubric:: Example
    """
    global CURRENT_TYPE

    base_name = os.path.basename(filename)

    # The file basename encodes which system produced the event file.
    if base_name.lower().startswith("baynet"):
        CURRENT_TYPE = "baynet"
    elif base_name.lower().startswith("earthworm"):
        CURRENT_TYPE = "earthworm"
    elif base_name.lower().startswith("gof"):
        CURRENT_TYPE = "seiscomp3"
    elif base_name.lower().startswith("obspyck") or base_name == "5622":
        CURRENT_TYPE = "obspyck"
    elif base_name.lower().startswith("toni"):
        CURRENT_TYPE = "toni"
    else:
        # Fail loudly with an informative message instead of printing a
        # debugging scream and raising a bare Exception.
        raise Exception("Unknown SeisHub event file type: %s" % base_name)

    # Just init the parser, the SeisHub event file format has no namespaces.
    parser = XMLParser(filename)
    # Create new Event object.
    public_id = parser.xpath('event_id/value')[0].text

    # A Seishub event just specifies a single event so Catalog information is
    # not really given.
    catalog = Catalog()
    catalog.resource_id = "/".join([RESOURCE_ROOT, "catalog", public_id])

    # Read the event_type tag.
    account = parser.xpath2obj('event_type/account', parser, str)
    user = parser.xpath2obj('event_type/user', parser, str)
    global_evaluation_mode = parser.xpath2obj('event_type/value', parser, str)
    public = parser.xpath2obj('event_type/public', parser, str)
    public = {"True": True, "False": False}.get(public, None)
    if account is not None and account.lower() != "sysop":
        public = False
    # The author will be stored in the CreationInfo object. This will be the
    # creation info of the event as well as on all picks.
    author = user
    if CURRENT_TYPE in ["seiscomp3", "earthworm"]:
        author = CURRENT_TYPE
    creation_info = {"author": author,
                     "agency_id": "Erdbebendienst Bayern",
                     "agency_uri": "%s/agency" % RESOURCE_ROOT,
                     "creation_time": NOW}

    # Create the event object.
    event = Event(resource_id="/".join([RESOURCE_ROOT, "event", public_id]),
                  creation_info=creation_info)
    # If account is None or 'sysop' and public is true, write 'public in the
    # comment, 'private' otherwise.
    event.extra = AttribDict()
    event.extra.public = {'value': public, 'namespace': NAMESPACE}
    event.extra.evaluationMode = {'value': global_evaluation_mode,
                                  'namespace': NAMESPACE}

    event_type = parser.xpath2obj('type', parser, str)
    if event_type is not None:
        if event_type == "induced earthquake":
            event_type = "induced or triggered event"
        if event_type != "null":
            event.event_type = event_type

    # Parse the origins. Only a single origin per file is supported.
    origins = parser.xpath("origin")
    if len(origins) > 1:
        msg = "Only files with a single origin are currently supported"
        raise Exception(msg)
    for origin_el in origins:
        origin = __toOrigin(parser, origin_el)
        event.origins.append(origin)
    # Parse the magnitudes.
    for magnitude_el in parser.xpath("magnitude"):
        magnitude = __toMagnitude(parser, magnitude_el, origin)
        if magnitude.mag is None:
            continue
        event.magnitudes.append(magnitude)
    # Parse the picks. Pass the global evaluation mode (automatic, manual)
    for pick_el in parser.xpath("pick"):
        pick = __toPick(parser, pick_el, global_evaluation_mode)
        if pick is None:
            continue
        event.picks.append(pick)
        # The arrival object gets the following things from the Seishub.pick
        # objects
        # arrival.time_weight = pick.phase_weight
        # arrival.time_residual = pick.phase_res
        # arrival.azimuth = pick.azimuth
        # arrival.take_off_angle = pick.incident
        # arrival.distance = hyp_dist
        arrival = __toArrival(parser, pick_el, global_evaluation_mode, pick)
        if event.origins:
            event.origins[0].arrivals.append(arrival)

    # Parse the station magnitudes.
    for stat_magnitude_el in parser.xpath("stationMagnitude"):
        stat_magnitude = __toStationMagnitude(parser, stat_magnitude_el)
        event.station_magnitudes.append(stat_magnitude)

    # Parse the amplitudes
    # we don't reference their id in the corresponding station magnitude,
    # because we use one amplitude measurement for each component
    for el in parser.xpath("stationMagnitude/amplitude"):
        event.amplitudes.append(__toAmplitude(parser, el))

    # All station magnitudes reference the single origin.  (The original
    # code ran this loop a second time *before* any station magnitude had
    # been parsed; that no-op duplicate has been removed.)
    for mag in event.station_magnitudes:
        mag.origin_id = event.origins[0].resource_id

    for _i, stat_mag in enumerate(event.station_magnitudes):
        contrib = StationMagnitudeContribution()
        # The order of station magnitude objects is the same as in the xml
        # file.
        weight = parser.xpath2obj("weight",
                                  parser.xpath("stationMagnitude")[_i], float)
        if weight is not None:
            contrib.weight = weight
        contrib.station_magnitude_id = stat_mag.resource_id
        event.magnitudes[0].station_magnitude_contributions.append(contrib)

    for foc_mec_el in parser.xpath("focalMechanism"):
        foc_mec = __toFocalMechanism(parser, foc_mec_el)
        if foc_mec is not None:
            event.focal_mechanisms.append(foc_mec)

    # Set the origin id for the focal mechanisms. There is only one origin per
    # SeisHub event file.
    for focmec in event.focal_mechanisms:
        focmec.triggering_origin_id = event.origins[0].resource_id

    # Add the event to the catalog
    catalog.append(event)

    return catalog
Exemplo n.º 8
0
    def _deserialize(self):
        """
        Parse ``self.lines`` (GSE2 bulletin) into an ObsPy Catalog.

        Lines are only processed while inside a BEGIN-STOP block whose
        DATA_TYPE is BULLETIN.  'Reviewed (Event) Bulletin' lines become
        catalog comments; 'EVENT' lines start a new event.

        :rtype: Catalog
        :raises GSE2BulletinSyntaxError: on unbalanced BEGIN/STOP tags or
            unexpected end of input.
        """
        catalog = Catalog()
        catalog.description = 'Created from GSE2 format'
        catalog.creation_info = self._get_creation_info()

        # Flag used to ignore line which aren't in a BEGIN-STOP block
        begin_block = False
        # Flag used to ignore line which aren't in a BULLETIN block
        bulletin_block = False

        try:
            for line in self.lines:
                if line.startswith('BEGIN'):
                    if begin_block:
                        # 2 BEGIN without STOP
                        message = self._add_line_nb('Missing STOP tag')
                        raise GSE2BulletinSyntaxError(message)
                    else:
                        # Enter a BEGIN block
                        begin_block = True

                    self._check_header(line)
                elif line.startswith('STOP'):
                    if begin_block:
                        # Exit a BEGIN-STOP block
                        begin_block = False
                    else:
                        # STOP without BEGIN
                        message = self._add_line_nb('Missing BEGIN tag')
                        raise GSE2BulletinSyntaxError(message)
                elif line.startswith('DATA_TYPE'):
                    bulletin_block = line[10:18] == 'BULLETIN'

                if not begin_block or not bulletin_block:
                    # Not in a BEGIN-STOP block, nor a DATA_TYPE BULLETIN
                    # block.
                    continue

                # If a "Reviewed Event Bulletin" or "Reviewed Bulletin"
                # line exists, put it in comment
                if 'Reviewed Event Bulletin' in line \
                        or 'Reviewed Bulletin' in line:
                    comment = self._comment(line.strip())
                    if comment.text:
                        catalog.comments.append(comment)
                # Detect start of an event
                elif line.startswith('EVENT'):
                    event = self._parse_event(line)
                    if event:
                        catalog.append(event)

        except StopIteration:
            # self.lines is apparently an iterator that may raise on
            # exhaustion mid-parse.
            message = self._add_line_nb('Unexpected EOF while parsing')
            raise GSE2BulletinSyntaxError(message)
        except Exception:
            self._warn('Unexpected error')
            raise

        if begin_block:
            # BEGIN-STOP block not closed
            text = 'Unexpected EOF while parsing, BEGIN-STOP block not closed'
            message = self._add_line_nb(text)
            raise GSE2BulletinSyntaxError(message)

        catalog.resource_id = self._get_res_id('event/evid')

        return catalog
Exemplo n.º 9
0
def iris2quakeml(url, output_folder=None):
    """
    Download a GCMT event from the IRIS SPUD service, reduce it to a single
    origin/magnitude/focal mechanism, convert units and write it out as a
    QuakeML file.

    :type url: str
    :param url: SPUD event page URL (or spudservice quakeml URL).
    :type output_folder: str or None
    :param output_folder: Optional directory the QuakeML file is written to.
    :raises Exception: if the download fails.
    :raises ValueError: if the payload cannot be parsed or lacks a moment
        tensor / scalar moment.
    """
    # Rewrite a /spud/ page URL to the raw /spudservice/ quakeml endpoint.
    if "/spudservice/" not in url:
        url = url.replace("/spud/", "/spudservice/")
        if url.endswith("/"):
            url += "quakeml"
        else:
            url += "/quakeml"
    print("Downloading %s..." % url)
    r = requests.get(url)
    if r.status_code != 200:
        msg = "Error Downloading file!"
        raise Exception(msg)

    # For some reason the quakeml file is escaped HTML.
    h = HTMLParser.HTMLParser()

    data = h.unescape(r.content)

    # Replace some XML tags.
    data = data.replace("long-period body waves", "body waves")
    data = data.replace("intermediate-period surface waves", "surface waves")
    data = data.replace("long-period mantle waves", "mantle waves")

    data = data.replace("<html><body><pre>", "")
    data = data.replace("</pre></body></html>", "")

    # Change the resource identifiers. Colons are not allowed in QuakeML.
    pattern = r"(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})\.(\d{6})"
    data = re.sub(pattern, r"\1-\2-\3T\4-\5-\6.\7", data)

    data = StringIO(data)

    try:
        cat = readEvents(data)
    except Exception:
        msg = "Could not read downloaded event data"
        raise ValueError(msg)

    # Parse the event, and use only one origin, magnitude and focal mechanism.
    # Only the first event is used. Should not be a problem for the chosen
    # global cmt application.
    ev = cat[0]

    if ev.preferred_origin():
        ev.origins = [ev.preferred_origin()]
    else:
        ev.origins = [ev.origins[0]]
    if ev.preferred_focal_mechanism():
        ev.focal_mechanisms = [ev.preferred_focal_mechanism()]
    else:
        ev.focal_mechanisms = [ev.focal_mechanisms[0]]

    try:
        mt = ev.focal_mechanisms[0].moment_tensor
    except Exception:
        msg = "No moment tensor found in file."
        # BUG FIX: the original raised a bare ValueError, dropping the msg.
        raise ValueError(msg)
    seismic_moment_in_dyn_cm = mt.scalar_moment
    if not seismic_moment_in_dyn_cm:
        msg = "No scalar moment found in file."
        raise ValueError(msg)

    # Create a new magnitude object with the moment magnitude calculated from
    # the given seismic moment.
    mag = Magnitude()
    mag.magnitude_type = "Mw"
    mag.origin_id = ev.origins[0].resource_id
    # This is the formula given on the GCMT homepage.
    mag.mag = (2.0 / 3.0) * (math.log10(seismic_moment_in_dyn_cm) - 16.1)
    mag.resource_id = ev.origins[0].resource_id.resource_id.replace(
        "Origin", "Magnitude")
    ev.magnitudes = [mag]
    ev.preferred_magnitude_id = mag.resource_id

    # Convert the depth to meters.
    org = ev.origins[0]
    org.depth *= 1000.0
    if org.depth_errors.uncertainty:
        org.depth_errors.uncertainty *= 1000.0

    # Ugly asserts -- this is just a simple script.
    assert(len(ev.magnitudes) == 1)
    assert(len(ev.origins) == 1)
    assert(len(ev.focal_mechanisms) == 1)

    # All values given in the QuakeML file are given in dyne * cm. Convert them
    # to N * m.
    for key, value in mt.tensor.iteritems():
        if key.startswith("m_") and len(key) == 4:
            mt.tensor[key] /= 1E7
        if key.endswith("_errors") and hasattr(value, "uncertainty"):
            mt.tensor[key].uncertainty /= 1E7
    mt.scalar_moment /= 1E7
    if mt.scalar_moment_errors.uncertainty:
        mt.scalar_moment_errors.uncertainty /= 1E7
    p_axes = ev.focal_mechanisms[0].principal_axes
    for ax in [p_axes.t_axis, p_axes.p_axis, p_axes.n_axis]:
        if ax is None or not ax.length:
            continue
        ax.length /= 1E7

    # Check if it has a source time function
    stf = mt.source_time_function
    if stf:
        if stf.type != "triangle":
            msg = ("Source time function type '%s' not yet mapped. Please "
                   "contact the developers.") % stf.type
            raise NotImplementedError(msg)
        if not stf.duration:
            if not stf.decay_time:
                msg = "Not known how to derive duration without decay time."
                raise NotImplementedError(msg)
            # Approximate the duration for a triangular STF.
            stf.duration = 2 * stf.decay_time

    # Get the flinn_engdahl region for a nice name.
    fe = FlinnEngdahl()
    region_name = fe.get_region(ev.origins[0].longitude,
                                ev.origins[0].latitude)
    region_name = region_name.replace(" ", "_")
    event_name = "GCMT_event_%s_Mag_%.1f_%s-%s-%s-%s-%s.xml" % \
        (region_name, ev.magnitudes[0].mag, ev.origins[0].time.year,
         ev.origins[0].time.month, ev.origins[0].time.day,
         ev.origins[0].time.hour, ev.origins[0].time.minute)

    # Check if the ids of the magnitude and origin contain the corresponding
    # tag. Otherwise replace them.
    ev.origins[0].resource_id = ev.origins[0].resource_id.resource_id.replace(
        "quakeml/gcmtid", "quakeml/origin/gcmtid")
    ev.magnitudes[0].resource_id = \
        ev.magnitudes[0].resource_id.resource_id.replace(
            "quakeml/gcmtid", "quakeml/magnitude/gcmtid")

    # Fix up the moment tensor resource_ids.
    mt.derived_origin_id = ev.origins[0].resource_id
    mt.resource_id = mt.resource_id.resource_id.replace(
        "focalmechanism", "momenttensor")

    cat = Catalog()
    cat.resource_id = ev.origins[0].resource_id.resource_id.replace(
        "origin", "event_parameters")
    cat.append(ev)
    if output_folder:
        event_name = os.path.join(output_folder, event_name)
    cat.write(event_name, format="quakeml", validate=True)
    print("Written file %s" % event_name)
Exemplo n.º 10
0
def readSeishubEventFile(filename):
    """
    Reads a Seishub event file and returns a ObsPy Catalog object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.event.readEvents` function, call this instead.

    :type filename: str
    :param filename: Seishub event file to be read.
    :rtype: :class:`~obspy.core.event.Catalog`
    :return: A ObsPy Catalog object.

    .. rubric:: Example
    """
    global CURRENT_TYPE

    base_name = os.path.basename(filename)

    # The file basename encodes which system produced the event file.
    if base_name.lower().startswith("baynet"):
        CURRENT_TYPE = "baynet"
    elif base_name.lower().startswith("earthworm"):
        CURRENT_TYPE = "earthworm"
    elif base_name.lower().startswith("gof"):
        CURRENT_TYPE = "seiscomp3"
    elif base_name.lower().startswith("obspyck") or base_name == "5622":
        CURRENT_TYPE = "obspyck"
    elif base_name.lower().startswith("toni"):
        CURRENT_TYPE = "toni"
    else:
        # Fail loudly with an informative message instead of printing a
        # debugging scream and raising a bare Exception.
        raise Exception("Unknown SeisHub event file type: %s" % base_name)

    # Just init the parser, the SeisHub event file format has no namespaces.
    parser = XMLParser(filename)
    # Create new Event object.
    public_id = parser.xpath('event_id/value')[0].text

    # A Seishub event just specifies a single event so Catalog information is
    # not really given.
    catalog = Catalog()
    catalog.resource_id = "/".join([RESOURCE_ROOT, "catalog", public_id])

    # Read the event_type tag.
    account = parser.xpath2obj('event_type/account', parser, str)
    user = parser.xpath2obj('event_type/user', parser, str)
    global_evaluation_mode = parser.xpath2obj('event_type/value', parser, str)
    public = parser.xpath2obj('event_type/public', parser, str)
    public = {
        "True": True,
        "False": False,
        "true": True,
        "false": False
    }.get(public, None)
    if account is not None and account.lower() != "sysop":
        public = False
    # The author will be stored in the CreationInfo object. This will be the
    # creation info of the event as well as on all picks.
    author = user
    # Machine-generated sources are private/automatic; human-curated ones
    # are public/manual.
    if CURRENT_TYPE in ["seiscomp3", "earthworm"]:
        public = False
        author = CURRENT_TYPE
        global_evaluation_mode = "automatic"
    elif CURRENT_TYPE in ["baynet", "toni"]:
        public = True
        author = CURRENT_TYPE
        global_evaluation_mode = "manual"
    creation_info = {
        "author": author,
        "agency_id": "Erdbebendienst Bayern",
        "agency_uri": "%s/agency" % RESOURCE_ROOT,
        "creation_time": NOW
    }

    # Create the event object.
    event = Event(resource_id="/".join([RESOURCE_ROOT, "event", public_id]),
                  creation_info=creation_info)
    # If account is None or 'sysop' and public is true, write 'public in the
    # comment, 'private' otherwise.
    event.extra = AttribDict()
    event.extra.public = {'value': public, 'namespace': NAMESPACE}
    event.extra.evaluationMode = {
        'value': global_evaluation_mode,
        'namespace': NAMESPACE
    }

    event_type = parser.xpath2obj('type', parser, str)
    if event_type is not None:
        if event_type == "induced earthquake":
            event_type = "induced or triggered event"
        if event_type != "null":
            event.event_type = event_type

    # Parse the origins. Only a single origin per file is supported.
    origins = parser.xpath("origin")
    if len(origins) > 1:
        msg = "Only files with a single origin are currently supported"
        raise Exception(msg)
    for origin_el in origins:
        origin = __toOrigin(parser, origin_el)
        event.origins.append(origin)
    # Parse the magnitudes.
    for magnitude_el in parser.xpath("magnitude"):
        magnitude = __toMagnitude(parser, magnitude_el, origin)
        if magnitude.mag is None:
            continue
        event.magnitudes.append(magnitude)
    # Parse the picks. Pass the global evaluation mode (automatic, manual)
    for pick_el in parser.xpath("pick"):
        pick = __toPick(parser, pick_el, global_evaluation_mode)
        if pick is None:
            continue
        event.picks.append(pick)
        # The arrival object gets the following things from the Seishub.pick
        # objects
        # arrival.time_weight = pick.phase_weight
        # arrival.time_residual = pick.phase_res
        # arrival.azimuth = pick.azimuth
        # arrival.take_off_angle = pick.incident
        # arrival.distance = hyp_dist
        arrival = __toArrival(parser, pick_el, global_evaluation_mode, pick)
        if event.origins:
            event.origins[0].arrivals.append(arrival)

    # Parse the station magnitudes.
    for stat_magnitude_el in parser.xpath("stationMagnitude"):
        stat_magnitude = __toStationMagnitude(parser, stat_magnitude_el)
        event.station_magnitudes.append(stat_magnitude)

    # Parse the amplitudes
    # we don't reference their id in the corresponding station magnitude,
    # because we use one amplitude measurement for each component
    for el in parser.xpath("stationMagnitude/amplitude"):
        event.amplitudes.append(__toAmplitude(parser, el))

    # All station magnitudes reference the single origin.  (The original
    # code ran this loop a second time *before* any station magnitude had
    # been parsed; that no-op duplicate has been removed.)
    for mag in event.station_magnitudes:
        mag.origin_id = event.origins[0].resource_id

    for _i, stat_mag in enumerate(event.station_magnitudes):
        contrib = StationMagnitudeContribution()
        # The order of station magnitude objects is the same as in the xml
        # file.
        weight = parser.xpath2obj("weight",
                                  parser.xpath("stationMagnitude")[_i], float)
        if weight is not None:
            contrib.weight = weight
        contrib.station_magnitude_id = stat_mag.resource_id
        event.magnitudes[0].station_magnitude_contributions.append(contrib)

    for foc_mec_el in parser.xpath("focalMechanism"):
        foc_mec = __toFocalMechanism(parser, foc_mec_el)
        if foc_mec is not None:
            event.focal_mechanisms.append(foc_mec)

    # Set the origin id for the focal mechanisms. There is only one origin per
    # SeisHub event file.
    for focmec in event.focal_mechanisms:
        focmec.triggering_origin_id = event.origins[0].resource_id

    # Add the event to the catalog
    catalog.append(event)

    return catalog
Exemplo n.º 11
0
def iris2quakeml(url, output_folder=None):
    if not "/spudservice/" in url:
        url = url.replace("/spud/", "/spudservice/")
        if url.endswith("/"):
            url += "quakeml"
        else:
            url += "/quakeml"
    print "Downloading %s..." % url
    r = requests.get(url)
    if r.status_code != 200:
        msg = "Error Downloading file!"
        raise Exception(msg)

    # For some reason the quakeml file is escaped HTML.
    h = HTMLParser.HTMLParser()

    data = h.unescape(r.content)

    # Replace some XML tags.
    data = data.replace("long-period body waves", "body waves")
    data = data.replace("intermediate-period surface waves", "surface waves")
    data = data.replace("long-period mantle waves", "mantle waves")

    data = data.replace("<html><body><pre>", "")
    data = data.replace("</pre></body></html>", "")

    # Change the resource identifiers. Colons are not allowed in QuakeML.
    pattern = r"(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})\.(\d{6})"
    data = re.sub(pattern, r"\1-\2-\3T\4-\5-\6.\7", data)

    data = StringIO(data)

    try:
        cat = readEvents(data)
    except:
        msg = "Could not read downloaded event data"
        raise ValueError(msg)

    # Parse the event, and use only one origin, magnitude and focal mechanism.
    # Only the first event is used. Should not be a problem for the chosen
    # global cmt application.
    ev = cat[0]

    if ev.preferred_origin():
        ev.origins = [ev.preferred_origin()]
    else:
        ev.origins = [ev.origins[0]]
    if ev.preferred_focal_mechanism():
        ev.focal_mechanisms = [ev.preferred_focal_mechanism()]
    else:
        ev.focal_mechanisms = [ev.focal_mechanisms[0]]

    try:
        mt = ev.focal_mechanisms[0].moment_tensor
    except:
        msg = "No moment tensor found in file."
        raise ValueError
    seismic_moment_in_dyn_cm = mt.scalar_moment
    if not seismic_moment_in_dyn_cm:
        msg = "No scalar moment found in file."
        raise ValueError(msg)

    # Create a new magnitude object with the moment magnitude calculated from
    # the given seismic moment.
    mag = Magnitude()
    mag.magnitude_type = "Mw"
    mag.origin_id = ev.origins[0].resource_id
    # This is the formula given on the GCMT homepage.
    mag.mag = (2.0 / 3.0) * (math.log10(seismic_moment_in_dyn_cm) - 16.1)
    mag.resource_id = ev.origins[0].resource_id.resource_id.replace(
        "Origin", "Magnitude")
    ev.magnitudes = [mag]
    ev.preferred_magnitude_id = mag.resource_id

    # Convert the depth to meters.
    org = ev.origins[0]
    org.depth *= 1000.0
    if org.depth_errors.uncertainty:
        org.depth_errors.uncertainty *= 1000.0

    # Ugly asserts -- this is just a simple script.
    assert (len(ev.magnitudes) == 1)
    assert (len(ev.origins) == 1)
    assert (len(ev.focal_mechanisms) == 1)

    # All values given in the QuakeML file are given in dyne * cm. Convert them
    # to N * m.
    for key, value in mt.tensor.iteritems():
        if key.startswith("m_") and len(key) == 4:
            mt.tensor[key] /= 1E7
        if key.endswith("_errors") and hasattr(value, "uncertainty"):
            mt.tensor[key].uncertainty /= 1E7
    mt.scalar_moment /= 1E7
    if mt.scalar_moment_errors.uncertainty:
        mt.scalar_moment_errors.uncertainty /= 1E7
    p_axes = ev.focal_mechanisms[0].principal_axes
    for ax in [p_axes.t_axis, p_axes.p_axis, p_axes.n_axis]:
        if ax is None or not ax.length:
            continue
        ax.length /= 1E7

    # Check if it has a source time function
    stf = mt.source_time_function
    if stf:
        if stf.type != "triangle":
            msg = ("Source time function type '%s' not yet mapped. Please "
                   "contact the developers.") % stf.type
            raise NotImplementedError(msg)
        if not stf.duration:
            if not stf.decay_time:
                msg = "Not known how to derive duration without decay time."
                raise NotImplementedError(msg)
            # Approximate the duraction for triangular STF.
            stf.duration = 2 * stf.decay_time

    # Get the flinn_engdahl region for a nice name.
    fe = FlinnEngdahl()
    region_name = fe.get_region(ev.origins[0].longitude,
                                ev.origins[0].latitude)
    region_name = region_name.replace(" ", "_")
    event_name = "GCMT_event_%s_Mag_%.1f_%s-%s-%s-%s-%s.xml" % \
        (region_name, ev.magnitudes[0].mag, ev.origins[0].time.year,
        ev.origins[0].time.month, ev.origins[0].time.day,
        ev.origins[0].time.hour, ev.origins[0].time.minute)

    # Check if the ids of the magnitude and origin contain the corresponding
    # tag. Otherwise replace tme.
    ev.origins[0].resource_id = ev.origins[0].resource_id.resource_id.replace(
        "quakeml/gcmtid", "quakeml/origin/gcmtid")
    ev.magnitudes[0].resource_id = \
        ev.magnitudes[0].resource_id.resource_id.replace(
            "quakeml/gcmtid", "quakeml/magnitude/gcmtid")

    # Fix up the moment tensor resource_ids.
    mt.derived_origin_id = ev.origins[0].resource_id
    mt.resource_id = mt.resource_id.resource_id.replace(
        "focalmechanism", "momenttensor")

    cat = Catalog()
    cat.resource_id = ev.origins[0].resource_id.resource_id.replace(
        "origin", "event_parameters")
    cat.append(ev)
    if output_folder:
        event_name = os.path.join(output_folder, event_name)
    cat.write(event_name, format="quakeml", validate=True)
    print "Written file", event_name