Example #1
def remove_instrument_response(tr,
                               pre_filt=(0.01, 0.02, 50, 100),
                               detrend=True,
                               taper=True,
                               dataless_file=None):

    if not dataless_file:
        PYGEMA_PATH = "%s/pygema" % (site.getsitepackages()[0])
        dataless_file = glob.glob(
            "%s/src/dataless/%s_%s.dataless" %
            (PYGEMA_PATH, tr.stats.network, tr.stats.station))[0]

    if detrend:
        tr.detrend('demean')
        tr.detrend('linear')

    if taper:
        tr.taper(max_percentage=0.005, type='hann')

    parser = Parser(dataless_file)
    paz = parser.get_paz(tr.id)
    tr.simulate(paz_remove=paz,
                pre_filt=pre_filt,
                paz_simulate=None,
                remove_sensitivity=True)

    return tr
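A minimal usage sketch for the function above; the file names are assumptions, not part of the original code.

from obspy import read

st = read("example.mseed")  # hypothetical input file
tr = remove_instrument_response(st[0], dataless_file="BW_MANZ.dataless")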
Example #2
    def test_evalresp_with_output_from_seed(self):
        """
        The StationXML file has been converted to SEED with the help of a tool
        provided by IRIS:

        https://seiscode.iris.washington.edu/projects/stationxml-converter
        """
        t_samp = 0.05
        nfft = 16384

        # Test for different output units.
        units = ["DISP", "VEL", "ACC"]
        filenames = ["IRIS_single_channel_with_response", "XM.05", "AU.MEEK"]

        for filename in filenames:
            xml_filename = os.path.join(self.data_dir,
                                        filename + os.path.extsep + "xml")
            seed_filename = os.path.join(self.data_dir,
                                         filename + os.path.extsep + "seed")

            p = Parser(seed_filename)

            # older systems don't like an end date in the year 2599
            t_ = UTCDateTime(2030, 1, 1)
            if p.blockettes[50][0].end_effective_date > t_:
                p.blockettes[50][0].end_effective_date = None
            if p.blockettes[52][0].end_date > t_:
                p.blockettes[52][0].end_date = None

            resp_filename = p.get_resp()[0][-1]

            inv = read_inventory(xml_filename)

            network = inv[0].code
            station = inv[0][0].code
            location = inv[0][0][0].location_code
            channel = inv[0][0][0].code
            date = inv[0][0][0].start_date

            for unit in units:
                resp_filename.seek(0, 0)

                seed_response, seed_freq = evalresp(t_samp,
                                                    nfft,
                                                    resp_filename,
                                                    date=date,
                                                    station=station,
                                                    channel=channel,
                                                    network=network,
                                                    locid=location,
                                                    units=unit,
                                                    freq=True)

                xml_response, xml_freq = \
                    inv[0][0][0].response.get_evalresp_response(t_samp, nfft,
                                                                output=unit)

                self.assertTrue(np.allclose(seed_freq, xml_freq, rtol=1E-5))
                self.assertTrue(
                    np.allclose(seed_response, xml_response, rtol=1E-5))
Example #3
    def get_paz(self, seed_id, datetime):
        """
        Get PAZ for a station at given time span. Gain is the A0 normalization
        constant for the poles and zeros.

        :type seed_id: str
        :param seed_id: SEED or channel id, e.g. ``"BW.RJOB..EHZ"`` or
            ``"EHE"``.
        :type datetime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param datetime: Time for which the PAZ is requested,
            e.g. ``'2010-01-01 12:00:00'``.
        :rtype: dict
        :return: Dictionary containing zeros, poles, gain and sensitivity.

        .. rubric:: Example

        >>> c = Client(timeout=20)
        >>> paz = c.station.get_paz('BW.MANZ..EHZ', '20090707')
        >>> paz['zeros']
        [0j, 0j]
        >>> len(paz['poles'])
        5
        >>> print(paz['poles'][0])
        (-0.037004+0.037016j)
        >>> paz['gain']
        60077000.0
        >>> paz['sensitivity']
        2516800000.0
        """
        # try to read PAZ from previously obtained XSEED data
        for res in self.client.xml_seeds.get(seed_id, []):
            parser = Parser(res)
            try:
                paz = parser.get_paz(seed_id=seed_id,
                                     datetime=UTCDateTime(datetime))
                return paz
            except Exception:
                continue
        network, station, location, channel = seed_id.split(".")
        # request station information
        station_list = self.get_list(network=network, station=station,
                                     datetime=datetime)
        if not station_list:
            return {}
        # don't allow wild cards
        for wildcard in ["*", "?"]:
            if wildcard in seed_id:
                msg = "Wildcards in seed_id are not allowed."
                raise ValueError(msg)

        if len(station_list) > 1:
            warnings.warn("Received more than one XSEED file. Using first.")

        xml_doc = station_list[0]
        res = self.client.station.get_resource(xml_doc["resource_name"])
        reslist = self.client.xml_seeds.setdefault(seed_id, [])
        if res not in reslist:
            reslist.append(res)
        parser = Parser(res)
        paz = parser.get_paz(seed_id=seed_id, datetime=UTCDateTime(datetime))
        return paz
Example #4
def get_local_magnitude(st,
                        stations,
                        stlons,
                        stlats,
                        evtime,
                        evlon,
                        evlat,
                        evdep,
                        freqmin=1,
                        freqmax=10,
                        max_epicenter_dist=100):

    allmags = []
    for station in stations:
        st1 = st.copy().select(station=station)
        for tr in st1:
            try:
                tr.detrend('demean')
                tr.detrend('linear')
                tr.taper(max_percentage=0.005, type='hann')
                tr.filter("bandpass",
                          freqmin=freqmin,
                          freqmax=freqmax,
                          corners=2)

                amp_min = tr.data.min()
                amp_max = tr.data.max()
                ind1 = np.where(amp_min == tr.data)[0][0]
                ind2 = np.where(amp_max == tr.data)[0][0]
                amplitude = abs(amp_max) + abs(amp_min)
                timespan = tr.times("utcdatetime")[ind2] - tr.times(
                    "utcdatetime")[ind1]

                ind = np.where(station == stations)[0][0]
                h_dist = calc_vincenty_inverse(evlat, evlon, stlats[ind],
                                               stlons[ind])[0] / 1000.

                PYGEMA_PATH = "%s/pygema" % (site.getsitepackages()[0])
                dataless_file = glob.glob(
                    "%s/src/dataless/%s_%s.dataless" %
                    (PYGEMA_PATH, tr.stats.network, tr.stats.station))[0]
                parser = Parser(dataless_file)
                paz = parser.get_paz(tr.id)

                mag = estimate_magnitude(paz, amplitude, timespan, h_dist)
                if h_dist <= max_epicenter_dist:
                    allmags.append(mag)

            except Exception:
                continue

    # calculate mean of magnitudes
    if len(allmags) > 0:
        evmag = np.nanmean(allmags)
    else:
        evmag = 0.0

    return evmag
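A hedged usage sketch for get_local_magnitude; the station names, coordinates, and event parameters below are made up for illustration.

import numpy as np
from obspy import read, UTCDateTime

st = read("event.mseed")  # hypothetical waveform file
stations = np.array(["STA1", "STA2"])  # assumed station codes
stlons = np.array([12.11, 12.80])
stlats = np.array([49.99, 47.74])
evtime = UTCDateTime("2020-01-01T00:00:00")
evmag = get_local_magnitude(st, stations, stlons, stlats, evtime,
                            evlon=12.5, evlat=48.5, evdep=10.0)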
Example #5
    def test_evalresp_with_output_from_seed(self):
        """
        The StationXML file has been converted to SEED with the help of a tool
        provided by IRIS:

        https://seiscode.iris.washington.edu/projects/stationxml-converter
        """
        t_samp = 0.05
        nfft = 16384

        # Test for different output units.
        units = ["DISP", "VEL", "ACC"]
        filenames = ["IRIS_single_channel_with_response", "XM.05", "AU.MEEK"]

        for filename in filenames:
            xml_filename = os.path.join(self.data_dir,
                                        filename + os.path.extsep + "xml")
            seed_filename = os.path.join(self.data_dir,
                                         filename + os.path.extsep + "seed")

            p = Parser(seed_filename)

            # older systems don't like an end date in the year 2599
            t_ = UTCDateTime(2030, 1, 1)
            if p.blockettes[50][0].end_effective_date > t_:
                p.blockettes[50][0].end_effective_date = None
            if p.blockettes[52][0].end_date > t_:
                p.blockettes[52][0].end_date = None

            resp_filename = p.get_resp()[0][-1]

            inv = read_inventory(xml_filename)

            network = inv[0].code
            station = inv[0][0].code
            location = inv[0][0][0].location_code
            channel = inv[0][0][0].code
            date = inv[0][0][0].start_date

            for unit in units:
                resp_filename.seek(0, 0)

                seed_response, seed_freq = evalresp(
                    t_samp, nfft, resp_filename, date=date, station=station,
                    channel=channel, network=network, locid=location,
                    units=unit, freq=True)

                xml_response, xml_freq = \
                    inv[0][0][0].response.get_evalresp_response(t_samp, nfft,
                                                                output=unit)

                self.assertTrue(np.allclose(seed_freq, xml_freq, rtol=1E-5))
                self.assertTrue(np.allclose(seed_response, xml_response,
                                            rtol=1E-5))
Example #6
 def from_dataless(cls, file_path):
     parser = Parser()
     parser.read(file_path)
     # inventory = parser.get_inventory()
     station_blk = parser.stations[0][0]
     station_dict = {
         "Name": station_blk.station_call_letters,
         "Lon": station_blk.longitude,
         "Lat": station_blk.latitude,
         "Depth": station_blk.elevation
     }
     return cls(**station_dict)
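An assumed usage of the classmethod above; the class name StationsStats and the to_dict call are taken from the test in Example #7, while the file name is hypothetical.

station_stats = StationsStats.from_dataless("datalessOBS01.dlsv")
print(station_stats.to_dict())  # {'Name': ..., 'Lon': ..., 'Lat': ..., 'Depth': ...}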
Example #7
 def test_dataless(self):
     dir_path = os.path.join(ROOT_DIR, "260", "dataless",
                             "datalessOBS01.dlsv")
     parser = Parser()
     parser.read(dir_path)
     station_blk = parser.stations[0][0]
     station_dict = {
         "Name": station_blk.station_call_letters,
         "Lon": station_blk.longitude,
         "Lat": station_blk.latitude,
         "Depth": station_blk.elevation
     }
     station_stats = StationsStats.from_dataless(dir_path)
     self.assertDictEqual(station_dict, station_stats.to_dict())
Example #8
def read_SEED(SEED):
    dataless = Parser(SEED)
    inv = dataless.get_inventory()
    chn = list(inv['channels'])
    for i in range(len(chn)):
        channel_id = chn[i]['channel_id']
        start_date = chn[i]['start_date']
        end_date = chn[i]['end_date']
        print("Channel %s: \t %s \t %s \t %s" % (i, channel_id, start_date,
                                                 end_date))
    n = int(input("Enter the number of the channel you want to test\n"))
    channel_id = chn[n]['channel_id']
    start_date = chn[n]['start_date']
    PAZ = dataless.get_paz(seed_id=channel_id, datetime=start_date)
    label = ".".join((channel_id.split('.')[1], channel_id.split('.')[3]))
    print(label)
    poles, zeros, Sd = PAZ['poles'], PAZ['zeros'], PAZ['sensitivity']
    return poles, zeros, Sd, label
Example #9
def dataless2xseed(indir, outdir):
    """
    Function for taking a directory of dataless files, parsing them with obspy.io.xseed and writing to file
    :type indir: str
    :param indir: Input directory with wildcards
    :type outdir: str
    :param outdir: Output directory for xseed
    :return: None
    """
    from glob import glob
    from obspy.io.xseed import Parser
    files = glob(indir)
    for filename in files:
        sp = Parser(filename)
        sp.write_xseed('%s%s.xseed'
                       % (outdir, filename.split('/')[-1].strip()))
    return
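A hedged usage example; the paths are assumptions. Note that outdir needs a trailing slash, since the output name is built by plain string concatenation.

dataless2xseed('/data/dataless/*.dataless', '/data/xseed/')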
Example #10
def _read_metadata(path):
    if path is None:
        return None
    logger.info('Reading station metadata...')
    metadata = None

    # Try to read the file as StationXML
    if os.path.isdir(path):
        _path = os.path.join(path, '*')
    else:
        _path = path
    try:
        metadata = read_inventory(_path)
    except IOError as err:
        logger.error(err)
        ssp_exit()
    except Exception:
        pass

    if metadata is None:
        metadata = dict()
        if os.path.isdir(path):
            listing = os.listdir(path)
            for filename in listing:
                fullpath = os.path.join(path, filename)
                try:
                    metadata[filename] = Parser(fullpath)
                except Exception:
                    continue
            # TODO: manage the case in which "path" is a file name
    logger.info('Reading station metadata: done')
    return metadata
Example #11
def _read_dataless(path):
    if path is None:
        return None

    # Try to read the file as StationXML
    if not os.path.isdir(path):
        try:
            inv = read_inventory(path)
            return inv
        except TypeError:
            pass
        except IOError as err:
            logger.error(err)
            ssp_exit()

    logger.info('Reading dataless...')
    dataless = dict()
    if os.path.isdir(path):
        listing = os.listdir(path)
        for filename in listing:
            fullpath = os.path.join(path, filename)
            try:
                dataless[filename] = Parser(fullpath)
            except Exception:
                continue
        # TODO: manage the case in which "path" is a file name
    logger.info('Reading dataless: done')
    return dataless
Example #12
def get_data_OVPF(cfg, starttime, window_length, inv):

    time2 = time.time()
    print("Configure parser")
    parser = Parser(cfg.BOR_response_fname)

    time3 = time.time()
    st = Stream()
    for sta in cfg.station_names:
        print("Getting waveform for %s" % (sta))
        if sta == 'BOR':
            st_tmp = io.get_waveform_data(starttime,
                                          window_length,
                                          'PF',
                                          sta,
                                          'EHZ',
                                          parser,
                                          simulate=True)
        else:
            st_tmp = io.get_waveform_data(starttime, window_length, 'PF', sta,
                                          'HHZ', inv)

        if st_tmp is not None:

            st += st_tmp
    time4 = time.time()
    print("Time for configuring parser %0.2f" % (time3 - time2))
    print("Time for getting waveforms %0.2f" % (time4 - time3))
    return st
Example #13
    def _extract_index_values_seed(filename):
        """
        Reads SEED files and extracts some keys per channel.
        """
        try:
            p = Parser(filename)
        except Exception:
            msg = "Not a valid SEED file?"
            raise StationCacheError(msg)
        channels = p.get_inventory()["channels"]

        channels = [[
            _i["channel_id"], int(_i["start_date"].timestamp),
            int(_i["end_date"].timestamp) if _i["end_date"] else None,
            _i["latitude"], _i["longitude"], _i["elevation_in_m"],
            _i["local_depth_in_m"]] for _i in channels]

        return channels
Example #14
def read_rm_resp(mseedpath, dlseedpath, resp='ACC'):

    st = ob.read(mseedpath)

    #---------- Remove instrument response -----------------------
    parser = Parser(dlseedpath)

    coords = parser.get_coordinates(st[0].stats.channel)
    paz = parser.get_paz(st[0].stats.channel)

    # BDS PAZ are already in DISP
    prefilt = (0.01, 0.02, 35, 40)

    st.simulate(paz_remove=paz, zero_mean=True, pre_filt=prefilt)

    if resp == 'ACC':
        st.differentiate()
        st.differentiate()
    elif resp == 'VEL':
        st.differentiate()

    return st
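A hedged usage example for read_rm_resp; the file names are assumptions.

# The PAZ deconvolution yields displacement; resp='ACC' then differentiates
# twice to return acceleration traces.
st_acc = read_rm_resp("station.mseed", "station.dataless", resp='ACC')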
Example #15
 def test_getSNCL_get_sncls_parser(self):
     test_dataless = Parser('/APPS/metadata/SEED/CU.dataless')
     test_day = UTCDateTime('2015001')
     test_network = 'CU'
     test_case = [u'CU.ANWB.00.BH1', u'CU.ANWB.00.BH2', u'CU.ANWB.00.BHZ',
                  u'CU.ANWB.20.HN1', u'CU.ANWB.20.HN2', u'CU.ANWB.20.HNZ',
                  u'CU.ANWB.00.LH1', u'CU.ANWB.00.LH2', u'CU.ANWB.00.LHZ',
                  u'CU.ANWB.20.LN1', u'CU.ANWB.20.LN2', u'CU.ANWB.20.LNZ',
                  u'CU.ANWB.00.VMU', u'CU.ANWB.00.VMV', u'CU.ANWB.00.VMW',
                  u'CU.BBGH.00.BH1', u'CU.BBGH.00.BH2', u'CU.BBGH.00.BHZ',
                  u'CU.BBGH.20.HN1', u'CU.BBGH.20.HN2', u'CU.BBGH.20.HNZ',
                  u'CU.BBGH.00.LH1', u'CU.BBGH.00.LH2', u'CU.BBGH.00.LHZ',
                  u'CU.BBGH.20.LN1', u'CU.BBGH.20.LN2', u'CU.BBGH.20.LNZ',
                  u'CU.BBGH.00.VMU', u'CU.BBGH.00.VMV', u'CU.BBGH.00.VMW',
                  u'CU.BCIP.00.BH1', u'CU.BCIP.00.BH2', u'CU.BCIP.00.BHZ',
                  u'CU.BCIP.20.HN1', u'CU.BCIP.20.HN2', u'CU.BCIP.20.HNZ',
                  u'CU.BCIP.00.LH1', u'CU.BCIP.00.LH2', u'CU.BCIP.00.LHZ',
                  u'CU.BCIP.20.LN1', u'CU.BCIP.20.LN2', u'CU.BCIP.20.LNZ',
                  u'CU.BCIP.00.VMU', u'CU.BCIP.00.VMV', u'CU.BCIP.00.VMW',
                  u'CU.GRGR.00.BH1', u'CU.GRGR.00.BH2', u'CU.GRGR.00.BHZ',
                  u'CU.GRGR.20.HN1', u'CU.GRGR.20.HN2', u'CU.GRGR.20.HNZ',
                  u'CU.GRGR.00.LH1', u'CU.GRGR.00.LH2', u'CU.GRGR.00.LHZ',
                  u'CU.GRGR.20.LN1', u'CU.GRGR.20.LN2', u'CU.GRGR.20.LNZ',
                  u'CU.GRGR.00.VMU', u'CU.GRGR.00.VMV', u'CU.GRGR.00.VMW',
                  u'CU.GRTK.00.BH1', u'CU.GRTK.00.BH2', u'CU.GRTK.00.BHZ',
                  u'CU.GRTK.20.HN1', u'CU.GRTK.20.HN2', u'CU.GRTK.20.HNZ',
                  u'CU.GRTK.00.LH1', u'CU.GRTK.00.LH2', u'CU.GRTK.00.LHZ',
                  u'CU.GRTK.20.LN1', u'CU.GRTK.20.LN2', u'CU.GRTK.20.LNZ',
                  u'CU.GRTK.00.VMU', u'CU.GRTK.00.VMV', u'CU.GRTK.00.VMW',
                  u'CU.GTBY.00.BH1', u'CU.GTBY.00.BH2', u'CU.GTBY.00.BHZ',
                  u'CU.GTBY.20.HN1', u'CU.GTBY.20.HN2', u'CU.GTBY.20.HNZ',
                  u'CU.GTBY.00.LH1', u'CU.GTBY.00.LH2', u'CU.GTBY.00.LHZ',
                  u'CU.GTBY.20.LN1', u'CU.GTBY.20.LN2', u'CU.GTBY.20.LNZ',
                  u'CU.GTBY.00.VMU', u'CU.GTBY.00.VMV', u'CU.GTBY.00.VMW',
                  u'CU.MTDJ.00.BH1', u'CU.MTDJ.00.BH2', u'CU.MTDJ.00.BHZ',
                  u'CU.MTDJ.20.HN1', u'CU.MTDJ.20.HN2', u'CU.MTDJ.20.HNZ',
                  u'CU.MTDJ.00.LH1', u'CU.MTDJ.00.LH2', u'CU.MTDJ.00.LHZ',
                  u'CU.MTDJ.20.LN1', u'CU.MTDJ.20.LN2', u'CU.MTDJ.20.LNZ',
                  u'CU.MTDJ.00.VMU', u'CU.MTDJ.00.VMV', u'CU.MTDJ.00.VMW',
                  u'CU.SDDR.00.BH1', u'CU.SDDR.00.BH2', u'CU.SDDR.00.BHZ',
                  u'CU.SDDR.20.HN1', u'CU.SDDR.20.HN2', u'CU.SDDR.20.HNZ',
                  u'CU.SDDR.00.LH1', u'CU.SDDR.00.LH2', u'CU.SDDR.00.LHZ',
                  u'CU.SDDR.20.LN1', u'CU.SDDR.20.LN2', u'CU.SDDR.20.LNZ',
                  u'CU.SDDR.00.VMU', u'CU.SDDR.00.VMV', u'CU.SDDR.00.VMW',
                  u'CU.TGUH.00.BH1', u'CU.TGUH.00.BH2', u'CU.TGUH.00.BHZ',
                  u'CU.TGUH.20.HN1', u'CU.TGUH.20.HN2', u'CU.TGUH.20.HNZ',
                  u'CU.TGUH.00.LH1', u'CU.TGUH.00.LH2', u'CU.TGUH.00.LHZ',
                  u'CU.TGUH.20.LN1', u'CU.TGUH.20.LN2', u'CU.TGUH.20.LNZ',
                  u'CU.TGUH.00.VMU', u'CU.TGUH.00.VMV', u'CU.TGUH.00.VMW']
     self.assertEqual(getSNCL.get_sncls_parser(test_dataless, test_day,
                                               test_network), test_case)
Example #16
def apply_response(foname,
                   dtless_name,
                   units="VEL",
                   plot=False,
                   filt=True,
                   path="./"):
    """Create xml seed response file and apply response from dataless
    :type foname: str
    :param foname: String containing a waveform file name
    :type dtless_name: str
    :param dtless_name: String containing a dataless file name
    :type units: str
    :param units: String containing units. Can be: DIS, VEL, ACC
    :type plot: bool
    :param plot: Define if waveform is plotted
    :type filt: bool
    :param filt: Define if waveform is filtered
    """
    print("Selected options were:")
    print(foname, dtless_name, units, plot, filt, path)
    st = read(foname)
    parser = Parser(dtless_name)
    xml_name = dtless_name + ".xml"
    parser.write_xseed(xml_name)
    inv = read_inventory(xml_name)

    pre_filt = (0.005, 0.006, 30.0, 35.0)
    if not filt:
        pre_filt = None

    print("Converting from counts to: " + units)
    st.remove_response(inventory=inv,
                       output=units,
                       pre_filt=pre_filt,
                       plot=plot)
    if plot:
        pl.show()

    st.write(path + foname + "_" + units, format="mseed")
    print("\n\n\tOutput file: " + foname + "_" + units + "\n")
Example #17
def test_channel_in_parser():
    """
    Tests if a given channel is part of a Parser object.
    """
    starttime = UTCDateTime(2007, 2, 12, 10, 30, 28, 197700)
    endtime = UTCDateTime(2007, 2, 12, 11, 35, 28, 197700)
    channel_id = "ES.ECAL..HHE"
    # An empty file should of course not contain much.
    parser_object = Parser(
        os.path.join(data_dir, "station_files", "seed",
                     "channelless_datalessSEED"))
    assert utils.channel_in_parser(parser_object, channel_id, starttime,
                                   endtime) is False
    # Now read a file that actually contains data.
    channel_id = "IU.PAB.00.BHE"
    starttime = UTCDateTime(1999, 2, 18, 10, 0)
    endtime = UTCDateTime(2009, 8, 13, 19, 0)
    parser_object = Parser(
        os.path.join(data_dir, "station_files", "seed", "dataless.IU_PAB"))
    # This is an exact fit of the start and end times in this file.
    assert utils.channel_in_parser(parser_object, channel_id, starttime,
                                   endtime) is True
    # Now try some others that do not fit.
    assert utils.channel_in_parser(parser_object, channel_id, starttime - 1,
                                   endtime) is False
    assert utils.channel_in_parser(parser_object, channel_id, starttime,
                                   endtime + 1) is False
    assert utils.channel_in_parser(parser_object, channel_id + "x", starttime,
                                   endtime) is False
    assert utils.channel_in_parser(parser_object, channel_id, starttime - 200,
                                   starttime - 100) is False
    assert utils.channel_in_parser(parser_object, channel_id, endtime + 100,
                                   endtime + 200) is False
    # And some that do fit.
    assert utils.channel_in_parser(parser_object, channel_id, starttime,
                                   starttime + 10) is True
    assert utils.channel_in_parser(parser_object, channel_id, endtime - 100,
                                   endtime) is True
Example #18
def dataless_parser(dfiles, subset, debug=0):
    print('Using:', subset, 'please')
    inv = Inventory()
    for i in dfiles:  # loop through dataless files
        p = Parser()
        if debug:
            print('reading', i)
        try:
            p.read(i)
        except Exception as e:
            print(e)
        invtmp = d2inv(p)
        if len(invtmp._networks) > 1:
            print('More than 1 net in: ', i, len(invtmp._networks))
        else:
            nettmp = invtmp._networks[0].select(location='0K')
        if len(inv) == 0:
            inv._networks.append(nettmp)
        else:
            sta = nettmp[0].select(location='01')
            inv._networks[0]._stations.append(sta)
    return inv
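A hedged usage sketch; it assumes the helper d2inv is importable and some .dataless files are present in the working directory.

from glob import glob

inv = dataless_parser(glob('*.dataless'), subset='0K', debug=1)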
Example #19
    def test_PPSD_w_IRIS_against_obspy_results(self):
        """
        Test against results obtained after merging of #1108.
        """
        # Read in ANMO data for one day
        st = read(os.path.join(self.path, 'IUANMO.seed'))

        # Read in metadata in various different formats
        paz = {'gain': 86298.5,
               'zeros': [0, 0],
               'poles': [-59.4313, -22.7121 + 27.1065j, -22.7121 - 27.1065j,
                         -0.0048004, -0.073199],
               'sensitivity': 3.3554 * 10**9}
        resp = os.path.join(self.path, 'IUANMO.resp')
        parser = Parser(os.path.join(self.path, 'IUANMO.dataless'))
        inv = read_inventory(os.path.join(self.path, 'IUANMO.xml'))

        # load expected results, for both only PAZ and full response
        results_paz = np.load(os.path.join(self.path, 'IUANMO_ppsd_paz.npz'))
        results_full = np.load(
            os.path.join(self.path, 'IUANMO_ppsd_fullresponse.npz'))
        arrays_to_check = ['hist_stack', 'spec_bins', 'period_bins']

        # Calculate the PPSDs and test against expected results
        # first: only PAZ
        ppsd = PPSD(st[0].stats, paz)
        ppsd.add(st)
        for key in arrays_to_check:
            self.assertTrue(
                np.allclose(getattr(ppsd, key), results_paz[key], rtol=1e-5))
        # second: various methods for full response
        # (also test various means of initialization, basically testing the
        #  decorator that maps the deprecated keywords)
        for metadata in [parser, inv, resp]:
            ppsd = PPSD(st[0].stats, paz=metadata)
            ppsd = PPSD(st[0].stats, parser=metadata)
            ppsd = PPSD(st[0].stats, metadata)
            ppsd.add(st)
            for key in arrays_to_check:
                self.assertTrue(
                    np.allclose(getattr(ppsd, key),
                                results_full[key],
                                rtol=1e-5))
Example #20
    def get_paz(self, seed_id, datetime):
        """
        Get PAZ for a station at given time span. Gain is the A0 normalization
        constant for the poles and zeros.

        :type seed_id: str
        :param seed_id: SEED or channel id, e.g. ``"BW.RJOB..EHZ"`` or
            ``"EHE"``.
        :type datetime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param datetime: Time for which the PAZ is requested,
            e.g. ``'2010-01-01 12:00:00'``.
        :rtype: dict
        :return: Dictionary containing zeros, poles, gain and sensitivity.
        """
        # try to read PAZ from previously obtained XSEED data
        for res in self.client.xml_seeds.get(seed_id, []):
            parser = Parser(res)
            try:
                paz = parser.get_paz(seed_id=seed_id,
                                     datetime=UTCDateTime(datetime))
                return paz
            except Exception:
                continue
        network, station, location, channel = seed_id.split(".")
        # request station information
        station_list = self.get_list(network=network,
                                     station=station,
                                     datetime=datetime)
        if not station_list:
            return {}
        # don't allow wild cards
        for wildcard in ['*', '?']:
            if wildcard in seed_id:
                msg = "Wildcards in seed_id are not allowed."
                raise ValueError(msg)

        for xml_doc in station_list:
            res = self.client.station.get_resource(xml_doc['resource_name'])
            reslist = self.client.xml_seeds.setdefault(seed_id, [])
            if res not in reslist:
                reslist.append(res)
            parser = Parser(res)
            try:
                paz = parser.get_paz(seed_id=seed_id,
                                     datetime=UTCDateTime(datetime))
            except SEEDParserException as e:
                not_found_msg = 'No channel found with the given SEED id:'
                if str(e).startswith(not_found_msg):
                    continue
                raise
            break
        else:
            msg = 'No channel found with the given SEED id: %s' % seed_id
            raise SEEDParserException(msg)
        return paz
Example #21
def _colormap_plot_ppsd(cmaps):
    """
    Plot for illustrating colormaps: PPSD.

    :param cmaps: list of :class:`~matplotlib.colors.Colormap`
    :rtype: None
    """
    import matplotlib.pyplot as plt
    from obspy import read
    from obspy.signal import PPSD
    from obspy.io.xseed import Parser
    st = read("https://examples.obspy.org/BW.KW1..EHZ.D.2011.037")
    st += read("https://examples.obspy.org/BW.KW1..EHZ.D.2011.038")
    parser = Parser("https://examples.obspy.org/dataless.seed.BW_KW1")
    ppsd = PPSD(st[0].stats, metadata=parser)
    ppsd.add(st)

    for cmap in cmaps:
        ppsd.plot(cmap=cmap, show=False)
    plt.show()
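A hedged usage example; obspy_sequential is a colormap shipped with ObsPy.

from obspy.imaging.cm import obspy_sequential

_colormap_plot_ppsd([obspy_sequential])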
Example #22
def read_metadata(path_to_inventory):
    """
    Take path_to_inventory and return either the corresponding list of files
    found or the Parser object for a network dataless seed volume, to prevent
    read overhead for large dataless seed volumes.
    :param path_to_inventory:
    :return: tuple containing either a list of files or an
        `obspy.io.xseed.Parser` object, and the inventory type found
    :rtype: tuple
    """
    dlfile = list()
    invfile = list()
    respfile = list()
    # possible file extensions specified here:
    inv = dict(dless=dlfile, xml=invfile, resp=respfile, dseed=dlfile[:])
    if os.path.isfile(path_to_inventory):
        ext = os.path.splitext(path_to_inventory)[1].split('.')[1]
        inv[ext] += [path_to_inventory]
    else:
        for ext in inv.keys():
            inv[ext] += glob.glob1(path_to_inventory, '*.{0}'.format(ext))

    invtype = key_for_set_value(inv)

    if invtype is None:
        print("Neither dataless-SEED file, inventory-xml file nor "
              "RESP-file found!")
        print("!!WRONG CALCULATION OF SOURCE PARAMETERS!!")
        robj = None
    elif invtype == 'dless':  # prevent multiple read of large dlsv
        print("Reading metadata information from dataless-SEED file ...")
        if len(inv[invtype]) == 1:
            fullpath_inv = os.path.join(path_to_inventory, inv[invtype][0])
            robj = Parser(fullpath_inv)
        else:
            robj = inv[invtype]
    else:
        print("Reading metadata information from inventory-xml file ...")
        robj = inv[invtype]
    return invtype, robj
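A hedged usage example; the directory path is an assumption.

invtype, robj = read_metadata('/data/metadata')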
Example #23
def dataless2stationXml(datalessFileName, xmlFileName):
    # Read the dataless seed file
    sp = Parser(datalessFileName)

    # Collect all potential unit abbreviations
    units = {}
    #genAbbrev={}
    for entry in sp.abbreviations:
        if entry.name == 'Units Abbreviations':
            units[entry.unit_lookup_code] = entry.unit_name
    #    elif entry.name=='Generic Abbreviation':
    #        genAbbrev[entry.abbreviation_lookup_code]=entry.abbreviation_description

    # Make a look-up dictionary for the transfer functions
    transFuncs = {
        'A': 'LAPLACE (RADIANS/SECOND)',
        'B': 'ANALOG (HERTZ)',
        'C': 'COMPOSITE',
        'D': 'DIGITAL (Z-TRANSFORM)'
    }

    # Collect each of the stations objects
    stations = []
    staNetCodes = []
    for stationBlock in sp.stations:
        station, staNetCode = getStation(stationBlock, units, transFuncs)
        stations.append(station)
        staNetCodes.append(staNetCode)

    # For each of the unique network codes, collect the stations which relate to it
    networks = []
    staNetCodes = np.array(staNetCodes)
    unqNets = np.unique(staNetCodes)
    for aNet in unqNets:
        netStas = [stations[arg] for arg in np.where(staNetCodes == aNet)[0]]
        networks.append(Network(aNet, stations=netStas))

    # Finally turn this into an inventory and save
    inv = Inventory(networks, 'Lazylyst')
    inv.write(xmlFileName, format='stationxml', validate=True)
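A hedged usage example; the file names are assumptions.

dataless2stationXml('CU.dataless', 'CU.xml')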
Example #24
def get_dataless_block(metadata):
    #                               preload the dataless seed fields from the calibration
    p = Parser("C:/Pyscripts/dataless.pzcalc_template.seed")
    blk = p.blockettes
    #                               Basic changes to existing fields necessary to customize the dataless seed file
    blk[10][0].beginning_time = metadata['beginning_time']
    # These appear to be unused within pdcc
    blk[10][0].end_time = metadata['end_time']
    # This is overwritten by blk[50].network_code
    blk[11][0].station_identifier_code = metadata['network_id']
    blk[33][0].abbreviation_description = metadata['station_description']
    blk[33][1].abbreviation_description = metadata['instrument_description']
    # metadata['location_identifier']
    blk[50][0].network_code = metadata['network_code']
    blk[50][0].station_call_letters = metadata['station_code']
    blk[50][0].site_name = metadata['site_name']
    blk[50][0].latitude = metadata['latitude']
    blk[50][0].longitude = metadata['longitude']
    blk[50][0].elevation = metadata['elevation']
    blk[50][0].start_effective_date = UTCDateTime(
        metadata['start_effective_date'])
    blk[50][0].end_effective_date = metadata['end_effective_date']
    return blk
Example #25
def get_data_OVPF(cfg, starttime, window_length):

    inv = read_inventory(cfg.response_fname)
    parser = Parser(cfg.BOR_response_fname)

    st = Stream()
    for sta in cfg.station_names:
        if sta == 'BOR':
            st_tmp = io.get_waveform_data(starttime,
                                          window_length,
                                          'PF',
                                          sta,
                                          '??Z',
                                          parser,
                                          simulate=True)
        else:
            st_tmp = io.get_waveform_data(starttime, window_length, 'PF', sta,
                                          '??Z', inv)

        if st_tmp is not None:

            st += st_tmp

    return st
Example #26
zone_code = get_field_data(sf, 'CODE', 'str')

# parse catalogue
evdict = parse_usgs_events(usgscsv)

#a=b # kill

from obspy.io.xseed import Parser

# read dataless seed volumes
print('Reading dataless seed volumes...')
if getcwd().startswith('/nas'):

    au_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/AU/AU.IRIS.dataless'
    )
    s1_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/S1/S1.IRIS.dataless'
    )
    ge_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/GE/GE1.IRIS.dataless'
    )
    ge2_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/GE/GE1.IRIS.dataless'
    )
    iu_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/IU/IU.IRIS.dataless'
    )
    ii_parser = Parser(
        '/nas/active/ops/community_safety/ehp/georisk_earthquake/hazard/Networks/II/II.IRIS.dataless'
    )
                print(resppath)
        resp = evalresp(t_samp=tr.stats.delta,
                        nfft=NFFT,
                        filename=resppath,
                        date=tr.stats.starttime,
                        station=tr.stats.station,
                        channel=tr.stats.channel,
                        locid=tr.stats.location,
                        network=tr.stats.network,
                        units="ACC")

        st.filter('bandpass', freqmin=1. / Pmax, freqmax=1. / Pmin)
        st.taper(0.05)
        st.sort()

        sp = Parser()
        for tr in st.select(channel='LH*'):
            if net == 'XX':
                sp.read('/home/aalejandro/Pressure/RESP/RESP.' + net + '.' +
                        sta + '.' + loc + '.' + chan)
            else:
                sp.read('/APPS/metadata/RESPS/RESP.' + net + '.' + sta + '.' +
                        loc + '.' + chan)
        paz = sp.get_paz(net + '.' + sta + '.' + loc + '.' + chan, stime)

        ### Convolve Pressure Signal with Seismometer Response ###
        st.select(channel="LDO").simulate(paz_simulate=paz)
        st.normalize()

        ### Pressure Corrected Signal Function ###
        def presscorrt(x):
Example #28
 def test_checkMSD_process_day_net(self):
     day = '2015001'
     net = 'CU'
     dataless_location = '/APPS/metadata/SEED/%s.dataless' % net
     clients = {'NEIC': True, 'ASL': True}
     test_case = [{'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BBGH.00.BH1',
                   'MSD': 99.99997106481482, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.MTDJ.00.BH2',
                   'MSD': 99.99997106481482, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.GRTK.20.LN2',
                   'MSD': 99.9988425925926, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.GRTK.20.LNZ',
                   'MSD': 99.9988425925926, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.GRTK.20.LN1',
                   'MSD': 99.9988425925926, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.ANWB.20.HN1',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.ANWB.20.HN2',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.ANWB.20.HNZ',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BBGH.20.HN1',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BBGH.20.HN2',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BBGH.20.HNZ',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.00.BH1',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.00.BH2',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.00.BHZ',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.20.HN1',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.20.HN2',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.20.HNZ',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.00.LH1',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.00.LH2',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.00.LHZ',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.20.LN1',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.20.LN2',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.20.LNZ',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.00.VMU',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.00.VMV',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.BCIP.00.VMW',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.GRGR.20.HN1',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.GRGR.20.HN2',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.GRGR.20.HNZ',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.GRTK.20.HN1',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.GRTK.20.HN2',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.GRTK.20.HNZ',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.GTBY.20.HN1',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.GTBY.20.HN2',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.GTBY.20.HNZ',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.MTDJ.20.HN1',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.MTDJ.20.HN2',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.MTDJ.20.HNZ',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.SDDR.20.HN1',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.SDDR.20.HN2',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.SDDR.20.HNZ',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.TGUH.20.HN1',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.TGUH.20.HN2',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0},
                  {'NEIC': 0.0, 'Year': 2015, 'sncl': 'CU.TGUH.20.HNZ',
                   'MSD': 0.0, 'Day': 1, 'ASL': 0.0}]
     self.assertEqual(checkMSD.process_day_net(UTCDateTime(day), net,
                      Parser(dataless_location), clients), test_case)
Example #29
from __future__ import print_function

from math import log10

from obspy import UTCDateTime, read
from obspy.geodetics import gps2dist_azimuth
from obspy.io.xseed import Parser


st = read("../data/LKBD.MSEED")

paz_wa = {'sensitivity': 2800, 'zeros': [0j], 'gain': 1,
          'poles': [-6.2832 - 4.7124j, -6.2832 + 4.7124j]}

parser = Parser("../data/LKBD.dataless")
paz_le3d5s = parser.get_paz("CH.LKBD..EHZ")

st.simulate(paz_remove=paz_le3d5s, paz_simulate=paz_wa, water_level=10)

t = UTCDateTime("2012-04-03T02:45:03")
st.trim(t, t + 50)

tr_n = st.select(component="N")[0]
ampl_n = max(abs(tr_n.data))
tr_e = st.select(component="E")[0]
ampl_e = max(abs(tr_e.data))
ampl = max(ampl_n, ampl_e)

sta_lat = 46.38703
sta_lon = 7.62714
event_lat = 46.218
Example #30
import obspy
from obspy.signal import PPSD
from obspy.io.xseed import Parser


st = obspy.read("http://examples.obspy.org/BW.KW1..EHZ.D.2011.037")
tr = st.select(id="BW.KW1..EHZ")[0]
parser = Parser("http://examples.obspy.org/dataless.seed.BW_KW1")
paz = parser.get_paz(tr.id)
ppsd = PPSD(tr.stats, paz)
ppsd.add(st)

st = obspy.read("http://examples.obspy.org/BW.KW1..EHZ.D.2011.038")
ppsd.add(st)

ppsd.plot()
Example #31
    def test_response_calculation_from_seed_and_xseed(self):
        """
        Test the response calculations with the obspy.core interface.

        It does it by converting whatever it gets to RESP files and then
        uses evalresp to get the response. This is compared to using the
        ObsPy Response object - this also uses evalresp but the actual flow
        of the data is very different.

        This is an expensive test but worth it for the trust it builds and
        the bugs it found and prevents.
        """
        # Very broad range but the responses should be exactly identical as
        # they use the same code under the hood so it should prove no issue.
        frequencies = np.logspace(-3, 3, 20)

        for filename in self.seed_files + self.xseed_files:
            # Parse the files using the Parser object.
            with warnings.catch_warnings(record=True):
                p = Parser(filename)
                p_resp = {_i[0]: _i[1] for _i in p.get_resp()}
                # Also read using the core routines.
                inv = obspy.read_inventory(filename)

            # Get all the channels and epochs.
            channels = collections.defaultdict(list)
            for c in p.get_inventory()["channels"]:
                channels[c["channel_id"]].append(
                    (c["start_date"], c["end_date"]))

            # Loop over each.
            for channel, epochs in channels.items():
                with NamedTemporaryFile() as tf:
                    r = p_resp["RESP.%s" % channel]
                    r.seek(0, 0)
                    tf.write(r.read())

                    # Now loop over the epochs.
                    for start, end in epochs:
                        if end:
                            t = start + (end - start) / 2
                        else:
                            t = start + 10

                        # Find response
                        n, s, l, c = channel.split(".")
                        _inv_t = inv.select(network=n, station=s,
                                            location=l, channel=c,
                                            starttime=t - 1, endtime=t + 1)
                        # Should now only be a single channel.
                        self.assertEqual(_inv_t.get_contents()["channels"],
                                         [channel])
                        inv_r = _inv_t[0][0][0].response

                        for unit in ("DISP", "VEL", "ACC"):
                            # Directly call evalresp.
                            e_r = evalresp_for_frequencies(
                                t_samp=None, frequencies=frequencies,
                                filename=tf.name, date=t, units=unit)
                            i_r = inv_r.get_evalresp_response_for_frequencies(
                                frequencies=frequencies, output=unit)
                            # Adaptive absolute tolerance to deal with very
                            # small values.
                            atol = 1E-7 * max(np.abs(e_r).max(),
                                              np.abs(i_r).max())
                            np.testing.assert_allclose(
                                e_r.real, i_r.real,
                                err_msg="real - %s - %s" % (filename, unit),
                                rtol=1E-6, atol=atol)
                            np.testing.assert_allclose(
                                e_r.imag, i_r.imag,
                                err_msg="imag - %s - %s" % (filename, unit),
                                rtol=1E-6, atol=atol)

                            # Bonus: Also read the RESP file directly with
                            # obspy.core and test the response.
                            i_r_r = obspy.read_inventory(tf.name).select(
                                starttime=t - 1,
                                endtime=t + 1)[0][0][0].response\
                                .get_evalresp_response_for_frequencies(
                                frequencies=frequencies, output=unit)
                            np.testing.assert_allclose(
                                e_r.real, i_r_r.real,
                                err_msg="RESP real - %s - %s" % (filename,
                                                                 unit),
                                rtol=1E-6, atol=atol)
                            np.testing.assert_allclose(
                                e_r.imag, i_r_r.imag,
                                err_msg="RESP imag - %s - %s" % (filename,
                                                                 unit),
                                rtol=1E-6, atol=atol)
Example #32
    def scan(self):
        self.channel_array = []
        for filename in self.file_list:
            # print filename
            # Load Dataless to modify, assume only one station
            p = Parser(filename)

            # Get station name and network code
            net, sta = p.get_inventory()['stations'][0]['station_id'].split('.')
            

            # Get Data Format Identifier Codes
            lookup_steim2 = -1
            lookup_geoscope_3bit = -1
            lookup_geoscope_4bit = -1

            format_lookup_list = []
            for local_blockette in p.abbreviations:
                if local_blockette.blockette_type == 30:
                    # Increment number of format
                    # Get Data Format Identifier Code for Steim2
                    if "Steim2 Integer Compression Format" in local_blockette.short_descriptive_name:
                        lookup_steim2 = local_blockette.data_format_identifier_code
                        # print "lookup_steim2 = ", lookup_steim2
                        format_lookup_list.append(lookup_steim2)
                    # Get Data Format Identifier Code for Geoscope 3 bits
                    if "Geoscope gain-range on 3 bits" in local_blockette.short_descriptive_name:
                        lookup_geoscope_3bit = local_blockette.data_format_identifier_code
                        # print "lookup_3bit = ", lookup_geoscope_3bit
                        format_lookup_list.append(lookup_geoscope_3bit)
                    # Get Data Format Identifier Code for Geoscope 4 bits
                    if "Geoscope gain range on 4 bits" in local_blockette.short_descriptive_name:
                        lookup_geoscope_4bit = local_blockette.data_format_identifier_code
                        # print "lookup_4bit = ", lookup_geoscope_4bit
                        format_lookup_list.append(lookup_geoscope_4bit)

            # print format_lookup_list

            # Get Station = first station
            blksta = p.stations[0]

            # Look for all blockettes 52 that reference to one of Geoscope Data Format
            # Identifier Code
            # print "---- Look for all blockettes 52 that reference Geoscope
            # Data Format 3 bit ----"

            if lookup_geoscope_3bit != -1:
                for blockette in blksta:
                    if (blockette.blockette_type == 52) and (blockette.data_format_identifier_code == lookup_geoscope_3bit):
                        print("3", net, sta, blockette.channel_identifier,
                              blockette.location_identifier,
                              blockette.start_date, blockette.end_date)
                        self.channel_array.append(channel_period(
                            net, sta, '', blockette.channel_identifier,
                            blockette.start_date, blockette.end_date, '3'))
    
                        # embed()
            # print "---- Look for all blockettes 52 that reference Geoscope
            # Data Format 4 bit ----"
            if lookup_geoscope_4bit != -1:
                for blockette in blksta:
                    if (blockette.blockette_type == 52) and (blockette.data_format_identifier_code == lookup_geoscope_4bit):
                        print("4", net, sta, blockette.channel_identifier,
                              blockette.location_identifier,
                              blockette.start_date, blockette.end_date)
                        self.channel_array.append(channel_period(
                            net, sta, '', blockette.channel_identifier,
                            blockette.start_date, blockette.end_date, '4'))
            
            pickle.dump(self.channel_array, open("dataless.p", "wb"))
            
            
            for cha in self.channel_array:
                print(cha)
Example #33
    def modify(self):

        for filename in self.file_list:
            print(filename)
            # Load Dataless to modify, assume only one station
            p = Parser(filename)

            # Get Data Format Identifier Codes
            lookup_steim2 = -1
            lookup_geoscope_3bit = -1
            lookup_geoscope_4bit = -1

            format_lookup_list = []
            print("---- Get data format identifier codes ----")
            for local_blockette in p.abbreviations:
                if local_blockette.blockette_type == 30:
                    # Increment number of format
                    # Get Data Format Identifier Code for Steim2
                    if "Steim2 Integer Compression Format" in local_blockette.short_descriptive_name:
                        lookup_steim2 = local_blockette.data_format_identifier_code
                        print("lookup_steim2 =", lookup_steim2)
                        format_lookup_list.append(lookup_steim2)
                    # Get Data Format Identifier Code for Geoscope 3 bits
                    if "Geoscope gain-range on 3 bits" in local_blockette.short_descriptive_name:
                        lookup_geoscope_3bit = local_blockette.data_format_identifier_code
                        print("lookup_3bit =", lookup_geoscope_3bit)
                        format_lookup_list.append(lookup_geoscope_3bit)
                    # Get Data Format Identifier Code for Geoscope 4 bits
                    if "Geoscope gain range on 4 bits" in local_blockette.short_descriptive_name:
                        lookup_geoscope_4bit = local_blockette.data_format_identifier_code
                        print("lookup_4bit =", lookup_geoscope_4bit)
                        format_lookup_list.append(lookup_geoscope_4bit)

            print(format_lookup_list)

            # Create Steim2 Data Format blockette if it does not exist
            print("---- Create Steim2 Data Format blockette if it does not exist ----")
            if lookup_steim2 == -1:
                # Get a blockette 30 with Steim2 encoding
                psteim2 = Parser("./test/dataless.G.CLF.seed")
                # Copy it on current dataless
                p.abbreviations.insert(0, psteim2.abbreviations[0])
                # create new lookup code (make sure it is not already used otherwise
                # increment)
                lookup_steim2 = 1
                while lookup_steim2 in format_lookup_list:
                    lookup_steim2 += 1
                print(lookup_steim2)
                # Set new lookup code
                p.abbreviations[0].data_format_identifier_code = lookup_steim2

            # Get Station = first station
            print("---- Get Station = first station ----")
            blksta = p.stations[0]

            # Remove comment blockettes: the IPGP datacenter inserts them
            # and they cause problems with PDCC

            print("---- Remove Comment Blockettes ----")

            i = 1
            while i < len(blksta):
                if blksta[i].blockette_type == 51:
                    blksta.pop(i)
                else:
                    i += 1

            # Remove Comment Blockettes
            i = 1
            while i < len(blksta):
                if blksta[i].blockette_type == 59:
                    blksta.pop(i)
                else:
                    i += 1

            # Look for all blockettes 52 that reference to one of Geoscope Data Format
            # Identifier Code
            print("---- Look for all blockettes 52 that reference Geoscope Data Format 3 bit ----")
            i = 1
            clone = -1
            if lookup_geoscope_3bit != -1:
                while i < len(blksta):
                    if blksta[i].blockette_type == 52:
                        if blksta[i].data_format_identifier_code == lookup_geoscope_3bit:
                            print("")
                            print(blksta[i].channel_identifier,
                                  blksta[i].start_date,
                                  blksta[i].location_identifier)
                            # Clone blockette 52
                            blksta.insert(i, copy.deepcopy(blksta[i]))
                            blksta[i].location_identifier = "00"
                            blksta[i].data_format_identifier_code = lookup_steim2
                            i += 1
                            clone = i
                        else:
                            clone = -1
                    else:
                        if clone != -1:  # Blockette is concerned
                            print(blksta[i].stage_sequence_number,
                                  blksta[i].blockette_type, end=' ')
                            # Clone blockette
                            b = copy.deepcopy(blksta[i])
                            # Detect stage 0
                            if b.stage_sequence_number == 0:
                                # If stage 0, add gain blockette before
                                newb = copy.deepcopy(blksta[i])
                                newb.sensitivity_gain = gain_geoscope_3
                                newb.stage_sequence_number = \
                                    blksta[i - 1].stage_sequence_number + 1
                                print("new stage =", newb.stage_sequence_number)
                                blksta.insert(clone, newb)
                                clone += 1
                                b.sensitivity_gain *= gain_geoscope_3
                                i += 1
                            blksta.insert(clone, b)
                            clone += 1
                            i += 1
                    i += 1

            # Verify
            print ""
            print ""
            print "---- Verify ----"
            display = -1
            if lookup_geoscope_3bit != -1:
                for blksta_local in blksta:
                    if blksta_local.blockette_type == 52:
                        if blksta_local.data_format_identifier_code == lookup_geoscope_3bit:
                            print ""
                            print blksta_local.channel_identifier, blksta_local.start_date,  blksta_local.location_identifier
                            display = 1
                        else:
                            display = -1
                    else:
                        if display == 1:
                            if blksta_local.stage_sequence_number == 0:
                                print(blksta_local.stage_sequence_number, blksta_local.blockette_type, blksta_local.sensitivity_gain)
                            else:
                                print(blksta_local.stage_sequence_number, blksta_local.blockette_type)

            # Look for all blockettes 52 that reference one of the Geoscope
            # Data Format Identifier Codes
            print()
            print()
            print("---- Look for all blockettes 52 that reference Geoscope Data Format 4 bit ----")
            i = 1
            clone = -1
            if lookup_geoscope_4bit != -1:
                while i < len(blksta):
                    if blksta[i].blockette_type == 52:
                        if blksta[i].data_format_identifier_code == lookup_geoscope_4bit:
                            print ""
                            print blksta[i].channel_identifier, blksta[i].start_date, blksta[i].location_identifier
                            # Clone blockette 52
                            blksta.insert(i, copy.deepcopy(blksta[i]))
                            blksta[i].location_identifier = "00"
                            print(lookup_steim2, lookup_geoscope_4bit)
                            blksta[i].data_format_identifier_code = lookup_steim2
                            i += 1
                            clone = i
                        else:
                            clone = -1
                    else:
                        if clone != -1:  # Blockette is concerned
                            print(blksta[i].stage_sequence_number, blksta[i].blockette_type, end=" ")
                            # Clone blockette
                            b = copy.deepcopy(blksta[i])
                            # Detect stage 0
                            if b.stage_sequence_number == 0:
                                # If stage 0, add gain blockette before
                                newb = copy.deepcopy(blksta[i])
                                newb.sensitivity_gain = gain_geoscope_4
                                newb.stage_sequence_number = \
                                    blksta[i - 1].stage_sequence_number + 1
                                print("new stage =", newb.stage_sequence_number)
                                blksta.insert(clone, newb)
                                clone += 1
                                b.sensitivity_gain *= gain_geoscope_4
                                i += 1
                            blksta.insert(clone, b)
                            clone += 1
                            i += 1
                    i += 1

            # Verify
            print ""
            print ""
            print "---- Verify ----"
            display = -1
            if lookup_geoscope_4bit != -1:
                for blksta_local in blksta:
                    if blksta_local.blockette_type == 52:
                        if blksta_local.data_format_identifier_code == lookup_geoscope_4bit:
                            print ""
                            print blksta_local.channel_identifier, blksta_local.start_date,  blksta_local.location_identifier
                            display = 1
                        else:
                            display = -1
                    else:
                        if display == 1:
                            if blksta_local.stage_sequence_number == 0:
                                print(blksta_local.stage_sequence_number, blksta_local.blockette_type, blksta_local.sensitivity_gain, end=" ")
                            else:
                                print(blksta_local.stage_sequence_number, blksta_local.blockette_type, end=" ")

            # Write new dataless
            print ""
            print "---- Write new dataless ----"
            p.write_seed(self.args.output + '/' + os.path.basename(filename))
Example #34
0
from __future__ import print_function

from math import log10

from obspy import UTCDateTime, read
from obspy.geodetics import gps2dist_azimuth
from obspy.io.xseed import Parser


st = read("../data/LKBD.MSEED")

paz_wa = {'sensitivity': 2800, 'zeros': [0j], 'gain': 1,
          'poles': [-6.2832 - 4.7124j, -6.2832 + 4.7124j]}

parser = Parser("../data/LKBD.dataless")
paz_le3d5s = parser.get_paz("CH.LKBD..EHZ")

st.simulate(paz_remove=paz_le3d5s, paz_simulate=paz_wa, water_level=10)

t = UTCDateTime("2012-04-03T02:45:03")
st.trim(t, t + 50)

tr_n = st.select(component="N")[0]
ampl_n = max(abs(tr_n.data))
tr_e = st.select(component="E")[0]
ampl_e = max(abs(tr_e.data))
ampl = max(ampl_n, ampl_e)

sta_lat = 46.38703
sta_lon = 7.62714
event_lat = 46.218
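# The snippet above is truncated here. A hedged completion follows, based on
# the well-known ObsPy local-magnitude tutorial pattern; `event_lon` and the
# Hutton & Boore (1987) ML coefficients are assumptions, not part of the
# original source.
event_lon = 7.878

epi_dist, az, baz = gps2dist_azimuth(event_lat, event_lon, sta_lat, sta_lon)
epi_dist = epi_dist / 1000.0  # metres -> kilometres

# ML from the maximum Wood-Anderson amplitude (converted from m to mm)
ml = log10(ampl * 1000.0) + 1.11 * log10(epi_dist) + 0.00189 * epi_dist - 2.09
print(ml)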
Example #35
0
def restitute_trace(input_tuple):
    tr, invtype, inobj, unit, force = input_tuple

    remove_trace = False

    seed_id = tr.get_id()
    # check, whether this trace has already been corrected
    if 'processing' in tr.stats \
            and np.any(['remove' in p for p in tr.stats.processing]) \
            and not force:
        print("Trace {0} has already been corrected!".format(seed_id))
        return tr, False
    stime = tr.stats.starttime
    prefilt = get_prefilt(tr)
    if invtype == 'resp':
        fresp = find_in_list(inobj, seed_id)
        if not fresp:
            raise IOError('no response file found '
                          'for trace {0}'.format(seed_id))
        fname = fresp
        seedresp = dict(filename=fname, date=stime, units=unit)
        kwargs = dict(paz_remove=None, pre_filt=prefilt, seedresp=seedresp)
    elif invtype == 'dless':
        if isinstance(inobj, list):
            fname = Parser(find_in_list(inobj, seed_id))
        else:
            fname = inobj
        seedresp = dict(filename=fname, date=stime, units=unit)
        kwargs = dict(pre_filt=prefilt, seedresp=seedresp)
    elif invtype == 'xml':
        invlist = inobj
        if len(invlist) > 1:
            finv = find_in_list(invlist, seed_id)
        else:
            finv = invlist[0]
        inventory = read_inventory(finv, format='STATIONXML')
    elif invtype is None:
        print(
            "No restitution possible, as no station metadata are available!"
        )
        return tr, True
    else:
        remove_trace = True
    # apply restitution to data
    print("Correcting instrument at station %s, channel %s" \
          % (tr.stats.station, tr.stats.channel))
    try:
        if invtype in ['resp', 'dless']:
            try:
                tr.simulate(**kwargs)
            except ValueError as e:
                vmsg = '{0}'.format(e)
                print(vmsg)

        else:
            tr.attach_response(inventory)
            tr.remove_response(output=unit, pre_filt=prefilt)
    except ValueError as e:
        msg0 = 'Response for {0} not found in Parser'.format(seed_id)
        msg1 = 'evalresp failed to calculate response'
        if msg0 not in str(e) and msg1 not in str(e):
            raise
        else:
            # restitution done to copies of data thus deleting traces
            # that failed should not be a problem
            remove_trace = True

    return tr, remove_trace
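# A minimal usage sketch for restitute_trace (not part of the original
# source); the file names and the 'dless' / 'VEL' choices below are
# illustrative assumptions.
from obspy import read
from obspy.io.xseed import Parser

tr = read("example.mseed")[0]          # hypothetical waveform file
parser = Parser("example.dataless")    # hypothetical dataless volume
tr, dropped = restitute_trace((tr, 'dless', parser, 'VEL', False))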
Example #36
0
    # image output format
    _format = 'png'

    # input parameters
    dataless_name = sys.argv[1]
    # Normalization frequency; for analog stages usually taken as 1.0 Hz
    fn = float(
        input(
            "Enter the reference frequency value in Hz\n (recommended value: 1.0 Hz)\n"
        ))

    # create the log file
    log_file = open(dataless_name + '.log', 'w')

    # load the dataless volume via io.xseed
    dtlss = Parser(dataless_name)
    print(dtlss, file=log_file)

    # build a dictionary with the response per channel
    inv = dtlss.get_inventory()

    # work through the dataless:
    F = np.arange(.001, 100, .001)
    for chn in inv['channels']:
        channel_id = chn['channel_id']
        start_date = chn['start_date']
        end_date = chn['end_date']
        instrument = chn['instrument']
        location = channel_id.split('.')[2]
        if end_date != "":
            starttime, endtime = start_date.date, end_date.date
        else:
            starttime, endtime = start_date.date, " "
Example #37
0
# -*- coding: utf-8 -*-
"""
Created on Fri May 13 14:56:35 2016

@author: leroy
"""

from obspy.io.xseed import Parser
import copy
import sys

# Load Dataless to modify, assume only one station
p = Parser("dataless.G.SSB.seed")

# Get Data Format Identifier Code
if p.abbreviations[0].blockette_type == 30:
    if p.abbreviations[0].data_family_type == 1:
        # Test if encoding is Geoscope 3 bits
        if p.abbreviations[0].short_descriptive_name.rfind("3") != -1:
            # Set gain to 2^7 = 128
            gain = 2**7
        # Or if encoding is Geoscope 4 bits
        elif p.abbreviations[0].short_descriptive_name.rfind("4") != -1:
            # Set gain to 2^15 = 32768
            gain = 2**15
        else:
            # Print warning and exit
            print("No Geoscope encoding")
            sys.exit()
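
# A hedged sketch (not part of the original source) generalizing the check
# above: scan every blockette 30 in the abbreviation dictionary instead of
# only the first entry; the attribute names match those used above.
gains = {}
for abbrev in p.abbreviations:
    if abbrev.blockette_type != 30 or abbrev.data_family_type != 1:
        continue
    name = abbrev.short_descriptive_name
    if "3" in name:
        gains[abbrev.data_format_identifier_code] = 2 ** 7    # Geoscope 3 bit
    elif "4" in name:
        gains[abbrev.data_format_identifier_code] = 2 ** 15   # Geoscope 4 bit
print(gains)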

    
Example #38
0
def preprocess(db, stations, comps, goal_day, params, tramef_Z, tramef_E=np.array([]), tramef_N=np.array([])):
    datafilesZ = {}
    datafilesE = {}
    datafilesN = {}

    for station in stations:
        datafilesZ[station] = []
        datafilesE[station] = []
        datafilesN[station] = []
        net, sta = station.split('.')
        gd = datetime.datetime.strptime(goal_day, '%Y-%m-%d')
        files = get_data_availability(
            db, net=net, sta=sta, starttime=gd, endtime=gd)
        for file in files:
            comp = file.comp
            fullpath = os.path.join(file.path, file.file)
            if comp[-1] == 'Z':
                datafilesZ[station].append(fullpath)
            elif comp[-1] == 'E':
                datafilesE[station].append(fullpath)
            elif comp[-1] == 'N':
                datafilesN[station].append(fullpath)

    j = 0
    for istation, station in enumerate(stations):
        for comp in comps:
            files = {"Z": datafilesZ, "E": datafilesE, "N": datafilesN}[comp][station]
            if len(files) != 0:
                logging.debug("%s.%s Reading %i Files" %
                              (station, comp, len(files)))
                stream = Stream()
                for file in sorted(files):
                    st = read(file, dtype=np.float64,
                              starttime=UTCDateTime(gd),
                              endtime=UTCDateTime(gd) + 86400)
                    for tr in st:
                        tr.data = tr.data.astype(np.float64)
                    stream += st
                    del st

                logging.debug("Checking sample alignment")
                for i, trace in enumerate(stream):
                    stream[i] = check_and_phase_shift(trace)

                stream.sort()
                logging.debug("Checking Gaps")
                if len(getGaps(stream)) > 0:
                    max_gap = 10
                    only_too_long = False
                    while getGaps(stream) and not only_too_long:
                        too_long = 0
                        gaps = getGaps(stream)
                        for gap in gaps:
                            if int(gap[-1]) <= max_gap:
                                stream[gap[0]] = stream[gap[0]].__add__(stream[gap[1]], method=0,
                                                                        fill_value="interpolate")
                                stream.remove(stream[gap[1]])
                                break
                            else:
                                too_long += 1
                        if too_long == len(gaps):
                            only_too_long = True

                taper_length = 20.0  # seconds
                for trace in stream:
                    if trace.stats.npts < 4 * taper_length * trace.stats.sampling_rate:
                        trace.data = np.zeros(trace.stats.npts)
                    else:
                        trace.detrend(type="demean")
                        trace.detrend(type="linear")
                        taper_1s = taper_length * float(trace.stats.sampling_rate) / trace.stats.npts
                        cp = cosine_taper(trace.stats.npts, taper_1s)
                        trace.data *= cp
                try:
                    stream.merge(method=0, fill_value=0.0)
                except Exception:
                    continue

                logging.debug("%s.%s Slicing Stream to %s:%s" % (station, comp, utcdatetime.UTCDateTime(
                    goal_day.replace('-', '')), utcdatetime.UTCDateTime(
                    goal_day.replace('-', '')) + params.goal_duration - stream[0].stats.delta))
                stream[0].trim(utcdatetime.UTCDateTime(goal_day.replace('-', '')), utcdatetime.UTCDateTime(
                    goal_day.replace('-', '')) + params.goal_duration - stream[0].stats.delta, pad=True, fill_value=0.0,
                               nearest_sample=False)

                if get_config(db, 'remove_response', isbool=True):
                    logging.debug('Removing instrument response')
                    response_format = get_config(db, 'response_format')
                    response_prefilt = eval(get_config(db, 'response_prefilt'))
                    files = glob.glob(os.path.join(get_config(db,
                                                              'response_path'),
                                                   "*"))
                    if response_format == "inventory":
                        firstinv = True
                        inventory = None
                        for file in files:
                            try:
                                inv = read_inventory(file)
                                if firstinv:
                                    inventory = inv
                                    firstinv = False
                                else:
                                    inventory += inv
                            except Exception:
                                traceback.print_exc()
                        if inventory:
                            stream.attach_response(inventory)
                            stream.remove_response(output='VEL',
                                                   pre_filt=response_prefilt)
                    elif response_format == "dataless":
                        for file in files:
                            p = Parser(file)
                            try:
                                p.get_paz(stream[0].id,
                                          datetime=UTCDateTime(gd))
                                break
                            except Exception:
                                traceback.print_exc()
                                del p
                                continue
                        stream.simulate(seedresp={'filename': p, "units": "VEL"},
                                        pre_filt=response_prefilt,
                                        paz_remove=None,
                                        paz_simulate=None, )
                    else:
                        msg = "Unexpected type for `response_format`: %s" % \
                              response_format
                        raise TypeError(msg)
                trace = stream[0]

                logging.debug(
                    "%s.%s Highpass at %.2f Hz" % (station, comp, params.preprocess_highpass))
                trace.filter("highpass", freq=params.preprocess_highpass, zerophase=True)

                if trace.stats.sampling_rate != params.goal_sampling_rate:
                    logging.debug(
                        "%s.%s Lowpass at %.2f Hz" % (station, comp, params.preprocess_lowpass))
                    trace.filter("lowpass", freq=params.preprocess_lowpass, zerophase=True, corners=8)

                    if params.resampling_method == "Resample":
                        logging.debug("%s.%s Downsample to %.1f Hz" %
                                      (station, comp, params.goal_sampling_rate))
                        trace.data = resample(
                            trace.data, params.goal_sampling_rate / trace.stats.sampling_rate, 'sinc_fastest')

                    elif params.resampling_method == "Decimate":
                        decimation_factor = trace.stats.sampling_rate / params.goal_sampling_rate
                        if int(decimation_factor) != decimation_factor:
                            logging.warning("%s.%s CANNOT be decimated by an integer factor, consider using Resample or Lanczos methods."
                                            " Trace sampling rate = %i ; Desired CC sampling rate = %i" %
                                            (station, comp, trace.stats.sampling_rate, params.goal_sampling_rate))
                            sys.stdout.flush()
                            sys.exit()
                        decimation_factor = int(decimation_factor)
                        logging.debug("%s.%s Decimate by a factor of %i" %
                                      (station, comp, decimation_factor))
                        trace.data = trace.data[::decimation_factor]

                    elif params.resampling_method == "Lanczos":
                        logging.debug("%s.%s Downsample to %.1f Hz" %
                                      (station, comp, params.goal_sampling_rate))
                        trace.data = np.array(trace.data)
                        trace.interpolate(method="lanczos", sampling_rate=params.goal_sampling_rate, a=1.0)

                    trace.stats.sampling_rate = params.goal_sampling_rate


                year, month, day, hourf, minf, secf, wday, yday, isdst = trace.stats.starttime.utctimetuple()

                if j == 0:
                    t = time.strptime("%04i:%02i:%02i:%02i:%02i:%02i" %
                                      (year, month, day, hourf, minf, secf), "%Y:%m:%d:%H:%M:%S")
                    basetime = calendar.timegm(t)

                if len(trace.data) % 2 != 0:
                    trace.data = np.append(trace.data, 0.)
                if len(trace.data) != len(tramef_Z[istation]):
                    missing = len(tramef_Z[istation]) - len(trace.data)
                    if missing > 0:
                        trace.data = np.append(trace.data, np.zeros(missing))
                if comp == "Z":
                    tramef_Z[istation] = trace.data
                elif comp == "E":
                    tramef_E[istation] = trace.data
                elif comp == "N":
                    tramef_N[istation] = trace.data

                del trace, stream
    if len(tramef_E) != 0:
        return basetime, tramef_Z, tramef_E, tramef_N
    else:
        return basetime, tramef_Z
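
# A standalone sketch of the gap-filling policy used above (not part of the
# original source): ObsPy's Trace.__add__ with method=0 and
# fill_value="interpolate" bridges a short gap by linear interpolation.
import numpy as np
from obspy import Trace, UTCDateTime

tr1 = Trace(data=np.ones(100, dtype=np.float64))
tr1.stats.sampling_rate = 10.0
tr1.stats.starttime = UTCDateTime(2020, 1, 1)
tr2 = tr1.copy()
tr2.stats.starttime = tr1.stats.endtime + 0.5  # a 0.5 s (4 sample) gap
merged = tr1.__add__(tr2, method=0, fill_value="interpolate")
print(merged.stats.npts)  # 100 + 4 interpolated samples + 100 = 204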
Example #39
0
    def test_ppsd_w_iris_against_obspy_results(self):
        """
        Test against results obtained after merging of #1108.
        """
        # Read in ANMO data for one day
        st = read(os.path.join(self.path, 'IUANMO.seed'))

        # Read in metadata in various different formats
        paz = {'gain': 86298.5,
               'zeros': [0, 0],
               'poles': [-59.4313, -22.7121 + 27.1065j, -22.7121 - 27.1065j,
                         -0.0048004, -0.073199],
               'sensitivity': 3.3554 * 10 ** 9}
        resp = os.path.join(self.path, 'IUANMO.resp')
        parser = Parser(os.path.join(self.path, 'IUANMO.dataless'))
        inv = read_inventory(os.path.join(self.path, 'IUANMO.xml'))

        # load expected results, for both only PAZ and full response
        filename_paz = os.path.join(self.path, 'IUANMO_ppsd_paz.npz')
        results_paz = PPSD.load_npz(filename_paz, metadata=None)
        filename_full = os.path.join(self.path, 'IUANMO_ppsd_fullresponse.npz')
        results_full = PPSD.load_npz(filename_full, metadata=None)

        # Calculate the PPSDs and test against expected results
        # first: only PAZ
        ppsd = PPSD(st[0].stats, paz)
        ppsd.add(st)
        # commented code to generate the test data:
        # ## np.savez(filename_paz,
        # ##          **dict([(k, getattr(ppsd, k))
        # ##                  for k in PPSD.NPZ_STORE_KEYS]))
        for key in PPSD.NPZ_STORE_KEYS_ARRAY_TYPES:
            np.testing.assert_allclose(getattr(ppsd, key),
                                       getattr(results_paz, key),
                                       rtol=1e-5)
        for key in PPSD.NPZ_STORE_KEYS_LIST_TYPES:
            for got, expected in zip(getattr(ppsd, key),
                                     getattr(results_paz, key)):
                np.testing.assert_allclose(got, expected, rtol=1e-5)
        for key in PPSD.NPZ_STORE_KEYS_SIMPLE_TYPES:
            if key in ["obspy_version", "numpy_version", "matplotlib_version"]:
                continue
            self.assertEqual(getattr(ppsd, key), getattr(results_paz, key))
        # second: various methods for full response
        for metadata in [parser, inv, resp]:
            ppsd = PPSD(st[0].stats, metadata)
            ppsd.add(st)
            # commented code to generate the test data:
            # ## np.savez(filename_full,
            # ##          **dict([(k, getattr(ppsd, k))
            # ##                  for k in PPSD.NPZ_STORE_KEYS]))
            for key in PPSD.NPZ_STORE_KEYS_ARRAY_TYPES:
                np.testing.assert_allclose(getattr(ppsd, key),
                                           getattr(results_full, key),
                                           rtol=1e-5)
            for key in PPSD.NPZ_STORE_KEYS_LIST_TYPES:
                for got, expected in zip(getattr(ppsd, key),
                                         getattr(results_full, key)):
                    np.testing.assert_allclose(got, expected, rtol=1e-5)
            for key in PPSD.NPZ_STORE_KEYS_SIMPLE_TYPES:
                if key in [
                        "obspy_version", "numpy_version", "matplotlib_version"
                ]:
                    continue
                self.assertEqual(getattr(ppsd, key),
                                 getattr(results_full, key))
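
        # A short follow-on sketch (not in the original test): the same PPSD
        # could be saved and reloaded via npz, mirroring the load_npz calls
        # above; the file name is illustrative.
        # ## ppsd.save_npz("IUANMO_ppsd_check.npz")
        # ## ppsd_reloaded = PPSD.load_npz("IUANMO_ppsd_check.npz",
        # ##                               metadata=parser)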
Example #40
0
def preprocessing_function(tr, processing_info):  # NOQA
    """
    Function to perform the actual preprocessing for one individual seismogram.
    This is part of the project so it can change depending on the project.

    Please keep in mind that you will have to manually update this file to a
    new version if LASIF is ever updated.
    """

    def zerophase_chebychev_lowpass_filter(trace, freqmax):
        """
        Custom Chebychev type two zerophase lowpass filter useful for
        decimation filtering.

        This filter is stable up to a reduction in frequency with a factor of
        10. If more reduction is desired, simply decimate in steps.

        Partly based on a filter in ObsPy.

        :param trace: The trace to be filtered.
        :param freqmax: The desired lowpass frequency.

        Will be replaced once ObsPy has a proper decimation filter.
        """
        # rp - maximum ripple of passband, rs - attenuation of stopband
        rp, rs, order = 1, 96, 1e99
        ws = freqmax / (trace.stats.sampling_rate * 0.5)  # stop band frequency
        wp = ws  # pass band frequency

        while True:
            if order <= 12:
                break
            wp *= 0.99
            order, wn = signal.cheb2ord(wp, ws, rp, rs, analog=0)

        b, a = signal.cheby2(order, rs, wn, btype="low", analog=0, output="ba")

        # Apply twice to get rid of the phase distortion.
        trace.data = signal.filtfilt(b, a, trace.data)

    # =========================================================================
    # Read seismograms and gather basic information.
    # =========================================================================
    specfem_delta_delay = -1.0687500
    starttime = processing_info["event_information"]["origin_time"] + specfem_delta_delay
    endtime = starttime + processing_info["process_params"]["dt"] * \
                          (processing_info["process_params"]["npts"] - 1)
    duration = endtime - starttime

    # Make sure the seismograms are long enough. If not, skip them.
    if starttime < tr.stats.starttime or endtime > tr.stats.endtime:
        msg = ("The seismogram does not cover the required time span.\n"
               "Seismogram time span: %s - %s\n"
               "Requested time span: %s - %s" % (
                   tr.stats.starttime, tr.stats.endtime, starttime, endtime))
        raise LASIFError(msg)

    # Trim to reduce processing cost.
    # starttime is the origin time of the event
    # endtime is the origin time plus the length of the synthetics
    tr.trim(starttime - 0.2 * duration, endtime + 0.2 * duration)

    # =========================================================================
    # Some basic checks on the data.
    # =========================================================================
    # Non-zero length
    if not len(tr):
        msg = "No data found in time window around the event. File skipped."
        raise LASIFError(msg)

    # No nans or infinity values allowed.
    if not np.isfinite(tr.data).all():
        msg = "Data contains NaNs or Infs. File skipped"
        raise LASIFError(msg)

    # =========================================================================
    # Step 1: Decimation
    # Decimate with the factor closest to the sampling rate of the synthetics.
    # The data is still oversampled by a large amount so there should be no
    # problems. This has to be done here so that the instrument correction is
    # reasonably fast even for input data with a large sampling rate.
    # =========================================================================
    while True:
        decimation_factor = int(processing_info["process_params"]["dt"] /
                                tr.stats.delta)
        # Decimate in steps for large sample rate reductions.
        if decimation_factor > 8:
            decimation_factor = 8
        if decimation_factor > 1:
            new_nyquist = tr.stats.sampling_rate / 2.0 / float(
                    decimation_factor)
            zerophase_chebychev_lowpass_filter(tr, new_nyquist)
            tr.decimate(factor=decimation_factor, no_filter=True)
        else:
            break

    # =========================================================================
    # Step 2: Detrend and taper.
    # =========================================================================
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(max_percentage=0.05, type="hann")

    # =========================================================================
    # Step 3: Instrument correction
    # Correct seismograms to velocity in m/s.
    # =========================================================================
    output_units = "VEL"
    station_name = "station.{}_{}.response.xml".format(tr.stats.network, tr.stats.station)
    station_file = os.path.join("StationXML", station_name)

    # check if the station file actually exists ==============================
    if not os.path.exists(station_file):
        msg = "No station file found for the relevant time span. File skipped"
        raise LASIFError(msg)

    # This is really necessary as other filters are just not sharp enough
    # and lots of energy from other frequency bands leaks into the frequency
    # band of interest
    freqmin = processing_info["process_params"]["highpass"]
    freqmax = processing_info["process_params"]["lowpass"]

    f2 = 0.9 * freqmin
    f3 = 1.1 * freqmax
    # Recommendations from the SAC manual.
    f1 = 0.5 * f2
    f4 = 2.0 * f3
    pre_filt = (f1, f2, f3, f4)

    # processing for seed files ==============================================
    if "/SEED/" in station_file:
        # XXX: Check if this is m/s. In all cases encountered so far it
        # always is, but SEED is in theory also able to specify corrections
        # to other units...
        parser = Parser(station_file)
        try:
            # The simulate might fail but might still modify the data. The
            # backup is needed for the backup plan to only correct using
            # poles and zeros.
            backup_tr = tr.copy()
            try:
                tr.simulate(seedresp={"filename": parser,
                                      "units": output_units,
                                      "date": tr.stats.starttime},
                            pre_filt=pre_filt, zero_mean=False, taper=False)
            except ValueError:
                warnings.warn("Evalresp failed, will only use the Poles and "
                              "Zeros stage")
                tr = backup_tr
                paz = parser.get_paz(tr.id, tr.stats.starttime)
                if paz["sensitivity"] == 0:
                    warnings.warn("Sensitivity is 0 in SEED file and will "
                                  "not be taken into account!")
                    tr.simulate(paz_remove=paz, remove_sensitivity=False,
                                pre_filt=pre_filt, zero_mean=False,
                                taper=False)
                else:
                    tr.simulate(paz_remove=paz, pre_filt=pre_filt,
                                zero_mean=False, taper=False)
        except Exception:
            msg = ("File  could not be corrected with the help of the "
                   "SEED file '%s'. Will be skipped.") \
                  % processing_info["station_filename"]
            raise LASIFError(msg)
    # processing with RESP files =============================================
    elif "/RESP/" in station_file:
        try:
            tr.simulate(seedresp={"filename": station_file,
                                  "units": output_units,
                                  "date": tr.stats.starttime},
                        pre_filt=pre_filt, zero_mean=False, taper=False)
        except ValueError as e:
            msg = ("File  could not be corrected with the help of the "
                   "RESP file '%s'. Will be skipped. Due to: %s") \
                  % (processing_info["station_filename"], str(e))
            raise LASIFError(msg)
    elif "StationXML" in station_file:
        try:
            inv = obspy.read_inventory(station_file, format="stationxml")
        except Exception as e:
            msg = ("Could not open StationXML file '%s'. Due to: %s. Will be "
                   "skipped." % (station_file, str(e)))
            raise LASIFError(msg)
        tr.attach_response(inv)
        try:
            tr.remove_response(output=output_units, pre_filt=pre_filt,
                               zero_mean=False, taper=False)
        except Exception as e:
            msg = ("File  could not be corrected with the help of the "
                   "StationXML file '%s'. Due to: '%s'  Will be skipped.") \
                  % (station_file, e.__repr__())
            raise LASIFError(msg)
    else:
        raise NotImplementedError

    # =========================================================================
    # Step 4: Bandpass filtering
    # This has to be exactly the same filter as in the source time function
    # in the case of SES3D.
    # =========================================================================
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(0.05, type="cosine")
    tr.filter("bandpass", freqmin=freqmin, freqmax=freqmax, corners=3,
              zerophase=True)
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(0.05, type="cosine")
    tr.filter("bandpass", freqmin=freqmin, freqmax=freqmax, corners=3,
              zerophase=True)

    # =========================================================================
    # Step 5: Sinc interpolation
    # =========================================================================
    # Make sure that the data array is at least as long as the
    # synthetics array.
    tr.data = np.require(tr.data, requirements="C")
    tr.interpolate(
            sampling_rate=1.0 / processing_info["process_params"]["dt"],
            method="lanczos", starttime=starttime, window="blackman", a=12,
            npts=processing_info["process_params"]["npts"])

    # =========================================================================
    # Save processed data and clean up.
    # =========================================================================
    # Convert to single precision to save some space.
    tr.data = np.require(tr.data, dtype="float32", requirements="C")
    if hasattr(tr.stats, "mseed"):
        tr.stats.mseed.encoding = "FLOAT32"

    return tr
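
# A small worked example of the pre_filt corner construction used above
# (the band of interest is illustrative), following the SAC manual
# recommendation referenced in the code:
freqmin, freqmax = 0.01, 0.1           # band of interest in Hz
f2, f3 = 0.9 * freqmin, 1.1 * freqmax  # 0.009 Hz, 0.11 Hz
f1, f4 = 0.5 * f2, 2.0 * f3            # 0.0045 Hz, 0.22 Hz
pre_filt = (f1, f2, f3, f4)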
Example #41
0
def config(set, sync):
    """This command should now only be used to use the command line to set
    a parameter value in the data base. It used to launch the Configurator but
    the recommended way to configure MSNoise is to use the "msnoise admin" web
    interface."""
    if set:
        from ..default import default
        if not set.count("="):
            click.echo("!! format of the set command is name=value !!")
            return
        name, value = set.split("=")
        if name not in default:
            click.echo("!! unknown parameter %s !!" % name)
            return
        from ..api import connect, update_config
        db = connect()
        update_config(db, name, value)
        db.commit()
        db.close()
        click.echo("Successfully updated parameter %s = %s" % (name, value))
    elif sync:
        import glob
        from ..api import connect, get_config, get_stations, update_station
        db = connect()
        response_format = get_config(db, 'response_format')
        response_files = glob.glob(os.path.join(get_config(db, 'response_path'), "*"))
        if response_format == "inventory":
            from obspy import read_inventory
            firstinv = True
            metadata = None
            for file in response_files:
                try:
                    inv = read_inventory(file)
                    if firstinv:
                        metadata = inv
                        firstinv = False
                    else:
                        metadata += inv
                except Exception:
                    pass
        elif response_format == "dataless":
            from obspy.io.xseed import Parser
            all_metadata = {}
            for file in response_files:
                metadata = Parser(file)
                tmpinv = metadata.get_inventory()
                for chan in tmpinv["channels"]:
                    all_metadata[chan["channel_id"]] = metadata
        else:
            print("Response Format Not Supported")
            exit()
        for station in get_stations(db):
            id = "%s.%s.00.HHZ" % (station.net, station.sta)
            if response_format == "inventory":
                coords = metadata.get_coordinates(id)
            else:
                coords = all_metadata[id].get_coordinates(id)
            update_station(db, station.net, station.sta, coords["longitude"],
                           coords["latitude"], coords["elevation"], "DEG", )
            logging.info("Added coordinates (%.5f %.5f) for station %s.%s" %
                        (coords["longitude"], coords["latitude"],
                         station.net, station.sta))
        db.close()

    else:
        from ..s001configurator import main
        click.echo('Let\'s Configure MSNoise !')
        main()
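
# Hedged usage notes (not part of the original source; the flag spellings
# are assumed from the click-style signature above -- verify with
# `msnoise config --help`):
#
#   msnoise config --set data_folder=/data/archive   # set one parameter
#   msnoise config --sync                            # sync station coordinates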
Example #42
0
    days = [
        current_day - 24 * 60 * 60 * (days_back + x)
        for x in range(number_of_days)
    ]
    # Add in a few more special days.
    extradays = [180, 120, 90, 60, 30]
    for day in extradays:
        days.append(current_day - day * 24 * 60 * 60)
    networks = ['IU', 'CU', 'US', 'IC', 'GT', 'IW', 'NE', 'XX', 'GS', 'NQ']
    for net in networks:
        if debug:
            print('On network: ' + net)
            cnettime = UTCDateTime.now()
            print('Start Time: ' + str(cnettime))
        try:
            sp = Parser('/APPS/metadata/SEED/' + net + '.dataless')
            clients = {'NEIC': True, 'ASL': True}
        except Exception:
            sp = False
            clients = {'NEIC': False, 'ASL': True}

        # Need to make a function of one variable without a lambda
        def proc_part(x):
            return process_day_net(x, net, sp, clients)

        avails = []
        for idx, day in enumerate(days):
            if debug:
                print('On day: ' + str(idx + 1) + ' of ' + str(len(days)))
                print('Current scan day: ' + str(day))
            avails.append(proc_part(day))
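
# A tiny worked example (not from the original source) of the day arithmetic
# used above: UTCDateTime supports direct subtraction of seconds.
from obspy import UTCDateTime

current_day = UTCDateTime(2023, 1, 10)
print(current_day - 2 * 24 * 60 * 60)  # 2023-01-08T00:00:00.000000Z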
Example #43
0
def preprocessing_function(processing_info, iteration):  # NOQA
    """
    Function to perform the actual preprocessing for one individual seismogram.
    This is part of the project so it can change depending on the project.

    Please keep in mind that you will have to manually update this file to a
    new version if LASIF is ever updated.

    You can do whatever you want in this function as long as the function
    signature is honored. The file is read from ``"input_filename"`` and
    written to ``"output_filename"``.

    One goal of this function is to make sure that the data is available at the
    same time steps as the synthetics. The first time sample of the synthetics
    will always be the origin time of the event.

    Furthermore the data has to be converted to m/s.

    :param processing_info: A dictionary containing information about the
        file to be processed. It will have the following structure.
    :type processing_info: dict

    .. code-block:: python

        {'event_information': {
            'depth_in_km': 22.0,
            'event_name': 'GCMT_event_VANCOUVER_ISLAND...',
            'filename': '/.../GCMT_event_VANCOUVER_ISLAND....xml',
            'latitude': 49.53,
            'longitude': -126.89,
            'm_pp': 2.22e+18,
            'm_rp': -2.78e+18,
            'm_rr': -6.15e+17,
            'm_rt': 1.98e+17,
            'm_tp': 5.14e+18,
            'm_tt': -1.61e+18,
            'magnitude': 6.5,
            'magnitude_type': 'Mwc',
            'origin_time': UTCDateTime(2011, 9, 9, 19, 41, 34, 200000),
            'region': u'VANCOUVER ISLAND, CANADA REGION'},
         'input_filename': u'/.../raw/7D.FN01A..HHZ.mseed',
         'output_filename': u'/.../processed_.../7D.FN01A..HHZ.mseed',
         'process_params': {
            'dt': 0.75,
            'highpass': 0.007142857142857143,
            'lowpass': 0.0125,
            'npts': 2000},
         'station_coordinates': {
            'elevation_in_m': -54.0,
            'latitude': 46.882,
            'local_depth_in_m': None,
            'longitude': -124.3337},
         'station_filename': u'/.../STATIONS/RESP/RESP.7D.FN01A..HH*'}

    Please note that you also got the iteration object here, so if you
    want some parameters to change depending on the iteration, just use
    if/else on the iteration objects.

    >>> iteration.name  # doctest: +SKIP
    '11'
    >>> iteration.get_process_params()  # doctest: +SKIP
    {'dt': 0.75,
     'highpass': 0.01,
     'lowpass': 0.02,
     'npts': 500}

    Use ``$ lasif shell`` to play around and figure out what the iteration
    objects can do.

    """
    def zerophase_chebychev_lowpass_filter(trace, freqmax):
        """
        Custom Chebychev type two zerophase lowpass filter useful for
        decimation filtering.

        This filter is stable up to a reduction in frequency with a factor of
        10. If more reduction is desired, simply decimate in steps.

        Partly based on a filter in ObsPy.

        :param trace: The trace to be filtered.
        :param freqmax: The desired lowpass frequency.

        Will be replaced once ObsPy has a proper decimation filter.
        """
        # rp - maximum ripple of passband, rs - attenuation of stopband
        rp, rs, order = 1, 96, 1e99
        ws = freqmax / (trace.stats.sampling_rate * 0.5)  # stop band frequency
        wp = ws  # pass band frequency

        while True:
            if order <= 12:
                break
            wp *= 0.99
            order, wn = signal.cheb2ord(wp, ws, rp, rs, analog=0)

        b, a = signal.cheby2(order, rs, wn, btype="low", analog=0, output="ba")

        # Apply twice to get rid of the phase distortion.
        trace.data = signal.filtfilt(b, a, trace.data)

    # =========================================================================
    # Read seismograms and gather basic information.
    # =========================================================================
    starttime = processing_info["event_information"]["origin_time"]
    endtime = starttime + processing_info["process_params"]["dt"] * \
        (processing_info["process_params"]["npts"] - 1)
    duration = endtime - starttime

    st = obspy.read(processing_info["input_filename"])

    if len(st) != 1:
        warnings.warn("The file '%s' has %i traces and not 1. "
                      "Skip all but the first" % (
                          processing_info["input_filename"], len(st)))
    tr = st[0]

    # Make sure the seismograms are long enough. If not, skip them.
    if starttime < tr.stats.starttime or endtime > tr.stats.endtime:
        msg = ("The seismogram does not cover the required time span.\n"
               "Seismogram time span: %s - %s\n"
               "Requested time span: %s - %s" % (
                   tr.stats.starttime, tr.stats.endtime, starttime, endtime))
        raise LASIFError(msg)

    # Trim to reduce processing cost.
    # starttime is the origin time of the event
    # endtime is the origin time plus the length of the synthetics
    tr.trim(starttime - 0.2 * duration, endtime + 0.2 * duration)

    # =========================================================================
    # Some basic checks on the data.
    # =========================================================================
    # Non-zero length
    if not len(tr):
        msg = "No data found in time window around the event. File skipped."
        raise LASIFError(msg)

    # No nans or infinity values allowed.
    if not np.isfinite(tr.data).all():
        msg = "Data contains NaNs or Infs. File skipped"
        raise LASIFError(msg)

    # =========================================================================
    # Step 1: Decimation
    # Decimate with the factor closest to the sampling rate of the synthetics.
    # The data is still oversampled by a large amount so there should be no
    # problems. This has to be done here so that the instrument correction is
    # reasonably fast even for input data with a large sampling rate.
    # =========================================================================
    while True:
        decimation_factor = int(processing_info["process_params"]["dt"] /
                                tr.stats.delta)
        # Decimate in steps for large sample rate reductions.
        if decimation_factor > 8:
            decimation_factor = 8
        if decimation_factor > 1:
            new_nyquist = tr.stats.sampling_rate / 2.0 / float(
                decimation_factor)
            zerophase_chebychev_lowpass_filter(tr, new_nyquist)
            tr.decimate(factor=decimation_factor, no_filter=True)
        else:
            break

    # =========================================================================
    # Step 2: Detrend and taper.
    # =========================================================================
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(max_percentage=0.05, type="hann")

    # =========================================================================
    # Step 3: Instrument correction
    # Correct seismograms to velocity in m/s.
    # =========================================================================
    output_units = "VEL"
    station_file = processing_info["station_filename"]

    # check if the station file actually exists ==============================
    if not processing_info["station_filename"]:
        msg = "No station file found for the relevant time span. File skipped"
        raise LASIFError(msg)

    # This is really necessary as other filters are just not sharp enough
    # and lots of energy from other frequency bands leaks into the frequency
    # band of interest
    freqmin = processing_info["process_params"]["highpass"]
    freqmax = processing_info["process_params"]["lowpass"]

    f2 = 0.9 * freqmin
    f3 = 1.1 * freqmax
    # Recommendations from the SAC manual.
    f1 = 0.5 * f2
    f4 = 2.0 * f3
    pre_filt = (f1, f2, f3, f4)

    # processing for seed files ==============================================
    if "/SEED/" in station_file:
        # XXX: Check if this is m/s. In all cases encountered so far it
        # always is, but SEED is in theory also able to specify corrections
        # to other units...
        parser = Parser(station_file)
        try:
            # The simulate might fail but might still modify the data. The
            # backup is needed for the backup plan to only correct using
            # poles and zeros.
            backup_tr = tr.copy()
            try:
                tr.simulate(seedresp={"filename": parser,
                                      "units": output_units,
                                      "date": tr.stats.starttime},
                            pre_filt=pre_filt, zero_mean=False, taper=False)
            except ValueError:
                warnings.warn("Evalresp failed, will only use the Poles and "
                              "Zeros stage")
                tr = backup_tr
                paz = parser.get_paz(tr.id, tr.stats.starttime)
                if paz["sensitivity"] == 0:
                    warnings.warn("Sensitivity is 0 in SEED file and will "
                                  "not be taken into account!")
                    tr.simulate(paz_remove=paz, remove_sensitivity=False,
                                pre_filt=pre_filt, zero_mean=False,
                                taper=False)
                else:
                    tr.simulate(paz_remove=paz, pre_filt=pre_filt,
                                zero_mean=False, taper=False)
        except Exception as e:
            msg = ("File  could not be corrected with the help of the "
                   "SEED file '%s'. Will be skipped due to: %s") \
                % (processing_info["station_filename"], str(e))
            raise LASIFError(msg)
    # processing with RESP files =============================================
    elif "/RESP/" in station_file:
        try:
            tr.simulate(seedresp={"filename": station_file,
                                  "units": output_units,
                                  "date": tr.stats.starttime},
                        pre_filt=pre_filt, zero_mean=False, taper=False)
        except ValueError as e:
            msg = ("File  could not be corrected with the help of the "
                   "RESP file '%s'. Will be skipped. Due to: %s") \
                % (processing_info["station_filename"], str(e))
            raise LASIFError(msg)
    elif "/StationXML/" in station_file:
        try:
            inv = obspy.read_inventory(station_file, format="stationxml")
        except Exception as e:
            msg = ("Could not open StationXML file '%s'. Due to: %s. Will be "
                   "skipped." % (station_file, str(e)))
            raise LASIFError(msg)
        tr.attach_response(inv)
        try:
            tr.remove_response(output=output_units, pre_filt=pre_filt,
                               zero_mean=False, taper=False)
        except Exception as e:
            msg = ("File  could not be corrected with the help of the "
                   "StationXML file '%s'. Due to: '%s'  Will be skipped.") \
                % (processing_info["station_filename"], e.__repr__()),
            raise LASIFError(msg)
    else:
        raise NotImplementedError

    # =========================================================================
    # Step 4: Bandpass filtering
    # This has to be exactly the same filter as in the source time function
    # in the case of SES3D.
    # =========================================================================
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(0.05, type="cosine")
    tr.filter("bandpass", freqmin=freqmin, freqmax=freqmax, corners=3,
              zerophase=False)
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(0.05, type="cosine")
    tr.filter("bandpass", freqmin=freqmin, freqmax=freqmax, corners=3,
              zerophase=False)

    # =========================================================================
    # Step 5: Sinc interpolation
    # =========================================================================
    # Make sure that the data array is at least as long as the
    # synthetics array.
    tr.interpolate(
        sampling_rate=1.0 / processing_info["process_params"]["dt"],
        method="lanczos", starttime=starttime, window="blackman", a=12,
        npts=processing_info["process_params"]["npts"])

    # =========================================================================
    # Save processed data and clean up.
    # =========================================================================
    # Convert to single precision to save some space.
    tr.data = np.require(tr.data, dtype="float32", requirements="C")
    if hasattr(tr.stats, "mseed"):
        tr.stats.mseed.encoding = "FLOAT32"

    tr.write(processing_info["output_filename"], format=tr.stats._format)