Example #1
    def test_evalresp_with_output_from_seed(self):
        """
        The StationXML file has been converted to SEED with the help of a tool
        provided by IRIS:

        https://seiscode.iris.washington.edu/projects/stationxml-converter
        """
        t_samp = 0.05
        nfft = 16384

        # Test for different output units.
        units = ["DISP", "VEL", "ACC"]
        filenames = ["IRIS_single_channel_with_response", "XM.05", "AU.MEEK"]

        for filename in filenames:
            xml_filename = os.path.join(self.data_dir,
                                        filename + os.path.extsep + "xml")
            seed_filename = os.path.join(self.data_dir,
                                         filename + os.path.extsep + "seed")

            p = Parser(seed_filename)

            # older systems don't like an end date in the year 2599
            t_ = UTCDateTime(2030, 1, 1)
            if p.blockettes[50][0].end_effective_date > t_:
                p.blockettes[50][0].end_effective_date = None
            if p.blockettes[52][0].end_date > t_:
                p.blockettes[52][0].end_date = None

            resp_filename = p.getRESP()[0][-1]

            inv = read_inventory(xml_filename)

            network = inv[0].code
            station = inv[0][0].code
            location = inv[0][0][0].location_code
            channel = inv[0][0][0].code
            date = inv[0][0][0].start_date

            for unit in units:
                resp_filename.seek(0, 0)

                seed_response, seed_freq = evalresp(t_samp,
                                                    nfft,
                                                    resp_filename,
                                                    date=date,
                                                    station=station,
                                                    channel=channel,
                                                    network=network,
                                                    locid=location,
                                                    units=unit,
                                                    freq=True)

                xml_response, xml_freq = \
                    inv[0][0][0].response.get_evalresp_response(t_samp, nfft,
                                                                output=unit)

                self.assertTrue(np.allclose(seed_freq, xml_freq, rtol=1E-5))
                self.assertTrue(
                    np.allclose(seed_response, xml_response, rtol=1E-5))
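Distilled from the test above, the core pattern is: extract a RESP file-like object from the Parser and feed it to evalresp. A minimal hedged sketch; the file name, SEED id and the evalresp import location are assumptions, not taken from the test:

from obspy.core import UTCDateTime
from obspy.signal.invsim import evalresp   # import path assumed
from obspy.xseed import Parser

p = Parser("data/example.seed")            # assumed dataless SEED file
resp = p.getRESP()[0][-1]                  # file-like object holding RESP data
resp.seek(0, 0)

response, freqs = evalresp(0.05, 16384, resp,
                           date=UTCDateTime(2009, 7, 7),
                           station="MANZ", channel="EHZ",
                           network="BW", locid="",
                           units="VEL", freq=True)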
Example #2
    def _extract_index_values_seed(filename):
        """
        Reads SEED files and extracts some keys per channel.
        """
        try:
            p = Parser(filename)
        except:
            msg = "Not a valid SEED file?"
            raise StationCacheError(msg)
        channels = p.getInventory()["channels"]

        channels = [
            [
                _i["channel_id"],
                int(_i["start_date"].timestamp),
                int(_i["end_date"].timestamp) if _i["end_date"] else None,
                _i["latitude"],
                _i["longitude"],
                _i["elevation_in_m"],
                _i["local_depth_in_m"],
            ]
            for _i in channels
        ]

        return channels
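For reference, the keys this helper relies on come straight from the dictionaries that Parser.getInventory() returns for every channel. A short hedged sketch (file name assumed):

from obspy.xseed import Parser

p = Parser("data/example.seed")            # assumed dataless SEED file
for chan in p.getInventory()["channels"]:
    # the same keys used by _extract_index_values_seed()
    print("%s  %s -- %s  (%.4f, %.4f, %.1f m)" % (
        chan["channel_id"], chan["start_date"], chan["end_date"],
        chan["latitude"], chan["longitude"], chan["elevation_in_m"]))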
Example #3
    def test_evalresp_with_output_from_seed(self):
        """
        The StationXML file has been converted to SEED with the help of a tool
        provided by IRIS:

        https://seiscode.iris.washington.edu/projects/stationxml-converter
        """
        t_samp = 0.05
        nfft = 16384

        # Test for different output units.
        units = ["DISP", "VEL", "ACC"]
        filenames = ["IRIS_single_channel_with_response", "XM.05", "AU.MEEK"]

        for filename in filenames:
            xml_filename = os.path.join(self.data_dir,
                                        filename + os.path.extsep + "xml")
            seed_filename = os.path.join(self.data_dir,
                                         filename + os.path.extsep + "seed")

            p = Parser(seed_filename)

            # older systems don't like an end date in the year 2599
            t_ = UTCDateTime(2030, 1, 1)
            if p.blockettes[50][0].end_effective_date > t_:
                p.blockettes[50][0].end_effective_date = None
            if p.blockettes[52][0].end_date > t_:
                p.blockettes[52][0].end_date = None

            resp_filename = p.getRESP()[0][-1]

            inv = read_inventory(xml_filename)

            network = inv[0].code
            station = inv[0][0].code
            location = inv[0][0][0].location_code
            channel = inv[0][0][0].code
            date = inv[0][0][0].start_date

            for unit in units:
                resp_filename.seek(0, 0)

                seed_response, seed_freq = evalresp(
                    t_samp, nfft, resp_filename, date=date, station=station,
                    channel=channel, network=network, locid=location,
                    units=unit, freq=True)

                xml_response, xml_freq = \
                    inv[0][0][0].response.get_evalresp_response(t_samp, nfft,
                                                                output=unit)

                self.assertTrue(np.allclose(seed_freq, xml_freq, rtol=1E-5))
                self.assertTrue(np.allclose(seed_response, xml_response,
                                            rtol=1E-5))
Example #4
    def stats_from_dataless(self, metadata_path=None):
        """
        Function that returns an array of the N station names (station_id)
        found in a dataless SEED file.
        """
        if metadata_path is None:
            metadata_path = self.metadata_path

        sp = Parser(metadata_path)

        metadata = Parser.getInventory(sp)
        stats = np.asarray(
            [stat['station_id'] for stat in metadata['stations']])
        return stats
Example #5
    def getPAZ(self, seed_id, datetime):
        """
        Get PAZ for a station at given time span. Gain is the A0 normalization
        constant for the poles and zeros.

        :type seed_id: str
        :param seed_id: SEED or channel id, e.g. ``"BW.RJOB..EHZ"`` or
            ``"EHE"``.
        :type datetime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param datetime: Time for which the PAZ is requested,
            e.g. ``'2010-01-01 12:00:00'``.
        :rtype: dict
        :return: Dictionary containing zeros, poles, gain and sensitivity.

        .. rubric:: Example

        >>> c = Client()
        >>> paz = c.station.getPAZ('BW.MANZ..EHZ', '20090707')
        >>> paz['zeros']
        [0j, 0j]
        >>> len(paz['poles'])
        5
        >>> print(paz['poles'][0])
        (-0.037004+0.037016j)
        >>> paz['gain']
        60077000.0
        >>> paz['sensitivity']
        2516800000.0
        """
        network, station, location, channel = seed_id.split(".")
        # request station information
        station_list = self.getList(network=network, station=station,
                                    datetime=datetime)
        if not station_list:
            return {}
        # don't allow wild cards
        for wildcard in ['*', '?']:
            if wildcard in seed_id:
                msg = "Wildcards in seed_id are not allowed."
                raise ValueError(msg)

        if len(station_list) > 1:
            warnings.warn("Received more than one XSEED file. Using first.")

        xml_doc = station_list[0]
        res = self.client.station.getResource(xml_doc['resource_name'])
        parser = Parser(res)
        paz = parser.getPAZ(seed_id=seed_id, datetime=UTCDateTime(datetime))
        return paz
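When the dataless SEED file is already available locally, the same dictionary can be obtained without the web client, directly from a Parser. A minimal hedged sketch; the file name and SEED id are assumptions:

from obspy.core import UTCDateTime
from obspy.xseed import Parser

parser = Parser("dataless.seed.BW_MANZ")   # assumed local dataless file
paz = parser.getPAZ("BW.MANZ..EHZ", datetime=UTCDateTime("2009-07-07"))
print("gain=%s  sensitivity=%s" % (paz["gain"], paz["sensitivity"]))
print("poles=%s" % (paz["poles"],))
print("zeros=%s" % (paz["zeros"],))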
Example #6
def read_paz(files):
    """
    Read dataless files and extract poles and zeros.
    """
    paz = {}

    for file in files:
        p = Parser(file)
        blk = p.blockettes

        for j in range(len(blk[50])):

            mult = len(blk[58]) / len(blk[52])

            sta = blk[50][j].station_call_letters
            paz[sta] = {}

            for i in range(j * 3, len(blk[52])):
                channel = blk[52][i].channel_identifier
                paz[sta][channel] = {}
                paz[sta][channel]['poles'] = np.array(
                    blk[53][i].real_pole) + 1j * np.array(
                        blk[53][i].imaginary_pole)
                paz[sta][channel]['zeros'] = np.array(
                    blk[53][i].real_zero) + 1j * np.array(
                        blk[53][i].imaginary_zero)
                paz[sta][channel]['gain'] = blk[53][i].A0_normalization_factor
                paz[sta][channel]['sensitivity'] = blk[58][(i + 1) * mult -
                                                           1].sensitivity_gain

    return paz
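Indexing blockettes by hand, as above, breaks down easily when stations carry different numbers of channels or several response epochs. A hedged alternative sketch built only on calls used elsewhere in these examples (file name assumed; the small time offset follows the remark in Example #28):

from obspy.xseed import Parser

p = Parser("dataless.example.seed")        # assumed dataless file
paz_by_channel = {}
for chan in p.getInventory()["channels"]:
    seed_id = chan["channel_id"]           # e.g. "BW.MANZ..EHZ"
    # query a little after the epoch start to avoid ambiguous matches
    paz_by_channel[seed_id] = p.getPAZ(seed_id, chan["start_date"] + 600)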
Example #7
 def dSEED_XML(self, metadata_path=None):
     """
     Converts a dataless SEED file, given by an absolute or relative
     file path, into an XML-SEED (XSEED) file: x.dataless is converted
     into x.xml, written to the current working directory.
     """
     if metadata_path is None: 
         metadata_path = self.metadata_path
             
     dataless_basename =  os.path.basename(metadata_path)
     xml_name = os.path.splitext(dataless_basename)[0]
     xml_path = '{}.xml'.format(xml_name)
     sp = Parser(metadata_path)
     sp.writeXSEED(xml_path) 
Example #8
    def dSEED_XML(self, metadata_path=None):
        """
        Converts a dataless SEED file, given by an absolute or relative
        file path, into an XML-SEED (XSEED) file: x.dataless is converted
        into x.xml, written to the current working directory.
        """
        if metadata_path is None:
            metadata_path = self.metadata_path

        dataless_basename = os.path.basename(metadata_path)
        xml_name = os.path.splitext(dataless_basename)[0]
        xml_path = '{}.xml'.format(xml_name)
        sp = Parser(metadata_path)
        sp.writeXSEED(xml_path)
Example #9
    def import_dataless(self):
        """
        Reads the dataless SEED file at self.metadata_path and returns
        the resulting Parser object.
        """
        metadata_path = self.metadata_path

        return Parser(metadata_path)
Example #10
 def format(request, data, res_name):
     """
     Converts uploaded XSEED data to dataless SEED and returns it as a
     file attachment; on failure the original data is returned unchanged.
     """
     try:
         p = Parser()
         p.read(data)
         result = p.getSEED()
     except:
         return data
     # set file name
     request.setHeader('Content-Disposition',
                       'attachment; filename=%s.dataless' \
                       % res_name)
     # set content type
     request.setHeader('content-type',
                       'application/octet-stream')
     return result
Example #11
 def test_channel_in_parser(self):
     """
     Tests if a given channel is part of a Parser object.
     """
     starttime = UTCDateTime(2007, 2, 12, 10, 30, 28, 197700)
     endtime = UTCDateTime(2007, 2, 12, 11, 35, 28, 197700)
     channel_id = "ES.ECAL..HHE"
     # An empty file should of course not contain much.
     parser_object = Parser(
         os.path.join(self.data_dir, "channelless_datalessSEED"))
     self.assertFalse(
         utils.channel_in_parser(parser_object, channel_id, starttime,
                                 endtime))
     # Now read a file that actually contains data.
     channel_id = "IU.PAB.00.BHE"
     starttime = UTCDateTime(1999, 2, 18, 10, 0)
     endtime = UTCDateTime(2009, 8, 13, 19, 0)
     parser_object = Parser(os.path.join(self.data_dir, "dataless.IU_PAB"))
     # This is an exact fit of the start and end times in this file.
     self.assertTrue(
         utils.channel_in_parser(parser_object, channel_id, starttime,
                                 endtime))
     # Now try some others that do not fit.
     self.assertFalse(
         utils.channel_in_parser(parser_object, channel_id, starttime - 1,
                                 endtime))
     self.assertFalse(
         utils.channel_in_parser(parser_object, channel_id, starttime,
                                 endtime + 1))
     self.assertFalse(
         utils.channel_in_parser(parser_object, channel_id + "x", starttime,
                                 endtime))
     self.assertFalse(
         utils.channel_in_parser(parser_object, channel_id, starttime - 200,
                                 starttime - 100))
     self.assertFalse(
         utils.channel_in_parser(parser_object, channel_id, endtime + 100,
                                 endtime + 200))
     # And some that do fit.
     self.assertTrue(
         utils.channel_in_parser(parser_object, channel_id, starttime,
                                 starttime + 10))
     self.assertTrue(
         utils.channel_in_parser(parser_object, channel_id, endtime - 100,
                                 endtime))
Example #12
    def _extract_index_values_seed(self, filename):
        """
        Reads SEED files and extracts some keys per channel.
        """
        try:
            p = Parser(filename)
        except:
            msg = "Could not read SEED file '%s'." % filename
            raise ValueError(msg)
        channels = p.getInventory()["channels"]

        channels = [[
            _i["channel_id"],
            int(_i["start_date"].timestamp),
            int(_i["end_date"].timestamp) if _i["end_date"] else None,
            _i["latitude"], _i["longitude"], _i["elevation_in_m"],
            _i["local_depth_in_m"]
        ] for _i in channels]

        return channels
Example #13
    def locs_from_dataless(self, metadata_path=None):
        """
        Function that returns a numpy array of shape (N, 3) holding the
        longitude, latitude (decimal degrees) and elevation in m of every
        channel in a dataless SEED file.
        """
        if metadata_path is None:
            metadata_path = self.metadata_path

        sp = Parser(metadata_path)

        metadata = Parser.getInventory(sp)

        lats = np.asarray([float(i['latitude']) for i in metadata['channels']])

        lons = np.asarray(
            [float(i['longitude']) for i in metadata['channels']])

        elev = np.asarray(
            [float(i['elevation_in_m']) for i in metadata['channels']])

        return np.column_stack((lons, lats, elev))
Example #14
def _read_SEED(string_io):
    """
    Attempts to read the file as a SEED file. Returns False if it is not
    a valid SEED file.
    """
    try:
        parser = Parser(string_io)
    except:
        return False
    if len(str(parser)) == 0:
        return False
    channels = parser.getInventory()["channels"]

    for channel in channels:
        channel_id = channel.pop("channel_id")
        del channel["sampling_rate"]
        net, sta, loc, cha = channel_id.split(".")
        channel["network"] = net
        channel["station"] = sta
        channel["location"] = loc
        channel["channel"] = cha
        if not channel["end_date"]:
            time = channel["start_date"] + 2 * 86400
        else:
            time = channel["start_date"] + 0.5 * (channel["end_date"] -
                channel["start_date"])
        try:
            location = parser.getCoordinates(channel_id, time)
            channel["latitude"] = location["latitude"]
            channel["longitude"] = location["longitude"]
            channel["elevation"] = location["elevation"]
            channel["local_depth"] = location["local_depth"]
        except:
            msg = ("Cannot retrieve location for channel. This happens when "
                "overlapping time periods are defined in the SEED file. "
                "Please fix the file. Contents of the file:\n")
            msg += str(parser)
            raise InvalidObjectError(msg)
        channel["format"] = parser._format
    return channels
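The coordinate lookup used above also works on its own. A minimal hedged sketch reusing the file and channel from Example #11; the query time is an assumption inside that channel's epoch:

from obspy.core import UTCDateTime
from obspy.xseed import Parser

parser = Parser("dataless.IU_PAB")
coords = parser.getCoordinates("IU.PAB.00.BHE", UTCDateTime(2000, 1, 1))
print("lat=%s  lon=%s  elev=%s  depth=%s" % (
    coords["latitude"], coords["longitude"],
    coords["elevation"], coords["local_depth"]))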
Example #15
    def stats_from_dataless(self, metadata_path=None):
        """
        Function that returns an array of the N station names (station_id)
        found in a dataless SEED file.
        """
        if metadata_path is None: 
            metadata_path = self.metadata_path
            
        sp = Parser(metadata_path)

        metadata = Parser.getInventory(sp)
        stats = np.asarray([stat['station_id'] for 
                           stat in metadata['stations']])
        return stats
Example #16
 def format(request, data, res_name):
     """
     Converts uploaded XSEED data to RESP files: returns a ZIP archive of
     all responses, or the single matching RESP file if a channel is
     requested. On failure the original data is returned unchanged.
     """
     channel = str(request.args0.get('channel', '')).upper()
     try:
         p = Parser()
         p.read(data)
         resp_list = p.getRESP()
         # Create a ZIP archive.
         zip_fh = StringIO()
         if channel == '':
             zip_file = zipfile.ZipFile(zip_fh, "w")
             for response in resp_list:
                 response[1].seek(0, 0)
                 zip_file.writestr(response[0], response[1].read())
             zip_file.close()
             zip_fh.seek(0)
             data = zip_fh.read()
             res_name += os.extsep + "zip"
         else:
             for response in resp_list:
                 if response[0][-3:] != channel:
                     continue
                 response[1].seek(0, 0)
                 data = response[1].read()
                 res_name = response[0]
                 break
     except:
         return data
     if channel == '':
         # set content type
         request.setHeader('content-type', 'application/zip')
     # set file name
     request.setHeader('Content-Disposition',
                      'attachment; filename=%s' % res_name)
     return data
Example #17
 def _parse_seed(self, station_item, all_stations):
     """
     Helper function to parse SEED and XSEED files.
     """
     parser = Parser(station_item)
     for station in parser.stations:
         network_code = None
         station_code = None
         latitude = None
         longitude = None
         elevation = None
         local_depth = None
         for blockette in station:
             if blockette.id not in [50, 52]:
                 continue
             elif blockette.id == 50:
                 network_code = str(blockette.network_code)
                 station_code = str(blockette.station_call_letters)
                 continue
             elif blockette.id == 52:
                 latitude = blockette.latitude
                 longitude = blockette.longitude
                 elevation = blockette.elevation
                 local_depth = blockette.local_depth
                 break
         if None in [
                 network_code, station_code, latitude, longitude, elevation,
                 local_depth
         ]:
             msg = "Could not parse %s" % station_item
             raise ValueError(msg)
         stat = {
             "id": "%s.%s" % (network_code, station_code),
             "latitude": latitude,
             "longitude": longitude,
             "elevation_in_m": elevation,
             "local_depth_in_m": local_depth
         }
         if stat["id"] in all_stations:
             all_stations[stat["id"]].update(stat)
         else:
             all_stations[stat["id"]] = stat
Example #18
def getStationDataless(netsta):
	#the function that returns the dataless for a given station
	net = netsta[:2].upper()
	sta = netsta[2:].upper()
	netsta = '_'.join([net,sta])
	if os.path.exists(staDatalessPath + 'DATALESS.' + netsta + '.seed'):
		station = []
		parsedDataless = aslParser(staDatalessPath + 'DATALESS.' + netsta + '.seed')
		for blockette in parsedDataless.stations:
			station.extend(blockette)
		return station
	else:
		parsedDataless = Parser(netDatalessPath + net + '.dataless')
		if len(netsta) > 2:
			sta = netsta[2:].upper()
			for station in parsedDataless.stations:
				for blockette in station:
					if blockette.id == 50:
						if blockette.station_call_letters == sta:
							return station
Example #19
    def locs_from_dataless(self, metadata_path=None):
        """
        Function that returns a numpy array of shape (N, 3) holding the
        longitude, latitude (decimal degrees) and elevation in m of every
        channel in a dataless SEED file.
        """
        if metadata_path is None: 
            metadata_path = self.metadata_path
            
        sp = Parser(metadata_path)

        metadata = Parser.getInventory(sp)

        lats = np.asarray([float(i['latitude']) for i 
                           in metadata['channels']])
                           
        lons = np.asarray([float(i['longitude']) for i 
                           in metadata['channels']])

        elev = np.asarray([float(i['elevation_in_m']) for i 
                           in metadata['channels']])
        
        return np.column_stack((lons, lats, elev))
Example #20
 def load_dataless(self, parser_data):
    ''' Asks the user to locate one or more dataless files and stores,
    for each selected file, an obspy Parser object in the parser_data
    dictionary. The key is the part of the file path before the first
    dot. Returns the updated parser_data dictionary.'''

    dir_dl = askopenfilenames()

    for dir in dir_dl:
       key_name = ""  # key name for this dictionary entry
       for c in dir:
          if c != '.':
             key_name += c
          else:
             break

       parser = Parser(dir)
       parser_data[key_name] = parser

    return parser_data
Example #21
from obspy.core import read
from obspy.xseed import Parser
from obspy.signal import PPSD

st = read("http://examples.obspy.org/BW.KW1..EHZ.D.2011.037")
tr = st.select(id="BW.KW1..EHZ")[0]
parser = Parser("http://examples.obspy.org/dataless.seed.BW_KW1")
paz = parser.getPAZ(tr.id)
ppsd = PPSD(tr.stats, paz)
ppsd.add(st)

st = read("http://examples.obspy.org/BW.KW1..EHZ.D.2011.038")
ppsd.add(st)

ppsd.plot()
Example #22
                                           os.path.basename(seedfile))
     print msg,
 # fetch original SEED file
 fp = open(file, 'r')
 org_seed = fp.read()
 fp.close()
 # set compact date flag
 compact = False
 if os.path.basename(file) in compact_date_files:
     compact = True
 # start parsing
 try:
     print "rS",
     sys.stdout.flush()
     # parse SEED
     sp = Parser(org_seed)
     print "wS",
     sys.stdout.flush()
     # write SEED to compare to original SEED.
     f1 = open(seedfile, 'w')
     seed = sp.getSEED(compact=compact)
     f1.write(seed)
     f1.close()
     print "cS",
     sys.stdout.flush()
     # Compare to original SEED.
     utils.compareSEED(org_seed, seed)
     print "wX",
     sys.stdout.flush()
     # generate XSEED versions 1.0 and 1.1
     f1 = open(xseedfile_10, 'w')
Example #23
from obspy.signal.psd import PPSD
from cpsd import PPSD
from obspy.core import *
from obspy.xseed import Parser

st = read("BW.KW1..EHZ.D.2011.090")
tr = st[0]
p = Parser("dataless.seed.BW_KW1")
paz = p.getPAZ("BW.KW1..EHZ")
ppsd = PPSD(tr.stats, paz)
#ppsd.add(tr)
#ppsd.add(st[1])
#ppsd.plot()
#ppsd.save("/tmp/ppsd")
Example #24
            respfiles = glob.glob(os.path.join(args.inputFolder, '*.resp'))
            if not len(seedfiles):
                if not len(respfiles):
                    print 'A dataless SEED file (ending in .seed) or a RESP file (ending in .resp) must be supplied with input SAC files. Exiting.'
                    sys.exit(1)
                else:
                    seedresp = {
                        'filename': respfiles[0],  # RESP filename
                        # when using Trace/Stream.simulate() the "date" parameter can
                        # also be omitted, and the starttime of the trace is then used.
                        'date': obspy.UTCDateTime(etime),
                        # Units to return response in ('DIS', 'VEL' or 'ACC')
                        'units': 'ACC'
                    }
            else:
                parser = Parser(seedfiles[0])
        elif args.source == 'unam':
            tdatafiles = glob.glob(os.path.join(args.inputFolder,
                                                '*'))  #grab everything
            datafiles = []
            for dfile in tdatafiles:
                fname, fext = os.path.splitext(dfile)
                if re.match('\d', fext[1:]) is not None:
                    datafiles.append(dfile)
        else:
            print 'Data source %s not supported.' % args.source
            sys.exit(1)

    traces = []
    for dfile in datafiles:
        if args.source == 'knet':
Example #25
 # skip directories
 if not os.path.isfile(file):
     continue
 # create folder from filename
 seedfile = os.path.basename(file)
 resp_path = os.path.join(path, seedfile)
 # skip existing directories
 if os.path.isdir(resp_path):
     print "Skipping", os.path.join(relpath, seedfile)
     continue
 else:
     os.mkdir(resp_path)
     print "Parsing %s\t\t" % os.path.join(relpath, seedfile)
 # Create the RESP file.
 try:
     sp = Parser(file)
     sp.writeRESP(folder=resp_path)
     sp.writeRESP(folder=resp_path, zipped=True)
     # Compare with RESP files generated with rdseed from IRIS if existing
     for resp_file in glob.iglob(resp_path + os.sep + '*'):
         print '  ' + os.path.basename(resp_file)
         org_resp_file = resp_file.replace('output' + os.sep,
                                           'data' + os.sep)
         if os.path.exists(org_resp_file):
             _compareRESPFiles(org_resp_file, resp_file)
 except Exception, e:
     # remove all related files
     if os.path.isdir(resp_path):
         for f in glob.glob(os.path.join(resp_path, '*')):
             os.remove(f)
         os.removedirs(resp_path)
Example #26
from obspy.core import read, UTCDateTime
from obspy.core.util.geodetics import gps2DistAzimuth
from obspy.xseed import Parser
from math import log10

st = read("../data/LKBD.MSEED")

paz_wa = {
    'sensitivity': 2800,
    'zeros': [0j],
    'gain': 1,
    'poles': [-6.2832 - 4.7124j, -6.2832 + 4.7124j]
}

parser = Parser("../data/LKBD.dataless")
paz_le3d5s = parser.getPAZ("CH.LKBD..EHZ")

st.simulate(paz_remove=paz_le3d5s, paz_simulate=paz_wa, water_level=10)

t = UTCDateTime("2012-04-03T02:45:03")
st.trim(t, t + 50)

tr_n = st.select(component="N")[0]
ampl_n = max(abs(tr_n.data))
tr_e = st.select(component="E")[0]
ampl_e = max(abs(tr_e.data))
ampl = max(ampl_n, ampl_e)

sta_lat = 46.38703
sta_lon = 7.62714
event_lat = 46.218
Example #27
minlatitude = -45.0
minlongitude = 110.0
maxlatitude = 0.0
maxlongitude = 160.0

plt.figure(1)
colours = ["black", "blue", "green", "yellow", "purple", 
           "orange", "white", "red", "brown"]

dataless_files = glob.glob('/home/boland/Dropbox/University/UniMelb/AGOS/PROGRAMS/dataless/*.dataless')

for index, files in enumerate(dataless_files):
    
    network = os.path.basename(files).split('.')[0]
    sp = Parser(files)
        
    info = sp.getInventory()
        
    coordinates = [(i['longitude'], i['latitude'], i['channel_id'])
                   for i in info['channels'][:]
                   if i['channel_id'][-3:] == "BHZ"]

#dataless_inventories = []
#if USE_DATALESSPAZ:
#    with warnings.catch_warnings():
#        warnings.simplefilter('ignore')
#        dataless_inventories = psstation.get_dataless_inventories(DATALESS_DIR,
#                                                                  verbose=True)
        
#        info = dataless_inventories.getInventory()
Example #28
#!/usr/bin/env python
# test_dataless.py
#
# Checks whether it is possible to read poles and zeroes from a dataless file
# Prints the errors to stdout
#
# 2012-02-15 - Claudio Satriano <*****@*****.**>
from obspy.xseed import Parser
from datetime import timedelta
from glob import glob

for dlessfile in glob('dataless.*'):
    try:
        sp = Parser(dlessfile)
    except IOError:
        print 'Error reading file:', dlessfile
        continue

    blk = sp.blockettes
    net = blk[50][0].network_code
    sta = blk[50][0].station_call_letters

    startdates = [b.start_date for b in blk[52]]
    #enddates = [b.end_date for b in blk[52]]
    chans = [b.channel_identifier for b in blk[52]]
    locs = [b.location_identifier for b in blk[52]]

    for i in range(0, len(startdates)):
        channel_id = net + '.' + sta + '.' + locs[i] + '.' + chans[i]
        # If we do not add at least 7 minutes to the start_time, ObsPy says:
        # "None or more than one channel with the given description"
Example #29
#Get station metadata in individual files
stations = [
    'RT01', 'RT02', 'RT03', 'RT05', 'RT06', 'RT07', 'RT08', 'RT09', 'RT10',
    'RT11', 'RT12', 'RT12', 'RT13', 'RT14', 'RT15', 'RT16', 'RT17', 'RT18',
    'RT19', 'RT20', 'RT21', 'NS01', 'NS02', 'NS03', 'NS04', 'NS05', 'NS06',
    'NS07', 'NS08', 'NS09', 'NS10', 'NS11', 'NS12', 'NS13', 'NS14', 'NS15',
    'NS16', 'NS18', 'WPRZ', 'HRRZ', 'PRRZ', 'ALRZ', 'ARAZ', 'THQ2', 'RT23',
    'RT22'
]
new_stas = ['RT23', 'RT22', 'NS15', 'NS16', 'NS18']
for station in new_stas:
    try:
        sta_inv = client.get_stations(station=station, level="response")
    except FDSNException:
        print('No StationXML available')
    sta_inv.write('/home/chet/data/GeoNet_catalog/stations/station_xml/' +
                  station + '_STATIONXML.xml',
                  format='STATIONXML')
"""
Intermediate step to use stationxml-converter java app (IRIS)
Perhaps can be done from this script?
"""

dataless_files = glob('/home/chet/data/GeoNet_catalog/stations/*.dataless')
for file1 in dataless_files:
    #Read dataless to obspy, then write to XSEED
    sp = Parser(file1)
    sp.writeXSEED('/home/chet/data/GeoNet_catalog/stations/' +
                  str(file1[-13:-9]) + '_xseed.xml')
Example #30
    plt.ylabel("Source Radius [m]", fontsize="x-large")
    plt.grid()
    plt.savefig("/Users/lion/Desktop/SourceRadius.pdf")


if __name__ == "__main__":
    # Read all instrument responses.
    widgets = ['Parsing instrument responses...', progressbar.Percentage(),
        ' ', progressbar.Bar()]
    pbar = progressbar.ProgressBar(widgets=widgets,
        maxval=len(STATION_FILES)).start()
    parsers = {}
    # Read all waveform files.
    for _i, xseed in enumerate(STATION_FILES):
        pbar.update(_i)
        parser = Parser(xseed)
        channels = [c['channel_id'] for c in parser.getInventory()['channels']]
        parsers_ = dict.fromkeys(channels, parser)
        if any([k in parsers for k in parsers_.keys()]):
            msg = "Channel(s) defined in more than one metadata file."
            warnings.warn(msg)
        parsers.update(parsers_)
    pbar.finish()

    # Parse all waveform files.
    widgets = ['Indexing waveform files...     ', progressbar.Percentage(),
        ' ', progressbar.Bar()]
    pbar = progressbar.ProgressBar(widgets=widgets,
        maxval=len(WAVEFORM_FILES)).start()
    waveform_index = {}
    # Read all waveform files.
Example #31
MIN_STATIONS = 3  # minimum of coincident stations for alert
SUMMARY = "/scratch/uh_trigger.txt"

mseed_files = []
parsers = []
for station in STATIONS:
    # waveforms
    dir = os.path.join(BASEDIR, str(TIME.year), NET, station, CHANNEL)
    # XXX maybe read the day before/after to make sure we dont miss data around
    # 00:00
    files = glob.glob("%s*/*.%s" % (dir, TIME.julday))
    mseed_files.extend(files)
    # metadata
    files = glob.glob("%s/dataless*%s" % (BASEDIR_DATALESS, station))
    for file in files:
        parsers.append(Parser(file))

if not mseed_files:
    pass  # XXX print/mail warning

inst = cornFreq2Paz(1.0)
nfft = 4194304  # next nfft of 5h
last_endtime = 0
last_id = "--"

trigger_list = []
summary = []
summary.append("#" * 79)
for file in mseed_files:
    summary.append(file)
    try:
Example #32
from os import path, makedirs
from obspy.core import utcdatetime, event
from obspy.core.event import Catalog, Event, Magnitude, Origin, StationMagnitude
import sys
from obspy.neic.client import Client
#from obspy.clients.neic.client import Client
from obspy.xseed import Parser
#from obspy.io.xseed import Parser
from obspy.xseed.utils import SEEDParserException
#from obspy.io.xseed.utils import SEEDParserException
from multiprocessing import Pool, Process, Queue, cpu_count
sys.path.append('../../')
from catalogue.parsers import parse_ggcat

# we will use dataless seed from IRIS to get station information
parser = Parser("../../data/AU.seed")


def sind(x):
    return np.sin(x / 180. * np.pi)


def cosd(x):
    return np.cos(x / 180. * np.pi)


def tand(x):
    return np.tan(x / 180. * np.pi)


def arcsind(x):
Example #33
from obspy.core import read
from obspy.xseed import Parser
from obspy.signal import seisSim, cosTaper, highpass
from matplotlib.mlab import detrend
import matplotlib.pyplot as plt
import sys

try:
    file = sys.argv[1]
except:
    print __doc__
    raise

# parse DataLess part
sp = Parser(file)

# parse DataOnly/MiniSEED part
stream = read(file)

for tr in stream:
    # get poles, zeros, sensitivity and gain
    paz = sp.getPAZ(tr.stats.channel)
    # Uncomment the following for:
    # Integrate by adding a zero at the position zero
    # As for the simulation the poles and zeros are inverted and convolved
    # in the frequency domain, this is basically multiplying by 1/jw, which
    # is an integration in the frequency domain
    # See "Of Poles and Zeros", Frank Scherbaum, Springer 2007
    #paz['zeros'].append(0j)
    # preprocessing
Example #34
        # SEED Handling.
        if seed_file in faulty_seed_files:
            continue

        #from obspy import UTCDateTime
        #if channel.start_date != UTCDateTime(1999, 12, 28, 22, 24, 39):
        #continue

        print chan + ": ",

        unit_known_to_evalresp = True

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            try:
                p = Parser(seed_file)
            except:
                faulty_seed_files.append(seed_file)
                counter["random_error"] += 1
                print_warning("Failed to read SEED file!")
                continue

        try:
            all_resps = p.getRESP()
        except:
            counter["random_error"] += 1
            print_warning("getRESP() failed. Very likely a faulty SEED file")
            continue
        resp_string = "RESP.%s.%s.%s.%s" % (net_id, stat_id, loc_id, chan_id)
        all_resps = [_i for _i in all_resps if _i[0] == resp_string]
Example #35
def preprocessing_function(processing_info, iteration):  # NOQA
    """
    Function to perform the actual preprocessing for one individual seismogram.
    This is part of the project so it can change depending on the project.

    Please keep in mind that you will have to manually update this file to a
    new version if LASIF is ever updated.

    You can do whatever you want in this function as long as the function
    signature is honored. The file is read from ``"input_filename"`` and
    written to ``"output_filename"``.

    One goal of this function is to make sure that the data is available at the
    same time steps as the synthetics. The first time sample of the synthetics
    will always be the origin time of the event.

    Furthermore the data has to be converted to m/s.

    :param processing_info: A dictionary containing information about the
        file to be processed. It will have the following structure.
    :type processing_info: dict

    .. code-block:: python

        {'event_information': {
            'depth_in_km': 22.0,
            'event_name': 'GCMT_event_VANCOUVER_ISLAND...',
            'filename': '/.../GCMT_event_VANCOUVER_ISLAND....xml',
            'latitude': 49.53,
            'longitude': -126.89,
            'm_pp': 2.22e+18,
            'm_rp': -2.78e+18,
            'm_rr': -6.15e+17,
            'm_rt': 1.98e+17,
            'm_tp': 5.14e+18,
            'm_tt': -1.61e+18,
            'magnitude': 6.5,
            'magnitude_type': 'Mwc',
            'origin_time': UTCDateTime(2011, 9, 9, 19, 41, 34, 200000),
            'region': u'VANCOUVER ISLAND, CANADA REGION'},
         'input_filename': u'/.../raw/7D.FN01A..HHZ.mseed',
         'output_filename': u'/.../processed_.../7D.FN01A..HHZ.mseed',
         'process_params': {
            'dt': 0.75,
            'highpass': 0.007142857142857143,
            'lowpass': 0.0125,
            'npts': 2000},
         'station_coordinates': {
            'elevation_in_m': -54.0,
            'latitude': 46.882,
            'local_depth_in_m': None,
            'longitude': -124.3337},
         'station_filename': u'/.../STATIONS/RESP/RESP.7D.FN01A..HH*'}

    Please note that you also got the iteration object here, so if you
    want some parameters to change depending on the iteration, just use
    if/else on the iteration objects.

    >>> iteration.name  # doctest: +SKIP
    '11'
    >>> iteration.get_process_params()  # doctest: +SKIP
    {'dt': 0.75,
     'highpass': 0.01,
     'lowpass': 0.02,
     'npts': 500}

    Use ``$ lasif shell`` to play around and figure out what the iteration
    objects can do.

    """
    def zerophase_chebychev_lowpass_filter(trace, freqmax):
        """
        Custom Chebychev type two zerophase lowpass filter useful for
        decimation filtering.

        This filter is stable up to a reduction in frequency with a factor of
        10. If more reduction is desired, simply decimate in steps.

        Partly based on a filter in ObsPy.

        :param trace: The trace to be filtered.
        :param freqmax: The desired lowpass frequency.

        Will be replaced once ObsPy has a proper decimation filter.
        """
        # rp - maximum ripple of passband, rs - attenuation of stopband
        rp, rs, order = 1, 96, 1e99
        ws = freqmax / (trace.stats.sampling_rate * 0.5)  # stop band frequency
        wp = ws  # pass band frequency

        while True:
            if order <= 12:
                break
            wp *= 0.99
            order, wn = signal.cheb2ord(wp, ws, rp, rs, analog=0)

        b, a = signal.cheby2(order, rs, wn, btype="low", analog=0, output="ba")

        # Apply twice to get rid of the phase distortion.
        trace.data = signal.filtfilt(b, a, trace.data)

    # =========================================================================
    # Read seismograms and gather basic information.
    # =========================================================================
    starttime = processing_info["event_information"]["origin_time"]
    endtime = starttime + processing_info["process_params"]["dt"] * \
        (processing_info["process_params"]["npts"] - 1)
    duration = endtime - starttime

    st = obspy.read(processing_info["input_filename"])

    if len(st) != 1:
        warnings.warn("The file '%s' has %i traces and not 1. "
                      "Skip all but the first" % (
                          processing_info["input_filename"], len(st)))
    tr = st[0]

    # Trim with a short buffer in an attempt to avoid boundary effects.
    # starttime is the origin time of the event
    # endtime is the origin time plus the length of the synthetics
    tr.trim(starttime - 0.2 * duration, endtime + 0.2 * duration)

    # =========================================================================
    # Some basic checks on the data.
    # =========================================================================
    # Non-zero length
    if not len(tr):
        msg = "No data found in time window around the event. File skipped."
        raise LASIFError(msg)

    # No nans or infinity values allowed.
    if not np.isfinite(tr.data).all():
        msg = "Data contains NaNs or Infs. File skipped"
        raise LASIFError(msg)

    # =========================================================================
    # Step 1: Decimation
    # Decimate with the factor closest to the sampling rate of the synthetics.
    # The data is still oversampled by a large amount so there should be no
    # problems. This has to be done here so that the instrument correction is
    # reasonably fast even for input data with a large sampling rate.
    # =========================================================================
    while True:
        decimation_factor = int(processing_info["process_params"]["dt"] /
                                tr.stats.delta)
        # Decimate in steps for large sample rate reductions.
        if decimation_factor > 8:
            decimation_factor = 8
        if decimation_factor > 1:
            new_nyquist = tr.stats.sampling_rate / 2.0 / float(
                decimation_factor)
            zerophase_chebychev_lowpass_filter(tr, new_nyquist)
            tr.decimate(factor=decimation_factor, no_filter=True)
        else:
            break

    # =========================================================================
    # Step 2: Detrend and taper.
    # =========================================================================
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(max_percentage=0.05, type="hann")

    # =========================================================================
    # Step 3: Instrument correction
    # Correct seismograms to velocity in m/s.
    # =========================================================================
    station_file = processing_info["station_filename"]

    # check if the station file actually exists ==============================
    if not processing_info["station_filename"]:
        msg = "No station file found for the relevant time span. File skipped"
        raise LASIFError(msg)

    # This is really necessary as other filters are just not sharp enough
    # and lots of energy from other frequency bands leaks into the frequency
    # band of interest
    freqmin = processing_info["process_params"]["highpass"]
    freqmax = processing_info["process_params"]["lowpass"]

    f2 = 0.9 * freqmin
    f3 = 1.1 * freqmax
    f1 = 0.8 * f2
    f4 = 1.3 * f3
    pre_filt = (f1, f2, f3, f4)

    # processing for seed files ==============================================
    if "/SEED/" in station_file:
        # XXX: Check if this is m/s. In all cases encountered so far it
        # always is, but SEED is in theory also able to specify corrections
        # to other units...
        parser = Parser(station_file)
        try:
            # The simulate might fail but might still modify the data. The
            # backup is needed for the backup plan to only correct using
            # poles and zeros.
            backup_tr = tr.copy()
            try:
                tr.simulate(seedresp={"filename": parser, "units": "VEL",
                                      "date": tr.stats.starttime},
                            pre_filt=pre_filt, zero_mean=False, taper=False)
            except ValueError:
                warnings.warn("Evalresp failed, will only use the Poles and "
                              "Zeros stage")
                tr = backup_tr
                paz = parser.getPAZ(tr.id, tr.stats.starttime)
                if paz["sensitivity"] == 0:
                    warnings.warn("Sensitivity is 0 in SEED file and will "
                                  "not be taken into account!")
                    tr.simulate(paz_remove=paz, remove_sensitivity=False,
                                pre_filt=pre_filt, zero_mean=False,
                                taper=False)
                else:
                    tr.simulate(paz_remove=paz, pre_filt=pre_filt,
                                zero_mean=False, taper=False)
        except Exception:
            msg = ("File  could not be corrected with the help of the "
                   "SEED file '%s'. Will be skipped.") \
                % processing_info["station_filename"]
            raise LASIFError(msg)
    # processing with RESP files =============================================
    elif "/RESP/" in station_file:
        try:
            tr.simulate(seedresp={"filename": station_file, "units": "VEL",
                                  "date": tr.stats.starttime},
                        pre_filt=pre_filt, zero_mean=False, taper=False)
        except ValueError as e:
            msg = ("File  could not be corrected with the help of the "
                   "RESP file '%s'. Will be skipped. Due to: %s") \
                % (processing_info["station_filename"], str(e))
            raise LASIFError(msg)
    elif "/StationXML/" in station_file:
        try:
            inv = obspy.read_inventory(station_file, format="stationxml")
        except Exception as e:
            msg = ("Could not open StationXML file '%s'. Due to: %s. Will be "
                   "skipped." % (station_file, str(e)))
            raise LASIFError(msg)
        tr.attach_response(inv)
        try:
            tr.remove_response(output="VEL", pre_filt=pre_filt,
                               zero_mean=False, taper=False)
        except Exception as e:
            msg = ("File  could not be corrected with the help of the "
                   "StationXML file '%s'. Due to: '%s'  Will be skipped.") \
                % (processing_info["station_filename"], e.__repr__())
            raise LASIFError(msg)
    else:
        raise NotImplementedError

    # =========================================================================
    # Step 4: Bandpass filtering
    # This has to be exactly the same filter as in the source time function
    # in the case of SES3D.
    # =========================================================================
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(0.05, type="cosine")
    tr.filter("bandpass", freqmin=freqmin, freqmax=freqmax, corners=3,
              zerophase=False)
    tr.detrend("linear")
    tr.detrend("demean")
    tr.taper(0.05, type="cosine")
    tr.filter("bandpass", freqmin=freqmin, freqmax=freqmax, corners=3,
              zerophase=False)

    # =========================================================================
    # Step 5: Interpolation
    # =========================================================================
    # Make sure that the data array is at least as long as the
    # synthetics array. Also add some buffer sample for the
    # spline interpolation to work in any case.
    buf = processing_info["process_params"]["dt"] * 5
    if starttime < (tr.stats.starttime + buf):
        tr.trim(starttime=starttime - buf, pad=True, fill_value=0.0)
    if endtime > (tr.stats.endtime - buf):
        tr.trim(endtime=endtime + buf, pad=True, fill_value=0.0)
    tr.interpolate(
        sampling_rate=1.0 / processing_info["process_params"]["dt"],
        method="weighted_average_slopes", starttime=starttime,
        npts=processing_info["process_params"]["npts"])

    # =========================================================================
    # Save processed data and clean up.
    # =========================================================================
    # Convert to single precision to save some space.
    tr.data = np.require(tr.data, dtype="float32", requirements="C")
    if hasattr(tr.stats, "mseed"):
        tr.stats.mseed.encoding = "FLOAT32"

    tr.write(processing_info["output_filename"], format=tr.stats._format)
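The SEED branch of this function reduces to a small pattern: try the full evalresp-based correction and fall back to the poles-and-zeros stage when it fails. A standalone hedged sketch (file names and pre-filter corners are assumptions; the original additionally keeps a backup copy of the trace because a failed simulate call may already have modified the data):

import obspy
from obspy.xseed import Parser

st = obspy.read("example.mseed")           # assumed waveform file
tr = st[0]
parser = Parser("example.dataless")        # assumed dataless SEED file
pre_filt = (0.005, 0.006, 0.1, 0.15)       # assumed corner frequencies

try:
    # full response removal via evalresp
    tr.simulate(seedresp={"filename": parser, "units": "VEL",
                          "date": tr.stats.starttime},
                pre_filt=pre_filt, zero_mean=False, taper=False)
except ValueError:
    # fall back to correcting with the poles-and-zeros stage only
    paz = parser.getPAZ(tr.id, tr.stats.starttime)
    tr.simulate(paz_remove=paz, pre_filt=pre_filt,
                zero_mean=False, taper=False)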
Example #36
maxlongitude = 160.0

plt.figure(1)
colours = [
    "black", "blue", "green", "yellow", "purple", "orange", "white", "red",
    "brown"
]

dataless_files = glob.glob(
    '/home/boland/Dropbox/University/UniMelb/AGOS/PROGRAMS/dataless/*.dataless'
)

for index, files in enumerate(dataless_files):

    network = os.path.basename(files).split('.')[0]
    sp = Parser(files)

    info = sp.getInventory()

    coordinates = [(i['longitude'], i['latitude'], i['channel_id'])
                   for i in info['channels'][:]
                   if i['channel_id'][-3:] == "BHZ"]

    #dataless_inventories = []
    #if USE_DATALESSPAZ:
    #    with warnings.catch_warnings():
    #        warnings.simplefilter('ignore')
    #        dataless_inventories = psstation.get_dataless_inventories(DATALESS_DIR,
    #                                                                  verbose=True)

    #        info = dataless_inventories.getInventory()
Example #37
    def add_stations(self, stations):
        """
        Add the desired output stations to the input file generator.

        Can currently deal with SEED/XML-SEED files and dictionaries of the
        following form:

            {"latitude": 123.4,
             "longitude": 123.4,
             "elevation_in_m": 123.4,
             "local_depth_in_m": 123.4,
             "id": "network_code.station_code"}

        `local_depth_in_m` is optional and will be assumed to be zero if not
        present. It denotes the burial of the sensor beneath the surface.

        If it is a SEED/XML-SEED files, all stations in it will be added.

        :type stations: List of filenames, list of dictionaries or a single
            filename, single dictionary.
        :param stations: The stations for which output files should be
            generated.
        """
        # Try to interpret it as json. If it works and results in a list or
        # dictionary, use it!
        try:
            json_s = json.loads(stations)
        except:
            pass
        else:
            # A simple string is also a valid JSON document.
            if isinstance(json_s, list) or isinstance(json_s, dict):
                stations = json_s

        # Thin wrapper to enable single element treatment.
        if isinstance(stations, dict) or not hasattr(stations, "__iter__") or \
                (hasattr(stations, "read") and
                 hasattr(stations.read, "__call__")):
            stations = [
                stations,
            ]

        all_stations = {}

        for station_item in stations:
            # Store the original pointer position to be able to restore it.
            original_position = None
            try:
                original_position = station_item.tell()
                station_item.seek(original_position, 0)
            except:
                pass

            # Download it if it is some kind of URL.
            if isinstance(station_item, basestring) and "://" in station_item:
                station_item = io.BytesIO(urllib2.urlopen(station_item).read())

            # If it is a dict do some checks and add it.
            if isinstance(station_item, dict):
                if "latitude" not in station_item or \
                        "longitude" not in station_item or \
                        "elevation_in_m" not in station_item or \
                        "id" not in station_item:
                    msg = (
                        "Each station dictionary needs to at least have "
                        "'latitude', 'longitude', 'elevation_in_m', and 'id' "
                        "keys.")
                    raise ValueError(msg)
                # Create new dict to not carry around any additional keys.
                stat = {
                    "latitude": float(station_item["latitude"]),
                    "longitude": float(station_item["longitude"]),
                    "elevation_in_m": float(station_item["elevation_in_m"]),
                    "id": str(station_item["id"])
                }
                try:
                    stat["local_depth_in_m"] = \
                        float(station_item["local_depth_in_m"])
                except:
                    pass
                all_stations[stat["id"]] = stat
                continue

            # Also accepts SAC files.
            if isSAC(station_item):
                st = read(station_item)
                for tr in st:
                    stat = {}
                    stat["id"] = "%s.%s" % (tr.stats.network, tr.stats.station)
                    stat["latitude"] = float(tr.stats.sac.stla)
                    stat["longitude"] = float(tr.stats.sac.stlo)
                    stat["elevation_in_m"] = float(tr.stats.sac.stel)
                    stat["local_depth_in_m"] = float(tr.stats.sac.stdp)
                    # lat/lng/ele must be given.
                    if stat["latitude"] == -12345.0 or \
                            stat["longitude"] == -12345.0 or \
                            stat["elevation_in_m"] == -12345.0:
                        warnings.warn("No coordinates for channel '%s'." %
                                      str(tr))
                        continue
                    # Local depth may be neglected.
                    if stat["local_depth_in_m"] == -12345.0:
                        del stat["local_depth_in_m"]
                    all_stations[stat["id"]] = stat
                    continue
                continue

            # Reset pointer.
            if original_position is not None:
                station_item.seek(original_position, 0)

            # SEED / XML-SEED
            try:
                Parser(station_item)
                is_seed = True
            except:
                is_seed = False
            # Reset.
            if original_position is not None:
                station_item.seek(original_position, 0)
            if is_seed is True:
                self._parse_seed(station_item, all_stations)
                continue

            # StationXML
            try:
                stations = extract_coordinates_from_StationXML(station_item)
            except:
                pass
            else:
                for station in stations:
                    all_stations[station["id"]] = station
                continue

            msg = "Could not read %s." % station_item
            raise ValueError(msg)

        self.__add_stations(all_stations.values())
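A hedged usage sketch for the dictionary form spelled out in the docstring; `gen` is an assumption standing in for an instance of the class that defines add_stations(), and the numbers are the placeholder values from the docstring:

# gen is assumed to be an instance of the input file generator class above.
gen.add_stations({"latitude": 123.4,
                  "longitude": 123.4,
                  "elevation_in_m": 123.4,
                  "local_depth_in_m": 123.4,
                  "id": "network_code.station_code"})

# SEED / XML-SEED file names (or a list mixing files and dictionaries)
# are accepted as well.
gen.add_stations("dataless.seed.BW_MANZ")   # assumed file name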
Example #38
from obspy import read
from obspy.xseed import Parser

st = read("http://examples.obspy.org/BW.BGLD..EH.D.2010.037")
parser = Parser("http://examples.obspy.org/dataless.seed.BW_BGLD")
st.simulate(seedresp={'filename': parser, 'units': "DIS"})
Example #39
         seedfiles = glob.glob(os.path.join(args.inputFolder,'*.seed'))
         respfiles = glob.glob(os.path.join(args.inputFolder,'*.resp'))
         if not len(seedfiles):
             if not len(respfiles):
                 print 'A dataless SEED file (ending in .seed) or a RESP file (ending in .resp) must be supplied with input SAC files. Exiting.'
                 sys.exit(1)
             else:
                 seedresp = {'filename': respfiles[0],  # RESP filename
                 # when using Trace/Stream.simulate() the "date" parameter can
                 # also be omitted, and the starttime of the trace is then used.
                 'date': obspy.UTCDateTime(etime),
                 # Units to return response in ('DIS', 'VEL' or 'ACC')
                 'units': 'ACC'
                 }
         else:
             parser = Parser(seedfiles[0])
     elif args.source == 'unam':
         tdatafiles = glob.glob(os.path.join(args.inputFolder,'*')) #grab everything
         datafiles = []
         for dfile in tdatafiles:
             fname,fext = os.path.splitext(dfile)
             if re.match('\d',fext[1:]) is not None:
                 datafiles.append(dfile)
     else:
         print 'Data source %s not supported.' % args.source
         sys.exit(1)
     
 
 traces = []
 for dfile in datafiles:
     if args.source == 'knet':
Example #41
def preprocess(db, stations, comps, goal_day, params, tramef_Z, tramef_E = np.array([]), tramef_N = np.array([])):
    datafilesZ = {}
    datafilesE = {}
    datafilesN = {}

    for station in stations:
        datafilesZ[station] = []
        datafilesE[station] = []
        datafilesN[station] = []
        net, sta = station.split('.')
        gd = datetime.datetime.strptime(goal_day, '%Y-%m-%d')
        files = get_data_availability(
            db, net=net, sta=sta, starttime=gd, endtime=gd)
        for file in files:
            comp = file.comp
            fullpath = os.path.join(file.path, file.file)
            if comp[-1] == 'Z':
                datafilesZ[station].append(fullpath)
            elif comp[-1] == 'E':
                datafilesE[station].append(fullpath)
            elif comp[-1] == 'N':
                datafilesN[station].append(fullpath)

    j = 0
    for istation, station in enumerate(stations):
        for comp in comps:
            files = eval("datafiles%s['%s']" % (comp, station))
            if len(files) != 0:
                logging.debug("%s.%s Reading %i Files" %
                              (station, comp, len(files)))
                stream = Stream()
                for file in sorted(files):
                    st = read(file, dtype=np.float,
                              starttime=UTCDateTime(gd),
                              endtime=UTCDateTime(gd)+86400)
                    for tr in st:
                        tr.data = tr.data.astype(np.float)
                    stream += st
                    del st

                logging.debug("Checking sample alignment")
                for i, trace in enumerate(stream):
                    stream[i] = check_and_phase_shift(trace)

                stream.sort()
                logging.debug("Checking Gaps")
                if len(getGaps(stream)) > 0:
                    max_gap = 10
                    only_too_long=False
                    while getGaps(stream) and not only_too_long:
                        too_long = 0
                        gaps = getGaps(stream)
                        for gap in gaps:
                            if int(gap[-1]) <= max_gap:
                                stream[gap[0]] = stream[gap[0]].__add__(stream[gap[1]], method=0, fill_value="interpolate")
                                stream.remove(stream[gap[1]])
                                break
                            else:
                                too_long += 1
                        if too_long == len(gaps):
                            only_too_long = True

                taper_length = 20.0 #seconds
                for trace in stream:
                    if trace.stats.npts < 4 * taper_length*trace.stats.sampling_rate:
                        trace.data = np.zeros(trace.stats.npts)
                    else:
                        trace.detrend(type="demean")
                        trace.detrend(type="linear")
                        taper_1s = taper_length * float(trace.stats.sampling_rate) / trace.stats.npts
                        cp = cosTaper(trace.stats.npts, taper_1s)
                        trace.data *= cp
                try:
                    stream.merge(method=0, fill_value=0.0)
                except:
                    continue

                logging.debug("%s.%s Slicing Stream to %s:%s" % (station, comp, utcdatetime.UTCDateTime(
                    goal_day.replace('-', '')), utcdatetime.UTCDateTime(goal_day.replace('-', '')) + params.goal_duration - stream[0].stats.delta))
                stream[0].trim(utcdatetime.UTCDateTime(goal_day.replace('-', '')), utcdatetime.UTCDateTime(
                    goal_day.replace('-', '')) + params.goal_duration - stream[0].stats.delta, pad=True, fill_value=0.0,
                    nearest_sample=False)


                if get_config(db, 'remove_response', isbool=True):
                    logging.debug('Removing instrument response')
                    response_format = get_config(db, 'response_format')
                    response_prefilt = eval(get_config(db, 'response_prefilt'))
                    files = glob.glob(os.path.join(get_config(db,
                                                              'response_path'),
                                                   "*"))
                    if response_format == "inventory":
                        firstinv = False
                        inventory = None
                        for file in files:
                            try:
                                inv = read_inventory(file)
                                if firstinv:
                                    inventory = inv
                                    firstinv = False
                                else:
                                    inventory += inv
                            except:
                                pass
                        stream.attach_response(inventory)
                        stream.remove_response(output='VEL',
                                               pre_filt=response_prefilt)
                    elif response_format == "dataless":
                        for file in files:
                            p = Parser(file)
                            try:
                                p.getPAZ(stream[0].id,
                                         datetime=UTCDateTime(gd))
                                break
                            except:
                                traceback.print_exc()
                                del p
                                continue
                        stream.simulate(seedresp={'filename': p, "units":"VEL"},
                                        pre_filt=response_prefilt,
                                        paz_remove=None,
                                        paz_simulate=None,)
                    elif response_format == "paz":
                        msg = "Unexpected type for `response_format`: %s" % \
                              response_format
                        raise TypeError(msg)
                    elif response_format == "resp":
                        msg = "Unexpected type for `response_format`: %s" % \
                              response_format
                        raise TypeError(msg)
                    else:
                        msg = "Unexpected type for `response_format`: %s" % \
                              response_format
                        raise TypeError(msg)
                trace = stream[0]

                logging.debug(
                    "%s.%s Highpass at %.2f Hz" % (station, comp, params.preprocess_highpass))
                trace.filter("highpass", freq=params.preprocess_highpass, zerophase=True)
                
                if trace.stats.sampling_rate != params.goal_sampling_rate:
                    logging.debug(
                        "%s.%s Lowpass at %.2f Hz" % (station, comp, params.preprocess_lowpass))
                    trace.filter("lowpass", freq=params.preprocess_lowpass, zerophase=True)

                    

                    if params.resampling_method == "Resample":
                        logging.debug("%s.%s Downsample to %.1f Hz" %
                                      (station, comp, params.goal_sampling_rate))
                        trace.data = resample(
                            trace.data, params.goal_sampling_rate / trace.stats.sampling_rate, 'sinc_fastest')

                    elif params.resampling_method == "Decimate":
                        logging.debug("%s.%s Decimate by a factor of %i" %
                                      (station, comp, params.decimation_factor))
                        trace.data = trace.data[::params.decimation_factor]
                    trace.stats.sampling_rate = params.goal_sampling_rate

                year, month, day, hourf, minf, secf, wday, yday, isdst = trace.stats.starttime.utctimetuple()

                if j == 0:
                    t = time.strptime("%04i:%02i:%02i:%02i:%02i:%02i" %
                                      (year, month, day, hourf, minf, secf), "%Y:%m:%d:%H:%M:%S")
                    basetime = calendar.timegm(t)

                if len(trace.data) % 2 != 0:
                    trace.data = np.append(trace.data, 0.)

                if comp == "Z":
                    tramef_Z[istation] = trace.data
                elif comp == "E":
                    tramef_E[istation] = trace.data
                elif comp == "N":
                    tramef_N[istation] = trace.data

                del trace, stream
    if len(tramef_E) != 0:
        return basetime, tramef_Z, tramef_E, tramef_N
    else:
        return basetime, tramef_Z
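The dataless branch above probes each metadata file until getPAZ() succeeds for the trace ID and epoch; the same idiom in isolation, with hypothetical file and folder names:

import glob
from obspy import read
from obspy.xseed import Parser

st = read("example.mseed")                     # hypothetical waveform file
parser = None
for metadata_file in glob.glob("dataless/*"):  # hypothetical metadata folder
    candidate = Parser(metadata_file)
    try:
        # Raises if this file does not describe the trace's channel/epoch.
        candidate.getPAZ(st[0].id, datetime=st[0].stats.starttime)
        parser = candidate
        break
    except Exception:
        continue
if parser is not None:
    st.simulate(seedresp={'filename': parser, 'units': 'VEL'},
                paz_remove=None)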
Example #42
0
        # SEED Handling.
        if seed_file in faulty_seed_files:
            continue

        #from obspy import UTCDateTime
        #if channel.start_date != UTCDateTime(1999, 12, 28, 22, 24, 39):
            #continue

        print chan + ": ",

        unit_known_to_evalresp = True

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            try:
                p = Parser(seed_file)
            except:
                faulty_seed_files.append(seed_file)
                counter["random_error"] += 1
                print_warning("Failed to read SEED file!")
                continue

        try:
            all_resps = p.getRESP()
        except:
            counter["random_error"] += 1
            print_warning("getRESP() failed. Very likely a faulty SEED file")
            continue
        resp_string = "RESP.%s.%s.%s.%s" % (net_id, stat_id, loc_id, chan_id)
        all_resps = [_i for _i in all_resps if _i[0] == resp_string]
Example #43
0
    print parseresult.date
    print "Here is the year " + parseresult.date.split()[0]
    print "Here is the day " + parseresult.date.split()[1]
try:
    epochtime = UTCDateTime(parseresult.date.split()[0] + "-" +
                            parseresult.date.split()[1] + "T00:00:00.0")
except:
    print "Problem reading epoch time"
    sys.exit(0)

#Read in the dataless
if verbose:
    print "Reading in the dataless"
#try:

sp = Parser(parseresult.dataless)
#except:
#	print "Not able to read dataless"
#	sys.exit(0)

if parseresult.station:
    if verbose:
        print "Making a station list"
    stations = getstalist(sp, epochtime)
    for sta in stations:
        print sta

if parseresult.stationlist:
    if verbose:
        print "Making a station list"
    stations = getstalistlocation(sp, epochtime)
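getstalist and getstalistlocation are defined elsewhere in that script; a plausible minimal stand-in for the former, built on Parser.getInventory() (an assumption about its behaviour, not the original helper):

def getstalist(sp, epochtime):
    # Return the unique network/station codes whose channel epochs
    # contain epochtime (assumed behaviour of the original helper).
    stations = set()
    for chan in sp.getInventory()['channels']:
        start = chan['start_date']
        end = chan['end_date']
        if start and start > epochtime:
            continue
        if end and end < epochtime:
            continue
        net, sta = chan['channel_id'].split('.')[:2]
        stations.add("%s %s" % (net, sta))
    return sorted(stations)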
Example #44
0
from obspy.core import read, UTCDateTime
from obspy.core.util.geodetics import gps2DistAzimuth
from obspy.xseed import Parser
from math import log10

st = read("../data/LKBD.MSEED")

paz_wa = {'sensitivity': 2800, 'zeros': [0j], 'gain': 1,
          'poles': [-6.2832-4.7124j, -6.2832+4.7124j]}

parser = Parser("../data/LKBD.dataless")
paz_le3d5s = parser.getPAZ("CH.LKBD..EHZ")

st.simulate(paz_remove=paz_le3d5s, paz_simulate=paz_wa, water_level=10)

t = UTCDateTime("2012-04-03T02:45:03")
st.trim(t, t + 50)

tr_n = st.select(component="N")[0]
ampl_n = max(abs(tr_n.data))
tr_e = st.select(component="E")[0]
ampl_e = max(abs(tr_e.data))
ampl = max(ampl_n, ampl_e)

sta_lat = 46.38703
sta_lon = 7.62714
event_lat = 46.218
event_lon = 7.706

epi_dist, az, baz = gps2DistAzimuth(event_lat, event_lon, sta_lat, sta_lon)
epi_dist = epi_dist / 1000
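The example stops after the distance conversion; the usual closing step of this Wood-Anderson workflow is a local-magnitude estimate. A hedged sketch using the Hutton and Boore (1987) relation (whether the original used exactly these constants is an assumption):

# ampl is in metres after the Wood-Anderson simulation; the relation
# expects millimetres, hence the factor of 1000 (assumed scaling).
ml = log10(ampl * 1000) + 1.11 * log10(epi_dist) + 0.00189 * epi_dist - 2.09
print(ml)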
Example #45
0
from obspy.xseed import Parser
sp = Parser("dataless.seed")
sp.writeXSEED("dataless.seed")
    plt.ylabel("Source Radius [m]", fontsize="x-large")
    plt.grid()
    plt.savefig("/Users/lion/Desktop/SourceRadius.pdf")


if __name__ == "__main__":
    # Read all instrument responses.
    widgets = ['Parsing instrument responses...', progressbar.Percentage(),
        ' ', progressbar.Bar()]
    pbar = progressbar.ProgressBar(widgets=widgets,
        maxval=len(STATION_FILES)).start()
    parsers = {}
    # Read all waveform files.
    for _i, xseed in enumerate(STATION_FILES):
        pbar.update(_i)
        parser = Parser(xseed)
        channels = [c['channel_id'] for c in parser.getInventory()['channels']]
        parsers_ = dict.fromkeys(channels, parser)
        if any([k in parsers for k in parsers_.keys()]):
            msg = "Channel(s) defined in more than one metadata file."
            warnings.warn(msg)
        parsers.update(parsers_)
    pbar.finish()

    # Parse all waveform files.
    widgets = ['Indexing waveform files...     ', progressbar.Percentage(),
        ' ', progressbar.Bar()]
    pbar = progressbar.ProgressBar(widgets=widgets,
        maxval=len(WAVEFORM_FILES)).start()
    waveform_index = {}
    # Read all waveform files.
Example #47
0
def obspy_fullresp_RESP(input_dics,
                        trace,
                        resp_file,
                        Address,
                        unit='DIS',
                        BP_filter=(0.008, 0.012, 3.0, 4.0),
                        inform='N/N'):
    """
    Instrument correction using dataless seed --->
    equivalent to full response file steps: detrend, demean, taper, filter,
    deconvolution
    :param input_dics:
    :param trace:
    :param resp_file:
    :param Address:
    :param unit:
    :param BP_filter:
    :param inform:
    :return:
    """
    dataless_parser = Parser(resp_file)
    seedresp = {'filename': dataless_parser, 'units': unit}

    try:
        trace.detrend('linear')
        trace.simulate(seedresp=seedresp,
                       paz_remove=None,
                       paz_simulate=None,
                       remove_sensitivity=True,
                       simulate_sensitivity=False,
                       water_level=input_dics['water_level'],
                       zero_mean=True,
                       taper=True,
                       taper_fraction=0.05,
                       pre_filt=eval(BP_filter),
                       pitsasim=False,
                       sacsim=True)
        # Remove the following line since we want to keep
        # the units as it is in the stationXML
        # trace.data *= 1.e9
        trace_identity = '%s.%s.%s.%s' % (
            trace.stats['network'], trace.stats['station'],
            trace.stats['location'], trace.stats['channel'])
        if input_dics['mseed'] == 'N':
            trace.write(os.path.join(Address,
                                     '%s.%s' % (unit.lower(), trace_identity)),
                        format='SAC')
        else:
            trace.write(os.path.join(Address,
                                     '%s.%s' % (unit.lower(), trace_identity)),
                        format='MSEED')

        if unit.lower() == 'dis':
            unit_print = 'displacement'
        elif unit.lower() == 'vel':
            unit_print = 'velocity'
        elif unit.lower() == 'acc':
            unit_print = 'acceleration'
        else:
            unit_print = 'UNKNOWN'
        print '%s -- instrument correction to %s for: %s' \
              % (inform, unit_print, trace_identity)

    except Exception as e:
        print '%s -- %s' % (inform, e)
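A hypothetical call of the helper above; note that BP_filter has to arrive as a string because the function passes it through eval(), and only the water_level and mseed keys of input_dics are actually read (all file names are placeholders):

from obspy import read

input_dics = {'water_level': 600.0, 'mseed': 'Y'}
tr = read("example.mseed")[0]                      # hypothetical waveform
obspy_fullresp_RESP(input_dics, tr,
                    resp_file="dataless.example",  # dataless SEED / RESP path
                    Address="corrected",           # existing output directory
                    unit='VEL',
                    BP_filter="(0.008, 0.012, 3.0, 4.0)",
                    inform='1/1')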
Example #48
0
    def get_station_details(self, request, network, station):
        session = self.env.db.session(bind=self.env.db.engine)
        try:
            query = session.query(StationObject)\
                .filter(StationObject.network == network)\
                .filter(StationObject.station == station).one()
        except sqlalchemy.orm.exc.NoResultFound:
            session.close()
            msg = "Station %s.%s could not be found." % (network, station)
            raise NotFoundError(msg)
        result = {
            "network_code": query.network,
            "network_name": "",
            "station_code": query.station,
            "station_name": "",
            "latitude": query.latitude,
            "longitude": query.longitude,
            "elevation_in_m": query.elevation_in_m,
            "local_depth_in_m": query.local_depth_in_m,
            "channels": []}
        # Also parse information about all channels.
        for channel in query.channel:
            md = channel.channel_metadata
            if md:
                md = md[0]

            info = {
                "channel_code": channel.channel,
                "location_code": channel.location,
                "start_date": str(md.starttime) if md else None,
                "end_date": str(md.starttime) if md else None,
                "instrument": "",
                "sampling_rate": "",
                "format": md.format if md else None,
                "channel_filepath_id": md.filepath_id if md else None}

            # Attempt to get long descriptions for the station and network
            # codes. This is only possible for SEED and XSEED files.
            if info["format"] and info["format"].lower() in ["seed", "xseed"]:
                parser = Parser(md.filepath.filepath)
                inv = parser.getInventory()
                if not result["network_name"] and inv["networks"]:
                    for network in inv["networks"]:
                        if network["network_code"] != result["network_code"]:
                            continue
                        result["network_name"] = network["network_name"]
                        break
                if not result["station_name"] and inv["stations"]:
                    for station in inv["stations"]:
                        station_code = station["station_id"].split(".")[1]
                        if station_code != result["station_code"]:
                            continue
                        result["station_name"] = station["station_name"]
                for channel in inv["channels"]:
                    location_code, channel_code = \
                        channel["channel_id"].split(".")[2:]
                    if location_code == info["location_code"] and \
                            channel_code == info["channel_code"]:
                        info["start_date"] = str(channel["start_date"])
                        info["end_date"] = str(channel["end_date"])
                        info["instrument"] = channel["instrument"]
                        info["sampling_rate"] = channel["sampling_rate"]
            result["channels"].append({"channel": info})
        session.close()
        return formatResults(request, [result])
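For reference, the SEED/XSEED branch above consumes the dictionary layout returned by Parser.getInventory(); it has roughly the following shape (only the keys used here, with illustrative values; the real start/end dates are UTCDateTime objects or None):

inv = {
    "networks": [{"network_code": "BW", "network_name": "BayernNetz"}],
    "stations": [{"station_id": "BW.RJOB", "station_name": "Jochberg, Bavaria"}],
    "channels": [{"channel_id": "BW.RJOB..EHZ",
                  "start_date": "2007-12-17T00:00:00.000000Z",
                  "end_date": None,
                  "instrument": "Lennartz LE-3D/1s",
                  "sampling_rate": 200.0}],
}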