Example #1
 def test_createPreviewWithUnroundedSampleRate(self):
     """
     Test for creating a preview from a trace with an unrounded sampling rate.
     """
     tr = Trace(data=np.arange(4000))
     tr.stats.sampling_rate = 124.999992371
     tr.stats.starttime = UTCDateTime("1989-10-06T14:31:14.000000Z")
     createPreview(tr, delta=30)
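Note: the snippets in this collection are not self-contained. They appear to assume imports along the following lines; the module path for createPreview follows the obspy 0.x layout these examples target, so treat it as an assumption rather than a current API reference.

import numpy as np
from obspy.core import Trace, UTCDateTime
from obspy.core.preview import createPreview  # renamed create_preview in later obspy releases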
Example #2
 def test_createPreviewWithVerySmallSampleRate(self):
     """
     Test for creating previews with samples per slice less than 1.
     """
     tr = Trace(data=np.arange(4000))
     # 1 - should raise
     tr.stats.sampling_rate = 0.1
     self.assertRaises(ValueError, createPreview, tr)
     # 2 - should work
     tr.stats.sampling_rate = 1
     createPreview(tr)
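Why 0.1 Hz raises while 1 Hz works: createPreview derives the number of samples per delta-second slice from the sampling rate truncated to an integer, so int(0.1) == 0 samples per slice trips the ValueError (the same truncation is presumably why Example #1 exercises the unrounded rate 124.999992371). A hedged paraphrase of the validation this test and Example #4 exercise, not the verbatim obspy source:

def createPreview(trace, delta=60):
    # delta must be a positive integer number of seconds -> TypeError otherwise
    # (Example #4 below triggers this with 60.0 and 0)
    if not isinstance(delta, int) or delta < 1:
        raise TypeError('delta must be a positive integer')
    # the sampling rate is truncated, so rates below 1 Hz leave empty slices
    samples_per_slice = delta * int(trace.stats.sampling_rate)
    if samples_per_slice < 1:
        raise ValueError('samples per slice is fewer than 1')
    ...  # slice the data and reduce each slice (see the note after Example #3)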
Example #3
 def test_createPreviewWithMaskedArrays(self):
     """
     Test for creating preview using masked arrays.
     """
     # 1 - masked arrays without masked values
     trace = Trace(data=np.ma.ones(600))
     preview = createPreview(trace, delta=60)
     # nothing is masked, so no -1 placeholders appear in the preview
     np.testing.assert_array_equal(preview.data, np.array(10 * [0]))
     # 2 - masked arrays with masked values
     trace = Trace(data=np.ma.ones(600))
     trace.data.mask = [False] * 600
     trace.data.mask[200:400] = True
     preview = createPreview(trace, delta=60)
     # fully masked slices are replaced with -1
     np.testing.assert_array_equal(preview.data,
                                   np.array(4 * [0] + 2 * [-1] + 4 * [0]))
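The expected arrays follow from how each preview sample is computed: every delta-second slice collapses to its peak-to-peak amplitude (maximum minus minimum), and a slice whose values are all masked is flagged with -1; the partially masked slices (e.g. samples 180-239) still yield 0 from their unmasked ones. A minimal sketch of that per-slice reduction, using a hypothetical helper rather than the obspy implementation:

import numpy as np

def slice_value(chunk):
    # a fully masked slice carries no usable data -> flag it with -1
    if np.ma.isMaskedArray(chunk) and np.all(np.ma.getmaskarray(chunk)):
        return -1
    # otherwise the preview stores the peak-to-peak amplitude of the slice
    return int(np.max(chunk) - np.min(chunk))

print(slice_value(np.ma.ones(60)))        # 0  (constant data)
print(slice_value(np.ma.masked_all(60)))  # -1 (every value masked)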
Example #4
 def test_createPreview(self):
     """
     Test for creating a preview.
     """
     # Wrong delta should raise.
     self.assertRaises(TypeError, createPreview, Trace(data=np.arange(10)),
                       60.0)
     self.assertRaises(TypeError, createPreview, Trace(data=np.arange(10)),
                       0)
     # 1
     trace = Trace(data=np.array([0] * 28 + [0, 1] * 30 + [-1, 1] * 29))
     trace.stats.starttime = UTCDateTime(32)
     preview = createPreview(trace, delta=60)
     self.assertEqual(preview.stats.starttime, UTCDateTime(60))
     self.assertEqual(preview.stats.endtime, UTCDateTime(120))
     self.assertEqual(preview.stats.delta, 60)
     np.testing.assert_array_equal(preview.data, np.array([1, 2]))
     # 2
     trace = Trace(data=np.arange(0, 30))
     preview = createPreview(trace, delta=60)
     self.assertEqual(preview.stats.starttime, UTCDateTime(0))
     self.assertEqual(preview.stats.endtime, UTCDateTime(0))
     self.assertEqual(preview.stats.delta, 60)
     np.testing.assert_array_equal(preview.data, np.array([29]))
     # 3
     trace = Trace(data=np.arange(0, 60))
     preview = createPreview(trace, delta=60)
     self.assertEqual(preview.stats.starttime, UTCDateTime(0))
     self.assertEqual(preview.stats.endtime, UTCDateTime(0))
     self.assertEqual(preview.stats.delta, 60)
     np.testing.assert_array_equal(preview.data, np.array([59]))
     # 4
     trace = Trace(data=np.arange(0, 90))
     preview = createPreview(trace, delta=60)
     self.assertEqual(preview.stats.starttime, UTCDateTime(0))
     self.assertEqual(preview.stats.endtime, UTCDateTime(60))
     self.assertEqual(preview.stats.delta, 60)
     np.testing.assert_array_equal(preview.data, np.array([59, 29]))
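Two behaviours stand out across these four cases: the preview grid aligns to integer multiples of delta, so the samples before t=60 in case 1 are dropped, while a trailing partial slice (cases 2 and 4) still produces a value. Case 4's expected data can be reproduced by hand with NumPy's peak-to-peak helper:

import numpy as np

data = np.arange(90)        # case 4: 90 one-second samples, delta=60
print(np.ptp(data[0:60]))   # 59 -> first preview sample, slice [0, 60)
print(np.ptp(data[60:90]))  # 29 -> second preview sample, partial slice [60, 90)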
Example #5
 def __init__(self, *args, **kwargs):
     super(ClientTestCase, self).__init__(*args, **kwargs)
     # Create an in-memory database only once for the test suite
     url = 'sqlite:///:memory:'
     self.client = Client(url)
     # add paths
     session = self.client.session()
     path1 = WaveformPath({'path': '/path/to/1'})
     path2 = WaveformPath({'path': '/path/to/2'})
     session.add_all([path1, path2])
     # add files
     file1 = WaveformFile(
         {'file': 'file_001.mseed', 'size': 2000,
             'mtime': UTCDateTime('20120101').timestamp, 'format': 'MSEED'})
     file2 = WaveformFile(
         {'file': 'file_002.mseed', 'size': 2000,
             'mtime': UTCDateTime('20120102').timestamp, 'format': 'MSEED'})
     file3 = WaveformFile(
         {'file': 'file_001.gse2', 'size': 2000,
             'mtime': UTCDateTime('20120102').timestamp, 'format': 'GSE2'})
     path1.files.append(file1)
     path1.files.append(file2)
     path2.files.append(file3)
     session.add_all([file1, file2, file3])
     # add channels
     channel1 = WaveformChannel(
         {'network': 'BW', 'station': 'MANZ',
             'location': '', 'channel': 'EHZ',
             'starttime':
             UTCDateTime('2012-01-01 00:00:00.000000').datetime,
             'endtime': UTCDateTime('2012-01-01 23:59:59.999999').datetime,
             'npts': 3000, 'sampling_rate': 100.0})
     channel2 = WaveformChannel(
         {'network': 'BW', 'station': 'MANZ',
             'location': '', 'channel': 'EHZ',
             'starttime':
             UTCDateTime('2012-01-02 01:00:00.000000').datetime,
             'endtime':
             UTCDateTime('2012-01-02 23:59:59.999999').datetime,
             'npts': 3000,
             'sampling_rate': 100.0})
     # create a channel with preview
     header = {'network': 'GE', 'station': 'FUR',
               'location': '00', 'channel': 'BHZ',
               'starttime': UTCDateTime('2012-01-01 00:00:00.000000'),
               'sampling_rate': 100.0}
     # linear trend
     data = np.linspace(0, 1, 3000000)
     # some peaks
     data[20000] = 15
     data[20001] = -15
     data[1000000] = 22
     data[1000001] = -22
     data[2000000] = 14
     data[2000001] = -14
     tr = Trace(data=data, header=header)
     self.preview = createPreview(tr, 30).data
     header = dict(tr.stats)
     header['starttime'] = tr.stats.starttime.datetime
     header['endtime'] = tr.stats.endtime.datetime
     channel3 = WaveformChannel(header)
     channel3.preview = self.preview.dumps()
     file1.channels.append(channel1)
     file2.channels.append(channel2)
     file3.channels.append(channel3)
     session.add_all([channel1, channel2, channel3])
     session.commit()
     session.close()
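The preview array is persisted as a pickled blob via ndarray.dumps(), which newer NumPy releases deprecate in favour of pickle.dumps(arr). A round-trip sketch of what writing and reading that blob column amounts to:

import pickle
import numpy as np

arr = np.array([0, 1, 2], dtype=np.int32)
blob = pickle.dumps(arr)       # equivalent of arr.dumps() on the NumPy of that era
restored = pickle.loads(blob)  # what a consumer of the preview column would do
assert np.array_equal(arr, restored)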
Example #6
def worker(_i, input_queue, work_queue, output_queue, log_queue, mappings={}):
    try:
        # fetch and initialize all possible waveform feature plug-ins
        all_features = {}
        for (key, ep) in _getEntryPoints('obspy.db.feature').items():
            try:
                # load plug-in
                cls = ep.load()
                # initialize class
                func = cls().process
            except Exception as e:
                msg = 'Could not initialize feature %s. (%s)'
                log_queue.append(msg % (key, str(e)))
                continue
            all_features[key] = {}
            all_features[key]['run'] = func
            try:
                all_features[key]['indexer_kwargs'] = cls['indexer_kwargs']
            except Exception:  # plug-in provides no indexer_kwargs
                all_features[key]['indexer_kwargs'] = {}
        # loop through input queue
        while True:
            # fetch an unprocessed item
            try:
                filepath, (path, file, features) = input_queue.popitem()
            except Exception:  # input queue is empty, poll again
                continue
            # skip item if already in work queue
            if filepath in work_queue:
                continue
            work_queue.append(filepath)
            # get additional kwargs for read method from waveform plug-ins
            kwargs = {'verify_chksum': False}
            for feature in features:
                if feature not in all_features:
                    log_queue.append('%s: Unknown feature %s' %
                                     (filepath, feature))
                    continue
                kwargs.update(all_features[feature]['indexer_kwargs'])
            # read file and get file stats
            try:
                stats = os.stat(filepath)
                stream = read(filepath, **kwargs)
                # get gap and overlap information
                gap_list = stream.getGaps()
                # merge channels and replace gaps/overlaps with 0 to prevent
                # generation of masked arrays
                stream.merge(fill_value=0)
            except Exception as e:
                msg = '[Reading stream] %s: %s'
                log_queue.append(msg % (filepath, e))
                try:
                    work_queue.remove(filepath)
                except Exception:
                    pass
                continue
            # build up dictionary of gaps and overlaps for easier lookup
            gap_dict = {}
            for gap in gap_list:
                id = '.'.join(gap[0:4])
                temp = {
                    'gap': gap[6] >= 0,
                    'starttime': gap[4].datetime,
                    'endtime': gap[5].datetime,
                    'samples': abs(gap[7])
                }
                gap_dict.setdefault(id, []).append(temp)
            # loop through traces
            dataset = []
            for trace in stream:
                result = {}
                # general file information
                result['mtime'] = int(stats.st_mtime)
                result['size'] = stats.st_size
                result['path'] = path
                result['file'] = file
                result['filepath'] = filepath
                # trace information
                result['format'] = trace.stats._format
                result['station'] = trace.stats.station
                result['location'] = trace.stats.location
                result['channel'] = trace.stats.channel
                result['network'] = trace.stats.network
                result['starttime'] = trace.stats.starttime.datetime
                result['endtime'] = trace.stats.endtime.datetime
                result['calib'] = trace.stats.calib
                result['npts'] = trace.stats.npts
                result['sampling_rate'] = trace.stats.sampling_rate
                # check for any id mappings
                if trace.id in mappings:
                    old_id = trace.id
                    for mapping in mappings[old_id]:
                        if trace.stats.starttime and \
                           trace.stats.starttime > mapping['endtime']:
                            continue
                        if trace.stats.endtime and \
                           trace.stats.endtime < mapping['starttime']:
                            continue
                        result['network'] = mapping['network']
                        result['station'] = mapping['station']
                        result['location'] = mapping['location']
                        result['channel'] = mapping['channel']
                        msg = "Mapping '%s' to '%s.%s.%s.%s'" % \
                            (old_id, mapping['network'], mapping['station'],
                             mapping['location'], mapping['channel'])
                        log_queue.append(msg)
                # gaps/overlaps for current trace
                result['gaps'] = gap_dict.get(trace.id, [])
                # apply feature functions
                result['features'] = []
                for key in features:
                    if key not in all_features:
                        continue
                    try:
                        # run plug-in and update results
                        temp = all_features[key]['run'](trace)
                        for key, value in temp.items():
                            result['features'].append({
                                'key': key,
                                'value': value
                            })
                    except Exception as e:
                        msg = '[Processing feature] %s: %s'
                        log_queue.append(msg % (filepath, e))
                        continue
                # generate preview of trace
                result['preview'] = None
                if '.LOG.L.' not in file or trace.stats.channel != 'LOG':
                    # create previews only for non-log files (see issue #400)
                    try:
                        trace = createPreview(trace, 30)
                        result['preview'] = trace.data.dumps()
                    except ValueError:
                        pass
                    except Exception as e:
                        msg = '[Creating preview] %s: %s'
                        log_queue.append(msg % (filepath, e))
                # update dataset
                dataset.append(result)
            del stream
            # return results to main loop
            try:
                output_queue.append(dataset)
            except Exception:
                pass
            try:
                work_queue.remove(filepath)
            except Exception:
                pass
    except KeyboardInterrupt:
        return
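Despite their names, the queue parameters are not queue.Queue objects: the body calls popitem() on input_queue (a dict keyed by filepath) and append()/remove() on the others (lists), which matches multiprocessing.Manager proxies. A hedged sketch of how such workers could be wired up; the launcher name and CPU count are illustrative, not the obspy indexer's actual entry point:

import multiprocessing

def start_workers(number_of_cpus=2, mappings=None):
    manager = multiprocessing.Manager()
    input_queue = manager.dict()   # filepath -> (path, file, features)
    work_queue = manager.list()    # filepaths currently being indexed
    output_queue = manager.list()  # finished per-file result lists
    log_queue = manager.list()     # log messages drained by the main loop
    for i in range(number_of_cpus):
        args = (i, input_queue, work_queue, output_queue, log_queue,
                mappings or {})
        process = multiprocessing.Process(target=worker, args=args)
        process.daemon = True
        process.start()
    return input_queue, work_queue, output_queue, log_queue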