Code example #1
def test_stream():
    inventory = get_inventory()
    channels = ['HN1', 'HN2', 'HNZ']
    data = np.random.rand(1000)
    traces = []
    network = inventory.networks[0]
    station = network.stations[0]
    chlist = station.channels
    channelcodes = [ch.code for ch in chlist]
    for channel in channels:
        chidx = channelcodes.index(channel)
        channeldata = chlist[chidx]
        header = {
            'sampling_rate': channeldata.sample_rate,
            'npts': len(data),
            'network': network.code,
            'location': channeldata.location_code,
            'station': station.code,
            'channel': channel,
            'starttime': UTCDateTime(2010, 1, 1, 0, 0, 0)
        }
        trace = Trace(data=data, header=header)
        traces.append(trace)
    invstream = StationStream(traces=traces, inventory=inventory)
    inventory2 = invstream.getInventory()
    inv2_channel1 = inventory2.networks[0].stations[0].channels[0]
    inv_channel1 = inventory.networks[0].stations[0].channels[0]
    assert inv_channel1.code == inv2_channel1.code
Code example #2
def read_volume_one(filename, location='', alternate=False):
    """Read channel data from USC volume 1 text file.

    Args:
        filename (str): Input USC V1 filename.
    Returns:
        list: List of one StationStream containing the channel traces
        read from the file.
    """
    volume = VOLUMES['V1']
    # count the number of lines in the file
    with open(filename) as f:
        line_count = sum(1 for _ in f)
    # read as many channels as are present in the file
    line_offset = 0
    stream = StationStream([])
    while line_offset < line_count:
        trace, line_offset = _read_channel(filename,
                                           line_offset,
                                           volume,
                                           location=location,
                                           alternate=alternate)
        # store the trace if it was successfully read
        if trace is not None:
            stream.append(trace)

    return [stream]
Code example #3
    def get_derivative(self):
        """
        Calculates the derivative of each trace's data.

        Returns:
            stream: StationStream with the differentiated data.
        """
        stream = StationStream([])
        for trace in self.transform_data:
            differentiated_trace = trace.copy().differentiate()
            differentiated_trace.stats['units'] = 'acc'
            stream.append(differentiated_trace)
        return stream
Code example #4
def split_station(grouped_trace_list):
    if grouped_trace_list[0].stats.network in NETWORKS_USING_LOCATION:
        streams_dict = {}
        for trace in grouped_trace_list:
            if trace.stats.location in streams_dict:
                streams_dict[trace.stats.location] += trace
            else:
                streams_dict[trace.stats.location] = \
                    StationStream(traces=[trace])
        streams = list(streams_dict.values())
    else:
        streams = [StationStream(traces=grouped_trace_list)]
    return streams
Code example #5
    def get_integral(self):
        """
        Calculates the integral of each trace's data.

        Returns:
            stream: StationStream with the integrated data.
        """
        stream = StationStream([])
        for trace in self.transform_data:
            integrated_trace = trace.copy().integrate()
            integrated_trace.stats['units'] = 'vel'
            stream.append(integrated_trace)
        return stream
Code example #6
    def get_derivative(self):
        """
        Calculates the derivative of each trace's data.

        Returns:
            stream: StationStream with the differentiated data.
        """
        stream = StationStream([])
        for trace in self.transform_data:
            differentiated_trace = trace.differentiate()
            differentiated_trace.stats['units'] = 'acc'
            strace = StationTrace(data=differentiated_trace.data,
                                  header=differentiated_trace.stats)
            stream.append(strace)
        return stream
Code example #7
    def get_integral(self):
        """
        Calculates the integral of each trace's data.

        Returns:
            stream: StationStream with the integrated data.
        """
        stream = StationStream([])
        for trace in self.transform_data:
            integrated_trace = trace.integrate()
            integrated_trace.stats['units'] = 'veloc'
            strace = StationTrace(data=integrated_trace.data,
                                  header=integrated_trace.stats)
            stream.append(strace)
        return stream
Code example #8
    def get_integral(self):
        """
        Calculates the integral of each trace's data.

        Returns:
            stream: StationStream with the integrated data.
        """
        stream = StationStream([])
        for trace in self.transform_data:
            integrated_trace = trace.integrate()
            integrated_trace.stats['units'] = 'veloc'
            strace = StationTrace(data=integrated_trace.data,
                                  header=integrated_trace.stats)
            stream.append(strace)
        return stream
Code example #9
def read_unam(filename):
    """Read the Mexican UNAM strong motion data format.

    Args:
        filename (str): path to UNAM data file.

    Returns:
        list: Sequence of one StationStream object containing 3
        StationTrace objects.
    """

    channels = _read_header(filename)
    npts = channels[0]['npts']
    all_data = np.genfromtxt(filename, skip_header=ALL_HEADERS, max_rows=npts)
    trace1 = StationTrace(data=all_data[:, 0], header=channels[0])
    trace2 = StationTrace(data=all_data[:, 1], header=channels[1])
    trace3 = StationTrace(data=all_data[:, 2], header=channels[2])

    # tell the trace that data has already been converted to physical units
    response = {'input_units': 'counts', 'output_units': 'cm/s^2'}
    trace1.setProvenance('remove_response', response)
    trace2.setProvenance('remove_response', response)
    trace3.setProvenance('remove_response', response)

    stream = StationStream(traces=[trace1, trace2, trace3])
    return [stream]
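The UNAM reader above illustrates the return contract shared by the read_* functions in this listing: a list holding a single StationStream with three StationTrace objects. A minimal usage sketch follows; the file path is a placeholder, not a real fixture.

# Hypothetical usage of read_unam; the path below is a placeholder.
streams = read_unam('/path/to/unam_record.txt')
st = streams[0]                # readers return a one-element list
assert len(st) == 3            # one StationTrace per channel
for tr in st:
    print(tr.stats.channel, tr.stats.npts)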
Code example #10
def read_bhrc(filename):
    """Read the Iran BHRC strong motion data format.

    Args:
        filename (str): path to BHRC data file.

    Returns:
        list: Sequence of one StationStream object containing 3
        StationTrace objects.
    """
    header1, offset = _read_header_lines(filename, 0)
    data1, offset = _read_data(filename, offset, header1)
    header2, offset = _read_header_lines(filename, offset)
    data2, offset = _read_data(filename, offset, header2)
    header3, offset = _read_header_lines(filename, offset)
    data3, offset = _read_data(filename, offset, header3)
    trace1 = StationTrace(data1, header1)
    trace2 = StationTrace(data2, header2)
    trace3 = StationTrace(data3, header3)
    stream = StationStream([trace1, trace2, trace3])

    for tr in stream:
        if tr.stats.standard.process_level != PROCESS_LEVELS['V0']:
            response = {'input_units': 'counts', 'output_units': 'cm/s^2'}
            tr.setProvenance('remove_response', response)

    return [stream]
Code example #11
def test_arias():
    ddir = os.path.join('data', 'testdata')
    datadir = pkg_resources.resource_filename('gmprocess', ddir)
    data_file = os.path.join(datadir, 'arias_data.json')
    with open(data_file, 'rt') as f:
        jdict = json.load(f)

    time = np.array(jdict['time'])
    # the accelerations in the file are cm/s/s; convert to m/s/s
    acc = np.array(jdict['acc']) / 100
    target_IA = jdict['ia']
    delta = time[2] - time[1]
    sr = 1 / delta
    header = {
        'delta': delta,
        'sampling_rate': sr,
        'npts': len(acc),
        'units': 'm/s/s',
        'channel': 'HN1',
        'standard': {
            'corner_frequency': np.nan,
            'station_name': '',
            'source': 'json',
            'source_file': '',
            'instrument': '',
            'instrument_period': np.nan,
            'source_format': 'json',
            'comments': '',
            'structure_type': '',
            'sensor_serial_number': '',
            'process_level': 'raw counts',
            'process_time': '',
            'horizontal_orientation': np.nan,
            'units': 'acc',
            'instrument_damping': np.nan
        }
    }
    # input is cm/s/s output is m/s/s
    trace = StationTrace(data=acc * 100, header=header)
    trace2 = trace.copy()
    trace2.stats.channel = 'HN2'
    stream = StationStream([trace, trace2])
    station = StationSummary.from_stream(stream, ['ARITHMETIC_MEAN'],
                                         ['arias'])
    pgms = station.pgms
    Ia = pgms[(pgms.IMT == 'ARIAS')
              & (pgms.IMC == 'ARITHMETIC_MEAN')].Result.tolist()[0]
    # the target has only one decimal place and is in cm/s
    Ia = Ia * 100
    np.testing.assert_almost_equal(Ia, target_IA, decimal=1)

    # Test other components
    data_files, _ = read_data_dir('cwb', 'us1000chhc', '2-ECU.dat')
    stream = read_data(data_files[0])[0]
    station = StationSummary.from_stream(stream, [
        'channels', 'gmrotd', 'rotd50', 'greater_of_two_horizontals',
        'ARITHMETIC_MEAN'
    ], ['arias'])
    summary = StationSummary.from_stream(stream, ['gmrotd50'], ['arias'])
    assert summary.pgms.Result.tolist() == []
Code example #12
def read_nsmn(filename):
    """Read the Turkish NSMN strong motion data format.

    Args:
        filename (str): path to NSMN data file.

    Returns:
        list: Sequence of one StationStream object containing 3 StationTrace objects.
    """
    header = _read_header(filename)
    header1 = copy.deepcopy(header)
    header2 = copy.deepcopy(header)
    header3 = copy.deepcopy(header)
    header1['standard']['horizontal_orientation'] = 0.0
    header1['channel'] = get_channel_name(header['sampling_rate'], True, False,
                                          True)
    header2['standard']['horizontal_orientation'] = 90.0
    header2['channel'] = get_channel_name(header['sampling_rate'], True, False,
                                          False)
    header3['standard']['horizontal_orientation'] = 0.0
    header3['channel'] = get_channel_name(header['sampling_rate'], True, True,
                                          False)
    # three columns of NS, EW, UD
    # data = np.genfromtxt(filename, skip_header=TEXT_HDR_ROWS,
    #                      delimiter=[COLWIDTH] * NCOLS, encoding=ENCODING)
    data = np.loadtxt(filename, skiprows=TEXT_HDR_ROWS, encoding=ENCODING)
    data1 = data[:, 0]
    data2 = data[:, 1]
    data3 = data[:, 2]
    trace1 = StationTrace(data=data1, header=header1)
    trace2 = StationTrace(data=data2, header=header2)
    trace3 = StationTrace(data=data3, header=header3)
    stream = StationStream(traces=[trace1, trace2, trace3])
    return [stream]
Code example #13
def test_uneven_stream():
    inventory = get_inventory()
    channels = ['HN1', 'HN2', 'HNZ']
    data1 = np.random.rand(1000)
    data2 = np.random.rand(1001)
    data3 = np.random.rand(1002)
    data = [data1, data2, data3]
    traces = []
    network = inventory.networks[0]
    station = network.stations[0]
    chlist = station.channels
    channelcodes = [ch.code for ch in chlist]
    for datat, channel in zip(data, channels):
        chidx = channelcodes.index(channel)
        channeldata = chlist[chidx]
        header = {
            'sampling_rate': channeldata.sample_rate,
            'npts': len(datat),
            'network': network.code,
            'location': channeldata.location_code,
            'station': station.code,
            'channel': channel,
            'starttime': UTCDateTime(2010, 1, 1, 0, 0, 0)
        }
        trace = Trace(data=datat, header=header)
        traces.append(trace)
    invstream = StationStream(traces=traces, inventory=inventory)
    # constructing the stream from unevenly sized traces should not raise
    assert len(invstream) == 3
Code example #14
def read_cosmos(filename, **kwargs):
    """Read COSMOS V1/V2 strong motion file.

    There is one extra key in the Stats object for each Trace -
    "process_level".
    This will be set to either "V1" or "V2".

    Args:
        filename (str): Path to possible COSMOS V1/V2 data file.
        kwargs (ref):
            valid_station_types (list): List of valid station types. See table
                6  in the COSMOS strong motion data format documentation for
                station type codes.
            Other arguments will be ignored.
    Returns:
        list: List of StationStreams containing three channels of acceleration
        data (cm/s**2).
    """
    logging.debug("Starting read_cosmos.")
    if not is_cosmos(filename):
        raise Exception(
            '%s is not a valid COSMOS strong motion data file.' % filename)
    # get list of valid stations
    valid_station_types = kwargs.get('valid_station_types', None)
    # get the location argument, if any
    location = kwargs.get('location', '')

    # count the number of lines in the file
    with open(filename) as f:
        line_count = sum(1 for _ in f)

    # read as many channels as are present in the file
    line_offset = 0
    stream = StationStream([])
    while line_offset < line_count:
        trace, line_offset = _read_channel(
            filename, line_offset, location=location)
        # store the trace if the station type is in the valid_station_types
        # list or store the trace if there is no valid_station_types list
        if valid_station_types is not None:
            if trace.stats['format_specific']['station_code'] in valid_station_types:
                stream.append(trace)
        else:
            stream.append(trace)

    return [stream]
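As the docstring above notes, read_cosmos accepts a valid_station_types keyword that filters traces by COSMOS station type code. A hedged sketch follows; the path and the station type codes are placeholders, not values taken from the examples.

# Illustrative call only; the path and station type codes are placeholders.
streams = read_cosmos('/path/to/cosmos_record.v2',
                      valid_station_types=[1, 2, 3])
for st in streams:
    for tr in st:
        print(tr.id, tr.stats['format_specific']['station_code'])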
Code example #15
    def get_radial_transverse(self):
        """
        Performs radial transverse rotation.

        Returns:
            radial_transverse: StationStream with the radial and
                    transverse components.
        """
        st_copy = self.rotation_data.copy()
        st_n = st_copy.select(component='[N1]')
        st_e = st_copy.select(component='[E2]')

        # Check that we have one northing and one easting channel
        if len(st_e) != 1 or len(st_n) != 1:
            raise Exception('Radial_Transverse: Stream must have one north '
                            'and one east channel.')

        # Check that the orientations are orthogonal
        ho1 = st_e[0].stats.standard.horizontal_orientation
        ho2 = st_n[0].stats.standard.horizontal_orientation
        if abs(ho1 - ho2) not in [90, 270]:
            raise Exception('Radial_Transverse: Channels must be orthogonal.')

        # Check that the lengths of the two channels are the same
        if st_e[0].stats.npts != st_n[0].stats.npts:
            raise Exception('Radial_Transverse: East and north channels must '
                            'have same length.')

        # First, rotate to North-East components if not already
        if st_n[0].stats.standard.horizontal_orientation != 0:
            az_diff = 360 - st_n[0].stats.standard.horizontal_orientation
            az_diff = np.deg2rad(az_diff)
            rotation_matrix = np.array([[np.cos(az_diff),
                                         np.sin(az_diff)],
                                        [-np.sin(az_diff),
                                         np.cos(az_diff)]])
            data = np.array([st_n[0].data, st_e[0].data])
            newdata = np.matmul(rotation_matrix, data)

            st_n[0].data = newdata[0]
            st_e[0].data = newdata[1]

        st_n[0].stats.channel = st_n[0].stats.channel[:-1] + 'N'
        st_e[0].stats.channel = st_e[0].stats.channel[:-1] + 'E'

        # For some reason the rotation does not update the channel names
        # unless the traces are wrapped in a plain obspy Stream
        ne_stream = Stream([st_n[0], st_e[0]])
        # Calculate back azimuth and perform rotation to radial and transverse
        baz = gps2dist_azimuth(st_e[0].stats.coordinates.latitude,
                               st_e[0].stats.coordinates.longitude,
                               self.event.latitude, self.event.longitude)[1]
        ne_stream.rotate(method='NE->RT', back_azimuth=baz)
        radial_transverse = StationStream([ne_stream[0], ne_stream[1]])
        return radial_transverse
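The intermediate step above rotates the horizontals back to geographic north/east before the NE->RT rotation. The standalone numpy sketch below reproduces that matrix with an assumed 30-degree sensor orientation, purely to show what the alignment step does; it is not part of the class.

import numpy as np

# Assumed horizontal orientation of the "1" channel: 30 degrees east of north.
az_diff = np.deg2rad(360 - 30)
rotation_matrix = np.array([[np.cos(az_diff), np.sin(az_diff)],
                            [-np.sin(az_diff), np.cos(az_diff)]])
# Two-sample synthetic traces: a unit pulse on channel "1", then on channel "2".
data = np.array([[1.0, 0.0],
                 [0.0, 1.0]])
north, east = np.matmul(rotation_matrix, data)
print(north, east)   # the pulse on "1" maps to ~0.866 north, 0.5 east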
Code example #16
def read_renadic(filename):
    """Read the Chilean RENADIC strong motion data format.

    Args:
        filename (str): path to RENADIC data file.

    Returns:
        list: Sequence of one StationStream object containing 3
        StationTrace objects.
    """
    # This network does not include station coordinates in the data files,
    # but they did provide a PDF table with information about each station,
    # including structure type (free field or something else) and the
    # coordinates
    data_dir = pkg_resources.resource_filename('gmprocess', 'data')
    tablefile = os.path.join(data_dir, 'station_coordinates.xlsx')
    table = pd.read_excel(tablefile)

    with open(filename, 'rt', encoding=ENCODING) as f:
        lines1 = [next(f) for x in range(TEXT_HDR_ROWS)]
    header1 = _read_header(lines1, filename, table)
    ndata_rows = int(np.ceil((header1['npts'] * 2) / NCOLS))

    skip_rows = TEXT_HDR_ROWS + INT_HEADER_ROWS + FLOAT_HEADER_ROWS
    data1 = _read_data(filename, skip_rows, header1['npts'])

    skip_rows += ndata_rows + 1
    with open(filename, 'rt', encoding=ENCODING) as f:
        [next(f) for x in range(skip_rows)]
        lines2 = [next(f) for x in range(TEXT_HDR_ROWS)]

    header2 = _read_header(lines2, filename, table)
    skip_rows += TEXT_HDR_ROWS + INT_HEADER_ROWS + FLOAT_HEADER_ROWS
    data2 = _read_data(filename, skip_rows, header1['npts'])

    skip_rows += ndata_rows + 1
    with open(filename, 'rt', encoding=ENCODING) as f:
        [next(f) for x in range(skip_rows)]
        lines3 = [next(f) for x in range(TEXT_HDR_ROWS)]

    header3 = _read_header(lines3, filename, table)
    skip_rows += TEXT_HDR_ROWS + INT_HEADER_ROWS + FLOAT_HEADER_ROWS
    data3 = _read_data(filename, skip_rows, header1['npts'])

    trace1 = StationTrace(data=data1, header=header1)
    response = {'input_units': 'counts', 'output_units': 'cm/s^2'}
    trace1.setProvenance('remove_response', response)
    trace2 = StationTrace(data=data2, header=header2)
    trace2.setProvenance('remove_response', response)
    trace3 = StationTrace(data=data3, header=header3)
    trace3.setProvenance('remove_response', response)
    stream = StationStream(traces=[trace1, trace2, trace3])
    return [stream]
Code example #17
def test_stream():
    inventory = get_inventory()
    channels = ['HN1', 'HN2', 'HNZ']
    data = np.random.rand(1000)
    traces = []
    network = inventory.networks[0]
    station = network.stations[0]
    chlist = station.channels
    channelcodes = [ch.code for ch in chlist]
    for channel in channels:
        chidx = channelcodes.index(channel)
        channeldata = chlist[chidx]
        header = {'sampling_rate': channeldata.sample_rate,
                  'npts': len(data),
                  'network': network.code,
                  'location': channeldata.location_code,
                  'station': station.code,
                  'channel': channel,
                  'starttime': UTCDateTime(2010, 1, 1, 0, 0, 0)}
        trace = Trace(data=data, header=header)
        traces.append(trace)
    invstream = StationStream(traces=traces, inventory=inventory)
    inventory2 = invstream.getInventory()
    inv2_channel1 = inventory2.networks[0].stations[0].channels[0]
    inv_channel1 = inventory.networks[0].stations[0].channels[0]
    assert inv_channel1.code == inv2_channel1.code

    # test the streamparam functionality
    statsdict = {'name': 'Fred', 'age': 34}
    invstream.setStreamParam('stats', statsdict)
    assert invstream.getStreamParamKeys() == ['stats']
    cmpdict = invstream.getStreamParam('stats')
    assert statsdict == cmpdict
Code example #18
File: core.py  Project: norfordb/groundmotion
def read_geonet(filename, **kwargs):
    """Read New Zealand GNS V1/V2 strong motion file.

    There is one extra key in the Stats object for each Trace -
    "process_level".
    This will be set to either "V1" or "V2".

    Args:
        filename (str): Path to possible GNS V1/V2 data file.
        kwargs (ref): Other arguments will be ignored.

    Returns:
        list: List of one StationStream containing three channels of
        acceleration data (cm/s**2).
    """
    logging.debug("Starting read_geonet.")
    if not is_geonet(filename):
        raise Exception('%s is not a valid GEONET strong motion data file.' %
                        filename)
    trace1, offset1, _ = _read_channel(filename, 0)
    trace2, offset2, _ = _read_channel(filename, offset1)
    trace3, _, _ = _read_channel(filename, offset2)

    # occasionally, geonet horizontal components are
    # identical.  To handle this, we'll set the second
    # channel to whatever isn't the first one.
    channel1 = trace1.stats['channel']
    channel2 = trace2.stats['channel']
    channel3 = trace3.stats['channel']
    if channel1 == channel2:
        if channel1.endswith('1'):
            trace2.stats['channel'] = trace2.stats['channel'][0:2] + '2'
        elif channel1.endswith('2'):
            trace2.stats['channel'] = trace2.stats['channel'][0:2] + '1'
        else:
            raise Exception(
                'GEONET: Could not resolve duplicate channels in %s' %
                trace1.stats['station'])
    if channel2 == channel3:
        if channel2.endswith('2'):
            trace3.stats['channel'] = trace2.stats['channel'][0:2] + '1'
        elif channel2.endswith('1'):
            trace3.stats['channel'] = trace2.stats['channel'][0:2] + '2'
        else:
            raise Exception(
                'GEONET: Could not resolve duplicate channels in %s' %
                trace1.stats['station'])

    traces = [trace1, trace2, trace3]
    stream = StationStream(traces)

    return [stream]
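A hedged usage sketch for read_geonet; the directory is a placeholder, while the file name matches the GeoNet V2A fixture referenced in test_exceptions later in this listing.

# Illustrative call only; the directory is not a real path.
streams = read_geonet('/path/to/us1000778i/20161113_110259_WTMC_20.V2A')
st = streams[0]
print([tr.stats['channel'] for tr in st])   # three acceleration channels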
Code example #19
def read_fdsn(filename):
    """Read Obspy data file (SAC, MiniSEED, etc).

    Args:
        filename (str):
            Path to data file.
    Returns:
        list: List of one StationStream object.
    """
    logging.debug("Starting read_fdsn.")
    if not is_fdsn(filename):
        raise Exception('%s is not a valid Obspy file format.' % filename)

    streams = []
    tstream = read(filename)
    xmlfile = _get_station_file(filename, tstream)
    inventory = read_inventory(xmlfile)
    traces = []
    for ttrace in tstream:
        trace = StationTrace(data=ttrace.data,
                             header=ttrace.stats,
                             inventory=inventory)
        location = ttrace.stats.location

        trace.stats.channel = get_channel_name(
            trace.stats.sampling_rate, trace.stats.channel[1] == 'N',
            inventory.get_orientation(trace.id)['dip'] in [90, -90]
            or trace.stats.channel[2] == 'Z',
            is_channel_north(inventory.get_orientation(trace.id)['azimuth']))

        if trace.stats.location == '':
            trace.stats.location = '--'

        network = ttrace.stats.network
        if network in LOCATION_CODES:
            codes = LOCATION_CODES[network]
            if location in codes:
                sdict = codes[location]
                if sdict['free_field']:
                    trace.stats.standard.structure_type = 'free_field'
                else:
                    trace.stats.standard.structure_type = sdict['description']
        head, tail = os.path.split(filename)
        trace.stats['standard']['source_file'] = tail or os.path.basename(head)
        traces.append(trace)
    stream = StationStream(traces=traces)
    streams.append(stream)

    return streams
Code example #20
    def from_traces(cls, traces):
        """
        Create a StreamCollection instance from a list of traces.

        Args:
            traces (list):
                List of StationTrace objects.

        Returns:
            StreamCollection instance.
        """

        streams = [StationStream([tr]) for tr in traces]
        return cls(streams)
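A minimal sketch of how from_traces might be used to regroup traces that were read one stream at a time; it assumes `streams` is an existing list of StationStream objects such as those returned by the readers in this listing.

# Hypothetical regrouping of already-read streams into a StreamCollection.
all_traces = [tr for st in streams for tr in st]
collection = StreamCollection.from_traces(all_traces)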
Code example #21
    def get_arias(self):
        """
        Calculates the Arias intensity for each channel.

        Returns:
            arias_intensities: Dictionary of arias intensity for each channel.
        """
        arias_intensities = {}
        arias_stream = StationStream([])
        for trace in self.reduction_data:
            dt = trace.stats['delta']
            # convert from cm/s/s to m/s/s
            acc = trace.data * 0.01

            # Calculate Arias Intensity
            integrated_acc2 = integrate.cumtrapz(acc * acc, dx=dt)
            arias_intensity = integrated_acc2 * np.pi * GAL_TO_PCTG / 2
            channel = trace.stats.channel
            trace.stats.standard.units = 'veloc'
            trace.stats.npts = len(arias_intensity)
            arias_stream.append(StationTrace(arias_intensity, trace.stats))
            arias_intensities[channel] = np.abs(np.max(arias_intensity))
        self.arias_stream = arias_stream
        return arias_intensities
Code example #22
File: arias.py  Project: vinceq-usgs/gmprocess
    def get_arias(self):
        """
        Calculates the Arias intensity for each channel.

        Returns:
            arias_intensities: Dictionary of arias intensity for each channel.
        """
        arias_intensities = {}
        arias_stream = StationStream([])
        for trace in self.reduction_data:
            dt = trace.stats['delta']
            # convert from cm/s/s to m/s/s
            acc = trace.data * 0.01

            # Calculate Arias Intensity
            integrated_acc2 = integrate.cumtrapz(acc * acc, dx=dt)
            arias_intensity = integrated_acc2 * np.pi * GAL_TO_PCTG / 2
            channel = trace.stats.channel
            trace.stats.standard.units = 'veloc'
            trace.stats.npts = len(arias_intensity)
            arias_stream.append(StationTrace(arias_intensity, trace.stats))
            arias_intensities[channel] = np.abs(np.max(arias_intensity))
        self.arias_stream = arias_stream
        return arias_intensities
Code example #23
def read_volume_one(filename, location='', alternate=False):
    """Read channel data from USC volume 1 text file.

    Args:
        filename (str): Input USC V1 filename.
    Returns:
        list: List of one StationStream containing the channel traces
        read from the file.
    """
    volume = VOLUMES['V1']
    # count the number of lines in the file
    with open(filename) as f:
        line_count = sum(1 for _ in f)
    # read as many channels as are present in the file
    line_offset = 0
    stream = StationStream([])
    while line_offset < line_count:
        trace, line_offset = _read_channel(
            filename, line_offset, volume, location=location, alternate=alternate)
        # store the trace if it was successfully read
        if trace is not None:
            stream.append(trace)

    return [stream]
Code example #24
File: core.py  Project: vinceq-usgs/gmprocess
def read_unam(filename):
    """Read the Mexican UNAM strong motion data format.

    Args:
        filename (str): path to UNAM data file.

    Returns:
        list: Sequence of one StationStream object containing 3
        StationTrace objects.
    """

    channels = _read_header(filename)
    npts = channels[0]['npts']
    all_data = np.genfromtxt(filename, skip_header=ALL_HEADERS, max_rows=npts)
    trace1 = StationTrace(data=all_data[:, 0], header=channels[0])
    trace2 = StationTrace(data=all_data[:, 1], header=channels[1])
    trace3 = StationTrace(data=all_data[:, 2], header=channels[2])
    stream = StationStream(traces=[trace1, trace2, trace3])
    return [stream]
Code example #25
File: core.py  Project: norfordb/groundmotion
def read_cosmos(filename, **kwargs):
    """Read COSMOS V1/V2 strong motion file.

    There is one extra key in the Stats object for each Trace -
    "process_level".
    This will be set to either "V1" or "V2".

    Args:
        filename (str): Path to possible COSMOS V1/V2 data file.
        kwargs (ref):
            valid_station_types (list): List of valid station types. See table
                6  in the COSMOS strong motion data format documentation for
                station type codes.
            Other arguments will be ignored.
    Returns:
        list: List of StationStreams containing three channels of acceleration
        data (cm/s**2).
    """
    logging.debug("Starting read_cosmos.")
    if not is_cosmos(filename):
        raise Exception('%s is not a valid COSMOS strong motion data file.' %
                        filename)
    # get list of valid stations
    valid_station_types = kwargs.get('valid_station_types', None)
    # get the location argument, if any
    location = kwargs.get('location', '')

    # count the number of lines in the file
    with open(filename) as f:
        line_count = sum(1 for _ in f)

    # read as many channels as are present in the file
    line_offset = 0
    stream = StationStream([])
    while line_offset < line_count:
        trace, line_offset = _read_channel(filename,
                                           line_offset,
                                           line_count,
                                           location=location)
        # store the trace if the station type is in the valid_station_types
        # list or store the trace if there is no valid_station_types list
        if valid_station_types is not None:
            if trace.stats['format_specific']['station_code'] in \
                    valid_station_types:
                stream.append(trace)
        else:
            stream.append(trace)

    return [stream]
Code example #26
def read_bhrc(filename):
    """Read the Iran BHRC strong motion data format.

    Args:
        filename (str): path to BHRC data file.

    Returns:
        list: Sequence of one StationStream object containing 3 StationTrace objects.
    """
    header1, offset = _read_header_lines(filename, 0)
    data1, offset = _read_data(filename, offset, header1)
    header2, offset = _read_header_lines(filename, offset)
    data2, offset = _read_data(filename, offset, header2)
    header3, offset = _read_header_lines(filename, offset)
    data3, offset = _read_data(filename, offset, header3)
    trace1 = StationTrace(data1, header1)
    trace2 = StationTrace(data2, header2)
    trace3 = StationTrace(data3, header3)
    stream = StationStream([trace1, trace2, trace3])
    return [stream]
Code example #27
    def __group_by_net_sta_inst(self):
        trace_list = []
        for stream in self:
            for trace in stream:
                trace_list += [trace]

        # Create a list of traces with matching net, sta.
        all_matches = []
        match_list = []
        for idx1, trace1 in enumerate(trace_list):
            if idx1 in all_matches:
                continue
            matches = [idx1]
            network = trace1.stats['network']
            station = trace1.stats['station']
            free_field = trace1.free_field
            # For instrument, use first two characters of the channel
            inst = trace1.stats['channel'][0:2]
            for idx2, trace2 in enumerate(trace_list):
                if idx1 != idx2 and idx1 not in all_matches:
                    if (network == trace2.stats['network']
                            and station == trace2.stats['station']
                            and inst == trace2.stats['channel'][0:2]
                            and free_field == trace2.free_field):
                        matches.append(idx2)
            if len(matches) > 1:
                match_list.append(matches)
                all_matches.extend(matches)
            else:
                if matches[0] not in all_matches:
                    match_list.append(matches)
                    all_matches.extend(matches)

        grouped_streams = []
        for groups in match_list:
            grouped_trace_list = []
            for i in groups:
                grouped_trace_list.append(trace_list[i])
            grouped_streams.append(StationStream(grouped_trace_list))

        self.streams = grouped_streams
Code example #28
def test_exceptions():
    ddir = os.path.join('data', 'testdata', 'geonet')
    homedir = pkg_resources.resource_filename('gmprocess', ddir)
    datafile_v2 = os.path.join(homedir, 'us1000778i',
                               '20161113_110259_WTMC_20.V2A')
    stream_v2 = read_geonet(datafile_v2)[0]
    # Check for origin Error
    passed = True
    try:
        m = MetricsController('pga', 'radial_transverse', stream_v2)
    except PGMException:
        passed = False
    assert not passed

    # -------- Horizontal Channel Errors -----------
    # Check for horizontal passthrough gm
    st2 = stream_v2.select(component='[N1]')
    st3 = stream_v2.select(component='Z')
    st1 = StationStream([st2[0], st3[0]])
    passed = True
    m = MetricsController('pga', 'geometric_mean', st1)
    pgm = m.pgms
    result = pgm['Result'].tolist()[0]
    assert np.isnan(result)
    # Check for horizontal passthrough rotd50
    m = MetricsController('pga', 'rotd50', st1)
    pgm = m.pgms
    result = pgm['Result'].tolist()[0]
    assert np.isnan(result)
    # Check for horizontal passthrough gmrotd50
    m = MetricsController('pga', 'gmrotd50', st1)
    pgm = m.pgms
    result = pgm['Result'].tolist()[0]
    assert np.isnan(result)
    # No horizontal channels
    try:
        m = MetricsController('sa3.0', 'channels', st3)
    except PGMException:
        passed = False
    assert not passed
Code example #29
    def getStreams(self, eventid, stations=None, labels=None):
        """Get Stream from ASDF file given event id and input tags.

        Args:
            eventid (str):
                Event ID corresponding to an Event in the workspace.
            stations (list):
                List of stations to search for.
            labels (list):
                List of processing labels to search for.

        Returns:
            StreamCollection: Object containing list of organized
            StationStreams.
        """
        trace_auxholder = []
        stream_auxholder = []
        if 'TraceProcessingParameters' in self.dataset.auxiliary_data:
            trace_auxholder = self.dataset.auxiliary_data.TraceProcessingParameters
        if 'StreamProcessingParameters' in self.dataset.auxiliary_data:
            stream_auxholder = self.dataset.auxiliary_data.StreamProcessingParameters
        streams = []
        all_tags = []

        if stations is None:
            stations = self.getStations(eventid)
        if labels is None:
            labels = self.getLabels()
        for station in stations:
            for label in labels:
                all_tags.append('%s_%s_%s' % (eventid, station.lower(), label))

        for waveform in self.dataset.waveforms:
            ttags = waveform.get_waveform_tags()
            wtags = []
            if not len(all_tags):
                wtags = ttags
            else:
                wtags = list(set(all_tags).intersection(set(ttags)))
            for tag in wtags:
                if eventid in waveform[tag][0].stats.asdf.event_ids:
                    tstream = waveform[tag].copy()
                    inventory = waveform['StationXML']
                    for ttrace in tstream:
                        trace = StationTrace(data=ttrace.data,
                                             header=ttrace.stats,
                                             inventory=inventory)
                        tpl = (trace.stats.network.lower(),
                               trace.stats.station.lower(),
                               trace.stats.channel.lower())
                        channel = '%s_%s_%s' % tpl
                        channel_tag = '%s_%s' % (tag, channel)
                        if channel_tag in self.dataset.provenance.list():
                            provdoc = self.dataset.provenance[channel_tag]
                            trace.setProvenanceDocument(provdoc)
                        trace_path = '%s_%s' % (
                            tag, trace.stats.channel)
                        if trace_path in trace_auxholder:
                            bytelist = trace_auxholder[
                                trace_path].data[:].tolist()
                            jsonstr = ''.join([chr(b) for b in bytelist])
                            jdict = json.loads(jsonstr)
                            for key, value in jdict.items():
                                trace.setParameter(key, value)

                        stream = StationStream(traces=[trace])
                        stream.tag = tag  # testing this out

                        # look for stream-based metadata
                        if tag in stream_auxholder:
                            bytelist = stream_auxholder[
                                tag].data[:].tolist()
                            jsonstr = ''.join([chr(b) for b in bytelist])
                            jdict = json.loads(jsonstr)
                            for key, value in jdict.items():
                                stream.setStreamParam(key, value)

                        streams.append(stream)
        streams = StreamCollection(streams)
        return streams
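A hedged sketch of calling getStreams; the class name StreamWorkspace, its open() constructor, the file path, and the 'unprocessed' label are all assumptions for illustration, while the event id comes from test_exceptions in this listing.

# Assumed workspace entry point; everything here is illustrative.
workspace = StreamWorkspace.open('/path/to/workspace.h5')
collection = workspace.getStreams('us1000778i', labels=['unprocessed'])
for st in collection:
    print(st.tag, len(st))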
Code example #30
def read_knet(filename):
    """Read Japanese KNET strong motion file.

    Args:
        filename (str): Path to possible KNET data file.
    Returns:
        list: List of one StationStream containing one channel of
            acceleration data (cm/s**2).
    """
    logging.debug("Starting read_knet.")
    if not is_knet(filename):
        raise Exception('%s is not a valid KNET file' % filename)

    # Parse the header portion of the file
    with open(filename, 'rt') as f:
        lines = [next(f) for x in range(TEXT_HDR_ROWS)]

    hdr = {}
    coordinates = {}
    standard = {}
    hdr['network'] = 'BO'
    hdr['station'] = lines[5].split()[2]
    logging.debug('station: %s' % hdr['station'])
    standard['station_name'] = ''

    # according to the powers that defined the Network.Station.Channel.Location
    # "standard", Location is a two character field.  Most data providers,
    # including KNET here, don't provide this.  We'll flag it as "--".
    hdr['location'] = '--'

    coordinates['latitude'] = float(lines[6].split()[2])
    coordinates['longitude'] = float(lines[7].split()[2])
    coordinates['elevation'] = float(lines[8].split()[2])

    hdr['sampling_rate'] = float(
        re.search('\\d+', lines[10].split()[2]).group())
    hdr['delta'] = 1 / hdr['sampling_rate']
    standard['units'] = 'acc'

    dir_string = lines[12].split()[1].strip()
    # knet files have directions listed as N-S, E-W, or U-D,
    # whereas in kiknet those directions are '4', '5', or '6'.
    if dir_string in ['N-S', '1', '4']:
        hdr['channel'] = get_channel_name(
            hdr['sampling_rate'],
            is_acceleration=True,
            is_vertical=False,
            is_north=True)
    elif dir_string in ['E-W', '2', '5']:
        hdr['channel'] = get_channel_name(
            hdr['sampling_rate'],
            is_acceleration=True,
            is_vertical=False,
            is_north=False)
    elif dir_string in ['U-D', '3', '6']:
        hdr['channel'] = get_channel_name(
            hdr['sampling_rate'],
            is_acceleration=True,
            is_vertical=True,
            is_north=False)
    else:
        raise Exception('KNET: Could not parse direction %s' %
                        lines[12].split()[1])

    logging.debug('channel: %s' % hdr['channel'])
    scalestr = lines[13].split()[2]
    parts = scalestr.split('/')
    num = float(parts[0].replace('(gal)', ''))
    den = float(parts[1])
    calib = num / den
    hdr['calib'] = calib

    duration = float(lines[11].split()[2])

    hdr['npts'] = int(duration * hdr['sampling_rate'])

    timestr = ' '.join(lines[9].split()[2:4])
    # The K-NET and KiK-Net data logger adds a 15s time delay
    # this is removed here
    sttime = datetime.strptime(timestr, TIMEFMT) - timedelta(seconds=15.0)
    # Shift the time to utc (Japanese time is 9 hours ahead)
    sttime = sttime - timedelta(seconds=9 * 3600.)
    hdr['starttime'] = sttime

    # read in the data - there is a max of 8 columns per line
    # the code below handles the case when last line has
    # less than 8 columns
    if hdr['npts'] % COLS_PER_LINE != 0:
        nrows = int(np.floor(hdr['npts'] / COLS_PER_LINE))
        nrows2 = 1
    else:
        nrows = int(np.ceil(hdr['npts'] / COLS_PER_LINE))
        nrows2 = 0
    data = np.genfromtxt(filename, skip_header=TEXT_HDR_ROWS,
                         max_rows=nrows, filling_values=np.nan)
    data = data.flatten()
    if nrows2:
        skip_header = TEXT_HDR_ROWS + nrows
        data2 = np.genfromtxt(filename, skip_header=skip_header,
                              max_rows=nrows2, filling_values=np.nan)
        data = np.hstack((data, data2))
        nrows += nrows2

    # apply the correction factor we're given in the header
    data *= calib

    # fill out the rest of the standard dictionary
    standard['horizontal_orientation'] = np.nan
    standard['instrument_period'] = np.nan
    standard['instrument_damping'] = np.nan
    standard['process_time'] = ''
    standard['process_level'] = PROCESS_LEVELS['V1']
    standard['sensor_serial_number'] = ''
    standard['instrument'] = ''
    standard['comments'] = ''
    standard['structure_type'] = ''
    if dir_string in ['1', '2', '3']:
        standard['structure_type'] = 'borehole'

    standard['corner_frequency'] = np.nan
    standard['units'] = 'acc'
    standard['source'] = SRC
    standard['source_format'] = 'knet'

    hdr['coordinates'] = coordinates
    hdr['standard'] = standard

    # create a Trace from the data and metadata
    trace = StationTrace(data.copy(), Stats(hdr.copy()))
    response = {'input_units': 'counts', 'output_units': 'cm/s^2'}
    trace.setProvenance('remove_response', response)

    stream = StationStream(traces=[trace])
    return [stream]
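A short usage sketch for the K-NET reader; the path is a placeholder. By the time the stream is returned, the 15-second logger delay and the 9-hour JST-to-UTC shift described above have already been applied to starttime.

# Illustrative call only; the path is not a real K-NET file.
st = read_knet('/path/to/record.NS')[0]
tr = st[0]
print(tr.stats.starttime, tr.stats.channel, tr.stats.standard['process_level'])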
Code example #31
def read_esm(filename):
    """Read European ESM strong motion file.

    Args:
        filename (str): Path to possible ESM data file.
    Returns:
        list: List of one StationStream containing one channel of
            acceleration data (cm/s**2).
    """
    logging.debug("Starting read_esm.")
    if not is_esm(filename):
        raise Exception('%s is not a valid ESM file' % filename)

    # Parse the header portion of the file
    header = {}
    with open(filename, 'rt') as f:
        lines = [next(f) for x in range(TEXT_HDR_ROWS)]

    for line in lines:
        parts = line.split(':')
        key = parts[0].strip()
        value = ':'.join(parts[1:]).strip()
        header[key] = value

    stats = {}
    standard = {}
    format_specific = {}
    coordinates = {}

    # fill in all known stats header fields
    stats['network'] = header['NETWORK']
    stats['station'] = header['STATION_CODE']
    stats['channel'] = header['STREAM']
    stats['location'] = '--'
    stats['delta'] = float(header['SAMPLING_INTERVAL_S'])
    stats['sampling_rate'] = 1 / stats['delta']
    stats['calib'] = 1.0
    stats['npts'] = int(header['NDATA'])
    stimestr = header['DATE_TIME_FIRST_SAMPLE_YYYYMMDD_HHMMSS']
    stats['starttime'] = datetime.strptime(stimestr, TIMEFMT)

    # fill in standard fields
    head, tail = os.path.split(filename)
    standard['source_file'] = tail or os.path.basename(head)
    standard['source'] = SRC
    standard['source_format'] = FORMAT
    standard['horizontal_orientation'] = np.nan
    standard['station_name'] = header['STATION_NAME']
    try:
        standard['instrument_period'] = 1 / \
            float(header['INSTRUMENTAL_FREQUENCY_HZ'])
    except ValueError:
        standard['instrument_period'] = np.nan
    try:
        standard['instrument_damping'] = 1 / \
            float(header['INSTRUMENTAL_DAMPING'])
    except ValueError:
        standard['instrument_damping'] = np.nan

    ptimestr = header['DATA_TIMESTAMP_YYYYMMDD_HHMMSS']
    ptime = datetime.strptime(ptimestr, TIMEFMT).strftime(TIMEFMT2)
    standard['process_time'] = ptime
    standard['process_level'] = PROCESS_LEVELS['V1']
    instr_str = header['INSTRUMENT']
    parts = instr_str.split('|')
    sensor_str = parts[0].split('=')[1].strip()
    standard['sensor_serial_number'] = ''
    standard['instrument'] = sensor_str
    standard['comments'] = ''
    standard['structure_type'] = ''
    standard['units'] = 'cm/s^2'
    standard['units_type'] = 'acc'
    standard['instrument_sensitivity'] = np.nan
    standard['corner_frequency'] = np.nan

    coordinates['latitude'] = float(header['STATION_LATITUDE_DEGREE'])
    coordinates['longitude'] = float(header['STATION_LONGITUDE_DEGREE'])
    coordinates['elevation'] = float(header['STATION_ELEVATION_M'])

    # read in the data
    data = np.genfromtxt(filename, skip_header=TEXT_HDR_ROWS)

    # create a Trace from the data and metadata
    stats['standard'] = standard
    stats['coordinates'] = coordinates
    trace = StationTrace(data.copy(), Stats(stats.copy()))
    response = {'input_units': 'counts', 'output_units': 'cm/s^2'}
    trace.setProvenance('remove_response', response)
    ftype = header['FILTER_TYPE'].capitalize()
    forder = int(header['FILTER_ORDER'])
    lowfreq = float(header['LOW_CUT_FREQUENCY_HZ'])
    # the upper corner is assumed to be stored under the parallel
    # HIGH_CUT_FREQUENCY_HZ header key
    highfreq = float(header['HIGH_CUT_FREQUENCY_HZ'])
    filter_att = {'bandpass_filter':
                  {'filter_type': ftype,
                   'lower_corner_frequency': lowfreq,
                   'higher_corner_frequency': highfreq,
                   'filter_order': forder}}
    trace.setProvenance('lowpass_filter', filter_att)
    detrend_att = {'detrend': {'detrending_method': 'baseline'}}
    trace.setProvenance('detrend', detrend_att)
    stream = StationStream(traces=[trace])
    return [stream]
Code example #32
def read_dmg(filename, **kwargs):
    """Read DMG strong motion file.

    Notes:
        CSMIP is treated as synonymous with DMG in this reader.

    Args:
        filename (str): Path to possible DMG data file.
        kwargs (ref):
            units (str): String determining which time series is returned.
                    Valid options include 'acc', 'vel', 'disp'. Default is
                    'acc'.
            Other arguments will be ignored.

    Returns:
        list: List of one StationStream containing three channels of
        acceleration data (cm/s**2).
    """
    logging.debug("Starting read_dmg.")
    if not is_dmg(filename):
        raise Exception('%s is not a valid DMG strong motion data file.' %
                        filename)

    # Check for units and location
    units = kwargs.get('units', 'acc')
    location = kwargs.get('location', '')

    if units not in UNITS:
        raise Exception('DMG: Not a valid choice of units.')

    # Determine the volume type from the first line of the file
    # (is_dmg was already verified above)
    with open(filename, 'rt') as f:
        line = f.readline()
    reader = None
    if line.lower().find('uncorrected') >= 0:
        reader = 'V1'
    elif line.lower().find('corrected') >= 0:
        reader = 'V2'
    elif line.lower().find('response') >= 0:
        reader = 'V3'

    # Count the number of lines in the file
    with open(filename) as f:
        line_count = sum(1 for _ in f)

    # Read as many channels as are present in the file
    line_offset = 0
    trace_list = []
    while line_offset < line_count:
        if reader == 'V2':
            traces, line_offset = _read_volume_two(filename,
                                                   line_offset,
                                                   location=location,
                                                   units=units)
            if traces is not None:
                trace_list += traces
        elif reader == 'V1':
            traces, line_offset = _read_volume_one(filename,
                                                   line_offset,
                                                   location=location,
                                                   units=units)
            if traces is not None:
                trace_list += traces
        else:
            raise GMProcessException('DMG: Not a supported volume.')

    stream = StationStream([])
    for trace in trace_list:
        # For our purposes, we only want acceleration, so lets only return
        # that; we may need to change this later if others start using this
        # code and want to read in the other data.
        if trace.stats['standard']['units'] == units:
            stream.append(trace)
    return [stream]
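Per the docstring, the units keyword selects which of the DMG time series is returned. A hedged sketch with a placeholder path; 'acc' is the documented default, so passing it explicitly only states the intent.

# Illustrative call only; the path is a placeholder.
streams = read_dmg('/path/to/dmg_record.V2', units='acc')
for tr in streams[0]:
    print(tr.id, tr.stats['standard']['units'])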
Code example #33
    def getStreams(self, eventid, stations=None, labels=None):
        """Get Stream from ASDF file given event id and input tags.

        Args:
            eventid (str):
                Event ID corresponding to an Event in the workspace.
            stations (list):
                List of stations to search for.
            labels (list):
                List of processing labels to search for.

        Returns:
            StreamCollection: Object containing list of organized
            StationStreams.
        """
        trace_auxholder = []
        stream_auxholder = []
        if 'TraceProcessingParameters' in self.dataset.auxiliary_data:
            trace_auxholder = self.dataset.auxiliary_data.TraceProcessingParameters
        if 'StreamProcessingParameters' in self.dataset.auxiliary_data:
            stream_auxholder = self.dataset.auxiliary_data.StreamProcessingParameters
        streams = []
        all_tags = []

        if stations is None:
            stations = self.getStations(eventid)
        if labels is None:
            labels = self.getLabels()
        for station in stations:
            for label in labels:
                all_tags.append('%s_%s' % (station.lower(), label))

        for waveform in self.dataset.waveforms:
            ttags = waveform.get_waveform_tags()
            wtags = []
            if not len(all_tags):
                wtags = ttags
            else:
                wtags = list(set(all_tags).intersection(set(ttags)))
            for tag in wtags:
                if eventid in waveform[tag][0].stats.asdf.event_ids:
                    tstream = waveform[tag].copy()
                    inventory = waveform['StationXML']
                    traces = []
                    for ttrace in tstream:
                        trace = StationTrace(data=ttrace.data,
                                             header=ttrace.stats,
                                             inventory=inventory)
                        tpl = (trace.stats.network.lower(),
                               trace.stats.station.lower(),
                               trace.stats.channel.lower())
                        channel = '%s_%s_%s' % tpl
                        channel_tag = '%s_%s' % (tag, channel)
                        if channel_tag in self.dataset.provenance.list():
                            provdoc = self.dataset.provenance[channel_tag]
                            trace.setProvenanceDocument(provdoc)
                        trace_path = '%s_%s_%s' % (eventid,
                                                   tag,
                                                   trace.stats.channel)
                        if trace_path in trace_auxholder:
                            bytelist = trace_auxholder[
                                trace_path].data[:].tolist()
                            jsonstr = ''.join([chr(b) for b in bytelist])
                            jdict = json.loads(jsonstr)
                            # jdict = unstringify_dict(jdict)
                            for key, value in jdict.items():
                                trace.setParameter(key, value)

                        traces.append(trace)
                    stream = StationStream(traces=traces)
                    stream.tag = tag  # testing this out

                    # look for stream-based metadata
                    stream_path = '%s_%s' % (eventid, tag)
                    if stream_path in stream_auxholder:
                        bytelist = stream_auxholder[
                            stream_path].data[:].tolist()
                        jsonstr = ''.join([chr(b) for b in bytelist])
                        jdict = json.loads(jsonstr)
                        # jdict = unstringify_dict(jdict)
                        for key, value in jdict.items():
                            stream.setStreamParam(key, value)

                    streams.append(stream)
        streams = StreamCollection(streams)
        return streams
Code example #34
    def __handle_duplicates(self, max_dist_tolerance, process_level_preference,
                            format_preference):
        """
        Removes duplicate data from the StreamCollection, based on the
        process level and format preferences.

        Args:
            max_dist_tolerance (float):
                Maximum distance tolerance for determining whether two streams
                are at the same location (in meters).
            process_level_preference (list):
                A list containing 'V0', 'V1', 'V2', with the order determining
                which process level is the most preferred (most preferred goes
                first in the list).
            format_preference (list):
                A list containing strings of the file source formats (found
                in gmprocess.io). Does not need to list all of the formats.
                Example: ['cosmos', 'dmg'] indicates that cosmos files are
                preferred over dmg files.
        """

        # If arguments are None, check the config
        # If not in the config, use the default values at top of the file
        preferences = {
            'max_dist_tolerance': max_dist_tolerance,
            'process_level_preference': process_level_preference,
            'format_preference': format_preference
        }
        default_config = None
        for key, val in preferences.items():
            if val is None:
                if default_config is None:
                    default_config = get_config()
                preferences[key] = default_config['duplicate'][key]

        stream_params = gather_stream_parameters(self.streams)

        traces = []
        for st in self.streams:
            for tr in st:
                traces.append(tr)
        preferred_traces = []

        for tr_to_add in traces:
            is_duplicate = False
            for tr_pref in preferred_traces:
                if are_duplicates(tr_to_add, tr_pref,
                                  preferences['max_dist_tolerance']):
                    is_duplicate = True
                    break

            if is_duplicate:
                if choose_preferred(
                        tr_to_add, tr_pref,
                        preferences['process_level_preference'],
                        preferences['format_preference']) == tr_to_add:
                    preferred_traces.remove(tr_pref)
                    logging.info(
                        'Trace %s (%s) is a duplicate and '
                        'has been removed from the StreamCollection.' %
                        (tr_pref.id, tr_pref.stats.standard.source_file))
                    preferred_traces.append(tr_to_add)
                else:
                    logging.info(
                        'Trace %s (%s) is a duplicate and '
                        'has been removed from the StreamCollection.' %
                        (tr_to_add.id, tr_to_add.stats.standard.source_file))

            else:
                preferred_traces.append(tr_to_add)

        streams = [StationStream([tr]) for tr in preferred_traces]
        streams = insert_stream_parameters(streams, stream_params)
        self.streams = streams
Code example #35
def test_rotd():
    ddir = os.path.join('data', 'testdata', 'process')
    datadir = pkg_resources.resource_filename('gmprocess', ddir)
    # Create a stream and station summary, convert from m/s^2 to cm/s^2 (GAL)
    osc1_data = np.genfromtxt(datadir + '/ALCTENE.UW..sac.acc.final.txt')
    osc2_data = np.genfromtxt(datadir + '/ALCTENN.UW..sac.acc.final.txt')
    osc1_data = osc1_data.T[1] * 100
    osc2_data = osc2_data.T[1] * 100
    tr1 = StationTrace(data=osc1_data, header={
        'channel': 'HN1', 'delta': 0.01,
        'npts': 24001,
        'standard': {
            'corner_frequency': np.nan,
            'station_name': '',
            'source': 'json',
            'instrument': '',
            'instrument_period': np.nan,
            'source_format': 'json',
            'comments': '',
            'source_file': '',
            'structure_type': '',
            'horizontal_orientation': np.nan,
            'vertical_orientation': np.nan,
            'sensor_serial_number': '',
            'process_level': 'corrected physical units',
            'process_time': '',
            'units': 'acc',
            'units_type': 'acc',
            'instrument_sensitivity': np.nan,
            'instrument_damping': np.nan
        }
    })
    tr2 = StationTrace(data=osc2_data, header={
        'channel': 'HN2', 'delta': 0.01,
        'npts': 24001, 'standard': {
            'corner_frequency': np.nan,
            'station_name': '',
            'source': 'json',
            'instrument': '',
            'instrument_period': np.nan,
            'source_format': 'json',
            'comments': '',
            'structure_type': '',
            'source_file': '',
            'horizontal_orientation': np.nan,
            'vertical_orientation': np.nan,
            'sensor_serial_number': '',
            'process_level': 'corrected physical units',
            'process_time': '',
            'units': 'acc',
            'units_type': 'acc',
            'instrument_sensitivity': np.nan,
            'instrument_damping': np.nan
        }
    })
    st = StationStream([tr1, tr2])

    for tr in st:
        response = {'input_units': 'counts', 'output_units': 'cm/s^2'}
        tr.setProvenance('remove_response', response)

    target_pga50 = 4.12528265306
    target_sa1050 = 10.7362857143
    target_pgv50 = 6.239364
    target_sa0350 = 10.1434159021
    target_sa3050 = 1.12614169215
    station = StationSummary.from_stream(
        st, ['rotd50'],
        ['pga', 'pgv', 'sa0.3', 'sa1.0', 'sa3.0']
    )

    pgms = station.pgms
    pga = pgms.loc['PGA', 'ROTD(50.0)'].Result
    pgv = pgms.loc['PGV', 'ROTD(50.0)'].Result
    SA10 = pgms.loc['SA(1.000)', 'ROTD(50.0)'].Result
    SA03 = pgms.loc['SA(0.300)', 'ROTD(50.0)'].Result
    SA30 = pgms.loc['SA(3.000)', 'ROTD(50.0)'].Result
    np.testing.assert_allclose(pga, target_pga50, atol=0.1)
    np.testing.assert_allclose(SA10, target_sa1050, atol=0.1)
    np.testing.assert_allclose(pgv, target_pgv50, atol=0.1)
    np.testing.assert_allclose(SA03, target_sa0350, atol=0.1)
    np.testing.assert_allclose(SA30, target_sa3050, atol=0.1)
Code example #36
def read_smc(filename, **kwargs):
    """Read SMC strong motion file.

    Args:
        filename (str): Path to possible SMC data file.
        kwargs (ref):
            any_structure (bool): Read data from any type of structure,
                raise Exception if False and structure type is not free-field.
            accept_flagged (bool): accept problem flagged data.
            location (str): Two character code for location.
            Other arguments will be ignored.
    Returns:
        list: List of one StationStream containing one channel of
        acceleration data (cm/s**2).
    """
    logging.debug("Starting read_smc.")
    any_structure = kwargs.get('any_structure', False)
    accept_flagged = kwargs.get('accept_flagged', False)
    location = kwargs.get('location', '')

    if not is_smc(filename):
        raise Exception('%s is not a valid SMC file' % filename)

    with open(filename, 'rt') as f:
        line = f.readline().strip()
        if 'DISPLACEMENT' in line:
            raise GMProcessException(
                'SMC: Displacement records are not supported: '
                '%s.' % filename)
        elif 'VELOCITY' in line:
            raise GMProcessException(
                'SMC: Velocity records are not supported: '
                '%s.' % filename)
        elif line == "*":
            raise GMProcessException(
                'SMC: No record volume specified in file: '
                '%s.' % filename)

    stats, num_comments = _get_header_info(filename,
                                           any_structure=any_structure,
                                           accept_flagged=accept_flagged,
                                           location=location)

    skip = ASCII_HEADER_LINES + INTEGER_HEADER_LINES + \
        num_comments + FLOAT_HEADER_LINES

    # read float data (8 columns per line)
    nrows = int(np.floor(stats['npts'] / DATA_COLUMNS))
    data = np.genfromtxt(filename,
                         max_rows=nrows,
                         skip_header=skip,
                         delimiter=FLOAT_DATA_WIDTHS)
    data = data.flatten()
    if stats['npts'] % DATA_COLUMNS:
        lastrow = np.genfromtxt(filename,
                                max_rows=1,
                                skip_header=skip + nrows,
                                delimiter=FLOAT_DATA_WIDTHS)
        data = np.append(data, lastrow)
    data = data[0:stats['npts']]
    trace = StationTrace(data, header=stats)

    response = {'input_units': 'counts', 'output_units': 'cm/s^2'}
    trace.setProvenance('remove_response', response)

    stream = StationStream(traces=[trace])
    return [stream]
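A final hedged sketch for read_smc, exercising the keyword options named in its docstring; the path is a placeholder.

# Illustrative call only; the kwargs mirror the docstring above.
streams = read_smc('/path/to/record.smc',
                   any_structure=True,
                   accept_flagged=False,
                   location='--')
print(streams[0][0].stats['npts'])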