Example #1
File: lemi424.py    Project: kujaku11/mth5
    def to_run_ts(self, fn=None, e_channels=["e1", "e2"]):
        """
        Return a RunTS object from the data

        :param fn: full path to the data file, defaults to None
        :type fn: string or :class:`pathlib.Path`, optional
        :param e_channels: list of electric channel names,
            defaults to ["e1", "e2"]
        :type e_channels: list of strings, optional
        :return: run time series containing all channels
        :rtype: :class:`mth5.timeseries.RunTS`

        """
        ch_list = []
        for comp in (["bx", "by", "bz"] + e_channels +
                     ["temperature_e", "temperature_h"]):
            if comp[0] in ["h", "b"]:
                ch = ChannelTS("magnetic")
            elif comp[0] in ["e"]:
                ch = ChannelTS("electric")
            else:
                ch = ChannelTS("auxiliary")

            ch.sample_rate = self.sample_rate
            ch.start = self.start
            ch.ts = self._df[comp].values
            ch.component = comp
            ch_list.append(ch)

        return RunTS(
            array_list=ch_list,
            station_metadata=self.station_metadata,
            run_metadata=self.run_metadata,
        )
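A short usage sketch for the method above. The LEMI424 class name, its read() method, and the import path are assumptions based on the surrounding lemi424.py module and may differ between mth5 versions; the file name is a placeholder.

from mth5.io.lemi import LEMI424  # import path assumed; adjust to your mth5 version

lemi = LEMI424(fn="example_lemi424.TXT")  # placeholder file name
lemi.read()                               # assumed read() method fills the internal DataFrame
run_ts = lemi.to_run_ts(e_channels=["e1", "e2"])
print(run_ts)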
Example #2
File: miniseed.py    Project: kujaku11/mth5
def read_miniseed(fn):
    """
    Read a miniseed file into a :class:`mth5.timeseries.RunTS` object

    :param fn: full path to the miniseed file
    :type fn: string or :class:`pathlib.Path`
    :return: run time series object built from the miniseed streams
    :rtype: :class:`mth5.timeseries.RunTS`

    """

    # obspy does not use Path objects for file names
    if isinstance(fn, Path):
        fn = fn.as_posix()
    obs_stream = obspy_read(fn)
    run_obj = RunTS()
    run_obj.from_obspy_stream(obs_stream)

    return run_obj
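A minimal usage sketch for the function above; the file path is a placeholder.

run = read_miniseed("example_station.mseed")  # placeholder miniseed file
print(run.channels)            # channel components found in the stream
print(run.start, run.end)      # run time extent taken from the traces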
Example #3
    def test_from_run_ts(self):
        ts_list = []
        for comp in ["ex", "ey", "hx", "hy", "hz"]:
            if comp[0] in ["e"]:
                ch_type = "electric"
            elif comp[0] in ["h", "b"]:
                ch_type = "magnetic"
            else:
                ch_type = "auxiliary"
            meta_dict = {
                ch_type: {
                    "component": comp,
                    "dipole_length": 49.0,
                    "measurement_azimuth": 12.0,
                    "type": ch_type,
                    "units": "counts",
                    "time_period.start": "2020-01-01T12:00:00",
                    "sample_rate": 1,
                }
            }
            channel_ts = ChannelTS(ch_type,
                                   data=np.random.rand(4096),
                                   channel_metadata=meta_dict)
            ts_list.append(channel_ts)
        run_ts = RunTS(ts_list, {"id": "MT002a"})

        station = self.mth5_obj.add_station("MT002", survey="test")
        run = station.add_run("MT002a")
        channel_groups = run.from_runts(run_ts)

        self.assertListEqual(["ex", "ey", "hx", "hy", "hz"], run.groups_list)

        # check to make sure the metadata was transferred
        for cg in channel_groups:
            with self.subTest(name=cg.metadata.component):
                self.assertEqual(MTime("2020-01-01T12:00:00"), cg.start)
                self.assertEqual(1, cg.sample_rate)
                self.assertEqual(4096, cg.n_samples)
        # slicing

        with self.subTest("get slice"):
            r_slice = run.to_runts(start="2020-01-01T12:00:00", n_samples=256)

            self.assertEqual(r_slice.end, "2020-01-01T12:04:16+00:00")
Example #4
streams = client.get_waveforms(network, station, None, None, start, end)

# get the metadata
inventory = client.get_stations(
    start, end, network=network, station=station, level="channel"
)
# translate obspy.core.Inventory to an mt_metadata.timeseries.Experiment
translator = XMLInventoryMTExperiment()
experiment = translator.xml_to_mt(inventory)

# initiate MTH5 file
m = MTH5()
m.open_mth5(r"from_iris_dmc.h5", "w")

# fill metadata
m.from_experiment(experiment)
station_group = m.get_station(station)

# split the streams into runs by grouping traces with the same start times and sample rates
start_times = sorted(list(set([tr.stats.starttime.isoformat() for tr in streams])))
end_times = sorted(list(set([tr.stats.endtime.isoformat() for tr in streams])))

for index, times in enumerate(zip(start_times, end_times), 1):
    run_stream = streams.slice(UTCDateTime(times[0]), UTCDateTime(times[1]))
    run_ts_obj = RunTS()
    run_ts_obj.from_obspy_stream(run_stream)
    run_group = station_group.add_run(f"{index:03}")
    run_group.from_runts(run_ts_obj)

m.close_mth5()
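The snippet above assumes that the imports, the FDSN client, and the query parameters were defined earlier in the script. A minimal setup sketch, where the network and station codes and the time window are illustrative placeholders and the import paths are assumptions based on the mth5 and mt_metadata packages:

from obspy import UTCDateTime
from obspy.clients.fdsn import Client
from mt_metadata.timeseries.stationxml import XMLInventoryMTExperiment
from mth5.mth5 import MTH5
from mth5.timeseries import RunTS

network = "ZU"                              # placeholder FDSN network code
station = "CAS04"                           # placeholder FDSN station code
start = UTCDateTime("2020-06-02T19:00:00")  # placeholder time window
end = UTCDateTime("2020-06-12T19:00:00")
client = Client("IRIS")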
Example #5
File: make_mth5.py    Project: kujaku11/mth5
    def make_mth5_from_fdsnclient(self, df, path=None, client=None, interact=False):
        """
        Make an MTH5 file from an FDSN data center

        :param df: DataFrame with columns

            - 'network'   --> FDSN Network code
            - 'station'   --> FDSN Station code
            - 'location'  --> FDSN Location code
            - 'channel'   --> FDSN Channel code
            - 'start'     --> Start time YYYY-MM-DDThh:mm:ss
            - 'end'       --> End time YYYY-MM-DDThh:mm:ss

        :type df: :class:`pandas.DataFrame`
        :param path: Path to save MTH5 file to, defaults to None
        :type path: string or :class:`pathlib.Path`, optional
        :param client: FDSN client name, defaults to "IRIS"
        :type client: string, optional
        :raises AttributeError: if the input DataFrame is not properly
            formatted
        :raises ValueError: if the values of the DataFrame are not correct
        :return: MTH5 file name
        :rtype: :class:`pathlib.Path`


        .. seealso:: https://docs.obspy.org/packages/obspy.clients.fdsn.html#id1

        .. note:: If any of the column values are blank, then any value will
            be searched for.  For example, if you leave 'station' blank, any
            station within the given start and end time will be returned.



        """
        if path is None:
            path = Path().cwd()
        else:
            path = Path(path)

        if client is not None:
            self.client = client

        df = self._validate_dataframe(df)

        unique_list = self.get_unique_networks_and_stations(df)
        if self.mth5_version in ["0.1.0"]:
            if len(unique_list) != 1:
                raise AttributeError("MTH5 supports one survey/network per container.")

        file_name = path.joinpath(self.make_filename(df))

        # initiate MTH5 file
        m = MTH5(file_version=self.mth5_version)
        m.open_mth5(file_name, "w")

        # read in inventory and streams
        inv, streams = self.get_inventory_from_df(df, self.client)
        # translate obspy.core.Inventory to an mt_metadata.timeseries.Experiment
        translator = XMLInventoryMTExperiment()
        experiment = translator.xml_to_mt(inv)

        # Updates experiment information based on time extent of streams
        # rather than time extent of inventory
        # experiment = translator.drop_runs(m, streams)

        m.from_experiment(experiment)
        if self.mth5_version in ["0.1.0"]:
            for station_id in unique_list[0]["stations"]:
                # get the streams for the given station
                msstreams = streams.select(station=station_id)
                trace_start_times = sorted(
                    list(set([tr.stats.starttime.isoformat() for tr in msstreams]))
                )
                trace_end_times = sorted(
                    list(set([tr.stats.endtime.isoformat() for tr in msstreams]))
                )
                if len(trace_start_times) != len(trace_end_times):
                    raise ValueError(
                        f"Do not have the same number of start {len(trace_start_times)}"
                        f" and end times {len(trace_end_times)} from streams"
                    )
                run_list = m.get_station(station_id).groups_list

                n_times = len(trace_start_times)

                # adding logic if there are already runs filled in
                if len(run_list) == n_times:
                    for run_id, start, end in zip(
                        run_list, trace_start_times, trace_end_times
                    ):
                        # add the group first; this will pull in the already
                        # filled in metadata to update the run_ts_obj.
                        run_group = m.stations_group.get_station(station_id).add_run(
                            run_id
                        )
                        # then get the streams and add existing metadata
                        run_stream = msstreams.slice(
                            UTCDateTime(start), UTCDateTime(end)
                        )
                        run_ts_obj = RunTS()
                        run_ts_obj.from_obspy_stream(run_stream, run_group.metadata)
                        run_group.from_runts(run_ts_obj)

                # if there is just one run
                elif len(run_list) == 1:
                    if n_times > 1:
                        for run_id, times in enumerate(
                            zip(trace_start_times, trace_end_times), 1
                        ):
                            run_group = m.stations_group.get_station(
                                station_id
                            ).add_run(f"{run_id:03}")
                            run_stream = msstreams.slice(
                                UTCDateTime(times[0]), UTCDateTime(times[1])
                            )
                            run_ts_obj = RunTS()
                            run_ts_obj.from_obspy_stream(run_stream, run_group.metadata)
                            run_group.from_runts(run_ts_obj)

                    elif n_times == 1:
                        run_group = m.stations_group.get_station(station_id).add_run(
                            run_list[0]
                        )
                        # only one time window, so use the single start/end pair
                        run_stream = msstreams.slice(
                            UTCDateTime(trace_start_times[0]),
                            UTCDateTime(trace_end_times[0]),
                        )
                        run_ts_obj = RunTS()
                        run_ts_obj.from_obspy_stream(run_stream, run_group.metadata)
                        run_group.from_runts(run_ts_obj)
                elif len(run_list) != n_times:
                    print(
                        "The number of runs requested by the user differs "
                        "from the number defined in the metadata. All runs "
                        "will be defined, but only the requested run extents "
                        "will contain time series data."
                    )
                    for run_id, start, end in zip(
                        run_list, trace_start_times, trace_end_times
                    ):

                        # add the group first; this will pull in the already
                        # filled in metadata
                        for run in run_list:
                            run_group = m.stations_group.get_station(
                                station_id
                            ).get_run(run)
                            # Checks the start and end times of the run
                            run_start = run_group.metadata.time_period.start
                            run_end = run_group.metadata.time_period.end
                            # Compare the start and end times of the traces to
                            # the run's time period and pack runs based on
                            # which time span they fall within.
                            if UTCDateTime(start) >= UTCDateTime(
                                run_start
                            ) and UTCDateTime(end) <= UTCDateTime(run_end):
                                run_stream = msstreams.slice(
                                    UTCDateTime(start), UTCDateTime(end)
                                )
                                run_ts_obj = RunTS()
                                run_ts_obj.from_obspy_stream(
                                    run_stream, run_group.metadata
                                )
                                run_group.from_runts(run_ts_obj)
                            else:
                                continue
                else:
                    raise ValueError("Cannot add Run for some reason.")

        # Version 0.2.0 has the ability to store multiple surveys
        elif self.mth5_version in ["0.2.0"]:
            # mt_metadata uses the MT survey id (if provided) as the survey
            # id, which may differ from the FDSN network code, so map the
            # FDSN networks onto the survey ids.
            survey_map = dict([(s.fdsn.network, s.id) for s in experiment.surveys])

            for survey_dict in unique_list:
                # get the mt survey id that maps to the fdsn network
                fdsn_network = survey_dict["network"]
                survey_id = survey_map[fdsn_network]

                survey_group = m.get_survey(survey_id)
                for station_id in survey_dict["stations"]:
                    # get the streams for the given station
                    msstreams = streams.select(station=station_id)
                    trace_start_times = sorted(
                        list(set([tr.stats.starttime.isoformat() for tr in msstreams]))
                    )
                    trace_end_times = sorted(
                        list(set([tr.stats.endtime.isoformat() for tr in msstreams]))
                    )
                    if len(trace_start_times) != len(trace_end_times):
                        raise ValueError(
                            f"Do not have the same number of start {len(trace_start_times)}"
                            f" and end times {len(trace_end_times)} from streams"
                        )
                    run_list = m.get_station(station_id, survey_id).groups_list
                    run_list.remove("Transfer_Functions")

                    n_times = len(trace_start_times)

                    # adding logic if there are already runs filled in
                    if len(run_list) == n_times:
                        for run_id, start, end in zip(
                            run_list, trace_start_times, trace_end_times
                        ):
                            # add the group first; this will pull in the
                            # already filled in metadata to update the
                            # run_ts_obj.
                            run_group = survey_group.stations_group.get_station(
                                station_id
                            ).add_run(run_id)

                            # then get the streams and add existing metadata
                            run_stream = msstreams.slice(
                                UTCDateTime(start), UTCDateTime(end)
                            )
                            run_ts_obj = RunTS()
                            run_ts_obj.from_obspy_stream(run_stream, run_group.metadata)
                            run_group.from_runts(run_ts_obj)

                    # if there is just one run
                    elif len(run_list) == 1:
                        if n_times > 1:
                            for run_id, times in enumerate(
                                zip(trace_start_times, trace_end_times), 1
                            ):
                                run_group = survey_group.stations_group.get_station(
                                    station_id
                                ).add_run(f"{run_id:03}")
                                run_stream = msstreams.slice(
                                    UTCDateTime(times[0]), UTCDateTime(times[1])
                                )
                                run_ts_obj = RunTS()
                                run_ts_obj.from_obspy_stream(
                                    run_stream, run_group.metadata
                                )
                                run_group.from_runts(run_ts_obj)

                        elif n_times == 1:
                            run_group = survey_group.stations_group.get_station(
                                station_id
                            ).add_run(run_list[0])
                            # only one time window, so use the single start/end pair
                            run_stream = msstreams.slice(
                                UTCDateTime(trace_start_times[0]),
                                UTCDateTime(trace_end_times[0]),
                            )
                            run_ts_obj = RunTS()
                            run_ts_obj.from_obspy_stream(run_stream, run_group.metadata)
                            run_group.from_runts(run_ts_obj)
                    else:
                        raise ValueError("Cannot add Run for some reason.")

        if not interact:
            m.close_mth5()

            return file_name
        if interact:
            return m
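A hedged usage sketch for the method above, assuming it is defined on the MakeMTH5 class in mth5.clients.make_mth5 (class name, constructor defaults, and import path may differ between mth5 versions). The codes and times in the request DataFrame are placeholders, and blank values mean "match anything" per the docstring note.

import pandas as pd

from mth5.clients.make_mth5 import MakeMTH5  # import path assumed

# one row per (network, station, location, channel, time window) request
request_df = pd.DataFrame(
    {
        "network": ["ZU"],          # placeholder FDSN network code
        "station": ["CAS04"],       # placeholder FDSN station code
        "location": [""],           # blank means any location
        "channel": [""],            # blank means any channel
        "start": ["2020-06-02T19:00:00"],
        "end": ["2020-06-12T19:00:00"],
    }
)

maker = MakeMTH5()  # constructor defaults assumed
mth5_path = maker.make_mth5_from_fdsnclient(request_df, path=".", client="IRIS")
print(mth5_path)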
Example #7
File: test_runts.py    Project: kujaku11/mth5
class TestRunTS(unittest.TestCase):
    def setUp(self):
        self.run = RunTS()
        self.maxDiff = None
        self.start = "2015-01-08T19:49:18+00:00"
        self.end = "2015-01-08T19:57:49.875000"
        self.sample_rate = 8
        self.npts = 4096

        self.ex = ChannelTS(
            "electric",
            data=np.random.rand(self.npts),
            channel_metadata={
                "electric": {
                    "component": "Ex",
                    "sample_rate": self.sample_rate,
                    "time_period.start": self.start,
                }
            },
        )
        self.ey = ChannelTS(
            "electric",
            data=np.random.rand(self.npts),
            channel_metadata={
                "electric": {
                    "component": "Ey",
                    "sample_rate": self.sample_rate,
                    "time_period.start": self.start,
                }
            },
        )
        self.hx = ChannelTS(
            "magnetic",
            data=np.random.rand(self.npts),
            channel_metadata={
                "magnetic": {
                    "component": "hx",
                    "sample_rate": self.sample_rate,
                    "time_period.start": self.start,
                }
            },
        )
        self.hy = ChannelTS(
            "magnetic",
            data=np.random.rand(self.npts),
            channel_metadata={
                "magnetic": {
                    "component": "hy",
                    "sample_rate": self.sample_rate,
                    "time_period.start": self.start,
                }
            },
        )
        self.hz = ChannelTS(
            "magnetic",
            data=np.random.rand(self.npts),
            channel_metadata={
                "magnetic": {
                    "component": "hz",
                    "sample_rate": self.sample_rate,
                    "time_period.start": self.start,
                }
            },
        )

        self.run.set_dataset([self.ex, self.ey, self.hx, self.hy, self.hz])

    def test_initialize(self):

        with self.subTest("channels"):
            self.assertListEqual(["ex", "ey", "hx", "hy", "hz"],
                                 self.run.channels)
        with self.subTest("sample rate"):
            self.assertEqual(self.run.sample_rate, self.sample_rate)
        with self.subTest("start"):
            self.assertEqual(self.run.start, MTime(self.start))
        with self.subTest("end"):
            self.assertEqual(self.run.end, MTime(self.end))

    def test_sr_fail(self):
        self.hz = ChannelTS(
            "magnetic",
            data=np.random.rand(self.npts),
            channel_metadata={
                "magnetic": {
                    "component": "hz",
                    "sample_rate": 1,
                    "time_period.start": self.start,
                }
            },
        )

        self.assertRaises(
            MTTSError,
            self.run.set_dataset,
            [self.ex, self.ey, self.hx, self.hy, self.hz],
        )

    def test_channels(self):

        for comp in ["ex", "ey", "hx", "hy", "hz"]:
            ch = getattr(self, comp)

            with self.subTest("isinstance channel"):
                self.assertIsInstance(ch, ChannelTS)
            with self.subTest("sample rate"):
                self.assertEqual(ch.sample_rate, self.sample_rate)
            with self.subTest("start"):
                self.assertEqual(ch.start, MTime(self.start))
            with self.subTest("end"):
                self.assertEqual(ch.end, MTime(self.end))
            with self.subTest("component"):
                self.assertEqual(ch.component, comp)

    def test_get_channel_fail(self):
        """
        self.run.temperature should return None, because 'temperature' is not in self.channels
        :return:
        """

        self.assertRaises(NameError, getattr, *(self.run, "temperature"))

    def test_wrong_metadata(self):
        self.run.run_metadata.sample_rate = 10
        self.run.validate_metadata()

        with self.subTest("sample rate"):
            self.assertEqual(self.ex.sample_rate,
                             self.run.run_metadata.sample_rate)
        with self.subTest("start"):
            self.run.run_metadata.start = "2020-01-01T00:00:00"
            self.run.validate_metadata()
            self.assertEqual(self.run.start,
                             self.run.run_metadata.time_period.start)
        with self.subTest("end"):
            self.run.run_metadata.end = "2020-01-01T00:00:00"
            self.run.validate_metadata()
            self.assertEqual(self.run.end,
                             self.run.run_metadata.time_period.end)

    def test_get_slice(self):

        start = "2015-01-08T19:49:30+00:00"
        npts = 256

        r_slice = self.run.get_slice(start, n_samples=npts)

        with self.subTest("isinstance runts"):
            self.assertIsInstance(r_slice, RunTS)
        with self.subTest("sample rate"):
            self.assertEqual(r_slice.sample_rate, self.sample_rate)
        with self.subTest("start"):
            self.assertEqual(r_slice.start, MTime(start))