def setUp(self):
    """
    Build a MakeMTH5 (v0.2.0, IRIS client) request table for stations
    CAS04 and NVR08, write it to ``test_inventory.csv``, and keep both a
    correctly-labeled and a wrongly-labeled DataFrame for the tests.
    """
    self.make_mth5 = MakeMTH5(mth5_version="0.2.0")
    self.make_mth5.client = "IRIS"

    channels = ["LFE", "LFN", "LFZ", "LQE", "LQN"]
    CAS04 = ["8P", "CAS04", "2020-06-02T18:00:00", "2020-07-13T19:00:00"]
    NVR08 = ["8P", "NVR08", "2020-06-02T18:00:00", "2020-07-13T19:00:00"]

    # one request row per (station, channel): net, sta, loc, chan, start, end
    request_list = [
        [net, sta, "", channel, start, end]
        for net, sta, start, end in (CAS04, NVR08)
        for channel in channels
    ]

    self.logger = setup_logger("test_make_mth5_v2")
    self.csv_fn = Path().cwd().joinpath("test_inventory.csv")
    self.mth5_path = Path().cwd()

    self.stations = ["CAS04", "NVR08"]
    self.channels = ["LQE", "LQN", "LFE", "LFN", "LFZ"]

    # Turn list into dataframe and persist it for the CSV-driven tests
    self.metadata_df = pd.DataFrame(
        request_list, columns=self.make_mth5.column_names
    )
    self.metadata_df.to_csv(self.csv_fn, index=False)

    # same rows, wrong column labels -- used to exercise the failure path
    self.metadata_df_fail = pd.DataFrame(
        request_list,
        columns=["net", "sta", "loc", "chn", "startdate", "enddate"],
    )
def __init__(self, group, group_metadata=None, **kwargs):
    """
    Attach to an HDF5 group/dataset and synchronize metadata with the file.

    :param group: h5py.Group or h5py.Dataset to wrap; any other value
        (including None) leaves ``hdf5_group`` unset.
    :param group_metadata: optional metadata object; when supplied it
        replaces the freshly initialized metadata and is written to the
        file, otherwise metadata is read back from the file.
    :param kwargs: any remaining keywords are set as instance attributes.
    """
    # default HDF5 storage options: no compression or checksum filters
    self.compression = None
    self.compression_opts = None
    self.shuffle = False
    self.fletcher32 = False
    self.logger = setup_logger(f"{__name__}.{self._class_name}")
    # make sure the reference to the group is weak so there are no lingering
    # references to a closed HDF5 file.
    # NOTE(review): weakref.ref(group)() immediately dereferences back to
    # the original object, so no weak reference is retained here --
    # presumably this avoids keeping an extra strong handle alive; confirm
    # the intent against the comment above.
    if group is not None and isinstance(group, (h5py.Group, h5py.Dataset)):
        self.hdf5_group = weakref.ref(group)()
    # initialize metadata
    self._initialize_metadata()
    # if metadata, make sure that its the same class type
    if group_metadata is not None:
        self.metadata = group_metadata
        # write out metadata to make sure that its in the file.
        self.write_metadata()
    else:
        # no metadata supplied: populate from what the file already holds
        self.read_metadata()
    # if any other keywords
    for key, value in kwargs.items():
        setattr(self, key, value)
def __init__(self, hdf5_dataset):
    """
    Wrap an existing h5py.Dataset as a table.

    :param hdf5_dataset: the dataset to wrap
    :raises MTH5TableError: if the input is not an h5py.Dataset
    """
    self.logger = setup_logger(f"{__name__}.{self.__class__.__name__}")
    self.hdf5_reference = None

    # reject anything that is not an actual h5py dataset up front
    if not isinstance(hdf5_dataset, h5py.Dataset):
        msg = "Input must be a h5py.Dataset not {0}".format(type(hdf5_dataset))
        self.logger.error(msg)
        raise MTH5TableError(msg)

    # hold the dataset (dereferenced weak ref) plus its low-level HDF5
    # reference for later lookups in the file
    self.array = weakref.ref(hdf5_dataset)()
    self.hdf5_reference = hdf5_dataset.ref
def __init__(self, dataset, dataset_metadata=None, write_metadata=True, **kwargs):
    """
    Wrap an h5py.Dataset holding a statistical estimate and sync metadata.

    :param dataset: h5py.Dataset to wrap (other values leave
        ``hdf5_dataset`` unset and will fail on first attribute access)
    :param dataset_metadata: optional metadata; must match the estimate
        metadata class or :class:`MTH5Error` is raised
    :param write_metadata: when True, metadata is written to the file
    :param kwargs: unused here, accepted for interface compatibility
    :raises MTH5Error: if ``dataset_metadata`` is of the wrong type
    """
    if dataset is not None and isinstance(dataset, (h5py.Dataset)):
        self.hdf5_dataset = weakref.ref(dataset)()
    self.logger = setup_logger(f"{__name__}.{self._class_name}")
    # set metadata to the appropriate class.  Standards is not a
    # Base object so should be skipped. If the class name is not
    # defined yet set to Base class.
    self.metadata = StatisticalEstimate()
    if not hasattr(self.metadata, "mth5_type"):
        self._add_base_attributes()
    self.metadata.hdf5_reference = self.hdf5_dataset.ref
    self.metadata.mth5_type = self._class_name
    # if the input data set already has filled attributes, namely if the
    # channel data already exists then read them in with our writing back
    if "mth5_type" in list(self.hdf5_dataset.attrs.keys()):
        self.metadata.from_dict(
            {self.hdf5_dataset.attrs["mth5_type"]: self.hdf5_dataset.attrs}
        )
    # if metadata is input, make sure that its the same class type amd write
    # to the hdf5 dataset
    if dataset_metadata is not None:
        if not isinstance(dataset_metadata, type(self.metadata)):
            msg = "metadata must be type metadata.%s not %s"
            self.logger.error(msg, self._class_name, type(dataset_metadata))
            # BUG FIX: the format string has two %s placeholders, so both
            # values must be supplied as a tuple.  Previously only the first
            # was passed to %, which raised "TypeError: not enough arguments
            # for format string" instead of the intended MTH5Error.
            raise MTH5Error(msg % (self._class_name, type(dataset_metadata)))
        # load from dict because of the extra attributes for MTH5
        self.metadata.from_dict(dataset_metadata.to_dict())
        self.metadata.hdf5_reference = self.hdf5_dataset.ref
        self.metadata.mth5_type = self._class_name
        # write out metadata to make sure that its in the file.
        if write_metadata:
            self.write_metadata()
    # if the attrs don't have the proper metadata keys yet write them
    if "mth5_type" not in list(self.hdf5_dataset.attrs.keys()):
        self.write_metadata()
def __init__(self, array_list=None, run_metadata=None, station_metadata=None):
    """
    Container for a run of channel arrays with run and station metadata.

    :param array_list: channel arrays loaded first; loading also fills in
        run and station metadata from the data
    :param run_metadata: dict or metadata.Run; overrides values taken from
        the arrays
    :param station_metadata: dict or metadata.Station
    :raises MTTSError: if either metadata argument has an unsupported type
    """
    self.logger = setup_logger(f"{__name__}.{self.__class__.__name__}")
    self.run_metadata = metadata.Run()
    self.station_metadata = metadata.Station()
    self._dataset = xr.Dataset()

    # arrays go in first; this writes run and station metadata as a side
    # effect so explicit metadata below can override it
    if array_list is not None:
        self.dataset = array_list

    # user-supplied run metadata wins over anything the arrays provided
    if run_metadata is not None:
        if isinstance(run_metadata, dict):
            # normalize a bare dict to the {"Run": {...}} shape from_dict wants
            if "Run" not in run_metadata:
                run_metadata = {"Run": run_metadata}
            self.run_metadata.from_dict(run_metadata)
        elif isinstance(run_metadata, metadata.Run):
            self.run_metadata.from_dict(run_metadata.to_dict())
        else:
            msg = (
                "Input metadata must be a dictionary or Run object, "
                f"not {type(run_metadata)}"
            )
            self.logger.error(msg)
            raise MTTSError(msg)

    # station metadata matters when a run is propagated on its own
    if station_metadata is not None:
        if isinstance(station_metadata, metadata.Station):
            self.station_metadata.from_dict(station_metadata.to_dict())
        elif isinstance(station_metadata, dict):
            # normalize a bare dict to the {"Station": {...}} shape
            if "Station" not in station_metadata:
                station_metadata = {"Station": station_metadata}
            self.station_metadata.from_dict(station_metadata)
        else:
            msg = "input metadata must be type %s or dict, not %s"
            self.logger.error(
                msg, type(self.station_metadata), type(station_metadata)
            )
            raise MTTSError(
                msg % (type(self.station_metadata), type(station_metadata))
            )
def __init__(self, inventory_object=None):
    """
    Translate between obspy Inventory objects and MT metadata, declaring
    the MT XML namespace mapping used when serializing.

    :param inventory_object: existing obspy.Inventory to wrap; when None a
        fresh inventory with source "MT Metadata" is created
    :raises TypeError: if the input is not an obspy.Inventory
    """
    self.logger = setup_logger("{0}.{1}".format(__name__, self.__class__.__name__))
    self.mt_namespace = r"http://emiw.org/xmlns/mt/1.0"
    # namespaces attached to the FDSN StationXML output
    self.namespace_map = {
        "xsi": r"http://www.w3.org/2001/XMLSchema-instance",
        "schemaLocation": "http://www.fdsn.org/xml/station/fdsn-station-1.1.xsd",
        "mt": self.mt_namespace,
    }

    if inventory_object is None:
        # start from an empty inventory when none is supplied
        self.inventory_obj = inventory.Inventory(source="MT Metadata")
    elif isinstance(inventory_object, inventory.Inventory):
        self.inventory_obj = inventory_object
    else:
        msg = "Input must be obspy.Inventory object not type {0}"
        self.logger.error(msg.format(type(inventory_object)))
        raise TypeError(msg.format(type(inventory_object)))
def __init__(
    self, ts, time_array, sample_interval, channel_response_filter, **kwargs
):
    """
    Hold a time series together with its filtering and plotting settings.

    :param ts: the time series values
    :param time_array: matching time axis
    :param sample_interval: time between samples
    :param channel_response_filter: response filter applied to the channel
    :param kwargs: any keyword overrides the matching attribute below
    """
    self.logger = setup_logger(f"{__name__}.{self.__class__.__name__}")

    # required inputs
    self.ts = ts
    self.time_array = time_array
    self.sample_interval = sample_interval
    self.channel_response_filter = channel_response_filter

    # processing defaults
    self.plot = False
    self.detrend = True
    self.zero_mean = True
    self.zero_pad = True

    # time- and frequency-domain window configuration
    self.t_window = None
    self.t_window_params = {}
    self.f_window = None
    self.f_window_params = {}
    self.bandpass = {}

    # plotting state
    self.fig = None
    self.nrows = None
    self.subplot_dict = {}

    # apply user overrides last so they win over the defaults
    for name, val in kwargs.items():
        setattr(self, name, val)
from mth5.utils.mth5_logger import setup_logger, load_logging_config # ============================================================================= # Package Variables # ============================================================================= __author__ = """Jared Peacock""" __email__ = "*****@*****.**" __version__ = "0.2.6" # ============================================================================= # Initialize Loggers # ============================================================================= load_logging_config() debug_logger = setup_logger(__name__, fn="mth5_debug", level="info") debug_logger.debug("Starting MTH5 Debug Log File") # ============================================================================= # Defualt Parameters # ============================================================================= CHUNK_SIZE = 8196 TF_DTYPE = np.dtype([ ("station", "S30"), ("survey", "S50"), ("latitude", float), ("longitude", float), ("elevation", float), ("tf_id", "S30"), ("units", "S60"),
""" # ============================================================================= # Imports # ============================================================================= from pathlib import Path from copy import deepcopy from mth5.utils.fdsn_tools import make_channel_code, get_location_code from mth5.utils.mth5_logger import setup_logger from mt_metadata import timeseries as metadata from obspy.core import inventory from obspy.core.util import AttribDict logger = setup_logger(__name__) # ============================================================================= # Translate between metadata and inventory: mapping dictionaries # ============================================================================= def flip_dict(original_dict): """ Flip keys and values of the dictionary :param original_dict: DESCRIPTION :type original_dict: TYPE :return: DESCRIPTION :rtype: TYPE """
#!/usr/bin/env python """ time series filters """ # ================================================================= import numpy as np from scipy import signal from matplotlib import pyplot as plt from matplotlib.lines import Line2D from mth5.utils.mth5_logger import setup_logger logger = setup_logger(__file__) # ================================================================= def butter_bandpass(lowcut, highcut, fs, order=5): nyq = 0.5 * fs if lowcut is not None: low = lowcut / nyq if highcut is not None: high = highcut / nyq if lowcut and highcut: sos = signal.butter( order, [low, high], analog=False, btype="band", output="sos" ) elif highcut is None:
def __init__(
    self,
    channel_type="auxiliary",
    data=None,
    channel_metadata=None,
    station_metadata=None,
    run_metadata=None,
    **kwargs,
):
    """
    Single-channel time series with channel, station, and run metadata.

    :param channel_type: one of "electric", "magnetic", "auxiliary";
        selects the metadata class from ``meta_classes``
    :param data: optional time series data assigned to ``ts``
    :param channel_metadata: metadata object matching the channel type, or
        a dict (wrapped under the channel-type key if not already)
    :param station_metadata: metadata.Station or dict (wrapped under
        "Station" if not already)
    :param run_metadata: metadata.Run or dict (wrapped under "Run" if not
        already)
    :param kwargs: remaining keywords are set as instance attributes
    :raises ValueError: if ``channel_type`` is not a known type
    :raises MTTSError: if any metadata argument has an unsupported type
    """
    self.logger = setup_logger(f"{__name__}.{self.__class__.__name__}")
    self.station_metadata = metadata.Station()
    self.run_metadata = metadata.Run()
    # placeholder single-sample array until real data is assigned
    self._ts = xr.DataArray([1], coords=[("time", [1])], name="ts")
    self._channel_response = ChannelResponseFilter()

    # get correct metadata class
    try:
        self.channel_metadata = meta_classes[channel_type.capitalize()]()
        self.channel_metadata.type = channel_type.lower()
    except KeyError:
        msg = (
            "Channel type is undefined, must be [ electric | "
            + "magnetic | auxiliary ]"
        )
        self.logger.error(msg)
        raise ValueError(msg)

    if channel_metadata is not None:
        if isinstance(channel_metadata, type(self.channel_metadata)):
            # same class: merge values into the freshly built metadata
            self.channel_metadata.update(channel_metadata)
            self.logger.debug(
                "Loading from metadata class {0}".format(
                    type(self.channel_metadata)
                )
            )
        elif isinstance(channel_metadata, dict):
            # wrap a bare dict under the channel-type key for from_dict
            if not channel_type in [
                cc.lower() for cc in channel_metadata.keys()
            ]:
                channel_metadata = {channel_type: channel_metadata}
            self.channel_metadata.from_dict(channel_metadata)
            self.logger.debug("Loading from metadata dict")
        else:
            msg = "input metadata must be type %s or dict, not %s"
            self.logger.error(
                msg, type(self.channel_metadata), type(channel_metadata)
            )
            raise MTTSError(
                msg % (type(self.channel_metadata), type(channel_metadata))
            )

    # add station metadata, this will be important when propogating a single
    # channel such that it can stand alone.
    if station_metadata is not None:
        if isinstance(station_metadata, metadata.Station):
            self.station_metadata.update(station_metadata)
        elif isinstance(station_metadata, dict):
            # wrap a bare dict under the "Station" key for from_dict
            if not "station" in [
                cc.lower() for cc in station_metadata.keys()
            ]:
                station_metadata = {"Station": station_metadata}
            self.station_metadata.from_dict(station_metadata)
            self.logger.debug("Loading from metadata dict")
        else:
            msg = "input metadata must be type {0} or dict, not {1}".format(
                type(self.station_metadata), type(station_metadata)
            )
            self.logger.error(msg)
            raise MTTSError(msg)

    # add run metadata, this will be important when propogating a single
    # channel such that it can stand alone.
    if run_metadata is not None:
        if isinstance(run_metadata, metadata.Run):
            self.run_metadata.update(run_metadata)
        elif isinstance(run_metadata, dict):
            # wrap a bare dict under the "Run" key for from_dict
            if not "run" in [cc.lower() for cc in run_metadata.keys()]:
                run_metadata = {"Run": run_metadata}
            self.run_metadata.from_dict(run_metadata)
            self.logger.debug("Loading from metadata dict")
        else:
            msg = "input metadata must be type %s or dict, not %s"
            self.logger.error(msg, type(self.run_metadata), type(run_metadata))
            raise MTTSError(
                msg % (type(self.run_metadata), type(run_metadata))
            )

    # input data
    if data is not None:
        self.ts = data
        self._update_xarray_metadata()

    for key in list(kwargs.keys()):
        setattr(self, key, kwargs[key])