def dump_to_asdf(self, outputfile):
    """
    Dump self.adjoint_sources into an ASDF adjoint file.

    Writes the event catalog (self.events) as QuakeML and one
    auxiliary-data entry per adjoint source under "AdjointSources".

    :param outputfile: path of the output ASDF (HDF5) file; an existing
        file is removed first so the dataset starts clean.
    """
    print("=" * 15 + "\nWrite to file: %s" % outputfile)
    # Ensure the target directory exists, consistent with
    # save_adjoint_to_asdf. dirname is "" for bare filenames, in which
    # case nothing needs to be created.
    outputdir = os.path.dirname(outputfile)
    if outputdir and not os.path.exists(outputdir):
        os.makedirs(outputdir)
    if os.path.exists(outputfile):
        print("Output file exists and removed:%s" % outputfile)
        os.remove(outputfile)

    ds = ASDFDataSet(outputfile, mode='a', compression=None)
    ds.add_quakeml(self.events)
    # The origin time of the first event is used to time-shift the
    # adjoint sources in dump_adjsrc.
    event = self.events[0]
    origin = event.preferred_origin()
    event_time = origin.time

    for adj_id in sorted(self.adjoint_sources):
        adj = self.adjoint_sources[adj_id]
        # Station info is keyed by "<network>_<station>".
        sta_tag = "%s_%s" % (adj.network, adj.station)
        sta_info = self.stations[sta_tag]
        adj_array, adj_path, parameters = \
            dump_adjsrc(adj, sta_info, event_time)
        ds.add_auxiliary_data(adj_array, data_type="AdjointSources",
                              path=adj_path, parameters=parameters)
def save_adjoint_to_asdf(outputfile, events, adjoint_sources, stations):
    """
    Save events (obspy.Catalog) and adjoint sources, together with
    station information, to an ASDF file on disk.

    :param outputfile: path of the output ASDF (HDF5) file; an existing
        file is removed first so the dataset starts clean.
    :param events: obspy.Catalog with the event(s) to embed as QuakeML
    :param adjoint_sources: dict of adjoint sources, keyed by id
    :param stations: dict of station info, keyed by "<network>_<station>"
    """
    print("=" * 15 + "\nWrite to file: %s" % outputfile)
    # dirname is "" when outputfile has no directory component;
    # os.makedirs("") raises FileNotFoundError, so only create a
    # non-empty dirname.
    outputdir = os.path.dirname(outputfile)
    if outputdir and not os.path.exists(outputdir):
        os.makedirs(outputdir)
    if os.path.exists(outputfile):
        print("Output file exists and removed:%s" % outputfile)
        os.remove(outputfile)

    ds = ASDFDataSet(outputfile, mode='a', compression=None)
    ds.add_quakeml(events)
    for adj_id in sorted(adjoint_sources):
        adj = adjoint_sources[adj_id]
        sta_tag = "%s_%s" % (adj.network, adj.station)
        sta_info = stations[sta_tag]
        adj_array, adj_path, parameters = \
            dump_adjsrc(adj, sta_info)
        ds.add_auxiliary_data(adj_array, data_type="AdjointSources",
                              path=adj_path, parameters=parameters)
def test_str_method_of_aux_data(tmpdir):
    """
    Check the __str__ output of a single auxiliary data item, both with
    and without a provenance id attached.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)

    parameters = {"a": 1, "b": 2.0, "e": "hallo"}

    # Case 1: with a provenance id. The data type name must NOT start
    # with a number.
    data_set.add_auxiliary_data(
        data=np.random.random((10, 10)), data_type="RandomArray",
        tag="test_data", parameters=parameters,
        provenance_id="{http://example.org}test")
    expected_with_id = (
        "Auxiliary Data of Type 'RandomArray'\n"
        "\tTag: 'test_data'\n"
        "\tProvenance ID: '{http://example.org}test'\n"
        "\tData shape: '(10, 10)', dtype: 'float64'\n"
        "\tParameters:\n"
        "\t\ta: 1\n"
        "\t\tb: 2.0\n"
        "\t\te: hallo")
    assert str(data_set.auxiliary_data.RandomArray.test_data) == \
        expected_with_id

    # Case 2: without a provenance id.
    data_set.add_auxiliary_data(
        data=np.random.random((10, 10)), data_type="RandomArray",
        tag="test_data_2", parameters=parameters)
    expected_without_id = (
        "Auxiliary Data of Type 'RandomArray'\n"
        "\tTag: 'test_data_2'\n"
        "\tData shape: '(10, 10)', dtype: 'float64'\n"
        "\tParameters:\n"
        "\t\ta: 1\n"
        "\t\tb: 2.0\n"
        "\t\te: hallo")
    assert str(data_set.auxiliary_data.RandomArray.test_data_2) == \
        expected_without_id
def test_reading_and_writing_auxiliary_data_with_provenance_id(tmpdir):
    """
    A provenance id attached to auxiliary data must survive a
    write/read round trip.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)

    provenance_id = "{http://example.org}test"
    # The data type name must NOT start with a number.
    data_set.add_auxiliary_data(
        data=np.random.random((10, 10)), data_type="RandomArray",
        tag="test_data", parameters={"a": 1, "b": 2.0, "e": "hallo"},
        provenance_id=provenance_id)

    # Explicitly finalize and drop the dataset to force a flush to disk.
    data_set.__del__()
    del data_set

    new_data_set = ASDFDataSet(asdf_filename)
    assert new_data_set.auxiliary_data.RandomArray.test_data.provenance_id \
        == provenance_id
def test_adding_auxiliary_data_with_invalid_data_type_name_raises(tmpdir):
    """
    A data type name starting with a digit must be rejected with an
    ASDFValueError carrying an informative message.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)

    try:
        # The data type name must NOT start with a number - this one
        # does and is therefore invalid.
        with pytest.raises(ASDFValueError) as err:
            data_set.add_auxiliary_data(
                data=np.random.random((10, 10)), data_type="2DRandomArray",
                tag="test_data", parameters={"a": 1, "b": 2.0, "e": "hallo"})

        assert err.value.args[0] == (
            "Data type name '2DRandomArray' is invalid. It must validate "
            "against the regular expression '^[A-Z][A-Za-z0-9]*$'.")
    finally:
        # Always close the file so the temporary directory can be
        # cleaned up.
        data_set.__del__()
def test_adding_auxiliary_data_with_wrong_tag_name_raises(tmpdir):
    """
    A tag containing dots must be rejected with an ASDFValueError
    carrying an informative message.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)

    # The data type name must NOT start with a number; the tag here is
    # the invalid part.
    with pytest.raises(ASDFValueError) as err:
        data_set.add_auxiliary_data(
            data=np.random.random((10, 10)), data_type="RandomArray",
            tag="A.B.C", parameters={})

    assert err.value.args[0] == (
        "Tag name 'A.B.C' is invalid. It must validate "
        "against the regular expression "
        "'^[a-zA-Z0-9][a-zA-Z0-9_]*[a-zA-Z0-9]$'.")

    # Close the file so the temporary directory can be cleaned up.
    data_set.__del__()
def test_item_access_of_auxiliary_data(tmpdir):
    """
    Make sure auxiliary data types, and the data itself, can be
    accessed via dictionary-like item access as well as attributes.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)

    # Nothing has been added yet.
    assert str(data_set.auxiliary_data) == (
        "Data set contains no auxiliary data.")

    data_set.add_auxiliary_data(
        data=np.random.random((10, 10)), data_type="RandomArray",
        tag="test_data_1", parameters={"a": 1, "b": 2.0, "e": "hallo"})

    # Item access and attribute access must resolve to the same entry.
    assert data_set.auxiliary_data["RandomArray"]["test_data_1"].tag == \
        data_set.auxiliary_data.RandomArray.test_data_1.tag
def test_reading_and_writing_auxiliary_data(tmpdir):
    """
    Tests reading and writing auxiliary data.

    Writes a random 1D array plus parameters, reopens the file, and
    checks everything survived the round trip.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)

    # Define some auxiliary data and add it.
    data = np.random.random(100)
    data_type = "RandomArrays"
    tag = "test_data"
    parameters = {"a": 1, "b": 2.0, "e": "hallo"}
    data_set.add_auxiliary_data(data=data, data_type=data_type, tag=tag,
                                parameters=parameters)
    # Drop the dataset to force a flush to disk before re-reading.
    del data_set

    new_data_set = ASDFDataSet(asdf_filename)
    aux_data = new_data_set.auxiliary_data.RandomArrays.test_data
    np.testing.assert_equal(data, aux_data.data)
    # BUG FIX: these three lines were bare comparison expressions
    # (no-ops) and never checked anything; they are now real asserts.
    assert aux_data.data_type == data_type
    assert aux_data.tag == tag
    assert aux_data.parameters == parameters
def save_adjoint_to_asdf(outputfile, events, adjoint_sources, stations):
    """
    Save events (obspy.Catalog) and adjoint sources, together with
    station information, to an ASDF file on disk.

    :param outputfile: path of the output ASDF (HDF5) file; an existing
        file is removed first so the dataset starts clean.
    :param events: obspy.Catalog with the event(s) to embed as QuakeML
    :param adjoint_sources: dict of adjoint sources, keyed by id
    :param stations: dict of station info, keyed by "<network>_<station>"
    """
    print("=" * 15 + "\nWrite to file: %s" % outputfile)
    # dirname is "" when outputfile has no directory component;
    # os.makedirs("") raises FileNotFoundError, so only create a
    # non-empty dirname.
    outputdir = os.path.dirname(outputfile)
    if outputdir and not os.path.exists(outputdir):
        os.makedirs(outputdir)
    if os.path.exists(outputfile):
        print("Output file exists and removed:%s" % outputfile)
        os.remove(outputfile)

    ds = ASDFDataSet(outputfile, mode='a', compression=None)
    ds.add_quakeml(events)
    for adj_id in sorted(adjoint_sources):
        adj = adjoint_sources[adj_id]
        sta_tag = "%s_%s" % (adj.network, adj.station)
        sta_info = stations[sta_tag]
        adj_array, adj_path, parameters = \
            dump_adjsrc(adj, sta_info)
        ds.add_auxiliary_data(adj_array, data_type="AdjointSources",
                              path=adj_path, parameters=parameters)
def test_str_of_auxiliary_data(tmpdir):
    """
    Test the various __str__ methods of the auxiliary data containers.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)

    # Empty data set first.
    assert str(data_set.auxiliary_data) == (
        "Data set contains no auxiliary data.")

    # Add two items of one type and one of another.
    parameters = {"a": 1, "b": 2.0, "e": "hallo"}
    for data_type, tag in (("RandomArray", "test_data_1"),
                           ("RandomArray", "test_data_2"),
                           ("SomethingElse", "test_data")):
        data_set.add_auxiliary_data(data=np.random.random((10, 10)),
                                    data_type=data_type, tag=tag,
                                    parameters=parameters)

    assert str(data_set.auxiliary_data) == (
        "Data set contains the following auxiliary data types:\n"
        "\tRandomArray (2 item(s))\n"
        "\tSomethingElse (1 item(s))")

    assert str(data_set.auxiliary_data.RandomArray) == (
        "2 auxiliary data item(s) of type 'RandomArray' available:\n"
        "\ttest_data_1\n"
        "\ttest_data_2")
def test_reading_and_writing_n_dimensional_auxiliary_data(tmpdir):
    """
    Tests reading and writing n-dimensional auxiliary data.

    Runs the same write/read round trip for 2D, 3D, and 4D arrays,
    each in its own file.
    """
    def _roundtrip(label, shape):
        # Write a random array of the given shape to a fresh file,
        # reopen it, and check the full round trip.
        asdf_filename = os.path.join(tmpdir.strpath, "test_%s.h5" % label)
        data_set = ASDFDataSet(asdf_filename)
        data = np.random.random(shape)
        data_type = "RandomArrays"
        tag = "test_data"
        parameters = {"a": 1, "b": 2.0, "e": "hallo"}
        data_set.add_auxiliary_data(data=data, data_type=data_type, tag=tag,
                                    parameters=parameters)
        # Drop the dataset to force a flush to disk before re-reading.
        del data_set

        new_data_set = ASDFDataSet(asdf_filename)
        aux_data = new_data_set.auxiliary_data.RandomArrays.test_data
        np.testing.assert_equal(data, aux_data.data)
        # BUG FIX: these were bare comparison expressions (no-ops),
        # repeated three times; they are now real asserts.
        assert aux_data.data_type == data_type
        assert aux_data.tag == tag
        assert aux_data.parameters == parameters
        del new_data_set

    _roundtrip("2D", (10, 10))
    _roundtrip("3D", (5, 5, 5))
    _roundtrip("4D", (2, 3, 4, 5))