Example #1
File: test_tsdb.py Project: dnvgl/qats
    def test_export_reload_ascii(self):
        self.db.load(os.path.join(self.data_directory, 'model_test_data.dat'))
        name = "Wave-S[m]"
        fnout = os.path.join(self.data_directory, '_test_export.dat')
        try:
            # route screen dump from export to null
            was_stdout = sys.stdout
            f = open(os.devnull, 'w')
            sys.stdout = f
            # export, should not raise errors
            self.db.export(fnout, names=name)
        finally:
            sys.stdout = was_stdout
            f.close()
        # reload
        db2 = TsDB()
        db2.load(fnout)
        # compare ts
        ts1 = self.db.get(name=name)
        ts2 = db2.get(name=name)

        # clean exported files
        os.remove(fnout)

        # check arrays
        np.testing.assert_array_almost_equal(ts1.x, ts2.x, 6, "Export/reload did not yield same arrays")
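These test fragments are excerpts from a single unittest module and omit its module-level imports. A header along the following lines makes the snippets on this page self-contained; the import path of the motion helpers used in Example #13 is an assumption, the rest is taken directly from the snippets further down.

import copy
import os
import sys
import unittest
from datetime import datetime

import numpy as np

from qats import TsDB
from qats.ts import TimeSeries
# transform_motion, velocity and acceleration are assumed to come from qats.motions
from qats.motions import transform_motion, velocity, acceleration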
Example #2
File: test_tsdb.py Project: dnvgl/qats
    def test_export_reload(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        name = "Sway"
        fnout = os.path.join(self.data_directory, '_test_export.ts')
        try:
            # route screen dump from export to null
            was_stdout = sys.stdout
            f = open(os.devnull, 'w')
            sys.stdout = f
            # export, should not raise errors
            self.db.export(fnout, names=name)
        finally:
            # reset sys.stdout
            sys.stdout = was_stdout
            f.close()
        # reload
        db2 = TsDB()
        db2.load(fnout)
        # compare ts
        ts1 = self.db.get(name=name)
        ts2 = db2.get(name=name)
        # clean exported files
        try:
            os.remove(fnout)
            os.remove(os.path.splitext(fnout)[0] + ".key")
        except FileNotFoundError:
            pass

        # check arrays
        self.assertTrue(np.array_equal(ts1.x, ts2.x), "Export/reload did not yield same arrays")
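The two export tests above silence the console output from export() by swapping sys.stdout by hand inside try/finally. An equivalent and slightly more robust pattern, shown here as a sketch rather than as part of the original test module, uses contextlib.redirect_stdout so the stream is restored even if open() itself fails:

import contextlib
import os

from qats import TsDB

db = TsDB.fromfile('mooring.ts')   # any loaded database; the path is illustrative
fnout = '_test_export.ts'
# route the screen dump from export() to the null device for the duration of the call
with open(os.devnull, 'w') as devnull, contextlib.redirect_stdout(devnull):
    db.export(fnout, names='Sway')  # should not raise errors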
Example #3
    def setUp(self):
        """
        Common setup for all tests
        """
        self.db = TsDB()
        # the data directory used in the test relative to this module
        # necessary to do it like this for the tests to work both locally and in virtual env for conda build
        self.tsfile = os.path.join(os.path.dirname(__file__), '..', 'data', 'mooring.ts')
        self.db.load(self.tsfile)
        self.ts = self.db.get(name="Mooring line 4")
        # add datetime reference to ts for later testing
        self.ts._dtg_ref = datetime.now()
Example #4
    def setUp(self):
        """
        Set up for some of the tests.
        """
        # load irregular 3-hour time series to test rebin and mesh
        tsfile = os.path.join(os.path.dirname(__file__), '..', 'data', 'simo_p_out.ts')
        self.irreg_series = TsDB.fromfile(tsfile).get(name='Tension_2_qs').x
Example #5
File: test_tsdb.py Project: dnvgl/qats
    def test_geta(self):
        tsfile = os.path.join(self.data_directory, 'simo_p.ts')
        self.db.load(tsfile)
        tsname = "Tension_2_qs"
        keys = self.db.list(names=tsname, display=False)
        _, data1 = self.db.geta(name=keys[0])

        # test 1: geta() when ts is already loaded
        _, data2 = self.db.geta(name=tsname)
        self.assertTrue(np.array_equal(data1, data2), "Did not get correct data time series using get() "
                                                      "(ts pre-loaded)")
        # test 2: geta() when ts is not already loaded
        db2 = TsDB()
        db2.load(tsfile)
        _, data3 = db2.geta(name=tsname)
        self.assertTrue(np.array_equal(data1, data3), "Did not get correct data time series using get() "
                                                      "(ts not pre-loaded)")
Example #6
    def test_correct_number_of_timeseries(self):
        failed = []
        for filename, nts in self.files:
            db = TsDB.fromfile(os.path.join(self.data_directory, filename))
            if not nts == db.n:
                failed.append(f"{filename} ({nts} != {db.n})")
        self.assertTrue(len(failed) == 0,
                        f"Failed to identify correct number of time series on {len(failed)} file(s):\n   *** " +
                        "\n   *** ".join(failed))
Example #7
    def test_correct_timeseries_size(self):
        failed = []
        for filename, _ in self.files:
            try:
                db = TsDB.fromfile(os.path.join(self.data_directory, filename))
                ts = db.get(ind=0)  # should not fail
                self.assertTrue(ts.t.size > 1 and ts.t.size == ts.x.size,
                                f"Did not read time series correctly (t.size = {ts.t.size}, x.size = {ts.x.size})")
            except Exception:
                exctype, excvalue, _ = sys.exc_info()
                # report the bare exception class name, e.g. IndexError
                failed.append(f"{filename}: {exctype.__name__}: {excvalue}")
        self.assertTrue(len(failed) == 0,
                        f"Failed to read time series from {len(failed)} file(s):\n   *** " +
                        "\n   *** ".join(failed))
Example #8
File: test_tsdb.py Project: dnvgl/qats
    def test_get_by_name(self):
        tsfile = os.path.join(self.data_directory, 'simo_p.ts')
        self.db.load(tsfile)
        tsname = "Tension_2_qs"
        keys = self.db.list(names=tsname, display=False)
        key = keys[0]
        ts1 = self.db.getm(names=key, fullkey=True)[key]
        # test 1: get_ts() when ts is already loaded
        ts2 = self.db.get(name=tsname)
        self.assertIs(ts1, ts2, "Did not get correct TimeSeries  using get_ts()"
                                " (ts pre-loaded)")
        # test 2: get_ts() when ts is not already loaded
        db2 = TsDB.fromfile(tsfile)
        ts3 = db2.get(name=tsname)
        self.assertTrue(np.array_equal(ts1.x, ts3.x), "Did not get correct TimeSeries using get_ts()"
                                                      " (ts not pre-loaded)")
Example #9
File: test_tsdb.py Project: dnvgl/qats
    def test_stats_dataframe(self):
        """ Test that stats dataframe is correctly constructed """
        fn = os.path.join(self.data_directory, 'mooring.ts')
        keys = ["Surge", "Sway", "Heave"]
        db = TsDB.fromfile(fn)
        stats = db.stats(names=keys)  # type: dict
        df = db.stats_dataframe(names=keys)
        # check that statistics for each time series is correctly stored in columns
        self.assertListEqual(keys, list(df.keys()), "Statistics dataframe does not have time series stats in columns")
        # check that the values are correctly fetched from dataframe
        failed = []
        for k in keys:
            for kstat, val in stats[k].items():
                if not df[k][kstat] == val:
                    failed.append((k, kstat))
        self.assertFalse(failed, "Statistics dataframe values don't match the statistics dict")
Example #10
File: test_tsdb.py Project: dnvgl/qats
    def test_get_by_index(self):
        tsfile = os.path.join(self.data_directory, 'simo_p.ts')
        self.db.load(tsfile)
        tsname = "Tension_2_qs"
        key = self.db.list(names=tsname, display=False)[0]
        ts1 = self.db.get(name=tsname)
        ind = self.db.register_keys.index(key)
        # test 1: get_ts() using index when ts is already loaded
        ts2 = self.db.get(ind=ind)
        self.assertIs(ts1, ts2, "Did not get correct TimeSeries using get_ts() and specifying index"
                                " (ts pre-loaded)")

        # test 2: get_ts() using index when ts is not already loaded
        db2 = TsDB.fromfile(tsfile)
        ts3 = db2.get(ind=ind)
        self.assertTrue(np.array_equal(ts1.x, ts3.x), "Did not get correct TimeSeries using get_ts() and specifying"
                                                      " index (ts not pre-loaded)")
Example #11
File: test_tsdb.py Project: dnvgl/qats
    def setUp(self):
        self.db = TsDB()
        # the data directory used in the test relative to this module
        # necessary to do it like this for the tests to work both locally and in virtual env for conda build
        self.data_directory = os.path.join(os.path.dirname(__file__), '..', 'data')
Example #12
class TestTs(unittest.TestCase):
    def setUp(self):
        """
        Common setup for all tests
        """
        self.db = TsDB()
        # the data directory used in the test relative to this module
        # necessary to do it like this for the tests to work both locally and in virtual env for conda build
        self.tsfile = os.path.join(os.path.dirname(__file__), '..', 'data',
                                   'mooring.ts')
        self.db.load(self.tsfile)
        self.ts = self.db.get(name="Mooring line 4")
        # add datetime reference to ts for later testing
        self.ts._dtg_ref = datetime.now()

    def test_tsdb_getone_returns_timeseries_object(self):
        """
        Test that the input ts is a TimeSeries object
        """
        self.assertTrue(isinstance(self.ts, TimeSeries),
                        "self.ts not TimeSeries object.")

    def test_dtg_start_is_correct_type(self):
        """
        Test that the dtg_start attribute has correct type
        """
        self.assertTrue(
            isinstance(self.ts._dtg_ref, datetime) or self.ts._dtg_ref is None,
            "Expected 'dtg_start' datetime object or None")

    def test_time_dtg_equals_time(self):
        """
        Test that the time_as_datetime property is identical to the original time array (floats)
        """
        dt0 = self.ts.dtg_time[0]
        # subtract start time as datetime and convert to float seconds
        time_from_datetime = [
            dt.total_seconds()
            for dt in [dti - dt0 for dti in self.ts.dtg_time]
        ]

        # test equality with relative tolerance, there are minor round off errors with datetime objects which
        # breaks numpy.array_equal()
        self.assertTrue(
            np.allclose(np.array(time_from_datetime),
                        self.ts.t,
                        rtol=1.e-6,
                        atol=0.), "The date-times are "
            "not equal to the "
            "time array.")

    def test_specifying_both_resample_and_twin_raises_assertionerror(self):
        """
        Test that an AssertionError is raised if one tries to specify both resampling to new time array and
        cropping to a time window at the same time.
        """
        try:
            _, _ = self.ts.get(twin=(0., 100.),
                               resample=np.arange(0., 300., 0.01))
        except AssertionError:
            pass
        else:
            self.fail(
                "The TimeSeries.get() method does not raise AssertionError if one tries to specify both "
                "resampling to new time array and cropping to a time window at the same time."
            )

    def test_resampling_beyond_original_time_array_raises_valueerror(self):
        """
        Test that a ValueError is raised if one tries to resample the time series beyond the original time array.
        """
        try:
            _, _ = self.ts.get(resample=np.arange(-5., 300., 0.01))
        except ValueError:
            pass
        else:
            self.fail(
                "The TimeSeries.get() method does not raise ValueError if one tries to resample/extrapolate "
                "beyond the original time array.")

    def test_resampling_beyond_original_time_array_raises_valueerror_2(self):
        """
        Test that a ValueError is raised if one tries to resample the time series beyond the original time array.
        """
        try:
            _, _ = self.ts.get(resample=np.arange(
                0., 1.e6, 1000.))  # large step to avoid MemoryError
        except ValueError:
            pass
        else:
            self.fail(
                "The TimeSeries.get() method does not raise ValueError if one tries to resample/extrapolate "
                "beyond the original time array.")

    def test_filter_lp_hp(self):
        """
        Test that filter() method uses get() as intended, and that sum of lp and hp components equals original signal.

        Note: The test is set up to suit the exact signal tested - this is not a generic test.
        """
        twin = (1000, 1e12)
        freq = 0.03
        _, xtot = self.ts.get(twin=twin)
        # check 1: should not raise error
        _, xlo = self.ts.filter('lp', freq, twin=twin)
        _, xhi = self.ts.filter('hp', freq, twin=twin)
        # check 2: sum of components (almost) equals total signal
        deviation = np.max((xlo + xhi - xtot) / xtot)
        self.assertLessEqual(
            deviation, 0.02,
            "Sum of low- and high-pass components does not equal total signal")

    def test_data_is_dict_type(self):
        """
        Test that the data property returns dictionary
        """
        # todo: invoke type check below when the quality of the data property is assured
        # self.assertIsInstance(self.ts.data, dict, "Data property does not return dictionary.")
        try:
            self.ts.data
        except NotImplementedError:
            pass
        else:
            self.fail("Use of data property does not raise NotImplemented")

    def test_ts_copy_returns_unique_ts(self):
        """
        Test that the object returned from TimeSeries.copy is correct type and is not identical to the original
        """
        new_ts = self.ts.copy()

        self.assertIsInstance(
            new_ts, TimeSeries,
            "TimeSeries.copy() does not return a TimeSeries object, "
            "but type '%s'." % type(new_ts))

        self.assertIsNot(
            self.ts.t, new_ts.t,
            "TimeSeries.copy() returns TimeSeries with time array which is bound "
            "to the time array of the original TimeSeries object.")

        self.assertIsNot(
            self.ts.x, new_ts.x,
            "TimeSeries.copy() returns TimeSeries with data array which is bound"
            "to the data array of the original TimeSeries object.")

    def test_copycopy_returns_unique_ts(self):
        """
        Test that the object returned from copy.copy(TimeSeries) is correct type and is not identical to the original
        """
        new_ts = copy.copy(self.ts)

        self.assertIsInstance(
            new_ts, TimeSeries,
            "copy.copy(TimeSeries) does not return a TimeSeries object, "
            "but type '%s'." % type(new_ts))
        self.assertIsNot(
            self.ts.t, new_ts.t,
            "copy.copy(TimeSeries) returns TimeSeries with time array which is bound "
            "to the time array of the original TimeSeries object.")

        self.assertIsNot(
            self.ts.x, new_ts.x,
            "copy.copy(TimeSeries) returns TimeSeries with data array which is bound "
            "to the data array of the original TimeSeries object.")

    def test_max_equals_largest_maxima(self):
        """
        Test that the value returned from max() method equals the largest value in the array returned from maxima() method
        """
        twin = (500, 1.e12)
        self.assertEqual(
            np.max(self.ts.maxima(twin=twin)), self.ts.max(twin=twin),
            "Method max() returns value which is different from the largest value from maxima() method"
        )

    def test_min_equals_smallest_minima(self):
        """
        Test that the value returned from min() method equals the smallest value in the array returned from minima() method
        """
        twin = (500, 1.e12)
        self.assertEqual(
            np.min(self.ts.minima(twin=twin)), self.ts.min(twin=twin),
            "Method min() returns value which is different from the smallest value from minima() method"
        )
Example #13
class TestOperations(unittest.TestCase):
    def setUp(self):
        self.db = TsDB()
        # the data directory used in the test relative to this module
        # necessary to do it like this for the tests to work both locally and in virtual env for conda build
        self.data_directory = os.path.join(os.path.dirname(__file__), '..',
                                           'data')

    def test_transform_motion(self):
        self.db.load(os.path.join(self.data_directory, 'simo_trans.ts'))
        db = self.db
        motionkeys = db.list(
            relative=True)[:6]  # names of 6-dof motion time series
        motion = [ts.x for ts in db.getl(motionkeys)]
        # check transformation 1, new ref = (74.61, 0., 0.)
        xyz1 = transform_motion(motion,
                                newref=(74.61, 0.,
                                        0.))  # default rotunit ('deg') is used
        np.testing.assert_allclose(xyz1[0],
                                   db.get("XG_trans1").x,
                                   rtol=0,
                                   atol=1e-6,
                                   err_msg="XG differs in transformation 1")
        np.testing.assert_allclose(xyz1[1],
                                   db.get("YG_trans1").x,
                                   rtol=0,
                                   atol=1e-6,
                                   err_msg="YG differs in transformation 1")
        np.testing.assert_allclose(xyz1[2],
                                   db.get("ZG_trans1").x,
                                   rtol=0,
                                   atol=1e-6,
                                   err_msg="YG differs in transformation 1")

    def test_velocity(self):
        """ Check that numerical differentiation of sin(x) is approximately equal to cos(x) """
        x = np.linspace(0., 4 * np.pi, num=1000)
        y = np.sin(x)
        dydt = velocity(y, x)
        # check vs. analytical answer, disregard first and last values (inaccurate at bounds)
        np.testing.assert_allclose(
            dydt[1:-1],
            np.cos(x)[1:-1],
            rtol=1e-4,
            atol=0,
            err_msg="Numerical diff. of sin(x) differs from cos(x)")

    def test_velocity_n_signals(self):
        """ Check that velocity handles 2-D input array (more than one signal) """
        x = np.linspace(0., 4 * np.pi, num=1000)
        y1 = np.sin(x)
        y2 = np.sin(x + np.pi / 4)
        dy = velocity([y1, y2], x)
        dy1 = velocity(y1, x)
        np.testing.assert_array_equal(
            dy[0, :],
            dy1,
            err_msg="velocity fails to handle multiple signals properly")

    def test_acceleration(self):
        """ Check that acceleration is same as time differentiation (velocity) twice """
        x = np.linspace(0., 4 * np.pi, num=1000)
        y = np.sin(x)
        acc = acceleration(y, x)
        dydt2 = velocity(velocity(y, x), x)
        np.testing.assert_array_equal(
            acc,
            dydt2,
            err_msg="acceleration differs from double time differentiation")
Example #14
from qats.ts import TimeSeries, average_frequency
from qats.signal import find_maxima, smooth
from qats import TsDB
from scipy.optimize import curve_fit
from scipy.integrate import odeint
import time
# Input: write the name of the test to be analysed below.

start = time.perf_counter()

name = "test"  #"surge", "hsseave", "moored_heave", "pitch" or "moored_pitch"
sf = 75
#dt = 0.01
n_tests = 3  # Number of decay tests in the model test base file. If more than 1, the script will produce plots for all
# individual tests and also print a .txt file with averaged natural periods and coefficients.
db = TsDB()

if name == "surge":
    n_name = "*M207_COF X"
    f = ['Recorded Data_Y200.tdms']
    t_dur = 35  # Time-interval for each test in the decay-test.
elif name == "heave":
    n_name = "*M207_COF Z"
    f = ['Recorded Data_Y300.tdms']
    t_dur = 12  # Time-interval for each test in the decay-test.
elif name == "pitch":
    n_name = "*M207_COF Pitch"
    f = ['Recorded Data_Y301.tdms']
    t_dur = 25  # Time-interval for each test in the decay-test.
elif name == "moored_heave":
    n_name = "*M207_COF Z"
Example #15
"""
Example of using the time series database class
"""
import os
from qats import TsDB

db = TsDB()

# locate time series file
file_name = os.path.join("..", "..", "..", "data", "mooring.ts")

# load time series from file
db.load([file_name])

# plot everything on the file
db.plot()

# plot only specific time series identified by name
db.plot(names=["surge", "sway"])

# plot the power spectral density for the same time series
db.plot_psd(names=["surge", "sway"], resample=0.1)
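The database can also write a selection of series back to file with export(), the method exercised by the test examples above. A usage sketch; the output file name is chosen here for illustration, and for .ts output the reload tests above suggest a companion .key file is written next to it.

# export the two plotted series to a new time series file
db.export(os.path.join("..", "exported_subset.ts"), names=["surge", "sway"])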

Example #16
File: test_tsdb.py Project: dnvgl/qats
class TestTsDB(unittest.TestCase):
    def setUp(self):
        self.db = TsDB()
        # the data directory used in the test relative to this module
        # necessary to do it like this for the tests to work both locally and in virtual env for conda build
        self.data_directory = os.path.join(os.path.dirname(__file__), '..', 'data')

    def test_exception_load_numeric(self):
        try:
            self.db.load(223334)    # numeric values should throw an exception
        except TypeError:
            pass
        else:
            self.fail("Did not throw exception on numeric file name")

    def test_exception_load_dict(self):
        try:
            self.db.load({})    # dictionary should throw an exception
        except TypeError:
            pass
        else:
            self.fail("Did not throw exception on dictionary of file names.")

    def test_exception_load_directory(self):
        try:
            self.db.load(self.data_directory)
        except FileExistsError:
            pass
        else:
            self.fail("Did not throw exception when trying to load a directory.")

    def test_exception_load_nonexistingfile(self):
        try:
            self.db.load(os.path.join(self.data_directory, 'donotexist.ts'))
        except FileExistsError:
            pass
        else:
            self.fail("Did not throw exception when trying to load a non-existing file.")

    def test_exception_load_unsupportedfile(self):
        try:
            self.db.load(os.path.join(self.data_directory, 'unsupportedfile.out'))
        except NotImplementedError:
            pass
        else:
            self.fail("Did not throw exception when trying to load a file type which is not yet supported.")

    def test_list_all(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        k = self.db.list(display=False)
        self.assertEqual(14, len(k), "Deviating number of listed keys = %d" % len(k))

    def test_list_subset(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        k = self.db.list(names="Mooring line*", display=False)
        self.assertEqual(8, len(k), "Deviating number of listed keys = %d" % len(k))

    def test_list_subset_misc_criteria(self):
        for tsfile in ('mooring.ts', 'simo_p.ts'):
            self.db.load(os.path.join(self.data_directory, tsfile))
        # test 1
        k = self.db.list(names="Tension*", display=False)
        self.assertEqual(10, len(k), "Deviating number of listed keys = %d" % len(k))
        # test 2
        k = self.db.list(names="simo_p.ts*line*", display=False)
        self.assertEqual(2, len(k), "Deviating number of listed keys = %d" % len(k))

    def test_list_subset_keep_specified_order(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        names_reversed = list(reversed([os.path.basename(k) for k in self.db.register_keys]))
        namelist = [os.path.basename(_) for _ in self.db.list(names=names_reversed)]
        self.assertEqual(names_reversed, namelist, "Failed to keep specified order")

    def test_list_subset_special_characters(self):
        self.db.load(os.path.join(self.data_directory, 'model_test_data.dat'))
        # should return exactly one key
        self.assertEqual(1, len(self.db.list(names="RW1[m]")), "TsDB.list() returned wrong number of keys")

    def test_list_subset_special_characters_2(self):
        self.db.load(os.path.join(self.data_directory, 'model_test_data.dat'))
        # should return exactly one key
        self.assertEqual(1, len(self.db.list(names="Acc-X[m/s^2]")), "TsDB.list() returned wrong number of keys")

    def test_list_prepended_wildcard_1_3(self):
        """
        Test that wildcard is prepended in a reasonable manner. Test cases:
            1. Specifying 'XG' should not return 'vel_XG'
            2. Specifying '*XG' should return both 'XG' and 'vel_XG'
            3. Specifying full key should be possible
            4. If multiple files are loaded, specifying 'XG' should return all occurrences (across files)

        The first three are tested here, while the fourth is tested in `test_list_prepended_wildcard_4()`
        """
        path = os.path.join(self.data_directory, 'simo_r1.ts')
        db = self.db
        db.load(path)
        k1 = db.list(names="XG")   # should return 1 key
        k2 = db.list(names="*XG")  # should return 2 keys
        k3 = db.list(names=os.path.abspath(os.path.join(path, "XG")))  # should return 1 key
        # test of the cases described in docstring
        self.assertEqual(len(k1), 1, "TsDB.list() failed to return correct number of keys for names='XG'")
        self.assertEqual(len(k2), 2, "TsDB.list() failed to return correct number of keys for names='*XG'")
        self.assertEqual(len(k3), 1, "TsDB.list() failed to return correct number of keys when specifying full path")

    def test_list_prepended_wildcard_4(self):
        """
        See description of `test_list_prepended_wildcard_1_3()`
        """
        db = self.db
        db.load(os.path.join(self.data_directory, 'simo_r1.ts'))
        db.load(os.path.join(self.data_directory, 'simo_r2.ts'))
        k1 = db.list(names="XG")  # should return 2 keys
        k2 = db.list(names="*XG")  # should return 4 keys
        # test of the cases described in docstring
        self.assertEqual(len(k1), 2, "TsDB.list() failed to return correct number of keys for names='XG'")
        self.assertEqual(len(k2), 4, "TsDB.list() failed to return correct number of keys for names='*XG'")

    def test_clear_all(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        self.db.clear(display=False)
        k = self.db.list(display=False)
        self.assertEqual([], k, "Did not clear all registered keys.")

    def test_clear_subset(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        self.db.clear(names="*Mooring line*", display=False)
        k = self.db.list(display=False)
        self.assertEqual(6, len(k), "Did not clear subset of registered keys correctly. %d keys remaining" % len(k))

    def test_getda_correct_key(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        rk = self.db.list(names="Heave", display=False)
        container = self.db.getda(names="Heave", fullkey=True)
        self.assertEqual(rk, list(container.keys()), "db list method and get_many method returns different keys.")

    def test_getda_correct_number_of_arrays(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        rk = self.db.list(names="Heave", display=False)  # should be only 1 key returned in this case
        container = self.db.getda(names="Heave", fullkey=True)
        self.assertEqual(2, len(container[rk[0]]), "Got more than 2 arrays (time and data) in return from get_many().")

    def test_gets_none(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        container = self.db.getda(names=[])
        n = len(container)
        self.assertEqual(0, n, "Should have received empty container (OrderedDict) from getda()")

    def test_getl_correct_key(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        rk = self.db.list(names="Heave", display=False, relative=True)
        tslist = self.db.getl(names="Heave")
        self.assertEqual(rk, [ts.name for ts in tslist], "db list method and getl return different keys.")

    def test_getm_correct_key(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        rk = self.db.list(names="Heave", display=False)
        container = self.db.getm(names="Heave", fullkey=True)
        self.assertEqual(rk, list(container.keys()), "db list method and getm method returns different keys.")

    def test_getm_correct_key_by_ind(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        rk = self.db.list(names="Heave", display=False)
        container = self.db.getm(ind=2, fullkey=True)
        self.assertEqual(rk, list(container.keys()), "db list method and getm method returns different keys.")

    def test_getd_equals_getm(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        container1 = self.db.getm(names="*", fullkey=True)
        container2 = self.db.getd(names="*", fullkey=True)
        for name, ts in container1.items():
            self.assertTrue(name in container2 and container2[name] is container1[name],
                            "container returned by getd is not identical to container returned by getm")

    def test_geta(self):
        tsfile = os.path.join(self.data_directory, 'simo_p.ts')
        self.db.load(tsfile)
        tsname = "Tension_2_qs"
        keys = self.db.list(names=tsname, display=False)
        _, data1 = self.db.geta(name=keys[0])

        # test 1: geta() when ts is already loaded
        _, data2 = self.db.geta(name=tsname)
        self.assertTrue(np.array_equal(data1, data2), "Did not get correct data time series using get() "
                                                      "(ts pre-loaded)")
        # test 2: geta() when ts is not already loaded
        db2 = TsDB()
        db2.load(tsfile)
        _, data3 = db2.geta(name=tsname)
        self.assertTrue(np.array_equal(data1, data3), "Did not get correct data time series using get() "
                                                      "(ts not pre-loaded)")

    def test_get_by_name(self):
        tsfile = os.path.join(self.data_directory, 'simo_p.ts')
        self.db.load(tsfile)
        tsname = "Tension_2_qs"
        keys = self.db.list(names=tsname, display=False)
        key = keys[0]
        ts1 = self.db.getm(names=key, fullkey=True)[key]
        # test 1: get_ts() when ts is already loaded
        ts2 = self.db.get(name=tsname)
        self.assertIs(ts1, ts2, "Did not get correct TimeSeries  using get_ts()"
                                " (ts pre-loaded)")
        # test 2: get_ts() when ts is not already loaded
        db2 = TsDB.fromfile(tsfile)
        ts3 = db2.get(name=tsname)
        self.assertTrue(np.array_equal(ts1.x, ts3.x), "Did not get correct TimeSeries using get_ts()"
                                                      " (ts not pre-loaded)")

    def test_get_by_index(self):
        tsfile = os.path.join(self.data_directory, 'simo_p.ts')
        self.db.load(tsfile)
        tsname = "Tension_2_qs"
        key = self.db.list(names=tsname, display=False)[0]
        ts1 = self.db.get(name=tsname)
        ind = self.db.register_keys.index(key)
        # test 1: get_ts() using index when ts is already loaded
        ts2 = self.db.get(ind=ind)
        self.assertIs(ts1, ts2, "Did not get correct TimeSeries using get_ts() and specifying index"
                                " (ts pre-loaded)")

        # test 2: get_ts() using index when ts is not already loaded
        db2 = TsDB.fromfile(tsfile)
        ts3 = db2.get(ind=ind)
        self.assertTrue(np.array_equal(ts1.x, ts3.x), "Did not get correct TimeSeries using get_ts() and specifying"
                                                      " index (ts not pre-loaded)")

    def test_get_by_index_0(self):
        """ Should not fail when index 0 is specified """
        tsfile = os.path.join(self.data_directory, 'simo_p.ts')
        self.db.load(tsfile)
        _ = self.db.get(ind=0)
        # should not fail

    def test_get_exceptions(self):
        self.db.load(os.path.join(self.data_directory, 'simo_p.ts'))
        # test 1: no match
        try:
            _ = self.db.geta(name="nonexisting_key")
        except LookupError:
            pass
        else:
            self.fail("Did not raise LookupError when no match was found")
        # test 2: more than one match
        try:
            _ = self.db.geta(name="Tension*")
        except ValueError:
            pass
        else:
            self.fail("Did not raise ValueError when multiple matches were found")

    def test_get_correct_number_of_timesteps(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        rk = self.db.list(names="Heave", display=False)  # should be only 1 key returned in this case
        container = self.db.getda(names="Heave", fullkey=True)
        self.assertEqual(65536, len(container[rk[0]][0]), "Deviating number of time steps.")

    def test_add_raises_keyerror_on_nonunique_key(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        container = self.db.getm(names="Surge", fullkey=True)
        for k, v in container.items():
            try:
                self.db.add(v)
            except KeyError:
                pass
            else:
                self.fail("Did not raise KeyError when trying to add time series with non-unique name to db.")

    def test_add_does_not_raise_error(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        ts = TimeSeries("quiteuniquekeyiguess", np.arange(0., 100., 0.01), np.sin(np.arange(0., 100., 0.01)))
        self.db.add(ts)
        # should not raise errors

    def test_rename(self):
        tsfile = os.path.abspath(os.path.join(self.data_directory, 'simo_p.ts'))
        self.db.load(tsfile)
        oldname = "Tension_2_qs"
        newname = "mooringline"
        #
        oldkey = os.path.join(tsfile, oldname)
        newkey = os.path.join(tsfile, newname)
        # get data before rename()
        _, data1 = self.db.geta(name=oldname)
        parent1 = self.db.register_parent[oldkey]
        index1 = self.db.register_indices[oldkey]
        # rename
        self.db.rename(oldname, newname)
        # get data after rename()
        _, data2 = self.db.geta(name=newname)
        parent2 = self.db.register_parent[newkey]
        index2 = self.db.register_indices[newkey]
        # checks
        self.assertTrue(newkey in self.db.register_keys, "register_keys not updated by rename()")
        self.assertEqual(parent1, parent2, "register_parent not correctly updated")
        self.assertEqual(index1, index2, "register_indices not correctly updated")
        self.assertTrue(np.array_equal(data1, data2), "register not correctly updated")

    def test_rename_exception(self):
        tsfile = os.path.join(self.data_directory, 'simo_p.ts')
        self.db.load(tsfile)
        oldname = "Tension_2_qs"
        newname = "Tension_3_qs"
        try:
            self.db.rename(oldname, newname)
        except ValueError:
            pass
        else:
            self.fail("Did not throw ValueError when attempting renaming to non-unique name.")

    def test_maxima_minima(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        container = self.db.getm(names="Surge")
        for k, ts in container.items():
            _ = ts.maxima()
            _, _ = ts.maxima(rettime=True)
            _ = ts.minima()
            _, _ = ts.minima(rettime=True)
            # currently only testing that no errors are thrown

    def test_types_in_container_from_get_many(self):
        """
        Test correct types
        """
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        container = self.db.getda(names="Surge")
        for key, ts in container.items():
            self.assertIsInstance(key, str, "Key should be type string.")
            self.assertIsInstance(ts, tuple, "Time series container should be type tuple.")
            self.assertIsInstance(ts[0], np.ndarray, "First item of time series container should be type numpy array.")
            self.assertIsInstance(ts[1], np.ndarray, "Second item of time series container should be type numpy array.")

    def test_types_in_container_from_get_many_ts(self):
        """
        Test correct types
        """
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        container = self.db.getm(names="Surge")
        for key, ts in container.items():
            self.assertIsInstance(key, str, "Key should be type string.")
            self.assertIsInstance(ts, TimeSeries, "Time series container should be type TimeSeries.")
            self.assertIsInstance(ts.t, np.ndarray, "Attribute t of time series should be type numpy array.")
            self.assertIsInstance(ts.x, np.ndarray, "Attribute x of time series should be type numpy array.")

    def test_copy(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        name = "Surge"
        ts1 = self.db.get(name=name)
        db2 = self.db.copy()
        ts2 = db2.get(name=name)
        self.assertIsNot(ts1, ts2, "Copy with shallow=False kept binding on ts to source database")
        self.assertTrue(np.array_equal(ts1.x, ts2.x), "Copy did returned TimeSeries with different value array")

    def test_copy_shallow(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        name = "Surge"
        ts1 = self.db.get(name=name)
        db2 = self.db.copy(shallow=True)
        ts2 = db2.get(name=name)
        self.assertIs(ts1, ts2, "Copy with shallow=True did not return source instance")

    def test_update(self):
        pass
        # todo: update db2 name and ts names
        '''
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        n_before = self.db.n
        db2 = TsDB()
        db2.load(os.path.join(self.data_directory, ' ... '))
        self.db.update(db2, names="*")
        n_after = self.db.n
        ts1 = self.db.get_ts(name="")
        ts2 = db2.get_ts(name="")
        self.assertEqual(n_before + 3, n_after, "Did not update with correct number of keys")
        self.assertIsNot(ts1, ts2, "Update with shallow=False kept binding on ts to source database")
        '''

    def test_update_shallow(self):
        pass
        # todo: update db2 name and ts names
        '''
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        n_before = self.db.n
        db2 = TsDB()
        db2.load(os.path.join(self.data_directory, '....ts'))
        self.db.update(db2, names="JACKET*motion", shallow=True)
        n_after = self.db.n
        ts1 = self.db.get_ts(name="...")
        ts2 = db2.get_ts(name="...")
        self.assertEqual(n_before + 3, n_after, "Did not update with correct number of keys")
        self.assertIs(ts1, ts2, "Update with shallow=True did not return source instance")
        '''

    def test_is_common_time_false(self):
        pass
        # todo: update db2 name and ts names
        '''
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        self.db.load(os.path.join(self.data_directory, '....ts'))
        names = "Surge", "..."
        is_common = self.db.is_common_time(names=names)
        self.assertFalse(is_common, "'is_common_time()' did not report False")
        '''

    def test_is_common_time_true(self):
        pass
        # todo: update db2 name and ts names
        '''
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        self.db.load(os.path.join(self.data_directory, '....ts'))
        names = "Surge", "Sway"
        is_common = self.db.is_common_time(names=names)
        self.assertTrue(is_common, "'is_common_time()' did not report True")
        '''

    def test_export_uncommon_timearray_error(self):
        pass
        # todo: update db2 name and ts names
        '''
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        self.db.load(os.path.join(self.data_directory, '....ts'))
        names = "Surge", "..."
        keys = self.db.list(names=names, display=False)
        fnout = os.path.join(self.data_directory, '_test_export.ts')
        try:
            self.db.export(fnout, keys=keys)
        except ValueError:
            pass
        else:
            # clean exported files (in case they were exported even though they should not have been)
            os.remove(fnout)
            os.remove(os.path.splitext(fnout)[0] + ".key")
            self.fail("Did not throw exception when exporting un-common time arrays to .ts")
        '''

    def test_export(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        names = "Surge", "Sway"
        keys = self.db.list(names=names, display=False)
        fnout = os.path.join(self.data_directory, '_test_export.ts')
        try:
            # route screen dump from export to null
            was_stdout = sys.stdout
            f = open(os.devnull, 'w')
            sys.stdout = f
            # export, should not raise errors
            self.db.export(fnout, names=keys)
        finally:
            # reset sys.stdout
            sys.stdout = was_stdout
            f.close()
            # clean (remove exported files)
            try:
                os.remove(fnout)
                os.remove(os.path.splitext(fnout)[0] + ".key")
            except FileNotFoundError:
                pass
        # should not raise errors

    def test_export_reload(self):
        self.db.load(os.path.join(self.data_directory, 'mooring.ts'))
        name = "Sway"
        fnout = os.path.join(self.data_directory, '_test_export.ts')
        try:
            # route screen dump from export to null
            was_stdout = sys.stdout
            f = open(os.devnull, 'w')
            sys.stdout = f
            # export, should not raise errors
            self.db.export(fnout, names=name)
        finally:
            # reset sys.stdout
            sys.stdout = was_stdout
            f.close()
        # reload
        db2 = TsDB()
        db2.load(fnout)
        # compare ts
        ts1 = self.db.get(name=name)
        ts2 = db2.get(name=name)
        # clean exported files
        try:
            os.remove(fnout)
            os.remove(os.path.splitext(fnout)[0] + ".key")
        except FileNotFoundError:
            pass

        # check arrays
        self.assertTrue(np.array_equal(ts1.x, ts2.x), "Export/reload did not yield same arrays")

    def test_export_ascii(self):
        self.db.load(os.path.join(self.data_directory, 'model_test_data.dat'))
        names = "WaveC[m]", "Wave-S[m]", "Surge[m]"
        fnout = os.path.join(self.data_directory, '_test_export.dat')
        try:
            # route screen dump from export to null
            was_stdout = sys.stdout
            f = open(os.devnull, 'w')
            sys.stdout = f
            # export, should not raise errors
            self.db.export(fnout, names=names, verbose=False)
        finally:
            # clean exported files and route screen dump back
            os.remove(fnout)
            sys.stdout = was_stdout
            f.close()
        # should not raise errors

    def test_export_reload_ascii(self):
        self.db.load(os.path.join(self.data_directory, 'model_test_data.dat'))
        name = "Wave-S[m]"
        fnout = os.path.join(self.data_directory, '_test_export.dat')
        try:
            # route screen dump from export to null
            was_stdout = sys.stdout
            f = open(os.devnull, 'w')
            sys.stdout = f
            # export, should not raise errors
            self.db.export(fnout, names=name)
        finally:
            sys.stdout = was_stdout
            f.close()
        # reload
        db2 = TsDB()
        db2.load(fnout)
        # compare ts
        ts1 = self.db.get(name=name)
        ts2 = db2.get(name=name)

        # clean exported files
        os.remove(fnout)

        # check arrays
        np.testing.assert_array_almost_equal(ts1.x, ts2.x, 6, "Export/reload did not yield same arrays")

    def test_stats_dataframe(self):
        """ Test that stats dataframe is correctly constructed """
        fn = os.path.join(self.data_directory, 'mooring.ts')
        keys = ["Surge", "Sway", "Heave"]
        db = TsDB.fromfile(fn)
        stats = db.stats(names=keys)  # type: dict
        df = db.stats_dataframe(names=keys)
        # check that statistics for each time series is correctly stored in columns
        self.assertListEqual(keys, list(df.keys()), "Statistics dataframe does not have time series stats in columns")
        # check that the values are correctly fetched from dataframe
        failed = []
        for k in keys:
            for kstat, val in stats[k].items():
                if not df[k][kstat] == val:
                    failed.append((k, kstat))
        self.assertFalse(failed, "Statistics dataframe values don't match the statistics dict")
Example #17
"""
Calculate mooring line fatigue.
"""
import os
from math import pi
from qats import TsDB
from qats.fatigue.sn import SNCurve, minersum

# load time series
db = TsDB.fromfile(os.path.join("..", "..", "..", "data", "simo_p_out.ts"))

# initiate SN-curve: DNVGL-OS-E301 curve for studless chain
sncurve = SNCurve(name="Studless chain OS-E301", m1=3.0, a1=6e10)

# Calculate fatigue damage for all mooring line tension time series (kN)
for ts in db.getl(names='tension_*_qs'):
    # count tension (discarding the 100s transient)
    cycles = ts.rfc(twin=(100., 1e12))

    # unpack cycle range and count as separate lists (discard cycle means)
    ranges, _, counts = zip(*cycles)

    # calculate cross section stress cycles (118mm studless chain)
    area = 2. * pi * (118. / 2.)**2.  # mm^2
    ranges = [r * 1e3 / area for r in ranges]  # MPa

    # calculate fatigue damage from Palmgren-Miner rule (SCF=1, no thickness correction)
    damage = minersum(ranges, counts, sncurve)

    # print summary
    print(f"{ts.name}:")
Example #18
"""
Example on working with cycle range and range-mean distributions.
"""
import os
from qats import TsDB

# locate time series file
file_name = os.path.join("..", "..", "..", "data", "simo_p_out.ts")

# load time series directly on db initiation
db = TsDB.fromfile(file_name)

# fetch one of the time series from the db
ts = db.get(name='tension_2_qs')

# plot its cycle ranges as bar diagram
ts.plot_cycle_range(n=100)

# plot its cycle-range-mean distribution as scatter
ts.plot_cycle_rangemean(n=100)

# ... or as a 3D surface.
ts.plot_cycle_rangemean3d(nr=25, nm=25)

# you can also collect the cycle range-mean and count numbers (see TimeSeries.rfc and TimeSeries.get for options)
cycles = ts.rfc()
ranges, means, counts = zip(*cycles)    # unpack to separate lists if you prefer

# The TsDB class also has similar methods to ease comparison
# compare cycle range distribution (range versus count) grouped in 100 bins
db.plot_cycle_range(names='tension*', n=100)
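With the unpacked lists it is also straightforward to form simple aggregates in plain Python (this is not a qats API call), for example the total cycle count and the count-weighted mean cycle range:

total_count = sum(counts)
mean_range = sum(r * c for r, c in zip(ranges, counts)) / total_count
print(f"{total_count} cycles, count-weighted mean range: {mean_range:.3g}")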