Example #1
    def test_tutorial_examples(self):
        """Runs all tutorial examples. The test passes if all run without
        errors."""
        example_script = 'tutorial_ex%d.py'
        for example_num in range(1, 7):
            # Example 3 isn't meant to work in parallel
            if not (parallel.is_distributed() and example_num == 3):
                #printing(False)
                parallel.barrier()
                execfile(join(examples_dir, example_script % example_num))
                parallel.barrier()
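
Note: `execfile` exists only in Python 2 (Example #7 below imports it from `past.builtins`). A minimal Python 3 equivalent, as a sketch assuming the same `examples_dir` and `example_script` variables as above, is:

    script_path = join(examples_dir, example_script % example_num)
    with open(script_path) as f:
        exec(compile(f.read(), script_path, 'exec'), {'__name__': '__main__'})
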
Example #2
def get_adjoint_impulse_response_array(A, C, num_steps, weights_array):
    num_outputs, num_states = C.shape
    A_adjoint = np.linalg.inv(weights_array).dot(A.conj().T.dot(weights_array))
    C_adjoint = np.linalg.inv(weights_array).dot(C.conj().T)
    adjoint_vecs = np.zeros((num_states, num_steps * num_outputs), dtype=A.dtype)
    A_adjoint_powers = np.identity(num_states)
    for idx in range(num_steps):
        adjoint_vecs[:, (idx * num_outputs):(idx + 1) * num_outputs] =\
            A_adjoint_powers.dot(C_adjoint)
        A_adjoint_powers = A_adjoint_powers.dot(A_adjoint)
    return adjoint_vecs
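
A direct-dynamics counterpart would stack B, A B, A^2 B, ... column-block by column-block; the sketch below is hypothetical (it does not appear in this excerpt) but shows how such an array pairs with the adjoint one above to supply BPOD with direct and adjoint impulse-response data. It assumes numpy is imported as np, as above.

def get_direct_impulse_response_array(A, B, num_steps):
    num_states, num_inputs = B.shape
    direct_vecs = np.zeros((num_states, num_steps * num_inputs), dtype=A.dtype)
    A_powers = np.identity(num_states)
    for idx in range(num_steps):
        # Columns idx * num_inputs:(idx + 1) * num_inputs hold A^idx B
        direct_vecs[:, (idx * num_inputs):(idx + 1) * num_inputs] =\
            A_powers.dot(B)
        A_powers = A_powers.dot(A)
    return direct_vecs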


#@unittest.skip('Testing something else.')
@unittest.skipIf(parallel.is_distributed(), 'Serial only.')
class TestBPODArrays(unittest.TestCase):
    def setUp(self):
        self.num_states = 10
        self.num_steps = self.num_states + 1


    def test_all(self):
        # Set test tolerances.  Separate, more relaxed tolerances are required
        # for testing the BPOD modes, since that test requires "squaring" the
        # gramians and thus involves more ill-conditioned arrays.
        rtol = 1e-8
        atol = 1e-10
        rtol_sqr = 1e-8
        atol_sqr = 1e-8
Example #3
        interval: interval between pairs of time steps, as shown above.

    Returns:
        time_steps: array of integers, time steps [0 1 interval interval+1 ...]
    """
    if num_steps % 2 != 0:
        raise ValueError('num_steps, %d, must be even' % num_steps)
    interval = int(interval)
    time_steps = np.zeros(num_steps, dtype=int)
    time_steps[::2] = interval * np.arange(num_steps // 2)
    time_steps[1::2] = 1 + interval * np.arange(num_steps // 2)
    return time_steps
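
A minimal usage sketch (the function's name is cut off in this excerpt, so a hypothetical name is shown):

    >>> sampled_time_steps(num_steps=6, interval=3)
    array([0, 1, 3, 4, 6, 7])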


@unittest.skipIf(parallel.is_distributed(), 'Only test ERA in serial')
class testERA(unittest.TestCase):
    def setUp(self):
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'files_ERA_DELETE_ME'
        if not os.path.exists(self.test_dir):
            os.mkdir(self.test_dir)
        self.impulse_file_path = join(self.test_dir, 'impulse_input%03d.txt')

    def tearDown(self):
        """Deletes all of the arrays created by the tests"""
        rmtree(self.test_dir, ignore_errors=True)

    #@unittest.skip('Testing others')
    def test_make_sampled_format(self):
Example #4
class TestUtil(unittest.TestCase):
    """Tests all of the functions in util.py

    To test all parallel features, use "mpiexec -n 2 python testutil.py"
    """
    def setUp(self):
        self.test_dir = 'files_util_DELETE_ME'
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        if parallel.is_rank_zero():
            if not os.path.isdir(self.test_dir):
                os.mkdir(self.test_dir)

    def tearDown(self):
        parallel.barrier()
        if parallel.is_rank_zero():
            rmtree(self.test_dir, ignore_errors=True)
        parallel.barrier()

    #@unittest.skip('Testing something else.')
    def test_atleast_2d(self):
        # Test a 0d array.  Check that after reshaping to 2d, the value is the
        # same, but the shape is a row/column vector as specified.
        vec0d = np.array(1.)
        vec0d_row = util.atleast_2d_row(vec0d)
        vec0d_col = util.atleast_2d_col(vec0d)
        np.testing.assert_array_equal(vec0d, vec0d_row.squeeze())
        np.testing.assert_array_equal(vec0d, vec0d_col.squeeze())
        self.assertEqual(vec0d_row.shape, (1, 1))
        self.assertEqual(vec0d_col.shape, (1, 1))

        # Test a 1d array.  Check that after reshaping to 2d, the values are the
        # same, but the shape is a row/column vector as specified.
        vec1d = np.ones((3))
        vec1d_row = util.atleast_2d_row(vec1d)
        vec1d_col = util.atleast_2d_col(vec1d)
        np.testing.assert_array_equal(vec1d.squeeze(), vec1d_row.squeeze())
        np.testing.assert_array_equal(vec1d.squeeze(), vec1d_col.squeeze())
        self.assertEqual(vec1d.shape, (vec1d.size, ))
        self.assertEqual(vec1d_row.shape, (1, vec1d.size))
        self.assertEqual(vec1d_col.shape, (vec1d.size, 1))

        # Test a 2d array.  Nothing should change about the array.
        vec2d = np.ones((3, 3))
        vec2d_row = util.atleast_2d_row(vec2d)
        vec2d_col = util.atleast_2d_col(vec2d)
        np.testing.assert_array_equal(vec2d, vec2d_row)
        np.testing.assert_array_equal(vec2d, vec2d_col)
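
        # Aside (a sketch of assumed behavior, not the actual modred code):
        # the utilities exercised above presumably act like
        #     atleast_2d_row(a) -> np.atleast_2d(a)                # (1, n)
        #     atleast_2d_col(a) -> np.atleast_2d(a).T if a.ndim < 2 else a
        # so 0d/1d inputs become explicit row or column vectors and 2d inputs
        # pass through unchanged.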

    #@unittest.skip('Testing something else.')
    @unittest.skipIf(parallel.is_distributed(),
                     'Only save/load arrays in serial')
    def test_load_save_array_text(self):
        """Test that can read/write text arrays"""
        rows = [1, 5, 20]
        cols = [1, 4, 5, 23]
        array_path = join(self.test_dir, 'test_array.txt')
        delimiters = [',', ' ', ';']
        for delimiter in delimiters:
            for is_complex in [False, True]:
                for squeeze in [False, True]:
                    for num_rows in rows:
                        for num_cols in cols:

                            # Generate real and complex arrays
                            array = np.random.random((num_rows, num_cols))
                            if is_complex:
                                array = array + (1j * np.random.random(
                                    (num_rows, num_cols)))

                            # Check row and column vectors, no squeeze (1, 1)
                            if squeeze and (num_rows > 1 or num_cols > 1):
                                array = np.squeeze(array)
                            util.save_array_text(array,
                                                 array_path,
                                                 delimiter=delimiter)
                            array_read = util.load_array_text(
                                array_path,
                                delimiter=delimiter,
                                is_complex=is_complex)
                            if squeeze:
                                array_read = np.squeeze(array_read)
                            np.testing.assert_equal(array_read, array)

    #@unittest.skip('Testing something else.')
    @unittest.skipIf(parallel.is_distributed(), 'Only load arrays in serial')
    def test_svd(self):
        # Set tolerance for testing eigval/eigvec property
        test_atol = 1e-10

        # Check tall, fat, and square arrays
        num_rows_list = [100]
        num_cols_list = [50, 100, 150]

        # Loop through different array sizes
        for num_rows in num_rows_list:
            for num_cols in num_cols_list:

                # Check real and complex data
                for is_complex in [True, False]:

                    # Generate a random array with elements in [0, 1]
                    array = np.random.random((num_rows, num_cols))
                    if is_complex:
                        array = array + 1j * np.random.random(
                            (num_rows, num_cols))

                    # Compute full set of singular values to help choose
                    # tolerance levels that guarantee truncation (otherwise
                    # tests won't actually check those features).
                    sing_vals_full = np.linalg.svd(array, full_matrices=0)[1]
                    atol_list = [np.median(sing_vals_full), None]
                    rtol_list = [
                        np.median(sing_vals_full) / np.max(sing_vals_full),
                        None
                    ]

                    # Loop through different tolerance cases
                    for atol in atol_list:
                        for rtol in rtol_list:

                            # For all arrays, check that the output of util.svd
                            # satisfies the definition of an SVD.  Do this by
                            # checking eigval/eigvec properties, which must be
                            # satisfied by the sing vecs and sing vals, even if
                            # there is truncation.  The fact that the singular
                            # vectors are eigenvectors of a normal array ensures
                            # that they are unitary, so we don't have to check
                            # that separately.
                            L_sing_vecs, sing_vals, R_sing_vecs = util.svd(
                                array, atol=atol, rtol=rtol)
                            np.testing.assert_allclose(
                                array.dot(array.conj().T.dot(L_sing_vecs)) -
                                L_sing_vecs.dot(np.diag(sing_vals**2)),
                                np.zeros(L_sing_vecs.shape),
                                atol=test_atol)
                            np.testing.assert_allclose(
                                array.conj().T.dot(array.dot(R_sing_vecs)) -
                                R_sing_vecs.dot(np.diag(sing_vals**2)),
                                np.zeros(R_sing_vecs.shape),
                                atol=test_atol)

                            # If either tolerance is nonzero, make sure that
                            # something is actually truncated, otherwise force
                            # test to quit.  To do this, make sure the eigvec
                            # array is not square.
                            if rtol and sing_vals.size == sing_vals_full.size:
                                raise ValueError(
                                    'Failed to choose relative tolerance that '
                                    'forces truncation.')
                            if atol and sing_vals.size == sing_vals_full.size:
                                raise ValueError(
                                    'Failed to choose absolute tolerance that '
                                    'forces truncation.')

                            # If necessary, test that tolerances are satisfied
                            if atol:
                                self.assertTrue(abs(sing_vals[-1]) > atol)
                            if rtol:
                                self.assertTrue(
                                    abs(sing_vals[-1]) / abs(sing_vals[0]) >
                                    rtol)
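
    # Illustration (a sketch, not part of the test suite): numpy's own SVD
    # satisfies the same identities checked above.  With economy-size factors
    # array = U S V^H:
    #     U, S, Vh = np.linalg.svd(array, full_matrices=False)
    #     V = Vh.conj().T
    #     np.allclose(array.dot(array.conj().T).dot(U), U.dot(np.diag(S**2)))
    #     np.allclose(array.conj().T.dot(array).dot(V), V.dot(np.diag(S**2)))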

    #@unittest.skip('Testing something else.')
    @unittest.skipIf(parallel.is_distributed(), 'Only load arrays in serial')
    def test_eigh(self):
        # Set tolerance for test of eigval/eigvec properties.  Value necessary
        # for test to pass depends on array size, as well as atol and rtol
        # values
        test_atol = 1e-12

        # Generate random array
        num_rows = 100

        # Test arrays that are and are not positive definite
        for is_pos_def in [True, False]:

            # Test both real and complex data
            for is_complex in [True, False]:

                # Generate random array with values between 0 and 1
                array = np.random.random((num_rows, num_rows))
                if is_complex:
                    array = array + 1j * np.random.random((num_rows, num_rows))

                # Make array conjugate-symmetric.  Note that if the array is
                # large, for some reason an in-place operation causes the
                # operation to fail (not sure why...).  Values are still between
                # 0 and 1.
                array = 0.5 * (array + array.conj().T)

                # If necessary, make the array positive definite by first making
                # it symmetric (adding the transpose), and then making it
                # diagonally dominant (each element is less than 1, so add N * I
                # to make the diagonal dominant).  Here an in-place change seems
                # to be ok.
                if is_pos_def:
                    array = array + num_rows * np.eye(num_rows)

                    # Make sure array is positive definite, otherwise
                    # force test to quit.
                    if np.linalg.eig(array)[0].min() < 0.:
                        raise ValueError(
                            'Failed to generate positive definite array '
                            'for test.')

                # Compute full set of eigenvalues to help choose tolerance
                # levels that guarantee truncation (otherwise tests won't
                # actually check those features).
                eigvals_full = np.linalg.eig(array)[0]
                atol_list = [np.median(abs(eigvals_full)), None]
                rtol_list = [
                    np.median(abs(eigvals_full)) / abs(np.max(eigvals_full)),
                    None
                ]

                # Loop through different tolerance values
                for atol in atol_list:
                    for rtol in rtol_list:

                        # For each case, test that returned values are in fact
                        # eigenvalues and eigenvectors of the given array.
                        # Since each pair that is returned (not truncated due to
                        # tolerances) should have this property, we can test
                        # this even if tolerances are passed in.  Compare to the
                        # zero array because then we only have to check the
                        # absolute magnitude of each term, rather than considering
                        # relative errors with respect to nonzero terms.
                        eigvals, eigvecs = util.eigh(
                            array,
                            rtol=rtol,
                            atol=atol,
                            is_positive_definite=is_pos_def)
                        np.testing.assert_allclose(
                            array.dot(eigvecs) - eigvecs.dot(np.diag(eigvals)),
                            np.zeros(eigvecs.shape),
                            atol=test_atol)

                        # If either tolerance is nonzero, make sure that
                        # something is actually truncated, otherwise force test
                        # to quit.  To do this, make sure the eigvec array is
                        # not square.
                        if rtol and eigvals.size == eigvals_full.size:
                            raise ValueError(
                                'Failed to choose relative tolerance that '
                                'forces truncation.')
                        if atol and eigvals.size == eigvals_full.size:
                            raise ValueError(
                                'Failed to choose absolute tolerance that '
                                'forces truncation.')

                        # If the positive definite flag is passed in, make sure
                        # the returned eigenvalues are all positive
                        if is_pos_def:
                            self.assertTrue(eigvals.min() > 0)

                        # If a relative tolerance is passed in, make sure the
                        # relative tolerance is satisfied.
                        if rtol is not None:
                            self.assertTrue(
                                abs(eigvals).min() / abs(eigvals).max() > rtol)

                        # If an absolute tolerance is passed in, make sure the
                        # absolute tolerance is satisfied.
                        if atol is not None:
                            self.assertTrue(abs(eigvals).min() > atol)

    #@unittest.skip('Testing something else.')
    @unittest.skipIf(parallel.is_distributed(), 'Only load arrays in serial')
    def test_eig_biorthog(self):
        test_atol = 1e-10
        num_rows = 100

        # Test real and complex data
        for is_complex in [True, False]:
            array = np.random.random((num_rows, num_rows))
            if is_complex:
                array = array + 1j * np.random.random((num_rows, num_rows))

            # Test different scale choices
            for scale_choice in ['left', 'right']:
                R_eigvals, R_eigvecs, L_eigvecs = util.eig_biorthog(
                    array, scale_choice=scale_choice)

                # Check eigenvector/eigenvalue relationship (use right
                # eigenvalues only).  Test difference so that all values are
                # compared to zeros, avoiding need to check relative tolerances.
                np.testing.assert_allclose(array.dot(R_eigvecs) -
                                           R_eigvecs.dot(np.diag(R_eigvals)),
                                           np.zeros(array.shape),
                                           atol=test_atol)
                np.testing.assert_allclose(
                    L_eigvecs.conj().T.dot(array) -
                    np.diag(R_eigvals).dot(L_eigvecs.conj().T),
                    np.zeros(array.shape),
                    atol=test_atol)

                # Check biorthogonality (take magnitudes since inner products
                # are complex in general).  Again, take difference so that all
                # test values should be zero, avoiding need for rtol
                ip_array = L_eigvecs.conj().T.dot(R_eigvecs)
                np.testing.assert_allclose(np.abs(ip_array) - np.eye(num_rows),
                                           np.zeros(ip_array.shape),
                                           atol=test_atol)

                # Check for unit norms
                if scale_choice == 'left':
                    unit_eigvecs = R_eigvecs
                elif scale_choice == 'right':
                    unit_eigvecs = L_eigvecs
                np.testing.assert_allclose(
                    np.sqrt(
                        np.sum(np.multiply(unit_eigvecs, unit_eigvecs.conj()),
                               axis=0)).squeeze(), np.ones(R_eigvals.size))

        # Check that error is raised for invalid scale choice
        self.assertRaises(ValueError, util.eig_biorthog, array,
                          **{'scale_choice': 'invalid'})

    #@unittest.skip('Testing something else.')
    @unittest.skipIf(parallel.is_distributed(), 'Only load data in serial')
    def test_load_impulse_outputs(self):
        """
        Test loading multiple signal files in [t sig1 sig2 ...] format.

        Creates signals, saves them, loads them, and checks that the loaded
        data equal the originals.
        """
        signal_path = join(self.test_dir, 'file%03d.txt')
        for num_paths in [1, 4]:
            for num_signals in [1, 2, 4, 5]:
                for num_time_steps in [1, 10, 100]:
                    all_signals_true = np.random.random(
                        (num_paths, num_time_steps, num_signals))
                    # Time steps need not be sequential
                    time_values_true = np.random.random(num_time_steps)

                    signal_paths = []
                    # Save signals to file
                    for path_num in range(num_paths):
                        signal_paths.append(signal_path % path_num)
                        data_to_save = np.concatenate( \
                          (time_values_true.reshape(len(time_values_true), 1),
                          all_signals_true[path_num]), axis=1)
                        util.save_array_text(data_to_save,
                                             signal_path % path_num)

                    time_values, all_signals = util.load_multiple_signals(
                        signal_paths)
                    np.testing.assert_allclose(all_signals, all_signals_true)
                    np.testing.assert_allclose(time_values, time_values_true)

    #@unittest.skip('Testing something else.')
    @unittest.skipIf(parallel.is_distributed(), 'Serial only.')
    def test_balanced_truncation(self):
        """Test balanced system is close to original."""
        for num_inputs in [1, 3]:
            for num_outputs in [1, 4]:
                for num_states in [1, 10]:
                    A, B, C = util.drss(num_states, num_inputs, num_outputs)
                    Ar, Br, Cr = util.balanced_truncation(A, B, C)
                    num_time_steps = 10
                    y = util.impulse(A, B, C, num_time_steps=num_time_steps)
                    yr = util.impulse(Ar,
                                      Br,
                                      Cr,
                                      num_time_steps=num_time_steps)
                    np.testing.assert_allclose(yr, y, rtol=1e-3, atol=1e-3)

    #@unittest.skip('Testing something else.')
    @unittest.skipIf(parallel.is_distributed(), 'Serial only.')
    def test_drss(self):
        """Test drss gives correct array dimensions and stable dynamics."""
        for num_states in [1, 5, 14]:
            for num_inputs in [1, 3, 6]:
                for num_outputs in [1, 2, 3, 7]:
                    A, B, C = util.drss(num_states, num_inputs, num_outputs)
                    self.assertEqual(A.shape, (num_states, num_states))
                    self.assertEqual(B.shape, (num_states, num_inputs))
                    self.assertEqual(C.shape, (num_outputs, num_states))
                    self.assertTrue(np.amax(np.abs(np.linalg.eig(A)[0])) < 1)

    #@unittest.skip('Testing something else.')
    @unittest.skipIf(parallel.is_distributed(), 'Serial only.')
    def test_lsim(self):
        """Test that lsim has right shapes, does not test result"""
        for num_states in [1, 4, 9]:
            for num_inputs in [1, 2, 4]:
                for num_outputs in [1, 2, 3, 5]:
                    A, B, C = util.drss(num_states, num_inputs, num_outputs)
                    nt = 20
                    inputs = np.random.random((nt, num_inputs))
                    outputs = util.lsim(A, B, C, inputs)
                    self.assertEqual(outputs.shape, (nt, num_outputs))

    #@unittest.skip('Testing something else.')
    def test_Hankel(self):
        """Test forming Hankel array from first column and last row."""
        for num_rows in [1, 4, 6]:
            for num_cols in [1, 3, 6]:

                # Generate simple integer values so structure of array is easy
                # to see.  This doesn't affect the robustness of the test, as
                # all we are concerned about is structure.
                first_col = np.arange(1, num_rows + 1)
                last_row = np.arange(1, num_cols + 1) * 10
                last_row[0] = first_col[-1]

                # Fill in Hankel array.  Recall that along skew diagonals, i +
                # j is constant.
                Hankel_true = np.zeros((num_rows, num_cols))
                for i in range(num_rows):
                    for j in range(num_cols):

                        # Upper left triangle of values.  Fill skew diagonals
                        # until we hit the lower left corner of the array, where
                        # i + j = num_rows - 1.
                        if i + j < num_rows:
                            Hankel_true[i, j] = first_col[i + j]

                        # Lower right triangle of values.  Starting on skew
                        # diagonal just to right of lower left corner of array,
                        # fill in rest of values.
                        else:
                            Hankel_true[i, j] = last_row[i + j - num_rows + 1]

                # Compute Hankel array using util and test
                Hankel_test = util.Hankel(first_col, last_row)
                np.testing.assert_equal(Hankel_test, Hankel_true)
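
    # Concrete illustration (a sketch, not part of the test): with
    # first_col = [1, 2, 3] and last_row = [3, 20, 30] (corner entries must
    # agree), util.Hankel is expected to return
    #     [[ 1,  2,  3],
    #      [ 2,  3, 20],
    #      [ 3, 20, 30]]
    # since entries are constant along each skew diagonal (constant i + j).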

    #@unittest.skip('Testing something else.')
    def test_Hankel_chunks(self):
        """Test forming Hankel array using chunks."""
        chunk_num_rows = 2
        chunk_num_cols = 2
        chunk_shape = (chunk_num_rows, chunk_num_cols)
        for num_row_chunks in [1, 4, 6]:
            for num_col_chunks in [1, 3, 6]:

                # Generate simple values that make it easy to see the array
                # structure
                first_col_chunks = [
                    np.ones(chunk_shape) * (i + 1)
                    for i in range(num_row_chunks)
                ]
                last_row_chunks = [
                    np.ones(chunk_shape) * (j + 1) * 10
                    for j in range(num_col_chunks)
                ]
                last_row_chunks[0] = first_col_chunks[-1]

                # Fill in Hankel array chunk by chunk
                Hankel_true = np.zeros((num_row_chunks * chunk_shape[0],
                                        num_col_chunks * chunk_shape[1]))
                for i in range(num_row_chunks):
                    for j in range(num_col_chunks):

                        # Upper left triangle of values
                        if i + j < num_row_chunks:
                            Hankel_true[
                                i * chunk_num_rows:(i + 1) * chunk_num_rows,
                                j * chunk_num_cols:(j + 1) * chunk_num_cols] =\
                                first_col_chunks[i + j]

                        # Lower right triangle of values
                        else:
                            Hankel_true[
                                i * chunk_num_rows:(i + 1) * chunk_num_rows,
                                j * chunk_num_cols:(j + 1) * chunk_num_cols] =\
                                last_row_chunks[i + j - num_row_chunks + 1]

                # Compute Hankel array using util and test
                Hankel_test = util.Hankel_chunks(
                    first_col_chunks, last_row_chunks=last_row_chunks)
                np.testing.assert_equal(Hankel_test, Hankel_true)
Example #5
#!/usr/bin/env python
"""Test vectors module"""
import unittest
import os
from os.path import join
from shutil import rmtree

import numpy as np

from modred import vectors as vcs, parallel


@unittest.skipIf(parallel.is_distributed(), 'No need to test in parallel')
class TestVectors(unittest.TestCase):
    """Test the vector methods """
    def setUp(self):
        self.test_dir = 'files_vectors_DELETE_ME'
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        if not os.path.isdir(self.test_dir) and parallel.is_rank_zero():
            os.mkdir(self.test_dir)
        parallel.barrier()
        self.mode_nums = [2, 4, 3, 6, 9, 8, 10, 11, 30]
        self.num_vecs = 40
        self.num_states = 100
        self.index_from = 2

    def tearDown(self):
        parallel.barrier()
        if parallel.is_rank_zero():
            rmtree(self.test_dir, ignore_errors=True)
Example #6
        # (This excerpt starts mid-method; the A and B paths below are assumed
        # to mirror the C path that survives in the original.)
        A_reduced_path = join(self.test_dir, 'A.txt')
        B_reduced_path = join(self.test_dir, 'B.txt')
        C_reduced_path = join(self.test_dir, 'C.txt')
        A = parallel.call_and_bcast(np.random.random, ((10, 10)))
        B = parallel.call_and_bcast(np.random.random, ((1, 10)))
        C = parallel.call_and_bcast(np.random.random, ((10, 2)))
        LTI_proj = lgp.LTIGalerkinProjectionBase()
        LTI_proj.A_reduced = A.copy()
        LTI_proj.B_reduced = B.copy()
        LTI_proj.C_reduced = C.copy()
        LTI_proj.put_model(A_reduced_path, B_reduced_path, C_reduced_path)
        np.testing.assert_equal(util.load_array_text(A_reduced_path), A)
        np.testing.assert_equal(util.load_array_text(B_reduced_path), B)
        np.testing.assert_equal(util.load_array_text(C_reduced_path), C)


#@unittest.skip('Testing something else.')
@unittest.skipIf(parallel.is_distributed(), 'Serial only')
class TestLTIGalerkinProjectionArrays(unittest.TestCase):
    """Tests that can find the correct A, B, and C arrays."""
    def setUp(self):
        self.num_basis_vecs = 10
        self.num_adjoint_basis_vecs = 10
        self.num_states = 11
        self.num_inputs = 3
        self.num_outputs = 2

        self.generate_data_set(self.num_basis_vecs,
                               self.num_adjoint_basis_vecs, self.num_states,
                               self.num_inputs, self.num_outputs)

        self.LTI_proj = lgp.LTIGalerkinProjectionArrays(
            self.basis_vecs,
Example #7
#!/usr/bin/env python
from past.builtins import execfile
from future.builtins import range

import os

from modred import parallel
import modred as mr  # modred must be installed.

for i in range(1, 7):
    if not parallel.is_distributed():
        execfile('tutorial_ex%d.py' % i)
    if parallel.is_distributed() and i >= 3:
        parallel.barrier()
        execfile('tutorial_ex%d.py' % i)
        parallel.barrier()

for i in range(1, 3):
    if not parallel.is_distributed():
        execfile('rom_ex%d.py' % i)
    if parallel.is_distributed() and i > 1:
        execfile('rom_ex%d.py' % i)
        parallel.barrier()

if not parallel.is_distributed():
    execfile('main_CGL.py')
Example #8
#!/usr/bin/env python
"""Test vectors module"""
import unittest
import os
from os.path import join
from shutil import rmtree

import numpy as np

from modred import vectors as vcs, parallel


@unittest.skipIf(parallel.is_distributed(), 'No need to test in parallel')
class TestVectors(unittest.TestCase):
    """Test the vector methods """
    def setUp(self):
        self.test_dir = 'files_vectors_DELETE_ME'
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        if not os.path.isdir(self.test_dir) and parallel.is_rank_zero():
            os.mkdir(self.test_dir)
        parallel.barrier()
        self.mode_nums = [2, 4, 3, 6, 9, 8, 10, 11, 30]
        self.num_vecs = 40
        self.num_states = 100
        self.index_from = 2


    def tearDown(self):
        parallel.barrier()
        if parallel.is_rank_zero():
            rmtree(self.test_dir, ignore_errors=True)
Example #9
class TestVectorSpaceHandles(unittest.TestCase):
    """ Tests of the VectorSpaceHandles class """
    def setUp(self):
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'files_vectorspace_DELETE_ME'
        if not os.path.isdir(self.test_dir):
            parallel.call_from_rank_zero(os.mkdir, self.test_dir)

        self.max_vecs_per_proc = 10
        self.total_num_vecs_in_mem = (
            parallel.get_num_procs() * self.max_vecs_per_proc)

        self.vec_space = vspc.VectorSpaceHandles(
            inner_product=np.vdot, verbosity=0)
        self.vec_space.max_vecs_per_proc = self.max_vecs_per_proc

        # Default data members; set verbosity to 0 even though default is 1
        # so messages won't print during tests
        self.default_data_members = {
            'inner_product': np.vdot, 'max_vecs_per_node': 10000,
            'max_vecs_per_proc': (
                10000 * parallel.get_num_nodes() // parallel.get_num_procs()),
            'verbosity': 0, 'print_interval': 10, 'prev_print_time': 0.}
        parallel.barrier()


    def tearDown(self):
        parallel.barrier()
        parallel.call_from_rank_zero(rmtree, self.test_dir, ignore_errors=True)
        parallel.barrier()


    #@unittest.skip('Testing other things')
    def test_init(self):
        """Test arguments passed to the constructor are assigned properly."""
        data_members_original = util.get_data_members(
            vspc.VectorSpaceHandles(inner_product=np.vdot, verbosity=0))
        self.assertEqual(data_members_original, self.default_data_members)

        max_vecs_per_node = 500
        vec_space = vspc.VectorSpaceHandles(
            inner_product=np.vdot, max_vecs_per_node=max_vecs_per_node,
            verbosity=0)
        data_members = copy.deepcopy(data_members_original)
        data_members['max_vecs_per_node'] = max_vecs_per_node
        data_members['max_vecs_per_proc'] = (
            max_vecs_per_node *
            parallel.get_num_nodes() // parallel.get_num_procs())
        self.assertEqual(util.get_data_members(vec_space), data_members)


    #@unittest.skip('Testing other things')
    def test_sanity_check(self):
        """Tests correctly checks user-supplied objects and functions."""
        # Setup
        nx = 40
        ny = 15
        test_array = np.random.random((nx, ny))
        vec_space = vspc.VectorSpaceHandles(inner_product=np.vdot, verbosity=0)
        in_mem_handle = VecHandleInMemory(test_array)
        vec_space.sanity_check(in_mem_handle)

        # Define some weird vectors that alter their internal data when adding
        # or multiplying (which they shouldn't do).
        class SanityMultVec(Vector):
            def __init__(self, arr):
                self.arr = arr

            def __add__(self, obj):
                f_return = copy.deepcopy(self)
                f_return.arr += obj.arr
                return f_return

            def __mul__(self, a):
                self.arr *= a
                return self

        class SanityAddVec(Vector):
            def __init__(self, arr):
                self.arr = arr

            def __add__(self, obj):
                self.arr += obj.arr
                return self

            def __mul__(self, a):
                f_return = copy.deepcopy(self)
                f_return.arr *= a
                return f_return

        # Define an inner product function for Sanity vec handles
        def good_custom_ip(vec1, vec2):
            return np.vdot(vec1.arr, vec2.arr)

        # Define a bad inner product for regular arrays
        def bad_array_ip(vec1, vec2):
            return np.vdot(vec1, vec2 ** 2.)

        # Make sure that sanity check passes for vectors with properly defined
        # vector operations.  Do so by simply calling the function.  If it
        # passes, then no error will be raised.
        vec_space.inner_product = np.vdot
        vec_space.sanity_check(VecHandleInMemory(test_array))

        # Make sure that sanity check fails if inner product values are not
        # correct.
        vec_space.inner_product = bad_array_ip
        self.assertRaises(
            ValueError,
            vec_space.sanity_check, VecHandleInMemory(test_array))

        # Make sure that sanity check fails if vectors alter their
        # internal data when doing vector space operations.
        vec_space.inner_product = good_custom_ip
        sanity_mult_vec = SanityMultVec(test_array)
        self.assertRaises(
            ValueError,
            vec_space.sanity_check, VecHandleInMemory(sanity_mult_vec))
        sanity_add_vec = SanityAddVec(test_array)
        self.assertRaises(
            ValueError,
            vec_space.sanity_check, VecHandleInMemory(sanity_add_vec))
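
    # For contrast (a sketch, not part of the test): a well-behaved vector
    # class returns new objects from both operators, e.g.
    #     class GoodVec(Vector):
    #         def __init__(self, arr):
    #             self.arr = arr
    #         def __add__(self, other):
    #             return GoodVec(self.arr + other.arr)
    #         def __mul__(self, scalar):
    #             return GoodVec(self.arr * scalar)
    # and vec_space.sanity_check(VecHandleInMemory(GoodVec(test_array))) would
    # be expected to pass with good_custom_ip as the inner product.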


    def generate_vecs_modes(
        self, num_states, num_vecs, num_modes=None, squeeze=False):
        """Generates random vecs and finds the modes.

        Returns:
            vec_array: array in which each column is a vec (in order)
            coeff_array: array of shape num_vecs x num_modes, random
                entries
            mode_array: array of modes, each column is a mode.
                The index of the array column equals the mode index.
        """
        if num_modes is None:
            num_modes = num_vecs
        coeff_array = (
            np.random.random((num_vecs, num_modes)) +
            1j * np.random.random((num_vecs, num_modes)))
        vec_array = (
            np.random.random((num_states, num_vecs)) +
            1j * np.random.random((num_states, num_vecs)))
        mode_array = vec_array.dot(coeff_array)
        if squeeze:
            coeff_array = coeff_array.squeeze()
        return vec_array, coeff_array, mode_array


    #@unittest.skip('Testing other things')
    def test_lin_combine(self):
        # Set test tolerances
        rtol = 1e-10
        atol = 1e-12

        # Setup
        mode_path = join(self.test_dir, 'mode_%03d.pkl')
        vec_path = join(self.test_dir, 'vec_%03d.pkl')

        # Test cases where number of modes:
        #   less, equal, more than num_states
        #   less, equal, more than num_vecs
        #   less, equal, more than total_num_vecs_in_mem
        # Also check the case of passing a None value to the mode_indices
        # argument.
        num_states = 20
        num_vecs_list = [1, 15, 40]
        num_modes_list = [
            None, 1, 8, 10, 20, 25, 45,
            int(np.ceil(self.total_num_vecs_in_mem / 2.)),
            self.total_num_vecs_in_mem, self.total_num_vecs_in_mem * 2]

        # Check for correct computations
        for num_vecs in num_vecs_list:
            for num_modes in num_modes_list:
                for squeeze in [True, False]:

                    # Generate data and then broadcast to all procs
                    vec_handles = [
                        VecHandlePickle(vec_path % i)
                        for i in range(num_vecs)]
                    vec_array, coeff_array, true_modes =\
                        parallel.call_and_bcast(
                            self.generate_vecs_modes, num_states, num_vecs,
                            num_modes=num_modes, squeeze=squeeze)
                    if parallel.is_rank_zero():
                        for vec_index, vec_handle in enumerate(vec_handles):
                            vec_handle.put(vec_array[:, vec_index])
                    parallel.barrier()

                    # Choose which modes to compute
                    if num_modes is None:
                        mode_idxs_arg = None
                        mode_idxs_vals = range(true_modes.shape[1])
                    elif num_modes == 1:
                        mode_idxs_arg = 0
                        mode_idxs_vals = [0]
                    else:
                        mode_idxs_arg = np.unique(
                            parallel.call_and_bcast(
                                np.random.randint, 0, high=num_modes,
                                size=num_modes // 2))
                        mode_idxs_vals = mode_idxs_arg
                    mode_handles = [
                        VecHandlePickle(mode_path % mode_num)
                        for mode_num in mode_idxs_vals]

                    # Saves modes to files
                    self.vec_space.lin_combine(
                        mode_handles, vec_handles, coeff_array,
                        coeff_array_col_indices=mode_idxs_arg)

                    # Test modes one by one
                    for mode_idx in mode_idxs_vals:
                        computed_mode = VecHandlePickle(
                            mode_path % mode_idx).get()
                        np.testing.assert_allclose(
                            computed_mode, true_modes[:, mode_idx],
                            rtol=rtol, atol=atol)
                    parallel.barrier()

                parallel.barrier()

            parallel.barrier()

        # Test that errors are caught for mismatched dimensions
        mode_handles = [
            VecHandlePickle(mode_path % i) for i in range(10)]
        vec_handles = [
            VecHandlePickle(vec_path % i) for i in range(15)]
        coeffs_array_too_short = np.zeros(
            (len(vec_handles) - 1, len(mode_handles)))
        coeffs_array_too_fat = np.zeros(
            (len(vec_handles), len(mode_handles) + 1))
        index_list_too_long = range(len(mode_handles) + 1)
        self.assertRaises(
            ValueError, self.vec_space.lin_combine, mode_handles, vec_handles,
            coeffs_array_too_short)
        self.assertRaises(
            ValueError, self.vec_space.lin_combine, mode_handles, vec_handles,
            coeffs_array_too_fat)


    #@unittest.skip('Testing other things')
    @unittest.skipIf(parallel.is_distributed(), 'Serial only')
    def test_compute_inner_product_array_types(self):
        num_row_vecs = 4
        num_col_vecs = 6
        num_states = 7

        row_vec_path = join(self.test_dir, 'row_vec_%03d.pkl')
        col_vec_path = join(self.test_dir, 'col_vec_%03d.pkl')

        # Check complex and real data
        for is_complex in [True, False]:

            # Generate data
            row_vec_array = np.random.random((num_states, num_row_vecs))
            col_vec_array = np.random.random((num_states, num_col_vecs))
            if is_complex:
                row_vec_array = row_vec_array + (
                    1j * np.random.random((num_states, num_row_vecs)))
                col_vec_array = col_vec_array + (
                    1j * np.random.random((num_states, num_col_vecs)))

            # Generate handles and save to file
            row_vec_paths = [row_vec_path % i for i in range(num_row_vecs)]
            col_vec_paths = [col_vec_path % i for i in range(num_col_vecs)]
            row_vec_handles = [
                VecHandlePickle(path) for path in row_vec_paths]
            col_vec_handles = [
                VecHandlePickle(path) for path in col_vec_paths]
            for idx, handle in enumerate(row_vec_handles):
                handle.put(row_vec_array[:, idx])
            for idx, handle in enumerate(col_vec_handles):
                handle.put(col_vec_array[:, idx])

            # Compute inner product array and check type
            inner_product_array = self.vec_space.compute_inner_product_array(
                row_vec_handles, col_vec_handles)
            symm_inner_product_array =\
                self.vec_space.compute_symm_inner_product_array(
                    row_vec_handles)
            self.assertEqual(inner_product_array.dtype, row_vec_array.dtype)
            self.assertEqual(
                symm_inner_product_array.dtype, row_vec_array.dtype)


    #@unittest.skip('Testing other things')
    def test_compute_inner_product_arrays(self):
        """Test computation of array of inner products."""
        rtol = 1e-10
        atol = 1e-12

        num_row_vecs_list = [
            1,
            int(round(self.total_num_vecs_in_mem / 2.)),
            self.total_num_vecs_in_mem,
            self.total_num_vecs_in_mem * 2,
            parallel.get_num_procs() + 1]
        num_col_vecs_list = num_row_vecs_list
        num_states = 6

        row_vec_path = join(self.test_dir, 'row_vec_%03d.pkl')
        col_vec_path = join(self.test_dir, 'col_vec_%03d.pkl')

        for num_row_vecs in num_row_vecs_list:
            for num_col_vecs in num_col_vecs_list:

                # Generate vecs
                parallel.barrier()
                row_vec_array = (
                    parallel.call_and_bcast(
                        np.random.random, (num_states, num_row_vecs))
                    + 1j * parallel.call_and_bcast(
                        np.random.random, (num_states, num_row_vecs)))
                col_vec_array = (
                    parallel.call_and_bcast(
                        np.random.random, (num_states, num_col_vecs))
                    + 1j * parallel.call_and_bcast(
                        np.random.random, (num_states, num_col_vecs)))
                row_vec_handles = [
                    VecHandlePickle(row_vec_path % i)
                    for i in range(num_row_vecs)]
                col_vec_handles = [
                    VecHandlePickle(col_vec_path % i)
                    for i in range(num_col_vecs)]

                # Save vecs
                if parallel.is_rank_zero():
                    for i, h in enumerate(row_vec_handles):
                        h.put(row_vec_array[:, i])
                    for i, h in enumerate(col_vec_handles):
                        h.put(col_vec_array[:, i])
                parallel.barrier()

                # If number of rows/cols is 1, check case of passing a handle
                if len(row_vec_handles) == 1:
                    row_vec_handles = row_vec_handles[0]
                if len(col_vec_handles) == 1:
                    col_vec_handles = col_vec_handles[0]

                # Test ip computation.
                product_true = np.dot(row_vec_array.conj().T, col_vec_array)
                product_computed = self.vec_space.compute_inner_product_array(
                    row_vec_handles, col_vec_handles)
                np.testing.assert_allclose(
                    product_computed, product_true, rtol=rtol, atol=atol)

                # Test symm ip computation
                product_true = np.dot(row_vec_array.conj().T, row_vec_array)
                product_computed =\
                    self.vec_space.compute_symm_inner_product_array(
                        row_vec_handles)
                np.testing.assert_allclose(
                    product_computed, product_true, rtol=rtol, atol=atol)
Example #10
class TestUtil(unittest.TestCase):
    """Tests all of the functions in util.py

    To test all parallel features, use "mpiexec -n 2 python testutil.py"
    """
    def setUp(self):
        self.test_dir = 'DELETE_ME_test_files_util'
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        if parallel.is_rank_zero():
            if not os.path.isdir(self.test_dir):
                os.mkdir(self.test_dir)

    def tearDown(self):
        parallel.barrier()
        if parallel.is_rank_zero():
            rmtree(self.test_dir, ignore_errors=True)
        parallel.barrier()

    @unittest.skipIf(parallel.is_distributed(),
                     'Only save/load matrices in serial')
    def test_load_save_array_text(self):
        """Test that can read/write text matrices"""
        #tol = 1e-8
        rows = [1, 5, 20]
        cols = [1, 4, 5, 23]
        mat_path = join(self.test_dir, 'test_matrix.txt')
        delimiters = [',', ' ', ';']
        for delimiter in delimiters:
            for is_complex in [False, True]:
                for squeeze in [False, True]:
                    for num_rows in rows:
                        for num_cols in cols:
                            mat_real = np.random.random((num_rows, num_cols))
                            if is_complex:
                                mat_imag = np.random.random(
                                    (num_rows, num_cols))
                                mat = mat_real + 1J * mat_imag
                            else:
                                mat = mat_real
                            # Check row and column vectors, no squeeze (1,1)
                            if squeeze and (num_rows > 1 or num_cols > 1):
                                mat = np.squeeze(mat)
                            util.save_array_text(mat,
                                                 mat_path,
                                                 delimiter=delimiter)
                            mat_read = util.load_array_text(
                                mat_path,
                                delimiter=delimiter,
                                is_complex=is_complex)
                            if squeeze:
                                mat_read = np.squeeze(mat_read)
                            np.testing.assert_allclose(mat_read,
                                                       mat)  #,rtol=tol)

    @unittest.skipIf(parallel.is_distributed(), 'Only load matrices in serial')
    def test_svd(self):
        # Set tolerance for testing eigval/eigvec property
        test_tol = 1e-10

        # Check tall, fat, and square matrices
        num_rows_list = [100]
        num_cols_list = [50, 100, 150]

        # Loop through different matrix sizes
        for num_rows in num_rows_list:
            for num_cols in num_cols_list:

                # Generate a random matrix with elements in [0, 1]
                mat = np.random.random((num_rows, num_cols))

                # Compute full set of singular values to help choose tolerance
                # levels that guarantee truncation (otherwise tests won't
                # actually check those features).
                sing_vals_full = np.linalg.svd(mat, full_matrices=0)[1]
                atol_list = [np.median(sing_vals_full), None]
                rtol_list = [
                    np.median(sing_vals_full) / np.max(sing_vals_full), None
                ]

                # Loop through different tolerance cases
                for atol in atol_list:
                    for rtol in rtol_list:

                        # For all matrices, check that the output of util.svd
                        # satisfies the definition of an SVD.  Do this by
                        # checking eigval/eigvec properties, which must be
                        # satisfied by the sing vecs and sing vals, even if
                        # there is truncation.  The fact that the singular
                        # vectors are eigenvectors of a normal matrix ensures
                        # that they are unitary, so we don't have to check that
                        # separately.
                        L_sing_vecs, sing_vals, R_sing_vecs = util.svd(
                            mat, atol=atol, rtol=rtol)
                        np.testing.assert_allclose(
                            np.dot(np.dot(mat, mat.T), L_sing_vecs) -
                            np.dot(L_sing_vecs, np.diag(sing_vals**2)),
                            np.zeros(L_sing_vecs.shape),
                            atol=test_tol)
                        np.testing.assert_allclose(
                            np.dot(np.dot(mat.T, mat), R_sing_vecs) -
                            np.dot(R_sing_vecs, np.diag(sing_vals**2)),
                            np.zeros(R_sing_vecs.shape),
                            atol=test_tol)

                        # If either tolerance is nonzero, make sure that
                        # something is actually truncated, otherwise force test
                        # to quit.  To do this, make sure the eigvec matrix is
                        # not square.
                        if rtol and sing_vals.size == sing_vals_full.size:
                            raise ValueError(
                                'Failed to choose relative tolerance that '
                                'forces truncation.')
                        if atol and sing_vals.size == sing_vals_full.size:
                            raise ValueError(
                                'Failed to choose absolute tolerance that '
                                'forces truncation.')

                        # If necessary, test that tolerances are satisfied
                        if atol:
                            self.assertTrue(abs(sing_vals[-1]) > atol)
                        if rtol:
                            self.assertTrue(
                                abs(sing_vals[-1]) / abs(sing_vals[0]) > rtol)

    @unittest.skipIf(parallel.is_distributed(), 'Only load matrices in serial')
    def test_eigh(self):
        # Set tolerance for test of eigval/eigvec properties.  Value necessary
        # for test to pass depends on matrix size, as well as atol and rtol
        # values
        test_tol = 1e-12

        # Generate random matrix
        num_rows = 100

        # Test matrices that are and are not positive definite
        for is_pos_def in [True, False]:

            # Generate random matrix with values between 0 and 1
            mat = np.random.random((num_rows, num_rows))

            # Make matrix symmetric.  Note that if the matrix is large, for
            # some reason an in-place operation causes the operation to fail
            # (not sure why...).  Values are still between 0 and 1.
            mat = 0.5 * (mat + mat.T)

            # If necessary, make the matrix positive definite by first making
            # it symmetric (adding the transpose), and then making it
            # diagonally dominant (each element is less than 1, so add N * I to
            # make the diagonal dominant).  Here an in-place change seems to be
            # ok.
            if is_pos_def:
                mat = mat + num_rows * np.eye(num_rows)

                # Make sure matrix is positive definite, otherwise
                # force test to quit.
                if np.linalg.eig(mat)[0].min() < 0.:
                    raise ValueError(
                        'Failed to generate positive definite matrix '
                        'for test.')

            # Compute full set of eigenvalues to help choose tolerance levels
            # that guarantee truncation (otherwise tests won't actually check
            # those features).
            eigvals_full = np.linalg.eig(mat)[0]
            atol_list = [np.median(abs(eigvals_full)), None]
            rtol_list = [
                np.median(abs(eigvals_full)) / abs(np.max(eigvals_full)), None
            ]

            # Loop through different tolerance values
            for atol in atol_list:
                for rtol in rtol_list:

                    # For each case, test that returned values are in fact
                    # eigenvalues and eigenvectors of the given matrix.  Since
                    # each pair that is returned (not truncated due to
                    # tolerances) should have this property, we can test this
                    # even if tolerances are passed in.  Compare to the zero
                    # matrix because then we only have to check the absolute
                    # magnitude of each term, rather than considering relative
                    # errors with respect to nonzero terms.
                    eigvals, eigvecs = util.eigh(
                        mat,
                        rtol=rtol,
                        atol=atol,
                        is_positive_definite=is_pos_def)
                    np.testing.assert_allclose(
                        np.dot(mat, eigvecs) -
                        np.dot(eigvecs, np.diag(eigvals)),
                        np.zeros(eigvecs.shape),
                        atol=test_tol)

                    # If either tolerance is nonzero, make sure that something
                    # is actually truncated, otherwise force test to quit.  To
                    # do this, make sure the eigvec matrix is not square.
                    if rtol and eigvals.size == eigvals_full.size:
                        raise ValueError(
                            'Failed to choose relative tolerance that forces '
                            'truncation.')
                    if atol and eigvals.size == eigvals_full.size:
                        raise ValueError(
                            'Failed to choose absolute tolerance that forces '
                            'truncation.')

                    # If the positive definite flag is passed in, make sure the
                    # returned eigenvalues are all positive
                    if is_pos_def:
                        self.assertTrue(eigvals.min() > 0)

                    # If a relative tolerance is passed in, make sure the
                    # relative tolerance is satisfied.
                    if rtol is not None:
                        self.assertTrue(
                            abs(eigvals).min() / abs(eigvals).max() > rtol)

                    # If an absolute tolerance is passed in, make sure the
                    # absolute tolerance is satisfied.
                    if atol is not None:
                        self.assertTrue(abs(eigvals).min() > atol)

    @unittest.skipIf(parallel.is_distributed(), 'Only load matrices in serial')
    def test_eig_biorthog(self):
        test_tol = 1e-10
        num_rows = 100
        mat = np.random.random((num_rows, num_rows))
        for scale_choice in ['left', 'right']:
            R_eigvals, R_eigvecs, L_eigvecs = util.eig_biorthog(
                mat, scale_choice=scale_choice)

            # Check eigenvector/eigenvalue relationship (use right eigenvalues
            # only).  Test difference so that all values are compared to zeros,
            # avoiding need to check relative tolerances.
            np.testing.assert_allclose(np.dot(mat, R_eigvecs) -
                                       np.dot(R_eigvecs, np.diag(R_eigvals)),
                                       np.zeros(mat.shape),
                                       atol=test_tol)
            np.testing.assert_allclose(np.dot(L_eigvecs.conj().T, mat) -
                                       np.dot(np.diag(R_eigvals),
                                              L_eigvecs.conj().T),
                                       np.zeros(mat.shape),
                                       atol=test_tol)

            # Check biorthogonality (take magnitudes since inner products are
            # complex in general).  Again, take difference so that all test
            # values should be zero, avoiding need for rtol
            ip_mat = np.dot(L_eigvecs.conj().T, R_eigvecs)
            np.testing.assert_allclose(np.abs(ip_mat) - np.eye(num_rows),
                                       np.zeros(ip_mat.shape),
                                       atol=test_tol)

            # Check for unit norms
            if scale_choice == 'left':
                unit_eigvecs = R_eigvecs
            elif scale_choice == 'right':
                unit_eigvecs = L_eigvecs
            np.testing.assert_allclose(
                np.sqrt(
                    np.sum(np.multiply(unit_eigvecs, unit_eigvecs.conj()),
                           axis=0)).squeeze(), np.ones((1, R_eigvals.size)))

        # Check that error is raised for invalid scale choice
        self.assertRaises(ValueError, util.eig_biorthog, mat,
                          **{'scale_choice': 'invalid'})
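
A possible serial sketch of the biorthogonal eigendecomposition being tested, using scipy.linalg.eig with left eigenvectors. Rescaling only one set (chosen by scale_choice) leaves the other set with unit norms, which is what the norm check above relies on. The function name and details are illustrative, not the modred implementation.

import numpy as np
from scipy.linalg import eig

def eig_biorthog_sketch(mat, scale_choice='left'):
    """Right/left eigenvectors of mat, rescaled so L^H R = identity."""
    eigvals, L_eigvecs, R_eigvecs = eig(mat, left=True, right=True)
    # Diagonal of L^H R; each entry couples one left/right eigenvector pair
    scale = np.sum(L_eigvecs.conj() * R_eigvecs, axis=0)
    if scale_choice == 'left':
        L_eigvecs = L_eigvecs / scale.conj()   # right set keeps unit norm
    elif scale_choice == 'right':
        R_eigvecs = R_eigvecs / scale          # left set keeps unit norm
    else:
        raise ValueError('scale_choice must be "left" or "right"')
    return eigvals, R_eigvecs, L_eigvecs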

    @unittest.skipIf(parallel.is_distributed(), 'Only load data in serial')
    def test_load_impulse_outputs(self):
        """
        Test loading multiple signal files in [t sig1 sig2 ...] format.

        Creates signals, saves them, loads them, and tests that the loaded
        signals equal the originals.
        """
        signal_path = join(self.test_dir, 'file%03d.txt')
        for num_paths in [1, 4]:
            for num_signals in [1, 2, 4, 5]:
                for num_time_steps in [1, 10, 100]:
                    all_signals_true = np.random.random(
                        (num_paths, num_time_steps, num_signals))
                    # Time steps need not be sequential
                    time_values_true = np.random.random(num_time_steps)

                    signal_paths = []
                    # Save signals to file
                    for path_num in range(num_paths):
                        signal_paths.append(signal_path % path_num)
                        data_to_save = np.concatenate(
                            (time_values_true.reshape(len(time_values_true), 1),
                             all_signals_true[path_num]),
                            axis=1)
                        util.save_array_text(data_to_save,
                                             signal_path % path_num)

                    time_values, all_signals = util.load_multiple_signals(
                        signal_paths)
                    np.testing.assert_allclose(all_signals, all_signals_true)
                    np.testing.assert_allclose(time_values, time_values_true)
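
A rough sketch of the loader being tested, assuming the files are plain whitespace-delimited text with time in the first column and one signal per remaining column (the format written above). This is illustrative only; util.load_multiple_signals may handle headers, complex data, or error checking differently.

import numpy as np

def load_multiple_signals_sketch(signal_paths):
    """Load [t sig1 sig2 ...] text files that share a common time column."""
    time_values = None
    all_signals = []
    for path in signal_paths:
        data = np.atleast_2d(np.loadtxt(path))  # (num_time_steps, 1 + num_signals)
        if time_values is None:
            time_values = data[:, 0]
        all_signals.append(data[:, 1:])
    # Stack into shape (num_paths, num_time_steps, num_signals)
    return time_values, np.array(all_signals)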

    @unittest.skipIf(parallel.is_distributed(), 'Serial only.')
    def test_solve_Lyapunov(self):
        """Test the discrete Lyapunov solvers against a known solution from
        Matlab's dlyap."""
        A = np.array([[0.725404224946106, 0.714742903826096],
                      [-0.063054873189656, -0.204966058299775]])
        Q = np.array([[0.318765239858981, -0.433592022305684],
                      [-1.307688296305273, 0.342624466538650]])
        X_true = np.array([[-0.601761400231752, -0.351368789021923],
                           [-1.143398707577891, 0.334986522655114]])
        X_computed = util.solve_Lyapunov_direct(A, Q)
        np.testing.assert_allclose(X_computed, X_true)
        X_computed_mats = util.solve_Lyapunov_direct(np.mat(A), np.mat(Q))
        np.testing.assert_allclose(X_computed_mats, X_true)

        X_computed = util.solve_Lyapunov_iterative(A, Q)
        np.testing.assert_allclose(X_computed, X_true)
        X_computed_mats = util.solve_Lyapunov_iterative(np.mat(A), np.mat(Q))
        np.testing.assert_allclose(X_computed_mats, X_true)
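
The equation solved here is the discrete-time Lyapunov equation in Matlab's dlyap convention, A X A^T - X + Q = 0. As an independent cross-check (a sketch assuming SciPy is available; not part of the original test), scipy.linalg.solve_discrete_lyapunov solves the same equation:

import numpy as np
from scipy.linalg import solve_discrete_lyapunov

A = np.array([[0.725404224946106, 0.714742903826096],
              [-0.063054873189656, -0.204966058299775]])
Q = np.array([[0.318765239858981, -0.433592022305684],
              [-1.307688296305273, 0.342624466538650]])
X = solve_discrete_lyapunov(A, Q)
# Residual of A X A^T - X + Q should be numerically zero
print(np.allclose(A.dot(X).dot(A.T) - X + Q, np.zeros_like(Q)))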

    @unittest.skipIf(parallel.is_distributed(), 'Serial only.')
    def test_balanced_truncation(self):
        """Test balanced system is close to original."""
        for num_inputs in [1, 3]:
            for num_outputs in [1, 4]:
                for num_states in [1, 10]:
                    A, B, C = util.drss(num_states, num_inputs, num_outputs)
                    Ar, Br, Cr = util.balanced_truncation(A, B, C)
                    num_time_steps = 10
                    y = util.impulse(A, B, C, num_time_steps=num_time_steps)
                    yr = util.impulse(Ar,
                                      Br,
                                      Cr,
                                      num_time_steps=num_time_steps)
                    np.testing.assert_allclose(yr, y, rtol=1e-5)
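
For reference, a compact sketch of square-root balanced truncation for a stable discrete-time system, built on its gramians. This is one standard way such a routine could be implemented (assuming SciPy, and a controllable and observable (A, B, C) so the Cholesky factorizations exist); it is not necessarily how util.balanced_truncation works internally.

import numpy as np
from scipy.linalg import solve_discrete_lyapunov

def balanced_truncation_sketch(A, B, C, num_modes=None):
    """Balance (A, B, C) and optionally truncate to num_modes states."""
    # Gramians: A Wc A^T - Wc + B B^T = 0  and  A^T Wo A - Wo + C^T C = 0
    Wc = solve_discrete_lyapunov(A, B.dot(B.T))
    Wo = solve_discrete_lyapunov(A.T, C.T.dot(C))
    Lc = np.linalg.cholesky(Wc)
    Lo = np.linalg.cholesky(Wo)
    U, hsv, Vt = np.linalg.svd(Lo.T.dot(Lc))    # hsv: Hankel singular values
    if num_modes is None:
        num_modes = hsv.size
    U, hsv, Vt = U[:, :num_modes], hsv[:num_modes], Vt[:num_modes]
    T = Lc.dot(Vt.T) * hsv ** -0.5              # balancing transformation
    T_inv = (U * hsv ** -0.5).T.dot(Lo.T)
    return T_inv.dot(A).dot(T), T_inv.dot(B), C.dot(T)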

    @unittest.skipIf(parallel.is_distributed(), 'Serial only.')
    def test_drss(self):
        """Test drss gives correct mat dimensions and stable dynamics."""
        for num_states in [1, 5, 14]:
            for num_inputs in [1, 3, 6]:
                for num_outputs in [1, 2, 3, 7]:
                    A, B, C = util.drss(num_states, num_inputs, num_outputs)
                    self.assertEqual(A.shape, (num_states, num_states))
                    self.assertEqual(B.shape, (num_states, num_inputs))
                    self.assertEqual(C.shape, (num_outputs, num_states))
                    self.assertTrue(np.amax(np.abs(np.linalg.eig(A)[0])) < 1)
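
A simple way to generate such a random stable discrete-time system is to draw random matrices and rescale A so its spectral radius is below one, as in this sketch (illustrative only; util.drss may use a different construction):

import numpy as np

def drss_sketch(num_states, num_inputs, num_outputs, spectral_radius=0.95):
    """Random discrete-time (A, B, C) with A stable (spectral radius < 1)."""
    A = np.random.random((num_states, num_states))
    # Rescale so the largest eigenvalue magnitude equals spectral_radius
    A *= spectral_radius / np.max(np.abs(np.linalg.eig(A)[0]))
    B = np.random.random((num_states, num_inputs))
    C = np.random.random((num_outputs, num_states))
    return A, B, C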

    @unittest.skipIf(parallel.is_distributed(), 'Serial only.')
    def test_lsim(self):
        """Test that lsim output has the right shape; does not check values."""
        for num_states in [1, 4, 9]:
            for num_inputs in [1, 2, 4]:
                for num_outputs in [1, 2, 3, 5]:
                    #print('num_states %d, num_inputs %d, num_outputs %d' % (
                    #    num_states, num_inputs, num_outputs))
                    A, B, C = util.drss(num_states, num_inputs, num_outputs)
                    #print 'Shape of C is',C.shape
                    nt = 5
                    inputs = np.random.random((nt, num_inputs))
                    outputs = util.lsim(A, B, C, inputs)
                    self.assertEqual(outputs.shape, (nt, num_outputs))
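
The shape convention checked here (one row of inputs and outputs per time step) corresponds to the usual state-space recursion. A minimal serial sketch, assuming a zero initial state and no direct feed-through term (whether util.lsim includes a D term is not checked by this test):

import numpy as np

def lsim_sketch(A, B, C, inputs):
    """Simulate x[k+1] = A x[k] + B u[k], y[k] = C x[k] with x[0] = 0."""
    inputs = np.atleast_2d(inputs)
    num_time_steps = inputs.shape[0]
    num_outputs, num_states = C.shape
    outputs = np.zeros((num_time_steps, num_outputs))
    x = np.zeros(num_states)
    for k in range(num_time_steps):
        outputs[k] = C.dot(x)
        x = A.dot(x) + B.dot(inputs[k])
    return outputs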

    @unittest.skipIf(parallel.is_distributed(), 'Serial only.')
    def test_impulse(self):
        """Test impulse response of discrete system"""
        for num_states in [1, 10]:
            for num_inputs in [1, 3]:
                for num_outputs in [1, 2, 3, 5]:
                    A, B, C = util.drss(num_states, num_inputs, num_outputs)
                    # Check the default call, without giving num_time_steps
                    outputs = util.impulse(A, B, C)
                    num_time_steps = len(outputs)
                    outputs_true = np.zeros(
                        (num_time_steps, num_outputs, num_inputs))
                    for ti in range(num_time_steps):
                        outputs_true[ti] = C * (A**ti) * B
                    np.testing.assert_allclose(outputs,
                                               outputs_true,
                                               rtol=1e-7,
                                               atol=1e-7)

                    # Check that num_time_steps can be given as an argument
                    outputs = util.impulse(A,
                                           B,
                                           C,
                                           num_time_steps=num_time_steps)
                    np.testing.assert_allclose(outputs,
                                               outputs_true,
                                               rtol=1e-7,
                                               atol=1e-7)
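
The reference values above are the Markov parameters y[k] = C A^k B. A sketch of an equivalent impulse-response routine follows (illustrative; the rule util.impulse uses to pick the default number of time steps is not reproduced here):

import numpy as np

def impulse_sketch(A, B, C, num_time_steps):
    """Markov parameters y[k] = C A^k B of a discrete-time system."""
    num_outputs, num_states = C.shape
    num_inputs = B.shape[1]
    outputs = np.zeros((num_time_steps, num_outputs, num_inputs))
    A_power = np.identity(num_states)
    for k in range(num_time_steps):
        outputs[k] = C.dot(A_power).dot(B)
        A_power = A_power.dot(A)   # A^(k+1) for the next step
    return outputs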

    def test_Hankel(self):
        """Test forming Hankel matrix from first row and last column."""
        for num_rows in [1, 4, 6]:
            for num_cols in [1, 3, 6]:
                first_row = np.random.random((num_cols))
                last_col = np.random.random((num_rows))
                last_col[0] = first_row[-1]
                Hankel_true = np.zeros((num_rows, num_cols))
                for r in range(num_rows):
                    for c in range(num_cols):
                        if r + c < num_cols:
                            Hankel_true[r, c] = first_row[r + c]
                        else:
                            Hankel_true[r, c] = last_col[r + c - num_cols + 1]
                Hankel_comp = util.Hankel(first_row, last_col)
                np.testing.assert_equal(Hankel_comp, Hankel_true)
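
Since every anti-diagonal of a Hankel matrix is constant, the explicit double loop above can be replaced by a single indexing step. A vectorized sketch with the same (first_row, last_col) argument convention as util.Hankel (an illustration, not the library code):

import numpy as np

def Hankel_sketch(first_row, last_col):
    """Hankel matrix: entry (r, c) depends only on r + c."""
    first_row = np.asarray(first_row)
    last_col = np.asarray(last_col)
    # All distinct values, indexed by the anti-diagonal number r + c
    vals = np.concatenate((first_row, last_col[1:]))
    rows = np.arange(last_col.size)[:, None]
    cols = np.arange(first_row.size)[None, :]
    return vals[rows + cols]
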
Beispiel #11
0
        interval: interval between pairs of time steps, as shown above.

    Returns:
        time_steps: array of integers, time steps [0 1 interval interval+1 ...]
    """
    if num_steps % 2 != 0:
        raise ValueError('num_steps, %d, must be even' % num_steps)
    interval = int(interval)
    time_steps = np.zeros(num_steps, dtype=int)
    time_steps[::2] = interval * np.arange(num_steps // 2)
    time_steps[1::2] = 1 + interval * np.arange(num_steps // 2)
    return time_steps
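
Concretely, make_time_steps(num_steps=6, interval=5) returns [0 1 5 6 10 11]: pairs of consecutive time steps, with successive pairs separated by the given interval. This is the sampled format used by the ERA tests that follow.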


@unittest.skipIf(parallel.is_distributed(), 'Only test ERA in serial')
class testERA(unittest.TestCase):
    def setUp(self):
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'files_ERA_DELETE_ME'
        if not os.path.exists(self.test_dir):
            os.mkdir(self.test_dir)
        self.impulse_file_path = join(self.test_dir, 'impulse_input%03d.txt')


    def tearDown(self):
        """Deletes all of the arrays created by the tests"""
        rmtree(self.test_dir, ignore_errors=True)

Beispiel #12
0
class TestVectorSpaceHandles(unittest.TestCase):
    """Tests of the VectorSpaceHandles class."""
    def setUp(self):
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'DELETE_ME_test_files_vecoperations'
        if not os.path.isdir(self.test_dir) and parallel.is_rank_zero():
            os.mkdir(self.test_dir)

        self.max_vecs_per_proc = 10
        self.total_num_vecs_in_mem = (parallel.get_num_procs() *
                                      self.max_vecs_per_proc)

        self.my_vec_ops = VectorSpaceHandles(inner_product=np.vdot,
                                             verbosity=0)
        self.my_vec_ops.max_vecs_per_proc = self.max_vecs_per_proc

        # Default data members; verbosity is set to 0 (the default is 1) so
        # that messages won't print during tests.
        self.default_data_members = {
            'inner_product': np.vdot,
            'max_vecs_per_node': 10000,
            'max_vecs_per_proc':
                10000 * parallel.get_num_nodes() // parallel.get_num_procs(),
            'verbosity': 0,
            'print_interval': 10,
            'prev_print_time': 0.}
        parallel.barrier()

    def tearDown(self):
        parallel.barrier()
        parallel.call_from_rank_zero(rmtree, self.test_dir, ignore_errors=True)
        parallel.barrier()

    #@unittest.skip('testing other things')
    def test_init(self):
        """Test that constructor arguments are assigned properly."""
        data_members_original = util.get_data_members(
            VectorSpaceHandles(inner_product=np.vdot, verbosity=0))
        self.assertEqual(data_members_original, self.default_data_members)

        max_vecs_per_node = 500
        my_VS = VectorSpaceHandles(inner_product=np.vdot,
                                   max_vecs_per_node=max_vecs_per_node,
                                   verbosity=0)
        data_members = copy.deepcopy(data_members_original)
        data_members['max_vecs_per_node'] = max_vecs_per_node
        data_members['max_vecs_per_proc'] = max_vecs_per_node * \
            parallel.get_num_nodes() // parallel.get_num_procs()
        self.assertEqual(util.get_data_members(my_VS), data_members)

    #@unittest.skip('testing other things')
    def test_sanity_check(self):
        """Test that user-supplied vector objects and functions are checked."""
        nx = 40
        ny = 15
        test_array = np.random.random((nx, ny))

        my_VS = VectorSpaceHandles(inner_product=np.vdot, verbosity=0)
        in_mem_handle = V.VecHandleInMemory(test_array)
        my_VS.sanity_check(in_mem_handle)

        # A vector for the sanity check that redefines multiplication to
        # modify its data in place
        class SanityMultVec(V.Vector):
            def __init__(self, arr):
                self.arr = arr

            def __add__(self, obj):
                f_return = copy.deepcopy(self)
                f_return.arr += obj.arr
                return f_return

            def __mul__(self, a):
                self.arr *= a
                return self

        class SanityAddVec(V.Vector):
            def __init__(self, arr):
                self.arr = arr

            def __add__(self, obj):
                self.arr += obj.arr
                return self

            def __mul__(self, a):
                f_return = copy.deepcopy(self)
                f_return.arr *= a
                return f_return

        def my_IP(vec1, vec2):
            return np.vdot(vec1.arr, vec2.arr)

        my_VS.inner_product = my_IP
        my_sanity_mult_vec = SanityMultVec(test_array)
        self.assertRaises(ValueError, my_VS.sanity_check,
                          V.VecHandleInMemory(my_sanity_mult_vec))
        my_sanity_add_vec = SanityAddVec(test_array)
        self.assertRaises(ValueError, my_VS.sanity_check,
                          V.VecHandleInMemory(my_sanity_add_vec))

    def generate_vecs_modes(self, num_states, num_vecs, num_modes):
        """Generates random vecs and finds the modes.

        Returns:
            vec_array: matrix in which each column is a vec (in order)
            mode_indices: unordered list of integers representing mode indices,
                each entry is unique. Mode indices are picked randomly.
            build_coeff_mat: matrix num_vecs x num_modes, random entries
            mode_array: matrix of modes, each column is a mode.
                matrix column # = mode_index
        """
        mode_indices = list(range(num_modes))
        random.shuffle(mode_indices)
        build_coeff_mat = np.mat(np.random.random((num_vecs, num_modes)))
        vec_array = np.mat(np.random.random((num_states, num_vecs)))
        mode_array = vec_array * build_coeff_mat
        return vec_array, mode_indices, build_coeff_mat, mode_array

    #@unittest.skip('testing other things')
    def test_lin_combine(self):
        num_vecs_list = [1, 15, 40]
        num_states = 20
        # Test cases where number of modes:
        #   less, equal, more than num_states
        #   less, equal, more than num_vecs
        #   less, equal, more than total_num_vecs_in_mem
        num_modes_list = [
            1, 8, 10, 20, 25, 45,
            int(np.ceil(self.total_num_vecs_in_mem / 2.)),
            self.total_num_vecs_in_mem, self.total_num_vecs_in_mem * 2]
        mode_path = join(self.test_dir, 'mode_%03d.txt')
        vec_path = join(self.test_dir, 'vec_%03d.txt')

        for num_vecs in num_vecs_list:
            for num_modes in num_modes_list:
                #generate data and then broadcast to all procs
                #print '----- new case ----- '
                #print 'num_vecs =',num_vecs
                #print 'num_states =',num_states
                #print 'num_modes =',num_modes
                #print 'max_vecs_per_node =',max_vecs_per_node
                #print 'index_from =',index_from
                vec_handles = [
                    V.VecHandleArrayText(vec_path % i) for i in range(num_vecs)
                ]
                vec_array, mode_indices, build_coeff_mat, true_modes = \
                    parallel.call_and_bcast(self.generate_vecs_modes,
                    num_states, num_vecs, num_modes)

                if parallel.is_rank_zero():
                    for vec_index, vec_handle in enumerate(vec_handles):
                        vec_handle.put(vec_array[:, vec_index])
                parallel.barrier()
                mode_handles = [
                    V.VecHandleArrayText(mode_path % mode_num)
                    for mode_num in mode_indices
                ]

                # If the coefficient matrix has fewer rows than there are
                # vecs, lin_combine should raise an error
                build_coeff_mat_too_small = \
                    np.zeros((build_coeff_mat.shape[0]-1,
                        build_coeff_mat.shape[1]))
                self.assertRaises(ValueError, self.my_vec_ops.\
                    lin_combine, mode_handles,
                    vec_handles, build_coeff_mat_too_small, mode_indices)

                # Test the case that only one mode is desired,
                # in which case user might pass in an int
                if len(mode_indices) == 1:
                    mode_indices = mode_indices[0]
                    mode_handles = mode_handles[0]

                # Saves modes to files
                self.my_vec_ops.lin_combine(mode_handles, vec_handles,
                                            build_coeff_mat, mode_indices)

                # Change back to a list so it is iterable
                if not isinstance(mode_indices, list):
                    mode_indices = [mode_indices]

                parallel.barrier()
                #print 'mode_indices',mode_indices
                for mode_index in mode_indices:
                    computed_mode = V.VecHandleArrayText(mode_path %
                                                         mode_index).get()
                    #print('mode index', mode_index)
                    #print('true mode', true_modes[:, mode_index])
                    #print('computed mode', computed_mode)
                    np.testing.assert_allclose(computed_mode,
                                               true_modes[:, mode_index])

                parallel.barrier()

        parallel.barrier()
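
The quantity being checked above is mode_i = sum_j vec_j * build_coeff_mat[j, i]. A serial, in-memory sketch of the same operation is shown below as a reference only; the real lin_combine works through handles and, per the setUp above, is limited by max_vecs_per_proc and may run across MPI ranks.

import numpy as np

def lin_combine_sketch(mode_handles, vec_handles, build_coeff_mat, mode_indices):
    """Serial reference: write the requested columns of vecs @ coeffs to handles."""
    vec_array = np.column_stack(
        [np.asarray(handle.get()).reshape(-1) for handle in vec_handles])
    modes = vec_array.dot(np.asarray(build_coeff_mat))
    for handle, mode_index in zip(mode_handles, mode_indices):
        handle.put(modes[:, mode_index])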

    #@unittest.skip('testing others')
    @unittest.skipIf(parallel.is_distributed(), 'Serial only')
    def test_compute_inner_product_mat_types(self):
        class ArrayTextComplexHandle(V.VecHandleArrayText):
            def get(self):
                return (1 + 1j) * V.VecHandleArrayText.get(self)

        num_row_vecs = 4
        num_col_vecs = 6
        num_states = 7

        row_vec_path = join(self.test_dir, 'row_vec_%03d.txt')
        col_vec_path = join(self.test_dir, 'col_vec_%03d.txt')

        # generate vecs and save to file
        row_vec_array = np.mat(np.random.random((num_states, num_row_vecs)))
        col_vec_array = np.mat(np.random.random((num_states, num_col_vecs)))
        row_vec_paths = []
        col_vec_paths = []
        for vec_index in range(num_row_vecs):
            path = row_vec_path % vec_index
            util.save_array_text(row_vec_array[:, vec_index], path)
            row_vec_paths.append(path)
        for vec_index in range(num_col_vecs):
            path = col_vec_path % vec_index
            util.save_array_text(col_vec_array[:, vec_index], path)
            col_vec_paths.append(path)

        # Compute inner product matrix and check type
        for handle, dtype in [(V.VecHandleArrayText, float),
                              (ArrayTextComplexHandle, complex)]:
            row_vec_handles = [handle(path) for path in row_vec_paths]
            col_vec_handles = [handle(path) for path in col_vec_paths]
            inner_product_mat = self.my_vec_ops.compute_inner_product_mat(
                row_vec_handles, col_vec_handles)
            symm_inner_product_mat = \
                self.my_vec_ops.compute_symmetric_inner_product_mat(
                    row_vec_handles)
            self.assertEqual(inner_product_mat.dtype, dtype)
            self.assertEqual(symm_inner_product_mat.dtype, dtype)

    #@unittest.skip('testing other things')
    def test_compute_inner_product_mats(self):
        """Test computation of matrix of inner products."""
        num_row_vecs_list = [
            1,
            int(round(self.total_num_vecs_in_mem / 2.)),
            self.total_num_vecs_in_mem, self.total_num_vecs_in_mem * 2,
            parallel.get_num_procs() + 1
        ]
        num_col_vecs_list = num_row_vecs_list
        num_states = 6

        row_vec_path = join(self.test_dir, 'row_vec_%03d.txt')
        col_vec_path = join(self.test_dir, 'col_vec_%03d.txt')

        for num_row_vecs in num_row_vecs_list:
            for num_col_vecs in num_col_vecs_list:
                # generate vecs
                parallel.barrier()
                row_vec_array = parallel.call_and_bcast(
                    np.random.random, (num_states, num_row_vecs))
                col_vec_array = parallel.call_and_bcast(
                    np.random.random, (num_states, num_col_vecs))
                row_vec_handles = [
                    V.VecHandleArrayText(row_vec_path % i)
                    for i in range(num_row_vecs)
                ]
                col_vec_handles = [
                    V.VecHandleArrayText(col_vec_path % i)
                    for i in range(num_col_vecs)
                ]

                # Save vecs
                if parallel.is_rank_zero():
                    for i, h in enumerate(row_vec_handles):
                        h.put(row_vec_array[:, i])
                    for i, h in enumerate(col_vec_handles):
                        h.put(col_vec_array[:, i])
                parallel.barrier()

                # If number of rows/cols is 1, check case of passing a handle
                if len(row_vec_handles) == 1:
                    row_vec_handles = row_vec_handles[0]
                if len(col_vec_handles) == 1:
                    col_vec_handles = col_vec_handles[0]

                # Test IP computation.
                product_true = np.dot(row_vec_array.T, col_vec_array)
                product_computed = self.my_vec_ops.compute_inner_product_mat(
                    row_vec_handles, col_vec_handles)
                row_vecs = [row_vec_array[:, i] for i in range(num_row_vecs)]
                col_vecs = [col_vec_array[:, i] for i in range(num_col_vecs)]
                np.testing.assert_allclose(product_computed, product_true)

                # Test symm IP computation
                product_true = np.dot(row_vec_array.T, row_vec_array)
                product_computed = \
                    self.my_vec_ops.compute_symmetric_inner_product_mat(
                        row_vec_handles)
                row_vecs = [row_vec_array[:, i] for i in range(num_row_vecs)]
                np.testing.assert_allclose(product_computed, product_true)
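
As a closing reference, the quantity these tests check is simply the matrix of inner products between two sets of vectors. A serial sketch follows (ignoring the out-of-core chunking and MPI distribution that VectorSpaceHandles is designed for; the names are illustrative):

import numpy as np

def inner_product_mat_sketch(row_vec_handles, col_vec_handles,
                             inner_product=np.vdot):
    """Serial reference: IP_mat[i, j] = <row_vec_i, col_vec_j>."""
    row_vecs = [handle.get() for handle in row_vec_handles]
    col_vecs = [handle.get() for handle in col_vec_handles]
    # np.array infers a real or complex dtype from the inner products
    return np.array([[inner_product(row_vec, col_vec)
                      for col_vec in col_vecs]
                     for row_vec in row_vecs])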