Example 1
    def test_compute_inner_product_mats(self):
        """Test computation of matrix of inner products."""
        num_row_vecs_list = [
            1,
            int(round(self.total_num_vecs_in_mem / 2.)),
            self.total_num_vecs_in_mem, self.total_num_vecs_in_mem * 2,
            parallel.get_num_procs() + 1
        ]
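        # The list covers a single vec, fewer/equal/more vecs than fit in
        # memory at once, and more vecs than there are MPI workers, to
        # exercise the chunked and distributed code paths.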
        num_col_vecs_list = num_row_vecs_list
        num_states = 6

        row_vec_path = join(self.test_dir, 'row_vec_%03d.txt')
        col_vec_path = join(self.test_dir, 'col_vec_%03d.txt')

        for num_row_vecs in num_row_vecs_list:
            for num_col_vecs in num_col_vecs_list:
                # Generate vecs
                parallel.barrier()
                row_vec_array = parallel.call_and_bcast(
                    np.random.random, (num_states, num_row_vecs))
                col_vec_array = parallel.call_and_bcast(
                    np.random.random, (num_states, num_col_vecs))
                row_vec_handles = [
                    V.VecHandleArrayText(row_vec_path % i)
                    for i in range(num_row_vecs)
                ]
                col_vec_handles = [
                    V.VecHandleArrayText(col_vec_path % i)
                    for i in range(num_col_vecs)
                ]

                # Save vecs
                if parallel.is_rank_zero():
                    for i, h in enumerate(row_vec_handles):
                        h.put(row_vec_array[:, i])
                    for i, h in enumerate(col_vec_handles):
                        h.put(col_vec_array[:, i])
                parallel.barrier()

                # If number of rows/cols is 1, check case of passing a handle
                if len(row_vec_handles) == 1:
                    row_vec_handles = row_vec_handles[0]
                if len(col_vec_handles) == 1:
                    col_vec_handles = col_vec_handles[0]

                # Test IP computation.
                product_true = np.dot(row_vec_array.T, col_vec_array)
                product_computed = self.my_vec_ops.compute_inner_product_mat(
                    row_vec_handles, col_vec_handles)
                np.testing.assert_allclose(product_computed, product_true)

                # Test symm IP computation
                product_true = np.dot(row_vec_array.T, row_vec_array)
                product_computed = \
                    self.my_vec_ops.compute_symmetric_inner_product_mat(
                        row_vec_handles)
                np.testing.assert_allclose(product_computed, product_true)
Example 2
    def _helper_get_impulse_response_handles(self, num_inputs, num_outputs):
        # Get state space system
        A, B, C = parallel.call_and_bcast(get_system_arrays, self.num_states,
                                          num_inputs, num_outputs)

        # Run impulse responses
        direct_vec_array = parallel.call_and_bcast(
            get_direct_impulse_response_array, A, B, self.num_steps)
        adjoint_vec_array = parallel.call_and_bcast(
            get_adjoint_impulse_response_array, A, C, self.num_steps,
            np.identity(self.num_states))

        # Save data to disk
        direct_vec_handles = [
            VecHandlePickle(self.direct_vec_path % i)
            for i in range(direct_vec_array.shape[1])
        ]
        adjoint_vec_handles = [
            VecHandlePickle(self.adjoint_vec_path % i)
            for i in range(adjoint_vec_array.shape[1])
        ]
        if parallel.is_rank_zero():
            for idx, handle in enumerate(direct_vec_handles):
                handle.put(direct_vec_array[:, idx])
            for idx, handle in enumerate(adjoint_vec_handles):
                handle.put(adjoint_vec_array[:, idx])

        parallel.barrier()
        return direct_vec_handles, adjoint_vec_handles
Example 3
    def setUp(self):
        # Specify output locations
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'files_POD_DELETE_ME'
        if not os.path.isdir(self.test_dir):
            parallel.call_from_rank_zero(os.mkdir, self.test_dir)
        self.vec_path = join(self.test_dir, 'vec_%03d.pkl')
        self.mode_path = join(self.test_dir, 'mode_%03d.pkl')

        # Specify data dimensions
        self.num_states = 30
        self.num_vecs = 10

        # Generate random data and write to disk using handles
        self.vecs_array = (
            parallel.call_and_bcast(
                np.random.random, (self.num_states, self.num_vecs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (self.num_states, self.num_vecs)))
        self.vec_handles = [
            VecHandlePickle(self.vec_path % i) for i in range(self.num_vecs)]
        for idx, hdl in enumerate(self.vec_handles):
            hdl.put(self.vecs_array[:, idx])

        parallel.barrier()
Example 4
    def setUp(self):
        # Specify output locations
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'files_BPOD_DELETE_ME'
        if not os.path.isdir(self.test_dir):
            parallel.call_from_rank_zero(os.mkdir, self.test_dir)
        self.direct_vec_path = join(self.test_dir, 'direct_vec_%03d.pkl')
        self.adjoint_vec_path = join(self.test_dir, 'adjoint_vec_%03d.pkl')
        self.direct_mode_path = join(self.test_dir, 'direct_mode_%03d.pkl')
        self.adjoint_mode_path = join(self.test_dir, 'adjoint_mode_%03d.pkl')

        # Specify system dimensions.  Test single inputs/outputs as well as
        # multiple inputs/outputs.  Also allow for more inputs/outputs than
        # states.
        self.num_states = 10
        self.num_inputs_list = [
            1,
            parallel.call_and_bcast(np.random.randint, 2, self.num_states + 2)
        ]
        self.num_outputs_list = [
            1,
            parallel.call_and_bcast(np.random.randint, 2, self.num_states + 2)
        ]

        # Specify how long to run impulse responses
        self.num_steps = self.num_states + 1

        parallel.barrier()
Example 5
    def setUp(self):
        self.test_dir = 'DELETE_ME_test_files_pod'
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        if not os.path.isdir(self.test_dir) and parallel.is_rank_zero():
            os.mkdir(self.test_dir)
        self.mode_indices = [2, 4, 3, 6]
        self.num_vecs = 10
        self.num_states = 30
        self.vec_array = parallel.call_and_bcast(
            np.random.random, (self.num_states, self.num_vecs))
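        # Correlation matrix of snapshot inner products, X^* X (method of
        # snapshots).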
        self.correlation_mat_true = self.vec_array.conj().transpose().dot(
            self.vec_array)

        self.eigvals_true, self.eigvecs_true = \
            parallel.call_and_bcast(util.eigh, self.correlation_mat_true)

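        # POD modes via the method of snapshots: project the snapshots onto
        # the correlation-matrix eigenvectors and scale by eigvals**-0.5.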
        self.mode_array = np.dot(
            self.vec_array,
            np.dot(self.eigvecs_true, np.diag(self.eigvals_true**-0.5)))
        self.vec_path = join(self.test_dir, 'vec_%03d.txt')
        self.vec_handles = [
            V.VecHandleArrayText(self.vec_path % i)
            for i in range(self.num_vecs)
        ]
        for vec_index, handle in enumerate(self.vec_handles):
            handle.put(self.vec_array[:, vec_index])

        self.my_POD = PODHandles(np.vdot, verbosity=0)
        parallel.barrier()
Example 6
    def test_compute_modes(self):
        rtol = 1e-10
        atol = 1e-12

        # Compute POD using modred.  (The properties defining a POD mode require
        # manipulations involving the correct decomposition, so we cannot
        # isolate the mode computation from the decomposition step.)
        POD = pod.PODHandles(np.vdot, verbosity=0)
        POD.compute_decomp(self.vec_handles)

        # Select a subset of modes to compute.  Compute at least half
        # the modes, and up to all of them.  Make sure to use unique
        # values.  (This may reduce the number of modes computed.)
        num_modes = parallel.call_and_bcast(
            np.random.randint,
            POD.eigvals.size // 2, POD.eigvals.size + 1)
        mode_idxs = np.unique(parallel.call_and_bcast(
            np.random.randint,
            0, POD.eigvals.size, num_modes))

        # Create handles for the modes
        mode_handles = [VecHandlePickle(self.mode_path % i) for i in mode_idxs]

        # Compute modes
        POD.compute_modes(mode_idxs, mode_handles, vec_handles=self.vec_handles)

        # Test modes
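        # For unit-norm POD modes u_i (eigenvectors of X X^*), the product
        # <u_i, X> <X, u_k> equals u_i^* X X^* u_k = eigval_k * delta_ik,
        # so the result below should be diag(eigvals[mode_idxs]).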
        np.testing.assert_allclose(
            POD.vec_space.compute_inner_product_array(
                mode_handles, self.vec_handles).dot(
                    POD.vec_space.compute_inner_product_array(
                        self.vec_handles, mode_handles)),
            np.diag(POD.eigvals[mode_idxs]),
            rtol=rtol, atol=atol)
Example 7
    def test_puts_gets(self):
        test_dir = 'DELETE_ME_test_files_pod'
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        if not os.path.isdir(test_dir):
            parallel.call_from_rank_zero(os.mkdir, test_dir)
        num_vecs = 10
        num_states = 30
        correlation_mat_true = parallel.call_and_bcast(np.random.random,
                                                       ((num_vecs, num_vecs)))
        eigvals_true = parallel.call_and_bcast(np.random.random, num_vecs)
        eigvecs_true = parallel.call_and_bcast(np.random.random,
                                               ((num_states, num_vecs)))

        my_POD = PODHandles(None, verbosity=0)
        my_POD.correlation_mat = correlation_mat_true
        my_POD.eigvals = eigvals_true
        my_POD.eigvecs = eigvecs_true

        eigvecs_path = join(test_dir, 'eigvecs.txt')
        eigvals_path = join(test_dir, 'eigvals.txt')
        correlation_mat_path = join(test_dir, 'correlation.txt')
        my_POD.put_decomp(eigvals_path, eigvecs_path)
        my_POD.put_correlation_mat(correlation_mat_path)
        parallel.barrier()

        POD_load = PODHandles(None, verbosity=0)
        POD_load.get_decomp(eigvals_path, eigvecs_path)
        correlation_mat_loaded = util.load_array_text(correlation_mat_path)

        np.testing.assert_allclose(correlation_mat_loaded,
                                   correlation_mat_true)
        np.testing.assert_allclose(POD_load.eigvals, eigvals_true)
        np.testing.assert_allclose(POD_load.eigvecs, eigvecs_true)
Example 8
    def generate_data_set(self, num_basis_vecs, num_adjoint_basis_vecs,
        num_states, num_inputs, num_outputs):
        """Generates random data, saves, and computes true reduced A,B,C."""
        self.basis_vecs = parallel.call_and_bcast(np.random.random,
            (num_states, num_basis_vecs))
        self.adjoint_basis_vecs = parallel.call_and_bcast(np.random.random,
            (num_states, num_adjoint_basis_vecs))
        self.A_array = parallel.call_and_bcast(np.random.random,
            (num_states, num_states))
        self.B_array = parallel.call_and_bcast(np.random.random,
            (num_states, num_inputs))
        self.C_array = parallel.call_and_bcast(np.random.random,
            (num_outputs, num_states))

        self.A_on_basis_vecs = np.dot(self.A_array, self.basis_vecs)
        self.B_on_standard_basis_array = self.B_array
        self.C_on_basis_vecs = self.C_array.dot(self.basis_vecs).squeeze()

        parallel.barrier()

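        # True reduced-order operators from a Petrov-Galerkin projection:
        # A_r = Psi^T A Phi, B_r = Psi^T B, C_r = C Phi, with direct basis
        # Phi and adjoint basis Psi.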
        self.A_true = np.dot(self.adjoint_basis_vecs.T,
            np.dot(self.A_array, self.basis_vecs))
        self.B_true = np.dot(self.adjoint_basis_vecs.T, self.B_array)
        self.C_true = np.dot(self.C_array, self.basis_vecs)
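        # If the bases are not biorthogonal, the projection needs the extra
        # oblique factor inv(Psi^T Phi).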
        self.proj_mat = np.linalg.inv(np.dot(self.adjoint_basis_vecs.T,
            self.basis_vecs))
        self.A_true_nonorth = np.dot(self.proj_mat, self.A_true)
        self.B_true_nonorth = np.dot(self.proj_mat, self.B_true)
Example 9
    def test_puts_gets(self):
        """Test that put/get work in base class."""
        # Generate some random data
        Hankel_array_true = parallel.call_and_bcast(
            np.random.random, ((self.num_states, self.num_states)))
        L_sing_vecs_true, sing_vals_true, R_sing_vecs_true = \
            parallel.call_and_bcast(util.svd, Hankel_array_true)
        direct_proj_coeffs_true = parallel.call_and_bcast(
            np.random.random, ((self.num_steps, self.num_steps)))
        adj_proj_coeffs_true = parallel.call_and_bcast(
            np.random.random, ((self.num_steps, self.num_steps)))

        # Store the data in a BPOD object
        BPOD_save = bpod.BPODHandles(verbosity=0)
        BPOD_save.Hankel_array = Hankel_array_true
        BPOD_save.sing_vals = sing_vals_true
        BPOD_save.L_sing_vecs = L_sing_vecs_true
        BPOD_save.R_sing_vecs = R_sing_vecs_true
        BPOD_save.direct_proj_coeffs = direct_proj_coeffs_true
        BPOD_save.adjoint_proj_coeffs = adj_proj_coeffs_true

        # Use the BPOD object to save the data to disk
        sing_vals_path = join(self.test_dir, 'sing_vals.txt')
        L_sing_vecs_path = join(self.test_dir, 'L_sing_vecs.txt')
        R_sing_vecs_path = join(self.test_dir, 'R_sing_vecs.txt')
        Hankel_array_path = join(self.test_dir, 'Hankel_array.txt')
        direct_proj_coeffs_path = join(self.test_dir, 'direct_proj_coeffs.txt')
        adj_proj_coeffs_path = join(self.test_dir, 'adj_proj_coeffs.txt')
        BPOD_save.put_decomp(sing_vals_path, L_sing_vecs_path,
                             R_sing_vecs_path)
        BPOD_save.put_Hankel_array(Hankel_array_path)
        BPOD_save.put_direct_proj_coeffs(direct_proj_coeffs_path)
        BPOD_save.put_adjoint_proj_coeffs(adj_proj_coeffs_path)

        # Create a BPOD object and use it to load the data from disk
        BPOD_load = bpod.BPODHandles(verbosity=0)
        BPOD_load.get_decomp(sing_vals_path, L_sing_vecs_path,
                             R_sing_vecs_path)
        BPOD_load.get_Hankel_array(Hankel_array_path)
        BPOD_load.get_direct_proj_coeffs(direct_proj_coeffs_path)
        BPOD_load.get_adjoint_proj_coeffs(adj_proj_coeffs_path)

        # Compare loaded data to original data
        np.testing.assert_equal(BPOD_load.sing_vals, sing_vals_true)
        np.testing.assert_equal(BPOD_load.L_sing_vecs, L_sing_vecs_true)
        np.testing.assert_equal(BPOD_load.R_sing_vecs, R_sing_vecs_true)
        np.testing.assert_equal(BPOD_load.Hankel_array, Hankel_array_true)
        np.testing.assert_equal(BPOD_load.direct_proj_coeffs,
                                direct_proj_coeffs_true)
        np.testing.assert_equal(BPOD_load.adjoint_proj_coeffs,
                                adj_proj_coeffs_true)
Example 10
    def test_puts_gets(self):
        """Test that put/get work in base class."""
        # Generate some random data
        Hankel_array_true = parallel.call_and_bcast(
            np.random.random, ((self.num_states, self.num_states)))
        L_sing_vecs_true, sing_vals_true, R_sing_vecs_true = \
            parallel.call_and_bcast(util.svd, Hankel_array_true)
        direct_proj_coeffs_true = parallel.call_and_bcast(
            np.random.random, ((self.num_steps, self.num_steps)))
        adj_proj_coeffs_true = parallel.call_and_bcast(
            np.random.random, ((self.num_steps, self.num_steps)))

        # Store the data in a BPOD object
        BPOD_save = bpod.BPODHandles(None, verbosity=0)
        BPOD_save.Hankel_array = Hankel_array_true
        BPOD_save.sing_vals = sing_vals_true
        BPOD_save.L_sing_vecs = L_sing_vecs_true
        BPOD_save.R_sing_vecs = R_sing_vecs_true
        BPOD_save.direct_proj_coeffs = direct_proj_coeffs_true
        BPOD_save.adjoint_proj_coeffs = adj_proj_coeffs_true

        # Use the BPOD object to save the data to disk
        sing_vals_path = join(self.test_dir, 'sing_vals.txt')
        L_sing_vecs_path = join(self.test_dir, 'L_sing_vecs.txt')
        R_sing_vecs_path = join(self.test_dir, 'R_sing_vecs.txt')
        Hankel_array_path = join(self.test_dir, 'Hankel_array.txt')
        direct_proj_coeffs_path = join(self.test_dir, 'direct_proj_coeffs.txt')
        adj_proj_coeffs_path = join(self.test_dir, 'adj_proj_coeffs.txt')
        BPOD_save.put_decomp(sing_vals_path, L_sing_vecs_path, R_sing_vecs_path)
        BPOD_save.put_Hankel_array(Hankel_array_path)
        BPOD_save.put_direct_proj_coeffs(direct_proj_coeffs_path)
        BPOD_save.put_adjoint_proj_coeffs(adj_proj_coeffs_path)

        # Create a BPOD object and use it to load the data from disk
        BPOD_load = bpod.BPODHandles(None, verbosity=0)
        BPOD_load.get_decomp(sing_vals_path, L_sing_vecs_path, R_sing_vecs_path)
        BPOD_load.get_Hankel_array(Hankel_array_path)
        BPOD_load.get_direct_proj_coeffs(direct_proj_coeffs_path)
        BPOD_load.get_adjoint_proj_coeffs(adj_proj_coeffs_path)

        # Compare loaded data to original data
        np.testing.assert_equal(BPOD_load.sing_vals, sing_vals_true)
        np.testing.assert_equal(BPOD_load.L_sing_vecs, L_sing_vecs_true)
        np.testing.assert_equal(BPOD_load.R_sing_vecs, R_sing_vecs_true)
        np.testing.assert_equal(BPOD_load.Hankel_array, Hankel_array_true)
        np.testing.assert_equal(
            BPOD_load.direct_proj_coeffs, direct_proj_coeffs_true)
        np.testing.assert_equal(
            BPOD_load.adjoint_proj_coeffs, adj_proj_coeffs_true)
Example 11
    def test_put_reduced_arrays(self):
        """Test putting reduced mats"""
        A_reduced_path = join(self.test_dir, 'A.txt')
        B_reduced_path = join(self.test_dir, 'B.txt')
        C_reduced_path = join(self.test_dir, 'C.txt')
        A = parallel.call_and_bcast(np.random.random, ((10, 10)))
        B = parallel.call_and_bcast(np.random.random, ((1, 10)))
        C = parallel.call_and_bcast(np.random.random, ((10, 2)))
        LTI_proj = lgp.LTIGalerkinProjectionBase()
        LTI_proj.A_reduced = A.copy()
        LTI_proj.B_reduced = B.copy()
        LTI_proj.C_reduced = C.copy()
        LTI_proj.put_model(A_reduced_path, B_reduced_path, C_reduced_path)
        np.testing.assert_equal(util.load_array_text(A_reduced_path), A)
        np.testing.assert_equal(util.load_array_text(B_reduced_path), B)
        np.testing.assert_equal(util.load_array_text(C_reduced_path), C)
Example 12
    def generate_data_set(
        self, num_basis_vecs, num_adjoint_basis_vecs,
        num_states, num_inputs, num_outputs):
        """Generates random data, saves, and computes true reduced A, B, C."""
        self.basis_vecs = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_basis_vecs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_basis_vecs)))
        self.adjoint_basis_vecs = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_adjoint_basis_vecs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_adjoint_basis_vecs)))
        self.A_array = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_states)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_states)))
        self.B_array = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_inputs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_inputs)))
        self.C_array = (
            parallel.call_and_bcast(
                np.random.random, (num_outputs, num_states)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_outputs, num_states)))

        self.A_on_basis_vecs = self.A_array.dot(self.basis_vecs)
        self.B_on_standard_basis_array = self.B_array
        self.C_on_basis_vecs = self.C_array.dot(self.basis_vecs).squeeze()

        parallel.barrier()

        self.A_true = self.adjoint_basis_vecs.conj().T.dot(
            self.A_array.dot(
                self.basis_vecs))
        self.B_true = self.adjoint_basis_vecs.conj().T.dot(self.B_array)
        self.C_true = self.C_array.dot(self.basis_vecs)
        self.proj_array = np.linalg.inv(
            self.adjoint_basis_vecs.conj().T.dot(self.basis_vecs))
        self.A_true_non_orth = self.proj_array.dot(self.A_true)
        self.B_true_non_orth = self.proj_array.dot(self.B_true)
Example 13
    def test_call_and_bcast(self):
        """Call a function on rank zero and bcast outputs to all MPI workers."""
        def add_and_scale(arg1, arg2, scale=1):
            return True, scale * (arg1 + arg2)

        outputs = parallel.call_and_bcast(add_and_scale,
                                          parallel.get_rank() + 1,
                                          2,
                                          scale=3)
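        # add_and_scale executes only on rank zero, where get_rank() + 1 == 1,
        # so every rank receives the broadcast result 3 * (1 + 2) == 9.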
        self.assertEqual(outputs, (True, 9))
Example 14
    def test_puts_gets(self):
        # Generate some random data
        correlation_array_true = parallel.call_and_bcast(
            np.random.random, ((self.num_vecs, self.num_vecs)))
        eigvals_true = parallel.call_and_bcast(
            np.random.random, self.num_vecs)
        eigvecs_true = parallel.call_and_bcast(
            np.random.random, ((self.num_states, self.num_vecs)))
        proj_coeffs_true = parallel.call_and_bcast(
            np.random.random, ((self.num_vecs, self.num_vecs)))

        # Create a POD object and store the data in it
        POD_save = pod.PODHandles(None, verbosity=0)
        POD_save.correlation_array = correlation_array_true
        POD_save.eigvals = eigvals_true
        POD_save.eigvecs = eigvecs_true
        POD_save.proj_coeffs = proj_coeffs_true

        # Write the data to disk
        eigvecs_path = join(self.test_dir, 'eigvecs.txt')
        eigvals_path = join(self.test_dir, 'eigvals.txt')
        correlation_array_path = join(self.test_dir, 'correlation.txt')
        proj_coeffs_path = join(self.test_dir, 'proj_coeffs.txt')
        POD_save.put_decomp(eigvals_path, eigvecs_path)
        POD_save.put_correlation_array(correlation_array_path)
        POD_save.put_proj_coeffs(proj_coeffs_path)
        parallel.barrier()

        # Create a new POD object and use it to load the data
        POD_load = pod.PODHandles(None, verbosity=0)
        POD_load.get_decomp(eigvals_path, eigvecs_path)
        POD_load.get_correlation_array(correlation_array_path)
        POD_load.get_proj_coeffs(proj_coeffs_path)

        # Check that the loaded data is correct
        np.testing.assert_equal(POD_load.eigvals, eigvals_true)
        np.testing.assert_equal(POD_load.eigvecs, eigvecs_true)
        np.testing.assert_equal(
            POD_load.correlation_array, correlation_array_true)
        np.testing.assert_equal(POD_load.proj_coeffs, proj_coeffs_true)
Example 15
    def test_compute_modes(self):
        """Test computing modes in serial and parallel."""
        tol = 1e-6
        ws = np.identity(self.num_states)
        ws[0, 0] = 2
        ws[1, 0] = 1.1
        ws[0, 1] = 1.1
        weights_list = [None, np.random.random(self.num_states), ws]
        weights_mats = [
            np.mat(np.identity(self.num_states)),
            np.mat(np.diag(weights_list[1])),
            np.mat(ws)
        ]
        for weights, weights_mat in zip(weights_list, weights_mats):
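            # Adjoint with respect to the weighted inner product
            # <x, y> = x^H W y:  A* = W^{-1} A^H W, C* = W^{-1} C^H.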
            A_adjoint = np.linalg.inv(weights_mat) * self.A.H * weights_mat
            C_adjoint = np.linalg.inv(weights_mat) * self.C.H
            A_adjoint_powers = np.identity(A_adjoint.shape[0])
            adjoint_vecs = []
            for t in range(self.num_adjoint_vecs):
                A_adjoint_powers = A_adjoint_powers.dot(A_adjoint)
                adjoint_vecs.append(A_adjoint_powers.dot(C_adjoint))
            adjoint_vecs = np.array(adjoint_vecs).squeeze().T

            IP = VectorSpaceMatrices(weights=weights).compute_inner_product_mat
            Hankel_mat_true = IP(adjoint_vecs, self.direct_vecs)

            L_sing_vecs_true, sing_vals_true, R_sing_vecs_true = \
                parallel.call_and_bcast(util.svd, Hankel_mat_true)
            direct_modes_array_true = self.direct_vecs.dot(
                R_sing_vecs_true).dot(np.diag(sing_vals_true**-0.5))
            adjoint_modes_array_true = adjoint_vecs.dot(L_sing_vecs_true).dot(
                np.diag(sing_vals_true**-0.5))
            direct_modes_array, adjoint_modes_array, sing_vals, \
                L_sing_vecs, R_sing_vecs, Hankel_mat = \
                compute_BPOD_matrices(self.direct_vecs,
                adjoint_vecs, self.mode_indices, self.mode_indices,
                inner_product_weights=weights, return_all=True)

            np.testing.assert_allclose(Hankel_mat, Hankel_mat_true)
            np.testing.assert_allclose(sing_vals, sing_vals_true)
            np.testing.assert_allclose(L_sing_vecs, L_sing_vecs_true)
            np.testing.assert_allclose(R_sing_vecs, R_sing_vecs_true)
            np.testing.assert_allclose(
                direct_modes_array,
                direct_modes_array_true[:, self.mode_indices],
                rtol=tol,
                atol=tol)
            np.testing.assert_allclose(
                adjoint_modes_array,
                adjoint_modes_array_true[:, self.mode_indices],
                rtol=tol,
                atol=tol)
Example 16
    def test_puts_gets(self):
        """Test that put/get work in base class."""
        test_dir = 'DELETE_ME_test_files_bpod'
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        if not os.path.isdir(test_dir) and parallel.is_rank_zero():
            os.mkdir(test_dir)
        num_vecs = 10
        num_states = 30
        Hankel_mat_true = parallel.call_and_bcast(np.random.random,
                                                  ((num_vecs, num_vecs)))
        L_sing_vecs_true, sing_vals_true, R_sing_vecs_true = \
            parallel.call_and_bcast(util.svd, Hankel_mat_true)

        my_BPOD = BPODHandles(None, verbosity=0)
        my_BPOD.Hankel_mat = Hankel_mat_true
        my_BPOD.sing_vals = sing_vals_true
        my_BPOD.L_sing_vecs = L_sing_vecs_true
        my_BPOD.R_sing_vecs = R_sing_vecs_true

        L_sing_vecs_path = join(test_dir, 'L_sing_vecs.txt')
        R_sing_vecs_path = join(test_dir, 'R_sing_vecs.txt')
        sing_vals_path = join(test_dir, 'sing_vals.txt')
        Hankel_mat_path = join(test_dir, 'Hankel_mat.txt')
        my_BPOD.put_decomp(sing_vals_path, L_sing_vecs_path, R_sing_vecs_path)
        my_BPOD.put_Hankel_mat(Hankel_mat_path)
        parallel.barrier()

        BPOD_load = BPODHandles(None, verbosity=0)

        BPOD_load.get_decomp(sing_vals_path, L_sing_vecs_path,
                             R_sing_vecs_path)
        Hankel_mat_loaded = parallel.call_and_bcast(util.load_array_text,
                                                    Hankel_mat_path)

        np.testing.assert_allclose(Hankel_mat_loaded, Hankel_mat_true)
        np.testing.assert_allclose(BPOD_load.L_sing_vecs, L_sing_vecs_true)
        np.testing.assert_allclose(BPOD_load.R_sing_vecs, R_sing_vecs_true)
        np.testing.assert_allclose(BPOD_load.sing_vals, sing_vals_true)
Example 17
    def test_lin_combine(self):
        # Set test tolerances
        rtol = 1e-10
        atol = 1e-12

        # Setup
        mode_path = join(self.test_dir, 'mode_%03d.pkl')
        vec_path = join(self.test_dir, 'vec_%03d.pkl')

        # Test cases where number of modes:
        #   less, equal, more than num_states
        #   less, equal, more than num_vecs
        #   less, equal, more than total_num_vecs_in_mem
        # Also check the case of passing a None value to the mode_indices
        # argument.
        num_states = 20
        num_vecs_list = [1, 15, 40]
        num_modes_list = [
            None, 1, 8, 10, 20, 25, 45,
            int(np.ceil(self.total_num_vecs_in_mem / 2.)),
            self.total_num_vecs_in_mem, self.total_num_vecs_in_mem * 2]

        # Check for correct computations
        for num_vecs in num_vecs_list:
            for num_modes in num_modes_list:
                for squeeze in [True, False]:

                    # Generate data and then broadcast to all procs
                    vec_handles = [
                        VecHandlePickle(vec_path % i)
                        for i in range(num_vecs)]
                    vec_array, coeff_array, true_modes =\
                        parallel.call_and_bcast(
                            self.generate_vecs_modes, num_states, num_vecs,
                            num_modes=num_modes, squeeze=squeeze)
                    if parallel.is_rank_zero():
                        for vec_index, vec_handle in enumerate(vec_handles):
                            vec_handle.put(vec_array[:, vec_index])
                    parallel.barrier()

                    # Choose which modes to compute
                    if num_modes is None:
                        mode_idxs_arg = None
                        mode_idxs_vals = range(true_modes.shape[1])
                    elif num_modes == 1:
                        mode_idxs_arg = 0
                        mode_idxs_vals = [0]
                    else:
                        mode_idxs_arg = np.unique(
                            parallel.call_and_bcast(
                                np.random.randint, 0, high=num_modes,
                                size=num_modes // 2))
                        mode_idxs_vals = mode_idxs_arg
                    mode_handles = [
                        VecHandlePickle(mode_path % mode_num)
                        for mode_num in mode_idxs_vals]

                    # Save modes to files
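                    # lin_combine forms each requested mode as a linear
                    # combination of the vecs, weighted by the corresponding
                    # column of coeff_array.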
                    self.vec_space.lin_combine(
                        mode_handles, vec_handles, coeff_array,
                        coeff_array_col_indices=mode_idxs_arg)

                    # Test modes one by one
                    for mode_idx in mode_idxs_vals:
                        computed_mode = VecHandlePickle(
                            mode_path % mode_idx).get()
                        np.testing.assert_allclose(
                            computed_mode, true_modes[:, mode_idx],
                            rtol=rtol, atol=atol)
                    parallel.barrier()

                parallel.barrier()

            parallel.barrier()

        # Test that errors are caught for mismatched dimensions
        mode_handles = [
            VecHandlePickle(mode_path % i) for i in range(10)]
        vec_handles = [
            VecHandlePickle(vec_path % i) for i in range(15)]
        coeffs_array_too_short = np.zeros(
            (len(vec_handles) - 1, len(mode_handles)))
        coeffs_array_too_fat = np.zeros(
            (len(vec_handles), len(mode_handles) + 1))
        self.assertRaises(
            ValueError, self.vec_space.lin_combine, mode_handles, vec_handles,
            coeffs_array_too_short)
        self.assertRaises(
            ValueError, self.vec_space.lin_combine, mode_handles, vec_handles,
            coeffs_array_too_fat)
Example 18
    def generate_data_set(self, num_basis_vecs, num_adjoint_basis_vecs,
                          num_states, num_inputs, num_outputs):
        """Generates random data, saves, and computes true reduced A,B,C."""
        self.basis_vec_handles = [
            VecHandlePickle(self.basis_vec_path % i)
            for i in range(self.num_basis_vecs)
        ]
        self.adjoint_basis_vec_handles = [
            VecHandlePickle(self.adjoint_basis_vec_path % i)
            for i in range(self.num_adjoint_basis_vecs)
        ]
        self.A_on_basis_vec_handles = [
            VecHandlePickle(self.A_on_basis_vec_path % i)
            for i in range(self.num_basis_vecs)
        ]
        self.B_on_standard_basis_handles = [
            VecHandlePickle(self.B_on_basis_path % i)
            for i in range(self.num_inputs)
        ]
        self.C_on_basis_vec_handles = [
            VecHandlePickle(self.C_on_basis_vec_path % i)
            for i in range(self.num_basis_vecs)
        ]

        self.basis_vec_array = (
            parallel.call_and_bcast(np.random.random,
                                    (num_states, num_basis_vecs)) +
            1j * parallel.call_and_bcast(np.random.random,
                                         (num_states, num_basis_vecs)))
        self.adjoint_basis_vec_array = (
            parallel.call_and_bcast(np.random.random,
                                    (num_states, num_adjoint_basis_vecs)) +
            1j * parallel.call_and_bcast(np.random.random,
                                         (num_states, num_adjoint_basis_vecs)))
        self.A_array = (parallel.call_and_bcast(np.random.random,
                                                (num_states, num_states)) +
                        1j * parallel.call_and_bcast(np.random.random,
                                                     (num_states, num_states)))
        self.B_array = (parallel.call_and_bcast(np.random.random,
                                                (num_states, num_inputs)) +
                        1j * parallel.call_and_bcast(np.random.random,
                                                     (num_states, num_inputs)))
        self.C_array = (
            parallel.call_and_bcast(np.random.random,
                                    (num_outputs, num_states)) +
            1j * parallel.call_and_bcast(np.random.random,
                                         (num_outputs, num_states)))

        self.basis_vecs = [
            self.basis_vec_array[:, i].squeeze() for i in range(num_basis_vecs)
        ]
        self.adjoint_basis_vecs = [
            self.adjoint_basis_vec_array[:, i].squeeze()
            for i in range(num_adjoint_basis_vecs)
        ]
        self.A_on_basis_vecs = [
            self.A_array.dot(basis_vec).squeeze()
            for basis_vec in self.basis_vecs
        ]
        self.B_on_basis = [
            self.B_array[:, i].squeeze() for i in range(self.num_inputs)
        ]
        self.C_on_basis_vecs = [
            np.array(self.C_array.dot(basis_vec).squeeze(), ndmin=1)
            for basis_vec in self.basis_vecs
        ]

        if parallel.is_rank_zero():
            for handle, vec in zip(self.basis_vec_handles, self.basis_vecs):
                handle.put(vec)
            for handle, vec in zip(self.adjoint_basis_vec_handles,
                                   self.adjoint_basis_vecs):
                handle.put(vec)
            for handle, vec in zip(self.A_on_basis_vec_handles,
                                   self.A_on_basis_vecs):
                handle.put(vec)
            for handle, vec in zip(self.B_on_standard_basis_handles,
                                   self.B_on_basis):
                handle.put(vec)
            for handle, vec in zip(self.C_on_basis_vec_handles,
                                   self.C_on_basis_vecs):
                handle.put(vec)
        parallel.barrier()

        self.A_true = self.adjoint_basis_vec_array.conj().T.dot(
            self.A_array.dot(self.basis_vec_array))
        self.B_true = self.adjoint_basis_vec_array.conj().T.dot(self.B_array)
        self.C_true = self.C_array.dot(self.basis_vec_array)
        self.proj_array = np.linalg.inv(
            self.adjoint_basis_vec_array.conj().T.dot(self.basis_vec_array))
        self.A_true_non_orth = self.proj_array.dot(self.A_true)
        self.B_true_non_orth = self.proj_array.dot(self.B_true)
Example 19
    def test_compute_inner_product_arrays(self):
        """Test computation of array of inner products."""
        rtol = 1e-10
        atol = 1e-12

        num_row_vecs_list = [
            1,
            int(round(self.total_num_vecs_in_mem / 2.)),
            self.total_num_vecs_in_mem,
            self.total_num_vecs_in_mem * 2,
            parallel.get_num_procs() + 1]
        num_col_vecs_list = num_row_vecs_list
        num_states = 6

        row_vec_path = join(self.test_dir, 'row_vec_%03d.pkl')
        col_vec_path = join(self.test_dir, 'col_vec_%03d.pkl')

        for num_row_vecs in num_row_vecs_list:
            for num_col_vecs in num_col_vecs_list:

                # Generate vecs
                parallel.barrier()
                row_vec_array = (
                    parallel.call_and_bcast(
                        np.random.random, (num_states, num_row_vecs))
                    + 1j * parallel.call_and_bcast(
                        np.random.random, (num_states, num_row_vecs)))
                col_vec_array = (
                    parallel.call_and_bcast(
                        np.random.random, (num_states, num_col_vecs))
                    + 1j * parallel.call_and_bcast(
                        np.random.random, (num_states, num_col_vecs)))
                row_vec_handles = [
                    VecHandlePickle(row_vec_path % i)
                    for i in range(num_row_vecs)]
                col_vec_handles = [
                    VecHandlePickle(col_vec_path % i)
                    for i in range(num_col_vecs)]

                # Save vecs
                if parallel.is_rank_zero():
                    for i, h in enumerate(row_vec_handles):
                        h.put(row_vec_array[:, i])
                    for i, h in enumerate(col_vec_handles):
                        h.put(col_vec_array[:, i])
                parallel.barrier()

                # If number of rows/cols is 1, check case of passing a handle
                if len(row_vec_handles) == 1:
                    row_vec_handles = row_vec_handles[0]
                if len(col_vec_handles) == 1:
                    col_vec_handles = col_vec_handles[0]

                # Test IP computation.
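                # For complex data the true product uses the conjugate
                # transpose, matching an np.vdot-style inner product.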
                product_true = np.dot(row_vec_array.conj().T, col_vec_array)
                product_computed = self.vec_space.compute_inner_product_array(
                    row_vec_handles, col_vec_handles)
                np.testing.assert_allclose(
                    product_computed, product_true, rtol=rtol, atol=atol)

                # Test symm IP computation
                product_true = np.dot(row_vec_array.conj().T, row_vec_array)
                product_computed =\
                    self.vec_space.compute_symm_inner_product_array(
                        row_vec_handles)
                np.testing.assert_allclose(
                    product_computed, product_true, rtol=rtol, atol=atol)
Example 20
    def test_compute_modes(self):
        """Test computing modes in serial and parallel."""
        # Set test tolerances.  More relaxed tolerances are required for testing
        # the BPOD modes, since that test requires "squaring" the gramians and
        # thus involves more ill-conditioned arrays.
        rtol_sqr = 1e-8
        atol_sqr = 1e-8

        # Test a single input/output as well as multiple inputs/outputs.  Allow
        # for more inputs/outputs than states.  (This is determined in setUp()).
        for num_inputs in self.num_inputs_list:
            for num_outputs in self.num_outputs_list:

                # Get impulse response data
                direct_vec_handles, adjoint_vec_handles =\
                    self._helper_get_impulse_response_handles(
                        num_inputs, num_outputs)

                # Create BPOD object and perform decomposition.  (The properties
                # defining a BPOD mode require manipulations involving the
                # correct decomposition, so we cannot isolate the mode
                # computation from the decomposition step.)  Use relative
                # tolerance to avoid Hankel singular values which may correspond
                # to very uncontrollable/unobservable states.  It is ok to use a
                # more relaxed tolerance here than in the actual test/assert
                # statements, as here we are saying it is ok to ignore highly
                # uncontrollable/unobservable states, rather than allowing loose
                # tolerances in the comparison of two numbers.  Furthermore, it
                # is likely that in actual use, users would want to ignore
                # relatively small Hankel singular values anyway, as that is the
                # point of doing a balancing transformation.
                BPOD = bpod.BPODHandles(inner_product=np.vdot, verbosity=0)
                BPOD.compute_decomp(direct_vec_handles,
                                    adjoint_vec_handles,
                                    num_inputs=num_inputs,
                                    num_outputs=num_outputs,
                                    rtol=1e-6,
                                    atol=1e-12)

                # Select a subset of modes to compute.  Compute at least half
                # the modes, and up to all of them.  Make sure to use unique
                # values.  (This may reduce the number of modes computed.)
                num_modes = parallel.call_and_bcast(np.random.randint,
                                                    BPOD.sing_vals.size // 2,
                                                    BPOD.sing_vals.size + 1)
                mode_idxs = np.unique(
                    parallel.call_and_bcast(np.random.randint, 0,
                                            BPOD.sing_vals.size, num_modes))

                # Create handles for the modes
                direct_mode_handles = [
                    VecHandlePickle(self.direct_mode_path % i)
                    for i in mode_idxs
                ]
                adjoint_mode_handles = [
                    VecHandlePickle(self.adjoint_mode_path % i)
                    for i in mode_idxs
                ]

                # Compute modes
                BPOD.compute_direct_modes(
                    mode_idxs,
                    direct_mode_handles,
                    direct_vec_handles=direct_vec_handles)
                BPOD.compute_adjoint_modes(
                    mode_idxs,
                    adjoint_mode_handles,
                    adjoint_vec_handles=adjoint_vec_handles)

                # Test modes against empirical gramians
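                # The adjoint (direct) modes should diagonalize the empirical
                # controllability (observability) Gramian built from the
                # snapshots, with the Hankel singular values on the diagonal.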
                np.testing.assert_allclose(
                    BPOD.vec_space.compute_inner_product_array(
                        adjoint_mode_handles, direct_vec_handles).dot(
                            BPOD.vec_space.compute_inner_product_array(
                                direct_vec_handles, adjoint_mode_handles)),
                    np.diag(BPOD.sing_vals[mode_idxs]),
                    rtol=rtol_sqr,
                    atol=atol_sqr)
                np.testing.assert_allclose(
                    BPOD.vec_space.compute_inner_product_array(
                        direct_mode_handles, adjoint_vec_handles).dot(
                            BPOD.vec_space.compute_inner_product_array(
                                adjoint_vec_handles, direct_mode_handles)),
                    np.diag(BPOD.sing_vals[mode_idxs]),
                    rtol=rtol_sqr,
                    atol=atol_sqr)
Esempio n. 27
0
    def test_compute_inner_product_arrays(self):
        """Test computation of array of inner products."""
        rtol = 1e-10
        atol = 1e-12

        num_row_vecs_list = [
            1,
            int(round(self.total_num_vecs_in_mem / 2.)),
            self.total_num_vecs_in_mem,
            self.total_num_vecs_in_mem * 2,
            parallel.get_num_procs() + 1]
        num_col_vecs_list = num_row_vecs_list
        num_states = 6

        row_vec_path = join(self.test_dir, 'row_vec_%03d.pkl')
        col_vec_path = join(self.test_dir, 'col_vec_%03d.pkl')

        for num_row_vecs in num_row_vecs_list:
            for num_col_vecs in num_col_vecs_list:

                # Generate vecs
                parallel.barrier()
                row_vec_array = (
                    parallel.call_and_bcast(
                        np.random.random, (num_states, num_row_vecs))
                    + 1j * parallel.call_and_bcast(
                        np.random.random, (num_states, num_row_vecs)))
                col_vec_array = (
                    parallel.call_and_bcast(
                        np.random.random, (num_states, num_col_vecs))
                    + 1j * parallel.call_and_bcast(
                        np.random.random, (num_states, num_col_vecs)))
                row_vec_handles = [
                    VecHandlePickle(row_vec_path % i)
                    for i in range(num_row_vecs)]
                col_vec_handles = [
                    VecHandlePickle(col_vec_path % i)
                    for i in range(num_col_vecs)]

                # Save vecs
                if parallel.is_rank_zero():
                    for i, h in enumerate(row_vec_handles):
                        h.put(row_vec_array[:, i])
                    for i, h in enumerate(col_vec_handles):
                        h.put(col_vec_array[:, i])
                parallel.barrier()

                # If number of rows/cols is 1, check case of passing a handle
                if len(row_vec_handles) == 1:
                    row_vec_handles = row_vec_handles[0]
                if len(col_vec_handles) == 1:
                    col_vec_handles = col_vec_handles[0]

                # Test ip computation.
                product_true = np.dot(row_vec_array.conj().T, col_vec_array)
                product_computed = self.vec_space.compute_inner_product_array(
                    row_vec_handles, col_vec_handles)
                np.testing.assert_allclose(
                    product_computed, product_true, rtol=rtol, atol=atol)

                # Test symm ip computation
                product_true = np.dot(row_vec_array.conj().T, row_vec_array)
                product_computed =\
                    self.vec_space.compute_symm_inner_product_array(
                        row_vec_handles)
                np.testing.assert_allclose(
                    product_computed, product_true, rtol=rtol, atol=atol)
Esempio n. 28
0
    def test_lin_combine(self):
        # Set test tolerances
        rtol = 1e-10
        atol = 1e-12

        # Setup
        mode_path = join(self.test_dir, 'mode_%03d.pkl')
        vec_path = join(self.test_dir, 'vec_%03d.pkl')

        # Test cases where number of modes:
        #   less, equal, more than num_states
        #   less, equal, more than num_vecs
        #   less, equal, more than total_num_vecs_in_mem
        # Also check the case of passing a None value to the mode_indices
        # argument.
        num_states = 20
        num_vecs_list = [1, 15, 40]
        num_modes_list = [
            None, 1, 8, 10, 20, 25, 45,
            int(np.ceil(self.total_num_vecs_in_mem / 2.)),
            self.total_num_vecs_in_mem, self.total_num_vecs_in_mem * 2]

        # Check for correct computations
        for num_vecs in num_vecs_list:
            for num_modes in num_modes_list:
                for squeeze in [True, False]:

                    # Generate data and then broadcast to all procs
                    vec_handles = [
                        VecHandlePickle(vec_path % i)
                        for i in range(num_vecs)]
                    vec_array, coeff_array, true_modes =\
                        parallel.call_and_bcast(
                            self.generate_vecs_modes, num_states, num_vecs,
                            num_modes=num_modes, squeeze=squeeze)
                    if parallel.is_rank_zero():
                        for vec_index, vec_handle in enumerate(vec_handles):
                            vec_handle.put(vec_array[:, vec_index])
                    parallel.barrier()

                    # Choose which modes to compute
                    if num_modes is None:
                        mode_idxs_arg = None
                        mode_idxs_vals = range(true_modes.shape[1])
                    elif num_modes == 1:
                        mode_idxs_arg = 0
                        mode_idxs_vals = [0]
                    else:
                        mode_idxs_arg = np.unique(
                            parallel.call_and_bcast(
                                np.random.randint, 0, high=num_modes,
                                size=num_modes // 2))
                        mode_idxs_vals = mode_idxs_arg
                    mode_handles = [
                        VecHandlePickle(mode_path % mode_num)
                        for mode_num in mode_idxs_vals]

                    # Save modes to files
                    self.vec_space.lin_combine(
                        mode_handles, vec_handles, coeff_array,
                        coeff_array_col_indices=mode_idxs_arg)

                    # Test modes one by one
                    for mode_idx in mode_idxs_vals:
                        computed_mode = VecHandlePickle(
                            mode_path % mode_idx).get()
                        np.testing.assert_allclose(
                            computed_mode, true_modes[:, mode_idx],
                            rtol=rtol, atol=atol)
                    parallel.barrier()

                parallel.barrier()

            parallel.barrier()

        # Test that errors are caught for mismatched dimensions
        mode_handles = [
            VecHandlePickle(mode_path % i) for i in range(10)]
        vec_handles = [
            VecHandlePickle(vec_path % i) for i in range(15)]
        coeffs_array_too_short = np.zeros(
            (len(vec_handles) - 1, len(mode_handles)))
        coeffs_array_too_fat = np.zeros(
            (len(vec_handles), len(mode_handles) + 1))
        self.assertRaises(
            ValueError, self.vec_space.lin_combine, mode_handles, vec_handles,
            coeffs_array_too_short)
        self.assertRaises(
            ValueError, self.vec_space.lin_combine, mode_handles, vec_handles,
            coeffs_array_too_fat)
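
For reference, the operation lin_combine is tested against reduces, in the all-in-memory case, to a single matrix product: mode j is the linear combination of the vecs weighted by column j of the coefficient array. A rough serial sketch with hypothetical sizes (the expected arithmetic only, not modred's chunked, distributed implementation):

    import numpy as np

    num_states, num_vecs, num_modes = 20, 15, 8  # hypothetical sizes
    rng = np.random.default_rng(0)
    vec_array = rng.random((num_states, num_vecs))
    coeff_array = rng.random((num_vecs, num_modes))

    # Mode j is sum_i vec_array[:, i] * coeff_array[i, j]
    modes = vec_array.dot(coeff_array)

    # Restricting to selected columns mirrors coeff_array_col_indices
    mode_idxs = [0, 3, 5]
    assert np.allclose(
        vec_array.dot(coeff_array[:, mode_idxs]), modes[:, mode_idxs])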
Example 29
    def setUp(self):
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')

        self.test_dir = 'DELETE_ME_test_files_bpod'
        if not os.path.isdir(self.test_dir):
            parallel.call_from_rank_zero(os.mkdir, self.test_dir)

        self.mode_nums = [2, 3, 0]
        self.num_direct_vecs = 10
        self.num_adjoint_vecs = 12
        self.num_inputs = 1
        self.num_outputs = 1
        self.num_states = 20

        A = np.mat(
            parallel.call_and_bcast(util.drss, self.num_states, 1, 1)[0])
        B = np.mat(
            parallel.call_and_bcast(np.random.random,
                                    (self.num_states, self.num_inputs)))
        C = np.mat(
            parallel.call_and_bcast(np.random.random,
                                    (self.num_outputs, self.num_states)))
        self.direct_vecs = [B]
        A_powers = np.identity(A.shape[0])
        for t in range(self.num_direct_vecs - 1):
            A_powers = A_powers.dot(A)
            self.direct_vecs.append(A_powers.dot(B))
        self.direct_vec_array = np.array(self.direct_vecs).squeeze().T

        A_adjoint = A.H
        C_adjoint = C.H
        A_adjoint_powers = np.identity(A_adjoint.shape[0])
        self.adjoint_vecs = [C_adjoint]
        for t in range(self.num_adjoint_vecs - 1):
            A_adjoint_powers = A_adjoint_powers.dot(A_adjoint)
            self.adjoint_vecs.append(A_adjoint_powers.dot(C_adjoint))
        self.adjoint_vec_array = np.array(self.adjoint_vecs).squeeze().T

        self.direct_vec_path = join(self.test_dir, 'direct_vec_%03d.txt')
        self.adjoint_vec_path = join(self.test_dir, 'adjoint_vec_%03d.txt')

        self.direct_vec_handles = [
            V.VecHandleArrayText(self.direct_vec_path % i)
            for i in range(self.num_direct_vecs)
        ]
        self.adjoint_vec_handles = [
            V.VecHandleArrayText(self.adjoint_vec_path % i)
            for i in range(self.num_adjoint_vecs)
        ]

        if parallel.is_rank_zero():
            for i, handle in enumerate(self.direct_vec_handles):
                handle.put(self.direct_vecs[i])
            for i, handle in enumerate(self.adjoint_vec_handles):
                handle.put(self.adjoint_vecs[i])

        self.Hankel_mat_true = np.dot(self.adjoint_vec_array.T,
                                      self.direct_vec_array)

        self.L_sing_vecs_true, self.sing_vals_true, self.R_sing_vecs_true = \
            parallel.call_and_bcast(util.svd, self.Hankel_mat_true, atol=1e-10)

        self.direct_mode_array = self.direct_vec_array * \
            np.mat(self.R_sing_vecs_true) * \
            np.mat(np.diag(self.sing_vals_true ** -0.5))
        self.adjoint_mode_array = self.adjoint_vec_array * \
            np.mat(self.L_sing_vecs_true) *\
            np.mat(np.diag(self.sing_vals_true ** -0.5))

        self.my_BPOD = BPODHandles(np.vdot, verbosity=0)
        parallel.barrier()
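
The mode arrays assembled at the end of this setUp follow the standard BPOD recipe: take the SVD of the Hankel matrix of inner products, then scale the projected snapshots by the inverse square roots of the singular values. One consequence of that construction, sketched below in plain NumPy with arbitrary data (a property of the algebra, not a claim about modred internals), is that the direct and adjoint modes come out biorthogonal:

    import numpy as np

    num_states, num_direct, num_adjoint = 20, 10, 12
    rng = np.random.default_rng(0)
    X = rng.random((num_states, num_direct))    # direct snapshots
    Y = rng.random((num_states, num_adjoint))   # adjoint snapshots

    # Hankel matrix and its SVD: Y.T @ X = L @ np.diag(s) @ R
    L, s, R = np.linalg.svd(Y.T @ X, full_matrices=False)

    direct_modes = X @ R.T @ np.diag(s ** -0.5)
    adjoint_modes = Y @ L @ np.diag(s ** -0.5)

    # Biorthogonality: adjoint modes against direct modes gives identity
    assert np.allclose(adjoint_modes.T @ direct_modes, np.identity(s.size))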
Example 30
    def test_compute_modes(self):
        """Test computing modes in serial and parallel."""
        # Set test tolerances.  More relaxed tolerances are required for testing
        # the BPOD modes, since that test requires "squaring" the gramians and
        # thus involves more ill-conditioned arrays.
        rtol_sqr = 1e-8
        atol_sqr = 1e-8

        # Test a single input/output as well as multiple inputs/outputs.  Allow
        # for more inputs/outputs than states.  (This is determined in setUp()).
        for num_inputs in self.num_inputs_list:
            for num_outputs in self.num_outputs_list:

                # Get impulse response data
                direct_vec_handles, adjoint_vec_handles =\
                    self._helper_get_impulse_response_handles(
                        num_inputs, num_outputs)

                # Create BPOD object and perform decomposition.  (The properties
                # defining a BPOD mode require manipulations involving the
                # correct decomposition, so we cannot isolate the mode
                # computation from the decomposition step.)  Use relative
                # tolerance to avoid Hankel singular values which may correspond
                # to very uncontrollable/unobservable states.  It is ok to use a
                # more relaxed tolerance here than in the actual test/assert
                # statements, as here we are saying it is ok to ignore highly
                # uncontrollable/unobservable states, rather than allowing loose
                # tolerances in the comparison of two numbers.  Furthermore, it
                # is likely that in actual use, users would want to ignore
                # relatively small Hankel singular values anyway, as that is the
                # point of doing a balancing transformation.
                BPOD = bpod.BPODHandles(np.vdot, verbosity=0)
                BPOD.compute_decomp(
                    direct_vec_handles, adjoint_vec_handles,
                    num_inputs=num_inputs, num_outputs=num_outputs,
                    rtol=1e-6, atol=1e-12)

                # Select a subset of modes to compute.  Compute at least half
                # the modes, and up to all of them.  Make sure to use unique
                # values.  (This may reduce the number of modes computed.)
                num_modes = parallel.call_and_bcast(
                    np.random.randint,
                    BPOD.sing_vals.size // 2, BPOD.sing_vals.size + 1)
                mode_idxs = np.unique(parallel.call_and_bcast(
                    np.random.randint,
                    0, BPOD.sing_vals.size, num_modes))

                # Create handles for the modes
                direct_mode_handles = [
                    VecHandlePickle(self.direct_mode_path % i)
                    for i in mode_idxs]
                adjoint_mode_handles = [
                    VecHandlePickle(self.adjoint_mode_path % i)
                    for i in mode_idxs]

                # Compute modes
                BPOD.compute_direct_modes(
                    mode_idxs, direct_mode_handles,
                    direct_vec_handles=direct_vec_handles)
                BPOD.compute_adjoint_modes(
                    mode_idxs, adjoint_mode_handles,
                    adjoint_vec_handles=adjoint_vec_handles)

                # Test modes against empirical gramians
                np.testing.assert_allclose(
                    BPOD.vec_space.compute_inner_product_array(
                        adjoint_mode_handles, direct_vec_handles).dot(
                            BPOD.vec_space.compute_inner_product_array(
                                direct_vec_handles, adjoint_mode_handles)),
                    np.diag(BPOD.sing_vals[mode_idxs]),
                    rtol=rtol_sqr, atol=atol_sqr)
                np.testing.assert_allclose(
                    BPOD.vec_space.compute_inner_product_array(
                        direct_mode_handles, adjoint_vec_handles).dot(
                            BPOD.vec_space.compute_inner_product_array(
                                adjoint_vec_handles, direct_mode_handles)),
                    np.diag(BPOD.sing_vals[mode_idxs]),
                    rtol=rtol_sqr, atol=atol_sqr)
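
The assertions above have a compact reading: with X the direct snapshots and Psi the adjoint modes, the first check computes (Psi^T X)(X^T Psi) = Psi^T (X X^T) Psi, i.e., the adjoint modes diagonalize the empirical controllability Gramian with the Hankel singular values on the diagonal (the second check is the mirror statement for the direct modes and the observability Gramian Y Y^T). A standalone sketch of the identity, reusing the construction shown after the setUp above (illustrative data, not this suite's fixtures):

    import numpy as np

    num_states, num_direct, num_adjoint = 20, 10, 12
    rng = np.random.default_rng(0)
    X = rng.random((num_states, num_direct))
    Y = rng.random((num_states, num_adjoint))

    L, s, R = np.linalg.svd(Y.T @ X, full_matrices=False)
    adjoint_modes = Y @ L @ np.diag(s ** -0.5)

    # The adjoint modes diagonalize the empirical controllability
    # Gramian X @ X.T, with the Hankel singular values on the diagonal
    gram = adjoint_modes.T @ (X @ X.T) @ adjoint_modes
    assert np.allclose(gram, np.diag(s))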
Example 31
    def test_lin_combine(self):
        num_vecs_list = [1, 15, 40]
        num_states = 20
        # Test cases where number of modes:
        #   less, equal, more than num_states
        #   less, equal, more than num_vecs
        #   less, equal, more than total_num_vecs_in_mem
        num_modes_list = [
            1, 8, 10, 20, 25, 45,
            int(np.ceil(self.total_num_vecs_in_mem / 2.)),
            self.total_num_vecs_in_mem, self.total_num_vecs_in_mem * 2]
        mode_path = join(self.test_dir, 'mode_%03d.txt')
        vec_path = join(self.test_dir, 'vec_%03d.txt')

        for num_vecs in num_vecs_list:
            for num_modes in num_modes_list:
                # Generate data and then broadcast to all procs
                vec_handles = [
                    V.VecHandleArrayText(vec_path % i) for i in range(num_vecs)
                ]
                vec_array, mode_indices, build_coeff_mat, true_modes = \
                    parallel.call_and_bcast(
                        self.generate_vecs_modes, num_states, num_vecs,
                        num_modes)

                if parallel.is_rank_zero():
                    for vec_index, vec_handle in enumerate(vec_handles):
                        vec_handle.put(vec_array[:, vec_index])
                parallel.barrier()
                mode_handles = [
                    V.VecHandleArrayText(mode_path % mode_num)
                    for mode_num in mode_indices
                ]

                # A coeff mat with fewer rows than there are vecs
                # should raise an error
                build_coeff_mat_too_small = np.zeros(
                    (build_coeff_mat.shape[0] - 1, build_coeff_mat.shape[1]))
                self.assertRaises(
                    ValueError, self.my_vec_ops.lin_combine, mode_handles,
                    vec_handles, build_coeff_mat_too_small, mode_indices)

                # Test the case that only one mode is desired,
                # in which case user might pass in an int
                if len(mode_indices) == 1:
                    mode_indices = mode_indices[0]
                    mode_handles = mode_handles[0]

                # Save modes to files
                self.my_vec_ops.lin_combine(mode_handles, vec_handles,
                                            build_coeff_mat, mode_indices)

                # Change back to a list so it is iterable
                if not isinstance(mode_indices, list):
                    mode_indices = [mode_indices]

                parallel.barrier()
                for mode_index in mode_indices:
                    computed_mode = V.VecHandleArrayText(
                        mode_path % mode_index).get()
                    np.testing.assert_allclose(computed_mode,
                                               true_modes[:, mode_index])

                parallel.barrier()

        parallel.barrier()
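
A closing note on the parallel idiom these tests repeat: generate data once, broadcast it so every rank agrees, let only rank zero touch the filesystem, then synchronize before anyone reads. The parallel module wraps MPI; a rough bare-mpi4py equivalent (hypothetical output path, and assuming mpi4py is available; run serially it degenerates to a single rank):

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD

    # Generate on rank 0 only, then broadcast so all ranks hold the
    # same array (the role of parallel.call_and_bcast)
    data = None
    if comm.Get_rank() == 0:
        data = np.random.random((20, 5))
    data = comm.bcast(data, root=0)

    # Only rank 0 writes to disk (the role of parallel.is_rank_zero())
    if comm.Get_rank() == 0:
        np.savetxt('vec_000.txt', data)  # hypothetical path

    # Make sure the file exists before any rank tries to read it
    # (the role of parallel.barrier())
    comm.Barrier()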