Example #1
    def setUp(self):
        # Specify output locations
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'files_BPOD_DELETE_ME'
        if not os.path.isdir(self.test_dir):
            parallel.call_from_rank_zero(os.mkdir, self.test_dir)
        self.direct_vec_path = join(self.test_dir, 'direct_vec_%03d.pkl')
        self.adjoint_vec_path = join(self.test_dir, 'adjoint_vec_%03d.pkl')
        self.direct_mode_path = join(self.test_dir, 'direct_mode_%03d.pkl')
        self.adjoint_mode_path = join(self.test_dir, 'adjoint_mode_%03d.pkl')

        # Specify system dimensions.  Test single inputs/outputs as well as
        # multiple inputs/outputs.  Also allow for more inputs/outputs than
        # states.
        self.num_states = 10
        self.num_inputs_list = [
            1,
            parallel.call_and_bcast(np.random.randint, 2, self.num_states + 2)]
        self.num_outputs_list = [
            1,
            parallel.call_and_bcast(np.random.randint, 2, self.num_states + 2)]

        # Specify how long to run impulse responses
        self.num_steps = self.num_states + 1

        parallel.barrier()
Example #2
    def _helper_get_impulse_response_handles(self, num_inputs, num_outputs):
        # Get state space system
        A, B, C = parallel.call_and_bcast(
            get_system_arrays, self.num_states, num_inputs, num_outputs)

        # Run impulse responses
        direct_vec_array = parallel.call_and_bcast(
            get_direct_impulse_response_array, A, B, self.num_steps)
        adjoint_vec_array = parallel.call_and_bcast(
            get_adjoint_impulse_response_array, A, C, self.num_steps,
            np.identity(self.num_states))

        # Save data to disk
        direct_vec_handles = [
            VecHandlePickle(self.direct_vec_path % i)
            for i in range(direct_vec_array.shape[1])]
        adjoint_vec_handles = [
            VecHandlePickle(self.adjoint_vec_path % i)
            for i in range(adjoint_vec_array.shape[1])]
        if parallel.is_rank_zero():
            for idx, handle in enumerate(direct_vec_handles):
                handle.put(direct_vec_array[:, idx])
            for idx, handle in enumerate(adjoint_vec_handles):
                handle.put(adjoint_vec_array[:, idx])

        parallel.barrier()
        return direct_vec_handles, adjoint_vec_handles
Example #3
    def setUp(self):
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'files_vectorspace_DELETE_ME'
        if not os.path.isdir(self.test_dir):
            parallel.call_from_rank_zero(os.mkdir, self.test_dir)

        self.max_vecs_per_proc = 10
        self.total_num_vecs_in_mem = (parallel.get_num_procs() *
                                      self.max_vecs_per_proc)

        self.vec_space = vspc.VectorSpaceHandles(inner_product=np.vdot,
                                                 verbosity=0)
        self.vec_space.max_vecs_per_proc = self.max_vecs_per_proc

        # Default data members; set verbosity to 0 even though default is 1
        # so messages won't print during tests
        self.default_data_members = {
            'inner_product': np.vdot,
            'max_vecs_per_node': 10000,
            'max_vecs_per_proc': (
                10000 * parallel.get_num_nodes() // parallel.get_num_procs()),
            'verbosity': 0,
            'print_interval': 10,
            'prev_print_time': 0.}
        parallel.barrier()
Example #4
    def test_compute_inner_product_mats(self):
        """Test computation of matrix of inner products."""
        num_row_vecs_list = [
            1,
            int(round(self.total_num_vecs_in_mem / 2.)),
            self.total_num_vecs_in_mem, self.total_num_vecs_in_mem * 2,
            parallel.get_num_procs() + 1
        ]
        num_col_vecs_list = num_row_vecs_list
        num_states = 6

        row_vec_path = join(self.test_dir, 'row_vec_%03d.txt')
        col_vec_path = join(self.test_dir, 'col_vec_%03d.txt')

        for num_row_vecs in num_row_vecs_list:
            for num_col_vecs in num_col_vecs_list:
                # Generate vecs
                parallel.barrier()
                row_vec_array = parallel.call_and_bcast(
                    np.random.random, (num_states, num_row_vecs))
                col_vec_array = parallel.call_and_bcast(
                    np.random.random, (num_states, num_col_vecs))
                row_vec_handles = [
                    V.VecHandleArrayText(row_vec_path % i)
                    for i in range(num_row_vecs)
                ]
                col_vec_handles = [
                    V.VecHandleArrayText(col_vec_path % i)
                    for i in range(num_col_vecs)
                ]

                # Save vecs
                if parallel.is_rank_zero():
                    for i, h in enumerate(row_vec_handles):
                        h.put(row_vec_array[:, i])
                    for i, h in enumerate(col_vec_handles):
                        h.put(col_vec_array[:, i])
                parallel.barrier()

                # If number of rows/cols is 1, check case of passing a handle
                if len(row_vec_handles) == 1:
                    row_vec_handles = row_vec_handles[0]
                if len(col_vec_handles) == 1:
                    col_vec_handles = col_vec_handles[0]

                # Test inner product computation
                product_true = np.dot(row_vec_array.T, col_vec_array)
                product_computed = self.my_vec_ops.compute_inner_product_mat(
                    row_vec_handles, col_vec_handles)
                np.testing.assert_allclose(product_computed, product_true)

                # Test symmetric inner product computation
                product_true = np.dot(row_vec_array.T, row_vec_array)
                product_computed = \
                    self.my_vec_ops.compute_symmetric_inner_product_mat(
                        row_vec_handles)
                np.testing.assert_allclose(product_computed, product_true)
Example #5
    def test_puts_gets(self):
        test_dir = 'DELETE_ME_test_files_pod'
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        if not os.path.isdir(test_dir):
            parallel.call_from_rank_zero(os.mkdir, test_dir)
        num_vecs = 10
        num_states = 30
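        # Random arrays stand in for real decomposition results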
        correlation_mat_true = parallel.call_and_bcast(np.random.random,
                                                       ((num_vecs, num_vecs)))
        eigvals_true = parallel.call_and_bcast(np.random.random, num_vecs)
        eigvecs_true = parallel.call_and_bcast(np.random.random,
                                               ((num_states, num_vecs)))

        my_POD = PODHandles(None, verbosity=0)
        my_POD.correlation_mat = correlation_mat_true
        my_POD.eigvals = eigvals_true
        my_POD.eigvecs = eigvecs_true

        eigvecs_path = join(test_dir, 'eigvecs.txt')
        eigvals_path = join(test_dir, 'eigvals.txt')
        correlation_mat_path = join(test_dir, 'correlation.txt')
        my_POD.put_decomp(eigvals_path, eigvecs_path)
        my_POD.put_correlation_mat(correlation_mat_path)
        parallel.barrier()

        POD_load = PODHandles(None, verbosity=0)
        POD_load.get_decomp(eigvals_path, eigvecs_path)
        correlation_mat_loaded = util.load_array_text(correlation_mat_path)

        np.testing.assert_allclose(correlation_mat_loaded,
                                   correlation_mat_true)
        np.testing.assert_allclose(POD_load.eigvals, eigvals_true)
        np.testing.assert_allclose(POD_load.eigvecs, eigvecs_true)
Example #6
    def setUp(self):
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')

        self.test_dir = 'file_LTIGalerkinProj_DELETE_ME'
        if parallel.is_rank_zero() and not os.path.exists(self.test_dir):
            os.mkdir(self.test_dir)
        parallel.barrier()

        self.basis_vec_path = join(self.test_dir, 'basis_vec_%02d.txt')
        self.adjoint_basis_vec_path = join(
            self.test_dir, 'adjoint_basis_vec_%02d.txt')
        self.A_on_basis_vec_path = join(self.test_dir, 'A_on_mode_%02d.txt')
        self.B_on_basis_path = join(self.test_dir, 'B_on_basis_%02d.txt')
        self.C_on_basis_vec_path = join(self.test_dir, 'C_on_mode_%02d.txt')

        self.num_basis_vecs = 10
        self.num_adjoint_basis_vecs = 10
        self.num_states = 11
        self.num_inputs = 3
        self.num_outputs = 2

        self.generate_data_set(
            self.num_basis_vecs, self.num_adjoint_basis_vecs,
            self.num_states, self.num_inputs, self.num_outputs)

        self.LTI_proj = lgp.LTIGalerkinProjectionHandles(
            np.vdot, self.basis_vec_handles,
            adjoint_basis_vec_handles=self.adjoint_basis_vec_handles,
            is_basis_orthonormal=True, verbosity=0)
Example #7
    def generate_data_set(self, num_basis_vecs, num_adjoint_basis_vecs,
                          num_states, num_inputs, num_outputs):
        """Generates random data, saves, and computes true reduced A, B, C."""
        self.basis_vecs = parallel.call_and_bcast(np.random.random,
            (num_states, num_basis_vecs))
        self.adjoint_basis_vecs = parallel.call_and_bcast(np.random.random,
            (num_states, num_adjoint_basis_vecs))
        self.A_array = parallel.call_and_bcast(np.random.random,
            (num_states, num_states))
        self.B_array = parallel.call_and_bcast(np.random.random,
            (num_states, num_inputs))
        self.C_array = parallel.call_and_bcast(np.random.random,
            (num_outputs, num_states))

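        # Results of applying the full-order operators to the basis vectors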
        self.A_on_basis_vecs = np.dot(self.A_array, self.basis_vecs)
        self.B_on_standard_basis_array = self.B_array
        self.C_on_basis_vecs = self.C_array.dot(self.basis_vecs).squeeze()

        parallel.barrier()

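        # True reduced-order operators; proj_mat corrects for bases that are
        # not bi-orthonormal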
        self.A_true = np.dot(self.adjoint_basis_vecs.T,
            np.dot(self.A_array, self.basis_vecs))
        self.B_true = np.dot(self.adjoint_basis_vecs.T, self.B_array)
        self.C_true = np.dot(self.C_array, self.basis_vecs)
        self.proj_mat = np.linalg.inv(np.dot(self.adjoint_basis_vecs.T,
            self.basis_vecs))
        self.A_true_nonorth = np.dot(self.proj_mat, self.A_true)
        self.B_true_nonorth = np.dot(self.proj_mat, self.B_true)
Example #8
    def setUp(self):
        # Specify output locations
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'files_POD_DELETE_ME'
        if not os.path.isdir(self.test_dir):
            parallel.call_from_rank_zero(os.mkdir, self.test_dir)
        self.vec_path = join(self.test_dir, 'vec_%03d.pkl')
        self.mode_path = join(self.test_dir, 'mode_%03d.pkl')

        # Specify data dimensions
        self.num_states = 30
        self.num_vecs = 10

        # Generate random data and write to disk using handles
        self.vecs_array = (
            parallel.call_and_bcast(
                np.random.random, (self.num_states, self.num_vecs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (self.num_states, self.num_vecs)))
        self.vec_handles = [
            VecHandlePickle(self.vec_path % i) for i in range(self.num_vecs)]
        for idx, hdl in enumerate(self.vec_handles):
            hdl.put(self.vecs_array[:, idx])

        parallel.barrier()
Example #9
    def setUp(self):
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')

        self.test_dir = 'file_LTIGalerkinProj_DELETE_ME'
        if parallel.is_rank_zero() and not os.path.exists(self.test_dir):
            os.mkdir(self.test_dir)
        parallel.barrier()

        self.basis_vec_path = join(self.test_dir, 'basis_vec_%02d.txt')
        self.adjoint_basis_vec_path = join(self.test_dir,
                                           'adjoint_basis_vec_%02d.txt')
        self.A_on_basis_vec_path = join(self.test_dir, 'A_on_mode_%02d.txt')
        self.B_on_basis_path = join(self.test_dir, 'B_on_basis_%02d.txt')
        self.C_on_basis_vec_path = join(self.test_dir, 'C_on_mode_%02d.txt')

        self.num_basis_vecs = 10
        self.num_adjoint_basis_vecs = 10
        self.num_states = 11
        self.num_inputs = 3
        self.num_outputs = 2

        self.generate_data_set(self.num_basis_vecs,
                               self.num_adjoint_basis_vecs, self.num_states,
                               self.num_inputs, self.num_outputs)

        self.LTI_proj = lgp.LTIGalerkinProjectionHandles(
            np.vdot,
            self.basis_vec_handles,
            adjoint_basis_vec_handles=self.adjoint_basis_vec_handles,
            is_basis_orthonormal=True,
            verbosity=0)
Example #10
    def setUp(self):
        # Specify output locations
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'files_POD_DELETE_ME'
        if not os.path.isdir(self.test_dir):
            parallel.call_from_rank_zero(os.mkdir, self.test_dir)
        self.vec_path = join(self.test_dir, 'vec_%03d.pkl')
        self.mode_path = join(self.test_dir, 'mode_%03d.pkl')

        # Specify data dimensions
        self.num_states = 30
        self.num_vecs = 10

        # Generate random data and write to disk using handles
        self.vecs_array = (
            parallel.call_and_bcast(np.random.random,
                                    (self.num_states, self.num_vecs)) +
            1j * parallel.call_and_bcast(np.random.random,
                                         (self.num_states, self.num_vecs)))
        self.vec_handles = [
            VecHandlePickle(self.vec_path % i) for i in range(self.num_vecs)
        ]
        for idx, hdl in enumerate(self.vec_handles):
            hdl.put(self.vecs_array[:, idx])

        parallel.barrier()
Example #11
    def _helper_get_impulse_response_handles(self, num_inputs, num_outputs):
        # Get state space system
        A, B, C = parallel.call_and_bcast(get_system_arrays, self.num_states,
                                          num_inputs, num_outputs)

        # Run impulse responses
        direct_vec_array = parallel.call_and_bcast(
            get_direct_impulse_response_array, A, B, self.num_steps)
        adjoint_vec_array = parallel.call_and_bcast(
            get_adjoint_impulse_response_array, A, C, self.num_steps,
            np.identity(self.num_states))

        # Save data to disk
        direct_vec_handles = [
            VecHandlePickle(self.direct_vec_path % i)
            for i in range(direct_vec_array.shape[1])
        ]
        adjoint_vec_handles = [
            VecHandlePickle(self.adjoint_vec_path % i)
            for i in range(adjoint_vec_array.shape[1])
        ]
        if parallel.is_rank_zero():
            for idx, handle in enumerate(direct_vec_handles):
                handle.put(direct_vec_array[:, idx])
            for idx, handle in enumerate(adjoint_vec_handles):
                handle.put(adjoint_vec_array[:, idx])

        parallel.barrier()
        return direct_vec_handles, adjoint_vec_handles
Example #12
    def setUp(self):
        # Specify output locations
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'files_BPOD_DELETE_ME'
        if not os.path.isdir(self.test_dir):
            parallel.call_from_rank_zero(os.mkdir, self.test_dir)
        self.direct_vec_path = join(self.test_dir, 'direct_vec_%03d.pkl')
        self.adjoint_vec_path = join(self.test_dir, 'adjoint_vec_%03d.pkl')
        self.direct_mode_path = join(self.test_dir, 'direct_mode_%03d.pkl')
        self.adjoint_mode_path = join(self.test_dir, 'adjoint_mode_%03d.pkl')

        # Specify system dimensions.  Test single inputs/outputs as well as
        # multiple inputs/outputs.  Also allow for more inputs/outputs than
        # states.
        self.num_states = 10
        self.num_inputs_list = [
            1,
            parallel.call_and_bcast(np.random.randint, 2, self.num_states + 2)
        ]
        self.num_outputs_list = [
            1,
            parallel.call_and_bcast(np.random.randint, 2, self.num_states + 2)
        ]

        # Specify how long to run impulse responses
        self.num_steps = self.num_states + 1

        parallel.barrier()
Example #13
    def setUp(self):
        self.test_dir = 'DELETE_ME_test_files_pod'
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        if not os.path.isdir(self.test_dir) and parallel.is_rank_zero():
            os.mkdir(self.test_dir)
        self.mode_indices = [2, 4, 3, 6]
        self.num_vecs = 10
        self.num_states = 30
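        # Random snapshots, broadcast so every MPI rank holds identical data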
        self.vec_array = parallel.call_and_bcast(
            np.random.random, (self.num_states, self.num_vecs))
        self.correlation_mat_true = self.vec_array.conj().transpose().dot(
            self.vec_array)

        self.eigvals_true, self.eigvecs_true = \
            parallel.call_and_bcast(util.eigh, self.correlation_mat_true)

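        # True POD modes: snapshots times eigvecs, scaled by eigvals ** -0.5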
        self.mode_array = np.dot(
            self.vec_array,
            np.dot(self.eigvecs_true, np.diag(self.eigvals_true**-0.5)))
        self.vec_path = join(self.test_dir, 'vec_%03d.txt')
        self.vec_handles = [
            V.VecHandleArrayText(self.vec_path % i)
            for i in range(self.num_vecs)
        ]
        for vec_index, handle in enumerate(self.vec_handles):
            handle.put(self.vec_array[:, vec_index])

        self.my_POD = PODHandles(np.vdot, verbosity=0)
        parallel.barrier()
Example #14
 def setUp(self):
     if not os.access('.', os.W_OK):
         raise RuntimeError('Cannot write to current directory')
     self.test_dir = 'files_LTIGalerkinProj_DELETE_ME'
     if parallel.is_rank_zero() and not os.path.exists(self.test_dir):
         os.mkdir(self.test_dir)
     parallel.barrier()
Example #15
 def test_tutorial_examples(self):
     """Runs all tutorial examples; the test passes if they run without errors."""
     example_script = 'tutorial_ex%d.py'
     for example_num in range(1, 7):
         # Example 3 isn't meant to work in parallel, so skip it when distributed
         if not (parallel.is_distributed() and example_num == 3):
             parallel.barrier()
             # execfile() does not exist in Python 3; exec the script source instead
             with open(join(examples_dir, example_script % example_num)) as f:
                 exec(f.read())
             parallel.barrier()
Example #16
    def setUp(self):
        parallel.barrier()
        self.test_dir = join(running_dir, 'DELETE_ME_test_tutorial_examples')
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        if not os.path.isdir(self.test_dir) and parallel.is_rank_zero():
            os.mkdir(self.test_dir)
        parallel.barrier()

        os.chdir(self.test_dir)
Example #17
 def setUp(self):
     self.test_dir = 'files_vectors_DELETE_ME'
     if not os.access('.', os.W_OK):
         raise RuntimeError('Cannot write to current directory')
     if not os.path.isdir(self.test_dir) and parallel.is_rank_zero():
         os.mkdir(self.test_dir)
     parallel.barrier()
     self.mode_nums = [2, 4, 3, 6, 9, 8, 10, 11, 30]
     self.num_vecs = 40
     self.num_states = 100
     self.index_from = 2
Example #18
    def generate_data_set(
        self, num_basis_vecs, num_adjoint_basis_vecs,
        num_states, num_inputs, num_outputs):
        """Generates random data, saves, and computes true reduced A, B, C."""
        self.basis_vecs = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_basis_vecs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_basis_vecs)))
        self.adjoint_basis_vecs = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_adjoint_basis_vecs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_adjoint_basis_vecs)))
        self.A_array = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_states)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_states)))
        self.B_array = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_inputs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_inputs)))
        self.C_array = (
            parallel.call_and_bcast(
                np.random.random, (num_outputs, num_states)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_outputs, num_states)))

        self.A_on_basis_vecs = self.A_array.dot(self.basis_vecs)
        self.B_on_standard_basis_array = self.B_array
        self.C_on_basis_vecs = self.C_array.dot(self.basis_vecs).squeeze()

        parallel.barrier()

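        # True reduced-order operators; proj_array corrects for bases that are
        # not bi-orthonormal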
        self.A_true = self.adjoint_basis_vecs.conj().T.dot(
            self.A_array.dot(
                self.basis_vecs))
        self.B_true = self.adjoint_basis_vecs.conj().T.dot(self.B_array)
        self.C_true = self.C_array.dot(self.basis_vecs)
        self.proj_array = np.linalg.inv(
            self.adjoint_basis_vecs.conj().T.dot(self.basis_vecs))
        self.A_true_non_orth = self.proj_array.dot(self.A_true)
        self.B_true_non_orth = self.proj_array.dot(self.B_true)
Example #19
    def test_puts_gets(self):
        # Generate some random data
        correlation_array_true = parallel.call_and_bcast(
            np.random.random, ((self.num_vecs, self.num_vecs)))
        eigvals_true = parallel.call_and_bcast(
            np.random.random, self.num_vecs)
        eigvecs_true = parallel.call_and_bcast(
            np.random.random, ((self.num_states, self.num_vecs)))
        proj_coeffs_true = parallel.call_and_bcast(
            np.random.random, ((self.num_vecs, self.num_vecs)))

        # Create a POD object and store the data in it
        POD_save = pod.PODHandles(None, verbosity=0)
        POD_save.correlation_array = correlation_array_true
        POD_save.eigvals = eigvals_true
        POD_save.eigvecs = eigvecs_true
        POD_save.proj_coeffs = proj_coeffs_true

        # Write the data to disk
        eigvecs_path = join(self.test_dir, 'eigvecs.txt')
        eigvals_path = join(self.test_dir, 'eigvals.txt')
        correlation_array_path = join(self.test_dir, 'correlation.txt')
        proj_coeffs_path = join(self.test_dir, 'proj_coeffs.txt')
        POD_save.put_decomp(eigvals_path, eigvecs_path)
        POD_save.put_correlation_array(correlation_array_path)
        POD_save.put_proj_coeffs(proj_coeffs_path)
        parallel.barrier()

        # Create a new POD object and use it to load the data
        POD_load = pod.PODHandles(None, verbosity=0)
        POD_load.get_decomp(eigvals_path, eigvecs_path)
        POD_load.get_correlation_array(correlation_array_path)
        POD_load.get_proj_coeffs(proj_coeffs_path)

        # Check that the loaded data is correct
        np.testing.assert_equal(POD_load.eigvals, eigvals_true)
        np.testing.assert_equal(POD_load.eigvecs, eigvecs_true)
        np.testing.assert_equal(
            POD_load.correlation_array, correlation_array_true)
        np.testing.assert_equal(POD_load.proj_coeffs, proj_coeffs_true)
Example #20
    def test_puts_gets(self):
        """Test that put/get work in base class."""
        test_dir = 'DELETE_ME_test_files_bpod'
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        if not os.path.isdir(test_dir) and parallel.is_rank_zero():
            os.mkdir(test_dir)
        num_vecs = 10
        num_states = 30
        Hankel_mat_true = parallel.call_and_bcast(np.random.random,
                                                  ((num_vecs, num_vecs)))
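        # True decomposition factors come from an SVD of the Hankel matrix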
        L_sing_vecs_true, sing_vals_true, R_sing_vecs_true = \
            parallel.call_and_bcast(util.svd, Hankel_mat_true)

        my_BPOD = BPODHandles(None, verbosity=0)
        my_BPOD.Hankel_mat = Hankel_mat_true
        my_BPOD.sing_vals = sing_vals_true
        my_BPOD.L_sing_vecs = L_sing_vecs_true
        my_BPOD.R_sing_vecs = R_sing_vecs_true

        L_sing_vecs_path = join(test_dir, 'L_sing_vecs.txt')
        R_sing_vecs_path = join(test_dir, 'R_sing_vecs.txt')
        sing_vals_path = join(test_dir, 'sing_vals.txt')
        Hankel_mat_path = join(test_dir, 'Hankel_mat.txt')
        my_BPOD.put_decomp(sing_vals_path, L_sing_vecs_path, R_sing_vecs_path)
        my_BPOD.put_Hankel_mat(Hankel_mat_path)
        parallel.barrier()

        BPOD_load = BPODHandles(None, verbosity=0)

        BPOD_load.get_decomp(sing_vals_path, L_sing_vecs_path,
                             R_sing_vecs_path)
        Hankel_mat_loaded = parallel.call_and_bcast(util.load_array_text,
                                                    Hankel_mat_path)

        np.testing.assert_allclose(Hankel_mat_loaded, Hankel_mat_true)
        np.testing.assert_allclose(BPOD_load.L_sing_vecs, L_sing_vecs_true)
        np.testing.assert_allclose(BPOD_load.R_sing_vecs, R_sing_vecs_true)
        np.testing.assert_allclose(BPOD_load.sing_vals, sing_vals_true)
Example #21
    def setUp(self):
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'files_vectorspace_DELETE_ME'
        if not os.path.isdir(self.test_dir):
            parallel.call_from_rank_zero(os.mkdir, self.test_dir)

        self.max_vecs_per_proc = 10
        self.total_num_vecs_in_mem = (
            parallel.get_num_procs() * self.max_vecs_per_proc)

        self.vec_space = vspc.VectorSpaceHandles(
            inner_product=np.vdot, verbosity=0)
        self.vec_space.max_vecs_per_proc = self.max_vecs_per_proc

        # Default data members; set verbosity to 0 even though default is 1
        # so messages won't print during tests
        self.default_data_members = {
            'inner_product': np.vdot, 'max_vecs_per_node': 10000,
            'max_vecs_per_proc': (
                10000 * parallel.get_num_nodes() // parallel.get_num_procs()),
            'verbosity': 0, 'print_interval': 10, 'prev_print_time': 0.}
        parallel.barrier()
Example #22
# Create artificial sample times used as quadrature weights in POD
num_vecs = 100
quad_weights = np.logspace(1., 3., num=num_vecs)
base_vec_handle = mr.VecHandlePickle('%s/base_vec.pkl' % out_dir)
snapshots = [
    mr.VecHandlePickle(
        '%s/vec%d.pkl' % (out_dir, i), base_vec_handle=base_vec_handle,
        scale=quad_weights[i])
    for i in mr.range(num_vecs)]

# Save arbitrary snapshot data
num_elements = 2000
if parallel.is_rank_zero():
    for snap in snapshots + [base_vec_handle]:
        snap.put(np.random.random(num_elements))
parallel.barrier()

# Compute and save POD modes
my_POD = mr.PODHandles(np.vdot)
my_POD.compute_decomp(snapshots)
my_POD.put_decomp('%s/sing_vals.txt' % out_dir, '%s/sing_vecs.txt' % out_dir)
my_POD.put_correlation_array('%s/correlation_array.txt' % out_dir)
mode_indices = [1, 4, 5, 0, 10]
modes = [
    mr.VecHandleArrayText('%s/mode%d.txt' % (out_dir, i)) for i in mode_indices]
my_POD.compute_modes(mode_indices, modes)

# Check that modes are orthonormal
vec_space = mr.VectorSpaceHandles(inner_product=np.vdot)
IP_array = vec_space.compute_symm_inner_product_array(modes)
if not np.allclose(IP_array, np.eye(len(mode_indices))):
    raise RuntimeError('Modes are not orthonormal')
Example #23
nx = 80
ny = 100
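# Non-uniform grids (cosine-spaced in x, quadratically stretched in y) for
# the weighted inner product below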
x_grid = 1. - np.cos(np.linspace(0, np.pi, nx))
y_grid = np.linspace(0, 1., ny)**2
Y, X = np.meshgrid(y_grid, x_grid)

# Create random snapshot data
num_vecs = 100
snapshots = [
    mr.VecHandlePickle('%s/vec%d.pkl' % (out_dir, i))
    for i in mr.range(num_vecs)
]
if parallel.is_rank_zero():
    for i, snap in enumerate(snapshots):
        snap.put(np.sin(X * i) + np.cos(Y * i))
parallel.barrier()

# Calculate DMD modes and save them to pickle files
weighted_IP = mr.InnerProductTrapz(x_grid, y_grid)
my_DMD = mr.DMDHandles(inner_product=weighted_IP)
my_DMD.compute_decomp(snapshots)
my_DMD.put_decomp('%s/eigvals.txt' % out_dir,
                  '%s/R_low_order_eigvecs.txt' % out_dir,
                  '%s/L_low_order_eigvecs.txt' % out_dir,
                  '%s/correlation_array_eigvals.txt' % out_dir,
                  '%s/correlation_array_eigvecs.txt' % out_dir)
mode_indices = [1, 4, 5, 0, 10]
modes = [
    mr.VecHandlePickle('%s/mode%d.pkl' % (out_dir, i)) for i in mode_indices
]
my_DMD.compute_exact_modes(mode_indices, modes)
Example #24
 def tearDown(self):
     parallel.barrier()
     if parallel.is_rank_zero():
         rmtree(self.test_dir, ignore_errors=True)
     parallel.barrier()
Example #25
 def tearDown(self):
     parallel.barrier()
     parallel.call_from_rank_zero(rmtree, self.test_dir, ignore_errors=True)
     parallel.barrier()
Example #26
    def test_compute_inner_product_arrays(self):
        """Test computation of array of inner products."""
        rtol = 1e-10
        atol = 1e-12

        num_row_vecs_list = [
            1,
            int(round(self.total_num_vecs_in_mem / 2.)),
            self.total_num_vecs_in_mem,
            self.total_num_vecs_in_mem * 2,
            parallel.get_num_procs() + 1]
        num_col_vecs_list = num_row_vecs_list
        num_states = 6

        row_vec_path = join(self.test_dir, 'row_vec_%03d.pkl')
        col_vec_path = join(self.test_dir, 'col_vec_%03d.pkl')

        for num_row_vecs in num_row_vecs_list:
            for num_col_vecs in num_col_vecs_list:

                # Generate vecs
                parallel.barrier()
                row_vec_array = (
                    parallel.call_and_bcast(
                        np.random.random, (num_states, num_row_vecs))
                    + 1j * parallel.call_and_bcast(
                        np.random.random, (num_states, num_row_vecs)))
                col_vec_array = (
                    parallel.call_and_bcast(
                        np.random.random, (num_states, num_col_vecs))
                    + 1j * parallel.call_and_bcast(
                        np.random.random, (num_states, num_col_vecs)))
                row_vec_handles = [
                    VecHandlePickle(row_vec_path % i)
                    for i in range(num_row_vecs)]
                col_vec_handles = [
                    VecHandlePickle(col_vec_path % i)
                    for i in range(num_col_vecs)]

                # Save vecs
                if parallel.is_rank_zero():
                    for i, h in enumerate(row_vec_handles):
                        h.put(row_vec_array[:, i])
                    for i, h in enumerate(col_vec_handles):
                        h.put(col_vec_array[:, i])
                parallel.barrier()

                # If number of rows/cols is 1, check case of passing a handle
                if len(row_vec_handles) == 1:
                    row_vec_handles = row_vec_handles[0]
                if len(col_vec_handles) == 1:
                    col_vec_handles = col_vec_handles[0]

                # Test inner product computation
                product_true = np.dot(row_vec_array.conj().T, col_vec_array)
                product_computed = self.vec_space.compute_inner_product_array(
                    row_vec_handles, col_vec_handles)
                np.testing.assert_allclose(
                    product_computed, product_true, rtol=rtol, atol=atol)

                # Test symmetric inner product computation
                product_true = np.dot(row_vec_array.conj().T, row_vec_array)
                product_computed =\
                    self.vec_space.compute_symm_inner_product_array(
                        row_vec_handles)
                np.testing.assert_allclose(
                    product_computed, product_true, rtol=rtol, atol=atol)
Example #27
    def setUp(self):
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')

        self.test_dir = 'DELETE_ME_test_files_bpod'
        if not os.path.isdir(self.test_dir):
            parallel.call_from_rank_zero(os.mkdir, self.test_dir)

        self.mode_nums = [2, 3, 0]
        self.num_direct_vecs = 10
        self.num_adjoint_vecs = 12
        self.num_inputs = 1
        self.num_outputs = 1
        self.num_states = 20

        A = np.mat(
            parallel.call_and_bcast(util.drss, self.num_states, 1, 1)[0])
        B = np.mat(
            parallel.call_and_bcast(np.random.random,
                                    (self.num_states, self.num_inputs)))
        C = np.mat(
            parallel.call_and_bcast(np.random.random,
                                    (self.num_outputs, self.num_states)))
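        # Direct impulse response: Krylov sequence B, AB, A^2 B, ...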
        self.direct_vecs = [B]
        A_powers = np.identity(A.shape[0])
        for t in range(self.num_direct_vecs - 1):
            A_powers = A_powers.dot(A)
            self.direct_vecs.append(A_powers.dot(B))
        self.direct_vec_array = np.array(self.direct_vecs).squeeze().T

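        # Adjoint impulse response: Krylov sequence C^H, A^H C^H, ...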
        A_adjoint = A.H
        C_adjoint = C.H
        A_adjoint_powers = np.identity(A_adjoint.shape[0])
        self.adjoint_vecs = [C_adjoint]
        for t in range(self.num_adjoint_vecs - 1):
            A_adjoint_powers = A_adjoint_powers.dot(A_adjoint)
            self.adjoint_vecs.append(A_adjoint_powers.dot(C_adjoint))
        self.adjoint_vec_array = np.array(self.adjoint_vecs).squeeze().T

        self.direct_vec_path = join(self.test_dir, 'direct_vec_%03d.txt')
        self.adjoint_vec_path = join(self.test_dir, 'adjoint_vec_%03d.txt')

        self.direct_vec_handles = [
            V.VecHandleArrayText(self.direct_vec_path % i)
            for i in range(self.num_direct_vecs)
        ]
        self.adjoint_vec_handles = [
            V.VecHandleArrayText(self.adjoint_vec_path % i)
            for i in range(self.num_adjoint_vecs)
        ]

        if parallel.is_rank_zero():
            for i, handle in enumerate(self.direct_vec_handles):
                handle.put(self.direct_vecs[i])
            for i, handle in enumerate(self.adjoint_vec_handles):
                handle.put(self.adjoint_vecs[i])

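        # Hankel matrix: inner products of adjoint and direct impulse responses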
        self.Hankel_mat_true = np.dot(self.adjoint_vec_array.T,
                                      self.direct_vec_array)

        self.L_sing_vecs_true, self.sing_vals_true, self.R_sing_vecs_true = \
            parallel.call_and_bcast(util.svd, self.Hankel_mat_true, atol=1e-10)

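        # True BPOD modes follow from the SVD of the Hankel matrix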
        self.direct_mode_array = self.direct_vec_array.dot(
            self.R_sing_vecs_true).dot(np.diag(self.sing_vals_true ** -0.5))
        self.adjoint_mode_array = self.adjoint_vec_array.dot(
            self.L_sing_vecs_true).dot(np.diag(self.sing_vals_true ** -0.5))

        self.my_BPOD = BPODHandles(np.vdot, verbosity=0)
        parallel.barrier()
Example #28
    def generate_data_set(self, num_basis_vecs, num_adjoint_basis_vecs,
                          num_states, num_inputs, num_outputs):
        """Generates random data, saves, and computes true reduced A,B,C."""
        self.basis_vec_handles = [
            VecHandlePickle(self.basis_vec_path % i)
            for i in range(self.num_basis_vecs)
        ]
        self.adjoint_basis_vec_handles = [
            VecHandlePickle(self.adjoint_basis_vec_path % i)
            for i in range(self.num_adjoint_basis_vecs)
        ]
        self.A_on_basis_vec_handles = [
            VecHandlePickle(self.A_on_basis_vec_path % i)
            for i in range(self.num_basis_vecs)
        ]
        self.B_on_standard_basis_handles = [
            VecHandlePickle(self.B_on_basis_path % i)
            for i in range(self.num_inputs)
        ]
        self.C_on_basis_vec_handles = [
            VecHandlePickle(self.C_on_basis_vec_path % i)
            for i in range(self.num_basis_vecs)
        ]

        self.basis_vec_array = (
            parallel.call_and_bcast(np.random.random,
                                    (num_states, num_basis_vecs)) +
            1j * parallel.call_and_bcast(np.random.random,
                                         (num_states, num_basis_vecs)))
        self.adjoint_basis_vec_array = (
            parallel.call_and_bcast(np.random.random,
                                    (num_states, num_adjoint_basis_vecs)) +
            1j * parallel.call_and_bcast(np.random.random,
                                         (num_states, num_adjoint_basis_vecs)))
        self.A_array = (parallel.call_and_bcast(np.random.random,
                                                (num_states, num_states)) +
                        1j * parallel.call_and_bcast(np.random.random,
                                                     (num_states, num_states)))
        self.B_array = (parallel.call_and_bcast(np.random.random,
                                                (num_states, num_inputs)) +
                        1j * parallel.call_and_bcast(np.random.random,
                                                     (num_states, num_inputs)))
        self.C_array = (
            parallel.call_and_bcast(np.random.random,
                                    (num_outputs, num_states)) +
            1j * parallel.call_and_bcast(np.random.random,
                                         (num_outputs, num_states)))

        self.basis_vecs = [
            self.basis_vec_array[:, i].squeeze() for i in range(num_basis_vecs)
        ]
        self.adjoint_basis_vecs = [
            self.adjoint_basis_vec_array[:, i].squeeze()
            for i in range(num_adjoint_basis_vecs)
        ]
        self.A_on_basis_vecs = [
            self.A_array.dot(basis_vec).squeeze()
            for basis_vec in self.basis_vecs
        ]
        self.B_on_basis = [
            self.B_array[:, i].squeeze() for i in range(self.num_inputs)
        ]
        self.C_on_basis_vecs = [
            np.array(self.C_array.dot(basis_vec).squeeze(), ndmin=1)
            for basis_vec in self.basis_vecs
        ]

        if parallel.is_rank_zero():
            for handle, vec in zip(self.basis_vec_handles, self.basis_vecs):
                handle.put(vec)
            for handle, vec in zip(self.adjoint_basis_vec_handles,
                                   self.adjoint_basis_vecs):
                handle.put(vec)
            for handle, vec in zip(self.A_on_basis_vec_handles,
                                   self.A_on_basis_vecs):
                handle.put(vec)
            for handle, vec in zip(self.B_on_standard_basis_handles,
                                   self.B_on_basis):
                handle.put(vec)
            for handle, vec in zip(self.C_on_basis_vec_handles,
                                   self.C_on_basis_vecs):
                handle.put(vec)
        parallel.barrier()

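        # True reduced-order operators; proj_array corrects for bases that are
        # not bi-orthonormal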
        self.A_true = self.adjoint_basis_vec_array.conj().T.dot(
            self.A_array.dot(self.basis_vec_array))
        self.B_true = self.adjoint_basis_vec_array.conj().T.dot(self.B_array)
        self.C_true = self.C_array.dot(self.basis_vec_array)
        self.proj_array = np.linalg.inv(
            self.adjoint_basis_vec_array.conj().T.dot(self.basis_vec_array))
        self.A_true_non_orth = self.proj_array.dot(self.A_true)
        self.B_true_non_orth = self.proj_array.dot(self.B_true)
Example #29
    def generate_data_set(self, num_basis_vecs, num_adjoint_basis_vecs,
                          num_states, num_inputs, num_outputs):
        """Generates random data, saves, and computes true reduced A, B, C."""
        self.basis_vec_handles = [
            VecHandlePickle(self.basis_vec_path % i)
            for i in range(self.num_basis_vecs)]
        self.adjoint_basis_vec_handles = [
            VecHandlePickle(self.adjoint_basis_vec_path % i)
            for i in range(self.num_adjoint_basis_vecs)]
        self.A_on_basis_vec_handles = [
            VecHandlePickle(self.A_on_basis_vec_path % i)
            for i in range(self.num_basis_vecs)]
        self.B_on_standard_basis_handles = [
            VecHandlePickle(self.B_on_basis_path % i)
            for i in range(self.num_inputs)]
        self.C_on_basis_vec_handles = [
            VecHandlePickle(self.C_on_basis_vec_path % i)
            for i in range(self.num_basis_vecs)]

        self.basis_vec_array = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_basis_vecs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_basis_vecs)))
        self.adjoint_basis_vec_array = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_adjoint_basis_vecs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_adjoint_basis_vecs)))
        self.A_array = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_states)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_states)))
        self.B_array = (
            parallel.call_and_bcast(
                np.random.random, (num_states, num_inputs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_states, num_inputs)))
        self.C_array = (
            parallel.call_and_bcast(
                np.random.random, (num_outputs, num_states)) +
            1j * parallel.call_and_bcast(
                np.random.random, (num_outputs, num_states)))

        self.basis_vecs = [
            self.basis_vec_array[:, i].squeeze() for i in range(num_basis_vecs)]
        self.adjoint_basis_vecs = [
            self.adjoint_basis_vec_array[:, i].squeeze()
            for i in range(num_adjoint_basis_vecs)]
        self.A_on_basis_vecs = [
            self.A_array.dot(basis_vec).squeeze()
            for basis_vec in self.basis_vecs]
        self.B_on_basis = [
            self.B_array[:, i].squeeze() for i in range(self.num_inputs)]
        self.C_on_basis_vecs = [
            np.array(self.C_array.dot(basis_vec).squeeze(), ndmin=1)
            for basis_vec in self.basis_vecs]

        if parallel.is_rank_zero():
            for handle, vec in zip(self.basis_vec_handles, self.basis_vecs):
                handle.put(vec)
            for handle, vec in zip(
                    self.adjoint_basis_vec_handles, self.adjoint_basis_vecs):
                handle.put(vec)
            for handle, vec in zip(
                    self.A_on_basis_vec_handles, self.A_on_basis_vecs):
                handle.put(vec)
            for handle, vec in zip(
                    self.B_on_standard_basis_handles, self.B_on_basis):
                handle.put(vec)
            for handle, vec in zip(
                    self.C_on_basis_vec_handles, self.C_on_basis_vecs):
                handle.put(vec)
        parallel.barrier()

        self.A_true = self.adjoint_basis_vec_array.conj().T.dot(
            self.A_array.dot(self.basis_vec_array))
        self.B_true = self.adjoint_basis_vec_array.conj().T.dot(self.B_array)
        self.C_true = self.C_array.dot(self.basis_vec_array)
        self.proj_array = np.linalg.inv(
            self.adjoint_basis_vec_array.conj().T.dot(self.basis_vec_array))
        self.A_true_non_orth = self.proj_array.dot(self.A_true)
        self.B_true_non_orth = self.proj_array.dot(self.B_true)
Example #30
    def test_lin_combine(self):
        # Set test tolerances
        rtol = 1e-10
        atol = 1e-12

        # Setup
        mode_path = join(self.test_dir, 'mode_%03d.pkl')
        vec_path = join(self.test_dir, 'vec_%03d.pkl')

        # Test cases where number of modes:
        #   less, equal, more than num_states
        #   less, equal, more than num_vecs
        #   less, equal, more than total_num_vecs_in_mem
        # Also check the case of passing a None value to the mode_indices
        # argument.
        num_states = 20
        num_vecs_list = [1, 15, 40]
        num_modes_list = [
            None, 1, 8, 10, 20, 25, 45,
            int(np.ceil(self.total_num_vecs_in_mem / 2.)),
            self.total_num_vecs_in_mem, self.total_num_vecs_in_mem * 2]

        # Check for correct computations
        for num_vecs in num_vecs_list:
            for num_modes in num_modes_list:
                for squeeze in [True, False]:

                    # Generate data and then broadcast to all procs
                    vec_handles = [
                        VecHandlePickle(vec_path % i)
                        for i in range(num_vecs)]
                    vec_array, coeff_array, true_modes =\
                        parallel.call_and_bcast(
                            self.generate_vecs_modes, num_states, num_vecs,
                            num_modes=num_modes, squeeze=squeeze)
                    if parallel.is_rank_zero():
                        for vec_index, vec_handle in enumerate(vec_handles):
                            vec_handle.put(vec_array[:, vec_index])
                    parallel.barrier()

                    # Choose which modes to compute
                    if num_modes is None:
                        mode_idxs_arg = None
                        mode_idxs_vals = range(true_modes.shape[1])
                    elif num_modes == 1:
                        mode_idxs_arg = 0
                        mode_idxs_vals = [0]
                    else:
                        mode_idxs_arg = np.unique(
                            parallel.call_and_bcast(
                                np.random.randint, 0, high=num_modes,
                                size=num_modes // 2))
                        mode_idxs_vals = mode_idxs_arg
                    mode_handles = [
                        VecHandlePickle(mode_path % mode_num)
                        for mode_num in mode_idxs_vals]

                    # Save modes to files
                    self.vec_space.lin_combine(
                        mode_handles, vec_handles, coeff_array,
                        coeff_array_col_indices=mode_idxs_arg)

                    # Test modes one by one
                    for mode_idx in mode_idxs_vals:
                        computed_mode = VecHandlePickle(
                            mode_path % mode_idx).get()
                        np.testing.assert_allclose(
                            computed_mode, true_modes[:, mode_idx],
                            rtol=rtol, atol=atol)
                    parallel.barrier()

                parallel.barrier()

            parallel.barrier()

        # Test that errors are caught for mismatched dimensions
        mode_handles = [
            VecHandlePickle(mode_path % i) for i in range(10)]
        vec_handles = [
            VecHandlePickle(vec_path % i) for i in range(15)]
        coeffs_array_too_short = np.zeros(
            (len(vec_handles) - 1, len(mode_handles)))
        coeffs_array_too_fat = np.zeros(
            (len(vec_handles), len(mode_handles) + 1))
        self.assertRaises(
            ValueError, self.vec_space.lin_combine, mode_handles, vec_handles,
            coeffs_array_too_short)
        self.assertRaises(
            ValueError, self.vec_space.lin_combine, mode_handles, vec_handles,
            coeffs_array_too_fat)
Example #31
    def test_lin_combine(self):
        num_vecs_list = [1, 15, 40]
        num_states = 20
        # Test cases where number of modes:
        #   less, equal, more than num_states
        #   less, equal, more than num_vecs
        #   less, equal, more than total_num_vecs_in_mem
        num_modes_list = [
            1, 8, 10, 20, 25, 45,
            int(np.ceil(self.total_num_vecs_in_mem / 2.)),
            self.total_num_vecs_in_mem, self.total_num_vecs_in_mem * 2]
        mode_path = join(self.test_dir, 'mode_%03d.txt')
        vec_path = join(self.test_dir, 'vec_%03d.txt')

        for num_vecs in num_vecs_list:
            for num_modes in num_modes_list:
                # Generate data and then broadcast to all procs
                vec_handles = [
                    V.VecHandleArrayText(vec_path % i) for i in range(num_vecs)
                ]
                vec_array, mode_indices, build_coeff_mat, true_modes = \
                    parallel.call_and_bcast(
                        self.generate_vecs_modes, num_states, num_vecs,
                        num_modes)

                if parallel.is_rank_zero():
                    for vec_index, vec_handle in enumerate(vec_handles):
                        vec_handle.put(vec_array[:, vec_index])
                parallel.barrier()
                mode_handles = [
                    V.VecHandleArrayText(mode_path % mode_num)
                    for mode_num in mode_indices
                ]

                # Check that an error is raised if the coefficient matrix
                # has fewer rows than there are vecs
                build_coeff_mat_too_small = np.zeros(
                    (build_coeff_mat.shape[0] - 1, build_coeff_mat.shape[1]))
                self.assertRaises(
                    ValueError, self.my_vec_ops.lin_combine, mode_handles,
                    vec_handles, build_coeff_mat_too_small, mode_indices)

                # Test the case that only one mode is desired,
                # in which case user might pass in an int
                if len(mode_indices) == 1:
                    mode_indices = mode_indices[0]
                    mode_handles = mode_handles[0]

                # Saves modes to files
                self.my_vec_ops.lin_combine(mode_handles, vec_handles,
                                            build_coeff_mat, mode_indices)

                # Change back to a list so it is iterable
                if not isinstance(mode_indices, list):
                    mode_indices = [mode_indices]

                parallel.barrier()
                for mode_index in mode_indices:
                    computed_mode = V.VecHandleArrayText(
                        mode_path % mode_index).get()
                    np.testing.assert_allclose(
                        computed_mode, true_modes[:, mode_index])

                parallel.barrier()

        parallel.barrier()