def _helper_get_impulse_response_handles(self, num_inputs, num_outputs):
    # Get state space system
    A, B, C = parallel.call_and_bcast(
        get_system_arrays, self.num_states, num_inputs, num_outputs)

    # Run impulse responses
    direct_vec_array = parallel.call_and_bcast(
        get_direct_impulse_response_array, A, B, self.num_steps)
    adjoint_vec_array = parallel.call_and_bcast(
        get_adjoint_impulse_response_array, A, C, self.num_steps,
        np.identity(self.num_states))

    # Save data to disk
    direct_vec_handles = [
        VecHandlePickle(self.direct_vec_path % i)
        for i in range(direct_vec_array.shape[1])]
    adjoint_vec_handles = [
        VecHandlePickle(self.adjoint_vec_path % i)
        for i in range(adjoint_vec_array.shape[1])]
    if parallel.is_rank_zero():
        for idx, handle in enumerate(direct_vec_handles):
            handle.put(direct_vec_array[:, idx])
        for idx, handle in enumerate(adjoint_vec_handles):
            handle.put(adjoint_vec_array[:, idx])
    parallel.barrier()

    return direct_vec_handles, adjoint_vec_handles
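# For reference, a minimal sketch of what the two impulse-response helpers
# plausibly compute (the function bodies below are assumptions for
# illustration, not modred's actual test utilities), assuming identity
# inner-product weights as in the call above.
import numpy as np

def direct_impulse_response_sketch(A, B, num_steps):
    # Direct snapshots: columns of [B, A B, A^2 B, ...], the response of
    # x(k+1) = A x(k) + B u(k) to a unit impulse in each input channel.
    blocks = [B]
    for _ in range(num_steps - 1):
        blocks.append(A @ blocks[-1])
    return np.concatenate(blocks, axis=1)

def adjoint_impulse_response_sketch(A, C, num_steps):
    # Adjoint snapshots: columns of [C^H, A^H C^H, ...], the response of
    # the adjoint system to a unit impulse in each output channel.
    blocks = [C.conj().T]
    for _ in range(num_steps - 1):
        blocks.append(A.conj().T @ blocks[-1])
    return np.concatenate(blocks, axis=1)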
def test_compute_proj_coeffs(self):
    rtol = 1e-10
    atol = 1e-12

    # Compute POD using modred.  (The properties defining a projection onto
    # POD modes require manipulations involving the correct decomposition
    # and modes, so we cannot isolate the projection step from those
    # computations.)
    POD = pod.PODHandles(inner_product=np.vdot, verbosity=0)
    POD.compute_decomp(self.vec_handles)
    mode_idxs = range(POD.eigvals.size)
    mode_handles = [VecHandlePickle(self.mode_path % i) for i in mode_idxs]
    POD.compute_modes(mode_idxs, mode_handles, vec_handles=self.vec_handles)

    # Compute true projection coefficients by computing the inner products
    # between modes and snapshots.
    proj_coeffs_true = POD.vec_space.compute_inner_product_array(
        mode_handles, self.vec_handles)

    # Compute projection coefficients using the POD object, which avoids
    # actually manipulating handles and computing their inner products,
    # instead using elements of the decomposition for a more efficient
    # computation.
    proj_coeffs = POD.compute_proj_coeffs()

    # Test values
    np.testing.assert_allclose(
        proj_coeffs, proj_coeffs_true, rtol=rtol, atol=atol)
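# The shortcut tested above rests on a small identity: with correlation
# array X^H X = U E U^H and modes Phi = X U E^{-1/2}, the projection
# coefficients Phi^H X equal E^{1/2} U^H, so they follow from the
# decomposition alone.  A minimal numpy sketch of that identity (all names
# below are local to the sketch):
import numpy as np

num_states, num_vecs = 30, 10
X = (np.random.random((num_states, num_vecs))
     + 1j * np.random.random((num_states, num_vecs)))

# POD via the method of snapshots: eigendecomposition of X^H X
eigvals, eigvecs = np.linalg.eigh(X.conj().T @ X)
eigvals, eigvecs = eigvals[::-1], eigvecs[:, ::-1]   # descending order
modes = X @ eigvecs @ np.diag(eigvals ** -0.5)

# Direct inner products of modes with snapshots...
proj_coeffs_direct = modes.conj().T @ X
# ...match what the decomposition alone predicts: E^{1/2} U^H
proj_coeffs_shortcut = np.diag(eigvals ** 0.5) @ eigvecs.conj().T
np.testing.assert_allclose(
    proj_coeffs_direct, proj_coeffs_shortcut, atol=1e-10)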
def setUp(self):
    # Specify output locations
    if not os.access('.', os.W_OK):
        raise RuntimeError('Cannot write to current directory')
    self.test_dir = 'files_POD_DELETE_ME'
    if not os.path.isdir(self.test_dir):
        parallel.call_from_rank_zero(os.mkdir, self.test_dir)
    self.vec_path = join(self.test_dir, 'vec_%03d.pkl')
    self.mode_path = join(self.test_dir, 'mode_%03d.pkl')

    # Specify data dimensions
    self.num_states = 30
    self.num_vecs = 10

    # Generate random data and write to disk using handles
    self.vecs_array = (
        parallel.call_and_bcast(
            np.random.random, (self.num_states, self.num_vecs))
        + 1j * parallel.call_and_bcast(
            np.random.random, (self.num_states, self.num_vecs)))
    self.vec_handles = [
        VecHandlePickle(self.vec_path % i) for i in range(self.num_vecs)]
    for idx, hdl in enumerate(self.vec_handles):
        hdl.put(self.vecs_array[:, idx])

    parallel.barrier()
def test_compute_modes(self):
    rtol = 1e-10
    atol = 1e-12

    # Compute POD using modred.  (The properties defining a POD mode
    # require manipulations involving the correct decomposition, so we
    # cannot isolate the mode computation from the decomposition step.)
    POD = pod.PODHandles(np.vdot, verbosity=0)
    POD.compute_decomp(self.vec_handles)

    # Select a subset of modes to compute.  Compute at least half the
    # modes, and up to all of them.  Make sure to use unique values.
    # (This may reduce the number of modes computed.)
    num_modes = parallel.call_and_bcast(
        np.random.randint, POD.eigvals.size // 2, POD.eigvals.size + 1)
    mode_idxs = np.unique(parallel.call_and_bcast(
        np.random.randint, 0, POD.eigvals.size, num_modes))

    # Create handles for the modes
    mode_handles = [VecHandlePickle(self.mode_path % i) for i in mode_idxs]

    # Compute modes
    POD.compute_modes(mode_idxs, mode_handles, vec_handles=self.vec_handles)

    # Test modes
    np.testing.assert_allclose(
        POD.vec_space.compute_inner_product_array(
            mode_handles, self.vec_handles).dot(
                POD.vec_space.compute_inner_product_array(
                    self.vec_handles, mode_handles)),
        np.diag(POD.eigvals[mode_idxs]),
        rtol=rtol, atol=atol)
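# The assertion above uses the identity that, for modes Phi = X U E^{-1/2}
# and any subset of mode indices, (Phi^H X)(X^H Phi) = diag(selected
# eigenvalues): the coefficient rows E^{1/2} U^H are mutually orthogonal
# with squared norms equal to the eigenvalues.  A minimal numpy sketch
# (names local to the sketch):
import numpy as np

X = np.random.random((30, 10)) + 1j * np.random.random((30, 10))
eigvals, eigvecs = np.linalg.eigh(X.conj().T @ X)
eigvals, eigvecs = eigvals[::-1], eigvecs[:, ::-1]
modes = X @ eigvecs @ np.diag(eigvals ** -0.5)

idxs = [0, 2, 5]
coeffs = modes[:, idxs].conj().T @ X
np.testing.assert_allclose(
    coeffs @ coeffs.conj().T, np.diag(eigvals[idxs]), atol=1e-10)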
def test_compute_inner_product_array_types(self):
    num_row_vecs = 4
    num_col_vecs = 6
    num_states = 7

    row_vec_path = join(self.test_dir, 'row_vec_%03d.pkl')
    col_vec_path = join(self.test_dir, 'col_vec_%03d.pkl')

    # Check complex and real data
    for is_complex in [True, False]:

        # Generate data, adding an imaginary part for the complex case
        row_vec_array = np.random.random((num_states, num_row_vecs))
        col_vec_array = np.random.random((num_states, num_col_vecs))
        if is_complex:
            row_vec_array = row_vec_array + (
                1j * np.random.random((num_states, num_row_vecs)))
            col_vec_array = col_vec_array + (
                1j * np.random.random((num_states, num_col_vecs)))

        # Generate handles and save to file
        row_vec_paths = [row_vec_path % i for i in range(num_row_vecs)]
        col_vec_paths = [col_vec_path % i for i in range(num_col_vecs)]
        row_vec_handles = [VecHandlePickle(path) for path in row_vec_paths]
        col_vec_handles = [VecHandlePickle(path) for path in col_vec_paths]
        for idx, handle in enumerate(row_vec_handles):
            handle.put(row_vec_array[:, idx])
        for idx, handle in enumerate(col_vec_handles):
            handle.put(col_vec_array[:, idx])

        # Compute inner product arrays and check types
        inner_product_array = self.vec_space.compute_inner_product_array(
            row_vec_handles, col_vec_handles)
        symm_inner_product_array = (
            self.vec_space.compute_symm_inner_product_array(
                row_vec_handles))
        self.assertEqual(inner_product_array.dtype, row_vec_array.dtype)
        self.assertEqual(
            symm_inner_product_array.dtype, row_vec_array.dtype)
def test_derivs(self):
    """Test that derivatives can be computed correctly."""
    dt = 0.1

    # Compute true derivatives using a first-order forward difference
    true_derivs = []
    num_vecs = len(self.basis_vec_handles)
    for i in range(num_vecs):
        true_derivs.append(
            (self.A_on_basis_vec_handles[i].get()
             - self.basis_vec_handles[i].get()).squeeze() / dt)

    # Compute derivatives using modred and compare to the true values
    deriv_handles = [
        VecHandlePickle(join(self.test_dir, 'deriv_test%d' % i))
        for i in range(num_vecs)]
    lgp.compute_derivs_handles(
        self.basis_vec_handles, self.A_on_basis_vec_handles,
        deriv_handles, dt)
    derivs_loaded = [v.get() for v in deriv_handles]
    derivs_loaded = list(map(np.squeeze, derivs_loaded))
    list(map(np.testing.assert_allclose, derivs_loaded, true_derivs))
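# compute_derivs_handles pairs each snapshot with its image under the
# one-step operator and forms a first-order forward difference.  A minimal
# array-based sketch of that computation (handles replaced by plain numpy
# arrays; the function below is an illustration, not modred's code):
import numpy as np

def compute_derivs_sketch(vecs, vecs_advanced, dt):
    # d/dt x ~= (x(t + dt) - x(t)) / dt for each snapshot pair
    return [(x_adv - x) / dt for x, x_adv in zip(vecs, vecs_advanced)]

# Usage: derivative of x(t) = exp(a t) x0, sampled with a small step dt
a, dt = -0.5, 1e-4
x0 = np.ones(3)
x_adv = np.exp(a * dt) * x0   # one step of the exact propagator
np.testing.assert_allclose(
    compute_derivs_sketch([x0], [x_adv], dt)[0], a * x0, rtol=1e-4)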
def generate_data_set(self, num_basis_vecs, num_adjoint_basis_vecs,
                      num_states, num_inputs, num_outputs):
    """Generates random data, saves it to disk, and computes the true
    reduced A, B, and C arrays."""
    # Create handles
    self.basis_vec_handles = [
        VecHandlePickle(self.basis_vec_path % i)
        for i in range(self.num_basis_vecs)]
    self.adjoint_basis_vec_handles = [
        VecHandlePickle(self.adjoint_basis_vec_path % i)
        for i in range(self.num_adjoint_basis_vecs)]
    self.A_on_basis_vec_handles = [
        VecHandlePickle(self.A_on_basis_vec_path % i)
        for i in range(self.num_basis_vecs)]
    self.B_on_standard_basis_handles = [
        VecHandlePickle(self.B_on_basis_path % i)
        for i in range(self.num_inputs)]
    self.C_on_basis_vec_handles = [
        VecHandlePickle(self.C_on_basis_vec_path % i)
        for i in range(self.num_basis_vecs)]

    # Generate random arrays for the bases and operators
    self.basis_vec_array = (
        parallel.call_and_bcast(
            np.random.random, (num_states, num_basis_vecs))
        + 1j * parallel.call_and_bcast(
            np.random.random, (num_states, num_basis_vecs)))
    self.adjoint_basis_vec_array = (
        parallel.call_and_bcast(
            np.random.random, (num_states, num_adjoint_basis_vecs))
        + 1j * parallel.call_and_bcast(
            np.random.random, (num_states, num_adjoint_basis_vecs)))
    self.A_array = (
        parallel.call_and_bcast(np.random.random, (num_states, num_states))
        + 1j * parallel.call_and_bcast(
            np.random.random, (num_states, num_states)))
    self.B_array = (
        parallel.call_and_bcast(np.random.random, (num_states, num_inputs))
        + 1j * parallel.call_and_bcast(
            np.random.random, (num_states, num_inputs)))
    self.C_array = (
        parallel.call_and_bcast(np.random.random, (num_outputs, num_states))
        + 1j * parallel.call_and_bcast(
            np.random.random, (num_outputs, num_states)))

    # Extract columns as individual vectors and apply the operators
    self.basis_vecs = [
        self.basis_vec_array[:, i].squeeze()
        for i in range(num_basis_vecs)]
    self.adjoint_basis_vecs = [
        self.adjoint_basis_vec_array[:, i].squeeze()
        for i in range(num_adjoint_basis_vecs)]
    self.A_on_basis_vecs = [
        self.A_array.dot(basis_vec).squeeze()
        for basis_vec in self.basis_vecs]
    self.B_on_basis = [
        self.B_array[:, i].squeeze() for i in range(self.num_inputs)]
    self.C_on_basis_vecs = [
        np.array(self.C_array.dot(basis_vec).squeeze(), ndmin=1)
        for basis_vec in self.basis_vecs]

    # Save data to disk
    if parallel.is_rank_zero():
        for handle, vec in zip(self.basis_vec_handles, self.basis_vecs):
            handle.put(vec)
        for handle, vec in zip(
                self.adjoint_basis_vec_handles, self.adjoint_basis_vecs):
            handle.put(vec)
        for handle, vec in zip(
                self.A_on_basis_vec_handles, self.A_on_basis_vecs):
            handle.put(vec)
        for handle, vec in zip(
                self.B_on_standard_basis_handles, self.B_on_basis):
            handle.put(vec)
        for handle, vec in zip(
                self.C_on_basis_vec_handles, self.C_on_basis_vecs):
            handle.put(vec)
    parallel.barrier()

    # Compute the true reduced-order arrays via Petrov-Galerkin projection
    self.A_true = self.adjoint_basis_vec_array.conj().T.dot(
        self.A_array.dot(self.basis_vec_array))
    self.B_true = self.adjoint_basis_vec_array.conj().T.dot(self.B_array)
    self.C_true = self.C_array.dot(self.basis_vec_array)

    # For non-biorthonormal bases, correct by the inverse of the projection
    # of the direct basis onto the adjoint basis
    self.proj_array = np.linalg.inv(
        self.adjoint_basis_vec_array.conj().T.dot(self.basis_vec_array))
    self.A_true_non_orth = self.proj_array.dot(self.A_true)
    self.B_true_non_orth = self.proj_array.dot(self.B_true)
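# The "true" reduced arrays above are the Petrov-Galerkin projection of the
# full operators: A_r = Psi^H A Phi, B_r = Psi^H B, C_r = C Phi, with the
# extra factor (Psi^H Phi)^{-1} correcting for bases that are not
# biorthonormal.  A minimal numpy sketch (all names local to the sketch)
# showing why that factor is the right correction:
import numpy as np

def rand_c(*shape):
    return np.random.random(shape) + 1j * np.random.random(shape)

num_states, num_modes = 10, 4
Phi = rand_c(num_states, num_modes)   # direct basis
Psi = rand_c(num_states, num_modes)   # adjoint basis

# Oblique projector onto span(Phi) along directions orthogonal to span(Psi)
P = np.linalg.inv(Psi.conj().T @ Phi)
Pi = Phi @ P @ Psi.conj().T

# Pi leaves any vector already in span(Phi) unchanged, even though Phi and
# Psi are not biorthonormal; without the inverse factor it would not.
x = Phi @ rand_c(num_modes)
np.testing.assert_allclose(Pi @ x, x, atol=1e-10)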
def test_compute_inner_product_arrays(self):
    """Test computation of arrays of inner products."""
    rtol = 1e-10
    atol = 1e-12

    num_row_vecs_list = [
        1,
        int(round(self.total_num_vecs_in_mem / 2.)),
        self.total_num_vecs_in_mem,
        self.total_num_vecs_in_mem * 2,
        parallel.get_num_procs() + 1]
    num_col_vecs_list = num_row_vecs_list
    num_states = 6

    row_vec_path = join(self.test_dir, 'row_vec_%03d.pkl')
    col_vec_path = join(self.test_dir, 'col_vec_%03d.pkl')

    for num_row_vecs in num_row_vecs_list:
        for num_col_vecs in num_col_vecs_list:

            # Generate vecs
            parallel.barrier()
            row_vec_array = (
                parallel.call_and_bcast(
                    np.random.random, (num_states, num_row_vecs))
                + 1j * parallel.call_and_bcast(
                    np.random.random, (num_states, num_row_vecs)))
            col_vec_array = (
                parallel.call_and_bcast(
                    np.random.random, (num_states, num_col_vecs))
                + 1j * parallel.call_and_bcast(
                    np.random.random, (num_states, num_col_vecs)))
            row_vec_handles = [
                VecHandlePickle(row_vec_path % i)
                for i in range(num_row_vecs)]
            col_vec_handles = [
                VecHandlePickle(col_vec_path % i)
                for i in range(num_col_vecs)]

            # Save vecs
            if parallel.is_rank_zero():
                for i, h in enumerate(row_vec_handles):
                    h.put(row_vec_array[:, i])
                for i, h in enumerate(col_vec_handles):
                    h.put(col_vec_array[:, i])
            parallel.barrier()

            # If the number of rows/cols is 1, check the case of passing
            # a single handle rather than a list
            if len(row_vec_handles) == 1:
                row_vec_handles = row_vec_handles[0]
            if len(col_vec_handles) == 1:
                col_vec_handles = col_vec_handles[0]

            # Test inner product computation
            product_true = np.dot(row_vec_array.conj().T, col_vec_array)
            product_computed = self.vec_space.compute_inner_product_array(
                row_vec_handles, col_vec_handles)
            np.testing.assert_allclose(
                product_computed, product_true, rtol=rtol, atol=atol)

            # Test symmetric inner product computation
            product_true = np.dot(row_vec_array.conj().T, row_vec_array)
            product_computed = (
                self.vec_space.compute_symm_inner_product_array(
                    row_vec_handles))
            np.testing.assert_allclose(
                product_computed, product_true, rtol=rtol, atol=atol)
def test_lin_combine(self):
    # Set test tolerances
    rtol = 1e-10
    atol = 1e-12

    # Setup
    mode_path = join(self.test_dir, 'mode_%03d.pkl')
    vec_path = join(self.test_dir, 'vec_%03d.pkl')

    # Test cases where the number of modes is less than, equal to, and
    # greater than num_states, num_vecs, and total_num_vecs_in_mem.  Also
    # check the case of passing None to the mode_indices argument.
    num_states = 20
    num_vecs_list = [1, 15, 40]
    num_modes_list = [
        None, 1, 8, 10, 20, 25, 45,
        int(np.ceil(self.total_num_vecs_in_mem / 2.)),
        self.total_num_vecs_in_mem,
        self.total_num_vecs_in_mem * 2]

    # Check for correct computations
    for num_vecs in num_vecs_list:
        for num_modes in num_modes_list:
            for squeeze in [True, False]:

                # Generate data and then broadcast to all procs
                vec_handles = [
                    VecHandlePickle(vec_path % i) for i in range(num_vecs)]
                vec_array, coeff_array, true_modes = (
                    parallel.call_and_bcast(
                        self.generate_vecs_modes, num_states, num_vecs,
                        num_modes=num_modes, squeeze=squeeze))
                if parallel.is_rank_zero():
                    for vec_index, vec_handle in enumerate(vec_handles):
                        vec_handle.put(vec_array[:, vec_index])
                parallel.barrier()

                # Choose which modes to compute
                if num_modes is None:
                    mode_idxs_arg = None
                    mode_idxs_vals = range(true_modes.shape[1])
                elif num_modes == 1:
                    mode_idxs_arg = 0
                    mode_idxs_vals = [0]
                else:
                    mode_idxs_arg = np.unique(
                        parallel.call_and_bcast(
                            np.random.randint,
                            0, high=num_modes, size=num_modes // 2))
                    mode_idxs_vals = mode_idxs_arg
                mode_handles = [
                    VecHandlePickle(mode_path % mode_num)
                    for mode_num in mode_idxs_vals]

                # Save modes to files
                self.vec_space.lin_combine(
                    mode_handles, vec_handles, coeff_array,
                    coeff_array_col_indices=mode_idxs_arg)

                # Test modes one by one
                for mode_idx in mode_idxs_vals:
                    computed_mode = VecHandlePickle(
                        mode_path % mode_idx).get()
                    np.testing.assert_allclose(
                        computed_mode, true_modes[:, mode_idx],
                        rtol=rtol, atol=atol)
                parallel.barrier()
            parallel.barrier()
        parallel.barrier()

    # Test that errors are caught for mismatched dimensions
    mode_handles = [VecHandlePickle(mode_path % i) for i in range(10)]
    vec_handles = [VecHandlePickle(vec_path % i) for i in range(15)]
    coeffs_array_too_short = np.zeros(
        (len(vec_handles) - 1, len(mode_handles)))
    coeffs_array_too_fat = np.zeros(
        (len(vec_handles), len(mode_handles) + 1))
    index_list_too_long = range(len(mode_handles) + 1)
    self.assertRaises(
        ValueError, self.vec_space.lin_combine,
        mode_handles, vec_handles, coeffs_array_too_short)
    self.assertRaises(
        ValueError, self.vec_space.lin_combine,
        mode_handles, vec_handles, coeffs_array_too_fat)
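# lin_combine forms each mode as a linear combination of the snapshots,
# i.e. modes = vecs . coeffs, with an optional column subset.  A minimal
# array-based sketch of the computation being verified (the function below
# is an illustration, not modred's implementation):
import numpy as np

def lin_combine_sketch(vec_array, coeff_array, col_indices=None):
    # Output mode j is sum_k vec_array[:, k] * coeff_array[k, j];
    # col_indices optionally selects which modes (columns) to form.
    if col_indices is not None:
        coeff_array = coeff_array[:, col_indices]
    return vec_array @ coeff_array

# Usage: build 3 modes from 5 snapshots, then only modes 0 and 2
vecs = np.random.random((20, 5))
coeffs = np.random.random((5, 3))
all_modes = lin_combine_sketch(vecs, coeffs)
some_modes = lin_combine_sketch(vecs, coeffs, col_indices=[0, 2])
np.testing.assert_allclose(some_modes, all_modes[:, [0, 2]])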
def test_compute_proj_coeffs(self):
    # Set test tolerances.  Use a slightly more relaxed absolute tolerance
    # here because the projection test uses modes that may correspond to
    # smaller Hankel singular values (i.e., less controllable/observable
    # states).  Those mode pairs are not as close to biorthogonal, so a
    # more relaxed tolerance is required.
    rtol = 1e-8
    atol = 1e-8

    # Test a single input/output as well as multiple inputs/outputs.
    # Allow for more inputs/outputs than states.  (This is determined in
    # setUp().)
    for num_inputs in self.num_inputs_list:
        for num_outputs in self.num_outputs_list:

            # Get impulse response data
            direct_vec_handles, adjoint_vec_handles = (
                self._helper_get_impulse_response_handles(
                    num_inputs, num_outputs))

            # Create BPOD object and compute the decomposition and modes.
            # (The properties defining a projection onto BPOD modes
            # require manipulations involving the correct decomposition
            # and modes, so we cannot isolate the projection step from
            # those computations.)  Use a relative tolerance to avoid
            # Hankel singular values that may correspond to very
            # uncontrollable/unobservable states.  It is ok to use a more
            # relaxed tolerance here than in the actual assert statements:
            # here we are saying it is ok to ignore highly
            # uncontrollable/unobservable states, rather than allowing
            # loose tolerances in the comparison of two numbers.
            # Furthermore, in actual use, users would likely want to
            # ignore relatively small Hankel singular values anyway, as
            # that is the point of doing a balancing transformation.
            BPOD = bpod.BPODHandles(inner_product=np.vdot, verbosity=0)
            BPOD.compute_decomp(
                direct_vec_handles, adjoint_vec_handles,
                num_inputs=num_inputs, num_outputs=num_outputs,
                rtol=1e-6, atol=1e-12)
            mode_idxs = range(BPOD.sing_vals.size)
            direct_mode_handles = [
                VecHandlePickle(self.direct_mode_path % i)
                for i in mode_idxs]
            adjoint_mode_handles = [
                VecHandlePickle(self.adjoint_mode_path % i)
                for i in mode_idxs]
            BPOD.compute_direct_modes(
                mode_idxs, direct_mode_handles,
                direct_vec_handles=direct_vec_handles)
            BPOD.compute_adjoint_modes(
                mode_idxs, adjoint_mode_handles,
                adjoint_vec_handles=adjoint_vec_handles)

            # Compute true projection coefficients by computing the inner
            # products between modes and snapshots.
            direct_proj_coeffs_true = (
                BPOD.vec_space.compute_inner_product_array(
                    adjoint_mode_handles, direct_vec_handles))
            adjoint_proj_coeffs_true = (
                BPOD.vec_space.compute_inner_product_array(
                    direct_mode_handles, adjoint_vec_handles))

            # Compute projection coefficients using the BPOD object, which
            # avoids actually manipulating handles and computing inner
            # products, instead using elements of the decomposition for a
            # more efficient computation.
            direct_proj_coeffs = BPOD.compute_direct_proj_coeffs()
            adjoint_proj_coeffs = BPOD.compute_adjoint_proj_coeffs()

            # Test values
            np.testing.assert_allclose(
                direct_proj_coeffs, direct_proj_coeffs_true,
                rtol=rtol, atol=atol)
            np.testing.assert_allclose(
                adjoint_proj_coeffs, adjoint_proj_coeffs_true,
                rtol=rtol, atol=atol)
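# The shortcut tested above follows from the BPOD decomposition
# Y^H X = U S V^H, with direct modes Phi = X V S^{-1/2} and adjoint modes
# Psi = Y U S^{-1/2}: then Psi^H X = S^{1/2} V^H and Phi^H Y = S^{1/2} U^H,
# so both coefficient arrays follow from the decomposition alone.  A
# minimal numpy sketch of these identities (names local to the sketch):
import numpy as np

num_states, num_snapshots = 20, 8
X = (np.random.random((num_states, num_snapshots))
     + 1j * np.random.random((num_states, num_snapshots)))
Y = (np.random.random((num_states, num_snapshots))
     + 1j * np.random.random((num_states, num_snapshots)))

# SVD of the array of inner products between adjoint and direct snapshots
U, sing_vals, Vh = np.linalg.svd(Y.conj().T @ X)
direct_modes = X @ Vh.conj().T @ np.diag(sing_vals ** -0.5)
adjoint_modes = Y @ U @ np.diag(sing_vals ** -0.5)

np.testing.assert_allclose(
    adjoint_modes.conj().T @ X,           # direct projection coefficients
    np.diag(sing_vals ** 0.5) @ Vh, atol=1e-10)
np.testing.assert_allclose(
    direct_modes.conj().T @ Y,            # adjoint projection coefficients
    np.diag(sing_vals ** 0.5) @ U.conj().T, atol=1e-10)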
def test_compute_modes(self):
    """Test computing modes in serial and parallel."""
    # Set test tolerances.  More relaxed tolerances are required for
    # testing the BPOD modes, since that test requires "squaring" the
    # Gramians and thus involves more ill-conditioned arrays.
    rtol_sqr = 1e-8
    atol_sqr = 1e-8

    # Test a single input/output as well as multiple inputs/outputs.
    # Allow for more inputs/outputs than states.  (This is determined in
    # setUp().)
    for num_inputs in self.num_inputs_list:
        for num_outputs in self.num_outputs_list:

            # Get impulse response data
            direct_vec_handles, adjoint_vec_handles = (
                self._helper_get_impulse_response_handles(
                    num_inputs, num_outputs))

            # Create BPOD object and perform decomposition.  (The
            # properties defining a BPOD mode require manipulations
            # involving the correct decomposition, so we cannot isolate
            # the mode computation from the decomposition step.)  Use a
            # relative tolerance to avoid Hankel singular values that may
            # correspond to very uncontrollable/unobservable states.  It
            # is ok to use a more relaxed tolerance here than in the
            # actual assert statements: here we are saying it is ok to
            # ignore highly uncontrollable/unobservable states, rather
            # than allowing loose tolerances in the comparison of two
            # numbers.  Furthermore, in actual use, users would likely
            # want to ignore relatively small Hankel singular values
            # anyway, as that is the point of doing a balancing
            # transformation.
            BPOD = bpod.BPODHandles(inner_product=np.vdot, verbosity=0)
            BPOD.compute_decomp(
                direct_vec_handles, adjoint_vec_handles,
                num_inputs=num_inputs, num_outputs=num_outputs,
                rtol=1e-6, atol=1e-12)

            # Select a subset of modes to compute.  Compute at least half
            # the modes, and up to all of them.  Make sure to use unique
            # values.  (This may reduce the number of modes computed.)
            num_modes = parallel.call_and_bcast(
                np.random.randint,
                BPOD.sing_vals.size // 2, BPOD.sing_vals.size + 1)
            mode_idxs = np.unique(parallel.call_and_bcast(
                np.random.randint, 0, BPOD.sing_vals.size, num_modes))

            # Create handles for the modes
            direct_mode_handles = [
                VecHandlePickle(self.direct_mode_path % i)
                for i in mode_idxs]
            adjoint_mode_handles = [
                VecHandlePickle(self.adjoint_mode_path % i)
                for i in mode_idxs]

            # Compute modes
            BPOD.compute_direct_modes(
                mode_idxs, direct_mode_handles,
                direct_vec_handles=direct_vec_handles)
            BPOD.compute_adjoint_modes(
                mode_idxs, adjoint_mode_handles,
                adjoint_vec_handles=adjoint_vec_handles)

            # Test modes against the empirical Gramians
            np.testing.assert_allclose(
                BPOD.vec_space.compute_inner_product_array(
                    adjoint_mode_handles, direct_vec_handles).dot(
                        BPOD.vec_space.compute_inner_product_array(
                            direct_vec_handles, adjoint_mode_handles)),
                np.diag(BPOD.sing_vals[mode_idxs]),
                rtol=rtol_sqr, atol=atol_sqr)
            np.testing.assert_allclose(
                BPOD.vec_space.compute_inner_product_array(
                    direct_mode_handles, adjoint_vec_handles).dot(
                        BPOD.vec_space.compute_inner_product_array(
                            adjoint_vec_handles, direct_mode_handles)),
                np.diag(BPOD.sing_vals[mode_idxs]),
                rtol=rtol_sqr, atol=atol_sqr)
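# The assertions above use a companion identity: with Psi = Y U S^{-1/2}
# from Y^H X = U S V^H, any subset of adjoint modes satisfies
# (Psi^H X)(X^H Psi) = diag(selected sing_vals), since Psi^H X = S^{1/2} V^H
# and V has orthonormal columns (and likewise with the roles of the direct
# and adjoint arrays swapped).  A minimal numpy sketch (names local to the
# sketch):
import numpy as np

num_states, num_snapshots = 20, 8
X = (np.random.random((num_states, num_snapshots))
     + 1j * np.random.random((num_states, num_snapshots)))
Y = (np.random.random((num_states, num_snapshots))
     + 1j * np.random.random((num_states, num_snapshots)))
U, sing_vals, Vh = np.linalg.svd(Y.conj().T @ X)
adjoint_modes = Y @ U @ np.diag(sing_vals ** -0.5)

idxs = [0, 1, 3]
coeffs = adjoint_modes[:, idxs].conj().T @ X
np.testing.assert_allclose(
    coeffs @ coeffs.conj().T, np.diag(sing_vals[idxs]), atol=1e-10)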