def setUp(self):
    """Create a scratch directory and write random complex data via handles."""
    # Fail fast if the working directory is read-only.
    if not os.access('.', os.W_OK):
        raise RuntimeError('Cannot write to current directory')
    self.test_dir = 'files_POD_DELETE_ME'
    if not os.path.isdir(self.test_dir):
        parallel.call_from_rank_zero(os.mkdir, self.test_dir)
    self.vec_path = join(self.test_dir, 'vec_%03d.pkl')
    self.mode_path = join(self.test_dir, 'mode_%03d.pkl')

    # Data dimensions
    self.num_states = 30
    self.num_vecs = 10

    # Random complex data; call_and_bcast keeps every rank consistent.
    shape = (self.num_states, self.num_vecs)
    real_part = parallel.call_and_bcast(np.random.random, shape)
    imag_part = parallel.call_and_bcast(np.random.random, shape)
    self.vecs_array = real_part + 1j * imag_part

    # One pickle handle per column of the data array.
    self.vec_handles = [
        VecHandlePickle(self.vec_path % i) for i in range(self.num_vecs)]
    for col, handle in enumerate(self.vec_handles):
        handle.put(self.vecs_array[:, col])
    parallel.barrier()
def setUp(self):
    """Set up output paths and randomized system sizes for BPOD tests."""
    if not os.access('.', os.W_OK):
        raise RuntimeError('Cannot write to current directory')
    self.test_dir = 'files_BPOD_DELETE_ME'
    if not os.path.isdir(self.test_dir):
        parallel.call_from_rank_zero(os.mkdir, self.test_dir)

    # Filename templates for vectors and modes.
    self.direct_vec_path = join(self.test_dir, 'direct_vec_%03d.pkl')
    self.adjoint_vec_path = join(self.test_dir, 'adjoint_vec_%03d.pkl')
    self.direct_mode_path = join(self.test_dir, 'direct_mode_%03d.pkl')
    self.adjoint_mode_path = join(self.test_dir, 'adjoint_mode_%03d.pkl')

    # System dimensions: exercise both single and multiple inputs/outputs,
    # including the case of more inputs/outputs than states.
    self.num_states = 10
    rand_num_inputs = parallel.call_and_bcast(
        np.random.randint, 2, self.num_states + 2)
    self.num_inputs_list = [1, rand_num_inputs]
    rand_num_outputs = parallel.call_and_bcast(
        np.random.randint, 2, self.num_states + 2)
    self.num_outputs_list = [1, rand_num_outputs]

    # Length of the impulse responses to run.
    self.num_steps = self.num_states + 1
    parallel.barrier()
def setUp(self):
    """Prepare a temp directory holding pickled random snapshot columns."""
    if not os.access('.', os.W_OK):
        raise RuntimeError('Cannot write to current directory')
    self.test_dir = 'files_POD_DELETE_ME'
    if not os.path.isdir(self.test_dir):
        parallel.call_from_rank_zero(os.mkdir, self.test_dir)
    self.vec_path = join(self.test_dir, 'vec_%03d.pkl')
    self.mode_path = join(self.test_dir, 'mode_%03d.pkl')

    # Data dimensions
    self.num_states = 30
    self.num_vecs = 10

    # Random complex snapshot matrix, identical on every rank.
    dims = (self.num_states, self.num_vecs)
    self.vecs_array = (
        parallel.call_and_bcast(np.random.random, dims)
        + 1j * parallel.call_and_bcast(np.random.random, dims))

    # Create each handle and immediately pickle its column to disk.
    self.vec_handles = []
    for vec_idx in range(self.num_vecs):
        handle = VecHandlePickle(self.vec_path % vec_idx)
        handle.put(self.vecs_array[:, vec_idx])
        self.vec_handles.append(handle)
    parallel.barrier()
def setUp(self):
    """Build a VectorSpaceHandles instance and record its expected defaults."""
    if not os.access('.', os.W_OK):
        raise RuntimeError('Cannot write to current directory')
    self.test_dir = 'files_vectorspace_DELETE_ME'
    if not os.path.isdir(self.test_dir):
        parallel.call_from_rank_zero(os.mkdir, self.test_dir)

    self.max_vecs_per_proc = 10
    self.total_num_vecs_in_mem = (
        self.max_vecs_per_proc * parallel.get_num_procs())

    self.vec_space = vspc.VectorSpaceHandles(
        inner_product=np.vdot, verbosity=0)
    self.vec_space.max_vecs_per_proc = self.max_vecs_per_proc

    # Expected default attribute values; verbosity is forced to 0 (the class
    # default is 1) so nothing prints during the tests.
    default_vecs_per_proc = (
        10000 * parallel.get_num_nodes() // parallel.get_num_procs())
    self.default_data_members = {
        'inner_product': np.vdot,
        'max_vecs_per_node': 10000,
        'max_vecs_per_proc': default_vecs_per_proc,
        'verbosity': 0,
        'print_interval': 10,
        'prev_print_time': 0.}
    parallel.barrier()
def setUp(self):
    """Configure file paths and random problem sizes for the BPOD tests."""
    if not os.access('.', os.W_OK):
        raise RuntimeError('Cannot write to current directory')
    self.test_dir = 'files_BPOD_DELETE_ME'
    if not os.path.isdir(self.test_dir):
        parallel.call_from_rank_zero(os.mkdir, self.test_dir)

    # Filename templates, one per kind of vector/mode.
    for attr, template in (
            ('direct_vec_path', 'direct_vec_%03d.pkl'),
            ('adjoint_vec_path', 'adjoint_vec_%03d.pkl'),
            ('direct_mode_path', 'direct_mode_%03d.pkl'),
            ('adjoint_mode_path', 'adjoint_mode_%03d.pkl')):
        setattr(self, attr, join(self.test_dir, template))

    # Dimensions: cover single I/O, multiple I/O, and the possibility of
    # more inputs/outputs than states (randint upper bound is exclusive).
    self.num_states = 10
    self.num_inputs_list = [
        1,
        parallel.call_and_bcast(np.random.randint, 2, self.num_states + 2)]
    self.num_outputs_list = [
        1,
        parallel.call_and_bcast(np.random.randint, 2, self.num_states + 2)]

    # Number of impulse-response steps.
    self.num_steps = self.num_states + 1
    parallel.barrier()
def test_puts_gets(self):
    """Verify decomposition matrices survive a put/get round trip on disk."""
    test_dir = 'DELETE_ME_test_files_pod'
    if not os.access('.', os.W_OK):
        raise RuntimeError('Cannot write to current directory')
    if not os.path.isdir(test_dir):
        parallel.call_from_rank_zero(os.mkdir, test_dir)
    num_vecs = 10
    num_states = 30

    # Reference data, broadcast so every rank holds the same arrays.
    correlation_mat_true = parallel.call_and_bcast(
        np.random.random, (num_vecs, num_vecs))
    eigvals_true = parallel.call_and_bcast(np.random.random, num_vecs)
    eigvecs_true = parallel.call_and_bcast(
        np.random.random, (num_states, num_vecs))

    # Store the reference data through one PODHandles instance.
    pod_save = PODHandles(None, verbosity=0)
    pod_save.correlation_mat = correlation_mat_true
    pod_save.eigvals = eigvals_true
    pod_save.eigvecs = eigvecs_true

    eigvecs_path = join(test_dir, 'eigvecs.txt')
    eigvals_path = join(test_dir, 'eigvals.txt')
    correlation_mat_path = join(test_dir, 'correlation.txt')
    pod_save.put_decomp(eigvals_path, eigvecs_path)
    pod_save.put_correlation_mat(correlation_mat_path)
    parallel.barrier()

    # Load everything back with a fresh instance and compare.
    pod_load = PODHandles(None, verbosity=0)
    pod_load.get_decomp(eigvals_path, eigvecs_path)
    correlation_mat_loaded = util.load_array_text(correlation_mat_path)
    np.testing.assert_allclose(correlation_mat_loaded, correlation_mat_true)
    np.testing.assert_allclose(pod_load.eigvals, eigvals_true)
    np.testing.assert_allclose(pod_load.eigvecs, eigvecs_true)
def setUp(self):
    """Instantiate the vector space under test and its expected defaults."""
    if not os.access('.', os.W_OK):
        raise RuntimeError('Cannot write to current directory')
    self.test_dir = 'files_vectorspace_DELETE_ME'
    if not os.path.isdir(self.test_dir):
        parallel.call_from_rank_zero(os.mkdir, self.test_dir)

    self.max_vecs_per_proc = 10
    num_procs = parallel.get_num_procs()
    self.total_num_vecs_in_mem = num_procs * self.max_vecs_per_proc

    # Verbosity 0 keeps the object quiet during testing.
    self.vec_space = vspc.VectorSpaceHandles(
        inner_product=np.vdot, verbosity=0)
    self.vec_space.max_vecs_per_proc = self.max_vecs_per_proc

    # Default data members to compare against; verbosity is 0 here even
    # though the class default is 1, so no messages print during tests.
    self.default_data_members = {
        'inner_product': np.vdot,
        'max_vecs_per_node': 10000,
        'max_vecs_per_proc': 10000 * parallel.get_num_nodes() // num_procs,
        'verbosity': 0,
        'print_interval': 10,
        'prev_print_time': 0.}
    parallel.barrier()
def tearDown(self):
    """Delete the test directory after synchronizing all MPI ranks."""
    # Wait for every rank to finish using the files before deleting.
    parallel.barrier()
    # Only rank zero removes the tree; ignore_errors tolerates a missing dir.
    parallel.call_from_rank_zero(rmtree, self.test_dir, ignore_errors=True)
    # Ensure deletion completes before any rank proceeds to the next test.
    parallel.barrier()
def setUp(self):
    """Build a random stable system, write its direct/adjoint impulse
    responses to disk, and precompute the exact BPOD reference quantities.
    """
    if not os.access('.', os.W_OK):
        raise RuntimeError('Cannot write to current directory')
    self.test_dir = 'DELETE_ME_test_files_bpod'
    if not os.path.isdir(self.test_dir):
        parallel.call_from_rank_zero(os.mkdir, self.test_dir)

    self.mode_nums = [2, 3, 0]
    self.num_direct_vecs = 10
    self.num_adjoint_vecs = 12
    self.num_inputs = 1
    self.num_outputs = 1
    self.num_states = 20

    # Random stable discrete-time system (A, B, C), broadcast from rank zero
    # so all ranks hold identical matrices.
    A = np.mat(
        parallel.call_and_bcast(util.drss, self.num_states, 1, 1)[0])
    B = np.mat(parallel.call_and_bcast(
        np.random.random, (self.num_states, self.num_inputs)))
    C = np.mat(parallel.call_and_bcast(
        np.random.random, (self.num_outputs, self.num_states)))

    def _impulse_response(transition, start, num_terms):
        # Return [start, T @ start, T^2 @ start, ...] with num_terms entries.
        terms = [start]
        power = np.identity(transition.shape[0])
        for _ in range(num_terms - 1):
            power = power.dot(transition)
            terms.append(power.dot(start))
        return terms

    # Direct response uses (A, B); adjoint response uses (A^H, C^H).
    self.direct_vecs = _impulse_response(A, B, self.num_direct_vecs)
    self.direct_vec_array = np.array(self.direct_vecs).squeeze().T
    self.adjoint_vecs = _impulse_response(A.H, C.H, self.num_adjoint_vecs)
    self.adjoint_vec_array = np.array(self.adjoint_vecs).squeeze().T

    # Write the vectors to text files through handles (rank zero only).
    self.direct_vec_path = join(self.test_dir, 'direct_vec_%03d.txt')
    self.adjoint_vec_path = join(self.test_dir, 'adjoint_vec_%03d.txt')
    self.direct_vec_handles = [
        V.VecHandleArrayText(self.direct_vec_path % i)
        for i in range(self.num_direct_vecs)]
    self.adjoint_vec_handles = [
        V.VecHandleArrayText(self.adjoint_vec_path % i)
        for i in range(self.num_adjoint_vecs)]
    if parallel.is_rank_zero():
        for vec, handle in zip(self.direct_vecs, self.direct_vec_handles):
            handle.put(vec)
        for vec, handle in zip(self.adjoint_vecs, self.adjoint_vec_handles):
            handle.put(vec)

    # Exact BPOD quantities for comparison against the computed results.
    self.Hankel_mat_true = np.dot(
        self.adjoint_vec_array.T, self.direct_vec_array)
    self.L_sing_vecs_true, self.sing_vals_true, self.R_sing_vecs_true = (
        parallel.call_and_bcast(util.svd, self.Hankel_mat_true, atol=1e-10))
    sing_vals_inv_sqrt = np.mat(np.diag(self.sing_vals_true ** -0.5))
    self.direct_mode_array = (
        self.direct_vec_array
        * np.mat(self.R_sing_vecs_true)
        * sing_vals_inv_sqrt)
    self.adjoint_mode_array = (
        self.adjoint_vec_array
        * np.mat(self.L_sing_vecs_true)
        * sing_vals_inv_sqrt)

    self.my_BPOD = BPODHandles(np.vdot, verbosity=0)
    parallel.barrier()