def _helper_compute_DMD_from_data(
    self, vecs, inner_product, adv_vecs=None, max_num_eigvals=None):
    # If advanced snapshots aren't given, treat vecs as a sequential dataset
    # and use shifted copies as the unadvanced/advanced pairs.
    if adv_vecs is None:
        adv_vecs = vecs[:, 1:]
        vecs = vecs[:, :-1]
    correlation_mat = inner_product(vecs, vecs)
    cross_correlation_mat = inner_product(vecs, adv_vecs)
    V, Sigma, dummy = util.svd(correlation_mat)  # dummy = V.T
    U = vecs.dot(V).dot(np.diag(Sigma**-0.5))

    # Truncate if necessary
    if max_num_eigvals is not None and max_num_eigvals < Sigma.size:
        V = V[:, :max_num_eigvals]
        Sigma = Sigma[:max_num_eigvals]
        U = U[:, :max_num_eigvals]

    # Low-order linear map and its biorthogonal eigendecomposition
    # (W holds the right eigenvectors, Z the left eigenvectors).
    A_tilde = inner_product(U, adv_vecs).dot(V).dot(np.diag(Sigma**-0.5))
    eigvals, W, Z = util.eig_biorthog(A_tilde, scale_choice='left')

    # Build coefficients and modes (exact modes are built from the advanced
    # snapshots with a 1/eigenvalue scaling).
    build_coeffs_proj = V.dot(np.diag(Sigma**-0.5)).dot(W)
    build_coeffs_exact = V.dot(np.diag(Sigma**-0.5)).dot(W).dot(
        np.diag(eigvals**-1.))
    modes_proj = vecs.dot(build_coeffs_proj)
    modes_exact = adv_vecs.dot(build_coeffs_exact)

    # Spectral coefficients: projection of the first snapshot onto the
    # adjoint modes.
    adj_modes = U.dot(Z)
    spectral_coeffs = np.abs(np.array(
        inner_product(adj_modes, np.mat(vecs[:, 0]).T)).squeeze())
    return (
        modes_exact, modes_proj, spectral_coeffs, eigvals, W, Z, Sigma, V,
        correlation_mat, cross_correlation_mat)
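# Illustrative usage sketch (not part of the original tests).  The helper
# above assumes `inner_product` takes two 2D snapshot arrays and returns the
# matrix of all pairwise inner products between their columns.  A minimal
# sketch under that assumption, using a plain Euclidean inner product and
# random sequential snapshots; the names below are illustrative only.
import numpy as np

def euclidean_inner_product(X, Y):
    # Gram matrix of pairwise inner products between the columns of X and Y.
    return np.array(X).conj().T.dot(np.array(Y))

# 20-dimensional states, 11 sequential snapshots; with adv_vecs=None the
# helper uses vecs[:, :-1] and vecs[:, 1:] as the unadvanced/advanced pairs.
vecs = np.random.random((20, 11))
# Inside the test class this would be invoked roughly as:
#     self._helper_compute_DMD_from_data(
#         vecs, euclidean_inner_product, max_num_eigvals=5)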
def test_svd(self):
    num_internals_list = [10, 50]
    num_rows_list = [3, 5, 40]
    num_cols_list = [1, 9, 70]
    for num_rows in num_rows_list:
        for num_cols in num_cols_list:
            for num_internals in num_internals_list:
                left_mat = np.mat(
                    np.random.random((num_rows, num_internals)))
                right_mat = np.mat(
                    np.random.random((num_internals, num_cols)))
                full_mat = left_mat * right_mat
                L_sing_vecs, sing_vals, R_sing_vecs = util.svd(full_mat)

                U, E, V_comp_conj = np.linalg.svd(full_mat, full_matrices=0)
                V = np.mat(V_comp_conj).H
                if num_internals < num_rows or num_internals < num_cols:
                    U = U[:, :num_internals]
                    V = V[:, :num_internals]
                    E = E[:num_internals]

                np.testing.assert_allclose(L_sing_vecs, U)
                np.testing.assert_allclose(sing_vals, E)
                np.testing.assert_allclose(R_sing_vecs, V)
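# A small numpy-only check (not part of the original test) of why the
# truncation above is needed: a product through a num_internals-dimensional
# space has rank at most num_internals, but numpy's reduced SVD still returns
# min(num_rows, num_cols) singular triplets, with the trailing singular values
# numerically zero.  util.svd presumably drops those, which is what the
# slicing in the test accounts for.
import numpy as np

left = np.random.random((40, 5))
right = np.random.random((5, 9))
U, E, Vh = np.linalg.svd(left.dot(right), full_matrices=False)
print(E.shape)       # (9,): numpy returns min(40, 9) singular values
print(E[5:].max())   # ~1e-15: everything beyond the rank is numerically zero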
def _helper_compute_DMD_from_data(self, vecs, adv_vecs, inner_product):
    correlation_mat = inner_product(vecs, vecs)
    W, Sigma, dummy = util.svd(correlation_mat)  # dummy = W.T
    U = vecs.dot(W).dot(np.diag(Sigma**-0.5))

    # Ritz values are the eigenvalues of the low-order linear map.
    ritz_vals, eig_vecs = np.linalg.eig(
        inner_product(U, adv_vecs).dot(W).dot(np.diag(Sigma**-0.5)))
    eig_vecs = np.mat(eig_vecs)
    ritz_vecs = U.dot(eig_vecs)

    # Scale the Ritz vectors so that they sum to the first snapshot.
    scaling = np.linalg.lstsq(ritz_vecs, vecs[:, 0])[0]
    scaling = np.mat(np.diag(np.array(scaling).squeeze()))
    ritz_vecs = ritz_vecs.dot(scaling)
    build_coeffs = W.dot(np.diag(Sigma**-0.5)).dot(eig_vecs).dot(scaling)
    mode_norms = np.diag(inner_product(ritz_vecs, ritz_vecs)).real
    return ritz_vals, ritz_vecs, build_coeffs, mode_norms
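# Sanity-check sketch (not part of the original tests) for the scaling step
# above: `scaling` solves ritz_vecs @ s ~= vecs[:, 0] in the least-squares
# sense, so after rescaling, the Ritz vectors should sum column-wise to the
# first snapshot.  `check_ritz_scaling` is a hypothetical helper used only
# for illustration.
import numpy as np

def check_ritz_scaling(ritz_vecs, vecs, tol=1e-8):
    # Sum of the scaled Ritz vectors should reproduce the first snapshot.
    reconstruction = np.asarray(ritz_vecs).sum(axis=1)
    first_snapshot = np.asarray(vecs)[:, 0]
    return np.allclose(reconstruction, first_snapshot, atol=tol)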
def _helper_compute_DMD_from_data(
    self, vec_array, adv_vec_array, inner_product):
    # Create lists of vecs, advanced vecs for inner product function
    vecs = [vec_array[:, i] for i in range(vec_array.shape[1])]
    adv_vecs = [adv_vec_array[:, i] for i in range(adv_vec_array.shape[1])]

    # Compute DMD
    correlation_mat = inner_product(vecs, vecs)
    W, Sigma, dummy = util.svd(correlation_mat)  # dummy = W.T
    U = vec_array.dot(W).dot(np.diag(Sigma**-0.5))
    U_list = [U[:, i] for i in range(U.shape[1])]

    # Ritz values are the eigenvalues of the low-order linear map.
    ritz_vals, eig_vecs = np.linalg.eig(
        inner_product(U_list, adv_vecs).dot(W).dot(np.diag(Sigma**-0.5)))
    eig_vecs = np.mat(eig_vecs)
    ritz_vecs = U.dot(eig_vecs)

    # Scale the Ritz vectors so that they sum to the first snapshot.
    scaling = np.linalg.lstsq(ritz_vecs, vec_array[:, 0])[0]
    scaling = np.mat(np.diag(np.array(scaling).squeeze()))
    ritz_vecs = ritz_vecs.dot(scaling)
    build_coeffs = W.dot(np.diag(Sigma**-0.5)).dot(eig_vecs).dot(scaling)
    ritz_vecs_list = [
        np.array(ritz_vecs[:, i]).squeeze()
        for i in range(ritz_vecs.shape[1])]
    mode_norms = np.diag(inner_product(ritz_vecs_list, ritz_vecs_list)).real
    return ritz_vals, ritz_vecs, build_coeffs, mode_norms
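# Illustrative list-based inner product (an assumption for this sketch, not
# part of the original tests): this version of the helper passes Python lists
# of 1D snapshot vectors, so the inner product function must accept two such
# lists and return the matrix of all pairwise inner products.
import numpy as np

def list_inner_product(vec_list_1, vec_list_2):
    # Entry [i, j] is <vec_list_1[i], vec_list_2[j]>; np.vdot conjugates its
    # first argument, giving the usual complex inner product.
    return np.array([
        [np.vdot(v1, v2) for v2 in vec_list_2]
        for v1 in vec_list_1])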
def test_svd(self):
    # Set tolerance for testing eigval/eigvec property
    test_atol = 1e-10

    # Check tall, fat, and square arrays
    num_rows_list = [100]
    num_cols_list = [50, 100, 150]

    # Loop through different array sizes
    for num_rows in num_rows_list:
        for num_cols in num_cols_list:

            # Check real and complex data
            for is_complex in [True, False]:

                # Generate a random array with elements in [0, 1]
                array = np.random.random((num_rows, num_cols))
                if is_complex:
                    array = array + 1j * np.random.random(
                        (num_rows, num_cols))

                # Compute full set of singular values to help choose
                # tolerance levels that guarantee truncation (otherwise
                # tests won't actually check those features).
                sing_vals_full = np.linalg.svd(array, full_matrices=0)[1]
                atol_list = [np.median(sing_vals_full), None]
                rtol_list = [
                    np.median(sing_vals_full) / np.max(sing_vals_full),
                    None]

                # Loop through different tolerance cases
                for atol in atol_list:
                    for rtol in rtol_list:

                        # For all arrays, check that the output of util.svd
                        # satisfies the definition of an SVD.  Do this by
                        # checking eigval/eigvec properties, which must be
                        # satisfied by the sing vecs and sing vals, even if
                        # there is truncation.  The fact that the singular
                        # vectors are eigenvectors of a normal array ensures
                        # that they are unitary, so we don't have to check
                        # that separately.
                        L_sing_vecs, sing_vals, R_sing_vecs = util.svd(
                            array, atol=atol, rtol=rtol)
                        np.testing.assert_allclose(
                            array.dot(array.conj().T.dot(L_sing_vecs)) -
                            L_sing_vecs.dot(np.diag(sing_vals**2)),
                            np.zeros(L_sing_vecs.shape),
                            atol=test_atol)
                        np.testing.assert_allclose(
                            array.conj().T.dot(array.dot(R_sing_vecs)) -
                            R_sing_vecs.dot(np.diag(sing_vals**2)),
                            np.zeros(R_sing_vecs.shape),
                            atol=test_atol)

                        # If either tolerance is not None, make sure that
                        # something is actually truncated, otherwise force
                        # the test to quit.  To do this, make sure the
                        # eigvec array is not square.
                        if rtol and sing_vals.size == sing_vals_full.size:
                            raise ValueError(
                                'Failed to choose relative tolerance that '
                                'forces truncation.')
                        if atol and sing_vals.size == sing_vals_full.size:
                            raise ValueError(
                                'Failed to choose absolute tolerance that '
                                'forces truncation.')

                        # If necessary, test that tolerances are satisfied:
                        # every retained singular value must exceed atol, and
                        # the smallest retained value must exceed rtol
                        # relative to the largest.
                        if atol:
                            self.assertTrue(abs(sing_vals[-1]) > atol)
                        if rtol:
                            self.assertTrue(
                                abs(sing_vals[-1]) / abs(sing_vals[0]) >
                                rtol)
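# Stand-alone check (independent of util.svd, not part of the original tests)
# of the property the assertions above rely on: the left singular vectors of
# A are eigenvectors of A A^H with eigenvalues sigma^2, and the right singular
# vectors are eigenvectors of A^H A.
import numpy as np

A = np.random.random((6, 4)) + 1j * np.random.random((6, 4))
U, sigma, Vh = np.linalg.svd(A, full_matrices=False)
V = Vh.conj().T
assert np.allclose(A.dot(A.conj().T).dot(U), U.dot(np.diag(sigma ** 2)))
assert np.allclose(A.conj().T.dot(A).dot(V), V.dot(np.diag(sigma ** 2)))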