def test_single_weights_update_reduces_cost_with_zero_delta():
    """Test single weights update reduces cost function with delta = 0."""
    rng = check_random_state(0)
    n_features, n_components, n_samples = 13, 7, 100

    # Random data and an inner-product (linear) kernel.
    data = rng.uniform(size=(n_samples, n_features))
    kernel = data.dot(data.T)

    dictionary = right_stochastic_matrix(
        (n_components, n_samples), random_state=rng)
    weights = right_stochastic_matrix(
        (n_samples, n_components), random_state=rng)
    # delta = 0 corresponds to all scale factors fixed at 1.
    scale_factors = np.ones(n_components)

    # Precomputed products consumed by the weights update.
    dict_kernel = dictionary.dot(kernel)
    dict_kernel_dict = dictionary.dot(kernel.dot(dictionary.T))

    # Both factors must start out row-stochastic.
    assert np.allclose(dictionary.sum(axis=1), 1, 1e-12)
    assert np.allclose(weights.sum(axis=1), 1, 1e-12)

    cost_before = _kernel_aa_cost(kernel, weights, dictionary, scale_factors)
    new_weights = _update_kernel_aa_weights(
        weights, scale_factors, dict_kernel, dict_kernel_dict)
    cost_after = _kernel_aa_cost(
        kernel, new_weights, dictionary, scale_factors)

    # A single update step must not increase the cost, and the updated
    # weights must remain row-stochastic.
    assert cost_after <= cost_before
    assert np.allclose(new_weights.sum(axis=1), 1, 1e-12)
def test_single_dictionary_update_reduces_cost_with_zero_delta():
    """Test single update step reduces cost function."""
    rng = check_random_state(0)
    n_features, n_components, n_samples = 10, 5, 400

    # Random data and an inner-product (linear) kernel.
    data = rng.uniform(size=(n_samples, n_features))
    kernel = data.dot(data.T)

    dictionary = right_stochastic_matrix(
        (n_components, n_samples), random_state=rng)
    weights = right_stochastic_matrix(
        (n_samples, n_components), random_state=rng)
    # delta = 0 corresponds to all scale factors fixed at 1.
    scale_factors = np.ones(n_components)

    # Precomputed quantities consumed by the dictionary update.
    kernel_trace = np.trace(kernel)
    kernel_weights = kernel.dot(weights)
    weights_gram = weights.T.dot(weights)

    # Both factors must start out row-stochastic.
    assert np.allclose(dictionary.sum(axis=1), 1, 1e-12)
    assert np.allclose(weights.sum(axis=1), 1, 1e-12)

    cost_before = _kernel_aa_cost(kernel, weights, dictionary, scale_factors)
    new_dictionary = _update_kernel_aa_dictionary(
        kernel, dictionary, scale_factors,
        kernel_trace, kernel_weights, weights_gram)
    cost_after = _kernel_aa_cost(
        kernel, weights, new_dictionary, scale_factors)

    # A single update step must not increase the cost, and the updated
    # dictionary must remain row-stochastic.
    assert cost_after <= cost_before
    assert np.allclose(new_dictionary.sum(axis=1), 1, 1e-12)
def test_repeated_weights_updates_converge_with_nonzero_delta():
    """Test repeated weights updates converge to a fixed point with nonzero delta."""
    # NOTE: the original docstring claimed delta = 0, contradicting both the
    # test name and the body, which uses delta = 0.3 and scale factors drawn
    # from [1 - delta, 1 + delta].
    random_seed = 0
    random_state = check_random_state(random_seed)
    n_features = 30
    n_components = 11
    n_samples = 320
    max_iterations = 100
    tolerance = 1e-6

    # Random data and an inner-product (linear) kernel.
    X = random_state.uniform(size=(n_samples, n_features))
    K = X.dot(X.T)

    C = right_stochastic_matrix(
        (n_components, n_samples), random_state=random_state)
    Z = right_stochastic_matrix(
        (n_samples, n_components), random_state=random_state)

    # Both factors must start out row-stochastic.
    assert np.allclose(C.sum(axis=1), 1, tolerance)
    assert np.allclose(Z.sum(axis=1), 1, tolerance)

    # Nonzero relaxation: scale factors are drawn from [1 - delta, 1 + delta].
    delta = 0.3
    alpha = random_state.uniform(
        low=(1 - delta), high=(1 + delta), size=(n_components,))

    initial_cost = _kernel_aa_cost(K, Z, C, alpha)
    # Only the weights are updated; dictionary and scale factors stay fixed,
    # so they must come back unchanged.
    updated_Z, updated_C, updated_alpha, _, n_iter = _iterate_kernel_aa(
        K, Z, C, alpha, delta=delta,
        update_weights=True,
        update_dictionary=False,
        update_scale_factors=False,
        tolerance=tolerance,
        max_iterations=max_iterations,
        require_monotonic_cost_decrease=True)[:5]
    final_cost = _kernel_aa_cost(K, updated_Z, updated_C, updated_alpha)

    assert final_cost <= initial_cost
    # Convergence should be reached before the iteration cap.
    assert n_iter < max_iterations
    # Fixed quantities are untouched by the weights-only iteration.
    assert np.allclose(updated_C, C, 1e-12)
    assert np.allclose(updated_alpha, alpha, 1e-12)
    # The updated weights must remain row-stochastic.
    assert np.allclose(updated_Z.sum(axis=1), 1, 1e-12)
def test_exact_solution_with_zero_delta_is_dictionary_update_fixed_point():
    """Test exact solution is a fixed point of the dictionary update."""
    random_seed = 0
    random_state = check_random_state(random_seed)
    n_features = 10
    n_components = 6
    n_samples = 100
    tolerance = 1e-12

    # Ground-truth archetypes and random convex weights over them.
    basis = random_state.uniform(size=(n_components, n_features))
    Z = right_stochastic_matrix(
        (n_samples, n_components), random_state=random_state)

    # Draw distinct sample indices to serve as exact archetypes.
    # choice(..., replace=False) guarantees uniqueness directly; the previous
    # rejection-sampling loop compared candidate draws against the
    # zero-initialized index array, so sample index 0 always "collided" with
    # unfilled entries and could never be selected.
    archetype_indices = random_state.choice(
        n_samples, size=n_components, replace=False)

    # Build a dictionary selecting exactly those samples, and pin each chosen
    # sample's weights so that it coincides exactly with its archetype.
    C = np.zeros((n_components, n_samples))
    for component, index in enumerate(archetype_indices):
        C[component, index] = 1.0
        Z[index, :] = 0.0
        Z[index, component] = 1.0

    # With this construction the data are reconstructed exactly.
    X = Z.dot(basis)
    basis_projection = C.dot(X)
    assert np.allclose(basis_projection, basis, tolerance)
    assert np.linalg.norm(X - Z.dot(C.dot(X))) < tolerance

    K = X.dot(X.T)
    # delta = 0 corresponds to all scale factors fixed at 1.
    alpha = np.ones(n_components)
    initial_cost = _kernel_aa_cost(K, Z, C, alpha)

    # Precomputed quantities consumed by the dictionary update.
    trace_K = np.trace(K)
    KZ = K.dot(Z)
    ZtZ = Z.T.dot(Z)
    updated_C = _update_kernel_aa_dictionary(K, C, alpha, trace_K, KZ, ZtZ)
    final_cost = _kernel_aa_cost(K, Z, updated_C, alpha)

    # An exact reconstruction must be a fixed point: cost and dictionary are
    # unchanged, and the dictionary remains row-stochastic.
    assert abs(final_cost - initial_cost) < tolerance
    assert np.allclose(updated_C.sum(axis=1), 1, 1e-12)
    assert np.allclose(updated_C, C, tolerance)