def test_pca(A, p):
    pca = PCA(A, p)
    Ap, G = pca.get_reduced()
    A_re = pca.reconstruction(Ap)
    error = frobeniu_norm_error(A, A_re)
    print('PCA-Reconstruction error for {k} components is'.format(k=p), error)
    return G
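# frobeniu_norm_error is provided by the assignment's helper code and is not
# defined in this section. As a reference only, a minimal sketch of what it is
# assumed to compute (the Frobenius norm of the difference between two
# matrices) is given below, under the hypothetical name
# _frobenius_norm_error_sketch so it does not shadow the real helper.
import numpy as np

def _frobenius_norm_error_sketch(A, B):
    """Assumed behaviour of frobeniu_norm_error: ||A - B||_F."""
    A = np.asarray(A, dtype=np.float64)
    B = np.asarray(B, dtype=np.float64)
    return np.linalg.norm(A - B, ord='fro')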
def test_ae(A, p):
    model = AE(d_hidden_rep=p)
    # Train the AE on A (also passed as the validation set); 128 and 300 are
    # presumably the batch size and the number of training epochs.
    model.train(A, A, 128, 300)
    A_re = model.reconstruction(A)
    final_w = model.get_params()
    error = frobeniu_norm_error(A, A_re)
    print('AE-Reconstruction error for {k}-dimensional hidden representation is'.format(k=p), error)
    return final_w
def test_pca(A, p):
    pca = PCA(A, p)
    Ap, G = pca.get_reduced()
    A_re = pca.reconstruction(Ap)
    # Mean-center A (subtract the per-row mean taken over all columns) before
    # measuring the reconstruction error.
    A = torch.tensor(A, dtype=torch.double)
    ones_matrix = torch.ones((A.shape[1], 1), dtype=torch.double)
    A_mean = torch.div(torch.mm(A, ones_matrix), A.shape[1])
    A_centered = A - torch.mm(A_mean, torch.transpose(ones_matrix, 0, 1))
    # Convert back to numpy so the error helper sees consistent array types.
    error = frobeniu_norm_error(A_centered.numpy(), A_re)
    print('PCA-Reconstruction error for {k} components is'.format(k=p), error)
    return G
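# The torch block above only mean-centers A: A_mean is the per-row mean taken
# over the n columns (samples), and A_centered subtracts it from every column.
# A numpy equivalent of that centering, as a sketch for reference:
import numpy as np

def _center_columns(A):
    """Subtract the mean over columns from each column of A (samples stored column-wise)."""
    A = np.asarray(A, dtype=np.float64)
    return A - A.mean(axis=1, keepdims=True)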
n = A.shape[1]
A_norm = A - ((1 / n) * A @ np.ones((n, 1))) @ np.ones((1, n))
ps = [32, 64, 128]

# PCA vs AE (shared weights)
for p in ps:
    G = test_pca(A_norm, p)
    final_w = test_ae(A, p)
    # compare G and final_w

# G: 256 x 64, R = G.T @ W
p = 64
G = test_pca(A_norm, p)
W = test_ae(A_norm, p)
R = G.T @ W
u, s, v = np.linalg.svd(R, full_matrices=True)
Rp = u @ np.eye(u.shape[0], v.shape[0]) @ v
Gp = G @ Rp
print("Gp-W", frobeniu_norm_error(Gp, W))
print('R-Rp', frobeniu_norm_error(R, Rp))
A_re = Gp @ Gp.T @ A_norm
print("Gp reconstruction error:", frobeniu_norm_error(A_norm, A_re))

# AE (shared) vs AE (non-shared)
for p in ps:
    final_w = test_ae(A, p)

# Multilayer AE
### END YOUR CODE
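# The SVD step above computes the orthogonal Procrustes solution: for
# R = U S V^T, the orthogonal matrix closest to R in Frobenius norm is U V^T,
# which is exactly what Rp = u @ np.eye(...) @ v evaluates to, so Gp = G @ Rp
# is the PCA basis re-expressed in a basis aligned with W. A small
# self-contained check of the properties this comparison relies on, in the
# idealized case where W is exactly an orthogonal transform of G (sketch only;
# the dimensions and helper name below are illustrative assumptions):
import numpy as np

def _orthogonal_alignment_check(d=256, p=64, seed=0):
    rng = np.random.default_rng(seed)
    G = np.linalg.qr(rng.standard_normal((d, p)))[0]  # orthonormal columns, like the PCA basis
    Q = np.linalg.qr(rng.standard_normal((p, p)))[0]  # an arbitrary orthogonal p x p matrix
    W = G @ Q                                         # same subspace as G, different basis
    R = G.T @ W
    u, s, vt = np.linalg.svd(R, full_matrices=True)
    Rp = u @ np.eye(u.shape[0], vt.shape[0]) @ vt     # nearest orthogonal matrix to R
    Gp = G @ Rp
    print("Rp orthogonal:        ", np.allclose(Rp.T @ Rp, np.eye(p)))
    print("Gp recovers W:        ", np.allclose(Gp, W))
    print("projection unchanged: ", np.allclose(Gp @ Gp.T, G @ G.T))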
if __name__ == '__main__':
    dataloc = "./data/USPS.mat"
    A = load_data(dataloc)
    A = A.T
    ## Normalize A
    A = A / A.max()

    ### YOUR CODE HERE
    # Note: You are free to modify your code here for debugging and justifying your ideas for 5(f)
    ps = [32, 64, 128]
    for p in ps:
        G = test_pca(A, p)
        final_w = test_ae(A, p)
        ####### For question 5(f) ########
        matrix_transform_G = np.dot(G, G.T)
        matrix_transform_W = np.dot(final_w, final_w.T)
        matrix_error = frobeniu_norm_error(matrix_transform_G, matrix_transform_W)
        print('Error before transformation of G and W for {} components is {}'.format(
            p, frobeniu_norm_error(G, final_w)))
        print('Error after transformations of G and W for {} components is {}'.format(
            p, matrix_error))
    ### END YOUR CODE
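# The 5(f) comparison above contrasts G and W directly with their projection
# matrices G G^T and W W^T. The projection matrix is invariant to an orthogonal
# change of basis of the columns ((G Q)(G Q)^T = G G^T for any orthogonal Q),
# which is why the "after transformation" error can be small even when G and W
# themselves differ. A minimal illustration under that assumption (dimensions
# and function name below are hypothetical):
import numpy as np

def _projection_invariance_demo(d=256, p=64, seed=1):
    rng = np.random.default_rng(seed)
    G = np.linalg.qr(rng.standard_normal((d, p)))[0]  # orthonormal basis of a p-dim subspace
    Q = np.linalg.qr(rng.standard_normal((p, p)))[0]  # orthogonal change of basis
    W = G @ Q
    print("||G - W||_F:       ", np.linalg.norm(G - W, ord='fro'))
    print("||GG^T - WW^T||_F: ", np.linalg.norm(G @ G.T - W @ W.T, ord='fro'))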