def AdaIteration(X, X_unfold, A_mat, B_mat, C_mat, b0, eta, F, errors, n_mb, norm_x):
    global Gt
    dim_vec = X.shape
    dim = len(X.shape)
    if Gt == []:
        Gt = [b0 for _ in range(dim)]
    A = [A_mat, B_mat, C_mat]
    mu = 0

    # pick one mode uniformly at random to update in this call
    block_vec = np.random.permutation(dim)
    d_update = block_vec[0]

    # sample fibers and form the X_[d] = H_[d] A_[d]^T least-squares problem
    [tensor_idx, factor_idx] = sample_fibers(n_mb, dim_vec, d_update)
    tensor_idx = tensor_idx.astype(int)
    cols = [tensor_idx[:, x] for x in range(len(tensor_idx[0]))]
    X_sample = X[tuple(cols)]
    X_sample = X_sample.reshape(
        (int(X_sample.size / dim_vec[d_update]), dim_vec[d_update]))

    # sampled Khatri-Rao product of the factors not being updated
    A_unsel = [A[i] for i in range(dim) if i != d_update]
    H = np.array(sampled_kr(A_unsel, factor_idx))

    # compute the stochastic gradient
    g = (1 / n_mb) * (A[d_update] @ (H.transpose() @ H + mu * np.eye(F))
                      - X_sample.transpose() @ H - mu * A[d_update])

    # accumulate the squared gradient and take the Adagrad step
    Gt[d_update] = np.abs(np.square(g)) + Gt[d_update]
    eta_adapted = np.divide(eta, np.sqrt(Gt[d_update]))
    A[d_update] = A[d_update] - np.multiply(eta_adapted, g)

    return A[0], A[1], A[2], errors
def adaIteration(configuration):
    (
        X,
        b0,
        n_mb,
        A,
        B,
        C,
        eta,
        Gt,
        norm_x,
        iterations,
        start_time,
        errors,
    ) = configuration
    estimate = [A, B, C]

    # setup parameters
    dim = len(X.shape)
    dim_vec = X.shape
    F = estimate[0].shape[1]
    mu = 0

    for it in range(iterations):
        # pick one mode uniformly at random to update
        block_vec = np.random.permutation(dim)
        d_update = block_vec[0]

        # sample fibers and form the X_[d] = H_[d] A_[d]^T least-squares problem
        [tensor_idx, factor_idx] = sample_fibers(n_mb, dim_vec, d_update)
        tensor_idx = tensor_idx.astype(int)
        cols = [tensor_idx[:, x] for x in range(len(tensor_idx[0]))]
        X_sample = X[tuple(cols)]
        X_sample = X_sample.reshape(
            (int(X_sample.size / dim_vec[d_update]), dim_vec[d_update]))

        # sampled Khatri-Rao product of the factors not being updated
        A_unsel = [estimate[i] for i in range(dim) if i != d_update]
        H = np.array(sampled_kr(A_unsel, factor_idx))

        # compute the stochastic gradient
        g = (1 / n_mb) * (
            estimate[d_update] @ (H.transpose() @ H + mu * np.eye(F))
            - X_sample.transpose() @ H - mu * estimate[d_update])

        # accumulate the squared gradient and take the Adagrad step
        Gt[d_update] = np.abs(np.square(g)) + Gt[d_update]
        eta_adapted = np.divide(eta, np.sqrt(Gt[d_update]))
        estimate[d_update] = estimate[d_update] - np.multiply(eta_adapted, g)
        estimate[d_update] = proxr(estimate[d_update], d_update)

        # log the error against elapsed time
        e = error(X, estimate, norm_x)
        t = time.time() - start_time
        errors[t] = ("ada", e)
        print("ada", it, e)

    return (configuration, estimate, errors)
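# Rough usage sketch for adaIteration (illustrative only, not part of the algorithm):
# the shapes and hyperparameters below are made up, and it assumes numpy (np),
# tensorly (tl), time, and the module helpers (sample_fibers, sampled_kr, proxr,
# error) are available as above. The exact convention expected for norm_x depends
# on how error() is defined.
def _example_adaIteration():
    I, F_rank = 50, 5
    factors_true = [np.random.rand(I, F_rank) for _ in range(3)]
    X_demo = tl.kruskal_to_tensor((np.ones(F_rank), factors_true))
    b0 = 1e-6
    Gt0 = [b0 for _ in range(3)]              # per-mode Adagrad accumulators
    configuration = (
        X_demo,                               # tensor to decompose
        b0,                                   # b0: initial accumulator value
        2 ** 7,                               # n_mb: sampled fibers per update
        np.random.rand(I, F_rank),            # A
        np.random.rand(I, F_rank),            # B
        np.random.rand(I, F_rank),            # C
        1,                                    # eta: base step size
        Gt0,                                  # Gt
        np.linalg.norm(X_demo),               # norm_x (convention set by error())
        100,                                  # iterations
        time.time(),                          # start_time
        {},                                   # errors: dict keyed by elapsed time
    )
    return adaIteration(configuration)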
def AdaCPDTime(X, b0, n_mb, max_time, A_init, sample_interval=500, eta=1):
    A = A_init

    # setup parameters
    dim = len(X.shape)
    dim_vec = X.shape
    F = A[0].shape[1]

    # initial normalized reconstruction error
    PP = tl.kruskal_to_tensor((np.ones(F), A))
    err_e = (np.linalg.norm(X[..., :] - PP[..., :]) ** 2) / norm(X)
    NRE_A = {0: err_e}

    start = time.time()
    mu = 0
    Gt = [b0 for _ in range(dim)]

    while time.time() - start < max_time:
        # pick one mode uniformly at random to update
        block_vec = np.random.permutation(dim)
        d_update = block_vec[0]

        # sample fibers and form the X_[d] = H_[d] A_[d]^T least-squares problem
        [tensor_idx, factor_idx] = sample_fibers(n_mb, dim_vec, d_update)
        tensor_idx = tensor_idx.astype(int)
        cols = [tensor_idx[:, x] for x in range(len(tensor_idx[0]))]
        X_sample = X[tuple(cols)]
        X_sample = X_sample.reshape(
            (int(X_sample.size / dim_vec[d_update]), dim_vec[d_update]))

        # sampled Khatri-Rao product of the factors not being updated
        A_unsel = [A[i] for i in range(dim) if i != d_update]
        H = np.array(sampled_kr(A_unsel, factor_idx))

        # compute the stochastic gradient
        g = (1 / n_mb) * (
            A[d_update] @ (H.transpose() @ H + mu * np.eye(F))
            - X_sample.transpose() @ H - mu * A[d_update])

        # accumulate the squared gradient and take the Adagrad step
        Gt[d_update] = np.abs(np.square(g)) + Gt[d_update]
        eta_adapted = np.divide(eta, np.sqrt(Gt[d_update]))
        A[d_update] = A[d_update] - np.multiply(eta_adapted, g)
        A[d_update] = proxr(A[d_update], d_update)

        # record the normalized reconstruction error against elapsed time
        t = time.time()
        PP = tl.kruskal_to_tensor((np.ones(F), A))
        err = np.linalg.norm(X - PP) ** 2 / norm(X)
        NRE_A[t - start] = err

    return (NRE_A, A)
def AdaCPD(X, b0, n_mb, mttrks, A_init, eta=1):
    A = A_init

    # setup parameters
    dim = len(X.shape)
    dim_vec = X.shape
    F = A[0].shape[1]

    # initial normalized reconstruction error
    PP = tl.kruskal_to_tensor((np.ones(F), A))
    err_e = (np.linalg.norm(X[..., :] - PP[..., :]) ** 2) / X.size
    NRE_A = [err_e]

    # number of sampled updates corresponding to `mttrks` MTTKRP-equivalent passes
    max_it = (X.shape[0] ** 2 / n_mb) * mttrks
    tic = time.time()
    time_A = [time.time() - tic]
    mu = 0
    Gt = [b0 for _ in range(dim)]
    mttrk = 0

    for it in range(1, int(math.ceil(max_it))):
        # pick one mode uniformly at random to update
        block_vec = np.random.permutation(dim)
        d_update = block_vec[0]

        # sample fibers and form the X_[d] = H_[d] A_[d]^T least-squares problem
        [tensor_idx, factor_idx] = sample_fibers(n_mb, dim_vec, d_update)
        tensor_idx = tensor_idx.astype(int)
        cols = [tensor_idx[:, x] for x in range(len(tensor_idx[0]))]
        X_sample = X[tuple(cols)]
        X_sample = X_sample.reshape(
            (int(X_sample.size / dim_vec[d_update]), dim_vec[d_update]))

        # sampled Khatri-Rao product of the factors not being updated
        A_unsel = [A[i] for i in range(dim) if i != d_update]
        H = np.array(sampled_kr(A_unsel, factor_idx))

        # compute the stochastic gradient
        g = (1 / n_mb) * (
            A[d_update] @ (H.transpose() @ H + mu * np.eye(F))
            - X_sample.transpose() @ H - mu * A[d_update])

        # accumulate the squared gradient and take the Adagrad step
        Gt[d_update] = np.abs(np.square(g)) + Gt[d_update]
        eta_adapted = np.divide(eta, np.sqrt(Gt[d_update]))
        A[d_update] = A[d_update] - np.multiply(eta_adapted, g)
        A[d_update] = proxr(A[d_update], d_update)

        # record the error once per MTTKRP-equivalent block of iterations
        if it % math.ceil(X.shape[0] ** 2 / n_mb) == 0:
            mttrk += 1
            time_A.append(time.time() - tic)
            PP = tl.kruskal_to_tensor((np.ones(F), A))
            err = np.linalg.norm(X - PP) ** 2
            NRE_A.append(err / norm(X))

    return (time_A, NRE_A, A)
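# Rough usage sketch for AdaCPD (illustrative only): builds a small synthetic
# rank-5 tensor with tensorly and runs a few MTTKRP-equivalent passes. The shapes
# and hyperparameter values are made up; sample_fibers, sampled_kr, proxr, and
# norm must be defined in this module as above.
def _example_AdaCPD():
    I, F_rank = 50, 5
    factors_true = [np.random.rand(I, F_rank) for _ in range(3)]
    X_demo = tl.kruskal_to_tensor((np.ones(F_rank), factors_true))
    A_init = [np.random.rand(I, F_rank) for _ in range(3)]
    time_A, NRE_A, A_hat = AdaCPD(X_demo, b0=1e-6, n_mb=2 ** 7, mttrks=10,
                                  A_init=A_init, eta=1)
    return time_A, NRE_A, A_hat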
def BrasCPD(X, b0, n_mb, max_it, A_init, A_gt):
    A = A_init

    # setup parameters
    dim = len(X.shape)
    dim_vec = X.shape
    F = A[0].shape[1]

    # initial normalized reconstruction error
    PP = tl.kruskal_to_tensor((np.ones(F), A))
    err_e = (np.linalg.norm(X[..., :] - PP[..., :]) ** 2) / X.size
    NRE_A = [err_e]

    # initial MSE against the ground-truth factors, minimized over factor permutations
    MSE_A = []
    mse = 10 ** 10
    for perm in permutations(range(3), 3):
        m = (1.0 / 3.0) * (MSE(A[perm[0]], A_gt[0]) + MSE(A[perm[1]], A_gt[1])
                           + MSE(A[perm[2]], A_gt[2]))
        mse = min(mse, m)
    MSE_A.append(mse)

    tic = time.time()
    time_A = [time.time() - tic]

    for it in range(1, int(math.ceil(max_it))):
        # diminishing step size
        alpha = b0 / (n_mb * it ** (10 ** -6))

        # pick one mode uniformly at random to update
        block_vec = np.random.permutation(dim)
        d_update = block_vec[0]

        # sample fibers and form the X_[d] = H_[d] A_[d]^T least-squares problem
        [tensor_idx, factor_idx] = sample_fibers(n_mb, dim_vec, d_update)
        tensor_idx = tensor_idx.astype(int)
        cols = [tensor_idx[:, x] for x in range(len(tensor_idx[0]))]
        X_sample = X[tuple(cols)]
        X_sample = X_sample.reshape(
            (int(X_sample.size / dim_vec[d_update]), dim_vec[d_update]))

        # sampled Khatri-Rao product of the factors not being updated
        A_unsel = [A[i] for i in range(dim) if i != d_update]
        H = np.array(sampled_kr(A_unsel, factor_idx))

        # stochastic proximal gradient step on the sampled least-squares problem
        A[d_update] = A[d_update] - alpha * (
            A[d_update] @ H.transpose() @ H - X_sample.transpose() @ H)
        A[d_update] = proxr(A[d_update], d_update)

        # record error and MSE once per MTTKRP-equivalent block of iterations
        if it % math.ceil(X.shape[0] ** 2 / n_mb) == 0:
            time_A.append(time.time() - tic)
            err = error(tl.unfold(X, 0), tl.norm(X), A[0], A[1], A[2])
            NRE_A.append(err)

            mse = 10 ** 10
            for perm in permutations(range(3), 3):
                m = (1.0 / 3.0) * (MSE(A[perm[0]], A_gt[0]) + MSE(A[perm[1]], A_gt[1])
                                   + MSE(A[perm[2]], A_gt[2]))
                mse = min(mse, m)
            MSE_A.append(mse)
            print("MSE = {}, NRE = {}".format(mse, err))

    return (time_A, NRE_A, MSE_A, A)
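# Rough usage sketch for BrasCPD (illustrative only): unlike AdaCPD, it also needs
# the ground-truth factors A_gt so it can track the permutation-minimized MSE.
# The shapes, b0, n_mb, and max_it values below are made up; MSE, error, proxr,
# sample_fibers, and sampled_kr must be defined in this module as above.
def _example_BrasCPD():
    I, F_rank = 50, 5
    A_gt = [np.random.rand(I, F_rank) for _ in range(3)]
    X_demo = tl.kruskal_to_tensor((np.ones(F_rank), A_gt))
    A_init = [np.random.rand(I, F_rank) for _ in range(3)]
    n_mb = 2 ** 7
    max_it = (I ** 2 / n_mb) * 10          # roughly 10 MTTKRP-equivalent passes
    time_A, NRE_A, MSE_A, A_hat = BrasCPD(X_demo, b0=0.1, n_mb=n_mb,
                                          max_it=max_it, A_init=A_init, A_gt=A_gt)
    return time_A, NRE_A, MSE_A, A_hat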