def compute_step(Tsize, Tl, T1_approx, factors, orig_factors, data, x, y, inner_parameters, it, old_error):
    """
    This function uses the chosen inner method to compute the next step.
    """

    # Initialize first variables.
    L = len(factors)
    damp, inner_method, cg_maxiter, cg_factor, cg_tol, tol_jump, symm, factors_norm, fix_mode = inner_parameters
    if type(inner_method) == list:
        inner_method = inner_method[it]

    # Call the inner method.
    if inner_method == 'cg':
        cg_maxiter = 1 + (L-2) * int(cg_factor * randint(1 + it**0.4, 2 + it**0.9))
        y, grad, JT_J_grad, itn, residualnorm = cg(Tl, factors, data, y, damp, cg_maxiter, cg_tol)
    elif inner_method == 'cg_static':
        y, grad, JT_J_grad, itn, residualnorm = cg(Tl, factors, data, y, damp, cg_maxiter, cg_tol)
    elif inner_method == 'als':
        factors = als.als_iteration(Tl, factors, fix_mode)
        x = concatenate([factors[l].flatten('F') for l in range(L)])
        y *= 0
    elif inner_method == 'direct':
        y, grad, itn, residualnorm = direct(Tl, factors, data, y, damp)
    else:
        sys.exit("Wrong inner method name. Must be 'cg', 'cg_static', 'als' or 'direct'.")

    # Update results.
    x = x + y

    # Balance and transform factors.
    factors = cnv.x2cpd(x, factors)
    factors = cnv.transform(factors, symm, factors_norm)

    # Some mode may be fixed when the bicpd is called.
    if L == 3:
        for l in range(L):
            if fix_mode == l:
                factors[l] = deepcopy(orig_factors[l])

    # Compute error.
    T1_approx = cnv.cpd2unfold1(T1_approx, factors)
    error = crt.fastnorm(Tl[0], T1_approx) / Tsize

    # Sometimes the step is so bad that it increases the error considerably. In that case we discard the
    # computed step and use the dogleg method to compute the next one.
    if it > 3:
        if inner_method == 'cg' or inner_method == 'cg_static':
            if error > tol_jump * old_error:
                x = x - y
                factors, x, y, error = compute_dogleg_steps(Tsize, Tl, T1_approx, factors, grad, JT_J_grad,
                                                            x, y, error, inner_parameters)

    if inner_method == 'als':
        return T1_approx, factors, x, y, [nan], '-', Tsize*error, error

    return T1_approx, factors, x, y, grad, itn, residualnorm, error
def tt_error(T, G, dims, L):
    """
    Given a tensor T and a computed CPD tensor train G = (G1, ..., GL), this function computes the error between
    T and the tensor associated to G.
    """

    # Dispatch to the order-specific routine crt.tt_error_orderL (orders L = 3, ..., 12 are supported), passing
    # the cores G0, ..., G_{L-1} as positional arguments.
    T_approx = getattr(crt, 'tt_error_order' + str(L))(T, *G, dims, L)

    error = norm(T - T_approx) / norm(T)
    return error
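# For intuition, in the third order case the cores of the tensor train are contracted back into a full tensor
# before the error is measured. The plain-numpy sketch below is illustrative only: it is not the compiled
# routine crt.tt_error_order3, and it assumes G0 has shape (m, R1), G1 has shape (R1, n, R2) and G2 has
# shape (R2, p).
def tt_error_order3_sketch(T, G0, G1, G2):
    import numpy as np

    # Contract the three cores into the approximating tensor and measure the relative error.
    T_approx = np.einsum('ia,ajb,bk->ijk', G0, G1, G2)
    return np.linalg.norm(T - T_approx) / np.linalg.norm(T)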
def compute_dogleg_steps(Tsize, Tl, T1_approx, factors, grad, JT_J_grad, x, y, error, inner_parameters):
    """
    Compute dogleg steps, keeping track of the best solution found, until the gain ratio becomes non-positive.
    """

    count = 0
    best_x = x.copy()
    best_y = y.copy()
    best_error = error
    best_factors = deepcopy(factors)
    gain_ratio = 1
    delta = 1
    damp, inner_method, cg_maxiter, cg_factor, cg_tol, tol_jump, symm, factors_norm, fix_mode = inner_parameters

    while gain_ratio > 0:

        # Keep the previous values of x, y and error to compare with the new ones in the next iteration.
        old_x = x
        old_y = y
        old_error = error

        # Apply the dogleg method.
        y = dogleg(y, grad, JT_J_grad, delta)

        # Update results.
        x = x + y

        # Balance and transform factors.
        factors = cnv.x2cpd(x, factors, eq=False)
        factors = cnv.transform(factors, symm, factors_norm)

        # Compute error.
        T1_approx = cnv.cpd2unfold1(T1_approx, factors)
        error = crt.fastnorm(Tl[0], T1_approx) / Tsize

        # Update gain ratio.
        gain_ratio = update_gain_ratio(damp, old_error, error, Tsize, old_x, x, grad)

        # Keep the best solution found so far.
        if error < old_error:
            best_x = x.copy()
            best_y = y.copy()
            best_error = error
            best_factors = deepcopy(factors)

        # Update delta.
        delta = update_delta(delta, gain_ratio, norm(x - old_x))

        count += 1
        if count > 10:
            break

    return best_factors, best_x, best_y, best_error
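# For reference, the classical dogleg rule blends the Gauss-Newton step with the steepest-descent (Cauchy)
# step inside a trust region of radius delta. The sketch below is illustrative only and makes assumptions:
# it is not the dogleg() called above, y_gn stands for the (approximate) Gauss-Newton step, JT_J_grad for
# the product J^T J grad, and it relies on this module's numpy imports (dot, norm).
def dogleg_sketch(y_gn, grad, JT_J_grad, delta):
    # Cauchy point: minimizer of the quadratic model along the steepest-descent direction.
    alpha = dot(grad, grad) / dot(grad, JT_J_grad)
    y_sd = -alpha * grad

    if norm(y_gn) <= delta:
        # The full Gauss-Newton step already fits inside the trust region.
        return y_gn
    if norm(y_sd) >= delta:
        # Even the Cauchy step leaves the region: take the truncated steepest-descent step.
        return -(delta / norm(grad)) * grad

    # Otherwise, walk from the Cauchy point toward the Gauss-Newton step until hitting the boundary,
    # solving |y_sd + t*d| = delta for t in [0, 1].
    d = y_gn - y_sd
    a, b, c = dot(d, d), 2 * dot(y_sd, d), dot(y_sd, y_sd) - delta**2
    t = (-b + (b**2 - 4*a*c)**0.5) / (2*a)
    return y_sd + t * d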
def output_info(T1, Tsize, T1_approx,
                step_sizes_main, step_sizes_refine,
                errors_main, errors_refine,
                improv_main, improv_refine,
                gradients_main, gradients_refine,
                stop_main, stop_refine,
                options):
    """
    Constructs the class containing the information of all relevant outputs relative to the computation of a
    third order CPD.
    """

    if options.refine:
        num_steps = size(step_sizes_main) + size(step_sizes_refine)
    else:
        num_steps = size(step_sizes_main)

    if type(T1) == ndarray:
        rel_error = crt.fastnorm(T1, T1_approx) / Tsize
    # In the sparse case, the variable T1 is the triple T = [data, idxs, dims] and T1_approx is the variable
    # factors. We keep the original variable names used for the dense case, but this distinction is important
    # to keep in mind.
    else:
        data, idxs, dims = T1
        factors = T1_approx
        rel_error = crt.sparse_fastnorm(data, idxs, dims, factors) / Tsize

    class output:
        def __init__(self):
            self.num_steps = num_steps
            self.rel_error = rel_error
            self.accuracy = max(0, 100 * (1 - rel_error))
            self.step_sizes = [step_sizes_main, step_sizes_refine]
            self.errors = [errors_main, errors_refine]
            self.improv = [improv_main, improv_refine]
            self.gradients = [gradients_main, gradients_refine]
            self.stop = [stop_main, stop_refine]
            self.options = options

        def stop_msg(self):
            # Messages shared by the main and refinement stages.
            messages = {0: '0 - Relative error is small enough.',
                        1: '1 - Steps are small enough.',
                        2: '2 - Improvement in the relative error is small enough.',
                        3: '3 - Gradient is small enough.',
                        4: '4 - Average error increased.',
                        5: '5 - Limit of iterations was reached.',
                        6: '6 - dGN diverged.',
                        7: '7 - Average improvement is too small compared to the average error.',
                        8: '8 - No refinement was performed.'}

            # stop_main message.
            print()
            print('Main stop:')
            if self.stop[0] in messages:
                print(messages[self.stop[0]])

            # stop_refine message.
            print()
            print('Refinement stop:')
            if self.stop[1] in messages:
                print(messages[self.stop[1]])

            return ''

    output = output()

    return output
def als(T, factors, R, options):
    """
    This function uses the ALS method to compute an approximation of T with rank R.

    Inputs
    ------
    T: float array
    factors: list of float 2-D arrays
        The factor matrices to be used as starting point.
    R: int
        The desired rank of the approximating tensor.
    options: class
        See the function cpd for more information about the options available.

    Outputs
    -------
    factors: list of float 2-D arrays
        The factor matrices of the CPD of T.
    step_sizes: float 1-D array
        Distance between the computed points at each iteration.
    errors: float 1-D array
        Error of the computed approximating tensor at each iteration.
    improv: float 1-D array
        Improvement of the error at each iteration. More precisely, the difference between the relative error
        of the current iteration and the previous one.
    gradients: float 1-D array
        Gradient of the error function at each iteration.
    stop: 0, 1, 2, 3, 4, 5, 6 or 7
        This value indicates why the function stopped. See the function dGN for more details.
    """

    # INITIALIZE RELEVANT VARIABLES

    # Extract all relevant variables from the class of options.
    maxiter = options.maxiter
    tol = options.tol
    tol_step = options.tol_step
    tol_improv = options.tol_improv
    tol_grad = options.tol_grad
    symm = options.symm
    display = options.display
    factors_norm = options.factors_norm

    # Verify if some factor should be fixed or not. This only happens when the bicpd function was called.
    L = len(factors)
    fix_mode = -1
    orig_factors = [[] for l in range(L)]
    for l in range(L):
        if type(factors[l]) == list:
            fix_mode = l
            orig_factors[l] = factors[l][0].copy()
            factors[l] = factors[l][0]

    # Set the other variables.
    Tsize = norm(T)
    error = 1
    best_error = inf
    stop = 5
    const = 1 + int(maxiter / 10)

    # INITIALIZE RELEVANT ARRAYS

    x = concatenate([factors[l].flatten('F') for l in range(L)])
    step_sizes = empty(maxiter)
    errors = empty(maxiter)
    improv = empty(maxiter)
    gradients = empty(maxiter)
    best_factors = [copy(factors[l]) for l in range(L)]

    # Compute unfoldings.
    Tl = [cnv.unfold(T, l + 1) for l in range(L)]
    T1_approx = empty(Tl[0].shape, dtype=float64)

    if display > 1:
        if display == 4:
            print('    ',
                  '{:^9}'.format('Iteration'),
                  '| {:^11}'.format('Rel error'),
                  '| {:^11}'.format('Step size'),
                  '| {:^11}'.format('Improvement'),
                  '| {:^11}'.format('norm(grad)'))
        else:
            print('    ',
                  '{:^9}'.format('Iteration'),
                  '| {:^9}'.format('Rel error'),
                  '| {:^11}'.format('Step size'),
                  '| {:^10}'.format('Improvement'),
                  '| {:^10}'.format('norm(grad)'))

    # START ALS ITERATIONS

    for it in range(maxiter):

        # Keep the previous values of x and error to compare with the new ones in the next iteration.
        old_x = x
        old_error = error

        # ALS iteration call.
        factors = als_iteration(Tl, factors, fix_mode)
        x = concatenate([factors[l].flatten('F') for l in range(L)])

        # Transform factors.
        factors = cnv.transform(factors, symm, factors_norm)

        # Some mode may be fixed when the bicpd is called.
        if L == 3:
            for l in range(L):
                if fix_mode == l:
                    factors[l] = copy(orig_factors[l])

        # Compute error.
        T1_approx = cnv.cpd2unfold1(T1_approx, factors)
        error = crt.fastnorm(Tl[0], T1_approx) / Tsize

        # Update best solution.
        if error < best_error:
            best_error = error
            for l in range(L):
                best_factors[l] = copy(factors[l])

        # Save relevant information about the current iteration.
        step_sizes[it] = norm(x - old_x)
        errors[it] = error
        gradients[it] = np.abs(old_error - error) / step_sizes[it]
        if it == 0:
            improv[it] = errors[it]
        else:
            improv[it] = np.abs(errors[it - 1] - errors[it])

        # Show information about the current iteration.
        if display > 1:
            if display == 4:
                print('    ',
                      '{:^8}'.format(it + 1),
                      '| {:^10.5e}'.format(errors[it]),
                      '| {:^10.5e}'.format(step_sizes[it]),
                      '| {:^10.5e}'.format(improv[it]),
                      '| {:^11.5e}'.format(gradients[it]))
            else:
                print('    ',
                      '{:^9}'.format(it + 1),
                      '| {:^9.2e}'.format(errors[it]),
                      '| {:^11.2e}'.format(step_sizes[it]),
                      '| {:^11.2e}'.format(improv[it]),
                      '| {:^10.2e}'.format(gradients[it]))

        # Stopping conditions.
        if it > 1:
            if errors[it] < tol:
                stop = 0
                break
            if step_sizes[it] < tol_step:
                stop = 1
                break
            if improv[it] < tol_improv:
                stop = 2
                break
            if gradients[it] < tol_grad:
                stop = 3
                break
            if it > 2 * const and it % const == 0:
                # Let const = 1 + int(maxiter/10). Comparing the average errors of const consecutive iterations
                # prevents the program from continuing to iterate when the error starts to oscillate without
                # decreasing.
                mean1 = mean(errors[it - 2 * const:it - const])
                mean2 = mean(errors[it - const:it])
                if mean1 - mean2 <= tol_improv:
                    stop = 4
                    break
                # If the average improvement is too small compared to the average error, the program stops.
                mean3 = mean(improv[it - const:it])
                if mean3 < 1e-3 * mean2:
                    stop = 7
                    break
            # Prevent blow ups.
            if error > max(1, Tsize**2) / (1e-16 + tol):
                stop = 6
                break

    # SAVE LAST COMPUTED INFORMATION

    errors = errors[0:it + 1]
    step_sizes = step_sizes[0:it + 1]
    improv = improv[0:it + 1]
    gradients = gradients[0:it + 1]

    # Return the best solution found during the iterations.
    return best_factors, step_sizes, errors, improv, gradients, stop
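# For reference, one classical ALS sweep for a third order tensor solves a linear least squares problem per
# mode. The sketch below is illustrative only: it is not the als_iteration used above, it assumes the
# Kolda-Bader unfolding convention X_(1) = A (C ⊙ B)^T (Tensor Fox's own unfolding convention may differ),
# and the names T1, T2, T3, A, B, C are hypothetical.
def als_sweep_sketch(T1, T2, T3, A, B, C):
    from numpy.linalg import lstsq
    from scipy.linalg import khatri_rao

    # Update each factor by solving Tl ~ factor_l * (Khatri-Rao product of the other factors)^T.
    A = lstsq(khatri_rao(C, B), T1.T, rcond=None)[0].T
    B = lstsq(khatri_rao(C, A), T2.T, rcond=None)[0].T
    C = lstsq(khatri_rao(B, A), T3.T, rcond=None)[0].T
    return A, B, C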
def cpd(T, R, options=False):
    """
    Given a tensor T and a rank R, this function computes an approximated CPD of T with rank R. The factor
    matrices are given in the form of a list [W^(1), ..., W^(L)]. They are such that

        sum_{r=1}^R W^(1)[:, r] ⊗ ... ⊗ W^(L)[:, r]

    is an approximation for T, where W^(l)[:, r] denotes the r-th column of W^(l).

    Inputs
    ------
    T: float array
        Objective tensor in coordinates.
    R: int
        The desired rank of the approximating tensor.
    options: class with the following parameters
        maxiter: int
            Number of maximum iterations allowed for the dGN function. Default is 200.
        tol, tol_step, tol_improv, tol_grad: float
            Tolerance criteria to stop the iteration process of the dGN function. Default is 1e-6 for all. Let
            T^(k) be the approximation at the k-th iteration, with corresponding CPD w^(k) in vectorized form.
            The program stops if
                1) |T - T^(k)| / |T| < tol
                2) |w^(k-1) - w^(k)| < tol_step
                3) ||T - T^(k-1)| / |T| - |T - T^(k)| / |T|| < tol_improv
                4) |grad F(w^(k))| < tol_grad, where F(w^(k)) = 1/2 |T - T^(k)|^2
        tol_mlsvd: float
            Tolerance criterion for the truncation. The idea is to obtain a truncation (U_1, ..., U_L)*S such
            that |T - (U_1, ..., U_L)*S| / |T| < tol_mlsvd. Default is 1e-16. If tol_mlsvd = -1, the program
            uses the original tensor, so the computation of the MLSVD is not performed.
        trunc_dims: int or list of ints
            Consider a third order tensor T. If trunc_dims is not 0, then it should be a list with three
            integers [R1, R2, R3] such that 1 <= R1 <= m, 1 <= R2 <= n, 1 <= R3 <= p. The compressed tensor
            will have dimensions (R1, R2, R3). Default is 0, which means 'automatic' truncation.
        initialization: string or list
            This option is used to choose the initial point to start the iterations. For more information,
            check the function starting_point.
        refine: bool
            If True, after the dGN iterations the program repeats the dGN over the original space, using the
            current solution as starting point. Default is False.
        symm: bool
            The user should set symm to True if the objective tensor is symmetric, otherwise symm is False.
            Default is False.
        trials: int
            This parameter is only used for tensors with order higher than 3. The computation of the tensor
            train CPD requires the computation of several CPDs of third order tensors. If only one of these
            CPDs is of low quality (divergence or local minimum), then all effort is in vain. One workaround
            is to compute several CPDs of each third order tensor and keep the best one. The parameter trials
            defines the maximum number of times we repeat the computation of each third order CPD. These
            trials stop when the relative error is less than 1e-4 or when the maximum number of trials is
            reached. Default is trials=1.
        display: -2, -1, 0, 1, 2, 3 or 4
            This option is used to control how information about the computations is displayed on the screen.
            The default is 1. Notice that display=3 makes the overall running time larger, since it forces the
            program to show intermediate errors, which are computationally costly. -1 is a special option for
            displaying minimal relevant information for tensors with order higher than 3. We summarize the
            display options below.
                -2: same as option -1, plus the tensor train error
                -1: display only the errors of each CPD computation and the final relevant information
                 0: no information is printed
                 1: partial information is printed
                 2: full information is printed
                 3: full information plus errors of truncation and starting point are printed
                 4: almost equal to display = 3, but with more digits displayed on the screen (display = 3 is
                    a "cleaner" version of display = 4, with less information)
        epochs: int
            Number of tensor train CPD cycles. Use only for tensors with order higher than 3. Default is
            epochs=1.

    It is not necessary to create 'options' with all parameters described above. Any missing parameter is
    assigned to its default value automatically. For more information about the options, check the Tensor Fox
    tutorial at https://github.com/felipebottega/Tensor-Fox/tree/master/tutorial

    Outputs
    -------
    factors: list of float 2-D arrays with shape (dims[i], R) each
        The factor matrices which correspond to an approximate CPD for T.
    final_outputs: list of classes
        Each tricpd and bicpd call gives an output class with all sorts of information about the computations.
        The list 'final_outputs' contains all these classes.
    """

    # INITIAL PREPARATIONS

    # Verify if T is sparse, in which case it will be given as a list with the data.
    if type(T) == list:
        T_orig = deepcopy(T)
        T = deepcopy(T_orig)
        data_orig, idxs_orig, dims_orig = T_orig
    else:
        dims_orig = T.shape

    L = len(dims_orig)

    # Set options.
    options = aux.make_options(options, L)
    method = options.method
    display = options.display
    tol_mlsvd = options.tol_mlsvd
    if type(tol_mlsvd) == list:
        if L > 3:
            tol_mlsvd = tol_mlsvd[0]
        else:
            tol_mlsvd = tol_mlsvd[1]

    # Test consistency of dimensions and rank.
    aux.consistency(R, dims_orig, options)

    # Verify method.
    if method == 'dGN' or method == 'als':
        factors, output = tricpd(T, R, options)
        return factors, output

    # Change the ordering of the indexes to improve performance, if possible.
    T, ordering = aux.sort_dims(T)
    if type(T) == list:
        Tsize = norm(T[0])
        dims = T[2]
        # If T is sparse, we must use the classic method, and tol_mlsvd is set to the default 1e-16 in the case
        # the user requested -1 or 0.
        if tol_mlsvd < 0:
            options.tol_mlsvd = 1e-16
            tol_mlsvd = 1e-16
    else:
        Tsize = norm(T)
        dims = T.shape

    # COMPRESSION STAGE

    if display != 0:
        print('-----------------------------------------------------------------------------------------------')
        print('Computing MLSVD')

    # Compute compressed version of T with the MLSVD. We have that T = (U_1, ..., U_L)*S.
    if display > 2 or display < -1:
        S, U, T1, sigmas, best_error = cmpr.mlsvd(T, Tsize, R, options)
    else:
        S, U, T1, sigmas = cmpr.mlsvd(T, Tsize, R, options)

    if display != 0:
        if prod(array(S.shape) == array(dims)):
            if tol_mlsvd == -1:
                print('    No compression and no truncation requested by user')
                print('    Working with dimensions', dims)
            else:
                print('    No compression detected')
                print('    Working with dimensions', dims)
        else:
            print('    Compression detected')
            print('    Compressing from', dims, 'to', S.shape)
        if display > 2 or display < -1:
            print('    Compression relative error = {:7e}'.format(best_error))
        print()

    # Increase dimensions if R > min(S.shape).
    S_orig_dims = S.shape
    if R > min(S_orig_dims):
        inflate_status = True
        S = cnv.inflate(S, R, S_orig_dims)
    else:
        inflate_status = False

    # For higher order tensors, the trunc_dims option is only valid for the original tensor and its MLSVD.
    options.trunc_dims = 0

    # TENSOR TRAIN AND DAMPED GAUSS-NEWTON STAGE

    factors, outputs = highcpd(S, R, options)
    factors = cnv.deflate(factors, S_orig_dims, inflate_status)

    # Use the orthogonal transformations to work in the original space.
    for l in range(L):
        factors[l] = dot(U[l], factors[l])

    # FINAL WORKS

    # Compute error.
    if type(T1) == ndarray:
        T1_approx = empty(T1.shape)
        T1_approx = cnv.cpd2unfold1(T1_approx, factors)
        rel_error = crt.fastnorm(T1, T1_approx) / Tsize
        # Go back to the original dimension ordering.
        factors = aux.unsort_dims(factors, ordering)
    else:
        # Go back to the original dimension ordering.
        factors = aux.unsort_dims(factors, ordering)
        rel_error = crt.sparse_fastnorm(data_orig, idxs_orig, dims_orig, factors) / Tsize

    num_steps = 0
    for output in outputs:
        num_steps += output.num_steps
    accuracy = max(0, 100 * (1 - rel_error))

    if options.display != 0:
        print()
        print('===============================================================================================')
        print('===============================================================================================')
        print('Final results')
        print('    Number of steps =', num_steps)
        print('    Relative error =', rel_error)
        acc = float('%.6e' % Decimal(accuracy))
        print('    Accuracy = ', acc, '%')

    final_outputs = aux.make_final_outputs(num_steps, rel_error, accuracy, outputs, options)

    return factors, final_outputs
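# A minimal usage sketch (illustrative; assumes the package is imported as in the Tensor Fox tutorial, and
# the random tensor and the rank R = 5 are arbitrary choices):
#
#     import numpy as np
#     import TensorFox as tfx
#
#     T = np.random.randn(10, 10, 10)            # some dense third order tensor
#     factors, output = tfx.cpd(T, 5)            # rank-5 approximate CPD
#     print('Relative error:', output.rel_error)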
def tricpd(T, R, options):
    """
    Given a tensor T and a rank R, this function computes an approximated CPD of T with rank R. This function
    is called when the user sets method = 'dGN'.

    Inputs
    ------
    T: float array
    R: int
    options: class

    Outputs
    -------
    factors: list of float 2-D arrays
    output: class
        This class contains all information needed about the computations made. We summarize this information
        below.
            num_steps: the total number of steps (iterations) the dGN function used in the two runs.
            accuracy: the accuracy of the solution, which is defined by the formula 100*(1 - rel_error).
                0 means 0% of accuracy (worst case) and 100 means 100% of accuracy (best case).
            rel_error: the relative error |T - T_approx| / |T| of the computed approximation.
            step_sizes: array with the distances between consecutive computed points at each iteration.
            errors: array with the absolute errors of the approximating tensor at each iteration.
            improv: array with the differences between consecutive absolute errors.
            gradients: array with the gradient of the error function at each iteration. We expect these
                gradients to converge to zero as we keep iterating, since the objective point is a local
                minimum.
            stop: a list of two integers. The first indicates why the dGN stopped at the first run, and the
                second indicates why the dGN stopped at the second run (refinement stage). Check the functions
                mlsvd and dGN for more information.
    """

    # INITIALIZE RELEVANT VARIABLES

    # Verify if T is sparse, in which case it will be given as a list with the data.
    if type(T) == list:
        T_orig = deepcopy(T)
        T = deepcopy(T_orig)
        dims_orig = T_orig[2]
    else:
        dims_orig = T.shape

    L = len(dims_orig)

    # Set options.
    initialization = options.initialization
    refine = options.refine
    symm = options.symm
    display = options.display
    tol_mlsvd = options.tol_mlsvd
    method = options.method
    if type(tol_mlsvd) == list:
        tol_mlsvd = tol_mlsvd[0]

    # Change the ordering of the indexes to improve performance, if possible.
    T, ordering = aux.sort_dims(T)
    if type(T) == list:
        Tsize = norm(T[0])
        dims = T[2]
        # If T is sparse, we must use the classic method, and tol_mlsvd is set to the default 1e-16 in the case
        # the user requested -1 or 0.
        if tol_mlsvd < 0:
            tol_mlsvd = 1e-16
            if type(options.tol_mlsvd) == list:
                options.tol_mlsvd[0] = 1e-16
            else:
                options.tol_mlsvd = 1e-16
    else:
        Tsize = norm(T)
        dims = T.shape

    # COMPRESSION STAGE

    if display > 0:
        print('-----------------------------------------------------------------------------------------------')
        print('Computing MLSVD')

    # Compute compressed version of T with the MLSVD. We have that T = (U_1, ..., U_L)*S.
    if display > 2 or display < -1:
        S, U, T1, sigmas, best_error = cmpr.mlsvd(T, Tsize, R, options)
    else:
        S, U, T1, sigmas = cmpr.mlsvd(T, Tsize, R, options)
    dims_cmpr = S.shape

    # When the tensor is symmetric we want S to have equal dimensions.
    if symm:
        R_min = min(dims_cmpr)
        dims_cmpr = [R_min for l in range(L)]
        dims_cmpr_slices = tuple(slice(R_min) for l in range(L))
        S = S[dims_cmpr_slices]
        U = [U[l][:, :R_min] for l in range(L)]

    if display > 0:
        if dims_cmpr == dims:
            if tol_mlsvd == -1:
                print('    No compression and no truncation requested by user')
                print('    Working with dimensions', dims)
            else:
                print('    No compression detected')
                print('    Working with dimensions', dims)
        else:
            print('    Compression detected')
            print('    Compressing from', dims, 'to', S.shape)
        if display > 2:
            print('    Compression relative error = {:7e}'.format(best_error))

    # GENERATION OF STARTING POINT STAGE

    # Generate an initial point to start the dGN iterations.
    if display > 2 or display < -1:
        init_factors, init_error = init.starting_point(T, Tsize, S, U, R, ordering, options)
    else:
        init_factors = init.starting_point(T, Tsize, S, U, R, ordering, options)

    if display > 0:
        print('-----------------------------------------------------------------------------------------------')
        if type(initialization) == list:
            print('Type of initialization: user')
        else:
            print('Type of initialization:', initialization)
        if display > 2:
            print('    Initial guess relative error = {:5e}'.format(init_error))

    # DAMPED GAUSS-NEWTON STAGE

    if display > 0:
        print('-----------------------------------------------------------------------------------------------')
        print('Computing CPD')

    # Compute the approximated tensor in coordinates with dGN or ALS.
    if method == 'als':
        factors, step_sizes_main, errors_main, improv_main, gradients_main, stop_main = \
            als.als(S, init_factors, R, options)
    else:
        factors, step_sizes_main, errors_main, improv_main, gradients_main, stop_main = \
            gn.dGN(S, init_factors, R, options)

    # Use the orthogonal transformations to work in the original space.
    for l in range(L):
        factors[l] = dot(U[l], factors[l])

    # REFINEMENT STAGE

    # If T is sparse, no refinement is made.
    if type(T) == list:
        refine = False

    if refine:
        if display > 0:
            print()
            print('===============================================================================================')
            print('Computing refinement of solution')
            if display > 2:
                T1_approx = empty(T1.shape)
                T1_approx = cnv.cpd2unfold1(T1_approx, factors)
                init_error = crt.fastnorm(T1, T1_approx) / Tsize
                print('    Initial guess relative error = {:5e}'.format(init_error))
        if display > 0:
            print('-----------------------------------------------------------------------------------------------')
            print('Computing CPD')
        if method == 'als':
            factors, step_sizes_refine, errors_refine, improv_refine, gradients_refine, stop_refine = \
                als.als(T, factors, R, options)
        else:
            factors, step_sizes_refine, errors_refine, improv_refine, gradients_refine, stop_refine = \
                gn.dGN(T, factors, R, options)
    else:
        step_sizes_refine = array([0])
        errors_refine = array([0])
        improv_refine = array([0])
        gradients_refine = array([0])
        stop_refine = 8

    # FINAL WORKS

    # Compute error.
    if type(T1) == ndarray:
        T1_approx = empty(T1.shape)
        T1_approx = cnv.cpd2unfold1(T1_approx, factors)
        # Go back to the original dimension ordering.
        factors = aux.unsort_dims(factors, ordering)
        # Save and display the final information.
        output = aux.output_info(T1, Tsize, T1_approx, step_sizes_main, step_sizes_refine, errors_main,
                                 errors_refine, improv_main, improv_refine, gradients_main, gradients_refine,
                                 stop_main, stop_refine, options)
    else:
        # Go back to the original dimension ordering.
        factors = aux.unsort_dims(factors, ordering)
        # Save and display the final information.
        output = aux.output_info(T_orig, Tsize, factors, step_sizes_main, step_sizes_refine, errors_main,
                                 errors_refine, improv_main, improv_refine, gradients_main, gradients_refine,
                                 stop_main, stop_refine, options)

    if display > 0:
        print('===============================================================================================')
        print('Final results')
        print('    Number of steps =', output.num_steps)
        print('    Relative error =', output.rel_error)
        acc = float('%.6e' % Decimal(output.accuracy))
        print('    Accuracy = ', acc, '%')

    return factors, output