# --- Reconstructed from a whitespace-mangled chunk: newlines restored. ---
# NOTE(review): this chunk ends inside the `if` branch of the method loop; the
# rest of the loop body lives in a later, unseen part of the file.
# Report the trivial baseline (MSE of predicting 0 everywhere), then sweep
# over sample sizes and Hankel-recovery methods for the noisy-addition task.
print("test MSE of zero function", np.mean(ytest ** 2))
print('\n\n', '*' * 80, '\nrun', run)
for num_examples in L_num_examples:
    print('______\nsample size:', num_examples)
    print('Current Experiment: Addition with noise ' + str(noise_level)
          + ' and ' + str(num_states) + ' states')

    def data_function(l):
        # Draw `num_examples` sequences of length `l` for the addition task
        # (def instead of assigned lambda, per PEP 8 / E731).
        return generate_data_simple_addition(num_examples, l,
                                             noise_level=noise_level)

    # Training data for the three Hankel sub-blocks: lengths l, 2l and 2l+1.
    Xl, yl = data_function(length)
    X2l, y2l = data_function(length * 2)
    X2l1, y2l1 = data_function(length * 2 + 1)

    for method in methods:
        print(method)
        # Tensor-based Hankel recovery; 'LSTM', 'TIHT+SGD' and 'ALS' are
        # handled by other branches (outside this chunk).
        if method != 'LSTM' and method != 'TIHT+SGD' and method != 'ALS':
            Tl = learning.sequence_to_tensor(Xl)
            T2l = learning.sequence_to_tensor(X2l)
            T2l1 = learning.sequence_to_tensor(X2l1)
            t = tic()  # start timing the three Hankel estimations
            Hl = learning.approximate_hankel(
                Tl, yl, alpha_ini_value=alpha, rank=num_states,
                eps=TIHT_epsilon, learning_rate=TIHT_learning_rate,
                max_iters=TIHT_max_iters, method=method, verbose=-1)
            H2l = learning.approximate_hankel(
                T2l, y2l, alpha_ini_value=alpha, rank=num_states,
                eps=TIHT_epsilon, learning_rate=TIHT_learning_rate,
                max_iters=TIHT_max_iters, method=method, verbose=-1)
            H2l1 = learning.approximate_hankel(
                T2l1, y2l1, alpha_ini_value=alpha, rank=num_states,
                eps=TIHT_epsilon, learning_rate=TIHT_learning_rate,
                max_iters=TIHT_max_iters, method=method, verbose=-1)
# --- Reconstructed from a whitespace-mangled chunk: newlines restored; a
# stale commented-out duplicate of the pad_data/approximate_hankel pipeline
# below (plus dead debug prints) was removed. ---
# NOTE(review): the result of this first swapaxes is overwritten by pad_data()
# below without being read in this chunk — confirm it is not needed by
# surrounding (unseen) code before deleting it.
X_temp = np.swapaxes(X_temp, 1, 2)

# Pad the variable-length sequences to a common length, swap axes 1 and 2
# (presumably time/feature order — TODO confirm against pad_data's layout),
# then estimate the Hankel tensor with TIHT.
X_temp, Y_temp = synthetic_data.pad_data(X_temp_vec, Y_temp_vec)
X_temp = np.swapaxes(X_temp, 1, 2)
Tl = learning.sequence_to_tensor(X_temp)
H_temp = learning.approximate_hankel(
    Tl, Y_temp, alpha_ini_value=1., rank=num_states, eps=TIHT_epsilon,
    learning_rate=TIHT_learning_rate, max_iters=TIHT_max_iters,
    method='TIHT', verbose=-1)
H_temp = H_temp.squeeze()  # drop singleton dimensions before taking the shape
H_temp_shape = list(H_temp.shape)
# NOTE(review): whitespace-mangled fragment left byte-identical. It begins
# mid-call — `alpha=1., lifting=True)` closes a call whose head is in an
# earlier, unseen chunk — and ends mid-call (`approximate_hankel_minus(` is
# left open, its remaining arguments follow in a later chunk). Visible logic:
# one branch runs a torch model on Xtest and computes RMSE/MAPE/MAE against
# ytest (denormalized via mean_data/std_data — presumably; confirm the
# cal_* helpers); the else branch converts each sequence in X to a tensor,
# builds TL_vec via learning.get_all_TLs, and assembles the Hankel blocks
# Hl, Hl_plus and (started) Hl_minus for spectral learning.
alpha=1., lifting=True) Xtest_temp = torch.from_numpy(Xtest).float() pred = model(Xtest_temp) pred_numpy = pred.detach().numpy().reshape(-1, ) pred_numpy = pred_numpy.reshape(ytest.shape) rmse1 = cal_RMSE(pred_numpy, ytest, mean_data, std_data) mape1 = cal_MAPE(pred_numpy, ytest, mean_data, std_data) mae1 = cal_MAE(pred_numpy, ytest, mean_data, std_data) else: X_vec, y_vec = [], [] for i in range(0, len(X)): X_vec.append(learning.sequence_to_tensor(X[i])) y_vec.append(Y[i]) TL_vec = learning.get_all_TLs(X_vec, y_vec, rank=num_states, eps=TIHT_epsilon, learning_rate=TIHT_learning_rate, max_iters=TIHT_max_iters, method=method, verbose=0, alpha_ini_value=1.) Hl = learning.approximate_hankel_l(TL_vec=TL_vec, length=length) Hl_plus = learning.approximate_hankel_plus(TL_vec=TL_vec, length=length) Hl_minus = learning.approximate_hankel_minus(TL_vec=TL_vec,