def test_timer(self):
    """test timer"""

    # wait for 1 second
    with utl.timer() as timer:
        time.sleep(1)

    self.assertLess(np.abs(timer.elapsed - 1), 0.1)
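# For reference, a minimal sketch of a timer context manager that would satisfy
# the test above, assuming it only needs to record wall-clock time and expose
# an `elapsed` attribute in seconds. This is an illustration, not the actual
# scikit_tt implementation.
class timer_sketch(object):
    """context manager measuring elapsed wall-clock time (illustrative sketch)"""

    def __enter__(self):
        self.start_time = time.time()
        self.elapsed = 0
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.elapsed = time.time() - self.start_time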
# -----------------------------------

print('----------------------------------------------------------')
print('p_CO in atm    Method    TT ranks    Closeness    CPU time')
print('----------------------------------------------------------')

# construct eigenvalue problems to find the stationary distributions
# ------------------------------------------------------------------

for i in range(8):

    # construct and solve eigenvalue problem for current CO pressure
    operator = mdl.co_oxidation(20, 10**(8 + p_CO_exp[i])).ortho_left().ortho_right()
    initial_guess = tt.ones(operator.row_dims, [1] * operator.order, ranks=R[i]).ortho_left().ortho_right()

    # time the solver; the context variable is named timer (not time) so that
    # it does not shadow the imported time module
    with utl.timer() as timer:
        eigenvalues, solution = evp.als(tt.eye(operator.row_dims) + operator, initial_guess,
                                        repeats=10, solver='eigs')
        solution = (1 / solution.norm(p=1)) * solution

    # compute turn-over frequency of CO2 desorption
    tof.append(utl.two_cell_tof(solution, [2, 1], 1.7e5))

    # print results
    string_p_CO = '10^' + (p_CO_exp[i] >= 0) * '+' + str("%.1f" % p_CO_exp[i])
    string_method = ' ' * 9 + 'EVP' + ' ' * 9
    string_rank = (R[i] < 10) * ' ' + str(R[i]) + ' ' * 8
    string_closeness = str("%.2e" % (operator @ solution).norm()) + ' ' * 6
    string_time = (timer.elapsed < 10) * ' ' + str("%.2f" % timer.elapsed) + 's'
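    # the snippet ends after assembling the row strings; presumably the next
    # step concatenates and prints them, e.g. (a sketch, not from the source):
    print(string_p_CO + string_method + string_rank + string_closeness + string_time)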
def classification_mandy(data_path, m_start, m_final, m_step):
    """Kernel-based MANDy for classification.

    Parameters
    ----------
    data_path: string
        path of data to load
    m_start: int
        minimum number of images
    m_final: int
        maximum number of images
    m_step: int
        step size for number of images

    Returns
    -------
    classification_rates: list of floats
        percentages of correctly identified images
    cpu_times: list of floats
        run times of training phases
    """

    # load data
    data = np.load(data_path)
    x_train = data['tr_img']
    y_train = data['tr_lbl']
    x_test = data['te_img']
    y_test = data['te_lbl']

    # order of the transformed data tensor
    order = x_train.shape[0]

    # define basis functions
    alpha = 19 / 100 * np.pi
    basis_list = []
    for i in range(order):
        basis_list.append([tdt.cos(i, alpha), tdt.sin(i, alpha)])

    # define lists
    classification_rates = []
    cpu_times = []

    # output
    print('Images' + 8 * ' ' + 'Classification rate' + 6 * ' ' + 'CPU time')
    print(47 * '-')

    # loop over image numbers
    for m in range(m_start, m_final, m_step):

        # training phase (apply kernel-based MANDy)
        with utl.timer() as timer:
            z = reg.mandy_kb(x_train[:, :m], y_train[:, :m], basis_list)
        cpu_time = timer.elapsed

        # test phase (multiply z with Gram matrix)
        gram = tdt.gram(x_train[:, :m], x_test, basis_list)
        solution = z.dot(gram)

        # compute classification rate
        n = y_test.shape[1]
        sol = np.zeros(y_test.shape)
        sol[np.argmax(solution, axis=0), np.arange(0, n)] = 1
        classification_rate = 100 - 50 * np.sum(np.abs(sol - y_test)) / n

        # print results
        str_m = str(m)
        len_m = len(str_m)
        str_c = str("%.2f" % classification_rate + '%')
        len_c = len(str_c)
        str_t = str("%.2f" % cpu_time + 's')
        len_t = len(str_t)
        print(str_m + (20 - len_m) * ' ' + str_c + (27 - len_c - len_t) * ' ' + str_t)
        classification_rates.append(classification_rate)
        cpu_times.append(cpu_time)

    print(' ')

    return classification_rates, cpu_times
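# A hypothetical invocation of classification_mandy; the file name and the
# image-number range below are assumptions, not taken from the source. The
# .npz file must contain the arrays 'tr_img', 'tr_lbl', 'te_img', 'te_lbl'
# loaded above; note that range(m_start, m_final, m_step) excludes m_final.
rates, times = classification_mandy('data/mnist.npz', 1000, 6000, 1000)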
def classification_arr(data_path, m_start, m_final, m_step, rank):
    """Alternating ridge regression for classification.

    Parameters
    ----------
    data_path: string
        path of data to load
    m_start: int
        minimum number of images
    m_final: int
        maximum number of images
    m_step: int
        step size for number of images
    rank: int
        TT rank of coefficient tensor

    Returns
    -------
    classification_rates: list of floats
        percentages of correctly identified images
    cpu_times: list of floats
        run times of training phases
    """

    # load data
    data = np.load(data_path)
    x_train = data['tr_img']
    y_train = data['tr_lbl']
    x_test = data['te_img']
    y_test = data['te_lbl']

    # order of the transformed data tensor
    order = x_train.shape[0]

    # define basis functions
    alpha = 19 / 100 * np.pi
    basis_list = []
    for i in range(order):
        basis_list.append([tdt.cos(i, alpha), tdt.sin(i, alpha)])

    # initial guess
    ranks = [1] + [rank for _ in range(order - 1)] + [1]
    cores = [0.001 * np.ones([ranks[i], 2, 1, ranks[i + 1]]) for i in range(order)]
    initial_guess = TT(cores).ortho()

    # define lists
    classification_rates = []
    cpu_times = []

    # output
    print('Images' + 8 * ' ' + 'Classification rate' + 6 * ' ' + 'CPU time')
    print(47 * '-')

    # loop over image numbers
    for m in range(m_start, m_final, m_step):

        # training phase (apply ARR)
        with utl.timer() as timer:
            xi = reg.arr(x_train[:, :m], y_train[:, :m], basis_list, initial_guess,
                         repeats=5, rcond=10**(-2), progress=False)
        cpu_time = timer.elapsed

        # test phase (contract xi with transformed data tensor)
        d = y_test.shape[0]
        solution = []
        for k in range(d):
            solution_vector = np.ones([1, 1])
            for l in range(order):
                n = len(basis_list[l])
                # p is the basis index; it must not reuse k, the class index
                theta = np.array([basis_list[l][p](x_test) for p in range(n)])
                solution_vector = np.einsum('ij,kj->ijk', solution_vector, theta)
                solution_vector = np.tensordot(xi[k].cores[l], solution_vector,
                                               axes=([0, 1], [0, 2]))[0, :, :]
            solution.append(solution_vector)
        solution = np.vstack(solution)

        # compute classification rate
        n = y_test.shape[1]
        sol = np.zeros(y_test.shape)
        sol[np.argmax(solution, axis=0), np.arange(0, n)] = 1
        classification_rate = 100 - 50 * np.sum(np.abs(sol - y_test)) / n

        # print results
        str_m = str(m)
        len_m = len(str_m)
        str_c = str("%.2f" % classification_rate + '%')
        len_c = len(str_c)
        str_t = str("%.2f" % cpu_time + 's')
        len_t = len(str_t)
        print(str_m + (20 - len_m) * ' ' + str_c + (27 - len_c - len_t) * ' ' + str_t)
        classification_rates.append(classification_rate)
        cpu_times.append(cpu_time)

    print(' ')

    return classification_rates, cpu_times
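# A hypothetical invocation of classification_arr, mirroring the MANDy call
# above with a TT rank of 10; the file name and parameter values are
# assumptions chosen for illustration.
rates_arr, times_arr = classification_arr('data/mnist.npz', 1000, 6000, 1000, 10)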
def tedmd_hocur(dimensions, downsampling_rate, integer_lag_times, max_rank, directory):
    """tEDMD using AMUSEt with HOCUR.

    Parameters
    ----------
    dimensions: list[int]
        numbers of contact indices
    downsampling_rate: int
        downsampling rate for trajectory data
    integer_lag_times: list[int]
        integer lag times for application of tEDMD
    max_rank: int
        maximum rank for HOCUR
    directory: string
        directory of the data to load
    """

    # progress
    start_time = utl.progress('Apply AMUSEt (HOCUR)', 0)

    for i in range(len(dimensions)):

        # parameters; the elementwise product assumes integer_lag_times
        # behaves like a NumPy array
        time_step = 2e-3
        lag_times = time_step * downsampling_rate * integer_lag_times

        # define basis list; p is the contact index and must not shadow the
        # loop index i
        basis_list = [[tdt.ConstantFunction(),
                       tdt.GaussFunction(p, 0.285, 0.001),
                       tdt.GaussFunction(p, 0.62, 0.01)] for p in range(dimensions[i])]

        # progress
        utl.progress('Apply AMUSEt (HOCUR, p=' + str(dimensions[i]) + ')',
                     100 * i / len(dimensions), cpu_time=_time.time() - start_time)

        # load contact indices (sorted by relevance)
        contact_indices = np.load(directory + 'ntl9_contact_indices.npz')['indices'][:dimensions[i]]

        # load data
        data, trajectory_lengths = load_data(directory, downsampling_rate, contact_indices, progress=False)

        # select snapshot indices for x and y data
        x_indices = []
        y_indices = []
        for j in range(len(integer_lag_times)):
            x_ind, y_ind = xy_indices(trajectory_lengths, integer_lag_times[j])
            x_indices.append(x_ind)
            y_indices.append(y_ind)

        # apply AMUSEt
        with utl.timer() as timer:
            eigenvalues, _ = tedmd.amuset_hocur(data, x_indices, y_indices, basis_list, max_rank=max_rank)
        cpu_time = timer.elapsed

        # keep only the second eigenvalue for each lag time
        for j in range(len(integer_lag_times)):
            eigenvalues[j] = [eigenvalues[j][1]]

        # save results to file
        dic = {}
        dic["lag_times"] = lag_times
        dic["eigenvalues"] = eigenvalues
        dic["cpu_time"] = cpu_time
        np.savez_compressed(directory + "Results_NTL9_HOCUR_d" + str(dimensions[i]) + ".npz", **dic)

        utl.progress('Apply AMUSEt (HOCUR)', 100 * (i + 1) / len(dimensions),
                     cpu_time=_time.time() - start_time)
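# A hypothetical invocation of tedmd_hocur; all parameter values below are
# assumptions chosen for illustration. integer_lag_times is passed as a NumPy
# array so that the elementwise product with the time step is well-defined,
# and the directory must contain 'ntl9_contact_indices.npz' and the
# trajectory data expected by load_data.
tedmd_hocur([10, 20], downsampling_rate=25, integer_lag_times=np.array([1, 2, 4]),
            max_rank=500, directory='data/')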