def test_factor():
    global_var.init_caches(set_grids=False)
    global_var.tensor_cache.memory_usage_properties(obj_test_case=np.ones((10, 10, 10)),
                                                    mem_for_cache_frac=25)
    names = ['mock1', 'mock2', 'mock3']
    mock = mock_token_family(names, mock_evaluator)

    test_factor_1 = Factor(names[0], mock, randomize=True)
    test_factor_2 = Factor(names[0], mock, randomize=True)
    print(test_factor_1.params, test_factor_1.params_description)
    print(test_factor_2.params, test_factor_2.params_description)
    # print(test_factor_3.params, test_factor_3.params_description)

    assert type(test_factor_1.cache_label) == tuple and \
           type(test_factor_1.cache_label[0]) == str and \
           type(test_factor_1.cache_label[1]) == tuple
    assert np.all(test_factor_1.evaluate() == test_factor_2.evaluate())
    print(test_factor_1.params, test_factor_2.params)
    # assert test_factor_1 == test_factor_2, 'Equally defined tokens are not equal'

    test_factor_3 = Factor(names[1], mock, randomize=False)
    test_factor_3.Set_parameters(random=False, not_power=2, power=1)
    test_factor_4 = Factor(names[1], mock, randomize=False)
    test_factor_4.Set_parameters(random=False, not_power=2, power=1)
    assert test_factor_3 == test_factor_4, 'Equally defined tokens are not equal'
    print(test_factor_3.name)
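
# Illustrative sketch only: the equality check above relies on matching token names and
# parameter values within declared tolerances. The SimpleFactor class below is hypothetical
# and is not the library's Factor implementation; it only demonstrates the idea of
# "equally defined" tokens.
def _sketch_factor_equality():
    class SimpleFactor:
        '''Hypothetical stand-in for a parameterized token: a name plus numeric parameters.'''
        def __init__(self, name, params, tolerances):
            self.name = name                # token label, e.g. 'mock2'
            self.params = params            # e.g. {'power': 1, 'not_power': 2}
            self.tolerances = tolerances    # allowed per-parameter difference, e.g. {'power': 0}

        def __eq__(self, other):
            # Two factors are "equally defined" if the names coincide and every parameter
            # differs by no more than its declared equality tolerance.
            return (self.name == other.name and
                    all(abs(self.params[key] - other.params[key]) <= self.tolerances[key]
                        for key in self.params))

    f_a = SimpleFactor('mock2', {'power': 1, 'not_power': 2}, {'power': 0, 'not_power': 0})
    f_b = SimpleFactor('mock2', {'power': 1, 'not_power': 2}, {'power': 0, 'not_power': 0})
    assert f_a == f_b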
def test_cache():
    global_var.init_caches(set_grids=True)
    global_var.tensor_cache.memory_usage_properties(obj_test_case=np.ones((10, 10, 10)),
                                                    mem_for_cache_frac=25)
    x = np.linspace(0, 2 * np.pi, 100)
    global_var.grid_cache.memory_usage_properties(obj_test_case=x, mem_for_cache_frac=25)
    upload_grids(x, global_var.grid_cache)
    print(global_var.grid_cache.memory.keys(), global_var.grid_cache.memory.values())

    assert '0' in global_var.grid_cache
    assert x in global_var.grid_cache
    assert (x, False) in global_var.grid_cache
    assert (x, True) not in global_var.grid_cache

    x_returned = global_var.grid_cache.get('0')
    assert np.all(x == x_returned)

    global_var.grid_cache.clear(full=True)
    y = np.linspace(0, 10, 200)
    grids = np.meshgrid(x, y)
    upload_grids(grids, global_var.grid_cache)
    print('memory for cache:', global_var.grid_cache.available_mem, 'B')
    print('consumed memory:', global_var.grid_cache.consumed_memory, 'B')
    print(global_var.grid_cache.memory.keys())

    global_var.grid_cache.delete_entry(entry_label='0')
    assert '0' not in global_var.grid_cache.memory.keys()
    assert False
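
# Illustrative sketch only: a minimal memory-bounded cache showing the general idea behind
# memory_usage_properties / available_mem / consumed_memory. The ToyCache class and its
# mem_for_cache_abs argument are assumptions for illustration, not the EPDE Cache API
# (which derives its budget from a fraction of available system memory).
def _sketch_memory_bounded_cache():
    import numpy as np

    class ToyCache:
        def __init__(self):
            self.memory = {}
            self.available_mem = 0          # memory budget in bytes

        def memory_usage_properties(self, obj_test_case, mem_for_cache_abs):
            # Take an absolute budget in bytes and check that at least one entry fits.
            self.available_mem = mem_for_cache_abs
            assert obj_test_case.nbytes <= self.available_mem, 'budget smaller than one entry'

        @property
        def consumed_memory(self):
            return sum(tensor.nbytes for tensor in self.memory.values())

        def add(self, label, tensor):
            # Silently skip entries that would exceed the budget.
            if self.consumed_memory + tensor.nbytes <= self.available_mem:
                self.memory[label] = tensor

        def get(self, label):
            return self.memory[label]

        def delete_entry(self, entry_label):
            self.memory.pop(entry_label, None)

    cache = ToyCache()
    x = np.linspace(0, 2 * np.pi, 100)
    cache.memory_usage_properties(obj_test_case=x, mem_for_cache_abs=10 ** 6)
    cache.add('0', x)
    assert np.all(cache.get('0') == x)
    cache.delete_entry('0')
    assert '0' not in cache.memory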
def test_TF():
    global_var.init_caches(set_grids=True)
    global_var.tensor_cache.memory_usage_properties(obj_test_case=np.ones((10, 10, 10)),
                                                    mem_for_cache_frac=25)
    family = Token_family('test_type')
    family.use_glob_cache()
    family.set_status()

    mock_equal_params = {'not_power': 0, 'power': 0}
    mock_params = {'not_power': (1, 4), 'power': (1, 1)}
    family.set_evaluator(mock_evaluator)
    names = ['n1', 'n2', 'n3']
    family.set_params(names, mock_params, mock_equal_params)
    print(family.cardinality())

    occ, token_example = family.create('n1')
    print(occ, token_example.name)
    occ, token_example = family.create()
    print(occ, token_example.name)
    occ, token_example = family.create(occupied=['n1', 'n2'])
    print(occ, token_example.name, token_example.status)
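
# Illustrative sketch only: Token_family.create is exercised above with an explicit label,
# with no arguments, and with an 'occupied' list. Conceptually it hands out a token whose
# label is not yet taken; the pick_label helper below is hypothetical, not library code.
def _sketch_label_selection():
    import numpy as np

    def pick_label(names, requested=None, occupied=()):
        '''Return the requested label, or a random label that is not occupied.'''
        if requested is not None:
            return requested
        free = [name for name in names if name not in occupied]
        return np.random.choice(free)

    names = ['n1', 'n2', 'n3']
    print(pick_label(names, requested='n1'))           # explicit choice
    print(pick_label(names))                           # random choice among all names
    print(pick_label(names, occupied=['n1', 'n2']))    # only 'n3' remains available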
def test_term():
    '''
    Check both ways (random and deterministic) to initialize a term; check the correctness
    of term value evaluation & term equality, the output format and the latex format.
    '''
    global_var.init_caches(set_grids=False)
    global_var.tensor_cache.memory_usage_properties(obj_test_case=np.ones((10, 10, 10)),
                                                    mem_for_cache_frac=25)
    names = ['mock1', 'mock2', 'mock3']
    mock = mock_token_family(names, mock_evaluator)

    test_term_1 = Term([mock, ])
    print(test_term_1.name)
    print('available', test_term_1.available_tokens[0].tokens)
    assert test_term_1.available_tokens[0].tokens == names
    assert type(test_term_1) == Term

    test_term_2 = Term([mock, ], passed_term='mock3')
    print(test_term_2.name)
    assert type(test_term_2) == Term

    test_term_3 = Term([mock, ], passed_term=['mock3', 'mock1'])
    print(test_term_3.name)
    assert type(test_term_3) == Term

    # Touch the evaluation attributes without calling them to make sure they are defined.
    test_term_2.evaluate, test_term_3.evaluate
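
# Illustrative sketch only: broadly, a term is built as a product of factors evaluated on a
# common grid, so its value can be obtained as an elementwise product of the factors' tensors.
# This is a standalone numpy illustration of that idea, not the Term.evaluate implementation.
def _sketch_term_as_factor_product():
    import numpy as np

    factor_values = [np.random.random((10, 10)) for _ in range(2)]   # two factor tensors
    term_value = np.prod(np.stack(factor_values), axis=0)            # elementwise product
    assert term_value.shape == (10, 10)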
def test_equation():
    '''
    Use the trigonometric identity sin^2 (x) + cos^2 (x) = 1 to generate data, and with it:
    initialize the equation; check the equation splitting & weights discovery, the output
    format and the latex format. Additionally, test the evaluator for trigonometric functions.
    '''
    global_var.init_caches(set_grids=True)
    global_var.tensor_cache.memory_usage_properties(obj_test_case=np.ones((10, 10, 10)),
                                                    mem_for_cache_frac=25)
    director = Operator_director()
    director.operator_assembly()

    x = np.linspace(0, 2 * np.pi, 100)
    global_var.grid_cache.memory_usage_properties(obj_test_case=x, mem_for_cache_frac=25)
    upload_grids(x, global_var.grid_cache)
    print(global_var.grid_cache.memory)

    names = ['sin', 'cos']
    # simple case: a single parameter of a token - its power
    trig_tokens = Token_family('trig')
    trig_tokens.set_status(unique_specific_token=False, unique_token_type=False,
                           meaningful=True, unique_for_right_part=False)
    equal_params = {'power': 0, 'freq': 0.2, 'dim': 0}
    trig_tokens.use_glob_cache()

    trig_params = OrderedDict([('power', (1., 1.)), ('freq', (0.5, 1.5)), ('dim', (0., 0.))])
    trig_tokens.set_params(names, trig_params, equal_params)
    trig_tokens.set_evaluator(trigonometric_evaluator)

    eq1 = Equation(tokens=[trig_tokens, ], basic_structure=[],
                   terms_number=3, max_factors_in_term=2)
    # add the ability to choose the probabilities for the number of factors
    # assert False
    director.constructor.operator.set_sparcity(sparcity_value=1.)
    eq1.select_target_idx(target_idx_fixed=0)
    eq1.select_target_idx(operator=director.constructor.operator)
    print([term.name for term in eq1.structure])
    print(eq1.fitness_value)
    print(eq1.described_variables)
    print(eq1.evaluate(normalize=False, return_val=True))
    print('internal:', eq1.weights_internal, 'final:', eq1.weights_final)
    raise NotImplementedError
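
# Illustrative sketch only: the docstring above refers to generating data from the identity
# sin^2(x) + cos^2(x) = 1. A minimal numpy construction of such synthetic data, independent
# of the EPDE evaluators, could look like this.
def _sketch_trig_identity_data():
    import numpy as np

    x = np.linspace(0, 2 * np.pi, 100)
    sin_sq = np.sin(x) ** 2
    cos_sq = np.cos(x) ** 2
    # The identity holds pointwise up to floating-point error, so an equation search over
    # {sin, cos} tokens should be able to recover sin^2(x) + cos^2(x) - 1 = 0.
    assert np.allclose(sin_sq + cos_sq, np.ones_like(x))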
def test_pool():
    global_var.init_caches(set_grids=True)
    global_var.tensor_cache.memory_usage_properties(obj_test_case=np.ones((10, 10, 10)),
                                                    mem_for_cache_frac=25)
    mock_equal_params = {'not_power': 0, 'power': 0}
    mock_params = {'not_power': (1, 4), 'power': (1, 1)}

    f1 = Token_family('type_1')
    f2 = Token_family('type_2')
    f3 = Token_family('type_3')
    set_family(f1, names=['t1_1', 't1_2', 't1_3'], params=mock_params,
               equal_params=mock_equal_params, evaluator=mock_evaluator, meaningful=True)
    set_family(f2, names=['t2_1', 't2_2', 't2_3', 't2_4'], params=mock_params,
               equal_params=mock_equal_params, evaluator=mock_evaluator, meaningful=False)
    set_family(f3, names=['t3_1', 't3_2'], params=mock_params,
               equal_params=mock_equal_params, evaluator=mock_evaluator, meaningful=True)

    pool = TF_Pool([f1, f2, f3])
    print('meaningful:', [(family.type, family.tokens) for family in pool.families_meaningful])
    print('all:', [(family.type, family.tokens) for family in pool.families])
    pool.families_cardinality(meaningful_only=True)
    pool.families_cardinality(meaningful_only=False)
def test_single_token_type():
    seed = None
    if seed is not None:
        np.random.seed(seed)

    folder = sys.path[-1] + 'preprocessing/Wave/'
    boundary = 15
    print(sys.path)
    u_tensors = download_variable(folder + 'wave_HP.npy', folder + 'Derivatives.npy',
                                  boundary, time_axis=0)
    u_names = Define_Derivatives('u', 3, 2)

    global_var.init_caches(set_grids=False)
    global_var.tensor_cache.memory_usage_properties(obj_test_case=u_tensors[0, ...],
                                                    mem_for_cache_frac=25)
    upload_simple_tokens(u_names, u_tensors, global_var.tensor_cache)

    u_tokens = Token_family('U')
    u_tokens.set_status(unique_specific_token=False, unique_token_type=False,
                        meaningful=True, unique_for_right_part=True)
    equal_params = {'power': 0}
    u_token_params = OrderedDict([('power', (1, 2))])
    u_tokens.set_params(u_names, u_token_params, equal_params)
    u_tokens.use_glob_cache()
    # u_eval_params = {'params_names':['power'], 'params_equality':{'power' : 0}}
    u_tokens.set_evaluator(simple_function_evaluator)

    director = Operator_director()
    director.operator_assembly()

    tokens = [u_tokens, ]
    pop_constructor = operators.systems_population_constructor(tokens=tokens, terms_number=5,
                                                               max_factors_in_term=1,
                                                               eq_search_evo=director.constructor.operator)
    equation_creation_params = {'eq_search_iters': 2}
    optimizer = moeadd_optimizer(pop_constructor, 7, 20, equation_creation_params,
                                 delta=1 / 50., neighbors_number=3)
    evo_operator = operators.sys_search_evolutionary_operator(operators.mixing_xover,
                                                              operators.gaussian_mutation)
    optimizer.set_evolutionary(operator=evo_operator)

    # np.concatenate expects a sequence of arrays: a single 1 followed by
    # a zero for every meaningful token family.
    best_obj = np.concatenate((np.ones([1]),
                               np.zeros(shape=len([1 for token_family in tokens
                                                   if token_family.status['meaningful']]))))
    print(best_obj)
    raise NotImplementedError
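
# Illustrative sketch only: the optimizer above is built with delta=1/50. and neighbors_number=3.
# In MOEA/D-style algorithms these usually control the spacing of the weight vectors and the
# size of each vector's neighborhood. The helpers below illustrate that setup for two objectives
# and are hypothetical: they are not the moeadd_optimizer internals.
def _sketch_moead_weights():
    import numpy as np

    def moead_weights_2d(delta):
        '''Evenly spaced weight vectors on the 2-objective simplex with spacing delta.'''
        w1 = np.arange(0., 1. + 1e-9, delta)
        return np.stack([w1, 1. - w1], axis=1)

    def neighborhoods(weights, neighbors_number):
        '''Indices of the neighbors_number closest weight vectors for each vector.'''
        dists = np.linalg.norm(weights[:, None, :] - weights[None, :, :], axis=-1)
        return np.argsort(dists, axis=1)[:, :neighbors_number]

    weights = moead_weights_2d(delta=1 / 50.)
    nbrs = neighborhoods(weights, neighbors_number=3)
    print(weights.shape, nbrs.shape)    # (51, 2) and (51, 3)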
                                 max_order=max_order)
ts_smoothed = np.load(ff_filename)
print(derivs.shape)

'''
Initialize the cache that stores the computed term vectors, so that they are not recomputed
on every pass and are not stored inside individual terms, which could lead to repeated
computations.

global_var is the module with the global variables; its init_caches() method creates the caches:
global_var.tensor_cache is the cache with the values of factors and terms;
global_var.grid_cache is the cache holding the tensors of coordinate values at the grid nodes.
The .memory_usage_properties method sets how the cache may use memory.
'''
global_var.init_caches(set_grids=True)
global_var.tensor_cache.memory_usage_properties(obj_test_case=ts, mem_for_cache_frac=25)
global_var.grid_cache.memory_usage_properties(obj_test_case=x, mem_for_cache_frac=5)

'''
Define the pool of tokens from which the equations will be built. Trimming a 10-element
boundary removes the errors in the derivative values that occur near the borders of the
studied domain. We also pre-load the data into the cache.
'''
boundary = 10
upload_grids(x[boundary:-boundary], global_var.grid_cache)
u_derivs_stacked = prepare_var_tensor(ts_smoothed, derivs, time_axis=0,