Example #1
def test_grids_cache():
    global_var.init_caches(set_grids=True)
    global_var.tensor_cache.memory_usage_properties(obj_test_case=np.ones((10,10,10)), mem_for_cache_frac = 5)  
    
    assert type(global_var.tensor_cache.base_tensors) == list, f'Creating wrong obj for base tensors: {type(global_var.tensor_cache.base_tensors)} instead of list'
    
    x = np.linspace(0, 2*np.pi, 100)
    global_var.grid_cache.memory_usage_properties(obj_test_case=x, mem_for_cache_frac = 5)  

    upload_grids(x, global_var.grid_cache)
    
    print(global_var.grid_cache.memory_default.keys(), global_var.grid_cache.memory_default.values())
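    # The membership checks below exercise, as far as can be inferred from this test,
    # three lookup modes of the grid cache: by string label ('0'), by the raw tensor (x),
    # and by a (tensor, structural_flag) pair.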
    assert '0' in global_var.grid_cache 
    assert x in global_var.grid_cache 
    assert (x, False) in global_var.grid_cache 
    assert (x, True) not in global_var.grid_cache
    
    x_returned = global_var.grid_cache.get('0')
    assert np.all(x == x_returned)
    global_var.grid_cache.clear(full = True)
    
    y = np.linspace(0, 10, 200)
    grids = np.meshgrid(x, y)
    upload_grids(grids, global_var.grid_cache)
    print('memory for cache:', global_var.grid_cache.available_mem, 'B')
    print('consumed memory:', global_var.grid_cache.consumed_memory, 'B')
    print(global_var.grid_cache.memory_default.keys())
    global_var.grid_cache.delete_entry(entry_label = '0')
    assert '0' not in global_var.grid_cache.memory_default.keys()
Example #2
def test_equation():
    '''
    
    Use the trigonometric identity sin^2(x) + cos^2(x) = 1 to generate data and, with it, test: equation initialization,
    equation splitting & weights discovery, output format, and LaTeX format. Additionally, test the evaluator for trigonometric functions.
    
    '''
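    # A minimal sketch of data generation via the identity mentioned above (assumption:
    # this test relies on cached token tensors rather than on an explicit data array):
    #     data = np.sin(x) ** 2 + np.cos(x) ** 2   # equals np.ones_like(x) up to rounding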
    global_var.init_caches(set_grids=True)
    global_var.tensor_cache.memory_usage_properties(obj_test_case=np.ones((10,10,10)), mem_for_cache_frac = 25)  
    
    from epde.eq_search_strategy import Strategy_director
    from epde.operators.ea_stop_criteria import Iteration_limit
    
    director = Strategy_director(Iteration_limit, {'limit' : 100})
    director.strategy_assembly()
    
    x = np.linspace(0, 2*np.pi, 100)
    global_var.grid_cache.memory_usage_properties(obj_test_case=x, mem_for_cache_frac = 25)  
    upload_grids(x, global_var.grid_cache)
    names = ['sin', 'cos'] # simple case: single parameter of a token - power
    
    trig_tokens = Token_family('trig')    
    trig_tokens.set_status(unique_specific_token=False, unique_token_type=False, meaningful = True, 
                           unique_for_right_part = False)
    
    equal_params = {'power' : 0, 'freq' : 0.2, 'dim' : 0}
    
    trig_params = OrderedDict([('power', (1., 1.)), ('freq', (0.5, 1.5)), ('dim', (0., 0.))])
    trig_tokens.set_params(names, trig_params, equal_params)
    trig_tokens.set_evaluator(trigonometric_evaluator)  
    
    set_family(trig_tokens, names, trig_params, equal_params, trigonometric_evaluator, True)
    pool = TF_Pool([trig_tokens,])
    
    eq1 = Equation(pool , basic_structure = [], 
                   terms_number = 3, max_factors_in_term = 2)   # add the ability to set probabilities for the number of factors
    director.constructor.strategy.modify_block_params(block_label = 'rps1', param_label = 'sparsity', 
                                                             value = 1., suboperator_sequence = ['eq_level_rps', 'fitness_calculation', 'sparsity'])

    director._constructor._strategy.apply_block('rps1', 
                                                {'population' : [eq1,], 'separate_vars' : []})
    
    # the bare attribute accesses below serve as smoke tests for the equation properties
    eq1.described_variables
    eq1.evaluate(normalize = False, return_val = True)
    eq1.weights_internal
    eq1.weights_final
    print(eq1.text_form)
Example #3
def test_factor():
    global_var.init_caches(set_grids=False)
    global_var.tensor_cache.memory_usage_properties(obj_test_case=np.ones((10,10,10)), mem_for_cache_frac = 5)
    test_status = {'meaningful':True, 'unique_specific_token':False, 'unique_token_type':False, 
                   'unique_for_right_part':False, 'requires_grid':False}
    try:
        # constructing a randomized factor without params_description is expected to raise
        name = 'made_to_fail'
        test_factor = Factor(name, test_status,  family_type = 'some family', randomize = True)
    except AssertionError:
        pass
    name = 'made_to_success'
    test_equal_params = {'not_power' : 0, 'power' : 0}
    test_params = {'not_power' : (1, 4), 'power' : (1, 1)}    
    test_factor = Factor(name, test_status, family_type = 'some family', randomize = True, params_description = test_params, 
                         equality_ranges = test_equal_params)
    _evaluator = Evaluator(mock_eval_function, [])
    test_factor.Set_evaluator(_evaluator)
    test_factor.evaluate()
    assert test_factor.name is not None
Example #4
def test_custom_evaluator():
    x = np.linspace(0, 4*np.pi, 1000)
#    ts = np.ones()
    
    global_var.init_caches(set_grids=True)
#    global_var.tensor_cache.memory_usage_properties(obj_test_case=ts, mem_for_cache_frac = 5)  
    global_var.grid_cache.memory_usage_properties(obj_test_case=x, mem_for_cache_frac = 5)
    upload_grids(x, global_var.grid_cache)     
    
    test_lambdas = {'cos' : lambda *grids, **kwargs: np.cos(kwargs['freq'] * grids[int(kwargs['dim'])]) ** kwargs['power'], 
                   'sin' : lambda *grids, **kwargs: np.sin(kwargs['freq'] * grids[int(kwargs['dim'])]) ** kwargs['power']}
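    # Each lambda receives the grids as positional arguments and the token parameters
    # listed in eval_fun_params_labels ('freq', 'dim', 'power') as keyword arguments.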
    test_eval = Custom_Evaluator(test_lambdas, eval_fun_params_labels = ['freq', 'dim', 'power'], use_factors_grids = True)

    trig_tokens = Token_family('Trigonometric')
    trig_names = ['sin', 'cos']
    trig_tokens.set_status(unique_specific_token=True, unique_token_type=True, 
                           meaningful = False, unique_for_right_part = False)
    trig_token_params = OrderedDict([('power', (1, 1)), ('freq', (0.95, 1.05)), ('dim', (0, 0))])
    trig_equal_params = {'power' : 0, 'freq' : 0.05, 'dim' : 0}
    trig_tokens.set_params(trig_names, trig_token_params, trig_equal_params)
    trig_tokens.set_evaluator(test_eval, [])
    
    global_var.tensor_cache.use_structural()

    pool = TF_Pool([trig_tokens])
    pool.families_cardinality()    
    _, test_factor = pool.create()
    test_factor.evaluate()
    
    test_lambdas = lambda *grids, **kwargs: np.cos(kwargs['freq'] * grids[int(kwargs['dim'])]) ** kwargs['power']
    test_eval = Custom_Evaluator(test_lambdas, eval_fun_params_labels = ['freq', 'dim', 'power'], use_factors_grids = True)    
    trig_tokens.set_evaluator(test_eval, [])

    pool = TF_Pool([trig_tokens])
    pool.families_cardinality()    
    _, test_factor = pool.create()
    test_factor.evaluate()
    
    
    
    
Example #5
def test_tensor_cache():
    global_var.init_caches(set_grids=False)
    global_var.tensor_cache.memory_usage_properties(obj_test_case=np.ones((10,10,10)), mem_for_cache_frac = 5)  
    
    tensors = np.zeros((5, 10, 10, 10))
    t_labels = Define_Derivatives('t', 2, 2)
    upload_simple_tokens(t_labels, global_var.tensor_cache, tensors)
    global_var.tensor_cache.use_structural(use_base_data = True)
    replacing_tensors = np.ones((5, 10, 10, 10))
    replacing_data = {}
    for idx, key in enumerate(global_var.tensor_cache.memory_default.keys()):
        replacing_data[key] = replacing_tensors[idx, ... ]
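    # replacing_data maps every label already stored in the cache to a replacement tensor
    # of the same shape; it is passed to use_structural below to overwrite the structural data.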
        
    print(replacing_data[list(global_var.tensor_cache.memory_default.keys())[0]].shape, list(global_var.tensor_cache.memory_default.values())[0].shape)
    global_var.tensor_cache.use_structural(use_base_data = False, replacing_data = replacing_data)
    print(global_var.tensor_cache.memory_default.keys())
    print(list(global_var.tensor_cache.memory_default.keys()))
    key = list(global_var.tensor_cache.memory_default.keys())[np.random.randint(low = 0,
                                                                                high = len(global_var.tensor_cache.memory_default.keys()))]
    global_var.tensor_cache.use_structural(use_base_data = False, 
                                           replacing_data = np.full(shape = (10, 10, 10), fill_value=2.),
                                           label = key)
Example #6
def test_term():
    '''
    Check both (random and predefined) ways to initialize a term; check the correctness of term value evaluation & term equality,
    output format, and LaTeX format.

    '''

    x = np.linspace(0, 2*np.pi, 100)   
    global_var.init_caches(set_grids=True)
    global_var.tensor_cache.memory_usage_properties(obj_test_case=x, mem_for_cache_frac = 5)  
    global_var.grid_cache.memory_usage_properties(obj_test_case=x, mem_for_cache_frac = 5)  
    upload_grids(x, global_var.grid_cache)    
    mock_equal_params = {'not_power' : 0, 'power' : 0}
    mock_params = {'not_power' : (1, 4), 'power' : (1, 1)}

    f1 = Token_family('type_1')
    f2 = Token_family('type_2')
    f3 = Token_family('type_3')
    set_family(f1, names = ['t1_1', 't1_2', 't1_3'], params = mock_params, 
               equal_params=mock_equal_params, evaluator=mock_eval_function, meaningful=True);
    set_family(f2, names = ['t2_1', 't2_2', 't2_3', 't2_4'], params = mock_params, 
               equal_params=mock_equal_params, evaluator=mock_eval_function, meaningful=False); 
    set_family(f3, names = ['t3_1', 't3_2'], params = mock_params, 
               equal_params=mock_equal_params, evaluator=mock_eval_function, meaningful=True)
    
    pool = TF_Pool([f1, f2, f3])
    test_term_1 = Term(pool)

    print(test_term_1.name)
    print('available', test_term_1.available_tokens[0].tokens)
#    assert test_term_1.available_tokens[0].tokens == names
#    assert type(test_term_1) == Term

    test_term_2 = Term(pool, passed_term = 't1_1')
    print(test_term_2.name)
    print(test_term_2.solver_form)
    assert test_term_2.solver_form[1] is None and test_term_2.solver_form[2] == 1
Example #7
def test_ode_auto():
    '''
    
    In this problem we search for the equation u sin(x) + u' cos(x) = 1 from its solution u = sin(x) + C cos(x),
    where the particular solution has C = 1.3.
    
    We define: x - the time coordinate axis; ts - the time series of (synthetic) measurements;
    ff_filename - the file to which the smoothed time series is saved; output_file_name - the file for the derivatives;
    step - the time step.
    '''
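    # Quick check of the statement above: if u = sin(x) + C*cos(x), then u' = cos(x) - C*sin(x),
    # so u*sin(x) + u'*cos(x) = sin^2(x) + C*sin(x)*cos(x) + cos^2(x) - C*sin(x)*cos(x) = 1
    # for any constant C, i.e. the target equation holds for the whole solution family.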

    #    delim = '/' if sys.platform == 'linux' else '\\'

    x = np.linspace(0, 4 * np.pi, 1000)
    print('path:', sys.path)
    ts = np.load(
        '/media/mike_ubuntu/DATA/EPDE_publication/tests/system/Test_data/fill366.npy'
    )  # tests/system/
    new_derivs = True

    ff_filename = '/media/mike_ubuntu/DATA/EPDE_publication/tests/system/Test_data/smoothed_ts.npy'
    output_file_name = '/media/mike_ubuntu/DATA/EPDE_publication/tests/system/Test_data/derivs.npy'
    step = x[1] - x[0]
    '''

    '''

    max_order = 1  # presence of the 2nd order derivatives leads to equality u = d^2u/dx^2 on this data (elaborate)

    if new_derivs:
        _, derivs = Preprocess_derivatives(ts,
                                           data_name=ff_filename,
                                           output_file_name=output_file_name,
                                           steps=(step, ),
                                           smooth=False,
                                           sigma=1,
                                           max_order=max_order)
        ts_smoothed = np.load(ff_filename)
    else:
        try:
            ts_smoothed = np.load(ff_filename)
            derivs = np.load(output_file_name)
        except FileNotFoundError:
            _, derivs = Preprocess_derivatives(
                ts,
                data_name=ff_filename,
                output_file_name=output_file_name,
                steps=(step, ),
                smooth=False,
                sigma=1,
                max_order=max_order)
            ts_smoothed = np.load(ff_filename)

    global_var.init_caches(set_grids=True)
    global_var.tensor_cache.memory_usage_properties(obj_test_case=ts,
                                                    mem_for_cache_frac=5)
    global_var.grid_cache.memory_usage_properties(obj_test_case=x,
                                                  mem_for_cache_frac=5)

    print(type(derivs))

    boundary = 10
    upload_grids(x[boundary:-boundary], global_var.grid_cache)
    u_derivs_stacked = prepare_var_tensor(ts_smoothed,
                                          derivs,
                                          time_axis=0,
                                          boundary=boundary)

    u_names, u_deriv_orders = Define_Derivatives('u', 1, 1)
    upload_simple_tokens(u_names, global_var.tensor_cache, u_derivs_stacked)

    u_tokens = Token_family('Function', family_of_derivs=True)
    u_tokens.set_status(unique_specific_token=False,
                        unique_token_type=False,
                        s_and_d_merged=False,
                        meaningful=True,
                        unique_for_right_part=False)
    u_token_params = OrderedDict([('power', (1, 1))])
    u_equal_params = {'power': 0}
    u_tokens.set_params(u_names, u_token_params, u_equal_params,
                        u_deriv_orders)
    u_tokens.set_evaluator(simple_function_evaluator, [])

    grid_names = [
        't',
    ]
    grid_tokens = Token_family('Grids')
    grid_tokens.set_status(unique_specific_token=True,
                           unique_token_type=True,
                           s_and_d_merged=False,
                           meaningful=False,
                           unique_for_right_part=False)
    grid_token_params = OrderedDict([('power', (1, 1))])
    grid_equal_params = {'power': 0}
    grid_tokens.set_params(grid_names, grid_token_params, grid_equal_params)
    grid_tokens.set_evaluator(simple_function_evaluator, [])
    #
    trig_tokens = Token_family('Trigonometric')
    trig_names = ['sin', 'cos']
    trig_tokens.set_status(unique_specific_token=True,
                           unique_token_type=True,
                           meaningful=False,
                           unique_for_right_part=False)
    trig_token_params = OrderedDict([('power', (1, 1)), ('freq', (0.95, 1.05)),
                                     ('dim', (0, 0))])
    trig_equal_params = {'power': 0, 'freq': 0.05, 'dim': 0}
    trig_tokens.set_params(trig_names, trig_token_params, trig_equal_params)
    trig_tokens.set_evaluator(trigonometric_evaluator, [])

    upload_simple_tokens(grid_names, global_var.tensor_cache, [
        x[boundary:-boundary],
    ])
    global_var.tensor_cache.use_structural()

    pool = TF_Pool([u_tokens, trig_tokens])  # grid_tokens,
    pool.families_cardinality()
    '''
    Use the basic evolutionary operator.
    '''
    #    test_strat = Strategy_director(Iteration_limit, {'limit' : 300})
    test_strat = Strategy_director_solver(Iteration_limit, {'limit': 50})
    test_strat.strategy_assembly()

    #    test_system = SoEq(pool = pool, terms_number = 4, max_factors_in_term=2, sparcity = (0.1,))
    #    test_system.set_eq_search_evolutionary(director.constructor.operator)
    #    test_system.create_equations(population_size=16, eq_search_iters=300)

    #    tokens=[h_tokens, trig_tokens]
    '''
    Set up the generator of new equations that will form the population for the
    multi-objective optimization algorithm.
    '''
    pop_constructor = operators.systems_population_constructor(
        pool=pool,
        terms_number=6,
        max_factors_in_term=2,
        eq_search_evo=test_strat.constructor.strategy,
        sparcity_interval=(0.0, 0.5))
    '''
    Define the multi-objective optimizer object and the evolutionary operator, and set the best attainable
    values of the objective functions.
    '''
    optimizer = moeadd_optimizer(pop_constructor,
                                 3,
                                 3,
                                 delta=1 / 50.,
                                 neighbors_number=3,
                                 solution_params={})
    evo_operator = operators.sys_search_evolutionary_operator(
        operators.mixing_xover, operators.gaussian_mutation)

    optimizer.set_evolutionary(operator=evo_operator)
    best_obj = np.concatenate((np.ones([1, ]),
                               np.zeros(shape=len([1 for token_family in pool.families
                                                   if token_family.status['meaningful']]))))
    optimizer.pass_best_objectives(*best_obj)

    def simple_selector(sorted_neighbors, number_of_neighbors=4):
        return sorted_neighbors[:number_of_neighbors]
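    # simple_selector keeps the first `number_of_neighbors` entries of the sorted neighbor
    # list; it is passed to optimizer.optimize() below as the selection routine.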

    '''
    Run the optimization.
    '''

    optimizer.optimize(
        simple_selector, 0.95, (4, ), 100,
        0.75)  # the simple form of the sought equation can be found within as few as 10 iterations

    for idx in range(len(optimizer.pareto_levels.levels)):
        print('\n')
        print(f'{idx}-th non-dominated level')
        for solution in optimizer.pareto_levels.levels[idx]:
            print(solution.structure[0].text_form, solution.evaluate())

    raise NotImplementedError
Example #8
        entry_token_family.set_status(unique_specific_token=False, unique_token_type=False, 
                             s_and_d_merged = False, meaningful = True)     
        entry_token_family.set_params(entry.names, OrderedDict([('power', (1, data_fun_pow))]),
                                      {'power' : 0}, entry.d_orders)
        entry_token_family.set_evaluator(simple_function_evaluator, [])
            
        print(entry_token_family.tokens)
        data_tokens.append(entry_token_family)
 
    if isinstance(additional_tokens, list):
        if not all([isinstance(tf, (TokenFamily, Prepared_tokens)) for tf in additional_tokens]):
            raise TypeError(f'Incorrect type of additional tokens: expected a list or a TokenFamily/Prepared_tokens object, instead got a list of {type(additional_tokens[0])}')
    elif isinstance(additional_tokens, (TokenFamily, Prepared_tokens)):
        additional_tokens = [additional_tokens,]
    else:
        print(isinstance(additional_tokens, Prepared_tokens))
        raise TypeError(f'Incorrect type of additional tokens: expected a list or a TokenFamily/Prepared_tokens object, instead got {type(additional_tokens)}')
    return TF_Pool(data_tokens + [tf if isinstance(tf, TokenFamily) else tf.token_family 
                                  for tf in additional_tokens])

if __name__ == '__main__':
    global_var.time_axis = 0
    global_var.init_caches(set_grids = False)
    dummy_data = np.ones((10, 10))
    global_var.tensor_cache.memory_usage_properties(dummy_data, 5)

    dummy_derivs = np.ones((100, 2))
    dummy_pool = create_pool(data = dummy_data, derivs = [dummy_derivs,], max_deriv_order=1)
    _, test_factor = dummy_pool.create()
    print(test_factor.label, test_factor.params)
Example #9
    def use_global_cache(self,
                         grids_as_tokens=True,
                         set_grids=True,
                         memory_for_cache=5,
                         boundary: int = 0):
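        # Summary inferred from the body below: stack the stored data tensor with its derivatives,
        # trim the boundary, upload the results (and, when set_grids is True, the coordinate grids)
        # into the global caches, and fall back to re-initializing the caches if they do not exist yet.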

        print(type(self.data_tensor), type(self.derivatives))
        derivs_stacked = prepare_var_tensor(self.data_tensor,
                                            self.derivatives,
                                            time_axis=global_var.time_axis,
                                            boundary=boundary)
        #                                            axes = self.coord_tensors)
        if isinstance(self.coord_tensors, (list, tuple)):
            coord_tensors_cut = []
            for tensor in self.coord_tensors:
                coord_tensors_cut.append(
                    np_ndarray_section(tensor, boundary=boundary))
        elif isinstance(self.coord_tensors, np.ndarray):
            coord_tensors_cut = np_ndarray_section(self.coord_tensors,
                                                   boundary=boundary)
        else:
            raise TypeError(
                'Coordinate tensors are presented in a format other than np.ndarray or a list/tuple of np.ndarrays'
            )

        try:
            upload_simple_tokens(self.names, global_var.tensor_cache,
                                 derivs_stacked)
            upload_simple_tokens([
                'u',
            ], global_var.initial_data_cache, [
                self.data_tensor,
            ])
            if set_grids:
                memory_for_cache = int(memory_for_cache / 2)
                upload_grids(self.coord_tensors, global_var.initial_data_cache)
                upload_grids(coord_tensors_cut, global_var.grid_cache)
                print(
                    f'completed grid cache with {len(global_var.grid_cache.memory_default)} tensors with labels {global_var.grid_cache.memory_default.keys()}'
                )

        except AttributeError:
            print('Somehow, the cache has not been initialized')
            print(self.names, derivs_stacked.shape)
            print(global_var.tensor_cache.memory_default.keys())
            global_var.init_caches(set_grids=set_grids)
            if set_grids:
                print('setting grids')
                memory_for_cache = int(memory_for_cache / 2)
                global_var.grid_cache.memory_usage_properties(
                    obj_test_case=self.data_tensor,
                    mem_for_cache_frac=memory_for_cache)
                upload_grids(self.coord_tensors, global_var.initial_data_cache)
                upload_grids(coord_tensors_cut, global_var.grid_cache)
                print(
                    f'completed grid cache with {len(global_var.grid_cache.memory_default)} tensors with labels {global_var.grid_cache.memory_default.keys()}'
                )
            global_var.tensor_cache.memory_usage_properties(
                obj_test_case=self.data_tensor,
                mem_for_cache_frac=memory_for_cache)
            print(self.names, derivs_stacked.shape)
            upload_simple_tokens(self.names, global_var.tensor_cache,
                                 derivs_stacked)
            upload_simple_tokens([
                'u',
            ], global_var.initial_data_cache, [
                self.data_tensor,
            ])

        global_var.tensor_cache.use_structural()
Example #10
    def __init__(self,
                 use_default_strategy: bool = True,
                 eq_search_stop_criterion: Stop_condition = Iteration_limit,
                 director=None,
                 equation_type: set = {'PDE', 'derivatives only'},
                 time_axis: int = 0,
                 init_cache: bool = True,
                 example_tensor_shape: Union[tuple, list] = (1000, ),
                 set_grids: bool = True,
                 eq_search_iter: int = 300,
                 use_solver: bool = False,
                 dimensionality: int = 1,
                 verbose_params: dict = {}):
        '''
        
        Initialization of the epde search object. Here, the user can declare the properties of the
        search mechanism by defining the evolutionary search strategy.
        
        Parameters:
        --------------
        
        use_default_strategy : bool, optional
            True (default and recommended value) if the default evolutionary strategy shall be used;
            False if a user-defined strategy will be passed instead.
        
        eq_search_stop_criterion : epde.operators.ea_stop_criteria.Stop_condition object, optional
            The stop condition for the evolutionary search of the equation. Default value 
            represents loop over 300 evolutionary epochs.
            
        director : obj, optional
            User-defined director, responsible for construction of evolutionary strategy; 
            shall be declared for very specific tasks.
        
        equation_type : set of str, optional
            Will define equation type, TBD later.
        
        '''
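        # A minimal usage sketch (hypothetical call; the enclosing class name is not shown
        # in this snippet, so <search_class> is a placeholder):
        #     search_obj = <search_class>(use_default_strategy=True, eq_search_iter=100,
        #                                 init_cache=True, set_grids=True)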
        global_var.set_time_axis(time_axis)
        global_var.init_verbose(**verbose_params)

        if init_cache:
            global_var.init_caches(set_grids)

        if use_solver:
            global_var.dimensionality = dimensionality

        if director is not None and not use_default_strategy:
            self.director = director
        elif director is None and use_default_strategy:
            if use_solver:
                self.director = Strategy_director_solver(
                    eq_search_stop_criterion, {'limit': eq_search_iter})  #,
                # dimensionality=dimensionality)
            else:
                self.director = Strategy_director(eq_search_stop_criterion,
                                                  {'limit': eq_search_iter})
            self.director.strategy_assembly()
        else:
            raise NotImplementedError(
                'Wrong arguments passed during the epde search initialization')
        self.set_moeadd_params()
        self.search_conducted = False
Example #11
def test_solver_forms():
    from epde.cache.cache import upload_simple_tokens, upload_grids, prepare_var_tensor
    from epde.prep.derivatives import Preprocess_derivatives
    from epde.supplementary import Define_Derivatives
    from epde.evaluators import simple_function_evaluator, trigonometric_evaluator
    from epde.operators.ea_stop_criteria import Iteration_limit
    from epde.eq_search_strategy import Strategy_director
    
#    delim = '/' if sys.platform == 'linux' else '\\'
    
    x = np.linspace(0, 4*np.pi, 1000)
    print('path:', sys.path)
    ts = np.load('/media/mike_ubuntu/DATA/EPDE_publication/tests/system/Test_data/fill366.npy') # tests/system/
    new_derivs = True
    
    ff_filename = '/media/mike_ubuntu/DATA/EPDE_publication/tests/system/Test_data/smoothed_ts.npy'
    output_file_name = '/media/mike_ubuntu/DATA/EPDE_publication/tests/system/Test_data/derivs.npy'
    step = x[1] - x[0]
    
    '''

    '''
    
    max_order = 1 # presence of the 2nd order derivatives leads to equality u = d^2u/dx^2 on this data (elaborate)
    
    if new_derivs:
        _, derivs = Preprocess_derivatives(ts, data_name = ff_filename, 
                                output_file_name = output_file_name,
                                steps = (step,), smooth = False, sigma = 1, max_order = max_order)
        ts_smoothed = np.load(ff_filename)        
    else:
        try:
            ts_smoothed = np.load(ff_filename)
            derivs = np.load(output_file_name)
        except FileNotFoundError:
            _, derivs = Preprocess_derivatives(ts, data_name = ff_filename, 
                                    output_file_name = output_file_name,
                                    steps = (step,), smooth = False, sigma = 1, max_order = max_order)            
            ts_smoothed = np.load(ff_filename) 
    for i in np.arange(10):
        global_var.init_caches(set_grids=True)
        global_var.tensor_cache.memory_usage_properties(obj_test_case=ts, mem_for_cache_frac = 5)  
        global_var.grid_cache.memory_usage_properties(obj_test_case=x, mem_for_cache_frac = 5)
    
        
        print(type(derivs))
    
        boundary = 10
        upload_grids(x[boundary:-boundary], global_var.grid_cache)   
        u_derivs_stacked = prepare_var_tensor(ts_smoothed, derivs, time_axis = 0, boundary = boundary)
        
        u_names, u_deriv_orders = Define_Derivatives('u', 1, 1) 
        upload_simple_tokens(u_names, global_var.tensor_cache, u_derivs_stacked)
        
        u_tokens = Token_family('Function', family_of_derivs = True)
        u_tokens.set_status(unique_specific_token=False, unique_token_type=False, s_and_d_merged = False, 
                            meaningful = True, unique_for_right_part = False)
        u_token_params = OrderedDict([('power', (1, 1))])
        u_equal_params = {'power' : 0}
        u_tokens.set_params(u_names, u_token_params, u_equal_params, u_deriv_orders)
        u_tokens.set_evaluator(simple_function_evaluator, [])
    
    
        grid_names = ['t',]    
        upload_simple_tokens(grid_names, global_var.tensor_cache, [x[boundary:-boundary],])    
        global_var.tensor_cache.use_structural()
    
    
        grid_tokens = Token_family('Grids')
        grid_tokens.set_status(unique_specific_token=True, unique_token_type=True, s_and_d_merged = False, 
                            meaningful = True, unique_for_right_part = False)
        grid_token_params = OrderedDict([('power', (1, 1))])
        grid_equal_params = {'power' : 0}
        grid_tokens.set_params(grid_names, grid_token_params, grid_equal_params)
        grid_tokens.set_evaluator(simple_function_evaluator, [])
        
        trig_tokens = Token_family('Trigonometric')
        trig_names = ['sin', 'cos']
        trig_tokens.set_status(unique_specific_token=True, unique_token_type=True, 
                               meaningful = False, unique_for_right_part = False)
        trig_token_params = OrderedDict([('power', (1, 1)), ('freq', (0.95, 1.05)), ('dim', (0, 0))])
        trig_equal_params = {'power' : 0, 'freq' : 0.05, 'dim' : 0}
        trig_tokens.set_params(trig_names, trig_token_params, trig_equal_params)
        trig_tokens.set_evaluator(trigonometric_evaluator, [])
    
        pool = TF_Pool([grid_tokens, u_tokens, trig_tokens])
        pool.families_cardinality()
    
        test_strat = Strategy_director(Iteration_limit, {'limit' : 300})
        test_strat.strategy_assembly()        
        
        eq1 = Equation(pool , basic_structure = [], 
                       terms_number = 6, max_factors_in_term = 2)   # add the ability to set probabilities for the number of factors
        test_strat.constructor.strategy.modify_block_params(block_label = 'rps1', param_label = 'sparsity', 
                                                                 value = 0.1, suboperator_sequence = ['eq_level_rps', 'fitness_calculation', 'sparsity'])
    
        test_strat._constructor._strategy.apply_block('rps1', 
                                                    {'population' : [eq1,], 'separate_vars' : []})
        
        print('text form:', eq1.text_form)
    #    print('solver form:', eq1.solver_form())
        print(eq1.max_deriv_orders())
    raise Exception('Test exception')