def get_all_params():
    """Collect every parameter/metric column name produced by the pipeline.

    Aggregates, in order:
      * step2 and step3 parameter names,
      * ModelManager property names,
      * learning-metric names,
      * post-learning metric names, each expanded into the '_val',
        '_test1' and '_test2' variants,
      * obsolete metric names kept for backward compatibility.

    Returns:
        list: all parameter/metric names, insertion order preserved.
    """
    all_params = []
    # step2 / step3 expose their parameter names directly.
    all_params.extend(step2.get_param_list())
    all_params.extend(step3.get_param_list())
    #
    _modelManager = ModelManager()
    all_params.extend(
        utils.dict_get_param_list(_modelManager.get_properties()))
    #
    all_params.extend(utils.dict_get_param_list(
        learn_evaluate_results.learning_metrics_template))
    #
    # Post-learning metrics exist in three variants: one per evaluation set
    # (validation and two test sets).
    for base_params in utils.dict_get_param_list(
            learn_evaluate_results.post_learning_metrics_template):
        for postfix in ('val', 'test1', 'test2'):
            all_params.append(base_params + '_' + postfix)
    #
    all_params.extend(utils.dict_get_param_list(
        learn_evaluate_results.obsolete_metrics_for_backward_compatibility))
    #
    return all_params
def execute(dataset_name, dir_npy):
    """Run a grid-search training sweep on *dataset_name*.

    Loads raw data, computes targets (step2), then for each combination of
    step3 windowing parameters builds the learning data and trains a
    Conv1D_Dense model for each (filters, kernel_size) pair.

    Args:
        dataset_name: name of the dataset passed to the data generator
            and to ``learn``.
        dir_npy: directory for .npy artifacts, forwarded to ``learn``.

    Returns:
        None. Aborts the whole sweep early if step3 data generation fails.
    """
    # Local import keeps the heavy generator dependency out of module load.
    from train_data_generator import FCTrainDataGenerator

    fcg = FCTrainDataGenerator()
    fcg.load_compute_raw_data_additional_params(dataset_name)
    fcg.load_compute_raw_data()
    #
    step2.step2_params['step2_target_class_col_name'] = 'target_class'
    step2.step2_params['step2_profondeur_analyse'] = 3
    step2.step2_params['step2_target_period'] = 'M15'
    # Parameters specific to 'generate_big_define_target'
    step2.step2_params['step2_symbol_for_target'] = 'UsaInd'
    step2.step2_params['step2_targets_classes_count'] = 3
    step2.step2_params['step2_symbol_spread'] = 2.5
    # step2_params['step2_targetLongShort'] = 20.0
    # step2_params['step2_ratio_coupure'] = 1.3
    # step2_params['step2_use_ATR'] = False
    step2.step2_params['step2_targetLongShort'] = 0.95
    step2.step2_params['step2_ratio_coupure'] = 1.1
    step2.step2_params['step2_use_ATR'] = True
    #
    fcg.compute_target_additional_params(step2.step2_params)
    fcg.compute_target()
    #
    model_manager = ModelManager()
    #
    # step3 parameters : unchanged during loop
    #
    step3.step3_params['step3_column_names_to_scale'] = []
    step3.step3_params['step3_column_names_not_to_scale'] = [
        'UsaInd_M15_time_slot',
        'UsaInd_M15_pRSI_3', 'UsaInd_M15_pRSI_5', 'UsaInd_M15_pRSI_8',
        'UsaInd_M15_pRSI_13', 'UsaInd_M15_pRSI_21']
    step3.step3_params['step3_tests_by_class'] = 66
    step3.step3_params['step3_idx_start'] = 0
    # step3_idx_start = int(random.random()*1000)
    #
    for step3_recouvrement in (8, 13, 21, 34, 55, 89, 144, 233):  # (2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233):
        for step3_samples_by_class in (330, 660):  # (330, 660, 990, 1320, 1650, 1980):
            #
            # step3 parameters : modified by this loop
            #
            step3.step3_params['step3_recouvrement'] = step3_recouvrement
            # 1 / overlap ratio ("recouvrement"); time depth tied to it.
            step3.step3_params['step3_time_depth'] = step3_recouvrement
            step3.step3_params['step3_samples_by_class'] = step3_samples_by_class
            #
            try:
                fcg.compute_learning_data_GRU_LSTM_Conv1D_additional_params(
                    step3.step3_params)
                learning_data = fcg.compute_learning_data_GRU_LSTM_Conv1D()
            except Exception as exc:
                # Was a bare `except:` — that also swallowed SystemExit and
                # KeyboardInterrupt and hid the failure reason. Narrowed and
                # the exception is now reported before aborting the sweep.
                print("fcg.create_step3_data failed (%r). STOP" % (exc,))
                return
            #
            # Model and learning parameters : unchanged during loop
            #
            _mm_dict = model_manager.get_properties()
            #
            _mm_dict['model_architecture'] = 'Conv1D_Dense'
            _mm_dict['conv1D_block1_MaxPooling1D_pool_size'] = 2
            _mm_dict['config_GRU_LSTM_units'] = 128
            _mm_dict['config_Dense_units'] = 96
            _mm_dict['dropout_rate'] = 0.5
            _mm_dict['optimizer_name'] = 'adam'
            _mm_dict['optimizer_modif_learning_rate'] = 0.75
            #
            _mm_dict['fit_batch_size'] = 32
            _mm_dict['fit_epochs_max'] = 500
            _mm_dict['fit_earlystopping_patience'] = 100
            #
            model_manager.update_properties(_mm_dict)
            #
            for conv1D_block1_filters in (55, 89, 144, 233, 377, 610, 987):
                for conv1D_block1_kernel_size in (2, 3, 5):
                    #
                    # Model and learning parameters : modified by this loop
                    #
                    _mm_dict = model_manager.get_properties()
                    #
                    _mm_dict['conv1D_block1_filters'] = conv1D_block1_filters
                    _mm_dict['conv1D_block1_kernel_size'] = conv1D_block1_kernel_size
                    #
                    model_manager.update_properties(_mm_dict)
                    #
                    learn(dataset_name, dir_npy, model_manager, learning_data)