# NOTE(review): fragment — the enclosing loop (presumably over models, providing
# `i`, `cf_a`, `N_analyses`, `List_f_analyses`, `Standard_values_alyses`) starts
# above this chunk. The indentation of this first line (column 0 vs. the
# indented lines below) looks wrong — confirm against the full file.
condition_analysis = True
    # Subtract one: the last entry of the analyses list is the iteration count,
    # not a filter predicate, so it is excluded from the match.
    for an_j in range(N_analyses - 1):
        # AND together "this config's analysis value equals the reference value"
        # across every analysis predicate.
        condition_analysis &= List_f_analyses[an_j](
            cf_a) == Standard_values_alyses[an_j]

    # Only configs matching every reference value select their result/model paths.
    if (condition_analysis):
        # NOTE(review): rebinding `pickle_results_path` from list to element
        # shadows the list name — presumably intentional (last match wins); verify.
        pickle_results_path = pickle_results_path[i]
        model_file_path = models_path[i]
"""
##################################################################
LOAD THE CONFIGURATION FILE
##################################################################
"""
dtype = torch.float
device = pytut.get_device_name(cuda_index=0)

# Restore the pickled training configuration and its logged validation metrics.
cf_a, training_logger = pkl.load_pickle(pickle_results_path)
cf_a.dtype = dtype  # Variable types
cf_a.device = device

## Shrink memory-related settings so inference fits on a small server.
cf_a.datareader_lazy = True  # Force laziness for RAM optimization
cf_a.batch_size_train = 30
cf_a.batch_size_validation = 30
cf_a.force_free_batch_memory = False
# BUG FIX: this value was previously assigned to a bare module-level variable,
# so the config object never saw the RAM cap (the multi-model section sets
# cf_a.max_instances_in_memory, which is the attribute the reader uses).
# The bare name is kept as well in case later code in the file reads it.
cf_a.max_instances_in_memory = 100
max_instances_in_memory = 100
print_conf_params(cf_a)

# Report the metrics recorded at training time, scaled to percentages.
print("Expected EM: ", 100 * np.array(training_logger["validation"]["em"]))
print("Expected F1: ", 100 * np.array(training_logger["validation"]["f1"]))
"""
Example #2
# Nmodels = 3
for i in range(Nmodels):
    pickle_results_path,model_file_path = pytut.get_models_paths(list_model_ids[i], 
                                                    list_models_epoch_i[i],source_path = source_path)
    pickle_results_path_list.append(pickle_results_path)
    model_file_path_list.append(model_file_path)
    

"""
##################################################################
LOAD THE CONFIGURATION FILES
##################################################################
"""
dtype = torch.float
device = pytut.get_device_name(cuda_index=0)

# Load every model's pickled configuration and training log, applying the same
# device/memory overrides to each config.
cf_a_list = []
training_logger_list = []
for i in range(Nmodels):
    cf_a, training_logger = pkl.load_pickle(pickle_results_path_list[i])
    ## Set data and device parameters
    cf_a.dtype = dtype  # Variable types
    cf_a.device = device
    cf_a.datareader_lazy = True  # Force laziness for RAM optimization
    cf_a.batch_size_train = 30
    cf_a.batch_size_validation = 30
    cf_a.max_instances_in_memory = 1000
    ## Fix backwards compatibility: older pickled configs may lack this field.
    # NOTE(review): the original assigned this line twice back-to-back; the
    # exact duplicate was removed (idempotent, behavior-identical). If the
    # second assignment was meant for a *different* attribute (copy-paste
    # slip), restore it with the intended name.
    cf_a.phrase_layer_hidden_size = cf_a.modeling_span_end_hidden_size