import collections.abc

import matplotlib.pyplot as plt

# `plot_prediction` and `figure2tensor` are assumed to be defined in, or
# imported into, this module.


def plot_conditionings(out):
    """Plot each batch element's conditioning signals and return them as image tensors."""

    N_FT_FRAMES = 1000

    # Keep only the hashable (tensor-like) entries and truncate each one to
    # the first N_FT_FRAMES frames.
    out = {
        key: value[:, 0:N_FT_FRAMES]
        for key, value in out.items()
        if isinstance(value, collections.abc.Hashable)
    }

    plots = []

    for i in range(out["ld_scaled"].shape[0]):

        fig = plot_prediction(out["f0_scaled"][i, :, 0], out["ld_scaled"][i, :,
                                                                          0],
                              out["predicted_f0_scaled"][i, :, 0],
                              out["predicted_ld_scaled"][i, :, 0],
                              out["midi_pitch"][i, :,
                                                0], out["midi_velocity"][i, :,
                                                                         0])

        plots.append(figure2tensor(fig))

        plt.clf()

    return plots
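
# A minimal usage sketch (an illustration, not part of the original source):
# the dict values must be hashable, [batch, frames, 1] tensor-like objects
# (plain numpy arrays have no __hash__ and would be filtered out above),
# keyed by the names indexed in the loop.
#
#   import tensorflow as tf
#   out = {key: tf.random.uniform((2, 1500, 1))
#          for key in ("f0_scaled", "ld_scaled", "predicted_f0_scaled",
#                      "predicted_ld_scaled", "midi_pitch", "midi_velocity")}
#   images = plot_conditionings(out)  # one image tensor per batch element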
import time

import simulation_parameters
import SimulationManager
import plot_prediction

PS = simulation_parameters.parameter_storage()

t_start = time.time()
simStarter = SimulationManager.SimulationManager(PS)

i_ = 0
w_input_exc = 2e-3
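# NOTE: sigma_v_range and sigma_x_range are not defined in this snippet; the
# values below are placeholder assumptions so the sweep is runnable, not the
# original parameters.
sigma_v_range = [0.05, 0.1, 0.2]
sigma_x_range = [0.05, 0.1, 0.2]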
for sigma_v in sigma_v_range:
    for sigma_x in sigma_x_range:
        # ----- pre-computed connectivity -----
#        new_params = {'connectivity': 'precomputed', 'w_sigma_x': sigma_x, 'w_sigma_v': sigma_v}
        new_params = {'connectivity': 'precomputed', 'w_sigma_x': sigma_x,
                      'w_sigma_v': sigma_v, 'w_input_exc': w_input_exc}
        simStarter.update_values(new_params)

        # analysis 1
        plot_prediction.plot_prediction(simStarter.params)

        # copy files from the previous folder needed for the next simulation

#        new_params = { 'connectivity' : 'random'}
        new_params = {'connectivity': 'random', 'w_input_exc': w_input_exc}
        simStarter.update_values(new_params)
        plot_prediction.plot_prediction(simStarter.params)
        i_ += 1

t_stop = time.time()
t_run = t_stop - t_start
print "Full analysis duration: %d sec or %.1f min for %d cells (%d exc, %d inh)" % (t_run, (t_run)/60., \
        simStarter.params['n_cells'], simStarter.params['n_exc'], simStarter.params['n_inh'])
    NM = NetworkModel(ps.params, comm)
    NM.setup(times=times)
    NM.create(input_created)
    if not input_created:
        spike_times_container = NM.create_input(load_files=load_files, save_output=save_input_files)
        input_created = True  # set this True ONLY if the swept parameter does not affect the input, i.e. keep it False when sweeping f_max_stim or blur_X/V!
    #         os.system('python plot_rasterplots.py %s' % ps.params['folder_name'])
    else:
        NM.spike_times_container = spike_times_container
    NM.connect()
    NM.run_sim(sim_cnt, record_v=record)
    NM.print_results(print_v=record)

    if pc_id == 0 and params["n_cells"] < max_neurons_to_record:
        import plot_prediction as pp

        pp.plot_prediction(params)
        os.system("python plot_rasterplots.py %s" % ps.params["folder_name"])
        os.system("python plot_connectivity_profile.py %s" % ps.params["folder_name"])
    if pc_id == 1 or not USE_MPI:
        os.system("python plot_connectivity_profile.py %s" % ps.params["folder_name"])
        for conn_type in ["ee", "ei", "ie", "ii"]:
            os.system("python plot_weight_and_delay_histogram.py %s %s" % (conn_type, ps.params["folder_name"]))
        os.system("python analyse_connectivity.py %s" % ps.params["folder_name"])

    if comm is not None:
        comm.Barrier()
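
# For reference, a minimal sketch of the MPI setup this block assumes (the
# mpi4py calls are standard; treating ranks 0 and 1 as the plotting processes
# follows the conditionals above, the rest is an assumption):
#
#   try:
#       from mpi4py import MPI
#       USE_MPI = True
#       comm = MPI.COMM_WORLD
#       pc_id, n_proc = comm.Get_rank(), comm.Get_size()
#   except ImportError:
#       USE_MPI, comm, pc_id, n_proc = False, None, 0, 1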
import os
import random
import shutil
import sys
from typing import Optional, Union

import pandas as pd

# `configurations` and the helper functions used below (train_validate,
# train_test, predict_future, get_target_quantities, get_target_temporal_ids,
# get_future_data, performance_bar_plot, performance_summary, and
# plot_prediction) are assumed to be importable from this package.


def predict(data: list,
            forecast_horizon: int = 1,
            feature_sets: Optional[dict] = None,
            forced_covariates: Optional[list] = None,
            test_type: str = 'whole-as-one',
            models: Optional[list] = None,
            mixed_models: Optional[list] = None,
            model_type: str = 'regression',
            splitting_type: str = 'training-validation',
            instance_testing_size: Union[int, float] = 0.2,
            instance_validation_size: Union[int, float] = 0.3,
            instance_random_partitioning: bool = False,
            fold_total_number: int = 5,
            feature_scaler: Optional[str] = None,
            target_scaler: Optional[str] = None,
            performance_benchmark: str = 'MAPE',
            performance_measures: Optional[list] = None,
            performance_mode: str = 'normal',
            scenario: Optional[str] = 'current',
            validation_performance_report: bool = True,
            testing_performance_report: bool = True,
            save_predictions: bool = True,
            plot_predictions: bool = False,
            verbose: int = 0):
    
    """
    Args:
        data:
        forecast_horizon:
        feature_sets:
        forced_covariates:
        test_type:
        models:
        mixed_models:
        model_type:
        splitting_type:
        instance_testing_size:
        instance_validation_size:
        instance_random_partitioning:
        fold_total_number:
        feature_scaler:
        target_scaler:
        performance_benchmark:
        performance_measures:
        performance_mode:
        scenario:
        validation_performance_report:
        testing_performance_report:
        save_predictions:
        plot_predictions:
        verbose:
    Returns:
    """
    # default arguments (None is used to avoid shared mutable defaults)
    if feature_sets is None:
        feature_sets = {'covariate': 'mRMR'}
    if forced_covariates is None:
        forced_covariates = []
    if models is None:
        models = ['knn']
    if mixed_models is None:
        mixed_models = []
    if performance_measures is None:
        performance_measures = ['MAPE']

    # input checking
    # data
    if not isinstance(data, list):
        sys.exit("The input 'data' must be a list of DataFrames or a list of data addresses.")
    str_check = [isinstance(d, str) for d in data]
    df_check = [isinstance(d, pd.DataFrame) for d in data]
    if not (all(str_check) or all(df_check)):
        sys.exit("The input 'data' must be a list of DataFrames or a list data addresses.")
    # forecast_horizon
    if not (isinstance(forecast_horizon, int) and forecast_horizon >= 1):
        sys.exit("The input 'forecast_horizon' must be an integer greater than or equal to one.")
    # feature_scaler
    if feature_scaler not in configurations.FEATURE_SCALERS:
        sys.exit(f"The input 'feature_scaler' must be a string and one of the following options:\n"
                 f"{configurations.FEATURE_SCALERS}")
    # target_scaler
    if target_scaler not in configurations.TARGET_SCALERS:
        sys.exit(f"The input 'target_scaler' must be a string and one of the following options:\n"
                 f"{configurations.TARGET_SCALERS}")
    # test_type
    if test_type not in configurations.TEST_TYPES:
        sys.exit(f"The input 'test_type' must be a string and one of the following options:\n"
                 f"{configurations.TEST_TYPES}")
    # feature_sets input checking
    if not (isinstance(feature_sets, dict) and len(feature_sets.keys()) == 1):
        sys.exit("feature_sets input format is not valid.")
    if not (list(feature_sets.keys())[0] in configurations.FEATURE_SELECTION_TYPES
            and list(feature_sets.values())[0] in configurations.RANKING_METHODS):
        sys.exit("feature_sets input is not valid.")
    # forced_covariates checking
    if not isinstance(forced_covariates, list):
        sys.exit("Error: The input 'forced_covariates' must be a list of covariates or an empty list.")
    # model_type input checking
    if model_type not in configurations.MODEL_TYPES:
        sys.exit("model_type input is not valid.")
    models_list = []
    # models input checking
    if not isinstance(models, list):
        sys.exit("models input format is not valid.")
    for model in list(models):  # iterate over a copy, since duplicates are removed from `models`
        if isinstance(model, str):
            if model not in configurations.PRE_DEFINED_MODELS:
                sys.exit("models input is not valid.")
            elif model not in models_list:
                models_list.append(model)
            else:
                models.remove(model)
        elif isinstance(model, dict):
            if len(list(model.keys())) == 1:
                if list(model.keys())[0] not in configurations.PRE_DEFINED_MODELS:
                    sys.exit("models input is not valid.")
                elif list(model.keys())[0] not in models_list:
                    models_list.append(list(model.keys())[0])
                else:
                    models.remove(model)
            else:
                sys.exit("models input is not valid.")
        elif callable(model):
            if model.__name__ not in models_list:
                models_list.append(model.__name__)
            else:
                models.remove(model)
        else:
            sys.exit("models input is not valid.")
    # mixed_models input checking
    if not isinstance(mixed_models, list):
        sys.exit("mixed_models input format is not valid.")
    for model in list(mixed_models):  # iterate over a copy, since duplicates are removed from `mixed_models`
        if isinstance(model, str):
            if model not in configurations.PRE_DEFINED_MODELS:
                sys.exit("mixed_models input is not valid.")
            elif 'mixed_' + model not in models_list:
                models_list.append('mixed_' + model)
            else:
                mixed_models.remove(model)
        elif isinstance(model, dict):
            if len(list(model.keys())) == 1:
                if list(model.keys())[0] not in configurations.PRE_DEFINED_MODELS:
                    sys.exit("mixed_models input is not valid.")
                elif 'mixed_' + list(model.keys())[0] not in models_list:
                    models_list.append('mixed_' + list(model.keys())[0])
                else:
                    mixed_models.remove(model)
            else:
                sys.exit("mixed_models input is not valid.")
        elif callable(model):
            if model.__name__ not in models_list:
                models_list.append(model.__name__)
            else:
                mixed_models.remove(model)
        else:
            sys.exit("mixed_models input is not valid.")
    # instance_testing_size input checking
    if not ((isinstance(instance_testing_size, float) and 0 < instance_testing_size < 1) or (
            isinstance(instance_testing_size, int) and instance_testing_size > 0)):
        sys.exit("instance_testing_size input is not valid.")
    # splitting_type input checking
    if splitting_type not in configurations.SPLITTING_TYPES:
        sys.exit("splitting_type input is not valid.")
    # instance_validation_size input checking
    if not ((isinstance(instance_validation_size, float) and 0 < instance_validation_size < 1) or (
            isinstance(instance_validation_size, int) and instance_validation_size > 0)):
        sys.exit("instance_validation_size input is not valid.")
    # instance_random_partitioning input checking
    if not isinstance(instance_random_partitioning, bool):
        sys.exit("instance_random_partitioning input is not valid.")
    # fold_total_number input checking
    if not (isinstance(fold_total_number, int) and fold_total_number > 1):
        sys.exit("fold_total_number input is not valid.")
    # performance_benchmark input checking
    if performance_benchmark not in configurations.PERFORMANCE_BENCHMARKS:
        sys.exit("performance_benchmark input is not valid.")
    # performance_mode input checking
    if not isinstance(performance_mode, str):
        sys.exit("performance_mode input format is not valid.")
    if not any(performance_mode.startswith(performance_mode_starts_with)
               for performance_mode_starts_with in configurations.PERFORMANCE_MODES_STARTS_WITH):
        sys.exit("performance_mode input is not valid.")
    # performance_measures input checking
    if not (isinstance(performance_measures, list) and len(performance_measures) > 0):
        sys.exit("performance_measures input format is not valid.")
    for performance_measure in performance_measures:
        if performance_measure not in configurations.PERFORMANCE_MEASURES:
            sys.exit("performance_measures input is not valid.")
    # scenario
    if not ((isinstance(scenario, str) and scenario in configurations.SCENARIOS) or scenario is None):
        sys.exit("scenario input is not valid.")
    # validation_performance_report input checking
    if not isinstance(validation_performance_report, bool):
        sys.exit("validation_performance_report input is not valid.")
    # testing_performance_report input checking
    if not isinstance(testing_performance_report, bool):
        sys.exit("testing_performance_report input is not valid.")
    # save_predictions input checking
    if not isinstance(save_predictions, bool):
        sys.exit("save_predictions input is not valid.")
    # plot_predictions input checking
    if not isinstance(plot_predictions, bool):
        sys.exit("plot_predictions input is not valid.")
    elif plot_predictions and not save_predictions:
        sys.exit("For plotting the predictions, both the plot_predictions and save_predictions inputs must be set to True.")
    elif plot_predictions and model_type == 'classification':
        sys.exit("The plot_predictions input can be set to True only for the regression model_type.")
        
    # verbose input checking
    if verbose not in configurations.VERBOSE_OPTIONS:
        sys.exit("verbose input is not valid.")

    # removing prediction and performance directories and test_process_backup csv file
    if os.path.exists('prediction'):
        shutil.rmtree('prediction')
    if os.path.exists('performance'):
        shutil.rmtree('performance')
    if os.path.isfile('test_process_backup.csv'):
        os.remove('test_process_backup.csv')

    # data preparing
    if isinstance(data[0], str):
        try:
            data = [pd.read_csv(d).sort_values(by=['temporal id', 'spatial id']) for d in data]
        except Exception as e:
            sys.exit(str(e))

    # forced_covariates manipulation
    forced_covariates = list(set(forced_covariates))
    forced_covariates = [forced_covariate
                         for forced_covariate in forced_covariates
                         if forced_covariate is not None and forced_covariate != '']

    # classification checking
    labels = None
    if model_type == 'classification':
        if not set(performance_measures) <= set(configurations.CLASSIFICATION_PERFORMANCE_MEASURES):
            sys.exit("Error: The input 'performance_measures' is not valid according to 'model_type=classification'.")
        if performance_benchmark not in configurations.CLASSIFICATION_PERFORMANCE_BENCHMARKS:
            sys.exit("Error: The input 'performance_benchmark' is not valid according to 'model_type=classification'.")
        if performance_mode != 'normal':
            performance_mode = 'normal'
            print("Warning: The input 'performance_mode' is set to 'normal' according to 'model_type=classification'.")
        if target_scaler is not None:
            target_scaler = None
            print("Warning: The input 'target_scaler' is set to None according to 'model_type=classification'.")
        target_column_name = list(filter(lambda x: x.startswith('Target'), data[0].columns.values))[0]
        labels = data[0].loc[:, target_column_name].unique().tolist()
        labels = [label for label in labels if not (label is None or str(label) == 'nan')]
        if len(labels) < 2:
            sys.exit("Error: The labels length must be at least two.")

    # one_by_one checking
    if test_type == 'one-by-one':
        splitting_type = 'training-validation'
        instance_validation_size = 1
        instance_random_partitioning = False
        if data[0]['spatial id'].nunique() == 1:
            if 'AUC' in performance_measures:
                performance_measures.remove('AUC')
            if 'R2_score' in performance_measures:
                performance_measures.remove('R2_score')
            if 'AUPR' in performance_measures:
                performance_measures.remove('AUPR')
            if len(performance_measures) == 0:
                sys.exit("Error: The input 'performance_measures' is not valid according to 'test_type=one-by-one'.")
            if performance_benchmark in ('AUC', 'R2_score', 'AUPR'):
                sys.exit("Error: The input 'performance_benchmark' is not valid according to 'test_type=one-by-one'.")
                
    # get target quantities
    granularity = [1] * len(data)
    for index in range(len(data)):
        target_mode, target_granularity, granularity[index], _ = get_target_quantities(data=data[index].copy())
        data[index], _ = get_target_temporal_ids(temporal_data=data[index].copy(),
                                                 forecast_horizon=forecast_horizon,
                                                 granularity=granularity[index])
        if model_type == 'classification':
            if not target_mode == 'normal':
                sys.exit(
                    "Error: The parameter 'target_mode' must be 'normal' according to 'model_type=classification'.")
            if not target_granularity == 1:
                sys.exit(
                    "Error: The parameter 'target_granularity' must be 1 according to 'model_type=classification'.")
            if not granularity[index] == 1:
                sys.exit(
                    "Error: The temporal scale of the input data must not be transformed according to 'model_type=classification'.")
    
    data, future_data = get_future_data(data=[d.copy() for d in data],
                                        forecast_horizon=forecast_horizon)
    
    # change the name of temporal id to be identified as shifted to target time point
    for index in range(len(data)):
        data[index] = data[index].rename(columns={'temporal id': 'target temporal id'})
        future_data[index] = future_data[index].rename(columns={'temporal id': 'target temporal id'})
    
    # # ranking
    # print('Ranking Process')
    # feature_selection_type = list(feature_sets.keys())[0]
    # ranking_method = list(feature_sets.values())[0]
    # ordered_covariates_or_features = []
    # if feature_selection_type == 'covariate':
    #     ordered_covariates_or_features = rank_covariates(data=data[0].copy(),
    #                                                      ranking_method=ranking_method,
    #                                                      forced_covariates=forced_covariates)
    # else:
    #     for d in data:
    #         ordered_covariates_or_features.append(rank_features(data=d.copy(),
    #                                                             ranking_method=ranking_method,
    #                                                             forced_covariates=forced_covariates))
    # # ordered_covariates_or_features = ordered_covariates_or_features[:7]
    # # print(ordered_covariates_or_features[2])

    # main process
    if test_type == 'whole-as-one':
        print('Whole As One')
        data_temporal_ids = [d['target temporal id'].unique() for d in data]
        # train_validate
        print(100 * '-')
        print('Train Validate Process')
        best_model, best_model_parameters, best_history_length, best_feature_or_covariate_set, base_models, _ = \
            train_validate(data=[d.copy() for d in data],
                           forecast_horizon=forecast_horizon,
                           feature_scaler=feature_scaler,
                           target_scaler=target_scaler,
                           feature_sets=feature_sets,
                           forced_covariates=forced_covariates,
                           model_type=model_type,
                           labels=labels,
                           models=models,
                           mixed_models=mixed_models,
                           instance_testing_size=instance_testing_size,
                           splitting_type=splitting_type,
                           instance_validation_size=instance_validation_size,
                           instance_random_partitioning=instance_random_partitioning,
                           fold_total_number=fold_total_number,
                           performance_benchmark=performance_benchmark,
                           performance_measures=performance_measures,
                           performance_report=validation_performance_report,
                           save_predictions=save_predictions,
                           verbose=verbose)

        # train_test
        print(100 * '-')
        print('Train Test Process')
        test_trained_model = train_test(data=data[best_history_length - 1].copy(),
                                        forecast_horizon=forecast_horizon,
                                        history_length=best_history_length,
                                        feature_scaler=feature_scaler,
                                        target_scaler=target_scaler,
                                        feature_or_covariate_set=best_feature_or_covariate_set,
                                        model_type=model_type,
                                        labels=labels,
                                        model=best_model,
                                        base_models=base_models,
                                        model_parameters=best_model_parameters,
                                        instance_testing_size=instance_testing_size,
                                        performance_measures=performance_measures,
                                        performance_mode=performance_mode,
                                        performance_report=testing_performance_report,
                                        save_predictions=save_predictions,
                                        verbose=verbose)
        # predict_future: first retrain on the full data (no test split) before forecasting
        best_model, best_model_parameters, best_history_length, best_feature_or_covariate_set, base_models, _ = \
            train_validate(data=[d[d['target temporal id'].isin(
                                data_temporal_ids[index]
                                if forecast_horizon * granularity[index] - 1 == 0
                                else data_temporal_ids[index][:-(forecast_horizon * granularity[index] - 1)])].copy()
                                for index, d in enumerate(data)],
                           forecast_horizon=forecast_horizon,
                           feature_scaler=feature_scaler,
                           target_scaler=target_scaler,
                           feature_sets=feature_sets,
                           forced_covariates=forced_covariates,
                           model_type=model_type,
                           labels=labels,
                           models=models,
                           mixed_models=mixed_models,
                           instance_testing_size=0,
                           splitting_type=splitting_type,
                           instance_validation_size=instance_validation_size,
                           instance_random_partitioning=instance_random_partitioning,
                           fold_total_number=fold_total_number,
                           performance_benchmark=performance_benchmark,
                           performance_measures=performance_measures,
                           performance_report=False,  # validation_performance_report
                           save_predictions=False,  # save_predictions
                           verbose=0)
        
        
        best_data = data[best_history_length - 1].copy()
        best_future_data = future_data[best_history_length - 1].copy()
        best_data_temporal_ids = best_data['target temporal id'].unique()
        temp = forecast_horizon*granularity[best_history_length - 1] - 1
        trained_model = predict_future(data=best_data[best_data['target temporal id'].isin((best_data_temporal_ids
                                                                                     if temp == 0
                                                                                     else best_data_temporal_ids[:-temp]
                                                                                     ))].copy(),
                                       future_data=best_future_data.copy(),
                                       forecast_horizon=forecast_horizon,
                                       feature_scaler=feature_scaler,
                                       target_scaler=target_scaler,
                                       feature_or_covariate_set=best_feature_or_covariate_set,
                                       model_type=model_type,
                                       labels=labels,
                                       model=best_model,
                                       base_models=base_models,
                                       model_parameters=best_model_parameters,
                                       scenario=scenario,
                                       save_predictions=save_predictions,
                                       verbose=verbose)

    elif test_type == 'one-by-one':
        print('One By One')
        # loop over test points
        data_temporal_ids = [d['target temporal id'].unique() for d in data]
        if isinstance(instance_testing_size, float):
            instance_testing_size = int(round(instance_testing_size * len(data_temporal_ids[0])))
        for i in range(instance_testing_size):
            print(100 * '#')
            print('test_point =', i + 1)
            # train_validate
            print(100 * '-')
            print('Train Validate Process')
            best_model, best_model_parameters, best_history_length, best_feature_or_covariate_set, base_models, _ = \
                train_validate(data=
                               [d[d['target temporal id'].isin((
                                   data_temporal_ids[index][:] if i == 0 else data_temporal_ids[index][:-i]))].copy()
                                for index, d in enumerate(data)],
                               forecast_horizon=forecast_horizon,
                               feature_scaler=feature_scaler,
                               forced_covariates=forced_covariates,
                               target_scaler=target_scaler,
                               feature_sets=feature_sets,
                               model_type=model_type,
                               labels=labels,
                               models=models,
                               mixed_models=mixed_models,
                               instance_testing_size=1,
                               splitting_type=splitting_type,
                               instance_validation_size=instance_validation_size,
                               instance_random_partitioning=instance_random_partitioning,
                               fold_total_number=fold_total_number,
                               performance_benchmark=performance_benchmark,
                               performance_measures=performance_measures,
                               performance_report=validation_performance_report,
                               save_predictions=save_predictions,
                               verbose=verbose)
            

            # train_test
            print(100 * '-')
            print('Train Test Process')
            d = data[best_history_length - 1].copy()
            test_trained_model = train_test(data=d[d['target temporal id'].isin(
                (data_temporal_ids[best_history_length - 1][:]
                 if i == 0
                 else data_temporal_ids[best_history_length - 1][:-i]
                 ))].copy(),
                                            forecast_horizon=forecast_horizon,
                                            history_length=best_history_length,
                                            feature_scaler=feature_scaler,
                                            target_scaler=target_scaler,
                                            feature_or_covariate_set=best_feature_or_covariate_set,
                                            model_type=model_type,
                                            labels=labels,
                                            model=best_model,
                                            base_models=base_models,
                                            model_parameters=best_model_parameters,
                                            instance_testing_size=1,
                                            performance_measures=performance_measures,
                                            performance_mode=performance_mode,
                                            performance_report=testing_performance_report,
                                            save_predictions=save_predictions,
                                            verbose=verbose)
        # predict_future: first retrain on the full data (no test split) before forecasting
        print(100 * '-')
        print('Train Validate Process')
        best_model, best_model_parameters, best_history_length, best_feature_or_covariate_set, base_models, _ = \
            train_validate(data=[d[d['target temporal id'].isin(
                                data_temporal_ids[index]
                                if forecast_horizon * granularity[index] - 1 == 0
                                else data_temporal_ids[index][:-(forecast_horizon * granularity[index] - 1)])].copy()
                                for index, d in enumerate(data)],
                           forecast_horizon=forecast_horizon,
                           feature_scaler=feature_scaler,
                           target_scaler=target_scaler,
                           feature_sets=feature_sets,
                           forced_covariates=forced_covariates,
                           model_type=model_type,
                           labels=labels,
                           models=models,
                           mixed_models=mixed_models,
                           instance_testing_size=0,
                           splitting_type=splitting_type,
                           instance_validation_size=instance_validation_size,
                           instance_random_partitioning=instance_random_partitioning,
                           fold_total_number=fold_total_number,
                           performance_benchmark=performance_benchmark,
                           performance_measures=performance_measures,
                           performance_report=False,  # validation_performance_report
                           save_predictions=False,  # save_predictions
                           verbose=0)
        
            
        best_data = data[best_history_length - 1].copy()
        best_future_data = future_data[best_history_length - 1].copy()
        best_data_temporal_ids = best_data['target temporal id'].unique()
        best_future_data_temporal_ids = best_future_data['target temporal id'].unique()
        for i in range(forecast_horizon*granularity[best_history_length - 1]):
            print(150 * '*')
            print('i =', i + 1)
            temp = forecast_horizon*granularity[best_history_length - 1] - i - 1
            print(100 * '-')
            print('Predict Future Process')
            trained_model = predict_future(data=best_data[best_data['target temporal id'].isin(
                (best_data_temporal_ids if temp == 0
                 else best_data_temporal_ids[:-temp]))].copy(),
                                           future_data=best_future_data[best_future_data['target temporal id'] ==
                                                                        best_future_data_temporal_ids[i]].copy(),
                                           forecast_horizon=forecast_horizon,
                                           feature_scaler=feature_scaler,
                                           target_scaler=target_scaler,
                                           feature_or_covariate_set=best_feature_or_covariate_set,
                                           model_type=model_type,
                                           labels=labels,
                                           model=best_model,
                                           base_models=base_models,
                                           model_parameters=best_model_parameters,
                                           scenario=scenario,
                                           save_predictions=save_predictions,
                                           verbose=verbose)
            
    if validation_performance_report and testing_performance_report:
        performance_bar_plot(forecast_horizon, test_type, performance_benchmark)
        performance_summary(forecast_horizon, test_type, performance_benchmark)

    if plot_predictions:
        if len(data[0]['spatial id'].unique()) < 3:
            spatial_ids = data[0]['spatial id'].unique()
        else:
            spatial_ids = list(random.sample(list(data[0]['spatial id'].unique()), 3))
        plot_prediction(data=data[0].copy(), test_type=test_type, forecast_horizon=forecast_horizon,
                        plot_type='test', granularity=granularity[0], spatial_ids=spatial_ids)
        plot_prediction(data=data[0].copy(), test_type=test_type, forecast_horizon=forecast_horizon,
                        plot_type='future', granularity=granularity[0], spatial_ids=spatial_ids)


    return None
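
# A minimal usage sketch (an illustration, not part of the original source):
# 'history.csv' is a placeholder for any dataset laid out the way the input
# checks above expect, i.e. with 'temporal id' and 'spatial id' columns and a
# target column whose name starts with 'Target'.
#
#   predict(data=['history.csv'],
#           forecast_horizon=2,
#           models=['knn'],
#           model_type='regression',
#           test_type='whole-as-one')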
epochs = 400
learning_rate = 0.01
decay_rate = learning_rate / epochs
momentum = 0.9
batch_size = 8

# An alternative optimizer that was tried:
# cell_net.compile(optimizer=optimizers.RMSprop(lr=learning_rate, rho=0.9, epsilon=None, decay=decay_rate),
#                  loss='mse', metrics=['mae'])
# Note: newer tf.keras versions spell the argument learning_rate= and replace
# decay= with learning-rate schedules.
cell_net.compile(
    optimizer=optimizers.SGD(lr=learning_rate,
                             momentum=momentum,
                             decay=decay_rate,
                             nesterov=False),
    loss='mse',
    metrics=['mae'])

history = cell_net.fit(train_images,
                       train_targets,
                       epochs=epochs,
                       batch_size=batch_size,
                       validation_data=(test_images, test_targets))
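
# A quick look at the training curves (a sketch, not part of the original
# source; assumes matplotlib is imported as plt elsewhere in this script):
#
#   plt.plot(history.history['loss'], label='train')
#   plt.plot(history.history['val_loss'], label='validation')
#   plt.xlabel('epoch')
#   plt.ylabel('MSE loss')
#   plt.legend()
#   plt.show()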

prediction = cell_net.predict(test_images)

# Plot an image with its prediction
importlib.reload(plot_prediction)
image_num = 3
plot_prediction.plot_prediction(test_images[image_num, :, :, 1],
                                prediction[image_num, :], 7, 0.5)

#plot_prediction.plot_prediction(test_images[image_num,:,:,1], test_targets[image_num,:], 7, 0.5)