def test_calculate_aic(self):
    """Compute the AIC table for the last 10 accepted parameter sets.

    Relies on the module-level ``sampler_args`` and ``accepted`` being
    populated by an earlier test, and publishes the result in the
    module-level ``table`` for downstream tests to consume.
    """
    global table

    # AIC evaluation needs the data, the candidate models, their priors,
    # and the parameter sets to score; use the tail of the accepted chain.
    table = ac.calculate_aic(data=sampler_args['data'],
                             models=sampler_args['models'],
                             priors=sampler_args['priors'],
                             params=accepted.iloc[-10:],
                             )
    # A non-empty table means at least one model/parameter row was scored.
    assert len(table)
                                         figs=trace_figs,
                                         AX=trace_AX)
    print('\ntrace_AX\n', trace_AX)
    # Thin the tick labels and enlarge fonts, otherwise the trace-plot
    # labels overlap each other.
    for tax in trace_AX.values():
        # Keep every second tick on both axes.
        tax.set_xticks(tax.get_xticks()[::2])
        tax.set_yticks(tax.get_yticks()[::2])
        tax.tick_params(axis='both', labelsize=16)
        tax.xaxis.label.set_size(fontsize=16)
        tax.yaxis.label.set_size(fontsize=16)

    plt.tight_layout(pad=1.0)  # set spacing between figures

    # Rank models: score the last 10 accepted parameter sets with AIC.
    table = ac.calculate_aic(data=sampler_args['data'],
                             models=sampler_args['models'],
                             priors=sampler_args['priors'],
                             params=accepted.iloc[-10:])

    # inplace=False leaves `table` untouched and returns a sorted copy.
    ranked_table = ac.rank_aic(table, inplace=False)
    print('\nRanked AIC table:\n', ranked_table.head())

    # Export the ranked table as CSV inside the output folder.
    ranked_table.to_csv(output_folder / 'ranked_table.csv')

    # Best model = first row after ranking (lowest AIC).
    # NOTE(review): assumes ranked_table has 'row' and 'model_num'
    # columns — confirm against ac.rank_aic's output schema.
    best = ranked_table.iloc[0]
    best_row_index = best['row']
    best_model_num = best['model_num']

    # Reuse the best model's settings as a template for a refit.
    new_settings = config_data[best_model_num]
    new_settings['settings_name'] = 'LogicGate_OR_bestfitted'
    # NOTE(review): this block is corrupted — the get_params_for_model(...)
    # call below is never closed and is immediately followed by unrelated
    # assignment statements (looks like a bad merge/paste). The original
    # argument list and the lines between these fragments must be
    # recovered from version control; `model_num`, `model_init` and
    # `modify_params` are not defined anywhere in this view.
    new_settings['parameters'] = cf.get_params_for_model(
        sampler_args['models'][model_num]['init'] = model_init
        sampler_args['models'][model_num]['int_args'][
            'modify_params'] = modify_params

    traces = {}
    # Fit the models by simulated annealing; key 'a' of the result holds
    # the accepted samples.
    result = cf.simulated_annealing(**sampler_args)
    accepted = result['a']
    # Thin the tail of the chain: every 4th sample of the last 40.
    posterior = accepted.iloc[-40::4]
    traces[1] = accepted
    '''
    In order to calculate the AIC, we need the data, models, priors and
    parameters for evaluation. 
    '''
    table = ac.calculate_aic(data=sampler_args['data'],
                             models=sampler_args['models'],
                             priors=sampler_args['priors'],
                             params=posterior)
    '''
    rank_aic accepts a DataFrame containing AIC values indexed under 
    aic_column_name. It then sorts the DataFrame and adds columns for the normalized
    AIC and the evidence for that model. The original columns in the input
    DataFrame remain untouched.
    
    As can be seen from the result, rank_aic isn't picky about what indices and 
    columns you use as long as your DataFrame has its AIC values indexed under aic_column_name.
    '''

    # inplace=False: `table` is left untouched; a ranked copy is returned.
    ranked_table = ac.rank_aic(table, inplace=False)
    '''
    After choosing the best model and its fitted parameters, we can convert it into 
    a settings data structure so we can reuse it as a template in the future.