def test_rank_aic(self):
    """Rank the shared AIC table and verify the ranking produced rows.

    Runs ``ac.rank_aic`` out-of-place (``inplace=False``) on the
    module-level ``table`` global, publishes the ranked DataFrame back
    into ``table`` so later tests see the ranked version, and asserts
    the result is non-empty.
    """
    global table
    # Out-of-place ranking: the original frame is untouched; the ranked
    # copy replaces the shared global for downstream tests.
    table = ac.rank_aic(table, aic_column_name='AIC Value', inplace=False)
    assert len(table)
for tax in trace_AX.values(): tax.set_xticks(tax.get_xticks()[::2]) tax.set_yticks(tax.get_yticks()[::2]) tax.tick_params(axis='both', labelsize=16) tax.xaxis.label.set_size(fontsize=16) tax.yaxis.label.set_size(fontsize=16) plt.tight_layout(pad=1.0) # set spacing between figures #Rank models table = ac.calculate_aic(data=sampler_args['data'], models=sampler_args['models'], priors=sampler_args['priors'], params=accepted.iloc[-10:]) ranked_table = ac.rank_aic(table, inplace=False) print('\nRanked AIC table:\n', ranked_table.head()) #export the ranked table into csv file inside the output folder ranked_table.to_csv(output_folder / 'ranked_table.csv') best = ranked_table.iloc[0] best_row_index = best['row'] best_model_num = best['model_num'] new_settings = config_data[best_model_num] new_settings['settings_name'] = 'LogicGate_OR_bestfitted' new_settings['parameters'] = cf.get_params_for_model( models=sampler_args['models'], trace=accepted, model_num=best_model_num,
labels=labels, titles=titles, legend_args=legend_args, figs=None, AX=None) ''' 6. Model Ranking with AIC Calculation ''' table = ac.calculate_aic(data=sampler_args['data'], models=sampler_args['models'], priors=sampler_args['priors'], params=posterior) ranked_table = ac.rank_aic(table, aic_column_name='AIC Value', inplace=False) ''' rank_aic accepts a DataFrame containing AIC values indexed under aic_column_name. It then sorts the DataFrame and adds columns for the normalized AIC and the evidence for that model. The original columns in the input DataFrame remain untouched. rank_aic isn't picky about what indices and columns you use as long as your DataFrame has its AIC values indexed under the argument aic_column_name. ''' ''' 7. Saving the Results ''' ''' Saving the models and settings allows us to reuse them without having to