def show_evaluation_hist_from_path(league_name):
    """Render the evaluation histogram for a model loaded from an explicit path.

    Expects a JSON request body containing at least:
        model_dir, model_name  -- where to find the saved model
        thr, field             -- evaluation parameters

    Returns:
        A plot response (via ``show_plot``) on success,
        400 if the model paths are invalid,
        404 if ``league_name`` is unknown.
    """
    # requested params : [thr, field]
    args = request.json
    model_dir = args['model_dir']
    model_name = args['model_name']

    path_check = check_predict_paths(model_dir, model_name)
    if not path_check['check']:
        return make_response(path_check['msg'], 400)

    paths = path_check['paths']
    config, model = load_configs_from_paths(paths)
    league_params, data_params = config['league'], config['data']

    params = {**args, **league_params, **data_params}
    params['save_dir'] = STATIC_DIR

    # Validate the league before running inference.
    league_ok, msg = check_league(league_name)
    if not league_ok:
        return make_response(msg, 404)

    feat_eng = config['feat_eng']
    # testset is not needed for the histogram itself.
    _testset, pred, true = real_case_inference(model, params, feat_eng)
    _pred_df, _outcome, fig = evaluate_results(true, pred, params, plot=False)
    return show_plot(fig)
def show_evaluation_hist(league_name):
    """Render the evaluation histogram for an already-registered league model.

    Uses the module-level ``models``/``configs`` registries keyed by
    ``league_name``. Expects a JSON request body with: thr, field.

    Returns:
        A plot response (via ``show_plot``) on success,
        404 if ``league_name`` is unknown.
    """
    # requested params : [thr, field]
    args = request.json
    league_config = configs[league_name]['league']
    params = {**args, **league_config}
    params['save_dir'] = STATIC_DIR

    # Validate the league before touching the model registry.
    league_ok, msg = check_league(league_name)
    if not league_ok:
        return make_response(msg, 404)

    model = models[league_name]
    feat_eng = configs[league_name]['feat_eng']
    # testset is not needed for the histogram itself.
    _testset, pred, true = real_case_inference(model, params, feat_eng)
    _pred_df, _outcome, fig = evaluate_results(true, pred, params, plot=False)
    return show_plot(fig)
def simulate(test_data, pred, true, params):
    """Run a betting simulation for one field and print its summary.

    Required params keys:
        field, save_dir, thr, thr_list, n_matches, money_bet, combo_list
    Optional:
        plot (defaults to True)

    Side effects only (prints the summary via ``show_summary``); returns None.
    """
    field = params['field']
    thr = params['thr']
    # dict.get avoids the needless `in list(params.keys())` membership scan.
    plot = params.get('plot', True)

    testset = test_data[field]
    # Drop rows with no opponent; notnull() is the idiomatic inverse of isnull().
    testset = testset[testset['f-opponent'].notnull()]

    pred_df, _, _ = evaluate_results(true, pred, params, plot=plot)
    data_result = postprocessing_test_data(testset, pred_df)

    _, thr_outcomes, _ = thr_analysis(true, pred, params)
    thr_outcome = thr_outcomes[str(thr)]

    summary, sim_result, _ = simulation(data_result, params, thr_outcome, plot=plot)
    show_summary(summary)
def strategy_stats(testset, pred, true, params):
    """Sweep thr/filter_bet combinations, simulate each, and aggregate results.

    Required params keys: field, thr_list, filter_bet_list, save_dir.
    NOTE: mutates ``params`` in place (sets 'thr' and 'filter_bet' per iteration).

    Returns:
        (result_df, ckp_df): the concatenated per-combination summaries and
        their checkpoint views. Also writes both as CSV under save_dir
        (';' separator, ',' decimal) when save_dir is not None.
    """
    field = params['field']
    thr_list = params['thr_list']
    filter_bet_list = params['filter_bet_list']

    # Collect frames and concat once: DataFrame.append was removed in
    # pandas 2.0 and was O(n^2) when called inside a loop.
    summary_frames = []
    ckp_frames = []

    for thr in tqdm(thr_list, desc='Thr \t'):
        for filter_bet in filter_bet_list:
            # NOTE(review): thr_analysis sees the params mutated by the
            # previous iteration (thr/filter_bet); preserved as-is — confirm
            # whether it could be hoisted out of the loops.
            _, _, thr_outcome = thr_analysis(true, pred, params)
            if thr_outcome[str(thr)]['tpr'] == 'nan':
                continue

            params['thr'] = thr
            params['filter_bet'] = filter_bet
            pred_df, outcome, _ = evaluate_results(true, pred, params, plot=False)
            sim_data = postprocessing_test_data(testset, pred_df)
            sim_result, _ = simulation(sim_data, params, plot=False)
            if sim_result is None:
                continue
            summary = summarize_sim_result(sim_result, params)
            if summary is None:
                continue

            summary_df = summary_dataframe(summary)
            summary_df.insert(0, 'match', sim_result['match'].to_list())
            summary_df.insert(0, 'Match N.', sim_result['n_match'].to_list())
            summary_df.insert(0, 'Bet N.', np.arange(1, len(sim_result) + 1))
            summary_frames.append(summary_df)
            ckp_frames.append(checkpoint_view_df(summary_df))

    result_df = pd.concat(summary_frames) if summary_frames else pd.DataFrame()
    result_df = result_df.reset_index(drop=True)
    ckp_df = pd.concat(ckp_frames) if ckp_frames else pd.DataFrame()

    save_dir = params['save_dir']
    if save_dir is not None:
        filepath = f'{save_dir}simulation_analysis_{field}.csv'
        result_df.to_csv(filepath, sep=';', decimal=',')
        filepath = f'{save_dir}simulation_ckp_analysis_{field}.csv'
        ckp_df.to_csv(filepath, sep=';', decimal=',')

    return result_df, ckp_df
def strategy_analysis():
    """Run the full strategy sweep for both HOME and AWAY fields.

    Requested JSON params (dict):
        league_name            -- OPTIONAL 1
        model_dir, model_name  -- OPTIONAL 2
        thr_list, filter_bet_list, money_bet,
        combo_list, n_matches  -- OPTIONAL

    Returns:
        200 response pointing at params['save_dir'] on success,
        400 if the model/config could not be loaded.
    """
    params = request.json

    response, model, config = load_model_and_config(params, models, configs)
    if not response['check']:
        return make_response(response['msg'], 400)

    feat_eng = config['feat_eng']
    params = {**params, **config['league'], **config['data']}

    for field in (HOME, AWAY):
        params['field'] = field
        testset, pred, true = real_case_inference(model, params, feat_eng)

        eval_params = check_evaluation_params(params)
        pred_df, _, _ = evaluate_results(true, pred, eval_params, plot=False)

        simulation_params = check_simulation_params(params)
        result_df, ckp_df = strategy_stats(testset, pred, true, simulation_params)

    return make_response(f'Strategy stats saved at {params["save_dir"]}', 200)
def show_simulation(league_name):
    """Run one simulation and return its plot; also dump per-field strategy stats.

    Requested JSON params:
        thr, thr_list, field, filter_bet, money_bet, n_matches, combo

    Returns:
        A plot response (via ``show_plot``) on success,
        400 if the model/config could not be loaded.
    """
    params = request.json

    response, model, config = load_model_and_config(params, models, configs)
    if not response['check']:
        return make_response(response['msg'], 400)

    league_params = config['league']
    data_params = config['data']
    feat_eng = config['feat_eng']
    params = {**params, **league_params, **data_params}

    testset, pred, true = real_case_inference(model, params, feat_eng)
    eval_params = check_evaluation_params(params)
    pred_df, _, _ = evaluate_results(true, pred, eval_params, plot=False)

    _, _, thr_dict = thr_analysis(true, pred, params)
    thr_outcome = thr_dict[str(params['thr'])]

    data_result = postprocessing_test_data(testset, pred_df)
    simulation_params = check_simulation_params(params)
    # NOTE(review): simulation is called with plot=False yet fig is saved —
    # confirm it still returns a usable figure in that mode.
    sim_result, fig = simulation(data_result, simulation_params, plot=False)
    fig.savefig(f'{STATIC_DIR}simulation.png')

    response = show_plot(fig)

    # The returned frames are unused here, but strategy_stats writes CSVs to
    # params['save_dir'] as a side effect. Unpack both values to match the
    # function's (result_df, ckp_df) contract (previously a bare tuple bind).
    params['field'] = HOME
    testset, pred, true = real_case_inference(model, params, feat_eng)
    result_home_df, ckp_home_df = strategy_stats(testset, pred, true, simulation_params)

    params['field'] = AWAY
    testset, pred, true = real_case_inference(model, params, feat_eng)
    result_away_df, ckp_away_df = strategy_stats(testset, pred, true, simulation_params)

    return response