# get only centrality-selected candidates
train_test_data_cent = [pd.DataFrame(), [], pd.DataFrame(), []]
train_test_data_cent[0] = train_test_data[0].query(
    f'matter {split_ineq_sign} and centrality > {cent_bins[0]} and centrality < {cent_bins[1]} and ct >= {ct_bins_df[0]} and ct < {ct_bins_df[1]}')
train_test_data_cent[2] = train_test_data[2].query(
    f'matter {split_ineq_sign} and centrality > {cent_bins[0]} and centrality < {cent_bins[1]} and ct >= {ct_bins_df[0]} and ct < {ct_bins_df[1]}')
train_test_data_cent[1] = train_test_data_cent[0]['y_true']
train_test_data_cent[3] = train_test_data_cent[2]['y_true']

# get predictions for training and test sets
test_y_score = model_hdl.predict(train_test_data_cent[2])
train_y_score = model_hdl.predict(train_test_data_cent[0])

# second condition needed because of an issue with the Qt libraries
if MAKE_TRAIN_TEST_PLOT and not MAKE_PRESELECTION_EFFICIENCY:
    if not os.path.isdir(f'{PLOT_DIR}/train_test_out'):
        os.mkdir(f'{PLOT_DIR}/train_test_out')
    plot_utils.plot_output_train_test(model_hdl, train_test_data_cent,
                                      logscale=True, density=True,
                                      labels=leg_labels)
    plt.savefig(f'{PLOT_DIR}/train_test_out/{bin_df}_out.pdf')
    # call truncated in the original; completed as in the parallel snippet below
    plot_utils.plot_feature_imp(train_test_data_cent[0],
                                train_test_data_cent[1], model_hdl)
# data preparation
DIGITS_DATA = datasets.load_digits(n_class=2)
DIGITS = pd.DataFrame(DIGITS_DATA.data[:, 0:10])  # pylint: disable=E1101
Y_DIGITS = DIGITS_DATA.target  # pylint: disable=E1101
SIG_DF = DIGITS[Y_DIGITS == 1]
BKG_DF = DIGITS[Y_DIGITS == 0]
TRAIN_SET, TEST_SET, Y_TRAIN, Y_TEST = train_test_split(
    DIGITS, Y_DIGITS, test_size=0.5, random_state=42)
DATA = [TRAIN_SET, Y_TRAIN, TEST_SET, Y_TEST]
# --------------------------------------------

# training and testing
INPUT_MODEL = xgb.XGBClassifier()
MODEL = ModelHandler(INPUT_MODEL)
MODEL.train_test_model(DATA)
Y_PRED = MODEL.predict(DATA[2])
Y_PRED_TRAIN = MODEL.predict(DATA[0])
EFFICIENCY, THRESHOLD = analysis_utils.bdt_efficiency_array(
    DATA[3], Y_PRED, n_points=10)
# --------------------------------------------


def test_plot_distr():
    """
    Test the feature distribution plot
    """
    assert isinstance(plot_utils.plot_distr(
        [SIG_DF, BKG_DF], SIG_DF.columns), np.ndarray)


def test_plot_corr():
    """
    Test the correlation matrix plot
    """
    # body truncated in the original; completed to mirror test_plot_distr
    assert plot_utils.plot_corr([SIG_DF, BKG_DF], SIG_DF.columns) is not None
def train_test(inputCfg, PtBin, OutPutDirPt, TrainTestData, iBin):  # pylint: disable=too-many-statements, too-many-branches
    '''
    function for model training and testing
    '''
    n_classes = len(np.unique(TrainTestData[3]))
    modelClf = xgb.XGBClassifier(use_label_encoder=False)
    TrainCols = inputCfg['ml']['training_columns']
    HyperPars = inputCfg['ml']['hyper_par'][iBin]
    if not isinstance(TrainCols, list):
        print('\033[91mERROR: training columns must be defined!\033[0m')
        sys.exit()
    if not isinstance(HyperPars, dict):
        print('\033[91mERROR: hyper-parameters must be defined or be an empty dict!\033[0m')
        sys.exit()
    ModelHandl = ModelHandler(modelClf, TrainCols, HyperPars)

    # hyperparameter optimisation
    if inputCfg['ml']['hyper_par_opt']['do_hyp_opt']:
        print('Perform bayesian optimization')

        BayesOptConfig = inputCfg['ml']['hyper_par_opt']['bayes_opt_config']
        if not isinstance(BayesOptConfig, dict):
            print('\033[91mERROR: bayes_opt_config must be defined!\033[0m')
            sys.exit()

        if n_classes > 2:
            average_method = inputCfg['ml']['roc_auc_average']
            roc_method = inputCfg['ml']['roc_auc_approach']
            if not (average_method in ['macro', 'weighted'] and roc_method in ['ovo', 'ovr']):
                print('\033[91mERROR: selected ROC configuration is not valid!\033[0m')
                sys.exit()

            if average_method == 'weighted':
                metric = f'roc_auc_{roc_method}_{average_method}'
            else:
                metric = f'roc_auc_{roc_method}'
        else:
            metric = 'roc_auc'

        print('Performing hyper-parameters optimisation: ...', end='\r')
        OutFileHypPars = open(
            f'{OutPutDirPt}/HyperParOpt_pT_{PtBin[0]}_{PtBin[1]}.txt', 'wt')
        sys.stdout = OutFileHypPars
        ModelHandl.optimize_params_bayes(
            TrainTestData, BayesOptConfig, metric,
            nfold=inputCfg['ml']['hyper_par_opt']['nfolds'],
            init_points=inputCfg['ml']['hyper_par_opt']['initpoints'],
            n_iter=inputCfg['ml']['hyper_par_opt']['niter'],
            njobs=inputCfg['ml']['hyper_par_opt']['njobs'])
        OutFileHypPars.close()
        sys.stdout = sys.__stdout__
        print('Performing hyper-parameters optimisation: Done!')
        print(f'Output saved in {OutPutDirPt}/HyperParOpt_pT_{PtBin[0]}_{PtBin[1]}.txt')
        print(f'Best hyper-parameters:\n{ModelHandl.get_model_params()}')
    else:
        ModelHandl.set_model_params(HyperPars)

    # train and test the model with the updated hyper-parameters
    yPredTest = ModelHandl.train_test_model(
        TrainTestData, True,
        output_margin=inputCfg['ml']['raw_output'],
        average=inputCfg['ml']['roc_auc_average'],
        multi_class_opt=inputCfg['ml']['roc_auc_approach'])
    yPredTrain = ModelHandl.predict(TrainTestData[0], inputCfg['ml']['raw_output'])

    # save model handler in pickle
    ModelHandl.dump_model_handler(
        f'{OutPutDirPt}/ModelHandler_pT_{PtBin[0]}_{PtBin[1]}.pickle')
    ModelHandl.dump_original_model(
        f'{OutPutDirPt}/XGBoostModel_pT_{PtBin[0]}_{PtBin[1]}.model', True)

    # plots
    LegLabels = [inputCfg['output']['leg_labels']['Bkg'],
                 inputCfg['output']['leg_labels']['Prompt']]
    if inputCfg['output']['leg_labels']['FD'] is not None:
        LegLabels.append(inputCfg['output']['leg_labels']['FD'])
    OutputLabels = [inputCfg['output']['out_labels']['Bkg'],
                    inputCfg['output']['out_labels']['Prompt']]
    if inputCfg['output']['out_labels']['FD'] is not None:
        OutputLabels.append(inputCfg['output']['out_labels']['FD'])
    #_____________________________________________
    plt.rcParams["figure.figsize"] = (10, 7)
    MLOutputFig = plot_utils.plot_output_train_test(
        ModelHandl, TrainTestData, 80, inputCfg['ml']['raw_output'],
        LegLabels, inputCfg['plots']['train_test_log'], density=True)
    if n_classes > 2:
        for Fig, Lab in zip(MLOutputFig, OutputLabels):
            Fig.savefig(f'{OutPutDirPt}/MLOutputDistr{Lab}_pT_{PtBin[0]}_{PtBin[1]}.pdf')
    else:
        MLOutputFig.savefig(f'{OutPutDirPt}/MLOutputDistr_pT_{PtBin[0]}_{PtBin[1]}.pdf')
    #_____________________________________________
    plt.rcParams["figure.figsize"] = (10, 9)
    ROCCurveFig = plot_utils.plot_roc(TrainTestData[3], yPredTest, None, LegLabels,
                                      inputCfg['ml']['roc_auc_average'],
                                      inputCfg['ml']['roc_auc_approach'])
    ROCCurveFig.savefig(f'{OutPutDirPt}/ROCCurveAll_pT_{PtBin[0]}_{PtBin[1]}.pdf')
    pickle.dump(ROCCurveFig,
                open(f'{OutPutDirPt}/ROCCurveAll_pT_{PtBin[0]}_{PtBin[1]}.pkl', 'wb'))
    #_____________________________________________
    plt.rcParams["figure.figsize"] = (10, 9)
    ROCCurveTTFig = plot_utils.plot_roc_train_test(
        TrainTestData[3], yPredTest, TrainTestData[1], yPredTrain, None, LegLabels,
        inputCfg['ml']['roc_auc_average'], inputCfg['ml']['roc_auc_approach'])
    ROCCurveTTFig.savefig(f'{OutPutDirPt}/ROCCurveTrainTest_pT_{PtBin[0]}_{PtBin[1]}.pdf')
    #_____________________________________________
    PrecisionRecallFig = plot_utils.plot_precision_recall(
        TrainTestData[3], yPredTest, LegLabels)
    PrecisionRecallFig.savefig(f'{OutPutDirPt}/PrecisionRecallAll_pT_{PtBin[0]}_{PtBin[1]}.pdf')
    #_____________________________________________
    plt.rcParams["figure.figsize"] = (12, 7)
    FeaturesImportanceFig = plot_utils.plot_feature_imp(
        TrainTestData[2][TrainCols], TrainTestData[3], ModelHandl, LegLabels)
    n_plot = n_classes if n_classes > 2 else 1
    for iFig, Fig in enumerate(FeaturesImportanceFig):
        if iFig < n_plot:
            label = OutputLabels[iFig] if n_classes > 2 else ''
            Fig.savefig(f'{OutPutDirPt}/FeatureImportance{label}_pT_{PtBin[0]}_{PtBin[1]}.pdf')
        else:
            Fig.savefig(f'{OutPutDirPt}/FeatureImportanceAll_pT_{PtBin[0]}_{PtBin[1]}.pdf')

    return ModelHandl
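# --------------------------------------------
# Usage sketch for train_test() above (an assumption for illustration, not part
# of the original source): `cfg_example` is a hypothetical config dict holding
# only the keys the function reads; `train_test_data` must come from hipe4ml's
# train_test_generator as in the other snippets.
cfg_example = {
    'ml': {'training_columns': ['d_len', 'cos_p'],  # hypothetical feature names
           'hyper_par': [{}],
           'hyper_par_opt': {'do_hyp_opt': False},
           'raw_output': False,
           'roc_auc_average': 'macro',
           'roc_auc_approach': 'ovo'},
    'output': {'leg_labels': {'Bkg': 'Background', 'Prompt': 'Prompt', 'FD': None},
               'out_labels': {'Bkg': 'Bkg', 'Prompt': 'Prompt', 'FD': None}},
    'plots': {'train_test_log': True},
}
model_handler = train_test(cfg_example, (2, 4), './output', train_test_data, 0)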
def benchmark_hyperparam_optimizers(filename_dict, params, params_range, flag_dict,
                                    presel_dict, training_variables='', testsize=0.75):
    import time
    from sklearn.metrics import roc_auc_score

    N_run = 1
    data_path = filename_dict['data_path']
    analysis_path = filename_dict['analysis_path']

    print('Loading MC signal')
    mc_signal = TreeHandler()
    mc_signal.get_handler_from_large_file(
        file_name=data_path + filename_dict['MC_signal_filename'],
        tree_name=filename_dict['MC_signal_table'])
    print('MC signal loaded\n')

    print('Loading background data for training')
    background_ls = TreeHandler()
    background_ls.get_handler_from_large_file(
        file_name=data_path + filename_dict['train_bckg_filename'],
        tree_name=filename_dict['train_bckg_table'])
    background_ls.apply_preselections(presel_dict['train_bckg_presel'])
    background_ls.shuffle_data_frame(
        size=min(background_ls.get_n_cand(), mc_signal.get_n_cand() * 4))
    print('Done\n')

    train_test_data = train_test_generator([mc_signal, background_ls], [1, 0],
                                           test_size=testsize)
    if training_variables == '':
        training_variables = train_test_data[0].columns.tolist()

    model_clf = xgb.XGBClassifier()
    model_hdl = ModelHandler(model_clf, training_variables)

    times = []
    roc_bayes = []
    for i in range(N_run):
        start = time.time()
        model_hdl.optimize_params_bayes(train_test_data, params_range,
                                        'roc_auc', njobs=-1)
        model_hdl.train_test_model(train_test_data)
        y_pred_test = model_hdl.predict(train_test_data[2], True)  # used to evaluate model performance
        roc_bayes.append(roc_auc_score(train_test_data[3], y_pred_test))
        times.append(time.time() - start)

    print('BAYES OPTIMIZATION WITH SKLEARN')
    print('Mean time : ' + str(np.mean(times)))
    print('Mean ROC  : ' + str(np.mean(roc_bayes)))
    print('--------------\n')

    # give Optuna the same time budget as the mean Bayes run,
    # and keep its scores in a separate list
    roc_optuna = []
    for i in range(N_run):
        model_hdl.optimize_params_optuna(train_test_data, params_range, 'roc_auc',
                                         timeout=np.mean(times), n_jobs=-1)
        model_hdl.train_test_model(train_test_data)
        y_pred_test = model_hdl.predict(train_test_data[2], True)  # used to evaluate model performance
        roc_optuna.append(roc_auc_score(train_test_data[3], y_pred_test))

    print('OPTUNA')
    print('Fixed time : ' + str(np.mean(times)))
    print('Mean ROC   : ' + str(np.mean(roc_optuna)))
    print('--------------\n')
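# --------------------------------------------
# Example inputs for benchmark_hyperparam_optimizers() above (an assumption for
# illustration): hyperparameter ranges are (min, max) tuples, mirroring the
# HYP_RANGES dict used elsewhere in these snippets; the values are placeholders.
params_range_example = {
    'max_depth': (2, 15),          # maximum depth of a single tree (regularization)
    'learning_rate': (0.01, 0.3),  # learning rate
    'n_estimators': (50, 1000),    # number of boosting trees
}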
def train_test(inputCfg, PtMin, PtMax, OutPutDirPt, TrainTestData):
    '''
    function for model training and testing
    '''
    modelClf = xgb.XGBClassifier()
    TrainCols = inputCfg['ml']['training_columns']
    HyperPars = inputCfg['ml']['hyper_par']
    if not isinstance(TrainCols, list):
        print('ERROR: training columns must be defined!')
        sys.exit()
    if not isinstance(HyperPars, dict):
        print('ERROR: hyper-parameters must be defined or be an empty dict!')
        sys.exit()
    ModelHandl = ModelHandler(modelClf, TrainCols, HyperPars)

    # hyperparameter optimisation --> not working with multi-class classification at the moment
    #HypRanges = {
    #    # defines the maximum depth of a single tree (regularization)
    #    'max_depth': (1, 30),
    #    'learning_rate': (0.01, 0.3),  # learning rate
    #    'n_estimators': (50, 1000)  # number of boosting trees
    #}
    #ModelHandl.optimize_params_bayes(TrainTestData, HypRanges, None)

    # train and test the model with the updated hyperparameters
    ModelHandl.train_test_model(TrainTestData)
    yPredTest = ModelHandl.predict(TrainTestData[2], inputCfg['ml']['raw_output'], True)

    # save model handler in pickle
    ModelHandl.dump_model_handler(
        f'{OutPutDirPt}/ModelHandler_pT_{PtMin}_{PtMax}.pickle')

    # plots
    LegLabels = inputCfg['output']['leg_labels']
    OutputLabels = inputCfg['output']['out_labels']
    #_____________________________________________
    plt.rcParams["figure.figsize"] = (10, 7)
    MLOutputFig = plot_utils.plot_output_train_test(
        ModelHandl, TrainTestData, 80, inputCfg['ml']['raw_output'],
        LegLabels, True, inputCfg['plots']['train_test_log'], density=True)
    for Fig, Lab in zip(MLOutputFig, OutputLabels):
        Fig.savefig(f'{OutPutDirPt}/MLOutputDistr{Lab}_pT_{PtMin}_{PtMax}.pdf')
    #_____________________________________________
    plt.rcParams["figure.figsize"] = (8, 7)
    ROCCurveFig = plot_utils.plot_roc(TrainTestData[3], yPredTest, LegLabels)
    ROCCurveFig.savefig(f'{OutPutDirPt}/ROCCurveAll_pT_{PtMin}_{PtMax}.pdf')
    #_____________________________________________
    PrecisionRecallFig = plot_utils.plot_precision_recall(
        TrainTestData[3], yPredTest, LegLabels)
    PrecisionRecallFig.savefig(f'{OutPutDirPt}/PrecisionRecallAll_pT_{PtMin}_{PtMax}.pdf')
    #_____________________________________________
    plt.rcParams["figure.figsize"] = (12, 7)
    FeaturesImportanceFig = plot_utils.plot_feature_imp(
        TrainTestData[2][TrainCols], TrainTestData[3], ModelHandl)
    for iFig, Fig in enumerate(FeaturesImportanceFig):
        if iFig < 3:
            Fig.savefig(f'{OutPutDirPt}/FeatureImportance{OutputLabels[iFig]}_pT_{PtMin}_{PtMax}.pdf')
        else:
            Fig.savefig(f'{OutPutDirPt}/FeatureImportanceAll_pT_{PtMin}_{PtMax}.pdf')

    return ModelHandl
ct_bins_df_index = int(ct_bins[0] / 5)
for ct_bins_df in zip(CT_BINS_APPLY[i_cent_bins][ct_bins_df_index][:-1],
                      CT_BINS_APPLY[i_cent_bins][ct_bins_df_index][1:]):
    bin_df = f'{split}_{cent_bins[0]}_{cent_bins[1]}_{ct_bins_df[0]}_{ct_bins_df[1]}'
    print(bin_df)

    # get only centrality-selected candidates
    train_test_data_cent = [pd.DataFrame(), [], pd.DataFrame(), []]
    train_test_data_cent[0] = train_test_data[0].query(
        f'matter {split_ineq_sign} and centrality > {cent_bins[0]} and centrality < {cent_bins[1]} and ct >= {ct_bins_df[0]} and ct < {ct_bins_df[1]}')
    train_test_data_cent[2] = train_test_data[2].query(
        f'matter {split_ineq_sign} and centrality > {cent_bins[0]} and centrality < {cent_bins[1]} and ct >= {ct_bins_df[0]} and ct < {ct_bins_df[1]}')
    train_test_data_cent[1] = train_test_data_cent[0]['y_true']
    train_test_data_cent[3] = train_test_data_cent[2]['y_true']

    # get predictions for training and test sets
    print(train_test_data_cent[2])
    if ct_bins_df[0] == 0:
        continue
    test_y_score = model_hdl.predict(train_test_data_cent[2], output_margin=False)
    train_y_score = model_hdl.predict(train_test_data_cent[0], output_margin=False)
    # store the three class scores (background, non-prompt, prompt) as new columns
    train_test_data_cent[0].loc[:, 'model_output_prompt'] = train_y_score[:, 2]
    train_test_data_cent[2].loc[:, 'model_output_prompt'] = test_y_score[:, 2]
    train_test_data_cent[0].loc[:, 'model_output_non_prompt'] = train_y_score[:, 1]
    train_test_data_cent[2].loc[:, 'model_output_non_prompt'] = test_y_score[:, 1]
    train_test_data_cent[0].loc[:, 'model_output_background'] = train_y_score[:, 0]
    train_test_data_cent[2].loc[:, 'model_output_background'] = test_y_score[:, 0]

    # write
    train_test_data_cent[0].to_parquet(
        f'df/train_data_{cent_bins[0]}_{cent_bins[1]}_{ct_bins_df[0]}_{ct_bins_df[1]}.parquet.gzip',
        compression='gzip')
    train_test_data_cent[2].to_parquet(
        f'df/test_data_{cent_bins[0]}_{cent_bins[1]}_{ct_bins_df[0]}_{ct_bins_df[1]}.parquet.gzip',
        compression='gzip')

    # second condition needed because of an issue with the Qt libraries
    if MAKE_TRAIN_TEST_PLOT and not MAKE_PRESELECTION_EFFICIENCY:
        if not os.path.isdir(f'{PLOT_DIR}/train_test_out'):
            # truncated in the original; completed as in the parallel snippets
            os.mkdir(f'{PLOT_DIR}/train_test_out')
def train_xgboost_model(signal, background, filename_dict, params, params_range,
                        flag_dict, training_variables='', testsize=0.5):
    '''
    Trains an XGBoost model using hipe4ml and plots the output distribution
    and the feature importance
    '''
    print('Training XGBoost model')
    training_fig_path = filename_dict['analysis_path'] + "/images/training"

    train_test_data = train_test_generator([signal, background], [1, 0],
                                           test_size=testsize)
    if training_variables == '':
        training_variables = train_test_data[0].columns.tolist()

    model_clf = xgb.XGBClassifier()
    model_hdl = ModelHandler(model_clf, training_variables)
    if not flag_dict['use_default_param']:
        model_hdl.set_model_params(params)

    if flag_dict['benchmark_opt']:
        print('Benchmarking optimizers\n')
        import time
        from sklearn.metrics import roc_auc_score

        times_sk = []
        roc_sk = []
        for i in range(1):
            start = time.time()
            model_hdl.optimize_params_bayes(train_test_data, params_range,
                                            'roc_auc', njobs=-1)
            model_hdl.train_test_model(train_test_data)
            y_pred_test = model_hdl.predict(train_test_data[2], True)  # used to evaluate model performance
            roc_sk.append(roc_auc_score(train_test_data[3], y_pred_test))
            times_sk.append(time.time() - start)
        print('\nBAYES OPTIMIZATION WITH SKLEARN')
        print('Mean time : ' + str(np.mean(times_sk)))
        print('Mean ROC  : ' + str(np.mean(roc_sk)))
        print('--------------\n')

        roc_optuna = []
        for i in range(1):
            # string-valued parameters are passed through as fixed (categorical) values
            for key in params:
                if isinstance(params[key], str):
                    params_range[key] = params[key]
            model_hdl.optimize_params_optuna(train_test_data, params_range, 'roc_auc',
                                             timeout=flag_dict['timeout'],
                                             n_jobs=flag_dict['n_jobs'])
            model_hdl.train_test_model(train_test_data)
            y_pred_test = model_hdl.predict(train_test_data[2], True)  # used to evaluate model performance
            roc_optuna.append(roc_auc_score(train_test_data[3], y_pred_test))
        print('OPTUNA')
        print('Fixed time : ' + str(flag_dict['timeout']))
        print('Mean ROC   : ' + str(np.mean(roc_optuna)))
        print('--------------\n')

    if flag_dict['optimize_bayes']:
        import time
        print('Doing Bayes optimization of hyperparameters\n')
        start = time.time()
        model_hdl.optimize_params_bayes(train_test_data, params_range, 'roc_auc',
                                        n_iter=700, njobs=flag_dict['n_jobs'])
        print('Elapsed time: ' + str(time.time() - start))

    if flag_dict['optimize_optuna']:
        print('Doing Optuna optimization of hyperparameters\n')
        for key in params:
            if isinstance(params[key], str):
                params_range[key] = params[key]
        study = model_hdl.optimize_params_optuna(train_test_data, params_range,
                                                 scoring='roc_auc',
                                                 timeout=flag_dict['timeout'],
                                                 n_jobs=flag_dict['n_jobs'],
                                                 n_trials=None)
        print('Parameters optimization done!\n')

        if flag_dict['plot_optim']:
            print('Saving optimization plots')
            fig = optuna.visualization.plot_slice(study)
            fig.write_image(training_fig_path + '/optuna_slice.png')
            fig = optuna.visualization.plot_optimization_history(study)
            fig.write_image(training_fig_path + '/optuna_history.png')
            '''fig = optuna.visualization.plot_param_importances(study)
            fig.write_image(training_fig_path + '/optuna_param_importance.png')
            fig = optuna.visualization.plot_contour(study)
            fig.write_image(training_fig_path + '/optuna_contour.png')'''
            print('Done\n')

        import joblib
        joblib.dump(study, filename_dict['analysis_path'] + "/model/study.pkl")

    model_hdl.train_test_model(train_test_data)
    print(model_hdl.get_model_params())

    print('Predicting values on training and test data')
    y_pred_train = model_hdl.predict(train_test_data[0], True)
    y_pred_test = model_hdl.predict(train_test_data[2], True)  # used to evaluate model performance
    print('Prediction done\n')

    plt.rcParams["figure.figsize"] = (10, 7)
    leg_labels = ['background', 'signal']

    print('Saving output comparison plot')
    plt.figure()
    ml_out_fig = plot_utils.plot_output_train_test(model_hdl, train_test_data, 100,
                                                   True, leg_labels, True,
                                                   density=False)
    plt.savefig(training_fig_path + '/output_train_test.png',
                dpi=300, facecolor='white')
    plt.close()
    print('Done\n')

    print('Saving ROC AUC plot')
    plt.figure()
    roc_train_test_fig = plot_utils.plot_roc_train_test(
        train_test_data[3], y_pred_test, train_test_data[1], y_pred_train,
        None, leg_labels)  # ROC AUC plot
    plt.savefig(training_fig_path + '/ROC_AUC_train_test.png',
                dpi=300, facecolor='white')
    import pickle
    with open(training_fig_path + '/ROC_AUC_train_test.pickle', 'wb') as f:
        pickle.dump(roc_train_test_fig, f)
    plt.close()
    print('Done\n')

    print('Saving feature importance plots')
    plt.figure()
    feat_imp_1, feat_imp_2 = plot_utils.plot_feature_imp(train_test_data[2],
                                                         train_test_data[3],
                                                         model_hdl,
                                                         approximate=True)
    feat_imp_1.savefig(training_fig_path + '/feature_importance_HIPE4ML_violin.png',
                       dpi=300, facecolor='white')
    feat_imp_2.savefig(training_fig_path + '/feature_importance_HIPE4ML_bar.png',
                       dpi=300, facecolor='white')
    plt.close()
    print('Done\n')

    efficiency_score_conversion(train_test_data, y_pred_test, filename_dict)

    return train_test_data, y_pred_test, model_hdl
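# --------------------------------------------
# Follow-up sketch (an assumption for illustration): converting the objects
# returned by train_xgboost_model() into a score threshold at a fixed BDT
# efficiency, using the same analysis_utils helpers seen in the other snippets.
train_test_data, y_pred_test, model_hdl = train_xgboost_model(
    signal, background, filename_dict, params, params_range, flag_dict)
efficiency, threshold = analysis_utils.bdt_efficiency_array(
    train_test_data[3], y_pred_test, n_points=1000)
# score cut corresponding to a 90% signal-efficiency working point
score_at_eff90 = analysis_utils.score_from_efficiency_array(
    train_test_data[3], y_pred_test, [0.90])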
# --------------------------------------------
INPUT_MODEL = xgb.XGBClassifier()
MODEL = ModelHandler(INPUT_MODEL)

# hyperparameter optimisation
HYP_RANGES = {
    # defines the maximum depth of a single tree (regularization)
    'max_depth': (5, 15),
    'learning_rate': (0.01, 0.3),  # learning rate
    'n_estimators': (5, 10),  # number of boosting trees
}
MODEL.optimize_params_bayes(DATA, HYP_RANGES, 'roc_auc')

# train and test the model with the updated hyperparameters
MODEL.train_test_model(DATA)
Y_PRED = MODEL.predict(DATA[2])

# calculate the BDT efficiency as a function of the BDT score
EFFICIENCY, THRESHOLD = analysis_utils.bdt_efficiency_array(
    DATA[3], Y_PRED, n_points=10)
# --------------------------------------------
# PLOTTING
# --------------------------------------------
FEATURES_DISTRIBUTIONS_PLOT = plot_utils.plot_distr(
    [SIG_DF, BKG_DF], SIG_DF.columns)
CORRELATION_MATRIX_PLOT = plot_utils.plot_corr([SIG_DF, BKG_DF], SIG_DF.columns)
BDT_OUTPUT_PLOT = plot_utils.plot_output_train_test(MODEL, DATA)
ROC_CURVE_PLOT = plot_utils.plot_roc(DATA[3], Y_PRED)
PRECISION_RECALL_PLOT = plot_utils.plot_precision_recall(DATA[3], Y_PRED)
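# the plot_utils calls above only build the matplotlib figures; a minimal
# sketch (an assumption, with hypothetical file names) to persist the two
# figure-returning plots, matching the savefig pattern of the other snippets:
ROC_CURVE_PLOT.savefig('roc_curve.pdf')
PRECISION_RECALL_PLOT.savefig('precision_recall.pdf')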
if TRAIN:
    model_hdl.train_test_model(train_test_data)
    model_file_name = str(f'models/{bin}_trained')
    if OPTIMIZE:
        model_file_name = str(f'models/{bin}_optimized_trained')
    model_hdl.dump_model_handler(model_file_name)
else:
    if OPTIMIZED:
        model_hdl.load_model_handler(f'models/{bin}_optimized_trained')
    else:
        model_hdl.load_model_handler(f'models/{bin}_trained')

# get predictions for training and test set
test_y_score = model_hdl.predict(train_test_data[2])
train_y_score = model_hdl.predict(train_test_data[0])

# second condition needed because of an issue with the Qt libraries
if MAKE_TRAIN_TEST_PLOT and not MAKE_PRESELECTION_EFFICIENCY:
    if not os.path.isdir(f'{PLOT_DIR}/train_test_out'):
        os.mkdir(f'{PLOT_DIR}/train_test_out')
    plot_utils.plot_output_train_test(model_hdl, train_test_data,
                                      logscale=True, density=True,
                                      labels=leg_labels)
    plt.savefig(f'{PLOT_DIR}/train_test_out/{bin}_out')
    plot_utils.plot_feature_imp(train_test_data[0], train_test_data[1], model_hdl)
model_handler.set_training_columns(COLUMNS)

if OPTIMIZE:
    model_handler.optimize_params_bayes(data, HYPERPARAMS_RANGE, 'roc_auc',
                                        init_points=10, n_iter=10)

model_handler.train_test_model(data)
print("train test model")
print(f'--- model trained and tested in {((time.time() - part_time) / 60):.2f} minutes ---\n')

y_pred = model_handler.predict(data[2])
data[2].insert(0, 'score', y_pred)
eff, tsd = analysis_utils.bdt_efficiency_array(data[3], y_pred, n_points=1000)
score_from_eff_array = analysis_utils.score_from_efficiency_array(
    data[3], y_pred, FIX_EFF_ARRAY)
fixed_eff_array = np.vstack((FIX_EFF_ARRAY, score_from_eff_array))

if SIGMA_MC:
    ml_analysis.MC_sigma_array(data, fixed_eff_array, cclass, ptbin, ctbin, split)

ml_analysis.save_ML_analysis(model_handler, fixed_eff_array,
                             cent_class=cclass,
def get_skimmed_large_data(data_path, cent_classes, pt_bins, ct_bins,
                           training_columns, application_columns, mode, split=''):
    print('\n++++++++++++++++++++++++++++++++++++++++++++++++++')
    print('\nStarting BDT application on large data')

    if mode == 3:
        handlers_path = os.environ['HYPERML_MODELS_3'] + '/handlers'
        efficiencies_path = os.environ['HYPERML_EFFICIENCIES_3']
    if mode == 2:
        handlers_path = os.environ['HYPERML_MODELS_2'] + '/handlers'
        efficiencies_path = os.environ['HYPERML_EFFICIENCIES_2']

    # note: uproot.pandas.iterate and DataFrame.append are uproot3 / pandas<2 APIs
    executor = ThreadPoolExecutor()
    iterator = uproot.pandas.iterate(data_path, 'DataTable',
                                     executor=executor, reportfile=True)

    df_applied = pd.DataFrame()

    for current_file, data in iterator:
        rename_df_columns(data)

        print('current file: {}'.format(current_file))
        print('start entry chunk: {}, stop entry chunk: {}'.format(
            data.index[0], data.index[-1]))

        for cclass in cent_classes:
            for ptbin in zip(pt_bins[:-1], pt_bins[1:]):
                for ctbin in zip(ct_bins[:-1], ct_bins[1:]):
                    info_string = '_{}{}_{}{}_{}{}'.format(
                        cclass[0], cclass[1], ptbin[0], ptbin[1], ctbin[0], ctbin[1])

                    filename_handler = handlers_path + '/model_handler' + info_string + split + '.pkl'
                    filename_efficiencies = efficiencies_path + '/Eff_Score' + info_string + split + '.npy'

                    model_handler = ModelHandler()
                    model_handler.load_model_handler(filename_handler)

                    eff_score_array = np.load(filename_efficiencies)
                    tsd = eff_score_array[1][-1]

                    data_range = f'{ctbin[0]}<ct<{ctbin[1]} and {ptbin[0]}<pt<{ptbin[1]} and {cclass[0]}<=centrality<{cclass[1]}'

                    df_tmp = data.query(data_range)
                    df_tmp.insert(0, 'score',
                                  model_handler.predict(df_tmp[training_columns]))
                    df_tmp = df_tmp.query('score>@tsd')
                    df_tmp = df_tmp.loc[:, application_columns]

                    df_applied = df_applied.append(df_tmp, ignore_index=True, sort=False)

    print(df_applied.info(memory_usage='deep'))
    return df_applied
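# --------------------------------------------
# Usage sketch for get_skimmed_large_data() above (an assumption for
# illustration): bin edges, column names and the input file are hypothetical;
# the HYPERML_* environment variables must point to the directories holding
# the pickled model handlers and efficiency arrays.
skimmed_df = get_skimmed_large_data(
    data_path='DataTable.root',
    cent_classes=[[0, 10], [10, 30]],
    pt_bins=[2, 4, 6],
    ct_bins=[0, 5, 10],
    training_columns=['d_len', 'cos_p'],
    application_columns=['score', 'ct', 'pt', 'centrality'],
    mode=2)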
class Optimiserhipe4mltree:
    # Class Attribute
    species = "optimiser_hipe4mltree"

    def __init__(self, data_param, binmin, binmax, training_var, bkg_sel, hyper_pars):
        self.logger = get_logger()

        # directories
        #self.do_mlprefilter = datap.get("doml_asprefilter", None)
        self.dirmlout = data_param["ml"]["mlout"]
        self.dirmlplot = data_param["ml"]["mlplot"]
        #if self.do_mlprefilter is True:
        #    self.dirmodel = self.dirmodel + "/prefilter"
        #    self.dirmlplot = self.dirmlplot + "/prefilter"
        #if self.do_mlprefilter is False:
        #    self.dirmodel = self.dirmodel + "/analysis"
        #    self.dirmlplot = self.dirmlplot + "/analysis"

        self.inputtreedata = "/Users/lvermunt/cernbox/Analyses/ML/input/hipe4mlTTree/data.root"
        self.inputtreemc = "/Users/lvermunt/cernbox/Analyses/ML/input/hipe4mlTTree/prompt.root"
        self.v_train = None
        self.p_binmin = binmin
        self.p_binmax = binmax

        self.s_selsigml = ""
        self.s_selbkgml = bkg_sel  # e.g. "inv_mass < 1.82 or 1.92 < inv_mass < 2.00"
        self.v_bkgoversigfrac = 3
        self.v_sig = 1
        self.v_bkg = 0
        self.rnd_splt = data_param["ml"]["rnd_splt"]
        self.test_frac = data_param["ml"]["test_frac"]

        self.prompthandler = None
        self.datahandler = None
        self.bkghandler = None
        self.traintestdata = None
        self.ypredtrain_hipe4ml = None
        self.ypredtest_hipe4ml = None
        self.preparesample()

        self.p_hipe4ml_model = None
        self.v_hipe4ml_pars = hyper_pars
        self.load_hipe4mlmodel()

        self.bayesoptconfig_hipe4ml = data_param["hipe4ml"]["hyper_par_opt"]["bayes_opt_config"]
        self.average_method_hipe4ml = data_param["hipe4ml"]["roc_auc_average"]
        self.nfold_hipe4ml = data_param["hipe4ml"]["hyper_par_opt"]["nfolds"]
        self.init_points = data_param["hipe4ml"]["hyper_par_opt"]["initpoints"]
        self.n_iter_hipe4ml = data_param["hipe4ml"]["hyper_par_opt"]["niter"]
        self.njobs_hipe4ml = data_param["hipe4ml"]["hyper_par_opt"]["njobs"]
        self.roc_method_hipe4ml = data_param["hipe4ml"]["roc_auc_approach"]
        self.raw_output_hipe4ml = data_param["hipe4ml"]["raw_output"]
        self.train_test_log_hipe4ml = data_param["hipe4ml"]["train_test_log"]

        self.multiclass_labels = data_param["ml"].get("multiclass_labels", None)

        self.logger.info("Using the following training variables: %s", self.v_train)

    def preparesample(self):
        self.logger.info("Prepare Sample for hipe4ml")
        self.signalhandler = TreeHandler(self.inputtreemc, 'treeMLDplus')
        nsigcand = self.signalhandler.get_n_cand()
        self.datahandler = TreeHandler(self.inputtreedata, 'treeMLDplus')
        self.bkghandler = self.datahandler.get_subset(
            self.s_selbkgml, size=nsigcand * self.v_bkgoversigfrac)
        self.traintestdata = train_test_generator(
            [self.signalhandler, self.bkghandler], [self.v_sig, self.v_bkg],
            test_size=self.test_frac, random_state=self.rnd_splt)

    def load_hipe4mlmodel(self):
        self.logger.info("Loading hipe4ml model")
        self.v_train = self.signalhandler.get_var_names()
        self.v_train.remove('inv_mass')
        self.v_train.remove('pt_cand')
        model_xgboost = xgb.XGBClassifier()
        self.p_hipe4ml_model = ModelHandler(model_xgboost, self.v_train)

    def set_hipe4ml_modelpar(self):
        self.logger.info("Setting hipe4ml hyperparameters")
        self.p_hipe4ml_model.set_model_params(self.v_hipe4ml_pars)

    def do_hipe4mlhyperparopti(self):
        self.logger.info("Optimising hipe4ml hyperparameters (Bayesian)")

        if not (self.average_method_hipe4ml in ['macro', 'weighted'] and
                self.roc_method_hipe4ml in ['ovo', 'ovr']):
            self.logger.fatal("Selected ROC configuration is not valid!")

        if self.average_method_hipe4ml == 'weighted':
            metric = f'roc_auc_{self.roc_method_hipe4ml}_{self.average_method_hipe4ml}'
        else:
            metric = f'roc_auc_{self.roc_method_hipe4ml}'

        hypparsfile = f'{self.dirmlout}/HyperParOpt_pT_{self.p_binmin}_{self.p_binmax}.txt'
        outfilehyppars = open(hypparsfile, 'wt')
        sys.stdout = outfilehyppars
        self.p_hipe4ml_model.optimize_params_bayes(self.traintestdata,
                                                   self.bayesoptconfig_hipe4ml,
                                                   metric, self.nfold_hipe4ml,
                                                   self.init_points,
                                                   self.n_iter_hipe4ml,
                                                   self.njobs_hipe4ml)
        outfilehyppars.close()
        sys.stdout = sys.__stdout__
        self.logger.info("Performing hyper-parameters optimisation: Done!")

    def do_hipe4mltrain(self):
        self.logger.info("Training + testing hipe4ml model")
        t0 = time.time()

        self.p_hipe4ml_model.train_test_model(self.traintestdata,
                                              self.average_method_hipe4ml,
                                              self.roc_method_hipe4ml)
        self.ypredtrain_hipe4ml = self.p_hipe4ml_model.predict(
            self.traintestdata[0], self.raw_output_hipe4ml)
        self.ypredtest_hipe4ml = self.p_hipe4ml_model.predict(
            self.traintestdata[2], self.raw_output_hipe4ml)

        modelhandlerfile = f'{self.dirmlout}/ModelHandler_pT_{self.p_binmin}_{self.p_binmax}.pkl'
        self.p_hipe4ml_model.dump_model_handler(modelhandlerfile)
        modelfile = f'{self.dirmlout}/ModelHandler_pT_{self.p_binmin}_{self.p_binmax}.model'
        self.p_hipe4ml_model.dump_original_model(modelfile)

        self.logger.info("Training + testing hipe4ml: Done!")
        self.logger.info("Time elapsed = %.3f", time.time() - t0)

    def do_hipe4mlplot(self):
        self.logger.info("Plotting hipe4ml model")

        leglabels = ["Background", "Prompt signal"]
        outputlabels = ["Bkg", "SigPrompt"]
        # _____________________________________________
        plot_utils.plot_distr([self.bkghandler, self.signalhandler],
                              self.v_train, 100, leglabels)
        plt.subplots_adjust(left=0.06, bottom=0.06, right=0.99, top=0.96,
                            hspace=0.55, wspace=0.55)
        figname = f'{self.dirmlplot}/DistributionsAll_pT_{self.p_binmin}_{self.p_binmax}.pdf'
        plt.savefig(figname)
        plt.close('all')
        # _____________________________________________
        corrmatrixfig = plot_utils.plot_corr([self.bkghandler, self.signalhandler],
                                             self.v_train, leglabels)
        for figg, labb in zip(corrmatrixfig, outputlabels):
            plt.figure(figg.number)
            plt.subplots_adjust(left=0.2, bottom=0.25, right=0.95, top=0.9)
            figname = f'{self.dirmlplot}/CorrMatrix{labb}_pT_{self.p_binmin}_{self.p_binmax}.pdf'
            figg.savefig(figname)
        # _____________________________________________
        plt.rcParams["figure.figsize"] = (10, 7)
        mloutputfig = plot_utils.plot_output_train_test(
            self.p_hipe4ml_model, self.traintestdata, 80,
            self.raw_output_hipe4ml, leglabels,
            self.train_test_log_hipe4ml, density=True)
        figname = f'{self.dirmlplot}/MLOutputDistr_pT_{self.p_binmin}_{self.p_binmax}.pdf'
        mloutputfig.savefig(figname)
        # _____________________________________________
        plt.rcParams["figure.figsize"] = (10, 9)
        roccurvefig = plot_utils.plot_roc(self.traintestdata[3],
                                          self.ypredtest_hipe4ml, None, leglabels,
                                          self.average_method_hipe4ml,
                                          self.roc_method_hipe4ml)
        figname = f'{self.dirmlplot}/ROCCurveAll_pT_{self.p_binmin}_{self.p_binmax}.pdf'
        roccurvefig.savefig(figname)
        # _____________________________________________
        plt.rcParams["figure.figsize"] = (10, 9)
        roccurvettfig = plot_utils.plot_roc_train_test(
            self.traintestdata[3], self.ypredtest_hipe4ml,
            self.traintestdata[1], self.ypredtrain_hipe4ml, None, leglabels,
            self.average_method_hipe4ml, self.roc_method_hipe4ml)
        figname = f'{self.dirmlplot}/ROCCurveTrainTest_pT_{self.p_binmin}_{self.p_binmax}.pdf'
        roccurvettfig.savefig(figname)
        # _____________________________________________
        precisionrecallfig = plot_utils.plot_precision_recall(
            self.traintestdata[3], self.ypredtest_hipe4ml, leglabels)
        figname = f'{self.dirmlplot}/PrecisionRecallAll_pT_{self.p_binmin}_{self.p_binmax}.pdf'
        precisionrecallfig.savefig(figname)
        # _____________________________________________
        plt.rcParams["figure.figsize"] = (12, 7)
        featuresimportancefig = plot_utils.plot_feature_imp(
            self.traintestdata[2][self.v_train], self.traintestdata[3],
            self.p_hipe4ml_model, leglabels)
        for i, fig in enumerate(featuresimportancefig):
            figname = (f'{self.dirmlplot}/FeatureImportanceOpt{i}_'
                       f'pT_{self.p_binmin}_{self.p_binmax}.pdf')
            fig.savefig(figname)
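# --------------------------------------------
# Driver sketch (an assumption for illustration): `data_param_example` stands
# in for the real analysis configuration dict whose keys are read in __init__;
# the constructor already runs preparesample() and load_hipe4mlmodel(), so the
# remaining calls follow the class API above.
optimiser = Optimiserhipe4mltree(data_param_example, binmin=2, binmax=4,
                                 training_var=None,
                                 bkg_sel="inv_mass < 1.82 or 1.92 < inv_mass < 2.00",
                                 hyper_pars={'max_depth': 3, 'n_estimators': 500})
optimiser.set_hipe4ml_modelpar()   # or optimiser.do_hipe4mlhyperparopti()
optimiser.do_hipe4mltrain()
optimiser.do_hipe4mlplot()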