# imports this function relies on (sys, pickle, numpy, xgboost, matplotlib,
# and the hipe4ml ModelHandler / plot_utils helpers)
import sys
import pickle

import numpy as np
import xgboost as xgb
import matplotlib.pyplot as plt

from hipe4ml import plot_utils
from hipe4ml.model_handler import ModelHandler


def train_test(inputCfg, PtBin, OutPutDirPt, TrainTestData, iBin): #pylint: disable=too-many-statements, too-many-branches
    '''
    function for model training and testing
    '''
    n_classes = len(np.unique(TrainTestData[3]))
    modelClf = xgb.XGBClassifier(use_label_encoder=False)
    TrainCols = inputCfg['ml']['training_columns']
    HyperPars = inputCfg['ml']['hyper_par'][iBin]
    if not isinstance(TrainCols, list):
        print('\033[91mERROR: training columns must be defined!\033[0m')
        sys.exit()
    if not isinstance(HyperPars, dict):
        print('\033[91mERROR: hyper-parameters must be defined or be an empty dict!\033[0m')
        sys.exit()
    ModelHandl = ModelHandler(modelClf, TrainCols, HyperPars)

    # hyperparams optimization
    if inputCfg['ml']['hyper_par_opt']['do_hyp_opt']:
        print('Perform bayesian optimization')

        BayesOptConfig = inputCfg['ml']['hyper_par_opt']['bayes_opt_config']
        if not isinstance(BayesOptConfig, dict):
            print('\033[91mERROR: bayes_opt_config must be defined!\033[0m')
            sys.exit()

        if n_classes > 2:
            average_method = inputCfg['ml']['roc_auc_average']
            roc_method = inputCfg['ml']['roc_auc_approach']
            if not (average_method in ['macro', 'weighted'] and roc_method in ['ovo', 'ovr']):
                print('\033[91mERROR: selected ROC configuration is not valid!\033[0m')
                sys.exit()

            if average_method == 'weighted':
                metric = f'roc_auc_{roc_method}_{average_method}'
            else:
                metric = f'roc_auc_{roc_method}'
        else:
            metric = 'roc_auc'

        print('Performing hyper-parameters optimisation: ...', end='\r')
        OutFileHypPars = open(f'{OutPutDirPt}/HyperParOpt_pT_{PtBin[0]}_{PtBin[1]}.txt', 'wt')
        sys.stdout = OutFileHypPars
        ModelHandl.optimize_params_bayes(TrainTestData, BayesOptConfig, metric,
                                         nfold=inputCfg['ml']['hyper_par_opt']['nfolds'],
                                         init_points=inputCfg['ml']['hyper_par_opt']['initpoints'],
                                         n_iter=inputCfg['ml']['hyper_par_opt']['niter'],
                                         njobs=inputCfg['ml']['hyper_par_opt']['njobs'])
        OutFileHypPars.close()
        sys.stdout = sys.__stdout__
        print('Performing hyper-parameters optimisation: Done!')
        print(f'Output saved in {OutPutDirPt}/HyperParOpt_pT_{PtBin[0]}_{PtBin[1]}.txt')
        print(f'Best hyper-parameters:\n{ModelHandl.get_model_params()}')
    else:
        ModelHandl.set_model_params(HyperPars)

    # train and test the model with the updated hyper-parameters
    yPredTest = ModelHandl.train_test_model(TrainTestData, True, output_margin=inputCfg['ml']['raw_output'],
                                            average=inputCfg['ml']['roc_auc_average'],
                                            multi_class_opt=inputCfg['ml']['roc_auc_approach'])
    yPredTrain = ModelHandl.predict(TrainTestData[0], inputCfg['ml']['raw_output'])

    # save model handler in pickle
    ModelHandl.dump_model_handler(f'{OutPutDirPt}/ModelHandler_pT_{PtBin[0]}_{PtBin[1]}.pickle')
    ModelHandl.dump_original_model(f'{OutPutDirPt}/XGBoostModel_pT_{PtBin[0]}_{PtBin[1]}.model', True)

    # plots
    LegLabels = [inputCfg['output']['leg_labels']['Bkg'],
                 inputCfg['output']['leg_labels']['Prompt']]
    if inputCfg['output']['leg_labels']['FD'] is not None:
        LegLabels.append(inputCfg['output']['leg_labels']['FD'])
    OutputLabels = [inputCfg['output']['out_labels']['Bkg'],
                    inputCfg['output']['out_labels']['Prompt']]
    if inputCfg['output']['out_labels']['FD'] is not None:
        OutputLabels.append(inputCfg['output']['out_labels']['FD'])
    #_____________________________________________
    plt.rcParams["figure.figsize"] = (10, 7)
    MLOutputFig = plot_utils.plot_output_train_test(ModelHandl, TrainTestData, 80, inputCfg['ml']['raw_output'],
                                                    LegLabels, inputCfg['plots']['train_test_log'], density=True)
    if n_classes > 2:
        for Fig, Lab in zip(MLOutputFig, OutputLabels):
            Fig.savefig(f'{OutPutDirPt}/MLOutputDistr{Lab}_pT_{PtBin[0]}_{PtBin[1]}.pdf')
    else:
        MLOutputFig.savefig(f'{OutPutDirPt}/MLOutputDistr_pT_{PtBin[0]}_{PtBin[1]}.pdf')
    #_____________________________________________
    plt.rcParams["figure.figsize"] = (10, 9)
    ROCCurveFig = plot_utils.plot_roc(TrainTestData[3], yPredTest, None, LegLabels,
                                      inputCfg['ml']['roc_auc_average'], inputCfg['ml']['roc_auc_approach'])
    ROCCurveFig.savefig(f'{OutPutDirPt}/ROCCurveAll_pT_{PtBin[0]}_{PtBin[1]}.pdf')
    pickle.dump(ROCCurveFig, open(f'{OutPutDirPt}/ROCCurveAll_pT_{PtBin[0]}_{PtBin[1]}.pkl', 'wb'))
    #_____________________________________________
    plt.rcParams["figure.figsize"] = (10, 9)
    ROCCurveTTFig = plot_utils.plot_roc_train_test(TrainTestData[3], yPredTest, TrainTestData[1], yPredTrain,
                                                   None, LegLabels, inputCfg['ml']['roc_auc_average'],
                                                   inputCfg['ml']['roc_auc_approach'])
    ROCCurveTTFig.savefig(f'{OutPutDirPt}/ROCCurveTrainTest_pT_{PtBin[0]}_{PtBin[1]}.pdf')
    #_____________________________________________
    PrecisionRecallFig = plot_utils.plot_precision_recall(TrainTestData[3], yPredTest, LegLabels)
    PrecisionRecallFig.savefig(f'{OutPutDirPt}/PrecisionRecallAll_pT_{PtBin[0]}_{PtBin[1]}.pdf')
    #_____________________________________________
    plt.rcParams["figure.figsize"] = (12, 7)
    FeaturesImportanceFig = plot_utils.plot_feature_imp(TrainTestData[2][TrainCols], TrainTestData[3],
                                                        ModelHandl, LegLabels)
    n_plot = n_classes if n_classes > 2 else 1
    for iFig, Fig in enumerate(FeaturesImportanceFig):
        if iFig < n_plot:
            label = OutputLabels[iFig] if n_classes > 2 else ''
            Fig.savefig(f'{OutPutDirPt}/FeatureImportance{label}_pT_{PtBin[0]}_{PtBin[1]}.pdf')
        else:
            Fig.savefig(f'{OutPutDirPt}/FeatureImportanceAll_pT_{PtBin[0]}_{PtBin[1]}.pdf')

    return ModelHandl
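# Usage sketch (an addition, not part of the original function): assemble
# TrainTestData in the hipe4ml convention [train_set, y_train, test_set, y_test]
# and call train_test. The file names ('config_training.yml', '*.parquet'),
# the binary Bkg/Prompt labelling, and the pT bin values are hypothetical.
import pandas as pd
import yaml
from sklearn.model_selection import train_test_split

with open('config_training.yml', 'r') as ymlCfgFile:  # hypothetical config file
    inputCfg = yaml.safe_load(ymlCfgFile)

PromptDf = pd.read_parquet('Prompt.parquet')  # hypothetical input samples
BkgDf = pd.read_parquet('Bkg.parquet')
TotDf = pd.concat([BkgDf, PromptDf], sort=True)
Labels = [0] * len(BkgDf) + [1] * len(PromptDf)  # binary case: Bkg=0, Prompt=1

TrainSet, TestSet, yTrain, yTest = train_test_split(TotDf, Labels, test_size=0.2,
                                                    random_state=42)
TrainTestData = [TrainSet, yTrain, TestSet, yTest]

ModelHandl = train_test(inputCfg, [2, 4], '.', TrainTestData, iBin=0)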
def test_plot_roc():
    """ Test the roc curve plot """
    assert isinstance(plot_utils.plot_roc(DATA[3], Y_PRED), matplotlib.figure.Figure)
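# A companion test in the same style (an addition, not part of the original):
# plot_precision_recall also returns a matplotlib Figure (it is saved with
# .savefig elsewhere in this section), so it can be checked against the same
# module-level DATA and Y_PRED fixtures assumed above.
def test_plot_precision_recall():
    """ Test the precision-recall curve plot """
    assert isinstance(plot_utils.plot_precision_recall(DATA[3], Y_PRED),
                      matplotlib.figure.Figure)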
HYP_RANGES = {
    # defines the maximum depth of a single tree (regularization)
    'max_depth': (5, 15),
    # 'learning_rate': (0.01, 0.3),  # learning rate
    'n_estimators': (5, 10),  # number of boosting trees
}
MODEL.optimize_params_bayes(DATA, HYP_RANGES, 'roc_auc')

# train and test the model with the updated hyperparameters
MODEL.train_test_model(DATA)
Y_PRED = MODEL.predict(DATA[2])

# Calculate the BDT efficiency as a function of the BDT score
EFFICIENCY, THRESHOLD = analysis_utils.bdt_efficiency_array(DATA[3], Y_PRED, n_points=10)

# --------------------------------------------
# PLOTTING
# --------------------------------------------
FEATURES_DISTRIBUTIONS_PLOT = plot_utils.plot_distr([SIG_DF, BKG_DF], SIG_DF.columns)
CORRELATION_MATRIX_PLOT = plot_utils.plot_corr([SIG_DF, BKG_DF], SIG_DF.columns)
BDT_OUTPUT_PLOT = plot_utils.plot_output_train_test(MODEL, DATA)
ROC_CURVE_PLOT = plot_utils.plot_roc(DATA[3], Y_PRED)
PRECISION_RECALL_PLOT = plot_utils.plot_precision_recall(DATA[3], Y_PRED)
BDT_EFFICIENCY_PLOT = plot_utils.plot_bdt_eff(THRESHOLD, EFFICIENCY)
FEATURES_IMPORTANCE = plot_utils.plot_feature_imp(TEST_SET, Y_TEST, MODEL)
plt.show()
# ---------------------------------------------
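# Follow-up sketch (an addition, not part of the original): pick the BDT score
# cut whose efficiency is closest to a chosen target, using the EFFICIENCY and
# THRESHOLD arrays computed above. TARGET_EFF is a hypothetical working point.
import numpy as np

TARGET_EFF = 0.90
I_CUT = int(np.argmin(np.abs(EFFICIENCY - TARGET_EFF)))
SCORE_CUT = THRESHOLD[I_CUT]
print(f'score cut {SCORE_CUT:.3f} -> signal efficiency {EFFICIENCY[I_CUT]:.3f}')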
def train_test(inputCfg, PtMin, PtMax, OutPutDirPt, TrainTestData):
    '''
    function for model training and testing
    '''
    modelClf = xgb.XGBClassifier()
    TrainCols = inputCfg['ml']['training_columns']
    HyperPars = inputCfg['ml']['hyper_par']
    if not isinstance(TrainCols, list):
        print('ERROR: training columns must be defined!')
        sys.exit()
    if not isinstance(HyperPars, dict):
        print('ERROR: hyper-parameters must be defined or be an empty dict!')
        sys.exit()
    ModelHandl = ModelHandler(modelClf, TrainCols, HyperPars)

    # hyperparams optimization --> not working with multi-class classification at the moment
    #HypRanges = {
    #    # defines the maximum depth of a single tree (regularization)
    #    'max_depth': (1, 30),
    #    'learning_rate': (0.01, 0.3),  # learning rate
    #    'n_estimators': (50, 1000)  # number of boosting trees
    #}
    #ModelHandl.optimize_params_bayes(TrainTestData, HypRanges, None)

    # train and test the model with the updated hyperparameters
    ModelHandl.train_test_model(TrainTestData)
    yPredTest = ModelHandl.predict(TrainTestData[2], inputCfg['ml']['raw_output'], True)

    # save model handler in pickle
    ModelHandl.dump_model_handler(f'{OutPutDirPt}/ModelHandler_pT_{PtMin}_{PtMax}.pickle')

    # plots
    LegLabels = inputCfg['output']['leg_labels']
    OutputLabels = inputCfg['output']['out_labels']
    #_____________________________________________
    plt.rcParams["figure.figsize"] = (10, 7)
    MLOutputFig = plot_utils.plot_output_train_test(ModelHandl, TrainTestData, 80, inputCfg['ml']['raw_output'],
                                                    LegLabels, True, inputCfg['plots']['train_test_log'],
                                                    density=True)
    for Fig, Lab in zip(MLOutputFig, OutputLabels):
        Fig.savefig(f'{OutPutDirPt}/MLOutputDistr{Lab}_pT_{PtMin}_{PtMax}.pdf')
    #_____________________________________________
    plt.rcParams["figure.figsize"] = (8, 7)
    ROCCurveFig = plot_utils.plot_roc(TrainTestData[3], yPredTest, LegLabels)
    ROCCurveFig.savefig(f'{OutPutDirPt}/ROCCurveAll_pT_{PtMin}_{PtMax}.pdf')
    #_____________________________________________
    PrecisionRecallFig = plot_utils.plot_precision_recall(TrainTestData[3], yPredTest, LegLabels)
    PrecisionRecallFig.savefig(f'{OutPutDirPt}/PrecisionRecallAll_pT_{PtMin}_{PtMax}.pdf')
    #_____________________________________________
    plt.rcParams["figure.figsize"] = (12, 7)
    FeaturesImportanceFig = plot_utils.plot_feature_imp(TrainTestData[2][TrainCols], TrainTestData[3], ModelHandl)
    for iFig, Fig in enumerate(FeaturesImportanceFig):
        if iFig < 3:
            Fig.savefig(f'{OutPutDirPt}/FeatureImportance{OutputLabels[iFig]}_pT_{PtMin}_{PtMax}.pdf')
        else:
            Fig.savefig(f'{OutPutDirPt}/FeatureImportanceAll_pT_{PtMin}_{PtMax}.pdf')

    return ModelHandl
def do_hipe4mlplot(self):
    self.logger.info("Plotting hipe4ml model")

    leglabels = ["Background", "Prompt signal"]
    outputlabels = ["Bkg", "SigPrompt"]

    # _____________________________________________
    plot_utils.plot_distr([self.bkghandler, self.signalhandler],
                          self.v_train, 100, leglabels)
    plt.subplots_adjust(left=0.06, bottom=0.06, right=0.99, top=0.96,
                        hspace=0.55, wspace=0.55)
    figname = f'{self.dirmlplot}/DistributionsAll_pT_{self.p_binmin}_{self.p_binmax}.pdf'
    plt.savefig(figname)
    plt.close('all')

    # _____________________________________________
    corrmatrixfig = plot_utils.plot_corr([self.bkghandler, self.signalhandler],
                                         self.v_train, leglabels)
    for figg, labb in zip(corrmatrixfig, outputlabels):
        plt.figure(figg.number)
        plt.subplots_adjust(left=0.2, bottom=0.25, right=0.95, top=0.9)
        figname = f'{self.dirmlplot}/CorrMatrix{labb}_pT_{self.p_binmin}_{self.p_binmax}.pdf'
        figg.savefig(figname)

    # _____________________________________________
    plt.rcParams["figure.figsize"] = (10, 7)
    mloutputfig = plot_utils.plot_output_train_test(
        self.p_hipe4ml_model, self.traintestdata, 80, self.raw_output_hipe4ml,
        leglabels, self.train_test_log_hipe4ml, density=True)
    figname = f'{self.dirmlplot}/MLOutputDistr_pT_{self.p_binmin}_{self.p_binmax}.pdf'
    mloutputfig.savefig(figname)

    # _____________________________________________
    plt.rcParams["figure.figsize"] = (10, 9)
    roccurvefig = plot_utils.plot_roc(
        self.traintestdata[3], self.ypredtest_hipe4ml, None, leglabels,
        self.average_method_hipe4ml, self.roc_method_hipe4ml)
    figname = f'{self.dirmlplot}/ROCCurveAll_pT_{self.p_binmin}_{self.p_binmax}.pdf'
    roccurvefig.savefig(figname)

    # _____________________________________________
    plt.rcParams["figure.figsize"] = (10, 9)
    roccurvettfig = plot_utils.plot_roc_train_test(
        self.traintestdata[3], self.ypredtest_hipe4ml,
        self.traintestdata[1], self.ypredtrain_hipe4ml, None, leglabels,
        self.average_method_hipe4ml, self.roc_method_hipe4ml)
    figname = f'{self.dirmlplot}/ROCCurveTrainTest_pT_{self.p_binmin}_{self.p_binmax}.pdf'
    roccurvettfig.savefig(figname)

    # _____________________________________________
    precisionrecallfig = plot_utils.plot_precision_recall(
        self.traintestdata[3], self.ypredtest_hipe4ml, leglabels)
    figname = f'{self.dirmlplot}/PrecisionRecallAll_pT_{self.p_binmin}_{self.p_binmax}.pdf'
    precisionrecallfig.savefig(figname)

    # _____________________________________________
    plt.rcParams["figure.figsize"] = (12, 7)
    featuresimportancefig = plot_utils.plot_feature_imp(
        self.traintestdata[2][self.v_train], self.traintestdata[3],
        self.p_hipe4ml_model, leglabels)
    for i, fig in enumerate(featuresimportancefig):
        figname = (f'{self.dirmlplot}/FeatureImportanceOpt{i}_'
                   f'pT_{self.p_binmin}_{self.p_binmax}.pdf')
        fig.savefig(figname)
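# Usage sketch (an addition, not part of the original method): a driver would
# call the plotting step after training and then release the remaining open
# figures, mirroring the plt.close('all') after plot_distr above. 'optimiser'
# is a hypothetical instance name for the class this method belongs to.
optimiser.do_hipe4mlplot()
plt.close('all')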