# NOTE(review): this physical line is whitespace-mangled — many logical statements were
# collapsed onto one line (extraction artifact), so it is not valid Python as written.
# Do not reformat without the preceding lines in view: the opening tokens
# "sample, var, n)" close a logger.info(...) call that begins BEFORE this chunk, and the
# correct indentation of the `else:` branch and the `for` loop cannot be recovered here.
# Logical flow, read left to right:
#   1. `else:` branch of a dataset-availability check: logs "No datasets available ..."
#      with (sample, var, n), logs "ABORTING", and exits via sys.exit().
#   2. Happy path: builds a Loader and a RatioEstimator, then loads the trained CARL
#      model from 'models/<sample>/<var>_carl_<n>'.
#   3. For each split in ['train', 'val']: evaluates the likelihood ratio r_hat on the
#      split's X0 array, forms per-event weights w = 1. / r_hat, and hands X0/X1 plus the
#      weights to loading.load_result(...) with plot=True to produce reweighting plots.
#      (Direction of r_hat — p1/p0 vs p0/p1 — is not established by this chunk; the
#      sibling fragment below questions the same inversion. TODO confirm against the
#      RatioEstimator implementation.)
#   4. Finally calls carl.evaluate_performance(...) on the validation X/y .npy arrays.
sample, var, n) else: logger.info( " No datasets available for evaluation of model trained with datasets: %s , generator variation: %s with %s events.", sample, var, n) logger.info("ABORTING") sys.exit() loading = Loader() carl = RatioEstimator() carl.load('models/' + sample + '/' + var + '_carl_' + str(n)) evaluate = ['train', 'val'] for i in evaluate: r_hat, _ = carl.evaluate(x='data/' + sample + '/' + var + '/X0_' + i + '_' + str(n) + '.npy') w = 1. / r_hat loading.load_result( x0='data/' + sample + '/' + var + '/X0_' + i + '_' + str(n) + '.npy', x1='data/' + sample + '/' + var + '/X1_' + i + '_' + str(n) + '.npy', weights=w, label=i, do=sample, var=var, plot=True, n=n, path=p, ) carl.evaluate_performance( x='data/' + sample + '/' + var + '/X_val_' + str(n) + '.npy', y='data/' + sample + '/' + var + '/y_val_' + str(n) + '.npy')
# NOTE(review): another whitespace-mangled fragment — this is the BODY of a `for i in ...`
# loop whose header lies outside this chunk (it references the loop variable `i`). Do not
# reformat from here: whether the trailing carl.evaluate_performance(...) call sat inside
# or after the loop in the original cannot be recovered from this collapsed line.
# Also note: on this single physical line, everything after the first `#` (the inline
# "I thought r_hat = p_{1}(x) / p_{0}(x) ???" remark) is commented out as shown — further
# evidence the original line breaks were lost.
# Logical flow, read left to right:
#   1. carl.evaluate(...) on 'data/<global_name>/X0_<i>_<n>.npy' returns (r_hat, s_hat);
#      both are printed for debugging.
#   2. Per-event weights w = 1. / r_hat. The original author's inline comment already
#      questions the ratio direction (p1/p0 vs p0/p1) — unresolved here; TODO confirm
#      against the RatioEstimator implementation before trusting the inversion.
#   3. loading.load_result(...) receives the X0/X1 feature arrays, the w0/w1 per-event
#      weight arrays, pickled metaData, the computed weights, the feature list, and the
#      ROC-plotting switches from `opts`; several keyword args (weightFeature, TreeName,
#      pathA, pathB) are commented out in the original.
#   4. carl.evaluate_performance(...) scores the model on the validation X/y .npy arrays.
r_hat, s_hat = carl.evaluate(x='data/' + global_name + '/X0_' + i + '_' + str(n) + '.npy') print("s_hat = {}".format(s_hat)) print("r_hat = {}".format(r_hat)) w = 1. / r_hat # I thought r_hat = p_{1}(x) / p_{0}(x) ??? print("w = {}".format(w)) print("<evaluate.py::__init__>:: Loading Result for {}".format(i)) loading.load_result( x0='data/' + global_name + '/X0_' + i + '_' + str(n) + '.npy', x1='data/' + global_name + '/X1_' + i + '_' + str(n) + '.npy', w0='data/' + global_name + '/w0_' + i + '_' + str(n) + '.npy', w1='data/' + global_name + '/w1_' + i + '_' + str(n) + '.npy', metaData='data/' + global_name + '/metaData_' + str(n) + '.pkl', weights=w, features=features, #weightFeature=weightFeature, label=i, plot=True, nentries=n, #TreeName=treename, #pathA=p+nominal+".root", #pathB=p+variation+".root", global_name=global_name, plot_ROC=opts.plot_ROC, plot_obs_ROC=opts.plot_obs_ROC, ) # Evaluate performance carl.evaluate_performance( x='data/' + global_name + '/X_val_' + str(n) + '.npy', y='data/' + global_name + '/y_val_' + str(n) + '.npy')