def open_plots_pdf(fname):
    close_plots_pdf()
    if isinstance(fname, str):
        # append the .pdf suffix if it is missing
        if fname[-4:] != ".pdf":
            fname = fname + ".pdf"
        utils.info("utils.open_plots_pdf()", f"opening pdf file {fname}")
        glob.plot_file = PdfPages(fname)
    else:
        raise ValueError(
            f"utils.open_plots_pdf(): filename of type {type(fname)} provided where string expected")
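# Hedged usage sketch (assumes the companion close_plots_pdf() and save_figure()
# helpers used elsewhere in this module): the ".pdf" suffix is appended
# automatically when missing.
#
#     open_plots_pdf("limit_plots")   # opens limit_plots.pdf via PdfPages
#     ...                             # draw figures, then save_figure(fig)
#     close_plots_pdf()               # flush and close the PDF file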
def populate_scan_grid_using_ScaleByL6(BSM_input_dists, new_grid, scan_params, SM=None):
    # populate the scan grid by scaling a single reference BSM input by (Lambda_ref/Lambda)**6
    try:
        lambda_grid_index = new_grid.keys.index("Lambda")
        lambda_list_index = [p.name for p in scan_params].index("Lambda")
        utils.info("populate_scan_grid_using_ScaleByL6()", "successfully found 'Lambda' in param list")
    except ValueError as e:
        raise KeyError("populate_scan_grid_using_ScaleByL6(): no parameter 'Lambda' found in param list") from e
    n_dim = len(new_grid.keys)
    if n_dim == 1:
        if len(BSM_input_dists) > 1:
            raise RuntimeError("populate_scan_grid_using_ScaleByL6(): don't know which input to scale Lambda from...")
        L_ref, dist_ref = 0., None
        for L, i in BSM_input_dists.items():
            L_ref = float(L[0])
            dist_ref = i
        # remove the SM component so that only the BSM part is scaled
        if dist_ref.includes_SM:
            if SM is None:
                raise ValueError("populate_scan_grid_using_ScaleByL6(): need to subtract SM from BSM input but none provided")
            dist_ref.subtract_values(SM.values)
            dist_ref.subtract_cov(SM.cov)
        L_values = new_grid.axes[0]
        for idx in range(len(L_values)):
            sf = (L_ref / L_values[idx]) ** 6
            new_grid.values[idx] = dist_ref * sf
    elif n_dim == 2:
        other_param_grid_index = 1 - lambda_grid_index
        other_param_list_index = 1 - lambda_list_index
        other_param_key = new_grid.keys[other_param_grid_index]
        for other_param_axis_idx in range(len(new_grid.axes[other_param_grid_index])):
            other_param_value = new_grid.axes[other_param_grid_index][other_param_axis_idx]
            # find the unique reference input at this value of the other parameter
            lambda_ref, dist_ref = None, None
            for key, item in BSM_input_dists.items():
                if type(other_param_value)(key[other_param_list_index]) != other_param_value:
                    continue
                if dist_ref is not None:
                    raise RuntimeError("populate_scan_grid_using_ScaleByL6(): don't know which input to scale Lambda from...")
                lambda_ref = np.float64(key[lambda_list_index])
                dist_ref = item
            if dist_ref.includes_SM:
                if SM is None:
                    raise ValueError("populate_scan_grid_using_ScaleByL6(): need to subtract SM from BSM input but none provided")
                dist_ref.subtract_values(SM.values)
                dist_ref.subtract_cov(SM.cov)
            for grid_idx_lambda in range(len(new_grid.axes[lambda_grid_index])):
                lambda_value = np.float64(new_grid.axes[lambda_grid_index][grid_idx_lambda])
                sf = (lambda_ref / lambda_value) ** 6
                if lambda_grid_index == 0:
                    idx_x, idx_y = grid_idx_lambda, other_param_axis_idx
                else:
                    idx_x, idx_y = other_param_axis_idx, grid_idx_lambda
                new_grid.values[idx_x][idx_y] = dist_ref * sf
    elif n_dim > 2:
        raise NotImplementedError(f"populate_scan_grid_using_ScaleByL6(): only 1D and 2D scans implemented, {n_dim}D asked")
    return new_grid
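# Minimal numerical sketch (not used by the pipeline) of the scale factor the
# function above applies: a reference prediction generated at Lambda_ref maps to
# any other Lambda by multiplying the SM-subtracted values by (Lambda_ref/Lambda)**6.
# The bin values and Lambda choices below are hypothetical.
def _demo_scale_by_L6():
    import numpy as np
    ref_values = np.array([4.0, 2.0, 1.0])   # hypothetical SM-subtracted bin values at lambda_ref
    lambda_ref, lambda_new = 1000.0, 2000.0  # hypothetical cutoff scales
    sf = (lambda_ref / lambda_new) ** 6      # scale factor; 1/64 for these numbers
    return ref_values * sf                   # predicted bin values at lambda_new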
def load_from_config(self, cfg, **kwargs):
    if "INPUTS" not in cfg:
        utils.error("InputStore.load_from_config()", "No INPUTS in config file. Ignoring.")
        return
    inputs = utils.string_to_object(cfg.get("INPUTS", "Inputs", fallback="[]"))
    self.do_quick_load = glob.custom_store.get("QuickLoadDistributions", False)
    self.do_quick_save = glob.custom_store.get("QuickStoreDistributions", False)
    look_for_params = kwargs.get("look_for_params", False)
    for key in inputs:
        fname = cfg.get("INPUTS", f"{key}.file.path", fallback="")
        ftype = cfg.get("INPUTS", f"{key}.file.type", fallback="unknown")
        save_fname = "." + fname.replace("/", "_") + ".pickle"
        # try the quick-load pickle cache first, otherwise dispatch on file type
        if self.do_quick_load and self.quick_load(
                save_fname,
                extract=utils.string_to_object(cfg.get("INPUTS", f"{key}.extract", fallback="[]"))):
            utils.info(
                "InputStore.load_from_config()",
                f"File {save_fname} quick-loaded in the place of {ftype} file {fname}")
        else:
            if ftype == "hepdata":
                self.load_from_hepdata(fname, cfg=cfg, key=key, save=save_fname, look_for_params=look_for_params)
            elif ftype == "yoda":
                self.load_from_yoda(fname, cfg=cfg, key=key, save=save_fname, look_for_params=look_for_params)
            elif ftype == "pickle":
                self.load_from_pickle(fname, cfg=cfg, key=key, save=save_fname, look_for_params=look_for_params)
            else:
                raise ValueError(
                    f"InputStore.load_from_config(): Input {key} file {fname} has an unrecognised type {ftype}")
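# Hedged sketch of the [INPUTS] config block this reader expects (option names
# inferred from the cfg.get() calls above; paths and keys are placeholders):
#
#     [INPUTS]
#     Inputs = ["meas"]
#     meas.file.path = data/measurement.yoda
#     meas.file.type = yoda
#     meas.extract = []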
def do_general_setup(cfg_name=None):
    if cfg_name is None:
        cfg_name = glob.custom_store["config name"]
    do_show_plots = glob.custom_store.get("do show plots", True)
    plots_fname = glob.custom_store.get("plots filename", None)
    tag = glob.custom_store.get("tag", None)
    #
    # read config file
    #
    utils.info("do_general_setup()", f"Loading config file {cfg_name}")
    config.read_config(cfg_name, update_fallback=True)
    #
    # get input measured / expected / SM / BSM distributions
    #
    utils.info("do_general_setup()", "Creating meas, SM and BSM distributions")
    meas_dist = inputs.get_meas_dist_from_input_store(name="get_limits.py::meas")
    SM_model_dist = inputs.get_SM_dist_from_input_store(name="get_limits.py::SM::model", key="theoretical")
    SM_exp_dist = inputs.get_SM_dist_from_input_store(name="get_limits.py::SM::pred", key="experimental")
    SM_pred_dist = Distribution(SM_exp_dist)
    SM_pred_dist.cov = SM_pred_dist.cov + SM_model_dist.cov
    BSM_input_dists = inputs.get_BSM_dists_from_input_store(prefix="get_limits.py::BSM::")
    glob.custom_store["meas_dist"] = meas_dist
    glob.custom_store["SM_model_dist"] = SM_model_dist
    glob.custom_store["SM_exp_dist"] = SM_exp_dist
    glob.custom_store["SM_pred_dist"] = SM_pred_dist
    glob.custom_store["BSM_input_dists"] = BSM_input_dists
    #
    # generate model distributions across BSM grid
    #
    utils.info("do_general_setup()", "Populating predictions across param grid")
    BSM_scan_dists = prediction.generate_BSM_predictions(BSM_input_dists, SM=SM_model_dist)
    utils.info("do_general_setup()", "Adding SM to BSM")

    def add_to_dist(x, **kwargs):
        x.add(kwargs["SM_model_dist"])

    do_for_all_in_tensor(BSM_scan_dists.values, add_to_dist, SM_model_dist=SM_model_dist)
    glob.custom_store["BSM_scan_dists"] = BSM_scan_dists
    #
    # configure confidence level generators
    #
    CL_generator = CLGeneratorGrid(glob.scan_params, generate=True)
    CL_generator.set_distributions(BSM_scan_dists)
    CL_generator.set_SM_distribution(SM_model_dist)
    glob.CL_generator = CL_generator
def parse_command_line_arguments(*argv, **kwargs):
    utils.info("parse_command_line_arguments()", "Parsing arguments")
    try:
        opts, rest = getopt.getopt(
            sys.argv[1:], "",
            [f"{k}=" for k in kwargs] + ["save=", "tag=", "show="] + [f"{a}=" for a in argv])
    except getopt.GetoptError as err:
        utils.error(
            "parse_command_line_arguments()",
            "The following error was thrown whilst parsing command-line arguments")
        utils.fatal("parse_command_line_arguments()", err)
    if len(rest) != 1:
        raise ValueError(
            f"parse_command_line_arguments(): expected 1 unlabelled argument where {len(rest)} provided")
    cfg_name = rest[0]
    save_fname, do_show, tag = None, True, None
    ret = {}
    if not utils.is_file(cfg_name):
        raise RuntimeError(f"parse_command_line_arguments(): config file {cfg_name} not found")
    for option, value in opts:
        if option in ["--tag"]:
            tag = str(value)
            utils.info("parse_command_line_arguments()", f"Labelling temporary files using the tag: {tag}")
        if option in ["--save"]:
            save_fname = str(value)
            utils.info("parse_command_line_arguments()", f"Opening plots file {save_fname}")
            plotting.open_plots_pdf(save_fname)
        if option in ["--show"]:
            do_show = utils.string_to_object(value)
            if not isinstance(do_show, bool):
                raise ValueError(
                    f"parse_command_line_arguments(): --show value \"{value}\" could not be cast to a bool")
        # getopt returns long options with leading "--"; strip before matching
        if option.lstrip("-") in argv:
            ret[option.lstrip("-")] = True
    glob.custom_store["config name"] = cfg_name
    glob.custom_store["do show plots"] = do_show
    glob.custom_store["plots filename"] = save_fname
    glob.custom_store["tag"] = tag
    return ret
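# Example invocation (a sketch: config.cfg, plots.pdf and myrun are placeholders):
#
#     python get_limits.py config.cfg --save=plots.pdf --show=True --tag=myrun
#
# The single unlabelled argument is the config file; --save opens a plots PDF,
# --show must parse to a bool, and --tag labels the pickle cache files.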
def print_setup():
    utils.info("print_setup()",
               "Scan params are: " + " vs. ".join([f"{p}" for p in glob.scan_params]))
    utils.info("print_setup()", f"Using {glob.limits_method} method")
    utils.info("print_setup()",
               f"Using test statistic {glob.test_statistic} following the distribution strategy [{glob.test_stat_strategy}]")
    utils.info("print_setup()", f"Limits at confidence level {glob.confidence_level}")
    utils.info("print_setup()",
               "Measured distribution is {}".format(glob.custom_store["meas_dist"]))
    utils.info("print_setup()",
               "SM experimental distribution is {}".format(glob.custom_store["SM_exp_dist"]))
    utils.info("print_setup()",
               "SM theoretical distribution is {}".format(glob.custom_store["SM_model_dist"]))
    utils.info("print_setup()",
               "SM combined distribution is {}".format(glob.custom_store["SM_pred_dist"]))
    for key, item in glob.custom_store["BSM_input_dists"].items():
        utils.info("print_setup()", f"BSM input distribution at point {key} is {item}")
def main():
    #
    # config and setup
    #
    parse_command_line_arguments("hello")
    do_general_setup()
    print_setup()
    num_scan_params = len(glob.scan_params)
    #
    # get SM expected and observed limits
    #
    utils.info("get_limits.py", "Getting expected and observed confidence limits")
    exp_limit = glob.CL_generator.get_limit(glob.custom_store["SM_exp_dist"])
    obs_limit = glob.CL_generator.get_limit(glob.custom_store["meas_dist"])
    if num_scan_params == 1:
        utils.info("get_limits.py",
                   f"Expected {100.*glob.confidence_level:.2f}% confidence limit is {exp_limit}")
        utils.info("get_limits.py",
                   f"Observed {100.*glob.confidence_level:.2f}% confidence limit is {obs_limit}")
    #
    # generate SM toys and get limits
    #
    utils.info("get_limits.py",
               "Throwing toys around the experimental SM expectation and getting limits")
    SM_toy_limits, SM_coverage_grid = get_toy_spread_of_limits()
    n_toys = len(SM_toy_limits)
    if num_scan_params == 1:
        utils.info("get_limits.py",
                   f"Median {100.*glob.confidence_level:.2f}% limit of SM toys is {SM_toy_limits[int(0.5*n_toys)]:.0f}")
        utils.info("get_limits.py",
                   f"16th percentile {100.*glob.confidence_level:.2f}% limit of SM toys is {SM_toy_limits[int(0.16*n_toys)]:.0f}")
        utils.info("get_limits.py",
                   f"84th percentile {100.*glob.confidence_level:.2f}% limit of SM toys is {SM_toy_limits[int(0.84*n_toys)]:.0f}")
    #
    # plot
    #
    plotting.set_mpl_style()
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    if num_scan_params == 1:
        # percentiles of the sorted toy limits give the expected bands
        limit_toys_5pc = SM_toy_limits[int(0.05 * n_toys)]
        limit_toys_16pc = SM_toy_limits[int(0.16 * n_toys)]
        limit_toys_median = SM_toy_limits[int(0.5 * n_toys)]
        limit_toys_84pc = SM_toy_limits[int(0.84 * n_toys)]
        limit_toys_95pc = SM_toy_limits[int(0.95 * n_toys)]
        plt.axvspan(limit_toys_5pc, limit_toys_95pc, color="darkorange", linestyle=None)
        plt.axvspan(limit_toys_16pc, limit_toys_84pc, color="gold", linestyle=None)
        plt.plot([limit_toys_median, limit_toys_median], [0, 1],
                 color="darkblue", linestyle="dashed", linewidth=1)
        plt.plot([exp_limit, exp_limit], [0, 1], color="green")
        plt.plot([obs_limit, obs_limit], [0, 1], color="purple")
        ax.yaxis.set_visible(False)
        ax.set_ylim([0, 1])
    else:
        # 2D scan: draw the toy coverage bands and median as contours
        plt.contourf(SM_coverage_grid.axes[0], SM_coverage_grid.axes[1],
                     SM_coverage_grid.values.transpose(),
                     [0.05, 0.16, 0.84, 0.95],
                     linestyles=None, colors=["gold", "darkorange", "gold"])
        plt.contour(SM_coverage_grid.axes[0], SM_coverage_grid.axes[1],
                    SM_coverage_grid.values.transpose(), [0.5],
                    linestyles="dashed", colors=["darkblue"], linewidths=1)
        for limit in exp_limit:
            plt.plot([x[0] for x in limit], [y[1] for y in limit], color="green")
        for limit in obs_limit:
            plt.plot([x[0] for x in limit], [y[1] for y in limit], color="purple")
        plt.ylabel(f"{glob.scan_params[1].label} [{glob.scan_params[1].units}]",
                   horizontalalignment='right', y=1.0, fontsize="large")
    format_axis_from_config("GET_LIMITS")
    plt.xlabel(f"{glob.scan_params[0].label} [{glob.scan_params[0].units}]",
               horizontalalignment='right', x=1.0, fontsize="large")
    plt.legend([
        Line2D([0], [0], color="purple", lw=2),
        Line2D([0], [0], color="green", lw=2),
        Line2D([0], [0], color="darkblue", linestyle="dashed", lw=1),
        Patch(color="gold", linestyle=None),
        Patch(color="darkorange", linestyle=None)
    ], [
        f"Obs. ({100*glob.confidence_level:.0f}% $CL_s$)",
        f"Exp. ({100*glob.confidence_level:.0f}% $CL_s$)",
        "SM toys: median",
        "SM toys: 68% coverage",
        "SM toys: 95% coverage"
    ], loc=utils.string_to_object(glob.config.get("GET_LIMITS", "legend.position", fallback="'best'")))
    if glob.custom_store["do show plots"]:
        plt.show()
    if glob.custom_store["plots filename"] is not None:
        plotting.save_figure(fig)
        plotting.close_plots_pdf()
def get_toy_spread_of_limits(n_toys=None, confidence_level=None, tag=None):
    #
    # resolve settings from defaults and those provided
    #
    SM_pred_dist = glob.custom_store["SM_pred_dist"]
    BSM_scan_dists = glob.custom_store["BSM_scan_dists"]
    if tag is None:
        tag = glob.custom_store["tag"]
    if n_toys is None:
        n_toys = SM_pred_dist.n_toys
    if confidence_level is None:
        confidence_level = glob.confidence_level
    if tag is None:
        fname = ".get_limits__get_toy_spread_of_1D_limits.pickle"
    else:
        fname = f".get_limits__get_toy_spread_of_1D_limits.{tag}.pickle"
    #
    # specify the settings which must match when saving/loading results from file
    #
    required_kwargs = {}
    required_kwargs["test_stat"] = glob.test_statistic
    required_kwargs["limits_method"] = glob.limits_method
    required_kwargs["test_stat_strategy"] = glob.test_stat_strategy
    required_kwargs["confidence_level"] = confidence_level
    required_kwargs["n_toys"] = n_toys
    n_axes = len(BSM_scan_dists.axes)
    required_kwargs["n_axes"] = n_axes
    for idx in range(n_axes):
        required_kwargs[f"axis.index_{idx}"] = BSM_scan_dists.keys[idx]
    for axis, key in zip(BSM_scan_dists.axes, BSM_scan_dists.keys):
        required_kwargs[f"{key}.length"] = len(axis)
        for idx in range(len(axis)):
            required_kwargs[f"{key}.index_{idx}"] = axis[idx]
    #
    # load toy limits if specified *and* the saved settings match those required
    #
    if glob.custom_store.get("QuickLoadSMToys", False):
        load_success, pickle_file = utils.open_from_pickle(fname, **required_kwargs)
        if load_success:
            utils.info("get_toy_spread_of_limits()",
                       f"Successfully loaded toy limits from {fname}")
            return pickle_file["limits"], pickle_file["coverage"]
    #
    # otherwise throw toys
    #
    utils.info("get_toy_spread_of_limits()", f"Throwing {n_toys} toys")
    SM_exp_toys = SM_pred_dist.generate_toys(n_toys)
    #
    # and get the limits
    #
    utils.info("get_toy_spread_of_limits()", "Getting expected confidence limits for toys")
    SM_toy_limits = []
    grid_of_coverage = Grid(glob.scan_params, generate=True)
    tmp_array_of_coverage = grid_of_coverage.values.flatten()
    for toy_idx in range(n_toys):
        grid_of_CL = glob.CL_generator.get_CL_grid(SM_exp_toys[toy_idx])
        limit = st.get_limit_from_levels(grid_of_CL, confidence_level)
        SM_toy_limits.append(limit)
        # count, per grid point, how many toys exclude it at the requested CL
        flattened_grid_of_CL = grid_of_CL.values.flatten()
        for i in range(len(tmp_array_of_coverage)):
            this_CL = flattened_grid_of_CL[i]
            if this_CL > (1.0 - confidence_level):
                continue
            tmp_array_of_coverage[i] = tmp_array_of_coverage[i] + 1
        pct_complete = 100. * (1 + toy_idx) / n_toys
        if pct_complete % 10 == 0:
            utils.info("get_toy_spread_of_limits()", f"{int(pct_complete)}% toys processed")
    tmp_array_of_coverage = tmp_array_of_coverage / float(n_toys)
    grid_of_coverage.values = tmp_array_of_coverage.reshape(grid_of_coverage.values.shape)
    #
    # sort the limits
    #
    if len(BSM_scan_dists.axes) == 1:
        SM_toy_limits.sort()
    if glob.custom_store.get("QuickStoreSMToys", False):
        utils.save_to_pickle(fname, {"limits": SM_toy_limits, "coverage": grid_of_coverage},
                             **required_kwargs)
    return SM_toy_limits, grid_of_coverage
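# Hedged sketch of how main() reads the percentile bands from the sorted toy
# limits returned above (1D scans only; indices as used in main()):
def _demo_toy_percentiles(sorted_limits):
    n = len(sorted_limits)
    return (sorted_limits[int(0.16 * n)],  # lower edge of the 68% band
            sorted_limits[int(0.50 * n)],  # median expected limit
            sorted_limits[int(0.84 * n)])  # upper edge of the 68% band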