import logging
import os

import numpy as np
import pandas as pd

# NOTE: the helpers used below (flush, make_conditional_estimation, param_generator,
# many_predict, monte_carlo_data, save_monte_carlo, monte_carlo_infer, calibrates,
# calib_param_sampler, params_to_dict, Parameter) and the constants NCALL, _ERROR
# and _TRUTH are defined elsewhere in this repository.


def run_iter(model, result_row, i_iter, config, valid_generator, test_generator):
    logger = logging.getLogger()
    logger.info('-' * 45)
    logger.info(f'iter : {i_iter}')
    flush(logger)
    iter_directory = os.path.join(model.results_path, f'iter_{i_iter}')
    os.makedirs(iter_directory, exist_ok=True)
    result_row['i'] = i_iter
    result_row['n_test_samples'] = config.N_TESTING_SAMPLES
    suffix = f'-mu={config.TRUE.mu:1.2f}_rescale={config.TRUE.rescale}'

    logger.info('Generate testing data')
    test_generator.reset()
    X_test, y_test, w_test = test_generator.generate(*config.TRUE,
                                                     n_samples=config.N_TESTING_SAMPLES)

    # CHEATER : predict with the true nuisance parameters (oracle baseline)
    cheat_target, cheat_sigma = model.predict(X_test, w_test,
                                              np.array(config.TRUE.nuisance_parameters))
    result_row['cheat_mu'] = cheat_target
    result_row['cheat_sigma_mu'] = cheat_sigma

    # MEASURE STAT/SYST VARIANCE
    logger.info('MEASURE STAT/SYST VARIANCE')
    conditional_results = make_conditional_estimation(model, X_test, w_test, config)
    fname = os.path.join(iter_directory, "no_nuisance.csv")
    conditional_estimate = pd.DataFrame(conditional_results)
    conditional_estimate['i'] = i_iter
    conditional_estimate.to_csv(fname)

    param_sampler = param_generator  # prior distribution over the nuisance parameters

    # MONTE CARLO
    logger.info('Making {} predictions'.format(NCALL))
    all_pred, all_params = many_predict(model, X_test, w_test, param_sampler, ncall=NCALL)
    logger.info('Gathering it all')
    mc_data = monte_carlo_data(all_pred, all_params)
    save_monte_carlo(mc_data, iter_directory, ext=suffix)
    target, sigma = monte_carlo_infer(mc_data)

    result_row.update(config.CALIBRATED.to_dict())
    result_row.update(config.CALIBRATED_ERROR.to_dict(suffix=_ERROR))
    result_row.update(config.TRUE.to_dict(suffix=_TRUTH))
    name = config.INTEREST_PARAM_NAME
    result_row[name] = target
    result_row[name + _ERROR] = sigma
    result_row[name + _TRUTH] = config.TRUE.interest_parameters
    logger.info('mu = {} =vs= {} +/- {}'.format(config.TRUE.interest_parameters, target, sigma))
    return result_row.copy(), conditional_estimate
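
# ---------------------------------------------------------------------------
# The MONTE CARLO block above delegates to many_predict / monte_carlo_data /
# monte_carlo_infer, which are defined elsewhere in this repository. As a
# minimal, self-contained sketch of the underlying technique (NOT the
# repository's implementation; the function name is hypothetical, and
# param_sampler() is assumed to return a Parameter-like object exposing
# .nuisance_parameters, like config.TRUE above): sample nuisance parameters,
# predict once per draw, then combine the average conditional variance with
# the spread of the point estimates (law of total variance).
def _sketch_monte_carlo_infer(model, X, w, param_sampler, ncall=100):
    targets = np.empty(ncall)
    sigmas = np.empty(ncall)
    for i in range(ncall):
        params = param_sampler()  # one draw from the prior over nuisance parameters
        targets[i], sigmas[i] = model.predict(X, w, np.array(params.nuisance_parameters))
    target = targets.mean()  # Monte Carlo average of the point estimates
    # total variance = mean conditional variance (stat) + variance across draws (syst)
    sigma = np.sqrt(np.mean(sigmas ** 2) + targets.var())
    return target, sigma
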
def run_estimation_iter(model, result_row, i_iter, config, valid_generator, test_generator, calibs):
    logger = logging.getLogger()
    logger.info('-' * 45)
    logger.info(f'iter : {i_iter}')
    flush(logger)
    iter_directory = os.path.join(model.results_path, f'iter_{i_iter}')
    os.makedirs(iter_directory, exist_ok=True)
    result_row['i'] = i_iter
    result_row['n_test_samples'] = test_generator.n_samples
    suffix = config.get_suffix()

    logger.info('Generate testing data')
    test_generator.reset()
    X_test, y_test, w_test = test_generator.generate(*config.TRUE,
                                                     n_samples=config.N_TESTING_SAMPLES,
                                                     no_grad=True)

    # CHEATER :
    cheat_target, cheat_sigma = model.predict(X_test, w_test,
                                              np.array(config.TRUE.nuisance_parameters))
    result_row['cheat_mu'] = cheat_target
    result_row['cheat_sigma_mu'] = cheat_sigma

    # CALIBRATION
    config = calibrates(calibs, config, X_test, w_test)
    for name, value in config.CALIBRATED.items():
        result_row[name + "_calib"] = value
    for name, value in config.CALIBRATED_ERROR.items():
        result_row[name + "_calib_error"] = value
    param_sampler = lambda: param_generator(config)

    # MONTE CARLO
    logger.info('Making {} predictions'.format(NCALL))
    all_pred, all_params = many_predict(model, X_test, w_test, param_sampler, ncall=NCALL)
    logger.info('Gathering it all')
    mc_data = monte_carlo_data(all_pred, all_params)
    save_monte_carlo(mc_data, iter_directory, ext=suffix)
    target, sigma = monte_carlo_infer(mc_data)

    result_row.update(config.CALIBRATED.to_dict())
    result_row.update(config.CALIBRATED_ERROR.to_dict(suffix=_ERROR))
    result_row.update(config.TRUE.to_dict(suffix=_TRUTH))
    name = config.INTEREST_PARAM_NAME
    result_row[name] = target
    result_row[name + _ERROR] = sigma
    result_row[name + _TRUTH] = config.TRUE.interest_parameters
    logger.info('mu = {} =vs= {} +/- {}'.format(config.TRUE.interest_parameters, target, sigma))
    return result_row.copy()
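
# ---------------------------------------------------------------------------
# `calibrates` is defined elsewhere. Judging from the explicit two-calibrator
# variant at the bottom of this file, it presumably runs one calibration
# regressor per nuisance parameter and stores the predicted (mean, sigma)
# pairs in config.CALIBRATED / config.CALIBRATED_ERROR. A minimal sketch
# under that assumption (the function name, the dict of calibrators, and the
# keyword-based Parameter construction are all hypothetical):
def _sketch_calibrates(calibs, config, X_test, w_test):
    means, sigmas = {}, {}
    for name, calib in calibs.items():
        mean, sigma = calib.predict(X_test, w_test)  # same signature as calib_r.predict below
        means[name] = mean
        sigmas[name] = sigma
    config.CALIBRATED = Parameter(**means,
                                  interest_parameters=config.CALIBRATED.interest_parameters)
    config.CALIBRATED_ERROR = Parameter(**sigmas,
                                        interest_parameters=config.CALIBRATED_ERROR.interest_parameters)
    return config
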
def run_iter(model, result_row, i_iter, config, valid_generator, test_generator):
    logger = logging.getLogger()
    logger.info('-' * 45)
    logger.info(f'iter : {i_iter}')
    iter_directory = os.path.join(model.results_path, f'iter_{i_iter}')
    os.makedirs(iter_directory, exist_ok=True)
    result_row['i'] = i_iter
    suffix = config.get_suffix()
    # suffix += f'_nasty_bkg={config.TRUE.nasty_bkg}_sigma_soft={config.TRUE.sigma_soft}'

    logger.info('Generate testing data')
    test_generator.reset()
    X_test, y_test, w_test = test_generator.generate(*config.TRUE, n_samples=None)

    # CALIBRATION
    # logger.info('r = {} =vs= {} +/- {}'.format(config.TRUE_R, r_mean, r_sigma))
    # logger.info('lam = {} =vs= {} +/- {}'.format(config.TRUE_LAMBDA, lam_mean, lam_sigma))

    # CHEATER : predict with the true nuisance parameters (oracle baseline)
    cheat_target, cheat_sigma = model.predict(X_test, w_test,
                                              np.array(config.TRUE.nuisance_parameters))
    result_row['cheat_mu'] = cheat_target
    result_row['cheat_sigma_mu'] = cheat_sigma

    param_sampler = param_generator  # prior distribution over the nuisance parameters

    # MONTE CARLO
    logger.info('Making {} predictions'.format(NCALL))
    all_pred, all_params = many_predict(model, X_test, w_test, param_sampler, ncall=NCALL)
    logger.info('Gathering it all')
    mc_data = monte_carlo_data(all_pred, all_params)
    save_monte_carlo(mc_data, iter_directory, ext=suffix)
    target, sigma = monte_carlo_infer(mc_data)

    result_row.update(params_to_dict(config.CALIBRATED))
    result_row.update(params_to_dict(config.CALIBRATED_ERROR, ext=_ERROR))
    result_row.update(params_to_dict(config.TRUE, ext=_TRUTH))
    name = config.INTEREST_PARAM_NAME
    result_row[name] = target
    result_row[name + _ERROR] = sigma
    result_row[name + _TRUTH] = config.TRUE.mu
    logger.info('mu = {} =vs= {} +/- {}'.format(config.TRUE.mu, target, sigma))
    return result_row.copy()
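
# ---------------------------------------------------------------------------
# The variant above flattens the Parameter objects with a free
# params_to_dict(...) helper instead of the .to_dict(suffix=...) method used
# by the other variants. A plausible one-line sketch (hypothetical; the real
# helper is defined elsewhere), relying only on the .items() iterator already
# used in run_estimation_iter above:
def _sketch_params_to_dict(params, ext=''):
    # append the suffix (e.g. _ERROR or _TRUTH) to every parameter name
    return {name + ext: value for name, value in params.items()}
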
def run_estimation_iter(model, result_row, i_iter, config, valid_generator, test_generator, calib_r, calib_lam):
    logger = logging.getLogger()
    logger.info('-' * 45)
    logger.info(f'iter : {i_iter}')
    flush(logger)
    iter_directory = os.path.join(model.results_path, f'iter_{i_iter}')
    os.makedirs(iter_directory, exist_ok=True)
    result_row['i'] = i_iter
    result_row['n_test_samples'] = config.N_TESTING_SAMPLES
    suffix = f'-mu={config.TRUE.mu:1.2f}_r={config.TRUE.r}_lambda={config.TRUE.lam}'

    logger.info('Generate testing data')
    test_generator.reset()
    X_test, y_test, w_test = test_generator.generate(*config.TRUE,
                                                     n_samples=config.N_TESTING_SAMPLES)

    # CALIBRATION
    r_mean, r_sigma = calib_r.predict(X_test, w_test)
    lam_mean, lam_sigma = calib_lam.predict(X_test, w_test)
    logger.info('r = {} =vs= {} +/- {}'.format(config.TRUE.r, r_mean, r_sigma))
    logger.info('lam = {} =vs= {} +/- {}'.format(config.TRUE.lam, lam_mean, lam_sigma))
    config.CALIBRATED = Parameter(r_mean, lam_mean, config.CALIBRATED.interest_parameters)
    config.CALIBRATED_ERROR = Parameter(r_sigma, lam_sigma, config.CALIBRATED_ERROR.interest_parameters)

    # CHEATER :
    cheat_target, cheat_sigma = model.predict(X_test, w_test,
                                              np.array(config.TRUE.nuisance_parameters))
    result_row['cheat_mu'] = cheat_target
    result_row['cheat_sigma_mu'] = cheat_sigma

    # MONTE CARLO
    logger.info('Making {} predictions'.format(NCALL))
    param_sampler = calib_param_sampler(r_mean, r_sigma, lam_mean, lam_sigma)
    all_pred, all_params = many_predict(model, X_test, w_test, param_sampler, ncall=NCALL)
    logger.info('Gathering it all')
    mc_data = monte_carlo_data(all_pred, all_params)
    save_monte_carlo(mc_data, iter_directory, ext=suffix)
    target, sigma = monte_carlo_infer(mc_data)

    result_row.update(config.CALIBRATED.to_dict())
    result_row.update(config.CALIBRATED_ERROR.to_dict(suffix=_ERROR))
    result_row.update(config.TRUE.to_dict(suffix=_TRUTH))
    name = config.INTEREST_PARAM_NAME
    result_row[name] = target
    result_row[name + _ERROR] = sigma
    result_row[name + _TRUTH] = config.TRUE.interest_parameters
    logger.info('mu = {} =vs= {} +/- {}'.format(config.TRUE.interest_parameters, target, sigma))
    return result_row.copy()
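
# ---------------------------------------------------------------------------
# Illustrative driver loop (the actual entry point lives elsewhere; the
# function name and the n_iter default are hypothetical). Each iteration
# starts from a fresh row dict so per-iteration keys never leak between rows,
# and the returned rows are collected into one results table:
def _sketch_main_loop(model, config, valid_generator, test_generator, calib_r, calib_lam, n_iter=30):
    rows = []
    for i_iter in range(n_iter):
        row = run_estimation_iter(model, {}, i_iter, config,
                                  valid_generator, test_generator, calib_r, calib_lam)
        rows.append(row)
    results = pd.DataFrame(rows)
    results.to_csv(os.path.join(model.results_path, 'estimations.csv'), index=False)
    return results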