def get_osc_prob_maps(self, **kwargs):
    """
    Returns an oscillation probability map dictionary calculated at the
    values of the input parameters:
      deltam21, deltam31, theta12, theta13, theta23, deltacp
    for flavor_from to flavor_to, with the binning of ebins, czbins.
    The dictionary is formatted as:
      'nue_maps': {'nue': map, 'numu': map, 'nutau': map},
      'numu_maps': {...}
      'nue_bar_maps': {...}
      'numu_bar_maps': {...}

    NOTES:
      * expects all angles in [rad]
      * this method doesn't calculate the oscillation probabilities
        itself, but calls get_osc_probLT_dict internally to get a
        high-resolution map of the oscillation probabilities
    """
    # Get the finely binned maps as implemented in the derived class
    logging.info('Retrieving finely binned maps')
    with Timer(verbose=False) as t:
        fine_maps = self.get_osc_probLT_dict(**kwargs)
    tprofile.debug(" ==> elapsed time to get all fine maps: %s sec" % t.secs)

    logging.info("Smoothing fine maps...")
    smoothed_maps = {}
    smoothed_maps['ebins'] = self.ebins
    smoothed_maps['czbins'] = self.czbins

    with Timer(verbose=False) as t:
        for from_nu, tomap_dict in fine_maps.items():
            # Skip the 'evals'/'czvals' coordinate arrays; they are not
            # flavor keys
            if 'vals' in from_nu:
                continue
            new_tomaps = {}
            for to_nu, pvals in tomap_dict.items():
                logging.debug("Getting smoothed map %s/%s" % (from_nu, to_nu))
                new_tomaps[to_nu] = get_smoothed_map(
                    pvals, fine_maps['evals'], fine_maps['czvals'],
                    self.ebins, self.czbins)
            smoothed_maps[from_nu] = new_tomaps
    tprofile.debug(" ==> elapsed time to smooth maps: %s sec" % t.secs)

    return smoothed_maps

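# Usage sketch (illustrative only): how the smoothed probability maps
# returned above might be consumed. `osc_service` stands for an instance
# of a derived oscillation service and is a hypothetical name, as are the
# parameter values (angles in rad, mass splittings in eV^2):
#
#   smoothed = osc_service.get_osc_prob_maps(deltam21=7.54e-5,
#                                            deltam31=2.47e-3,
#                                            theta12=0.5839,
#                                            theta13=0.1485,
#                                            theta23=0.7854,
#                                            deltacp=0.0)
#   # P(numu -> nutau) on the (ebins, czbins) grid:
#   p_mu_to_tau = smoothed['numu_maps']['nutau']
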
def get_template(self, params, return_stages=False):
    '''
    Runs entire template-making chain, using parameters found in
    'params' dict. If 'return_stages' is set to True, returns output
    from each stage as a simple tuple.
    '''
    logging.info("STAGE 1: Getting Atm Flux maps...")
    with Timer() as t:
        flux_maps = get_flux_maps(self.flux_service, self.ebins,
                                  self.czbins, **params)
    tprofile.debug("==> elapsed time for flux stage: %s sec" % t.secs)

    logging.info("STAGE 2: Getting osc prob maps...")
    with Timer() as t:
        osc_flux_maps = get_osc_flux(flux_maps, self.osc_service,
                                     oversample_e=self.oversample_e,
                                     oversample_cz=self.oversample_cz,
                                     **params)
    tprofile.debug("==> elapsed time for oscillations stage: %s sec" % t.secs)

    logging.info("STAGE 3: Getting event rate true maps...")
    with Timer() as t:
        event_rate_maps = get_event_rates(osc_flux_maps,
                                          self.aeff_service, **params)
    tprofile.debug("==> elapsed time for aeff stage: %s sec" % t.secs)

    logging.info("STAGE 4: Getting event rate reco maps...")
    with Timer() as t:
        event_rate_reco_maps = get_reco_maps(event_rate_maps,
                                             self.reco_service, **params)
    tprofile.debug("==> elapsed time for reco stage: %s sec" % t.secs)

    logging.info("STAGE 5: Getting pid maps...")
    with Timer(verbose=False) as t:
        final_event_rate = get_pid_maps(event_rate_reco_maps,
                                        self.pid_service)
    tprofile.debug("==> elapsed time for pid stage: %s sec" % t.secs)

    if not return_stages:
        return final_event_rate

    # Otherwise, return all stages as a simple tuple
    return (flux_maps, osc_flux_maps, event_rate_maps,
            event_rate_reco_maps, final_event_rate)

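# Usage sketch (illustrative only): running the full chain and unpacking
# the per-stage outputs. `template_maker` and `params` follow this
# module's conventions (see the __main__ block further down); the unpack
# order matches the tuple returned above.
#
#   stages = template_maker.get_template(get_values(params),
#                                        return_stages=True)
#   (flux_maps, osc_flux_maps, event_rate_maps,
#    event_rate_reco_maps, final_event_rate) = stages
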
def get_template_no_osc(self, params):
    '''
    Runs template making chain, but without oscillations
    '''
    logging.info("STAGE 1: Getting Atm Flux maps...")
    with Timer() as t:
        flux_maps = get_flux_maps(self.flux_service, self.ebins,
                                  self.czbins, **params)
    tprofile.debug("==> elapsed time for flux stage: %s sec" % t.secs)

    # Skipping oscillation stage...
    logging.info("  >>Skipping Stage 2 in no oscillations case...")
    flavours = ['nutau', 'nutau_bar']
    # Create the empty nutau maps:
    test_map = flux_maps['nue']
    for flav in flavours:
        flux_maps[flav] = {'map': np.zeros_like(test_map['map']),
                           'ebins': np.zeros_like(test_map['ebins']),
                           'czbins': np.zeros_like(test_map['czbins'])}

    logging.info("STAGE 3: Getting event rate true maps...")
    with Timer() as t:
        event_rate_maps = get_event_rates(flux_maps, self.aeff_service,
                                          **params)
    tprofile.debug("==> elapsed time for aeff stage: %s sec" % t.secs)

    logging.info("STAGE 4: Getting event rate reco maps...")
    with Timer() as t:
        event_rate_reco_maps = get_reco_maps(event_rate_maps,
                                             self.reco_service, **params)
    tprofile.debug("==> elapsed time for reco stage: %s sec" % t.secs)

    logging.info("STAGE 5: Getting pid maps...")
    with Timer(verbose=False) as t:
        final_event_rate = get_pid_maps(event_rate_reco_maps,
                                        self.pid_service)
    tprofile.debug("==> elapsed time for pid stage: %s sec" % t.secs)

    return final_event_rate

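# Usage sketch (illustrative only): llh_bfgs and bfgs_metric further down
# fall back to this method when theta23 == 0, e.g.:
#
#   unoscillated = template_maker.get_template_no_osc(get_values(params))
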
def find_alt_hierarchy_fit(asimov_data_set, template_maker, hypo_params,
                           hypo_normal, minimizer_settings,
                           only_atm_params=True, check_octant=False):
    """
    For the hypothesis of the mass hierarchy being NMH
    ('normal_hierarchy'=True) or IMH ('normal_hierarchy'=False), finds
    the best fit for the free params in 'param_values' for the
    alternative (opposite) hierarchy that maximizes the LLH of the
    Asimov data set.

    Parameters:
    * asimov_data_set - Asimov data set to find the best-fit LLH for
    * template_maker - instance of class pisa.analysis.TemplateMaker,
      used to generate the Asimov data set
    * hypo_params - parameters for template generation
    * hypo_normal - boolean for NMH (True) or IMH (False)
    * minimizer_settings - settings for BFGS minimization
    * only_atm_params - boolean to denote whether the fit will be over
      the atmospheric oscillation parameters only or over all the free
      params in params
    """
    # Find best fit of the alternative hierarchy to the hypothesized
    # Asimov data set.
    #hypo_types = [('hypo_IMH',False)] if data_normal else [('hypo_NMH',True)]
    hypo_params = select_hierarchy(hypo_params, normal_hierarchy=hypo_normal)

    with Timer() as t:
        llh_data = find_max_llh_bfgs(asimov_data_set, template_maker,
                                     hypo_params, minimizer_settings,
                                     normal_hierarchy=hypo_normal,
                                     check_octant=check_octant)
    tprofile.info("==> elapsed time for optimizer: %s sec" % t.secs)

    return llh_data

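# Usage sketch (illustrative only): fitting the IMH hypothesis
# (hypo_normal=False) to an NMH-generated Asimov data set. `asimov_data`,
# `params`, and `minimizer_cfg` are hypothetical placeholder names.
#
#   llh_data = find_alt_hierarchy_fit(asimov_data, template_maker,
#                                     hypo_params=params,
#                                     hypo_normal=False,
#                                     minimizer_settings=minimizer_cfg,
#                                     check_octant=True)
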
def get_gradients(data_tag, param, template_maker, fiducial_params,
                  grid_settings, store_dir):
    """
    Use the template maker to create all the templates needed to obtain
    the gradients.
    """
    logging.info("Working on parameter %s." % param)

    steps = get_steps(param, grid_settings, fiducial_params)

    pmaps = {}

    # Generate one template for each value of the parameter in question
    # and store in pmaps
    for param_value in steps:
        # Make the template corresponding to the current value of the
        # parameter
        with Timer() as t:
            maps = template_maker.get_template(
                get_values(dict(fiducial_params,
                                **{param: dict(fiducial_params[param],
                                               **{'value': param_value})})))
        tprofile.info("==> elapsed time for template: %s sec" % t.secs)

        pmaps[param_value] = maps

    # Store the maps used to calculate partial derivatives
    if store_dir != tempfile.gettempdir():
        logging.info("Writing maps for parameter %s to %s" %
                     (param, store_dir))
        to_json(pmaps,
                os.path.join(store_dir, param + "_" + data_tag + ".json"))

    gradient_map = get_derivative_map(pmaps, fiducial_params[param],
                                      degree=2)

    return gradient_map

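# Conceptual sketch (not the actual implementation, which lives in
# get_derivative_map): with degree=2, the per-bin derivative can be
# obtained by fitting a second-degree polynomial to the bin values over
# the parameter steps and differentiating at the fiducial value.
# `bin_values` (one template bin value per step) and `fiducial_value`
# are hypothetical names.
#
#   import numpy as np
#   coeffs = np.polyfit(steps, bin_values, deg=2)
#   derivative = np.polyval(np.polyder(coeffs), fiducial_value)
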
                    action='store_true', default=False,
                    help="Save all stages.")
parser.add_argument('-o', '--outfile', dest='outfile', metavar='FILE',
                    type=str, action='store', default="template.json",
                    help='file to store the output')
args = parser.parse_args()

set_verbosity(args.verbose)

with Timer() as t:
    # Load all the settings
    model_settings = from_json(args.template_settings)

    # Select a hierarchy
    logging.info('Selected %s hierarchy' %
                 ('normal' if args.normal else 'inverted'))
    params = select_hierarchy(model_settings['params'],
                              normal_hierarchy=args.normal)

    # Initialize template maker
    template_maker = TemplateMaker(get_values(params),
                                   **model_settings['binning'])
tprofile.info("  ==> elapsed time to initialize templates: %s sec" % t.secs)

fmap = get_pseudo_data_fmap(
    pseudo_data_template_maker,
    get_values(select_hierarchy(pseudo_data_settings['params'],
                                normal_hierarchy=data_normal)),
    seed=results[data_tag]['seed'],
    chan=channel)

# 2) find max llh (and best fit free params) from matching pseudo data
#    to templates.
for hypo_tag, hypo_normal in [('hypo_NMH', True), ('hypo_IMH', False)]:

    physics.info("Finding best fit for %s under %s assumption" %
                 (data_tag, hypo_tag))
    with Timer() as t:
        llh_data = find_max_llh_bfgs(fmap, template_maker,
                                     template_settings['params'],
                                     minimizer_settings, args.save_steps,
                                     normal_hierarchy=hypo_normal)
    tprofile.info("==> elapsed time for optimizer: %s sec" % t.secs)

    # Store the LLH data
    results[data_tag][hypo_tag] = llh_data

# Store this trial
trials += [results]

tprofile.info("stop trial %d" % itrial)

def get_fisher_matrices(template_settings, grid_settings, IMH=True,
                        NMH=False, dump_all_stages=False,
                        save_templates=False, outdir=None):
    '''
    Main function that runs the Fisher analysis for the chosen
    hierarchy(ies) (inverted by default).

    Returns a dictionary of Fisher matrices, in the format:
      {'IMH': {'cscd': [...],
               'trck': [...],
               'comb': [...]},
       'NMH': {'cscd': [...],
               'trck': [...],
               'comb': [...]}}

    If save_templates=True and no hierarchy is given, only fiducial
    templates will be written out; if one is given, then the templates
    used to obtain the gradients will be written out in addition.
    '''
    if outdir is None and (save_templates or dump_all_stages):
        logging.info("No output directory specified. Will save templates "
                     "to current working directory.")
        outdir = os.getcwd()

    tprofile.info("start initializing")

    # Get the parameters
    params = template_settings['params']
    bins = template_settings['binning']

    # Artificially add the hierarchy parameter to the list of parameters.
    # The method get_hierarchy_gradients below will know how to deal
    # with it.
    params['hierarchy_nh'] = {"value": 1., "range": [0., 1.],
                              "fixed": False, "prior": None}
    params['hierarchy_ih'] = {"value": 0., "range": [0., 1.],
                              "fixed": False, "prior": None}

    chosen_data = []
    if IMH:
        chosen_data.append(('IMH', False))
        logging.info("Fisher matrix will be built for IMH.")
    if NMH:
        chosen_data.append(('NMH', True))
        logging.info("Fisher matrix will be built for NMH.")
    if chosen_data == []:
        # In this case, only the fiducial maps (for both hierarchies)
        # will be written
        logging.info("No Fisher matrices will be built.")

    # There is no point in performing any of the following steps if no
    # Fisher matrices are to be built and no templates are to be saved.
    if chosen_data != [] or dump_all_stages or save_templates:

        # Initialise return dict to hold Fisher matrices
        fisher = {data_tag: {'cscd': [], 'trck': [], 'comb': []}
                  for data_tag, data_normal in chosen_data}

        # Get a template maker with the settings used to initialize
        template_maker = TemplateMaker(get_values(params), **bins)

        tprofile.info("stop initializing\n")

        # Generate fiducial templates for both hierarchies (needed for
        # partial derivatives w.r.t. hierarchy parameter)
        fiducial_maps = {}
        for hierarchy in ['NMH', 'IMH']:

            logging.info("Generating fiducial templates for %s." % hierarchy)

            # Get the fiducial parameter values corresponding to this
            # hierarchy
            fiducial_params = select_hierarchy(
                params, normal_hierarchy=(hierarchy == 'NMH'))

            # Generate fiducial maps, either all of them or only the
            # ultimate one
            tprofile.info("start template calculation")
            with Timer() as t:
                fid_maps = template_maker.get_template(
                    get_values(fiducial_params),
                    return_stages=dump_all_stages)
            tprofile.info("==> elapsed time for template: %s sec" % t.secs)

            fiducial_maps[hierarchy] = (fid_maps[4] if dump_all_stages
                                        else fid_maps)

            # Save fiducial map(s):
            # all stages
            if dump_all_stages:
                stage_names = ("0_unoscillated_flux", "1_oscillated_flux",
                               "2_oscillated_counts", "3_reco", "4_pid")
                stage_maps = {}
                for stage in xrange(0, len(fid_maps)):
                    stage_maps[stage_names[stage]] = fid_maps[stage]
                logging.info("Writing fiducial maps (all stages) for %s "
                             "to %s." % (hierarchy, outdir))
                to_json(stage_maps,
                        os.path.join(outdir,
                                     "fid_map_" + hierarchy + ".json"))
            # only the final stage
            elif save_templates:
                logging.info("Writing fiducial map (final stage) for %s "
                             "to %s." % (hierarchy, outdir))
                to_json(fiducial_maps[hierarchy],
                        os.path.join(outdir,
                                     "fid_map_" + hierarchy + ".json"))

        # get_gradients and get_hierarchy_gradients will both
        # (temporarily) store the templates used to generate the
        # gradient maps
        store_dir = outdir if save_templates else tempfile.gettempdir()

        # Calculate Fisher matrices for the user-defined cases
        # (NMH true and/or IMH true)
        for data_tag, data_normal in chosen_data:

            logging.info("Running Fisher analysis for %s." % data_tag)

            # The fiducial params are selected from the hierarchy case
            # that does NOT match the data, as we are varying from this
            # model to find the 'best fit'
            fiducial_params = select_hierarchy(params, not data_normal)

            # Get the free parameters (i.e. those for which the gradients
            # should be calculated)
            free_params = select_hierarchy(get_free_params(params),
                                           not data_normal)
            gradient_maps = {}
            for param in free_params.keys():
                # Special treatment for the hierarchy parameter
                if param == 'hierarchy':
                    gradient_maps[param] = get_hierarchy_gradients(
                        data_tag=data_tag,
                        fiducial_maps=fiducial_maps,
                        fiducial_params=fiducial_params,
                        grid_settings=grid_settings,
                        store_dir=store_dir)
                else:
                    gradient_maps[param] = get_gradients(
                        data_tag=data_tag,
                        param=param,
                        template_maker=template_maker,
                        fiducial_params=fiducial_params,
                        grid_settings=grid_settings,
                        store_dir=store_dir)

            logging.info("Building Fisher matrix for %s." % data_tag)

            # Build Fisher matrices for the given hierarchy
            fisher[data_tag] = build_fisher_matrix(
                gradient_maps=gradient_maps,
                fiducial_map=(fiducial_maps['IMH'] if data_normal
                              else fiducial_maps['NMH']),
                template_settings=fiducial_params)

            # If Fisher matrices exist for both channels, add the
            # matrices to obtain the combined one.
            if len(fisher[data_tag].keys()) > 1:
                fisher[data_tag]['comb'] = FisherMatrix(
                    matrix=np.array(
                        [f.matrix for f in
                         fisher[data_tag].itervalues()]).sum(axis=0),
                    parameters=gradient_maps.keys(),  # order is important here!
                    best_fits=[fiducial_params[par]['value']
                               for par in gradient_maps.keys()],
                    priors=[Prior.from_param(fiducial_params[par])
                            for par in gradient_maps.keys()])

        return fisher

    else:
        logging.info("Nothing to be done.")
        return {}

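# Usage sketch (illustrative only): building Fisher matrices for both
# hierarchies from loaded settings files. `from_json` and the settings
# layout follow this module's conventions; the file and directory names
# are hypothetical.
#
#   template_settings = from_json('template_settings.json')
#   grid_settings = from_json('grid_settings.json')
#   fisher = get_fisher_matrices(template_settings, grid_settings,
#                                IMH=True, NMH=True,
#                                save_templates=True, outdir='fisher_out')
#   combined_imh = fisher['IMH']['comb']
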
def llh_bfgs(opt_vals, *args):
    """
    Function that the bfgs algorithm tries to minimize. Essentially, it
    is a wrapper function around get_template() and get_binwise_llh().

    This function is set up this way because the fmin_l_bfgs_b algorithm
    must take a function with two inputs: params & *args, where 'params'
    are the actual VALUES to be varied, and must correspond to the
    limits in 'bounds', and 'args' are arguments which are not varied
    and optimized, but needed by the get_template() function here.

    Thus, we pass the arguments to this function as follows:
    --opt_vals: [param1, param2, ..., paramN] - systematics varied in
      the optimization.
    --args: [names, scales, fmap, fixed_params, template_maker,
             opt_steps_dict, priors]
      where
        names: the dict keys corresponding to param1, param2, ...
        scales: the scales to be applied before passing to get_template
          [IMPORTANT! In the optimizer, all parameters must be ~ the
          same order. Here, we keep them between 0.1 and 1 so the
          "epsilon" step size will vary the parameters with roughly the
          same precision.]
        fmap: pseudo data flattened map
        fixed_params: dictionary of other parameters needed by the
          get_template() function
        template_maker: template maker object
        opt_steps_dict: dictionary recording information regarding the
          steps taken for each trial of the optimization process.
        priors: Gaussian priors corresponding to opt_vals list.
          Format: [(prior1, best1), (prior2, best2), ..., (priorN, bestN)]
    """
    (names, scales, fmap, fixed_params, template_maker, opt_steps_dict,
     priors) = args

    # Free parameters being "optimized" by minimizer, re-scaled to their
    # true values.
    unscaled_opt_vals = [opt_vals[i] / scales[i]
                         for i in xrange(len(opt_vals))]

    unscaled_free_params = {names[i]: val
                            for i, val in enumerate(unscaled_opt_vals)}
    template_params = dict(unscaled_free_params.items() +
                           get_values(fixed_params).items())

    # Now get true template, and compute LLH
    with Timer() as t:
        if template_params['theta23'] == 0.0:
            logging.info("Zero theta23, so generating no oscillations "
                         "template...")
            true_template = template_maker.get_template_no_osc(template_params)
        else:
            true_template = template_maker.get_template(template_params)
    tprofile.info("==> elapsed time for template maker: %s sec" % t.secs)

    true_fmap = flatten_map(true_template, chan=template_params['channel'])

    # NOTE: The minus sign is present on both of these next two lines to
    # reflect the fact that the optimizer finds a minimum rather than a
    # maximum.
    llh = -get_binwise_llh(fmap, true_fmap)
    llh -= sum([get_prior_llh(opt_val, sigma, value)
                for (opt_val, (sigma, value)) in zip(unscaled_opt_vals,
                                                     priors)])

    # Save all optimizer-tested values to opt_steps_dict, to see the
    # optimizer history later
    for key in names:
        opt_steps_dict[key].append(template_params[key])
    opt_steps_dict['llh'].append(llh)

    physics.debug("LLH is %.2f at: " % llh)
    for name, val in zip(names, opt_vals):
        physics.debug(" %20s = %6.4f" % (name, val))

    return llh

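# Wiring sketch (illustrative only): how llh_bfgs plugs into SciPy's
# minimizer, as described in the docstring above. `init_vals` and
# `bounds` are hypothetical placeholders; in this package the wiring is
# done inside find_max_llh_bfgs.
#
#   from scipy.optimize import fmin_l_bfgs_b
#   best_vals, min_llh, info = fmin_l_bfgs_b(
#       llh_bfgs, init_vals,
#       args=(names, scales, fmap, fixed_params, template_maker,
#             opt_steps_dict, priors),
#       approx_grad=True, bounds=bounds)
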
def bfgs_metric(opt_vals, names, scales, fmap, fixed_params,
                template_maker, opt_steps_dict, priors,
                metric_name='llh'):
    """
    Function that the bfgs algorithm tries to minimize: wraps
    get_template() and get_binwise_llh() (or get_binwise_chisquare()),
    and returns the negative log likelihood (or the chisquare).

    This function is set up this way because the fmin_l_bfgs_b algorithm
    must take a function with two inputs: params & *args, where 'params'
    are the actual VALUES to be varied, and must correspond to the
    limits in 'bounds', and 'args' are arguments which are not varied
    and optimized, but needed by the get_template() function here.

    Parameters
    ----------
    opt_vals : sequence of scalars
        Systematics varied in the optimization.
        Format: [param1, param2, ..., paramN]
    names : sequence of str
        Dictionary keys corresponding to param1, param2, ...
    scales : sequence of float
        Scales to be applied before passing to get_template
        [IMPORTANT! In the optimizer, all parameters must be ~ the same
        order. Here, we keep them between 0.1 and 1 so the "epsilon"
        step size will vary the parameters with roughly the same
        precision.]
    fmap : sequence of float
        Pseudo data flattened map
    fixed_params : dict
        Other parameters needed by the get_template() function.
    template_maker : template maker object
    opt_steps_dict : dict
        Dictionary recording information regarding the steps taken for
        each trial of the optimization process.
    priors : sequence of pisa.utils.params.Prior objects
        Priors corresponding to opt_vals list.
    metric_name : string
        Returns chisquare instead of negative llh if metric_name is
        'chisquare'. Note: this string has to be present as a key in
        opt_steps_dict.

    Returns
    -------
    metric_val : float
        Either the negative llh or the chisquare evaluated at opt_vals,
        which the BFGS minimizer drives to its minimum.
    """
    # Free parameters being "optimized" by minimizer, re-scaled to their
    # true values.
    unscaled_opt_vals = [opt_vals[i] / scales[i]
                         for i in xrange(len(opt_vals))]

    unscaled_free_params = {names[i]: val
                            for i, val in enumerate(unscaled_opt_vals)}
    template_params = dict(unscaled_free_params.items() +
                           get_values(fixed_params).items())

    # Now get true template, and compute metric
    with Timer() as t:
        if template_params['theta23'] == 0.0:
            logging.info("Zero theta23, so generating no oscillations "
                         "template...")
            true_template = template_maker.get_template_no_osc(template_params)
        else:
            true_template = template_maker.get_template(template_params)
    tprofile.info("==> elapsed time for template maker: %s sec" % t.secs)

    true_fmap = flatten_map(template=true_template,
                            channel=template_params['channel'])

    # NOTE: The minus sign is present on both of the llh lines below
    # because the optimizer finds a minimum rather than a maximum, so we
    # have to minimize the negative of the log likelihood.
    if metric_name == 'chisquare':
        metric_val = get_binwise_chisquare(fmap, true_fmap)
        metric_val += sum([prior.chi2(opt_val)
                           for (opt_val, prior) in zip(unscaled_opt_vals,
                                                       priors)])
    elif metric_name == 'llh':
        metric_val = -get_binwise_llh(fmap, true_fmap)
        metric_val -= sum([prior.llh(opt_val)
                           for (opt_val, prior) in zip(unscaled_opt_vals,
                                                       priors)])

    # Save all optimizer-tested values to opt_steps_dict, to see the
    # optimizer history later
    for key in names:
        opt_steps_dict[key].append(template_params[key])
    opt_steps_dict[metric_name].append(metric_val)

    physics.debug("%s is %.2f at: " % (metric_name, metric_val))
    for name, val in zip(names, opt_vals):
        physics.debug(" %20s = %6.4f" % (name, val))

    return metric_val

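# Wiring sketch (illustrative only): bfgs_metric keeps the same
# positional argument layout as llh_bfgs above, so it can be handed to
# fmin_l_bfgs_b the same way; only the trailing metric_name entry is
# new. Placeholder names as in the llh_bfgs sketch.
#
#   best_vals, min_metric, info = fmin_l_bfgs_b(
#       bfgs_metric, init_vals,
#       args=(names, scales, fmap, fixed_params, template_maker,
#             opt_steps_dict, priors, 'chisquare'),
#       approx_grad=True, bounds=bounds)
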