def find_max_llh_bfgs(fmap, template_maker, params, bfgs_settings,
                      save_steps=False, normal_hierarchy=None,
                      check_octant=False):
    """
    Finds the template (and free systematic params) that maximizes the
    likelihood that the data came from the chosen template of true params,
    using the limited-memory BFGS algorithm subject to bounds (l_bfgs_b).

    Returns a dictionary of llh data and best-fit params, in the format:
      {'llh': [...],
       'param1': [...],
       'param2': [...],
       ...}
    where 'param1', 'param2', ... are the free params varied by the
    optimizer, and they hold a list of all the values tested by the
    optimizer, unless save_steps is False, in which case they are one
    element in length -- the best-fit params and best-fit llh.
    """
    # Get params dict which will be optimized (free_params) and which
    # won't be (fixed_params) but are still needed for get_template()
    fixed_params = get_fixed_params(select_hierarchy(params, normal_hierarchy))
    free_params = get_free_params(select_hierarchy(params, normal_hierarchy))

    if len(free_params) == 0:
        logging.warn("NO FREE PARAMS, returning LLH")
        true_template = template_maker.get_template(get_values(fixed_params))
        channel = params['channel']['value']
        true_fmap = flatten_map(true_template, chan=channel)
        return {'llh': [-get_binwise_llh(fmap, true_fmap)]}

    init_vals = get_param_values(free_params)
    scales = get_param_scales(free_params)
    bounds = get_param_bounds(free_params)
    priors = get_param_priors(free_params)
    names = sorted(free_params.keys())

    # Scale init vals and bounds to work with the bfgs optimizer:
    init_vals = np.array(init_vals) * np.array(scales)
    bounds = [bounds[i] * scales[i] for i in range(len(bounds))]

    opt_steps_dict = {key: [] for key in names}
    opt_steps_dict['llh'] = []
    const_args = (names, scales, fmap, fixed_params, template_maker,
                  opt_steps_dict, priors)

    display_optimizer_settings(free_params, names, init_vals, bounds, priors,
                               bfgs_settings)

    best_fit_vals, llh, dict_flags = opt.fmin_l_bfgs_b(
        llh_bfgs, init_vals, args=const_args, approx_grad=True, iprint=0,
        bounds=bounds, **get_values(bfgs_settings))

    # If needed, run the optimizer again, checking for a second-octant solution:
    if check_octant and ('theta23' in free_params.keys()):
        physics.info("Checking alternative octant solution")
        old_th23_val = free_params['theta23']['value']
        delta = np.pi - old_th23_val
        free_params['theta23']['value'] = np.pi + delta
        # Apply the same scaling to the new seed values as in the first fit:
        init_vals = np.array(get_param_values(free_params)) * np.array(scales)

        const_args = (names, scales, fmap, fixed_params, template_maker,
                      opt_steps_dict, priors)
        display_optimizer_settings(free_params, names, init_vals, bounds,
                                   priors, bfgs_settings)
        alt_fit_vals, alt_llh, alt_dict_flags = opt.fmin_l_bfgs_b(
            llh_bfgs, init_vals, args=const_args, approx_grad=True, iprint=0,
            bounds=bounds, **get_values(bfgs_settings))

        # Keep the alternative-octant solution if it is optimal:
        if alt_llh < llh:
            best_fit_vals = alt_fit_vals
            llh = alt_llh
            dict_flags = alt_dict_flags

    best_fit_params = {name: value for name, value in zip(names, best_fit_vals)}

    # Report best fit
    physics.info('Found best LLH = %.2f in %d calls at:'
                 % (llh, dict_flags['funcalls']))
    for name, val in best_fit_params.items():
        physics.info(' %20s = %6.4f' % (name, val))

    # Report any warnings, if there are any
    lvl = logging.WARN if (dict_flags['warnflag'] != 0) else logging.DEBUG
    for name, val in dict_flags.items():
        physics.log(lvl, " %s : %s" % (name, val))

    if not save_steps:
        # Do not store the extra history of optimizer steps:
        for key in opt_steps_dict.keys():
            opt_steps_dict[key] = [opt_steps_dict[key][-1]]

    return opt_steps_dict
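# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module): the fit above relies on an
# objective `llh_bfgs(scaled_vals, names, scales, fmap, fixed_params,
# template_maker, opt_steps_dict, priors)` defined elsewhere.  The toy below
# is a self-contained, hypothetical stand-in that mimics the same pattern --
# scaled parameters, bounds in scaled units, and a steps dict recording every
# evaluation -- so the fmin_l_bfgs_b wiring can be exercised in isolation.
# All names here (toy_llh_bfgs, TOY_*) are made up for illustration.
import numpy as np
import scipy.optimize as opt

TOY_NAMES = ['theta23', 'deltam31']
TOY_SCALES = np.array([1.0, 100.0])     # bring both params to O(1) for BFGS
TOY_TRUTH = np.array([0.78, 0.0025])    # hypothetical "true" values

def toy_llh_bfgs(scaled_vals, names, scales, steps_dict):
    """Toy negative log-likelihood that records every evaluation."""
    unscaled = np.array(scaled_vals) / np.array(scales)
    neg_llh = float(np.sum(((unscaled - TOY_TRUTH) / TOY_TRUTH) ** 2))
    steps_dict['llh'].append(neg_llh)
    for name, val in zip(names, unscaled):
        steps_dict[name].append(val)
    return neg_llh

def toy_fit_example():
    """Run the toy fit; returns (best_fit_dict, best_llh, n_calls)."""
    steps = {name: [] for name in TOY_NAMES}
    steps['llh'] = []
    init_vals = np.array([0.70, 0.0030]) * TOY_SCALES   # scaled seed
    bounds = [(0.5, 1.0), (0.20, 0.40)]                 # scaled bounds
    best, fval, flags = opt.fmin_l_bfgs_b(
        toy_llh_bfgs, init_vals, args=(TOY_NAMES, TOY_SCALES, steps),
        approx_grad=True, iprint=0, bounds=bounds)
    return dict(zip(TOY_NAMES, np.array(best) / TOY_SCALES)), fval, flags['funcalls']
# ---------------------------------------------------------------------------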
def find_max_grid(fmap, template_maker, params, grid_settings, save_steps=True,
                  normal_hierarchy=True):
    '''
    Finds the template (and free systematic params) that maximizes the
    likelihood that the data came from the chosen template of true params,
    using a brute-force grid scan over the whole parameter space.

    Returns a dictionary of llh data and best-fit params, in the format:
      {'llh': [...],
       'param1': [...],
       'param2': [...],
       ...}
    where 'param1', 'param2', ... are the free params that are varied in
    the scan. If save_steps is False, all lists only contain the best-fit
    parameters and llh values.
    '''
    # Get params dict which will be optimized (free_params) and which
    # won't be (fixed_params) but are still needed for get_template()
    fixed_params = get_fixed_params(select_hierarchy(params, normal_hierarchy))
    free_params = get_free_params(select_hierarchy(params, normal_hierarchy))

    # Obtain just the priors
    priors = get_param_priors(free_params)

    # Calculate steps for all free parameters
    calc_steps(free_params, grid_settings['steps'])

    # Build a list holding, for each parameter, a list of (name, step) tuples
    steplist = [[(name, step) for step in param['steps']]
                for name, param in sorted(free_params.items())]

    # Prepare to store all the steps
    steps = {key: [] for key in free_params.keys()}
    steps['llh'] = []

    # Iterate over the cartesian product of all parameter steps
    for pos in product(*steplist):
        # Get a dict with all parameter values at this position,
        # including the fixed parameters
        template_params = dict(list(pos) + get_values(fixed_params).items())

        # Now get the true template
        tprofile.info('start template calculation')
        true_template = template_maker.get_template(template_params)
        tprofile.info('stop template calculation')
        true_fmap = flatten_map(true_template)

        # ...and calculate the likelihood
        llh = -get_binwise_llh(fmap, true_fmap)

        # Get sorted vals to match with priors
        vals = [v for k, v in sorted(pos)]
        llh -= sum([prior.llh(val) for val, prior in zip(vals, priors)])

        # Save all values to steps and report
        steps['llh'].append(llh)
        physics.debug("LLH is %.2f at: " % llh)
        for key, val in pos:
            steps[key].append(val)
            physics.debug(" %20s = %6.4f" % (key, val))

    # Find best-fit value
    maxllh = min(steps['llh'])
    maxpos = steps['llh'].index(maxllh)

    # Report best fit
    physics.info('Found best LLH = %.2f in %d calls at:'
                 % (maxllh, len(steps['llh'])))
    for name, vals in steps.items():
        physics.info(' %20s = %6.4f' % (name, vals[maxpos]))

        # Only save this maximum if asked for
        if not save_steps:
            steps[name] = vals[maxpos]

    return steps
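# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical values): how the steplist / product()
# construction above enumerates the grid.  Each element of `steplist` is one
# parameter's list of (name, value) tuples, and product(*steplist) yields one
# tuple of (name, value) pairs per grid point, which dict() turns into the
# free-parameter part of template_params.  `free_steps` below stands in for
# the 'steps' entries that calc_steps() fills in.
from itertools import product

free_steps = {
    'theta23': [0.72, 0.78, 0.84],
    'deltam31': [0.0024, 0.0025],
}
steplist = [[(name, step) for step in values]
            for name, values in sorted(free_steps.items())]

for pos in product(*steplist):
    grid_point = dict(pos)   # e.g. {'deltam31': 0.0024, 'theta23': 0.72}
    print(grid_point)        # merged with the fixed params before get_template()
# ---------------------------------------------------------------------------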
def find_max_llh_bfgs(fmap, template_maker, params, bfgs_settings,
                      save_steps=False, normal_hierarchy=True):
    """
    Finds the template (and free systematic params) that maximizes the
    likelihood that the data came from the chosen template of true params,
    using the limited-memory BFGS algorithm subject to bounds (l_bfgs_b).

    Returns a dictionary of llh data and best-fit params, in the format:
      {'llh': [...],
       'param1': [...],
       'param2': [...],
       ...}
    where 'param1', 'param2', ... are the free params varied by the
    optimizer, and they hold a list of all the values tested by the
    optimizer, unless save_steps is False, in which case they are one
    element in length -- the best-fit params and best-fit llh.
    """
    # Get params dict which will be optimized (free_params) and which
    # won't be (fixed_params) but are still needed for get_template()
    fixed_params = get_fixed_params(select_hierarchy(params, normal_hierarchy))
    free_params = get_free_params(select_hierarchy(params, normal_hierarchy))

    init_vals = get_param_values(free_params)
    scales = get_param_scales(free_params)
    bounds = get_param_bounds(free_params)
    priors = get_param_priors(free_params)
    names = sorted(free_params.keys())

    # Scale init vals and bounds to work with the bfgs optimizer:
    init_vals = np.array(init_vals) * np.array(scales)
    bounds = [bounds[i] * scales[i] for i in range(len(bounds))]

    opt_steps_dict = {key: [] for key in names}
    opt_steps_dict["llh"] = []
    const_args = (names, scales, fmap, fixed_params, template_maker,
                  opt_steps_dict, priors)

    physics.info("%d parameters to be optimized" % len(free_params))
    for name, init, (down, up), (prior, best) in zip(names, init_vals, bounds, priors):
        physics.info(("%20s : init = %6.4f, bounds = [%6.4f,%6.4f], "
                      "best = %6.4f, prior = " + ("%6.4f" if prior else "%s"))
                     % (name, init, down, up, best, prior))

    physics.debug("Optimizer settings:")
    for key, item in bfgs_settings.items():
        physics.debug("  %s -> `%s` = %.2e" % (item["desc"], key, item["value"]))

    best_fit_vals, llh, dict_flags = opt.fmin_l_bfgs_b(
        llh_bfgs, init_vals, args=const_args, approx_grad=True, iprint=0,
        bounds=bounds, **get_values(bfgs_settings))

    best_fit_params = {name: value for name, value in zip(names, best_fit_vals)}

    # Report best fit
    physics.info("Found best LLH = %.2f in %d calls at:"
                 % (llh, dict_flags["funcalls"]))
    for name, val in best_fit_params.items():
        physics.info(" %20s = %6.4f" % (name, val))

    # Report any warnings, if there are any
    lvl = logging.WARN if (dict_flags["warnflag"] != 0) else logging.DEBUG
    for name, val in dict_flags.items():
        physics.log(lvl, " %s : %s" % (name, val))

    if not save_steps:
        # Do not store the extra history of optimizer steps:
        for key in opt_steps_dict.keys():
            opt_steps_dict[key] = [opt_steps_dict[key][-1]]

    return opt_steps_dict
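# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical values): the structure that the settings
# loop above and the **get_values(bfgs_settings) expansion appear to assume.
# Each entry carries a human-readable 'desc' plus a 'value' that must be a
# valid keyword argument of scipy.optimize.fmin_l_bfgs_b (factr, pgtol,
# maxiter, epsilon, ...).  `example_get_values` only models the assumed
# behaviour of get_values() -- keeping just the 'value' fields -- and is not
# the module's implementation.
example_bfgs_settings = {
    'factr':   {'desc': 'convergence tolerance (factr)', 'value': 1.0e7},
    'pgtol':   {'desc': 'projected-gradient tolerance',  'value': 1.0e-5},
    'maxiter': {'desc': 'maximum number of iterations',  'value': 200},
}

def example_get_values(settings):
    """Assumed behaviour of get_values(): strip each entry to its 'value'."""
    return {key: item['value'] for key, item in settings.items()}

# example_get_values(example_bfgs_settings)
#   -> {'factr': 10000000.0, 'pgtol': 1e-05, 'maxiter': 200}
# which can then be expanded as keyword arguments:
#   opt.fmin_l_bfgs_b(..., **example_get_values(example_bfgs_settings))
# ---------------------------------------------------------------------------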
def find_max_grid(fmap, template_maker, params, grid_settings, save_steps=True,
                  normal_hierarchy=True):
    '''
    Finds the template (and free systematic params) that maximizes the
    likelihood that the data came from the chosen template of true params,
    using a brute-force grid scan over the whole parameter space.

    Returns a dictionary of llh data and best-fit params, in the format:
      {'llh': [...],
       'param1': [...],
       'param2': [...],
       ...}
    where 'param1', 'param2', ... are the free params that are varied in
    the scan. If save_steps is False, all lists only contain the best-fit
    parameters and llh values.
    '''
    # Get params dict which will be optimized (free_params) and which
    # won't be (fixed_params) but are still needed for get_template()
    fixed_params = get_fixed_params(select_hierarchy(params, normal_hierarchy))
    free_params = get_free_params(select_hierarchy(params, normal_hierarchy))

    # Obtain just the priors
    priors = get_param_priors(free_params)

    # Calculate steps for all free parameters
    calc_steps(free_params, grid_settings['steps'])

    # Build a list holding, for each parameter, a list of (name, step) tuples
    steplist = [[(name, step) for step in param['steps']]
                for name, param in sorted(free_params.items())]

    # Prepare to store all the steps
    steps = {key: [] for key in free_params.keys()}
    steps['llh'] = []

    # Iterate over the cartesian product of all parameter steps
    for pos in product(*steplist):
        # Get a dict with all parameter values at this position,
        # including the fixed parameters
        template_params = dict(list(pos) + get_values(fixed_params).items())

        # Now get the true template
        profile.info('start template calculation')
        true_template = template_maker.get_template(template_params)
        profile.info('stop template calculation')
        true_fmap = flatten_map(true_template)

        # ...and calculate the likelihood
        llh = -get_binwise_llh(fmap, true_fmap)

        # Get sorted vals to match with priors
        vals = [v for k, v in sorted(pos)]
        llh -= sum([get_prior_llh(val, sigma, value)
                    for (val, (sigma, value)) in zip(vals, priors)])

        # Save all values to steps and report
        steps['llh'].append(llh)
        physics.debug("LLH is %.2f at: " % llh)
        for key, val in pos:
            steps[key].append(val)
            physics.debug(" %20s = %6.4f" % (key, val))

    # Find best-fit value
    maxllh = min(steps['llh'])
    maxpos = steps['llh'].index(maxllh)

    # Report best fit
    physics.info('Found best LLH = %.2f in %d calls at:'
                 % (maxllh, len(steps['llh'])))
    for name, vals in steps.items():
        physics.info(' %20s = %6.4f' % (name, vals[maxpos]))

        # Only save this maximum if asked for
        if not save_steps:
            steps[name] = vals[maxpos]

    return steps
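# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption, not the module's implementation): the
# prior term subtracted in the grid scan above is typically a Gaussian
# log-likelihood centred on the prior's best-fit value, with sigma=None
# meaning "no prior".  The helper below shows that convention under a
# hypothetical name; subtracting its (non-positive) return value adds a
# positive penalty to the negative llh being minimized.
def gaussian_prior_llh(val, sigma, best):
    """Gaussian prior log-likelihood term, or 0.0 if no prior is defined."""
    if sigma is None:
        return 0.0
    return -((val - best) ** 2) / (2.0 * sigma ** 2)

# e.g. gaussian_prior_llh(0.80, 0.05, 0.78) == -0.08, and a point exactly one
# sigma away from the prior centre costs 0.5 units of llh.
# ---------------------------------------------------------------------------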