def llh_bfgs(opt_vals, *args):
    """
    Function that the BFGS algorithm tries to minimize. Essentially, it is a
    wrapper around get_template() and get_binwise_llh().

    This function is set up this way because the fmin_l_bfgs_b algorithm must
    take a function with two inputs, params & *args, where 'params' are the
    actual VALUES to be varied (and must correspond to the limits in 'bounds')
    and 'args' are arguments which are not varied or optimized but are needed
    by the get_template() function here.

    Thus, we pass the arguments to this function as follows:

    --opt_vals: [param1, param2, ..., paramN] - systematics varied in the
        optimization.
    --args: [names, scales, fmap, fixed_params, template_maker,
             opt_steps_dict, priors]
      where
        names: dict keys corresponding to param1, param2, ...
        scales: scales to be applied before passing to get_template.
            [IMPORTANT! In the optimizer, all parameters must be of roughly
            the same order of magnitude. Here, we keep them between 0.1 and 1
            so the "epsilon" step size will vary the parameters with roughly
            the same precision.]
        fmap: pseudo-data flattened map
        fixed_params: dictionary of other parameters needed by the
            get_template() function
        template_maker: template maker object
        opt_steps_dict: dictionary recording information on the steps taken
            for each trial of the optimization process.
        priors: Gaussian priors corresponding to the opt_vals list.
            Format: [(prior1, best1), (prior2, best2), ..., (priorN, bestN)]
    """
    names, scales, fmap, fixed_params, template_maker, opt_steps_dict, priors = args

    # Free parameters being "optimized" by the minimizer, re-scaled to their
    # true values.
    unscaled_opt_vals = [opt_vals[i]/scales[i] for i in xrange(len(opt_vals))]
    unscaled_free_params = {names[i]: val
                            for i, val in enumerate(unscaled_opt_vals)}
    template_params = dict(unscaled_free_params.items() +
                           get_values(fixed_params).items())

    # Now get the true template and compute the LLH
    with Timer() as t:
        if template_params['theta23'] == 0.0:
            logging.info("Zero theta23, so generating no oscillations template...")
            true_template = template_maker.get_template_no_osc(template_params)
        else:
            true_template = template_maker.get_template(template_params)
    profile.info("==> elapsed time for template maker: %s sec" % t.secs)
    true_fmap = flatten_map(true_template, chan=template_params['channel'])

    # NOTE: The minus sign is present on both of the next two lines to
    # reflect the fact that the optimizer finds a minimum rather than a
    # maximum.
    llh = -get_binwise_llh(fmap, true_fmap)
    llh -= sum([get_prior_llh(opt_val, sigma, value)
                for (opt_val, (sigma, value)) in zip(unscaled_opt_vals, priors)])

    # Save all optimizer-tested values to opt_steps_dict so the optimizer
    # history can be inspected later
    for key in names:
        opt_steps_dict[key].append(template_params[key])
    opt_steps_dict['llh'].append(llh)

    physics.debug("LLH is %.2f at: " % llh)
    for name, val in zip(names, opt_vals):
        physics.debug(" %20s = %6.4f" % (name, val))

    return llh
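# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how llh_bfgs is
# typically handed to SciPy's fmin_l_bfgs_b. The helper name run_bfgs_fit and
# its argument list are hypothetical; it assumes the scaled start values,
# scaled bounds, names, scales, fmap, fixed_params, template_maker and priors
# have already been prepared as described in the docstring above.
# ---------------------------------------------------------------------------
from scipy.optimize import fmin_l_bfgs_b

def run_bfgs_fit(x0_scaled, bounds_scaled, names, scales, fmap, fixed_params,
                 template_maker, priors):
    # Record every parameter set the optimizer tries, plus the resulting llh.
    opt_steps_dict = {name: [] for name in names}
    opt_steps_dict['llh'] = []

    const_args = (names, scales, fmap, fixed_params, template_maker,
                  opt_steps_dict, priors)

    # approx_grad=True makes L-BFGS-B estimate gradients numerically (using
    # its "epsilon" step size), since llh_bfgs only returns the negative llh.
    best_x, best_llh, info = fmin_l_bfgs_b(llh_bfgs, x0_scaled,
                                           args=const_args, approx_grad=True,
                                           bounds=bounds_scaled)

    # Undo the scaling applied for the optimizer to recover physical values.
    best_fit = {name: x/s for name, x, s in zip(names, best_x, scales)}
    return best_fit, best_llh, opt_steps_dict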
def find_max_grid(fmap, template_maker, params, grid_settings,
                  save_steps=True, normal_hierarchy=True):
    '''
    Finds the template (and the free systematic params) that maximizes the
    likelihood that the data came from the chosen template of true params,
    using a brute-force grid scan over the whole parameter space.

    Returns a dictionary of llh data and best-fit params, in the format:
      {'llh': [...],
       'param1': [...],
       'param2': [...],
       ...}
    where 'param1', 'param2', ... are the free params varied in the scan. If
    save_steps is False, each entry holds only the best-fit value instead of
    the full list of scanned values.
    '''
    # Get the params dict which will be optimized (free_params) and the one
    # which won't be (fixed_params) but is still needed by get_template()
    fixed_params = get_fixed_params(select_hierarchy(params, normal_hierarchy))
    free_params = get_free_params(select_hierarchy(params, normal_hierarchy))

    # Obtain just the priors
    priors = get_param_priors(free_params)

    # Calculate steps for all free parameters
    calc_steps(free_params, grid_settings['steps'])

    # Build, for each free parameter, a list of (name, step) tuples
    steplist = [[(name, step) for step in param['steps']]
                for name, param in sorted(free_params.items())]

    # Prepare to store all the steps
    steps = {key: [] for key in free_params.keys()}
    steps['llh'] = []

    # Iterate over the Cartesian product of all parameter steps
    for pos in product(*steplist):

        # Get a dict with all parameter values at this position, including
        # the fixed parameters
        template_params = dict(list(pos) + get_values(fixed_params).items())

        # Now get the true template
        profile.info('start template calculation')
        true_template = template_maker.get_template(template_params)
        profile.info('stop template calculation')
        true_fmap = flatten_map(true_template)

        # ...and calculate the likelihood
        llh = -get_binwise_llh(fmap, true_fmap)

        # Get sorted vals to match with priors
        vals = [v for k, v in sorted(pos)]
        llh -= sum([get_prior_llh(val, sigma, value)
                    for (val, (sigma, value)) in zip(vals, priors)])

        # Save all values to steps and report
        steps['llh'].append(llh)
        physics.debug("LLH is %.2f at: " % llh)
        for key, val in pos:
            steps[key].append(val)
            physics.debug(" %20s = %6.4f" % (key, val))

    # Find the best-fit value. The stored values are negative log-likelihoods,
    # so the minimum corresponds to the maximum likelihood.
    maxllh = min(steps['llh'])
    maxpos = steps['llh'].index(maxllh)

    # Report the best fit
    physics.info('Found best LLH = %.2f in %d calls at:'
                 % (maxllh, len(steps['llh'])))
    for name, vals in steps.items():
        physics.info(' %20s = %6.4f' % (name, vals[maxpos]))

        # Only keep the full scan history if asked for; otherwise collapse
        # each entry to its best-fit value
        if not save_steps:
            steps[name] = vals[maxpos]

    return steps
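# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the grid enumeration
# used by find_max_grid. Each free parameter contributes a list of
# (name, value) tuples, and itertools.product yields one tuple of
# (name, value) pairs per grid point. The parameter names and step values
# below are made up purely for demonstration.
# ---------------------------------------------------------------------------
from itertools import product

def _example_grid_enumeration():
    steplist = [[('deltam31', s) for s in (2.3e-3, 2.4e-3, 2.5e-3)],
                [('theta23', s) for s in (0.70, 0.74, 0.78)]]

    grid_points = []
    for pos in product(*steplist):
        # pos is e.g. (('deltam31', 2.3e-3), ('theta23', 0.70)); dict(pos)
        # turns it directly into the free-parameter part of template_params.
        grid_points.append(dict(pos))

    return grid_points  # 3 x 3 = 9 grid points in total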