Example #1
def get_gradients(data_tag, param, template_maker, fiducial_params,
                  grid_settings, store_dir):
  """
  Use the template maker to create all the templates needed to obtain the gradients.
  """
  logging.info("Working on parameter %s."%param)

  steps = get_steps(param, grid_settings, fiducial_params)

  pmaps = {}

  # Generate one template for each value of the parameter in question and store in pmaps
  for param_value in steps:

      # Make the template corresponding to the current value of the parameter
      with Timer() as t:
          maps = template_maker.get_template(
              get_values(dict(fiducial_params,**{param:dict(fiducial_params[param],
                                                            **{'value': param_value})})))
      tprofile.info("==> elapsed time for template: %s sec"%t.secs)

      pmaps[param_value] = maps

  # Store the maps used to calculate partial derivatives
  if store_dir != tempfile.gettempdir():
  	logging.info("Writing maps for parameter %s to %s"%(param,store_dir))

  to_json(pmaps, os.path.join(store_dir,param+"_"+data_tag+".json"))

  gradient_map = get_derivative_map(pmaps,fiducial_params[param],degree=2)

  return gradient_map
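The get_derivative_map call above is where the stored templates turn into gradients. PISA's actual implementation is not shown here; the following is a minimal sketch of the idea, assuming each entry of pmaps is a plain array of bin counts (the function name and map layout are assumptions):

import numpy as np

def derivative_map_sketch(pmaps, fiducial_value, degree=2):
    # Sketch only: fit a degree-2 polynomial through each bin's counts
    # across the scanned parameter values, then evaluate the analytic
    # derivative of that fit at the fiducial parameter value.
    param_values = sorted(pmaps.keys())
    counts = np.array([np.asarray(pmaps[v], dtype=float) for v in param_values])
    flat = counts.reshape(len(param_values), -1)   # one column per bin
    grad = np.empty(flat.shape[1])
    for i in range(flat.shape[1]):
        coeffs = np.polyfit(param_values, flat[:, i], deg=degree)
        grad[i] = np.polyval(np.polyder(coeffs), fiducial_value)
    return grad.reshape(counts.shape[1:])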
Example #2
def find_alt_hierarchy_fit(asimov_data_set,
                           template_maker,
                           hypo_params,
                           hypo_normal,
                           minimizer_settings,
                           only_atm_params=True,
                           check_octant=False):
    """
    For the hypothesis of the mass hierarchy being NMH
    ('normal_hierarchy'=True) or IMH ('normal_hierarchy'=False), finds the
    best fit for the free params in 'hypo_params' for the alternative
    (opposite) hierarchy that maximizes the LLH of the Asimov data set.

    \params:
      * asimov_data_set - asimov data set to find best fit llh
      * template_maker - instance of class pisa.analysis.TemplateMaker, used to
        generate the asimov data set.
      * hypo_params - parameters for template generation
      * hypo_normal - boolean for NMH (True) or IMH (False)
      * minimizer_settings - settings for bfgs minimization
      * only_atm_params - boolean to denote whether the fit will be over the
        atmospheric oscillation parameters only or over all the free params
        in params
    """

    # Find best fit of the alternative hierarchy to the
    # hypothesized asimov data set.
    #hypo_types = [('hypo_IMH',False)] if data_normal else  [('hypo_NMH',True)]
    hypo_params = select_hierarchy(hypo_params, normal_hierarchy=hypo_normal)

    with Timer() as t:
        llh_data = find_opt_bfgs(fmap=asimov_data_set,
                                 template_maker=template_maker,
                                 params=hypo_params,
                                 bfgs_settings=minimizer_settings,
                                 normal_hierarchy=hypo_normal,
                                 check_octant=check_octant)
    tprofile.info("==> elapsed time for optimizer: %s sec" % t.secs)

    return llh_data
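Timer appears throughout these examples as a context manager exposing a .secs attribute. The real PISA utility may differ; a minimal stand-in consistent with the usage above would be:

import time

class Timer(object):
    # Minimal stand-in (assumption): times a with-block and stores the
    # elapsed wall-clock seconds in self.secs.
    def __init__(self, verbose=False):
        self.verbose = verbose

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *exc_info):
        self.secs = time.time() - self.start
        if self.verbose:
            print("elapsed time: %s sec" % self.secs)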
Example #3
def get_gradients(data_tag, param, template_maker, fiducial_params,
                  grid_settings, store_dir):
    """
    Use the template maker to create all the templates needed to obtain the gradients.
    """
    logging.info("Working on parameter %s." % param)

    steps = get_steps(param, grid_settings, fiducial_params)

    pmaps = {}

    # Generate one template for each value of the parameter in question and store in pmaps
    for param_value in steps:

        # Make the template corresponding to the current value of the parameter
        with Timer() as t:
            maps = template_maker.get_template(
                get_values(
                    dict(
                        fiducial_params, **{
                            param:
                            dict(fiducial_params[param],
                                 **{'value': param_value})
                        })))
        tprofile.info("==> elapsed time for template: %s sec" % t.secs)

        pmaps[param_value] = maps

    # Store the maps used to calculate partial derivatives
    if store_dir != tempfile.gettempdir():
        logging.info("Writing maps for parameter %s to %s" %
                     (param, store_dir))

    to_json(pmaps, os.path.join(store_dir, param + "_" + data_tag + ".json"))

    gradient_map = get_derivative_map(pmaps, fiducial_params[param], degree=2)

    return gradient_map
Example #4
def find_alt_hierarchy_fit(asimov_data_set, template_maker, hypo_params,
                           hypo_normal, minimizer_settings,
                           only_atm_params=True, check_octant=False):
    """
    For the hypothesis of the mass hierarchy being NMH
    ('normal_hierarchy'=True) or IMH ('normal_hierarchy'=False), finds the
    best fit for the free params in 'hypo_params' for the alternative
    (opposite) hierarchy that maximizes the LLH of the Asimov data set.

    \params:
      * asimov_data_set - asimov data set to find best fit llh
      * template_maker - instance of class pisa.analysis.TemplateMaker, used to
        generate the asimov data set.
      * hypo_params - parameters for template generation
      * hypo_normal - boolean for NMH (True) or IMH (False)
      * minimizer_settings - settings for bfgs minimization
      * only_atm_params - boolean to denote whether the fit will be over the
        atmospheric oscillation parameters only or over all the free params
        in params
    """

    # Find best fit of the alternative hierarchy to the
    # hypothesized asimov data set.
    #hypo_types = [('hypo_IMH',False)] if data_normal else  [('hypo_NMH',True)]
    hypo_params = select_hierarchy(hypo_params, normal_hierarchy=hypo_normal)

    with Timer() as t:
        llh_data = find_opt_bfgs(
            fmap=asimov_data_set,
            template_maker=template_maker,
            params=hypo_params,
            bfgs_settings=minimizer_settings,
            normal_hierarchy=hypo_normal,
            check_octant=check_octant)
    tprofile.info("==> elapsed time for optimizer: %s sec"%t.secs)

    return llh_data
Example #5
def get_llh_hypothesis(
        data_tag, asimov_data, ntrials, template_maker, template_params,
        minimizer_settings, save_steps, check_octant):
    """
    Runs the llh fitter ntrials number of times, pulling pseudo data sets from
    asimov_data.

    \Params:
      * data_tag - hierarchy type running for assumed true.
      * asimov_data - asimov (unfluctuated) data from which to generate poisson
        fluctuated pseudo data
      * ntrials - number of trials to run for each hierarchy hypothesis
      * template_maker - instance of TemplateMaker class, to which the pseudo
        data is fit
      * template_params - dictionary of parameters at which to test the pseudo
        data and find the best match llh
      * minimizer_settings - settings for bfgs minimizer in llh fit
      * save_steps - flag to save the optimizer steps
      * check_octant - boolean to check both octants of theta23

    \returns - trials list that holds the dictionaries of llh results.
    """

    trials = []
    for itrial in xrange(1,ntrials+1):
        results = {} # one trial of results

        tprofile.info("start trial %d"%itrial)
        logging.info(">"*10 + "Running trial: %05d"%itrial + "<"*10)

        results['seed'] = get_seed()
        logging.info("  RNG seed: %ld"%results['seed'])
        # Get random map generated from asimov data (or from data_tag).
        fmap = get_random_map(asimov_data, seed=results['seed'])

        for hypo_tag, hypo_normal in [('hypo_NMH',True),('hypo_IMH',False)]:

            physics.info(
                "Finding best fit for %s under %s assumption"%(data_tag,hypo_tag))
            with Timer() as t:
                llh_data = find_max_llh_bfgs(
                    fmap, template_maker, template_params,
                    minimizer_settings, save_steps,
                    normal_hierarchy=hypo_normal, check_octant=check_octant)
            tprofile.info("==> elapsed time for optimizer: %s sec"%t.secs)

            # Store the LLH data
            results[hypo_tag] = llh_data

        trials += [results]
        tprofile.info("stop trial %d"%itrial)

    return trials
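get_random_map is what turns the unfluctuated Asimov expectation into one pseudo experiment per trial. As a rough sketch of the per-bin Poisson fluctuation it implies (the real function's signature and map structure are assumptions):

import numpy as np

def random_map_sketch(asimov_map, seed=None):
    # Sketch only: draw an integer count for each bin from a Poisson
    # distribution whose mean is the Asimov (expected) count; the seed
    # makes each trial reproducible, matching results['seed'] above.
    rng = np.random.RandomState(seed)
    return rng.poisson(np.asarray(asimov_map, dtype=float))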
Example #6
def find_max_grid(fmap, template_maker, params, grid_settings, save_steps=True,
                  normal_hierarchy=True):
    '''
    Finds the template (and free systematic params) that maximizes the
    likelihood that the data came from the chosen template of true
    params, using a brute force grid scan over the whole parameter space.

    returns a dictionary of llh data and best fit params, in the format:
      {'llh': [...],
       'param1': [...],
       'param2': [...],
       ...}
    where 'param1', 'param2', ... are the free params that are varied in the
    scan. If save_steps is False, all lists only contain the best-fit parameters
    and llh values.
    '''

    #print "NOW INSIDE find_max_grid:"
    #print "After fixing to their true values, params dict is now: "
    #for key in params.keys():
    #    try: print "  >>param: %s value: %s"%(key,str(params[key]['best']))
    #    except: continue


    # Get params dict which will be optimized (free_params) and which
    # won't be (fixed_params) but are still needed for get_template()
    fixed_params = get_fixed_params(select_hierarchy(params, normal_hierarchy))
    free_params = get_free_params(select_hierarchy(params, normal_hierarchy))

    # Obtain just the priors
    priors = get_param_priors(free_params)

    # Calculate steps [(prior,value),...] for all free parameters
    calc_steps(free_params, grid_settings['steps'])

    # Build a list from all parameters that holds a list of (name, step) tuples
    steplist = [ [(name,step) for step in param['steps']] for name, param in sorted(free_params.items())]

    # Prepare to store all the steps
    steps = {key:[] for key in free_params.keys()}
    steps['llh'] = []

    # Iterate over the cartesian product
    for pos in product(*steplist):

        # Get a dict with all parameter values at this position
        # including the fixed parameters
        template_params = dict(list(pos) + get_values(fixed_params).items())

        #print "   >> NOW IN LOOP: "
        #for key in template_params.keys():
        #    try: print "  >>param: %s value: %s"%(key,str(template_params[key]['value']))
        #    except: continue

        # Now get true template
        tprofile.info('start template calculation')
        true_template = template_maker.get_template(template_params)
        tprofile.info('stop template calculation')
        true_fmap = flatten_map(true_template)

        # and calculate the likelihood
        llh = -get_binwise_llh(fmap, true_fmap)

        # get sorted vals to match with priors
        vals = [ v for k,v in sorted(pos) ]
        llh -= sum([prior.llh(val) for val, prior in zip(vals, priors)])

        # Save all values to steps and report
        steps['llh'].append(llh)
        physics.debug("LLH is %.2f at: "%llh)
        for key, val in pos:
            steps[key].append(val)
            physics.debug(" %20s = %6.4f" %(key, val))

    # Find best-fit value (the minimum, since llh stores the negative log likelihood)
    maxllh = min(steps['llh'])
    maxpos = steps['llh'].index(maxllh)

    # Report best fit
    physics.info('Found best LLH = %.2f in %d calls at:'
                 %(maxllh,len(steps['llh'])))
    for name, vals in steps.items():
        physics.info('  %20s = %6.4f'%(name,vals[maxpos]))

        # only save this maximum if asked for
        if not save_steps:
            steps[name]=vals[maxpos]

    return steps
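The steplist/product bookkeeping above is the heart of the scan: each pos is one grid point, expressed as (name, value) tuples so it can be merged with the fixed parameters. A self-contained toy run of the same pattern, with made-up parameter values:

from itertools import product

free_params = {'theta23': {'steps': [0.60, 0.65, 0.70]},
               'deltam31': {'steps': [2.3e-3, 2.5e-3]}}

steplist = [[(name, step) for step in param['steps']]
            for name, param in sorted(free_params.items())]

for pos in product(*steplist):
    # pos is e.g. (('deltam31', 0.0023), ('theta23', 0.6)) -- one of the
    # 2 x 3 = 6 grid points; dict(pos) gives the parameter values there.
    point = dict(pos)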
Example #7
def find_max_grid(fmap,
                  template_maker,
                  params,
                  grid_settings,
                  save_steps=True,
                  normal_hierarchy=True):
    '''
    Finds the template (and free systematic params) that maximizes the
    likelihood that the data came from the chosen template of true
    params, using a brute force grid scan over the whole parameter space.

    returns a dictionary of llh data and best fit params, in the format:
      {'llh': [...],
       'param1': [...],
       'param2': [...],
       ...}
    where 'param1', 'param2', ... are the free params that are varied in the
    scan. If save_steps is False, all lists only contain the best-fit parameters
    and llh values.
    '''

    #print "NOW INSIDE find_max_grid:"
    #print "After fixing to their true values, params dict is now: "
    #for key in params.keys():
    #    try: print "  >>param: %s value: %s"%(key,str(params[key]['best']))
    #    except: continue

    # Get params dict which will be optimized (free_params) and which
    # won't be (fixed_params) but are still needed for get_template()
    fixed_params = get_fixed_params(select_hierarchy(params, normal_hierarchy))
    free_params = get_free_params(select_hierarchy(params, normal_hierarchy))

    # Obtain just the priors
    priors = get_param_priors(free_params)

    # Calculate steps [(prior,value),...] for all free parameters
    calc_steps(free_params, grid_settings['steps'])

    # Build a list from all parameters that holds a list of (name, step) tuples
    steplist = [[(name, step) for step in param['steps']]
                for name, param in sorted(free_params.items())]

    # Prepare to store all the steps
    steps = {key: [] for key in free_params.keys()}
    steps['llh'] = []

    # Iterate over the cartesian product
    for pos in product(*steplist):

        # Get a dict with all parameter values at this position
        # including the fixed parameters
        template_params = dict(list(pos) + get_values(fixed_params).items())

        #print "   >> NOW IN LOOP: "
        #for key in template_params.keys():
        #    try: print "  >>param: %s value: %s"%(key,str(template_params[key]['value']))
        #    except: continue

        # Now get true template
        tprofile.info('start template calculation')
        true_template = template_maker.get_template(template_params)
        tprofile.info('stop template calculation')
        true_fmap = flatten_map(true_template)

        # and calculate the likelihood
        llh = -get_binwise_llh(fmap, true_fmap)

        # get sorted vals to match with priors
        vals = [v for k, v in sorted(pos)]
        llh -= sum([prior.llh(val) for val, prior in zip(vals, priors)])

        # Save all values to steps and report
        steps['llh'].append(llh)
        physics.debug("LLH is %.2f at: " % llh)
        for key, val in pos:
            steps[key].append(val)
            physics.debug(" %20s = %6.4f" % (key, val))

    # Find best-fit value (the minimum, since llh stores the negative log likelihood)
    maxllh = min(steps['llh'])
    maxpos = steps['llh'].index(maxllh)

    # Report best fit
    physics.info('Found best LLH = %.2f in %d calls at:' %
                 (maxllh, len(steps['llh'])))
    for name, vals in steps.items():
        physics.info('  %20s = %6.4f' % (name, vals[maxpos]))

        # only save this maximum if asked for
        if not save_steps:
            steps[name] = vals[maxpos]

    return steps
Example #8
check_scipy_version(minimizer_settings)

if args.gpu_id is not None:
    template_settings['params']['gpu_id'] = {}
    template_settings['params']['gpu_id']['value'] = args.gpu_id
    template_settings['params']['gpu_id']['fixed'] = True

template_maker = TemplateMaker(get_values(template_settings['params']),
                               **template_settings['binning'])

# Assemble output dict
output = {'template_settings' : template_settings,
          'minimizer_settings' : minimizer_settings}

for data_tag, data_normal in [('true_NMH',True),('true_IMH',False)]:
    tprofile.info("Assuming: %s"%data_tag)

    output[data_tag] = {"true_h_fiducial": {}}

    # Always do the true hierarchy fiducial LLR distribution:
    true_h_fiducial = {}

    # Get Asimov data set for assuming true: data_tag
    asimov_data = getAsimovData(
        template_maker, template_settings['params'], data_normal)

    trials = get_llh_hypothesis(
        data_tag, asimov_data, args.ntrials, template_maker,
        template_settings["params"], minimizer_settings,
        args.save_steps, check_octant)
Example #9
    template_settings['params']['gpu_id']['value'] = args.gpu_id
    template_settings['params']['gpu_id']['fixed'] = True

template_maker = TemplateMaker(get_values(template_settings['params']),
                               **template_settings['binning'])

# Assemble output dict
output = {'template_settings' : template_settings,
          'minimizer_settings' : minimizer_settings}


asimov_data = {}
asimov_data_null = {}
alt_mh_settings = {}
for data_tag, data_normal in [('true_NMH',True),('true_IMH',False)]:
    tprofile.info("Assuming: %s"%data_tag)

    output[data_tag] = {}

    # Get Asimov data set for assuming true: data_tag
    asimov_data = getAsimovData(
        template_maker, template_settings['params'], data_normal)

    alt_params = fix_non_atm_params(template_settings['params'])
    alt_mh_settings, llh_data = getAltHierarchyBestFit(
        asimov_data, template_maker, alt_params, minimizer_settings,
        (not data_normal), check_octant)

    asimov_data_null = get_asimov_fmap(
        template_maker=template_maker,
        fiducial_params=alt_mh_settings,
Example #10
def get_fisher_matrices(template_settings,
                        grid_settings,
                        IMH=True,
                        NMH=False,
                        dump_all_stages=False,
                        save_templates=False,
                        outdir=None):
    '''
  Main function that runs the Fisher analysis for the chosen hierarchy(ies) (inverted by default).

  Returns a dictionary of Fisher matrices, in the format:
  {'IMH': {'cscd': [...],
          'trck': [...],
          'comb': [...],
          },
  'NMH': {'cscd': [...],
          'trck': [...],
          'comb': [...],
         }
  }

  If save_templates=True and no hierarchy is given, only fiducial templates will be written out;
  if one is given, then the templates used to obtain the gradients will be written out in
  addition.
  '''
    if outdir is None and (save_templates or dump_all_stages):
        logging.info(
            "No output directory specified. Will save templates to current working directory."
        )
        outdir = os.getcwd()

    tprofile.info("start initializing")

    # Get the parameters
    params = template_settings['params']
    bins = template_settings['binning']

    # Artificially add the hierarchy parameter to the list of parameters
    # The method get_hierarchy_gradients below will know how to deal with it
    params['hierarchy_nh'] = {
        "value": 1.,
        "range": [0., 1.],
        "fixed": False,
        "prior": None
    }
    params['hierarchy_ih'] = {
        "value": 0.,
        "range": [0., 1.],
        "fixed": False,
        "prior": None
    }

    chosen_data = []
    if IMH:
        chosen_data.append(('IMH', False))
        logging.info("Fisher matrix will be built for IMH.")
    if NMH:
        chosen_data.append(('NMH', True))
        logging.info("Fisher matrix will be built for NMH.")
    if chosen_data == []:
        # In this case, only the fiducial maps (for both hierarchies) will be written
        logging.info("No Fisher matrices will be built.")

    # There is no sense in performing any of the following steps if no Fisher matrices are to be built
    # and no templates are to be saved.
    if chosen_data != [] or dump_all_stages or save_templates:

        # Initialise return dict to hold Fisher matrices
        fisher = {
            data_tag: {
                'cscd': [],
                'trck': [],
                'comb': []
            }
            for data_tag, data_normal in chosen_data
        }

        # Get a template maker with the settings used to initialize
        template_maker = TemplateMaker(get_values(params), **bins)

        tprofile.info("stop initializing\n")

        # Generate fiducial templates for both hierarchies (needed for partial derivatives
        # w.r.t. hierarchy parameter)
        fiducial_maps = {}
        for hierarchy in ['NMH', 'IMH']:

            logging.info("Generating fiducial templates for %s." % hierarchy)

            # Get the fiducial parameter values corresponding to this hierarchy
            fiducial_params = select_hierarchy(
                params, normal_hierarchy=(hierarchy == 'NMH'))

            # Generate fiducial maps, either all of them or only the ultimate one
            tprofile.info("start template calculation")
            with Timer() as t:
                fid_maps = template_maker.get_template(
                    get_values(fiducial_params), return_stages=dump_all_stages)
            tprofile.info("==> elapsed time for template: %s sec" % t.secs)

            fiducial_maps[
                hierarchy] = fid_maps[4] if dump_all_stages else fid_maps

            # save fiducial map(s)
            # all stages
            if dump_all_stages:
                stage_names = ("0_unoscillated_flux", "1_oscillated_flux",
                               "2_oscillated_counts", "3_reco", "4_pid")
                stage_maps = {}
                for stage in xrange(0, len(fid_maps)):
                    stage_maps[stage_names[stage]] = fid_maps[stage]
                logging.info(
                    "Writing fiducial maps (all stages) for %s to %s." %
                    (hierarchy, outdir))
                to_json(stage_maps,
                        os.path.join(outdir, "fid_map_" + hierarchy + ".json"))
            # only the final stage
            elif save_templates:
                logging.info(
                    "Writing fiducial map (final stage) for %s to %s." %
                    (hierarchy, outdir))
                to_json(fiducial_maps[hierarchy],
                        os.path.join(outdir, "fid_map_" + hierarchy + ".json"))

        # Get_gradients and get_hierarchy_gradients will both (temporarily)
        # store the templates used to generate the gradient maps
        store_dir = outdir if save_templates else tempfile.gettempdir()

        # Calculate Fisher matrices for the user-defined cases (NMH true and/or IMH true)
        for data_tag, data_normal in chosen_data:

            logging.info("Running Fisher analysis for %s." % (data_tag))

            # The fiducial params are selected from the hierarchy case that does NOT match
            # the data, as we are varying from this model to find the 'best fit'
            fiducial_params = select_hierarchy(params, not data_normal)

            # Get the free parameters (i.e. those for which the gradients should be calculated)
            free_params = select_hierarchy(get_free_params(params),
                                           not data_normal)
            gradient_maps = {}
            for param in free_params.keys():
                # Special treatment for the hierarchy parameter
                if param == 'hierarchy':
                    gradient_maps[param] = get_hierarchy_gradients(
                        data_tag=data_tag,
                        fiducial_maps=fiducial_maps,
                        fiducial_params=fiducial_params,
                        grid_settings=grid_settings,
                        store_dir=store_dir)
                else:
                    gradient_maps[param] = get_gradients(
                        data_tag=data_tag,
                        param=param,
                        template_maker=template_maker,
                        fiducial_params=fiducial_params,
                        grid_settings=grid_settings,
                        store_dir=store_dir)

            logging.info("Building Fisher matrix for %s." % (data_tag))

            # Build Fisher matrices for the given hierarchy
            fisher[data_tag] = build_fisher_matrix(
                gradient_maps=gradient_maps,
                fiducial_map=fiducial_maps['IMH']
                if data_normal else fiducial_maps['NMH'],
                template_settings=fiducial_params)

            # If Fisher matrices exist for both channels, add the matrices to obtain
            # the combined one.
            if len(fisher[data_tag].keys()) > 1:
                fisher[data_tag]['comb'] = FisherMatrix(
                    matrix=np.array([
                        f.matrix for f in fisher[data_tag].itervalues()
                    ]).sum(axis=0),
                    parameters=gradient_maps.keys(),  #order is important here!
                    best_fits=[
                        fiducial_params[par]['value']
                        for par in gradient_maps.keys()
                    ],
                    priors=[
                        Prior.from_param(fiducial_params[par])
                        for par in gradient_maps.keys()
                    ],
                )
        return fisher

    else:
        logging.info("Nothing to be done.")
        return {}
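The 'comb' entry built above is simply the element-wise sum of the per-channel matrices, since Fisher information adds for independent channels. A toy numpy illustration of the sum and of how uncertainties would then be read off, with made-up 2-parameter matrices (FisherMatrix internals are not shown here):

import numpy as np

f_cscd = np.array([[40.0, 5.0], [5.0, 12.0]])   # toy cascade-channel matrix
f_trck = np.array([[55.0, 8.0], [8.0, 20.0]])   # toy track-channel matrix

f_comb = f_cscd + f_trck                # information adds for independent channels
covariance = np.linalg.inv(f_comb)      # covariance estimate
sigmas = np.sqrt(np.diag(covariance))   # 1-sigma uncertainty per parameter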
Example #11
def bfgs_metric(opt_vals, names, scales, fmap, fixed_params, template_maker,
                opt_steps_dict, priors, metric_name='llh'):
    """
    Function that the bfgs algorithm tries to minimize: wraps get_template()
    and get_binwise_llh() (or get_binwise_chisquare()), and returns
    the negative log likelihood (or the chisquare).

    This function is set up this way because the fmin_l_bfgs_b algorithm must
    take a function with two inputs: params & *args, where 'params' are the
    actual VALUES to be varied, and must correspond to the limits in 'bounds',
    and 'args' are arguments which are not varied and optimized, but needed by
    the get_template() function here.

    Parameters
    ----------
    opt_vals : sequence of scalars
        Systematics varied in the optimization.
        Format: [param1, param2, ... , paramN]
    names : sequence of str
        Dictionary keys corresponding to param1, param2, ...
    scales : sequence of float
        Scales to be applied before passing to get_template
        [IMPORTANT! In the optimizer, all parameters must be of roughly the
        same order of magnitude. Here, we keep them between 0.1 and 1 so the
        "epsilon" step size will vary the parameters with roughly the same
        precision.]
    fmap : sequence of float
        Pseudo data flattened map
    fixed_params : dict
        Other parameters needed by the get_template() function.
    template_maker : template maker object
    opt_steps_dict: dict
        Dictionary recording information regarding the steps taken for each
        trial of the optimization process.
    priors : sequence of pisa.utils.params.Prior objects
        Priors corresponding to opt_vals list.
    metric_name : string
        Returns chisquare instead of negative llh if metric_name is 'chisquare'.
        Note: this string has to be present as a key in opt_steps_dict

    Returns
    -------
    metric_val : float
        either minimum negative llh or chisquare found by BFGS minimizer

    """
    # free parameters being "optimized" by minimizer re-scaled to their true
    # values.
    unscaled_opt_vals = [opt_vals[i]/scales[i] for i in xrange(len(opt_vals))]

    unscaled_free_params = { names[i]: val for i,val in enumerate(unscaled_opt_vals) }
    template_params = dict(unscaled_free_params.items() +
                           get_values(fixed_params).items())

    # Now get true template, and compute metric
    with Timer() as t:
        if template_params['theta23'] == 0.0:
            logging.info("Zero theta23, so generating no oscillations template...")
            true_template = template_maker.get_template_no_osc(template_params)
        else:
            true_template = template_maker.get_template(template_params)

    tprofile.info("==> elapsed time for template maker: %s sec"%t.secs)
    true_fmap = flatten_map(template=true_template,
                            channel=template_params['channel'])

    # NOTE: The minus sign is present on both of these next two lines
    # because the optimizer finds a minimum rather than maximum, so we
    # have to minimize the negative of the log likelihood.
    if metric_name == 'chisquare':
        metric_val = get_binwise_chisquare(fmap, true_fmap)
        metric_val += sum([prior.chi2(opt_val)
                           for (opt_val, prior) in zip(unscaled_opt_vals, priors)])
    elif metric_name == 'llh':
        metric_val = -get_binwise_llh(fmap, true_fmap)
        metric_val -= sum([prior.llh(opt_val)
                           for (opt_val, prior) in zip(unscaled_opt_vals, priors)])

    #prior_list = [prior.llh(opt_val)
    #         for (opt_val, prior) in zip(unscaled_opt_vals, priors)]
    #print("  prior sum: ",sum(prior_list))
    #neg_llh -= sum(prior_list)

    # Save all optimizer-tested values to opt_steps_dict, to see
    # optimizer history later
    for key in names:
        opt_steps_dict[key].append(template_params[key])
    opt_steps_dict[metric_name].append(metric_val)

    physics.debug("%s is %.2f at: "%(metric_name, metric_val))
    for name, val in zip(names, opt_vals):
        physics.debug(" %20s = %6.4f" %(name,val))

    return metric_val
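The docstring above explains why bfgs_metric takes (opt_vals, *args): that is the interface scipy's fmin_l_bfgs_b expects. A self-contained toy with the same calling convention (the quadratic metric here is purely illustrative):

from scipy.optimize import fmin_l_bfgs_b

def toy_metric(opt_vals, names, targets):
    # First argument holds the varied values; everything else arrives
    # through the optimizer's `args` tuple, exactly as in bfgs_metric.
    return sum((v - t)**2 for v, t in zip(opt_vals, targets))

best_vals, best_metric, info = fmin_l_bfgs_b(
    toy_metric,
    x0=[0.5, 0.5],                     # scaled starting values
    args=(['a', 'b'], [0.2, 0.9]),     # the non-varied arguments
    approx_grad=True,                  # finite-difference gradients
    bounds=[(0.1, 1.0), (0.1, 1.0)])   # keep params in the same range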
Example #12
if args.gpu_id is not None:
    template_settings['params']['gpu_id'] = {}
    template_settings['params']['gpu_id']['value'] = args.gpu_id
    template_settings['params']['gpu_id']['fixed'] = True

#Get the parameters
params = template_settings['params']

# Make sure that atmospheric parameters are fixed:
logging.warn("Ensuring that atmospheric parameters are fixed for this analysis")
params = fix_atm_params(params)

with Timer() as t:
    template_maker = TemplateMaker(get_values(params),**template_settings['binning'])
tprofile.info("==> elapsed time to initialize templates: %s sec"%t.secs)

results = {}
# Store for future checking:
results['template_settings'] = template_settings
results['minimizer_settings'] = minimizer_settings
results['grid_settings'] = grid_settings


# Set up data/hypo nmh or imh
if args.data_nmh:
    data_tag = 'data_NMH'
    data_normal = True
else:
    data_tag = 'data_IMH'
    data_normal = False
Example #13
                asimov_data = getAsimovData(template_maker,
                                            template_settings['params'],
                                            data_normal)
                if output['seed'] is not None:
                    fmap = get_random_map(asimov_data,
                                          seed=output['seed'])
                else:
                    print "Using Asimov Data"
                    fmap = asimov_data

                for itrial in xrange(1,2):

                    results = {}
                    results[dkey] = {}
                    tprofile.info("start trial %d"%itrial)
                    logging.info(">"*10 + "Running trial: %05d"%itrial + "<"*10)

                    results[dkey][hkey] = {}

                    for Tname in names:
                        for key in output[dkey][hkey].keys():
                            if key not in ['llh','llh_check','template_settings','seed']:
                                if key in ['theta23','deltam31']:
                                    if data_normal:
                                        ts_key = key + '_nh'
                                    else:
                                        ts_key = key + '_ih'
                                else:
                                    ts_key = key
                                template_settings['params'][ts_key]['value'] = output[dkey][hkey][key]
Example #14
def bfgs_metric(opt_vals,
                names,
                scales,
                fmap,
                fixed_params,
                template_maker,
                opt_steps_dict,
                priors,
                metric_name='llh'):
    """
    Function that the bfgs algorithm tries to minimize: wraps get_template()
    and get_binwise_llh() (or get_binwise_chisquare()), and returns
    the negative log likelihood (or the chisquare).

    This function is set up this way because the fmin_l_bfgs_b algorithm must
    take a function with two inputs: params & *args, where 'params' are the
    actual VALUES to be varied, and must correspond to the limits in 'bounds',
    and 'args' are arguments which are not varied and optimized, but needed by
    the get_template() function here.

    Parameters
    ----------
    opt_vals : sequence of scalars
        Systematics varied in the optimization.
        Format: [param1, param2, ... , paramN]
    names : sequence of str
        Dictionary keys corresponding to param1, param2, ...
    scales : sequence of float
        Scales to be applied before passing to get_template
        [IMPORTANT! In the optimizer, all parameters must be of roughly the
        same order of magnitude. Here, we keep them between 0.1 and 1 so the
        "epsilon" step size will vary the parameters with roughly the same
        precision.]
    fmap : sequence of float
        Pseudo data flattened map
    fixed_params : dict
        Other parameters needed by the get_template() function.
    template_maker : template maker object
    opt_steps_dict: dict
        Dictionary recording information regarding the steps taken for each
        trial of the optimization process.
    priors : sequence of pisa.utils.params.Prior objects
        Priors corresponding to opt_vals list.
    metric_name : string
        Returns chisquare instead of negative llh if metric_name is 'chisquare'.
        Note: this string has to be present as a key in opt_steps_dict

    Returns
    -------
    metric_val : float
        either minimum negative llh or chisquare found by BFGS minimizer

    """
    # free parameters being "optimized" by minimizer re-scaled to their true
    # values.
    unscaled_opt_vals = [
        opt_vals[i] / scales[i] for i in xrange(len(opt_vals))
    ]

    unscaled_free_params = {
        names[i]: val
        for i, val in enumerate(unscaled_opt_vals)
    }
    template_params = dict(unscaled_free_params.items() +
                           get_values(fixed_params).items())

    # Now get true template, and compute metric
    with Timer() as t:
        if template_params['theta23'] == 0.0:
            logging.info(
                "Zero theta23, so generating no oscillations template...")
            true_template = template_maker.get_template_no_osc(template_params)
        else:
            true_template = template_maker.get_template(template_params)

    tprofile.info("==> elapsed time for template maker: %s sec" % t.secs)
    true_fmap = flatten_map(template=true_template,
                            channel=template_params['channel'])

    # NOTE: The minus sign is present on both of these next two lines
    # because the optimizer finds a minimum rather than maximum, so we
    # have to minimize the negative of the log likelihood.
    if metric_name == 'chisquare':
        metric_val = get_binwise_chisquare(fmap, true_fmap)
        metric_val += sum([
            prior.chi2(opt_val)
            for (opt_val, prior) in zip(unscaled_opt_vals, priors)
        ])
    elif metric_name == 'llh':
        metric_val = -get_binwise_llh(fmap, true_fmap)
        metric_val -= sum([
            prior.llh(opt_val)
            for (opt_val, prior) in zip(unscaled_opt_vals, priors)
        ])

    # Save all optimizer-tested values to opt_steps_dict, to see
    # optimizer history later
    for key in names:
        opt_steps_dict[key].append(template_params[key])
    opt_steps_dict[metric_name].append(metric_val)

    physics.debug("%s is %.2f at: " % (metric_name, metric_val))
    for name, val in zip(names, opt_vals):
        physics.debug(" %20s = %6.4f" % (name, val))

    return metric_val
Example #15
def get_fisher_matrices(template_settings, grid_settings, IMH=True, NMH=False,
                        dump_all_stages=False, save_templates=False,
                        outdir=None):
  '''
  Main function that runs the Fisher analysis for the chosen hierarchy(ies) (inverted by default).

  Returns a dictionary of Fisher matrices, in the format:
  {'IMH': {'cscd': [...],
          'trck': [...],
          'comb': [...],
          },
  'NMH': {'cscd': [...],
          'trck': [...],
          'comb': [...],
         }
  }

  If save_templates=True and no hierarchy is given, only fiducial templates will be written out;
  if one is given, then the templates used to obtain the gradients will be written out in
  addition.
  '''
  if outdir is None and (save_templates or dump_all_stages):
    logging.info("No output directory specified. Will save templates to current working directory.")
    outdir = os.getcwd()

  tprofile.info("start initializing")

  # Get the parameters
  params = template_settings['params']
  bins = template_settings['binning']

  # Artificially add the hierarchy parameter to the list of parameters
  # The method get_hierarchy_gradients below will know how to deal with it
  params['hierarchy_nh'] = { "value": 1., "range": [0.,1.],
                           "fixed": False, "prior": None}
  params['hierarchy_ih'] = { "value": 0., "range": [0.,1.],
                           "fixed": False, "prior": None}

  chosen_data = []
  if IMH:
    chosen_data.append(('IMH',False))
    logging.info("Fisher matrix will be built for IMH.")
  if NMH:
    chosen_data.append(('NMH',True))
    logging.info("Fisher matrix will be built for NMH.")
  if chosen_data == []:
    # In this case, only the fiducial maps (for both hierarchies) will be written
    logging.info("No Fisher matrices will be built.")

  # There is no sense in performing any of the following steps if no Fisher matrices are to be built
  # and no templates are to be saved.
  if chosen_data!=[] or dump_all_stages or save_templates:

    # Initialise return dict to hold Fisher matrices
    fisher = { data_tag:{'cscd':[],'trck':[],'comb':[]} for data_tag, data_normal in chosen_data }

    # Get a template maker with the settings used to initialize
    template_maker = TemplateMaker(get_values(params),**bins)

    tprofile.info("stop initializing\n")

    # Generate fiducial templates for both hierarchies (needed for partial derivatives
    # w.r.t. hierarchy parameter)
    fiducial_maps = {}
    for hierarchy in ['NMH','IMH']:

      logging.info("Generating fiducial templates for %s."%hierarchy)

      # Get the fiducial parameter values corresponding to this hierarchy
      fiducial_params = select_hierarchy(params,normal_hierarchy=(hierarchy=='NMH'))

      # Generate fiducial maps, either all of them or only the ultimate one
      tprofile.info("start template calculation")
      with Timer() as t:
        fid_maps = template_maker.get_template(get_values(fiducial_params),
                                               return_stages=dump_all_stages)
      tprofile.info("==> elapsed time for template: %s sec"%t.secs)

      fiducial_maps[hierarchy] = fid_maps[4] if dump_all_stages else fid_maps

      # save fiducial map(s)
      # all stages
      if dump_all_stages:
        stage_names = ("0_unoscillated_flux","1_oscillated_flux","2_oscillated_counts","3_reco","4_pid")
        stage_maps = {}
        for stage in xrange(0,len(fid_maps)):
          stage_maps[stage_names[stage]] = fid_maps[stage]
        logging.info("Writing fiducial maps (all stages) for %s to %s."%(hierarchy,outdir))
        to_json(stage_maps,os.path.join(outdir,"fid_map_"+hierarchy+".json"))
      # only the final stage
      elif save_templates:
        logging.info("Writing fiducial map (final stage) for %s to %s."%(hierarchy,outdir))
        to_json(fiducial_maps[hierarchy],os.path.join(outdir,"fid_map_"+hierarchy+".json"))

    # Get_gradients and get_hierarchy_gradients will both (temporarily)
    # store the templates used to generate the gradient maps
    store_dir = outdir if save_templates else tempfile.gettempdir()

  # Calculate Fisher matrices for the user-defined cases (NMH true and/or IMH true)
    for data_tag, data_normal in chosen_data:

      logging.info("Running Fisher analysis for %s."%(data_tag))

      # The fiducial params are selected from the hierarchy case that does NOT match
      # the data, as we are varying from this model to find the 'best fit'
      fiducial_params = select_hierarchy(params,not data_normal)

      # Get the free parameters (i.e. those for which the gradients should be calculated)
      free_params = select_hierarchy(get_free_params(params),not data_normal)
      gradient_maps = {}
      for param in free_params.keys():
        # Special treatment for the hierarchy parameter
        if param=='hierarchy':
          gradient_maps[param] = get_hierarchy_gradients(
              data_tag=data_tag,
              fiducial_maps=fiducial_maps,
              fiducial_params=fiducial_params,
              grid_settings=grid_settings,
              store_dir=store_dir
          )
        else:
          gradient_maps[param] = get_gradients(
              data_tag=data_tag,
              param=param,
              template_maker=template_maker,
              fiducial_params=fiducial_params,
              grid_settings=grid_settings,
              store_dir=store_dir
          )

      logging.info("Building Fisher matrix for %s."%(data_tag))

      # Build Fisher matrices for the given hierarchy
      fisher[data_tag] = build_fisher_matrix(
          gradient_maps=gradient_maps,
          fiducial_map=fiducial_maps['IMH'] if data_normal else fiducial_maps['NMH'],
          template_settings=fiducial_params
      )

      # If Fisher matrices exist for both channels, add the matrices to obtain
      # the combined one.
      if len(fisher[data_tag].keys()) > 1:
        fisher[data_tag]['comb'] = FisherMatrix(
            matrix=np.array([f.matrix for f in fisher[data_tag].itervalues()]).sum(axis=0),
            parameters=gradient_maps.keys(),  #order is important here!
            best_fits=[fiducial_params[par]['value'] for par in gradient_maps.keys()],
            priors=[Prior.from_param(fiducial_params[par]) for par in gradient_maps.keys()],
        )
    return fisher

  else:
    logging.info("Nothing to be done.")
    return {}
Example #16
    results[data_tag] = {}
    # 1) get "Asimov" average fiducial template:
    asimov_fmap = get_asimov_fmap(pseudo_data_template_maker,
                                  get_values(
                                      select_hierarchy(
                                          pseudo_data_settings['params'],
                                          normal_hierarchy=data_normal)),
                                  channel=channel)

    # 2) find max llh or min chisquare (and best fit free params) from matching pseudo data
    #    to templates.
    for hypo_tag, hypo_normal in [('hypo_NMH', True), ('hypo_IMH', False)]:
        physics.info("Finding best fit for %s under %s assumption" %
                     (data_tag, hypo_tag))
        tprofile.info("start optimizer")
        tprofile.info("Using %s" % metric_name)

        opt_data = find_opt_bfgs(asimov_fmap,
                                 template_maker,
                                 template_settings['params'],
                                 minimizer_settings,
                                 args.save_steps,
                                 normal_hierarchy=hypo_normal,
                                 check_octant=args.check_octant,
                                 metric_name=metric_name)

        tprofile.info("stop optimizer")

        # Store the optimum data
        results[data_tag][hypo_tag] = opt_data
Example #17
if args.gpu_id is not None:
    template_settings['params']['gpu_id'] = {}
    template_settings['params']['gpu_id']['value'] = args.gpu_id
    template_settings['params']['gpu_id']['fixed'] = True

template_maker = TemplateMaker(get_values(template_settings['params']),
                               **template_settings['binning'])

# Assemble output dict
output = {'template_settings' : template_settings,
          'minimizer_settings' : minimizer_settings}


for data_tag, data_normal in [('data_NMH',True),('data_IMH',False)]:
    tprofile.info("Assuming: %s"%data_tag)

    output[data_tag] = {}

    # Get Asimov data set for assuming true: data_tag, and store for
    # later comparison
    asimov_data = getAsimovData(
        template_maker, template_settings['params'], data_normal)
    output[data_tag]['asimov_data'] = asimov_data

    trials = []
    for itrial in xrange(1,args.ntrials+1):
        results = {} # one trial of results

        tprofile.info("start trial %d"%itrial)
        logging.info(">"*10 + "Running trial: %05d"%itrial + "<"*10)
Example #18
                        help="Title of the geometry or test in plots")
    parser.add_argument('--save',action='store_true',default=False,
                        help="Save plots in cwd")
    parser.add_argument('-o','--outdir',metavar='DIR',default="",
                        help="Directory to save the output figures.")
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help='set verbosity level')
    args = parser.parse_args()
    set_verbosity(args.verbose)

    template_settings = from_json(args.template_settings)

    with Timer() as t:
        template_maker = TemplateMaker(get_values(template_settings['params']),
                                       **template_settings['binning'])
    tprofile.info("==> elapsed time to initialize templates: %s sec"%t.secs)

    # Make nmh template:
    nmh_params = select_hierarchy(template_settings['params'],
                                  normal_hierarchy=True)
    imh_params = select_hierarchy(template_settings['params'],
                                  normal_hierarchy=False)
    with Timer(verbose=False) as t:
        nmh = template_maker.get_template(get_values(nmh_params), return_stages=args.all)
    tprofile.info("==> elapsed time to get NMH template: %s sec"%t.secs)
    with Timer(verbose=False) as t:
        imh = template_maker.get_template(get_values(imh_params), return_stages=args.all)
    tprofile.info("==> elapsed time to get IMH template: %s sec"%t.secs)

    # Or equivalently, if args.all:
    if type(nmh) is tuple:
Example #19
    template_settings['params']['gpu_id'] = {}
    template_settings['params']['gpu_id']['value'] = args.gpu_id
    template_settings['params']['gpu_id']['fixed'] = True

#Get the parameters
params = template_settings['params']

# Make sure that atmospheric parameters are fixed:
logging.warn(
    "Ensuring that atmospheric parameters are fixed for this analysis")
params = fix_atm_params(params)

with Timer() as t:
    template_maker = TemplateMaker(get_values(params),
                                   **template_settings['binning'])
tprofile.info("==> elapsed time to initialize templates: %s sec" % t.secs)

results = {}
# Store for future checking:
results['template_settings'] = template_settings
results['minimizer_settings'] = minimizer_settings
results['grid_settings'] = grid_settings

# Set up data/hypo nmh or imh
if args.data_nmh:
    data_tag = 'data_NMH'
    data_normal = True
else:
    data_tag = 'data_IMH'
    data_normal = False
if args.hypo_nmh:
Example #20
    def fill_osc_prob(self,
                      osc_prob_dict,
                      ecen,
                      czcen,
                      theta12=None,
                      theta13=None,
                      theta23=None,
                      deltam21=None,
                      deltam31=None,
                      deltacp=None,
                      energy_scale=None,
                      YeI=None,
                      YeO=None,
                      YeM=None,
                      **kwargs):
        '''
        Loops over ecen,czcen and fills the osc_prob_dict maps, with
        probabilities calculated according to prob3
        '''

        neutrinos = ['nue', 'numu', 'nutau']
        anti_neutrinos = ['nue_bar', 'numu_bar', 'nutau_bar']
        mID = ['', '_bar']

        nu_barger = {
            'nue': 1,
            'numu': 2,
            'nutau': 3,
            'nue_bar': 1,
            'numu_bar': 2,
            'nutau_bar': 3
        }

        logging.info("Defining osc_prob_dict from BargerPropagator...")
        tprofile.info("start oscillation calculation")
        # Set to true, since we are using sin^2(theta) variables
        kSquared = True
        sin2th12Sq = np.sin(theta12)**2
        sin2th13Sq = np.sin(theta13)**2
        sin2th23Sq = np.sin(theta23)**2
        evals = []
        czvals = []
        total_bins = int(len(ecen) * len(czcen))
        mod = total_bins / 20
        loglevel = logging.root.getEffectiveLevel()
        for ie, energy in enumerate(ecen):
            for icz, coszen in enumerate(czcen):
                evals.append(energy)
                czvals.append(coszen)
                scaled_energy = energy * energy_scale

                if loglevel <= logging.INFO:
                    if ((ie + 1) * (icz + 1) % mod == 0):
                        sys.stdout.write(".")
                        sys.stdout.flush()

                # The BargerPropagator code takes the "atmospheric
                # mass difference" - the splitting between the two
                # nearest mass eigenstates - so it takes as input
                # deltam31 for IMH and deltam32 for NMH
                mAtm = deltam31 if deltam31 < 0.0 else (deltam31 - deltam21)

                ########### FIRST FOR NEUTRINOS ##########
                kNuBar = 1  # +1 for nu -1 for nubar
                self.barger_prop.SetMNS(sin2th12Sq, sin2th13Sq, sin2th23Sq,
                                        deltam21, mAtm, deltacp, scaled_energy,
                                        kSquared, kNuBar)

                self.barger_prop.DefinePath(coszen, self.prop_height, YeI, YeO,
                                            YeM)
                self.barger_prop.propagate(kNuBar)

                for nu in ['nue', 'numu']:
                    nu_i = nu_barger[nu]
                    nu = nu + '_maps'
                    for to_nu in neutrinos:
                        nu_f = nu_barger[to_nu]
                        osc_prob_dict[nu][to_nu].append(
                            self.barger_prop.GetProb(nu_i, nu_f))

                ########### SECOND FOR ANTINEUTRINOS ##########
                kNuBar = -1
                self.barger_prop.SetMNS(sin2th12Sq, sin2th13Sq, sin2th23Sq,
                                        deltam21, mAtm, deltacp, scaled_energy,
                                        kSquared, kNuBar)
                self.barger_prop.DefinePath(coszen, self.prop_height, YeI, YeO,
                                            YeM)
                self.barger_prop.propagate(kNuBar)

                for nu in ['nue_bar', 'numu_bar']:
                    nu_i = nu_barger[nu]
                    nu += '_maps'
                    for to_nu in anti_neutrinos:
                        nu_f = nu_barger[to_nu]
                        osc_prob_dict[nu][to_nu].append(
                            self.barger_prop.GetProb(nu_i, nu_f))

        if loglevel <= logging.INFO: sys.stdout.write("\n")

        tprofile.info("stop oscillation calculation")

        return evals, czvals
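The mAtm line above encodes the BargerPropagator convention described in the comment: pass deltam31 directly for IMH (where it is negative), and deltam32 = deltam31 - deltam21 for NMH. A quick numeric check with representative splittings (values are illustrative only):

deltam21 = 7.5e-5                        # eV^2, illustrative value
for deltam31 in (2.46e-3, -2.45e-3):     # NMH, then IMH
    mAtm = deltam31 if deltam31 < 0.0 else (deltam31 - deltam21)
    print("deltam31 = %g  ->  mAtm = %g" % (deltam31, mAtm))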
Example #21
# //////////////////////////////////////////////////////////////////////
results = {}
for data_tag, data_normal in [('data_NMH',True),('data_IMH',False)]:

    results[data_tag] = {}
    # 1) get "Asimov" average fiducial template:
    asimov_fmap = get_asimov_fmap(pseudo_data_template_maker,
                                  get_values(select_hierarchy(pseudo_data_settings['params'],
                                                              normal_hierarchy=data_normal)),
                                  channel=channel)

    # 2) find max llh or min chisquare (and best fit free params) from matching pseudo data
    #    to templates.
    for hypo_tag, hypo_normal in [('hypo_NMH',True),('hypo_IMH',False)]:
        physics.info("Finding best fit for %s under %s assumption"%(data_tag,hypo_tag))
	tprofile.info("start optimizer")
	tprofile.info("Using %s"%metric_name)

	opt_data = find_opt_bfgs(asimov_fmap,template_maker,template_settings['params'],
				 minimizer_settings,args.save_steps,
				 normal_hierarchy=hypo_normal, check_octant=args.check_octant,
				 metric_name=metric_name)

	tprofile.info("stop optimizer")

	# Store the optimum data
	results[data_tag][hypo_tag] = opt_data

# Assemble output dict
output = {'results' : results,
          'template_settings' : template_settings,
Example #22
                        default="template.json",
                        help='file to store the output')
    args = parser.parse_args()

    set_verbosity(args.verbose)

    with Timer() as t:
        #Load all the settings
        model_settings = from_json(args.template_settings)

        #Select a hierarchy
        logging.info('Selected %s hierarchy' %
                     ('normal' if args.normal else 'inverted'))
        params = select_hierarchy(model_settings['params'],
                                  normal_hierarchy=args.normal)

        #Initialize template maker
        template_maker = TemplateMaker(get_values(params),
                                       **model_settings['binning'])
    tprofile.info("  ==> elapsed time to initialize templates: %s sec" %
                  t.secs)

    #Now get the actual template
    with Timer(verbose=False) as t:
        template_maps = template_maker.get_template(
            get_values(params), return_stages=args.save_all)
    tprofile.info("==> elapsed time to get template: %s sec" % t.secs)

    logging.info("Saving file to %s" % args.outfile)
    to_json(template_maps, args.outfile)
Example #23
    def fill_osc_prob(self, osc_prob_dict, ecen, czcen,
                      theta12=None, theta13=None, theta23=None,
                      deltam21=None, deltam31=None, deltacp=None,
                      energy_scale=None, YeI = None, YeO = None,
                      YeM = None,**kwargs):
        '''
        Loops over ecen,czcen and fills the osc_prob_dict maps, with
        probabilities calculated according to prob3
        '''

        neutrinos = ['nue','numu','nutau']
        anti_neutrinos = ['nue_bar','numu_bar','nutau_bar']
        mID = ['','_bar']

        nu_barger = {'nue':1,'numu':2,'nutau':3,
                     'nue_bar':1,'numu_bar':2,'nutau_bar':3}

        logging.info("Defining osc_prob_dict from BargerPropagator...")
        tprofile.info("start oscillation calculation")
        # Set to true, since we are using sin^2(theta) variables
        kSquared = True
        sin2th12Sq = np.sin(theta12)**2
        sin2th13Sq = np.sin(theta13)**2
        sin2th23Sq = np.sin(theta23)**2
        evals = []
        czvals = []
        total_bins = int(len(ecen)*len(czcen))
        mod = total_bins/20
        loglevel = logging.root.getEffectiveLevel()
        for ie,energy in enumerate(ecen):
            for icz, coszen in enumerate(czcen):
                evals.append(energy)
                czvals.append(coszen)
                scaled_energy = energy*energy_scale

                if loglevel <= logging.INFO:
                    if( (ie+1)*(icz+1) % mod == 0):
                        sys.stdout.write(".")
                        sys.stdout.flush()

                # The BargerPropagator code takes the "atmospheric
                # mass difference" - the splitting between the two
                # nearest mass eigenstates - so it takes as input
                # deltam31 for IMH and deltam32 for NMH
                mAtm = deltam31 if deltam31 < 0.0 else (deltam31 - deltam21)

                ########### FIRST FOR NEUTRINOS ##########
                kNuBar = 1 # +1 for nu -1 for nubar
                self.barger_prop.SetMNS(sin2th12Sq,sin2th13Sq,sin2th23Sq,deltam21,mAtm,
                                        deltacp,scaled_energy,kSquared,kNuBar)

                self.barger_prop.DefinePath(coszen, self.prop_height, YeI, YeO, YeM)
                self.barger_prop.propagate(kNuBar)

                for nu in ['nue','numu']:
                    nu_i = nu_barger[nu]
                    nu = nu+'_maps'
                    for to_nu in neutrinos:
                        nu_f = nu_barger[to_nu]
                        osc_prob_dict[nu][to_nu].append(
                            self.barger_prop.GetProb(nu_i,nu_f))


                ########### SECOND FOR ANTINEUTRINOS ##########
                kNuBar = -1
                self.barger_prop.SetMNS(sin2th12Sq,sin2th13Sq,sin2th23Sq,deltam21,
                                        mAtm,deltacp,scaled_energy,kSquared,kNuBar)
                self.barger_prop.DefinePath(coszen, self.prop_height, YeI, YeO, YeM)
                self.barger_prop.propagate(kNuBar)

                for nu in ['nue_bar','numu_bar']:
                    nu_i = nu_barger[nu]
                    nu+='_maps'
                    for to_nu in anti_neutrinos:
                        nu_f = nu_barger[to_nu]
                        osc_prob_dict[nu][to_nu].append(
                            self.barger_prop.GetProb(nu_i,nu_f))

        if loglevel <= logging.INFO: sys.stdout.write("\n")

        tprofile.info("stop oscillation calculation")

        return evals,czvals