# llh86I= data_multi.init86I(energy=True,mode='box') # llh59= data_multi.init59(energy=True,mode='box') # llh40= data_multi.init40(energy=True,mode='box') # #else: # llh40= data_multi.init40(energy=True, weighting = modelweights['{}'.format(llhweight)],mode='box') # llh79 = data_multi.init79(energy=True, weighting = modelweights['{}'.format(llhweight)],mode='box') # llh86I= data_multi.init86I(energy=True, weighting = modelweights['{}'.format(llhweight)],mode='box') # llh59= data_multi.init59(energy=True, weighting = modelweights['{}'.format(llhweight)],mode='box') import new_data_multi ## Like in the background trials, we have to define which llhmodel to use. if llhweight == 'uniform': llh79 = new_data_multi.init79(energy=True, mode='box') llh86I = new_data_multi.init86I(energy=True, mode='box') llh59 = new_data_multi.init59(energy=True, mode='box') llh40 = new_data_multi.init40(energy=True, mode='box') else: llh40 = new_data_multi.init40( energy=True, weighting=modelweights['{}'.format(llhweight)], mode='box') llh79 = new_data_multi.init79( energy=True, weighting=modelweights['{}'.format(llhweight)], mode='box') llh86I = new_data_multi.init86I( energy=True, weighting=modelweights['{}'.format(llhweight)],
# Parse command-line options for this run.  `parser` is an OptionParser
# created earlier in the file (not visible in this chunk); only the
# '--years' option is defined here -- the other opts.* attributes below
# come from options added elsewhere.
parser.add_option('--years', dest='years', type=int, default=4,
                  metavar='YEARS', help='Number of years of data')

opts, args = parser.parse_args()
batch = opts.batch
batchsize = opts.batchsize
llhweight = opts.llhweight
years = opts.years

## We'll assign the proper weighting scheme for the search, then use it to
## calculate and cache the associated bckg trials: ##
# One likelihood object per detector season (IC40 / IC59 / IC79 / IC86-I).
llh79 = new_data_multi.init79(energy=True, mode='box')
llh86I = new_data_multi.init86I(energy=True, mode='box')
llh59 = new_data_multi.init59(energy=True, mode='box')
llh40 = new_data_multi.init40(energy=True, mode='box')

# We've loaded in the appropriate llh samples, now let's put them both in
# the blender (not sure about weighting).  Each additional year prepends
# one older season to the stack.
if years == 2:
    samples = [llh79, llh86I]
elif years == 3:
    samples = [llh59, llh79, llh86I]
elif years == 4:
    samples = [llh40, llh59, llh79, llh86I]
else:
    # Previously any other value fell through with `samples` unbound and
    # the script died later with a confusing NameError; fail fast with a
    # clear message instead.
    raise ValueError('--years must be 2, 3, or 4; got {!r}'.format(years))

llhmodel = new_data_multi.multi_init(samples, energy=True)
# NOTE(review): this chunk begins mid-statement -- the lines below are the
# trailing keyword arguments of a parser.add_option('--years', ...) call
# whose opening lies outside the visible source.
dest='years', type=int, default=4, metavar='YEARS',
    help='Number of years of data')

opts, args = parser.parse_args()
batch = opts.batch
batchsize = opts.batchsize
llhweight = opts.llhweight
years = opts.years

## We'll assign the proper weighting scheme for the search, then use it to
## calculate and cache the associated bckg trials: ##
# One likelihood object per detector season (IC40 / IC59 / IC79 / IC86-I).
# NOTE(review): unlike the other initialisation path in this file, these
# calls pass no mode='box' argument -- confirm this difference is intended.
llh79 = new_data_multi.init79(energy=True)
llh86I = new_data_multi.init86I(energy=True)
llh59 = new_data_multi.init59(energy=True)
llh40 = new_data_multi.init40(energy=True)

# We've loaded in the appropriate llh samples, now let's put them both in
# the blender (not sure about weighting).  Each additional year prepends
# one older season to the stack.
# NOTE(review): values of `years` other than 2, 3, 4 leave `samples`
# unbound, so multi_init below would raise NameError.
if years == 2:
    samples = [llh79, llh86I]
elif years == 3:
    samples = [llh59, llh79, llh86I]
elif years == 4:
    samples = [llh40, llh59, llh79, llh86I]

llhmodel = new_data_multi.multi_init(samples, energy=True)

# NOTE(review): the statement below is truncated in the source as seen here
# -- the do_trials(...) call is never closed; its arguments lie outside the
# visible source.
bckg_trials = StackingMultiPointSourceLLH.do_trials(