# Discovery-potential run for the flux-weighted SwiftBAT70m stacking search.
# NOTE: the original file had its newlines stripped, which left everything
# after the first '##' comment dead; structure restored here.
src_ra = np.radians(src_ra)
## NOTE: ADD WEIGHTS HERE FOR THE INJECTED EVENTS!! (Naturally only for flux, redshift. (tried with theo_weight, haven't tested it yet)
inj = PointSourceInjector(Gamma, sinDec_bandwidth=.05, src_dec=src_dec,
                          theo_weight=flux, seed=0)
# alpha=2.867e-7 is the one-sided 5-sigma p-value; beta=.5 -> discovery potential.
results = PointSourceLLH.weighted_sensitivity(llh_flux, src_ra=src_ra,
                                              src_dec=src_dec,
                                              alpha=2.867e-7, beta=.5, inj=inj,
                                              trials={'n_inj': [], 'TS': [],
                                                      'nsources': [], 'gamma': []},
                                              bckg_trials=bckg_trials,
                                              eps=0.01, n_iter=250)
print(results)

# choose an output dir, and make sure it exists
this_dir = os.path.dirname(os.path.abspath(__file__))
out_dir = misc.ensure_dir(
    '/data/user/brelethford/Output/stacking_sensitivity/SwiftBAT70m/flux/disc/')
# Single-source sensitivity run. The original line began with '##', so the
# whole chunk had been commented out by the newline-stripping; restored here.
##I need to have this
src_dec = [np.radians(dec_deg)]
src_ra = [0.0]
## NOTE: ADD WEIGHTS HERE FOR THE INJECTED EVENTS!! (Naturally only for flux, redshift.
##For now the weighted sensitivity function only works if there are at least two sources. to push it through for a single source, I'll copy the source location.##
### Mike - This is the step that allows the function weighted_sensitivity to process! ###
#src_dec=[src_dec[0],src_dec[0]]
#src_ra=[src_ra[0],src_ra[0]]
inj = PointSourceInjector(Gamma, sinDec_bandwidth=.05, src_dec=src_dec, seed=0)
# alpha=.5 / beta=.9 -> 90% C.L. sensitivity (median background TS threshold).
results = PointSourceLLH.weighted_sensitivity(llh_single, src_ra=src_ra,
                                              src_dec=src_dec,
                                              alpha=.5, beta=.9, inj=inj,
                                              trials={'n_inj': [], 'TS': [],
                                                      'nsources': [], 'gamma': []},
                                              bckg_trials=bckg_trials,
                                              eps=0.01, n_iter=250,
                                              miniter=2500)  # maxtrial=1000)
#Currently have n_iter down from 1000 to reduce estimation time. Also lowering maxtrial from 1000 to 500.
print(results)

#choose an output dir, and make sure it exists
this_dir = os.path.dirname(os.path.abspath(__file__))
out_dir = misc.ensure_dir(outfolder + 'sensitivity/')

# save the output
outfile = out_dir + 'dec{0:+010.5}.array'.format(dec_deg)
print('Saving', outfile, '...')
cache.save(results, outfile)
# mc = datatest_stack.MC()
# extra = datatest_stack.extra()
# dpsi = extra["dpsi"]
# print llh
## Okay, so the following is the part where we need to split this up into
## parallel processing. I think the pertinent variable to use here is
## n_iter... let's test a background scramble with n_iter=5 to see how fast
## it goes. Though, maybe it's max_iter? Check with previously pickled
## results to see which number of bckg trials we got.
#bckg_trials_flux = PointSourceLLH.background_scrambles(llh_flux,src_ra,src_dec,alpha=0.5, maxiter=batchsize)
## And let's also cache bckg_trials for the other weighting schemes. ##
# bckg_trials_flux = PointSourceLLH.background_scrambles(llh_flux, src_ra, src_dec, alpha=0.5, maxiter=batchsize)
#
#def get_redshift_bckg_trials():
#    return PointSourceLLH.background_scrambles(llh_redshift,src_ra,src_dec,alpha=0.5)
#
#bckg_trials_redshift = cache.get (datafolder + 'SwiftBAT70m/pickle/bckg_trials_redshift.pickle', get_redshift_bckg_trials)
#
## This one's gonna work a little differently than the single-source
## sensitivity. First off, we need to calculate the background scrambles
## ahead of time, with the definition provided in psLLH_stack.py. I'll try
## to do this all within the same function. ##
## Background Trials have the following keys: ##
## ['beta', 'TS_beta', 'beta_err', 'n_inj', 'nsources', 'TS', 'gamma'] ##
## Let's use a uniform weight (none) first to yield our bckg trials. ##
params['redshift'][6]], [params['flux'][0], params['flux'][6]] print('my sources are at declination(s):') ## There are three modelweights I can use, so lets put them in a dictionary for easy access. ## modelweights = {'flux': flux, 'redshift': list(np.power(redshift, -2))} import data_box ## We'll assign the proper weighting scheme for the search, then use it to calculate and cache the associated bckg trials: ## llhmodel = data_box.init(energy=True, weighting=modelweights['flux']) bckg_trials = PointSourceLLH.background_scrambles(llhmodel, src_ra, src_dec, alpha=0.5, maxiter=30000) print(bckg_trials['TS']) #choose an output dir, and make sure it exists this_dir = os.path.dirname(os.path.abspath(__file__)) out_dir = misc.ensure_dir( '/data/user/brelethford/Output/stacking_sensitivity/SwiftBAT70m/northsouth_one_bckg/{}/background_trials/' .format(sky)) # save the output outfile = out_dir + 'background_box.array' print 'Saving', outfile, '...' cache.save(bckg_trials, outfile)
llhmodel = data_multi.init79(energy=True, mode='box') elif year == '86': llhmodel = data_multi.init86I(energy=True, mode='box') elif year == '59': llhmodel = data_multi.init59(energy=True, mode='box') #If I change the injection range I'll redefine the _e_range variable in ps_injector_stack.py. sensitivity = PointSourceLLH.weighted_sensitivity(llhmodel, src_ra=src_ra, src_dec=src_dec, alpha=.5, beta=.9, inj=inj, trials={ 'n_inj': [], 'TS': [], 'nsources': [], 'gamma': [] }, bckg_trials=bckg_trials, eps=0.02, n_iter=250) print sensitivity #discovery = PointSourceLLH.weighted_sensitivity(llhmodel,src_ra=src_ra,src_dec=src_dec,alpha=2.867e-7,beta=.5,inj=inj,trials={'n_inj':[],'TS':[],'nsources':[],'gamma':[]},bckg_trials=bckg_trials,eps=0.01,n_iter=250) #print discovery #choose an output dir, and make sure it exists this_dir = os.path.dirname(os.path.abspath(__file__))
# The original line began with '#', so the three live statements at its tail
# had been swallowed into the comment; restored as code below.
#bckg_trials_uniform = PointSourceLLH.background_scrambles(llh_uniform,src_ra,src_dec,alpha=0.5, maxiter=batchsize)
## And let's also cache bckg_trials for the other weighting schemes. ##
#
#def get_flux_bckg_trials():
#    return PointSourceLLH.background_scrambles(llh_flux,src_ra,src_dec,alpha=0.5)
#
#bckg_trials_flux = cache.get (datafolder + 'SwiftBAT70m/pickle/bckg_trials_flux.pickle', get_flux_bckg_trials)
#
# bckg_trials_redshift = PointSourceLLH.background_scrambles(llh_redshift, src_ra, src_dec, alpha=0.5, maxiter=batchsize)
#
##This one's gonna work a little differently than the single source sensitivity. First off, we need to calculate the background scrambles ahead of time, with the definition provided in psLLH_stack.py. I'll try to do this all within the same function:##
## Background Trials have the following keys:##
##['beta', 'TS_beta', 'beta_err', 'n_inj', 'nsources', 'TS', 'gamma']##
## Let's use a uniform weight (none) first to yield our bckg trials. ##

#choose an output dir, and make sure it exists
this_dir = os.path.dirname(os.path.abspath(__file__))
out_dir = misc.ensure_dir(
    '/data/user/brelethford/Output/stacking_sensitivity/SwiftBAT70m/redshift/background_trials/')
dec_deg = np.arcsin(opts.dec) * 180. / np.pi src_ra = [0.0] src_dec = [np.radians(opts.dec)] batch = opts.batch batchsize = opts.batchsize import datatest_stack_bins llh_uniform = datatest_stack_bins.init(energy=False) ##Okay, so the following is the part where we need to split this up into parallel processing. I think the pertinant variable to use here is n_iter... let's test a background scramble with n_iter=5 to see how fast it goes. Though, maybe it's max_iter? check with previously pickled results to see which number of bckg trials we got. bckg_trials_single = PointSourceLLH.background_scrambles(llh_uniform, src_ra, src_dec, alpha=0.5, maxiter=batchsize) ## Background Trials have the following keys:## ##['beta', 'TS_beta', 'beta_err', 'n_inj', 'nsources', 'TS', 'gamma']## ## Let's use a uniform weight (none) first to yield our bckg trials. ## #choose an output dir, and make sure it exists this_dir = os.path.dirname(os.path.abspath(__file__)) out_dir = misc.ensure_dir( '/data/user/brelethford/Output/all_sky_sensitivity/results/single_stacked/no_energy/dec{0:+010.5}/' .format(dec_deg)) # save the output outfile = out_dir + 'background_dec_{0:+010.5}_batch_{1:03}.array'.format(