Example #1
# mc = datatest_stack.MC()
# extra=datatest_stack.extra()
# dpsi=extra["dpsi"]
# print(llh)

##Okay, so the following is the part where we need to split this up into parallel processing. I think the pertinent variable to use here is n_iter... let's test a background scramble with n_iter=5 to see how fast it goes. Though maybe it's maxiter? Check against the previously pickled results to see how many bckg trials we got.
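
## A rough sketch of the split (hypothetical, not the actual submission code; `n_jobs` and `out_dir` are made-up names): run independent jobs, each doing `batchsize` scrambles via maxiter, and tag each output file with its batch index. ##
#n_jobs = 20  # hypothetical: n_jobs * batchsize total trials
#for batch in range(n_jobs):
#    trials = PointSourceLLH.background_scrambles(llh_flux, src_ra, src_dec,
#                                                 alpha=0.5, maxiter=batchsize)
#    cache.save(trials, out_dir + 'background_batch_{:03}.array'.format(batch))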

#bckg_trials_flux = PointSourceLLH.background_scrambles(llh_flux,src_ra,src_dec,alpha=0.5, maxiter=batchsize)

## And let's also cache bckg_trials for the other weighting schemes. ##

#

# PointSourceLLH is defined in psLLH_stack.py (see the note below)
from psLLH_stack import PointSourceLLH

bckg_trials_flux = PointSourceLLH.background_scrambles(llh_flux,
                                                       src_ra,
                                                       src_dec,
                                                       alpha=0.5,
                                                       maxiter=batchsize)
#
#
#def get_redshift_bckg_trials():
#    return PointSourceLLH.background_scrambles(llh_redshift,src_ra,src_dec,alpha=0.5)
#
#bckg_trials_redshift = cache.get(datafolder + 'SwiftBAT70m/pickle/bckg_trials_redshift.pickle', get_redshift_bckg_trials)
#
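## (cache.get presumably computes and pickles the result on the first run, then just reads the pickle back on subsequent runs.) ##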

##This one's gonna work a little differently than the single-source sensitivity. First off, we need to calculate the background scrambles ahead of time, using the definition provided in psLLH_stack.py. I'll try to do this all within the same function:##

## Background Trials have the following keys:##
##['beta', 'TS_beta', 'beta_err', 'n_inj', 'nsources', 'TS', 'gamma']##
## Let's use a uniform weight (none) first to yield our bckg trials. ##
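
## A minimal sketch (not in the original script): alpha=0.5 above targets the median of the background TS distribution, so a quick sanity check is to pull the 'TS' key and look at its median. ##
import numpy as np

print('median background TS:', np.median(bckg_trials_flux['TS']))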
Example #2
#bckg_trials_uniform = PointSourceLLH.background_scrambles(llh_uniform,src_ra,src_dec,alpha=0.5, maxiter=batchsize)

## And let's also cache bckg_trials for the other weighting schemes. ##

#
#def get_flux_bckg_trials():
#    return PointSourceLLH.background_scrambles(llh_flux,src_ra,src_dec,alpha=0.5)
#
#bckg_trials_flux = cache.get(datafolder + 'SwiftBAT70m/pickle/bckg_trials_flux.pickle', get_flux_bckg_trials)
#
#
#
# PointSourceLLH comes from psLLH_stack.py (see the note below)
from psLLH_stack import PointSourceLLH

bckg_trials_redshift = PointSourceLLH.background_scrambles(llh_redshift,
                                                           src_ra,
                                                           src_dec,
                                                           alpha=0.5,
                                                           maxiter=batchsize)
#

##This one's gonna work a little differently than the single-source sensitivity. First off, we need to calculate the background scrambles ahead of time, using the definition provided in psLLH_stack.py. I'll try to do this all within the same function:##

## Background Trials have the following keys:##
##['beta', 'TS_beta', 'beta_err', 'n_inj', 'nsources', 'TS', 'gamma']##
## Let's use a uniform weight (none) first to yield our bckg trials. ##

#choose an output dir, and make sure it exists
import os
import misc  # the author's helper module providing ensure_dir; assumed importable here

this_dir = os.path.dirname(os.path.abspath(__file__))
out_dir = misc.ensure_dir(
    '/data/user/brelethford/Output/stacking_sensitivity/SwiftBAT70m/redshift/background_trials/'
)
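
## Presumably this script, like its siblings, ends by pickling the trials; a hedged sketch (the filename is hypothetical, and the author's `cache` helper is assumed in scope): ##
outfile = out_dir + 'background_redshift.array'  # hypothetical filename
print('Saving', outfile, '...')
cache.save(bckg_trials_redshift, outfile)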
Example #3
## (the opening of this script, imports included, was cut off; presumably a line like the following unpacks two of the catalog's sources, indices 0 and 6, from `params`:) ##
redshift, flux = [params['redshift'][0],
                  params['redshift'][6]], [params['flux'][0], params['flux'][6]]

print('my sources are at declination(s):', src_dec)

## There are three model weights I can use (uniform weighting just means passing none), so let's put the two non-trivial ones in a dictionary for easy access. ##

modelweights = {'flux': flux, 'redshift': list(np.power(redshift, -2))}
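
## Quick illustration (hypothetical numbers, not from the catalog): the z**-2 weight means a source at z = 0.01 counts 10^4 times as much as one at z = 1.0. ##
# np.power([0.01, 1.0], -2)  ->  array([1.e+04, 1.e+00])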

import data_box

## We'll assign the proper weighting scheme for the search, then use it to calculate and cache the associated bckg trials: ##
llhmodel = data_box.init(energy=True, weighting=modelweights['flux'])

bckg_trials = PointSourceLLH.background_scrambles(llhmodel,
                                                  src_ra,
                                                  src_dec,
                                                  alpha=0.5,
                                                  maxiter=30000)

print(bckg_trials['TS'])

#choose an output dir, and make sure it exists
this_dir = os.path.dirname(os.path.abspath(__file__))
out_dir = misc.ensure_dir(
    '/data/user/brelethford/Output/stacking_sensitivity/SwiftBAT70m/northsouth_one_bckg/{}/background_trials/'
    .format(sky))

# save the output
outfile = out_dir + 'background_box.array'
print('Saving', outfile, '...')
cache.save(bckg_trials, outfile)
Example #4
## (the imports and option parsing were cut from this snippet; `opts` presumably carries dec, batch, and batchsize, with opts.dec given as sin(declination), as the arcsin below implies) ##
dec_deg = np.arcsin(opts.dec) * 180. / np.pi

src_ra = [0.0]
src_dec = [np.arcsin(opts.dec)]  # radians, consistent with dec_deg above

batch = opts.batch
batchsize = opts.batchsize
import datatest_stack_bins

llh_uniform = datatest_stack_bins.init(energy=False)

##Okay, so the following is the part where we need to split this up into parallel processing. I think the pertinent variable to use here is n_iter... let's test a background scramble with n_iter=5 to see how fast it goes. Though maybe it's maxiter? Check against the previously pickled results to see how many bckg trials we got.

bckg_trials_single = PointSourceLLH.background_scrambles(llh_uniform,
                                                         src_ra,
                                                         src_dec,
                                                         alpha=0.5,
                                                         maxiter=batchsize)

## Background Trials have the following keys:##
##['beta', 'TS_beta', 'beta_err', 'n_inj', 'nsources', 'TS', 'gamma']##
## Let's use a uniform weight (none) first to yield our bckg trials. ##

#choose an output dir, and make sure it exists
this_dir = os.path.dirname(os.path.abspath(__file__))
out_dir = misc.ensure_dir(
    '/data/user/brelethford/Output/all_sky_sensitivity/results/single_stacked/no_energy/dec{0:+010.5}/'
    .format(dec_deg))

# save the output
outfile = out_dir + 'background_dec_{0:+010.5}_batch_{1:03}.array'.format(
    dec_deg, batch)
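
## (the script is cut off here; presumably it ends with a cache.save of bckg_trials_single to outfile, as in the other versions.) ##

## Hypothetical merge step, not in the original: once every batch job has finished, the per-batch arrays could be stitched back together along the 'TS' key, e.g.: ##
import glob
import numpy as np

def merge_background_batches(out_dir):
    """Concatenate background TS values from all saved batch files (sketch)."""
    ts = []
    for fname in sorted(glob.glob(out_dir + 'background_dec_*_batch_*.array')):
        trials = cache.load(fname)  # assumes the cache helper can read back what cache.save wrote
        ts.append(trials['TS'])
    return np.concatenate(ts)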