Example #1
parser.add_option('--years',
                  dest='years',
                  type=int,
                  default=3,
                  metavar='YEARS',
                  help='Number of years of data')
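
#(Assumption, not shown in this snippet:) opts.batch and opts.batchsize are read
#below, so the parser presumably defines options for them too. A minimal sketch of
#how they might be defined (the defaults here are assumptions, not from the source):
parser.add_option('--batch',
                  dest='batch',
                  type=int,
                  default=0,
                  metavar='BATCH',
                  help='Batch index for these background trials (assumed option)')
parser.add_option('--batchsize',
                  dest='batchsize',
                  type=int,
                  default=1000,
                  metavar='BATCHSIZE',
                  help='Number of trials per batch (assumed option)')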

opts, args = parser.parse_args()
batch = opts.batch
batchsize = opts.batchsize
years = opts.years
##For this check we'll use the gamma weighting scheme for the llh method and the injection. We'll add years of data depending on the variable 'years'.

llh59 = data_multi.init59(energy=True, weighting=flux)
llh79 = data_multi.init79(energy=True, weighting=flux)
llh86 = data_multi.init86I(energy=True, weighting=flux)
samples = [llh59, llh79, llh86]

llhmodel = data_multi.multi_init(samples, energy=True)

bckg_trials = PointSourceLLH.background_scrambles(llhmodel,
                                                  src_ra,
                                                  src_dec,
                                                  alpha=0.5,
                                                  maxiter=batchsize)

#choose an output dir, and make sure it exists
this_dir = os.path.dirname(os.path.abspath(__file__))
out_dir = misc.ensure_dir(
    '/data/user/brelethford/Output/stacking_sensitivity/2LAC/flux_3yr/background_trials/'
)
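
#A natural next step (not shown in this snippet) would be to write the scrambled
#trials into out_dir. A minimal sketch using only the standard library - the
#file name below is purely illustrative:
import pickle

outfile = os.path.join(out_dir, 'background_trials_batch{}.pickle'.format(batch))
with open(outfile, 'wb') as f:
    pickle.dump(bckg_trials, f)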
Example #2
##A single source at the declination given by dec_deg (converted to radians), with RA fixed to 0.
src_dec = [np.radians(dec_deg)]
src_ra = [0.0]

## Now to import my llh model framework. ##
import data_multi

## Like in the background trials, we have to define which llhmodel to use.
##Now I'll input my injection scheme (same for cut and uncut) and likelihood model (the MC varies with the injection range). ##
inj = PointSourceInjector(Gamma, sinDec_bandwidth=.05, src_dec=src_dec, seed=0)
if year == '40':
    llhmodel = data_multi.init40(energy=True, mode='box')
elif year == '79':
    llhmodel = data_multi.init79(energy=True, mode='box')
elif year == '86':
    llhmodel = data_multi.init86I(energy=True, mode='box')
elif year == '59':
    llhmodel = data_multi.init59(energy=True, mode='box')
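
#An equivalent, more compact way to pick the season (a sketch using the same
#init functions shown above):
#    init_funcs = {'40': data_multi.init40, '59': data_multi.init59,
#                  '79': data_multi.init79, '86': data_multi.init86I}
#    llhmodel = init_funcs[year](energy=True, mode='box')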

#If I change the injection range I'll redefine the _e_range variable in ps_injector_stack.py.

sensitivity = PointSourceLLH.weighted_sensitivity(llhmodel,
                                                  src_ra=src_ra,
                                                  src_dec=src_dec,
                                                  alpha=.5,
                                                  beta=.9,
                                                  inj=inj,
                                                  trials={
                                                      'n_inj': [],
                                                      'TS': [],
                                                      'nsources': [],
Example #3
bckg_trials = {
    'n_inj': n_inj,
    'nsources': np.asarray(nsources),
    'TS': np.asarray(TS),
    'beta': beta,
    'beta_err': beta_err,
    'TS_beta': TS_beta,
    'gamma': np.asarray(gamma)
}
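
#For reference, the fields appear to be: 'n_inj' = injected events per trial,
#'nsources' = fitted number of source events, 'TS' = test statistic values,
#'gamma' = fitted spectral indices, and 'beta'/'beta_err'/'TS_beta' = the fraction
#of trials above the TS threshold, its uncertainty, and that threshold
#(interpretation inferred from how the dictionary is rebuilt in Example #4).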

## Now to import my llh model framework. Both the llhmodel and the injector have to be the old version, but the data is everything used by the new version - fully up to date. ##
import data_multi

## Like in the background trials, we have to define which llhmodel to use.
llh79 = data_multi.init79(energy=True)
llh86I = data_multi.init86I(energy=True)
llh59 = data_multi.init59(energy=True)
llh40 = data_multi.init40(energy=True)

#We've loaded in the appropriate llh samples; now let's put them all in the blender (not sure about weighting).
samples = [llh40, llh59, llh79, llh86I]

llhmodel = data_multi.multi_init(samples, energy=True)

##Remember, weighted sensitivity requires src_dec in radians.##

##Now I'll input my injection weighting scheme from the command line. APPARENTLY, I need these as radians, not sin(dec).##
#Also, I need to clip these just before they hit the pole - otherwise I'll get out-of-bounds errors.
limit = np.round(np.pi / 2, 7)
#DON'T change the rounding on limit - I chose it specifically because it rounds down.
src_dec = np.clip(np.arcsin(np.linspace(-1, 1, 21)), -limit, limit)
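
#Sanity check on the clipping (numpy values):
#    np.pi / 2               -> 1.5707963267948966
#    np.round(np.pi / 2, 7)  -> 1.5707963   (strictly below pi/2)
#so arcsin(+/-1) = +/-pi/2 gets pulled just inside the valid declination range,
#as the comments above intend.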
Example #4
#Fraction of background trials with TS above the threshold TS_beta, with the
#usual binomial uncertainty sqrt(p*(1-p)/N) on that fraction.
m = np.count_nonzero(np.asarray(TSs) > TS_beta)
i = len(TSs)
fraction = float(m) / float(i)
beta_err = (np.sqrt(fraction * (1. - fraction) / float(i)) if 0 < beta < 1 else 1.)

##Now we have all the pieces of the original dictionary. Time to glue bckg_trials back together, with each piece in its proper type.##
bckg_trials = {
    'n_inj': n_inj,
    'nsources': np.asarray(nsources),
    'TS': np.asarray(TS),
    'beta': beta,
    'beta_err': beta_err,
    'TS_beta': TS_beta,
    'gamma': np.asarray(gamma)
}

src_ra, src_dec, weights = params[0], params[1], params[2]

## Now to import my llh model framework. ##
import data_multi

llh40 = data_multi.init40(energy=True, weighting=weights)
llh79 = data_multi.init79(energy=True, weighting=weights)
llh86I = data_multi.init86I(energy=True, weighting=weights)
llh59 = data_multi.init59(energy=True, weighting=weights)

#We've loaded in the appropriate llh samples; now let's put them all in the blender (not sure about weighting).
samples = [llh40, llh59, llh79, llh86I]

llhmodel = data_multi.multi_init(samples, energy=True)

inj = PointSourceInjector(Gamma, sinDec_bandwidth=.05, src_dec=src_dec, theo_weight=weights, seed=0)

sensitivity = MultiPointSourceLLH.weighted_sensitivity(
    llhmodel, src_ra=src_ra, src_dec=src_dec, alpha=.5, beta=.9, inj=inj,
    trials={'n_inj': [], 'TS': [], 'nsources': [], 'gamma': []},
    bckg_trials=bckg_trials, eps=0.02, n_iter=250)
print(sensitivity)

#discovery = PointSourceLLH.weighted_sensitivity(llhmodel,src_ra=src_ra,src_dec=src_dec,alpha=2.867e-7,beta=.5,inj=inj,trials={'n_inj':[],'TS':[],'nsources':[],'gamma':[]},bckg_trials=bckg_trials,eps=0.01,n_iter=250)
#print discovery
Example #5
}

src_ra, src_dec, redshift, gamma, flux, lum = (params['ra'], params['dec'],
                                               params['redshift'], params['gamma'],
                                               params['flux'], params['lum'])

## Time to define my modelweights in a dictionary. ##

modelweights = {'flux': flux, 'redshift': list(np.power(redshift, -2))}
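#Here 'flux' weights each source by its flux value from params, while the
#'redshift' weight scales as z**-2, so sources at larger redshift get less weight.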

## Now to import my llh model framework. ##
import data_multi

## Like in the background trials, we have to define which llhmodel to use.
if llhweight == 'uniform':
    llh79 = data_multi.init79(energy=True)
    llh86I = data_multi.init86I(energy=True)
    llh59 = data_multi.init59(energy=True)
    llh40 = data_multi.init40(energy=True)

else:
    llh40 = data_multi.init40(energy=True,
                              weighting=modelweights['{}'.format(llhweight)])
    llh79 = data_multi.init79(energy=True,
                              weighting=modelweights['{}'.format(llhweight)])
    llh86I = data_multi.init86I(energy=True,
                                weighting=modelweights['{}'.format(llhweight)])
    llh59 = data_multi.init59(energy=True,
                              weighting=modelweights['{}'.format(llhweight)])

#We've loaded in the appropriate llh samples; now let's put them all in the blender (not sure about weighting).
if n == 2:
Example #6
def plot_error(year):
    #The following is currently devoted to making energy and angular error plots for a single year of data.
    # init likelihood class
    if year == 40:
        llh = data_multi.init40(energy=True)
        mc = cache.load(filename_pickle + "IC40/mc.pickle")
        extra = cache.load(filename_pickle + "IC40/dpsi.pickle")
    elif year == 59:
        llh = data_multi.init59(energy=True)
        mc = cache.load(filename_pickle + "IC59/mc.pickle")
        extra = cache.load(filename_pickle + "IC59/dpsi.pickle")
    elif year == 79:
        llh = data_multi.init79(energy=True)
        mc = cache.load(filename_pickle + "IC79/mc.pickle")
        extra = cache.load(filename_pickle + "IC79/dpsi.pickle")
    elif year == 86:
        llh = data_multi.init86I(energy=True)
        mc = cache.load(filename_pickle + "IC86I/mc.pickle")
        extra = cache.load(filename_pickle + "IC86I/dpsi.pickle")
    dpsi = extra['dpsi']

    print(llh)

    # datatest
    #Currently we don't need to remake these plots, but DON'T DELETE.

    colors = ['b', 'g', 'y', 'r']
    gamma = np.linspace(1., 2.7, 4)
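    # np.linspace(1., 2.7, 4) gives spectral indices [1.0, 1.57, 2.13, 2.7],
    # one per color above, used to re-weight the MC to different power laws below.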

    # fig_energy, (ax1, ax2) = plt.subplots(ncols=2)
    # ax1.hist([llh.exp["logE"]] + [mc["logE"] for i in gamma],
    #          weights=[np.ones(len(llh.exp))]
    #                   + [mc["ow"] * mc["trueE"]**(-g) for g in gamma],
    #          label=["Pseudo-Data"] + [r"$\gamma={0:.1f}$".format(g) for g in gamma], color= ['k'] + [colors[g] for g in range(len(gamma))],
    #          histtype="step", bins=100, log=True, normed=True, cumulative=-1)
    # ax1.legend(loc="best")
    # ax1.set_title("Reconstructed Energy - IC{}".format(str(year)))
    # ax1.set_xlabel("logE")
    # ax1.set_ylabel("Relative Abundance")
    # ax1.hist([llh.exp["logE"]] + [mc["logE"] for i in gamma],
    #          weights=[np.ones(len(llh.exp))]
    #                   + [mc["ow"] * mc["trueE"]**(-g) for g in gamma],
    #          label=["Data"] + [r"$\gamma={0:.1f}$".format(g) for g in gamma], color= ['k'] + [colors[g] for g in range(len(gamma))],
    #          histtype="step", bins=100, log=True, normed=True, cumulative=-1)
    # ax2.set_title("Zoomed In")
    # ax2.set_xlabel("logE")
    # ax2.set_xlim(4,10)
    # ax2.set_ylim(1e-5,1)
    # ax2.hist([llh.exp["logE"]] + [mc["logE"] for i in gamma],
    #          weights=[np.ones(len(llh.exp))]
    #                   + [mc["ow"] * mc["trueE"]**(-g) for g in gamma],
    #          label=["Pseudo-Data"] + [r"$\gamma={0:.1f}$".format(g) for g in gamma], color= ['k'] + [colors[g] for g in range(len(gamma))],
    #          histtype="step", bins=100, log=True, normed=True, cumulative=-1)
    # fig_energy.savefig(filename_plots + 'energy_hists_IC{}.pdf'.format(str(year)))

    fig_angular_error, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10, 5))
    dec = np.arcsin(mc["sinDec"])
    angdist = np.degrees(dpsi)
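
    # Each MC event is weighted by ow * trueE**(-g), i.e. its MC weight times a
    # power-law spectrum E**(-g), so each curve below is the expectation for one
    # of the spectral indices in 'gamma'. 'angdist' is the true reconstruction
    # error (Delta psi, in degrees), compared against the per-event estimate 'sigma'.
    # (Note: 'normed=True' is the pre-2.1 matplotlib keyword; newer releases use
    # 'density=True'.)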

    ax1.hist([np.log10(np.degrees(mc["sigma"])) for i in gamma],
             label=[r"$\sigma$ - $\gamma={0:.1f}$".format(g) for g in gamma],
             linestyle='dashed',
             weights=[mc["ow"] * mc["trueE"]**(-g) for g in gamma],
             color=[colors[g] for g in range(len(gamma))],
             histtype="step",
             bins=100,
             normed=True)

    ax1.hist(
        [np.log10(angdist) for i in gamma],
        label=[r"$\Delta \psi$ - $\gamma={0:.1f}$".format(g) for g in gamma],
        weights=[mc["ow"] * mc["trueE"]**(-g) for g in gamma],
        linestyle='solid',
        color=[colors[g] for g in range(len(gamma))],
        histtype="step",
        bins=100,
        normed=True)
    ax1.set_title("Reco MC Angular Error Check - IC{}".format(str(year)))
    ax1.set_xlabel(r"log$\sigma_{ang}$ (degrees)")
    ax1.set_ylabel("Relative Abundance")
    ax1.set_ylim(0, 1.5)

    ax2.hist([(np.degrees(mc["sigma"])) for i in gamma],
             label=[r"$\sigma$ - $\gamma={0:.1f}$".format(g) for g in gamma],
             linestyle='dashed',
             weights=[mc["ow"] * mc["trueE"]**(-g) for g in gamma],
             color=[colors[g] for g in range(len(gamma))],
             histtype="step",
             bins=1000,
             normed=True)

    ax2.hist(
        [(angdist) for i in gamma],
        label=[r"$\Delta \psi$ - $\gamma={0:.1f}$".format(g) for g in gamma],
        weights=[mc["ow"] * mc["trueE"]**(-g) for g in gamma],
        linestyle='solid',
        color=[colors[g] for g in range(len(gamma))],
        histtype="step",
        bins=1000,
        normed=True)
    ax2.legend(loc="upper right")
    ax2.set_xlim(0, 5)
    ax2.set_ylim(0, 3.5)
    ax2.set_xlabel(r"$\sigma_{ang}$ (degrees)")
    fig_angular_error.savefig(filename_plots +
                              'angular_error_hists_IC{}.pdf'.format(str(year)))