Example #1
def volume_binned_pylal(f_dist, m_dist, bins=15):
    """ Compute the sensitive volume using a distanced 
    binned efficiency estimate
    
    Parameters
    -----------
    found_distance: numpy.ndarray
        The distances of found injections
    missed_dsistance: numpy.ndarray
        The distances of missed injections
        
    Returns
    --------
    volume: float
        Volume estimate
    volume_error: float
        The standared error in the volume
    """
    def sims_to_bin(sim):
        return (sim, 0)

    import numpy
    from pylal import rate
    from pylal.imr_utils import compute_search_volume_in_bins, compute_search_efficiency_in_bins
    found = f_dist
    total = numpy.concatenate([f_dist, m_dist])
    ndbins = rate.NDBins([
        rate.LinearBins(min(total), max(total), bins),
        rate.LinearBins(0., 1, 1)
    ])
    vol, verr = compute_search_volume_in_bins(found, total, ndbins,
                                              sims_to_bin)
    return vol.array[0], verr.array[0]
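A minimal usage sketch (not part of the example): the distances below are synthetic, and pylal must be importable for the call to work.

import numpy

# hypothetical found/missed injection distances in Mpc
f_dist = numpy.random.uniform(10.0, 200.0, size=500)
m_dist = numpy.random.uniform(150.0, 400.0, size=300)

vol, vol_err = volume_binned_pylal(f_dist, m_dist, bins=15)
print("sensitive volume: %g +/- %g Mpc^3" % (vol, vol_err))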
Example #2
    def get_2d_mass_bins(self, low, high, bins):
        """
        Given the component mass range [low, high] of the search, return
        2D bins with `bins` bins in each direction.
        """
        mass1Bin = rate.LinearBins(low, high, bins)
        mass2Bin = rate.LinearBins(low, high, bins)
        twoDMB = rate.NDBins((mass1Bin, mass2Bin))
        return twoDMB
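For illustration, the same binning can be built standalone with pylal's rate module: NDBins maps a coordinate tuple to per-dimension bin indices and exposes the bin centres along each axis (the masses here are made up).

from pylal import rate

mass_bins = rate.NDBins((rate.LinearBins(1.0, 25.0, 10),
                         rate.LinearBins(1.0, 25.0, 10)))
i, j = mass_bins[3.7, 12.2]                    # bin indices for (mass1, mass2)
m1_centres, m2_centres = mass_bins.centres()   # bin centres along each axis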
Example #3
def guess_distance_chirp_mass_bins_from_sims(sims, mbins = 11, distbins = 200):
	"""
	Given a list of the injections, guess at the chirp mass and distance
	bins.
	"""
	dist_mchirp_vals = map(sim_to_distance_chirp_mass_bins_function, sims)

	distances = [tup[0] for tup in dist_mchirp_vals]
	mchirps = [tup[1] for tup in dist_mchirp_vals]

	return rate.NDBins([rate.LinearBins(min(distances), max(distances), distbins), rate.LinearBins(min(mchirps), max(mchirps), mbins)])
Example #4
def guess_distance_effective_spin_parameter_bins_from_sims(sims, chibins = 11, distbins = 200):
	"""
	Given a list of the injections, guess at the chi = (m1*s1z +
	m2*s2z)/(m1+m2) and distance bins.
	"""
	dist_chi_vals = map(sim_to_distance_effective_spin_parameter_bins_function, sims)

	distances = [tup[0] for tup in dist_chi_vals]
	chis = [tup[1] for tup in dist_chi_vals]

	return rate.NDBins([rate.LinearBins(min(distances), max(distances), distbins), rate.LinearBins(min(chis), max(chis), chibins)])
Example #5
    def add_contents(self, contents):
        if self.tisi_rows is None:
            # get a list of time slide dictionaries
            self.tisi_rows = contents.time_slide_table.as_dict().values()

            # find the largest and smallest offsets
            min_offset = min(offset for vector in self.tisi_rows
                             for offset in vector.values())
            max_offset = max(offset for vector in self.tisi_rows
                             for offset in vector.values())

            # a guess at the time slide spacing:  works if the
            # time slides are distributed as a square grid over
            # the plot area.  (max - min)^2 gives the area of
            # the time slide square in square seconds; dividing
            # by the length of the time slide list gives the
            # average area per time slide;  taking the square
            # root of that gives the average distance between
            # adjacent time slides in seconds
            time_slide_spacing = ((max_offset - min_offset)**2 /
                                  len(self.tisi_rows))**0.5

            # use an average of 3 bins per time slide in each
            # direction, but round to an odd integer
            nbins = math.ceil(
                (max_offset - min_offset) / time_slide_spacing * 3)

            # construct the binning
            self.bins = rate.BinnedRatios(
                rate.NDBins((rate.LinearBins(min_offset, max_offset, nbins),
                             rate.LinearBins(min_offset, max_offset, nbins))))

        self.seglists |= contents.seglists

        for offsets in contents.connection.cursor().execute(
                """
SELECT tx.offset, ty.offset FROM
	coinc_event
	JOIN time_slide AS tx ON (
		tx.time_slide_id == coinc_event.time_slide_id
	)
	JOIN time_slide AS ty ON (
		ty.time_slide_id == coinc_event.time_slide_id
	)
WHERE
	coinc_event.coinc_def_id == ?
	AND tx.instrument == ?
	AND ty.instrument == ?
		""", (contents.bb_definer_id, self.x_instrument, self.y_instrument)):
            try:
                self.bins.incnumerator(offsets)
            except IndexError:
                # beyond plot boundaries
                pass
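To make the spacing heuristic above concrete, a quick numeric sketch with hypothetical numbers: 25 time slides laid out as a 5x5 grid over a [-10, +10] s offset range.

import math

min_offset, max_offset, n_slides = -10.0, 10.0, 25
spacing = ((max_offset - min_offset)**2 / n_slides)**0.5    # -> 4.0 s
nbins = math.ceil((max_offset - min_offset) / spacing * 3)  # -> 15 bins per axis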
Example #6
def guess_distance_total_mass_bins_from_sims(sims, nbins = 11, distbins = 200):
       """
       Given a list of the injections, guess at the mass1, mass2 and distance
       bins. Floor and ceil will be used to round down to the nearest integers.
       """

       total_lo = numpy.floor(min([sim.mass1 + sim.mass2 for sim in sims]))
       total_hi = numpy.ceil(max([sim.mass1 + sim.mass2 for sim in sims]))
       mindist = numpy.floor(min([sim.distance for sim in sims]))
       maxdist = numpy.ceil(max([sim.distance for sim in sims]))

       return rate.NDBins((rate.LinearBins(mindist, maxdist, distbins), rate.LinearBins(total_lo, total_hi, nbins)))
Example #7
 def __init__(self, x, y, magnitude, max_magnitude):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot("X", "Y")
     self.fig.set_size_inches(6, 6)
     self.x = x
     self.y = y
     self.magnitude = magnitude
     self.n_foreground = 0
     self.n_background = 0
     self.n_injections = 0
     max_magnitude = math.log10(max_magnitude)
     self.foreground_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
     self.background_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
     self.coinc_injection_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
     self.incomplete_coinc_injection_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
Example #8
def dt_binning(instrument1, instrument2):
    # FIXME:  hard-coded for directional search
    #dt = 0.02 + inject.light_travel_time(instrument1, instrument2)
    dt = 0.02
    return rate.NDBins(
        (rate.ATanBins(-dt, +dt, 12001), rate.LinearBins(0.0, 2 * math.pi,
                                                         61)))
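A short sketch of why ATanBins is used for the time-delay axis, assuming pylal's rate module: ATanBins covers the whole real line, concentrating its resolution inside [min, max], so the small dt window is finely resolved while outliers still land in coarse end bins instead of raising IndexError.

from pylal import rate

dt_bins = rate.ATanBins(-0.02, +0.02, 12001)
print(dt_bins[0.0])     # the centre bin
print(dt_bins[1.0e-5])  # a nearby value already falls in a different bin
print(dt_bins[10.0])    # far outside [min, max], but still binned (coarsely)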
Example #9
class test_MovingHistogramFixedN(unittest.TestCase):
    bins = rate.LinearBins(0, 1, 10)
    max_hist_size = 100

    def setUp(self):
        self.hist = mh.MovingHistogramFixedN(self.bins, self.max_hist_size)

    def test_monotonicity_check(self):
        for i in range(num_tests):
            self.setUp()
            a, b = np.random.randint(100, size=2)
            self.hist.update(a, 0.5)
            if b < a:
                self.assertRaises(ValueError, lambda: self.hist.update(b, 0.5))
            else:
                self.hist.update(b, 0.5)

    def test_pdf_normalization(self):
        for i in range(num_tests):
            self.setUp()
            for t, s in enumerate(np.random.random(size=100)):
                self.hist.update(t, s)
            x = np.linspace(0, 1, 100)
            y = np.array([self.hist.get_pdf(a) for a in x])
            integral = integrate.simps(y, x)
            self.assertTrue(abs(integral - 1.0) < 0.01)

    def test_cdf_normalization(self):
        for i in range(num_tests):
            self.setUp()
            for t, s in enumerate(np.random.random(size=100)):
                self.hist.update(t, s)
            self.assertAlmostEqual(self.hist.get_cdf(self.bins.max), 1)

    def test_sf_normalization(self):
        for i in range(num_tests):
            self.setUp()
            for t, s in enumerate(np.random.random(size=100)):
                self.hist.update(t, s)
            self.assertAlmostEqual(self.hist.get_sf(self.bins.min), 1)

    def test_hist_size_limit(self):
        for t, s in enumerate(np.random.random(size=self.max_hist_size + num_tests)):
            self.hist.update(t, s)
            self.assertTrue(len(self.hist) <= self.max_hist_size)

    def test_hist_discards_oldest(self):
        for t, s in enumerate(np.random.random(size=self.max_hist_size + num_tests)):
            self.hist.update(t, s)
            self.assertEqual(self.hist.get_oldest_timestamp(), max(0, t - self.max_hist_size + 1))

    def test_matches_naive_hist(self):
        rand_nums = np.random.random(size=self.max_hist_size + num_tests)
        for t, s in enumerate(rand_nums):
            self.hist.update(t, s)
            naive_hist = np.zeros(len(self.bins), dtype=int)
            for n in rand_nums[max(0, t - self.max_hist_size + 1):t + 1]:
                naive_hist[self.bins[n]] += 1
            self.assertTrue((naive_hist == self.hist.counts).all())
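A usage sketch of the API these tests exercise, assuming the same mh module and pylal's rate module are importable; the samples are synthetic.

import numpy as np
from pylal import rate

hist = mh.MovingHistogramFixedN(rate.LinearBins(0, 1, 10), 100)
for t, s in enumerate(np.random.random(size=250)):
    hist.update(t, s)            # timestamps must be non-decreasing

assert len(hist) == 100          # only the newest 100 samples are retained
print(hist.get_pdf(0.5), hist.get_cdf(0.5), hist.get_sf(0.5))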
Example #10
	def __init__(self, instrument, interval, width):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot(r"$f_{\mathrm{recovered}} / f_{\mathrm{injected}}$", "Event Number Density")
		self.axes.loglog()
		self.instrument = instrument
		self.found = 0
		# 21 bins per filter width
		bins = int(float(abs(interval)) / width) * 21
		binning = rate.NDBins((rate.LinearBins(interval[0], interval[1], bins),))
		self.offsets = rate.BinnedArray(binning)
		self.coinc_offsets = rate.BinnedArray(binning)
Example #11
	def __init__(self, instrument, interval, width):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot(r"$t_{\mathrm{recovered}} - t_{\mathrm{injected}}$ (s)", "Triggers per Unit Offset")
		self.axes.semilogy()
		self.instrument = instrument
		self.found = 0
		# 21 bins per filter width
		bins = int(float(abs(interval)) / width) * 21
		binning = rate.NDBins((rate.LinearBins(interval[0], interval[1], bins),))
		self.offsets = rate.BinnedArray(binning)
		self.coinc_offsets = rate.BinnedArray(binning)
Example #12
 def __init__(self, ifo, interval, width):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot(
         "Peak Frequency (Hz)",
         "Trigger Rate Spectral Density (triggers / s / Hz)")
     self.ifo = ifo
     self.nevents = 0
     # 21 bins per filter width
     bins = int(float(abs(interval)) / width) * 21
     binning = rate.NDBins((rate.LinearBins(interval[0], interval[1],
                                            bins), ))
     self.rate = rate.BinnedDensity(binning)
Example #13
 def __init__(self, ifo, width, max):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot(
         "Delay (s)", "Count / Delay")
     self.ifo = ifo
     self.nevents = 0
     # 21 bins per filter width
     interval = segments.segment(0, max + 2)
     self.bins = rate.BinnedDensity(
         rate.NDBins(
             (rate.LinearBins(interval[0], interval[1],
                              int(float(abs(interval)) / width) * 21), )))
     self.axes.semilogy()
Example #14
    def check_off_distribution(self, pop_min, pop_max, far = False):

        data_list_by_grb = self.lik_by_grb
        if far:
            data_list_by_grb = self.ifar_by_grb
            tag = 'log(IFAR)'
            sname = 'far'
        else:
            tag = 'Likelihood'
            sname = 'lik'

        # prepare the plot
        plot = plotutils.SimplePlot(tag, r"cumulative sum",\
                                    r"Cumulative distribution offsource")
        
        # create the hist data in a consistent binning
        nbins = 20
        bins = rate.LinearBins(pop_min, pop_max, nbins)
        px = bins.lower()
        
        for data_list in data_list_by_grb:

            grb_name = data_list.grb_name
            
            tmp_pop = data_list.off_by_trial
            tmp_arr = np.array(tmp_pop, dtype=float)
            off_pop = tmp_arr[~np.isinf(tmp_arr)]            
            off_pop.sort()
            py = range(len(off_pop), 0, -1)
            if far:
                off_pop = np.log10(off_pop)


            ifos = self.grb_data[grb_name]['ifos']
            if ifos=='H1L1':
                linestyle = '-'
            elif ifos == 'H1H2':
                linestyle = '-.'
            else:
                linestyle = ':'
            
            
            # add content to the plot
            plot.add_content(off_pop, py, color = self.colors.next(),\
                             linestyle = linestyle, label=grb_name)
        plot.finalize()
        plot.ax.set_yscale("log")
        return plot
Example #15
 def __init__(self, instruments):
     self.densities = {}
     for pair in itertools.combinations(sorted(instruments), 2):
         # FIXME:  hard-coded for directional search
         #dt = 0.02 + snglcoinc.light_travel_time(*pair)
         dt = 0.02
         self.densities["%s_%s_dt" % pair] = rate.BinnedLnDPF(
             rate.NDBins((rate.ATanBins(-dt, +dt, 12001),
                          rate.LinearBins(0.0, 2 * math.pi, 61))))
         self.densities["%s_%s_dband" % pair] = rate.BinnedLnDPF(
             rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001),
                          rate.LinearBins(0.0, 2 * math.pi, 61))))
         self.densities["%s_%s_ddur" % pair] = rate.BinnedLnDPF(
             rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001),
                          rate.LinearBins(0.0, 2 * math.pi, 61))))
         self.densities["%s_%s_df" % pair] = rate.BinnedLnDPF(
             rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001),
                          rate.LinearBins(0.0, 2 * math.pi, 61))))
         self.densities["%s_%s_dh" % pair] = rate.BinnedLnDPF(
             rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001),
                          rate.LinearBins(0.0, 2 * math.pi, 61))))
Example #16
def get_exttrig_trials(on_segs, off_segs, veto_files):
    """
    Return a tuple of (off-source time bins, off-source veto mask,
    index of trial that is on source).
    The off-source veto mask is a one-dimensional boolean array where True
    means vetoed.
    @param on_segs: On-source segments
    @param off_segs: Off-source segments 
    @param veto_files: List of filenames containing vetoes
    """

    # Check that offsource length is a multiple of the onsource segment length
    trial_len = int(abs(on_segs))
    if abs(off_segs) % trial_len != 0:
        raise ValueError, "The provided file's analysis segment is not "\
            "divisible by the fold time."
    extent = (off_segs | on_segs).extent()

    # generate bins for trials
    num_trials = int(abs(extent)) // trial_len
    trial_bins = rate.LinearBins(extent[0], extent[1], num_trials)

    # incorporate veto file; in trial_veto_mask, True means vetoed.
    trial_veto_mask = numpy.zeros(num_trials, dtype=numpy.bool8)
    for veto_file in veto_files:
        new_veto_segs = segmentsUtils.fromsegwizard(open(veto_file),
                                                    coltype=int)
        if new_veto_segs.intersects(on_segs):
            print >>sys.stderr, "warning: %s overlaps on-source segment" \
                % veto_file
        trial_veto_mask |= rate.bins_spanned(trial_bins,
                                             new_veto_segs,
                                             dtype=numpy.bool8)

    # identify onsource trial index
    onsource_mask = rate.bins_spanned(trial_bins, on_segs, dtype=numpy.bool8)
    if sum(onsource_mask) != 1:
        raise ValueError, "on-source segment spans more or less than one trial"
    onsource_ind = numpy.arange(len(onsource_mask))[onsource_mask]

    return trial_bins, trial_veto_mask, onsource_ind
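A usage sketch with hypothetical GPS times, assuming glue's segments module and the module-level imports the function relies on; the off-source time must be an integer multiple of the on-source length.

from glue import segments

on_segs = segments.segmentlist([segments.segment(1000, 1010)])
off_segs = segments.segmentlist([segments.segment(900, 1000),
                                 segments.segment(1010, 1100)])

trial_bins, veto_mask, onsource_ind = get_exttrig_trials(on_segs, off_segs, [])
print(len(veto_mask), onsource_ind)  # 20 trials; index of the on-source trial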
Example #17
class CoincParamsDistributions(ligolw_burca_tailor.BurcaCoincParamsDistributions):
	binnings = {
        	"H1_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
        	"H2_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
        	"L1_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
        	"H1H2_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
        	"H1L1_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
        	"H2L1_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),))
	}
	filters = {
        	"H1_eff_snr": rate.gaussian_window(21),
        	"H2_eff_snr": rate.gaussian_window(21),
        	"L1_eff_snr": rate.gaussian_window(21),
        	"H1H2_eff_snr": rate.gaussian_window(21),
        	"H1L1_eff_snr": rate.gaussian_window(21),
        	"H2L1_eff_snr": rate.gaussian_window(21)
	}
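A sketch of how such binnings/filters pairs are typically consumed, assuming pylal's rate module: counts are accumulated in a BinnedArray built from the binning, then smoothed in place with the matching Gaussian window (the effective-SNR values are hypothetical).

from pylal import rate

counts = rate.BinnedArray(rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)))
for eff_snr in (8.0, 8.5, 12.0):
    counts[eff_snr,] += 1.0
rate.filter_array(counts.array, rate.gaussian_window(21))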
Example #18
def plotspectrogram(sequencelist, outfile, epoch=0, deltaT=1, f0=0, deltaF=1,\
                    t0=0, ydata=None, **kwargs):
    """
    Plots a list of REAL?VectorSequences on a time-frequency-amplitude colour
    map. The epoch and deltaT arguments define the spacing in the x direction
    for the given VectorSequence, and similarly f0 and deltaF in the
    y-direction. If a list of VectorSequences is given, any of these arguments
    can be in list form, one for each sequence.

    ydata can be given to explicitly define the frequencies at which the
    sequences are sampled.
    """
    # construct list of series
    if not hasattr(sequencelist, "__contains__"):
        sequencelist = [sequencelist]
    numseq = len(sequencelist)

    # format variables
    if isinstance(epoch, numbers.Number) or isinstance(epoch, lal.LIGOTimeGPS):
        epoch = [epoch] * numseq
        epoch = map(float, epoch)
    if not len(epoch) == numseq:
        raise ValueError("Wrong number of epoch arguments given.")
    if isinstance(deltaT, numbers.Number):
        deltaT = [deltaT] * numseq
        deltaT = map(float, deltaT)
    if not len(deltaT) == numseq:
        raise ValueError("Wrong number of deltaT arguments given.")
    if ydata is not None:
        if isinstance(f0, numbers.Number):
            f0 = [f0] * numseq
            f0 = map(float, f0)
        if not len(f0) == numseq:
            raise ValueError("Wrong number of f0 arguments given.")
        if isinstance(deltaF, numbers.Number):
            deltaF = [deltaF] * numseq
            deltaF = map(float, deltaF)
        if not len(deltaF) == numseq:
            raise ValueError("Wrong number of deltaF arguments given.")

    # get limits
    xlim = kwargs.pop("xlim", None)
    ylim = kwargs.pop("ylim", None)
    colorlim = kwargs.pop("colorlim", None)
    if xlim:
        start, end = xlim
    else:
        start = min(epoch)
        end   = max(e + l.length * dt\
                    for e,l,dt in zip(epoch, sequencelist, deltaT))
    if ydata is not None and not ylim:
        ylim = [ydata.min(), ydata.max()]

    # get axis scales
    logx = kwargs.pop("logx", False)
    logy = kwargs.pop("logy", False)
    logcolor = kwargs.pop("logcolor", False)

    # get legend loc
    loc = kwargs.pop("loc", 0)
    alpha = kwargs.pop("alpha", 0.8)

    # get colorbar options
    hidden_colorbar = kwargs.pop("hidden_colorbar", False)

    # get savefig option
    bbox_inches = kwargs.pop("bbox_inches", None)

    #
    # get labels
    #

    xlabel = kwargs.pop("xlabel", None)
    if xlabel:
        unit = 1
    else:
        unit, timestr = plotutils.time_axis_unit(end - start)
        if not t0:
            t0 = start
        t0 = lal.LIGOTimeGPS(t0)
        if int(t0.gpsNanoSeconds) == 0:
            xlabel = datetime.datetime(*lal.GPSToUTC(int(t0))[:6])\
                         .strftime("%B %d %Y, %H:%M:%S %ZUTC")
            xlabel = "Time (%s) since %s (%s)" % (timestr, xlabel, int(t0))
        else:
            xlabel = datetime.datetime(*lal.GPSToUTC(t0.gpsSeconds)[:6])\
                          .strftime("%B %d %Y, %H:%M:%S %ZUTC")
            xlabel = "Time (%s) since %s (%s)"\
                     % (timestr,
                        xlabel.replace(" UTC",".%.3s UTC" % t0.gpsNanoSeconds),\
                        t0)
        t0 = float(t0)
    ylabel = kwargs.pop("ylabel", "Frequency (Hz)")
    colorlabel = kwargs.pop("colorlabel", "Amplitude")
    title = kwargs.pop("title", "")
    subtitle = kwargs.pop("subtitle", "")

    #
    # restrict data to the correct limits for plotting
    #

    interpolate = logy and ydata is None

    for i, sequence in enumerate(sequencelist):
        if interpolate:
            # interpolate the data onto a log-scale
            sequence, ydata = loginterpolate(sequence, f0[i], deltaF[i])
        if logy and ylim:
            plotted = (ydata > ylim[0]) & (ydata <= ylim[1])
            newVectorLength = int(plotted.sum())
            newsequence = lal.CreateREAL8VectorSequence(sequence.length,\
                                                            newVectorLength)
            for j in range(sequence.length):
                newsequence.data[j, :] = sequence.data[j, :][plotted]
            del sequence
            sequencelist[i] = newsequence
    if len(sequencelist) and logy and ylim:
        ydata = ydata[plotted]

    #
    # format bins
    #

    xbins = []
    for i in range(numseq):
        xmin = epoch[i]
        xmax = epoch[i] + sequencelist[i].length * deltaT[i]
        xbins.append(rate.LinearBins(float(xmin-t0)/unit, float(xmax-t0)/unit,\
                                     2))

    ybins = []
    for i in range(numseq):
        if ydata is not None:
            ydata = numpy.asarray(ydata)
            ymin = ydata.min()
            ymax = ydata.max()
        else:
            ymin = f0[i]
            ymax = f0[i] + sequencelist[i].vectorLength * deltaF[i]
        if logy:
            if ymin == 0:
                ymin = deltaF[i]
            ybins.append(rate.LogarithmicBins(ymin, ymax, 2))
        else:
            ybins.append(rate.LinearBins(ymin, ymax, 2))

    #
    # plot
    #

    kwargs.setdefault("interpolation", "kaiser")

    plot = plotutils.ImagePlot(xlabel=xlabel, ylabel=ylabel, title=title,\
                               subtitle=subtitle, colorlabel=colorlabel)

    for sequence, x, y in zip(sequencelist, xbins, ybins):
        data = numpy.ma.masked_where(numpy.isnan(sequence.data), sequence.data,\
                                     copy=False)
        plot.add_content(data.T, x, y, **kwargs)

    # finalize
    plot.finalize(colorbar=True, logcolor=logcolor, minorticks=True,\
                  clim=colorlim)
    if hidden_colorbar:
        plotutils.add_colorbar(plot.ax, visible=False)

    # set logscale
    if logx:
        plot.ax.xaxis.set_scale("log")
    if logy:
        plot.ax.yaxis.set_scale("log")
    plot.ax._update_transScale()

    # format axes
    if xlim:
        xlim = (numpy.asarray(xlim).astype(float) - t0) / unit
        plot.ax.set_xlim(xlim)
    if ylim:
        plot.ax.set_ylim(ylim)

    # set grid and ticks
    plot.ax.grid(True, which="both")
    plotutils.set_time_ticks(plot.ax)
    plotutils.set_minor_ticks(plot.ax)

    # save and close
    plot.savefig(outfile, bbox_inches=bbox_inches,\
                 bbox_extra_artists=plot.ax.texts)
    plot.close()
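A hypothetical call, assuming seq is a REAL8VectorSequence of spectrogram rows and that the lal/plotutils dependencies used inside the function are available:

plotspectrogram(seq, "spectrogram.png", epoch=1000000000, deltaT=1.0,
                f0=0.0, deltaF=0.5, colorlabel="Normalised energy")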
Example #19
	def finish(self, threshold):
		# bin the injections

		self._bin_events()

		# use the same binning for the found injection density as
		# was constructed for the efficiency

		self.found_density = rate.BinnedArray(self.efficiency.denominator.bins)

		# construct the amplitude weighting function

		amplitude_weight = rate.BinnedArray(rate.NDBins((rate.LinearBins(threshold - 100, threshold + 100, 10001),)))

		# gaussian window's width is the number of bins
		# corresponding to 10 units of amplitude, which is computed
		# by dividing 10 by the "volume" of the bin corresponding
		# to threshold.  index is the index of the element in
		# amplitude_weight corresponding to the threshold.

		index, = amplitude_weight.bins[threshold,]
		window = rate.gaussian_window(10.0 / amplitude_weight.bins.volumes()[index])
		window /= 10 * window[(len(window) - 1) / 2]

		# set the window data into the BinnedArray object.  the
		# Gaussian peaks on the middle element of the window, which
		# we want to place at index in the amplitude_weight array.

		lo = index - (len(window) - 1) / 2
		hi = lo + len(window)
		if lo < 0 or hi > len(amplitude_weight.array):
			raise ValueError("amplitude weighting window too large")
		amplitude_weight.array[lo:hi] = window

		# store the recovered injections in the found density bins
		# weighted by amplitude

		for x, y, z in self.recovered_xyz:
			try:
				weight = amplitude_weight[z,]
			except IndexError:
				# beyond the edge of the window
				weight = 0.0
			self.found_density[x, y] += weight

		# the efficiency is only valid up to the highest energy
		# that has been injected.  this creates problems later on
		# so, instead, for each frequency, identify the highest
		# energy that has been measured and copy the values for
		# that bin's numerator and denominator into the bins for
		# all higher energies.  do the same for the counts in the
		# found injection density bins.
		#
		# numpy.indices() returns two arrays, the first of
		# which has each element set equal to its x index, the
		# second has each element set equal to its y index, we keep
		# the latter.  meanwhile numpy.roll() cyclically permutes
		# the efficiency bins down one along the y (energy) axis.
		# from this, the conditional finds bins where the
		# denominator is greater than 0 but is <= 0 in the bin
		# immediately above it in energy.  we select the elements
		# from the y index array where the conditional is true, and
		# then use numpy.max() along the y direction to return the
		# highest such y index for each x index, which is a 1-D
		# array.  finally, enumerate() is used to iterate over x
		# index and corresponding y index, and if the y index is
		# not negative (was found) the value from that x-y bin is
		# copied to all higher bins.

		n = self.efficiency.numerator.array
		d = self.efficiency.denominator.array
		f = self.found_density.array
		bady = -1
		for x, y in enumerate(numpy.max(numpy.where((d > 0) & (numpy.roll(d, -1, axis = 1) <= 0), numpy.indices(d.shape)[1], bady), axis = 1)):
			if y != bady:
				n[x, y + 1:] = n[x, y]
				d[x, y + 1:] = d[x, y]
				f[x, y + 1:] = f[x, y]

		# now do the same for the bins at energies below those that
		# have been measured.

		bady = d.shape[1]
		for x, y in enumerate(numpy.min(numpy.where((d > 0) & (numpy.roll(d, 1, axis = 1) <= 0), numpy.indices(d.shape)[1], bady), axis = 1)):
			if y != bady:
				n[x, 0:y] = n[x, y]
				d[x, 0:y] = d[x, y]
				f[x, 0:y] = f[x, y]

		diagnostic_plot(self.efficiency.numerator.array, self.efficiency.denominator.bins, r"Efficiency Numerator (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_numerator_before.png")
		diagnostic_plot(self.efficiency.denominator.array, self.efficiency.denominator.bins, r"Efficiency Denominator (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_denominator_before.png")
		diagnostic_plot(self.found_density.array, self.efficiency.denominator.bins, r"Injections Lost / Unit of Threshold (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_found_density_before.png")

		# smooth the efficiency bins and the found injection
		# density bins using the same 2D window.

		window = rate.gaussian_window(self.window_size_x, self.window_size_y)
		window /= window[tuple((numpy.array(window.shape, dtype = "double") - 1) / 2)]
		rate.filter_binned_ratios(self.efficiency, window)
		rate.filter_array(self.found_density.array, window)

		diagnostic_plot(self.efficiency.numerator.array, self.efficiency.denominator.bins, r"Efficiency Numerator (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_numerator_after.png")
		diagnostic_plot(self.efficiency.denominator.array, self.efficiency.denominator.bins, r"Efficiency Denominator (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_denominator_after.png")
		diagnostic_plot(self.found_density.array, self.efficiency.denominator.bins, r"Injections Lost / Unit of Threshold (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_found_density_after.png")

		# compute the uncertainties in the efficiency and its
		# derivative by assuming these to be the binomial counting
		# fluctuations in the numerators.

		p = self.efficiency.ratio()
		self.defficiency = numpy.sqrt(p * (1 - p) / self.efficiency.denominator.array)
		p = self.found_density.array / self.efficiency.denominator.array
		self.dfound_density = numpy.sqrt(p * (1 - p) / self.efficiency.denominator.array)
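The bin-extension trick described in the comments above can be reproduced in a toy numpy example: for each row, find the last "measured" column and copy its value into all higher columns (numpy.roll wraps cyclically, which is harmless as long as the last column is empty, as it is here).

import numpy

d = numpy.array([[1.0, 2.0, 0.0, 0.0],
                 [0.0, 3.0, 4.0, 0.0]])
bady = -1
lasty = numpy.max(numpy.where((d > 0) & (numpy.roll(d, -1, axis = 1) <= 0),
                              numpy.indices(d.shape)[1], bady), axis = 1)
for x, y in enumerate(lasty):
    if y != bady:
        d[x, y + 1:] = d[x, y]
# d is now [[1, 2, 2, 2], [0, 3, 4, 4]]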
Example #20
 def __init__(self, interval, width):
     # 21 bins per filter width
     bins = int(float(abs(interval)) / width) * 21
     self.binning = rate.NDBins((rate.LinearBins(interval[0], interval[1],
                                                 bins), ))
     self.data = {}
Example #21
class StringCoincParamsDistributions(snglcoinc.CoincParamsDistributions):
	# FIXME:  switch to new default when possible
	ligo_lw_name_suffix = u"pylal_ligolw_burca_tailor_coincparamsdistributions"

	instrument_categories = snglcoinc.InstrumentCategories()

	binnings = {
		"H1_snr2_chi2": rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801), rate.ATanLogarithmicBins(.1, 1e4, 801))),
		"H2_snr2_chi2": rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801), rate.ATanLogarithmicBins(.1, 1e4, 801))),
		"L1_snr2_chi2": rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801), rate.ATanLogarithmicBins(.1, 1e4, 801))),
		"V1_snr2_chi2": rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801), rate.ATanLogarithmicBins(.1, 1e4, 801))),
		"H1_H2_dt": dt_binning("H1", "H2"),
		"H1_L1_dt": dt_binning("H1", "L1"),
		"H1_V1_dt": dt_binning("H1", "V1"),
		"H2_L1_dt": dt_binning("H2", "L1"),
		"H2_V1_dt": dt_binning("H2", "V1"),
		"L1_V1_dt": dt_binning("L1", "V1"),
		"H1_H2_dA": rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)),
		"H1_L1_dA": rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)),
		"H1_V1_dA": rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)),
		"H2_L1_dA": rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)),
		"H2_V1_dA": rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)),
		"L1_V1_dA": rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)),
		"H1_H2_df": rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)),
		"H1_L1_df": rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)),
		"H1_V1_df": rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)),
		"H2_L1_df": rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)),
		"H2_V1_df": rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)),
		"L1_V1_df": rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)),
		# instrument group bin centres are at 1, 2, 3, ...;  only
		# non-negative rss timing residual bins will be used but we
		# want a binning that's linear at the origin so instead of
		# inventing a new one we just use atan bins that are
		# symmetric about 0
		"instrumentgroup,rss_timing_residual": rate.NDBins((rate.LinearBins(0.5, instrument_categories.max() + 0.5, instrument_categories.max()), rate.ATanBins(-0.02, +0.02, 1001)))
	}

	filters = {
		"H1_snr2_chi2": rate.gaussian_window(11, 11, sigma = 20),
		"H2_snr2_chi2": rate.gaussian_window(11, 11, sigma = 20),
		"L1_snr2_chi2": rate.gaussian_window(11, 11, sigma = 20),
		"V1_snr2_chi2": rate.gaussian_window(11, 11, sigma = 20),
		"H1_H2_dt": rate.gaussian_window(11, sigma = 20),
		"H1_L1_dt": rate.gaussian_window(11, sigma = 20),
		"H1_V1_dt": rate.gaussian_window(11, sigma = 20),
		"H2_L1_dt": rate.gaussian_window(11, sigma = 20),
		"H2_V1_dt": rate.gaussian_window(11, sigma = 20),
		"L1_V1_dt": rate.gaussian_window(11, sigma = 20),
		"H1_H2_dA": rate.gaussian_window(11, sigma = 20),
		"H1_L1_dA": rate.gaussian_window(11, sigma = 20),
		"H1_V1_dA": rate.gaussian_window(11, sigma = 20),
		"H2_L1_dA": rate.gaussian_window(11, sigma = 20),
		"H2_V1_dA": rate.gaussian_window(11, sigma = 20),
		"L1_V1_dA": rate.gaussian_window(11, sigma = 20),
		"H1_H2_df": rate.gaussian_window(11, sigma = 20),
		"H1_L1_df": rate.gaussian_window(11, sigma = 20),
		"H1_V1_df": rate.gaussian_window(11, sigma = 20),
		"H2_L1_df": rate.gaussian_window(11, sigma = 20),
		"H2_V1_df": rate.gaussian_window(11, sigma = 20),
		"L1_V1_df": rate.gaussian_window(11, sigma = 20),
		# instrument group filter is a no-op, should produce a
		# 1-bin top-hat window.
		"instrumentgroup,rss_timing_residual": rate.gaussian_window(1e-100, 11, sigma = 20)
	}

	@staticmethod
	def coinc_params(events, offsetvector, triangulators):
		#
		# check for coincs that have been vetoed entirely
		#

		if len(events) < 2:
			return None

		#
		# Initialize the parameter dictionary, sort the events by
		# instrument name (the multi-instrument parameters are defined for
		# the instruments in this order and the triangulators are
		# constructed this way too), and retrieve the sorted instrument
		# names
		#

		params = {}
		events = tuple(sorted(events, key = lambda event: event.ifo))
		instruments = tuple(event.ifo for event in events)

		#
		# zero-instrument parameters
		#

		ignored, ignored, ignored, rss_timing_residual = triangulators[instruments](tuple(event.peak + offsetvector[event.ifo] for event in events))
		# FIXME:  rss_timing_residual is forced to 0 to disable this
		# feature.  all the code to compute it properly is still here and
		# given suitable initializations, the distribution data is still
		# two-dimensional and has a suitable filter applied to it, but all
		# events are forced into the RSS_{\Delta t} = 0 bin, in effect
		# removing that dimension from the data.  We can look at this again
		# sometime in the future if we're curious why it didn't help.  Just
		# delete the next line and you're back in business.
		rss_timing_residual = 0.0
		params["instrumentgroup,rss_timing_residual"] = (StringCoincParamsDistributions.instrument_categories.category(instruments), rss_timing_residual)

		#
		# one-instrument parameters
		#

		for event in events:
			prefix = "%s_" % event.ifo

			params["%ssnr2_chi2" % prefix] = (event.snr**2.0, event.chisq / event.chisq_dof)

		#
		# two-instrument parameters.  note that events are sorted by
		# instrument
		#

		for event1, event2 in iterutils.choices(events, 2):
			assert event1.ifo != event2.ifo

			prefix = "%s_%s_" % (event1.ifo, event2.ifo)

			dt = float((event1.peak + offsetvector[event1.ifo]) - (event2.peak + offsetvector[event2.ifo]))
			params["%sdt" % prefix] = (dt,)

			dA = math.log10(abs(event1.amplitude / event2.amplitude))
			params["%sdA" % prefix] = (dA,)

			# f_cut = central_freq + bandwidth/2
			f_cut1 = event1.central_freq + event1.bandwidth / 2
			f_cut2 = event2.central_freq + event2.bandwidth / 2
			df = float((math.log10(f_cut1) - math.log10(f_cut2)) / (math.log10(f_cut1) + math.log10(f_cut2)))
			params["%sdf" % prefix] = (df,)

		#
		# done
		#

		return params

	def add_slidelessbackground(self, database, experiments, param_func_args = ()):
		# FIXME:  this needs to be taught how to not slide H1 and
		# H2 with respect to each other

		# segment lists
		seglists = database.seglists - database.vetoseglists

		# construct the event list dictionary.  remove vetoed
		# events from the lists and save event peak times so they
		# can be restored later
		eventlists = {}
		orig_peak_times = {}
		for event in database.sngl_burst_table:
			if event.peak in seglists[event.ifo]:
				try:
					eventlists[event.ifo].append(event)
				except KeyError:
					eventlists[event.ifo] = [event]
				orig_peak_times[event] = event.peak

		# parse the --thresholds H1,L1=... command-line options from burca
		delta_t = [float(threshold.split("=")[-1]) for threshold in ligolw_process.get_process_params(database.xmldoc, "ligolw_burca", "--thresholds")]
		if not all(delta_t[0] == threshold for threshold in delta_t[1:]):
			raise ValueError("\Delta t is not unique in ligolw_burca arguments")
		delta_t = delta_t.pop()

		# construct the coinc generator.  note that H1+H2-only
		# coincs are forbidden, which is affected here by removing
		# that instrument combination from the object's internal
		# .rates dictionary
		coinc_generator = snglcoinc.CoincSynthesizer(eventlists, seglists, delta_t)
		if frozenset(("H1", "H2")) in coinc_generator.rates:
			del coinc_generator.rates[frozenset(("H1", "H2"))]

		# build a dictionary of time-of-arrival generators
		toa_generator = dict((instruments, coinc_generator.plausible_toas(instruments)) for instruments in coinc_generator.rates.keys())

		# how many coincs?  the expected number is obtained by
		# multiplying the total zero-lag time for which at least
		# two instruments were on by the sum of the rates for all
		# coincs to get the mean number of coincs per zero-lag
		# observation time, and multiplying that by the number of
		# experiments the background should simulate to get the
		# mean number of background events to simulate.  the actual
		# number simulated is a Poisson-distributed RV with that
		# mean.
		n_coincs, = scipy.stats.poisson.rvs(float(abs(segmentsUtils.vote(seglists.values(), 2))) * sum(coinc_generator.rates.values()) * experiments)

		# generate synthetic background coincs
		zero_lag_offset_vector = offsetvector((instrument, 0.0) for instrument in seglists)
		for n, events in enumerate(coinc_generator.coincs(lsctables.SnglBurst.get_peak)):
			# n = 1 on 2nd iteration, so placing this condition
			# where it is in the loop causes the correct number
			# of events to be added to the background
			if n >= n_coincs:
				break
			# assign fake peak times
			toas = toa_generator[frozenset(event.ifo for event in events)].next()
			for event in events:
				event.peak = toas[event.ifo]
			# compute coincidence parameters
			self.add_background(self.coinc_params(events, zero_lag_offset_vector, *param_func_args))

		# restore original peak times
		for event, peak_time in orig_peak_times.iteritems():
			event.peak = peak_time

Example #22
options, filenames = parse_command_line()


#
# =============================================================================
#
#   Custom SnglBurstTable append() method to put triggers directly into bins
#
# =============================================================================
#


nbins = int(float(abs(options.read_segment)) / options.window) * bins_per_filterwidth
binning = rate.NDBins((rate.LinearBins(options.read_segment[0], options.read_segment[1], nbins),))
trigger_rate = rate.BinnedArray(binning)

num_triggers = 0


def snglburst_append(self, row, verbose = options.verbose):
	global num_triggers
	t = row.peak
	if t in options.read_segment:
		trigger_rate[t,] += 1.0
	num_triggers += 1
	if verbose and not (num_triggers % 125):
		print >>sys.stderr, "sngl_burst rows read:  %d\r" % num_triggers,
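Presumably the custom append() is then attached to the table class before the documents are loaded, so that triggers are histogrammed during parsing rather than stored; a sketch of that wiring (assumed, not shown in the snippet):

from glue.ligolw import lsctables

lsctables.SnglBurstTable.append = snglburst_append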

Example #23
class BurcaCoincParamsDistributions(snglcoinc.CoincParamsDistributions):
    binnings = {
        "H1_H2_dband":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_L1_dband":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H2_L1_dband":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_V1_dband":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "L1_V1_dband":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_H2_ddur":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_L1_ddur":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H2_L1_ddur":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_V1_ddur":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "L1_V1_ddur":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_H2_df":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_L1_df":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H2_L1_df":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_V1_df":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "L1_V1_df":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_H2_dh":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_L1_dh":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H2_L1_dh":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_V1_dh":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "L1_V1_dh":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_H2_dt":
        dt_binning("H1", "H2"),
        "H1_L1_dt":
        dt_binning("H1", "L1"),
        "H2_L1_dt":
        dt_binning("H2", "L1"),
        "H1_V1_dt":
        dt_binning("H1", "V1"),
        "L1_V1_dt":
        dt_binning("L1", "V1")
    }

    filters = {
        "H1_H2_dband": rate.gaussian_window(11, 5),
        "H1_L1_dband": rate.gaussian_window(11, 5),
        "H2_L1_dband": rate.gaussian_window(11, 5),
        "H1_V1_dband": rate.gaussian_window(11, 5),
        "L1_V1_dband": rate.gaussian_window(11, 5),
        "H1_H2_ddur": rate.gaussian_window(11, 5),
        "H1_L1_ddur": rate.gaussian_window(11, 5),
        "H2_L1_ddur": rate.gaussian_window(11, 5),
        "H1_V1_ddur": rate.gaussian_window(11, 5),
        "L1_V1_ddur": rate.gaussian_window(11, 5),
        "H1_H2_df": rate.gaussian_window(11, 5),
        "H1_L1_df": rate.gaussian_window(11, 5),
        "H2_L1_df": rate.gaussian_window(11, 5),
        "H1_V1_df": rate.gaussian_window(11, 5),
        "L1_V1_df": rate.gaussian_window(11, 5),
        "H1_H2_dh": rate.gaussian_window(11, 5),
        "H1_L1_dh": rate.gaussian_window(11, 5),
        "H2_L1_dh": rate.gaussian_window(11, 5),
        "H1_V1_dh": rate.gaussian_window(11, 5),
        "L1_V1_dh": rate.gaussian_window(11, 5),
        "H1_H2_dt": rate.gaussian_window(11, 5),
        "H1_L1_dt": rate.gaussian_window(11, 5),
        "H2_L1_dt": rate.gaussian_window(11, 5),
        "H1_V1_dt": rate.gaussian_window(11, 5),
        "L1_V1_dt": rate.gaussian_window(11, 5)
    }

    @classmethod
    def from_filenames(cls, filenames, name, verbose=False):
        """
		Convenience function to deserialize
		CoincParamsDistributions objects from a collection of XML
		files and return their sum.  The return value is a
		two-element tuple.  The first element is the deserialized
		and summed CoincParamsDistributions object, the second is a
		segmentlistdict indicating the interval of time spanned by
		the out segments in the search_summary rows matching the
		process IDs that were attached to the
		CoincParamsDistributions objects in the XML.
		"""
        self = None
        for n, filename in enumerate(filenames, 1):
            if verbose:
                print >> sys.stderr, "%d/%d:" % (n, len(filenames)),
            xmldoc = ligolw_utils.load_filename(
                filename, verbose=verbose, contenthandler=cls.contenthandler)
            if self is None:
                self = cls.from_xml(xmldoc, name)
                seglists = lsctables.SearchSummaryTable.get_table(
                    xmldoc).get_out_segmentlistdict(set([self.process_id
                                                         ])).coalesce()
            else:
                other = cls.from_xml(xmldoc, name)
                self += other
                seglists |= lsctables.SearchSummaryTable.get_table(
                    xmldoc).get_out_segmentlistdict(set([other.process_id
                                                         ])).coalesce()
                del other
            xmldoc.unlink()
        return self, seglists
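A usage sketch, assuming pylal is available and that the XML files were produced by the corresponding search; the filenames and distributions name are hypothetical.

coinc_params, seglists = BurcaCoincParamsDistributions.from_filenames(
    ["dist1.xml.gz", "dist2.xml.gz"], u"burca_tailor", verbose=True)
print(abs(seglists.extent_all()))  # total time spanned by the out segments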
Example #24
    def create_hist_plot(self, n_bin, range = None):

        def create_area(x, dx, y, dy):
            px = [x-dx/2, x+dx/2, x+dx/2, x-dx/2, x-dx/2]
            py = [y-dy/2, y-dy/2, y+dy/2, y+dy/2, y-dy/2]
            return px, py
        
        def draw_error_boxes(plot, x, dx, y, dy, col):
            
            bx, by = create_area(x, dx, y, dy )
            plot.ax.fill(bx, by, ec='w', fc = col, alpha = 0.2)
            
            bx, by = create_area(x, dx, y, 2*dy )
            plot.ax.fill(bx, by, ec='w', fc = col, alpha = 0.2)
            
            bx, by = create_area(x, dx, y, 3*dy )
            plot.ax.fill(bx, by, ec='k', fc = col, alpha = 0.2)

            return plot
        

        # set the surroundings of the parameter space
        if range is None:
            data_on = np.asarray(self.on_list)                
            inf_ind = np.isinf(data_on)
            val_min = data_on[~inf_ind].min()
            val_max = data_on.max()
        else:
            val_min = range[0]
            val_max = range[1]

        # create the hists
        hist_on = np.zeros(n_bin)
        hist_off = np.zeros(n_bin)   
        
        # create the rate bins
        lik_bins = rate.LinearBins(val_min, val_max, n_bin) 

        # and fill the histograms
        for x in self.off_list:
            if x>=val_min and x<=val_max:            
                hist_off[lik_bins[x]] += 1
            
        for x in self.on_list:
            if x>=val_min and x<=val_max:            
                hist_on[lik_bins[x]] += 1

        # get the centres
        px = lik_bins.centres()
        dx = px[1]-px[0]

        # norm the histograms
        norm = self.n_grb/self.n_off
        hist_on_norm = hist_on
        hist_off_norm = norm*hist_off

        # create the plot
        plot = plotutils.SimplePlot(r"Likelihood", r"counts",\
                                    r"Histogramm of on/offsource with %d bins"%\
                                    (n_bin))

        # the norm of the normed histograms: 1.0
        plot.add_content(px, hist_on, color = 'r', marker = 'o',\
                 markersize = 10.0, label = 'on-source')
        plot.add_content(px, hist_off_norm, color = 'b',marker = 's', \
                 markersize = 10, label = 'off-source')

        # add the error shading
        for x,n in zip(px, hist_on):
            
            # calculate the error (just the statistic error)
            dn = np.sqrt(n)
            plot = draw_error_boxes(plot, x, dx, n, dn, 'r')

        plot.finalize()           
        plot.ax.axis([val_min, val_max, 0.0, 20.0])
        #return plot
            
        # insert the lines of the data itself
        for x in self.on_list:
            if x>=val_min and x<=val_max: 
                plot.ax.plot([x,x],[0.0, 1.0],'k')            
        plot.ax.axis([val_min, val_max, 0.0, 20.0])
        
        return plot