Example #1
def compute_search_efficiency_in_bins(found, total, ndbins, sim_to_bins_function = lambda sim: (sim.distance,)):
	"""
	Compute the search efficiency in the provided ndbins.  The
	first dimension of ndbins must be the distance.  You must also provide a
	function that maps a sim inspiral row to the correct tuple to index the ndbins.
	"""

	input = rate.BinnedRatios(ndbins)

	# increment the numerator with the found injections
	[input.incnumerator(sim_to_bins_function(sim)) for sim in found]

	# increment the denominator with the total injections
	[input.incdenominator(sim_to_bins_function(sim)) for sim in total]

	# regularize by setting denoms to 1 to avoid nans
	input.regularize()

	# pull out the efficiency array, it is the ratio
	eff = rate.BinnedArray(rate.NDBins(ndbins), array = input.ratio())

	# compute binomial uncertainties in each bin
	err_arr = numpy.sqrt(eff.array * (1 - eff.array) / input.denominator.array)
	err = rate.BinnedArray(rate.NDBins(ndbins), array = err_arr)

	return eff, err
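
A minimal usage sketch (assuming pylal is importable; the `Sim` rows below are hypothetical stand-ins for sim_inspiral rows):

from collections import namedtuple
from pylal import rate

Sim = namedtuple("Sim", "distance")	# hypothetical stand-in for a sim_inspiral row
total = [Sim(d) for d in (10., 20., 30., 40., 50.)]
found = total[:3]	# pretend only the three nearest injections were recovered

# the first (and here only) ndbins dimension is distance, as required
ndbins = rate.NDBins((rate.LinearBins(0., 60., 6),))
eff, err = compute_search_efficiency_in_bins(found, total, ndbins)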

Example #2
 def __init__(self, x, y, magnitude, max_magnitude):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot("X", "Y")
     self.fig.set_size_inches(6, 6)
     self.x = x
     self.y = y
     self.magnitude = magnitude
     self.n_foreground = 0
     self.n_background = 0
     self.n_injections = 0
     max_magnitude = math.log10(max_magnitude)
     self.foreground_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
     self.background_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
     self.coinc_injection_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
     self.incomplete_coinc_injection_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
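
The four identical binnings above could come from one small helper; a possible refactor, not part of the original source:

def make_square_magnitude_bins(max_magnitude, n = 1024):
    # hypothetical helper:  a 2-D BinnedArray whose two linear axes both
    # span [-max_magnitude, +max_magnitude] (max_magnitude is already the
    # log10 of the physical limit at the call site above)
    bins = rate.LinearBins(-max_magnitude, max_magnitude, n)
    return rate.BinnedArray(rate.NDBins((bins, bins)))

Each assignment in the constructor then reduces to a call like self.foreground_bins = make_square_magnitude_bins(max_magnitude).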

Example #3
 def __init__(self, x_instrument, y_instrument, magnitude, desc,
              min_magnitude, max_magnitude):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot(
         "%s %s" % (x_instrument, desc), "%s %s" % (y_instrument, desc))
     self.fig.set_size_inches(6, 6)
     self.axes.loglog()
     self.x_instrument = x_instrument
     self.y_instrument = y_instrument
     self.magnitude = magnitude
     self.foreground_x = []
     self.foreground_y = []
     self.n_foreground = 0
     self.n_background = 0
     self.n_injections = 0
     self.foreground_bins = rate.BinnedArray(
         rate.NDBins(
             (rate.LogarithmicBins(min_magnitude, max_magnitude, 1024),
              rate.LogarithmicBins(min_magnitude, max_magnitude, 1024))))
     self.background_bins = rate.BinnedArray(
         rate.NDBins(
             (rate.LogarithmicBins(min_magnitude, max_magnitude, 1024),
              rate.LogarithmicBins(min_magnitude, max_magnitude, 1024))))
     self.coinc_injection_bins = rate.BinnedArray(
         rate.NDBins(
             (rate.LogarithmicBins(min_magnitude, max_magnitude, 1024),
              rate.LogarithmicBins(min_magnitude, max_magnitude, 1024))))
     self.incomplete_coinc_injection_bins = rate.BinnedArray(
         rate.NDBins(
             (rate.LogarithmicBins(min_magnitude, max_magnitude, 1024),
              rate.LogarithmicBins(min_magnitude, max_magnitude, 1024))))
Example #4
	def __init__(self, instruments):
		self.densities = {}
		for instrument in instruments:
			self.densities["%s_snr2_chi2" % instrument] = rate.BinnedLnPDF(rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801), rate.ATanLogarithmicBins(.1, 1e4, 801))))
		for pair in itertools.combinations(sorted(instruments), 2):
			dt = 0.005 + snglcoinc.light_travel_time(*pair)	# seconds
			self.densities["%s_%s_dt" % pair] = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-dt, +dt, 801),)))
			self.densities["%s_%s_dA" % pair] = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)))
			self.densities["%s_%s_df" % pair] = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)))
		# only non-negative rss timing residual bins will be used
		# but we want a binning that's linear at the origin so
		# instead of inventing a new one we just use atan bins that
		# are symmetric about 0
		self.densities["instrumentgroup,rss_timing_residual"] = rate.BinnedLnPDF(rate.NDBins((snglcoinc.InstrumentBins(names = instruments), rate.ATanBins(-0.02, +0.02, 1001))))
Example #5
def dt_binning(instrument1, instrument2):
    # FIXME:  hard-coded for directional search
    #dt = 0.02 + inject.light_travel_time(instrument1, instrument2)
    dt = 0.02
    return rate.NDBins(
        (rate.ATanBins(-dt, +dt, 12001), rate.LinearBins(0.0, 2 * math.pi,
                                                         61)))
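
The returned NDBins behaves like a tuple of its axis objects, so the axes can be inspected directly; a small sketch, assuming pylal's rate module:

binning = dt_binning("H1", "L1")
dt_axis, angle_axis = binning
print len(dt_axis.centres()), len(angle_axis.centres())	# 12001, 61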
Example #6
	def _bin_events(self, binning = None):
		# called internally by finish()
		if binning is None:
			minx, maxx = min(self.injected_x), max(self.injected_x)
			miny, maxy = min(self.injected_y), max(self.injected_y)
			binning = rate.NDBins((rate.LogarithmicBins(minx, maxx, 256), rate.LogarithmicBins(miny, maxy, 256)))

		self.efficiency = rate.BinnedRatios(binning)

		for xy in zip(self.injected_x, self.injected_y):
			self.efficiency.incdenominator(xy)
		for xy in zip(self.found_x, self.found_y):
			self.efficiency.incnumerator(xy)

		# 1 / error^2 is the number of injections that need to be
		# within the window in order for the fractional uncertainty
		# in that number to be = error.  multiplying by
		# bins_per_inj tells us how many bins the window needs to
		# cover, and taking the square root translates that into
		# the window's length on a side in bins.  because the
		# contours tend to run parallel to the x axis, the window
		# is dilated in that direction to improve resolution.

		bins_per_inj = self.efficiency.used() / float(len(self.injected_x))
		self.window_size_x = self.window_size_y = math.sqrt(bins_per_inj / self.error**2)
		self.window_size_x *= math.sqrt(2)
		self.window_size_y /= math.sqrt(2)
		if self.window_size_x > 100 or self.window_size_y > 100:
			# program will take too long to run
			raise ValueError("smoothing filter too large (not enough injections)")

		print >>sys.stderr, "The smoothing window for %s is %g x %g bins" % ("+".join(self.instruments), self.window_size_x, self.window_size_y),
		print >>sys.stderr, "which is %g%% x %g%% of the binning" % (100.0 * self.window_size_x / binning[0].n, 100.0 * self.window_size_y / binning[1].n)
Example #7
def volume_binned_pylal(f_dist, m_dist, bins=15):
    """ Compute the sensitive volume using a distanced 
    binned efficiency estimate
    
    Parameters
    -----------
    found_distance: numpy.ndarray
        The distances of found injections
    missed_dsistance: numpy.ndarray
        The distances of missed injections
        
    Returns
    --------
    volume: float
        Volume estimate
    volume_error: float
        The standared error in the volume
    """
    def sims_to_bin(sim):
        return (sim, 0)

    import numpy
    from pylal import rate
    from pylal.imr_utils import compute_search_volume_in_bins, compute_search_efficiency_in_bins
    found = f_dist
    total = numpy.concatenate([f_dist, m_dist])
    ndbins = rate.NDBins([
        rate.LinearBins(min(total), max(total), bins),
        rate.LinearBins(0., 1, 1)
    ])
    vol, verr = compute_search_volume_in_bins(found, total, ndbins,
                                              sims_to_bin)
    return vol.array[0], verr.array[0]
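
A quick usage sketch with synthetic distances (assumes pylal and numpy are installed):

import numpy
found_d = numpy.random.uniform(10., 100., 500)	# distances of found injections
missed_d = numpy.random.uniform(50., 200., 500)	# distances of missed injections
vol, verr = volume_binned_pylal(found_d, missed_d, bins = 15)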
Example #8
 def get_2d_mass_bins(self, low, high, bins):
   """
   Given the search's component mass range [low, high], return a 2D
   binning with the given number of bins in each direction
   """
   mass1Bin = rate.LinearBins(low, high, bins)
   mass2Bin = rate.LinearBins(low, high, bins)
   twoDMB = rate.NDBins((mass1Bin, mass2Bin))
   return twoDMB
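
For example, a 15 x 15 binning over component masses between 1 and 34 (given an instance self of the enclosing class):

twoDMB = self.get_2d_mass_bins(1.0, 34.0, 15)
mass1_centres, mass2_centres = twoDMB.centres()	# 15 centres per axis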
Example #9
def make_binning(plots):
	plots = [plot for instrument in plots.keys() for plot in plots[instrument] if isinstance(plot, SimBurstUtils.Efficiency_hrss_vs_freq)]
	if not plots:
		return None
	minx = min([min(plot.injected_x) for plot in plots])
	maxx = max([max(plot.injected_x) for plot in plots])
	miny = min([min(plot.injected_y) for plot in plots])
	maxy = max([max(plot.injected_y) for plot in plots])
	return rate.NDBins((rate.LogarithmicBins(minx, maxx, 512), rate.LogarithmicBins(miny, maxy, 512)))
Example #10
	def __init__(self, instrument, interval, width):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot(r"$f_{\mathrm{recovered}} / f_{\mathrm{injected}}$", "Event Number Density")
		self.axes.loglog()
		self.instrument = instrument
		self.found = 0
		# 21 bins per filter width
		bins = int(float(abs(interval)) / width) * 21
		binning = rate.NDBins((rate.LinearBins(interval[0], interval[1], bins),))
		self.offsets = rate.BinnedArray(binning)
		self.coinc_offsets = rate.BinnedArray(binning)
Example #11
	def __init__(self, instrument, interval, width):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot(r"$t_{\mathrm{recovered}} - t_{\mathrm{injected}}$ (s)", "Triggers per Unit Offset")
		self.axes.semilogy()
		self.instrument = instrument
		self.found = 0
		# 21 bins per filter width
		bins = int(float(abs(interval)) / width) * 21
		binning = rate.NDBins((rate.LinearBins(interval[0], interval[1], bins),))
		self.offsets = rate.BinnedArray(binning)
		self.coinc_offsets = rate.BinnedArray(binning)
Example #12
def guess_nd_bins(sims, bin_dict={"distance": (200, rate.LinearBins)}):
    """
	Given a dictionary of bin counts and bin objects keyed by sim
	attribute, come up with a sensible NDBins scheme
	"""
    return rate.NDBins([
        bintup[1](min([getattr(sim, attr) for sim in sims]),
                  max([getattr(sim, attr)
                       for sim in sims]) + sys.float_info.min, bintup[0])
        for attr, bintup in bin_dict.items()
    ])
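
A usage sketch with hypothetical simulation rows (any objects exposing the keyed attributes will do):

from collections import namedtuple
Sim = namedtuple("Sim", "distance mchirp")	# hypothetical rows
sims = [Sim(10., 1.2), Sim(50., 1.4), Sim(90., 1.6)]
ndbins = guess_nd_bins(sims, {"distance": (20, rate.LinearBins),
                              "mchirp": (5, rate.LinearBins)})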
Example #13
def guess_distance_chirp_mass_bins_from_sims(sims, mbins = 11, distbins = 200):
	"""
	Given a list of the injections, guess at the chirp mass and distance
	bins.
	"""
	dist_mchirp_vals = map(sim_to_distance_chirp_mass_bins_function, sims)

	distances = [tup[0] for tup in dist_mchirp_vals]
	mchirps = [tup[1] for tup in dist_mchirp_vals]

	return rate.NDBins([rate.LinearBins(min(distances), max(distances), distbins), rate.LinearBins(min(mchirps), max(mchirps), mbins)])
Example #14
def guess_distance_effective_spin_parameter_bins_from_sims(sims, chibins = 11, distbins = 200):
	"""
	Given a list of the injections, guess at the chi = (m1*s1z +
	m2*s2z)/(m1+m2) and distance bins.
	"""
	dist_chi_vals = map(sim_to_distance_effective_spin_parameter_bins_function, sims)

	distances = [tup[0] for tup in dist_chi_vals]
	chis = [tup[1] for tup in dist_chi_vals]

	return rate.NDBins([rate.LinearBins(min(distances), max(distances), distbins), rate.LinearBins(min(chis), max(chis), chibins)])

Example #15
    def add_contents(self, contents):
        if self.tisi_rows is None:
            # get a list of time slide dictionaries
            self.tisi_rows = contents.time_slide_table.as_dict().values()

            # find the largest and smallest offsets
            min_offset = min(offset for vector in self.tisi_rows
                             for offset in vector.values())
            max_offset = max(offset for vector in self.tisi_rows
                             for offset in vector.values())

            # a guess at the time slide spacing:  works if the
            # time slides are distributed as a square grid over
            # the plot area.  (max - min)^2 gives the area of
            # the time slide square in square seconds; dividing
            # by the length of the time slide list gives the
            # average area per time slide;  taking the square
            # root of that gives the average distance between
            # adjacent time slides in seconds
            time_slide_spacing = ((max_offset - min_offset)**2 /
                                  len(self.tisi_rows))**0.5

            # use an average of 3 bins per time slide in each
            # direction, but round to an odd integer
            nbins = int(math.ceil(
                (max_offset - min_offset) / time_slide_spacing * 3))
            if not nbins % 2:
                nbins += 1
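            # e.g. offsets spanning 0 .. 100 s with 100 time slides
            # gives an estimated spacing of sqrt(100**2 / 100) = 10 s,
            # so nbins = ceil(100 / 10 * 3) = 30, bumped to the odd
            # integer 31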

            # construct the binning
            self.bins = rate.BinnedRatios(
                rate.NDBins((rate.LinearBins(min_offset, max_offset, nbins),
                             rate.LinearBins(min_offset, max_offset, nbins))))

        self.seglists |= contents.seglists

        for offsets in contents.connection.cursor().execute(
                """
SELECT tx.offset, ty.offset FROM
	coinc_event
	JOIN time_slide AS tx ON (
		tx.time_slide_id == coinc_event.time_slide_id
	)
	JOIN time_slide AS ty ON (
		ty.time_slide_id == coinc_event.time_slide_id
	)
WHERE
	coinc_event.coinc_def_id == ?
	AND tx.instrument == ?
	AND ty.instrument == ?
		""", (contents.bb_definer_id, self.x_instrument, self.y_instrument)):
            try:
                self.bins.incnumerator(offsets)
            except IndexError:
                # beyond plot boundaries
                pass
Example #16
 def __init__(self, ifo, interval, width):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot(
         "Peak Frequency (Hz)",
         "Trigger Rate Spectral Density (triggers / s / Hz)")
     self.ifo = ifo
     self.nevents = 0
     # 21 bins per filter width
     bins = int(float(abs(interval)) / width) * 21
     binning = rate.NDBins((rate.LinearBins(interval[0], interval[1],
                                            bins), ))
     self.rate = rate.BinnedDensity(binning)
Example #17
 def __init__(self, instruments):
     self.densities = {}
     for pair in itertools.combinations(sorted(instruments), 2):
         # FIXME:  hard-coded for directional search
         #dt = 0.02 + snglcoinc.light_travel_time(*pair)
         dt = 0.02
         self.densities["%s_%s_dt" % pair] = rate.BinnedLnDPF(
             rate.NDBins((rate.ATanBins(-dt, +dt, 12001),
                          rate.LinearBins(0.0, 2 * math.pi, 61))))
         self.densities["%s_%s_dband" % pair] = rate.BinnedLnDPF(
             rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001),
                          rate.LinearBins(0.0, 2 * math.pi, 61))))
         self.densities["%s_%s_ddur" % pair] = rate.BinnedLnDPF(
             rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001),
                          rate.LinearBins(0.0, 2 * math.pi, 61))))
         self.densities["%s_%s_df" % pair] = rate.BinnedLnDPF(
             rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001),
                          rate.LinearBins(0.0, 2 * math.pi, 61))))
         self.densities["%s_%s_dh" % pair] = rate.BinnedLnDPF(
             rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001),
                          rate.LinearBins(0.0, 2 * math.pi, 61))))
Example #18
def guess_distance_total_mass_bins_from_sims(sims, nbins = 11, distbins = 200):
       """
       Given a list of the injections, guess at the mass1, mass2 and distance
       bins. Floor and ceil will be used to round down to the nearest integers.
       """

       total_lo = numpy.floor(min([sim.mass1 + sim.mass2 for sim in sims]))
       total_hi = numpy.ceil(max([sim.mass1 + sim.mass2 for sim in sims]))
       mindist = numpy.floor(min([sim.distance for sim in sims]))
       maxdist = numpy.ceil(max([sim.distance for sim in sims]))

       return rate.NDBins((rate.LinearBins(mindist, maxdist, distbins), rate.LinearBins(total_lo, total_hi, nbins)))
Example #19
 def __init__(self, ifo, width, max):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot(
         "Delay (s)", "Count / Delay")
     self.ifo = ifo
     self.nevents = 0
     # 21 bins per filter width
     interval = segments.segment(0, max + 2)
     self.bins = rate.BinnedDensity(
         rate.NDBins(
             (rate.LinearBins(interval[0], interval[1],
                              int(float(abs(interval)) / width) * 21), )))
     self.axes.semilogy()
Example #20
def compute_search_efficiency_in_bins(found,
                                      total,
                                      ndbins,
                                      sim_to_bins_function=lambda sim:
                                      (sim.distance, )):
    """
	This program creates the search efficiency in the provided ndbins.  The
	first dimension of ndbins must be the distance.  You also must provide a
	function that maps a sim inspiral row to the correct tuple to index the ndbins.
	"""

    input = rate.BinnedRatios(ndbins)

    # increment the numerator with the found injections
    [input.incnumerator(sim_to_bins_function(sim)) for sim in found]

    # increment the denominator with the total injections
    [input.incdenominator(sim_to_bins_function(sim)) for sim in total]

    # regularize:  force the efficiency to zero in bins with no found
    # injections by making their denominators huge
    input.denominator.array[input.numerator.array < 1] = 1e35

    # pull out the efficiency array, it is the ratio
    eff = rate.BinnedArray(rate.NDBins(ndbins), array=input.ratio())

    # compute binomial uncertainties in each bin
    k = input.numerator.array
    N = input.denominator.array
    eff_lo_arr = (N * (2 * k + 1) - numpy.sqrt(4 * N * k *
                                               (N - k) + N**2)) / (2 * N *
                                                                   (N + 1))
    eff_hi_arr = (N * (2 * k + 1) + numpy.sqrt(4 * N * k *
                                               (N - k) + N**2)) / (2 * N *
                                                                   (N + 1))

    eff_lo = rate.BinnedArray(rate.NDBins(ndbins), array=eff_lo_arr)
    eff_hi = rate.BinnedArray(rate.NDBins(ndbins), array=eff_hi_arr)

    return eff_lo, eff, eff_hi
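
A quick numeric check of the uncertainty bounds: with k = 5 found out of N = 10 injections, the central efficiency is k/N = 0.5 and the bounds evaluate to roughly 0.35 and 0.65:

import numpy
k, N = 5., 10.
lo = (N*(2*k + 1) - numpy.sqrt(4*N*k*(N - k) + N**2)) / (2*N*(N + 1))	# ~0.349
hi = (N*(2*k + 1) + numpy.sqrt(4*N*k*(N - k) + N**2)) / (2*N*(N + 1))	# ~0.651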
Example #21
def compute_search_volume_in_bins(found, total, ndbins, sim_to_bins_function):
	"""
	This program creates the search volume in the provided ndbins.  The
	first dimension of ndbins must be the distance over which to integrate.  You
	also must provide a function that maps a sim inspiral row to the correct tuple
	to index the ndbins.
	"""

	eff, err = compute_search_efficiency_in_bins(found, total, ndbins, sim_to_bins_function)
	dx = ndbins[0].upper() - ndbins[0].lower()
	r = ndbins[0].centres()

	# we have one less dimension on the output
	vol = rate.BinnedArray(rate.NDBins(ndbins[1:]))
	errors = rate.BinnedArray(rate.NDBins(ndbins[1:]))

	# integrate efficiency to obtain volume
	vol.array = numpy.trapz(eff.array.T * 4. * numpy.pi * r**2, r, dx)

	# propagate errors in eff to errors in V
	errors.array = numpy.sqrt(((4. * numpy.pi * r**2 * err.array.T * dx)**2).sum(-1))

	return vol, errors
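
The quantity being approximated is V = ∫ 4π r² ε(r) dr, and the per-bin contributions to the error are added in quadrature, δV = sqrt(Σ_i (4π r_i² δε_i Δr_i)²), which is what the .sum(-1) inside the square root evaluates.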
Example #22
def compute_search_efficiency_in_bins(found, total, ndbins, sim_to_bins_function = lambda sim: (sim.distance,)):
	"""
	Compute the search efficiency in the provided ndbins.  The
	first dimension of ndbins must be the distance.  You must also provide a
	function that maps a sim inspiral row to the correct tuple to index the ndbins.
	"""

	num = rate.BinnedArray(ndbins)
	den = rate.BinnedArray(ndbins)

	# increment the numerator with the found injections
	for sim in found:
		num[sim_to_bins_function(sim)] += 1

	# increment the denominator with the total injections
	for sim in total:
		den[sim_to_bins_function(sim)] += 1

	# sanity check
	assert (num.array <= den.array).all(), "some bins have more found injections than were made"

	# regularize by setting empty bins to zero efficiency
	den.array[numpy.logical_and(num.array == 0, den.array == 0)] = 1

	# pull out the efficiency array, it is the ratio
	eff = rate.BinnedArray(rate.NDBins(ndbins), array = num.array / den.array)

	# compute binomial uncertainties in each bin
	k = num.array
	N = den.array
	eff_lo_arr = ( N*(2*k + 1) - numpy.sqrt(4*N*k*(N - k) + N**2) ) / (2*N*(N + 1))
	eff_hi_arr = ( N*(2*k + 1) + numpy.sqrt(4*N*k*(N - k) + N**2) ) / (2*N*(N + 1))

	eff_lo = rate.BinnedArray(rate.NDBins(ndbins), array = eff_lo_arr)
	eff_hi = rate.BinnedArray(rate.NDBins(ndbins), array = eff_hi_arr)

	return eff_lo, eff, eff_hi
Example #23
def compute_search_volume(eff):
	"""
	Integrate efficiency to get search volume.
	"""
	# get distance bins
	ndbins = eff.bins
	dx = ndbins[0].upper() - ndbins[0].lower()
	r = ndbins[0].centres()

	# we have one less dimension on the output
	vol = rate.BinnedArray(rate.NDBins(ndbins[1:]))

	# integrate efficiency to obtain volume
	vol.array = numpy.trapz(eff.array.T * 4. * numpy.pi * r**2, r, dx)

	return vol
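
A sanity check of the integration: for unit efficiency the result should approach the volume of a sphere, (4/3) pi R**3:

import numpy
r = numpy.linspace(0., 100., 1000)
vol = numpy.trapz(4. * numpy.pi * r**2, r)	# ~4.19e6, i.e. (4/3) pi 100**3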
Example #24
class CoincParamsDistributions(ligolw_burca_tailor.BurcaCoincParamsDistributions):
	binnings = {
		"H1_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
		"H2_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
		"L1_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
		"H1H2_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
		"H1L1_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
		"H2L1_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),))
	}
	filters = {
		"H1_eff_snr": rate.gaussian_window(21),
		"H2_eff_snr": rate.gaussian_window(21),
		"L1_eff_snr": rate.gaussian_window(21),
		"H1H2_eff_snr": rate.gaussian_window(21),
		"H1L1_eff_snr": rate.gaussian_window(21),
		"H2L1_eff_snr": rate.gaussian_window(21)
	}
Example #25
def dt_binning(instrument1, instrument2):
    dt = 0.005 + snglcoinc.light_travel_time(instrument1,
                                             instrument2)  # seconds
    return rate.NDBins((rate.ATanBins(-dt, +dt, 801), ))
Example #26
class StringCoincParamsDistributions(snglcoinc.CoincParamsDistributions):
    ligo_lw_name_suffix = u"stringcusp_coincparamsdistributions"

    binnings = {
        "H1_snr2_chi2":
        rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801),
                     rate.ATanLogarithmicBins(.1, 1e4, 801))),
        "H2_snr2_chi2":
        rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801),
                     rate.ATanLogarithmicBins(.1, 1e4, 801))),
        "L1_snr2_chi2":
        rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801),
                     rate.ATanLogarithmicBins(.1, 1e4, 801))),
        "V1_snr2_chi2":
        rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801),
                     rate.ATanLogarithmicBins(.1, 1e4, 801))),
        "H1_H2_dt":
        dt_binning("H1", "H2"),
        "H1_L1_dt":
        dt_binning("H1", "L1"),
        "H1_V1_dt":
        dt_binning("H1", "V1"),
        "H2_L1_dt":
        dt_binning("H2", "L1"),
        "H2_V1_dt":
        dt_binning("H2", "V1"),
        "L1_V1_dt":
        dt_binning("L1", "V1"),
        "H1_H2_dA":
        rate.NDBins((rate.ATanBins(-0.5, +0.5, 801), )),
        "H1_L1_dA":
        rate.NDBins((rate.ATanBins(-0.5, +0.5, 801), )),
        "H1_V1_dA":
        rate.NDBins((rate.ATanBins(-0.5, +0.5, 801), )),
        "H2_L1_dA":
        rate.NDBins((rate.ATanBins(-0.5, +0.5, 801), )),
        "H2_V1_dA":
        rate.NDBins((rate.ATanBins(-0.5, +0.5, 801), )),
        "L1_V1_dA":
        rate.NDBins((rate.ATanBins(-0.5, +0.5, 801), )),
        "H1_H2_df":
        rate.NDBins((rate.ATanBins(-0.2, +0.2, 501), )),
        "H1_L1_df":
        rate.NDBins((rate.ATanBins(-0.2, +0.2, 501), )),
        "H1_V1_df":
        rate.NDBins((rate.ATanBins(-0.2, +0.2, 501), )),
        "H2_L1_df":
        rate.NDBins((rate.ATanBins(-0.2, +0.2, 501), )),
        "H2_V1_df":
        rate.NDBins((rate.ATanBins(-0.2, +0.2, 501), )),
        "L1_V1_df":
        rate.NDBins((rate.ATanBins(-0.2, +0.2, 501), )),
        # only non-negative rss timing residual bins will be used
        # but we want a binning that's linear at the origin so
        # instead of inventing a new one we just use atan bins that
        # are symmetric about 0
        "instrumentgroup,rss_timing_residual":
        rate.NDBins((snglcoinc.InstrumentBins(names=("H1", "H2", "L1", "V1")),
                     rate.ATanBins(-0.02, +0.02, 1001)))
    }

    filters = {
        "H1_snr2_chi2":
        rate.gaussian_window(11, 11, sigma=20),
        "H2_snr2_chi2":
        rate.gaussian_window(11, 11, sigma=20),
        "L1_snr2_chi2":
        rate.gaussian_window(11, 11, sigma=20),
        "V1_snr2_chi2":
        rate.gaussian_window(11, 11, sigma=20),
        "H1_H2_dt":
        rate.gaussian_window(11, sigma=20),
        "H1_L1_dt":
        rate.gaussian_window(11, sigma=20),
        "H1_V1_dt":
        rate.gaussian_window(11, sigma=20),
        "H2_L1_dt":
        rate.gaussian_window(11, sigma=20),
        "H2_V1_dt":
        rate.gaussian_window(11, sigma=20),
        "L1_V1_dt":
        rate.gaussian_window(11, sigma=20),
        "H1_H2_dA":
        rate.gaussian_window(11, sigma=20),
        "H1_L1_dA":
        rate.gaussian_window(11, sigma=20),
        "H1_V1_dA":
        rate.gaussian_window(11, sigma=20),
        "H2_L1_dA":
        rate.gaussian_window(11, sigma=20),
        "H2_V1_dA":
        rate.gaussian_window(11, sigma=20),
        "L1_V1_dA":
        rate.gaussian_window(11, sigma=20),
        "H1_H2_df":
        rate.gaussian_window(11, sigma=20),
        "H1_L1_df":
        rate.gaussian_window(11, sigma=20),
        "H1_V1_df":
        rate.gaussian_window(11, sigma=20),
        "H2_L1_df":
        rate.gaussian_window(11, sigma=20),
        "H2_V1_df":
        rate.gaussian_window(11, sigma=20),
        "L1_V1_df":
        rate.gaussian_window(11, sigma=20),
        # instrument group filter is a no-op, should produce a
        # 1-bin top-hat window.
        "instrumentgroup,rss_timing_residual":
        rate.gaussian_window(1e-100, 11, sigma=20)
    }

    @staticmethod
    def coinc_params(events, offsetvector, triangulators):
        #
        # check for coincs that have been vetoed entirely
        #

        if len(events) < 2:
            return None

        #
        # Initialize the parameter dictionary, sort the events by
        # instrument name (the multi-instrument parameters are defined for
        # the instruments in this order and the triangulators are
        # constructed this way too), and retrieve the sorted instrument
        # names
        #

        params = {}
        events = tuple(sorted(events, key=lambda event: event.ifo))
        instruments = tuple(event.ifo for event in events)

        #
        # zero-instrument parameters
        #

        ignored, ignored, ignored, rss_timing_residual = triangulators[
            instruments](tuple(event.peak + offsetvector[event.ifo]
                               for event in events))
        # FIXME:  rss_timing_residual is forced to 0 to disable this
        # feature.  all the code to compute it properly is still here and
        # given suitable initializations, the distribution data is still
        # two-dimensional and has a suitable filter applied to it, but all
        # events are forced into the RSS_{\Delta t} = 0 bin, in effect
        # removing that dimension from the data.  We can look at this again
        # sometime in the future if we're curious why it didn't help.  Just
        # delete the next line and you're back in business.
        rss_timing_residual = 0.0
        params["instrumentgroup,rss_timing_residual"] = (
            frozenset(instruments), rss_timing_residual)

        #
        # one-instrument parameters
        #

        for event in events:
            prefix = "%s_" % event.ifo

            params["%ssnr2_chi2" % prefix] = (event.snr**2.0,
                                              event.chisq / event.chisq_dof)

        #
        # two-instrument parameters.  note that events are sorted by
        # instrument
        #

        for event1, event2 in iterutils.choices(events, 2):
            assert event1.ifo != event2.ifo

            prefix = "%s_%s_" % (event1.ifo, event2.ifo)

            dt = float((event1.peak + offsetvector[event1.ifo]) -
                       (event2.peak + offsetvector[event2.ifo]))
            params["%sdt" % prefix] = (dt, )

            dA = math.log10(abs(event1.amplitude / event2.amplitude))
            params["%sdA" % prefix] = (dA, )

            # f_cut = central_freq + bandwidth/2
            f_cut1 = event1.central_freq + event1.bandwidth / 2
            f_cut2 = event2.central_freq + event2.bandwidth / 2
            df = float((math.log10(f_cut1) - math.log10(f_cut2)) /
                       (math.log10(f_cut1) + math.log10(f_cut2)))
            params["%sdf" % prefix] = (df, )

        #
        # done
        #

        return params

    def add_slidelessbackground(self,
                                database,
                                experiments,
                                param_func_args=()):
        # FIXME:  this needs to be taught how to not slide H1 and
        # H2 with respect to each other

        # segment lists
        seglists = database.seglists - database.vetoseglists

        # construct the event list dictionary.  remove vetoed
        # events from the lists and save event peak times so they
        # can be restored later
        eventlists = {}
        orig_peak_times = {}
        for event in database.sngl_burst_table:
            if event.peak in seglists[event.ifo]:
                try:
                    eventlists[event.ifo].append(event)
                except KeyError:
                    eventlists[event.ifo] = [event]
                orig_peak_times[event] = event.peak

        # parse the --thresholds H1,L1=... command-line options from burca
        delta_t = [
            float(threshold.split("=")[-1])
            for threshold in ligolw_process.get_process_params(
                database.xmldoc, "ligolw_burca", "--thresholds")
        ]
        if not all(delta_t[0] == threshold for threshold in delta_t[1:]):
            raise ValueError(
                "\\Delta t is not unique in ligolw_burca arguments")
        delta_t = delta_t.pop()

        # construct the coinc generator.  note that H1+H2-only
        # coincs are forbidden, which is affected here by removing
        # that instrument combination from the object's internal
        # .rates dictionary
        coinc_generator = snglcoinc.CoincSynthesizer(eventlists, seglists,
                                                     delta_t)
        if frozenset(("H1", "H2")) in coinc_generator.rates:
            del coinc_generator.rates[frozenset(("H1", "H2"))]

        # build a dictionary of time-of-arrival generators
        toa_generator = dict(
            (instruments, coinc_generator.plausible_toas(instruments))
            for instruments in coinc_generator.rates.keys())

        # how many coincs?  the expected number is obtained by
        # multiplying the total zero-lag time for which at least
        # two instruments were on by the sum of the rates for all
        # coincs to get the mean number of coincs per zero-lag
        # observation time, and multiplying that by the number of
        # experiments the background should simulate to get the
        # mean number of background events to simulate.  the actual
        # number simulated is a Poisson-distributed RV with that
        # mean.
        n_coincs, = scipy.stats.poisson.rvs(
            float(abs(segmentsUtils.vote(seglists.values(), 2))) *
            sum(coinc_generator.rates.values()) * experiments)

        # generate synthetic background coincs
        zero_lag_offset_vector = offsetvector(
            (instrument, 0.0) for instrument in seglists)
        for n, events in enumerate(
                coinc_generator.coincs(lsctables.SnglBurst.get_peak)):
            # n = 1 on 2nd iteration, so placing this condition
            # where it is in the loop causes the correct number
            # of events to be added to the background
            if n >= n_coincs:
                break
            # assign fake peak times
            toas = toa_generator[frozenset(event.ifo
                                           for event in events)].next()
            for event in events:
                event.peak = toas[event.ifo]
            # compute coincidence parameters
            self.add_background(
                self.coinc_params(events, zero_lag_offset_vector,
                                  *param_func_args))

        # restore original peak times
        for event, peak_time in orig_peak_times.iteritems():
            event.peak = peak_time
Example #27
	def finish(self, threshold):
		# bin the injections

		self._bin_events()

		# use the same binning for the found injection density as
		# was constructed for the efficiency

		self.found_density = rate.BinnedArray(self.efficiency.denominator.bins)

		# construct the amplitude weighting function

		amplitude_weight = rate.BinnedArray(rate.NDBins((rate.LinearBins(threshold - 100, threshold + 100, 10001),)))

		# gaussian window's width is the number of bins
		# corresponding to 10 units of amplitude, which is computed
		# by dividing 10 by the "volume" of the bin corresponding
		# to threshold.  index is the index of the element in
		# amplitude_weight corresponding to the threshold.

		index, = amplitude_weight.bins[threshold,]
		window = rate.gaussian_window(10.0 / amplitude_weight.bins.volumes()[index])
		window /= 10 * window[(len(window) - 1) / 2]

		# set the window data into the BinnedArray object.  the
		# Gaussian peaks on the middle element of the window, which
		# we want to place at index in the amplitude_weight array.

		lo = index - (len(window) - 1) / 2
		hi = lo + len(window)
		if lo < 0 or hi > len(amplitude_weight.array):
			raise ValueError("amplitude weighting window too large")
		amplitude_weight.array[lo:hi] = window

		# store the recovered injections in the found density bins
		# weighted by amplitude

		for x, y, z in self.recovered_xyz:
			try:
				weight = amplitude_weight[z,]
			except IndexError:
				# beyond the edge of the window
				weight = 0.0
			self.found_density[x, y] += weight

		# the efficiency is only valid up to the highest energy
		# that has been injected.  this creates problems later on
		# so, instead, for each frequency, identify the highest
		# energy that has been measured and copy the values for
		# that bin's numerator and denominator into the bins for
		# all higher energies.  do the same for the counts in the
		# found injection density bins.
		#
		# numpy.indices() returns two arrays, the first of which
		# has each element set equal to its x index, the second
		# has each element set equal to its y index;  we keep the
		# latter.  meanwhile numpy.roll() cyclically permutes the
		# injection counts down one along the y (energy) axis.
		# from this, the conditional finds bins where the
		# injection count is positive but the bin immediately
		# above it in energy is empty.  we select the elements
		# then use numpy.max() along the y direction to return the
		# highest such y index for each x index, which is a 1-D
		# array.  finally, enumerate() is used to iterate over x
		# index and corresponding y index, and if the y index is
		# not negative (was found) the value from that x-y bin is
		# copied to all higher bins.

		n = self.efficiency.numerator.array
		d = self.efficiency.denominator.array
		f = self.found_density.array
		bady = -1
		for x, y in enumerate(numpy.max(numpy.where((d > 0) & (numpy.roll(d, -1, axis = 1) <= 0), numpy.indices(d.shape)[1], bady), axis = 1)):
			if y != bady:
				n[x, y + 1:] = n[x, y]
				d[x, y + 1:] = d[x, y]
				f[x, y + 1:] = f[x, y]

		# now do the same for the bins at energies below those that
		# have been measured.

		bady = d.shape[1]
		for x, y in enumerate(numpy.min(numpy.where((d > 0) & (numpy.roll(d, 1, axis = 1) <= 0), numpy.indices(d.shape)[1], bady), axis = 1)):
			if y != bady:
				n[x, 0:y] = n[x, y]
				d[x, 0:y] = d[x, y]
				f[x, 0:y] = f[x, y]

		diagnostic_plot(self.efficiency.numerator.array, self.efficiency.denominator.bins, r"Efficiency Numerator (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_numerator_before.png")
		diagnostic_plot(self.efficiency.denominator.array, self.efficiency.denominator.bins, r"Efficiency Denominator (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_denominator_before.png")
		diagnostic_plot(self.found_density.array, self.efficiency.denominator.bins, r"Injections Lost / Unit of Threshold (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_found_density_before.png")

		# smooth the efficiency bins and the found injection
		# density bins using the same 2D window.

		window = rate.gaussian_window(self.window_size_x, self.window_size_y)
		window /= window[tuple((numpy.array(window.shape, dtype = "double") - 1) / 2)]
		rate.filter_binned_ratios(self.efficiency, window)
		rate.filter_array(self.found_density.array, window)

		diagnostic_plot(self.efficiency.numerator.array, self.efficiency.denominator.bins, r"Efficiency Numerator (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_numerator_after.png")
		diagnostic_plot(self.efficiency.denominator.array, self.efficiency.denominator.bins, r"Efficiency Denominator (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_denominator_after.png")
		diagnostic_plot(self.found_density.array, self.efficiency.denominator.bins, r"Injections Lost / Unit of Threshold (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_found_density_after.png")

		# compute the uncertainties in the efficiency and its
		# derivative by assuming these to be the binomial counting
		# fluctuations in the numerators.

		p = self.efficiency.ratio()
		self.defficiency = numpy.sqrt(p * (1 - p) / self.efficiency.denominator.array)
		p = self.found_density.array / self.efficiency.denominator.array
		self.dfound_density = numpy.sqrt(p * (1 - p) / self.efficiency.denominator.array)
Example #28
 def __init__(self, interval, width):
     # 21 bins per filter width
     bins = int(float(abs(interval)) / width) * 21
     self.binning = rate.NDBins((rate.LinearBins(interval[0], interval[1],
                                                 bins), ))
     self.data = {}

Example #29

options, filenames = parse_command_line()


#
# =============================================================================
#
#   Custom SnglBurstTable append() method to put triggers directly into bins
#
# =============================================================================
#


bins_per_filterwidth = 21	# 21 bins per filter width, as in the examples above
nbins = int(float(abs(options.read_segment)) / options.window) * bins_per_filterwidth
binning = rate.NDBins((rate.LinearBins(options.read_segment[0], options.read_segment[1], nbins),))
trigger_rate = rate.BinnedArray(binning)

num_triggers = 0


def snglburst_append(self, row, verbose = options.verbose):
	global num_triggers
	t = row.peak
	if t in options.read_segment:
		trigger_rate[t,] += 1.0
	num_triggers += 1
	if verbose and not (num_triggers % 125):
		print >>sys.stderr, "sngl_burst rows read:  %d\r" % num_triggers,
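
For the override to take effect, the table class's append() method has to be replaced before the documents are loaded; presumably a hook-up along these lines (hypothetical, following the usual glue.ligolw monkey-patching pattern):

lsctables.SnglBurstTable.append = snglburst_append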

Example #30
class BurcaCoincParamsDistributions(snglcoinc.CoincParamsDistributions):
    binnings = {
        "H1_H2_dband":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_L1_dband":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H2_L1_dband":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_V1_dband":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "L1_V1_dband":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_H2_ddur":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_L1_ddur":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H2_L1_ddur":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_V1_ddur":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "L1_V1_ddur":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_H2_df":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_L1_df":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H2_L1_df":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_V1_df":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "L1_V1_df":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_H2_dh":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_L1_dh":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H2_L1_dh":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_V1_dh":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "L1_V1_dh":
        rate.NDBins(
            (rate.LinearBins(-2.0, +2.0,
                             12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_H2_dt":
        dt_binning("H1", "H2"),
        "H1_L1_dt":
        dt_binning("H1", "L1"),
        "H2_L1_dt":
        dt_binning("H2", "L1"),
        "H1_V1_dt":
        dt_binning("H1", "V1"),
        "L1_V1_dt":
        dt_binning("L1", "V1")
    }

    filters = {
        "H1_H2_dband": rate.gaussian_window(11, 5),
        "H1_L1_dband": rate.gaussian_window(11, 5),
        "H2_L1_dband": rate.gaussian_window(11, 5),
        "H1_V1_dband": rate.gaussian_window(11, 5),
        "L1_V1_dband": rate.gaussian_window(11, 5),
        "H1_H2_ddur": rate.gaussian_window(11, 5),
        "H1_L1_ddur": rate.gaussian_window(11, 5),
        "H2_L1_ddur": rate.gaussian_window(11, 5),
        "H1_V1_ddur": rate.gaussian_window(11, 5),
        "L1_V1_ddur": rate.gaussian_window(11, 5),
        "H1_H2_df": rate.gaussian_window(11, 5),
        "H1_L1_df": rate.gaussian_window(11, 5),
        "H2_L1_df": rate.gaussian_window(11, 5),
        "H1_V1_df": rate.gaussian_window(11, 5),
        "L1_V1_df": rate.gaussian_window(11, 5),
        "H1_H2_dh": rate.gaussian_window(11, 5),
        "H1_L1_dh": rate.gaussian_window(11, 5),
        "H2_L1_dh": rate.gaussian_window(11, 5),
        "H1_V1_dh": rate.gaussian_window(11, 5),
        "L1_V1_dh": rate.gaussian_window(11, 5),
        "H1_H2_dt": rate.gaussian_window(11, 5),
        "H1_L1_dt": rate.gaussian_window(11, 5),
        "H2_L1_dt": rate.gaussian_window(11, 5),
        "H1_V1_dt": rate.gaussian_window(11, 5),
        "L1_V1_dt": rate.gaussian_window(11, 5)
    }

    @classmethod
    def from_filenames(cls, filenames, name, verbose=False):
        """
		Convenience function to deserialize
		CoincParamsDistributions objects from a collection of XML
		files and return their sum.  The return value is a
		two-element tuple.  The first element is the deserialized
		and summed CoincParamsDistributions object, the second is a
		segmentlistdict indicating the interval of time spanned by
		the out segments in the search_summary rows matching the
		process IDs that were attached to the
		CoincParamsDistributions objects in the XML.
		"""
        self = None
        for n, filename in enumerate(filenames, 1):
            if verbose:
                print >> sys.stderr, "%d/%d:" % (n, len(filenames)),
            xmldoc = ligolw_utils.load_filename(
                filename, verbose=verbose, contenthandler=cls.contenthandler)
            if self is None:
                self = cls.from_xml(xmldoc, name)
                seglists = lsctables.SearchSummaryTable.get_table(
                    xmldoc).get_out_segmentlistdict(set([self.process_id
                                                         ])).coalesce()
            else:
                other = cls.from_xml(xmldoc, name)
                self += other
                seglists |= lsctables.SearchSummaryTable.get_table(
                    xmldoc).get_out_segmentlistdict(set([other.process_id
                                                         ])).coalesce()
                del other
            xmldoc.unlink()
        return self, seglists