def __init__(self, x, y, magnitude, max_magnitude):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot("X", "Y")
     self.fig.set_size_inches(6, 6)
     self.x = x
     self.y = y
     self.magnitude = magnitude
     self.n_foreground = 0
     self.n_background = 0
     self.n_injections = 0
     max_magnitude = math.log10(max_magnitude)  # bin in log10(magnitude)
     self.foreground_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
     self.background_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
     self.coinc_injection_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
     self.incomplete_coinc_injection_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
Example #2
def compute_search_efficiency_in_bins(found, total, ndbins, sim_to_bins_function = lambda sim: (sim.distance,)):
	"""
	Compute the search efficiency in the provided ndbins.  The first
	dimension of ndbins must be the distance.  You must also provide a
	function that maps a sim inspiral row to the correct tuple with which
	to index the ndbins.
	"""

	input = rate.BinnedRatios(ndbins)

	# increment the numerator with the found injections
	for sim in found:
		input.incnumerator(sim_to_bins_function(sim))

	# increment the denominator with the total injections
	for sim in total:
		input.incdenominator(sim_to_bins_function(sim))

	# regularize by setting empty denominators to 1 to avoid NaNs
	input.regularize()

	# the efficiency is the ratio
	eff = rate.BinnedArray(rate.NDBins(ndbins), array = input.ratio())

	# binomial uncertainty in each bin: sqrt(p * (1 - p) / N)
	err_arr = numpy.sqrt(eff.array * (1 - eff.array) / input.denominator.array)
	err = rate.BinnedArray(rate.NDBins(ndbins), array = err_arr)

	return eff, err
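The error returned above is the plain binomial estimate sqrt(eff * (1 - eff) / N).  A minimal standalone sketch of the same arithmetic with bare numpy (the per-bin counts are invented, and no rate module is needed):

import numpy

# hypothetical per-bin counts: k found out of N injected in each distance bin
k = numpy.array([0., 3., 7., 10.])
N = numpy.array([10., 10., 10., 10.])

eff = k / N
err = numpy.sqrt(eff * (1.0 - eff) / N)

print(eff)  # [ 0.   0.3  0.7  1. ]
print(err)  # vanishes at eff = 0 and eff = 1, peaks near eff = 0.5

Example #3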
 def __init__(self, x_instrument, y_instrument, magnitude, desc,
              min_magnitude, max_magnitude):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot(
         "%s %s" % (x_instrument, desc), "%s %s" % (y_instrument, desc))
     self.fig.set_size_inches(6, 6)
     self.axes.loglog()
     self.x_instrument = x_instrument
     self.y_instrument = y_instrument
     self.magnitude = magnitude
     self.foreground_x = []
     self.foreground_y = []
     self.n_foreground = 0
     self.n_background = 0
     self.n_injections = 0
     self.foreground_bins = rate.BinnedArray(
         rate.NDBins(
             (rate.LogarithmicBins(min_magnitude, max_magnitude, 1024),
              rate.LogarithmicBins(min_magnitude, max_magnitude, 1024))))
     self.background_bins = rate.BinnedArray(
         rate.NDBins(
             (rate.LogarithmicBins(min_magnitude, max_magnitude, 1024),
              rate.LogarithmicBins(min_magnitude, max_magnitude, 1024))))
     self.coinc_injection_bins = rate.BinnedArray(
         rate.NDBins(
             (rate.LogarithmicBins(min_magnitude, max_magnitude, 1024),
              rate.LogarithmicBins(min_magnitude, max_magnitude, 1024))))
     self.incomplete_coinc_injection_bins = rate.BinnedArray(
         rate.NDBins(
             (rate.LogarithmicBins(min_magnitude, max_magnitude, 1024),
              rate.LogarithmicBins(min_magnitude, max_magnitude, 1024))))
Example #4
	def __init__(self, instrument, interval, width):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot(r"$f_{\mathrm{recovered}} / f_{\mathrm{injected}}$", "Event Number Density")
		self.axes.loglog()
		self.instrument = instrument
		self.found = 0
		# 21 bins per filter width
		bins = int(float(abs(interval)) / width) * 21
		binning = rate.NDBins((rate.LinearBins(interval[0], interval[1], bins),))
		self.offsets = rate.BinnedArray(binning)
		self.coinc_offsets = rate.BinnedArray(binning)
Example #5
	def __init__(self, instrument, interval, width):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot(r"$t_{\mathrm{recovered}} - t_{\mathrm{injected}}$ (s)", "Triggers per Unit Offset")
		self.axes.semilogy()
		self.instrument = instrument
		self.found = 0
		# 21 bins per filter width
		bins = int(float(abs(interval)) / width) * 21
		binning = rate.NDBins((rate.LinearBins(interval[0], interval[1], bins),))
		self.offsets = rate.BinnedArray(binning)
		self.coinc_offsets = rate.BinnedArray(binning)
Example #6
 def _derivitave_fit(self, farts, FARt, vAs, twodbin):
   '''
   Relies on scipy spline fits for each mass bin to find the derivative
   of the volume at a given FAR.  To see how this works, consider the
   simple case of a parabola, where to high precision it calculates the
   proper derivative:
      A = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
      B = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
      C = interpolate.splrep(B, A, s=0, k=4)
      interpolate.splev(5, C, der=1)
      10.000
   '''
   dA = rate.BinnedArray(twodbin)
   for m1 in range(dA.array.shape[0]):
     for m2 in range(dA.array.shape[1]):
       da = []
       for f in farts.centres():
         da.append(vAs[farts[f]].array[m1][m2])
       fit = interpolate.splrep(farts.centres(), da, k=4)
       val = interpolate.splev(FARt, fit, der=1)
       # FIXME this prevents negative derivatives arising from bad fits
       if val < 0: val = 0
       dA.array[m1][m2] = val
   return dA
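A self-contained check of the spline trick described in the docstring (pure numpy/scipy; the parabola is the docstring's own example):

import numpy
from scipy import interpolate

# y = x**2 sampled on the integers 1..10; dy/dx at x = 5 is exactly 10
B = numpy.arange(1, 11, dtype=float)
A = B**2
C = interpolate.splrep(B, A, s=0, k=4)
print(interpolate.splev(5, C, der=1))  # ~10.0 to high precision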
Example #7
def get_volume_derivative(injfnames, twoDMassBins, dBin, FAR,
                          zero_lag_segments, gw):
    if (FAR == 0):
        print "\n\nFAR = 0\n \n"
        # FIXME lambda = ~inf if loudest event is above loudest timeslide?
        output = rate.BinnedArray(twoDMassBins)
        output.array = 10**6 * numpy.ones(output.array.shape)
        return output
    livetime = float(abs(zero_lag_segments))
    FARh = FAR * 100000
    FARl = FAR * 0.001
    nbins = 5
    FARS = rate.LogarithmicBins(FARl, FARh, nbins)
    vA = []
    for far in FARS.centres():
        m, f = get_injections(injfnames, far, zero_lag_segments)
        print >> sys.stderr, "computing volume at FAR " + str(far)
        vAt, vA2t = twoD_SearchVolume(f, m, twoDMassBins, dBin, gw, livetime,
                                      1)
        # we need to compute the derivative of the log, per the upper limit paper
        vAt.array = scipy.log10(vAt.array + 0.001)
        vA.append(vAt)
    # the derivative is calculated with respect to FAR * t
    FARS = rate.LogarithmicBins(FARl * livetime, FARh * livetime, nbins)
    return derivitave_fit(FARS, FAR * livetime, vA, twoDMassBins)
Example #8
 def live_time_array(self, instruments):
   """
   return an array of live times, note every bin will be the same :) it is just a 
   convenience.
   """
   live_time = rate.BinnedArray(self.twoDMassBins)
   live_time.array += 1.0
   live_time.array *= self.livetime[instruments]
   return live_time
Example #9
 def __init__(self, ifo, interval, width):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot(
         "Peak Frequency (Hz)",
         "Trigger Rate Spectral Density (triggers / s / Hz)")
     self.ifo = ifo
     self.nevents = 0
     # 21 bins per filter width
     bins = int(float(abs(interval)) / width) * 21
     binning = rate.NDBins((rate.LinearBins(interval[0], interval[1],
                                            bins), ))
     self.rate = rate.BinnedArray(binning)
Example #10
 def __init__(self, ifo, width, max):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot(
         "Delay (s)", "Count / Delay")
     self.ifo = ifo
     self.nevents = 0
     # 21 bins per filter width
     interval = segments.segment(0, max + 2)
     self.bins = rate.BinnedArray(
         rate.NDBins(
             (rate.LinearBins(interval[0], interval[1],
                              int(float(abs(interval)) / width) * 21), )))
     self.axes.semilogy()
Example #11
def compute_search_efficiency_in_bins(found,
                                      total,
                                      ndbins,
                                      sim_to_bins_function=lambda sim:
                                      (sim.distance, )):
    """
	This program creates the search efficiency in the provided ndbins.  The
	first dimension of ndbins must be the distance.  You also must provide a
	function that maps a sim inspiral row to the correct tuple to index the ndbins.
	"""

    input = rate.BinnedRatios(ndbins)

    # increment the numerator with the missed injections
    [input.incnumerator(sim_to_bins_function(sim)) for sim in found]

    # increment the denominator with the total injections
    [input.incdenominator(sim_to_bins_function(sim)) for sim in total]

    # regularize by setting empty bins to zero efficiency
    input.denominator.array[input.numerator.array < 1] = 1e35

    # pull out the efficiency array, it is the ratio
    eff = rate.BinnedArray(rate.NDBins(ndbins), array=input.ratio())

    # compute binomial uncertainties in each bin
    k = input.numerator.array
    N = input.denominator.array
    eff_lo_arr = (N * (2 * k + 1) - numpy.sqrt(4 * N * k * (N - k) + N**2)) / (2 * N * (N + 1))
    eff_hi_arr = (N * (2 * k + 1) + numpy.sqrt(4 * N * k * (N - k) + N**2)) / (2 * N * (N + 1))

    eff_lo = rate.BinnedArray(rate.NDBins(ndbins), array=eff_lo_arr)
    eff_hi = rate.BinnedArray(rate.NDBins(ndbins), array=eff_hi_arr)

    return eff_lo, eff, eff_hi
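A standalone sketch of the closed-form error band used above (bare numpy, invented counts).  Unlike the naive sqrt(eff * (1 - eff) / N) error, these bounds stay finite and nonzero even when k = 0 or k = N:

import numpy

def binomial_band(k, N):
    # the same closed form as eff_lo_arr / eff_hi_arr above
    half = numpy.sqrt(4 * N * k * (N - k) + N**2)
    lo = (N * (2 * k + 1) - half) / (2 * N * (N + 1))
    hi = (N * (2 * k + 1) + half) / (2 * N * (N + 1))
    return lo, hi

print(binomial_band(0.0, 10.0))   # (0.0, ~0.091): nothing found
print(binomial_band(10.0, 10.0))  # (~0.909, 1.0): everything found
print(binomial_band(5.0, 10.0))   # roughly 0.5 +/- 0.15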
Example #12
def compute_search_volume_in_bins(found, total, ndbins, sim_to_bins_function):
	"""
	This program creates the search volume in the provided ndbins.  The
	first dimension of ndbins must be the distance over which to integrate.  You
	also must provide a function that maps a sim inspiral row to the correct tuple
	to index the ndbins.
	"""

	eff, err = compute_search_efficiency_in_bins(found, total, ndbins, sim_to_bins_function)
	dx = ndbins[0].upper() - ndbins[0].lower()
	r = ndbins[0].centres()

	# we have one less dimension on the output
	vol = rate.BinnedArray(rate.NDBins(ndbins[1:]))
	errors = rate.BinnedArray(rate.NDBins(ndbins[1:]))

	# integrate efficiency to obtain volume
	vol.array = numpy.trapz(eff.array.T * 4. * numpy.pi * r**2, r, dx)

	# propagate errors in eff to errors in V
	errors.array = numpy.sqrt(((4 * numpy.pi * r**2 * err.array.T * dx)**2).sum(-1))

	return vol, errors
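The error propagation above treats each distance bin's efficiency error as independent and sums the weighted contributions in quadrature; a toy numpy version with invented numbers:

import numpy

# V = sum_i w_i * eff_i with w_i = 4 pi r_i**2 dx_i, so
# sigma_V = sqrt(sum_i (w_i * sigma_i)**2)
r = numpy.linspace(5.0, 100.0, 20)
dx = numpy.full(r.shape, r[1] - r[0])
err = numpy.full(r.shape, 0.05)    # invented 5% absolute efficiency error
sigma_V = numpy.sqrt(((4 * numpy.pi * r**2 * err * dx)**2).sum())
print(sigma_V)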
Example #13
def compute_search_efficiency_in_bins(found, total, ndbins, sim_to_bins_function = lambda sim: (sim.distance,)):
	"""
	This program creates the search efficiency in the provided ndbins.  The
	first dimension of ndbins must be the distance.  You also must provide a
	function that maps a sim inspiral row to the correct tuple to index the ndbins.
	"""

	num = rate.BinnedArray(ndbins)
	den = rate.BinnedArray(ndbins)

	# increment the numerator with the found injections
	for sim in found:
		num[sim_to_bins_function(sim)] += 1

	# increment the denominator with the total injections
	for sim in total:
		den[sim_to_bins_function(sim)] += 1

	# sanity check
	assert (num.array <= den.array).all(), "some bins have more found injections than were made"

	# regularize by setting empty bins to zero efficiency
	den.array[numpy.logical_and(num.array == 0, den.array == 0)] = 1

	# pull out the efficiency array, it is the ratio
	eff = rate.BinnedArray(rate.NDBins(ndbins), array = num.array / den.array)

	# compute binomial uncertainties in each bin
	k = num.array
	N = den.array
	eff_lo_arr = ( N*(2*k + 1) - numpy.sqrt(4*N*k*(N - k) + N**2) ) / (2*N*(N + 1))
	eff_hi_arr = ( N*(2*k + 1) + numpy.sqrt(4*N*k*(N - k) + N**2) ) / (2*N*(N + 1))

	eff_lo = rate.BinnedArray(rate.NDBins(ndbins), array = eff_lo_arr)
	eff_hi = rate.BinnedArray(rate.NDBins(ndbins), array = eff_hi_arr)

	return eff_lo, eff, eff_hi
Example #14
def compute_search_volume(eff):
	"""
	Integrate efficiency to get search volume.
	"""
	# get distance bins
	ndbins = eff.bins
	dx = ndbins[0].upper() - ndbins[0].lower()
	r = ndbins[0].centres()

	# we have one less dimension on the output
	vol = rate.BinnedArray(rate.NDBins(ndbins[1:]))

	# integrate efficiency to obtain volume
	vol.array = numpy.trapz(eff.array.T * 4. * numpy.pi * r**2, r, dx)

	return vol
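A quick sanity check of the integral (bare numpy, one distance dimension): with the efficiency equal to 1 out to R, the result should be the Euclidean volume (4/3) * pi * R**3.

import numpy

R = 100.0
r = numpy.linspace(0.0, R, 1001)
eff = numpy.ones_like(r)
vol = numpy.trapz(eff * 4.0 * numpy.pi * r**2, r)
print(vol, 4.0 / 3.0 * numpy.pi * R**3)  # agree to a few parts per million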
Example #15
	def finish(self, threshold):
		# bin the injections

		self._bin_events()

		# use the same binning for the found injection density as
		# was constructed for the efficiency

		self.found_density = rate.BinnedArray(self.efficiency.denominator.bins)

		# construct the amplitude weighting function

		amplitude_weight = rate.BinnedArray(rate.NDBins((rate.LinearBins(threshold - 100, threshold + 100, 10001),)))

		# gaussian window's width is the number of bins
		# corresponding to 10 units of amplitude, which is computed
		# by dividing 10 by the "volume" of the bin corresponding
		# to threshold.  index is the index of the element in
		# amplitude_weight corresponding to the threshold.

		index, = amplitude_weight.bins[threshold,]
		window = rate.gaussian_window(10.0 / amplitude_weight.bins.volumes()[index])
		window /= 10 * window[(len(window) - 1) / 2]

		# set the window data into the BinnedArray object.  the
		# Gaussian peaks on the middle element of the window, which
		# we want to place at index in the amplitude_weight array.

		lo = index - (len(window) - 1) / 2
		hi = lo + len(window)
		if lo < 0 or hi > len(amplitude_weight.array):
			raise ValueError("amplitude weighting window too large")
		amplitude_weight.array[lo:hi] = window

		# store the recovered injections in the found density bins
		# weighted by amplitude

		for x, y, z in self.recovered_xyz:
			try:
				weight = amplitude_weight[z,]
			except IndexError:
				# beyond the edge of the window
				weight = 0.0
			self.found_density[x, y] += weight

		# the efficiency is only valid up to the highest energy
		# that has been injected.  this creates problems later on
		# so, instead, for each frequency, identify the highest
		# energy that has been measured and copy the values for
		# that bin's numerator and denominator into the bins for
		# all higher energies.  do the same for the counts in the
		# found injection density bins.
		#
		# numpy.indices() returns two arrays, the first of which
		# has each element set equal to its x index, the second
		# has each element set equal to its y index;  we keep the
		# latter.  meanwhile numpy.roll() cyclically permutes the
		# denominator bins down one along the y (energy) axis.
		# from this, the conditional finds bins where the
		# denominator is positive but is <= 0 in the bin
		# immediately above it in energy, i.e. the highest-energy
		# bin that received injections.  we select the elements
		# from the y index array where the conditional is true,
		# then use numpy.max() along the y direction to return the
		# highest such y index for each x index, which is a 1-D
		# array.  finally, enumerate() is used to iterate over x
		# index and corresponding y index, and if the y index is
		# not the sentinel (a bin was found) the values from that
		# x-y bin are copied to all higher-energy bins.

		n = self.efficiency.numerator.array
		d = self.efficiency.denominator.array
		f = self.found_density.array
		bady = -1
		for x, y in enumerate(numpy.max(numpy.where((d > 0) & (numpy.roll(d, -1, axis = 1) <= 0), numpy.indices(d.shape)[1], bady), axis = 1)):
			if y != bady:
				n[x, y + 1:] = n[x, y]
				d[x, y + 1:] = d[x, y]
				f[x, y + 1:] = f[x, y]

		# now do the same for the bins at energies below those that
		# have been measured.

		bady = d.shape[1]
		for x, y in enumerate(numpy.min(numpy.where((d > 0) & (numpy.roll(d, 1, axis = 1) <= 0), numpy.indices(d.shape)[1], bady), axis = 1)):
			if y != bady:
				n[x, 0:y] = n[x, y]
				d[x, 0:y] = d[x, y]
				f[x, 0:y] = f[x, y]

		diagnostic_plot(self.efficiency.numerator.array, self.efficiency.denominator.bins, r"Efficiency Numerator (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_numerator_before.png")
		diagnostic_plot(self.efficiency.denominator.array, self.efficiency.denominator.bins, r"Efficiency Denominator (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_denominator_before.png")
		diagnostic_plot(self.found_density.array, self.efficiency.denominator.bins, r"Injections Lost / Unit of Threshold (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_found_density_before.png")

		# smooth the efficiency bins and the found injection
		# density bins using the same 2D window.

		window = rate.gaussian_window(self.window_size_x, self.window_size_y)
		window /= window[tuple((numpy.array(window.shape, dtype = "double") - 1) / 2)]
		rate.filter_binned_ratios(self.efficiency, window)
		rate.filter_array(self.found_density.array, window)

		diagnostic_plot(self.efficiency.numerator.array, self.efficiency.denominator.bins, r"Efficiency Numerator (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_numerator_after.png")
		diagnostic_plot(self.efficiency.denominator.array, self.efficiency.denominator.bins, r"Efficiency Denominator (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_denominator_after.png")
		diagnostic_plot(self.found_density.array, self.efficiency.denominator.bins, r"Injections Lost / Unit of Threshold (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_found_density_after.png")

		# compute the uncertainties in the efficiency and its
		# derivative by assuming these to be the binomial counting
		# fluctuations in the numerators.

		p = self.efficiency.ratio()
		self.defficiency = numpy.sqrt(p * (1 - p) / self.efficiency.denominator.array)
		p = self.found_density.array / self.efficiency.denominator.array
		self.dfound_density = numpy.sqrt(p * (1 - p) / self.efficiency.denominator.array)
Example #16
	def __init__(self, efficiency_data, confidence):
		self.rate_array = rate.BinnedArray(efficiency_data.efficiency.denominator.bins)
		self.amplitude_lbl = efficiency_data.amplitude_lbl
		self.confidence = confidence
Example #17
  def twoD_SearchVolume(self, instruments, dbin=None, FAR=None, bootnum=None, derr=0.197, dsys=0.074):
    """ 
    Compute the search volume in the mass/mass plane, bootstrap
    and measure the first and second moment (assumes the underlying 
    distribution can be characterized by those two parameters) 
    This is gonna be brutally slow
    derr = (0.134**2+.103**2+.102**2)**.5 = 0.197 which is the 3 detector 
    calibration uncertainty in quadrature.  This is conservative since some injections
     will be H1L1 and have a lower error of .17
    the dsys is the DC offset which is the max offset of .074. 
    """

    if not FAR: FAR = self.far[instruments]
    found, missed = self.get_injections(instruments, FAR)
    twodbin = self.twoDMassBins
    wnfunc = self.gw
    livetime = self.livetime[instruments]
    if not bootnum: bootnum = self.bootnum

    if wnfunc: wnfunc /= wnfunc[(wnfunc.shape[0]-1) / 2, (wnfunc.shape[1]-1) / 2]

    x = twodbin.shape[0]
    y = twodbin.shape[1]
    z = int(self.opts.dist_bins)

    rArrays = []
    volArray=rate.BinnedArray(twodbin)
    volArray2=rate.BinnedArray(twodbin)
    #set up ratio arrays for each distance bin
    for k in range(z):
      rArrays.append(rate.BinnedRatios(twodbin))

    # Bootstrap to account for errors
    for n in range(bootnum):
      #initialize by setting these to zero
      for k in range(z):
        rArrays[k].numerator.array = numpy.zeros(rArrays[k].numerator.bins.shape)
        rArrays[k].denominator.array = numpy.zeros(rArrays[k].numerator.bins.shape)
      #Scramble the inj population and distances
      if bootnum > 1: 
        sm, sf = self._scramble_pop(missed, found)
        # I make a separate array of distances to speed up this calculation
        f_dist = self._scramble_dist(sf, derr, dsys)
      else: 
        sm, sf = missed, found
        f_dist = numpy.array([l.distance for l in found])
     
      # compute the distance bins
      if not dbin: 
        dbin = rate.LogarithmicBins(min(f_dist),max(f_dist), z)
      #else: print dbin.centres()
      

      # get rid of all missed injections outside the distance bins
      # to prevent binning errors
      sm, m_dist = self.cut_distance(sm, dbin)
      sf, f_dist = self.cut_distance(sf, dbin)


      for i, l in enumerate(sf):#found:
        tbin = rArrays[dbin[f_dist[i]]]
        tbin.incnumerator( (l.mass1, l.mass2) )
      for i, l in enumerate(sm):#missed:
        tbin = rArrays[dbin[m_dist[i]]]
        tbin.incdenominator( (l.mass1, l.mass2) )
    
      tmpArray2=rate.BinnedArray(twodbin) #start with a zero array to compute the mean square
      for k in range(z): 
        tbins = rArrays[k]
        tbins.denominator.array += tbins.numerator.array
        if wnfunc: rate.filter_array(tbins.denominator.array,wnfunc)
        if wnfunc: rate.filter_array(tbins.numerator.array,wnfunc)
        tbins.regularize()
        # logarithmic(d)
        integrand = 4.0 * pi * tbins.ratio() * dbin.centres()[k]**3 * dbin.delta
        volArray.array += integrand
        tmpArray2.array += integrand #4.0 * pi * tbins.ratio() * dbin.centres()[k]**3 * dbin.delta
        print >>sys.stderr, "bootstrapping:\t%.1f%% and Calculating smoothed volume:\t%.1f%%\r" % ((100.0 * n / bootnum), (100.0 * k / z)),
      tmpArray2.array *= tmpArray2.array
      volArray2.array += tmpArray2.array
    
    print >>sys.stderr, "" 
    #Mean and variance
    volArray.array /= bootnum
    volArray2.array /= bootnum
    volArray2.array -= volArray.array**2 # Variance
    volArray.array *= livetime
    volArray2.array *= livetime*livetime # this gets two powers of live time
    return volArray, volArray2
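The bootstrap above accumulates a running sum and a running sum of squares of the per-iteration integrand, then uses the moment identity var = E[x**2] - E[x]**2.  A minimal numpy illustration of that identity with invented samples:

import numpy

rng = numpy.random.RandomState(0)
x = rng.normal(10.0, 2.0, size=10000)   # stand-ins for per-bootstrap volumes

total = total2 = 0.0
for v in x:      # accumulate the first two moments, as the loop above does
    total += v
    total2 += v**2

mean = total / len(x)
var = total2 / len(x) - mean**2
print(mean, var)   # ~10 and ~4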
Example #18
def compute_volume_vs_mass(found, missed, mass_bins, bin_type, dbins=None,
                           distribution_param=None, distribution=None, limits_param=None,
                           max_param=None, min_param=None):
    """
    Compute the average luminosity an experiment was sensitive to given the sets
    of found and missed injections and assuming luminosity is uniformly distributed
    in space.

    If distance_param and distance_distribution are not None, use an unbinned
    Monte Carlo integral (which optionally takes max_distance and min_distance
    parameters) for the volume and error
    Otherwise use a simple efficiency * distance**2 binned integral
    In either case use distance bins to return the efficiency in each bin
    """
    # initialize MC volume integral method: binned or unbinned
    if distribution_param is not None and distribution is not None and limits_param is not None:
        mc_unbinned = True
    else:
        mc_unbinned = False

    # mean and std estimate for sensitive volume averaged over search time
    volArray = rate.BinnedArray(mass_bins)
    vol2Array = rate.BinnedArray(mass_bins)

    # found/missed stats
    foundArray = rate.BinnedArray(mass_bins)
    missedArray = rate.BinnedArray(mass_bins)

    # efficiency over distance and its standard (binomial) error
    effvmass = []
    errvmass = []

    if bin_type == "Mass1_Mass2": # two-d case first
        for j,mc1 in enumerate(mass_bins.centres()[0]):
            for k,mc2 in enumerate(mass_bins.centres()[1]):

                # filter out injections not in this mass bin
                newfound = filter_injections_by_mass(found, mass_bins, j, bin_type, k)
                newmissed = filter_injections_by_mass(missed, mass_bins, j, bin_type, k)

                foundArray[(mc1,mc2)] = len(newfound)
                missedArray[(mc1,mc2)] = len(newmissed)

                # compute the volume using this injection set
                meaneff, efferr, meanvol, volerr = mean_efficiency_volume(newfound, newmissed, dbins)
                effvmass.append(meaneff)
                errvmass.append(efferr)
                if mc_unbinned:
                    meanvol, volerr = volume_montecarlo(newfound, newmissed, distribution_param,
                                            distribution, limits_param, max_param, min_param)
                volArray[(mc1,mc2)] = meanvol
                vol2Array[(mc1,mc2)] = volerr

        return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass


    for j,mc in enumerate(mass_bins.centres()[0]):

        # filter out injections not in this mass bin
        newfound = filter_injections_by_mass(found, mass_bins, j, bin_type)
        newmissed = filter_injections_by_mass(missed, mass_bins, j, bin_type)

        foundArray[(mc,)] = len(newfound)
        missedArray[(mc,)] = len(newmissed)

        # compute the volume using this injection set
        meaneff, efferr, meanvol, volerr = mean_efficiency_volume(newfound, newmissed, dbins)
        effvmass.append(meaneff)
        errvmass.append(efferr)
        # if the unbinned MC calculation is available, do it
        if mc_unbinned:
            meanvol, volerr = volume_montecarlo(newfound, newmissed, distribution_param,
                                    distribution, limits_param, max_param, min_param)
        volArray[(mc,)] = meanvol
        vol2Array[(mc,)] = volerr

    return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass
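For the unbinned Monte Carlo branch, the essential idea is that the sensitive volume is the total injection volume times the recovered fraction.  A toy sketch under invented assumptions (injections uniform in volume out to R, a sharp distance cutoff standing in for the search):

import numpy

rng = numpy.random.RandomState(1)
R = 100.0
# uniform in volume: P(r) grows as r**2, so draw u uniformly, take the cube root
r = R * rng.uniform(0.0, 1.0, 20000)**(1.0 / 3.0)
found = r < 40.0       # hypothetical detection criterion

V_total = 4.0 / 3.0 * numpy.pi * R**3
print(V_total * found.mean(), 4.0 / 3.0 * numpy.pi * 40.0**3)  # statistically equal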
Example #19
	def finish(self):
		fig, axes = SnglBurstUtils.make_burst_plot(r"Injection Amplitude (\(\mathrm{s}^{-\frac{1}{3}}\))", "Detection Efficiency", width = 108.0)
		axes.set_title(r"Detection Efficiency vs.\ Amplitude")
		axes.semilogx()
		axes.set_position([0.10, 0.150, 0.86, 0.77])

		# set desired yticks
		axes.set_yticks((0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0))
		axes.set_yticklabels((r"\(0\)", r"\(0.1\)", r"\(0.2\)", r"\(0.3\)", r"\(0.4\)", r"\(0.5\)", r"\(0.6\)", r"\(0.7\)", r"\(0.8\)", r"\(0.9\)", r"\(1.0\)"))
		axes.xaxis.grid(True, which = "major,minor")
		axes.yaxis.grid(True, which = "major,minor")

		# put made and found injections in the denominators and
		# numerators of the efficiency bins
		bins = rate.NDBins((rate.LogarithmicBins(min(sim.amplitude for sim in self.all), max(sim.amplitude for sim in self.all), 400),))
		efficiency_num = rate.BinnedArray(bins)
		efficiency_den = rate.BinnedArray(bins)
		for sim in self.found:
			efficiency_num[sim.amplitude,] += 1
		for sim in self.all:
			efficiency_den[sim.amplitude,] += 1

		# generate and plot trend curves.  adjust window function
		# normalization so that denominator array correctly
		# represents the number of injections contributing to each
		# bin:  make w(0) = 1.0.  note that this factor has no
		# effect on the efficiency because it is common to the
		# numerator and denominator arrays.  we do this for the
		# purpose of computing the Poisson error bars, which
		# requires us to know the counts for the bins
		windowfunc = rate.gaussian_window(self.filter_width)
		windowfunc /= windowfunc[len(windowfunc) / 2 + 1]
		rate.filter_binned_array(efficiency_num, windowfunc)
		rate.filter_binned_array(efficiency_den, windowfunc)

		# regularize:  adjust unused bins so that the efficiency is
		# 0, not NaN
		assert (efficiency_num.array <= efficiency_den.array).all()
		efficiency_den.array[(efficiency_num.array == 0) & (efficiency_den.array == 0)] = 1

		line1, A50, A50_err = render_data_from_bins(file("string_efficiency.dat", "w"), axes, efficiency_num, efficiency_den, self.cal_uncertainty, self.filter_width, colour = "k", linestyle = "-", erroralpha = 0.2)
		print >>sys.stderr, "Pipeline's 50%% efficiency point for all detections = %g +/- %g%%\n" % (A50, A50_err * 100)

		# add a legend to the axes
		axes.legend((line1,), (r"\noindent Injections recovered with $\Lambda > %s$" % SnglBurstUtils.latexnumber("%.2e" % self.detection_threshold),), loc = "lower right")

		# adjust limits
		axes.set_xlim([1e-21, 2e-18])
		axes.set_ylim([0.0, 1.0])

		#
		# dump some information about the highest-amplitude missed
		# and quietest-amplitude found injections
		#

		self.loudest_missed.sort(reverse = True)
		self.quietest_found.sort(reverse = True)

		f = file("string_loud_missed_injections.txt", "w")
		print >>f, "Highest Amplitude Missed Injections"
		print >>f, "==================================="
		for amplitude, sim, offsetvector, filename, likelihood_ratio in self.loudest_missed:
			print >>f
			print >>f, "%s in %s:" % (str(sim.simulation_id), filename)
			if likelihood_ratio is None:
				print >>f, "Not recovered"
			else:
				print >>f, "Recovered with \\Lambda = %.16g, detection threshold was %.16g" % (likelihood_ratio, self.detection_threshold)
			for instrument in self.seglists:
				print >>f, "In %s:" % instrument
				print >>f, "\tInjected amplitude:\t%.16g" % SimBurstUtils.string_amplitude_in_instrument(sim, instrument, offsetvector)
				print >>f, "\tTime of injection:\t%s s" % sim.time_at_instrument(instrument, offsetvector)
			print >>f, "Amplitude in waveframe:\t%.16g" % sim.amplitude
			t = sim.get_time_geocent()
			print >>f, "Time at geocentre:\t%s s" % t
			print >>f, "Segments within 60 seconds:\t%s" % segmentsUtils.segmentlistdict_to_short_string(self.seglists & segments.segmentlistdict((instrument, segments.segmentlist([segments.segment(t-offsetvector[instrument]-60, t-offsetvector[instrument]+60)])) for instrument in self.seglists))
			print >>f, "Vetoes within 60 seconds:\t%s" % segmentsUtils.segmentlistdict_to_short_string(self.vetoseglists & segments.segmentlistdict((instrument, segments.segmentlist([segments.segment(t-offsetvector[instrument]-60, t-offsetvector[instrument]+60)])) for instrument in self.vetoseglists))

		f = file("string_quiet_found_injections.txt", "w")
		print >>f, "Lowest Amplitude Found Injections"
		print >>f, "================================="
		for inv_amplitude, sim, offsetvector, filename, likelihood_ratio in self.quietest_found:
			print >>f
			print >>f, "%s in %s:" % (str(sim.simulation_id), filename)
			if likelihood_ratio is None:
				print >>f, "Not recovered"
			else:
				print >>f, "Recovered with \\Lambda = %.16g, detection threshold was %.16g" % (likelihood_ratio, self.detection_threshold)
			for instrument in self.seglists:
				print >>f, "In %s:" % instrument
				print >>f, "\tInjected amplitude:\t%.16g" % SimBurstUtils.string_amplitude_in_instrument(sim, instrument, offsetvector)
				print >>f, "\tTime of injection:\t%s s" % sim.time_at_instrument(instrument, offsetvector)
			print >>f, "Amplitude in waveframe:\t%.16g" % sim.amplitude
			t = sim.get_time_geocent()
			print >>f, "Time at geocentre:\t%s s" % t
			print >>f, "Segments within 60 seconds:\t%s" % segmentsUtils.segmentlistdict_to_short_string(self.seglists & segments.segmentlistdict((instrument, segments.segmentlist([segments.segment(t-offsetvector[instrument]-60, t-offsetvector[instrument]+60)])) for instrument in self.seglists))
			print >>f, "Vetoes within 60 seconds:\t%s" % segmentsUtils.segmentlistdict_to_short_string(self.vetoseglists & segments.segmentlistdict((instrument, segments.segmentlist([segments.segment(t-offsetvector[instrument]-60, t-offsetvector[instrument]+60)])) for instrument in self.vetoseglists))

		#
		# done
		#

		return fig,
Example #20
def twoD_SearchVolume(found, missed, twodbin, dbin, wnfunc, livetime, bootnum=1, derr=0.197, dsys=0.074):
  """ 
  Compute the search volume in the mass/mass plane, bootstrap
  and measure the first and second moment (assumes the underlying 
  distribution can be characterized by those two parameters) 
  This is gonna be brutally slow
  derr = (0.134**2+.103**2+.102**2)**.5 = 0.197 which is the 3 detector 
  calibration uncertainty in quadrature.  This is conservative since some injections
  will be H1L1 and have a lower error of .17
  the dsys is the DC offset which is the max offset of .074. 
  """
  if wnfunc: wnfunc /= wnfunc[(wnfunc.shape[0]-1) / 2, (wnfunc.shape[1]-1) / 2]
  x = twodbin.shape[0]
  y = twodbin.shape[1]
  z = dbin.n
  rArrays = []
  volArray=rate.BinnedArray(twodbin)
  volArray2=rate.BinnedArray(twodbin)
  #set up ratio arrays for each distance bin
  for k in range(z):
    rArrays.append(rate.BinnedRatios(twodbin))

  # Bootstrap to account for errors
  for n in range(bootnum):
    #initialize by setting these to zero
    for k in range(z):
      rArrays[k].numerator.array = numpy.zeros(rArrays[k].numerator.bins.shape)
      rArrays[k].denominator.array = numpy.zeros(rArrays[k].numerator.bins.shape)
    #Scramble the inj population
    if bootnum > 1: sm, sf = scramble_pop(missed, found)
    else: sm, sf = missed, found
    for l in sf:#found:
      tbin = rArrays[dbin[scramble_dist(l.distance,derr,dsys)]]
      tbin.incnumerator( (l.mass1, l.mass2) )
    for l in sm:#missed:
      tbin = rArrays[dbin[scramble_dist(l.distance,derr,dsys)]]
      tbin.incdenominator( (l.mass1, l.mass2) )
    
    tmpArray2=rate.BinnedArray(twodbin) #start with a zero array to compute the mean square
    for k in range(z): 
      tbins = rArrays[k]
      tbins.denominator.array += tbins.numerator.array
      if wnfunc: rate.filter_array(tbins.denominator.array,wnfunc)
      if wnfunc: rate.filter_array(tbins.numerator.array,wnfunc)
      tbins.regularize()
      # logarithmic(d)
      integrand = 4.0 * pi * tbins.ratio() * dbin.centres()[k]**3 * dbin.delta
      volArray.array += integrand
      tmpArray2.array += integrand #4.0 * pi * tbins.ratio() * dbin.centres()[k]**3 * dbin.delta
      print >>sys.stderr, "bootstrapping:\t%.1f%% and Calculating smoothed volume:\t%.1f%%\r" % ((100.0 * n / bootnum), (100.0 * k / z)),
    tmpArray2.array *= tmpArray2.array
    volArray2.array += tmpArray2.array
    
  print >>sys.stderr, "" 
  #Mean and variance
  volArray.array /= bootnum
  volArray2.array /= bootnum
  volArray2.array -= volArray.array**2 # Variance
  volArray.array *= livetime
  volArray2.array *= livetime*livetime # this gets two powers of live time
  return volArray, volArray2
Example #21
options, filenames = parse_command_line()


#
# =============================================================================
#
#   Custom SnglBurstTable append() method to put triggers directly into bins
#
# =============================================================================
#


nbins = int(float(abs(options.read_segment)) / options.window) * bins_per_filterwidth
binning = rate.NDBins((rate.LinearBins(options.read_segment[0], options.read_segment[1], nbins),))
trigger_rate = rate.BinnedArray(binning)

num_triggers = 0


def snglburst_append(self, row, verbose = options.verbose):
	global num_triggers
	t = row.peak
	if t in options.read_segment:
		trigger_rate[t,] += 1.0
	num_triggers += 1
	if verbose and not (num_triggers % 125):
		print >>sys.stderr, "sngl_burst rows read:  %d\r" % num_triggers,


lsctables.SnglBurstTable.append = snglburst_append
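A bare-numpy sketch of the same rate binning (segment, filter width and peak times all invented here): the bin count is the segment length divided by the filter width, times 21 bins per filter width, and each trigger increments the bin containing its peak time.

import numpy

seg = (0.0, 100.0)     # stands in for options.read_segment
width = 0.5            # stands in for options.window (filter width, seconds)
nbins = int((seg[1] - seg[0]) / width) * 21
edges = numpy.linspace(seg[0], seg[1], nbins + 1)

peaks = numpy.array([1.0, 1.2, 50.0, 99.9])   # hypothetical trigger peak times
counts, _ = numpy.histogram(peaks, bins=edges)
print(counts.sum())    # all 4 triggers binned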
Example #22
 def __init__(self, binning):
     self.found = 0
     self.offsets = rate.BinnedArray(binning)
Example #23
def compute_volume_vs_mass(found,
                           missed,
                           mass_bins,
                           bin_type,
                           dbins=None,
                           ploteff=False):
    """
    Compute the average luminosity an experiment was sensitive to given the sets
    of found and missed injections and assuming luminosity is unformly distributed
    in space.
    """
    # mean and std estimate for luminosity (in L10s)
    volArray = rate.BinnedArray(mass_bins)
    vol2Array = rate.BinnedArray(mass_bins)

    # found/missed stats
    foundArray = rate.BinnedArray(mass_bins)
    missedArray = rate.BinnedArray(mass_bins)

    #
    # compute the mean luminosity in each mass bin
    #
    effvmass = []
    errvmass = []

    if bin_type == "Mass1_Mass2":  # two-d case first
        for j, mc1 in enumerate(mass_bins.centres()[0]):
            for k, mc2 in enumerate(mass_bins.centres()[1]):
                newfound = filter_injections_by_mass(found, mass_bins, j,
                                                     bin_type, k)
                newmissed = filter_injections_by_mass(missed, mass_bins, j,
                                                      bin_type, k)

                foundArray[(mc1, mc2)] = len(newfound)
                missedArray[(mc1, mc2)] = len(newmissed)

                # compute the volume using this injection set
                meaneff, efferr, meanvol, volerr = mean_efficiency_volume(
                    newfound, newmissed, dbins)
                effvmass.append(meaneff)
                errvmass.append(efferr)
                volArray[(mc1, mc2)] = meanvol
                vol2Array[(mc1, mc2)] = volerr

        return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass

    for j, mc in enumerate(mass_bins.centres()[0]):

        # filter out injections not in this mass bin
        newfound = filter_injections_by_mass(found, mass_bins, j, bin_type)
        newmissed = filter_injections_by_mass(missed, mass_bins, j, bin_type)

        foundArray[(mc, )] = len(newfound)
        missedArray[(mc, )] = len(newmissed)

        # compute the volume using this injection set
        meaneff, efferr, meanvol, volerr = mean_efficiency_volume(
            newfound, newmissed, dbins)
        effvmass.append(meaneff)
        errvmass.append(efferr)
        volArray[(mc, )] = meanvol
        vol2Array[(mc, )] = volerr

    return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass