Example #1
def finish(self):
        for offsets in self.tisi_rows:
            self.seglists.offsets.update(offsets)
            self.bins.incdenominator(
                (offsets[self.x_instrument], offsets[self.y_instrument]),
                float(abs(self.seglists.intersection(self.seglists.keys()))))
        self.bins.logregularize()
        zvals = self.bins.ratio()
        rate.filter_array(zvals, rate.gaussian_window(3, 3))
        xcoords, ycoords = self.bins.centres()
        self.axes.contour(xcoords, ycoords, numpy.transpose(numpy.log(zvals)))
        for offsets in self.tisi_rows:
            if any(offsets.values()):
                # time slide vector is non-zero-lag
                self.axes.plot((offsets[self.x_instrument], ),
                               (offsets[self.y_instrument], ), "k+")
            else:
                # time slide vector is zero-lag
                self.axes.plot((offsets[self.x_instrument], ),
                               (offsets[self.y_instrument], ), "r+")

        self.axes.set_xlim([self.bins.bins().min[0], self.bins.bins().max[0]])
        self.axes.set_ylim([self.bins.bins().min[1], self.bins.bins().max[1]])
        self.axes.set_title(
            r"Coincident Event Rate vs. Instrument Time Offset (Logarithmic Rate Contours)"
        )
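The recipe above (smooth a 2-D histogram, regularize so the logarithm is finite, then contour the log of the counts) does not actually depend on the lal rate module. A rough, self-contained sketch of the same idea, using scipy.ndimage.gaussian_filter as a stand-in for rate.filter_array() and random numbers in place of the accumulated livetime:

import numpy
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt

# stand-in for the BinnedRatios denominator:  livetime vs. (x, y) offset
xcentres = numpy.linspace(-50.0, 50.0, 20)
ycentres = numpy.linspace(-50.0, 50.0, 20)
livetime = numpy.random.rand(len(xcentres), len(ycentres))

# smooth, like rate.filter_array() with a Gaussian window
livetime = gaussian_filter(livetime, sigma=1.0)

# clamp zeros to the smallest positive value so log() is finite,
# like logregularize()
livetime[livetime <= 0] = livetime[livetime > 0].min()

fig, axes = plt.subplots()
# transpose because contour() expects z[y, x]
axes.contour(xcentres, ycentres, numpy.log(livetime).T)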
Example #2
 def finish(self):
     self.axes.set_title("Trigger Rate vs. Peak Frequency\n(%d Triggers)" %
                         self.nevents)
     # 21 bins per filter width
     rate.filter_array(self.rate.array, rate.gaussian_window(21))
     xvals = self.rate.centres()[0]
     self.axes.plot(xvals, self.rate.at_centres(), "k")
     self.axes.semilogy()
     self.axes.set_xlim((min(xvals), max(xvals)))
Example #3
    def finish(self):
        self.axes.set_title("Time Between Triggers\n(%d Triggers)" %
                            self.nevents)

        rate.filter_array(self.bins.array, rate.gaussian_window(21))
        xvals = self.bins.centres()[0]
        yvals = self.bins.at_centres()
        self.axes.plot(xvals, yvals, "k")

        self.axes.set_xlim((0, xvals[-1]))
        self.axes.set_ylim((1, 10.0**(int(math.log10(yvals.max())) + 1)))
Example #4
	def finish(self):
		for key, pdf in self.densities.items():
			if key.endswith("_snr2_chi2"):
				rate.filter_array(pdf.array, rate.gaussian_window(11, 11, sigma = 20))
			elif key.endswith("_dt") or key.endswith("_dA") or key.endswith("_df"):
				rate.filter_array(pdf.array, rate.gaussian_window(11, sigma = 20))
			elif key.startswith("instrumentgroup"):
				# instrument group filter is a no-op
				pass
			else:
				# shouldn't get here
				raise Exception("unrecognized density key: %r" % key)
			pdf.normalize()
		self.mkinterps()
Example #5
 def finish(self):
     for instrument, data in sorted(self.data.items()):
         fig, axes = SnglBurstUtils.make_burst_plot(
             r"$t_{\mathrm{recovered}} - t_{\mathrm{injected}}$ (s)",
             "Triggers per Unit Offset")
         axes.semilogy()
         axes.set_title(
             "Trigger Peak Time - Injection Peak Time in %s\n(%d Found Injections)"
             % (instrument, data.found))
         # 21 bins per filter width
         rate.filter_array(data.offsets.array, rate.gaussian_window(21))
         axes.plot(data.offsets.centres()[0], data.offsets.at_centres(),
                   "k")
         #axes.legend(["%s residuals" % instrument, "SNR-weighted mean of residuals in all instruments"], loc = "lower right")
         yield fig
Example #6
 def finish(self):
     self.axes.set_title(
         "Trigger Peak Time - Injection Peak Time\n(%d Found Injections)" %
         self.found)
     # 21 bins per filter width
     window = rate.gaussian_window(21)
     rate.filter_array(self.offsets.array, window)
     rate.filter_array(self.coinc_offsets.array, window)
     self.axes.plot(self.offsets.centres()[0], self.offsets.at_centres(),
                    "k")
     self.axes.plot(self.coinc_offsets.centres()[0],
                    self.coinc_offsets.at_centres(), "r")
     self.axes.legend([
         "%s residuals" % self.instrument,
         "SNR-weighted mean of residuals in all instruments"
     ],
                      loc="lower right")
Example #7
 def finish(self):
     self.axes.set_title(
         "Trigger Peak Frequency / Injection Centre Frequency\n(%d Found Injections)"
         % self.found)
     # 21 bins per filter width
     window = rate.gaussian_window(21)
     rate.filter_array(self.offsets.array, window)
     rate.filter_array(self.coinc_offsets.array, window)
     self.axes.plot(10**self.offsets.centres()[0],
                    self.offsets.at_centres(), "k")
     self.axes.plot(10**self.coinc_offsets.centres()[0],
                    self.coinc_offsets.at_centres(), "r")
     self.axes.legend([
         "%s triggers" % self.instrument,
         "SNR-weighted mean of all matching triggers"
     ],
                      loc="lower right")
     ymin, ymax = self.axes.get_ylim()
     if ymax / ymin > 1e6:
         ymin = ymax / 1e6
         self.axes.set_ylim((ymin, ymax))
Example #8
	def finish(self, threshold):
		# bin the injections

		self._bin_events()

		# use the same binning for the found injection density as
		# was constructed for the efficiency

		self.found_density = rate.BinnedArray(self.efficiency.denominator.bins)

		# construct the amplitude weighting function

		amplitude_weight = rate.BinnedArray(rate.NDBins((rate.LinearBins(threshold - 100, threshold + 100, 10001),)))

		# gaussian window's width is the number of bins
		# corresponding to 10 units of amplitude, which is computed
		# by dividing 10 by the "volume" of the bin corresponding
		# to threshold.  index is the index of the element in
		# amplitude_weight corresponding to the threshold.

		index, = amplitude_weight.bins[threshold,]
		window = rate.gaussian_window(10.0 / amplitude_weight.bins.volumes()[index])
		window /= 10 * window[(len(window) - 1) / 2]
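		# worked example with the numbers above:  the 10001
		# linear bins span 200 units of amplitude, so each bin's
		# "volume" is 200 / 10001, or about 0.02, and the window
		# width is therefore 10 / 0.02 = 500 bins.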

		# set the window data into the BinnedArray object.  the
		# Gaussian peaks on the middle element of the window, which
		# we want to place at index in the amplitude_weight array.

		lo = index - (len(window) - 1) / 2
		hi = lo + len(window)
		if lo < 0 or hi > len(amplitude_weight.array):
			raise ValueError("amplitude weighting window too large")
		amplitude_weight.array[lo:hi] = window

		# store the recovered injections in the found density bins
		# weighted by amplitude

		for x, y, z in self.recovered_xyz:
			try:
				weight = amplitude_weight[z,]
			except IndexError:
				# beyond the edge of the window
				weight = 0.0
			self.found_density[x, y] += weight

		# the efficiency is only valid up to the highest energy
		# that has been injected.  this creates problems later on
		# so, instead, for each frequency, identify the highest
		# energy that has been measured and copy the values for
		# that bin's numerator and denominator into the bins for
		# all higher energies.  do the same for the counts in the
		# found injection density bins.
		#
		# numpy.indices() returns two arrays, the first of which
		# has each element set equal to its x index, and the
		# second of which has each element set equal to its y
		# index;  we keep the latter.  meanwhile, numpy.roll()
		# cyclically permutes the efficiency bins down one along
		# the y (energy) axis.  from this, the conditional finds
		# bins where the denominator is greater than 0 but the
		# denominator is <= 0 in the bin immediately above it in
		# energy.  we select the elements from the y index array
		# where the conditional is true, and then use numpy.max()
		# along the y direction to return the highest such y index
		# for each x index, which is a 1-D array.  finally,
		# enumerate() is used to iterate over each x index and
		# corresponding y index, and if the y index is not
		# negative (a bin was found) the value from that x-y bin
		# is copied to all higher bins.
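		# for example, if
		#
		#     d = [[1, 2, 0],
		#          [3, 0, 0]]
		#
		# then numpy.roll(d, -1, axis = 1) is [[2, 0, 1], [0, 0,
		# 3]], the conditional is true at y = 1 in row 0 and at
		# y = 0 in row 1, and numpy.max(..., axis = 1) gives
		# [1, 0]:  the highest measured y for each x.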

		n = self.efficiency.numerator.array
		d = self.efficiency.denominator.array
		f = self.found_density.array
		bady = -1
		for x, y in enumerate(numpy.max(numpy.where((d > 0) & (numpy.roll(d, -1, axis = 1) <= 0), numpy.indices(d.shape)[1], bady), axis = 1)):
			if y != bady:
				n[x, y + 1:] = n[x, y]
				d[x, y + 1:] = d[x, y]
				f[x, y + 1:] = f[x, y]

		# now do the same for the bins at energies below those that
		# have been measured.

		bady = d.shape[1]
		for x, y in enumerate(numpy.min(numpy.where((d > 0) & (numpy.roll(d, 1, axis = 1) <= 0), numpy.indices(d.shape)[1], bady), axis = 1)):
			if y != bady:
				n[x, 0:y] = n[x, y]
				d[x, 0:y] = d[x, y]
				f[x, 0:y] = f[x, y]

		diagnostic_plot(self.efficiency.numerator.array, self.efficiency.denominator.bins, r"Efficiency Numerator (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_numerator_before.png")
		diagnostic_plot(self.efficiency.denominator.array, self.efficiency.denominator.bins, r"Efficiency Denominator (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_denominator_before.png")
		diagnostic_plot(self.found_density.array, self.efficiency.denominator.bins, r"Injections Lost / Unit of Threshold (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_found_density_before.png")

		# smooth the efficiency bins and the found injection
		# density bins using the same 2D window.

		window = rate.gaussian_window(self.window_size_x, self.window_size_y)
		window /= window[tuple((numpy.array(window.shape) - 1) // 2)]	# normalize the peak to 1
		rate.filter_binned_ratios(self.efficiency, window)
		rate.filter_array(self.found_density.array, window)

		diagnostic_plot(self.efficiency.numerator.array, self.efficiency.denominator.bins, r"Efficiency Numerator (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_numerator_after.png")
		diagnostic_plot(self.efficiency.denominator.array, self.efficiency.denominator.bins, r"Efficiency Denominator (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_denominator_after.png")
		diagnostic_plot(self.found_density.array, self.efficiency.denominator.bins, r"Injections Lost / Unit of Threshold (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_found_density_after.png")

		# compute the uncertainties in the efficiency and its
		# derivative by assuming these to be the binomial counting
		# fluctuations in the numerators.

		p = self.efficiency.ratio()
		self.defficiency = numpy.sqrt(p * (1 - p) / self.efficiency.denominator.array)
		p = self.found_density.array / self.efficiency.denominator.array
		self.dfound_density = numpy.sqrt(p * (1 - p) / self.efficiency.denominator.array)
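The final two lines are the standard binomial counting error: if an efficiency p is estimated from N trials, the variance of the estimate is p(1 - p)/N. A quick numerical check with hypothetical numbers:

import numpy
p, N = 0.9, 100.0
dp = numpy.sqrt(p * (1 - p) / N)   # 0.03:  90% efficiency measured from 100 injections is known to about 3%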
Example #9
  def twoD_SearchVolume(self, instruments, dbin=None, FAR=None, bootnum=None, derr=0.197, dsys=0.074):
    """ 
    Compute the search volume in the mass/mass plane, bootstrap
    and measure the first and second moment (assumes the underlying 
    distribution can be characterized by those two parameters) 
    This is gonna be brutally slow
    derr = (0.134**2+.103**2+.102**2)**.5 = 0.197 which is the 3 detector 
    calibration uncertainty in quadrature.  This is conservative since some injections
     will be H1L1 and have a lower error of .17
    the dsys is the DC offset which is the max offset of .074. 
    """

    if not FAR: FAR = self.far[instruments]
    found, missed = self.get_injections(instruments, FAR)
    twodbin = self.twoDMassBins
    wnfunc = self.gw
    livetime = self.livetime[instruments]
    if not bootnum: bootnum = self.bootnum

    if wnfunc: wnfunc /= wnfunc[(wnfunc.shape[0]-1) / 2, (wnfunc.shape[1]-1) / 2]

    x = twodbin.shape[0]
    y = twodbin.shape[1]
    z = int(self.opts.dist_bins)

    rArrays = []
    volArray=rate.BinnedArray(twodbin)
    volArray2=rate.BinnedArray(twodbin)
    #set up ratio arrays for each distance bin
    for k in range(z):
      rArrays.append(rate.BinnedRatios(twodbin))

    # Bootstrap to account for errors
    for n in range(bootnum):
      #initialize by setting these to zero
      for k in range(z):
        rArrays[k].numerator.array = numpy.zeros(rArrays[k].numerator.bins.shape)
        rArrays[k].denominator.array = numpy.zeros(rArrays[k].numerator.bins.shape)
      #Scramble the inj population and distances
      if bootnum > 1: 
        sm, sf = self._scramble_pop(missed, found)
        # I make a separate array of distances to speed up this calculation
        f_dist = self._scramble_dist(sf, derr, dsys)
      else: 
        sm, sf = missed, found
        f_dist = numpy.array([l.distance for l in found])
     
      # compute the distance bins
      if not dbin: 
        dbin = rate.LogarithmicBins(min(f_dist),max(f_dist), z)
      #else: print dbin.centres()
      

      # get rid of all missed injections outside the distance bins
      # to prevent binning errors
      sm, m_dist = self.cut_distance(sm, dbin)
      sf, f_dist = self.cut_distance(sf, dbin)


      for i, l in enumerate(sf):#found:
        tbin = rArrays[dbin[f_dist[i]]]
        tbin.incnumerator( (l.mass1, l.mass2) )
      for i, l in enumerate(sm):#missed:
        tbin = rArrays[dbin[m_dist[i]]]
        tbin.incdenominator( (l.mass1, l.mass2) )
    
      tmpArray2=rate.BinnedArray(twodbin) #start with a zero array to compute the mean square
      for k in range(z): 
        tbins = rArrays[k]
        tbins.denominator.array += tbins.numerator.array
        if wnfunc: rate.filter_array(tbins.denominator.array,wnfunc)
        if wnfunc: rate.filter_array(tbins.numerator.array,wnfunc)
        tbins.regularize()
        # logarithmic(d)
        integrand = 4.0 * pi * tbins.ratio() * dbin.centres()[k]**3 * dbin.delta
        volArray.array += integrand
        tmpArray2.array += integrand #4.0 * pi * tbins.ratio() * dbin.centres()[k]**3 * dbin.delta
        print >>sys.stderr, "bootstrapping:\t%.1f%% and Calculating smoothed volume:\t%.1f%%\r" % ((100.0 * n / bootnum), (100.0 * k / z)),
      tmpArray2.array *= tmpArray2.array
      volArray2.array += tmpArray2.array
    
    print >>sys.stderr, "" 
    #Mean and variance
    volArray.array /= bootnum
    volArray2.array /= bootnum
    volArray2.array -= volArray.array**2 # Variance
    volArray.array *= livetime
    volArray2.array *= livetime*livetime # this gets two powers of live time
    return volArray, volArray2
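The integrand accumulated in the inner loop is a discretization of V = ∫ 4π ε(r) r² dr; with logarithmic distance bins, dr = r Δln r, which is where the r³ factor and dbin.delta come from. A minimal sketch of that quadrature with a stand-in efficiency curve (the bin centres and efficiencies below are hypothetical):

import numpy
from math import pi

r = numpy.logspace(0.0, 2.0, 10)      # hypothetical log-spaced distance bin centres (Mpc)
dlnr = numpy.log(r[1] / r[0])         # constant logarithmic bin width
eff = numpy.exp(-r / 50.0)            # stand-in detection efficiency vs. distance

# V = sum of 4 pi eff(r) r^2 dr, with dr = r dln(r) for logarithmic bins
V = (4.0 * pi * eff * r**3 * dlnr).sum()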
Example #10
 def finish(self):
     for key, pdf in self.densities.items():
         rate.filter_array(pdf.array, rate.gaussian_window(11, 5))
         pdf.normalize()
     self.mkinterps()
Example #11
def twoD_SearchVolume(found, missed, twodbin, dbin, wnfunc, livetime, bootnum=1, derr=0.197, dsys=0.074):
  """ 
  Compute the search volume in the mass/mass plane, bootstrap
  and measure the first and second moment (assumes the underlying 
  distribution can be characterized by those two parameters) 
  This is gonna be brutally slow
  derr = (0.134**2+.103**2+.102**2)**.5 = 0.197 which is the 3 detector 
  calibration uncertainty in quadrature.  This is conservative since some injections
  will be H1L1 and have a lower error of .17
  the dsys is the DC offset which is the max offset of .074. 
  """
  if wnfunc: wnfunc /= wnfunc[(wnfunc.shape[0]-1) / 2, (wnfunc.shape[1]-1) / 2]
  x = twodbin.shape[0]
  y = twodbin.shape[1]
  z = dbin.n
  rArrays = []
  volArray=rate.BinnedArray(twodbin)
  volArray2=rate.BinnedArray(twodbin)
  #set up ratio arrays for each distance bin
  for k in range(z):
    rArrays.append(rate.BinnedRatios(twodbin))

  # Bootstrap to account for errors
  for n in range(bootnum):
    #initialize by setting these to zero
    for k in range(z):
      rArrays[k].numerator.array = numpy.zeros(rArrays[k].numerator.bins.shape)
      rArrays[k].denominator.array = numpy.zeros(rArrays[k].numerator.bins.shape)
    #Scramble the inj population
    if bootnum > 1: sm, sf = scramble_pop(missed, found)
    else: sm, sf = missed, found
    for l in sf:#found:
      tbin = rArrays[dbin[scramble_dist(l.distance,derr,dsys)]]
      tbin.incnumerator( (l.mass1, l.mass2) )
    for l in sm:#missed:
      tbin = rArrays[dbin[scramble_dist(l.distance,derr,dsys)]]
      tbin.incdenominator( (l.mass1, l.mass2) )
    
    tmpArray2=rate.BinnedArray(twodbin) #start with a zero array to compute the mean square
    for k in range(z): 
      tbins = rArrays[k]
      tbins.denominator.array += tbins.numerator.array
      if wnfunc: rate.filter_array(tbins.denominator.array,wnfunc)
      if wnfunc: rate.filter_array(tbins.numerator.array,wnfunc)
      tbins.regularize()
      # logarithmic(d)
      integrand = 4.0 * pi * tbins.ratio() * dbin.centres()[k]**3 * dbin.delta
      volArray.array += integrand
      tmpArray2.array += integrand #4.0 * pi * tbins.ratio() * dbin.centres()[k]**3 * dbin.delta
      print >>sys.stderr, "bootstrapping:\t%.1f%% and Calculating smoothed volume:\t%.1f%%\r" % ((100.0 * n / bootnum), (100.0 * k / z)),
    tmpArray2.array *= tmpArray2.array
    volArray2.array += tmpArray2.array
    
  print >>sys.stderr, "" 
  #Mean and variance
  volArray.array /= bootnum
  volArray2.array /= bootnum
  volArray2.array -= volArray.array**2 # Variance
  volArray.array *= livetime
  volArray2.array *= livetime*livetime # this gets two powers of live time
  return volArray, volArray2
Example #12
    def finish(self):
        self.axes.set_title(
            r"\begin{center}Distribution of Coincident Events (%d Foreground, %d Background Events, %d Injections Found in Coincidence, Logarithmic Density Contours)\end{center}"
            % (self.n_foreground, self.n_background, self.n_injections))
        xcoords, ycoords = self.background_bins.centres()

        # prepare the data
        rate.filter_array(self.foreground_bins.array,
                          rate.gaussian_window(8, 8))
        rate.filter_array(self.background_bins.array,
                          rate.gaussian_window(8, 8))
        rate.filter_array(self.coinc_injection_bins.array,
                          rate.gaussian_window(8, 8))
        rate.filter_array(self.incomplete_coinc_injection_bins.array,
                          rate.gaussian_window(8, 8))
        self.foreground_bins.logregularize()
        self.background_bins.logregularize()
        self.coinc_injection_bins.logregularize()
        self.incomplete_coinc_injection_bins.logregularize()

        # plot background contours
        max_density = math.log(self.background_bins.array.max())
        self.axes.contour(xcoords,
                          ycoords,
                          numpy.transpose(numpy.log(
                              self.background_bins.array)),
                          [max_density - n for n in xrange(9, -1, -1)],
                          cmap=matplotlib.cm.Greys)

        # plot foreground (zero-lag) contours
        max_density = math.log(self.foreground_bins.array.max())
        self.axes.contour(xcoords,
                          ycoords,
                          numpy.transpose(numpy.log(
                              self.foreground_bins.array)),
                          [max_density - n for n in xrange(9, -1, -1)],
                          cmap=matplotlib.cm.Reds)
        #self.axes.plot(self.foreground_x, self.foreground_y, "r+")

        # plot coincident injection contours
        max_density = math.log(self.coinc_injection_bins.array.max())
        self.axes.contour(xcoords,
                          ycoords,
                          numpy.transpose(
                              numpy.log(self.coinc_injection_bins.array)),
                          [max_density - n for n in xrange(9, -1, -1)],
                          cmap=matplotlib.cm.Blues)

        # plot incomplete coincident injection contours
        max_density = math.log(
            self.incomplete_coinc_injection_bins.array.max())
        self.axes.contour(xcoords,
                          ycoords,
                          numpy.transpose(
                              numpy.log(
                                  self.incomplete_coinc_injection_bins.array)),
                          [max_density - n for n in xrange(9, -1, -1)],
                          cmap=matplotlib.cm.Greens)

        # fix axes limits
        self.axes.set_xlim([
            self.background_bins.bins.min[0], self.background_bins.bins.max[0]
        ])
        self.axes.set_ylim([
            self.background_bins.bins.min[1], self.background_bins.bins.max[1]
        ])
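All four contour() calls above follow the same recipe: smooth, regularize, take the log of the counts, and draw levels one e-fold apart stepping down from the peak density. A small helper capturing that pattern (the function name is ours, not the source's; counts must already be regularized so log() is finite):

import math
import numpy

def plot_log_density_contours(axes, xcoords, ycoords, counts, cmap, n_levels=10):
    # matplotlib wants the level list in increasing order, so build
    # the levels from (peak - n_levels + 1) up to the peak log density
    peak = math.log(counts.max())
    levels = [peak - n for n in range(n_levels - 1, -1, -1)]
    # transpose because contour() expects z[y, x]
    axes.contour(xcoords, ycoords, numpy.transpose(numpy.log(counts)), levels, cmap=cmap)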