Example #1
	def finish(self):
		for key, pdf in self.densities.items():
			if key.endswith("_snr2_chi2"):
				rate.filter_array(pdf.array, rate.gaussian_window(11, 11, sigma = 20))
			elif key.endswith("_dt") or key.endswith("_dA") or key.endswith("_df"):
				rate.filter_array(pdf.array, rate.gaussian_window(11, sigma = 20))
			elif key.startswith("instrumentgroup"):
				# instrument group filter is a no-op
				pass
			else:
				# shouldn't get here
				raise Exception
			pdf.normalize()
		self.mkinterps()
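
The examples in this collection all revolve around the same smoothing idiom: accumulate counts in a rate.BinnedArray, then convolve the array with a rate.gaussian_window() via rate.filter_array() (in place) or rate.to_moving_mean_density().  A minimal stand-alone sketch of the idiom, assuming the modern lal.rate module (older code imported the same API from pylal.rate):

from lal import rate

# a one-dimensional histogram:  200 linear bins spanning [0, 100)
counts = rate.BinnedArray(rate.NDBins((rate.LinearBins(0.0, 100.0, 200),)))
for x in (12.0, 12.5, 13.0, 40.0):
	counts[x,] += 1.0

# convolve in place with a Gaussian window 21 bins wide;  the window
# sums to 1, so the total count is preserved
rate.filter_array(counts.array, rate.gaussian_window(21))
xvals, yvals = counts.centres()[0], counts.at_centres()
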
Example #2
    def finish(self):
        for offsets in self.tisi_rows:
            self.seglists.offsets.update(offsets)
            self.bins.incdenominator(
                (offsets[self.x_instrument], offsets[self.y_instrument]),
                float(abs(self.seglists.intersection(self.seglists.keys()))))
        self.bins.logregularize()
        zvals = self.bins.ratio()
        rate.filter_array(zvals, rate.gaussian_window(3, 3))
        xcoords, ycoords = self.bins.centres()
        self.axes.contour(xcoords, ycoords, numpy.transpose(numpy.log(zvals)))
        for offsets in self.tisi_rows:
            if any(offsets.values()):
                # time slide vector is non-zero-lag
                self.axes.plot((offsets[self.x_instrument], ),
                               (offsets[self.y_instrument], ), "k+")
            else:
                # time slide vector is zero-lag
                self.axes.plot((offsets[self.x_instrument], ),
                               (offsets[self.y_instrument], ), "r+")

        self.axes.set_xlim([self.bins.bins().min[0], self.bins.bins().max[0]])
        self.axes.set_ylim([self.bins.bins().min[1], self.bins.bins().max[1]])
        self.axes.set_title(
            r"Coincident Event Rate vs. Instrument Time Offset (Logarithmic Rate Contours)"
        )
Example #3
 def finish(self):
     self.axes.set_title("Trigger Rate vs. Peak Frequency\n(%d Triggers)" %
                         self.nevents)
     # 21 bins per filter width
     rate.filter_array(self.rate.array, rate.gaussian_window(21))
     xvals = self.rate.centres()[0]
     self.axes.plot(xvals, self.rate.at_centres(), "k")
     self.axes.semilogy()
     self.axes.set_xlim((min(xvals), max(xvals)))
Example #4
	def finish(self):
		self.axes.set_title("Trigger Peak Time - Injection Peak Time\n(%d Found Injections)" % self.found)
		# 21 bins per filter width
		filter = rate.gaussian_window(21)
		rate.to_moving_mean_density(self.offsets, filter)
		rate.to_moving_mean_density(self.coinc_offsets, filter)
		self.axes.plot(self.offsets.centres()[0], self.offsets.array, "k")
		self.axes.plot(self.coinc_offsets.centres()[0], self.coinc_offsets.array, "r")
		self.axes.legend(["%s residuals" % self.instrument, "SNR-weighted mean of residuals in all instruments"], loc = "lower right")
Example #5
    def finish(self):
        self.axes.set_title("Time Between Triggers\n(%d Triggers)" %
                            self.nevents)

        rate.filter_array(self.bins.array, rate.gaussian_window(21))
        xvals = self.bins.centres()[0]
        yvals = self.bins.at_centres()
        self.axes.plot(xvals, yvals, "k")

        self.axes.set_xlim((0, xvals[-1]))
        self.axes.set_ylim((1, 10.0**(int(math.log10(yvals.max())) + 1)))
Example #6
    def finish(self):
        self.axes.set_title("Time Between Triggers\n(%d Triggers)" %
                            self.nevents)

        xvals = self.bins.centres()[0]
        rate.to_moving_mean_density(self.bins, rate.gaussian_window(21))
        self.axes.plot(xvals, self.bins.array, "k")

        self.axes.set_xlim((0, xvals[-1]))
        self.axes.set_ylim(
            (1, 10.0**(int(math.log10(max(self.bins.array))) + 1)))
Example #7
	def finish(self):
		self.axes.set_title("Trigger Peak Frequency / Injection Centre Frequency\n(%d Found Injections)" % self.found)
		# 21 bins per filter width
		filter = rate.gaussian_window(21)
		rate.to_moving_mean_density(self.offsets, filter)
		rate.to_moving_mean_density(self.coinc_offsets, filter)
		self.axes.plot(10**self.offsets.centres()[0], self.offsets.array, "k")
		self.axes.plot(10**self.coinc_offsets.centres()[0], self.coinc_offsets.array, "r")
		self.axes.legend(["%s triggers" % self.instrument, "SNR-weighted mean of all matching triggers"], loc = "lower right")
		ymin, ymax = self.axes.get_ylim()
		if ymax / ymin > 1e6:
			ymin = ymax / 1e6
			self.axes.set_ylim((ymin, ymax))
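
Examples #4 and #7 use rate.to_moving_mean_density() instead of rate.filter_array():  it convolves and also divides by the bin volumes, so the result reads as a density (events per unit of the binned variable) rather than a smoothed count.  A minimal sketch under the same lal.rate assumption:

from lal import rate

offsets = rate.BinnedArray(rate.NDBins((rate.LinearBins(-1.0, +1.0, 201),)))
for dt in (-0.01, 0.0, 0.02):
	offsets[dt,] += 1.0

# 21 bins per filter width, as in the examples
rate.to_moving_mean_density(offsets, rate.gaussian_window(21))
# offsets.array now holds triggers per unit offset at offsets.centres()[0]
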
Example #8
 def finish(self):
     for instrument, data in sorted(self.data.items()):
         fig, axes = SnglBurstUtils.make_burst_plot(
             r"$t_{\mathrm{recovered}} - t_{\mathrm{injected}}$ (s)",
             "Triggers per Unit Offset")
         axes.semilogy()
         axes.set_title(
             "Trigger Peak Time - Injection Peak Time in %s\n(%d Found Injections)"
             % (instrument, data.found))
         # 21 bins per filter width
         rate.filter_array(data.offsets.array, rate.gaussian_window(21))
         axes.plot(data.offsets.centres()[0], data.offsets.at_centres(),
                   "k")
         #axes.legend(["%s residuals" % instrument, "SNR-weighted mean of residuals in all instruments"], loc = "lower right")
         yield fig
Example #9
	def finish(self, binning = None):
		# compute the binning if needed, and set the injections
		# into the numerator and denominator bins.  also compute
		# the smoothing window's parameters.

		self._bin_events(binning)

		# smooth the efficiency data.
		print >>sys.stderr, "Sum of numerator bins before smoothing = %g" % self.efficiency.numerator.array.sum()
		print >>sys.stderr, "Sum of denominator bins before smoothing = %g" % self.efficiency.denominator.array.sum()
		rate.filter_binned_ratios(self.efficiency, rate.gaussian_window(self.window_size_x, self.window_size_y))
		print >>sys.stderr, "Sum of numerator bins after smoothing = %g" % self.efficiency.numerator.array.sum()
		print >>sys.stderr, "Sum of denominator bins after smoothing = %g" % self.efficiency.denominator.array.sum()

		# regularize to prevent divide-by-zero errors
		self.efficiency.regularize()
Example #11
class CoincParamsDistributions(ligolw_burca_tailor.BurcaCoincParamsDistributions):
	binnings = {
		"H1_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
		"H2_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
		"L1_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
		"H1H2_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
		"H1L1_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),)),
		"H2L1_eff_snr": rate.NDBins((rate.LinearBins(0.0, 50.0, 1000),))
	}
	filters = {
		"H1_eff_snr": rate.gaussian_window(21),
		"H2_eff_snr": rate.gaussian_window(21),
		"L1_eff_snr": rate.gaussian_window(21),
		"H1H2_eff_snr": rate.gaussian_window(21),
		"H1L1_eff_snr": rate.gaussian_window(21),
		"H2L1_eff_snr": rate.gaussian_window(21)
	}
Example #12
class BurcaCoincParamsDistributions(snglcoinc.CoincParamsDistributions):
    binnings = {
        "H1_H2_dband": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_L1_dband": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H2_L1_dband": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_V1_dband": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "L1_V1_dband": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_H2_ddur": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_L1_ddur": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H2_L1_ddur": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_V1_ddur": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "L1_V1_ddur": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_H2_df": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_L1_df": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H2_L1_df": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_V1_df": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "L1_V1_df": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_H2_dh": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_L1_dh": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H2_L1_dh": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_V1_dh": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "L1_V1_dh": rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))),
        "H1_H2_dt": dt_binning("H1", "H2"),
        "H1_L1_dt": dt_binning("H1", "L1"),
        "H2_L1_dt": dt_binning("H2", "L1"),
        "H1_V1_dt": dt_binning("H1", "V1"),
        "L1_V1_dt": dt_binning("L1", "V1")
    }

    filters = {
        "H1_H2_dband": rate.gaussian_window(11, 5),
        "H1_L1_dband": rate.gaussian_window(11, 5),
        "H2_L1_dband": rate.gaussian_window(11, 5),
        "H1_V1_dband": rate.gaussian_window(11, 5),
        "L1_V1_dband": rate.gaussian_window(11, 5),
        "H1_H2_ddur": rate.gaussian_window(11, 5),
        "H1_L1_ddur": rate.gaussian_window(11, 5),
        "H2_L1_ddur": rate.gaussian_window(11, 5),
        "H1_V1_ddur": rate.gaussian_window(11, 5),
        "L1_V1_ddur": rate.gaussian_window(11, 5),
        "H1_H2_df": rate.gaussian_window(11, 5),
        "H1_L1_df": rate.gaussian_window(11, 5),
        "H2_L1_df": rate.gaussian_window(11, 5),
        "H1_V1_df": rate.gaussian_window(11, 5),
        "L1_V1_df": rate.gaussian_window(11, 5),
        "H1_H2_dh": rate.gaussian_window(11, 5),
        "H1_L1_dh": rate.gaussian_window(11, 5),
        "H2_L1_dh": rate.gaussian_window(11, 5),
        "H1_V1_dh": rate.gaussian_window(11, 5),
        "L1_V1_dh": rate.gaussian_window(11, 5),
        "H1_H2_dt": rate.gaussian_window(11, 5),
        "H1_L1_dt": rate.gaussian_window(11, 5),
        "H2_L1_dt": rate.gaussian_window(11, 5),
        "H1_V1_dt": rate.gaussian_window(11, 5),
        "L1_V1_dt": rate.gaussian_window(11, 5)
    }

    @classmethod
    def from_filenames(cls, filenames, name, verbose=False):
        """
		Convenience function to deserialize
		CoincParamsDistributions objects from a collection of XML
		files and return their sum.  The return value is a
		two-element tuple.  The first element is the deserialized
		and summed CoincParamsDistributions object, the second is a
		segmentlistdict indicating the interval of time spanned by
		the out segments in the search_summary rows matching the
		process IDs that were attached to the
		CoincParamsDistributions objects in the XML.
		"""
        self = None
        for n, filename in enumerate(filenames, 1):
            if verbose:
                print >> sys.stderr, "%d/%d:" % (n, len(filenames)),
            xmldoc = ligolw_utils.load_filename(
                filename, verbose=verbose, contenthandler=cls.contenthandler)
            if self is None:
                self = cls.from_xml(xmldoc, name)
                seglists = lsctables.SearchSummaryTable.get_table(
                    xmldoc).get_out_segmentlistdict(set([self.process_id
                                                         ])).coalesce()
            else:
                other = cls.from_xml(xmldoc, name)
                self += other
                seglists |= lsctables.SearchSummaryTable.get_table(
                    xmldoc).get_out_segmentlistdict(set([other.process_id
                                                         ])).coalesce()
                del other
            xmldoc.unlink()
        return self, seglists
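
A hypothetical invocation of the classmethod above; the file names and the distributions name string below are placeholders for illustration, not values from the source:

# placeholders:  substitute real ligolw_burca_tailor output files and the
# name under which the distributions were written
coincparamsdistributions, seglists = BurcaCoincParamsDistributions.from_filenames(
    ["dist_1.xml.gz", "dist_2.xml.gz"], u"ligolw_burca_tailor", verbose=True)
livetime = float(abs(seglists.intersection(seglists.keys())))
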
Example #13
def newfig(segment):
	weeks = float(abs(segment)) / 86400.0 / 7.0	# FIXME: leap seconds?
	border = (0.5, 0.75, 0.125, 0.625)	# inches
	width = max(10.0, weeks * 3.0 + border[0] + border[2])	# inches
	height = 8.0	# inches
	fig = figure.Figure()
	canvas = FigureCanvas(fig)
	fig.set_size_inches(width, height)
	fig.gca().set_position([border[0] / width, border[1] / height, (width - border[0] - border[2]) / width, (height - border[1] - border[3]) / height])
	return fig


fig = newfig(options.segment)
axes = fig.gca()


rate.to_moving_mean_density(trigger_rate, rate.gaussian_window(bins_per_filterwidth))


axes.plot(trigger_rate.centres()[0], trigger_rate.array)


axes.set_xlim(list(options.segment))
axes.grid(True)


for seg in ~seglist & segments.segmentlist([options.segment]):
	axes.axvspan(seg[0], seg[1], facecolor = "k", alpha = 0.2)


axes.set_title("%s Excess Power Trigger Rate vs. Time\n(%d Triggers, %g s Moving Average)" % (options.instrument, num_triggers, options.window))
Example #14
	def finish(self):
		fig, axes = SnglBurstUtils.make_burst_plot(r"Injection Amplitude (\(\mathrm{s}^{-\frac{1}{3}}\))", "Detection Efficiency", width = 108.0)
		axes.set_title(r"Detection Efficiency vs.\ Amplitude")
		axes.semilogx()
		axes.set_position([0.10, 0.150, 0.86, 0.77])

		# set desired yticks
		axes.set_yticks((0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0))
		axes.set_yticklabels((r"\(0\)", r"\(0.1\)", r"\(0.2\)", r"\(0.3\)", r"\(0.4\)", r"\(0.5\)", r"\(0.6\)", r"\(0.7\)", r"\(0.8\)", r"\(0.9\)", r"\(1.0\)"))
		axes.xaxis.grid(True, which = "major,minor")
		axes.yaxis.grid(True, which = "major,minor")

		# put made and found injections in the denominators and
		# numerators of the efficiency bins
		bins = rate.NDBins((rate.LogarithmicBins(min(sim.amplitude for sim in self.all), max(sim.amplitude for sim in self.all), 400),))
		efficiency_num = rate.BinnedArray(bins)
		efficiency_den = rate.BinnedArray(bins)
		for sim in self.found:
			efficiency_num[sim.amplitude,] += 1
		for sim in self.all:
			efficiency_den[sim.amplitude,] += 1

		# generate and plot trend curves.  adjust window function
		# normalization so that denominator array correctly
		# represents the number of injections contributing to each
		# bin:  make w(0) = 1.0.  note that this factor has no
		# effect on the efficiency because it is common to the
		# numerator and denominator arrays.  we do this for the
		# purpose of computing the Poisson error bars, which
		# requires us to know the counts for the bins
		windowfunc = rate.gaussian_window(self.filter_width)
		windowfunc /= windowfunc[(len(windowfunc) - 1) / 2]
		rate.filter_binned_array(efficiency_num, windowfunc)
		rate.filter_binned_array(efficiency_den, windowfunc)

		# regularize:  adjust unused bins so that the efficiency is
		# 0, not NaN
		assert (efficiency_num.array <= efficiency_den.array).all()
		efficiency_den.array[(efficiency_num.array == 0) & (efficiency_den.array == 0)] = 1

		line1, A50, A50_err = render_data_from_bins(file("string_efficiency.dat", "w"), axes, efficiency_num, efficiency_den, self.cal_uncertainty, self.filter_width, colour = "k", linestyle = "-", erroralpha = 0.2)
		print >>sys.stderr, "Pipeline's 50%% efficiency point for all detections = %g +/- %g%%\n" % (A50, A50_err * 100)

		# add a legend to the axes
		axes.legend((line1,), (r"\noindent Injections recovered with $\Lambda > %s$" % SnglBurstUtils.latexnumber("%.2e" % self.detection_threshold),), loc = "lower right")

		# adjust limits
		axes.set_xlim([1e-21, 2e-18])
		axes.set_ylim([0.0, 1.0])

		#
		# dump some information about the highest-amplitude missed
		# and quietest-amplitude found injections
		#

		self.loudest_missed.sort(reverse = True)
		self.quietest_found.sort(reverse = True)

		f = file("string_loud_missed_injections.txt", "w")
		print >>f, "Highest Amplitude Missed Injections"
		print >>f, "==================================="
		for amplitude, sim, offsetvector, filename, likelihood_ratio in self.loudest_missed:
			print >>f
			print >>f, "%s in %s:" % (str(sim.simulation_id), filename)
			if likelihood_ratio is None:
				print >>f, "Not recovered"
			else:
				print >>f, "Recovered with \\Lambda = %.16g, detection threshold was %.16g" % (likelihood_ratio, self.detection_threshold)
			for instrument in self.seglists:
				print >>f, "In %s:" % instrument
				print >>f, "\tInjected amplitude:\t%.16g" % SimBurstUtils.string_amplitude_in_instrument(sim, instrument, offsetvector)
				print >>f, "\tTime of injection:\t%s s" % sim.time_at_instrument(instrument, offsetvector)
			print >>f, "Amplitude in waveframe:\t%.16g" % sim.amplitude
			t = sim.get_time_geocent()
			print >>f, "Time at geocentre:\t%s s" % t
			print >>f, "Segments within 60 seconds:\t%s" % segmentsUtils.segmentlistdict_to_short_string(self.seglists & segments.segmentlistdict((instrument, segments.segmentlist([segments.segment(t-offsetvector[instrument]-60, t-offsetvector[instrument]+60)])) for instrument in self.seglists))
			print >>f, "Vetoes within 60 seconds:\t%s" % segmentsUtils.segmentlistdict_to_short_string(self.vetoseglists & segments.segmentlistdict((instrument, segments.segmentlist([segments.segment(t-offsetvector[instrument]-60, t-offsetvector[instrument]+60)])) for instrument in self.vetoseglists))

		f = file("string_quiet_found_injections.txt", "w")
		print >>f, "Lowest Amplitude Found Injections"
		print >>f, "================================="
		for inv_amplitude, sim, offsetvector, filename, likelihood_ratio in self.quietest_found:
			print >>f
			print >>f, "%s in %s:" % (str(sim.simulation_id), filename)
			if likelihood_ratio is None:
				print >>f, "Not recovered"
			else:
				print >>f, "Recovered with \\Lambda = %.16g, detection threshold was %.16g" % (likelihood_ratio, self.detection_threshold)
			for instrument in self.seglists:
				print >>f, "In %s:" % instrument
				print >>f, "\tInjected amplitude:\t%.16g" % SimBurstUtils.string_amplitude_in_instrument(sim, instrument, offsetvector)
				print >>f, "\tTime of injection:\t%s s" % sim.time_at_instrument(instrument, offsetvector)
			print >>f, "Amplitude in waveframe:\t%.16g" % sim.amplitude
			t = sim.get_time_geocent()
			print >>f, "Time at geocentre:\t%s s" % t
			print >>f, "Segments within 60 seconds:\t%s" % segmentsUtils.segmentlistdict_to_short_string(self.seglists & segments.segmentlistdict((instrument, segments.segmentlist([segments.segment(t-offsetvector[instrument]-60, t-offsetvector[instrument]+60)])) for instrument in self.seglists))
			print >>f, "Vetoes within 60 seconds:\t%s" % segmentsUtils.segmentlistdict_to_short_string(self.vetoseglists & segments.segmentlistdict((instrument, segments.segmentlist([segments.segment(t-offsetvector[instrument]-60, t-offsetvector[instrument]+60)])) for instrument in self.vetoseglists))

		#
		# done
		#

		return fig,
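
A stand-alone sketch (not from the source) of the window normalization trick used in Example #14:  rescale the Gaussian so its central sample is exactly 1, so that after convolution the denominator array still counts the injections contributing to each bin, which is what the Poisson error bars need.  Assumes the modern lal.rate module:

from lal import rate

windowfunc = rate.gaussian_window(21)
windowfunc /= windowfunc[(len(windowfunc) - 1) // 2]	# central sample -> 1.0
# the rescaling cancels in numerator/denominator, so the efficiency
# itself is unchanged
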
Example #15
    def finish(self):
        self.axes.set_title(
            r"\begin{center}Distribution of Coincident Events (%d Foreground, %d Background Events, %d Injections Found in Coincidence, Logarithmic Density Contours)\end{center}"
            % (self.n_foreground, self.n_background, self.n_injections))
        xcoords, ycoords = self.background_bins.centres()

        # prepare the data
        rate.filter_array(self.foreground_bins.array,
                          rate.gaussian_window(8, 8))
        rate.filter_array(self.background_bins.array,
                          rate.gaussian_window(8, 8))
        rate.filter_array(self.coinc_injection_bins.array,
                          rate.gaussian_window(8, 8))
        rate.filter_array(self.incomplete_coinc_injection_bins.array,
                          rate.gaussian_window(8, 8))
        self.foreground_bins.logregularize()
        self.background_bins.logregularize()
        self.coinc_injection_bins.logregularize()
        self.incomplete_coinc_injection_bins.logregularize()

        # plot background contours
        max_density = math.log(self.background_bins.array.max())
        self.axes.contour(xcoords,
                          ycoords,
                          numpy.transpose(numpy.log(
                              self.background_bins.array)),
                          [max_density - n for n in xrange(0, 10, 1)],
                          cmap=matplotlib.cm.Greys)

        # plot foreground (zero-lag) contours
        max_density = math.log(self.foreground_bins.array.max())
        self.axes.contour(xcoords,
                          ycoords,
                          numpy.transpose(numpy.log(
                              self.foreground_bins.array)),
                          [max_density - n for n in xrange(0, 10, 1)],
                          cmap=matplotlib.cm.Reds)
        #self.axes.plot(self.foreground_x, self.foreground_y, "r+")

        # plot coincident injection contours
        max_density = math.log(self.coinc_injection_bins.array.max())
        self.axes.contour(xcoords,
                          ycoords,
                          numpy.transpose(
                              numpy.log(self.coinc_injection_bins.array)),
                          [max_density - n for n in xrange(0, 10, 1)],
                          cmap=matplotlib.cm.Blues)

        # plot incomplete coincident injection contours
        max_density = math.log(
            self.incomplete_coinc_injection_bins.array.max())
        self.axes.contour(xcoords,
                          ycoords,
                          numpy.transpose(
                              numpy.log(
                                  self.incomplete_coinc_injection_bins.array)),
                          [max_density - n for n in xrange(0, 10, 1)],
                          cmap=matplotlib.cm.Greens)

        # fix axes limits
        self.axes.set_xlim([
            self.background_bins.bins.min[0], self.background_bins.bins.max[0]
        ])
        self.axes.set_ylim([
            self.background_bins.bins.min[1], self.background_bins.bins.max[1]
        ])
Example #16
	def finish(self, threshold):
		# bin the injections

		self._bin_events()

		# use the same binning for the found injection density as
		# was constructed for the efficiency

		self.found_density = rate.BinnedArray(self.efficiency.denominator.bins)

		# construct the amplitude weighting function

		amplitude_weight = rate.BinnedArray(rate.NDBins((rate.LinearBins(threshold - 100, threshold + 100, 10001),)))

		# gaussian window's width is the number of bins
		# corresponding to 10 units of amplitude, which is computed
		# by dividing 10 by the "volume" of the bin corresponding
		# to threshold.  index is the index of the element in
		# amplitude_weight corresponding to the threshold.

		index, = amplitude_weight.bins[threshold,]
		window = rate.gaussian_window(10.0 / amplitude_weight.bins.volumes()[index])
		window /= 10 * window[(len(window) - 1) / 2]

		# set the window data into the BinnedArray object.  the
		# Gaussian peaks on the middle element of the window, which
		# we want to place at index in the amplitude_weight array.

		lo = index - (len(window) - 1) / 2
		hi = lo + len(window)
		if lo < 0 or hi > len(amplitude_weight.array):
			raise ValueError("amplitude weighting window too large")
		amplitude_weight.array[lo:hi] = window

		# store the recovered injections in the found density bins
		# weighted by amplitude

		for x, y, z in self.recovered_xyz:
			try:
				weight = amplitude_weight[z,]
			except IndexError:
				# beyond the edge of the window
				weight = 0.0
			self.found_density[x, y] += weight

		# the efficiency is only valid up to the highest energy
		# that has been injected.  this creates problems later on
		# so, instead, for each frequency, identify the highest
		# energy that has been measured and copy the values for
		# that bin's numerator and denominator into the bins for
		# all higher energies.  do the same for the counts in the
		# found injection density bins.
		#
		# numpy.indices() returns two arrays, the first of
		# which has each element set equal to its x index, the
		# second has each element set equal to its y index, we keep
		# the latter.  meanwhile numpy.roll() cyclically permutes
		# the efficiency bins down one along the y (energy) axis.
		# from this, the conditional finds bins where the
		# denominator is non-zero but is zero in the bin
		# immediately above it in energy.  we select the elements
		# from the y index array where the conditional is true, and
		# then use numpy.max() along the y direction to return the
		# highest such y index for each x index, which is a 1-D
		# array.  finally, enumerate() is used to iterate over x
		# index and corresponding y index, and if the y index is
		# not negative (was found) the value from that x-y bin is
		# copied to all higher bins.

		n = self.efficiency.numerator.array
		d = self.efficiency.denominator.array
		f = self.found_density.array
		bady = -1
		for x, y in enumerate(numpy.max(numpy.where((d > 0) & (numpy.roll(d, -1, axis = 1) <= 0), numpy.indices(d.shape)[1], bady), axis = 1)):
			if y != bady:
				n[x, y + 1:] = n[x, y]
				d[x, y + 1:] = d[x, y]
				f[x, y + 1:] = f[x, y]

		# now do the same for the bins at energies below those that
		# have been measured.

		bady = d.shape[1]
		for x, y in enumerate(numpy.min(numpy.where((d > 0) & (numpy.roll(d, 1, axis = 1) <= 0), numpy.indices(d.shape)[1], bady), axis = 1)):
			if y != bady:
				n[x, 0:y] = n[x, y]
				d[x, 0:y] = d[x, y]
				f[x, 0:y] = f[x, y]

		diagnostic_plot(self.efficiency.numerator.array, self.efficiency.denominator.bins, r"Efficiency Numerator (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_numerator_before.png")
		diagnostic_plot(self.efficiency.denominator.array, self.efficiency.denominator.bins, r"Efficiency Denominator (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_denominator_before.png")
		diagnostic_plot(self.found_density.array, self.efficiency.denominator.bins, r"Injections Lost / Unit of Threshold (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_found_density_before.png")

		# smooth the efficiency bins and the found injection
		# density bins using the same 2D window.

		window = rate.gaussian_window(self.window_size_x, self.window_size_y)
		window /= window[tuple((numpy.array(window.shape, dtype = "double") - 1) / 2)]
		rate.filter_binned_ratios(self.efficiency, window)
		rate.filter_array(self.found_density.array, window)

		diagnostic_plot(self.efficiency.numerator.array, self.efficiency.denominator.bins, r"Efficiency Numerator (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_numerator_after.png")
		diagnostic_plot(self.efficiency.denominator.array, self.efficiency.denominator.bins, r"Efficiency Denominator (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_denominator_after.png")
		diagnostic_plot(self.found_density.array, self.efficiency.denominator.bins, r"Injections Lost / Unit of Threshold (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_found_density_after.png")

		# compute the uncertainties in the efficiency and its
		# derivative by assuming these to be the binomial counting
		# fluctuations in the numerators.

		p = self.efficiency.ratio()
		self.defficiency = numpy.sqrt(p * (1 - p) / self.efficiency.denominator.array)
		p = self.found_density.array / self.efficiency.denominator.array
		self.dfound_density = numpy.sqrt(p * (1 - p) / self.efficiency.denominator.array)
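
A tiny stand-alone demo (not from the source) of the numpy idiom explained in the comments of Example #16:  for each x (row), find the highest y (column) whose count is positive while the count in the bin immediately above it is zero.

import numpy

d = numpy.array([[1.0, 2.0, 0.0, 0.0],
                 [3.0, 0.0, 0.0, 0.0]])
bady = -1
cond = (d > 0) & (numpy.roll(d, -1, axis = 1) <= 0)
print numpy.max(numpy.where(cond, numpy.indices(d.shape)[1], bady), axis = 1)
# prints [1 0]:  column 1 in the first row, column 0 in the second
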
Example #17
class StringCoincParamsDistributions(snglcoinc.CoincParamsDistributions):
    ligo_lw_name_suffix = u"stringcusp_coincparamsdistributions"

    binnings = {
        "H1_snr2_chi2": rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801), rate.ATanLogarithmicBins(.1, 1e4, 801))),
        "H2_snr2_chi2": rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801), rate.ATanLogarithmicBins(.1, 1e4, 801))),
        "L1_snr2_chi2": rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801), rate.ATanLogarithmicBins(.1, 1e4, 801))),
        "V1_snr2_chi2": rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801), rate.ATanLogarithmicBins(.1, 1e4, 801))),
        "H1_H2_dt": dt_binning("H1", "H2"),
        "H1_L1_dt": dt_binning("H1", "L1"),
        "H1_V1_dt": dt_binning("H1", "V1"),
        "H2_L1_dt": dt_binning("H2", "L1"),
        "H2_V1_dt": dt_binning("H2", "V1"),
        "L1_V1_dt": dt_binning("L1", "V1"),
        "H1_H2_dA": rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)),
        "H1_L1_dA": rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)),
        "H1_V1_dA": rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)),
        "H2_L1_dA": rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)),
        "H2_V1_dA": rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)),
        "L1_V1_dA": rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)),
        "H1_H2_df": rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)),
        "H1_L1_df": rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)),
        "H1_V1_df": rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)),
        "H2_L1_df": rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)),
        "H2_V1_df": rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)),
        "L1_V1_df": rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)),
        # only non-negative rss timing residual bins will be used
        # but we want a binning that's linear at the origin so
        # instead of inventing a new one we just use atan bins that
        # are symmetric about 0
        "instrumentgroup,rss_timing_residual": rate.NDBins((snglcoinc.InstrumentBins(names=("H1", "H2", "L1", "V1")), rate.ATanBins(-0.02, +0.02, 1001)))
    }

    filters = {
        "H1_snr2_chi2": rate.gaussian_window(11, 11, sigma=20),
        "H2_snr2_chi2": rate.gaussian_window(11, 11, sigma=20),
        "L1_snr2_chi2": rate.gaussian_window(11, 11, sigma=20),
        "V1_snr2_chi2": rate.gaussian_window(11, 11, sigma=20),
        "H1_H2_dt": rate.gaussian_window(11, sigma=20),
        "H1_L1_dt": rate.gaussian_window(11, sigma=20),
        "H1_V1_dt": rate.gaussian_window(11, sigma=20),
        "H2_L1_dt": rate.gaussian_window(11, sigma=20),
        "H2_V1_dt": rate.gaussian_window(11, sigma=20),
        "L1_V1_dt": rate.gaussian_window(11, sigma=20),
        "H1_H2_dA": rate.gaussian_window(11, sigma=20),
        "H1_L1_dA": rate.gaussian_window(11, sigma=20),
        "H1_V1_dA": rate.gaussian_window(11, sigma=20),
        "H2_L1_dA": rate.gaussian_window(11, sigma=20),
        "H2_V1_dA": rate.gaussian_window(11, sigma=20),
        "L1_V1_dA": rate.gaussian_window(11, sigma=20),
        "H1_H2_df": rate.gaussian_window(11, sigma=20),
        "H1_L1_df": rate.gaussian_window(11, sigma=20),
        "H1_V1_df": rate.gaussian_window(11, sigma=20),
        "H2_L1_df": rate.gaussian_window(11, sigma=20),
        "H2_V1_df": rate.gaussian_window(11, sigma=20),
        "L1_V1_df": rate.gaussian_window(11, sigma=20),
        # instrument group filter is a no-op, should produce a
        # 1-bin top-hat window.
        "instrumentgroup,rss_timing_residual": rate.gaussian_window(1e-100, 11, sigma=20)
    }

    @staticmethod
    def coinc_params(events, offsetvector, triangulators):
        #
        # check for coincs that have been vetoed entirely
        #

        if len(events) < 2:
            return None

        #
        # Initialize the parameter dictionary, sort the events by
        # instrument name (the multi-instrument parameters are defined for
        # the instruments in this order and the triangulators are
        # constructed this way too), and retrieve the sorted instrument
        # names
        #

        params = {}
        events = tuple(sorted(events, key=lambda event: event.ifo))
        instruments = tuple(event.ifo for event in events)

        #
        # zero-instrument parameters
        #

        ignored, ignored, ignored, rss_timing_residual = triangulators[
            instruments](tuple(event.peak + offsetvector[event.ifo]
                               for event in events))
        # FIXME:  rss_timing_residual is forced to 0 to disable this
        # feature.  all the code to compute it properly is still here and
        # given suitable initializations, the distribution data is still
        # two-dimensional and has a suitable filter applied to it, but all
        # events are forced into the RSS_{\Delta t} = 0 bin, in effect
        # removing that dimension from the data.  We can look at this again
        # sometime in the future if we're curious why it didn't help.  Just
        # delete the next line and you're back in business.
        rss_timing_residual = 0.0
        params["instrumentgroup,rss_timing_residual"] = (
            frozenset(instruments), rss_timing_residual)

        #
        # one-instrument parameters
        #

        for event in events:
            prefix = "%s_" % event.ifo

            params["%ssnr2_chi2" % prefix] = (event.snr**2.0,
                                              event.chisq / event.chisq_dof)

        #
        # two-instrument parameters.  note that events are sorted by
        # instrument
        #

        for event1, event2 in iterutils.choices(events, 2):
            assert event1.ifo != event2.ifo

            prefix = "%s_%s_" % (event1.ifo, event2.ifo)

            dt = float((event1.peak + offsetvector[event1.ifo]) -
                       (event2.peak + offsetvector[event2.ifo]))
            params["%sdt" % prefix] = (dt, )

            dA = math.log10(abs(event1.amplitude / event2.amplitude))
            params["%sdA" % prefix] = (dA, )

            # f_cut = central_freq + bandwidth/2
            f_cut1 = event1.central_freq + event1.bandwidth / 2
            f_cut2 = event2.central_freq + event2.bandwidth / 2
            df = float((math.log10(f_cut1) - math.log10(f_cut2)) /
                       (math.log10(f_cut1) + math.log10(f_cut2)))
            params["%sdf" % prefix] = (df, )

        #
        # done
        #

        return params

    def add_slidelessbackground(self,
                                database,
                                experiments,
                                param_func_args=()):
        # FIXME:  this needs to be taught how to not slide H1 and
        # H2 with respect to each other

        # segment lists
        seglists = database.seglists - database.vetoseglists

        # construct the event list dictionary.  remove vetoed
        # events from the lists and save event peak times so they
        # can be restored later
        eventlists = {}
        orig_peak_times = {}
        for event in database.sngl_burst_table:
            if event.peak in seglists[event.ifo]:
                try:
                    eventlists[event.ifo].append(event)
                except KeyError:
                    eventlists[event.ifo] = [event]
                orig_peak_times[event] = event.peak

        # parse the --thresholds H1,L1=... command-line options from burca
        delta_t = [
            float(threshold.split("=")[-1])
            for threshold in ligolw_process.get_process_params(
                database.xmldoc, "ligolw_burca", "--thresholds")
        ]
        if not all(delta_t[0] == threshold for threshold in delta_t[1:]):
            raise ValueError(
                "\Delta t is not unique in ligolw_burca arguments")
        delta_t = delta_t.pop()

        # construct the coinc generator.  note that H1+H2-only
        # coincs are forbidden, which is affected here by removing
        # that instrument combination from the object's internal
        # .rates dictionary
        coinc_generator = snglcoinc.CoincSynthesizer(eventlists, seglists,
                                                     delta_t)
        if frozenset(("H1", "H2")) in coinc_generator.rates:
            del coinc_generator.rates[frozenset(("H1", "H2"))]

        # build a dictionary of time-of-arrival generators
        toa_generator = dict(
            (instruments, coinc_generator.plausible_toas(instruments))
            for instruments in coinc_generator.rates.keys())

        # how many coincs?  the expected number is obtained by
        # multiplying the total zero-lag time for which at least
        # two instruments were on by the sum of the rates for all
        # coincs to get the mean number of coincs per zero-lag
        # observation time, and multiplying that by the number of
        # experiments the background should simulate to get the
        # mean number of background events to simulate.  the actual
        # number simulated is a Poisson-distributed RV with that
        # mean.
        n_coincs, = scipy.stats.poisson.rvs(
            float(abs(segmentsUtils.vote(seglists.values(), 2))) *
            sum(coinc_generator.rates.values()) * experiments)

        # generate synthetic background coincs
        zero_lag_offset_vector = offsetvector(
            (instrument, 0.0) for instrument in seglists)
        for n, events in enumerate(
                coinc_generator.coincs(lsctables.SnglBurst.get_peak)):
            # n = 1 on 2nd iteration, so placing this condition
            # where it is in the loop causes the correct number
            # of events to be added to the background
            if n >= n_coincs:
                break
            # assign fake peak times
            toas = toa_generator[frozenset(event.ifo
                                           for event in events)].next()
            for event in events:
                event.peak = toas[event.ifo]
            # compute coincidence parameters
            self.add_background(
                self.coinc_params(events, zero_lag_offset_vector,
                                  *param_func_args))

        # restore original peak times
        for event, peak_time in orig_peak_times.iteritems():
            event.peak = peak_time
Example #18
 def finish(self):
     for key, pdf in self.densities.items():
         rate.filter_array(pdf.array, rate.gaussian_window(11, 5))
         pdf.normalize()
     self.mkinterps()
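
Examples #12 and #17 call a dt_binning() helper that is not included in this collection.  For reference, a sketch of the shape it takes in lalburst.stringutils -- an approximation to be checked against the installed source, not a verbatim copy:

def dt_binning(instrument1, instrument2):
	# +/- (light travel time + 5 ms), finely binned and linear near zero
	dt = 0.005 + snglcoinc.light_travel_time(instrument1, instrument2)	# seconds
	return rate.NDBins((rate.ATanBins(-dt, +dt, 12001),))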