Example #1
 def __init__(self, x_instrument, y_instrument, magnitude, desc,
              min_magnitude, max_magnitude):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot(
         "%s %s" % (x_instrument, desc), "%s %s" % (y_instrument, desc))
     self.fig.set_size_inches(6, 6)
     self.axes.loglog()
     self.x_instrument = x_instrument
     self.y_instrument = y_instrument
     self.magnitude = magnitude
     self.foreground_x = []
     self.foreground_y = []
     self.n_foreground = 0
     self.n_background = 0
     self.n_injections = 0
     self.foreground_bins = rate.BinnedArray(
         rate.NDBins(
             (rate.LogarithmicBins(min_magnitude, max_magnitude, 1024),
              rate.LogarithmicBins(min_magnitude, max_magnitude, 1024))))
     self.background_bins = rate.BinnedArray(
         rate.NDBins(
             (rate.LogarithmicBins(min_magnitude, max_magnitude, 1024),
              rate.LogarithmicBins(min_magnitude, max_magnitude, 1024))))
     self.coinc_injection_bins = rate.BinnedArray(
         rate.NDBins(
             (rate.LogarithmicBins(min_magnitude, max_magnitude, 1024),
              rate.LogarithmicBins(min_magnitude, max_magnitude, 1024))))
     self.incomplete_coinc_injection_bins = rate.BinnedArray(
         rate.NDBins(
             (rate.LogarithmicBins(min_magnitude, max_magnitude, 1024),
              rate.LogarithmicBins(min_magnitude, max_magnitude, 1024))))
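A minimal sketch of the pattern above, assuming the lal.rate module is importable as in these snippets: build a two-dimensional logarithmic binning, wrap it in a BinnedArray, and increment a count at a physical coordinate.

from lal import rate

# assumed magnitude range, for illustration only
min_magnitude, max_magnitude = 1e-2, 1e2
binning = rate.NDBins((
    rate.LogarithmicBins(min_magnitude, max_magnitude, 1024),
    rate.LogarithmicBins(min_magnitude, max_magnitude, 1024),
))
counts = rate.BinnedArray(binning)

# index with physical coordinates; the binning maps them to array indices
counts[0.5, 2.0] += 1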
Example #2
 def __init__(self, x, y, magnitude, max_magnitude):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot("X", "Y")
     self.fig.set_size_inches(6, 6)
     self.x = x
     self.y = y
     self.magnitude = magnitude
     self.n_foreground = 0
     self.n_background = 0
     self.n_injections = 0
     max_magnitude = math.log10(max_magnitude)
     self.foreground_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
     self.background_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
     self.coinc_injection_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
     self.incomplete_coinc_injection_bins = rate.BinnedArray(
         rate.NDBins((rate.LinearBins(-max_magnitude, max_magnitude, 1024),
                      rate.LinearBins(-max_magnitude, max_magnitude,
                                      1024))))
Example #3
	def __init__(self, rankingstat, nsamples = 2**24, verbose = False):
		#
		# bailout used by .from_xml() class method to get an
		# uninitialized instance
		#

		if rankingstat is None:
			return

		#
		# initialize binnings
		#

		self.noise_lr_lnpdf = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-10., 30., 3000),)))
		self.signal_lr_lnpdf = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-10., 30., 3000),)))
		self.candidates_lr_lnpdf = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-10., 30., 3000),)))

		#
		# obtain analyzed segments that will be used to obtain livetime
		#

		self.segments = segmentsUtils.vote(rankingstat.denominator.triggerrates.segmentlistdict().values(), rankingstat.min_instruments)

		#
		# run importance-weighted random sampling to populate binnings.
		#

		self.signal_lr_lnpdf.array, self.noise_lr_lnpdf.array = binned_log_likelihood_ratio_rates_from_samples(
			self.signal_lr_lnpdf,
			self.noise_lr_lnpdf,
			rankingstat.ln_lr_samples(rankingstat.denominator.random_params(), rankingstat),
			nsamples = nsamples)

		if verbose:
			print("done computing ranking statistic PDFs", file=sys.stderr) 

		#
		# apply density estimation kernels to counts
		#

		self.density_estimate(self.noise_lr_lnpdf, "noise model")
		self.density_estimate(self.signal_lr_lnpdf, "signal model")

		#
		# set the total sample count in the noise and signal
		# ranking statistic histogram equal to the total expected
		# count of the respective events from the experiment. This
		# information is required so that when adding ranking
		# statistic PDFs in our .__iadd__() method they are
		# combined with the correct relative weights, so that
		# .__iadd__() has the effect of marginalizing the
		# distribution over the experiments being combined.
		#

		self.noise_lr_lnpdf.array /= self.noise_lr_lnpdf.array.sum()
		self.noise_lr_lnpdf.normalize()
		self.signal_lr_lnpdf.array /= self.signal_lr_lnpdf.array.sum()
		self.signal_lr_lnpdf.normalize()
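The weighting described in the comment above can be illustrated with plain numpy (a sketch, not the actual .__iadd__() implementation): scaling each normalized histogram by its expected event count before adding makes the sum a count-weighted average over the experiments.

import numpy

# two normalized ranking statistic PDFs over the same binning (toy values)
pdf_a = numpy.array([0.2, 0.5, 0.3])
pdf_b = numpy.array([0.6, 0.3, 0.1])

# total expected event counts from the two experiments
n_a, n_b = 1000., 250.

# weight by counts, add, then renormalize:  marginalizes over experiments
combined = pdf_a * n_a + pdf_b * n_b
combined /= combined.sum()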
Example #4
	def __init__(self, instruments):
		self.densities = {}
		for pair in itertools.combinations(sorted(instruments), 2):
			# FIXME:  hard-coded for directional search
			#dt = 0.02 + snglcoinc.light_travel_time(*pair)
			dt = 0.02
			self.densities["%s_%s_dt" % pair] = rate.BinnedLnDPF(rate.NDBins((rate.ATanBins(-dt, +dt, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))))
			self.densities["%s_%s_dband" % pair] = rate.BinnedLnDPF(rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))))
			self.densities["%s_%s_ddur" % pair] = rate.BinnedLnDPF(rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))))
			self.densities["%s_%s_df" % pair] = rate.BinnedLnDPF(rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))))
			self.densities["%s_%s_dh" % pair] = rate.BinnedLnDPF(rate.NDBins((rate.LinearBins(-2.0, +2.0, 12001), rate.LinearBins(0.0, 2 * math.pi, 61))))
Example #5
	def __init__(self, instruments):
		self.densities = {}
		for instrument in instruments:
			self.densities["%s_snr2_chi2" % instrument] = rate.BinnedLnPDF(rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801), rate.ATanLogarithmicBins(.1, 1e4, 801))))
		for pair in itertools.combinations(sorted(instruments), 2):
			dt = 0.005 + snglcoinc.light_travel_time(*pair)	# seconds
			self.densities["%s_%s_dt" % pair] = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-dt, +dt, 801),)))
			self.densities["%s_%s_dA" % pair] = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)))
			self.densities["%s_%s_df" % pair] = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)))
		# only non-negative rss timing residual bins will be used
		# but we want a binning that's linear at the origin so
		# instead of inventing a new one we just use atan bins that
		# are symmetric about 0
		self.densities["instrumentgroup,rss_timing_residual"] = rate.BinnedLnPDF(rate.NDBins((InstrumentBins(names = instruments), rate.ATanBins(-0.02, +0.02, 1001))))
Example #6
def plot_fiducial_efficiency(eff_fids, fiducial_far, ifos, bins, bin_type):
	fig = plt.figure()
	fig.set_size_inches((8., 8. / plotutil.golden_ratio))
	ax_eff = fig.gca()

	# plot the volume/range versus far/snr for each bin
	mbins = rate.NDBins(bins[1:])
	labels = []
	for lo, eff, hi, ds, label in fiducial_efficiency_to_range_label(eff_fids, mbins, bins[0], bin_type):
		labels.append(label)

		# NOTE create regular plots, and define log x,y scales below
		#      since otherwise, fill_between allocates too many blocks and crashes
		line, = ax_eff.plot(ds, eff, label=label, linewidth=2)
		ax_eff.fill_between(ds, lo, hi, alpha=0.5, color=line.get_color())

	ax_eff.set_xlabel("Distance (Mpc)")
	ax_eff.set_ylabel("Efficiency")
	ax_eff.set_xscale("log")
	ax_eff.legend(loc="upper right")
	ax_eff.grid()

	ax_eff.set_title(r"%s Observing ($\mathrm{FAR} < %s\,\mathrm{Hz}$)" % ("".join(sorted(list(ifos))), plotutil.latexnumber("%.2e" % fiducial_far)))
	fig.tight_layout(pad = .8)

	return fig
Example #7
def plot_range_vs_far(volumes, fars, livetime, ifos, bins, bin_type):
	fig = plt.figure()
	fig.set_size_inches((8., 8. / plotutil.golden_ratio))
	ax_far_range = fig.gca()

	# plot the volume/range versus far/snr for each bin
	mbins = rate.NDBins(bins[1:])
	labels = []
	for lo, center, hi, label in volumes_bins_to_range_label(volumes, mbins, bin_type):
		labels.append(label)

		# NOTE create regular plots, and define log x,y scales below
		#      since otherwise, fill_between allocates too many blocks and crashes
		center = vt_to_range(center, livetime[ifos])
		lo = vt_to_range(lo, livetime[ifos])
		hi = vt_to_range(hi, livetime[ifos])
		line, = ax_far_range.plot(fars, center, label=label, linewidth=2)
		ax_far_range.fill_between(fars, lo, hi, alpha=0.5, color=line.get_color())

	ax_far_range.set_xlabel("Combined FAR (Hz)")
	ax_far_range.set_ylabel("Range (Mpc)")
	ax_far_range.set_xscale("log")
	ax_far_range.set_xlim([min(fars), max(fars)])
	ax_far_range.invert_xaxis()
	ax_far_range.legend(loc="lower left")
	ax_far_range.grid()

	ax_far_range.set_title("%s Observing (%.2f days)" % ("".join(sorted(list(ifos))), livetime[ifos]*365.25))
	fig.tight_layout(pad = .8)

	return fig
Example #8
	def __init__(self, ifo, interval, width):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot("Peak Frequency (Hz)", "Trigger Rate Spectral Density (triggers / s / Hz)")
		self.ifo = ifo
		self.nevents = 0
		# 21 bins per filter width
		bins = int(float(abs(interval)) / width) * 21
		binning = rate.NDBins((rate.LinearBins(interval[0], interval[1], bins),))
		self.rate = rate.BinnedDensity(binning)
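A hedged sketch of how such a BinnedDensity is filled, using only calls that appear elsewhere in these snippets (assumes lal.rate):

from lal import rate

# 1-D linear binning over 0-100 Hz, 21 bins per 1 Hz filter width
lo, hi, width = 0.0, 100.0, 1.0
binning = rate.NDBins((rate.LinearBins(lo, hi, int((hi - lo) / width) * 21),))
density = rate.BinnedDensity(binning)

# count a trigger at 42.3 Hz (the trailing comma forms a 1-D coordinate tuple)
density.count[42.3,] += 1.0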
Example #9
	def __init__(self, ifo, width, max):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot("Delay (s)", "Count / Delay")
		self.ifo = ifo
		self.nevents = 0
		# 21 bins per filter width
		interval = segments.segment(0, max + 2)
		self.bins = rate.BinnedDensity(rate.NDBins((rate.LinearBins(interval[0], interval[1], int(float(abs(interval)) / width) * 21),)))
		self.axes.semilogy()
Example #10
 def __init__(self, *args, **kwargs):
     super(LnSignalDensity, self).__init__(*args, **kwargs)
     # initialize SNRPDF
     self.SNRPDF = {}
     for n in range(self.min_instruments, len(self.instruments) + 1):
         for ifo_combos in itertools.combinations(sorted(self.instruments),
                                                  n):
             self.SNRPDF[ifo_combos] = rate.BinnedArray(
                 rate.NDBins([self.snr_binning] * len(ifo_combos)))
Example #11
 def get_2d_mass_bins(self, low, high, bins):
     """
 Given the component mass range low, high of the search it will
 return 2D bins with size bins in each direction
 """
     mass1Bin = rate.LinearBins(low, high, bins)
     mass2Bin = rate.LinearBins(low, high, bins)
     twoDMB = rate.NDBins((mass1Bin, mass2Bin))
     return twoDMB
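Usage sketch: the returned NDBins can be queried per dimension, e.g. for bin centres, as done elsewhere in these snippets (assumes lal.rate).

from lal import rate

mass1_bins = rate.LinearBins(1.0, 3.0, 10)
mass2_bins = rate.LinearBins(1.0, 3.0, 10)
two_d_bins = rate.NDBins((mass1_bins, mass2_bins))

centres = two_d_bins[0].centres()   # 10 mass1 bin centres between 1.0 and 3.0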
Example #12
def guess_distance_effective_spin_parameter_bins_from_sims(sims, chibins = 11, distbins = 200):
	"""
	Given a list of the injections, guess at the chi = (m1*s1z +
	m2*s2z)/(m1+m2) and distance bins.
	"""
	dist_chi_vals = list(map(sim_to_distance_effective_spin_parameter_bins_function, sims))

	distances = [tup[0] for tup in dist_chi_vals]
	chis = [tup[1] for tup in dist_chi_vals]

	return rate.NDBins([rate.LinearBins(min(distances), max(distances), distbins), rate.LinearBins(min(chis), max(chis), chibins)])
Example #13
def guess_distance_chirp_mass_bins_from_sims(sims, mbins = 11, distbins = 200):
	"""
	Given a list of the injections, guess at the chirp mass and distance
	bins.
	"""
	dist_mchirp_vals = list(map(sim_to_distance_chirp_mass_bins_function, sims))

	distances = [tup[0] for tup in dist_mchirp_vals]
	mchirps = [tup[1] for tup in dist_mchirp_vals]

	return rate.NDBins([rate.LinearBins(min(distances), max(distances), distbins), rate.LinearBins(min(mchirps), max(mchirps), mbins)])
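A usage sketch with hypothetical stand-in injection rows (real code passes sim_inspiral rows; here sim_to_distance_chirp_mass_bins_function is assumed to return the (distance, chirp mass) pair of each stand-in):

from collections import namedtuple

# hypothetical stand-in for sim_inspiral rows
Sim = namedtuple("Sim", "distance mchirp")
sims = [Sim(100.0, 1.2), Sim(250.0, 1.4), Sim(400.0, 1.1)]

bins = guess_distance_chirp_mass_bins_from_sims(sims, mbins=5, distbins=20)
# bins[0] spans 100-400 in distance, bins[1] spans 1.1-1.4 in chirp mass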
Example #14
    def add_contents(self, contents):
        if self.tisi_rows is None:
            # get a list of time slide dictionaries
            self.tisi_rows = contents.time_slide_table.as_dict().values()

            # find the largest and smallest offsets
            min_offset = min(offset for vector in self.tisi_rows
                             for offset in vector.values())
            max_offset = max(offset for vector in self.tisi_rows
                             for offset in vector.values())

            # a guess at the time slide spacing:  works if the
            # time slides are distributed as a square grid over
            # the plot area.  (max - min)^2 gives the area of
            # the time slide square in square seconds; dividing
            # by the length of the time slide list gives the
            # average area per time slide;  taking the square
            # root of that gives the average distance between
            # adjacent time slides in seconds
            time_slide_spacing = ((max_offset - min_offset)**2 /
                                  len(self.tisi_rows))**0.5

            # use an average of 3 bins per time slide in each
            # direction, but round to an odd integer
            nbins = int(
                math.ceil((max_offset - min_offset) / time_slide_spacing * 3))

            # construct the binning
            self.counts = rate.BinnedArray(
                rate.NDBins((rate.LinearBins(min_offset, max_offset, nbins),
                             rate.LinearBins(min_offset, max_offset, nbins))))

        self.seglists |= contents.seglists

        for offsets in contents.connection.cursor().execute(
                """
SELECT tx.offset, ty.offset FROM
	coinc_event
	JOIN time_slide AS tx ON (
		tx.time_slide_id == coinc_event.time_slide_id
	)
	JOIN time_slide AS ty ON (
		ty.time_slide_id == coinc_event.time_slide_id
	)
WHERE
	coinc_event.coinc_def_id == ?
	AND tx.instrument == ?
	AND ty.instrument == ?
		""", (contents.bb_definer_id, self.x_instrument, self.y_instrument)):
            try:
                self.counts[offsets] += 1
            except IndexError:
                # beyond plot boundaries
                pass
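The spacing heuristic in the comment above, worked through with plain numbers (a sketch):

import math

min_offset, max_offset = -10.0, 10.0   # seconds
n_slides = 100                         # number of time slide vectors

# area of the offset square / number of slides, square-rooted:
# the average spacing between adjacent slides on a square grid
spacing = ((max_offset - min_offset)**2 / n_slides)**0.5   # 2.0 s here

# an average of 3 bins per time slide in each direction
nbins = int(math.ceil((max_offset - min_offset) / spacing * 3))   # 30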
Example #15
def guess_distance_total_mass_bins_from_sims(sims, nbins = 11, distbins = 200):
       """
       Given a list of the injections, guess at the total mass and distance
       bins.  Floor and ceil will be used to round the ranges out to the nearest integers.
       """

       total_lo = numpy.floor(min([sim.mass1 + sim.mass2 for sim in sims]))
       total_hi = numpy.ceil(max([sim.mass1 + sim.mass2 for sim in sims]))
       mindist = numpy.floor(min([sim.distance for sim in sims]))
       maxdist = numpy.ceil(max([sim.distance for sim in sims]))

       return rate.NDBins((rate.LinearBins(mindist, maxdist, distbins), rate.LinearBins(total_lo, total_hi, nbins)))
Example #16
 def __init__(self, instrument, interval, width):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot(
         r"$f_{\mathrm{recovered}} / f_{\mathrm{injected}}$",
         "Event Number Density")
     self.axes.loglog()
     self.instrument = instrument
     self.found = 0
     # 21 bins per filter width
     bins = int(float(abs(interval)) / width) * 21
     binning = rate.NDBins((rate.LinearBins(interval[0], interval[1],
                                            bins), ))
     self.offsets = rate.BinnedDensity(binning)
     self.coinc_offsets = rate.BinnedDensity(binning)
Example #17
 def __init__(self, instrument, interval, width):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot(
         r"$t_{\mathrm{recovered}} - t_{\mathrm{injected}}$ (s)",
         "Triggers per Unit Offset")
     self.axes.semilogy()
     self.instrument = instrument
     self.found = 0
     # 21 bins per filter width
     bins = int(float(abs(interval)) / width) * 21
     binning = rate.NDBins((rate.LinearBins(interval[0], interval[1],
                                            bins), ))
     self.offsets = rate.BinnedDensity(binning)
     self.coinc_offsets = rate.BinnedDensity(binning)
Example #18
def make_binning(plots):
    plots = [
        plot for instrument in plots.keys() for plot in plots[instrument]
        if isinstance(plot, SimBurstUtils.Efficiency_hrss_vs_freq)
    ]
    if not plots:
        return None
    minx = min([min(plot.injected_x) for plot in plots])
    maxx = max([max(plot.injected_x) for plot in plots])
    miny = min([min(plot.injected_y) for plot in plots])
    maxy = max([max(plot.injected_y) for plot in plots])
    return rate.NDBins(
        (rate.LogarithmicBins(minx, maxx,
                              512), rate.LogarithmicBins(miny, maxy, 512)))
Example #19
def compute_search_efficiency_in_bins(found, total, ndbins, sim_to_bins_function = lambda sim: (sim.distance,)):
	"""
	This function computes the search efficiency in the provided ndbins.  The
	first dimension of ndbins must be the distance.  You must also provide a
	function that maps a sim inspiral row to the correct tuple to index the ndbins.
	"""

	num = rate.BinnedArray(ndbins)
	den = rate.BinnedArray(ndbins)

	# increment the numerator with the found injections
	for sim in found:
		num[sim_to_bins_function(sim)] += 1

	# increment the denominator with the total injections
	for sim in total:
		den[sim_to_bins_function(sim)] += 1

	# sanity check
	assert (num.array <= den.array).all(), "some bins have more found injections than were made"

	# regularize by setting empty bins to zero efficiency
	den.array[numpy.logical_and(num.array == 0, den.array == 0)] = 1

	# pull out the efficiency array, it is the ratio
	eff = rate.BinnedArray(rate.NDBins(ndbins), array = num.array / den.array)

	# compute binomial uncertainties in each bin
	k = num.array
	N = den.array
	eff_lo_arr = ( N*(2*k + 1) - numpy.sqrt(4*N*k*(N - k) + N**2) ) / (2*N*(N + 1))
	eff_hi_arr = ( N*(2*k + 1) + numpy.sqrt(4*N*k*(N - k) + N**2) ) / (2*N*(N + 1))

	eff_lo = rate.BinnedArray(rate.NDBins(ndbins), array = eff_lo_arr)
	eff_hi = rate.BinnedArray(rate.NDBins(ndbins), array = eff_hi_arr)

	return eff_lo, eff, eff_hi
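A numerical sketch of the binomial uncertainty formula above (pure numpy): for k found out of N made, the bounds straddle k/N and stay within [0, 1].

import numpy

k = numpy.array([0., 5., 10.])
N = numpy.array([10., 10., 10.])

eff = k / N   # 0.0, 0.5, 1.0
lo = (N*(2*k + 1) - numpy.sqrt(4*N*k*(N - k) + N**2)) / (2*N*(N + 1))
hi = (N*(2*k + 1) + numpy.sqrt(4*N*k*(N - k) + N**2)) / (2*N*(N + 1))
# e.g. for k=5, N=10:  lo ~ 0.35, hi ~ 0.65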
Example #20
def create_efficiency_plot(axes, all_injections, found_injections, detection_threshold, cal_uncertainty):
	filter_width = 16.7
	# formats
	axes.semilogx()
	axes.set_position([0.10, 0.150, 0.86, 0.77])

	# set desired yticks
	axes.set_yticks((0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0))
	axes.set_yticklabels((r"\(0\)", r"\(0.1\)", r"\(0.2\)", r"\(0.3\)", r"\(0.4\)", r"\(0.5\)", r"\(0.6\)", r"\(0.7\)", r"\(0.8\)", r"\(0.9\)", r"\(1.0\)"))
	axes.xaxis.grid(True, which = "major,minor")
	axes.yaxis.grid(True, which = "major,minor")

	# put made and found injections in the denominators and
	# numerators of the efficiency bins
	bins = rate.NDBins((rate.LogarithmicBins(min(sim.amplitude for sim in all_injections), max(sim.amplitude for sim in all_injections), 400),))
	efficiency_num = rate.BinnedArray(bins)
	efficiency_den = rate.BinnedArray(bins)
	for sim in found_injections:
		efficiency_num[sim.amplitude,] += 1
	for sim in all_injections:
		efficiency_den[sim.amplitude,] += 1

	# generate and plot trend curves.  adjust window function
	# normalization so that denominator array correctly
	# represents the number of injections contributing to each
	# bin:  make w(0) = 1.0.  note that this factor has no
	# effect on the efficiency because it is common to the
	# numerator and denominator arrays.  we do this for the
	# purpose of computing the Poisson error bars, which
	# requires us to know the counts for the bins
	windowfunc = rate.gaussian_window(filter_width)
	windowfunc /= windowfunc[len(windowfunc) // 2 + 1]
	rate.filter_array(efficiency_num.array, windowfunc)
	rate.filter_array(efficiency_den.array, windowfunc)

	# regularize:  adjust unused bins so that the efficiency is
	# 0, not NaN
	assert (efficiency_num.array <= efficiency_den.array).all()
	efficiency_den.array[(efficiency_num.array == 0) & (efficiency_den.array == 0)] = 1

	line1, A50, A50_err = render_data_from_bins(open("string_efficiency.dat", "w"), axes, efficiency_num, efficiency_den, cal_uncertainty, filter_width, colour = "k", linestyle = "-", erroralpha = 0.2)

	# add a legend to the axes
	axes.legend((line1,), (r"\noindent Injections recovered with $\log \Lambda > %.2f$" % detection_threshold,), loc = "lower right")

	# adjust limits
	axes.set_xlim([3e-22, 3e-19])
	axes.set_ylim([0.0, 1.0])
Example #21
def compute_search_volume(eff):
	"""
	Integrate efficiency to get search volume.
	"""
	# get distance bins
	ndbins = eff.bins
	dx = ndbins[0].upper() - ndbins[0].lower()
	r = ndbins[0].centres()

	# we have one less dimension on the output
	vol = rate.BinnedArray(rate.NDBins(ndbins[1:]))

	# integrate efficiency to obtain volume
	vol.array = numpy.trapz(eff.array.T * 4. * numpy.pi * r**2, r, dx)

	return vol
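A quick sanity check of the integral (plain numpy sketch): unit efficiency out to radius R should give approximately the sphere volume 4*pi*R**3/3.

import numpy

r = numpy.linspace(0.0, 100.0, 1000)   # distance bin centres, Mpc
eff = numpy.ones_like(r)               # perfect efficiency everywhere

vol = numpy.trapz(eff * 4. * numpy.pi * r**2, r)
# vol is close to 4./3. * numpy.pi * 100.**3 ~ 4.19e6 Mpc^3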
Example #22
    def _bin_events(self, binning=None):
        # called internally by finish()
        if binning is None:
            minx, maxx = min(self.injected_x), max(self.injected_x)
            miny, maxy = min(self.injected_y), max(self.injected_y)
            binning = rate.NDBins((rate.LogarithmicBins(minx, maxx, 256),
                                   rate.LogarithmicBins(miny, maxy, 256)))

        self.efficiency = rate.BinnedRatios(binning)

        for xy in zip(self.injected_x, self.injected_y):
            self.efficiency.incdenominator(xy)
        for xy in zip(self.found_x, self.found_y):
            self.efficiency.incnumerator(xy)

        # 1 / error^2 is the number of injections that need to be
        # within the window in order for the fractional uncertainty
        # in that number to be = error.  multiplying by
        # bins_per_inj tells us how many bins the window needs to
        # cover, and taking the square root translates that into
        # the window's length on a side in bins.  because the
        # contours tend to run parallel to the x axis, the window
        # is dilated in that direction to improve resolution.

        bins_per_inj = self.efficiency.used() / float(len(self.injected_x))
        self.window_size_x = self.window_size_y = math.sqrt(bins_per_inj /
                                                            self.error**2)
        self.window_size_x *= math.sqrt(2)
        self.window_size_y /= math.sqrt(2)
        if self.window_size_x > 100 or self.window_size_y > 100:
            # program will take too long to run
            raise ValueError(
                "smoothing filter too large (not enough injections)")

        print("The smoothing window for %s is %g x %g bins" % ("+".join(
            self.instruments), self.window_size_x, self.window_size_y),
              end=' ',
              file=sys.stderr)
        print("which is %g%% x %g%% of the binning" %
              (100.0 * self.window_size_x / binning[0].n,
               100.0 * self.window_size_y / binning[1].n),
              file=sys.stderr)
Example #23
def rate_posterior_from_samples(samples):
    """
	Construct and return a BinnedArray containing a histogram of a
	sequence of samples.  If limits is None (default) then the limits
	of the binning will be determined automatically from the sequence,
	otherwise limits is expected to be a tuple or other two-element
	sequence providing the (low, high) limits, and in that case the
	sequence can be a generator.
	"""
    nbins = int(math.sqrt(len(samples)) / 40.)
    assert nbins >= 1, "too few samples to construct histogram"
    lo = samples.min() * (1. - nbins / len(samples))
    hi = samples.max() * (1. + nbins / len(samples))
    ln_pdf = rate.BinnedLnPDF(
        rate.NDBins((rate.LogarithmicBins(lo, hi, nbins), )))
    count = ln_pdf.count  # avoid attribute look-up in loop
    for sample in samples:
        count[sample, ] += 1.
    rate.filter_array(ln_pdf.array, rate.gaussian_window(5), use_fft=False)
    ln_pdf.normalize()
    return ln_pdf
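Usage sketch (assumes numpy; the nbins formula needs at least 1600 samples for the assertion to pass):

import numpy

samples = numpy.random.lognormal(mean=0.0, sigma=0.5, size=10000)
ln_pdf = rate_posterior_from_samples(samples)
# ln_pdf is a BinnedLnPDF over a logarithmic binning spanning the samples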
Example #24
def plot_range(found_inj, missed_inj, seg_bins, tlohi, dlohi, horizon_history, colors = {'H1': numpy.array((1.0, 0.0, 0.0)), 'L1':  numpy.array((0.0, 0.8, 0.0)), 'V1':  numpy.array((1.0, 0.0, 1.0))}, fig = None, axes = None):

	tlo, thi = tlohi
	dlo, dhi = dlohi
	if fig is None:
		fig = plt.figure()
	if axes is None:
		axes = fig.add_subplot(111)

	# FIXME Add number of distance bins as option
	ndbins = rate.NDBins((rate.LinearBins(dlo, dhi, int(dhi - dlo + 1)), rate.IrregularBins(seg_bins)))
	vol, err = imr_utils.compute_search_volume_in_bins([f[1] for f in found_inj], missed_inj + [f[1] for f in found_inj], ndbins, lambda sim: (sim.distance, sim.geocent_end_time))

	x = vol.bins[0].lower()
	dx = vol.bins[0].upper() - vol.bins[0].lower()
	y = (vol.array * 3./ (4. * math.pi))**(1./3.)
	yerr = (1./3.) * (3./(4.*math.pi))**(1./3.)*vol.array**(-2./3.) * err.array
	yerr[~numpy.isfinite(yerr)] = 0.
	err_lo = y - 2.0 * yerr
	err_lo[err_lo<=0] = 0.
	err_hi = y + 2.0 * yerr

	axes.bar(x, err_hi-err_lo, bottom=err_lo, color='c', alpha=0.6, label='95\% confidence interval\non range estimated \nfrom injections', width=dx, linewidth=0)
	for ifo in horizon_history.keys():
		horizon_times = numpy.array(list(horizon_history[ifo].keys())).clip(tlo,thi)
		sensemon_range = numpy.array([horizon_history[ifo][seg]/2.26 for seg in horizon_times])
		axes.scatter(horizon_times.compress(horizon_times <= horizon_history[ifo].maxkey()), sensemon_range, s = 1, color = colors[ifo], label='%s SenseMon Range' % ifo, alpha=1.0)


	xticks = numpy.linspace(tlo,thi,9)
	x_format = tkr.FuncFormatter(lambda x, pos: datetime.datetime(*GPSToUTC(int(x))[:7]).strftime("%Y-%m-%d, %H:%M:%S UTC"))
	axes.set_ylabel('Range (Mpc)')
	axes.set_xlim(tlo,thi)
	axes.set_ylim(0,5*(int(max(err_hi)/5.)+1))
	axes.xaxis.set_major_formatter(x_format)
	axes.xaxis.set_ticks(xticks)
	axes.grid(color=(0.1,0.4,0.5), linewidth=2)
	return fig, axes
Example #25
def plot_sensitivity_vs_far(volumes, fars, livetime, ifos, bins, bin_type):
	fig = plt.figure()
	fig.set_size_inches((8., 8. / plotutil.golden_ratio))
	ax_far = fig.gca()

	# plot the volume/range versus far/snr for each bin
	mbins = rate.NDBins(bins[1:])
	labels = []
	for lo, center, hi, label in volumes_bins_to_range_label(volumes, mbins, bin_type):
		labels.append(label)

		# NOTE create regular plots, and define log x,y scales below
		#      since otherwise, fill_between allocates too many blocks and crashes
		line, = ax_far.plot(fars, center, label=label, linewidth=2)
		ax_far.fill_between(fars, lo, hi, alpha=0.5, color=line.get_color())

	ax_far.set_xlabel("Combined FAR (Hz)")
	ax_far.set_ylabel(r"Volume $\times$ Time ($\mathrm{Mpc}^3 \mathrm{yr}$)")
	ax_far.set_xscale("log")
	ax_far.set_yscale("log")
	ax_far.set_xlim([min(fars), max(fars)])
	ax_far.invert_xaxis()
	ax_far.legend(loc="lower left")
	ax_far.grid()

	vol_tix = ax_far.get_yticks()
	tx = ax_far.twinx() # map volume to distance
	tx.set_yticks(vol_tix)
	tx.set_yscale("log")
	tx.set_ylim(ax_far.get_ylim())
	tx.set_yticklabels(["%.3g" % (vt_to_range(float(k), livetime[ifos])) for k in vol_tix])
	tx.set_ylabel("Range (Mpc)")

	ax_far.set_title("%s Observing (%.2f days)" % ("".join(sorted(list(ifos))), livetime[ifos]*365.25))
	fig.tight_layout(pad = .8)

	return fig
Example #26
class LnLRDensity(snglcoinc.LnLRDensity):
    # SNR^2, chi^2/snr^2 binning definition
    snr2_chi2_binning = rate.NDBins((rate.ATanLogarithmicBins(10, 1e3, 801),
                                     rate.ATanLogarithmicBins(1e-3, 1.0, 801)))
    snr2_duration_binning = rate.NDBins(
        (rate.ATanLogarithmicBins(10, 1e3, 801),
         rate.ATanLogarithmicBins(1e-4, 1e1, 801)))

    def __init__(self,
                 instruments,
                 delta_t,
                 snr_threshold,
                 num_templates,
                 min_instruments=2):
        # check input
        if min_instruments < 2:
            raise ValueError("min_instruments=%d must be >=2" %
                             min_instruments)
        if min_instruments > len(instruments):
            raise ValueError(
                "not enough instruments (%s) to satisfy min_instruments=%d" %
                (", ".join(sorted(instruments)), min_instruments))
        assert delta_t > 0 and snr_threshold > 0

        self.instruments = frozenset(instruments)
        self.delta_t = delta_t
        self.snr_threshold = snr_threshold
        self.num_templates = num_templates
        self.min_instruments = min_instruments
        self.densities = {}
        for instrument in self.instruments:
            self.densities["%s_snr2_chi2" % instrument] = rate.BinnedLnPDF(
                self.snr2_chi2_binning)
            self.densities["%s_snr2_duration" % instrument] = rate.BinnedLnPDF(
                self.snr2_duration_binning)

    def __call__(self):
        try:
            interps = self.interps
        except AttributeError:
            self.mkinterps()
            interps = self.interps

    def __iadd__(self, other):
        if type(self) != type(other) or set(self.densities) != set(
                other.densities):
            raise TypeError("cannot add %s and %s" % (type(self), type(other)))
        for key, lnpdf in self.densities.items():
            lnpdf += other.densities[key]
        try:
            del self.interps
        except AttributeError:
            pass
        return self

    def increment(self, event):
        self.densities["%s_snr2_chi2" %
                       event.ifo].count[event.snr**2., event.chisq /
                                        event.chisq_dof / event.snr**2.] += 1.0
        self.densities["%s_snr2_duration" %
                       event.ifo].count[event.snr**2., event.duration] += 1.0

    def copy(self):
        new = type(self)(self.instruments,
                         self.delta_t,
                         self.snr_threshold,
                         self.num_templates,
                         min_instruments=self.min_instruments)
        for key, lnpdf in self.densities.items():
            new.densities[key] = lnpdf.copy()
        return new

    def mkinterps(self):
        self.interps = dict(
            (key, lnpdf.mkinterp()) for key, lnpdf in self.densities.items())

    def finish(self):
        snrsq_kernel_width_at_64 = 16.
        chisq_kernel_width = 0.02
        sigma = 10.
        for key, lnpdf in self.densities.items():
            if key.endswith("_snr2_chi2"):
                numsamples = max(lnpdf.array.sum() / 10. + 1., 1e3)
                snrsq_bins, chisq_bins = lnpdf.bins
                snrsq_per_bin_at_64 = (snrsq_bins.upper() -
                                       snrsq_bins.lower())[snrsq_bins[64.]]
                chisq_per_bin_at_0_02 = (chisq_bins.upper() -
                                         chisq_bins.lower())[chisq_bins[0.02]]

                # apply Silverman's rule so that the width scales
                # with numsamples**(-1./6.) for a 2D PDF
                snrsq_kernel_bins = snrsq_kernel_width_at_64 / snrsq_per_bin_at_64 / numsamples**(
                    1. / 6.)
                chisq_kernel_bins = chisq_kernel_width / chisq_per_bin_at_0_02 / numsamples**(
                    1. / 6.)

                # check the size of the kernel. We don't ever let
                # it get smaller than the 2.5 times the bin size
                if snrsq_kernel_bins < 2.5:
                    snrsq_kernel_bins = 2.5
                    warnings.warn("Replacing snrsq kernel bins with 2.5")
                if chisq_kernel_bins < 2.5:
                    chisq_kernel_bins = 2.5
                    warnings.warn("Replacing chisq kernel bins with 2.5")

                # convolve bin count with density estimation kernel
                rate.filter_array(
                    lnpdf.array,
                    rate.gaussian_window(snrsq_kernel_bins,
                                         chisq_kernel_bins,
                                         sigma=sigma))

                # zero everything below the SNR cutoff. need to do the slicing
                # ourselves to avoid zero-ing the at-threshold bin
                lnpdf.array[:lnpdf.bins[0][self.snr_threshold], :] = 0.
            elif key.endswith("_snr2_duration"):
                # FIXME the duration filter kernel is left as a guess
                rate.filter_array(
                    lnpdf.array,
                    rate.gaussian_window(snrsq_kernel_bins, 11, sigma=sigma))
                # zero everything below the SNR cutoff. need to do the slicing
                # ourselves to avoid zero-ing the at-threshold bin
                lnpdf.array[:lnpdf.bins[0][self.snr_threshold], :] = 0.
            else:
                # shouldn't get here
                raise Exception
            lnpdf.normalize()
        self.mkinterps()

        #
        # never allow PDFs that have had the density estimation
        # transform applied to be written to disk:  on-disk files
        # must only ever provide raw counts.  also don't allow
        # density estimation to be applied twice
        #

        def to_xml(*args, **kwargs):
            raise NotImplementedError(
                "writing .finish()'ed LnLRDensity object to disk is forbidden")

        self.to_xml = to_xml

        def finish(*args, **kwargs):
            raise NotImplementedError(
                ".finish()ing a .finish()ed LnLRDensity object is forbidden")

        self.finish = finish

    def to_xml(self, name):
        xml = super(LnLRDensity, self).to_xml(name)
        xml.appendChild(
            ligolw_param.Param.from_pyvalue(
                u"instruments",
                lsctables.instrumentsproperty.set(self.instruments)))
        xml.appendChild(
            ligolw_param.Param.from_pyvalue(u"delta_t", self.delta_t))
        xml.appendChild(
            ligolw_param.Param.from_pyvalue(u"snr_threshold",
                                            self.snr_threshold))
        xml.appendChild(
            ligolw_param.Param.from_pyvalue(u"num_templates",
                                            self.num_templates))
        xml.appendChild(
            ligolw_param.Param.from_pyvalue(u"min_instruments",
                                            self.min_instruments))
        for key, lnpdf in self.densities.items():
            xml.appendChild(lnpdf.to_xml(key))
        return xml

    @classmethod
    def from_xml(cls, xml, name):
        xml = cls.get_xml_root(xml, name)
        self = cls(
            instruments=lsctables.instrumentsproperty.get(
                ligolw_param.get_pyvalue(xml, u"instruments")),
            delta_t=ligolw_param.get_pyvalue(xml, u"delta_t"),
            snr_threshold=ligolw_param.get_pyvalue(xml, u"snr_threshold"),
            num_templates=ligolw_param.get_pyvalue(xml, u"num_templates"),
            min_instruments=ligolw_param.get_pyvalue(xml, u"min_instruments"))
        for key in self.densities:
            self.densities[key] = rate.BinnedLnPDF.from_xml(xml, key)
        return self
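The Silverman-style scaling in finish() can be sketched in isolation (hypothetical numbers): the kernel width in bins shrinks with the sample count as numsamples**(-1./6.) for a two-dimensional PDF.

# hypothetical values mirroring the _snr2_chi2 branch above
snrsq_kernel_width_at_64 = 16.
snrsq_per_bin_at_64 = 0.5    # bin width in SNR^2 at SNR^2 = 64
numsamples = 1e4

snrsq_kernel_bins = snrsq_kernel_width_at_64 / snrsq_per_bin_at_64 / numsamples**(1. / 6.)
# clamp as in finish():  never narrower than 2.5 bins
snrsq_kernel_bins = max(snrsq_kernel_bins, 2.5)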
Example #27
	def finish(self, threshold):
		# bin the injections

		self._bin_events()

		# use the same binning for the found injection density as
		# was constructed for the efficiency

		self.found_density = rate.BinnedArray(self.efficiency.denominator.bins)

		# construct the amplitude weighting function

		amplitude_weight = rate.BinnedArray(rate.NDBins((rate.LinearBins(threshold - 100, threshold + 100, 10001),)))

		# gaussian window's width is the number of bins
		# corresponding to 10 units of amplitude, which is computed
		# by dividing 10 by the "volume" of the bin corresponding
		# to threshold.  index is the index of the element in
		# amplitude_weight corresponding to the threshold.

		index, = amplitude_weight.bins[threshold,]
		window = rate.gaussian_window(10.0 / amplitude_weight.bins.volumes()[index])
		window /= 10 * window[(len(window) - 1) // 2]

		# set the window data into the BinnedArray object.  the
		# Gaussian peaks on the middle element of the window, which
		# we want to place at index in the amplitude_weight array.

		lo = index - (len(window) - 1) // 2
		hi = lo + len(window)
		if lo < 0 or hi > len(amplitude_weight.array):
			raise ValueError("amplitude weighting window too large")
		amplitude_weight.array[lo:hi] = window

		# store the recovered injections in the found density bins
		# weighted by amplitude

		for x, y, z in self.recovered_xyz:
			try:
				weight = amplitude_weight[z,]
			except IndexError:
				# beyond the edge of the window
				weight = 0.0
			self.found_density[x, y] += weight

		# the efficiency is only valid up to the highest energy
		# that has been injected.  this creates problems later on
		# so, instead, for each frequency, identify the highest
		# energy that has been measured and copy the values for
		# that bin's numerator and denominator into the bins for
		# all higher energies.  do the same for the counts in the
		# found injection density bins.
		#
		# numpy.indices() returns two arrays, the first of
		# which has each element set equal to its x index, the
		# second has each element set equal to its y index, we keep
		# the latter.  meanwhile numpy.roll() cyclically permutes
		# the efficiency bins down one along the y (energy) axis.
		# from this, the conditional finds bins whose denominator
		# is greater than 0 while the bin immediately above in
		# energy has a denominator <= 0.  we select the elements
		# from the y index array where the conditional is true, and
		# then use numpy.max() along the y direction to return the
		# highest such y index for each x index, which is a 1-D
		# array.  finally, enumerate() is used to iterate over x
		# index and corresponding y index, and if the y index is
		# not negative (was found) the value from that x-y bin is
		# copied to all higher bins.

		n = self.efficiency.numerator.array
		d = self.efficiency.denominator.array
		f = self.found_density.array
		bady = -1
		for x, y in enumerate(numpy.max(numpy.where((d > 0) & (numpy.roll(d, -1, axis = 1) <= 0), numpy.indices(d.shape)[1], bady), axis = 1)):
			if y != bady:
				n[x, y + 1:] = n[x, y]
				d[x, y + 1:] = d[x, y]
				f[x, y + 1:] = f[x, y]

		# now do the same for the bins at energies below those that
		# have been measured.

		bady = d.shape[1]
		for x, y in enumerate(numpy.min(numpy.where((d > 0) & (numpy.roll(d, 1, axis = 1) <= 0), numpy.indices(d.shape)[1], bady), axis = 1)):
			if y != bady:
				n[x, 0:y] = n[x, y]
				d[x, 0:y] = d[x, y]
				f[x, 0:y] = f[x, y]

		diagnostic_plot(self.efficiency.numerator.array, self.efficiency.denominator.bins, r"Efficiency Numerator (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_numerator_before.png")
		diagnostic_plot(self.efficiency.denominator.array, self.efficiency.denominator.bins, r"Efficiency Denominator (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_denominator_before.png")
		diagnostic_plot(self.found_density.array, self.efficiency.denominator.bins, r"Injections Lost / Unit of Threshold (Before Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_found_density_before.png")

		# smooth the efficiency bins and the found injection
		# density bins using the same 2D window.

		window = rate.gaussian_window(self.window_size_x, self.window_size_y)
		window /= window[tuple((numpy.array(window.shape) - 1) // 2)]
		rate.filter_binned_ratios(self.efficiency, window)
		rate.filter_array(self.found_density.array, window)

		diagnostic_plot(self.efficiency.numerator.array, self.efficiency.denominator.bins, r"Efficiency Numerator (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_numerator_after.png")
		diagnostic_plot(self.efficiency.denominator.array, self.efficiency.denominator.bins, r"Efficiency Denominator (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_efficiency_denominator_after.png")
		diagnostic_plot(self.found_density.array, self.efficiency.denominator.bins, r"Injections Lost / Unit of Threshold (After Averaging)", self.amplitude_lbl, "lalapps_excesspowerfinal_found_density_after.png")

		# compute the uncertainties in the efficiency and its
		# derivative by assuming these to be the binomial counting
		# fluctuations in the numerators.

		p = self.efficiency.ratio()
		self.defficiency = numpy.sqrt(p * (1 - p) / self.efficiency.denominator.array)
		p = self.found_density.array / self.efficiency.denominator.array
		self.dfound_density = numpy.sqrt(p * (1 - p) / self.efficiency.denominator.array)
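The numpy.where/numpy.roll/numpy.indices idiom used above, on a toy array (a sketch): for each row, find the highest column whose entry is positive while the next column's entry is not.

import numpy

d = numpy.array([[1., 2., 0., 0.],
                 [0., 3., 4., 0.]])
bady = -1
edge = numpy.max(
    numpy.where((d > 0) & (numpy.roll(d, -1, axis=1) <= 0),
                numpy.indices(d.shape)[1], bady),
    axis=1)
# edge == [1, 2]:  the last populated column in each row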
Example #28
def SNRPDF(instruments,
           horizon_distances,
           snr_cutoff,
           n_samples=200000,
           bins=rate.ATanLogarithmicBins(3.6, 1e3, 150)):
    """
	Precomputed SNR PDF for each detector
	Returns a BinnedArray containing
	P(snr_{inst1}, snr_{inst2}, ... | signal seen in exactly
		{inst1, inst2, ...} in a network of instruments
		with a given set of horizon distances)
	
	i.e., the joint probability density of observing a set of
	SNRs conditional on them being the result of a signal that
	has been recovered in a given subset of the instruments in
	a network of instruments with a given set of horizon
	distances.

	The axes of the PDF correspond to the instruments in
	alphabetical order.  The binning used for all axes is set
	with the bins parameter.

	The n_samples parameter sets the number of iterations for
	the internal Monte Carlo sampling loop.
	"""
    if n_samples < 1:
        raise ValueError("n_samples=%d must be >= 1" % n_samples)

    # get instrument names
    instruments = sorted(instruments)
    if len(instruments) < 1:
        raise ValueError(instruments)
    # get the horizon distances in the same order
    DH_times_8 = 8. * numpy.array(
        [horizon_distances[inst] for inst in instruments])
    # get detector responses in the same order
    resps = [
        lalsimulation.DetectorPrefixToLALDetector(str(inst)).response
        for inst in instruments
    ]

    # get horizon distances and responses of remaining
    # instruments (order doesn't matter as long as they're in
    # the same order)
    DH_times_8_other = 8. * numpy.array([
        dist
        for inst, dist in horizon_distances.items() if inst not in instruments
    ])
    resps_other = tuple(
        lalsimulation.DetectorPrefixToLALDetector(str(inst)).response
        for inst in horizon_distances if inst not in instruments)

    # initialize the PDF array, and pre-construct the sequence of
    # snr, d(snr) tuples. since the last SNR bin probably has
    # infinite size, we remove it from the sequence
    # (meaning the PDF will be left 0 in that bin)
    pdf = rate.BinnedArray(rate.NDBins([bins] * len(instruments)))
    snr_sequence = rate.ATanLogarithmicBins(3.6, 1e3, 500)
    snr_snrlo_snrhi_sequence = numpy.array(
        list(zip(snr_sequence.centres(), snr_sequence.lower(),
                 snr_sequence.upper()))[:-1])

    # compute the SNR at which to begin iterations over bins
    assert type(snr_cutoff) is float
    snr_min = snr_cutoff - 3.0
    assert snr_min > 0.0

    # we select random uniformly-distributed right ascensions
    # so there's no point in also choosing random GMSTs and any
    # value is as good as any other
    gmst = 0.0

    # run the sampler for the requested # of iterations.  save some
    # symbols to avoid doing module attribute look-ups in the
    # loop
    acos = math.acos
    random_uniform = random.uniform
    twopi = 2. * math.pi
    pi_2 = math.pi / 2.
    xlal_am_resp = lal.ComputeDetAMResponse
    # FIXME:  scipy.stats.rice.rvs broken on reference OS.
    # switch to it when we can rely on a new-enough scipy
    #rice_rvs = stats.rice.rvs	# broken on reference OS
    # the .reshape is needed in the event that x is a 1x1
    # array:  numpy returns a scalar from sqrt(), but we must
    # have something that we can iterate over
    rice_rvs = lambda x: numpy.sqrt(stats.ncx2.rvs(2., x**2.)).reshape(x.shape)

    for i in range(n_samples):
        # select random sky location and source orbital
        # plane inclination
        # the signal is linearly polarized, and h_cross = 0
        # is assumed, so we need only F+ (its absolute value).
        ra = random_uniform(0., twopi)
        dec = pi_2 - acos(random_uniform(-1., 1.))
        psi = random_uniform(0., twopi)
        fplus = tuple(
            abs(xlal_am_resp(resp, ra, dec, psi, gmst)[0]) for resp in resps)

        # 1/8 ratio of inverse SNR to distance for each instrument
        # (1/8 because horizon distance is defined for an SNR of 8,
        # and we've omitted that factor for performance)
        snr_times_D = DH_times_8 * fplus

        # snr * D in instrument whose SNR grows fastest
        # with decreasing D
        max_snr_times_D = snr_times_D.max()

        # snr_times_D.min() / snr_min = the furthest a
        # source can be and still be above snr_min in all
        # instruments involved.  max_snr_times_D / that
        # distance = the SNR that distance corresponds to
        # in the instrument whose SNR grows fastest with
        # decreasing distance --- the SNR the source has in
        # the most sensitive instrument when visible to all
        # instruments in the combo
        try:
            start_index = snr_sequence[max_snr_times_D /
                                       (snr_times_D.min() / snr_min)]
        except ZeroDivisionError:
            # one of the instruments that must be able
            # to see the event is blind to it
            continue

        # min_D_other is minimum distance at which source
        # becomes visible in an instrument that isn't
        # involved.  max_snr_times_D / min_D_other gives
        # the SNR in the most sensitive instrument at which
        # the source becomes visible to one of the
        # instruments not allowed to participate
        if len(DH_times_8_other):
            min_D_other = (DH_times_8_other * fplus).min() / snr_cutoff
            try:
                end_index = snr_sequence[max_snr_times_D / min_D_other] + 1
            except ZeroDivisionError:
                # all instruments that must not see
                # it are blind to it
                end_index = None
        else:
            # there are no other instruments
            end_index = None

        # if start_index >= end_index then in order for the
        # source to be close enough to be visible in all
        # the instruments that must see it it is already
        # visible to one or more instruments that must not.
        # don't need to check for this, the for loop that
        # comes next will simply not have any iterations.

        # iterate over the nominal SNRs (= noise-free SNR
        # in the most sensitive instrument) at which we
        # will add weight to the PDF.  from the SNR in
        # most sensitive instrument, the distance to the
        # source is:
        #
        #	D = max_snr_times_D / snr
        #
        # and the (noise-free) SNRs in all instruments are:
        #
        #	snr_times_D / D
        #
        # scipy's Rice-distributed RV code is used to
        # add the effect of background noise, converting
        # the noise-free SNRs into simulated observed SNRs
        #
        # number of sources b/w Dlo and Dhi:
        #
        #	d count \propto D^2 |dD|
        #	  count \propto Dhi^3 - Dlo**3
        D_Dhi_Dlo_sequence = max_snr_times_D / snr_snrlo_snrhi_sequence[
            start_index:end_index]
        for snr, weight in zip(
                rice_rvs(snr_times_D /
                         numpy.reshape(D_Dhi_Dlo_sequence[:, 0],
                                       (len(D_Dhi_Dlo_sequence), 1))),
                D_Dhi_Dlo_sequence[:, 1]**3. - D_Dhi_Dlo_sequence[:, 2]**3.):
            pdf[tuple(snr)] += weight

    # check for divide-by-zeros that weren't caught.  also
    # finds nans if they are there
    assert numpy.isfinite(pdf.array).all()

    # convolve samples with gaussian kernel
    rate.filter_array(pdf.array,
                      rate.gaussian_window(*(1.875, ) * len(pdf.array.shape)))
    # protect against round-off in FFT convolution leading to
    # negative values in the PDF
    numpy.clip(pdf.array, 0., PosInf, pdf.array)
    # zero counts in bins that are below the trigger threshold.
    # have to convert SNRs to indexes ourselves and adjust so
    # that we don't zero the bin in which the SNR threshold
    # falls
    range_all = slice(None, None)
    range_low = slice(None, pdf.bins[0][snr_cutoff])
    for i in range(len(instruments)):
        slices = [range_all] * len(instruments)
        slices[i] = range_low
        pdf.array[tuple(slices)] = 0.
    # convert bin counts to normalized PDF
    pdf.to_pdf()
    # one last sanity check
    assert numpy.isfinite(pdf.array).all()
    # done
    return pdf
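The per-axis slicing used to zero sub-threshold bins, shown on a toy 2-D array (a sketch consistent with the loop above):

import numpy

arr = numpy.arange(16.).reshape(4, 4)
threshold_index = 2
for axis in range(arr.ndim):
    slices = [slice(None)] * arr.ndim
    slices[axis] = slice(None, threshold_index)
    arr[tuple(slices)] = 0.
# rows 0-1 and columns 0-1 are now zero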
Example #29
 def __init__(self, interval, width):
     # 21 bins per filter width
     bins = int(float(abs(interval)) / width) * 21
     self.binning = rate.NDBins((rate.LinearBins(interval[0], interval[1],
                                                 bins), ))
     self.data = {}


options, filenames = parse_command_line()

#
# =============================================================================
#
#   Custom SnglBurstTable append() method to put triggers directly into bins
#
# =============================================================================
#

nbins = int(
    float(abs(options.read_segment)) / options.window) * bins_per_filterwidth
binning = rate.NDBins((rate.LinearBins(options.read_segment[0],
                                       options.read_segment[1], nbins), ))
trigger_rate = rate.BinnedDensity(binning)

num_triggers = 0


def snglburst_append(self, row, verbose=options.verbose):
    global num_triggers
    t = row.peak
    if t in options.read_segment:
        trigger_rate.count[t, ] += 1.0
    num_triggers += 1
    if verbose and not (num_triggers % 125):
        print("sngl_burst rows read:  %d\r" % num_triggers, end="", file=sys.stderr)