Example #1
    def __init__(self,
                 instruments,
                 delta_t,
                 snr_threshold,
                 num_templates,
                 min_instruments=2):
        # check input
        if min_instruments < 2:
            raise ValueError("min_instruments=%d must be >=2" %
                             min_instruments)
        if min_instruments > len(instruments):
            raise ValueError(
                "not enough instruments (%s) to satisfy min_instruments=%d" %
                (", ".join(sorted(instruments)), min_instruments))
        assert delta_t > 0 and snr_threshold > 0

        self.instruments = frozenset(instruments)
        self.delta_t = delta_t
        self.snr_threshold = snr_threshold
        self.num_templates = num_templates
        self.min_instruments = min_instruments
        self.densities = {}
        for instrument in self.instruments:
            # one (SNR^2, chi^2) and one (SNR^2, duration) PDF per
            # instrument;  the binnings are class-level attributes
            self.densities["%s_snr2_chi2" % instrument] = rate.BinnedLnPDF(
                self.snr2_chi2_binning)
            self.densities["%s_snr2_duration" % instrument] = rate.BinnedLnPDF(
                self.snr2_duration_binning)
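
A minimal usage sketch, not part of the original: exercising the constructor's input validation. The class name Density is a hypothetical stand-in for whichever class defines the method above.

# hypothetical class name; one instrument cannot satisfy min_instruments=2
try:
    Density(instruments=("H1",),
            delta_t=0.005,
            snr_threshold=4.,
            num_templates=1000,
            min_instruments=2)
except ValueError as exc:
    print(exc)  # not enough instruments (H1) to satisfy min_instruments=2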
Example #2
	def __init__(self, rankingstat, nsamples = 2**24, verbose = False):
		#
		# bailout used by .from_xml() class method to get an
		# uninitialized instance
		#

		if rankingstat is None:
			return

		#
		# initialize binnings
		#

		self.noise_lr_lnpdf = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-10., 30., 3000),)))
		self.signal_lr_lnpdf = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-10., 30., 3000),)))
		self.candidates_lr_lnpdf = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-10., 30., 3000),)))

		#
		# obtain analyzed segments that will be used to obtain livetime
		#

		self.segments = segmentsUtils.vote(rankingstat.denominator.triggerrates.segmentlistdict().values(), rankingstat.min_instruments)

		#
		# run importance-weighted random sampling to populate binnings.
		#

		self.signal_lr_lnpdf.array, self.noise_lr_lnpdf.array = binned_log_likelihood_ratio_rates_from_samples(
			self.signal_lr_lnpdf,
			self.noise_lr_lnpdf,
			rankingstat.ln_lr_samples(rankingstat.denominator.random_params(), rankingstat),
			nsamples = nsamples)

		if verbose:
			print("done computing ranking statistic PDFs", file=sys.stderr) 

		#
		# apply density estimation kernels to counts
		#

		self.density_estimate(self.noise_lr_lnpdf, "noise model")
		self.density_estimate(self.signal_lr_lnpdf, "signal model")

		#
		# set the total sample count in the noise and signal
		# ranking statistic histogram equal to the total expected
		# count of the respective events from the experiment. This
		# information is required so that when adding ranking
		# statistic PDFs in our .__iadd__() method they are
		# combined with the correct relative weights, so that
		# .__iadd__() has the effect of marginalizing the
		# distribution over the experiments being combined.
		#

		self.noise_lr_lnpdf.array /= self.noise_lr_lnpdf.array.sum()
		self.noise_lr_lnpdf.normalize()
		self.signal_lr_lnpdf.array /= self.signal_lr_lnpdf.array.sum()
		self.signal_lr_lnpdf.normalize()
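
The helper binned_log_likelihood_ratio_rates_from_samples() is not shown in this example. Below is a hedged sketch of what such a function plausibly does; the (ln_lr, ln_signal_weight, ln_noise_weight) sample format is an assumption, not something the example confirms.

import itertools
import math


def binned_log_likelihood_ratio_rates_from_samples(signal_lr_lnpdf, noise_lr_lnpdf, samples, nsamples):
	# accumulate importance weights into the signal and noise count
	# histograms;  the sample format is an assumption (see above)
	signal_count = signal_lr_lnpdf.count
	noise_count = noise_lr_lnpdf.count
	for ln_lr, ln_w_signal, ln_w_noise in itertools.islice(samples, nsamples):
		signal_count[ln_lr,] += math.exp(ln_w_signal)
		noise_count[ln_lr,] += math.exp(ln_w_noise)
	return signal_lr_lnpdf.array, noise_lr_lnpdf.array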
Example #3
	def __init__(self, instruments):
		self.densities = {}
		for instrument in instruments:
			self.densities["%s_snr2_chi2" % instrument] = rate.BinnedLnPDF(rate.NDBins((rate.ATanLogarithmicBins(10, 1e7, 801), rate.ATanLogarithmicBins(.1, 1e4, 801))))
		for pair in itertools.combinations(sorted(instruments), 2):
			dt = 0.005 + snglcoinc.light_travel_time(*pair)	# seconds
			self.densities["%s_%s_dt" % pair] = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-dt, +dt, 801),)))
			self.densities["%s_%s_dA" % pair] = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-0.5, +0.5, 801),)))
			self.densities["%s_%s_df" % pair] = rate.BinnedLnPDF(rate.NDBins((rate.ATanBins(-0.2, +0.2, 501),)))
		# only non-negative rss timing residual bins will be used
		# but we want a binning that's linear at the origin so
		# instead of inventing a new one we just use atan bins that
		# are symmetric about 0
		self.densities["instrumentgroup,rss_timing_residual"] = rate.BinnedLnPDF(rate.NDBins((InstrumentBins(names = instruments), rate.ATanBins(-0.02, +0.02, 1001))))
Example #4
import math

from lal import rate  # assumed home of the BinnedLnPDF binning utilities


def rate_posterior_from_samples(samples):
    """
	Construct and return a BinnedArray containing a histogram of a
	sequence of samples.  If limits is None (default) then the limits
	of the binning will be determined automatically from the sequence,
	otherwise limits is expected to be a tuple or other two-element
	sequence providing the (low, high) limits, and in that case the
	sequence can be a generator.
	"""
    nbins = int(math.sqrt(len(samples)) / 40.)
    assert nbins >= 1, "too few samples to construct histogram"
    lo = samples.min() * (1. - nbins / len(samples))
    hi = samples.max() * (1. + nbins / len(samples))
    ln_pdf = rate.BinnedLnPDF(
        rate.NDBins((rate.LogarithmicBins(lo, hi, nbins), )))
    count = ln_pdf.count  # avoid attribute look-up in loop
    for sample in samples:
        count[sample, ] += 1.
    rate.filter_array(ln_pdf.array, rate.gaussian_window(5), use_fft=False)
    ln_pdf.normalize()
    return ln_pdf
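
A quick usage sketch: synthetic lognormal samples stand in for real posterior samples (the distribution choice is arbitrary; LogarithmicBins requires positive values). A large sample set is needed for the automatic bin count and the 5-bin Gaussian smoothing kernel to behave.

import numpy

samples = numpy.random.lognormal(mean=0., sigma=.5, size=2**23)
ln_pdf = rate_posterior_from_samples(samples)  # takes a little while
print(ln_pdf[1.0,])  # natural log of the estimated density at 1.0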
Example #5
    def __init__(self,
                 rankingstat,
                 signal_noise_pdfs=None,
                 nsamples=2**24,
                 nthreads=8,
                 verbose=False):
        #
        # bailout used by .from_xml() class method to get an
        # uninitialized instance
        #

        if rankingstat is None:
            return

        #
        # initialize binnings
        #

        self.noise_lr_lnpdf = rate.BinnedLnPDF(
            rate.NDBins((rate.ATanBins(0., 110., 6000), )))
        self.signal_lr_lnpdf = rate.BinnedLnPDF(
            rate.NDBins((rate.ATanBins(0., 110., 6000), )))
        self.zero_lag_lr_lnpdf = rate.BinnedLnPDF(
            rate.NDBins((rate.ATanBins(0., 110., 6000), )))
        self.segments = segmentsUtils.vote(rankingstat.segmentlists.values(),
                                           rankingstat.min_instruments)
        if rankingstat.template_ids is None:
            raise ValueError(
                "cannot be initialized from a RankingStat that is not for a specific set of templates"
            )
        self.template_ids = rankingstat.template_ids

        #
        # bailout used by codes that want all-zeros histograms
        #

        if not nsamples:
            return

        #
        # run importance-weighted random sampling to populate
        # binnings.
        #

        if signal_noise_pdfs is None:
            signal_noise_pdfs = rankingstat

        nthreads = int(nthreads)
        assert nthreads >= 1
        threads = []
        for i in range(nthreads):
            assert nsamples // nthreads >= 1
            q = multiprocessing.SimpleQueue()
            # note:  a lambda target requires fork-style process
            # creation;  lambdas cannot be pickled for "spawn"
            p = multiprocessing.Process(
                target=lambda: self.binned_log_likelihood_ratio_rates_from_samples_wrapper(
                    q,
                    self.signal_lr_lnpdf,
                    self.noise_lr_lnpdf,
                    rankingstat.ln_lr_samples(
                        rankingstat.denominator.random_params(),
                        signal_noise_pdfs),
                    nsamples=nsamples // nthreads))
            p.start()
            threads.append((p, q))
            nsamples -= nsamples // nthreads
            nthreads -= 1
            # stagger process start-up so the workers derive
            # different random number seeds
            time.sleep(1.5)
        while threads:
            p, q = threads.pop(0)
            signal_counts, noise_counts = q.get()
            self.signal_lr_lnpdf.array += signal_counts
            self.noise_lr_lnpdf.array += noise_counts
            p.join()
            if p.exitcode:
                raise Exception("sampling thread failed")
        if verbose:
            print("done computing ranking statistic PDFs", file=sys.stderr)

        #
        # apply density estimation kernels to counts
        #

        self.density_estimate(self.noise_lr_lnpdf, "noise model")
        self.density_estimate(self.signal_lr_lnpdf, "signal model")

        #
        # set the total sample count in the noise and signal
        # ranking statistic histogram equal to the total expected
        # count of the respective events from the experiment.  This
        # information is required so that when adding ranking
        # statistic PDFs in our .__iadd__() method they are
        # combined with the correct relative weights, so that
        # .__iadd__() has the effect of marginalizing the
        # distribution over the experiments being combined.
        #

        self.noise_lr_lnpdf.array *= sum(
            rankingstat.denominator.candidate_count_model().values()
        ) / self.noise_lr_lnpdf.array.sum()
        self.noise_lr_lnpdf.normalize()
        self.signal_lr_lnpdf.array *= (
            rankingstat.numerator.candidate_count_model() /
            self.signal_lr_lnpdf.array.sum())
        self.signal_lr_lnpdf.normalize()
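
An isolated, runnable sketch (everything below is illustrative, not code from the example) of the fan-out/reduce pattern used above: each worker builds a partial histogram and ships it back through a SimpleQueue, and the parent sums the partial counts. Unlike the lambda above, a module-level worker function also survives "spawn"-style process creation.

import multiprocessing

import numpy


def worker(q, nsamples, seed):
    # build a partial histogram and ship it back through the queue
    rng = numpy.random.default_rng(seed)
    counts, _ = numpy.histogram(rng.normal(size=nsamples), bins=100, range=(-5., 5.))
    q.put(counts)


if __name__ == "__main__":
    total = numpy.zeros(100)
    workers = []
    for seed in range(4):
        q = multiprocessing.SimpleQueue()
        p = multiprocessing.Process(target=worker, args=(q, 1 << 16, seed))
        p.start()
        workers.append((p, q))
    for p, q in workers:
        # drain the queue before join() so a full pipe cannot deadlock
        total += q.get()
        p.join()
    print(int(total.sum()))  # ~4 * 65536; samples beyond +/-5 are dropped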