def add_contents(self, contents):
        if self.tisi_rows is None:
            # get a list of time slide dictionaries
            self.tisi_rows = list(contents.time_slide_table.as_dict().values())

            # find the largest and smallest offsets
            min_offset = min(offset for vector in self.tisi_rows
                             for offset in vector.values())
            max_offset = max(offset for vector in self.tisi_rows
                             for offset in vector.values())

            # a guess at the time slide spacing:  works if the
            # time slides are distributed as a square grid over
            # the plot area.  (max - min)^2 gives the area of
            # the time slide square in square seconds; dividing
            # by the length of the time slide list gives the
            # average area per time slide;  taking the square
            # root of that gives the average distance between
            # adjacent time slides in seconds
            time_slide_spacing = ((max_offset - min_offset)**2 /
                                  len(self.tisi_rows))**0.5

            # use an average of 3 bins per time slide in each
            # direction, but round up to an odd integer
            nbins = int(math.ceil(
                (max_offset - min_offset) / time_slide_spacing * 3)) | 1

            # construct the binning
            self.bins = rate.BinnedRatios(
                rate.NDBins((rate.LinearBins(min_offset, max_offset, nbins),
                             rate.LinearBins(min_offset, max_offset, nbins))))

        self.seglists |= contents.seglists

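        # for each coincidence, pull the offset applied to each of the
        # two instruments by joining the time_slide table to itself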
        for offsets in contents.connection.cursor().execute(
                """
SELECT tx.offset, ty.offset FROM
	coinc_event
	JOIN time_slide AS tx ON (
		tx.time_slide_id == coinc_event.time_slide_id
	)
	JOIN time_slide AS ty ON (
		ty.time_slide_id == coinc_event.time_slide_id
	)
WHERE
	coinc_event.coinc_def_id == ?
	AND tx.instrument == ?
	AND ty.instrument == ?
		""", (contents.bb_definer_id, self.x_instrument, self.y_instrument)):
            try:
                self.bins.incnumerator(offsets)
            except IndexError:
                # beyond plot boundaries
                pass
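
A minimal standalone sketch (not from the original source) checking the
spacing heuristic above on a synthetic 5 x 5 grid of time slide offsets
whose true spacing is 1.0 second:

offsets = [{"H1": x, "L1": y} for x in range(5) for y in range(5)]
min_offset = min(o for vector in offsets for o in vector.values())
max_offset = max(o for vector in offsets for o in vector.values())

# (max - min)^2 is the area of the offset square in square seconds;
# dividing by the number of slides gives the area per slide, and its
# square root approximates the spacing between adjacent slides
spacing = ((max_offset - min_offset)**2 / len(offsets))**0.5
print(spacing)  # 0.8, close to the true grid spacing of 1.0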
Example #2
    def _bin_events(self, binning=None):
        # called internally by finish()
        if binning is None:
            minx, maxx = min(self.injected_x), max(self.injected_x)
            miny, maxy = min(self.injected_y), max(self.injected_y)
            binning = rate.NDBins((rate.LogarithmicBins(minx, maxx, 256),
                                   rate.LogarithmicBins(miny, maxy, 256)))

        self.efficiency = rate.BinnedRatios(binning)

        for xy in zip(self.injected_x, self.injected_y):
            self.efficiency.incdenominator(xy)
        for xy in zip(self.found_x, self.found_y):
            self.efficiency.incnumerator(xy)

        # 1 / error^2 is the number of injections that must fall
        # within the window for the fractional uncertainty in that
        # count to equal error.  multiplying by bins_per_inj gives
        # the number of bins the window must cover, and taking the
        # square root converts that into the window's side length
        # in bins.  because the efficiency contours tend to run
        # parallel to the x axis, the window is dilated in that
        # direction to improve resolution.

        bins_per_inj = self.efficiency.used() / float(len(self.injected_x))
        self.window_size_x = self.window_size_y = math.sqrt(bins_per_inj /
                                                            self.error**2)
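        # dilating x by sqrt(2) and shrinking y by sqrt(2) preserves the
        # window's area, so the injection count per window is unchanged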
        self.window_size_x *= math.sqrt(2)
        self.window_size_y /= math.sqrt(2)
        if self.window_size_x > 100 or self.window_size_y > 100:
            # program will take too long to run
            raise ValueError(
                "smoothing filter too large (not enough injections)")

        print("The smoothing window for %s is %g x %g bins" % ("+".join(
            self.instruments), self.window_size_x, self.window_size_y),
              end=' ',
              file=sys.stderr)
        print("which is %g%% x %g%% of the binning" %
              (100.0 * self.window_size_x / binning[0].n,
               100.0 * self.window_size_y / binning[1].n),
              file=sys.stderr)
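
A worked example (illustrative numbers, not taken from the original) of
the smoothing-window arithmetic in _bin_events above:

import math

error = 0.1        # target fractional uncertainty
n_injections = 10000
bins_used = 40000  # bins touched by at least one injection

# 1 / error^2 injections must land inside the window; bins_per_inj
# converts that count into a bin count, and the square root gives the
# window's side length in bins
bins_per_inj = bins_used / n_injections
side = math.sqrt(bins_per_inj / error**2)  # 20.0 bins
window_x = side * math.sqrt(2)             # ~28.3 bins, dilated along x
window_y = side / math.sqrt(2)             # ~14.1 bins, shrunk along y
print(window_x, window_y)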
Example #3
    def twoD_SearchVolume(self,
                          instruments,
                          dbin=None,
                          FAR=None,
                          bootnum=None,
                          derr=0.197,
                          dsys=0.074):
        """ 
    Compute the search volume in the mass/mass plane, bootstrap
    and measure the first and second moment (assumes the underlying 
    distribution can be characterized by those two parameters) 
    This is gonna be brutally slow
    derr = (0.134**2+.103**2+.102**2)**.5 = 0.197 which is the 3 detector 
    calibration uncertainty in quadrature.  This is conservative since some injections
     will be H1L1 and have a lower error of .17
    the dsys is the DC offset which is the max offset of .074. 
    """

        if FAR is None:
            FAR = self.far[instruments]
        found, missed = self.get_injections(instruments, FAR)
        twodbin = self.twoDMassBins
        wnfunc = self.gw
        livetime = self.livetime[instruments]
        if bootnum is None:
            bootnum = self.bootnum

        if wnfunc is not None:
            # normalize the window function by its central value
            wnfunc /= wnfunc[(wnfunc.shape[0] - 1) // 2,
                             (wnfunc.shape[1] - 1) // 2]

        x = twodbin.shape[0]
        y = twodbin.shape[1]
        z = int(self.opts.dist_bins)

        rArrays = []
        volArray = rate.BinnedArray(twodbin)
        volArray2 = rate.BinnedArray(twodbin)
        # set up a ratio array for each distance bin
        for k in range(z):
            rArrays.append(rate.BinnedRatios(twodbin))

        # Bootstrap to account for errors
        for n in range(bootnum):
            # reset the ratio arrays to zero
            for k in range(z):
                rArrays[k].numerator.array = numpy.zeros(
                    rArrays[k].numerator.bins.shape)
                rArrays[k].denominator.array = numpy.zeros(
                    rArrays[k].denominator.bins.shape)
            # scramble the injection population and distances
            if bootnum > 1:
                sm, sf = self._scramble_pop(missed, found)
                # I make a separate array of distances to speed up this calculation
                f_dist = self._scramble_dist(sf, derr, dsys)
            else:
                sm, sf = missed, found
                f_dist = numpy.array([l.distance for l in found])

            # construct the distance bins on the first pass
            if dbin is None:
                dbin = rate.LogarithmicBins(min(f_dist), max(f_dist), z)

            # get rid of all missed injections outside the distance bins
            # to prevent binning errors
            sm, m_dist = self.cut_distance(sm, dbin)
            sf, f_dist = self.cut_distance(sf, dbin)

            for i, l in enumerate(sf):  # found injections
                tbin = rArrays[dbin[f_dist[i]]]
                tbin.incnumerator((l.mass1, l.mass2))
            for i, l in enumerate(sm):  # missed injections
                tbin = rArrays[dbin[m_dist[i]]]
                tbin.incdenominator((l.mass1, l.mass2))

            # start from a zero array to accumulate the mean square
            tmpArray2 = rate.BinnedArray(twodbin)
            for k in range(z):
                tbins = rArrays[k]
                # denominator held only the missed injections; add the
                # found so that ratio() = found / total = efficiency
                tbins.denominator.array += tbins.numerator.array
                if wnfunc is not None:
                    rate.filter_array(tbins.denominator.array, wnfunc)
                    rate.filter_array(tbins.numerator.array, wnfunc)
                tbins.regularize()
                # efficiency times the volume of the logarithmic
                # distance shell, 4 pi r^3 dln(r)
                integrand = (4.0 * pi * tbins.ratio()
                             * dbin.centres()[k]**3 * dbin.delta)
                volArray.array += integrand
                tmpArray2.array += integrand
                print(
                    "bootstrapping:\t%.1f%%\tcalculating smoothed volume:\t%.1f%%\r"
                    % (100.0 * n / bootnum, 100.0 * k / z),
                    end=' ',
                    file=sys.stderr)
            # square this bootstrap sample to accumulate the mean square
            tmpArray2.array *= tmpArray2.array
            volArray2.array += tmpArray2.array

        print("", file=sys.stderr)
        # mean and variance over bootstrap iterations:
        # Var[X] = E[X^2] - E[X]^2
        volArray.array /= bootnum
        volArray2.array /= bootnum
        volArray2.array -= volArray.array**2  # variance
        volArray.array *= livetime
        volArray2.array *= livetime * livetime  # two powers of livetime
        return volArray, volArray2
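
A standalone sanity check (not from the original) of the logarithmic
shell-volume sum in the bootstrap loop above: with the efficiency set
to 1 everywhere, summing 4 pi r^3 dln(r) over the distance bins should
reproduce the Euclidean volume (4/3) pi R^3:

import math
import numpy

R, nbins = 100.0, 1000
edges = numpy.logspace(math.log10(0.01), math.log10(R), nbins + 1)
centres = numpy.sqrt(edges[:-1] * edges[1:])  # geometric bin centres
delta = math.log(edges[1] / edges[0])         # constant ln spacing

volume = (4.0 * math.pi * centres**3 * delta).sum()
print(volume, 4.0 / 3.0 * math.pi * R**3)     # nearly equal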
Example #4
def twoD_SearchVolume(found,
                      missed,
                      twodbin,
                      dbin,
                      wnfunc,
                      livetime,
                      bootnum=1,
                      derr=0.197,
                      dsys=0.074):
    """ 
  Compute the search volume in the mass/mass plane, bootstrap
  and measure the first and second moment (assumes the underlying 
  distribution can be characterized by those two parameters) 
  This is gonna be brutally slow
  derr = (0.134**2+.103**2+.102**2)**.5 = 0.197 which is the 3 detector 
  calibration uncertainty in quadrature.  This is conservative since some injections
  will be H1L1 and have a lower error of .17
  the dsys is the DC offset which is the max offset of .074. 
  """
    if wnfunc is not None:
        # normalize the window function by its central value
        wnfunc /= wnfunc[(wnfunc.shape[0] - 1) // 2,
                         (wnfunc.shape[1] - 1) // 2]
    x = twodbin.shape[0]
    y = twodbin.shape[1]
    z = dbin.n
    rArrays = []
    volArray = rate.BinnedArray(twodbin)
    volArray2 = rate.BinnedArray(twodbin)
    # set up a ratio array for each distance bin
    for k in range(z):
        rArrays.append(rate.BinnedRatios(twodbin))

    # Bootstrap to account for errors
    for n in range(bootnum):
        # reset the ratio arrays to zero
        for k in range(z):
            rArrays[k].numerator.array = numpy.zeros(
                rArrays[k].numerator.bins.shape)
            rArrays[k].denominator.array = numpy.zeros(
                rArrays[k].denominator.bins.shape)
        # scramble the injection population
        if bootnum > 1:
            sm, sf = scramble_pop(missed, found)
        else:
            sm, sf = missed, found
        for l in sf:  # found injections
            tbin = rArrays[dbin[scramble_dist(l.distance, derr, dsys)]]
            tbin.incnumerator((l.mass1, l.mass2))
        for l in sm:  # missed injections
            tbin = rArrays[dbin[scramble_dist(l.distance, derr, dsys)]]
            tbin.incdenominator((l.mass1, l.mass2))

        # start from a zero array to accumulate the mean square
        tmpArray2 = rate.BinnedArray(twodbin)
        for k in range(z):
            tbins = rArrays[k]
            # denominator held only the missed injections; add the found
            # so that ratio() = found / total = efficiency
            tbins.denominator.array += tbins.numerator.array
            if wnfunc is not None:
                rate.filter_array(tbins.denominator.array, wnfunc)
                rate.filter_array(tbins.numerator.array, wnfunc)
            tbins.regularize()
            # efficiency times the volume of the logarithmic
            # distance shell, 4 pi r^3 dln(r)
            integrand = (4.0 * pi * tbins.ratio()
                         * dbin.centres()[k]**3 * dbin.delta)
            volArray.array += integrand
            tmpArray2.array += integrand
            print(
                "bootstrapping:\t%.1f%%\tcalculating smoothed volume:\t%.1f%%\r"
                % (100.0 * n / bootnum, 100.0 * k / z),
                end=' ',
                file=sys.stderr)
        # square this bootstrap sample to accumulate the mean square
        tmpArray2.array *= tmpArray2.array
        volArray2.array += tmpArray2.array

    print("", file=sys.stderr)
    # mean and variance over bootstrap iterations:
    # Var[X] = E[X^2] - E[X]^2
    volArray.array /= bootnum
    volArray2.array /= bootnum
    volArray2.array -= volArray.array**2  # variance
    volArray.array *= livetime
    volArray2.array *= livetime * livetime  # two powers of livetime
    return volArray, volArray2
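
A hedged usage sketch for the module-level twoD_SearchVolume above.
The injection objects, binnings, livetime and the trivial
scramble_dist stand-in are illustrative assumptions, not part of the
original module:

from collections import namedtuple

Injection = namedtuple("Injection", "mass1 mass2 distance")
found = [Injection(10.0, 10.0, 50.0), Injection(5.0, 5.0, 120.0)]
missed = [Injection(10.0, 10.0, 900.0)]

def scramble_dist(dist, derr, dsys):
    # stand-in for the module's distance scrambler
    return dist

twodbin = rate.NDBins((rate.LinearBins(1.0, 20.0, 10),
                       rate.LinearBins(1.0, 20.0, 10)))
dbin = rate.LogarithmicBins(10.0, 1000.0, 5)

# bootnum=1 skips the population scrambling; livetime is illustrative
vol, vol2 = twoD_SearchVolume(found, missed, twodbin, dbin,
                              wnfunc=None, livetime=0.5, bootnum=1)
# vol.array holds volume x time per mass/mass bin; vol2.array holds the
# bootstrap variance (identically zero when bootnum=1)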