Example #1
0
def time_slides_livetime(seglists,
                         time_slides,
                         min_instruments,
                         verbose=False,
                         clip=None):
    """
    Compute the total multi-instrument livetime over a set of time
    slides.

    seglists is a segmentlistdict of times when each of a set of
    instruments were on;  time_slides is a sequence of
    instrument-->offset dictionaries.  Each offset vector in the
    sequence is applied to the segmentlists in turn and the time
    during which at least min_instruments instruments were on is
    accumulated and returned.  If clip is not None, the shifted
    segmentlists are intersected with clip before the livetime is
    measured.  If verbose is True, progress reports are printed to
    stderr.
    """
    total = 0.0
    # work on a copy so the caller's offsets are left untouched
    seglists = seglists.copy()
    n_slides = len(time_slides)
    if verbose:
        print >> sys.stderr, "computing the live time for %d time slides:" % n_slides
    for i, offset_vector in enumerate(time_slides):
        if verbose:
            print >> sys.stderr, "\t%.1f%%\r" % (100.0 * i / n_slides),
        seglists.offsets.update(offset_vector)
        # optionally restrict to the clip segments before voting
        source = seglists if clip is None else seglists & clip
        total += float(
            abs(segmentsUtils.vote(source.values(), min_instruments)))
    if verbose:
        print >> sys.stderr, "\t100.0%"
    return total
Example #2
0
def time_slides_livetime(seglists, time_slides, min_instruments, verbose = False, clip = None):
	"""
	Return the sum, over a sequence of time slides, of the time
	during which at least min_instruments instruments were on.

	seglists is a segmentlistdict of times when each of a set of
	instruments were on;  time_slides is a sequence of
	instrument-->offset dictionaries, each of which is applied to
	seglists before its livetime is measured.  If clip is not None,
	the shifted seglists are intersected with clip first.  If
	verbose is True, progress reports are printed to stderr.
	"""
	# hoist the frequently-used callable out of the loop
	vote = segmentsUtils.vote
	accumulated = 0.0
	seglists = seglists.copy()	# protect the caller's offsets
	total_slides = len(time_slides)
	if verbose:
		print >>sys.stderr, "computing the live time for %d time slides:" % total_slides
	for slide_index, offset_vector in enumerate(time_slides):
		if verbose:
			print >>sys.stderr, "\t%.1f%%\r" % (100.0 * slide_index / total_slides),
		seglists.offsets.update(offset_vector)
		if clip is None:
			accumulated += float(abs(vote(seglists.values(), min_instruments)))
		else:
			accumulated += float(abs(vote((seglists & clip).values(), min_instruments)))
	if verbose:
		print >>sys.stderr, "\t100.0%"
	return accumulated
Example #3
0
	def test_vote(self):
		"""
		Test vote().
		"""
		for trial in range(algebra_repeats):
			# build a random number of random coalesced lists
			seglists = [verifyutils.random_coalesced_list(algebra_listlength) for unused in range(random.randint(0, 10))]
			n = random.randint(0, len(seglists))
			# the correct answer is the union, over every size-n
			# subset of the lists, of that subset's intersection
			expected = segments.segmentlist()
			for votes in iterutils.choices(seglists, n):
				expected |= votes and reduce(lambda a, b: a & b, votes) or segments.segmentlist()
			self.assertEqual(expected, segmentsUtils.vote(seglists, n))
    def test_vote(self):
        """
        Test vote().
        """
        # intersection of a (possibly empty) collection of segmentlists
        intersect_all = lambda votes: (
            votes and reduce(lambda a, b: a & b, votes)
            or segments.segmentlist())
        for trial in range(algebra_repeats):
            count = random.randint(0, 10)
            seglists = [
                verifyutils.random_coalesced_list(algebra_listlength)
                for unused in range(count)
            ]
            n = random.randint(0, len(seglists))
            # union over all size-n subsets of the pairwise
            # intersections of the chosen lists
            expected = reduce(
                lambda x, y: x | y,
                (intersect_all(votes)
                 for votes in iterutils.choices(seglists, n)),
                segments.segmentlist())
            self.assertEqual(expected, segmentsUtils.vote(seglists, n))
Example #5
0
    def add_slidelessbackground(self,
                                database,
                                experiments,
                                param_func_args=()):
        """
        Synthesize background (noise) coincidences without
        performing time slides and add their parameters to this
        object's background statistics via self.add_background().

        database must provide .seglists, .vetoseglists,
        .sngl_burst_table and .xmldoc attributes;  experiments is
        the number of zero-lag observations' worth of background to
        simulate;  param_func_args are extra positional arguments
        passed through to self.coinc_params().
        """
        # FIXME:  this needs to be taught how to not slide H1 and
        # H2 with respect to each other

        # segment lists with vetoes removed
        seglists = database.seglists - database.vetoseglists

        # construct the event list dictionary.  remove vetoed
        # events from the lists and save event peak times so they
        # can be restored later
        eventlists = {}
        orig_peak_times = {}
        for event in database.sngl_burst_table:
            if event.peak in seglists[event.ifo]:
                try:
                    eventlists[event.ifo].append(event)
                except KeyError:
                    eventlists[event.ifo] = [event]
                orig_peak_times[event] = event.peak

        # parse the --thresholds H1,L1=... command-line options from burca
        delta_t = [
            float(threshold.split("=")[-1])
            for threshold in ligolw_process.get_process_params(
                database.xmldoc, "ligolw_burca", "--thresholds")
        ]
        if not all(delta_t[0] == threshold for threshold in delta_t[1:]):
            # raw string:  "\D" is an invalid escape sequence and
            # the message genuinely contains a backslash
            raise ValueError(
                r"\Delta t is not unique in ligolw_burca arguments")
        delta_t = delta_t.pop()

        # construct the coinc generator.  note that H1+H2-only
        # coincs are forbidden, which is effected here by removing
        # that instrument combination from the object's internal
        # .rates dictionary
        coinc_generator = snglcoinc.CoincSynthesizer(eventlists, seglists,
                                                     delta_t)
        if frozenset(("H1", "H2")) in coinc_generator.rates:
            del coinc_generator.rates[frozenset(("H1", "H2"))]

        # build a dictionary of time-of-arrival generators
        toa_generator = dict(
            (instruments, coinc_generator.plausible_toas(instruments))
            for instruments in coinc_generator.rates.keys())

        # how many coincs?  the expected number is obtained by
        # multiplying the total zero-lag time for which at least
        # two instruments were on by the sum of the rates for all
        # coincs to get the mean number of coincs per zero-lag
        # observation time, and multiplying that by the number of
        # experiments the background should simulate to get the
        # mean number of background events to simulate.  the actual
        # number simulated is a Poisson-distributed RV with that
        # mean.
        n_coincs, = scipy.stats.poisson.rvs(
            float(abs(segmentsUtils.vote(seglists.values(), 2))) *
            sum(coinc_generator.rates.values()) * experiments)

        # generate synthetic background coincs
        zero_lag_offset_vector = offsetvector(
            (instrument, 0.0) for instrument in seglists)
        for n, events in enumerate(
                coinc_generator.coincs(lsctables.SnglBurst.get_peak)):
            # n = 1 on 2nd iteration, so placing this condition
            # where it is in the loop causes the correct number
            # of events to be added to the background
            if n >= n_coincs:
                break
            # assign fake peak times
            toas = toa_generator[frozenset(event.ifo
                                           for event in events)].next()
            for event in events:
                event.peak = toas[event.ifo]
            # compute coincidence parameters
            self.add_background(
                self.coinc_params(events, zero_lag_offset_vector,
                                  *param_func_args))

        # restore original peak times
        for event, peak_time in orig_peak_times.iteritems():
            event.peak = peak_time
Example #6
0
	def add_slidelessbackground(self, database, experiments, param_func_args = ()):
		"""
		Synthesize background (noise) coincidences without
		performing time slides and feed their parameters to
		self.denominator.increment().

		database must provide .seglists, .vetoseglists,
		.sngl_burst_table and .xmldoc attributes;  experiments is
		the number of zero-lag observations' worth of background
		to simulate;  param_func_args are extra positional
		arguments passed through to self.coinc_params().
		"""
		# FIXME:  this needs to be taught how to not slide H1 and
		# H2 with respect to each other

		# segment lists with vetoes removed
		seglists = database.seglists - database.vetoseglists

		# construct the event list dictionary.  remove vetoed
		# events from the lists and save event peak times so they
		# can be restored later
		eventlists = {}
		orig_peak_times = {}
		for event in database.sngl_burst_table:
			if event.peak in seglists[event.ifo]:
				try:
					eventlists[event.ifo].append(event)
				except KeyError:
					eventlists[event.ifo] = [event]
				orig_peak_times[event] = event.peak

		# parse the --thresholds H1,L1=... command-line options from burca
		delta_t = [float(threshold.split("=")[-1]) for threshold in ligolw_process.get_process_params(database.xmldoc, "lalapps_burca", "--thresholds")]
		if not all(delta_t[0] == threshold for threshold in delta_t[1:]):
			# raw string:  "\D" is an invalid escape sequence and
			# the message genuinely contains a backslash
			raise ValueError(r"\Delta t is not unique in lalapps_burca arguments")
		delta_t = delta_t.pop()

		# construct the coinc generator.  note that H1+H2-only
		# coincs are forbidden, which is effected here by removing
		# that instrument combination from the object's internal
		# .rates dictionary
		coinc_generator = snglcoinc.CoincSynthesizer(eventlists, seglists, delta_t)
		if frozenset(("H1", "H2")) in coinc_generator.rates:
			del coinc_generator.rates[frozenset(("H1", "H2"))]

		# build a dictionary of time-of-arrival generators
		toa_generator = dict((instruments, coinc_generator.plausible_toas(instruments)) for instruments in coinc_generator.rates.keys())

		# how many coincs?  the expected number is obtained by
		# multiplying the total zero-lag time for which at least
		# two instruments were on by the sum of the rates for all
		# coincs to get the mean number of coincs per zero-lag
		# observation time, and multiplying that by the number of
		# experiments the background should simulate to get the
		# mean number of background events to simulate.  the actual
		# number simulated is a Poisson-distributed RV with that
		# mean.
		n_coincs, = scipy.stats.poisson.rvs(float(abs(segmentsUtils.vote(seglists.values(), 2))) * sum(coinc_generator.rates.values()) * experiments)

		# generate synthetic background coincs
		zero_lag_offset_vector = offsetvector.fromkeys(seglists, 0.0)
		for n, events in enumerate(coinc_generator.coincs(lsctables.SnglBurst.get_peak)):
			# n = 1 on 2nd iteration, so placing this condition
			# where it is in the loop causes the correct number
			# of events to be added to the background
			if n >= n_coincs:
				break
			# assign fake peak times
			toas = toa_generator[frozenset(event.ifo for event in events)].next()
			for event in events:
				event.peak = toas[event.ifo]
			# compute coincidence parameters
			self.denominator.increment(self.coinc_params(events, zero_lag_offset_vector, *param_func_args))

		# restore original peak times
		for event, peak_time in orig_peak_times.iteritems():
			event.peak = peak_time