def get_injections(contents):
	"""
	Generator function to return

		sim, event_list, offsetvector

	tuples by querying the sim_burst, coinc_event and sngl_burst tables
	in the database described by contents.  Only coincs corresponding
	to "exact" sim_burst<-->coinc_event coincs will be retrieved.
	"""
	cursor = contents.connection.cursor()
	for values in contents.connection.cursor().execute("""
SELECT
	sim_burst.*,
	burst_coinc_event_map.event_id
FROM
	sim_burst
	JOIN coinc_event_map AS sim_coinc_event_map ON (
		sim_coinc_event_map.table_name == 'sim_burst'
		AND sim_coinc_event_map.event_id == sim_burst.simulation_id
	)
	JOIN coinc_event AS sim_coinc_event ON (
		sim_coinc_event.coinc_event_id == sim_coinc_event_map.coinc_event_id
	)
	JOIN coinc_event_map AS burst_coinc_event_map ON (
		burst_coinc_event_map.coinc_event_id == sim_coinc_event_map.coinc_event_id
		AND burst_coinc_event_map.table_name == 'coinc_event'
	)
WHERE
	sim_coinc_event.coinc_def_id == ?
	""", (contents.sce_definer_id,)):
		# retrieve the injection and the coinc_event_id
		sim = contents.sim_burst_table.row_from_cols(values)
		coinc_event_id = values[-1]

		# retrieve the list of the sngl_bursts in this
		# coinc, and their time slide dictionary
		rows = [(contents.sngl_burst_table.row_from_cols(row), row[-1]) for row in cursor.execute("""
SELECT
	sngl_burst.*,
	time_slide.offset
FROM
	sngl_burst
	JOIN coinc_event_map ON (
		coinc_event_map.table_name == 'sngl_burst'
		AND coinc_event_map.event_id == sngl_burst.event_id
	)
	JOIN coinc_event ON (
		coinc_event.coinc_event_id == coinc_event_map.coinc_event_id
	)
	JOIN time_slide ON (
		coinc_event.time_slide_id == time_slide.time_slide_id
		AND time_slide.instrument == sngl_burst.ifo
	)
WHERE
	coinc_event.coinc_event_id == ?
		""", (coinc_event_id,))]
		# pass the events to whatever wants them
		yield sim, [event for event, offset in rows], offsetvector((event.ifo, offset) for event, offset in rows)
	cursor.close()
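
# A minimal, self-contained sketch (not from the source) of how the final
# yield above assembles its return values from the (event, offset) rows.  A
# namedtuple stands in for the sngl_burst row class and a plain dict stands
# in for the offsetvector type; both stand-ins are hypothetical.
from collections import namedtuple

FakeSnglBurst = namedtuple("FakeSnglBurst", "ifo snr")  # hypothetical stand-in row

rows = [(FakeSnglBurst("H1", 8.0), 0.0), (FakeSnglBurst("L1", 9.0), 5.0)]
event_list = [event for event, offset in rows]
offsets = dict((event.ifo, offset) for event, offset in rows)  # stand-in for offsetvector(...)
assert [event.ifo for event in event_list] == ["H1", "L1"]
assert offsets == {"H1": 0.0, "L1": 5.0}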
Example #2
def SlidesIter(slides):
	"""
	Accepts a dictionary mapping instrument --> list-of-offsets (for
	example, as returned by parse_slides()), and iterates over the
	cartesian (outer) product of the offset lists, yielding all
	possible N-way instrument --> offset mappings.

	Example:

	>>> slides = {"H1": [-1, 0, 1], "H2": [-1, 0, 1], "L1": [0]}
	>>> list(SlidesIter(slides))
	[offsetvector({'H2': -1, 'H1': -1, 'L1': 0}), offsetvector({'H2': -1, 'H1': 0, 'L1': 0}), offsetvector({'H2': -1, 'H1': 1, 'L1': 0}), offsetvector({'H2': 0, 'H1': -1, 'L1': 0}), offsetvector({'H2': 0, 'H1': 0, 'L1': 0}), offsetvector({'H2': 0, 'H1': 1, 'L1': 0}), offsetvector({'H2': 1, 'H1': -1, 'L1': 0}), offsetvector({'H2': 1, 'H1': 0, 'L1': 0}), offsetvector({'H2': 1, 'H1': 1, 'L1': 0})]
	"""
	if not slides:
		# things get a little odd in the event that no
		# instrument/offset-list pairs are given. instead of
		# yielding an empty sequence, itertools.product(*()) yields
		# a sequence containing a single empty tuple, so instead of
		# yielding no offsetvectors this function yields one empty
		# one.  that's not what calling codes generally expect the
		# response to be so we trap the case and return an empty
		# sequence
		return
	instruments = slides.keys()
	for slide in itertools.product(*slides.values()):
		yield offsetvector.offsetvector(zip(instruments, slide))
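
# A hedged usage sketch:  the same cartesian-product enumeration performed by
# SlidesIter() above, written out with plain dicts in place of
# offsetvector.offsetvector so it runs stand-alone.
import itertools

slides = {"H1": [-1, 0, 1], "H2": [-1, 0, 1], "L1": [0]}
instruments = list(slides.keys())
vectors = [dict(zip(instruments, combo)) for combo in itertools.product(*slides.values())]
assert len(vectors) == 3 * 3 * 1
# the zero-lag vector appears because every offset list contains 0
assert {"H1": 0, "H2": 0, "L1": 0} in vectors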
Example #3
def SlidesIter(slides):
    """
	Accepts a dictionary mapping instrument --> list-of-offsets (for
	example, as returned by parse_slides()), and iterates over the
	cartesian (outer) product of the offset lists, yielding all
	possible N-way instrument --> offset mappings.

	Example:

	>>> slides = {"H1": [-1, 0, 1], "H2": [-1, 0, 1], "L1": [0]}
	>>> list(SlidesIter(slides))
	[offsetvector({'H2': -1, 'H1': -1, 'L1': 0}), offsetvector({'H2': -1, 'H1': 0, 'L1': 0}), offsetvector({'H2': -1, 'H1': 1, 'L1': 0}), offsetvector({'H2': 0, 'H1': -1, 'L1': 0}), offsetvector({'H2': 0, 'H1': 0, 'L1': 0}), offsetvector({'H2': 0, 'H1': 1, 'L1': 0}), offsetvector({'H2': 1, 'H1': -1, 'L1': 0}), offsetvector({'H2': 1, 'H1': 0, 'L1': 0}), offsetvector({'H2': 1, 'H1': 1, 'L1': 0})]
	"""
    if not slides:
        # things get a little odd in the event that no
        # instrument/offset-list pairs are given. instead of
        # yielding an empty sequence, itertools.product(*()) yields
        # a sequence containing a single empty tuple, so instead of
        # yielding no offsetvectors this function yields one empty
        # one.  that's not what calling codes generally expect the
        # response to be so we trap the case and return an empty
        # sequence
        return
    instruments = slides.keys()
    for slide in itertools.product(*slides.values()):
        yield offsetvector.offsetvector(zip(instruments, slide))
Example #5
def Inspiral_Num_Slides_Iter(count, offsets):
    '''
  This generator yields a sequence of time slide dictionaries in the
  style of lalapps_thinca's time slides.  Each resulting dictionary
  maps instrument to offset.  The input is a count of time slides (an
  integer), and a dictionary mapping instrument to offset.  The
  output dictionaries describe time slides that are integer multiples
  of the input time shifts.
  Example (formatted for clarity):

  >>> list(Inspiral_Num_Slides_Iter(3, {"H1": 0.0, "H2": 5.0, "L1": 10.0}))
  [{'H2': -15.0, 'H1': -0.0, 'L1': -30.0},
   {'H2': -10.0, 'H1': -0.0, 'L1': -20.0},
   {'H2': -5.0, 'H1': -0.0, 'L1': -10.0},
   {'H2': 0.0, 'H1': 0.0, 'L1': 0.0},
   {'H2': 5.0, 'H1': 0.0, 'L1': 10.0},
   {'H2': 10.0, 'H1': 0.0, 'L1': 20.0},
   {'H2': 15.0, 'H1': 0.0, 'L1': 30.0}]

  Output time slides are integer multiples of the input time shift 
  vector in the range [-count, +count], including zero, and are 
  returned in increasing order of multiplier.
  '''
    for n in range(-count, +count + 1):
        yield offsetvector.offsetvector( (instrument, offset * n) \
            for instrument, offset in offsets.items() )
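
# A self-contained sketch of the multiplier logic above, with plain dicts in
# place of offsetvector.offsetvector:  count = 3 and a base offset vector
# give 2 * count + 1 output vectors, each an integer multiple of the input.
count, offsets = 3, {"H1": 0.0, "H2": 5.0, "L1": 10.0}
vectors = [dict((instrument, offset * n) for instrument, offset in offsets.items()) for n in range(-count, count + 1)]
assert len(vectors) == 2 * count + 1
assert vectors[count] == {"H1": 0.0, "H2": 0.0, "L1": 0.0}  # the middle vector is the zero-lag slide
assert vectors[-1] == {"H1": 0.0, "H2": 15.0, "L1": 30.0}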
Example #7
    def as_dict(self):
        """
		Return a dictionary mapping time slide IDs to offset
		dictionaries.
		"""
        return dict((
            ilwd.ilwdchar(id),
            offsetvector.offsetvector((instrument, offset)
                                      for id, instrument, offset in values)
        ) for id, values in itertools.groupby(
            self.cursor.execute(
                "SELECT time_slide_id, instrument, offset FROM time_slide ORDER BY time_slide_id"
            ), lambda (id, instrument, offset): id))
Example #8
def parse_inspiral_num_slides_slidespec(slidespec):
	"""
	Accepts a string in the format
	count:instrument=offset[,instrument=offset...] and returns the
	tuple (count, {instrument: offset, ...})

	Example:

	>>> parse_inspiral_num_slides_slidespec("3:H1=0,H2=5,L1=10")
	(3, offsetvector({'H2': 5.0, 'H1': 0.0, 'L1': 10.0}))
	"""
	count, offsets = slidespec.strip().split(":")
	offsetvect = offsetvector.offsetvector((instrument.strip(), float(offset)) for instrument, offset in (token.strip().split("=") for token in offsets.strip().split(",")))
	return int(count), offsetvect
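
# A hedged, stand-alone restatement of the parsing above:  split on ":" to
# separate the slide count from the instrument=offset list, then split on ","
# and "=" to build the offset dictionary (a plain dict here rather than
# offsetvector.offsetvector).
slidespec = "3:H1=0,H2=5,L1=10"
count, offsets = slidespec.strip().split(":")
offsets = dict((instrument.strip(), float(offset)) for instrument, offset in (token.strip().split("=") for token in offsets.strip().split(",")))
assert (int(count), offsets) == (3, {"H1": 0.0, "H2": 5.0, "L1": 10.0})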
Example #9
def parse_lalapps_thinca_slidespec(slidespec):
    """
  Accepts a string in the format
  count:instrument=offset[,instrument=offset...] and returns the
  tuple (count, {instrument: offset, ...})
  Example:

  >>> parse_lalapps_thinca_slidespec("3:H1=0,H2=5,L1=10")
  (3, offsetvector({'H2': 5.0, 'H1': 0.0, 'L1': 10.0}))
  """
    count, offsets = slidespec.strip().split(":")
    tokens = offsets.strip().split(",")
    offsetvect = offsetvector.offsetvector( (instrument.strip(), float(offset)) \
        for instrument, offset in (token.strip().split("=") for token in tokens) )
    return int(count), offsetvect
Example #10
def SlidesIter(slides):
	"""
	Accepts a dictionary mapping instrument --> list-of-offsets (for
	example, as returned by parse_slides()), and iterates over the
	cartesian (outer) product of the offset lists, yielding all
	possible N-way instrument --> offset mappings.

	Example:

	>>> slides = {"H1": [-1, 0, 1], "H2": [-1, 0, 1], "L1": [0]}
	>>> list(SlidesIter(slides))
	[offsetvector({'H2': -1, 'H1': -1, 'L1': 0}), offsetvector({'H2': -1, 'H1': 0, 'L1': 0}), offsetvector({'H2': -1, 'H1': 1, 'L1': 0}), offsetvector({'H2': 0, 'H1': -1, 'L1': 0}), offsetvector({'H2': 0, 'H1': 0, 'L1': 0}), offsetvector({'H2': 0, 'H1': 1, 'L1': 0}), offsetvector({'H2': 1, 'H1': -1, 'L1': 0}), offsetvector({'H2': 1, 'H1': 0, 'L1': 0}), offsetvector({'H2': 1, 'H1': 1, 'L1': 0})]
	"""
	instruments = slides.keys()
	for slide in itertools.product(*slides.values()):
		yield offsetvector.offsetvector(zip(instruments, slide))
Example #11
def SlidesIter(slides):
    """
	Accepts a dictionary mapping instrument --> list-of-offsets (for
	example, as returned by parse_slides()), and iterates over the
	cartesian (outer) product of the offset lists, yielding all
	possible N-way instrument --> offset mappings.

	Example:

	>>> slides = {"H1": [-1, 0, 1], "H2": [-1, 0, 1], "L1": [0]}
	>>> list(SlidesIter(slides))
	[offsetvector({'H2': -1, 'H1': -1, 'L1': 0}), offsetvector({'H2': -1, 'H1': 0, 'L1': 0}), offsetvector({'H2': -1, 'H1': 1, 'L1': 0}), offsetvector({'H2': 0, 'H1': -1, 'L1': 0}), offsetvector({'H2': 0, 'H1': 0, 'L1': 0}), offsetvector({'H2': 0, 'H1': 1, 'L1': 0}), offsetvector({'H2': 1, 'H1': -1, 'L1': 0}), offsetvector({'H2': 1, 'H1': 0, 'L1': 0}), offsetvector({'H2': 1, 'H1': 1, 'L1': 0})]
	"""
    instruments = slides.keys()
    for slide in itertools.product(*slides.values()):
        yield offsetvector.offsetvector(zip(instruments, slide))
    def get_noninjections(self):
        """
		Generator function to return

			is_background, event_list, offsetvector

		tuples by querying the coinc_event and sngl_burst tables in
		the database.  Only coincs corresponding to
		sngl_burst<-->sngl_burst coincs will be retrieved.
		"""
        cursor = self.connection.cursor()
        for coinc_event_id, time_slide_id in self.connection.cursor().execute(
                """
	SELECT
		coinc_event_id,
		time_slide_id
	FROM
		coinc_event
	WHERE
		coinc_def_id == ?
		""", (self.bb_definer_id, )):
            rows = [(self.sngl_burst_table.row_from_cols(row), row[-1])
                    for row in cursor.execute(
                        """
	SELECT
		sngl_burst.*,
		time_slide.offset
	FROM
		coinc_event_map
		JOIN sngl_burst ON (
			coinc_event_map.table_name == 'sngl_burst'
			AND sngl_burst.event_id == coinc_event_map.event_id
		)
		JOIN time_slide ON (
			time_slide.instrument == sngl_burst.ifo
		)
	WHERE
		coinc_event_map.coinc_event_id == ?
		AND time_slide.time_slide_id == ?
			""", (coinc_event_id, time_slide_id))]
            offsets = offsetvector(
                (event.ifo, offset) for event, offset in rows)
            yield any(offsets.values()), [event
                                          for event, offset in rows], offsets
        cursor.close()
def get_noninjections(contents):
	"""
	Generator function to return

		is_background, event_list, offsetvector

	tuples by querying the coinc_event and sngl_burst tables in the
	database described by contents.  Only coincs corresponding to
	sngl_burst<-->sngl_burst coincs will be retrieved.
	"""
	cursor = contents.connection.cursor()
	for coinc_event_id, time_slide_id in contents.connection.cursor().execute("""
SELECT
	coinc_event_id,
	time_slide_id
FROM
	coinc_event
WHERE
	coinc_def_id == ?
	""", (contents.bb_definer_id,)):
		rows = [(contents.sngl_burst_table.row_from_cols(row), row[-1]) for row in cursor.execute("""
SELECT
	sngl_burst.*,
	time_slide.offset
FROM
	coinc_event_map
	JOIN sngl_burst ON (
		coinc_event_map.table_name == 'sngl_burst'
		AND sngl_burst.event_id == coinc_event_map.event_id
	)
	JOIN time_slide ON (
		time_slide.instrument == sngl_burst.ifo
	)
WHERE
	coinc_event_map.coinc_event_id == ?
	AND time_slide.time_slide_id == ?
		""", (coinc_event_id, time_slide_id))]
		offsets = offsetvector((event.ifo, offset) for event, offset in rows)
		yield any(offsets.values()), [event for event, offset in rows], offsets
	cursor.close()
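
# A self-contained sketch of the sngl_burst/time_slide join used above,
# against an in-memory SQLite database.  The table layouts here are
# hypothetical reductions to only the columns the query touches; the real
# LIGO Light Weight schema has many more columns.
import sqlite3

connection = sqlite3.connect(":memory:")
cursor = connection.cursor()
cursor.executescript("""
CREATE TABLE sngl_burst (event_id TEXT, ifo TEXT, snr REAL);
CREATE TABLE time_slide (time_slide_id TEXT, instrument TEXT, offset REAL);
CREATE TABLE coinc_event_map (coinc_event_id TEXT, table_name TEXT, event_id TEXT);
INSERT INTO sngl_burst VALUES ('sngl_burst:event_id:0', 'H1', 8.0);
INSERT INTO sngl_burst VALUES ('sngl_burst:event_id:1', 'L1', 9.0);
INSERT INTO time_slide VALUES ('time_slide:time_slide_id:0', 'H1', 0.0);
INSERT INTO time_slide VALUES ('time_slide:time_slide_id:0', 'L1', 5.0);
INSERT INTO coinc_event_map VALUES ('coinc_event:coinc_event_id:0', 'sngl_burst', 'sngl_burst:event_id:0');
INSERT INTO coinc_event_map VALUES ('coinc_event:coinc_event_id:0', 'sngl_burst', 'sngl_burst:event_id:1');
""")
rows = cursor.execute("""
SELECT
	sngl_burst.*,
	time_slide.offset
FROM
	coinc_event_map
	JOIN sngl_burst ON (
		coinc_event_map.table_name == 'sngl_burst'
		AND sngl_burst.event_id == coinc_event_map.event_id
	)
	JOIN time_slide ON (
		time_slide.instrument == sngl_burst.ifo
	)
WHERE
	coinc_event_map.coinc_event_id == ?
	AND time_slide.time_slide_id == ?
""", ('coinc_event:coinc_event_id:0', 'time_slide:time_slide_id:0')).fetchall()
# each row is (event_id, ifo, snr, offset); the offset is the last column,
# matching the row[-1] convention used above
assert sorted((row[1], row[-1]) for row in rows) == [('H1', 0.0), ('L1', 5.0)]
offsets = dict((row[1], row[-1]) for row in rows)
assert any(offsets.values())  # non-zero offsets:  this coinc would be flagged as background
connection.close()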
Example #14
def get_time_slides(connection):
	"""
	Query the database for the IDs and offsets of all time slides, and
	return two dictionaries one containing the all-zero time slides and
	the other containing the not-all-zero time slides.
	"""
	zero_lag_time_slides = {}
	background_time_slides = {}
	for time_slide_id, rows in itertools.groupby(connection.cursor().execute("""
SELECT
	time_slide_id,
	instrument,
	offset
FROM
	time_slide
ORDER BY
	time_slide_id
	"""), lambda (time_slide_id, instrument, offset): ilwd.ilwdchar(time_slide_id)):
		offset_vector = offsetvector.offsetvector((instrument, offset) for time_slide_id, instrument, offset in rows)
		if any(offset_vector.values()):
			background_time_slides[time_slide_id] = offset_vector
		else:
			zero_lag_time_slides[time_slide_id] = offset_vector
	return zero_lag_time_slides, background_time_slides
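
# A stand-alone sketch of the grouping and zero-lag/background split above,
# with plain tuples standing in for the database rows and plain dicts
# standing in for offsetvector.offsetvector (the ilwd ID handling is
# omitted).
import itertools

rows = [
	("time_slide:time_slide_id:0", "H1", 0.0),
	("time_slide:time_slide_id:0", "L1", 0.0),
	("time_slide:time_slide_id:1", "H1", 0.0),
	("time_slide:time_slide_id:1", "L1", 5.0),
]  # already sorted by time_slide_id, as the ORDER BY clause guarantees

zero_lag = {}
background = {}
for time_slide_id, group in itertools.groupby(rows, lambda row: row[0]):
	offset_vector = dict((instrument, offset) for _, instrument, offset in group)
	if any(offset_vector.values()):
		background[time_slide_id] = offset_vector
	else:
		zero_lag[time_slide_id] = offset_vector

assert zero_lag == {"time_slide:time_slide_id:0": {"H1": 0.0, "L1": 0.0}}
assert background == {"time_slide:time_slide_id:1": {"H1": 0.0, "L1": 5.0}}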
Example #15
    def add_slidelessbackground(self,
                                database,
                                experiments,
                                param_func_args=()):
        # FIXME:  this needs to be taught how to not slide H1 and
        # H2 with respect to each other

        # segment lists
        seglists = database.seglists - database.vetoseglists

        # construct the event list dictionary.  remove vetoed
        # events from the lists and save event peak times so they
        # can be restored later
        eventlists = {}
        orig_peak_times = {}
        for event in database.sngl_burst_table:
            if event.peak in seglists[event.ifo]:
                try:
                    eventlists[event.ifo].append(event)
                except KeyError:
                    eventlists[event.ifo] = [event]
                orig_peak_times[event] = event.peak

        # parse the --thresholds H1,L1=... command-line options from burca
        delta_t = [
            float(threshold.split("=")[-1])
            for threshold in ligolw_process.get_process_params(
                database.xmldoc, "ligolw_burca", "--thresholds")
        ]
        if not all(delta_t[0] == threshold for threshold in delta_t[1:]):
            raise ValueError(
                "\Delta t is not unique in ligolw_burca arguments")
        delta_t = delta_t.pop()

        # construct the coinc generator.  note that H1+H2-only
        # coincs are forbidden, which is affected here by removing
        # that instrument combination from the object's internal
        # .rates dictionary
        coinc_generator = snglcoinc.CoincSynthesizer(eventlists, seglists,
                                                     delta_t)
        if frozenset(("H1", "H2")) in coinc_generator.rates:
            del coinc_generator.rates[frozenset(("H1", "H2"))]

        # build a dictionary of time-of-arrival generators
        toa_generator = dict(
            (instruments, coinc_generator.plausible_toas(instruments))
            for instruments in coinc_generator.rates.keys())

        # how many coincs?  the expected number is obtained by
        # multiplying the total zero-lag time for which at least
        # two instruments were on by the sum of the rates for all
        # coincs to get the mean number of coincs per zero-lag
        # observation time, and multiplying that by the number of
        # experiments the background should simulate to get the
        # mean number of background events to simulate.  the actual
        # number simulated is a Poisson-distributed RV with that
        # mean.
        n_coincs, = scipy.stats.poisson.rvs(
            float(abs(segmentsUtils.vote(seglists.values(), 2))) *
            sum(coinc_generator.rates.values()) * experiments)

        # generate synthetic background coincs
        zero_lag_offset_vector = offsetvector(
            (instrument, 0.0) for instrument in seglists)
        for n, events in enumerate(
                coinc_generator.coincs(lsctables.SnglBurst.get_peak)):
            # n = 1 on 2nd iteration, so placing this condition
            # where it is in the loop causes the correct number
            # of events to be added to the background
            if n >= n_coincs:
                break
            # assign fake peak times
            toas = toa_generator[frozenset(event.ifo
                                           for event in events)].next()
            for event in events:
                event.peak = toas[event.ifo]
            # compute coincidence parameters
            self.add_background(
                self.coinc_params(events, zero_lag_offset_vector,
                                  *param_func_args))

        # restore original peak times
        for event, peak_time in orig_peak_times.iteritems():
            event.peak = peak_time
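
# A hedged sketch of the coinc-count draw described in the comment above:
# the mean is (zero-lag livetime with at least two instruments on) times the
# total coincidence rate times the number of simulated experiments, and the
# simulated count is one Poisson draw with that mean.  numpy.random.poisson
# is used here to keep the illustration light; the method above uses
# scipy.stats.poisson.rvs.  The livetime and rate values are made up.
import numpy

livetime = 1e6      # seconds with >= 2 instruments on (made-up)
total_rate = 1e-4   # sum of the per-combination coincidence rates, in Hz (made-up)
experiments = 10
mean_n_coincs = livetime * total_rate * experiments
n_coincs = numpy.random.poisson(mean_n_coincs)
assert n_coincs >= 0  # the count is random; only its mean is fixed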
Example #16
background_time_slides = {}
background_seglists = segments.segmentlistdict()
for filename in options.background_time_slides:
    cache_entry = CacheEntry(None, None, None,
                             "file://localhost" + os.path.abspath(filename))

    background_time_slides[cache_entry] = lsctables.TimeSlideTable.get_table(
        ligolw_utils.load_filename(filename,
                                   verbose=options.verbose,
                                   contenthandler=ligolw_segments.
                                   LIGOLWContentHandler)).as_dict().values()

    for i in range(len(background_time_slides[cache_entry])):
        background_time_slides[cache_entry][i] = offsetvector.offsetvector(
            (instrument, LIGOTimeGPS(offset)) for instrument, offset in
            background_time_slides[cache_entry][i].items())
    background_seglists |= cosmicstring.compute_segment_lists(
        seglists,
        offsetvector.component_offsetvectors(
            background_time_slides[cache_entry], 2), min_segment_length, pad)

injection_time_slides = {}
injection_seglists = segments.segmentlistdict()
for filename in options.injection_time_slides:
    cache_entry = CacheEntry(None, None, None,
                             "file://localhost" + os.path.abspath(filename))

    injection_time_slides[cache_entry] = lsctables.TimeSlideTable.get_table(
        ligolw_utils.load_filename(filename,
                                   verbose=options.verbose,
                                   contenthandler=ligolw_segments.
                                   LIGOLWContentHandler)).as_dict().values()

    for i in range(len(injection_time_slides[cache_entry])):
        injection_time_slides[cache_entry][i] = offsetvector.offsetvector(
            (instrument, LIGOTimeGPS(offset)) for instrument, offset in
            injection_time_slides[cache_entry][i].items())
    injection_seglists |= cosmicstring.compute_segment_lists(
        seglists,
        offsetvector.component_offsetvectors(
            injection_time_slides[cache_entry], 2), min_segment_length, pad)

# requiring trigger construction.
#

if options.verbose:
	print >>sys.stderr, "Computing segments for which lalapps_StringSearch jobs are required ..."

background_time_slides = {}
background_seglists = segments.segmentlistdict()
for filename in options.background_time_slides:
	cache_entry = CacheEntry(None, None, None, "file://localhost" + os.path.abspath(filename))

	background_time_slides[cache_entry] = lsctables.TimeSlideTable.get_table(ligolw_utils.load_filename(filename, verbose = options.verbose, contenthandler = ligolw_segments.LIGOLWContentHandler)).as_dict().values()


	for i in range(len(background_time_slides[cache_entry])):
		background_time_slides[cache_entry][i] = offsetvector.offsetvector((instrument, LIGOTimeGPS(offset)) for instrument, offset in background_time_slides[cache_entry][i].items())
	background_seglists |= cosmicstring.compute_segment_lists(seglists, offsetvector.component_offsetvectors(background_time_slides[cache_entry], 2), min_segment_length, pad)

injection_time_slides = {}
injection_seglists = segments.segmentlistdict()
for filename in options.injection_time_slides:
	cache_entry = CacheEntry(None, None, None, "file://localhost" + os.path.abspath(filename))

	injection_time_slides[cache_entry] = lsctables.TimeSlideTable.get_table(ligolw_utils.load_filename(filename, verbose = options.verbose, contenthandler = ligolw_segments.LIGOLWContentHandler)).as_dict().values()

	for i in range(len(injection_time_slides[cache_entry])):
		injection_time_slides[cache_entry][i] = offsetvector.offsetvector((instrument, LIGOTimeGPS(offset)) for instrument, offset in injection_time_slides[cache_entry][i].items())
	injection_seglists |= cosmicstring.compute_segment_lists(seglists, offsetvector.component_offsetvectors(injection_time_slides[cache_entry], 2), min_segment_length, pad)


#
Example #19
	def offset_vector(self, time_slide_id):
		"""
		Return the offsetvector given by time_slide_id.
		"""
		return offsetvector.offsetvector((row.instrument, row.offset) for row in self.time_slide_index[time_slide_id])
Example #20
	def as_dict(self):
		"""
		Return a dictionary mapping time slide IDs to offset
		dictionaries.
		"""
		return dict((ilwd.ilwdchar(time_slide_id), offsetvector.offsetvector((instrument, offset) for time_slide_id, instrument, offset in values)) for time_slide_id, values in itertools.groupby(self.cursor.execute("SELECT time_slide_id, instrument, offset FROM time_slide ORDER BY time_slide_id"), lambda (time_slide_id, instrument, offset): time_slide_id))
    def get_injections(self):
        """
		Generator function to return

			sim, event_list, offsetvector

		tuples by querying the sim_burst, coinc_event and
		sngl_burst tables in the database.  Only coincs
		corresponding to "exact" sim_burst<-->coinc_event coincs
		will be retrieved.
		"""
        cursor = self.connection.cursor()
        for values in self.connection.cursor().execute(
                """
	SELECT
		sim_burst.*,
		burst_coinc_event_map.event_id
	FROM
		sim_burst
		JOIN coinc_event_map AS sim_coinc_event_map ON (
			sim_coinc_event_map.table_name == 'sim_burst'
			AND sim_coinc_event_map.event_id == sim_burst.simulation_id
		)
		JOIN coinc_event AS sim_coinc_event ON (
			sim_coinc_event.coinc_event_id == sim_coinc_event_map.coinc_event_id
		)
		JOIN coinc_event_map AS burst_coinc_event_map ON (
			burst_coinc_event_map.coinc_event_id == sim_coinc_event_map.coinc_event_id
			AND burst_coinc_event_map.table_name == 'coinc_event'
		)
	WHERE
		sim_coinc_event.coinc_def_id == ?
		""", (self.sce_definer_id, )):
            # retrieve the injection and the coinc_event_id
            sim = self.sim_burst_table.row_from_cols(values)
            coinc_event_id = values[-1]

            # retrieve the list of the sngl_bursts in this
            # coinc, and their time slide dictionary
            rows = [(self.sngl_burst_table.row_from_cols(row), row[-1])
                    for row in cursor.execute(
                        """
	SELECT
		sngl_burst.*,
		time_slide.offset
	FROM
		sngl_burst
		JOIN coinc_event_map ON (
			coinc_event_map.table_name == 'sngl_burst'
			AND coinc_event_map.event_id == sngl_burst.event_id
		)
		JOIN coinc_event ON (
			coinc_event.coinc_event_id == coinc_event_map.coinc_event_id
		)
		JOIN time_slide ON (
			coinc_event.time_slide_id == time_slide.time_slide_id
			AND time_slide.instrument == sngl_burst.ifo
		)
	WHERE
		coinc_event.coinc_event_id == ?
			""", (coinc_event_id, ))]
            # pass the events to whatever wants them
            yield sim, [event for event, offset in rows], offsetvector(
                (event.ifo, offset) for event, offset in rows)
        cursor.close()