def get_injection_weight_func(contents, reweight_type, amplitude_cutoff):
    if reweight_type == "off":
        def weight_func(sim, amplitude_cutoff = amplitude_cutoff):
            # amplitude cut-off is not used
            return 1.0
    elif reweight_type == "astrophysical":
        population, = ligolw_process.get_process_params(contents.xmldoc, "lalapps_binj", "--population")
        if population != "string_cusp":
            raise ValueError("lalapps_binj was not run with --population=\"string_cusp\"")
        def weight_func(sim, amplitude_cutoff = amplitude_cutoff):
            # the "string_cusp" injection population is uniform in
            # log A, meaning the number of injections between log A
            # and log A + d log A is independent of A.  that
            # corresponds to a distribution density in A of
            # P(A) dA \propto A^{-1} dA.  we want
            # P(A) dA \propto A^{-4} dA, so each physical injection
            # needs to be treated as if it were A^{-3} virtual
            # injections;  the density of virtual injections is then
            # P(A) dA \propto A^{-4} dA.  the factor of 10^{21}
            # simply renders the amplitudes closer to 1;  since
            # double-precision arithmetic is used this should have no
            # effect on the results, but it might make the numbers a
            # little easier for humans to look at should anyone have
            # occasion to do so.
            return (max(sim.amplitude, amplitude_cutoff) * 1e21)**-3
    else:
        raise ValueError(reweight_type)
    return weight_func
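# A minimal, self-contained sketch of the "astrophysical" reweighting in
# isolation.  FakeSim is a hypothetical stand-in for a sim_burst row and
# the amplitudes are invented for illustration;  in real use the weight
# function comes from get_injection_weight_func() above.
class FakeSim(object):
    def __init__(self, amplitude):
        self.amplitude = amplitude

amplitude_cutoff = 1e-21

def weight(sim):
    # same formula as the "astrophysical" branch above
    return (max(sim.amplitude, amplitude_cutoff) * 1e21)**-3

print weight(FakeSim(2e-21))    # 0.125:  counted as 1/8 of a virtual injection
print weight(FakeSim(5e-22))    # 1.0:  amplitude clamped up to the cut-off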
def infer_flow(xmldoc):
    """
    Attempt to infer the low-frequency cutoff by combing through the
    process table and picking out the low-frequency option handed to
    whichever template generation program made this document.  If you
    trust this, you will, for sure, be disappointed at some point in
    using this program.
    """
    proctable = lsctables.ProcessTable.get_table(xmldoc)
    # FIXME: ...but really, I don't think you can fix this...
    procs = set(p.program for p in proctable if p.program in VALID_TMPLT_GENS)
    if len(procs) == 0:
        return None
    # FIXME: You askin' for trouble, son.
    try:
        return min(min(process.get_process_params(xmldoc, prog, VALID_TMPLT_GENS[prog], False)) for prog in procs)
    except ValueError:
        # no flow found;  bad luck for you
        pass
    return None
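# Hypothetical usage of infer_flow();  the file name and the 30 Hz
# fall-back value are invented for illustration.
from glue.ligolw import utils as ligolw_utils
xmldoc = ligolw_utils.load_filename("tmpltbank.xml.gz")
flow = infer_flow(xmldoc)
if flow is None:
    # the process table recorded no low-frequency option
    flow = 30.0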
def get_template_bank_f_low(xmldoc):
    """Determine the low frequency cutoff from a template bank file,
    whether the template bank was produced by lalapps_tmpltbank or
    lalapps_cbc_sbank.  bayestar_sim_to_tmpltbank does not have a
    command line option for the low-frequency cutoff;  instead, it is
    recorded in a row of the search_summvars table."""
    try:
        template_bank_f_low, = ligolw_process.get_process_params(
            xmldoc, 'tmpltbank', '--low-frequency-cutoff')
    except ValueError:
        try:
            template_bank_f_low, = ligolw_process.get_process_params(
                xmldoc, 'lalapps_cbc_sbank', '--flow')
        except ValueError:
            try:
                search_summvars_table = ligolw_table.get_table(
                    xmldoc, lsctables.SearchSummVarsTable.tableName)
                template_bank_f_low, = (
                    search_summvars.value
                    for search_summvars in search_summvars_table
                    if search_summvars.name == 'low-frequency cutoff')
            except ValueError:
                raise ValueError("Could not determine low-frequency cutoff")
    return template_bank_f_low
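# Hypothetical usage of get_template_bank_f_low(), assuming "bank.xml"
# was written by one of the three supported programs;  the file name is
# invented.
from glue.ligolw import utils as ligolw_utils
xmldoc = ligolw_utils.load_filename("bank.xml")
f_low = get_template_bank_f_low(xmldoc)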
def process_file(filename, products, live_time_program, tmp_path = None, veto_segments_name = None, verbose = False):
    #
    # connect to database and summarize contents
    #

    working_filename = dbtables.get_connection_filename(filename, tmp_path = tmp_path, verbose = verbose)
    contents = SnglBurstUtils.CoincDatabase(sqlite3.connect(working_filename), live_time_program, search = "StringCusp", veto_segments_name = veto_segments_name)
    if verbose:
        SnglBurstUtils.summarize_coinc_database(contents, filename = working_filename)

    #
    # augment the summary with extra stuff we need.  the filename is
    # recorded for dumping debugging information related to missed
    # injections.  if burca was run with the --coincidence-segments
    # option then the value is copied into a segmentlistdict to
    # facilitate the computation of livetime
    #

    contents.filename = filename
    contents.coincidence_segments = ligolw_process.get_process_params(contents.xmldoc, "lalapps_burca", "--coincidence-segments")
    if contents.coincidence_segments:
        # as a side-effect, this enforces the rule that burca has
        # been run on the input file exactly once
        contents.coincidence_segments, = contents.coincidence_segments
        contents.coincidence_segments = segments.segmentlistdict.fromkeys(contents.seglists, segmentsUtils.from_range_strings(contents.coincidence_segments.split(","), boundtype = dbtables.lsctables.LIGOTimeGPS).coalesce())
    else:
        contents.coincidence_segments = None

    #
    # process contents
    #

    for n, product in enumerate(products):
        if verbose:
            print >>sys.stderr, "%s: adding to product %d ..." % (working_filename, n)
        product.add_contents(contents, verbose = verbose)

    #
    # close
    #

    contents.connection.close()
    dbtables.discard_connection_filename(filename, working_filename, verbose = verbose)
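# Hypothetical driver loop for process_file();  the file names, the
# live-time program name, and the product class are all invented for
# illustration.  A product only needs an add_contents(contents,
# verbose = ...) method.
class CountProduct(object):
    # hypothetical product that just counts the databases it has seen
    def __init__(self):
        self.n_files = 0
    def add_contents(self, contents, verbose = False):
        self.n_files += 1

products = [CountProduct()]
for filename in ("string_coincs_01.sqlite", "string_coincs_02.sqlite"):
    process_file(filename, products, "StringSearch", verbose = True)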
def add_slidelessbackground(self, database, experiments, param_func_args = ()):
    # FIXME:  this needs to be taught how to not slide H1 and H2
    # with respect to each other

    # segment lists
    seglists = database.seglists - database.vetoseglists

    # construct the event list dictionary.  remove vetoed events
    # from the lists and save event peak times so they can be
    # restored later
    eventlists = {}
    orig_peak_times = {}
    for event in database.sngl_burst_table:
        if event.peak in seglists[event.ifo]:
            try:
                eventlists[event.ifo].append(event)
            except KeyError:
                eventlists[event.ifo] = [event]
            orig_peak_times[event] = event.peak

    # parse the --thresholds H1,L1=... command-line options from burca
    delta_t = [float(threshold.split("=")[-1]) for threshold in ligolw_process.get_process_params(database.xmldoc, "lalapps_burca", "--thresholds")]
    if not all(delta_t[0] == threshold for threshold in delta_t[1:]):
        raise ValueError(r"\Delta t is not unique in lalapps_burca arguments")
    delta_t = delta_t.pop()

    # construct the coinc generator.  note that H1+H2-only coincs
    # are forbidden, which is effected here by removing that
    # instrument combination from the object's internal .rates
    # dictionary
    coinc_generator = snglcoinc.CoincSynthesizer(eventlists, seglists, delta_t)
    if frozenset(("H1", "H2")) in coinc_generator.rates:
        del coinc_generator.rates[frozenset(("H1", "H2"))]

    # build a dictionary of time-of-arrival generators
    toa_generator = dict((instruments, coinc_generator.plausible_toas(instruments)) for instruments in coinc_generator.rates.keys())

    # how many coincs?  the expected number is obtained by
    # multiplying the total zero-lag time for which at least two
    # instruments were on by the sum of the rates for all coincs to
    # get the mean number of coincs per zero-lag observation time,
    # and multiplying that by the number of experiments the
    # background should simulate to get the mean number of
    # background events to simulate.  the actual number simulated
    # is a Poisson-distributed RV with that mean.
    n_coincs, = scipy.stats.poisson.rvs(float(abs(segmentsUtils.vote(seglists.values(), 2))) * sum(coinc_generator.rates.values()) * experiments)

    # generate synthetic background coincs
    zero_lag_offset_vector = offsetvector.fromkeys(seglists, 0.0)
    for n, events in enumerate(coinc_generator.coincs(lsctables.SnglBurst.get_peak)):
        # n = 1 on the 2nd iteration, so placing this condition
        # where it is in the loop causes the correct number of
        # events to be added to the background
        if n >= n_coincs:
            break
        # assign fake peak times
        toas = toa_generator[frozenset(event.ifo for event in events)].next()
        for event in events:
            event.peak = toas[event.ifo]
        # compute coincidence parameters
        self.denominator.increment(self.coinc_params(events, zero_lag_offset_vector, *param_func_args))

    # restore original peak times
    for event, peak_time in orig_peak_times.iteritems():
        event.peak = peak_time
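# A worked, self-contained version of the expected-count calculation in
# add_slidelessbackground() above;  the livetime, the total coincidence
# rate, and the experiment count are invented for illustration.
import scipy.stats

two_ifo_livetime = 1e6     # seconds with >= 2 instruments on (assumed)
total_coinc_rate = 1e-4    # sum(coinc_generator.rates.values()) (assumed)
experiments = 10

mean = two_ifo_livetime * total_coinc_rate * experiments    # = 1000.0
n_coincs, = scipy.stats.poisson.rvs(mean, size = 1)    # Poisson-distributed RV with that mean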
import sys
import glob
from optparse import OptionParser

from glue import cbcwebpage
from glue.ligolw import lsctables, table, utils
from glue.ligolw.utils import process

optp = OptionParser()
opts, args = optp.parse_args()

xmlfiles = sorted(glob.glob("*HVETO_ROUND_*.xml.gz"))

# load the first round's document once, then pull the command-line
# options recorded for laldetchar-hveto from its process_params table
xmldoc = utils.load_filename(xmlfiles[0])
ignore_list = process.get_process_params(xmldoc, "laldetchar-hveto", "--ignore-list")[0]
reference_channel = process.get_process_params(xmldoc, "laldetchar-hveto", "--reference-channel")[0]
gps_start = process.get_process_params(xmldoc, "laldetchar-hveto", "--gps-start")[0]
gps_end = process.get_process_params(xmldoc, "laldetchar-hveto", "--gps-end")[0]

page = cbcwebpage.cbcpage(title="HVeto Summary")

subp = page.add_subpage("overview", "HVeto Run Overview", "HVeto Run Overview")
#subp.add_text(txt="GPS segment: %10.2f -- %10.2f" % (gps_start, gps_end))
subp.div("""
<big><b>
GPS segment: %10.2f -- %10.2f <br/>
Reference channel: %s <br/>
</b></big>
""" % (gps_start, gps_end, reference_channel))
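# A sketch of the equivalent direct process_params access, to make the
# [0] indexing above concrete:  get_process_params() returns one value
# per matching row, roughly as below (it also checks that the program
# actually appears in the process table).
params = lsctables.ProcessParamsTable.get_table(xmldoc)
gps_start_values = [row.pyvalue for row in params
    if row.program == "laldetchar-hveto" and row.param == "--gps-start"]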