def load_bbh_trigs(bbhtrigfile, segs):
    # Read in the BBH triggers
    bbh_trigs = SnglInspiralTable.read(bbhtrigfile)
    # Check that the BBH triggers have been read in successfully
    if not bbh_trigs:
        sys.exit("ERROR: No triggers for BBH file: %s" % bbhtrigfile)
    else:
        print "%d BBH triggers read" % len(bbh_trigs)
    # Keep only the BBH triggers that lie within the valid segment list
    bbh_trigs = bbh_trigs.vetoed(segs)
    # Sort the BBH triggers by their end times. This will be useful later
    bbh_trigs.sort(key=lambda x: x.end_time + x.end_time_ns * 1.0e-9)
    return bbh_trigs
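# ---------------------------------------------------------------------------- #
# Usage sketch (not part of the original script): a minimal example of calling
# load_bbh_trigs().  The file names are placeholders, and reading the segment
# list with gwpy's SegmentList.read() follows the pattern used elsewhere in
# these scripts; treat the specifics as assumptions, not the author's setup.
from gwpy.segments import SegmentList

segs = SegmentList.read('H1_ER7_segments.txt')              # placeholder path
bbh_trigs = load_bbh_trigs('H1-BBH_TRIGGERS.xml.gz', segs)  # placeholder file
print "%d triggers survive the segment veto" % len(bbh_trigs)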
#! /usr/bin/env python
# This is a simple script to check that all the data in the BBH trigger
# files is according to specification: in particular, that all the triggers
# are from the same detector and that no two triggers fall within 0.1 seconds
# of each other.
import sys

import numpy as np
from gwpy.table.lsctables import SnglInspiralTable

triggers = sys.argv[1]
gwdata = SnglInspiralTable.read(triggers)

# All triggers must come from a single detector
ifos = np.array(gwdata.getColumnByName('ifo')[:])
mixed_ifos = np.any(ifos != ifos[0])
if mixed_ifos:
    sys.exit("Error: Not all triggers are from the same detector!")

# Build the trigger end times in seconds and check that no two consecutive
# triggers are separated by less than 0.1 s
etimes = np.array(gwdata.getColumnByName('end_time')[:])
etimes_ns = np.array(gwdata.getColumnByName('end_time_ns')[:])
etimes = etimes + etimes_ns * 1.0e-9
etimes = np.sort(etimes)

delta_t = np.diff(etimes)
overlap = np.any(delta_t < 0.1)
if overlap:
                    help='IFO, H1 or L1')
parser.add_argument('--ranking-statistic', type=str, required=True,
                    default='newsnr', choices=['snr', 'newsnr'],
                    help='Ranking statistic, snr or newsnr')
parser.add_argument('--output-file', type=str, required=True,
                    help='Full path to output file')
parser.add_argument('--central-time', type=float, required=False,
                    help='Central time to look at')
parser.add_argument('--window', type=float, required=False, default=40.0,
                    help='Script will find loudest trigger within +/- window seconds')
parser.add_argument('--plot-window', type=float, required=True, default=40.0,
                    help='Plot will display +/- plot-window seconds around the loudest trigger')
parser.add_argument('--N', type=int, required=False, default=1,
                    help='Code will plot the Nth loudest trigger')
args = parser.parse_args()

events = SnglInspiralTable.read(args.single_ifo_trigs)
ifo = args.ifo

logging.info('Parsing XML files')

# get SNR of single detector triggers and find index of loudest event
snr = events.get_column('snr')
newsnr = [row.get_new_snr() for row in events]
end_time = events.get_column('end_time')
end_time_ns = events.get_column('end_time_ns')
end_times = np.add(end_time, end_time_ns * 10**-9)

if args.ranking_statistic == 'snr':
    highest_idx = np.argsort(snr)[-args.N]
    trig_time = end_times[highest_idx]
elif args.ranking_statistic == 'newsnr':
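# ---------------------------------------------------------------------------- #
# Side note (illustration only, separate from the script above): the Nth-loudest
# selection works because np.argsort() sorts in ascending order, so index [-N]
# picks the Nth largest entry.  The toy SNR array below is made up.
import numpy as np

snr_demo = np.array([6.2, 11.5, 7.9, 9.3])
assert snr_demo[np.argsort(snr_demo)[-1]] == 11.5  # loudest
assert snr_demo[np.argsort(snr_demo)[-2]] == 9.3   # second loudest (N = 2)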
#"ASC-AS_B_RF45_Q_PIT_OUT_DQ", #"SUS-OMC_M1_ISIWIT_T_DQ", #"PSL-ISS_AOM_DRIVER_MON_OUT_DQ", #"LSC-PRCL_OUT_DQ"] ifo = sys.argv[1] bbhdir = sys.argv[2] bbhfile= glob.glob(os.path.join(bbhdir, ifo+'*.xml.gz'))[0] omiccachedir = sys.argv[3] # Read in the segment file segments = SegmentList.read('/home/albert.wandui/detchar'+\ '/ER7/jul13/%s_ER7_segments.txt' %ifo) # Read in the BBH triggers bbh_trigs = SnglInspiralTable.read(bbhfile) # We only want the triggers in the given segments bbh_trigs = bbh_trigs.vetoed(segments) #bbh_trigs.sort(key=lambda x: x.end_time + x.end_time_ns * 1.0e-9) print "Read in all the BBH triggers!!!\n" print "Let's start working on the Omicron triggers...\n" # ---------------------------------------------------------------------------- # # Read in all the Omicron caches # Also get an idea of the speed of the code when reading from cache file vs # letting vet get the data itself Nchannels = len(channels) def get_omicron_triggers(channel, ifo, segments, cachefile):
import sys

import numpy as np
from gwpy.table.lsctables import SnglInspiralTable
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scipy.stats as stats

#L1data = np.load('ER7L1triggers.npz', 'r')
#H1data = np.load('ER7H1triggers.npz', 'r')

fn = sys.argv[1]
gwdata = SnglInspiralTable.read(fn)

endtime = np.array(gwdata.getColumnByName('end_time')[:]) + \
    np.array(gwdata.getColumnByName('end_time_ns')[:]) * 1.0e-9
mass1 = np.array(gwdata.getColumnByName('mass1'))
mass2 = np.array(gwdata.getColumnByName('mass2'))
snr = np.array(gwdata.getColumnByName('snr'))
chisq = np.array(gwdata.getColumnByName('chisq'))

mtotal = mass1 + mass2
eta = mass1 * mass2 / mtotal**2
mchirp = eta**(3./5) * mtotal

# Make the boolean arrays for selecting the data that we need.
# Set a threshold on the SNR so that we can focus on the
# fall-off at lower SNRs.
snr_min = 5.5
snr_max = np.max(snr)
snr_sel = np.logical_and(snr >= snr_min, snr < snr_max)
mass_sel = np.vstack((np.logical_and(mchirp > 0.0, mchirp <= 5.0),\
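# ---------------------------------------------------------------------------- #
# Side note on the chirp-mass algebra used above (standard definitions, not
# additional code from the script): with mtotal = m1 + m2 and
# eta = m1*m2 / mtotal**2,
#
#   mchirp = eta**(3./5) * mtotal = (m1*m2)**(3./5) / (m1 + m2)**(1./5)
#
# e.g. for m1 = m2 = 10 solar masses, eta = 0.25 and mchirp ~ 8.71.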