def generate_segments(ifo1_data, ifo2_data, ifo3_data, ifo4_data):
    """
    Compute the segments arising as the overlap of the four sets of
    single-ifo segment lists.

    ifo1_data = data segments for ifo1
    ifo2_data = data segments for ifo2
    ifo3_data = data segments for ifo3
    ifo4_data = data segments for ifo4
    """
    # start from a deep copy of the first list so the input is not modified
    segment_list = copy.deepcopy(ifo1_data)
    segment_list.intersect_4(ifo2_data, ifo3_data, ifo4_data)
    return segment_list
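# A minimal usage sketch of generate_segments(), assuming four single-ifo
# segment files have already been written out (the file names here are
# hypothetical):
#
#   h1 = pipeline.ScienceData(); h1.read('H1-segments.txt', 0)
#   h2 = pipeline.ScienceData(); h2.read('H2-segments.txt', 0)
#   l1 = pipeline.ScienceData(); l1.read('L1-segments.txt', 0)
#   g1 = pipeline.ScienceData(); g1.read('G1-segments.txt', 0)
#   quad_time = generate_segments(h1, h2, l1, g1)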
playground_only = 0

print "reading in single ifo science segments and creating master chunks...",
sys.stdout.flush()
segments = {}
data = {}
for ifo in ifo_list:
    try:
        segments[ifo] = cp.get('input', ifo + '-segments')
    except:
        # no segment file configured for this ifo
        segments[ifo] = None
    data[ifo] = pipeline.ScienceData()
    if segments[ifo]:
        data[ifo].read(segments[ifo], length + 2 * pad)
        data[ifo].make_chunks(length, overlap, playground_only, 0,
                              overlap / 2, pad)
        data[ifo].make_chunks_from_unused(length, overlap / 2, playground_only,
                                          0, 0, overlap / 2, pad)
print "done"
sys.stdout.flush()

# work out the earliest and latest times that are being analyzed
if not gps_start_time:
    gps_start_time = 10000000000
    for ifo in ifo_list:
        if data[ifo] and (data[ifo][0].start() < gps_start_time):
            gps_start_time = data[ifo][0].start()
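# For reference, a hedged reading of the positional arguments above, assuming
# the glue.pipeline signatures make_chunks(length, overlap, play, sl,
# excl_play, pad) and make_chunks_from_unused(length, trig_overlap, play,
# min_length, sl, excl_play, pad):
#
#   data[ifo].make_chunks(length,           # chunk length in seconds
#                         overlap,          # overlap between adjacent chunks
#                         playground_only,  # keep only playground chunks?
#                         0,                # sl: slide applied before the
#                                           # playground determination
#                         overlap / 2,      # excl_play: seconds excluded at
#                                           # chunk edges for that test
#                         pad)              # pad-data on either side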
epoch_cnt = 0

# loop over the segments defined by the calibration epochs
print("\n")
for epoch in epochs.epoch_segs():
    noise_output_files = []
    noise_output_files2 = []
    print("setting up jobs for calibration epoch "
          + str(epoch[1]) + " - " + str(epoch[2]) + "...")

    # output the epochs in their own directories
    epoch_dir = 'EPOCH' + '-' + str(epoch[1]) + '-' + str(epoch[2])
    mkdir_node2 = strain.MkdirNode(mkdir_job, epoch_dir)
    if opts.write_dax:
        dag.add_node(mkdir_node2)
    if opts.cat_noise_jobs:
        catfile = strain.open_noise_cat_file(epoch_dir)

    # make a ScienceData class for the calibration epoch
    epoch_data = pipeline.ScienceData()
    epoch_data.append_from_tuple(epoch)

    # read science segs that are greater or equal to a chunk from the input file
    data = pipeline.ScienceData()
    data.read(opts.segment_filename, 0)

    # intersect the science segments with the calibration epoch
    data.intersection(epoch_data)

    # create the chunks from the science segments
    data.make_chunks(length, 0, 0, 0, 0)
    data.make_short_chunks_from_unused(0, 0, 0, 0, 0)

    # create all the LSCdataFind jobs to run in sequence
    prev_df1 = None
    prev_df2 = None

    # only do data find jobs if requested
    # find all the h(t) data
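# A minimal sketch of the epoch intersection step above, with hypothetical
# GPS times, assuming append_from_tuple() takes an (id, start, end, duration)
# tuple as in glue.pipeline:
#
#   epoch_data = pipeline.ScienceData()
#   epoch_data.append_from_tuple((1, 815155213, 815255213, 100000))
#   data = pipeline.ScienceData()
#   data.read('science_segments.txt', 0)
#   data.intersection(epoch_data)  # only segments within the epoch survive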
print("Double check the parameter file's injection section!") os.abort() #We assume the input segment list has entries exceeding layerTopBlockSize #so we will try to loop it. If the pipe builder was invoked with a FLOATING #top block size then we will issue an error IFF there is more than 1 layer configured segmentListName = segmentList dataBlockSize = int( float(str.strip(cp.get('layerconfig', 'layerTopBlockSize')))) if not (topBlockFloat): #Convert the segment list to smaller blocks reformatSegList = tracksearch.tracksearchConvertSegList( segmentList, dataBlockSize, cp, topBlockFloat, overrideBurn) reformatSegList.writeSegList() segmentListName = reformatSegList.getSegmentName() allData = pipeline.ScienceData() allData.read(segmentListName, dataBlockSize) allData.make_chunks(dataBlockSize) else: #Do optimized floating blocks #Check for layer2 setSize = int(cp.get('layerconfig', 'layer1SetSize')) timeScale = float(cp.get('layerconfig', 'layer1TimeScale')) minSize = setSize * timeScale print("Building pipe with rubber block size option enabled.") print("Minimum duration block: " + str(minSize)) print("Maximum duration block: " + str(dataBlockSize)) for opt in cp.options('layerconfig'): if str(opt).lower().__contains__(str('layer2TimeScale').lower()): print( "Error found additional layerconfig options for multi-resolution search."
calibrated = False
for opt in cp.options('data'):
    if opt.find('calibrated') > -1:
        calibrated = True

# get the pad and chunk lengths from the values in the ini file
pad = int(cp.get('data', 'pad-data'))
n = int(cp.get('data', 'segment-length'))
s = int(cp.get('data', 'number-of-segments'))
r = int(cp.get('data', 'sample-rate'))
o = int(cp.get('inspiral', 'segment-overlap'))
length = (n * s - (s - 1) * o) / r
overlap = o / r

# read science segs that are greater or equal to a chunk from the input file
data = pipeline.ScienceData()
data.read(cp.get('input', 'segments'), length + 2 * pad)

# create the chunks from the science segments
data.make_chunks(length, overlap, playground_only, 0, overlap / 2, pad)
data.make_chunks_from_unused(length, overlap / 2, playground_only,
                             overlap / 2, 0, overlap / 2, pad)

# get the order of the ifos to filter
ifo1 = cp.get('pipeline', 'ifo1')
ifo2 = cp.get('pipeline', 'ifo2')
ifo1_snr = cp.get('pipeline', 'ifo1-snr-threshold')
ifo2_snr = cp.get('pipeline', 'ifo2-snr-threshold')
ifo1_chisq = cp.get('pipeline', 'ifo1-chisq-threshold')
ifo2_chisq = cp.get('pipeline', 'ifo2-chisq-threshold')
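# Worked example of the chunk-length arithmetic above, with hypothetical but
# typical ini values (n, s, and o are in sample points, not seconds):
#
#   n = 1048576   # segment-length in sample points
#   s = 15        # number-of-segments
#   r = 4096      # sample-rate in Hz
#   o = 524288    # segment-overlap in sample points
#   length  = (n * s - (s - 1) * o) / r   # = 2048 s of data per chunk
#   overlap = o / r                       # = 128 s overlap between chunks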
# Step 1: read science segs that are greater or equal to a chunk
# from the input file
print "reading in single ifo science segments and creating master chunks...",
sys.stdout.flush()
segments = {}
data = {}
for ifo in ifo_list:
    try:
        segments[ifo] = cp.get('input', ifo + '-segments')
    except:
        # no segment file configured for this ifo
        segments[ifo] = None
    data[ifo] = pipeline.ScienceData()
    if segments[ifo]:
        data[ifo].read(segments[ifo], length + 2 * pad)
        data[ifo].make_chunks(length, overlap, playground_only, 0,
                              overlap / 2, pad)
        data[ifo].make_chunks_from_unused(length, overlap / 2, playground_only,
                                          0, 0, overlap / 2, pad)
print "done"

# work out the earliest and latest times that are being analyzed
if not gps_start_time:
    gps_start_time = 10000000000
    for ifo in ifo_list:
        if data[ifo] and (data[ifo][0].start() < gps_start_time):
            gps_start_time = data[ifo][0].start()
    print "GPS start time not specified, obtained from segment lists as " + \
        str(gps_start_time)