Example #1
    def add_events(self, sbtable, inseg=None):
        """
		Add a trial to the current running tally. If segment is provided, then the key in the trial table is set to be this. Otherwise, the segment is determined from the peak times of the snglbursts
		"""

        # If no events are provided and no segment is indicated, there is no
        # operation to map this into a trial, so we do nothing
        if len(sbtable) == 0 and inseg is None:
            return

        if inseg is None:
            inseg = []
            for sb in sbtable:
                start = sb.start_time + 1e-9 * sb.start_time_ns
                stop = start + sb.duration
                inseg.append(segments.segment(start, stop))
            inseg = segments.segmentlist(inseg).coalesce()
            inseg = segments.segment(inseg[0][0], inseg[-1][1])

        oldsegs = [s for s in self.onsource.keys() if s.intersects(inseg)]

        # FIXME: Is it possible for this to be > 1?
        # Yes, but the reorganization logic is tricky.
        # Call normalize often (e.g. every time you add a new segment).
        if len(oldsegs) == 1:
            oldseg = oldsegs[0]
            sbtable += self.onsource[oldseg]
            del self.onsource[oldseg]
            inseg = oldseg | inseg

        self.onsource[inseg] = sbtable
Example #2
def do_summary_table(xmldoc, sim_tree, liv_tree):
    try:
        search_summary = lsctables.SearchSummaryTable.get_table(xmldoc)
    except ValueError:
        search_summary = lsctables.New(lsctables.SearchSummaryTable, [
            "process_id", "nevents", "ifos", "comment", "in_start_time",
            "in_start_time_ns", "out_start_time", "out_start_time_ns",
            "in_end_time", "in_end_time_ns", "out_end_time", "out_end_time_ns"
        ])
        xmldoc.childNodes[0].appendChild(search_summary)

    process_id_type = lsctables.ProcessID

    runids = set()
    for i in range(0, sim_tree.GetEntries()):
        sim_tree.GetEntry(i)

        # Id for the run processed by WaveBurst -> process ID
        if sim_tree.run in runids:
            continue

        row = search_summary.RowType()
        row.process_id = process_id_type(sim_tree.run)
        runids.add(sim_tree.run)

        # Search Summary Table
        # events found in the run -> nevents
        setattr(row, "nevents", sim_tree.GetEntries())

        # Instruments involved in the search
        row.ifos = lsctables.ifos_from_instrument_set(
            get_ifos_from_index(
                branch_array_to_list(sim_tree.ifo, sim_tree.ndim)))
        setattr(row, "comment", "waveburst")

        # Begin and end time of the segment
        # TODO: This is a typical offset on either side of the job for
        # artifacts. It can, and probably will, change in the future and
        # should not be hardcoded.
        # TODO: Make this work properly. We need a gps end from the livetime
        waveoffset = 8
        livetime = 600
        #live_entries = liv_tree.GetEntries()
        # FIXME: This loop is far too slow.
        #for l in range(0, live_entries):
        #    liv_tree.GetEntry(l)
        #    livetime = max(livetime, liv_tree.live)

        #if livetime < 0:
        #    sys.exit("Could not find livetime, cannot fill all of summary table.")
        # in -- with waveoffset
        # out -- without waveoffset
        row.set_in(
            segments.segment(LIGOTimeGPS(sim_tree.gps - waveoffset),
                             LIGOTimeGPS(sim_tree.gps + livetime +
                                         waveoffset)))
        row.set_out(
            segments.segment(LIGOTimeGPS(sim_tree.gps),
                             LIGOTimeGPS(sim_tree.gps + livetime)))

        search_summary.append(row)
Example #3
# Assumed imports for this snippet (ligo.segments was formerly glue.segments):
import json

from ligo import segments


def generated_vdb_ascii(json_str, filepath):
    res_dict = json.loads(json_str)
    active_list = res_dict['active']
    active_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in active_list])
    known_list = res_dict['known']
    known_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in known_list])
    query_start = res_dict['query_information']['start']
    query_stop = res_dict['query_information']['end']
    if query_start != 0 and query_stop != 0:
        requested_span = segments.segmentlist(
            [segments.segment(query_start, query_stop)])
    else:
        requested_span = segments.segmentlist(
            [segments.segment(0, 9999999999)])
    active_segments_string = ',1 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in active_segments]) + ",1 \n"
    unknown_segments = requested_span - known_segments
    unknown_segments_string = ',-1 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in unknown_segments]) + ",-1 \n"
    known_not_active_segments = known_segments - active_segments
    known_not_active_segments_string = ',0 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in known_not_active_segments]) + ",0 \n"
    query_info_string = json.dumps(res_dict['query_information'], indent=1)
    with open(filepath, 'w+') as output_fileh:
        output_fileh.write(query_info_string)
        output_fileh.write('\n')
        output_fileh.write(active_segments_string)
        output_fileh.write(unknown_segments_string)
        output_fileh.write(known_not_active_segments_string)
    return filepath
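
A minimal usage sketch for the function above (hypothetical flag data and
output path; assumes the same json and segments imports):

payload = json.dumps({
    "active": [[100, 200], [300, 400]],
    "known": [[50, 450]],
    "query_information": {"start": 50, "end": 500},
})
generated_vdb_ascii(payload, "flag_segments.txt")
# flag_segments.txt then lists active spans with flag 1, unknown spans with
# flag -1, and known-but-inactive spans with flag 0.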
Example #4
def generated_vdb_ascii(json_dict, filepath):
    res_dict = json_dict
    active_list = res_dict['active']
    active_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in active_list])
    active_segments.coalesce()
    known_list = res_dict['known']
    known_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in known_list])
    known_segments.coalesce()
    query_start = res_dict['query_information']['start']
    query_stop = res_dict['query_information']['end']
    if query_start != 0 and query_stop != 0:
        requested_span = segments.segmentlist(
            [segments.segment(query_start, query_stop)])
    else:
        requested_span = segments.segmentlist(
            [segments.segment(0, 9999999999)])
    active_segments_string = ',1 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in active_segments]) + ",1 \n"
    unknown_segments = requested_span - known_segments
    unknown_segments_string = ',-1 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in unknown_segments]) + ",-1 \n"
    known_not_active_segments = known_segments - active_segments
    known_not_active_segments_string = ',0 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in known_not_active_segments]) + ",0 \n"
    query_info_string = json.dumps(res_dict['query_information'], indent=1)
    # note: this variant appends to an existing file rather than overwriting
    with open(filepath, 'a') as output_fileh:
        output_fileh.write(query_info_string)
        output_fileh.write('\n')
        output_fileh.write(active_segments_string)
        output_fileh.write(unknown_segments_string)
        output_fileh.write(known_not_active_segments_string)
    return filepath
Example #5
def split_segment(seg, min_segment_length, pad, overlap,
                  short_segment_duration, max_job_length):
    # avoid infinite loop
    if min_segment_length + 2 * pad <= overlap:
        raise ValueError(
            "infinite loop: min_segment_length + 2 * pad must be > overlap")

    # clip max_job_length down to an allowed size
    max_job_length = clip_segment_length(max_job_length, pad,
                                         short_segment_duration)

    seglist = segments.segmentlist()
    while abs(seg) >= min_segment_length + 2 * pad:
        # try to use max_job_length each time
        if abs(seg) >= max_job_length:
            seglist.append(segments.segment(seg[0], seg[0] + max_job_length))
        else:
            seglist.append(
                segments.segment(
                    seg[0], seg[0] + clip_segment_length(
                        abs(seg), pad, short_segment_duration)))
        assert abs(seglist[-1]) != 0  # safety-check for no-op
        # bounds must be integers
        if abs((int(seglist[-1][0]) - seglist[-1][0]) /
               seglist[-1][0]) > 1e-14 or abs(
                   (int(seglist[-1][1]) - seglist[-1][1]) /
                   seglist[-1][1]) > 1e-14:
            raise ValueError("segment %s does not have integer boundaries" %
                             str(seglist[-1]))
        # advance segment
        seg = segments.segment(seglist[-1][1] - overlap, seg[1])
    if not seglist:
        raise ValueError("unable to use segment %s" % str(seg))
    return seglist
Example #6
    def normalize(self):
        """
		Redistribute events to offsource and onsource based on current time span.
		"""
        all_segs = segments.segmentlist(self.onsource.keys())
        if len(all_segs) == 0:
            return

        if len(self.offsource.keys()) > 0:
            all_segs += segments.segmentlist(self.offsource.keys())
        all_segs.coalesce()
        begin, end = all_segs[0][0], all_segs[-1][1]
        span = float(end - begin)
        if span < self.onsource_interval:
            # Not much we can do.
            return

        if span > self.offsource_interval + self.onsource_interval:
            begin = end - (self.offsource_interval + self.onsource_interval)

        onsource_seg = segments.segment(end - self.onsource_interval, end)
        offsource_seg = segments.segment(begin, end - self.onsource_interval)

        # iterate over a copy of the items, since entries may be deleted
        for seg, sbt in list(self.offsource.items()):
            try:
                newseg = seg & offsource_seg
            except ValueError:  # offsource segment is out of the current window
                del self.offsource[seg]
                continue

            if seg != newseg:
                del self.offsource[seg]
                self.offsource[newseg] = [
                    sb for sb in sbt
                    if (sb.peak_time + 1e-9 * sb.peak_time_ns) in newseg]

        for seg, sbt in list(self.onsource.items()):
            if seg in onsource_seg:
                continue
            elif offsource_seg.disjoint(seg) == 1:
                # segment ran off the span since last check
                del self.onsource[seg]
                continue

            offseg = seg & offsource_seg
            del self.onsource[seg]

            try:
                onseg = seg & onsource_seg
                self.onsource[onseg] = [
                    sb for sb in sbt
                    if (sb.peak_time + 1e-9 * sb.peak_time_ns) in onseg]
            except ValueError:  # onsource segment completely out of new segment
                pass

            self.offsource[offseg] = [
                sb for sb in sbt
                if (sb.peak_time + 1e-9 * sb.peak_time_ns) in offseg]
Example #7
def find_tile_greedy_slew(current_time,
                          current_ra,
                          current_dec,
                          tilesegmentlists,
                          tileprobs,
                          config_struct,
                          tile_struct,
                          keynames,
                          tileAllocatedTime,
                          exptimecheckkeys=[],
                          idle=0):
    next_obs = -1
    idx2 = -1
    score_selected = -1
    slew_readout_selected = -1
    explength_selected = -1
    for ii in range(len(tilesegmentlists)):  # for every tile
        key = keynames[ii]
        # exclude some tiles
        if keynames[ii] in exptimecheckkeys or np.absolute(
                tileAllocatedTime[ii]) < 1e-5:
            continue
        # calculate slew readout time
        distance = np.sqrt((tile_struct[key]['ra'] - current_ra)**2 +
                           (tile_struct[key]['dec'] - current_dec)**2)
        slew_readout = np.max(
            [config_struct['readout'], distance / config_struct['slew_rate']])
        slew_readout = np.max([slew_readout - idle, 0])
        slew_readout = slew_readout / 86400
        for jj in range(len(tilesegmentlists[ii])):  # for every segment
            seg = tilesegmentlists[ii][jj]
            if current_time + slew_readout < seg[1]:
                # this segment ends later than current time + slew
                if current_time + slew_readout >= seg[0]:
                    # it also starts earlier than current time + slew:
                    # calculate the score
                    explength = np.min([
                        seg[1] - current_time - slew_readout,
                        tileAllocatedTime[ii]
                    ])
                    score = tileprobs[ii] * explength / (1 + slew_readout)
                    if idx2 == -1 or score > score_selected:
                        idx2 = keynames[ii]
                        score_selected = score
                        slew_readout_selected = slew_readout
                        explength_selected = explength
                elif idx2 == -1:
                    # it starts later than current time + slew
                    if next_obs == -1 or next_obs > seg[0]:
                        next_obs = seg[0]
                break
    exp_idle_seg = None
    if idx2 != -1:
        # use the slew/readout time of the selected tile, not that of the
        # last tile examined by the loop
        exp_idle_seg = segments.segment(
            current_time + slew_readout_selected,
            current_time + slew_readout_selected + explength_selected)
    elif next_obs != -1:
        exp_idle_seg = segments.segment(current_time, next_obs)
    return idx2, slew_readout_selected, exp_idle_seg
Example #8
def associate_psds_to_segments(opt, fd_segments, gwstrain, flen, delta_f, flow,
                               dyn_range_factor=1., precision=None):
    """Generate a set of overlapping PSDs covering the data in GWstrain.
    Then associate these PSDs with the appropriate segment in strain_segments.

    Parameters
    ----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes (psd_model, psd_file, asd_file, psd_estimation,
        psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
    fd_segments : StrainSegments.fourier_segments() object
        The fourier transforms of the various analysis segments. The psd
        attribute of each segment is updated to point to the appropriate PSD.
    gwstrain : Strain object
        The timeseries of raw data on which to estimate PSDs.
    flen : int
        The length in samples of the output PSDs.
    delta_f : float
        The frequency step of the output PSDs.
    flow : float
        The low frequency cutoff to use when calculating the PSD.
    dyn_range_factor : {1, float}
        For PSDs taken from models or text files, if `dyn_range_factor` is
        not None, then the PSD is multiplied by `dyn_range_factor` ** 2.
    precision : str, choices (None,'single','double')
        If not specified, or specified as None, the precision of the returned
        PSD will match the precision of the data, if measuring a PSD, or will
        match the default precision of the model if using an analytical PSD.
        If 'single' the PSD will be converted to float32, if not already in
        that precision. If 'double' the PSD will be converted to float64, if
        not already in that precision.
    """
    psds_and_times = generate_overlapping_psds(opt, gwstrain, flen, delta_f,
                                       flow, dyn_range_factor=dyn_range_factor,
                                       precision=precision)

    for fd_segment in fd_segments:
        best_psd = None
        psd_overlap = 0
        inp_seg = segments.segment(fd_segment.seg_slice.start,
                                   fd_segment.seg_slice.stop)
        for start_idx, end_idx, psd in psds_and_times:
            psd_seg = segments.segment(start_idx, end_idx)
            if psd_seg.intersects(inp_seg):
                curr_overlap = abs(inp_seg & psd_seg)
                if curr_overlap > psd_overlap:
                    psd_overlap = curr_overlap
                    best_psd = psd
        if best_psd is None:
            err_msg = "No PSDs found intersecting segment!"
            raise ValueError(err_msg)
        fd_segment.psd = best_psd
Example #9
def load_segments_from_xml(xml_doc, return_dict=False, select_id=None):
    """Read a ligo.segments.segmentlist from the file object file containing an
    xml segment table.

    Parameters
    ----------
    xml_doc : str
        name of the segment xml file
    return_dict : bool, optional
        return a ligo.segments.segmentlistdict containing coalesced
        ligo.segments.segmentlists keyed by seg_def.name for each entry
        in the contained segment_def_table. Default False
    select_id : int, optional
        return a ligo.segments.segmentlist object containing only
        those segments matching the given segment_def_id integer
    """

    # Load SegmentDefTable and SegmentTable
    seg_def_table = load_xml_table(xml_doc,
                                   glsctables.SegmentDefTable.tableName)
    seg_table = load_xml_table(xml_doc, glsctables.SegmentTable.tableName)

    if return_dict:
        segs = segments.segmentlistdict()
    else:
        segs = segments.segmentlist()

    seg_id = {}
    for seg_def in seg_def_table:
        seg_id[int(seg_def.segment_def_id)] = str(seg_def.name)
        if return_dict:
            segs[str(seg_def.name)] = segments.segmentlist()

    for seg in seg_table:
        if return_dict:
            segs[seg_id[int(seg.segment_def_id)]]\
                .append(segments.segment(seg.start_time, seg.end_time))
            continue
        if select_id is not None and int(seg.segment_def_id) != select_id:
            # skip segments that do not match the requested definer id
            continue
        segs.append(segments.segment(seg.start_time, seg.end_time))

    if return_dict:
        for seg_name in seg_id.values():
            segs[seg_name] = segs[seg_name].coalesce()
    else:
        segs = segs.coalesce()

    return segs
Example #10
def do_summary_table(xmldoc, sim_tree, liv_tree):
  try: 
    search_summary = lsctables.SearchSummaryTable.get_table(xmldoc)
  except ValueError:
    search_summary = lsctables.New(lsctables.SearchSummaryTable,
    ["process_id", "nevents", "ifos", "comment", "in_start_time",
    "in_start_time_ns", "out_start_time", "out_start_time_ns",
    "in_end_time", "in_end_time_ns", "out_end_time", "out_end_time_ns"])
    xmldoc.childNodes[0].appendChild(search_summary)

  process_id_type = lsctables.ProcessID

  runids = set()
  for i in range(0, sim_tree.GetEntries()) :
    sim_tree.GetEntry(i)

    # Id for the run processed by WaveBurst -> process ID
    if sim_tree.run in runids :
      continue

    row = search_summary.RowType()
    row.process_id = process_id_type(sim_tree.run)
    runids.add(sim_tree.run)

    # Search Summary Table
    # events found in the run -> nevents
    setattr(row, "nevents", sim_tree.GetEntries())

    # Instruments involved in the search
    row.ifos = lsctables.ifos_from_instrument_set( get_ifos_from_index( branch_array_to_list ( sim_tree.ifo, sim_tree.ndim ) ) )
    setattr(row, "comment", "waveburst")

    # Begin and end time of the segment
    # TODO: This is a typical offset on either side of the job for
    # artifacts. It can, and probably will, change in the future and
    # should not be hardcoded.
    # TODO: Make this work properly. We need a gps end from the livetime
    waveoffset = 8
    livetime = 600
    #live_entries = liv_tree.GetEntries()
    # FIXME: This loop is far too slow.
    #for l in range(0, live_entries):
    #  liv_tree.GetEntry(l)
    #  livetime = max(livetime, liv_tree.live)

    #if livetime < 0:
      #sys.exit("Could not find livetime, cannot fill all of summary table.")
    # in -- with waveoffset
    # out -- without waveoffset
    row.set_in(segments.segment(LIGOTimeGPS(sim_tree.gps - waveoffset), LIGOTimeGPS(sim_tree.gps + livetime + waveoffset)))
    row.set_out(segments.segment(LIGOTimeGPS(sim_tree.gps), LIGOTimeGPS(sim_tree.gps + livetime)))

    search_summary.append(row)
Example #11
def get_segments(params, config_struct):

    gpstime = params["gpstime"]
    event_mjd = Time(gpstime, format='gps', scale='utc').mjd

    segmentlist = segments.segmentlist()
    n_windows = len(params["Tobs"]) // 2
    start_segments = event_mjd + params["Tobs"][::2]
    end_segments = event_mjd + params["Tobs"][1::2]
    for start_segment, end_segment in zip(start_segments, end_segments):
        segmentlist.append(segments.segment(start_segment, end_segment))

    observer = ephem.Observer()
    observer.lat = str(config_struct["latitude"])
    observer.lon = str(config_struct["longitude"])
    observer.horizon = str(-12.0)
    observer.elevation = config_struct["elevation"]

    date_start = ephem.Date(
        Time(segmentlist[0][0], format='mjd', scale='utc').iso)
    date_end = ephem.Date(
        Time(segmentlist[-1][1], format='mjd', scale='utc').iso)
    observer.date = ephem.Date(
        Time(segmentlist[0][0], format='mjd', scale='utc').iso)

    sun = ephem.Sun()
    nightsegmentlist = segments.segmentlist()
    while date_start < date_end:
        date_rise = observer.next_rising(sun, start=date_start)
        date_set = observer.next_setting(sun, start=date_start)
        if date_set > date_rise:
            date_set = observer.previous_setting(sun, start=date_start)

        astropy_rise = Time(date_rise.datetime(), scale='utc').mjd
        astropy_set = Time(date_set.datetime(), scale='utc').mjd

        segment = segments.segment(astropy_set, astropy_rise)
        nightsegmentlist = nightsegmentlist + segments.segmentlist([segment])
        nightsegmentlist.coalesce()

        date_start = date_rise
        observer.date = date_rise

    segmentlistdic = segments.segmentlistdict()
    segmentlistdic["observations"] = segmentlist
    segmentlistdic["night"] = nightsegmentlist
    segmentlist = segmentlistdic.intersection(["observations", "night"])
    segmentlist.coalesce()

    return segmentlist
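
A minimal usage sketch (hypothetical site and observing-window values; assumes
numpy, astropy, and pyephem as imported by the module):

params = {"gpstime": 1126259462.0,
          "Tobs": np.array([0.0, 1.0])}  # one window: 0 to 1 day after event
config_struct = {"latitude": -30.24, "longitude": -70.73, "elevation": 2650.0}
night_windows = get_segments(params, config_struct)  # MJD segments, night only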
Example #12
def new_plots(instrument, amplitude_func, amplitude_lbl, plots):
	l = (
		FreqVsTime(instrument),
		HrssVsFreqScatter(instrument, amplitude_func, amplitude_lbl),
		SimBurstUtils.Efficiency_hrss_vs_freq((instrument,), amplitude_func, amplitude_lbl, 0.1),
		TriggerCountHistogram(instrument),
		RecoveredVsInjectedhrss(instrument, amplitude_func, amplitude_lbl),
		RecoveredPerInjectedhrssVsFreq(instrument, amplitude_func, amplitude_lbl),
		RecoveredPerInjectedhrssVsBandwidth(instrument, amplitude_func, amplitude_lbl),
		RecoveredTimeOffset(instrument, segments.segment(-0.03, +0.03), 0.00015),
		RecoveredFrequencyOffset(instrument, segments.segment(-1.0, +1.0), .002),
		RecoveredVsInjectedFreq(instrument, amplitude_func)
	)
	return [l[i] for i in plots]
Example #13
def subdivide(seglist, length, min_length=0):
    """
	Subdivide a segent list into smaller segments of length, allowing for a minimum length (default = 0).
	"""
    newlist = []
    for seg in seglist:
        while abs(seg) - min_length > length + min_length:
            newlist.append(segments.segment(seg[0], seg[0] + length))
            seg = segments.segment(seg[0] + length, seg[1])

        if abs(seg) > 0:
            newlist.append(segments.segment(seg[0], seg[1] - min_length))
            newlist.append(segments.segment(seg[1] - min_length, seg[1]))

    return segments.segmentlist(newlist)
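
A minimal usage sketch (assumes the ligo.segments import used above):

seglist = segments.segmentlist([segments.segment(0, 100)])
print(subdivide(seglist, 30, min_length=5))
# -> [segment(0, 30), segment(30, 60), segment(60, 95), segment(95, 100)]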
Example #14
def construct_trials(seg_files, seg_dict, ifos, slide_dict, vetoes):
    """Constructs trials from triggers, timeslides, segments and vetoes"""

    trial_dict = {}

    # Get segments
    segs = read_seg_files(seg_files)

    # Separate segments
    trial_time = abs(segs['on'])

    for slide_id in slide_dict:
        # These can only *reduce* the analysis time
        curr_seg_list = seg_dict[slide_id]

        # Construct the buffer segment list
        seg_buffer = segments.segmentlist()
        for ifo in ifos:
            slide_offset = slide_dict[slide_id][ifo]
            seg_buffer.append(
                segments.segment(segs['buffer'][0] - slide_offset,
                                 segs['buffer'][1] - slide_offset))
        seg_buffer.coalesce()

        # Construct the ifo-indexed dictionary of slid vetoes
        slid_vetoes = slide_vetoes(vetoes, slide_dict, slide_id)

        # Construct trial list and check against buffer
        trial_dict[slide_id] = segments.segmentlist()
        for curr_seg in curr_seg_list:
            iter_int = 1
            while True:
                trial_end = curr_seg[0] + trial_time * iter_int
                if trial_end > curr_seg[1]:
                    break
                curr_trial = segments.segment(trial_end - trial_time,
                                              trial_end)
                if not seg_buffer.intersects_segment(curr_trial):
                    intersect = numpy.any([
                        slid_vetoes[ifo].intersects_segment(curr_trial)
                        for ifo in ifos
                    ])
                    if not intersect:
                        trial_dict[slide_id].append(curr_trial)

                iter_int += 1

    return trial_dict
Example #15
    def create_node(self,
                    trig_files,
                    bank_file,
                    stat_files,
                    veto_file,
                    veto_name,
                    template_str,
                    pivot_ifo,
                    fixed_ifo,
                    tags=None):
        if tags is None:
            tags = []
        segs = trig_files.get_times_covered_by_files()
        seg = segments.segment(segs[0][0], segs[-1][1])
        node = Node(self)
        node.add_input_opt('--template-bank', bank_file)
        node.add_input_list_opt('--trigger-files', trig_files)
        if len(stat_files) > 0:
            node.add_input_list_opt('--statistic-files', stat_files)
        if veto_file is not None:
            node.add_input_opt('--veto-files', veto_file)
            node.add_opt('--segment-name', veto_name)
        node.add_opt('--pivot-ifo', pivot_ifo)
        node.add_opt('--fixed-ifo', fixed_ifo)
        node.add_opt('--template-fraction-range', template_str)
        node.new_output_file_opt(seg, '.hdf', '--output-file', tags=tags)
        return node
Example #16
    def from_T050017(cls, url, coltype = LIGOTimeGPS):
        """
        Parse a URL in the style of T050017-00 into a CacheEntry.  The
        T050017-00 file name format is, essentially,

        observatory-description-start-duration.extension

        Example:

        >>> c = CacheEntry.from_T050017("file://localhost/data/node144/frames/S5/strain-L2/LLO/L-L1_RDS_C03_L2-8365/L-L1_RDS_C03_L2-836562330-83.gwf")
        >>> c.observatory
        'L'
        >>> c.host
        'localhost'
        >>> os.path.basename(c.path)
        'L-L1_RDS_C03_L2-836562330-83.gwf'
        """
        match = cls._url_regex.search(url)
        if not match:
            raise ValueError("could not convert %s to CacheEntry" % repr(url))
        observatory = match.group("obs")
        description = match.group("dsc")
        # FIXME:  remove typecasts when LIGOTimeGPS can be passed a unicode
        start = str(match.group("strt"))
        duration = str(match.group("dur"))
        if start == "-" and duration == "-":
            # no segment information
            segment = None
        else:
            segment = segments.segment(coltype(start), coltype(start) + coltype(duration))
        return cls(observatory, description, segment, url)
Example #17
def get_summary(basedir, ifo, cluster, cat, start_time, end_time):
    all_sum     = segmentlist([])
    cur_time    = start_time

    while cur_time < end_time:
        tstring  = os.popen('tconvert -f %Y%m/%Y%m%d ' + str(cur_time)).readlines()[0].strip()
        infile   = open('%s/%s/%s-0-SUMMARY_%s.csv'  % (basedir, tstring, ifo, cluster))
        lines    = [l.strip().split(',') for l in infile.readlines()]
        summary  = segmentlist([segment(int(l[0]), int(l[1])) for l in lines]).coalesce()
        all_sum  = all_sum + summary

        cur_time += 60 * 60 * 24

    all_sum = all_sum & segmentlist([segment(start_time, end_time)])

    return all_sum
Example #18
    def create_node(self,
                    zerolag,
                    full_data,
                    injfull,
                    fullinj,
                    ifos,
                    tags=None):
        if tags is None:
            tags = []
        segs = zerolag.get_times_covered_by_files()
        seg = segments.segment(segs[0][0], segs[-1][1])

        node = Node(self)
        node.add_input_list_opt('--zero-lag-coincs', zerolag)

        if isinstance(full_data, list):
            node.add_input_list_opt('--full-data-background', full_data)
        else:
            node.add_input_opt('--full-data-background', full_data)

        node.add_input_list_opt('--mixed-coincs-inj-full', injfull)
        node.add_input_list_opt('--mixed-coincs-full-inj', fullinj)
        node.add_opt('--ifos', ifos)
        node.new_output_file_opt(seg, '.hdf', '--output-file', tags=tags)
        return node
Example #19
def convert_json_list_to_segmentlist(jsonlist):
    """
    Helper function used to convert a JSON list-of-lists object into a
    segmentlist object.
    """
    return segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in jsonlist])
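
A minimal usage sketch (assumes the same segments import):

print(convert_json_list_to_segmentlist([[0, 10], [20, 30]]))
# -> [segment(0, 10), segment(20, 30)]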
Example #20
	def get_output(self):
		if self._AnalysisNode__output is None:
			if None in (self.get_start(), self.get_end(), self.get_ifo(), self.__usertag):
				raise ValueError("start time, end time, ifo, or user tag has not been set")
			seg = segments.segment(lal.LIGOTimeGPS(self.get_start()), lal.LIGOTimeGPS(self.get_end()))
			self.set_output(os.path.join(self.output_dir, "%s-POWER_%s-%d-%d.xml.gz" % (self.get_ifo(), self.__usertag, int(self.get_start()), int(self.get_end()) - int(self.get_start()))))
		return self._AnalysisNode__output
Example #22
    def process_row(self, channel, rate, bin_idx, buftime, row):
        """
		Given a channel, rate, and the current buffer
		time, will process a row from a gstreamer buffer.
		"""
        # if segments provided, ensure that trigger falls within these segments
        if self.frame_segments[self.instrument]:
            trigger_seg = segments.segment(
                LIGOTimeGPS(row.end_time, row.end_time_ns),
                LIGOTimeGPS(row.end_time, row.end_time_ns))

        if not self.frame_segments[self.instrument] or self.frame_segments[
                self.instrument].intersects_segment(trigger_seg):
            waveform = self.waveforms[channel].index_to_waveform(
                rate, bin_idx, row.channel_index)
            trigger_time = row.end_time + row.end_time_ns * 1e-9

            # append row for data transfer/saving
            channel_name = self.bin_to_channel(channel, bin_idx)
            feature_row = {
                'timestamp': utils.floor_div(buftime, 1. / self.sample_rate),
                'channel': channel_name,
                'snr': row.snr,
                'phase': row.phase,
                'time': trigger_time,
                'frequency': waveform['frequency'],
                'q': waveform['q'],
                'duration': waveform['duration'],
            }
            timestamp = utils.floor_div(buftime, self.buffer_size)
            self.feature_queue.append(timestamp, channel_name, feature_row)
Example #23
def get_exposures(params, config_struct, segmentlist):
    '''
    Convert the availability times into a list of segments with the length
    of the telescope exposures.
    segmentlist: the segments during which the telescope can do follow-up.
    '''
    exposurelist = segments.segmentlist()
    if "overhead_per_exposure" in config_struct:
        overhead = config_struct["overhead_per_exposure"]
    else:
        overhead = 0.0

    # add the filter change time to the total overheads for integrated
    if not params["doAlternatingFilters"]:
        overhead = overhead + config_struct["filt_change_time"]

    exposure_time = np.max(params["exposuretimes"])

    for ii in range(len(segmentlist)):
        start_segment, end_segment = segmentlist[ii][0], segmentlist[ii][1]
        exposures = np.arange(start_segment, end_segment,
                              (overhead + exposure_time) / 86400.0)

        for jj in range(len(exposures)):
            exposurelist.append(
                segments.segment(exposures[jj],
                                 exposures[jj] + exposure_time / 86400.0))

    return exposurelist
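
A minimal usage sketch (hypothetical telescope configuration; assumes numpy
and ligo.segments as imported by the module):

params = {"doAlternatingFilters": True, "exposuretimes": [300.0]}
config_struct = {"overhead_per_exposure": 10.0, "filt_change_time": 60.0}
window = segments.segmentlist([segments.segment(59000.00, 59000.01)])  # MJD
print(get_exposures(params, config_struct, window))  # ~310 s spacing, in days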
Example #24
def new_plots(plots):
    deltat_seg = segments.segment(-0.3, +0.3)
    deltat_width = 0.03125
    l = [
        RateContours("H2", "H1"),
        ConfidenceContours("H2", "H1", magnitude_a, "Confidence", 1, 10**10),
        ConfidenceContours("H2", "L1", magnitude_a, "Confidence", 1, 10**10),
        ConfidenceContours("L1", "H1", magnitude_a, "Confidence", 1, 10**10),
        ConfidenceContours("H2", "H1", magnitude_b,
                           r"Power / D.o.F. / ($F_{+}^{2} + F_{\times}^{2}$)",
                           1, 10**10),
        ConfidenceContours("H2", "L1", magnitude_b,
                           r"Power / D.o.F. / ($F_{+}^{2} + F_{\times}^{2}$)",
                           1, 10**10),
        ConfidenceContours("L1", "H1", magnitude_b,
                           r"Power / D.o.F. / ($F_{+}^{2} + F_{\times}^{2}$)",
                           1, 10**10),
        ConfidenceContourProjection(
            numpy.array((-1 / math.sqrt(2), +1 / math.sqrt(2), 0), "Float64"),
            numpy.array(
                (-1 / math.sqrt(4), -1 / math.sqrt(4), +1 / math.sqrt(2)),
                "Float64"), magnitude_b, 10**5),
        RateVsConfidence("H1"),
        RateVsConfidence("H2"),
        RateVsConfidence("L1")
    ]
    return [l[i] for i in plots]
Example #27
def compute_segment_lists(seglists, offset_vectors, min_segment_length, pad):
  # don't modify original
  seglists = seglists.copy()

  # ignore offset vectors referencing instruments we don't have
  offset_vectors = [offset_vector for offset_vector in offset_vectors if set(offset_vector.keys()).issubset(set(seglists.keys()))]

  # cull too-short single-instrument segments from the input
  # segmentlist dictionary;  this can significantly increase
  # the speed of the get_coincident_segmentlistdict()
  # function when the input segmentlists have had many data
  # quality holes poked out of them
  remove_too_short_segments(seglists, min_segment_length, pad)

  # extract the segments that are coincident under the time
  # slides
  new = cafe.get_coincident_segmentlistdict(seglists, offset_vectors)

  # round to integer boundaries because lalapps_StringSearch can't accept
  # non-integer start/stop times
  # FIXME:  fix that in lalapps_StringSearch
  for seglist in new.values():
    for i in range(len(seglist)):
      seglist[i] = segments.segment(int(math.floor(seglist[i][0])), int(math.ceil(seglist[i][1])))
  # intersect with original segments to ensure we haven't expanded beyond
  # original bounds
  new &= seglists

  # again remove too-short segments
  remove_too_short_segments(new, min_segment_length, pad)

  # done
  return new
Example #28
def get_ha_segments(config_struct,segmentlist,observer,fxdbdy,radec):

    if "ha_constraint" in config_struct:
        ha_constraint = config_struct["ha_constraint"].split(",")
        ha_min = float(ha_constraint[0])
        ha_max = float(ha_constraint[1])
    else:
        ha_min, ha_max = -24.0, 24.0

    if config_struct["telescope"] == "DECam":
        if radec.dec.deg <= -30.0:
            ha_min, ha_max = -5.2, 5.2
        else:
            ha_min = -0.644981 * np.sqrt(35.0 - radec.dec.deg)
            ha_max = 0.644981 * np.sqrt(35.0 - radec.dec.deg)

    location = astropy.coordinates.EarthLocation(config_struct["longitude"],
                                                 config_struct["latitude"],
                                                 config_struct["elevation"])

    halist = segments.segmentlist()
    for seg in segmentlist:
        mjds = np.linspace(seg[0], seg[1], 100)
        tt = Time(mjds, format='mjd', scale='utc', location=location)
        lst = tt.sidereal_time('mean')
        ha = (lst - radec.ra).hour
        idx = np.where((ha >= ha_min) & (ha <= ha_max))[0]
        if len(idx) >= 2:
            halist.append(segments.segment(mjds[idx[0]], mjds[idx[-1]]))

    return halist
Example #29
def filename_metadata(filename):
    """Return metadata parsed from a filename following LIGO-T050017

    Parameters
    ----------
    filename : `str`
        the path name of a file

    Returns
    -------
    obs : `str`
        the observatory metadata

    tag : `str`
        the file tag

    segment : `ligo.segments.segment`
        the GPS ``[start, stop)`` interval for this file

    Notes
    -----
    `LIGO-T050017 <https://dcc.ligo.org/LIGO-T050017>`__ declares a
    file naming convention that includes documenting the GPS start integer
    and integer duration of a file, see that document for more details.
    """
    obs, desc, start, dur = os.path.basename(filename).split('-')
    start = int(start)
    # the fourth field is "<duration>.<extension>"
    dur = int(dur.split('.')[0])
    return obs, desc, segment(start, start + dur)
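
A minimal usage sketch (hypothetical file name following T050017):

print(filename_metadata("H-EXAMPLE_TAG-1000-10.h5"))
# -> ('H', 'EXAMPLE_TAG', segment(1000, 1010))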
Example #30
	def kwargs_from_triggers(self, events, offsetvector):
		assert len(events) >= self.min_instruments

		#
		# pick a random, but reproducible, trigger to provide a
		# reference timestamp for, e.g, the \Delta t's between
		# instruments and the time spanned by the candidate.
		#
		# the trigger times are conveyed as offsets-from-epoch.
		# the trigger times are taken to be their time-shifted
		# values, the time-shifted reference trigger is used to
		# define the epoch.  the objective here is to allow the
		# trigger times to be converted to floats without loss of
		# precision, without losing knowledge of the \Delta t's
		# between triggers, and in such a way that singles always
		# have a time-shifted offset-from-epoch of 0.
		#
		# for the time spanned by the event, we need a segment for
		# every instrument whether or not it provided a trigger,
		# and reflecting the offset vector that was considered when
		# this candidate was formed (the ranking statistic needs to
		# know when it was we were looking for triggers in the
		# instruments that failed to provide them).  for
		# instruments that do not provide a trigger, we time-shift
		# the reference trigger's interval under the assumption
		# that because we use exact-match coincidence the interval
		# is the same for all instruments.
		#

		reference = min(events, key = lambda event: event.ifo)
		ref_start, ref_offset = reference.start_time, offsetvector[reference.ifo]
		# segment spanned by reference event
		seg = segments.segment(ref_start, ref_start + reference.duration)
		# initially populate segs dictionary shifting reference
		# instrument's segment according to offset vectors
		segs = dict((instrument, seg.shift(ref_offset - offsetvector[instrument])) for instrument in self.instruments)
		
		# for any real triggers we have, use their true
		# intervals
		segs.update((event.ifo, segments.segment(event.start_time, event.start_time+event.duration)) for event in events)

		return dict(
			segments = segs, 
			snr2s = dict((event.ifo, event.snr**2.) for event in events),
			chi2s_over_snr2s = dict((event.ifo, event.chisq / event.chisq_dof / event.snr**2.) for event in events),
			durations = dict((event.ifo, event.duration) for event in events)
		)
Example #31
def new_plots(instrument, amplitude_func, amplitude_lbl, plots):
    l = (FreqVsTime(instrument),
         HrssVsFreqScatter(instrument, amplitude_func, amplitude_lbl),
         SimBurstUtils.Efficiency_hrss_vs_freq(
             (instrument, ), amplitude_func, amplitude_lbl,
             0.1), TriggerCountHistogram(instrument),
         RecoveredVsInjectedhrss(instrument, amplitude_func, amplitude_lbl),
         RecoveredPerInjectedhrssVsFreq(instrument, amplitude_func,
                                        amplitude_lbl),
         RecoveredPerInjectedhrssVsBandwidth(instrument, amplitude_func,
                                             amplitude_lbl),
         RecoveredTimeOffset(instrument, segments.segment(-0.03, +0.03),
                             0.00015),
         RecoveredFrequencyOffset(instrument, segments.segment(-1.0, +1.0),
                                  .002),
         RecoveredVsInjectedFreq(instrument, amplitude_func))
    return [l[i] for i in plots]
Example #32
def ExcessPowerNearCoincCompare(sim, burst, offsetvector):
	"""
	Return False (injection matches coinc) if the peak time of the sim
	is "near" the burst event.
	"""
	tinj = sim.time_at_instrument(burst.ifo, offsetvector)
	window = SimBurstUtils.burst_is_near_injection_window
	return segments.segment(tinj - window, tinj + window).disjoint(burst.period)
Example #33
	def __init__(self, ifo, width, max):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot("Delay (s)", "Count / Delay")
		self.ifo = ifo
		self.nevents = 0
		# 21 bins per filter width
		interval = segments.segment(0, max + 2)
		self.bins = rate.BinnedDensity(rate.NDBins((rate.LinearBins(interval[0], interval[1], int(float(abs(interval)) / width) * 21),)))
		self.axes.semilogy()
Example #34
def segmentlistdict_unnormalize(seglistdict, origin):
	"""
	The opposite of segmentlistdict_normalize(), restores the times in
	a segmentlist dictionary to absolute times.  The modification is
	done in place.
	"""
	for seglist in seglistdict.values():
		seglist[:] = (segments.segment(origin + seg[0], origin + seg[1]) for seg in seglist)
Example #35
	def finish(self):
		self.axes.plot(self.injected_x, self.injected_y, "k+")
		if not options.made_only:
			self.axes.plot(self.missed_x, self.missed_y, "rx")
		for seg in ~self.seglist & segments.segmentlist([segments.segment(self.axes.get_xlim())]):
			self.axes.axvspan(float(seg[0]), float(seg[1]), facecolor = "k", alpha = 0.2)
		self.axes.set_ylim([min(self.injected_y), max(self.injected_y)])
		self.axes.set_title("Injection Locations\n(%d Injections)" % self.num_injections)
Example #36
def convert_json_list_to_segmentlist(jsonlist):
    """ 
    Helper function used to convert JSON list of lists-type object to a 
    segmentlist object-
    * Utility method, ripped from jsonhelper.py in dqsegdb package, until we 
    can use it as a dependency.
    """
    return segments.segmentlist([segments.segment(x[0],x[1]) for x in jsonlist])
Example #39
    def sngl_burst_is_vetoed(ifo,
                             start,
                             start_ns,
                             duration,
                             veto_segs=veto_segs):
        # the default argument binds veto_segs at definition time
        start = dbtables.lsctables.LIGOTimeGPS(start, start_ns)
        return ifo in veto_segs and veto_segs[ifo].intersects_segment(
            segments.segment(start, start + duration))
Example #40
def segmentlistdict_normalize(seglistdict, origin):
	"""
	Convert the times in a segmentlist dictionary to floats relative to
	origin.  The purpose is to allow segment lists stored as
	LIGOTimeGPS times to be manipulated more quickly without loss of
	precision.  The modification is done in place.
	"""
	for seglist in seglistdict.values():
		seglist[:] = (segments.segment(float(seg[0] - origin), float(seg[1] - origin)) for seg in seglist)
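
A minimal round-trip sketch pairing this with segmentlistdict_unnormalize()
from the earlier example (assumes ligo.segments):

sld = segments.segmentlistdict(
    {"H1": segments.segmentlist([segments.segment(1000000000, 1000000100)])})
segmentlistdict_normalize(sld, 1000000000)    # H1 -> [segment(0.0, 100.0)]
segmentlistdict_unnormalize(sld, 1000000000)  # restores the absolute times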
Example #41
def generated_ascii(json_str, filepath):
    res_dict = json.loads(json_str)
    active_list = res_dict['active']
    active_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in active_list])
    active_segments_string = '\n'.join(
        [str(i[0]) + "," + str(i[1]) for i in active_segments])
    with open(filepath, 'w+') as output_fileh:
        output_fileh.write(active_segments_string)
    return filepath
Example #42
    def finish(self):
        self.axes.set_title("Time-Frequency Plane\n(%d Triggers)" %
                            self.nevents)
        for seg in ~self.seglist & segments.segmentlist(
                [segments.segment(self.axes.get_xlim())]):
            self.axes.axvspan(float(seg[0]),
                              float(seg[1]),
                              facecolor="k",
                              alpha=0.2)
Example #43
	def get_output_cache(self):
		"""
		Returns a LAL cache of the output file name.  Calling this
		method also induces the output name to get set, so it must
		be at least once.
		"""
		if not self.output_cache:
			self.output_cache = [CacheEntry(self.get_ifo(), self.__usertag, segments.segment(lal.LIGOTimeGPS(self.get_start()), lal.LIGOTimeGPS(self.get_end())), "file://localhost" + os.path.abspath(self.get_output()))]
		return self.output_cache
Example #44
	def get_output_cache(self):
		"""
		Returns a LAL cache of the output file name.  Calling this
		method also induces the output name to get set, so it must
		be at least once.
		"""
		if not self.output_cache:
			# FIXME:  instruments hardcoded to "everything"
			self.output_cache = [CacheEntry(u"G1+H1+H2+L1+T1+V1", self.__usertag, segments.segment(lal.LIGOTimeGPS(self.get_start()), lal.LIGOTimeGPS(self.get_end())), "file://localhost" + os.path.abspath(self.get_output()))]
		return self.output_cache
Example #45
def StringCuspSnglCompare(sim, burst, offsetvector):
	"""
	Return False (injection matches event) if an autocorrelation-width
	window centred on the injection is continuous with the time
	interval of the burst.
	"""
	tinj = sim.time_at_instrument(burst.ifo, offsetvector)
	window = SimBurstUtils.stringcusp_autocorrelation_width / 2
	# uncomment last part of expression to impose an amplitude cut
	return segments.segment(tinj - window, tinj + window).disjoint(burst.period) #or abs(sim.amplitude / SimBurstUtils.string_amplitude_in_instrument(sim, burst.ifo, offsetvector)) > 3
Example #46
def get_segment_summary_times(scienceFile, segmentName):
    """
    This function will find the times for which the segment_summary is set
    for the flag given by segmentName.

    Parameters
    -----------
    scienceFile : SegFile
        The segment file that we want to use to determine this.
    segmentName : string
        The DQ flag to search for times in the segment_summary table.

    Returns
    ---------
    summSegList : ligo.segments.segmentlist
        The times that are covered in the segment summary table.
    """
    # Parse the segmentName
    segmentName = segmentName.split(':')
    if len(segmentName) not in [2, 3]:
        raise ValueError("Invalid channel name %s." % ':'.join(segmentName))
    ifo = segmentName[0]
    channel = segmentName[1]
    version = ''
    if len(segmentName) == 3:
        version = int(segmentName[2])

    # Load the filename
    xmldoc = utils.load_filename(scienceFile.cache_entry.path,
                             gz=scienceFile.cache_entry.path.endswith("gz"),
                             contenthandler=ContentHandler)

    # Get the segment_def_id for the segmentName
    segmentDefTable = table.get_table(xmldoc, "segment_definer")
    for entry in segmentDefTable:
        if (entry.ifos == ifo) and (entry.name == channel):
            if len(segmentName) == 2 or (entry.version==version):
                segDefID = entry.segment_def_id
                break
    else:
        raise ValueError("Cannot find channel %s in segment_definer table."\
                         %(segmentName))

    # Get the segmentlist corresponding to this segmentName in segment_summary
    segmentSummTable = table.get_table(xmldoc, "segment_summary")
    summSegList = segments.segmentlist([])
    for entry in segmentSummTable:
        if entry.segment_def_id == segDefID:
            segment = segments.segment(entry.start_time, entry.end_time)
            summSegList.append(segment)
    summSegList.coalesce()

    return summSegList
Example #47
def new_plots(ifo, plots):
	l = (
		RateVsPeakFreq(ifo, segments.segment(options.frequency_range), 4),
		Durations(ifo),
		Delays(ifo, 0.25, 20),
		RateVsSNR(ifo),
		RateVsConfidence(ifo),
		ConfidenceVsTime(ifo),
		ConfidenceVsFrequencyScatter(ifo),
		TimeFrequencyPlane(ifo)
	)
	return [l[i] for i in plots]
Example #48
  def get_output(self):
    """
    Returns the file name of output from the ring code. This must be kept
    synchronized with the name of the output file in ring.c.
    """
    if self._AnalysisNode__output is None:
      if None in (self.get_start(), self.get_end(), self.get_ifo(), self.__usertag):
        raise ValueError("start time, end time, ifo, or user tag has not been set")
      seg = segments.segment(LIGOTimeGPS(self.get_start()), LIGOTimeGPS(self.get_end()))
      self.set_output(os.path.join(self.output_dir, "%s-STRINGSEARCH_%s-%d-%d.xml.gz" % (self.get_ifo(), self.__usertag, int(self.get_start()), int(self.get_end()) - int(self.get_start()))))

    return self._AnalysisNode__output
Example #49
def coalesceResultDictionary(result_dict):
    """
    Takes a dictionary as returned by QueryTimes or QueryTimeless and converts the lists of tuples into actual segment lists (and coalesces them).

    Parameters
    ----------
    result_dict : `dict`
        This is the input result dictionary from the other api calls

    Returns
    -------
    out_result_dict : `dict`
        This is the output result dictionary with actual segment lists
        (and coalesced results).

    """
    import copy
    out_result_dict = copy.deepcopy(result_dict)
    active_seg_python_list = [
        segments.segment(i[0], i[1]) for i in result_dict['active']]
    active_seg_list = segments.segmentlist(active_seg_python_list)
    active_seg_list.coalesce()
    out_result_dict['active'] = active_seg_list
    known_seg_python_list = [
        segments.segment(i[0], i[1]) for i in result_dict['known']]
    known_seg_list = segments.segmentlist(known_seg_python_list)
    known_seg_list.coalesce()
    out_result_dict['known'] = known_seg_list
    return out_result_dict
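
A minimal usage sketch (assumes ligo.segments):

raw = {"active": [(0, 10), (10, 20)], "known": [(0, 30)]}
out = coalesceResultDictionary(raw)
print(out["active"])  # -> [segment(0, 20)] after coalescing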
Example #50
def split_segment(timing_params, segment, psds_per_job):
	"""
	Split the data segment into correctly-overlaping segments.  We try
	to have the numbers of PSDs in each segment be equal to
	psds_per_job, but with a short segment at the end if needed.
	"""
	# in seconds
	joblength = job_length_from_psds(timing_params, psds_per_job)
	# in samples
	joboverlap = 2 * timing_params.filter_corruption + (timing_params.psd_length - timing_params.psd_shift)
	# in seconds
	joboverlap /= timing_params.resample_rate

	segs = segments.segmentlist()
	t = segment[0]
	while t + joblength <= segment[1]:
		segs.append(segments.segment(t, t + joblength) & segment)
		t += joblength - joboverlap

	extra_psds = int(psds_from_job_length(timing_params, float(segment[1] - t)))
	if extra_psds:
		segs.append(segments.segment(t, t + job_length_from_psds(timing_params, extra_psds)))
	return segs
Example #51
def split_segment(seg, min_segment_length, pad, overlap, short_segment_duration, max_job_length):
	# avoid infinite loop
	if min_segment_length + 2 * pad <= overlap:
		raise ValueError("infinite loop: min_segment_length + 2 * pad must be > overlap")

	# clip max_job_length down to an allowed size
	max_job_length = clip_segment_length(max_job_length, pad, short_segment_duration)

	seglist = segments.segmentlist()
	while abs(seg) >= min_segment_length + 2 * pad:
		# try to use max_job_length each time
		if abs(seg) >= max_job_length:
			seglist.append(segments.segment(seg[0], seg[0] + max_job_length))
		else:
			seglist.append(segments.segment(seg[0], seg[0] + clip_segment_length(abs(seg), pad, short_segment_duration)))
		assert abs(seglist[-1]) != 0	# safety-check for no-op
		# bounds must be integers
		if abs((int(seglist[-1][0]) - seglist[-1][0]) / seglist[-1][0]) > 1e-14 or abs((int(seglist[-1][1]) - seglist[-1][1]) / seglist[-1][1]) > 1e-14:
			raise ValueError("segment %s does not have integer boundaries" % str(seglist[-1]))
		# advance segment
		seg = segments.segment(seglist[-1][1] - overlap, seg[1])
	if not seglist:
		raise ValueError("unable to use segment %s" % str(seg))
	return seglist
Example #52
def get_valid_segments(segment_url, base_dir, ifo, science_flag, start_time, end_time):
    print("Finding valid analysis times for %s, please hold..." % ifo)

    cmd  = 'ligolw_segment_query --query-segments --segment-url %s --include-segments %s --gps-start-time %d --gps-end-time %d | ligolw_print -t segment -c start_time -c end_time' % (segment_url, science_flag, start_time, end_time)
    pipe = os.popen(cmd)

    print(cmd)

    results   = [x.strip().split(',') for x in pipe]
    science   = segments.segmentlist([segments.segment(int(x[0]), int(x[1])) for x in results])
    science.coalesce()

    print("Science: ")
    for s in science:
       print(s[0], s[1])

    framedir  = base_dir + '/' + ifo[0] + '1'
    chunks    = [f.split('.')[0].split('-') for f in get_all_files_in_range(framedir, start_time, end_time)]
    available = segments.segmentlist([ segments.segment( int(x[-2]), int(x[-2]) + int(x[-1]) ) for x in chunks if len(x) == 6 ])
    available.coalesce()

    print("Available:")
    for s in available:
       print(s[0], s[1])

    result = science & available

    result.coalesce()

    print("Result:")
    for s in result:
       print(s[0], s[1])

    print("done.")

    return result
Example #53
def new_plots(plots):
	deltat_seg = segments.segment(-0.3, +0.3)
	deltat_width = 0.03125
	l = [
		RateContours("H2", "H1"),
		ConfidenceContours("H2", "H1", magnitude_a, "Confidence", 1, 10**10),
		ConfidenceContours("H2", "L1", magnitude_a, "Confidence", 1, 10**10),
		ConfidenceContours("L1", "H1", magnitude_a, "Confidence", 1, 10**10),
		ConfidenceContours("H2", "H1", magnitude_b, r"Power / D.o.F. / ($F_{+}^{2} + F_{\times}^{2}$)", 1, 10**10),
		ConfidenceContours("H2", "L1", magnitude_b, r"Power / D.o.F. / ($F_{+}^{2} + F_{\times}^{2}$)", 1, 10**10),
		ConfidenceContours("L1", "H1", magnitude_b, r"Power / D.o.F. / ($F_{+}^{2} + F_{\times}^{2}$)", 1, 10**10),
		ConfidenceContourProjection(numpy.array((-1/math.sqrt(2), +1/math.sqrt(2), 0), "Float64"), numpy.array((-1/math.sqrt(4), -1/math.sqrt(4), +1/math.sqrt(2)), "Float64"), magnitude_b, 10**5),
		RateVsConfidence("H1"),
		RateVsConfidence("H2"),
		RateVsConfidence("L1")
	]
	return [l[i] for i in plots]
Example #54
def columns_from_file_list(file_list, columns, ifo, start, end):
    """ Return columns of information stored in single detector trigger
    files.

    Parameters
    ----------
    file_list : FileList
        The list of single-detector trigger files.
    columns : list of strings
        The list of columns to read from the trigger files.
    ifo : string
        The ifo to return triggers for.
    start : int
        The start time to get triggers from.
    end : int
        The end time to get triggers from.

    Returns
    -------
    trigger_dict : dict
        A dictionary of column vectors with column names as keys.
    """
    file_list = file_list.find_output_with_ifo(ifo)
    file_list = file_list.find_all_output_in_range(ifo, segment(start, end))

    trig_dict = {}
    for trig_file in file_list:
        f = h5py.File(trig_file.storage_path, 'r')

        time = f['end_time'][:]
        pick = numpy.logical_and(time < end, time > start)
        pick_loc = numpy.where(pick)[0]

        for col in columns:
            if col not in trig_dict:
                trig_dict[col] = []
            trig_dict[col] = numpy.concatenate([trig_dict[col], f[col][:][pick_loc]])

    return trig_dict
Example #55
def get_sngl_burst_row(sngl_burst_table, sim_tree, d):
  row = sngl_burst_table.RowType()
  setattr(row, "search", "waveburst")
  # Interferometer name -> ifo
  setattr(row, "ifo", get_ifos_from_index(sim_tree.ifo[d]) )
  # Timing
  peak = LIGOTimeGPS(sim_tree.time[d])
  seg = segments.segment(LIGOTimeGPS(sim_tree.start[d]), LIGOTimeGPS(sim_tree.stop[d]))
  # Peak time in the detector -> peak_time
  row.set_peak(peak)
  # Start time in the detector -> start_time
  row.set_start(seg[0])
  # Stop time in the detector -> stop_time
  row.set_stop(seg[1])
  # Event duration
  row.duration = abs(seg)
  # TODO: Make sure this is right = Time lag used to shift detector -> lag
  setattr(row, "time_lag", sim_tree.lag[d])
  # Frequency
  # Central frequency in the detector -> frequency
  setattr(row, "peak_frequency", sim_tree.frequency[d])
  # Low frequency of the event in the detector -> flow
  setattr(row, "flow", sim_tree.low[d])
  # High frequency of the event in the detector ->  fhigh
  setattr(row, "fhigh", sim_tree.high[d])
  # Bandwidth
  setattr(row, "bandwidth", sim_tree.bandwidth[d])
  # Shape
  # number of pixels on the TF plane -> tfvolume
  setattr(row, "tfvolume", sim_tree.size[d])
  # Energy
  # energy / noise variance -> snr
  setattr(row, "snr", sim_tree.snr[d])
  # TODO: What to do with this? GW strain
  #setattr(row, "strain", sim_tree.strain[d])
  # h _ root square sum
  setattr(row, "hrss", sim_tree.hrss[d])
  
  return row
Example #56
def run_show_types(doc, connection, engine, gps_start_time, gps_end_time, included_segments_string, excluded_segments_string):
    resulttable = lsctables.New(ShowTypesResultTable)
    doc.childNodes[0].appendChild(resulttable)

    sql = """SELECT segment_definer.ifos, segment_definer.name, segment_definer.version,
                 (CASE WHEN segment_definer.comment IS NULL THEN '-' WHEN segment_definer.comment IS NOT NULL THEN segment_definer.comment END),
                 segment_summary.start_time, segment_summary.end_time,
                 (CASE WHEN segment_summary.comment IS NULL THEN '-' WHEN segment_summary.comment IS NOT NULL THEN segment_summary.comment END)
          FROM  segment_definer, segment_summary
          WHERE segment_definer.segment_def_id = segment_summary.segment_def_id
          AND   NOT (segment_summary.start_time > %d OR %d > segment_summary.end_time)
          """ % (gps_end_time, gps_start_time)

    rows = engine.query(sql)

    seg_dict = {}

    for row in rows:
        ifos, name, version, segment_definer_comment, segment_summary_start_time, segment_summary_end_time, segment_summary_comment = row
        key = (ifos, name, version, segment_definer_comment, segment_summary_comment)
        if key not in seg_dict:
            seg_dict[key] = []

        seg_dict[key].append(segments.segment(segment_summary_start_time, segment_summary_end_time))

    for key, value in seg_dict.items():
        segmentlist = segments.segmentlist(value)
        segmentlist.coalesce()

        for segment in segmentlist:
            result = ShowTypesResult()
            result.ifos, result.name, result.version, result.segment_definer_comment, result.segment_summary_comment = key
            result.segment_summary_start_time, result.segment_summary_end_time = segment
            result.ifos = result.ifos.strip()

            resulttable.append(result)

    engine.close()
Example #57
  if offSourceSegment is None:
    print("Warning: insufficient multi-IFO data to construct an off-source segment for GRB %s; skipping" % grb.event_number_grb, file=sys.stderr)
    continue
  elif opts.verbose:
    print("Sufficient off-source data has been found in", ifo_times, "time.")

  # write out the segment list to a segwizard file
  offsource_segfile = idirectory + "/offSourceSeg.txt"
  segmentsUtils.tosegwizard(open(offsource_segfile, "w"),
                            segments.segmentlist([offSourceSegment]))
  onsource_segfile = idirectory + "/onSourceSeg.txt"
  segmentsUtils.tosegwizard(open(onsource_segfile, "w"),
                            segments.segmentlist([onSourceSegment]))
  segLen = abs(onSourceSegment)
  bufferSegment = segments.segment(
      onSourceSegment[0] - opts.number_buffer_left * segLen,
      onSourceSegment[1] + opts.number_buffer_right * segLen)
  buffer_segfile = idirectory + "/bufferSeg.txt"
  segmentsUtils.tosegwizard(open(buffer_segfile, "w"),
                            segments.segmentlist([bufferSegment]))

  if opts.verbose:
    print("on-source segment: ", onSourceSegment)
    print("off-source segment: ", offSourceSegment)

  ############################################################################
  # set up the analysis dag for this interval
  #
  # In doing this, we simply drop the configuration file into the
  # sub-directory, modify as needed, and then run the appropriate DAG
  # generation script.  In slightly more detail, steps are:
  #