Example #1
    def event_rate(self, nevents=10):
        """
		Calculate the Poissonian significance of the 'on source' trial set for up to the loudest nevents.
		"""

        offtime = float(abs(segments.segmentlist(self.offsource.keys())))
        offsource = sorted(chain(*self.offsource.values()),
                           key=lambda sb: -sb.snr)
        # materialize the zip so it can be sliced under Python 3
        offrate = list(zip(offsource,
                           map(lambda i: i / offtime, range(1,
                                                            len(offsource) + 1))))
        offrate = offrate[::-1]
        offsource = offsource[::-1]
        offsnr = [sb.snr for sb in offsource]

        ontime = float(abs(segments.segmentlist(self.onsource.keys())))
        if ontime == 0:
            return []
        onsource = sorted(chain(*self.onsource.values()),
                          key=lambda sb: -sb.snr)
        onsnr = [sb.snr for sb in onsource]
        onrate = []
        for snr in onsnr:
            try:
                onrate.append(offrate[bisect_left(offsnr, snr)][1])
            except IndexError:  # on SNR > max off SNR
                onrate.append(0)

        return onrate
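A minimal, self-contained sketch of the lookup this method performs (hypothetical numbers, for illustration only): with the off-source SNRs sorted ascending and paired with their inverse cumulative rates, bisect_left maps an on-source SNR to the rate of background events at least that loud.

from bisect import bisect_left

offsnr = [5.0, 6.0, 8.0, 12.0]  # ascending off-source SNRs
offtime = 100.0                 # seconds of off-source livetime
# rate of events with SNR >= offsnr[i] is (len - i) events / offtime
offrate = [(len(offsnr) - i) / offtime for i in range(len(offsnr))]

def rate_at(snr):
    try:
        return offrate[bisect_left(offsnr, snr)]
    except IndexError:  # louder than every off-source event
        return 0

print(rate_at(7.0))   # 0.02 Hz: two background events with SNR >= 7
print(rate_at(20.0))  # 0: no background event this loud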
Example #2
def generated_vdb_ascii(json_str, filepath):
    res_dict = json.loads(json_str)
    active_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in res_dict['active']])
    known_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in res_dict['known']])
    query_start = res_dict['query_information']['start']
    query_stop = res_dict['query_information']['end']
    if query_start != 0 and query_stop != 0:
        requested_span = segments.segmentlist(
            [segments.segment(query_start, query_stop)])
    else:
        requested_span = segments.segmentlist(
            [segments.segment(0, 9999999999)])
    active_segments_string = ',1 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in active_segments]) + ",1 \n"
    unknown_segments = requested_span - known_segments
    unknown_segments_string = ',-1 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in unknown_segments]) + ",-1 \n"
    known_not_active_segments = known_segments - active_segments
    known_not_active_segments_string = ',0 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in known_not_active_segments]) + ",0 \n"
    query_info_string = json.dumps(res_dict['query_information'], indent=1)
    with open(filepath, 'w+') as output_fileh:
        output_fileh.write(query_info_string)
        output_fileh.write('\n')
        output_fileh.write(active_segments_string)
        output_fileh.write(unknown_segments_string)
        output_fileh.write(known_not_active_segments_string)
    return filepath
Example #3
def generated_vdb_ascii(json_dict, filepath):
    # input is already a parsed dict, not a JSON string
    res_dict = json_dict
    active_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in res_dict['active']])
    active_segments.coalesce()
    known_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in res_dict['known']])
    known_segments.coalesce()
    query_start = res_dict['query_information']['start']
    query_stop = res_dict['query_information']['end']
    if query_start != 0 and query_stop != 0:
        requested_span = segments.segmentlist(
            [segments.segment(query_start, query_stop)])
    else:
        requested_span = segments.segmentlist(
            [segments.segment(0, 9999999999)])
    active_segments_string = ',1 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in active_segments]) + ",1 \n"
    unknown_segments = requested_span - known_segments
    unknown_segments_string = ',-1 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in unknown_segments]) + ",-1 \n"
    known_not_active_segments = known_segments - active_segments
    known_not_active_segments_string = ',0 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in known_not_active_segments]) + ",0 \n"
    query_info_string = json.dumps(res_dict['query_information'], indent=1)
    with open(filepath, 'a') as output_fileh:
        output_fileh.write(query_info_string)
        output_fileh.write('\n')
        output_fileh.write(active_segments_string)
        output_fileh.write(unknown_segments_string)
        output_fileh.write(known_not_active_segments_string)
    return filepath
Example #4
def setup_psd_calculate(workflow, frame_files, ifo, segments,
                        segment_name, out_dir, tags=None):
    make_analysis_dir(out_dir)
    tags = [] if not tags else tags
    if workflow.cp.has_option_tags('workflow-psd', 'parallelization-factor', tags=tags):
        num_parts = int(workflow.cp.get_opt_tags('workflow-psd',
                                                 'parallelization-factor',
                                                 tags=tags))
    else:
        num_parts = 1

    # get rid of duplicate segments which happen when splitting the bank
    segments = segmentlist(frozenset(segments))

    segment_lists = list(chunks(segments, num_parts))

    psd_files = FileList([])
    for i, segs in enumerate(segment_lists):
        seg_file = SegFile.from_segment_list('%s_%s' %(segment_name, i),
                         segmentlist(segs), segment_name, ifo,
                         valid_segment=workflow.analysis_time,
                         extension='xml', directory=out_dir)

        psd_files += [make_psd_file(workflow, frame_files, seg_file,
                                    segment_name, out_dir,
                                    tags=tags + ['PART%s' % i])]

    if num_parts > 1:
        return merge_psds(workflow, psd_files, ifo, out_dir, tags=tags)
    else:
        return psd_files[0]
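The deduplication step above works because segments are hashable tuples, so a frozenset drops exact duplicates before the segmentlist is rebuilt. A standalone sketch (assumes only the ligo-segments package):

from ligo import segments

segs = [segments.segment(0, 10), segments.segment(0, 10),
        segments.segment(20, 30)]
unique = segments.segmentlist(frozenset(segs))
unique.sort()  # a frozenset does not preserve order
print(unique)  # [segment(0, 10), segment(20, 30)]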
Example #5
    def normalize(self):
        """
		Redistribute events to offsource and onsource based on current time span.
		"""
        all_segs = segments.segmentlist(self.onsource.keys())
        if len(all_segs) == 0:
            return

        if len(self.offsource.keys()) > 0:
            all_segs += segments.segmentlist(self.offsource.keys())
        all_segs.coalesce()
        begin, end = all_segs[0][0], all_segs[-1][1]
        span = float(end - begin)
        if span < self.onsource_interval:
            # Not much we can do.
            return

        if span > self.offsource_interval + self.onsource_interval:
            begin = end - (self.offsource_interval + self.onsource_interval)

        onsource_seg = segments.segment(end - self.onsource_interval, end)
        offsource_seg = segments.segment(begin, end - self.onsource_interval)

        for seg, sbt in list(self.offsource.items()):  # snapshot: entries may be deleted
            try:
                seg & offsource_seg
            except ValueError:  # offsource segment is out of the current window
                del self.offsource[seg]
                continue

            newseg = seg & offsource_seg
            if seg != newseg:
                del self.offsource[seg]
                # use a list, not a lazy filter(), so the events survive reuse
                self.offsource[newseg] = [
                    sb for sb in sbt
                    if (sb.peak_time + 1e-9 * sb.peak_time_ns) in newseg]

        for seg, sbt in list(self.onsource.items()):  # snapshot: entries may be deleted
            if seg in onsource_seg:
                continue
            elif offsource_seg.disjoint(seg) == 1:
                # segment ran off the span since last check
                del self.onsource[seg]
                continue

            offseg = seg & offsource_seg
            del self.onsource[seg]

            try:
                onseg = seg & onsource_seg
                self.onsource[onseg] = [
                    sb for sb in sbt
                    if (sb.peak_time + 1e-9 * sb.peak_time_ns) in onseg]
            except ValueError:  # onsource segment completely out of new segment
                pass

            self.offsource[offseg] = [
                sb for sb in sbt
                if (sb.peak_time + 1e-9 * sb.peak_time_ns) in offseg]
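The try/except blocks above lean on a detail of the segments API: intersecting two disjoint segments with & raises ValueError rather than returning an empty segment. A quick standalone illustration (assumes only the ligo-segments package):

from ligo import segments

a = segments.segment(0, 10)
b = segments.segment(5, 15)
c = segments.segment(20, 30)

print(a & b)  # segment(5, 10)
try:
    a & c
except ValueError:
    print("disjoint segments cannot be intersected")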
Example #6
    def __init__(self,ifo,name,version):
        self.known=segments.segmentlist([])
        self.active=segments.segmentlist([])
#        self.metadata={}
        self.flagDict={}
        self.ifo=ifo
        self.name=name
        self.version=version
Example #7
    def __init__(self,ifo,name,version):
        self.known=segments.segmentlist([])
        self.active=segments.segmentlist([])
#        self.metadata={}
        self.flagDict={}
        self.ifo=ifo
        self.name=name
        self.version=version
Example #8
	def __init__(self, instrument):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot("%s Confidence" % instrument, "Coincident Event Rate (Hz)")
		self.instrument = instrument
		self.foreground = []
		self.background = []
		self.foreground_segs = segments.segmentlist()
		self.background_segs = segments.segmentlist()
		self.axes.loglog()
Example #9
 def __init__(self, instrument):
     self.fig, self.axes = SnglBurstUtils.make_burst_plot(
         "%s Confidence" % instrument, "Coincident Event Rate (Hz)")
     self.instrument = instrument
     self.foreground = []
     self.background = []
     self.foreground_segs = segments.segmentlist()
     self.background_segs = segments.segmentlist()
     self.axes.loglog()
Example #10
def get_moon_segments(config_struct, segmentlist, observer, fxdbdy, radec):

    if "moon_constraint" in config_struct:
        moon_constraint = float(config_struct["moon_constraint"])
    else:
        moon_constraint = 20.0

    moonsegmentlist = segments.segmentlist()
    dt = 1.0 / 24.0
    tt = np.arange(segmentlist[0][0], segmentlist[-1][1] + dt, dt)

    ra2 = radec.ra.radian
    d2 = radec.dec.radian

    # Where is the moon?
    moon = ephem.Moon()
    for ii in range(len(tt) - 1):
        observer.date = ephem.Date(Time(tt[ii], format='mjd', scale='utc').iso)
        moon.compute(observer)
        fxdbdy.compute(observer)

        alt_target = float(repr(fxdbdy.alt)) * (360 / (2 * np.pi))
        az_target = float(repr(fxdbdy.az)) * (360 / (2 * np.pi))
        #print("Altitude / Azimuth of target: %.5f / %.5f"%(alt_target,az_target))

        alt_moon = float(repr(moon.alt)) * (360 / (2 * np.pi))
        az_moon = float(repr(moon.az)) * (360 / (2 * np.pi))
        #print("Altitude / Azimuth of moon: %.5f / %.5f"%(alt_moon,az_moon))

        ra_moon = (180 / np.pi) * float(repr(moon.ra))
        dec_moon = (180 / np.pi) * float(repr(moon.dec))

        # Converting both target and moon ra and dec to radians
        ra1 = float(repr(moon.ra))
        d1 = float(repr(moon.dec))

        # Calculate angle between target and moon
        cosA = np.sin(d1) * np.sin(d2) + np.cos(d1) * np.cos(d2) * np.cos(ra1 -
                                                                          ra2)
        angle = np.arccos(cosA) * (360 / (2 * np.pi))
        #print("Angle between moon and target: %.5f"%(angle))

        #if angle >= 50.0*moon.moon_phase**2:
        if angle >= moon_constraint:
            segment = segments.segment(tt[ii], tt[ii + 1])
            moonsegmentlist = moonsegmentlist + segments.segmentlist([segment])
            moonsegmentlist.coalesce()

    moonsegmentlistdic = segments.segmentlistdict()
    moonsegmentlistdic["observations"] = segmentlist
    moonsegmentlistdic["moon"] = moonsegmentlist
    moonsegmentlist = moonsegmentlistdic.intersection(["observations", "moon"])
    moonsegmentlist.coalesce()

    return moonsegmentlist
Example #11
def load_segments_from_xml(xml_doc, return_dict=False, select_id=None):
    """Read a ligo.segments.segmentlist from the file object file containing an
    xml segment table.

    Parameters
    ----------
        xml_doc: name of segment xml file

        Keyword Arguments:
            return_dict : [ True | False ]
                return a ligo.segments.segmentlistdict containing coalesced
                ligo.segments.segmentlists keyed by seg_def.name for each entry
                in the contained segment_def_table. Default False
            select_id : int
                return a ligo.segments.segmentlist object containing only
                those segments matching the given segment_def_id integer

    """

    # Load SegmentDefTable and SegmentTable
    seg_def_table = load_xml_table(xml_doc,
                                   glsctables.SegmentDefTable.tableName)
    seg_table = load_xml_table(xml_doc, glsctables.SegmentTable.tableName)

    if return_dict:
        segs = segments.segmentlistdict()
    else:
        segs = segments.segmentlist()

    seg_id = {}
    for seg_def in seg_def_table:
        seg_id[int(seg_def.segment_def_id)] = str(seg_def.name)
        if return_dict:
            segs[str(seg_def.name)] = segments.segmentlist()

    for seg in seg_table:
        if return_dict:
            segs[seg_id[int(seg.segment_def_id)]]\
                .append(segments.segment(seg.start_time, seg.end_time))
            continue
        if select_id is not None:
            # keep only the segments matching the given segment_def_id
            if int(seg.segment_def_id) == select_id:
                segs.append(segments.segment(seg.start_time, seg.end_time))
            continue
        segs.append(segments.segment(seg.start_time, seg.end_time))

    if return_dict:
        for seg_name in seg_id.values():
            segs[seg_name] = segs[seg_name].coalesce()
    else:
        segs = segs.coalesce()

    return segs
Example #12
def get_segments(params, config_struct):

    gpstime = params["gpstime"]
    event_mjd = Time(gpstime, format='gps', scale='utc').mjd

    segmentlist = segments.segmentlist()
    n_windows = len(params["Tobs"]) // 2
    start_segments = event_mjd + params["Tobs"][::2]
    end_segments = event_mjd + params["Tobs"][1::2]
    for start_segment, end_segment in zip(start_segments, end_segments):
        segmentlist.append(segments.segment(start_segment, end_segment))

    observer = ephem.Observer()
    observer.lat = str(config_struct["latitude"])
    observer.lon = str(config_struct["longitude"])
    observer.horizon = str(-12.0)
    observer.elevation = config_struct["elevation"]

    date_start = ephem.Date(
        Time(segmentlist[0][0], format='mjd', scale='utc').iso)
    date_end = ephem.Date(
        Time(segmentlist[-1][1], format='mjd', scale='utc').iso)
    observer.date = ephem.Date(
        Time(segmentlist[0][0], format='mjd', scale='utc').iso)

    sun = ephem.Sun()
    nightsegmentlist = segments.segmentlist()
    while date_start < date_end:
        date_rise = observer.next_rising(sun, start=date_start)
        date_set = observer.next_setting(sun, start=date_start)
        if date_set > date_rise:
            date_set = observer.previous_setting(sun, start=date_start)

        astropy_rise = Time(date_rise.datetime(), scale='utc').mjd
        astropy_set = Time(date_set.datetime(), scale='utc').mjd

        segment = segments.segment(astropy_set, astropy_rise)
        nightsegmentlist = nightsegmentlist + segments.segmentlist([segment])
        nightsegmentlist.coalesce()

        date_start = date_rise
        observer.date = date_rise

    segmentlistdic = segments.segmentlistdict()
    segmentlistdic["observations"] = segmentlist
    segmentlistdic["night"] = nightsegmentlist
    segmentlist = segmentlistdic.intersection(["observations", "night"])
    segmentlist.coalesce()

    return segmentlist
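The observation/night overlap at the end uses segmentlistdict.intersection, which intersects the segmentlists stored under the given keys. A minimal standalone sketch with made-up times:

from ligo import segments

d = segments.segmentlistdict()
d["observations"] = segments.segmentlist([segments.segment(0.0, 1.0)])
d["night"] = segments.segmentlist([segments.segment(0.25, 0.5),
                                   segments.segment(0.75, 2.0)])
print(d.intersection(["observations", "night"]))
# [segment(0.25, 0.5), segment(0.75, 1.0)]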
Example #13
    def __init__(self,ifo,name,version,hackDec11=False):
        self.known=segments.segmentlist([])
        self.active=segments.segmentlist([])
#        self.metadata={}
        self.flagDict={}
        self.ifo=ifo
        self.name=name
        self.version=version
        self.temp_process_ids={} # Used to hold the data
        #                         # associated with a process_id
        if hackDec11:
            self.insert_history={}
        else:
            self.insert_history=[]  # holds the process_metadatas and
                                    # insertion_metadatas; note that this assumes
                                    # proper dictionaries are appended to this list
Example #14
    def __init__(self,ifo,name,version,hackDec11=False):
        self.known=segments.segmentlist([])
        self.active=segments.segmentlist([])
#        self.metadata={}
        self.flagDict={}
        self.ifo=ifo
        self.name=name
        self.version=version
        self.temp_process_ids={} # Used to hold the data
        #                         # associated with a process_id
        if hackDec11:
            self.insert_history={}
        else:
            self.insert_history=[]  # holds the process_metadatas and
                                    # insertion_metadatas; note that this assumes
                                    # proper dictionaries are appended to this list
Example #15
def setup_psd_calculate(workflow,
                        frame_files,
                        ifo,
                        segments,
                        segment_name,
                        out_dir,
                        tags=None):
    make_analysis_dir(out_dir)
    tags = [] if not tags else tags
    if workflow.cp.has_option_tags('workflow-psd',
                                   'parallelization-factor',
                                   tags=tags):
        num_parts = int(
            workflow.cp.get_opt_tags('workflow-psd',
                                     'parallelization-factor',
                                     tags=tags))
    else:
        num_parts = 1

    # get rid of duplicate segments which happen when splitting the bank
    segments = segmentlist(frozenset(segments))

    segment_lists = list(chunks(segments, num_parts))

    psd_files = FileList([])
    for i, segs in enumerate(segment_lists):
        seg_file = SegFile.from_segment_list(
            '%s_%s' % (segment_name, i),
            segmentlist(segs),
            segment_name,
            ifo,
            valid_segment=workflow.analysis_time,
            extension='xml',
            directory=out_dir)

        psd_files += [
            make_psd_file(workflow,
                          frame_files,
                          seg_file,
                          segment_name,
                          out_dir,
                          tags=tags + ['PART%s' % i])
        ]

    if num_parts > 1:
        return merge_psds(workflow, psd_files, ifo, out_dir, tags=tags)
    else:
        return psd_files[0]
Example #16
def construct_trials(seg_files, seg_dict, ifos, slide_dict, vetoes):
    """Constructs trials from triggers, timeslides, segments and vetoes"""

    trial_dict = {}

    # Get segments
    segs = read_seg_files(seg_files)

    # Separate segments
    trial_time = abs(segs['on'])

    for slide_id in slide_dict:
        # These can only *reduce* the analysis time
        curr_seg_list = seg_dict[slide_id]

        # Construct the buffer segment list
        seg_buffer = segments.segmentlist()
        for ifo in ifos:
            slide_offset = slide_dict[slide_id][ifo]
            seg_buffer.append(
                segments.segment(segs['buffer'][0] - slide_offset,
                                 segs['buffer'][1] - slide_offset))
        seg_buffer.coalesce()

        # Construct the ifo-indexed dictionary of slid vetoes
        slid_vetoes = slide_vetoes(vetoes, slide_dict, slide_id)

        # Construct trial list and check against buffer
        trial_dict[slide_id] = segments.segmentlist()
        for curr_seg in curr_seg_list:
            iter_int = 1
            while 1:
                trial_end = curr_seg[0] + trial_time * iter_int
                if trial_end > curr_seg[1]:
                    break
                curr_trial = segments.segment(trial_end - trial_time,
                                              trial_end)
                if not seg_buffer.intersects_segment(curr_trial):
                    intersect = numpy.any([
                        slid_vetoes[ifo].intersects_segment(curr_trial)
                        for ifo in ifos
                    ])
                    if not intersect:
                        trial_dict[slide_id].append(curr_trial)

                iter_int += 1

    return trial_dict
Example #17
def get_summary(basedir, ifo, cluster, cat, start_time, end_time):
    all_sum     = segmentlist([])
    cur_time    = start_time

    while cur_time < end_time:
        tstring  = os.popen('tconvert -f %Y%m/%Y%m%d ' + str(cur_time)).readlines()[0].strip()
        infile   = open('%s/%s/%s-0-SUMMARY_%s.csv'  % (basedir, tstring, ifo, cluster))
        lines    = [l.strip().split(',') for l in infile.readlines()]
        summary  = segmentlist([segment(int(l[0]), int(l[1])) for l in lines]).coalesce()
        all_sum  = all_sum + summary

        cur_time += 60 * 60 * 24

    all_sum = all_sum & segmentlist([segment(start_time, end_time)])

    return all_sum
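The final line clips the accumulated summary to the requested window by intersecting with a single-segment segmentlist. A standalone sketch of that pattern (assumes only the ligo-segments package):

from ligo.segments import segment, segmentlist

all_sum = segmentlist([segment(100, 200), segment(300, 400)])
window = segmentlist([segment(150, 350)])
print(all_sum & window)  # [segment(150, 200), segment(300, 350)]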
Example #18
def split_segment(seg, min_segment_length, pad, overlap,
                  short_segment_duration, max_job_length):
    # avoid infinite loop
    if min_segment_length + 2 * pad <= overlap:
        raise ValueError(
            "infinite loop: min_segment_length + 2 * pad must be > overlap")

    # clip max_job_length down to an allowed size
    max_job_length = clip_segment_length(max_job_length, pad,
                                         short_segment_duration)

    seglist = segments.segmentlist()
    while abs(seg) >= min_segment_length + 2 * pad:
        # try to use max_job_length each time
        if abs(seg) >= max_job_length:
            seglist.append(segments.segment(seg[0], seg[0] + max_job_length))
        else:
            seglist.append(
                segments.segment(
                    seg[0], seg[0] + clip_segment_length(
                        abs(seg), pad, short_segment_duration)))
        assert abs(seglist[-1]) != 0  # safety-check for no-op
        # bounds must be integers
        if abs((int(seglist[-1][0]) - seglist[-1][0]) /
               seglist[-1][0]) > 1e-14 or abs(
                   (int(seglist[-1][1]) - seglist[-1][1]) /
                   seglist[-1][1]) > 1e-14:
            raise ValueError("segment %s does not have integer boundaries" %
                             str(seglist[-1]))
        # advance segment
        seg = segments.segment(seglist[-1][1] - overlap, seg[1])
    if not seglist:
        raise ValueError("unable to use segment %s" % str(seg))
    return seglist
Example #19
def convert_json_list_to_segmentlist(jsonlist):
     """
     Helper function used to convert json list of lists type object to a
     segmentlist object
     """
     segment_list=segments.segmentlist([segments.segment(x[0],x[1]) for x in jsonlist])
     return segment_list
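A short usage sketch of the helper above, with a hypothetical parsed-JSON payload (note the result is not coalesced automatically):

jsonlist = [[0, 10], [10, 20], [30, 40]]
seglist = convert_json_list_to_segmentlist(jsonlist)
print(seglist.coalesce())  # [segment(0, 20), segment(30, 40)]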
Example #20
    def segmentlistdict(self):
        """
        A segmentlistdict object describing the instruments and time
        spanned by this CacheEntry.  A new object is constructed each time
        this attribute is accessed (segments are immutable so there is no
        reason to try to share a reference to the CacheEntry's internal
        segment; modifications of one would not be reflected in the other
        anyway).

        Example:

        >>> c = CacheEntry("H1 S5 815901601 576.5 file://localhost/home/kipp/tmp/1/H1-815901601-576.xml")
        >>> c.segmentlistdict['H1']
        [segment(LIGOTimeGPS(815901601, 0), LIGOTimeGPS(815902177, 500000000))]

        The \"observatory\" column of the cache entry, which is frequently
        used to store instrument names, is parsed into instrument names for
        the dictionary keys using the same rules as
        glue.ligolw.lsctables.instrumentsproperty.get().

        Example:

        >>> c = CacheEntry("H1H2, S5 815901601 576.5 file://localhost/home/kipp/tmp/1/H1H2-815901601-576.xml")
        >>> c.segmentlistdict['H1H2']
        [segment(LIGOTimeGPS(815901601, 0), LIGOTimeGPS(815902177, 500000000))]
        """
        # the import has to be done here to break the cyclic
        # dependency
        from glue.ligolw.lsctables import instrumentsproperty
        instruments = instrumentsproperty.get(self.observatory) or (None, )
        return segments.segmentlistdict(
            (instrument,
             segments.segmentlist(self.segment is not None and [self.segment]
                                  or [])) for instrument in instruments)
Example #21
def get_science_segs_from_datafind_outs(datafindcaches):
    """
    This function will calculate the science segments that are covered in
    the OutGroupList containing the frame files returned by various
    calls to the datafind server. This can then be used to check whether this
    list covers what it is expected to cover.

    Parameters
    ----------
    datafindcaches : OutGroupList
        List of all the datafind output files.

    Returns
    --------
    newScienceSegs : Dictionary of ifo-keyed glue.segments.segmentlist instances
        The times covered by the frames found in datafindOuts.
    """
    newScienceSegs = {}
    for cache in datafindcaches:
        if len(cache) > 0:
            groupSegs = segments.segmentlist(e.segment for e in cache).coalesce()
            ifo = cache.ifo
            if ifo not in newScienceSegs:
                newScienceSegs[ifo] = groupSegs
            else:
                newScienceSegs[ifo].extend(groupSegs)
                newScienceSegs[ifo].coalesce()
    return newScienceSegs
Example #22
def get_science_segs_from_datafind_outs(datafindcaches):
    """
    This function will calculate the science segments that are covered in
    the OutGroupList containing the frame files returned by various
    calls to the datafind server. This can then be used to check whether this
    list covers what it is expected to cover.

    Parameters
    ----------
    datafindcaches : OutGroupList
        List of all the datafind output files.

    Returns
    --------
    newScienceSegs : Dictionary of ifo-keyed ligo.segments.segmentlist instances
        The times covered by the frames found in datafindOuts.
    """
    newScienceSegs = {}
    for cache in datafindcaches:
        if len(cache) > 0:
            groupSegs = segments.segmentlist(e.segment for e in cache).coalesce()
            ifo = cache.ifo
            if ifo not in newScienceSegs:
                newScienceSegs[ifo] = groupSegs
            else:
                newScienceSegs[ifo].extend(groupSegs)
                newScienceSegs[ifo].coalesce()
    return newScienceSegs
Example #23
    def add_events(self, sbtable, inseg=None):
        """
		Add a trial to the current running tally. If segment is provided, then the key in the trial table is set to be this. Otherwise, the segment is determined from the peak times of the snglbursts
		"""

        # If no events are provided and no segment is indicated, there is no
        # operation to map this into a trial, so we do nothing
        if len(sbtable) == 0 and inseg is None:
            return

        if inseg is None:
            inseg = []
            for sb in sbtable:
                start = sb.start_time + 1e-9 * sb.start_time_ns
                stop = sb.start_time + sb.duration
                inseg.append(segments.segment(start, stop))
            inseg = segments.segmentlist(inseg).coalesce()
            inseg = segments.segment(inseg[0][0], inseg[-1][1])

        # materialize the matches; a lazy filter() would not support len()
        oldsegs = [s for s in self.onsource if s.intersects(inseg)]

        # FIXME: Is it possible for this to be > 1?
        # Yes, but the reorganization logic is tricky.
        # Call normalize often (like everytime you add a new segment).
        if len(oldsegs) == 1:
            oldseg = oldsegs[0]
            sbtable += self.onsource[oldseg]
            del self.onsource[oldseg]
            inseg = oldseg | inseg

        self.onsource[inseg] = sbtable
Example #24
def get_exposures(params, config_struct, segmentlist):
    '''
    Convert the availability times to a list of segments with the length of
    the telescope exposures.
    segmentlist: the segments during which the telescope can do the follow-up.
    '''
    exposurelist = segments.segmentlist()
    if "overhead_per_exposure" in config_struct.keys():
        overhead = config_struct["overhead_per_exposure"]
    else:
        overhead = 0.0

    # add the filter change time to the total overheads for integrated
    if not params["doAlternatingFilters"]:
        overhead = overhead + config_struct["filt_change_time"]

    exposure_time = np.max(params["exposuretimes"])

    for ii in range(len(segmentlist)):
        start_segment, end_segment = segmentlist[ii][0], segmentlist[ii][1]
        exposures = np.arange(start_segment, end_segment,
                              (overhead + exposure_time) / 86400.0)

        for jj in range(len(exposures)):
            exposurelist.append(
                segments.segment(exposures[jj],
                                 exposures[jj] + exposure_time / 86400.0))

    return exposurelist
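A standalone sketch of the slicing get_exposures performs, dividing a single availability window (expressed in days, MJD-style) into exposure-length segments separated by a per-exposure overhead (hypothetical numbers):

import numpy as np
from ligo import segments

exposure_time = 300.0  # seconds per exposure
overhead = 30.0        # seconds between exposures
start, end = 0.0, 3600.0 / 86400.0  # a one-hour window, in days

exposurelist = segments.segmentlist()
for t in np.arange(start, end, (overhead + exposure_time) / 86400.0):
    exposurelist.append(segments.segment(t, t + exposure_time / 86400.0))
print(len(exposurelist))  # 11 exposures fit in the hour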
Example #25
def convert_json_list_to_segmentlist(jsonlist):
     """ 
     Helper function used to convert json list of lists type object to a 
     segmentlist object
     """
     segment_list=segments.segmentlist([segments.segment(x[0],x[1]) for x in jsonlist])
     return segment_list
Example #26
	def __init__(self, ifo):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot("Confidence", "Trigger Rate (Hz)")
		self.ifo = ifo
		self.nevents = 0
		self.x = []
		self.seglist = segments.segmentlist()
		self.axes.loglog()
Example #27
	def __init__(self, ifo):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot("Confidence", "Trigger Rate (Hz)")
		self.ifo = ifo
		self.nevents = 0
		self.x = []
		self.seglist = segments.segmentlist()
		self.axes.loglog()
Example #28
def indices_within_segments(times, segment_files, ifo=None, segment_name=None):
    """ Return the list of indices that should be vetoed by the segments in the
    list of veto_files.

    Parameters
    ----------
    times: numpy.ndarray of integer type
        Array of gps start times
    segment_files: string or list of strings
        A string or list of strings that contain the path to xml files that
        contain a segment table
    ifo: string, optional
        The ifo to retrieve segments for from the segment files
    segment_name: str, optional
        name of segment

    Returns
    -------
    indices: numpy.ndarray
        The array of index values within the segments
    segmentlist:
        The segment list corresponding to the selected time.
    """
    veto_segs = segmentlist([])
    indices = numpy.array([], dtype=numpy.uint32)
    for veto_file in segment_files:
        veto_segs += select_segments_by_definer(veto_file, segment_name, ifo)
    veto_segs.coalesce()

    start, end = segments_to_start_end(veto_segs)
    if len(start) > 0:
        idx = indices_within_times(times, start, end)
        indices = numpy.union1d(indices, idx)

    return indices, veto_segs.coalesce()
Example #29
def indices_within_segments(times, segment_files, ifo=None, segment_name=None):
    """ Return the list of indices that should be vetoed by the segments in the
    list of veto_files.

    Parameters
    ----------
    times: numpy.ndarray of integer type
        Array of gps start times
    segment_files: string or list of strings
        A string or list of strings that contain the path to xml files that
        contain a segment table
    ifo: string, optional
        The ifo to retrieve segments for from the segment files
    segment_name: str, optional
        name of segment

    Returns
    -------
    indices: numpy.ndarray
        The array of index values within the segments
    segmentlist:
        The segment list corresponding to the selected time.
    """
    veto_segs = segmentlist([])
    indices = numpy.array([], dtype=numpy.uint32)
    for veto_file in segment_files:
        veto_segs += select_segments_by_definer(veto_file, segment_name, ifo)
    veto_segs.coalesce()

    start, end = segments_to_start_end(veto_segs)
    if len(start) > 0:
        idx = indices_within_times(times, start, end)
        indices = numpy.union1d(indices, idx)

    return indices, veto_segs.coalesce()
Example #30
def get_ha_segments(config_struct,segmentlist,observer,fxdbdy,radec):

    if "ha_constraint" in config_struct:
        ha_constraint = config_struct["ha_constraint"].split(",")
        ha_min = float(ha_constraint[0])
        ha_max = float(ha_constraint[1])
    else:
        ha_min, ha_max = -24.0, 24.0

    if config_struct["telescope"] == "DECam":
        if radec.dec.deg <= -30.0:
            ha_min, ha_max = -5.2, 5.2
        else:
            ha_min, ha_max = -0.644981*np.sqrt(35.0-radec.dec.deg), 0.644981*np.sqrt(35.0-radec.dec.deg)
            
    location = astropy.coordinates.EarthLocation(config_struct["longitude"],
                                                 config_struct["latitude"],
                                                 config_struct["elevation"])

    halist = segments.segmentlist()
    for seg in segmentlist:
        mjds = np.linspace(seg[0], seg[1], 100)
        tt = Time(mjds, format='mjd', scale='utc', location=location)
        lst = tt.sidereal_time('mean')
        ha = (lst - radec.ra).hour
        idx = np.where((ha >= ha_min) & (ha <= ha_max))[0]
        if len(idx) >= 2:
            halist.append(segments.segment(mjds[idx[0]],mjds[idx[-1]]))
 
    return halist
Example #31
	def __init__(self, ifo):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot("GPS Time (s)", "Confidence")
		self.ifo = ifo
		self.nevents = 0
		self.x = []
		self.y = []
		self.seglist = segments.segmentlist()
		self.axes.semilogy()
Example #32
	def finish(self):
		self.axes.plot(self.injected_x, self.injected_y, "k+")
		if not options.made_only:
			self.axes.plot(self.missed_x, self.missed_y, "rx")
		for seg in ~self.seglist & segments.segmentlist([segments.segment(self.axes.get_xlim())]):
			self.axes.axvspan(float(seg[0]), float(seg[1]), facecolor = "k", alpha = 0.2)
		self.axes.set_ylim([min(self.injected_y), max(self.injected_y)])
		self.axes.set_title("Injection Locations\n(%d Injections)" % self.num_injections)
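The axvspan loop above uses a common segments idiom: complement the analyzed segments, then intersect with the plot's x-limits to obtain the gaps inside the visible window. A standalone sketch (assumes only the ligo-segments package):

from ligo import segments

seglist = segments.segmentlist([segments.segment(10, 20),
                                segments.segment(40, 50)])
xlim = segments.segmentlist([segments.segment(0, 60)])
print(~seglist & xlim)
# [segment(0, 10), segment(20, 40), segment(50, 60)]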
Example #33
	def __init__(self, ifo):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot("GPS Time (s)", "Confidence")
		self.ifo = ifo
		self.nevents = 0
		self.x = []
		self.y = []
		self.seglist = segments.segmentlist()
		self.axes.semilogy()
Example #34
def cleanlist(seglist, min_length):
    removals = segments.segmentlist()
    for seg in seglist:
        if abs(seg) < min_length:  # a segment's duration is abs(segment)
            removals.append(seg)
    seglist = seglist - removals

    return seglist
Example #35
def cleanlist(seglist, min_length):
  removals = segments.segmentlist()
  for seg in seglist:
    if abs(seg) < min_length:
      removals.append(seg)
  seglist = seglist - removals

  return seglist
Example #36
def convert_json_list_to_segmentlist(jsonlist):
    """ 
    Helper function used to convert JSON list of lists-type object to a 
    segmentlist object-
    * Utility method, ripped from jsonhelper.py in dqsegdb package, until we 
    can use it as a dependency.
    """
    return segments.segmentlist([segments.segment(x[0],x[1]) for x in jsonlist])
Example #37
def get_segments_tile(config_struct, observatory, radec, segmentlist):

    observer = astroplan.Observer(location=observatory)

    fxdbdy = astroplan.FixedTarget(coord=radec)

    date_start = Time(segmentlist[0][0], format='mjd', scale='utc')
    date_end = Time(segmentlist[-1][1], format='mjd', scale='utc')

    tilesegmentlist = segments.segmentlist()
    while date_start.mjd < date_end.mjd:
        date_rise = observer.target_rise_time(date_start, fxdbdy)
        date_set = observer.target_set_time(date_start, fxdbdy)

        print(date_rise.mjd, date_set.mjd)
        if (date_rise.mjd < 0) and (date_set.mjd < 0):
            break

        print(date_rise.mjd, date_set.mjd)

        if date_rise > date_set:
            date_rise = observer.target_rise_time(
                date_start - TimeDelta(24 * u.hour), fxdbdy)
        print(date_rise.mjd, date_set.mjd)

        segment = segments.segment(date_rise.mjd, date_set.mjd)
        tilesegmentlist = tilesegmentlist + segments.segmentlist([segment])
        tilesegmentlist.coalesce()

        date_start = date_set + TimeDelta(24 * u.hour)

    #moonsegmentlist = get_skybrightness(\
    #    config_struct,segmentlist,observer,fxdbdy,radec)

    moonsegmentlist = get_moon_segments(\
        config_struct,segmentlist,observer,fxdbdy,radec)

    tilesegmentlistdic = segments.segmentlistdict()
    tilesegmentlistdic["observations"] = segmentlist
    tilesegmentlistdic["tile"] = tilesegmentlist
    tilesegmentlistdic["moon"] = moonsegmentlist
    tilesegmentlist = tilesegmentlistdic.intersection(
        ["observations", "tile", "moon"])
    tilesegmentlist.coalesce()

    return tilesegmentlist
Example #38
def generated_ascii(json_str, filepath):
    res_dict = json.loads(json_str)
    active_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in res_dict['active']])
    active_segments_string = '\n'.join(
        [str(i[0]) + "," + str(i[1]) for i in active_segments])
    with open(filepath, 'w+') as output_fileh:
        output_fileh.write(active_segments_string)
    return filepath
Example #39
 def finish(self):
     self.axes.set_title("Time-Frequency Plane\n(%d Triggers)" %
                         self.nevents)
     for seg in ~self.seglist & segments.segmentlist(
         [segments.segment(self.axes.get_xlim())]):
         self.axes.axvspan(float(seg[0]),
                           float(seg[1]),
                           facecolor="k",
                           alpha=0.2)
Example #40
def generated_ascii(json_str, filepath):
    res_dict = json.loads(json_str)
    active_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in res_dict['active']])
    active_segments_string = '\n'.join(
        [str(i[0]) + "," + str(i[1]) for i in active_segments])
    with open(filepath, 'w+') as output_fileh:
        output_fileh.write(active_segments_string)
    return filepath
Example #41
 def finish(self):
     self.axes.set_title("Trigger Confidence vs. Time\n(%d Triggers)" %
                         self.nevents)
     self.axes.plot(self.x, self.y, "k+")
     for seg in ~self.seglist & segments.segmentlist(
         [segments.segment(self.axes.get_xlim())]):
         self.axes.axvspan(float(seg[0]),
                           float(seg[1]),
                           facecolor="k",
                           alpha=0.2)
Example #42
	def __init__(self, instrument):
		self.fig, self.axes = SnglBurstUtils.make_burst_plot("GPS Time (s)", "Frequency (Hz)")
		self.axes.semilogy()
		self.instrument = instrument
		self.num_injections = 0
		self.injected_x = []
		self.injected_y = []
		self.missed_x = []
		self.missed_y = []
		self.seglist = segments.segmentlist()
Example #43
def schedule_alternating(params, config_struct, telescope, map_struct, tile_struct):

    if "filt_change_time" in config_struct.keys(): filt_change_time = config_struct["filt_change_time"]
    else: filt_change_time = 0

    filters, exposuretimes = params["filters"], params["exposuretimes"]
    coverage_structs = []
    maxidx = 0
    for i in range(len(exposuretimes)):
        params["filters"] = [filters[i]]
        params["exposuretimes"] = [exposuretimes[i]]
        config_struct["exposurelist"] = segments.segmentlist(config_struct["exposurelist"][maxidx:])
        total_nexps  = len(config_struct["exposurelist"])

        # if the duration of a single block is less than 30 min, shift by additional time to add up to 30 min
        if i > 0:
            start = Time(coverage_struct["data"][0][2], format='mjd')
            end =  Time(coverage_struct["data"][-1][2], format='mjd')

            delta = end - start
            delta.format = 'sec'
            duration = delta.value + exposuretimes[i] + filt_change_time
            extra_time = (30 * 60) - duration
            if extra_time > 0:
                extra_time = extra_time + filt_change_time
            else:
                extra_time = filt_change_time
            config_struct["exposurelist"] = config_struct["exposurelist"].shift(extra_time / 86400.)
    
        prob = {}
        for key in tile_struct.keys():
            if tile_struct[key]['prob']==0.0:
                prob[key]=0.0
        
        if not params["tilesType"] == "galaxy":
            tile_struct = gwemopt.tiles.powerlaw_tiles_struct(params, config_struct, telescope, map_struct, tile_struct)
        
        if params["doBalanceExposure"]:
            for key in prob: #re-assigns 0 prob to tiles w/ unbalanced observations in case they were overwritten
                tile_struct[key]['prob'] = 0.0

        coverage_struct = gwemopt.scheduler.scheduler(params, config_struct, tile_struct)
        if params["doMaxTiles"]:
            tile_struct,doReschedule = gwemopt.utils.slice_number_tiles(params, telescope, tile_struct, coverage_struct)
            if doReschedule:
                coverage_struct = gwemopt.scheduler.scheduler(params, config_struct, tile_struct)

        if len(coverage_struct["exposureused"]) > 0:
            maxidx = int(coverage_struct["exposureused"][-1])
            deltaL = total_nexps - maxidx
        else:
            deltaL = 0

        coverage_structs.append(coverage_struct)
        if deltaL <= 1:
            break
    params["filters"], params["exposuretimes"] = filters, exposuretimes

    return gwemopt.coverage.combine_coverage_structs(coverage_structs),tile_struct
Example #44
    def event_significance(self, nevents=10, rank_fcn=None):
        """
		Calculate the Poissonian significance of the 'on source' trial set for up to the loudest nevents.
		"""
        if rank_fcn is None:
            rank_fcn = lambda e: e.snr

        offtime = float(abs(segments.segmentlist(self.offsource.keys())))
        offsource = sorted(chain(*self.offsource.values()),
                           key=lambda sb: -sb.snr)
        # materialize the zip so it can be sliced under Python 3
        offrate = list(zip(offsource,
                           map(lambda i: i / offtime, range(1,
                                                            len(offsource) + 1))))
        offrate = offrate[::-1]
        offsource = offsource[::-1]
        offsnr = list(map(rank_fcn, offsource))  # bisect_left needs a sequence

        ontime = float(abs(segments.segmentlist(self.onsource.keys())))
        if ontime == 0:
            return []
        onsource = sorted(chain(*self.onsource.values()),
                          key=lambda sb: -sb.snr)
        onsnr = map(rank_fcn, onsource)
        onrate = []
        for snr in onsnr:
            try:
                onrate.append(offrate[bisect_left(offsnr, snr)][1])
            except IndexError:  # on SNR > max off SNR
                onrate.append(0)

        onsource_sig = []
        for i, sb in enumerate(onsource[:nevents]):
            # From Gaussian
            #exp_num = chi2.cdf(sb.chisq_dof, sb.snr)*len(onsource)
            # From off-source
            exp_num = onrate[i] * ontime
            # FIXME: requires scipy >= 0.10
            #onsource_sig.append([sb.snr, -poisson.logsf(i, exp_num)])
            onsource_sig.append(
                [rank_fcn(sb), -numpy.log(poisson.sf(i, exp_num))])

        return onsource_sig
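The significance assigned to the i-th loudest on-source event is the negative log of the Poisson survival probability of seeing more than i background events when exp_num are expected. A standalone sketch of that arithmetic (hypothetical rate and livetime):

import numpy
from scipy.stats import poisson

ontime = 100.0  # seconds of on-source livetime
onrate = 0.01   # background rate (Hz) at this event's ranking statistic
exp_num = onrate * ontime  # 1.0 expected background event

# significance of the loudest event (i = 0)
print(-numpy.log(poisson.sf(0, exp_num)))  # ~0.459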
Example #45
def get_segment_summary_times(scienceFile, segmentName):
    """
    This function will find the times for which the segment_summary is set
    for the flag given by segmentName.

    Parameters
    -----------
    scienceFile : SegFile
        The segment file that we want to use to determine this.
    segmentName : string
        The DQ flag to search for times in the segment_summary table.

    Returns
    ---------
    summSegList : ligo.segments.segmentlist
        The times that are covered in the segment summary table.
    """
    # Parse the segmentName
    segmentName = segmentName.split(':')
    if not len(segmentName) in [2,3]:
        raise ValueError("Invalid channel name %s." %(segmentName))
    ifo = segmentName[0]
    channel = segmentName[1]
    version = ''
    if len(segmentName) == 3:
        version = int(segmentName[2])

    # Load the filename
    xmldoc = utils.load_filename(scienceFile.cache_entry.path,
                             gz=scienceFile.cache_entry.path.endswith("gz"),
                             contenthandler=ContentHandler)

    # Get the segment_def_id for the segmentName
    segmentDefTable = table.get_table(xmldoc, "segment_definer")
    for entry in segmentDefTable:
        if (entry.ifos == ifo) and (entry.name == channel):
            if len(segmentName) == 2 or (entry.version==version):
                segDefID = entry.segment_def_id
                break
    else:
        raise ValueError("Cannot find channel %s in segment_definer table."\
                         %(segmentName))

    # Get the segmentlist corresponding to this segmentName in segment_summary
    segmentSummTable = table.get_table(xmldoc, "segment_summary")
    summSegList = segments.segmentlist([])
    for entry in segmentSummTable:
        if entry.segment_def_id == segDefID:
            segment = segments.segment(entry.start_time, entry.end_time)
            summSegList.append(segment)
    summSegList.coalesce()

    return summSegList
Example #46
def coalesceResultDictionary(result_dict):
    """
    Takes a dictionary as returned by QueryTimes or QueryTimeless and converts the lists of tuples into actual segment lists (and coalesces them).

    Parameters
    ----------
    result_dict : `dict`
        This is the input result dictionary from the other api calls.

    Returns
    -------
    out_result_dict : `dict`
        This is the output result dictionary with actual segment lists (and
        coalesced results).
    """
    import copy
    out_result_dict=copy.deepcopy(result_dict)
    active_seg_python_list=[segments.segment(i[0],i[1]) for i in result_dict['active']]
    active_seg_list=segments.segmentlist(active_seg_python_list)
    active_seg_list.coalesce()
    out_result_dict['active']=active_seg_list
    known_seg_python_list=[segments.segment(i[0],i[1]) for i in result_dict['known']]
    known_seg_list=segments.segmentlist(known_seg_python_list)
    known_seg_list.coalesce()
    out_result_dict['known']=known_seg_list
    return out_result_dict
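A short usage sketch of coalesceResultDictionary, with a hypothetical result dictionary shaped like those returned by the dqsegdb query calls:

result_dict = {'active': [(0, 10), (5, 20)], 'known': [(0, 30)]}
out = coalesceResultDictionary(result_dict)
print(out['active'])  # [segment(0, 20)]
print(out['known'])   # [segment(0, 30)]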
Example #47
def get_valid_segments(segment_url, base_dir, ifo, science_flag, start_time, end_time):
    print("Finding valid analysis times for %s, please hold..." % ifo)

    cmd  = 'ligolw_segment_query --query-segments --segment-url %s --include-segments %s --gps-start-time %d --gps-end-time %d | ligolw_print -t segment -c start_time -c end_time' % (segment_url, science_flag, start_time, end_time)
    pipe = os.popen(cmd)

    print(cmd)

    results   = [x.strip().split(',') for x in pipe]
    science   = segments.segmentlist([segments.segment(int(x[0]), int(x[1])) for x in results])
    science.coalesce()

    print("Science: ")
    for s in science:
       print(s[0], s[1])

    framedir  = base_dir + '/' + ifo[0] + '1'
    chunks    = [f.split('.')[0].split('-') for f in get_all_files_in_range(framedir, start_time, end_time)]
    available = segments.segmentlist([ segments.segment( int(x[-2]), int(x[-2]) + int(x[-1]) ) for x in chunks if len(x) == 6 ])
    available.coalesce()

    print("Available:")
    for s in available:
       print(s[0], s[1])

    result = science & available

    result.coalesce()

    print("Result:")
    for s in result:
       print(s[0], s[1])

    print("done.")

    return result
Example #48
def select_segments_by_definer(segment_file, segment_name=None, ifo=None):
    """ Return the list of segments that match the segment name

    Parameters
    ----------
    segment_file: str
        path to segment xml file

    segment_name: str
        Name of segment
    ifo: str, optional

    Returns
    -------
    seg: list of segments
    """
    from glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)
    indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h)
    segment_table  = table.get_table(indoc, 'segment')

    seg_def_table = table.get_table(indoc, 'segment_definer')
    def_ifos = seg_def_table.getColumnByName('ifos')
    def_names = seg_def_table.getColumnByName('name')
    def_ids = seg_def_table.getColumnByName('segment_def_id')

    valid_id = []
    for def_ifo, def_name, def_id in zip(def_ifos, def_names, def_ids):
        if ifo and ifo != def_ifo:
            continue
        if segment_name and segment_name != def_name:
            continue
        valid_id += [def_id]

    start = numpy.array(segment_table.getColumnByName('start_time'))
    start_ns = numpy.array(segment_table.getColumnByName('start_time_ns'))
    end = numpy.array(segment_table.getColumnByName('end_time'))
    end_ns = numpy.array(segment_table.getColumnByName('end_time_ns'))
    start, end = start + 1e-9 * start_ns, end + 1e-9 * end_ns
    did = segment_table.getColumnByName('segment_def_id')

    keep = numpy.array([d in valid_id for d in did])
    if sum(keep) > 0:
        return start_end_to_segments(start[keep], end[keep])
    else:
        return segmentlist([])
Example #49
def get_missing_segs_from_frame_file_cache(datafindcaches):
    """
    This function will use os.path.isfile to determine if all the frame files
    returned by the local datafind server actually exist on the disk. This can
    then be used to update the science times if needed.

    Parameters
    -----------
    datafindcaches : OutGroupList
        List of all the datafind output files.

    Returns
    --------
    missingFrameSegs : Dict. of ifo-keyed glue.segments.segmentlist instances
        The times corresponding to missing frames found in datafindOuts.
    missingFrames: Dict. of ifo keyed lal.Cache instances
        The list of missing frames
    """
    missingFrameSegs = {}
    missingFrames = {}
    for cache in datafindcaches:
        if len(cache) > 0:
            # Don't bother if these are not file:// urls, assume all urls in
            # one cache file must be the same type
            if not cache[0].scheme == 'file':
                warn_msg = "We have %s entries in the " %(cache[0].scheme,)
                warn_msg += "cache file. I do not check if these exist."
                logging.info(warn_msg)
                continue
            _, currMissingFrames = cache.checkfilesexist(on_missing="warn")
            missingSegs = segments.segmentlist(e.segment \
                                         for e in currMissingFrames).coalesce()
            ifo = cache.ifo
            if ifo not in missingFrameSegs:
                missingFrameSegs[ifo] = missingSegs
                missingFrames[ifo] = lal.Cache(currMissingFrames)
            else:
                missingFrameSegs[ifo].extend(missingSegs)
                # NOTE: This .coalesce probably isn't needed as the segments
                # should be disjoint. If speed becomes an issue maybe remove it?
                missingFrameSegs[ifo].coalesce()
                missingFrames[ifo].extend(currMissingFrames)
    return missingFrameSegs, missingFrames
Example #50
def run_show_types(doc, connection, engine, gps_start_time, gps_end_time, included_segments_string, excluded_segments_string):
    resulttable = lsctables.New(ShowTypesResultTable)
    doc.childNodes[0].appendChild(resulttable)

    sql = """SELECT segment_definer.ifos, segment_definer.name, segment_definer.version,
                 (CASE WHEN segment_definer.comment IS NULL THEN '-' WHEN segment_definer.comment IS NOT NULL THEN segment_definer.comment END),
                 segment_summary.start_time, segment_summary.end_time,
                 (CASE WHEN segment_summary.comment IS NULL THEN '-' WHEN segment_summary.comment IS NOT NULL THEN segment_summary.comment END)
          FROM  segment_definer, segment_summary
          WHERE segment_definer.segment_def_id = segment_summary.segment_def_id
          AND   NOT (segment_summary.start_time > %d OR %d > segment_summary.end_time)
          """ % (gps_end_time, gps_start_time)

    rows = engine.query(sql)

    seg_dict = {}

    for row in rows:
        ifos, name, version, segment_definer_comment, segment_summary_start_time, segment_summary_end_time, segment_summary_comment = row
        key = (ifos, name, version, segment_definer_comment, segment_summary_comment)
        if key not in seg_dict:
            seg_dict[key] = []

        seg_dict[key].append(segments.segment(segment_summary_start_time, segment_summary_end_time))

    for key, value in seg_dict.items():
        segmentlist = segments.segmentlist(value)
        segmentlist.coalesce()

        for segment in segmentlist:
            result = ShowTypesResult()
            result.ifos, result.name, result.version, result.segment_definer_comment, result.segment_summary_comment = key
            result.segment_summary_start_time, result.segment_summary_end_time = segment
            result.ifos = result.ifos.strip()

            resulttable.append(result)

    engine.close()
Example #51
def split_segment(timing_params, segment, psds_per_job):
	"""
	Split the data segment into correctly-overlapping segments.  We try
	to have the number of PSDs in each segment be equal to
	psds_per_job, but with a short segment at the end if needed.
	"""
	# in seconds
	joblength = job_length_from_psds(timing_params, psds_per_job)
	# in samples
	joboverlap = 2 * timing_params.filter_corruption + (timing_params.psd_length - timing_params.psd_shift)
	# in seconds
	joboverlap /= timing_params.resample_rate

	segs = segments.segmentlist()
	t = segment[0]
	while t + joblength <= segment[1]:
		segs.append(segments.segment(t, t + joblength) & segment)
		t += joblength - joboverlap

	extra_psds = int(psds_from_job_length(timing_params, float(segment[1] - t)))
	if extra_psds:
		segs.append(segments.segment(t, t + job_length_from_psds(timing_params, extra_psds)))
	return segs
Example #52
def split_segment(seg, min_segment_length, pad, overlap, short_segment_duration, max_job_length):
	# avoid infinite loop
	if min_segment_length + 2 * pad <= overlap:
		raise ValueError("infinite loop: min_segment_length + 2 * pad must be > overlap")

	# clip max_job_length down to an allowed size
	max_job_length = clip_segment_length(max_job_length, pad, short_segment_duration)

	seglist = segments.segmentlist()
	while abs(seg) >= min_segment_length + 2 * pad:
		# try to use max_job_length each time
		if abs(seg) >= max_job_length:
			seglist.append(segments.segment(seg[0], seg[0] + max_job_length))
		else:
			seglist.append(segments.segment(seg[0], seg[0] + clip_segment_length(abs(seg), pad, short_segment_duration)))
		assert abs(seglist[-1]) != 0	# safety-check for no-op
		# bounds must be integers
		if abs((int(seglist[-1][0]) - seglist[-1][0]) / seglist[-1][0]) > 1e-14 or abs((int(seglist[-1][1]) - seglist[-1][1]) / seglist[-1][1]) > 1e-14:
			raise ValueError("segment %s does not have integer boundaries" % str(seglist[-1]))
		# advance segment
		seg = segments.segment(seglist[-1][1] - overlap, seg[1])
	if not seglist:
		raise ValueError("unable to use segment %s" % str(seg))
	return seglist
Example #53
    def segmentlistdict(self):
        """
        A segmentlistdict object describing the instruments and time
        spanned by this CacheEntry.  A new object is constructed each time
        this attribute is accessed (segments are immutable so there is no
        reason to try to share a reference to the CacheEntry's internal
        segment; modifications of one would not be reflected in the other
        anyway).

        Example:

        >>> c = CacheEntry("H1 S5 815901601 576.5 file://localhost/home/kipp/tmp/1/H1-815901601-576.xml")
        >>> c.segmentlistdict['H1']
        [segment(LIGOTimeGPS(815901601, 0), LIGOTimeGPS(815902177, 500000000))]

        The \"observatory\" column of the cache entry, which is frequently
        used to store instrument names, is parsed into instrument names for
        the dictionary keys using the same rules as
        ligo.lw.lsctables.instrumentsproperty.get().

        Example:

        >>> c = CacheEntry("H1H2, S5 815901601 576.5 file://localhost/home/kipp/tmp/1/H1H2-815901601-576.xml")
        >>> c.segmentlistdict['H1H2']
        [segment(LIGOTimeGPS(815901601, 0), LIGOTimeGPS(815902177, 500000000))]
        """
        # the import has to be done here to break the cyclic
        # dependency
        try:
            from ligo.lw.lsctables import instrumentsproperty
        except ImportError:
            # FIXME:  remove when we can rely on ligo.lw being installed
            # (why isn't it!?)
            from glue.ligolw.lsctables import instrumentsproperty
        instruments = instrumentsproperty.get(self.observatory) or (None,)
        return segments.segmentlistdict((instrument, segments.segmentlist(self.segment is not None and [self.segment] or [])) for instrument in instruments)
Example #54
##############################################################################
# get the pad and chunk lengths from the values in the ini file
paddata = int(cp.get('data', 'pad-data'))
n = int(cp.get('data', 'segment-length'))
s = int(cp.get('data', 'number-of-segments'))
r = int(cp.get('data', 'sample-rate'))
o = int(cp.get('inspiral', 'segment-overlap'))
length = ( n * s - ( s - 1 ) * o ) // r  # floor division keeps whole seconds under Python 3
overlap = o // r
minsciseg = length + 2 * paddata
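For concreteness, a worked version of the same arithmetic with hypothetical ini-file values (not taken from the source):

# hypothetical values: 1048576-sample segments, 15 segments per chunk,
# 4096 Hz sample rate, 524288-sample overlap, 8 s of padding
n, s, r, o, paddata = 1048576, 15, 4096, 524288, 8
length = (n * s - (s - 1) * o) // r   # 2048 s of analysable data per chunk
overlap = o // r                      # 128 s of overlap between chunks
minsciseg = length + 2 * paddata      # 2064 s minimum usable science segment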

##############################################################################
# Based on the start and end time, generate a list of epochs to
# analyze. An entire hipe dag will be run for each of these epochs.
search_epochs = segments.segmentlist()
istart = opts.start_time
while istart < opts.end_time:
  iend = min(istart + opts.interval, opts.end_time)
  search_epochs.append(segments.segment(istart,iend))
  istart += opts.interval
# FIXME:  the writing out of the segments should be done at the end so
# that successfully generated dags, etc can be maintained from run to
# run
segmentsUtils.tosegwizard(file("multi_hipe_selectedsegs.txt",'w'),search_epochs)

##############################################################################
# Read in all the segment lists
ifolist = []
Example No. 55
  ##############################################################################
  # set up the segment including the off-source segment

  grb_ifolist.sort()
  ifo_times = "".join(grb_ifolist)

  if offSourceSegment is None:
    print("Warning: insufficient multi-IFO data to construct an off-source segment for GRB %s; skipping" % grb.event_number_grb, file=sys.stderr)
    continue
  elif opts.verbose:
    print("Sufficient off-source data has been found in", ifo_times, "time.")

  # write out the segment list to a segwizard file
  offsource_segfile = idirectory + "/offSourceSeg.txt"
  segmentsUtils.tosegwizard(open(offsource_segfile, "w"),
                            segments.segmentlist([offSourceSegment]))
  onsource_segfile = idirectory + "/onSourceSeg.txt"
  segmentsUtils.tosegwizard(open(onsource_segfile, "w"),
                            segments.segmentlist([onSourceSegment]))
  segLen = abs( onSourceSegment )
  bufferSegment = segments.segment( onSourceSegment[0]-opts.number_buffer_left*segLen,\
                                    onSourceSegment[1]+opts.number_buffer_right*segLen)
  buffer_segfile = idirectory + "/bufferSeg.txt"
  segmentsUtils.tosegwizard(open(buffer_segfile, "w"),
                            segments.segmentlist([bufferSegment]))

  if opts.verbose:
    print("on-source segment: ", onSourceSegment)
    print("off-source segment: ", offSourceSegment)

  ############################################################################
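For intuition, a worked sketch of the buffer-segment arithmetic above (all numbers are hypothetical; the ligo-segments package is assumed):

from ligo import segments

onSourceSegment = segments.segment(968654000, 968654060)  # a 60 s on-source window
segLen = abs(onSourceSegment)                             # 60 s
# with number_buffer_left = number_buffer_right = 8, the buffer extends
# 8 * 60 = 480 s on each side of the on-source segment
bufferSegment = segments.segment(onSourceSegment[0] - 8 * segLen,
                                 onSourceSegment[1] + 8 * segLen)
assert abs(bufferSegment) == (8 + 1 + 8) * segLen         # 1020 s in total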
Example No. 56
def getSegments(seglistin, interval):
  """
  Return the segments from seglistin that intersect the given interval.
  Overlapping segments are returned whole, not clipped to the interval.
  """
  seglistout = segments.segmentlist([s for s in seglistin
      if s[1] > interval[0] and s[0] < interval[1]])

  return seglistout
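A small sketch of the overlap semantics (values illustrative): segments are kept whole when they intersect the interval, not clipped to it.

from ligo import segments

seglist = segments.segmentlist([segments.segment(0, 10),
                                segments.segment(20, 30),
                                segments.segment(40, 50)])
# keeps (0, 10) and (20, 30) whole; (40, 50) lies outside the interval
print(getSegments(seglist, segments.segment(5, 35)))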
Example No. 57
def calculate_combined_result(includedJSON,excludedJSON,startTime,endTime,ifo):
    """
    Calculate the result of the union of the active times for the included flag less the intersection of that result with the union of the excluded flags
    Inputs are 2 lists of python dictionaries representing the JSON (already have run json.loads() on the JSON), a start time, and end time, and the ifo name (it does not make sense to include/exclude across multiple ifos)

    Parameters
    ----------
    startTime : `int`
        Ex: 999999999
    endTime : `int`
        Ex: 999999999
    ifo : `string`
        Ex: 'L1'

    """
    total_active_list=segments.segmentlist([])
    for flag in includedJSON:
        #result=json.loads(flag)
        #flagDict=result['flags'][0]
        active_list=flag['active']
        active_segments=segments.segmentlist([segments.segment(x[0],x[1]) for x in active_list])
        total_active_list=total_active_list+active_segments
        total_active_list.coalesce()
    for flag in excludedJSON:
        #result=json.loads(flag)
        #flagDict=result['flags'][0]
        active_list=flag['active']
        active_segments=segments.segmentlist([segments.segment(x[0],x[1]) for x in active_list])
        total_active_list=total_active_list-active_segments
        total_active_list.coalesce()
    # Now, total_active_list contains a segmentlist object with segments spanning the expected result
    # (includedJSON and excludedJSON are lists of already-parsed JSON dictionaries)

    ## Note about known segments for the result:  we just report the start and
    ## end time of the period queried!  Reporting the actual validity of
    ## multiple segments is somewhat undefined when the excluded flags and/or
    ## some of the included flags are not known about for a time when the
    ## included ones are.  Technically, since exclusion trumps all inclusions,
    ## if an excluded segment is known and active at any given time, the
    ## result is known for that time explicitly.
    result_known_segment_list=segments.segmentlist([segments.segment(startTime,endTime)])

    ## Now we have to build the JSON for this flag
    # JSON flag objects looks like this (each is a dictionary!):
    #  {
    #    "ifo" : "ifo",
    #    "name" : "flag",
    #    "version" : n,
    #    "comment" : "description",     #    "provenance_url" : "aLog URL",
    #    "deactivated" : false|true,
    #    "active_indicates_ifo_badness" : true|false|null,
    #    // known segments returned for both /active and /known URIs, no segments are returned for the /metadata or /report/flags queries
    #    // aka S6 summary segments
    #    "known" : [ [ts,te], [ts,te], ... ]
    #    // active segments returned only for /active URI:
    #    "active" : [ [ts,te], [ts,te], ... ]
    #    // Comment: or "segment" : [ [ts,te,value], [ts,te,value], ... ] (where value can be -1, 0 or +1)
    #    // inactive == (known - active)
    #    // unknown == (all_time - known)
    #  },
    ## Make the json-ready flag dictionary for the combined result:
    ifo=ifo # replicating old behavior from ligolw_segment_query
    # Note: This just uses the ifo of the last excluded flag!
    name='RESULT'
    version=1
    known_segments=result_known_segment_list
    active_segments=total_active_list
    result_flag=jsonhelper.buildFlagDict(ifo,name,version,known_segments,active_segments)
    return result_flag
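A hedged usage sketch with hand-built flag dictionaries (the 'active' payloads are illustrative, and jsonhelper.buildFlagDict is assumed to be importable as in the source):

included = [{'active': [[0, 100]]}]   # hypothetical included flag
excluded = [{'active': [[40, 60]]}]   # hypothetical excluded flag
result = calculate_combined_result(included, excluded, 0, 100, 'L1')
# result['active'] should span [0, 40] and [60, 100] as coalesced segments,
# and result['known'] the full queried interval [0, 100]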
Example No. 58
def run_query_types(doc, proc_id, connection, engine, gps_start_time, gps_end_time, included_segments):
    query_segment = segments.segmentlist([segments.segment(gps_start_time, gps_end_time)])

    sql = """SELECT segment_definer.ifos, segment_definer.name,segment_definer.version,
           (CASE WHEN segment_definer.comment IS NULL THEN '-' WHEN segment_definer.comment IS NOT NULL THEN segment_definer.comment END),
           segment_summary.start_time, segment_summary.end_time,
           (CASE WHEN segment_summary.comment IS NULL THEN '-' WHEN segment_summary.comment IS NOT NULL THEN segment_summary.comment END)
    FROM segment_definer, segment_summary
    WHERE segment_definer.segment_def_id = segment_summary.segment_def_id
    AND NOT(%d > segment_summary.end_time OR segment_summary.start_time > %d)
    """ % (gps_start_time, gps_end_time)

    # a list comprehension so the emptiness test works under Python 3, where map() is lazy
    type_clauses = [seg_spec_to_sql(spec) for spec in included_segments.split(',')]

    if type_clauses != []:
        sql += " AND (" + " OR ".join(type_clauses) + ")"


    segment_types = {}

    for row in engine.query(sql):
        sd_ifo, sd_name, sd_vers, sd_comment, ss_start, ss_end, ss_comment = row
        key = (sd_ifo, sd_name, sd_vers, sd_comment, ss_comment)
        if key not in segment_types:
            segment_types[key] = segments.segmentlist([])
        segment_types[key] |= segments.segmentlist([segments.segment(ss_start, ss_end)])

    engine.close()

    # Create segment definer and segment_summary tables
    seg_def_table = lsctables.New(lsctables.SegmentDefTable, columns = ["process_id", "segment_def_id", "ifos", "name", "version", "comment"])
    doc.childNodes[0].appendChild(seg_def_table)

    seg_sum_table = lsctables.New(lsctables.SegmentSumTable, columns = ["process_id", "segment_sum_id", "start_time", "start_time_ns", "end_time", "end_time_ns", "comment", "segment_def_id"])

    doc.childNodes[0].appendChild(seg_sum_table)

    for key in segment_types:
        # Make sure the intervals fall within the query window and coalesce
        segment_types[key].coalesce()
        segment_types[key] &= query_segment

        seg_def_id                     = seg_def_table.get_next_id()
        segment_definer                = lsctables.SegmentDef()
        segment_definer.process_id     = proc_id
        segment_definer.segment_def_id = seg_def_id
        segment_definer.ifos           = key[0]
        segment_definer.name           = key[1]
        segment_definer.version        = key[2]
        segment_definer.comment        = key[3]

        seg_def_table.append(segment_definer)

        # add each segment summary to the segment_summary_table

        for seg in segment_types[key]:
            segment_sum            = lsctables.SegmentSum()
            segment_sum.comment    = key[4]
            segment_sum.process_id = proc_id
            segment_sum.segment_def_id = seg_def_id
            segment_sum.segment_sum_id = seg_sum_table.get_next_id()
            segment_sum.start_time = seg[0]
            segment_sum.start_time_ns = 0
            segment_sum.end_time   = seg[1]
            segment_sum.end_time_ns = 0

            seg_sum_table.append(segment_sum)
Example No. 59
def calculate_versionless_result(jsonResults,startTime,endTime,ifo_input=None):
    """
    Construct output segments lists from multiple JSON objects.
    The jsonResults input is a list of json ojbects and
    are expected to be in order of decreasing versions.
    """
    debug=False
    active_results={}
    segment_known_results={}
    affected_results={}
    total_active_list=segments.segmentlist([])
    total_query_time=segments.segmentlist([segments.segment(startTime,endTime)])
    total_known_list=segments.segmentlist([])
    for resultin in jsonResults:
        #result=json.loads(resultin)
        result=resultin
        # old : flagDict=result['flags'][0] # Our queries above each return 1 flag
        version = int(result['version'])
        deactivated_state=result['metadata']['deactivated']
        if str(deactivated_state) in ["False","false"]:
            known_list=result['known']
            known_segments=segments.segmentlist([segments.segment(x[0],x[1]) for x in known_list]) # make a segment list object to do arithmetic
            known_segments.coalesce()
            segment_known_results[version]=known_segments
            active_list=result['active']
            active_segments=segments.segmentlist([segments.segment(x[0],x[1]) for x in active_list]) # make a segment list object to do arithmetic
            active_segments.coalesce()
            active_results[version]=active_segments
            if debug:
                print("Active results for version %d" % version)
                print(active_results[version])
            # Now I have 2 dictionaries of known and active segments, with
            # versions as keys, in case I want/need them later...
            # This next step might seem a bit confusing:  we need the active
            # segments for this version only during times that were not known
            # by higher segment versions.  Thus we take the intersection
            # (& operator) of the unknown segments across all higher versions
            # with the known segments for this version, then the intersection
            # of that result with the active segments for this version, and
            # add that to the running list of total active segments... phew:
            total_active_list |= (total_query_time-total_known_list)&known_segments&active_segments
            if debug:
                import pdb
                print("Running pdb to see what is in total_active_list")
                pdb.set_trace()
            total_active_list.coalesce()
            # The S6 clients want to know explicitly which range of times a
            # given version affects, so those are calculated here:
            affected_results[version]=(total_query_time-total_known_list)&known_segments
            # Note that the order matters here!  We use the total_known_list
            # from the previous iteration of the loop to decide which active
            # segments to use in this iteration, so the above line must come
            # before the next.
            total_known_list |= known_segments
            total_known_list.coalesce()
    if ifo_input is None:
        if len(jsonResults)==0:
            # actually raise (the original built the exception without raising it)
            raise RuntimeError("No versions for flag in versionless query")
        else:
            ifo=result['ifo']
    else:  # Only use ifo_input if we can't extract the ifo from the json result (usually because json result is empty)
        try:
            ifo=result['ifo']
        except (NameError, KeyError):  # result may be undefined or lack 'ifo'
            ifo=ifo_input
    name='RESULT' # Fix!!! Executive decision to make this clear that this is not a specific IFO:FLAG:VERSION resource, but rather a contrived result
    version=1 # Fix!!! Executive decision to make this match what old clients expect
    # I would prefer that this is more clear that this is not a specific IFO:FLAG:VERSION resource, but rather a contrived result, possibly by making it version 0
    total_active_list.coalesce()
    total_known_list.coalesce()
    result_flag=jsonhelper.buildFlagDict(ifo,name,version,total_known_list,total_active_list)
    return result_flag,affected_results
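A hedged usage sketch (hand-built version payloads with illustrative values; higher versions come first, as the docstring requires):

v2 = {'version': 2, 'ifo': 'L1', 'metadata': {'deactivated': False},
      'known': [[0, 50]], 'active': [[0, 10]]}
v1 = {'version': 1, 'ifo': 'L1', 'metadata': {'deactivated': False},
      'known': [[0, 100]], 'active': [[0, 100]]}
flag, affected = calculate_versionless_result([v2, v1], 0, 100, ifo_input='L1')
# v2 is authoritative over [0, 50]: active only during [0, 10]
# v1 fills the time v2 did not know about, [50, 100], where it is active
# throughout, so flag['active'] coalesces to [0, 10] and [50, 100]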