예제 #1
0
def generated_vdb_ascii(json_str, filepath):
    """Convert a DQ-flag query JSON string into an ASCII segment file.

    The file written at *filepath* contains the query-information header
    (pretty-printed JSON) followed by one ``start,end,flag`` line per
    segment, where flag is 1 for active, -1 for unknown (requested but
    not known) and 0 for known-but-not-active segments.

    Returns *filepath* so callers can chain on the written file.
    """
    res_dict = json.loads(json_str)
    active_list = res_dict['active']
    active_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in active_list])
    known_list = res_dict['known']
    known_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in known_list])
    query_start = res_dict['query_information']['start']
    query_stop = res_dict['query_information']['end']
    if query_start != 0 and query_stop != 0:
        requested_span = segments.segmentlist(
            [segments.segment(query_start, query_stop)])
    else:
        # Open-ended query: treat the full GPS range as requested.
        requested_span = segments.segmentlist(
            [segments.segment(0, 9999999999)])
    active_segments_string = ',1 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in active_segments]) + ",1 \n"
    unknown_segments = requested_span - known_segments
    unknown_segments_string = ',-1 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in unknown_segments]) + ",-1 \n"
    known_not_active_segments = known_segments - active_segments
    known_not_active_segments_string = ',0 \n'.join(
        [str(i[0]) + "," + str(i[1])
         for i in known_not_active_segments]) + ",0 \n"
    query_info_string = json.dumps(res_dict['query_information'], indent=1)
    # Context manager guarantees the handle is closed even if a write
    # fails (the previous version leaked it on error).
    with open(filepath, 'w+') as output_fileh:
        output_fileh.writelines(query_info_string)
        output_fileh.write('\n')
        output_fileh.writelines(active_segments_string)
        output_fileh.writelines(unknown_segments_string)
        output_fileh.writelines(known_not_active_segments_string)
    return filepath
예제 #2
0
    def inj_seg(self, exclude_coinc_flags=None):
        """Return the union of all excitation, segdb and bitmasked
        channel segments, plus a one-second segment around any
        scheduled time and each GraceDB event time.

        Channels whose key (after its 3-character prefix) appears in
        *exclude_coinc_flags* are skipped.
        """
        if exclude_coinc_flags is None:
            exclude_coinc_flags = []

        combined = segments.segmentlist([])
        # The three channel dictionaries receive identical treatment.
        for channel_dict in (self.exc_dict, self.seg_dict,
                             self.bitmask_dict):
            for name, seg_list in channel_dict.items():
                if name[3:] not in exclude_coinc_flags:
                    combined.extend(seg_list)
        # A scheduled injection contributes the second starting at its time.
        if self.schedule_time:
            combined.extend(segments.segmentlist(
                [segments.segment(self.schedule_time,
                                  self.schedule_time + 1)]))
        # Likewise one second for each GraceDB event time.
        for event_time in self.gracedb_time:
            combined.extend(segments.segmentlist(
                [segments.segment(event_time, event_time + 1)]))
        return combined
예제 #3
0
def split_segment(seg, min_segment_length, pad, overlap,
                  short_segment_duration, max_job_length):
    """Split *seg* into a segmentlist of analysis-job segments.

    Each job is at most *max_job_length* long (after clipping to an
    allowed size with clip_segment_length()), consecutive jobs overlap
    by *overlap*, and splitting stops once the remainder of *seg* is
    shorter than min_segment_length + 2 * pad.

    Raises ValueError if the parameters would cause an infinite loop,
    if a produced segment has non-integer boundaries, or if no job can
    be extracted from *seg* at all.
    """
    # avoid infinite loop: each iteration must consume more than the
    # overlap gives back
    if min_segment_length + 2 * pad <= overlap:
        # Modernized from the Python-2-only "raise E, msg" statement.
        raise ValueError(
            "infinite loop: min_segment_length + 2 * pad must be > overlap")

    # clip max_job_length down to an allowed size
    max_job_length = clip_segment_length(max_job_length, pad,
                                         short_segment_duration)

    seglist = segments.segmentlist()
    while abs(seg) >= min_segment_length + 2 * pad:
        # try to use max_job_length each time
        if abs(seg) >= max_job_length:
            seglist.append(segments.segment(seg[0], seg[0] + max_job_length))
        else:
            seglist.append(
                segments.segment(
                    seg[0], seg[0] + clip_segment_length(
                        abs(seg), pad, short_segment_duration)))
        assert abs(seglist[-1]) != 0  # safety-check for no-op
        # bounds must be integers (relative tolerance).  NOTE(review):
        # this divides by the boundary value itself, so a boundary of
        # exactly 0 would raise ZeroDivisionError -- confirm boundaries
        # are always non-zero GPS times.
        if abs((int(seglist[-1][0]) - seglist[-1][0]) /
               seglist[-1][0]) > 1e-14 or abs(
                   (int(seglist[-1][1]) - seglist[-1][1]) /
                   seglist[-1][1]) > 1e-14:
            raise ValueError("segment %s does not have integer boundaries" %
                             str(seglist[-1]))
        # advance segment, re-using the overlap at the boundary
        seg = segments.segment(seglist[-1][1] - overlap, seg[1])
    if not seglist:
        raise ValueError("unable to use segment %s" % str(seg))
    return seglist
예제 #4
0
def subdivide(seg, length, min_len=0):
    """
    Subdivide a segment into smaller segments based on a given length. Enforce a given minimum length at the end, if necessary. If the remainder segment is smaller than the minimum length, then the last two segments will span the remainder plus penultimate segment, with the span divided evenly between the two.

    Input segment: (0, 10] subdivide 3 min 2
    Output segment(s): (0, 3], (3, 6], (6, 8], (8, 10]
    """
    assert length >= min_len
    # A segment shorter than the minimum produces nothing at all.
    if abs(seg) < min_len:
        return segmentlist([])
    # A segment no longer than the target length is returned whole.
    if abs(seg) <= length:
        return segmentlist([seg])

    subsegl = segmentlist([])
    # Emit as many full-length pieces as fit.
    for i in range(int(float(abs(seg)) / length)):
        st = seg[0]
        subsegl.append(segment(st + length * i, st + length * (i + 1)))

    # Make an attempt to subdivide evenly.
    if float(abs(seg)) % length <= min_len:
        # Remainder too short: merge it with the final full piece and
        # split the combined span evenly into two segments.
        # NOTE(review): this branch also fires when the remainder is
        # exactly 0, halving the final full-length piece -- confirm
        # that is intended.
        s1 = subsegl.pop()
        rem_len = float(abs(s1)) + (float(abs(seg)) % length)
        s2 = segment(seg[1] - rem_len / 2, seg[1])
        s1 = segment(s1[0], seg[1] - rem_len / 2)
        subsegl.append(s1)
        subsegl.append(s2)
    else:
        # Remainder is long enough to stand on its own.
        subsegl.append(segment(subsegl[-1][1], seg[1]))

    return subsegl
def fromsegmentxml(file, dict=False, id=None):
    """
    Read a glue.segments.segmentlist from the file object file containing an
    xml segment table.

    Arguments:

      file : file object
        file object for segment xml file

    Keyword Arguments:

      dict : [ True | False ]
        returns a glue.segments.segmentlistdict containing coalesced
        glue.segments.segmentlists keyed by seg_def.name for each entry in the
        contained segment_def_table. Default False
      id : int
        returns a glue.segments.segmentlist object containing only those
        segments matching the given segment_def_id integer
    """

    # load xmldocument and SegmentDefTable and SegmentTables
    xmldoc, digest = utils.load_fileobj(file,
                                        gz=file.name.endswith(".gz"),
                                        contenthandler=lsctables.use_in(
                                            ligolw.LIGOLWContentHandler))
    seg_def_table = lsctables.SegmentDefTable.get_table(xmldoc)
    seg_table = lsctables.SegmentTable.get_table(xmldoc)

    if dict:
        segs = segments.segmentlistdict()
    else:
        segs = segments.segmentlist()

    # map each segment_def_id to its flag name; pre-create per-name
    # lists when a dictionary was requested
    seg_id = {}
    for seg_def in seg_def_table:
        seg_id[int(seg_def.segment_def_id)] = str(seg_def.name)
        if dict:
            segs[str(seg_def.name)] = segments.segmentlist()

    for seg in seg_table:
        if dict:
            segs[seg_id[int(seg.segment_def_id)]]\
                .append(segments.segment(seg.start_time, seg.end_time))
            continue
        # BUGFIX: previously every row was appended even when an id
        # filter was given but did not match, making the filter a no-op.
        if id and int(seg.segment_def_id) != id:
            continue
        segs.append(segments.segment(seg.start_time, seg.end_time))

    if dict:
        for seg_name in seg_id.values():
            segs[seg_name] = segs[seg_name].coalesce()
    else:
        segs = segs.coalesce()

    xmldoc.unlink()

    return segs
예제 #6
0
def subdivide(seg, length, min_len=0):
    """
    Split *seg* into consecutive pieces of *length*, enforcing a minimum
    final piece of *min_len*.  When the trailing remainder would be no
    longer than *min_len*, it is merged with the preceding full piece and
    the combined span is divided evenly between the last two segments.

    Input segment: (0, 10] subdivide 3 min 2
    Output segment(s): (0, 3], (3, 6], (6, 8], (8, 10]
    """
    assert length >= min_len
    total = abs(seg)
    # Too short to yield anything at all.
    if total < min_len:
        return segmentlist([])
    # Fits in a single piece.
    if total <= length:
        return segmentlist([seg])

    start = seg[0]
    # All the full-length pieces that fit.
    pieces = segmentlist([
        segment(start + length * k, start + length * (k + 1))
        for k in range(int(float(total) / length))
    ])

    remainder = float(total) % length
    if remainder <= min_len:
        # Merge the remainder into the final full piece, then halve.
        last = pieces.pop()
        half = (float(abs(last)) + remainder) / 2
        pieces.append(segment(last[0], seg[1] - half))
        pieces.append(segment(seg[1] - half, seg[1]))
    else:
        # Remainder is long enough to stand on its own.
        pieces.append(segment(pieces[-1][1], seg[1]))

    return pieces
예제 #7
0
def do_summary_table(xmldoc, sim_tree, liv_tree):
    """Populate the search_summary table of *xmldoc* from the WaveBurst
    ROOT tree *sim_tree*, adding one row per distinct run id.

    NOTE(review): *liv_tree* is currently unused -- the livetime is
    hard-coded to 600 s below (see the commented-out scan); confirm
    before relying on the in/out segments.
    """
    try:
        search_summary = lsctables.SearchSummaryTable.get_table(xmldoc)
    except ValueError:
        # Table absent from the document: create it with the columns
        # filled in below and attach it.
        search_summary = lsctables.New(lsctables.SearchSummaryTable, [
            "process_id", "nevents", "ifos", "comment", "in_start_time",
            "in_start_time_ns", "out_start_time", "out_start_time_ns",
            "in_end_time", "in_end_time_ns", "out_end_time", "out_end_time_ns"
        ])
        xmldoc.childNodes[0].appendChild(search_summary)

    # Constructor used to turn a raw run number into a process ID.
    process_id_type = lsctables.ProcessID

    runids = set()
    for i in range(0, sim_tree.GetEntries()):
        sim_tree.GetEntry(i)

        # Id for the run processed by WaveBurst -> process ID;
        # only one summary row is produced per run.
        if sim_tree.run in runids:
            continue

        row = search_summary.RowType()
        row.process_id = process_id_type(sim_tree.run)
        runids.add(sim_tree.run)

        # Search Summary Table
        # events found in the run -> nevents
        setattr(row, "nevents", sim_tree.GetEntries())

        # Instruments involved in the search
        row.ifos = lsctables.ifos_from_instrument_set(
            get_ifos_from_index(
                branch_array_to_list(sim_tree.ifo, sim_tree.ndim)))
        setattr(row, "comment", "waveburst")

        # Begin and end time of the segment
        # TODO: This is a typical offset on either side of the job for artifacts
        # It can, and probably will change in the future, and should not be hardcoded
        # TODO: Make this work properly. We need a gps end from the livetime
        waveoffset = 8
        livetime = 600
        #live_entries = live_tree.GetEntries()
        # This is WAAAAAAAAAAAAAY too slow
        #for l in range(0, live_entries):
        #liv_tree.GetEntry(l)
        #livetime = max(livetime, liv_tree.live)

        #if livetime < 0:
        #sys.exit("Could not find livetime, cannot fill all of summary table.")
        # in -- with waveoffset
        # out -- without waveoffset
        row.set_in(
            segments.segment(LIGOTimeGPS(sim_tree.gps - waveoffset),
                             LIGOTimeGPS(sim_tree.gps + livetime +
                                         waveoffset)))
        row.set_out(
            segments.segment(LIGOTimeGPS(sim_tree.gps),
                             LIGOTimeGPS(sim_tree.gps + livetime)))

        search_summary.append(row)
예제 #8
0
    def inj_seg(self, exclude_coinc_flags=None):
        """ Returns a segmentlist that is the union of all excitation,
        segdb and bitmasked channels.

        Channels whose key (minus its 3-character prefix) appears in
        *exclude_coinc_flags* are skipped.  One-second segments are
        also added for the scheduled time (when set) and for each
        GraceDB event time.
        """

        if exclude_coinc_flags is None:
            exclude_coinc_flags = []

        tmp_list = segments.segmentlist([])
        # key[3:] strips the 3-character prefix before comparing against
        # the exclusion list -- presumably an IFO tag; TODO confirm.
        for key in self.exc_dict.keys():
            if key[3:] not in exclude_coinc_flags:
                tmp_list.extend(self.exc_dict[key])
        for key in self.seg_dict.keys():
            if key[3:] not in exclude_coinc_flags:
                tmp_list.extend(self.seg_dict[key])
        for key in self.bitmask_dict.keys():
            if key[3:] not in exclude_coinc_flags:
                tmp_list.extend(self.bitmask_dict[key])
        # A scheduled injection contributes the second starting at its time.
        if self.schedule_time:
            seg = segments.segment(self.schedule_time, self.schedule_time + 1)
            seg_list = segments.segmentlist([seg])
            tmp_list.extend(seg_list)
        # Likewise one second around each GraceDB event time.
        for time in self.gracedb_time:
            seg = segments.segment(time, time + 1)
            seg_list = segments.segmentlist([seg])
            tmp_list.extend(seg_list)
        return tmp_list
예제 #9
0
def generated_vdb_ascii(json_str, filepath):
    """Convert a DQ-flag query JSON string into an ASCII segment file.

    Writes the query-information header (pretty-printed JSON) followed by
    one ``start,end,flag`` line per segment: flag 1 for active, -1 for
    unknown (requested but not known) and 0 for known-but-not-active.

    Returns *filepath*.
    """
    res_dict = json.loads(json_str)
    active_list = res_dict["active"]
    active_segments = segments.segmentlist([segments.segment(x[0], x[1]) for x in active_list])
    known_list = res_dict["known"]
    known_segments = segments.segmentlist([segments.segment(x[0], x[1]) for x in known_list])
    query_start = res_dict["query_information"]["start"]
    query_stop = res_dict["query_information"]["end"]
    if query_start != 0 and query_stop != 0:
        requested_span = segments.segmentlist([segments.segment(query_start, query_stop)])
    else:
        # Open-ended query: treat the full GPS range as requested.
        requested_span = segments.segmentlist([segments.segment(0, 9999999999)])
    active_segments_string = ",1 \n".join([str(i[0]) + "," + str(i[1]) for i in active_segments]) + ",1 \n"
    unknown_segments = requested_span - known_segments
    unknown_segments_string = ",-1 \n".join([str(i[0]) + "," + str(i[1]) for i in unknown_segments]) + ",-1 \n"
    known_not_active_segments = known_segments - active_segments
    known_not_active_segments_string = (
        ",0 \n".join([str(i[0]) + "," + str(i[1]) for i in known_not_active_segments]) + ",0 \n"
    )
    query_info_string = json.dumps(res_dict["query_information"], indent=1)
    # Context manager guarantees the handle is closed even if a write
    # fails (the previous version leaked it on error).
    with open(filepath, "w+") as output_fileh:
        output_fileh.writelines(query_info_string)
        output_fileh.write("\n")
        output_fileh.writelines(active_segments_string)
        output_fileh.writelines(unknown_segments_string)
        output_fileh.writelines(known_not_active_segments_string)
    return filepath
예제 #10
0
def generated_vdb_ascii(json_dict, filepath):
    """Append a DQ-flag query result (already parsed into *json_dict*,
    unlike the older JSON-string variant) to the ASCII segment file at
    *filepath*.

    Active and known segment lists are coalesced before being rendered
    as ``start,end,flag`` lines (1 active, -1 unknown, 0 known-but-not-
    active), preceded by the pretty-printed query_information header.

    Returns *filepath*.
    """
    res_dict = json_dict
    active_list = res_dict['active']
    active_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in active_list])
    active_segments.coalesce()
    known_list = res_dict['known']
    known_segments = segments.segmentlist(
        [segments.segment(x[0], x[1]) for x in known_list])
    known_segments.coalesce()
    query_start = res_dict['query_information']['start']
    query_stop = res_dict['query_information']['end']
    if query_start != 0 and query_stop != 0:
        requested_span = segments.segmentlist(
            [segments.segment(query_start, query_stop)])
    else:
        # Open-ended query: treat the full GPS range as requested.
        requested_span = segments.segmentlist(
            [segments.segment(0, 9999999999)])
    active_segments_string = ',1 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in active_segments]) + ",1 \n"
    unknown_segments = requested_span - known_segments
    unknown_segments_string = ',-1 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in unknown_segments]) + ",-1 \n"
    known_not_active_segments = known_segments - active_segments
    known_not_active_segments_string = ',0 \n'.join(
        [str(i[0]) + "," + str(i[1]) for i in known_not_active_segments]) + ",0 \n"
    query_info_string = json.dumps(res_dict['query_information'], indent=1)
    # Append mode: results of several queries accumulate in one file.
    # The context manager closes the handle even if a write fails.
    with open(filepath, 'a') as output_fileh:
        output_fileh.writelines(query_info_string)
        output_fileh.write('\n')
        output_fileh.writelines(active_segments_string)
        output_fileh.writelines(unknown_segments_string)
        output_fileh.writelines(known_not_active_segments_string)
    return filepath
예제 #11
0
def fromsegwizard(file, coltype=int, strict=True):
    """
    Read a segmentlist from the file object file containing a segwizard
    compatible segment list.  Parsing stops on the first line that
    cannot be parsed (which is consumed).  The segmentlist will be
    created with segments whose boundaries are of type coltype, which
    should raise ValueError if it cannot convert its string argument.
    Two-column, three-column, and four-column segwizard files are
    recognized, but the entire file must be in the same format, which
    is decided by the first parsed line.  If strict is True and the
    file is in three- or four-column format, then each segment's
    duration is checked against that column in the input file.

    NOTE:  the output is a segmentlist as described by the file;  if
    the segments in the input file are not coalesced or out of order,
    then thusly shall be the output of this function.  It is
    recommended that this function's output be coalesced before use.
    """
    commentpat = re.compile(r"\s*([#;].*)?\Z", re.DOTALL)
    # BUGFIX: the former classes wrote "+-e", which a regex reads as the
    # character range '+'..'e' (accepting letters and punctuation); the
    # '-' now sits last so it is taken literally.
    twocolsegpat = re.compile(r"\A\s*([\d.eE+-]+)\s+([\d.eE+-]+)\s*\Z")
    threecolsegpat = re.compile(
        r"\A\s*([\d.eE+-]+)\s+([\d.eE+-]+)\s+([\d.eE+-]+)\s*\Z")
    fourcolsegpat = re.compile(
        r"\A\s*([\d]+)\s+([\d.eE+-]+)\s+([\d.eE+-]+)\s+([\d.eE+-]+)\s*\Z")
    format = None
    l = segments.segmentlist()
    for line in file:
        # strip trailing comments; skip blank/comment-only lines
        line = commentpat.split(line)[0]
        if not line:
            continue
        try:
            [tokens] = fourcolsegpat.findall(line)
            # int() also validates column 1 and, via ValueError, drives
            # fall-through to the three-column format.
            num = int(tokens[0])
            # BUGFIX: unpack the two boundaries explicitly; passing a
            # lazy map object would defeat segment() (and the ValueError
            # fall-through) on Python 3.
            seg = segments.segment(*map(coltype, tokens[1:3]))
            duration = coltype(tokens[3])
            this_line_format = 4
        except ValueError:
            try:
                [tokens] = threecolsegpat.findall(line)
                seg = segments.segment(*map(coltype, tokens[0:2]))
                duration = coltype(tokens[2])
                this_line_format = 3
            except ValueError:
                try:
                    [tokens] = twocolsegpat.findall(line)
                    seg = segments.segment(*map(coltype, tokens[0:2]))
                    duration = abs(seg)
                    this_line_format = 2
                except ValueError:
                    # unparseable line: stop (the line is consumed)
                    break
        if strict:
            if abs(seg) != duration:
                raise ValueError("segment '%s' has incorrect duration" % line)
            if format is None:
                format = this_line_format
            elif format != this_line_format:
                raise ValueError("segment '%s' format mismatch" % line)
        l.append(seg)
    return l
예제 #12
0
def fromsegmentxml(file, dict=False, id=None):
  """
    Read a glue.segments.segmentlist from the file object file containing an
    xml segment table.

    Arguments:

      file : file object
        file object for segment xml file

    Keyword Arguments:

      dict : [ True | False ]
        returns a glue.segments.segmentlistdict containing coalesced
        glue.segments.segmentlists keyed by seg_def.name for each entry in the
        contained segment_def_table. Default False
      id : int
        returns a glue.segments.segmentlist object containing only those
        segments matching the given segment_def_id integer
  """

  # load xmldocument and SegmentDefTable and SegmentTables
  xmldoc, digest = utils.load_fileobj(file, gz=file.name.endswith(".gz"))
  seg_def_table  = table.get_table(xmldoc, lsctables.SegmentDefTable.tableName)
  seg_table      = table.get_table(xmldoc, lsctables.SegmentTable.tableName)

  if dict:
    segs = segments.segmentlistdict()
  else:
    segs = segments.segmentlist()

  # map each segment_def_id to its flag name; pre-create per-name lists
  # when a dictionary was requested
  seg_id = {}
  for seg_def in seg_def_table:
    seg_id[int(seg_def.segment_def_id)] = str(seg_def.name)
    if dict:
      segs[str(seg_def.name)] = segments.segmentlist()

  for seg in seg_table:
    if dict:
      segs[seg_id[int(seg.segment_def_id)]]\
          .append(segments.segment(seg.start_time, seg.end_time))
      continue
    # BUGFIX: previously every row was appended even when an id filter
    # was given but did not match, making the filter a no-op.
    if id and int(seg.segment_def_id) != id:
      continue
    segs.append(segments.segment(seg.start_time, seg.end_time))

  if dict:
    for seg_name in seg_id.values():
      segs[seg_name] = segs[seg_name].coalesce()
  else:
    segs = segs.coalesce()

  xmldoc.unlink()

  return segs
예제 #13
0
def fromsegwizard(file, coltype = int, strict = True):
	"""
	Read a segmentlist from the file object file containing a segwizard
	compatible segment list.  Parsing stops on the first line that
	cannot be parsed (which is consumed).  The segmentlist will be
	created with segment whose boundaries are of type coltype, which
	should raise ValueError if it cannot convert its string argument.
	Two-column, three-column, and four-column segwizard files are
	recognized, but the entire file must be in the same format, which
	is decided by the first parsed line.  If strict is True and the
	file is in three- or four-column format, then each segment's
	duration is checked against that column in the input file.

	NOTE:  the output is a segmentlist as described by the file;  if
	the segments in the input file are not coalesced or out of order,
	then thusly shall be the output of this function.  It is
	recommended that this function's output be coalesced before use.
	"""
	commentpat = re.compile(r"\s*([#;].*)?\Z", re.DOTALL)
	# BUGFIX: "+-e" inside a character class is read as the range
	# '+'..'e' (accepting letters and punctuation); the '-' now sits
	# last so it is taken literally.
	twocolsegpat = re.compile(r"\A\s*([\d.eE+-]+)\s+([\d.eE+-]+)\s*\Z")
	threecolsegpat = re.compile(r"\A\s*([\d.eE+-]+)\s+([\d.eE+-]+)\s+([\d.eE+-]+)\s*\Z")
	fourcolsegpat = re.compile(r"\A\s*([\d]+)\s+([\d.eE+-]+)\s+([\d.eE+-]+)\s+([\d.eE+-]+)\s*\Z")
	format = None
	l = segments.segmentlist()
	for line in file:
		# strip trailing comments; skip blank/comment-only lines
		line = commentpat.split(line)[0]
		if not line:
			continue
		try:
			[tokens] = fourcolsegpat.findall(line)
			# int() also validates column 1 and triggers the ValueError
			# fall-through to the three-column format.
			num = int(tokens[0])
			# BUGFIX: unpack the boundaries explicitly; a lazy map
			# object would break segment() and the fall-through on
			# Python 3.
			seg = segments.segment(*map(coltype, tokens[1:3]))
			duration = coltype(tokens[3])
			this_line_format = 4
		except ValueError:
			try:
				[tokens] = threecolsegpat.findall(line)
				seg = segments.segment(*map(coltype, tokens[0:2]))
				duration = coltype(tokens[2])
				this_line_format = 3
			except ValueError:
				try:
					[tokens] = twocolsegpat.findall(line)
					seg = segments.segment(*map(coltype, tokens[0:2]))
					duration = abs(seg)
					this_line_format = 2
				except ValueError:
					# unparseable line: stop (the line is consumed)
					break
		if strict:
			if abs(seg) != duration:
				raise ValueError("segment '%s' has incorrect duration" % line)
			if format is None:
				format = this_line_format
			elif format != this_line_format:
				raise ValueError("segment '%s' format mismatch" % line)
		l.append(seg)
	return l
def get_manually(gps_start_time, gps_end_time):
    db_location = os.environ['S6_SEGMENT_SERVER']
    segment_connection = segmentdb_utils.setup_database(db_location)
    engine = query_engine.LdbdQueryEngine(segment_connection)

    # 1. Get v1 science segments

    sql = "SELECT segment.start_time, segment.end_time "
    sql += "FROM segment_definer, segment "
    sql += "WHERE segment.segment_def_id = segment_definer.segment_def_id "
    sql += "AND   segment_definer.ifos = 'H1' "
    sql += "AND   segment.segment_def_cdb = segment_definer.creator_db "
    sql += "AND   segment_definer.name = 'DMT-SCIENCE' "
    sql += "AND   segment_definer.version = 1 "
    sql += "AND NOT (%s > segment.end_time OR segment.start_time > %s)" % (
        gps_start_time, gps_end_time)

    v1_science_segments = segmentlist(
        [segment(row[0], row[1]) for row in engine.query(sql)]).coalesce()

    # 2. Get v2 science summaries

    sql = "SELECT segment_summary.start_time, segment_summary.end_time "
    sql += "FROM segment_definer, segment_summary "
    sql += "WHERE segment_summary.segment_def_id = segment_definer.segment_def_id "
    sql += "AND   segment_definer.ifos = 'H1' "
    sql += "AND   segment_summary.segment_def_cdb = segment_definer.creator_db "
    sql += "AND   segment_definer.name = 'DMT-SCIENCE' "
    sql += "AND   segment_definer.version = 2 "
    sql += "AND NOT (%s > segment_summary.end_time OR segment_summary.start_time > %s)" % (
        gps_start_time, gps_end_time)

    v2_science_summaries = segmentlist(
        [segment(row[0], row[1]) for row in engine.query(sql)]).coalesce()

    # 1. Get v2 science segments

    sql = "SELECT segment.start_time, segment.end_time "
    sql += "FROM segment_definer, segment "
    sql += "WHERE segment.segment_def_id = segment_definer.segment_def_id "
    sql += "AND   segment_definer.ifos = 'H1' "
    sql += "AND   segment.segment_def_cdb = segment_definer.creator_db "
    sql += "AND   segment_definer.name = 'DMT-SCIENCE' "
    sql += "AND   segment_definer.version = 2 "
    sql += "AND NOT (%s > segment.end_time OR segment.start_time > %s)" % (
        gps_start_time, gps_end_time)

    v2_science_segments = segmentlist(
        [segment(row[0], row[1]) for row in engine.query(sql)]).coalesce()

    result = (v1_science_segments - v2_science_summaries) + v2_science_segments

    result.coalesce()

    result &= segmentlist([segment(gps_start_time, gps_end_time)])

    return result
예제 #15
0
 def _query(self, channel, start, end):
     """Return True if frames covering [start, end) for *channel* are known.

     Checks the cached remote coverage first; on a miss, queries LDR for
     frame-file URLs, merges any results into the cache, then re-checks
     the coverage.
     """
     if segment(start, end) in self._remotecoverage:
         return True
     # Cache miss: ask the LDR server for file URLs of this frame type.
     # NOTE(review): channel[0] is passed as the observatory/site token
     # -- presumably the first character of the channel name; confirm.
     urls = query_LDR(self.host, self.port, channel[0], self.frametype, start, end, urlType="file")
     if urls:
         new = Cache.from_urls(urls, coltype=int)
         # add_cache expects entries ordered by their time segment.
         new.sort(key=operator.attrgetter("segment"))
         self.add_cache(new)
     return segment(start, end) in self._remotecoverage
예제 #16
0
	def test_tofromseqwizard(self):
		"""
		Check that the segwizard writing routine's output is parsed
		correctly by the segwizard reading routine (round trip).
		"""
		data = StringIO.StringIO()
		# Deliberately uncoalesced, out-of-order input: fromsegwizard
		# must reproduce the list exactly as written, not coalesced.
		correct = segments.segmentlist([segments.segment(10, 100), segments.segment(110, 120), segments.segment(125, 130), segments.segment(0, 200)])
		segmentsUtils.tosegwizard(data, correct)
		data.seek(0)  # rewind so fromsegwizard reads from the start
		self.assertEqual(correct, segmentsUtils.fromsegwizard(data, strict=True))
예제 #17
0
def randomlist(n):
	"""Return a segmentlist of n disjoint, ascending random segments.

	Segment lengths and the gaps between consecutive segments are
	uniform random integers in [1, 5000].

	Raises ValueError if n < 1.
	"""
	def r():
		return random.randint(1,5000)
	if n < 1:
		# Modernized from the Python-2-only "raise E, msg" statement.
		raise ValueError("randomlist(n): n must be >= 1")
	x = r()
	l = segments.segmentlist([segments.segment(x, x + r())])
	for i in range(n - 1):
		# Each new segment starts a random gap after the previous end.
		x = l[-1][1] + r()
		l.append(segments.segment(x, x + r()))
	return l
예제 #18
0
def test_optimized_query(engine):
    """Run a single two-flag segment_summary query and check that each
    result matches the segment list it is expected to produce."""
    res = segmentdb_utils.query_segments(
        engine, 'segment_summary',
        [('H1', 'DMT-TESTSEG_2', 1, 924900000, 924900016, 0, 0),
         ('H1', 'DMT-TESTSEG_3', 1, 924900000, 924900016, 0, 0)])

    expected = (segmentlist([segment(924900000, 924900010)]),
                segmentlist([segment(924900008, 924900016)]))
    return res[0] == expected[0] and res[1] == expected[1]
def coalesceResultDictionary(result_dict):
    """Coalesce the 'active' and 'known' segment lists of the first
    entry of *result_dict*.

    The input mapping is modified in place (the return value is the
    same object): each list of [start, end] pairs is rebuilt as a
    coalesced seg.segmentlist.
    """
    out_result_dict = result_dict
    for key in ('active', 'known'):
        seg_list = seg.segmentlist(
            [seg.segment(pair[0], pair[1]) for pair in result_dict[0][key]])
        seg_list.coalesce()
        out_result_dict[0][key] = seg_list
    return out_result_dict
예제 #20
0
        def pad_and_truncate(row_start, row_end):
            """Pad the row's span by (start_pad, end_pad) and clip it to
            the search span, returning a single segment."""
            padded = segmentlist([segment(row_start + start_pad, row_end + end_pad)])
            # A one-segment list is already coalesced; just intersect.
            padded &= search_span_list
            # PR 2969: negative padding can make the intersection empty,
            # so fall back to a null segment instead of indexing into it.
            return padded[0] if padded else segment(0, 0)
예제 #21
0
def test_basic_seg_summary(engine):
    """Query DMT-TESTSEG_1 at versions 1 and 2 and verify the returned
    segment_summary coverage for each."""
    cases = (
        (1, segment(924900000, 924900016)),
        (2, segment(924900008, 924900010)),
    )
    for version, expected in cases:
        res = segmentdb_utils.query_segments(
            engine, 'segment_summary',
            [('H1', 'DMT-TESTSEG_1', version, 924900000, 924900016, 0, 0)])
        if res != [segmentlist([expected])]:
            return False
    return True
예제 #22
0
def associate_psds_to_segments(opt, fd_segments, gwstrain, flen, delta_f, flow,
                               dyn_range_factor=1., precision=None):
    """Estimate a set of overlapping PSDs from *gwstrain* and attach to
    each segment in *fd_segments* the PSD whose span overlaps it most.

    Parameters
    -----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with
        the required attributes (psd_model, psd_file, asd_file,
        psd_estimation, psd_segment_length, psd_segment_stride,
        psd_inverse_length, psd_output).
    fd_segments : StrainSegments.fourier_segments() object
        The fourier transforms of the analysis segments; each segment's
        ``psd`` attribute is updated to point at the chosen PSD.
    gwstrain : Strain object
        The timeseries of raw data on which to estimate PSDs.
    flen : int
        The length in samples of the output PSDs.
    delta_f : float
        The frequency step of the output PSDs.
    flow: float
        The low frequency cutoff to use when calculating the PSD.
    dyn_range_factor : {1, float}
        For PSDs taken from models or text files, if `dyn_range_factor`
        is not None, the PSD is multiplied by `dyn_range_factor` ** 2.
    precision : str, choices (None,'single','double')
        If None, the precision of the returned PSD matches the data (or
        the model's default).  'single'/'double' force float32/float64.

    Raises
    ------
    ValueError
        If an analysis segment intersects none of the estimated PSDs.
    """
    psds_and_times = generate_overlapping_psds(opt, gwstrain, flen, delta_f,
                                       flow, dyn_range_factor=dyn_range_factor,
                                       precision=precision)

    for fd_segment in fd_segments:
        analysis_span = segments.segment(fd_segment.seg_slice.start,
                                         fd_segment.seg_slice.stop)
        chosen_psd = None
        best_overlap = 0
        # Keep the PSD whose estimation interval covers the largest part
        # of this analysis segment (first winner is kept on ties).
        for start_idx, end_idx, psd in psds_and_times:
            psd_span = segments.segment(start_idx, end_idx)
            if not psd_span.intersects(analysis_span):
                continue
            overlap = abs(analysis_span & psd_span)
            if overlap > best_overlap:
                best_overlap = overlap
                chosen_psd = psd
        if chosen_psd is None:
            raise ValueError("No PSDs found intersecting segment!")
        fd_segment.psd = chosen_psd
예제 #23
0
def do_summary_table(xmldoc, sim_tree, liv_tree):
  """Populate the search_summary table of *xmldoc* from the WaveBurst
  ROOT tree *sim_tree*, adding one row per distinct run id.

  NOTE(review): *liv_tree* is currently unused -- the livetime is
  hard-coded to 600 s below (see the commented-out scan); confirm
  before relying on the in/out segments.
  """
  try:
    search_summary = table.get_table(xmldoc, lsctables.SearchSummaryTable.tableName)
  except ValueError:
    # Table absent from the document: create it with the columns used
    # below and attach it.
    search_summary = lsctables.New(lsctables.SearchSummaryTable,
    ["process_id", "nevents", "ifos", "comment", "in_start_time",
    "in_start_time_ns", "out_start_time", "out_start_time_ns",
    "in_end_time", "in_end_time_ns", "out_end_time", "out_end_time_ns"])
    xmldoc.childNodes[0].appendChild(search_summary)

  # Constructor of the process table's ID type, used to turn a raw run
  # number into a process ID.
  process_id_type = type(table.get_table(xmldoc, lsctables.ProcessTable.tableName).next_id)

  runids = set()
  for i in range(0, sim_tree.GetEntries()) :
    sim_tree.GetEntry(i)

    # Id for the run processed by WaveBurst -> process ID;
    # only one summary row is produced per run.
    if sim_tree.run in runids :
      continue

    row = search_summary.RowType()
    row.process_id = process_id_type(sim_tree.run)
    runids.add(sim_tree.run)

    # Search Summary Table
    # events found in the run -> nevents
    setattr(row, "nevents", sim_tree.GetEntries())

    # Instruments involved in the search
    row.ifos = lsctables.ifos_from_instrument_set( get_ifos_from_index( branch_array_to_list ( sim_tree.ifo, sim_tree.ndim ) ) )
    setattr(row, "comment", "waveburst")

    # Begin and end time of the segment
    # TODO: This is a typical offset on either side of the job for artifacts
    # It can, and probably will change in the future, and should not be hardcoded
    # TODO: Make this work properly. We need a gps end from the livetime
    waveoffset = 8
    livetime = 600
    #live_entries = live_tree.GetEntries()
    # This is WAAAAAAAAAAAAAY too slow
    #for l in range(0, live_entries):
      #liv_tree.GetEntry(l)
      #livetime = max(livetime, liv_tree.live)

    #if livetime < 0:
      #sys.exit("Could not find livetime, cannot fill all of summary table.")
    # in -- with waveoffset
    # out -- without waveoffset
    row.set_in(segments.segment(LIGOTimeGPS(sim_tree.gps - waveoffset), LIGOTimeGPS(sim_tree.gps + livetime + waveoffset)))
    row.set_out(segments.segment(LIGOTimeGPS(sim_tree.gps), LIGOTimeGPS(sim_tree.gps + livetime)))

    search_summary.append(row)
예제 #24
0
def test_optimized_query(engine):
    """Run one combined segment_summary query for two H1 test flags and
    verify that each flag's result list matches the known answer.

    Returns True on success, False on the first mismatch.
    """
    expected = [
        segmentlist([segment(924900000, 924900010)]),
        segmentlist([segment(924900008, 924900016)]),
    ]

    # Both flags are queried over the same 16-second window in one call.
    res = segmentdb_utils.query_segments(
        engine, 'segment_summary',
        [('H1', 'DMT-TESTSEG_2', 1, 924900000, 924900016, 0, 0),
         ('H1', 'DMT-TESTSEG_3', 1, 924900000, 924900016, 0, 0)])

    return res[0] == expected[0] and res[1] == expected[1]
예제 #25
0
def get_manually(gps_start_time, gps_end_time):
    """Compute H1 DMT-SCIENCE segments "by hand" from the segment database.

    Version-1 science segments are kept only where no version-2 summary
    coverage exists; version-2 science segments are used everywhere else.
    The combined result is coalesced and clipped to the requested
    [gps_start_time, gps_end_time] window.
    """
    db_location = os.environ["S6_SEGMENT_SERVER"]
    segment_connection = segmentdb_utils.setup_database(db_location)
    engine = query_engine.LdbdQueryEngine(segment_connection)

    def fetch_segments(sql):
        # Run a query returning (start_time, end_time) rows and fold them
        # into a coalesced segmentlist.
        return segmentlist(
            [segment(row[0], row[1]) for row in engine.query(sql)]).coalesce()

    # 1. Get v1 science segments
    v1_science_segments = fetch_segments(
        "SELECT segment.start_time, segment.end_time "
        "FROM segment_definer, segment "
        "WHERE segment.segment_def_id = segment_definer.segment_def_id "
        "AND   segment_definer.ifos = 'H1' "
        "AND   segment.segment_def_cdb = segment_definer.creator_db "
        "AND   segment_definer.name = 'DMT-SCIENCE' "
        "AND   segment_definer.version = 1 "
        "AND NOT (%s > segment.end_time OR segment.start_time > %s)"
        % (gps_start_time, gps_end_time))

    # 2. Get v2 science summaries
    v2_science_summaries = fetch_segments(
        "SELECT segment_summary.start_time, segment_summary.end_time "
        "FROM segment_definer, segment_summary "
        "WHERE segment_summary.segment_def_id = segment_definer.segment_def_id "
        "AND   segment_definer.ifos = 'H1' "
        "AND   segment_summary.segment_def_cdb = segment_definer.creator_db "
        "AND   segment_definer.name = 'DMT-SCIENCE' "
        "AND   segment_definer.version = 2 "
        "AND NOT (%s > segment_summary.end_time OR segment_summary.start_time > %s)"
        % (gps_start_time, gps_end_time))

    # 3. Get v2 science segments
    v2_science_segments = fetch_segments(
        "SELECT segment.start_time, segment.end_time "
        "FROM segment_definer, segment "
        "WHERE segment.segment_def_id = segment_definer.segment_def_id "
        "AND   segment_definer.ifos = 'H1' "
        "AND   segment.segment_def_cdb = segment_definer.creator_db "
        "AND   segment_definer.name = 'DMT-SCIENCE' "
        "AND   segment_definer.version = 2 "
        "AND NOT (%s > segment.end_time OR segment.start_time > %s)"
        % (gps_start_time, gps_end_time))

    # Prefer v2 wherever it has summary coverage; fall back to v1 elsewhere.
    result = (v1_science_segments - v2_science_summaries) + v2_science_segments
    result.coalesce()
    result &= segmentlist([segment(gps_start_time, gps_end_time)])

    return result
예제 #26
0
def build_segment_list_one(
    engine, gps_start_time, gps_end_time, ifo, segment_name, version=None, start_pad=0, end_pad=0
):
    """Builds a list of segments satisfying the given criteria

    Queries the segment_summary and segment tables for rows matching
    (ifo, segment_name, version) that overlap the window
    [gps_start_time, gps_end_time], clamps each row to that window, and
    returns the pair (sum_result, seg_result) of segmentlists.
    start_pad/end_pad are added to segment (not summary) boundaries by
    the SQL itself.
    """
    seg_result = segmentlist([])
    sum_result = segmentlist([])

    # Is there any way to get segment and segment summary in one query?
    # Maybe some sort of outer join where we keep track of which segment
    # summaries we've already seen.
    sql = "SELECT segment_summary.start_time, segment_summary.end_time "
    sql += "FROM segment_definer, segment_summary "
    sql += "WHERE segment_summary.segment_def_id = segment_definer.segment_def_id "
    sql += "AND   segment_definer.ifos = '%s' " % ifo
    if engine.__class__ == query_engine.LdbdQueryEngine:
        sql += "AND segment_summary.segment_def_cdb = segment_definer.creator_db "
    sql += "AND   segment_definer.name = '%s' " % segment_name
    sql += "AND   segment_definer.version = %s " % version
    sql += "AND NOT (%s > segment_summary.end_time OR segment_summary.start_time > %s)" % (gps_start_time, gps_end_time)

    rows = engine.query(sql)

    for sum_start_time, sum_end_time in rows:
        # Clamp each row to the query window.  Conditional expressions
        # replace the old "cond and a or b" idiom, which returned the
        # wrong value whenever the clamp boundary itself was falsy
        # (e.g. a GPS time of 0).
        sum_start_time = gps_start_time if sum_start_time < gps_start_time else sum_start_time
        sum_end_time = gps_end_time if sum_end_time > gps_end_time else sum_end_time

        sum_result |= segmentlist([segment(sum_start_time, sum_end_time)])

    # We can't use queries paramaterized with ? since the ldbd protocol doesn't support it...
    sql = "SELECT segment.start_time + %d, segment.end_time + %d " % (start_pad, end_pad)
    sql += "FROM segment, segment_definer "
    sql += "WHERE segment.segment_def_id = segment_definer.segment_def_id "

    if engine.__class__ == query_engine.LdbdQueryEngine:
        sql += "AND segment.segment_def_cdb = segment_definer.creator_db "
    sql += "AND   segment_definer.ifos = '%s' " % ifo
    sql += "AND   segment_definer.name = '%s' " % segment_name
    sql += "AND   segment_definer.version = %s " % version
    sql += "AND NOT (%s > segment.end_time OR segment.start_time > %s)" % (gps_start_time, gps_end_time)

    rows = engine.query(sql)

    for seg_start_time, seg_end_time in rows:
        # Same clamping as above, applied to the padded segment rows.
        seg_start_time = gps_start_time if seg_start_time < gps_start_time else seg_start_time
        seg_end_time = gps_end_time if seg_end_time > gps_end_time else seg_end_time

        seg_result |= segmentlist([segment(seg_start_time, seg_end_time)])

    engine.close()

    return sum_result, seg_result
예제 #27
0
def new_plots(instrument, amplitude_func, amplitude_lbl, plots):
	"""Instantiate the standard per-instrument injection plots and return
	only those whose indexes appear in *plots* (order preserved)."""
	available = (
		FreqVsTime(instrument),
		HrssVsFreqScatter(instrument, amplitude_func, amplitude_lbl),
		SimBurstUtils.Efficiency_hrss_vs_freq((instrument,), amplitude_func, amplitude_lbl, 0.1),
		TriggerCountHistogram(instrument),
		RecoveredVsInjectedhrss(instrument, amplitude_func, amplitude_lbl),
		RecoveredPerInjectedhrssVsFreq(instrument, amplitude_func, amplitude_lbl),
		RecoveredPerInjectedhrssVsBandwidth(instrument, amplitude_func, amplitude_lbl),
		RecoveredTimeOffset(instrument, segments.segment(-0.03, +0.03), 0.00015),
		RecoveredFrequencyOffset(instrument, segments.segment(-1.0, +1.0), .002),
		RecoveredVsInjectedFreq(instrument, amplitude_func)
	)
	return [available[index] for index in plots]
def new_plots(instrument, amplitude_func, amplitude_lbl, plots):
	"""Build the full tuple of injection plot objects for *instrument*
	and pick out the entries selected by index in *plots*."""
	candidates = (
		FreqVsTime(instrument),
		HrssVsFreqScatter(instrument, amplitude_func, amplitude_lbl),
		SimBurstUtils.Efficiency_hrss_vs_freq((instrument,), amplitude_func, amplitude_lbl, 0.1),
		TriggerCountHistogram(instrument),
		RecoveredVsInjectedhrss(instrument, amplitude_func, amplitude_lbl),
		RecoveredPerInjectedhrssVsFreq(instrument, amplitude_func, amplitude_lbl),
		RecoveredPerInjectedhrssVsBandwidth(instrument, amplitude_func, amplitude_lbl),
		RecoveredTimeOffset(instrument, segments.segment(-0.03, +0.03), 0.00015),
		RecoveredFrequencyOffset(instrument, segments.segment(-1.0, +1.0), .002),
		RecoveredVsInjectedFreq(instrument, amplitude_func)
	)
	selected = []
	for index in plots:
		selected.append(candidates[index])
	return selected
예제 #29
0
	def test_fromsegwizard(self):
		"""
		Test segwizard parsing.

		Feeds fromsegwizard() a document that exercises full-line
		comments (with varying leading whitespace), trailing comments,
		blank lines, and an out-of-order segment, then compares the
		parsed segmentlist against the expected result.
		"""
		data = StringIO.StringIO("""# This is a comment
 # This is another comment
	# Again a comment
1  10 100 90
2 110 120 10# Here's a comment
3 125 130 5 # Another one

4   0 200 200""")
		# Expected list is deliberately left uncoalesced: the segments
		# appear in the same order as in the input document.
		correct = segments.segmentlist([segments.segment(10, 100), segments.segment(110, 120), segments.segment(125, 130), segments.segment(0, 200)])
		self.assertEqual(correct, segmentsUtils.fromsegwizard(data, strict=True))
예제 #30
0
def build_segment_list_one(engine, gps_start_time, gps_end_time, ifo, segment_name, version = None, start_pad = 0, end_pad = 0):
    """Builds a list of segments satisfying the given criteria

    Returns (sum_result, seg_result): the segment_summary coverage and
    the segments themselves for (ifo, segment_name, version), each row
    clamped to [gps_start_time, gps_end_time].  start_pad/end_pad are
    applied to the segment boundaries inside the SQL.
    """
    seg_result = segmentlist([])
    sum_result = segmentlist([])

    # Is there any way to get segment and segment summary in one query?
    # Maybe some sort of outer join where we keep track of which segment
    # summaries we've already seen.
    sql = "SELECT segment_summary.start_time, segment_summary.end_time "
    sql += "FROM segment_definer, segment_summary "
    sql += "WHERE segment_summary.segment_def_id = segment_definer.segment_def_id "
    sql += "AND   segment_definer.ifos = '%s' " % ifo
    if engine.__class__ == query_engine.LdbdQueryEngine:
       sql += "AND segment_summary.segment_def_cdb = segment_definer.creator_db "
    sql += "AND   segment_definer.name = '%s' " % segment_name
    sql += "AND   segment_definer.version = %s " % version
    sql += "AND NOT (%s > segment_summary.end_time OR segment_summary.start_time > %s)" % (gps_start_time, gps_end_time)

    rows = engine.query(sql)

    for sum_start_time, sum_end_time in rows:
        # Clamp to the query window.  The previous "cond and a or b"
        # idiom silently picked the wrong branch whenever the clamp
        # boundary was falsy (e.g. 0); conditional expressions do not.
        sum_start_time = gps_start_time if sum_start_time < gps_start_time else sum_start_time
        sum_end_time = gps_end_time if sum_end_time > gps_end_time else sum_end_time

        sum_result |= segmentlist([segment(sum_start_time, sum_end_time)])

    # We can't use queries paramaterized with ? since the ldbd protocol doesn't support it...
    sql = "SELECT segment.start_time + %d, segment.end_time + %d " % (start_pad, end_pad)
    sql += "FROM segment, segment_definer "
    sql += "WHERE segment.segment_def_id = segment_definer.segment_def_id "

    if engine.__class__ == query_engine.LdbdQueryEngine:
       sql += "AND segment.segment_def_cdb = segment_definer.creator_db "
    sql += "AND   segment_definer.ifos = '%s' " % ifo
    sql += "AND   segment_definer.name = '%s' " % segment_name
    sql += "AND   segment_definer.version = %s " % version
    sql += "AND NOT (%s > segment.end_time OR segment.start_time > %s)" % (gps_start_time, gps_end_time)

    rows = engine.query(sql)

    for seg_start_time, seg_end_time in rows:
        # Same clamping as above, for the padded segment rows.
        seg_start_time = gps_start_time if seg_start_time < gps_start_time else seg_start_time
        seg_end_time = gps_end_time if seg_end_time > gps_end_time else seg_end_time

        seg_result |= segmentlist([segment(seg_start_time, seg_end_time)])

    engine.close()

    return sum_result, seg_result
예제 #31
0
    def __init__(self):
        """Set plotting defaults: the hour preceding "now", rate and
        frequency bin widths, the frequency band, instrument and output
        format."""
        # set defaults
        now = XLALUTCToGPS(time.gmtime())
        # Spans the hour before "now"; the endpoints are given
        # high-to-low, presumably segment() reorders them -- confirm.
        self.segment = segments.segment(now, now + (-1 * 3600))
        self.ratewidth = 60.0
        self.freqwidth = 16.0
        self.band = segments.segment(0.0, 2500.0)
        self.set_instrument("H1")
        self.seglist = segments.segmentlist([self.segment])
        self.filename = None
        self.format = None  # force update
        self.set_format("png")
        self.cluster = 0
        # Bug fix: the original ended with "return self", which makes
        # every instantiation raise TypeError -- __init__ must return None.
예제 #32
0
	def __init__(self):
		"""Initialise plot defaults: a one-hour segment ending "now",
		bin widths, band, instrument and output format."""
		# set defaults
		now = XLALUTCToGPS(time.gmtime())
		# Endpoints given high-to-low; presumably segment() normalises
		# their order -- confirm.
		self.segment = segments.segment(now, now + (-1 * 3600))
		self.ratewidth = 60.0
		self.freqwidth = 16.0
		self.band = segments.segment(0.0, 2500.0)
		self.set_instrument("H1")
		self.seglist = segments.segmentlist([self.segment])
		self.filename = None
		self.format = None	# force update
		self.set_format("png")
		self.cluster = 0
		# Bug fix: "return self" removed -- returning a non-None value
		# from __init__ raises TypeError at instantiation.
예제 #33
0
def test_basic_seg_summary(engine):
    """Query segment_summary for two versions of H1:DMT-TESTSEG_1 and
    compare each result against the known answer.

    Returns True when both match, False at the first mismatch.
    """
    cases = [
        (1, segmentlist([segment(924900000, 924900016)])),
        (2, segmentlist([segment(924900008, 924900010)])),
    ]

    for flag_version, expected in cases:
        res = segmentdb_utils.query_segments(
            engine, 'segment_summary',
            [('H1', 'DMT-TESTSEG_1', flag_version, 924900000, 924900016, 0, 0)])
        if res != [expected]:
            return False

    return True
예제 #34
0
def convert_json_list_to_segmentlist(jsonlist):
    """Convert a JSON-derived list of [start, end] pairs into a
    glue.segments.segmentlist object."""
    converted = [segments.segment(pair[0], pair[1]) for pair in jsonlist]
    return segments.segmentlist(converted)
예제 #35
0
def get_slide_coincs_from_cache(cachefile, pattern, match, verb, coinc_stat):
  """Read time-slide triggers from the files in *cachefile* whose
  description matches *pattern*, re-slide them on their analysis rings,
  and return the accumulated coincInspiralTable entries.

  Returns None (after printing to stderr) when the cache contains no
  matching files.
  """
  full_coinc_table = []
  cache = cachefile.sieve(description=pattern, exact_match=match)
  found, missed = cache.checkfilesexist()
  files = found.pfnlist()
  if not len(files):
    print >>sys.stderr, "cache contains no files with " + pattern + " description"
    return None
  # split the time slide files into 105 groups to aid with I/O
  num_files=len(files)

  #Changed by Tristan Miller as a memory fix
  #groups_of_files = split_seq(files,105)
  groups_of_files = split_seq(files,50)
  for filegroup in groups_of_files:
    if filegroup:  
      # extract the coinc table
      coinc_table = SnglInspiralUtils.ReadSnglInspiralFromFiles(filegroup, mangle_event_id=False, verbose=verb, non_lsc_tables_ok=False)
      segDict = SearchSummaryUtils.GetSegListFromSearchSummaries(filegroup)
      rings = segments.segmentlist(iterutils.flatten(segDict.values()))
      rings.sort()
      # Widen each ring by 1 ns at the end -- presumably so triggers on
      # the boundary still fall inside a ring; confirm.
      for k,ring in enumerate(rings):
        rings[k] = segments.segment(rings[k][0], rings[k][1] + 10**(-9))
      # Per-instrument slide offsets, in seconds.
      shift_vector = {"H1": 0, "H2": 0, "L1": 5, "V1": 5}
      if coinc_table:
        SnglInspiralUtils.slideTriggersOnRingWithVector(coinc_table, shift_vector, rings)
        full_coinc_table.extend(CoincInspiralUtils.coincInspiralTable(coinc_table,coinc_stat))
  return full_coinc_table
예제 #36
0
 def _query(self, channel, start, end):
     "Do we know where the frame file is?"
     # Fast path: the requested span is already covered by known files.
     if segment(start, end) in self._remotecoverage:
         return True
     # Otherwise ask the LDR server for file URLs covering the span.
     # channel[0] is passed as the first positional argument --
     # presumably the observatory/site prefix; confirm against query_LDR.
     urls = query_LDR(self.host,
                      self.port,
                      channel[0],
                      self.frametype,
                      start,
                      end,
                      urlType="file")
     if urls:
         # Record the newly discovered files, kept sorted by segment.
         new = Cache.from_urls(urls, coltype=int)
         new.sort(key=operator.attrgetter("segment"))
         self.add_cache(new)
     # Re-test: True only if the query actually filled the gap.
     return segment(start, end) in self._remotecoverage
예제 #37
0
def fromfilenames(filenames, coltype=int):
    """
    Return a segmentlist describing the intervals spanned by the files
    whose names are given in the list filenames.  The segmentlist is
    constructed by parsing the file names, and the boundaries of each
    segment are coerced to type coltype.

    The file names are parsed using a generalization of the format
    described in Technical Note LIGO-T010150-00-E, which allows the
    start time and duration appearing in the file name to be
    non-integers.

    NOTE:  the output is a segmentlist as described by the file names;
    if the file names are not in time order, or describe overlaping
    segments, then thusly shall be the output of this function.  It is
    recommended that this function's output be coalesced before use.
    """
    pattern = re.compile(r"-([\d.]+)-([\d.]+)\.[\w_+#]+\Z")
    l = segments.segmentlist()
    for name in filenames:
        name = name.strip()
        # Bug fix: rstrip(".gz") strips *characters* ('.', 'g', 'z'),
        # not the literal suffix, so it also mangled names whose real
        # extension is built from those characters.  Remove the ".gz"
        # suffix explicitly instead.
        if name.endswith(".gz"):
            name = name[:-len(".gz")]
        # Exactly one (start, duration) pair must match, else ValueError.
        [(s, d)] = pattern.findall(name)
        s = coltype(s)
        d = coltype(d)
        l.append(segments.segment(s, s + d))
    return l
예제 #38
0
def read_segfile_xml(segfile,verbose):
  """
  Read segment file in ligolw xml type and return in glue.segments.segmentlist
  format.

  segfile may be a local path or URL; names ending in ".gz" are
  decompressed transparently.  Only the "segment" table is parsed.
  """
  from glue.ligolw import ligolw
  from glue.ligolw import table
  from glue.ligolw import utils

  # Partial content handler: load only <Table Name="segment"> elements,
  # skipping the rest of the (potentially large) document.
  def ContentHandler(xmldoc):
    return ligolw.PartialLIGOLWContentHandler(xmldoc, lambda name, attrs:\
               (name == ligolw.Table.tagName) and\
               (table.StripTableName(attrs["Name"]) in ["segment"]))
  try:
    table.use_in(ligolw.PartialLIGOLWContentHandler)
  except AttributeError:
    # old glue did not allow .use_in().
    # FIXME:  remove when we can require the latest version of glue
    pass

  xmldoc = utils.load_url(segfile, verbose = verbose,gz = segfile.endswith(".gz"), contenthandler = ContentHandler)
  seg_list = segmentlist()
  # Every table the handler kept is a segment table; collect its rows.
  for table_elem in xmldoc.getElements(lambda e:\
                                       (e.tagName == ligolw.Table.tagName)):
    for row in table_elem:
      seg_list.append(segment(row.start_time, row.end_time))
  xmldoc.unlink()
  return seg_list
예제 #39
0
def find_segments(doc, key, use_segment_table = True):
    """Return the coalesced segmentlist for segment definers matching *key*.

    key has the form "ifos:name:version"; missing trailing pieces default
    to '*', and '*' acts as a wildcard for name and version (ifos must
    match exactly).  Segments are read from the segment table, or from
    segment_summary when use_segment_table is False.
    """
    key_pieces = key.split(':')
    while len(key_pieces) < 3:
        key_pieces.append('*')

    def matches(defn):
        # ifos must match exactly; name/version honour the '*' wildcard.
        return (str(defn.ifos) == key_pieces[0]
                and (str(defn.name) == key_pieces[1] or key_pieces[1] == '*')
                and (str(defn.version) == key_pieces[2] or key_pieces[2] == '*'))

    # Find all segment definers matching the criteria.  Materialize the
    # ids as a set: the original map() object was a one-shot iterator
    # under Python 3, so the membership tests below exhausted it after
    # the first row and silently dropped every later segment.  A set
    # also makes each membership test O(1).
    seg_def_table = lsctables.SegmentDefTable.get_table(doc)
    seg_def_ids = set(str(defn.segment_def_id) for defn in seg_def_table if matches(defn))

    # Find all segments belonging to those definers
    if use_segment_table:
        source_table = lsctables.SegmentTable.get_table(doc)
    else:
        source_table = lsctables.SegmentSumTable.get_table(doc)
    seg_entries = [row for row in source_table if str(row.segment_def_id) in seg_def_ids]

    # Combine into a segmentlist
    ret = segmentlist(segment(row.start_time, row.end_time) for row in seg_entries)

    ret.coalesce()

    return ret
예제 #40
0
    def fetch(self, channel, start, end):
        """
        Retrieve data, caching file locations and the files themselves.

        The span [start, end) must be resolvable via _query(); otherwise
        ValueError is raised showing the uncovered part of the request.
        Matching remote files are copied into the scratch area before
        the actual read is delegated to _fetch().
        """
        seg = segment(start, end)

        if not self._query(channel, start, end):
            raise ValueError("%s not found in cache" %
                             repr(segmentlist([seg]) - self._remotecoverage))

        # Need to cache files locally
        # Note: seg *will* be in self._cachecoverage if self.scratchdir is None.
        if seg not in self._cachecoverage:
            for f, s in zip(self._remotefiles, self._remotesegs):
                if seg.intersects(s) and s not in self._cachecoverage:
                    # Copy the remote file into scratch, inserting into the
                    # parallel cached-file/cached-segment lists at the
                    # position that keeps them sorted.
                    dest = os.path.join(self._scratchdir, os.path.split(f)[-1])
                    if self._verbose:
                        print "Copying %s -->\n          %s." % (f, dest)
                    shutil.copy(f, dest)
                    ind = bisect_right(self._cachedsegs, s)
                    self._cachedfiles.insert(ind, dest)
                    self._cachedsegs.insert(ind, s)
                    self._cachecoverage |= segmentlist([s])
            assert seg in self._cachecoverage

        # Finally, return the cached data
        return self._fetch(channel, start, end)
예제 #41
0
파일: cache.py 프로젝트: Cyberface/lalsuite
    def from_T050017(cls, url, coltype = LIGOTimeGPS):
        """
        Parse a URL in the style of T050017-00 into a CacheEntry.  The
        T050017-00 file name format is, essentially,

        observatory-description-start-duration.extension

        Example:

        >>> c = CacheEntry.from_T050017("file://localhost/data/node144/frames/S5/strain-L2/LLO/L-L1_RDS_C03_L2-8365/L-L1_RDS_C03_L2-836562330-83.gwf")
        >>> c.observatory
        'L'
        >>> c.host
        'localhost'
        >>> os.path.basename(c.path)
        'L-L1_RDS_C03_L2-836562330-83.gwf'
        """
        match = cls._url_regex.search(url)
        if match is None:
            raise ValueError("could not convert %s to CacheEntry" % repr(url))
        observatory = match.group("obs")
        description = match.group("dsc")
        # FIXME:  remove typecasts when LIGOTimeGPS can be passed a unicode
        start = str(match.group("strt"))
        duration = str(match.group("dur"))
        if (start, duration) == ("-", "-"):
            # the name carries no segment information
            seg = None
        else:
            gps_start = coltype(start)
            seg = segments.segment(gps_start, gps_start + coltype(duration))
        return cls(observatory, description, seg, url)
예제 #42
0
파일: power.py 프로젝트: Solaro/lalsuite
	def get_output(self):
		"""Return the node's output file name, constructing it on first
		access as <ifo>-POWER_<usertag>-<start>-<duration>.xml.gz inside
		self.output_dir and caching it via set_output().

		Raises ValueError if the start time, end time, ifo or user tag
		has not been set.
		"""
		if self._AnalysisNode__output is None:
			if None in (self.get_start(), self.get_end(), self.get_ifo(), self.__usertag):
				raise ValueError, "start time, end time, ifo, or user tag has not been set"
			# NOTE(review): "seg" is computed but never used -- candidate
			# for removal.
			seg = segments.segment(LIGOTimeGPS(self.get_start()), LIGOTimeGPS(self.get_end()))
			self.set_output(os.path.join(self.output_dir, "%s-POWER_%s-%d-%d.xml.gz" % (self.get_ifo(), self.__usertag, int(self.get_start()), int(self.get_end()) - int(self.get_start()))))
		return self._AnalysisNode__output
예제 #43
0
def exportGPSEventToDisk(tevent, dir, cnt, dag, filename=None):
    """Write a plain-text summary of *tevent* to disk and register the
    file in dag.output_cache.

    When filename is None, the file is written as
    <dir>/coinc_info/<time>_<instruments>_coincEvent.info.  Returns the
    basename of the written file.
    """
    #If directy coincHeadings not there make the directory if
    #filename is None
    headingDir = dir + "/coinc_info"
    ifos = tevent.instruments
    instruments = tevent.instruments
    time = tevent.time

    # Unique-ish key used in the default file name.
    idKeyStr = "%s_%s" % (str(time), instruments)
    if filename == None:
        filename = "coincEvent.info"
        stfu_pipe.mkdir(headingDir)
        filename = os.path.normpath(headingDir + '/' + idKeyStr + '_' +
                                    filename)
    fp = open(filename, 'w')
    # Event-level summary row (rank/FAR/SNR/mass written as placeholders).
    fp.write("#DIR\t\t\tRANK\tFAR\t\tSNR\tIFOS\tINSTRUMENTS\tTIME\t\tMASS\n")
    fp.write("%-16s\t%d\t%0.2e\t%.2f\t%s\t%s\t\t%.3f\t%.2f\n" %
             (dir, cnt, 0, 0, ifos, instruments, float(time), 0))
    # One row per participating interferometer (values also placeholders).
    fp.write("#DIR\t\t\tIFO\tTIME\t\tSNR\tCHISQ\tMASS1\tMASS2\n")
    rowString = "%-16s\t%s\t%.3f\t%.2f\t%.2f\t%.2f\t%.2f\n"
    content = list()
    for ifo in tevent.ifos_list:
        content.append(
            rowString %
            (dir, ifo, float(time), float(0), float(0), float(0), float(0)))
    # Register the file in the DAG's output cache as a zero-length segment
    # at the event time.
    cache = lal.CacheEntry(instruments, "COINC_INFO_" + dir.upper(),
                           segments.segment(float(time), float(time)),
                           "file://localhost/" + os.path.abspath(filename))
    dag.output_cache.append(cache)
    fp.writelines(content)
    fp.close()
    return os.path.split(filename)[1]
예제 #44
0
def segmentlist_range(start, stop, period):
    """
    Analogous to Python's range() builtin, this generator yields a
    sequence of continuous adjacent segments each of length "period"
    with the first starting at "start" and the last ending not after
    "stop".  Note that the segments generated do not form a coalesced
    list (they are not disjoint).  start, stop, and period can be any
    objects which support basic arithmetic operations.

    Example:

    >>> from glue.segments import *
    >>> segmentlist(segmentlist_range(0, 15, 5))
    [segment(0, 5), segment(5, 10), segment(10, 15)]
    >>> segmentlist(segmentlist_range('', 'xxx', 'x'))
    [segment('', 'x'), segment('x', 'xx'), segment('xx', 'xxx')]
    """
    # Each upper boundary is computed as start + k * period rather than
    # by repeated addition to the previous boundary.
    count = 1
    lower = start
    while True:
        upper = start + count * period
        if upper > stop:
            return
        yield segments.segment(lower, upper)
        lower = upper
        count += 1
예제 #45
0
	def get_output(self):
		"""Return the node's output file name, constructing it on first
		access as <ifo>-POWER_<usertag>-<start>-<duration>.xml.gz inside
		self.output_dir and caching it via set_output().

		Raises ValueError if the start time, end time, ifo or user tag
		has not been set.
		"""
		if self._AnalysisNode__output is None:
			if None in (self.get_start(), self.get_end(), self.get_ifo(), self.__usertag):
				raise ValueError, "start time, end time, ifo, or user tag has not been set"
			# NOTE(review): "seg" is computed but never used -- candidate
			# for removal.
			seg = segments.segment(lal.LIGOTimeGPS(self.get_start()), lal.LIGOTimeGPS(self.get_end()))
			self.set_output(os.path.join(self.output_dir, "%s-POWER_%s-%d-%d.xml.gz" % (self.get_ifo(), self.__usertag, int(self.get_start()), int(self.get_end()) - int(self.get_start()))))
		return self._AnalysisNode__output
예제 #46
0
def read_segfile_xml(segfile, verbose):
    """
    Read segment file in ligolw xml type and return in
    glue.segments.segmentlist format.

    segfile may be a path or URL; ".gz" files are decompressed
    transparently.
    """
    from glue.ligolw import ligolw, utils, lsctables, table
    lsctables.use_in(ligolw.LIGOLWContentHandler)

    # Handler restricted to "segment" tables.
    # NOTE(review): this handler is installed on utils.ContentHandler but
    # load_url below is passed ligolw.LIGOLWContentHandler instead, and
    # the rows are read from VetoDefTable, not the segment table the
    # docstring suggests -- confirm this is intentional.
    def ContentHandler(xmldoc):
        return ligolw.LIGOLWContentHandler(xmldoc, lambda name, attrs:\
                   (name == ligolw.Table.tagName) and\
                   (table.StripTableName(attrs["Name"]) in ["segment"]))

    utils.ContentHandler = ContentHandler

    xmldoc = utils.load_url(segfile,
                            verbose=verbose,
                            gz=segfile.endswith(".gz"),
                            contenthandler=ligolw.LIGOLWContentHandler)
    seg_list = segmentlist()
    rows = table.get_table(xmldoc, lsctables.VetoDefTable.tableName)
    for row in rows:
        seg_list.append(segment(row.start_time, row.end_time))
    xmldoc.unlink()
    return seg_list
    def test_tofromseqwizard(self):
        """
        Check that the segwizard writing routine's output is parsed
        correctly.

        Writes a known (uncoalesced, out-of-order) segmentlist with
        tosegwizard(), rewinds the buffer, and verifies that
        fromsegwizard(strict=True) reproduces it exactly.
        """
        data = StringIO.StringIO()
        correct = segments.segmentlist([
            segments.segment(10, 100),
            segments.segment(110, 120),
            segments.segment(125, 130),
            segments.segment(0, 200)
        ])
        segmentsUtils.tosegwizard(data, correct)
        data.seek(0)
        self.assertEqual(correct, segmentsUtils.fromsegwizard(data,
                                                              strict=True))
예제 #48
0
def tosegmentxml(file, segs):

  """
    Write the glue.segments.segmentlist object segs to file object file in xml
    format with appropriate tables.

    Segment boundaries are converted to LIGOTimeGPS, and process /
    process_params tables are included so the document is
    self-describing.  The output is not gzipped.
  """

  # generate empty document
  xmldoc = ligolw.Document()
  xmldoc.appendChild(ligolw.LIGO_LW())
  xmldoc.childNodes[-1].appendChild(lsctables.New(lsctables.ProcessTable))
  xmldoc.childNodes[-1].appendChild(lsctables.New(lsctables.ProcessParamsTable))

  # append process to table
  process = ligolw_process.append_process(xmldoc,\
                                  program='pylal.dq.dqSegmentUtils',\
                                  version=__version__,\
                                  cvs_repository='lscsoft',\
                                  cvs_entry_time=__date__)

  # convert each segment boundary to LIGOTimeGPS before writing
  gpssegs = segments.segmentlist()
  for seg in segs:
    gpssegs.append(segments.segment(LIGOTimeGPS(seg[0]), LIGOTimeGPS(seg[1])))

  # append segs and seg definer
  segments_tables = ligolw_segments.LigolwSegments(xmldoc)
  segments_tables.add(ligolw_segments.LigolwSegmentList(active=gpssegs))
  # finalise
  segments_tables.coalesce()
  segments_tables.optimize()
  segments_tables.finalize(process)
  ligolw_process.set_process_end_time(process)

  # write file
  utils.write_fileobj(xmldoc, file, gz=False)
예제 #49
0
    def fetch(self, channel, start, end):
        """
        Retrieve data, caching file locations and the files themselves.

        The span [start, end) must be resolvable via _query(); otherwise
        ValueError is raised showing the uncovered part of the request.
        Matching remote files are copied into the scratch area before
        the actual read is delegated to _fetch().
        """
        seg = segment(start, end)

        if not self._query(channel, start, end):
            raise ValueError("%s not found in cache" % repr(segmentlist([seg]) - self._remotecoverage))

        # Need to cache files locally
        # Note: seg *will* be in self._cachecoverage if self.scratchdir is None.
        if seg not in self._cachecoverage:
            for f,s in zip(self._remotefiles, self._remotesegs):
                if seg.intersects(s) and s not in self._cachecoverage:
                    # Copy the remote file into scratch, inserting into
                    # the parallel cached-file/cached-segment lists at
                    # the position that keeps them sorted.
                    dest = os.path.join(self._scratchdir, os.path.split(f)[-1])
                    if self._verbose:
                        print "Copying %s -->\n          %s." % (f, dest)
                    shutil.copy(f, dest)
                    ind = bisect_right(self._cachedsegs, s)
                    self._cachedfiles.insert(ind, dest)
                    self._cachedsegs.insert(ind, s)
                    self._cachecoverage |= segmentlist([s])
            assert seg in self._cachecoverage

        # Finally, return the cached data
        return self._fetch(channel, start, end)
예제 #50
0
 def matches(row):
     # True when a database row refers to the flag being searched for and
     # overlaps the search window.  The free variables (ifo, name,
     # version, search_span, start_pad) come from the enclosing scope.
     # NOTE(review): both endpoints are padded with start_pad; the second
     # presumably should use end_pad -- confirm against the caller.
     return (
         row[0].strip() == ifo
         and row[1] == name
         and int(row[2]) == int(version)
         and search_span.intersects(segment(row[3] + start_pad, row[4] + start_pad))
     )
예제 #51
0
    def get_valid_times(self):
        """Return (data_seg, valid_seg): the full data segment and the
        sub-segment that remains after trimming the data padding plus a
        quarter-segment overlap from each end."""
        quarter_overlap = int(self.get_opt('segment-duration')) / 4
        padding = int(self.get_opt('pad-data'))
        trim = padding + quarter_overlap

        valid = segments.segment(self.data_seg[0] + trim,
                                 self.data_seg[1] - trim)
        return self.data_seg, valid
예제 #52
0
def from_bitstream(bitstream, start, dt, minlen=1):
    """
	Convert consecutive True values in a bit stream (boolean-castable
	iterable) to a stream of segments. Require minlen consecutive True
	samples to comprise a segment.

	Example:

	>>> list(from_bitstream((True, True, False, True, False), 0, 1))
	[segment(0, 2), segment(3, 4)]
	>>> list(from_bitstream([[], [[]], [[]], [], []], 1013968613, 0.125))
	[segment(1013968613.125, 1013968613.375)]
	"""
    # i indexes the current sample; segment boundaries are expressed in
    # units of dt relative to "start".
    # NOTE(review): bitstream.next() is Python-2-only syntax; under
    # Python 3 this would need next(bitstream).
    bitstream = iter(bitstream)
    i = 0
    while 1:
        if bitstream.next():
            # found start of True block; find the end
            j = i + 1
            try:
                while bitstream.next():
                    j += 1
            finally:  # make sure StopIteration doesn't kill final segment
                if j - i >= minlen:
                    yield segments.segment(start + i * dt, start + j * dt)
            i = j  # advance to end of block
        i += 1
예제 #53
0
def expand_version_number(engine, segdef):
    """Expand a '*' version in a segment-definer tuple.

    segdef is (ifo, name, version, start_time, end_time, start_pad,
    end_pad).  A concrete version is returned unchanged in a one-element
    list; a '*' version is expanded by tiling the interval with the
    highest version available at each time, working downwards from the
    maximum version found in the database.
    """
    ifo, name, version, start_time, end_time, start_pad, end_pad = segdef

    if version != '*':
        return [segdef]

    # Start looking at the full interval
    intervals = segmentlist([segment(start_time, end_time)])

    # Find the maximum version number
    sql  = "SELECT max(version) FROM segment_definer "
    sql += "WHERE  segment_definer.ifos = '%s' " % ifo
    sql += "AND   segment_definer.name = '%s' " % name

    rows    = engine.query(sql)
    try:
        version = len(rows[0]) and rows[0][0] or 1
    except:
        # NOTE(review): bare except presumably guards an empty result
        # set, but it also hides real errors -- narrow it.
        version = None

    results = []

    while version > 0:
        for interval in intervals:
            # Coverage of this flag at this version within the interval.
            segs = query_segments(engine, 'segment_summary', [(ifo, name, version, interval[0], interval[1], 0, 0)])

            for seg in segs[0]:
                results.append( (ifo, name, version, seg[0], seg[1], 0, 0) )

        intervals.coalesce()
        # NOTE(review): "segs" here is left over from the *last* loop
        # iteration; if intervals is ever empty this raises NameError,
        # and only the final interval's coverage is subtracted --
        # confirm this matches the intended behaviour.
        intervals -= segs[0]

        version -= 1

    return results
예제 #54
0
    def get_valid_times(self):
        """Return (data_seg, valid_seg): the full analysis data segment
        and the sub-segment that remains valid after discarding the data
        padding and the segment overlap at each end."""
        # A quarter of the configured segment duration is lost to
        # overlap on each side of the data segment.
        overlap = int(self.get_opt('segment-duration')) / 4
        pad_data = int(self.get_opt('pad-data'))

        valid_start = self.data_seg[0] + pad_data + overlap
        valid_end = self.data_seg[1] - pad_data - overlap
        return self.data_seg, segments.segment(valid_start, valid_end)
예제 #55
0
    def from_T050017(cls, url, coltype=LIGOTimeGPS):
        """
        Construct a CacheEntry from a URL whose basename follows the
        T050017-00 naming convention,

        observatory-description-start-duration.extension

        Example:

        >>> c = CacheEntry.from_T050017("file://localhost/data/node144/frames/S5/strain-L2/LLO/L-L1_RDS_C03_L2-8365/L-L1_RDS_C03_L2-836562330-83.gwf")
        >>> c.observatory
        'L'
        >>> c.host
        'localhost'
        >>> os.path.basename(c.path)
        'L-L1_RDS_C03_L2-836562330-83.gwf'
        """
        parsed = cls._url_regex.search(url)
        if not parsed:
            raise ValueError("could not convert %s to CacheEntry" % repr(url))
        # FIXME:  remove typecasts when LIGOTimeGPS can be passed a unicode
        start_text = str(parsed.group("strt"))
        duration_text = str(parsed.group("dur"))
        if start_text == "-" and duration_text == "-":
            # the name carries no segment information
            span = None
        else:
            t0 = coltype(start_text)
            span = segments.segment(t0, t0 + coltype(duration_text))
        return cls(parsed.group("obs"), parsed.group("dsc"), span, url)
예제 #56
0
def new_plots(plots):
    """Instantiate the standard two-instrument comparison plots and
    return the subset selected by the indexes in *plots* (order
    preserved).

    NOTE: the original's deltat_seg / deltat_width locals were never
    used and have been removed.
    """
    available = [
        RateContours("H2", "H1"),
        ConfidenceContours("H2", "H1", magnitude_a, "Confidence", 1, 10**10),
        ConfidenceContours("H2", "L1", magnitude_a, "Confidence", 1, 10**10),
        ConfidenceContours("L1", "H1", magnitude_a, "Confidence", 1, 10**10),
        ConfidenceContours("H2", "H1", magnitude_b,
                           r"Power / D.o.F. / ($F_{+}^{2} + F_{\times}^{2}$)",
                           1, 10**10),
        ConfidenceContours("H2", "L1", magnitude_b,
                           r"Power / D.o.F. / ($F_{+}^{2} + F_{\times}^{2}$)",
                           1, 10**10),
        ConfidenceContours("L1", "H1", magnitude_b,
                           r"Power / D.o.F. / ($F_{+}^{2} + F_{\times}^{2}$)",
                           1, 10**10),
        ConfidenceContourProjection(
            numpy.array((-1 / math.sqrt(2), +1 / math.sqrt(2), 0), "Float64"),
            numpy.array(
                (-1 / math.sqrt(4), -1 / math.sqrt(4), +1 / math.sqrt(2)),
                "Float64"), magnitude_b, 10**5),
        RateVsConfidence("H1"),
        RateVsConfidence("H2"),
        RateVsConfidence("L1")
    ]
    return [available[i] for i in plots]