Example #1
def find_cache_segments(*caches):
    """Return the segments covered by one or more data caches

    Parameters
    ----------
    *caches : `~glue.lal.Cache`
        one or more file caches

    Returns
    -------
    segments : `~gwpy.segments.SegmentList`
        list of segments contained in the caches
    """
    out = SegmentList()
    nframes = sum(len(c) for c in caches)
    if nframes == 0:
        return out
    for cache in caches:
        # build segment for this cache
        if not len(cache):
            continue
        seg = cache[0].segment
        for e in cache:
            # if new segment doesn't overlap, append and start again
            if e.segment.disjoint(seg):
                out.append(seg)
                seg = e.segment
            # otherwise, extend the current segment
            else:
                seg |= e.segment
        # append the final segment for this cache
        out.append(seg)
    return out
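A minimal usage sketch (not part of the original source; assumes `glue.lal` is installed, and the frame-file names are hypothetical, following the T050017 `OBS-TAG-START-DURATION` naming convention):

from glue.lal import Cache, CacheEntry

# two contiguous frame files covering [0, 8) and [8, 16)
cache = Cache([
    CacheEntry.from_T050017('X-TEST-0-8.gwf'),
    CacheEntry.from_T050017('X-TEST-8-8.gwf'),
])
print(find_cache_segments(cache))  # one merged segment spanning [0, 16)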
Example #2
 def test_properties(self):
     empty = DataQualityFlag()
     flag = DataQualityFlag(FLAG1,
                            active=ACTIVE,
                            known=KNOWN,
                            padding=(-4, 8))
     # name
     self.assertEqual(empty.name, None)
     self.assertEqual(flag.name, FLAG1)
     self.assertEqual(flag.ifo, FLAG1.split(':')[0])
     self.assertEqual(flag.version, int(FLAG1.split(':')[-1]))
     # known
     self.assertIsInstance(empty.known, SegmentList)
     self.assertListEqual(empty.known, SegmentList())
     self.assertListEqual(flag.known, KNOWN)
     # active
     self.assertIsInstance(empty.active, SegmentList)
     self.assertListEqual(empty.active, SegmentList())
     self.assertListEqual(flag.active, ACTIVE)
     # padding
     self.assertTupleEqual(empty.padding, (0, 0))
     self.assertTupleEqual(flag.padding, (-4, 8))
     # texname
     self.assertEqual(flag.texname, FLAG1.replace('_', r'\_'))
     self.assertEqual(empty.texname, None)
     # livetime
     self.assertEqual(flag.livetime, 4)
Example #3
    def test_query_dqsegdb(self, name, flag):
        result = query_dqsegdb(self.TEST_CLASS.query_dqsegdb, name, 0, 10)
        RESULT = QUERY_RESULTC[flag]

        assert isinstance(result, self.TEST_CLASS)
        utils.assert_segmentlist_equal(result.known, RESULT.known)
        utils.assert_segmentlist_equal(result.active, RESULT.active)

        result2 = query_dqsegdb(self.TEST_CLASS.query_dqsegdb, name, (0, 10))
        utils.assert_flag_equal(result, result2)

        result2 = query_dqsegdb(self.TEST_CLASS.query_dqsegdb, name,
                                SegmentList([(0, 10)]))
        utils.assert_flag_equal(result, result2)

        with pytest.raises(ValueError):
            query_dqsegdb(self.TEST_CLASS.query_dqsegdb, 'BAD-FLAG_NAME',
                          SegmentList([(0, 10)]))

        with pytest.raises(HTTPError) as exc:
            query_dqsegdb(self.TEST_CLASS.query_dqsegdb, 'X1:GWPY-TEST:0', 0,
                          10)
        assert str(exc.value) == 'HTTP Error 404: Not found [X1:GWPY-TEST:0]'

        with pytest.raises(ValueError):
            self.TEST_CLASS.query_dqsegdb(QUERY_FLAGS[0], 1, 2, 3)
        with pytest.raises(ValueError):
            self.TEST_CLASS.query_dqsegdb(QUERY_FLAGS[0], (1, 2, 3))
Example #4
File: range.py Project: gwpy/gwsumm
    def combined_time_volume(self, allsegments, allranges):
        try:
            combined_range = TimeSeries(numpy.zeros(allranges[0].size),
                                        xindex=allranges[0].times,
                                        unit='Mpc')
        except IndexError:
            combined_range = TimeSeries(numpy.zeros(allranges[0].size),
                                        unit='Mpc',
                                        x0=allranges[0].x0,
                                        dx=allranges[0].dx)

        # get coincident observing segments
        pairs = list(combinations(allsegments, 2))
        coincident = SegmentList()
        for pair in pairs:
            coincident.extend(pair[0] & pair[1])
        coincident = coincident.coalesce()

        # get effective network range
        values = [r.value for r in allranges]
        values = [min(nlargest(2, x)) for x in zip(*values)]
        size = min([r.size for r in allranges])
        combined_range[:size] = values * combined_range.unit

        # compute time-volume
        return self.calculate_time_volume(coincident, combined_range)
Example #5
File: all.py Project: tjma12/gwsumm
def generate_all_state(start, end, register=True, **kwargs):
    """Build a new `SummaryState` for the given [start, end) interval.

    Parameters
    ----------
    start : `~gwpy.time.LIGOTimeGPS`, float
        the GPS start time of the current analysis
    end : `~gwpy.time.LIGOTimeGPS`, float
        the GPS end time of the current analysis
    register : `bool`, optional
        should the new `SummaryState` be registered, default `True`
    **kwargs
        other keyword arguments passed to the `SummaryState` constructor

    Returns
    -------
    allstate : `SummaryState`
        the newly created 'All' `SummaryState`
    """
    now = min(end, NOW)
    all_ = SummaryState(ALLSTATE,
                        known=SegmentList([Segment(start, end)]),
                        active=SegmentList([Segment(start, now)]),
                        **kwargs)
    all_.ready = True
    if register:
        register_state(all_)
    return all_
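A hypothetical call (assumes the surrounding gwsumm module, which defines `SummaryState`, `ALLSTATE`, `NOW`, and `register_state`, has been imported):

# build, but do not register, an 'All' state for a 32 s analysis
allstate = generate_all_state(1126259446, 1126259478, register=False)
print(allstate.known)   # [1126259446 ... 1126259478)
print(allstate.ready)   # True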
Example #6
def segments_from_array(array):
    """Convert a 2-dimensional `numpy.ndarray` to a `SegmentList`
    """
    out = SegmentList()
    for row in array:
        out.append(Segment(*row))
    return out
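For example (a sketch using `numpy` together with the gwpy `Segment`/`SegmentList` imports the function assumes):

import numpy
from gwpy.segments import Segment, SegmentList

arr = numpy.array([[0., 10.], [20., 30.]])
print(segments_from_array(arr))  # a SegmentList of (0, 10) and (20, 30)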
Example #7
def divide_segmentlist(start, end, bins=4096, write=True, **kwargs):
    '''Divide the given period into a list of fixed-duration segments

    Parameters
    ----------
    start : `int`
        GPS start time of the given period
    end : `int`
        GPS end time of the given period
    bins : `int`, optional
        Duration of each bin in seconds. Default is 4096 (2**12).
    write : `bool`, optional
        If `True` (default), also write the segment list to
        './segmentlist/total.txt'.

    Returns
    -------
    segmentlist : `gwpy.segments.SegmentList`
        the divided segment list
    '''
    if ((end-start) % bins) != 0:
        raise ValueError('Not divisible!')

    _start = range(start, end, bins)
    _end = range(start + bins, end + bins, bins)
    segmentlist = SegmentList([Segment(s, e) for s, e in zip(_start, _end)])
    log.debug(segmentlist[0])
    log.debug(segmentlist[-1])
    if write:
        segmentlist.write('./segmentlist/total.txt')
    return segmentlist
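For instance, dividing a 4 x 4096 s stretch into four bins (`write=False` skips the file output; assumes the module-level `log`ger used above is configured):

segs = divide_segmentlist(1187000000, 1187016384, write=False)
print(len(segs))  # 4
print(segs[0])    # [1187000000 ... 1187004096)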
Example #8
 def __init__(self,
              name,
              known=SegmentList(),
              active=SegmentList(),
              description=None,
              definition=None,
              hours=None,
              key=None,
              filename=None,
              url=None):
     """Initialise a new `SummaryState`
     """
     # allow users to specify known as (start, end)
     if (isinstance(known, Segment)
             or (isinstance(known, tuple) and len(known) == 2
                 and not isinstance(known[0], tuple))):
         known = [known]
     super(SummaryState, self).__init__(name=name,
                                        known=known,
                                        active=active)
     self.description = description
     if definition:
         self.definition = re.sub(r'(\s|\n)', '', definition)
     else:
         self.definition = None
     self.key = key
     self.hours = hours
     self.url = url
     if known and active:
         self.ready = True
     else:
         self.ready = False
     self.filename = filename
Example #9
    def __init__(self, parent=None, tssb=None, conc=0.1):
        super(DataNode, self).__init__(parent=parent, tssb=tssb)

        # pi is a first-class citizen
        self.pi = 0.0
        self.param = 0.0
        self.param1 = 0.0
        self.pi1 = 0.0  # used in MH to store the old state

        self.path = None  # set of nodes from root to this node
        self.ht = 0.0

        if parent is None:
            self._conc = conc
            self.pi = 1.0
            self.param = 1.0

        else:
            self.pi = rand(1) * parent.pi
            parent.pi = parent.pi - self.pi
            self.param = self.pi

        # the initial values here should be SegmentList instances
        self.varphiR = SegmentList([Segment(0, 1)])
        self.piR = SegmentList([Segment(0, 1)])
        self.epsilon = ""
        # tssb node
        self.tNode = None
Example #10
 def fetch(self,
           config=GWSummConfigParser(),
           segdb_error='raise',
           datafind_error='raise',
           **kwargs):
     """Finalise this state by fetching its defining segments,
     either from global memory, or from the segment database
     """
     # check we haven't done this before
     if self.ready:
         return self
     # fetch data
     if self.definition:
         match = re.search('(%s)' % '|'.join(MATHOPS.keys()),
                           self.definition)
     else:
         match = None
     if self.filename:
         self._read_segments(self.filename)
     elif match:
         channel, thresh = self.definition.split(match.groups()[0])
         channel = channel.rstrip()
         thresh = float(thresh.strip())
         self._fetch_data(channel,
                          thresh,
                          match.groups()[0],
                          config=config,
                          datafind_error=datafind_error,
                          **kwargs)
     # fetch segments
     elif self.definition:
         self._fetch_segments(config=config,
                              segdb_error=segdb_error,
                              **kwargs)
     # fetch null
     else:
         start = config.getfloat(DEFAULTSECT, 'gps-start-time')
         end = config.getfloat(DEFAULTSECT, 'gps-end-time')
         self.known = [(start, end)]
         self.active = self.known
     # restrict to given hours
     if self.hours:
         segs_ = SegmentList()
         # get start day
         d = Time(float(self.start), format='gps', scale='utc').datetime
          d = d.replace(hour=0, minute=0, second=0, microsecond=0)
         end_ = Time(float(self.end), format='gps', scale='utc').datetime
         while d < end_:
             # get GPS of day
             t = to_gps(d)
             # for each [start, end) hour pair, build a segment
             for h0, h1 in self.hours:
                 segs_.append(Segment(t + h0 * 3600, t + h1 * 3600))
              # increment to the next day
             d += datetime.timedelta(1)
         self.known &= segs_
         self.active &= segs_
     # FIXME
     self.ready = True
     return self
Example #11
def segmentlist_from_tree(tree, coalesce=False):
    """Read a `~ligo.segments.segmentlist` from a 'segments' `ROOT.Tree`
    """
    segs = SegmentList()
    for i in range(tree.GetEntries()):
        tree.GetEntry(i)
        segs.append(Segment(tree.start, tree.end))
    return segs
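A sketch of building a compatible tree with PyROOT (assumes ROOT's Python bindings are available; the branch names `start` and `end` are exactly what the function reads):

import ROOT
from array import array

tree = ROOT.TTree('segments', 'segments')
start = array('d', [0.])
end = array('d', [0.])
tree.Branch('start', start, 'start/D')
tree.Branch('end', end, 'end/D')
for s, e in [(0, 10), (20, 30)]:
    start[0], end[0] = s, e
    tree.Fill()
print(segmentlist_from_tree(tree))  # segments (0, 10) and (20, 30)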
Example #12
 def make_cache():
     segs = SegmentList()
     cache = Cache()
     for seg in [(0, 1), (1, 2), (4, 5)]:
         d = seg[1] - seg[0]
         f = 'A-B-%d-%d.tmp' % (seg[0], d)
         cache.append(CacheEntry.from_T050017(f))
         segs.append(Segment(*seg))
     return cache, segs
Example #13
def test_get_frame_segments(find):
    assert segments.get_frame_segments("X", "X1_R", 0, 100) == SegmentList(
        [Segment(0, 10), Segment(20, 30)])
    assert segments.get_frame_segments(
        "X",
        "X1_R",
        25,
        100,
    ) == SegmentList([Segment(25, 30)])
Example #14
 def _get_data_segments(self, channel):
     """Get data segments for this plot
     """
     if self.state and not self.all_data:
         return self.state.active
     if channel.sample_rate is not None:
         return SegmentList(
             [self.span.protract(1 / channel.sample_rate.value)])
     return SegmentList([self.span])
Example #15
def locked():
    lockstate = TimeSeries.fetch('K1:GRD-LSC_LOCK_OK', start, end,
                                 host='10.68.10.121', port=8088, pad=np.nan)
    fs = (1. / lockstate.dt).value
    locked = (lockstate == 1.0 * u.V).to_dqflag(round=False, minlen=2**10 * fs)  # *1
    ok = locked.active
    ok = SegmentList(sorted(ok, key=lambda x: x.end - x.start, reverse=True))  # *2
    myprint(ok)
    ok.write('segments_locked.txt')
    print('Finished segments_locked.txt')
Example #16
 def make_cache():
     segs = SegmentList()
     cache = Cache()
     for seg in [(0, 1), (1, 2), (4, 5)]:
         d = seg[1] - seg[0]
         _, f = tempfile.mkstemp(prefix='A-',
                                 suffix='-%d-%d.tmp' % (seg[0], d))
         cache.append(CacheEntry.from_T050017(f))
         segs.append(Segment(*seg))
     return cache, segs
Example #17
def grab_time_triggers(glob_wildcard):
    time_segs = SegmentList([])
    start_time_utc = tconvert(args.gps_start_time)
    for filename in glob.glob(glob_wildcard):
        data = SegmentList.read(filename)
        print('grabbing trigger file: ' + filename)
        time_segs += data
        # print time_segs
        start_time_utc += datetime.timedelta(days=1)
    return time_segs
Example #18
def get_state_segments(channel, frametype, start, end, bits=[0], nproc=1,
                       pad=(0, 0)):
    """Read state segments from a state-vector channel in the frames
    """
    ifo = channel[:2]
    pstart = start - pad[0]
    pend = end + pad[1]

    # find frame cache
    cache = data.find_frames(ifo, frametype, pstart, pend)

    # optimise I/O based on type and library
    io_kw = {}
    try:
        from LDAStools import frameCPP  # noqa: F401
    except ImportError:
        pass
    else:
        io_kw['format'] = 'gwf.framecpp'
        if RAW_TYPE_REGEX.match(frametype):
            io_kw['type'] = 'adc'
        elif channel.endswith('GDS-CALIB_STATE_VECTOR'):
            io_kw['type'] = 'proc'

    bits = list(map(str, bits))
    # FIXME: need to read from cache with single segment but doesn't match
    # [start, end)

    # Virgo drops the state vector regularly, so need to sieve the files
    if channel == "V1:DQ_ANALYSIS_STATE_VECTOR":
        span = gwf_data_segments(cache, channel)
    else:
        span = SegmentList([Segment(pstart, pend)])

    # read data segments
    segs = SegmentList()
    try:
        csegs = cache_segments(cache)
    except KeyError:
        return segs
    for seg in csegs & span:
        sv = StateVector.read(cache, channel, nproc=nproc, start=seg[0],
                              end=seg[1], bits=bits, gap='pad', pad=0,
                              **io_kw).astype('uint32')
        segs += sv.to_dqflags().intersection().active

    # truncate to integers, and apply padding
    for i, seg in enumerate(segs):
        segs[i] = type(seg)(int(ceil(seg[0])) + pad[0],
                            int(floor(seg[1])) - pad[1])
    segs.coalesce()

    return segs.coalesce()
Example #19
def diff(seglist, nodata):
    new = SegmentList()
    for segment in seglist:
        flag = 0
        for _nodata in nodata:
            if segment != _nodata:
                flag += 1
            else:
                break
        if flag == len(nodata):
            new.append(segment)
    return new
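For example, with the gwpy `Segment`/`SegmentList` types used elsewhere on this page. Note that only exact matches are removed; partially overlapping segments survive:

from gwpy.segments import Segment, SegmentList

good = SegmentList([Segment(0, 10), Segment(20, 30), Segment(40, 50)])
nodata = SegmentList([Segment(20, 30)])
print(diff(good, nodata))  # (0, 10) and (40, 50) remain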
Example #20
def plot_whitening(station, ts_list, start_time, end_time, seglist=None):
    """
    Generate a spectrogram plot and normalized spectrogram
    norm: \sqrt{S(f,t)} / \sqrt{\overbar{S(f)}}
    """
    stride, fftlength, overlap = 20, 6, 3
    plot = SpectrogramPlot()
    ax = plot.gca()
    white_plot = SpectrogramPlot()
    wax = white_plot.gca()
    for ts in ts_list:
        if (len(ts) * ts.dt).value < stride:
            continue
        spec = ts.spectrogram(stride, fftlength=fftlength, overlap=overlap)
        ax.plot(spec, cmap='jet', norm=matplotlib.colors.LogNorm())
        wspec = spec.ratio('median')
        wax.plot(wspec,
                 vmin=0.1,
                 vmax=100,
                 cmap='jet',
                 norm=matplotlib.colors.LogNorm())
    ax.set_title(r'$\mathrm{' + station + '}$')
    ax.set_ylim(0.1, ts.sample_rate.value / 2.)
    ax.set_yscale('log')
    wax.set_title(r'$\mathrm{' + station + '}$')
    wax.set_ylim(0.1, ts.sample_rate.value / 2.)
    wax.set_yscale('log')
    plot.add_colorbar(label='Amplitude')
    white_plot.add_colorbar(label='Amplitude')
    if seglist is not None:
        plot.add_state_segments(SegmentList(seglist[station].active),
                                plotargs={
                                    'label': 'data present',
                                    'facecolor': 'g',
                                    'edgecolor': 'k'
                                })
        white_plot.add_state_segments(SegmentList(seglist[station].active),
                                      plotargs={
                                          'label': 'data present',
                                          'facecolor': 'g',
                                          'edgecolor': 'k'
                                      })
    # Set limits
    plot.axes[0].set_epoch(start_time)
    plot.axes[2].set_epoch(start_time)
    #plot.axes[1].set_epoch(start_time)
    white_plot.axes[0].set_epoch(start_time)
    white_plot.axes[2].set_epoch(start_time)
    ax.set_xlim(start_time, end_time)
    wax.set_xlim(start_time, end_time)
    # Save figures
    plot.savefig("spectrogram.png", dpi=300)
    white_plot.savefig("whitened.png", dpi=300)
Example #21
def get_guardian_segments(node, frametype, start, end, nproc=1, pad=(0, 0),
                          strict=False):
    """Determine state segments for a given guardian node
    """
    ifo, node = node.split(':', 1)
    if node.startswith('GRD-'):
        node = node[4:]
    pstart = start - pad[0]
    pend = end + pad[1]

    # find frame cache
    cache = data.find_frames(ifo, frametype, pstart, pend)

    # pre-format data segments
    span = SegmentList([Segment(pstart, pend)])
    segs = SegmentList()
    csegs = cache_segments(cache)
    if not csegs:
        return csegs

    # read data
    stub = "{}:GRD-{}".format(ifo, node)
    if strict:
        channels = ["{}_OK".format(stub)]
    else:
        state = "{}_STATE_N".format(stub)
        nominal = "{}_NOMINAL_N".format(stub)
        active = "{}_ACTIVE".format(stub)
        channels = [state, nominal, active]
    for seg in csegs & span:
        if strict:
            sv = StateVector.read(
                cache, channels[0], nproc=nproc, start=seg[0], end=seg[1],
                bits=[0], gap='pad', pad=0,).astype('uint32')
            segs += sv.to_dqflags().intersection().active
        else:
            gdata = TimeSeriesDict.read(
                cache, channels, nproc=nproc, start=seg[0], end=seg[1],
                gap='pad', pad=0)
            ok = ((gdata[state].value == gdata[nominal].value) &
                  (gdata[active].value == 1)).view(StateTimeSeries)
            ok.t0 = gdata[state].t0
            ok.dt = gdata[state].dt
            segs += ok.to_dqflag().active

    # truncate to integers, and apply padding
    for i, seg in enumerate(segs):
        segs[i] = type(seg)(int(ceil(seg[0])) + pad[0],
                            int(floor(seg[1])) - pad[1])
    segs.coalesce()

    return segs.coalesce()
Example #22
def get_spectrogram(channel, segments, config=ConfigParser(), cache=None,
                    query=True, nds='guess', format='power', return_=True,
                    frametype=None, multiprocess=True, datafind_error='raise',
                    **fftparams):
    """Retrieve the time-series and generate a spectrogram of the given
    channel
    """
    channel = get_channel(channel)

    # read data for all sub-channels
    specs = []
    channels = re_channel.findall(channel.ndsname)
    for c in channels:
        specs.append(_get_spectrogram(c, segments, config=config, cache=cache,
                                      query=query, nds=nds, format=format,
                                      return_=return_, frametype=frametype,
                                      multiprocess=multiprocess,
                                      datafind_error=datafind_error,
                                      **fftparams))
    if return_ and len(channels) == 1:
        return specs[0]
    elif return_:
        # get union of segments for all sub-channels
        datasegs = reduce(operator.and_, [sgl.segments for sgl in specs])
        # build meta-spectrogram for all intersected segments
        out = SpectrogramList()
        operators = [channel.name[m.span()[1]] for m in
                     list(re_channel.finditer(channel.ndsname))[:-1]]
        for seg in datasegs:
            sg = _get_spectrogram(channels[0], SegmentList([seg]),
                                  config=config, query=False, format=format,
                                  return_=True)[0]
            sg.name = str(channel)
            for op, ch in zip(operators, channels[1:]):
                try:
                    op = OPERATOR[op]
                except KeyError as e:
                    e.args = ('Cannot parse math operator %r' % op,)
                    raise
                data = _get_spectrogram(ch, SegmentList([seg]),
                                        config=config, query=False,
                                        format=format, return_=True)
                try:
                    sg = op(sg, data[0])
                except ValueError as e:
                    if 'could not be broadcast together' in str(e):
                        s = min(sg.shape[0], data[0].shape[0])
                        sg = op(sg[:s], data[0][:s])
                    else:
                        raise
            out.append(sg)
        return out
Example #23
def grab_time_triggers(wildcard, start, end):
    """Retrieve triggers from a given GPS time range
    """
    time_segs = SegmentList([])
    start_time_utc = tconvert(start)
    for filename in glob.glob(wildcard):
        data = SegmentList.read(filename)
        LOGGER.info(' '.join(['grabbing trigger file:', filename]))
        start_end_seg = Segment(start, end)
        c = data & SegmentList([start_end_seg])
        time_segs += c
        start_time_utc += datetime.timedelta(days=1)
    return time_segs
Example #24
 def test_get_triggers(self):
     # test that trigfind raises a warning if the channel-level directory
     # doesn't exist
     with pytest.warns(UserWarning):
         out = triggers.get_triggers('X1:DOES_NOT_EXIST', 'omicron',
                                     SegmentList([Segment(0, 100)]))
     # check output type and columns
     self.assertIsInstance(out, numpy.ndarray)
     for col in ['time', 'frequency', 'snr']:
         self.assertIn(col, out.dtype.fields)
     # test that unknown ETG raises KeyError
     self.assertRaises(KeyError, triggers.get_triggers,
                       'X1:DOES_NOT_EXIST', 'fake-etg',
                       SegmentList([Segment(0, 100)]))
Example #25
    def make_cache():
        try:
            from lal.utils import CacheEntry
        except ImportError as e:
            pytest.skip(str(e))

        segs = SegmentList()
        cache = []
        for seg in [(0, 1), (1, 2), (4, 5)]:
            d = seg[1] - seg[0]
            f = 'A-B-%d-%d.tmp' % (seg[0], d)
            cache.append(CacheEntry.from_T050017(f, coltype=int))
            segs.append(Segment(*seg))
        return cache, segs
Example #26
    def make_cache():
        try:
            from lal.utils import CacheEntry
        except ImportError as e:
            pytest.skip(str(e))

        segs = SegmentList()
        cache = Cache()
        for seg in [(0, 1), (1, 2), (4, 5)]:
            d = seg[1] - seg[0]
            f = 'A-B-%d-%d.tmp' % (seg[0], d)
            cache.append(CacheEntry.from_T050017(f, coltype=int))
            segs.append(Segment(*seg))
        return cache, segs
Example #27
def diff(segmentlist, nodata):
    '''Return the segments from `segmentlist` that do not appear in `nodata`
    '''
    from gwpy.segments import SegmentList
    new = SegmentList()
    for segment in segmentlist:
        flag = 0
        for _nodata in nodata:
            if segment != _nodata:
                flag += 1
            else:
                break
        if flag == len(nodata):
            new.append(segment)
    return new
Example #28
 def test_write_segments_ascii(self):
     for ncol in [2, 4]:
         with NamedTemporaryFile(suffix='.txt', delete=False) as f:
             segments.write_ascii(f.name, TEST_SEGMENTS, ncol=ncol)
             f.delete = True
             a = SegmentList.read(f.name, gpstype=float, strict=False)
             self.assertEqual(a, TEST_SEGMENTS_2)
Example #29
def read_cache(cache, segments, etg, nproc=1, timecolumn=None, **kwargs):
    """Read a table of events from a cache

    This function is mainly meant for use from the `get_triggers` method

    Parameters
    ----------
    cache : :class:`glue.lal.Cache`
        the formatted list of files to read
    segments : `~gwpy.segments.SegmentList`
        the list of segments to read
    etg : `str`
        the name of the trigger generator that created the files
    nproc : `int`, optional
        the number of parallel processes to use when reading
    timecolumn : `str`, optional
        the name of the time column in these files, stored in the
        output table's metadata
    **kwargs
        other keyword arguments are passed to the `EventTable.read` or
        `{tableclass}.read` methods

    Returns
    -------
    table : `~gwpy.table.EventTable`, `None`
        a table of events, or `None` if the cache has no overlap with
        the segments
    """
    if isinstance(cache, Cache):
        cache = cache.sieve(segmentlist=segments)
        cache = cache.checkfilesexist()[0]
        cache.sort(key=lambda x: x.segment[0])
        cache = cache.pfnlist()  # some readers only like filenames
    else:
        cache = [urlparse(url).path for url in cache]
    if etg == 'pycbc_live':  # remove empty HDF5 files
        cache = filter_pycbc_live_files(cache, ifo=kwargs['ifo'])

    if len(cache) == 0:
        return

    # read triggers
    table = EventTable.read(cache, **kwargs)

    # store read keywords in the meta table
    if timecolumn:
        table.meta['timecolumn'] = timecolumn

    # get back from cache entry
    if isinstance(cache, CacheEntry):
        cache = Cache([cache])

    # append new events to existing table
    try:
        csegs = cache_segments(cache) & segments
    except (AttributeError, TypeError, ValueError):
        csegs = SegmentList()
    table.meta['segments'] = csegs

    if timecolumn:  # already filtered on-the-fly
        return table
    # filter now
    return keep_in_segments(table, segments, etg)
Example #30
def add_triggers(table, key, segments=None):
    """Add a `EventTable` to the global memory cache
    """
    if segments is not None:
        table.meta['segments'] = segments
    try:
        old = globalv.TRIGGERS[key]
    except KeyError:
        new = globalv.TRIGGERS[key] = table
        new.meta.setdefault('segments', SegmentList())
    else:
        new = globalv.TRIGGERS[key] = vstack_tables((old, table))
        new.meta = old.meta
        new.meta['segments'] |= table.meta.get('segments', SegmentList())
    new.meta['segments'].coalesce()
    return new
Example #31
    def draw(self):
        """Read in all necessary data, and generate the figure.
        """
        plot = self.init_plot()
        ax = plot.gca()

        # work out labels
        labels = self.pargs.pop('labels', self.channels)
        if isinstance(labels, str):
            labels = labels.split(',')
        labels = [str(s).strip('\n ') for s in labels]

        # add data
        for label, channel in zip(labels, self.channels):
            label = usetex_tex(label)
            if self.state and not self.all_data:
                valid = self.state.active
            else:
                valid = SegmentList([self.span])
            data = get_timeseries(channel, valid, query=False)
            # handle no timeseries
            if not len(data):
                ax.plot([0], [0], visible=False, label=label)
                continue
            # plot time-series
            color = None
            for ts in data:
                # double-check log scales
                if self.logy:
                    ts.value[ts.value == 0] = 1e-100
                if color is None:
                    line = ax.plot(ts, label=label)[0]
                    color = line.get_color()
                else:
                    ax.plot(ts, color=color, label=None)

            # allow channel data to set parameters
            if hasattr(data[0].channel, 'amplitude_range'):
                self.pargs.setdefault('ylim',
                                      data[0].channel.amplitude_range)

        # add horizontal lines
        for yval in self.pargs['hline']:
            try:
                yval = float(yval)
            except ValueError:
                continue
            else:
                ax.plot([self.start, self.end], [yval, yval],
                        linestyle='--', color='red')

        # customise plot
        legendargs = self.parse_legend_kwargs()
        self.apply_parameters(ax, **self.pargs)
        if len(self.channels) > 1:
            ax.legend(**legendargs)

        # finalise
        self.add_state_segments(ax)
        return self.finalize()
Example #32
    def test_add_state_segments(self):
        fig, ax = self.new()

        # mock up some segments and add them as 'state' segments
        segs = SegmentList([Segment(1, 2), Segment(4, 5)])
        segax = fig.add_state_segments(segs)

        # check that the new axes aligns with the parent
        utils.assert_array_equal(segax.get_position().intervalx,
                                 ax.get_position().intervalx)
        coll = segax.collections[0]
        for seg, path in zip(segs, coll.get_paths()):
            utils.assert_array_equal(path.vertices,
                                     [(seg[0], -.4), (seg[1], -.4),
                                      (seg[1], .4), (seg[0], .4),
                                      (seg[0], -.4)])

        with pytest.raises(ValueError):
            fig.add_state_segments(segs, location='left')

        # test that this doesn't work with non-timeseries axes
        fig = self.FIGURE_CLASS()
        ax = fig.gca(projection='rectilinear')
        with pytest.raises(ValueError) as exc:
            fig.add_state_segments(segs)
        assert str(exc.value) == ("No 'timeseries' Axes found, cannot anchor "
                                  "new segment Axes.")
Example #33
def find_best_frames(ifo, frametype, start, end, **kwargs):
    """Find frames for the given type, replacing with a better type if needed
    """
    # find cache for this frametype
    cache = find_frames(ifo, frametype, start, end, **kwargs)

    # check for gaps in current cache
    span = SegmentList([Segment(start, end)])
    gaps = span - cache_segments(cache)

    # if gaps and using aggregated h(t), check short files
    if abs(gaps) and frametype in SHORT_HOFT_TYPES:
        f2 = SHORT_HOFT_TYPES[frametype]
        vprint("    Gaps discovered in aggregated h(t) type "
               "%s, checking %s\n" % (frametype, f2))
        kwargs['gaps'] = 'ignore'
        cache.extend(
            filter(lambda e: file_segment(e) in gaps,
                   find_frames(ifo, f2, start, end, **kwargs)))
        new = int(abs(gaps - cache_segments(cache)))
        if new:
            vprint("    %ss extra coverage with frametype %s\n" % (new, f2))
        else:
            vprint("    No extra coverage with frametype %s\n" % f2)

    return cache, frametype
Example #34
    def draw(self):
        """Read in all necessary data and generate a figure
        """
        keys = []
        # generate data
        for i, channel in enumerate(self.channels):
            fftkwargs = dict((key, self.fftparams[key][i])
                             for key in self.fftparams
                             if self.fftparams[key][i] is not None)
            rangekwargs = dict((key, self.rangeparams[key][i])
                               for key in self.rangeparams
                               if self.rangeparams[key][i] is not None)
            if self.state and not self.all_data:
                valid = self.state.active
            else:
                valid = SegmentList([self.span])
            rlist = self.range_func(channel,
                                    valid,
                                    query=self.read,
                                    **fftkwargs,
                                    **rangekwargs)
            try:
                keys.append(str(rlist[0].channel))
            except IndexError:
                keys.append(get_range_channel(channel, **rangekwargs))

        # reset channel lists and generate plot
        channels = self.channels
        self.channels = keys
        out = super(RangePlotMixin, self).draw()
        self.channels = channels
        return out
Example #35
    def draw(self):
        """Read in all necessary data, and generate the figure.
        """
        # generate data
        keys = []
        for i, channel in enumerate(self.channels):
            kwargs = dict((key, self.rangeparams[key][i])
                          for key in self.rangeparams
                          if self.rangeparams[key][i] is not None)
            if self.state and not self.all_data:
                valid = self.state.active
            else:
                valid = SegmentList([self.span])
            rlist = get_range(channel, valid, query=self.read, **kwargs)
            try:
                keys.append(rlist[0].channel)
            except IndexError:
                keys.append(get_range_channel(channel, **kwargs))

        # reset channel lists and generate time-series plot
        channels = self.channels
        outputfile = self.outputfile
        self.channels = keys
        out = super(RangePlotMixin, self).draw(outputfile=outputfile)
        self.channels = channels
        return out
Example #36
def cache_overlaps(*caches):
    """Find segments of overlap in the given cache sets
    """
    cache = [e for c in caches for e in c]
    cache.sort(key=lambda e: file_segment(e)[0])
    overlap = SegmentList()
    segments = SegmentList()
    for e in cache:
        seg = file_segment(e)
        ol = SegmentList([seg]) & segments
        if abs(ol):
            overlap.extend(ol)
        segments.append(seg)
    return overlap
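A quick check with two hypothetical single-entry caches whose file spans overlap (assumes `file_segment` parses T050017-style names, as in the module this function comes from):

from lal.utils import CacheEntry

c1 = [CacheEntry.from_T050017('A-B-0-10.tmp')]
c2 = [CacheEntry.from_T050017('A-B-5-10.tmp')]
print(cache_overlaps(c1, c2))  # [5 ... 10)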
Example #37
#"IMC-REFL_DC_OUT_DQ",
#"ALS-X_REFL_ERR_OUT_DQ",
#"PEM-CS_MAG_EBAY_SUSRACK_Y_DQ",
#"ASC-Y_TR_B_NSUM_OUT_DQ",
#"ASC-AS_B_RF45_Q_PIT_OUT_DQ",
#"SUS-OMC_M1_ISIWIT_T_DQ",
#"PSL-ISS_AOM_DRIVER_MON_OUT_DQ",
#"LSC-PRCL_OUT_DQ"]

ifo = sys.argv[1]
bbhdir = sys.argv[2]
bbhfile = glob.glob(os.path.join(bbhdir, ifo + '*.xml.gz'))[0]
omiccachedir = sys.argv[3]

# Read in the segment file
segments = SegmentList.read('/home/albert.wandui/detchar' +
                            '/ER7/jul13/%s_ER7_segments.txt' % ifo)

# Read in the BBH triggers
bbh_trigs = SnglInspiralTable.read(bbhfile)
# We only want the triggers in the given segments
bbh_trigs = bbh_trigs.vetoed(segments)
#bbh_trigs.sort(key=lambda x: x.end_time + x.end_time_ns * 1.0e-9)

# We need to extract the chirp mass and the end times for these triggers
end_times = np.array(bbh_trigs.getColumnByName('end_time')[:], dtype=float) +\
    np.array(bbh_trigs.getColumnByName('end_time_ns')[:], dtype=float) * 1.0e-9

m1 = np.array(bbh_trigs.getColumnByName('mass1')[:])
m2 = np.array(bbh_trigs.getColumnByName('mass2')[:])

M = m1 + m2
Example #38
def load_segs():
    segs = SegmentList.read('L1_ER7_segments.txt')
    return segs
Example #39
 def test_read_segwizard(self):
     active = SegmentList.read(SEGWIZ, coalesce=False)
     self.assertTrue(active == ACTIVE,
                     'SegmentList.read(segwizard) mismatch:\n\n%s\n\n%s'
                     % (ACTIVE, active))
Example #40
def test_write_segments_ascii(ncol):
    with NamedTemporaryFile(suffix='.txt', delete=False) as tmp:
        segments.write_ascii(tmp.name, TEST_SEGMENTS, ncol=ncol)
        tmp.delete = True
        a = SegmentList.read(tmp.name, gpstype=float, strict=False)
        assert a == TEST_SEGMENTS_2
Example #41
#! /usr/bin/env python

from __future__ import (division, print_function)

import sys

from gwpy.segments import SegmentList
from gwpy.table.lsctables import SnglInspiralTable

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

import numpy as np

bbh_file = sys.argv[1]

trigs = SnglInspiralTable.read(bbh_file)
segs = SegmentList.read('L1_ER7_segments.txt')
trigs = trigs.vetoed(segs)

plot = trigs.plot('time', 'snr', edgecolor='none')#, epoch=1117378816)
#plot.set_xlim(1117378816, 1117378816+(24*3600*11.0))
plot.set_ylabel('SNR')
plot.set_yscale('log', nonposy='clip')
plot.set_title('BBH triggers during the ER7 run')
plot.savefig('H1_BBH_SNR.png')


Example #42
                break
        f.close()

    elif args.online_offline == 'online':

        # TRIGGER HANDLING: loop over the range of all days/months/years
        f = open("total_hveto_trigs.txt", "w")  # will hold the collection of all triggers

        # create pattern paths for the trigger segment files to loop over
        # NOTE TO SELF: add an option to specify which trigger files to loop
        # over; default to '*VETO_SEGS_ROUND*.txt', and explain in --help how
        # to supply your own list of trigger files.
        pattern_trigs_hveto = os.path.join(args.directory_path, '{}{:02}',
                                           '{}{:02}{:02}', '*86400-DARM',
                                           '*VETO_SEGS_ROUND*.txt')

        start_time_utc = tconvert(args.gps_start_time)
        end_time_utc = tconvert(args.gps_end_time)
        triggers = SegmentList([])
        while start_time_utc < end_time_utc:
            day = start_time_utc.day
            month = start_time_utc.month
            year = start_time_utc.year

            wildcard_trigs_hveto = pattern_trigs_hveto.format(year, month,
                                                              year, month, day)
            triggers = SegmentList([])
            # grab the trigger files
            for filename in glob.glob(wildcard_trigs_hveto):
                # load the triggers in
                data = SegmentList.read(filename)
                print(data)
                triggers += data
            start_time_utc += datetime.timedelta(days=1)
Example #43
def read_segments(source, coltype=int):
    return SegmentList.read(
        source,
        gpstype=coltype,
        format="segwizard",
    )
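Usage might look like this, for a hypothetical SegWizard-format file (four whitespace-delimited columns: index, start, end, duration):

segs = read_segments('science_segments.txt', coltype=float)
print(segs)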
Example #44
    def process(self, config=GWSummConfigParser(), **kwargs):

        # set params
        self.rounds = None

        if not os.path.isdir(self.directory):
            self.rounds = None
            return

        # get some basic info
        ifo = config.get('DEFAULT', 'ifo')

        # read the configuration
        d = os.path.realpath(self.directory).rstrip('/')
        self.conf = dict()
        confs = glob(os.path.join(d, '%s-HVETO_CONF-*-*.txt' % ifo))
        if len(confs) != 1:
            self.rounds = 'FAIL'
            return
        conffile = confs[0]
        try:
            with open(conffile) as f:
                self.conf = dict()
                lines = f.readlines()[3:]
                for line in lines:
                    try:
                        key, val = line.split(': ', 1)
                        self.conf[key.strip()] = eval(val)
                    except (ValueError, SyntaxError, NameError):
                        pass
        except IOError:
            self.rounds = 'FAIL'
            return
        else:
            etg = self.conf.pop('AUXtype', None)
            if 'DEfnm' in self.conf:
                name = re_quote.sub('', self.conf['DEfnm'])
                self.primary = '%s:%s' % (ifo, name)
                if 'DEtype' in self.conf:
                    hetg = re_quote.sub('', self.conf['DEtype'])
                    if re.search(r'_%s\Z' % hetg, self.primary, re.I):
                        self.primary = self.primary[:-len(hetg)-1]
            else:
                self.primary = None

        # find the segments
        try:
            ce = CacheEntry.from_T050017(conffile)
        except ValueError:
            start = int(self.span[0])
            duration = int(abs(self.span))
            span = self.span
        else:
            start = int(ce.segment[0])
            duration = int(abs(ce.segment))
            span = ce.segment
        try:
            statefile = self.conf['dqfnm']
        except KeyError:
            statefile = '%s-HVETO_DQ_SEGS-%d-%d.txt' % (ifo, start, duration)
        if not os.path.isfile(os.path.join(self.directory, statefile)):
            self.rounds = 'NOSEGMENTS'
            return

        # find the results table
        resultsfile = os.path.join(self.directory, 'summary_stats.txt')
        if not os.path.isfile(resultsfile):
            self.rounds = 'FAIL'
            return

        # determine the Hveto state
        cache = Cache([CacheEntry.from_T050017(
                           os.path.join(self.directory, statefile))])
        segments = SegmentList.read(cache)
        globalv.SEGMENTS[self.states[0].definition] = DataQualityFlag(
            self.states[0].definition, known=[span], active=segments)
        self.finalize_states(config=config, query=False)

        # read results file
        self.rounds = []
        with open(resultsfile, 'r') as f:
            for line in f.readlines():
                self.rounds.append(dict(zip(self.summaryrows,
                                            line.split(' ')[1:])))
                # fix channel name
                c = '%s:%s' % (ifo, self.rounds[-1]['Winning channel'])
                if etg and re.search(r'_%s\Z' % etg, c, re.I):
                    c = c.rsplit('_', 1)[0]
                self.rounds[-1]['Winning channel'] = c

        # read starting triggers
        rawfile = ('%s-HVETO_RAW_TRIGS_ROUND_0-%d-%d.txt'
                   % (ifo, start, duration))
        cache = Cache([CacheEntry.from_T050017(
                           os.path.join(self.directory, rawfile))])
        get_triggers('%s:hveto_start' % ifo, 'hveto', [self.span],
                     config=config, cache=cache, return_=False)
        get_triggers('%s:hveto_vetoed_all' % ifo, 'hveto', [self.span],
                     config=config, cache=Cache(), return_=False)

        for r in range(1, len(self.rounds) + 1):
            # read round veto triggers
            rawfile = ('%s-HVETO_VETOED_TRIGS_ROUND_%d-%d-%d.txt'
                       % (ifo, r, start, duration))
            cache = Cache([CacheEntry.from_T050017(
                               os.path.join(self.directory, rawfile))])
            trigs = get_triggers('%s:hveto_vetoed_round %d' % (ifo, r),
                                 'hveto', [self.span], config=config,
                                 cache=cache)
            add_triggers(trigs, '%s:hveto_vetoed_all,hveto' % ifo,
                         segments=SegmentList([self.span]))
            # read round veto segments
            segfile = ('%s-HVETO_VETO_SEGS_ROUND_%d-%d-%d.txt'
                       % (ifo, r, start, duration))
            cache = Cache([CacheEntry.from_T050017(
                               os.path.join(self.directory, segfile))])
            get_segments('%s:hveto_veto_segs_round_%d' % (ifo, r), [self.span],
                         config=config, cache=cache, return_=False)

        for plot in self.plots:
            if isinstance(plot, HvetoSegmentSummaryPlot):
                plot.find_flags()

        kwargs['trigcache'] = Cache()
        kwargs['segmentcache'] = Cache()
        super(HvetoTab, self).process(config=config, **kwargs)

        # find some plots
        for plot in ['OVERAL_HISTOGRAM', 'OVERAL_EFF_DT'][::-1]:
            filename = (
                '%s-HVETO_%s-%d-%d.png' % (ifo, plot, start, duration))
            plotfile = os.path.join(self.directory, filename)
            if os.path.isfile(plotfile):
                p = SummaryPlot(os.path.join(self.url, filename), new=False)
                p.state = self.states[0]
                self.plots.insert(0, p)

        # delete data from archive
        del globalv.SEGMENTS[self.states[0].definition]
        for row in range(1, len(self.rounds) + 1):
            del globalv.SEGMENTS['%s:hveto_veto_segs_round_%s' % (ifo, row)]