Example #1
def find_kw(channel, start, end, base=None):
    """Find KW trigger XML files
    """
    span = Segment(to_gps(start), to_gps(end))
    channel = get_channel(channel)
    ifo = channel.ifo
    if base is None and channel.name.split(':', 1)[-1] == 'GDS-CALIB_STRAIN':
        tag = '%s-KW_HOFT' % ifo[0].upper()
        base = '/gds-%s/dmt/triggers/%s' % (ifo.lower(), tag)
    elif base is None:
        tag = '%s-KW_TRIGGERS' % ifo[0].upper()
        base = '/gds-%s/dmt/triggers/%s' % (ifo.lower(), tag)
    gps5 = int('%.5s' % start)
    end5 = int('%.5s' % end)
    out = Cache()
    append = out.append
    while gps5 <= end5:
        trigglob = os.path.join(
            base, '%s-%d' % (tag, gps5), '%s-*-*.xml' % tag)
        found = glob.glob(trigglob)
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
        gps5 += 1
    out.sort(key=lambda e: e.path)
    vprint("    Found %d files for %s (KW)\n"
           % (len(out), channel.ndsname))
    return out
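
A minimal usage sketch for the function above; the channel name and GPS span are placeholders, and the gwpy/gwsumm helpers the function relies on (Segment, to_gps, get_channel, Cache, CacheEntry, vprint) are assumed to be importable in the enclosing module:

# Hypothetical call: gather KW trigger files for one hour of L1 h(t) data
kw_cache = find_kw('L1:GDS-CALIB_STRAIN', 1187000000, 1187003600)
for entry in kw_cache:
    print(entry.path)  # each entry is a CacheEntry parsed from a T050017 name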
Example #2
def find_dmt_omega(channel, start, end, base=None):
    """Find DMT-Omega trigger XML files
    """
    span = Segment(to_gps(start), to_gps(end))
    channel = get_channel(channel)
    ifo = channel.ifo
    if base is None and channel.name.split(':', 1)[-1] == 'GDS-CALIB_STRAIN':
        base = '/gds-%s/dmt/triggers/%s-HOFT_Omega' % (
            ifo.lower(), ifo[0].upper())
    elif base is None:
        raise NotImplementedError("This method doesn't know how to locate DMT "
                                  "Omega trigger files for %r" % str(channel))
    gps5 = int('%.5s' % start)
    end5 = int('%.5s' % end)
    out = Cache()
    append = out.append
    while gps5 <= end5:
        trigglob = os.path.join(
            base, str(gps5),
            '%s-%s_%s_%s_OmegaC-*-*.xml' % (
                ifo, channel.system, channel.subsystem, channel.signal))
        found = glob.glob(trigglob)
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
        gps5 += 1
    out.sort(key=lambda e: e.path)
    vprint("    Found %d files for %s (DMT-Omega)\n"
           % (len(out), channel.ndsname))
    return out
Example #3
def find_trigger_urls(channel, etg, gpsstart, gpsend, verbose=False):
    """Find the paths of trigger files that represent the given
    observatory, channel, and ETG (event trigger generator) for a given
    GPS [start, end) segment.
    """
    if etg.lower() == 'omicron':
        etg = '?micron'

    # construct search
    span = Segment(gpsstart, gpsend)
    ifo, channel = channel.split(':', 1)
    trigtype = "%s_%s" % (channel, etg.lower())
    epoch = '*'
    searchbase = os.path.join(TRIGFIND_BASE_PATH, epoch, ifo, trigtype)
    gpsdirs = range(int(str(gpsstart)[:5]), int(str(gpsend)[:5]) + 1)
    trigform = ('%s-%s_%s-%s-*.xml*' %
                (ifo, re_dash.sub('_', channel), etg.lower(), '[0-9]' * 10))

    # perform and cache results
    out = Cache()
    for gpsdir in gpsdirs:
        gpssearchpath = os.path.join(searchbase, str(gpsdir), trigform)
        if verbose:
            gprint("Searching %s..." % os.path.split(gpssearchpath)[0],
                   end=' ')
        gpscache = Cache(
            map(CacheEntry.from_T050017, glob.glob(gpssearchpath)))
        sieved = gpscache.sieve(segment=span)
        out.extend(sieved)
        if verbose:
            gprint("%d found" % len(sieved))
    out.sort(key=lambda e: e.path)

    return out
Example #4
def read_cache(lcf, coltype=LIGOTimeGPS):
    """Read a LAL-format cache file into memory as a `Cache`

    Parameters
    ----------
    lcf : `str`, `file`
        input file or file path to read

    coltype : `LIGOTimeGPS`, `int`, optional
        `type` for GPS times

    Returns
    -------
    cache : :class:`glue.lal.Cache`
        a cache object, representing each line in the file as a
        :class:`~lal.utils.CacheEntry`
    """
    from glue.lal import Cache
    # open file
    if not isinstance(lcf, FILE_LIKE):
        with open(lcf, 'r') as f:
            return read_cache(f, coltype=coltype)

    # read file
    out = Cache()
    for line in lcf:
        if isinstance(line, bytes):
            line = line.decode('utf-8')
        out.append(out.entry_class(line, coltype=coltype))
    return out
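
A short sketch of driving this reader, assuming a LAL-format cache file named events.lcf exists on disk; the file name and its contents are illustrative:

# events.lcf might contain lines such as:
# H H1_TEST 1187000000 4096 file:///data/H-H1_TEST-1187000000-4096.xml.gz
cache = read_cache('events.lcf', coltype=int)
print(len(cache), cache[0].segment)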
Example #5
 def process_state(self, state, *args, **kwargs):
     config = kwargs.get('config', None)
     # first get all of the segments
     if self.segmentfile:
         get_segments(self.flags, state, config=config,
                      cache=self.segmentfile, return_=False)
         segs = get_segments(self.metaflag, state, padding=self.padding,
                             config=config, query=False)
         kwargs['segmentcache'] = Cache()
     else:
         segs = get_segments(self.metaflag, state, config=config,
                             segdb_error=kwargs.get('segdb_error', 'raise'),
                             padding=self.padding)
     # then get all of the triggers
     if self.channel:
         cache = kwargs.pop('trigcache', None)
         before = get_triggers(str(self.channel), self.etg, state,
                               config=config, cache=cache,
                               format=self.etgformat)
     else:
         before = None
     # then apply all of the metrics
     self.results[state] = evaluate_flag(
         segs, triggers=before, metrics=self.metrics, injections=None,
         minduration=self.minseglength, vetotag=str(state),
         channel=str(self.channel), etg=self.etg)[0]
     vprint("    Veto evaluation results:\n")
     for metric, val in self.results[state].iteritems():
         vprint('        %s: %s\n' % (metric, val))
     # then pass to super to make the plots
     kwargs['trigcache'] = Cache()
     return super(FlagTab, self).process_state(state, *args, **kwargs)
Example #6
def open_cache(lcf):
    """Read a LAL-format cache file into memory as a
    :class:`glue.lal.Cache`.
    """
    if isinstance(lcf, file):
        return Cache.fromfile(lcf)
    else:
        with open(lcf, 'r') as f:
            return Cache.fromfile(f)
Example #7
def open_cache(lcf):
    """Read a LAL-format cache file into memory as a
    :class:`glue.lal.Cache`.
    """
    if isinstance(lcf, file):
        return Cache.fromfile(lcf)
    else:
        with open(lcf, 'r') as f:
            return Cache.fromfile(f)
def FrameCachetoLALCache(fcache):

  lcache = LALCache()

  files = fcache.get_files()

  for f in files:
    lcache.append(LALCacheEntry.from_T050017(f))
  
  return lcache
Example #9
def create_cache(ifo, start, end):
    """Find .gwf files and create cache. Will first look in the llhoft, and
    if the frames have expired from llhoft, will call gwdatafind.

    Parameters
    ----------
    ifo : str
        Interferometer name (e.g. ``H1``).
    start, end: int or float
        GPS start and end times desired.

    Returns
    -------
    :class:`glue.lal.Cache`

    Example
    -------
    >>> create_cache('H1', 1198800018, 1198800618)
    [<glue.lal.CacheEntry at 0x7fbae6b71278>,
      <glue.lal.CacheEntry at 0x7fbae6ae5b38>,
      <glue.lal.CacheEntry at 0x7fbae6ae5c50>,
     ...
      <glue.lal.CacheEntry at 0x7fbae6b15080>,
      <glue.lal.CacheEntry at 0x7fbae6b15828>]

    """
    pattern = app.conf['llhoft_glob'].format(detector=ifo)
    filenames = glob.glob(pattern)
    cache = Cache.from_urls(filenames)

    try:
        cache_starttime = int(
            list(cache.to_segmentlistdict().values())[0][0][0])
    except IndexError:
        log.exception('Files do not exist in llhoft_glob')
        return cache  # returns empty cache

    if start >= cache_starttime:  # required data is in llhoft
        return cache

    # otherwise, required data has left llhoft
    high_latency = app.conf['high_latency_frame_types'][ifo]
    urls = find_urls(ifo[0], high_latency, start, end)
    if urls:
        return Cache.from_urls(urls)

    # required data not in high latency frames
    low_latency = app.conf['low_latency_frame_types'][ifo]
    urls = find_urls(ifo[0], low_latency, start, end)
    if not urls:  # required data not in low latency frames
        log.error('This data cannot be found, or does not exist.')

    return Cache.from_urls(urls)
Example #10
    def make_cache():
        try:
            from lal.utils import CacheEntry
        except ImportError as e:
            pytest.skip(str(e))

        segs = SegmentList()
        cache = Cache()
        for seg in [(0, 1), (1, 2), (4, 5)]:
            d = seg[1] - seg[0]
            f = 'A-B-%d-%d.tmp' % (seg[0], d)
            cache.append(CacheEntry.from_T050017(f, coltype=int))
            segs.append(Segment(*seg))
        return cache, segs
Example #11
def find_online_cache(start, end, channel, **kwargs):
    """Find ExcessPower files from the online GSTLAL analysis
    for the given span

    @param start
        GPS start time for search
    @param end
        GPS end time for search
    @param channel UNDOCUMENTED
    @param kwargs UNDOCUMENTED
    'ifo' observatory for search
    'clustering'
        tag for clustering stage to search, default: unclustered
    'check_files'
        check that the returned files can be read on disk, default False
    """
    out = Cache()

    # set base directory
    directory = kwargs.pop("directory", ER3_RUN_DIRECTORY)
    ifo, channel = channel.split(":", 1)
    channel_dir = os.path.join(directory, ifo, "%s_excesspower" % channel)

    glob_query = "%s-%s_excesspower-*.xml" % (ifo, channel.replace("-", "_"))

    span = Segment(start, end)

    # optimise
    append = out.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    pjoin = os.path.join
    intersects = span.intersects
    from_T050017 = CacheEntry.from_T050017


    # loop over days gathering files
    t = start // 1e4 * 1e4
    while t < end:
        gps_dir = os.path.join(channel_dir, "%.6s" % t)
        if os.path.isdir(gps_dir):
            file_list = glob(os.path.join(gps_dir, glob_query))
            for f in file_list:
                e = from_T050017(f)
                if intersects(e.segment):
                    append(e)
        t += 1e4

    out.sort(key=lambda e: e.segment[0])
    return out
Example #12
def find_trigger_urls(channel, etg, gpsstart, gpsend, verbose=False):
    """Find the paths of trigger files that represent the given
    observatory, channel, and ETG (event trigger generator) for a given
    GPS [start, end) segment.
    """
    if etg.lower() == 'omicron':
        etg = '?micron'

    # construct search
    span = Segment(gpsstart, gpsend)
    ifo, channel = channel.split(':', 1)
    trigtype = "%s_%s" % (channel, etg.lower())
    epoch = '*'
    searchbase = os.path.join(TRIGFIND_BASE_PATH, epoch, ifo, trigtype)
    gpsdirs = range(int(str(gpsstart)[:5]), int(str(gpsend)[:5])+1)
    trigform = ('%s-%s_%s-%s-*.xml*'
                % (ifo, re_dash.sub('_', channel), etg.lower(), '[0-9]'*10))

    # perform and cache results
    out = Cache()
    for gpsdir in gpsdirs:
        gpssearchpath = os.path.join(searchbase, str(gpsdir), trigform)
        if verbose:
            gprint("Searching %s..." % os.path.split(gpssearchpath)[0],
                   end=' ')
        gpscache = Cache(map(CacheEntry.from_T050017,
                             glob.glob(gpssearchpath)))
        sieved = gpscache.sieve(segment=span)
        out.extend(sieved)
        if verbose:
            gprint("%d found" % len(sieved))
    out.sort(key=lambda e: e.path)

    return out
Example #13
def find_online_cache(start, end, channel, **kwargs):
    """Find ExcessPower files from the online GSTLAL analysis
    for the given span

    @param start
        GPS start time for search
    @param end
        GPS end time for search
    @param channel UNDOCUMENTED
    @param kwargs UNDOCUMENTED
    'ifo' observatory for search
    'clustering'
        tag for clustering stage to search, default: unclustered
    'check_files'
        check that the returned files can be read on disk, default False
    """
    out = Cache()

    # set base directory
    directory = kwargs.pop("directory", ER3_RUN_DIRECTORY)
    ifo, channel = channel.split(":", 1)
    channel_dir = os.path.join(directory, ifo, "%s_excesspower" % channel)

    glob_query = "%s-%s_excesspower-*.xml" % (ifo, channel.replace("-", "_"))

    span = Segment(start, end)

    # optimise
    append = out.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    pjoin = os.path.join
    intersects = span.intersects
    from_T050017 = CacheEntry.from_T050017


    # loop over days gathering files
    t = start // 1e4 * 1e4
    while t < end:
        gps_dir = os.path.join(channel_dir, "%.6s" % t)
        if os.path.isdir(gps_dir):
            file_list = glob(os.path.join(gps_dir, glob_query))
            for f in file_list:
                e = from_T050017(f)
                if intersects(e.segment):
                    append(e)
        t += 1e4

    out.sort(key=lambda e: e.segment[0])
    return out
Example #14
def find_trigger_urls(channel, etg, gpsstart, gpsend, verbose=False):
    """Find the paths of trigger files that represent the given
    observatory, channel, and ETG (event trigger generator) for a given
    GPS [start, end) segment.
    """
    if etg.lower().startswith('omicron'):
        etg = '?' + etg[1:]

    # construct search
    gpsstart = to_gps(gpsstart).seconds
    gpsend = to_gps(gpsend).seconds
    span = Segment(gpsstart, gpsend)
    ifo, channel = channel.split(':', 1)
    trigtype = "%s_%s" % (channel, etg.lower())
    epoch = '*'
    searchbase = os.path.join(TRIGFIND_BASE_PATH, epoch, ifo, trigtype)
    gpsdirs = range(int(str(gpsstart)[:5]), int(str(gpsend)[:5])+1)
    trigform = ('%s-%s_%s-%s-*.xml*'
                % (ifo, re_dash.sub('_', channel), etg.lower(), '[0-9]'*10))

    # test for channel-level directory
    if not glob.glob(searchbase):
        raise ValueError("No channel-level directory found at %s. Either the "
                         "channel name or ETG names are wrong, or this "
                         "channel is not configured for this ETG."
                         % searchbase)

    # perform and cache results
    out = Cache()
    append = out.append
    for gpsdir in gpsdirs:
        gpssearchpath = os.path.join(searchbase, str(gpsdir), trigform)
        if verbose:
            gprint("Searching %s..." % os.path.split(gpssearchpath)[0],
                   end=' ')
        found = set(map(
            os.path.realpath,
            glob.glob(os.path.join(searchbase, str(gpsdir), trigform))))
        n = 0
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
                n += 1
        if verbose:
            gprint("%d found" % n)
    out.sort(key=lambda e: e.path)

    return out
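
Several of these finders lean on the LIGO T050017 naming convention, <OBS>-<DESCRIPTION>-<GPS-START>-<DURATION>.<ext>, to recover a time segment from a file path without opening the file. A small sketch of what CacheEntry.from_T050017 extracts, assuming lal.utils is installed; the path is made up:

from lal.utils import CacheEntry

entry = CacheEntry.from_T050017('/data/H-H1_OMICRON-1187000000-4096.xml.gz')
print(entry.observatory)  # 'H'
print(entry.description)  # 'H1_OMICRON'
print(entry.segment)      # spans [1187000000 ... 1187004096)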
Example #15
def find_trigger_urls(channel, etg, gpsstart, gpsend, verbose=False):
    """Find the paths of trigger files that represent the given
    observatory, channel, and ETG (event trigger generator) for a given
    GPS [start, end) segment.
    """
    if etg.lower().startswith('omicron'):
        etg = '?' + etg[1:]

    # construct search
    gpsstart = to_gps(gpsstart).seconds
    gpsend = to_gps(gpsend).seconds
    span = Segment(gpsstart, gpsend)
    ifo, channel = channel.split(':', 1)
    trigtype = "%s_%s" % (channel, etg.lower())
    epoch = '*'
    searchbase = os.path.join(TRIGFIND_BASE_PATH, epoch, ifo, trigtype)
    gpsdirs = range(int(str(gpsstart)[:5]), int(str(gpsend)[:5]) + 1)
    trigform = ('%s-%s_%s-%s-*.xml*' %
                (ifo, re_dash.sub('_', channel), etg.lower(), '[0-9]' * 10))

    # test for channel-level directory
    if not glob.glob(searchbase):
        raise ValueError("No channel-level directory found at %s. Either the "
                         "channel name or ETG names are wrong, or this "
                         "channel is not configured for this ETG." %
                         searchbase)

    # perform and cache results
    out = Cache()
    append = out.append
    for gpsdir in gpsdirs:
        gpssearchpath = os.path.join(searchbase, str(gpsdir), trigform)
        if verbose:
            gprint("Searching %s..." % os.path.split(gpssearchpath)[0],
                   end=' ')
        found = set(
            map(os.path.realpath,
                glob.glob(os.path.join(searchbase, str(gpsdir), trigform))))
        n = 0
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
                n += 1
        if verbose:
            gprint("%d found" % n)
    out.sort(key=lambda e: e.path)

    return out
Example #16
def checkifo(event):
    server, repository = connect_gitlab()
    gitlab_events = gitlab.find_events(repository, subset=event)

    for event in gitlab_events:
        if "event time" not in event.event_object.meta:
            print(f"Time not found {event.event_object.name}")
            continue
        time = event.event_object.meta['event time']
        gpsstart = time - 600
        gpsend = time + 600
        bits = ['Bit 0', 'Bit 1', 'Bit 2']

        active_ifo = []
        for ifo in ["L1", "H1", "V1"]:
            frametypes = event.event_object.meta['data']['frame-types']
            urls = find_urls(site=f"{ifo[0]}",
                             frametype=frametypes[ifo],
                             gpsstart=gpsstart,
                             gpsend=gpsend)
            datacache = Cache.from_urls(urls)
            if len(datacache) == 0:
                print(f"No {ifo} data found.")
                continue

            if "state vector" in event.meta:
                state_vector_channel = event.meta['state vector']
            else:
                state_vector_channel = ast.literal_eval(
                    config.get("data", "state-vector"))

            state = gwpy.timeseries.StateVector.read(
                datacache,
                state_vector_channel[ifo],
                start=gpsstart,
                end=gpsend,
                # pad the data so that errors are not raised even if the
                # found data are not continuous
                pad=0,
            )
            if not np.issubdtype(state.dtype, np.unsignedinteger):
                # if data are not unsigned integers, cast to them now so that
                # we can determine the bit content for the flags
                state = state.astype(
                    "uint32",
                    casting="unsafe",
                    subok=True,
                    copy=False,
                )
            flags = state.to_dqflags()

            segments = flags[bits[0]].active
            for bit in bits:
                segments -= ~flags[bit].active

            if len(segments) > 0:
                active_ifo += [ifo]
        print(event.event_object.name)
        if event.event_object.meta['interferometers'] != active_ifo:
            print(f"Gitlab data\t{event.event_object.meta['interferometers']}")
            print(f"Recommended IFOS\t{active_ifo}")
        event.event_object.meta['interferometers'] = active_ifo
        event.update_data()
Example #17
 def process(self, *args, **kwargs):
     error = None
     # read the cache files
     if isinstance(self.cache, str) and os.path.isfile(self.cache):
         with open(self.cache, 'r') as fobj:
             try:
                  self.cache = Cache.fromfile(fobj).sieve(
                      segment=self.span)
             except ValueError as e:
                 if "could not convert \'\\n\' to CacheEntry" in str(e):
                     error = 'could not parse event cache file'
                 else:
                     raise
     elif isinstance(self.cache, str):
         error = 'could not locate event cache file'
         warn("Cache file %s not found." % self.cache)
     elif self.cache is not None and not isinstance(self.cache, Cache):
         raise ValueError("Cannot parse EventTriggerTab.cache of type %r"
                          % type(self.cache))
     # push error to all states for HTML writing
     if error:
         for state in self.states:
             self.error[state] = (
                 'danger', 'This analysis seems to have failed: %s.' % error)
      # only process if the cache file was found
     if kwargs.get('trigcache', None) is None:
         kwargs['trigcache'] = self.cache
     try:
         super(EventTriggerTab, self).process(*args, **kwargs)
     except IOError as e:
         warn('Caught %s: %s' % (type(e).__name__, str(e)))
         msg = "GWSumm failed to process these data.<pre>%s</pre>" % str(e)
         for state in self.states:
              self.error[state] = ('danger', msg)
Example #18
 def process(self, *args, **kwargs):
     error = None
     # read the cache files
     if isinstance(self.cache, string_types) and os.path.isfile(self.cache):
         with open(self.cache, 'r') as fobj:
             try:
                  self.cache = Cache.fromfile(fobj).sieve(
                      segment=self.span)
             except ValueError as e:
                 if "could not convert \'\\n\' to CacheEntry" in str(e):
                     error = 'could not parse event cache file'
                 else:
                     raise
     elif isinstance(self.cache, string_types):
         error = 'could not locate event cache file'
         warn("Cache file %s not found." % self.cache)
     elif self.cache is not None and not isinstance(self.cache, Cache):
         raise ValueError("Cannot parse EventTriggerTab.cache of type %r"
                          % type(self.cache))
     # push error to all states for HTML writing
     if error:
         for state in self.states:
             self.error[state] = (
                 'danger',
                 'This analysis seems to have failed: %s.' % error)
      # only process if the cache file was found
     if kwargs.get('trigcache', None) is None:
         kwargs['trigcache'] = self.cache
     try:
         super(EventTriggerTab, self).process(*args, **kwargs)
     except IOError as e:
         msg = "GWSumm failed to process these data.<pre>%s</pre>" % str(e)
         for state in self.states:
             self.error[state] = ('danger', msg)
Example #19
def read_cache(cache, segments, etg, nproc=1, timecolumn=None, **kwargs):
    """Read a table of events from a cache

    This function is mainly meant for use from the `get_triggers` method

    Parameters
    ----------
    cache : :class:`glue.lal.Cache`
        the formatted list of files to read
    segments : `~gwpy.segments.SegmentList`
        the list of segments to read
    etg : `str`
        the name of the trigger generator that created the files
    nproc : `int`, optional
        the number of parallel processes to use when reading
    timecolumn : `str`, optional
        the name of the time column in these files, recorded in the
        metadata of the returned table
    **kwargs
        other keyword arguments are passed to the `EventTable.read` or
        `{tableclass}.read` methods

    Returns
    -------
    table : `~gwpy.table.EventTable`, `None`
        a table of events, or `None` if the cache has no overlap with
        the segments
    """
    if isinstance(cache, Cache):
        cache = cache.sieve(segmentlist=segments)
        cache = cache.checkfilesexist()[0]
        cache.sort(key=lambda x: x.segment[0])
        cache = cache.pfnlist()  # some readers only like filenames
    else:
        cache = [urlparse(url).path for url in cache]
    if etg == 'pycbc_live':  # remove empty HDF5 files
        cache = filter_pycbc_live_files(cache, ifo=kwargs['ifo'])

    if len(cache) == 0:
        return

    # read triggers
    table = EventTable.read(cache, **kwargs)

    # store read keywords in the meta table
    if timecolumn:
        table.meta['timecolumn'] = timecolumn

    # get back from cache entry
    if isinstance(cache, CacheEntry):
        cache = Cache([cache])

    # append new events to existing table
    try:
        csegs = cache_segments(cache) & segments
    except (AttributeError, TypeError, ValueError):
        csegs = SegmentList()
    table.meta['segments'] = csegs

    if timecolumn:  # already filtered on-the-fly
        return table
    # filter now
    return keep_in_segments(table, segments, etg)
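
A sketch of the expected calling pattern, assuming a pre-built glue.lal.Cache of trigger files (trigger_cache below is a placeholder); the read keywords shown are illustrative of what gets forwarded to EventTable.read:

from gwpy.segments import Segment, SegmentList

segs = SegmentList([Segment(1187000000, 1187004096)])
table = read_cache(trigger_cache, segs, 'omicron',
                   format='ligolw', tablename='sngl_burst')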
Example #20
def find_trigger_urls(channel, etg, gpsstart, gpsend, verbose=False, **kwargs):
    """Find the paths of trigger files that represent the given
    observatory, channel, and ETG (event trigger generator) for a given
    GPS [start, end) segment.
    """
    # special case for KW
    if etg.lower() in ['kw', 'kleinewelle']:
        from .kw import find_dmt_cache
        ifo = channel.split(':')[0]
        kwargs.setdefault('extension', 'xml')
        kwargs.setdefault('check_files', True)
        return find_dmt_cache(gpsstart, gpsend, ifo, **kwargs)
    elif etg.lower() == 'omega':
        from .omega import find_dmt_cache
        ifo = channel.split(':')[0]
        kwargs.setdefault('check_files', True)
        return find_dmt_cache(gpsstart, gpsend, ifo, **kwargs)
    elif etg.lower() == 'omicron':
        etg = '?micron'

    # construct search
    span = segments.segment(gpsstart, gpsend)
    ifo, channel = channel.split(':', 1)
    trigtype = "%s_%s" % (channel, etg.lower())
    epoch = '*'
    searchbase = os.path.join(TRIGFIND_BASE_PATH, epoch, ifo, trigtype)
    gpsdirs = numpy.arange(int(str(gpsstart)[:5]), int(str(gpsend)[:5])+1)
    trigform = ('%s-%s_%s-%s-*.xml*'
                % (ifo, re.sub('-', '_', channel), etg.lower(), '[0-9]'*10))

    # perform and cache results
    out = Cache()
    for gpsdir in gpsdirs:
        gpssearchpath = os.path.join(searchbase, str(gpsdir), trigform)
        if verbose:
            sys.stdout.write("Searching %s..."
                             % os.path.split(gpssearchpath)[0])
            sys.stdout.flush()
        gpscache = Cache(map(CacheEntry.from_T050017,
                             glob.glob(gpssearchpath)))
        sieved = gpscache.sieve(segment=span)
        out.extend(sieved)
        if verbose:
            sys.stdout.write(" %d found\n" % len(sieved))
    out.sort(key=lambda e: e.path)

    return out
Example #21
def read_cache(cache, segments, etg, nproc=1, timecolumn=None, **kwargs):
    """Read a table of events from a cache

    This function is mainly meant for use from the `get_triggers` method

    Parameters
    ----------
    cache : :class:`glue.lal.Cache`
        the formatted list of files to read
    segments : `~gwpy.segments.SegmentList`
        the list of segments to read
    etg : `str`
        the name of the trigger generator that created the files
    nproc : `int`, optional
        the number of parallel processes to use when reading
    timecolumn : `str`, optional
        the name of the time column in these files, recorded in the
        metadata of the returned table
    **kwargs
        other keyword arguments are passed to the `EventTable.read` or
        `{tableclass}.read` methods

    Returns
    -------
    table : `~gwpy.table.EventTable`, `None`
        a table of events, or `None` if the cache has no overlap with
        the segments
    """
    if isinstance(cache, Cache):
        cache = cache.sieve(segmentlist=segments)
        cache = cache.checkfilesexist()[0]
        cache.sort(key=lambda x: x.segment[0])
        if etg == 'pycbc_live':  # remove empty HDF5 files
            cache = type(cache)(
                filter_pycbc_live_files(cache, ifo=kwargs['ifo']))
    # if no files, skip
    if len(cache) == 0:
        return
    # use multiprocessing except for ascii reading
    # (since astropy doesn't allow it)
    if kwargs.get('format', 'none').startswith('ascii.'):
        cache = cache.pfnlist()
    else:
        kwargs['nproc'] = nproc
    if len(cache) == 1:
        cache = cache[0]

    # read triggers
    table = EventTable.read(cache, **kwargs)
    if timecolumn:
        table.meta['timecolumn'] = timecolumn

    # get back from cache entry
    if isinstance(cache, CacheEntry):
        cache = Cache([cache])
    # append new events to existing table
    try:
        csegs = cache_segments(cache)
    except (AttributeError, TypeError):
        csegs = SegmentList()
    table.meta['segments'] = csegs
    return keep_in_segments(table, segments, etg)
Example #22
 def test_get_coherence_spectrum(self):
     cache = Cache([e for c in self.FRAMES for e in self.FRAMES[c]])
     a = data.get_coherence_spectrogram(
         ('H1:LOSC-STRAIN', 'L1:LOSC-STRAIN'),
         LOSC_SEGMENTS,
         cache=cache,
         stride=4,
         fftlength=2,
         overlap=1)
Example #23
    def process(self,
                nds=None,
                nproc=1,
                config=GWSummConfigParser(),
                datacache=None,
                datafind_error='raise',
                **kwargs):
        """Process time accounting data
        """
        for p in self.plots:
            if p.outputfile in globalv.WRITTEN_PLOTS:
                p.new = False

        # get archived GPS time
        tag = self.segmenttag % list(self.modes)[0]
        try:
            lastgps = globalv.SEGMENTS[tag].known[-1][-1]
        except (IndexError, KeyError):
            lastgps = self.span[0]

        # get data
        new = SegmentList([type(self.span)(lastgps, self.span[1])])
        data = get_timeseries(self.channel,
                              new,
                              config=config,
                              nds=nds,
                              dtype='int16',
                              datafind_error=datafind_error,
                              nproc=nproc,
                              cache=datacache)
        vprint("    All time-series data loaded\n")

        # find segments
        for ts in data:
            modesegments = DataQualityDict()
            for idx, name in self.modes.items():
                # get segments for state
                tag = self.segmenttag % idx
                instate = ts == idx * ts.unit
                modesegments[tag] = instate.to_dqflag(name=name.strip('*'))
                # append segments for group
                group = int(idx // 10. * 10)
                gtag = self.segmenttag % group
                try:
                    modesegments[gtag] += modesegments[tag]
                except KeyError:
                    modesegments[gtag] = modesegments[tag]
            globalv.SEGMENTS += modesegments

        kwargs['segdb_error'] = 'ignore'
        super(AccountingTab, self).process(config=config,
                                           nds=nds,
                                           nproc=nproc,
                                           segmentcache=Cache(),
                                           datacache=datacache,
                                           datafind_error=datafind_error,
                                           **kwargs)
Example #24
 def _query(self, channel, start, end):
     "Do we know where the frame file is?"
     if segment(start, end) in self._remotecoverage:
         return True
     urls = query_LDR(self.host, self.port, channel[0], self.frametype,
                      start, end, urlType="file")
     if urls:
         new = Cache.from_urls(urls, coltype=int)
         new.sort(key=operator.attrgetter("segment"))
         self.add_cache(new)
     return segment(start, end) in self._remotecoverage
Example #25
def find_contiguous(*caches):
    """Separate one or more caches into sets of contiguous caches

    Parameters
    ----------
    *caches
        one or more :class:`~glue.lal.Cache` objects

    Returns
    -------
    caches : `iter` of :class:`~glue.lal.Cache`
        an iterable yielding each contiguous cache
    """
    try:
        flat = flatten(*caches)
    except IndexError:
        flat = Cache()
    for segment in cache_segments(flat):
        yield flat.sieve(segment=segment)
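
A usage sketch, assuming the module-level helpers flatten and cache_segments referenced above; the T050017-style file names are made up:

from lal.utils import CacheEntry

cache = Cache(map(CacheEntry.from_T050017,
                  ['A-B-0-1.gwf', 'A-B-1-1.gwf', 'A-B-4-1.gwf']))
# yields one cache covering [0, 2) and another covering [4, 5)
for contig in find_contiguous(cache):
    print([e.path for e in contig])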
Example #26
    def process(self, *args, **kwargs):
        # read the segment files
        if os.path.isfile(self.segmentfile):
            segs = DataQualityFlag.read(self.segmentfile, coalesce=False)
            self.states[0].known = segs.known
            self.states[0].active = segs.active
            self.states[0].ready = True
        else:
            warn('Segment file %s not found.' % self.segmentfile)
            return
        if len(self.states[0].active) == 0:
            warn('No segments analysed by daily ahope.')
            return
        # read the cache files
        if os.path.isfile(self.inspiralcachefile):
            with open(self.inspiralcachefile, 'r') as fobj:
                try:
                    self.inspiralcache = Cache.fromfile(fobj).sieve(
                        segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.inspiralcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.inspiralcachefile)
            return
        if os.path.isfile(self.tmpltbankcachefile):
            with open(self.tmpltbankcachefile, 'r') as fobj:
                try:
                    self.tmpltbankcache = Cache.fromfile(fobj).sieve(
                        segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.tmpltbankcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.tmpltbankcachefile)
            self.tmpltbankcache = Cache()

        # only process if the cache file was found
        super(DailyAhopeTab, self).process(*args, **kwargs)
Example #27
 def setup_class(cls):
     cls.FRAMES = {}
     cls._tempdir = tempfile.mkdtemp(prefix='gwsumm-test-data-')
     # get data
     for channel in LOSC_DATA:
         cls.FRAMES[channel] = Cache()
         for gwf in LOSC_DATA[channel]:
             target = os.path.join(cls._tempdir, os.path.basename(gwf))
             download(gwf, target)
             cls.FRAMES[channel].append(CacheEntry.from_T050017(target))
Example #28
    def process(self, *args, **kwargs):
        # read the segment files
        if os.path.isfile(self.segmentfile):
            segs = DataQualityFlag.read(self.segmentfile, coalesce=False)
            self.states[0].known = segs.known
            self.states[0].active = segs.active
            self.states[0].ready = True
        else:
            warn('Segment file %s not found.' % self.segmentfile)
            return
        if len(self.states[0].active) == 0:
            warn('No segments analysed by daily ahope.')
            return
        # read the cache files
        if os.path.isfile(self.inspiralcachefile):
            with open(self.inspiralcachefile, 'r') as fobj:
                try:
                    self.inspiralcache = Cache.fromfile(fobj).sieve(
                                             segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.inspiralcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.inspiralcachefile)
            return
        if os.path.isfile(self.tmpltbankcachefile):
            with open(self.tmpltbankcachefile, 'r') as fobj:
                try:
                    self.tmpltbankcache = Cache.fromfile(fobj).sieve(
                                              segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.tmpltbankcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.tmpltbankcachefile)
            self.tmpltbankcache = Cache()

        # only process if the cache file was found
        super(DailyAhopeTab, self).process(*args, **kwargs)
Example #29
def find_trigger_files(channel, etg, segments, **kwargs):
    """Find trigger files for a given channel and ETG

    Parameters
    ----------
    channel : `str`
        name of channel to find

    etg : `str`
        name of event trigger generator to find

    segments : :class:`~glue.segments.segmentlist`
        list of segments to find

    **kwargs
        all other keyword arguments are passed to
        `trigfind.find_trigger_urls`

    Returns
    -------
    cache : :class:`~glue.lal.Cache`
        cache of trigger file paths

    See Also
    --------
    trigfind.find_trigger_urls
        for details on file discovery
    """
    cache = Cache()
    for start, end in segments:
        try:
            cache.extend(trigfind.find_trigger_urls(channel, etg, start,
                                                    end, **kwargs))
        except ValueError as e:
            if str(e).lower().startswith('no channel-level directory'):
                warnings.warn(str(e))
            else:
                raise
    return cache.unique()
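
A sketch of the calling pattern; the channel name, ETG, and segment list are placeholders:

# Hypothetical: search two disjoint segments for Omicron trigger files
segs = [(1187000000, 1187004096), (1187008192, 1187012288)]
cache = find_trigger_files('L1:GDS-CALIB_STRAIN', 'omicron', segs)
print(len(cache), 'unique trigger files')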
Example #30
def get_omicron_triggers(channel, ifo, segments, cachefile):
  print "Reading channel: %s\n" %channel
  with open(cachefile, 'r') as f:
    mycache = Cache.fromfile(f)
  # Let's try and catch failed reads
  try:
    triggers = get_triggers(ifo + ':' + channel, 'sngl_burst', segments,\
        cache=mycache)
  except Exception:
    print "No Omicron triggers read for channel %s" %channel
    return None

  return triggers
Example #31
def _losc_json_cache(metadata, detector, sample_rate=4096,
                     format='hdf5', duration=4096):
    """Parse a :class:`~glue.lal.Cache` from a LOSC metadata packet
    """
    urls = []
    for fmd in metadata:  # loop over file metadata dicts
        # skip over files we don't want
        if (fmd['detector'] != detector or
                fmd['sampling_rate'] != sample_rate or
                fmd['format'] != format or
                fmd['duration'] != duration):
            continue
        urls.append(fmd['url'])
    return Cache.from_urls(urls)
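
A sketch of the metadata packet this parser consumes, with made-up records mirroring the keys the loop reads; only the matching record survives the filter:

metadata = [
    {'detector': 'H1', 'sampling_rate': 4096, 'format': 'hdf5',
     'duration': 4096,
     'url': 'https://losc.ligo.org/archive/H-H1_LOSC_4_V1-1126256640-4096.hdf5'},
    {'detector': 'L1', 'sampling_rate': 4096, 'format': 'hdf5',
     'duration': 4096,
     'url': 'https://losc.ligo.org/archive/L-L1_LOSC_4_V1-1126256640-4096.hdf5'},
]
cache = _losc_json_cache(metadata, 'H1')  # keeps only the H1 entry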
Example #32
def _losc_json_cache(metadata,
                     detector,
                     sample_rate=4096,
                     format='hdf5',
                     duration=4096):
    """Parse a :class:`~glue.lal.Cache` from a LOSC metadata packet
    """
    urls = []
    for fmd in metadata:  # loop over file metadata dicts
        # skip over files we don't want
        if (fmd['detector'] != detector or fmd['sampling_rate'] != sample_rate
                or fmd['format'] != format or fmd['duration'] != duration):
            continue
        urls.append(fmd['url'])
    return Cache.from_urls(urls)
Example #33
 def _query(self, channel, start, end):
     "Do we know where the frame file is?"
     if segment(start, end) in self._remotecoverage:
         return True
     urls = query_LDR(self.host,
                      self.port,
                      channel[0],
                      self.frametype,
                      start,
                      end,
                      urlType="file")
     if urls:
         new = Cache.from_urls(urls, coltype=int)
         new.sort(key=operator.attrgetter("segment"))
         self.add_cache(new)
     return segment(start, end) in self._remotecoverage
Example #34
def test_lalcache_from_gluecache():
    files = [
        "X-TEST-0-1.gwf",
        "X-TEST-1-1.gwf",
    ]
    gcache = GlueCache.from_urls(files, coltype=LIGOTimeGPS)
    try:
        lcache = lal_utils.lalcache_from_gluecache(gcache)
    finally:
        for fp in files:
            if os.path.isfile(fp):
                os.remove(fp)
    assert lcache.length == len(gcache)
    assert lcache.list.url == (
        "file://localhost{}".format(os.path.abspath(files[0]))
    )
Example #35
def load_omic_trigs(omicroncachefile, segs):
  # Read in the Omicron triggers
  with open(omicroncachefile, 'r') as cachefile:
    cache = Cache.fromfile(cachefile)

  omic_trigs = SnglBurstTable.read(cache, verbose=True, filt=lambda x: x.snr <\
      100 and x.peak_frequency < 100)

  # Check if Omicron triggers have been read in successfully
  if not omic_trigs:
    sys.exit("ERROR: No triggers for Omicron channel: %s"
             % omicroncachefile.split('.')[0])
  else:
    print "%d Omicron triggers read" % len(omic_trigs)

  #Get the Omicron triggers that lie within the valid segment list
  omic_trigs = omic_trigs.vetoed(segs)
  return omic_trigs
Example #36
 def process_state(self,
                   state,
                   nds='guess',
                   multiprocess=False,
                   config=GWSummConfigParser(),
                   segdb_error='raise',
                   trigcache=None,
                   datacache=None):
     if trigcache is None:
         trigcache = self.inspiralcache
     if datacache is None:
         datacache = Cache()
     super(DailyAhopeTab, self).process_state(state,
                                              nds=nds,
                                              multiprocess=multiprocess,
                                              config=config,
                                              datacache=datacache,
                                              trigcache=trigcache)
Example #37
    def run_command(self, options={}, args=[]):
        if len(args) != 1:
            self.parser.error("cachefile is required.")

        config = getLarsConfig()
        if not config:
            print "This analysis does not appear to have a reservation. (no %s)" % INI_NAME
            print "If a reservation has been lost, try 'lars info [--repair]'"
            print "to try to recover your '%s'" % INI_NAME
            return

        id = config.get('lars', 'id')

        cachefilename = args[0]
        cachefile = open(cachefilename, "r")

        cache = Cache.fromfile(cachefile)
        segdir = cache.to_segmentlistdict()
        extent = segdir.extent_all()
        gpsStart = int(extent[0])
        gpsEnd = int(extent[1])
        ifos = mkIfos(segdir.keys())

        duration = gpsEnd - gpsStart

        url = makeNiceUrl(os.getcwd())

        if options.dry_run:
            print "Dry run.  Results not saved"
            print "gpsStart:  ", gpsStart
            print "gpsEnd:    ", gpsEnd
            print "duration:  ", duration
            print "IFOs:      ", ifos
            print "Cachefile: ", cachefilename
            print "Location:  ", url
            return

        server = serviceProxy(config.get('lars', 'serviceUrl'))
        rv = server.publish(id, ifos, gpsStart, duration, url,
                            makeNiceUrl(cachefilename))
        rv = objectify(rv)
        print "Published:", rv.uid
Example #38
    def run_command(self, options={}, args=[]):
        if len(args) != 1:
            self.parser.error("cachefile is required.")

        config = getLarsConfig()
        if not config:
            print "This analysis does not appear to have a reservation. (no %s)" % INI_NAME
            print "If a reservation has been lost, try 'lars info [--repair]'"
            print "to try to recover your '%s'" % INI_NAME
            return

        id = config.get('lars','id')

        cachefilename = args[0]
        cachefile = open(cachefilename, "r")
        
        cache = Cache.fromfile(cachefile)
        segdir = cache.to_segmentlistdict()
        extent = segdir.extent_all()
        gpsStart = int(extent[0])
        gpsEnd = int(extent[1])
        ifos = mkIfos(segdir.keys())

        duration = gpsEnd - gpsStart

        url = makeNiceUrl(os.getcwd())

        if options.dry_run:
            print "Dry run.  Results not saved"
            print "gpsStart:  ", gpsStart
            print "gpsEnd:    ", gpsEnd
            print "duration:  ", duration
            print "IFOs:      ", ifos
            print "Cachefile: ", cachefilename
            print "Location:  ", url
            return

        server = serviceProxy(config.get('lars', 'serviceUrl'))
        rv = server.publish(id, ifos, gpsStart, duration, url, makeNiceUrl(cachefilename))
        rv = objectify(rv)
        print "Published:", rv.uid
Example #39
def hoge(gps):
    if np.isnan(gps):
        return None
    from gwpy.time import tconvert
    #print tconvert(gps)
    from gwpy.timeseries import TimeSeries
    from glue.lal import Cache
    channel = 'K1:PEM-EXV_SEIS_Z_SENSINF_OUT_DQ'
    start = gps #- 5*60
    end = gps + 30*60
    gwf_cache = 'full_Sep01-Nov01.cache'
    with open(gwf_cache, 'r') as fobj:
        cache = Cache.fromfile(fobj)
    #print cache
    data = TimeSeries.read(cache, channel, start=start, end=end, verbose=True,
                           nproc=2, pad=np.nan, format='gwf.lalframe')
    plot = data.plot(
        title='hoge',
        # ylabel='Strain amplitude',
    )
    plot.savefig('{0}.png'.format(gps))
    plot.close()
Example #40
 def test_aux_channels_from_cache(self):
     cache = Cache.from_urls(AUX_FILES.values())
     channels = triggers.find_auxiliary_channels('omicron', None, None,
                                                 cache=cache)
     self.assertListEqual(channels, sorted(AUX_FILES.keys()))
Example #41
    def test_read_write_gwf(self, api):
        array = self.create(name='TEST')

        # map API to format name
        if api is None:
            fmt = 'gwf'
        else:
            fmt = 'gwf.%s' % api

        # test basic write/read
        try:
            utils.test_read_write(array,
                                  fmt,
                                  extension='gwf',
                                  read_args=[array.name],
                                  assert_equal=utils.assert_quantity_sub_equal,
                                  assert_kw={'exclude': ['channel']})
        except ImportError as e:
            pytest.skip(str(e))

        # test read keyword arguments
        suffix = '-%d-%d.gwf' % (array.t0.value, array.duration.value)
        with tempfile.NamedTemporaryFile(prefix='GWpy-', suffix=suffix) as f:
            array.write(f.name)

            def read_(**kwargs):
                return type(array).read(f, array.name, format='gwf', **kwargs)

            # test start, end
            start, end = array.span.contract(10)
            t = read_(start=start, end=end)
            utils.assert_quantity_sub_equal(t,
                                            array.crop(start, end),
                                            exclude=['channel'])
            assert t.span == (start, end)
            t = read_(start=start)
            utils.assert_quantity_sub_equal(t,
                                            array.crop(start=start),
                                            exclude=['channel'])
            t = read_(end=end)
            utils.assert_quantity_sub_equal(t,
                                            array.crop(end=end),
                                            exclude=['channel'])

            # test dtype
            t = read_(dtype='float32')
            assert t.dtype is numpy.dtype('float32')
            t = read_(dtype={f.name: 'float64'})
            assert t.dtype is numpy.dtype('float64')

            # check errors
            with pytest.raises((ValueError, RuntimeError)):
                read_(start=array.span[1])
            with pytest.raises((ValueError, RuntimeError)):
                read_(end=array.span[0] - 1)

            # check old format prints a deprecation warning
            if api:
                with pytest.warns(DeprecationWarning):
                    type(array).read(f, array.name, format=api)

            # check reading from cache
            try:
                from glue.lal import Cache
            except ImportError:
                pass
            else:
                a2 = self.create(name='TEST', t0=array.span[1], dt=array.dx)
                suffix = '-%d-%d.gwf' % (a2.t0.value, a2.duration.value)
                with tempfile.NamedTemporaryFile(prefix='GWpy-',
                                                 suffix=suffix) as f2:
                    a2.write(f2.name)
                    cache = Cache.from_urls([f.name, f2.name], coltype=int)
                    comb = type(array).read(cache, 'TEST', format=fmt, nproc=2)
                    utils.assert_quantity_sub_equal(comb,
                                                    array.append(
                                                        a2, inplace=False),
                                                    exclude=['channel'])
Example #42
def find_daily_cache(start, end, ifo, clustering=None, check_files=False,
                     **kwargs):
    """Find Daily ihope files from the daily runs for the given span

    @param start
        GPS start time for search
    @param end
        GPS end time for search
    @param ifo
        observatory for search
    @param clustering
        tag for clustering stage to search, default: unclustered
    @param check_files
        check that the returned files can be read on disk, default False
    @param kwargs UNDOCUMENTED
    """
    out = Cache()

    # set clustering tag
    if clustering is None or clustering.upper() == 'UNCLUSTERED':
        file_tag = 'INSPIRAL_UNCLUSTERED'
    elif clustering.upper() in ["100MS", "100MILLISEC"]:
        file_tag = 'INSPIRAL_100MILLISEC_CLUSTERED'
    elif clustering.upper() in ["30MS", "30MILLISEC"]:
        file_tag = 'INSPIRAL_30MILLISEC_CLUSTERED'
    elif clustering.upper() in ["16S", "16SECOND"]:
        file_tag = 'INSPIRAL_16SEC_CLUSTERED'
    else:
        raise ValueError("unrecognised clustering tag: %r" % clustering)

    # set base directory
    directory = kwargs.pop("directory", os.path.expanduser("~cbc/ihope_daily"))

    # work out days
    span = Segment(start, end)
    start = int(start)
    start_d = lal.UTCToGPS(datetime(*lal.GPSToUTC(start)[:6]).replace(
        hour=0, minute=0, second=0).timetuple())
    days = []
    day = start_d
    while day <= end:
        days.append(day)
        day += 86400

    # optimise
    append = out.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    pjoin = os.path.join
    intersects = span.intersects
    from_T050017 = CacheEntry.from_T050017

    # loop over days gathering files
    for day in days:
        utc = datetime(*lal.GPSToUTC(day)[:6])
        day_path = pjoin(directory, utc.strftime("%Y%m"),
                         utc.strftime("%Y%m%d"))
        day_cache = os.path.join(day_path, "%s-%s.cache" % (ifo, file_tag))
        if isfile(day_cache):
            with open(day_cache, "r") as f:
                filenames = Cache.fromfile(f).pfnlist()
        else:
            filenames = glob(os.path.join(
                day_path, "%s-%s-*.xml.gz" % (ifo, file_tag)))
        for filename in filenames:
            e = from_T050017(filename)
            if intersects(e.segment):
                append(e)

    out.sort(key=lambda e: e.path)
    return out
Example #43
def find_frames(ifo, frametype, gpsstart, gpsend, config=GWSummConfigParser(),
                urltype='file', gaps='warn', onerror='raise'):
    """Query the datafind server for GWF files for the given type

    Parameters
    ----------
    ifo : `str`
        prefix for the IFO of interest (either one or two characters)

    frametype : `str`
        name of the frametype to find

    gpsstart : `int`
        GPS start time of the query

    gpsend : `int`
        GPS end time of the query

    config : `~ConfigParser.ConfigParser`, optional
        configuration with `[datafind]` section containing `server`
        specification, otherwise taken from the environment

    urltype : `str`, optional
        what type of file paths to return, default: `file`

    gaps : `str`, optional
        what to do when gaps are detected, one of

        - `ignore` : do nothing
        - `warn` : display the existence of gaps but carry on
        - `raise` : raise an exception

    onerror : `str`, optional
        what to do when the `~glue.datafind` query itself fails, same
        options as for ``gaps``

    Returns
    -------
    cache : `~glue.lal.Cache`
        a list of structured frame file descriptions matching the ifo and
        frametype requested
    """
    vprint('    Finding %s-%s frames for [%d, %d)...'
           % (ifo[0], frametype, int(gpsstart), int(gpsend)))
    # find datafind host:port
    try:
        host = config.get('datafind', 'server')
    except (NoOptionError, NoSectionError):
        try:
            host = os.environ['LIGO_DATAFIND_SERVER']
        except KeyError:
            host = None
            port = None
        else:
            try:
                host, port = host.rsplit(':', 1)
            except ValueError:
                port = None
            else:
                port = int(port)
    else:
        port = config.getint('datafind', 'port')
    # get credentials
    if port == 80:
        cert = None
        key = None
    else:
        cert, key = datafind.find_credential()

    # XXX HACK: LLO changed frame types on Dec 6 2013:
    LLOCHANGE = 1070291904
    if re.match('L1_[CRMT]', frametype) and gpsstart < LLOCHANGE:
        frametype = frametype[-1]

    # query frames
    ifo = ifo[0].upper()
    gpsstart = int(floor(gpsstart))
    gpsend = int(ceil(min(globalv.NOW, gpsend)))
    if gpsend <= gpsstart:
        return Cache()

    # parse match
    try:
        frametype, match = frametype.split('|', 1)
    except ValueError:
        match = None

    def _query():
        if cert is not None:
            dfconn = datafind.GWDataFindHTTPSConnection(
                host=host, port=port, cert_file=cert, key_file=key)
        else:
            dfconn = datafind.GWDataFindHTTPConnection(host=host, port=port)
        return dfconn.find_frame_urls(ifo[0].upper(), frametype, gpsstart,
                                      gpsend, urltype=urltype, on_gaps=gaps,
                                      match=match)
    try:
        cache = _query()
    except RuntimeError as e:
        sleep(1)
        try:
            cache = _query()
        except RuntimeError:
            if 'Invalid GPS times' in str(e):
                e.args = ('%s: %d ... %s' % (str(e), gpsstart, gpsend),)
            if onerror in ['ignore', None]:
                pass
            elif onerror in ['warn']:
                warnings.warn('Caught %s: %s'
                              % (type(e).__name__, str(e)))
            else:
                raise
            cache = Cache()

    # XXX: if querying for day of LLO frame type change, do both
    if (ifo[0].upper() == 'L' and frametype in ['C', 'R', 'M', 'T'] and
            gpsstart < LLOCHANGE < gpsend):
        start = len(cache) and cache[-1].segment[1] or gpsstart
        if start < gpsend:
            cache.extend(dfconn.find_frame_urls(ifo[0].upper(),
                                                'L1_%s' % frametype, start,
                                                gpsend, urltype=urltype,
                                                on_gaps=gaps)[1:])
    cache, _ = cache.checkfilesexist()
    vprint(' %d found.\n' % len(cache))
    return cache
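
A hypothetical invocation, assuming the gwsumm environment this function lives in (a [datafind] configuration section or LIGO_DATAFIND_SERVER set in the environment); the frametype and GPS span are placeholders:

cache = find_frames('L1', 'L1_R', 1187000000, 1187004096,
                    gaps='ignore', onerror='warn')
print('%d frame files found' % len(cache))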
Example #44
def _get_timeseries_dict(channels, segments, config=None,
                         cache=None, query=True, nds=None, frametype=None,
                         multiprocess=True, return_=True, statevector=False,
                         archive=True, datafind_error='raise', **ioargs):
    """Internal method to retrieve the data for a set of like-typed
    channels using the :meth:`TimeSeriesDict.read` accessor.
    """
    channels = list(map(get_channel, channels))

    # set classes
    if statevector:
        ListClass = StateVectorList
        DictClass = StateVectorDict
    else:
        ListClass = TimeSeriesList
        DictClass = TimeSeriesDict

    # check we have a configparser
    if config is None:
        config = GWSummConfigParser()

    # read segments from global memory
    keys = dict((c.ndsname, make_globalv_key(c)) for c in channels)
    havesegs = reduce(operator.and_,
                      (globalv.DATA.get(keys[channel.ndsname],
                                        ListClass()).segments
                       for channel in channels))
    new = segments - havesegs

    # get processes
    if multiprocess is True:
        nproc = count_free_cores()
    elif multiprocess is False:
        nproc = 1
    else:
        nproc = multiprocess

    if globalv.VERBOSE and not multiprocess:
        verbose = '    '
    else:
        verbose = False

    # read channel information
    filter_ = dict()
    resample = dict()
    dtype_ = dict()
    for channel in channels:
        try:
            filter_[channel.ndsname] = channel.filter
        except AttributeError:
            pass
        try:
            resample[channel] = float(channel.resample)
        except AttributeError:
            pass
        if channel.dtype is None:
            dtype_[channel] = ioargs.get('dtype')
        else:
            dtype_[channel] = channel.dtype

    # work out whether to use NDS or not
    if nds is None and cache is not None:
        nds = False
    elif nds is None:
        nds = 'LIGO_DATAFIND_SERVER' not in os.environ

    # read new data
    query &= (abs(new) > 0)
    if cache is not None:
        query &= len(cache) > 0
    if query:
        for channel in channels:
            globalv.DATA.setdefault(keys[channel.ndsname], ListClass())
        # open NDS connection
        if nds and config.has_option('nds', 'host'):
            host = config.get('nds', 'host')
            port = config.getint('nds', 'port')
            try:
                ndsconnection = nds2.connection(host, port)
            except RuntimeError as e:
                if 'SASL authentication' in str(e):
                    from gwpy.io.nds import kinit
                    kinit()
                    ndsconnection = nds2.connection(host, port)
                else:
                    raise
            frametype = source = 'nds'
            ndstype = channels[0].type
        elif nds:
            ndsconnection = None
            frametype = source = 'nds'
            ndstype = channels[0].type
        # or find frame type and check cache
        else:
            ifo = channels[0].ifo
            frametype = frametype or channels[0].frametype
            if frametype is not None and frametype.endswith('%s_M' % ifo):
                new = type(new)([s for s in new if abs(s) >= 60.])
            elif frametype is not None and frametype.endswith('%s_T' % ifo):
                new = type(new)([s for s in new if abs(s) >= 1.])
            #elif ((globalv.NOW - new[0][0]) < 86400 * 10 and
            #      frametype == '%s_R' % ifo and
            #      find_types(site=ifo[0], match='_C\Z')):
            #    frametype = '%s_C' % ifo
            if cache is not None:
                fcache = cache.sieve(ifos=ifo[0], description=frametype,
                                     exact_match=True)
            else:
                fcache = Cache()
            if (cache is None or len(fcache) == 0) and len(new):
                span = new.extent().protract(8)
                fcache = find_frames(ifo, frametype, span[0], span[1],
                                     config=config, gaps='ignore',
                                     onerror=datafind_error)
                if len(fcache) == 0 and frametype == '%s_R' % ifo:
                    frametype = '%s_C' % ifo
                    vprint("    Moving to backup frametype %s\n" % frametype)
                    fcache = find_frames(ifo, frametype, span[0], span[1],
                                         config=config, gaps='ignore',
                                         onerror=datafind_error)

            # parse discontiguous cache blocks and rebuild segment list
            cachesegments = find_cache_segments(fcache)
            new &= cachesegments
            source = 'frames'
        for channel in channels:
            channel.frametype = frametype

        # check whether each channel exists for all new times already
        qchannels = []
        qresample = {}
        qdtype = {}
        for channel in channels:
            oldsegs = globalv.DATA.get(channel.ndsname,
                                       ListClass()).segments
            if abs(new - oldsegs) != 0:
                qchannels.append(channel)
                if channel in resample:
                    qresample[channel] = resample[channel]
                qdtype[channel] = dtype_.get(channel, ioargs.get('dtype'))
        ioargs['dtype'] = qdtype

        # find channel type
        if not nds:
            ctype = set()
            for channel in qchannels:
                try:
                    ctype.add(channel.ctype)
                except AttributeError:
                    ctype.add(get_channel_type(channel))
            if len(ctype) == 1:
                ctype = list(ctype)[0]
            else:
                ctype = None
        # loop through segments, recording data for each
        if len(new) and nproc > 1:
            vprint("    Fetching data (from %s) for %d channels [%s]"
                   % (source, len(qchannels), nds and ndstype or frametype))
        for segment in new:
            # force reading integer-precision segments
            segment = type(segment)(int(segment[0]), int(segment[1]))
            if abs(segment) < 1:
                continue
            if nds:
                tsd = DictClass.fetch(qchannels, segment[0], segment[1],
                                      connection=ndsconnection, type=ndstype,
                                      **ioargs)
            else:
                # pad resampling
                if segment[1] == cachesegments[-1][1] and qresample:
                    resamplepad = 8
                    if abs(segment) <= resamplepad:
                        continue
                    segment = type(segment)(segment[0],
                                            segment[1] - resamplepad)
                    segcache = fcache.sieve(
                                   segment=segment.protract(resamplepad))
                else:
                    segcache = fcache.sieve(segment=segment)
                # set minute trend times modulo 60 from GPS 0
                if (re.match(r'(?:(.*)_)?[A-Z]\d_M', str(frametype)) or
                        (ifo == 'C1' and frametype == 'M')):
                    segstart = int(segment[0]) // 60 * 60
                    segend = int(segment[1]) // 60 * 60
                    if segend >= segment[1]:
                        segend -= 60
                    # and ignore segments shorter than 1 full average
                    if (segend - segstart) < 60:
                        continue
                else:
                    segstart, segend = map(float, segment)
                # pull filters out because they can break multiprocessing
                if nproc > 1:
                    for c in qchannels:
                        if c.ndsname in filter_:
                            del c.filter
                # read data
                tsd = DictClass.read(segcache, qchannels,
                                     start=segstart, end=segend, type=ctype,
                                     nproc=nproc, resample=qresample,
                                     verbose=verbose, **ioargs)
                # put filters back
                for c in qchannels:
                    if c.ndsname in filter_:
                        c.filter = filter_[c.ndsname]
            for (channel, data) in tsd.iteritems():
                key = keys[channel.ndsname]
                if (key in globalv.DATA and
                        data.span in globalv.DATA[key].segments):
                    continue
                if data.unit is None:
                    data.unit = 'undef'
                for seg in globalv.DATA[key].segments:
                    if seg.intersects(data.span):
                        data = data.crop(*(data.span - seg))
                        break
                try:
                    filt = filter_[channel.ndsname]
                except KeyError:
                    pass
                else:
                    # filter with function
                    if callable(filt):
                        try:
                            data = filt(data)
                        except TypeError as e:
                            if 'Can only apply' in str(e):
                                data.value[:] = filt(data.value)
                            else:
                                raise
                    # filter with gain
                    elif (isinstance(filt, tuple) and len(filt) == 3 and
                              len(filt[0] + filt[1]) == 0):
                        try:
                            data *= filt[2]
                        except TypeError:
                            data = data * filt[2]
                    # filter zpk
                    elif isinstance(filt, tuple):
                        data = data.filter(*filt)
                    # filter fail
                    else:
                        raise ValueError("Cannot parse filter for %s: %r"
                                         % (channel.ndsname,
                                            filt))
                if isinstance(data, StateVector) or ':GRD-' in str(channel):
                    try:
                        data.unit = units.dimensionless_unscaled
                    except AttributeError:
                        data._unit = units.dimensionless_unscaled
                    if hasattr(channel, 'bits'):
                        data.bits = channel.bits
                elif data.unit is None:
                    data._unit = channel.unit
                # XXX: HACK for failing unit check
                if len(globalv.DATA[key]):
                    data._unit = globalv.DATA[key][-1].unit
                # update channel type for trends
                if (data.channel.type is None and
                       data.channel.trend is not None):
                    if data.dt.to('s').value == 1:
                        data.channel.type = 's-trend'
                    elif data.dt.to('s').value == 60:
                        data.channel.type = 'm-trend'
                # append and coalesce
                add_timeseries(data, key=key, coalesce=True)
            if multiprocess:
                vprint('.')
        if len(new):
            vprint("\n")

    if not return_:
        return

    # return correct data
    out = OrderedDict()
    for channel in channels:
        data = ListClass()
        if keys[channel.ndsname] not in globalv.DATA:
            out[channel.ndsname] = ListClass()
        else:
            for ts in globalv.DATA[keys[channel.ndsname]]:
                for seg in segments:
                    if abs(seg) == 0 or abs(seg) < ts.dt.value:
                        continue
                    if ts.span.intersects(seg):
                        common = map(float, ts.span & seg)
                        cropped = ts.crop(*common, copy=False)
                        if cropped.size:
                            data.append(cropped)
        out[channel.ndsname] = data.coalesce()
    return out
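The minute-trend branch above snaps the read boundaries down to whole minutes (GPS modulo 60) and skips any span shorter than one full average. The same arithmetic as a standalone sketch (the helper name is illustrative):

def align_to_minute_trend(start, end):
    """Round a [start, end) GPS span down to whole-minute boundaries,
    returning None if less than one full 60-second average remains."""
    segstart = int(start) // 60 * 60
    segend = int(end) // 60 * 60
    if segend >= end:           # never read past the requested end
        segend -= 60
    if segend - segstart < 60:  # shorter than one full average
        return None
    return segstart, segend

# e.g. align_to_minute_trend(1126259470, 1126259601)
# -> (1126259460, 1126259580)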
Example #45
def _get_timeseries_dict(channels,
                         segments,
                         config=None,
                         cache=None,
                         query=True,
                         nds=None,
                         frametype=None,
                         multiprocess=True,
                         return_=True,
                         statevector=False,
                         archive=True,
                         datafind_error='raise',
                         **ioargs):
    """Internal method to retrieve the data for a set of like-typed
    channels using the :meth:`TimeSeriesDict.read` accessor.
    """
    channels = map(get_channel, channels)

    # set classes
    if statevector:
        ListClass = StateVectorList
        DictClass = StateVectorDict
    else:
        ListClass = TimeSeriesList
        DictClass = TimeSeriesDict

    # check we have a configparser
    if config is None:
        config = GWSummConfigParser()

    # read segments from global memory
    keys = dict((c.ndsname, make_globalv_key(c)) for c in channels)
    havesegs = reduce(
        operator.and_,
        (globalv.DATA.get(keys[channel.ndsname], ListClass()).segments
         for channel in channels))
    new = segments - havesegs

    # get processes
    if multiprocess is True:
        nproc = count_free_cores()
    elif multiprocess is False:
        nproc = 1
    else:
        nproc = multiprocess

    # read channel information
    filter_ = dict()
    resample = dict()
    dtype_ = dict()
    for channel in channels:
        try:
            filter_[channel.ndsname] = channel.filter
        except AttributeError:
            pass
        try:
            resample[channel] = float(channel.resample)
        except AttributeError:
            pass
        if channel.dtype is None:
            dtype_[channel] = ioargs.get('dtype')
        else:
            dtype_[channel] = channel.dtype

    # work out whether to use NDS or not
    if nds is None and cache is not None:
        nds = False
    elif nds is None:
        nds = 'LIGO_DATAFIND_SERVER' not in os.environ

    # read new data
    query &= (abs(new) > 0)
    if cache is not None:
        query &= len(cache) > 0
    if query:
        for channel in channels:
            globalv.DATA.setdefault(keys[channel.ndsname], ListClass())
        # open NDS connection
        if nds and config.has_option('nds', 'host'):
            host = config.get('nds', 'host')
            port = config.getint('nds', 'port')
            try:
                ndsconnection = nds2.connection(host, port)
            except RuntimeError as e:
                if 'SASL authentication' in str(e):
                    from gwpy.io.nds import kinit
                    kinit()
                    ndsconnection = nds2.connection(host, port)
                else:
                    raise
            frametype = source = 'nds'
            ndstype = channels[0].type
        elif nds:
            ndsconnection = None
            frametype = source = 'nds'
            ndstype = channels[0].type
        # or find frame type and check cache
        else:
            ifo = channels[0].ifo
            frametype = frametype or channels[0].frametype
            if frametype is not None and frametype.endswith('%s_M' % ifo):
                new = type(new)([s for s in new if abs(s) >= 60.])
            elif frametype is not None and frametype.endswith('%s_T' % ifo):
                new = type(new)([s for s in new if abs(s) >= 1.])
            if cache is not None:
                fcache = cache.sieve(ifos=ifo[0],
                                     description=frametype,
                                     exact_match=True)
            else:
                fcache = Cache()
            if (cache is None or len(fcache) == 0) and len(new):
                span = new.extent().protract(8)
                fcache = find_frames(ifo,
                                     frametype,
                                     span[0],
                                     span[1],
                                     config=config,
                                     gaps='ignore',
                                     onerror=datafind_error)
                cachesegments = find_cache_segments(fcache)
                gaps = SegmentList([span]) - cachesegments
                if abs(gaps) and frametype == '%s_HOFT_C00' % ifo:
                    f2 = '%s_DMT_C00' % ifo
                    vprint("    Gaps discovered in aggregated h(t) type "
                           "%s, checking %s\n" % (frametype, f2))
                    c2 = find_frames(ifo,
                                     f2,
                                     span[0],
                                     span[1],
                                     config=config,
                                     gaps='ignore',
                                     onerror=datafind_error)
                    g2 = SegmentList([span]) - find_cache_segments(c2)
                    if abs(g2) < abs(gaps):
                        vprint("    Greater coverage with frametype %s\n" % f2)
                        fcache = c2
                        frametype = f2
                    else:
                        vprint("    No extra coverage with frametype %s\n" %
                               f2)

            # parse discontiguous cache blocks and rebuild segment list
            cachesegments = find_cache_segments(fcache)
            new &= cachesegments
            source = 'frames'

            # set ctype if reading with framecpp
            if cache is None and frametype in ADC_TYPES and HAS_FRAMECPP:
                ioargs['type'] = 'adc'

        for channel in channels:
            channel.frametype = frametype

        # check whether each channel exists for all new times already
        qchannels = []
        qresample = {}
        qdtype = {}
        for channel in channels:
            oldsegs = globalv.DATA.get(channel.ndsname, ListClass()).segments
            if abs(new - oldsegs) != 0:
                qchannels.append(channel)
                if channel in resample:
                    qresample[channel] = resample[channel]
                qdtype[channel] = dtype_.get(channel, ioargs.get('dtype'))
        ioargs['dtype'] = qdtype

        # loop through segments, recording data for each
        if len(new) and nproc > 1:
            vprint("    Fetching data (from %s) for %d channels [%s]" %
                   (source, len(qchannels), nds and ndstype or frametype))
        for segment in new:
            # force reading integer-precision segments
            segment = type(segment)(int(segment[0]), int(segment[1]))
            if abs(segment) < 1:
                continue
            if nds:
                tsd = DictClass.fetch(qchannels,
                                      segment[0],
                                      segment[1],
                                      connection=ndsconnection,
                                      type=ndstype,
                                      **ioargs)
            else:
                # pad resampling
                if segment[1] == cachesegments[-1][1] and qresample:
                    resamplepad = 8
                    if abs(segment) <= resamplepad:
                        continue
                    segment = type(segment)(segment[0],
                                            segment[1] - resamplepad)
                    segcache = fcache.sieve(
                        segment=segment.protract(resamplepad))
                else:
                    segcache = fcache.sieve(segment=segment)
                # set minute trend times modulo 60 from GPS 0
                if (re.match(r'(?:(.*)_)?[A-Z]\d_M', str(frametype))
                        or (ifo == 'C1' and frametype == 'M')):
                    segstart = int(segment[0]) // 60 * 60
                    segend = int(segment[1]) // 60 * 60
                    if segend >= segment[1]:
                        segend -= 60
                    # and ignore segments shorter than 1 full average
                    if (segend - segstart) < 60:
                        continue
                    segcache = segcache.sieve(
                        segment=type(segment)(segstart, segend))
                else:
                    segstart, segend = map(float, segment)
                # pull filters out because they can break multiprocessing
                if nproc > 1:
                    for c in qchannels:
                        if c.ndsname in filter_:
                            del c.filter
                # read data
                tsd = DictClass.read(segcache,
                                     qchannels,
                                     start=segstart,
                                     end=segend,
                                     nproc=nproc,
                                     resample=qresample,
                                     **ioargs)
                # put filters back
                for c in qchannels:
                    if c.ndsname in filter_:
                        c.filter = filter_[c.ndsname]
            for (channel, data) in tsd.iteritems():
                key = keys[channel.ndsname]
                if (key in globalv.DATA
                        and data.span in globalv.DATA[key].segments):
                    continue
                if data.unit is None:
                    data.unit = 'undef'
                for seg in globalv.DATA[key].segments:
                    if seg.intersects(data.span):
                        data = data.crop(*(data.span - seg))
                        break
                try:
                    filt = filter_[channel.ndsname]
                except KeyError:
                    pass
                else:
                    # filter with function
                    if callable(filt):
                        try:
                            data = filt(data)
                        except TypeError as e:
                            if 'Can only apply' in str(e):
                                data.value[:] = filt(data.value)
                            else:
                                raise
                    # filter with gain
                    elif (isinstance(filt, tuple) and len(filt) == 3
                          and len(filt[0] + filt[1]) == 0):
                        try:
                            data *= filt[2]
                        except TypeError:
                            data = data * filt[2]
                    # filter zpk
                    elif isinstance(filt, tuple):
                        data = data.filter(*filt)
                    # filter fail
                    else:
                        raise ValueError("Cannot parse filter for %s: %r" %
                                         (channel.ndsname, filt))
                if isinstance(data, StateVector) or ':GRD-' in str(channel):
                    try:
                        data.unit = units.dimensionless_unscaled
                    except AttributeError:
                        data._unit = units.dimensionless_unscaled
                    if hasattr(channel, 'bits'):
                        data.bits = channel.bits
                elif data.unit is None:
                    data._unit = channel.unit
                # XXX: HACK for failing unit check
                if len(globalv.DATA[key]):
                    data._unit = globalv.DATA[key][-1].unit
                # update channel type for trends
                if data.channel.type is None and (data.channel.trend
                                                  is not None):
                    if data.dt.to('s').value == 1:
                        data.channel.type = 's-trend'
                    elif data.dt.to('s').value == 60:
                        data.channel.type = 'm-trend'
                # append and coalesce
                add_timeseries(data, key=key, coalesce=True)
            if multiprocess:
                vprint('.')
        if len(new):
            vprint("\n")

    if not return_:
        return

    # return correct data
    out = OrderedDict()
    for channel in channels:
        data = ListClass()
        if keys[channel.ndsname] not in globalv.DATA:
            out[channel.ndsname] = ListClass()
        else:
            for ts in globalv.DATA[keys[channel.ndsname]]:
                for seg in segments:
                    if abs(seg) == 0 or abs(seg) < ts.dt.value:
                        continue
                    if ts.span.intersects(seg):
                        common = map(float, ts.span & seg)
                        cropped = ts.crop(*common, copy=False)
                        if cropped.size:
                            data.append(cropped)
        out[channel.ndsname] = data.coalesce()
    return out
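The h(t) fallback in this version compares gap coverage between the primary and backup frame types and keeps whichever leaves less of the span uncovered. The decision rule in isolation (a sketch assuming gwpy's SegmentList, where abs() gives total livetime):

from gwpy.segments import Segment, SegmentList

def pick_better_type(span, primary_segs, backup_segs):
    """Return 'backup' only if it strictly improves coverage of `span`."""
    gap_primary = abs(SegmentList([span]) - primary_segs)
    gap_backup = abs(SegmentList([span]) - backup_segs)
    return 'backup' if gap_backup < gap_primary else 'primary'

# e.g. pick_better_type(Segment(0, 100),
#                       SegmentList([Segment(0, 50)]),
#                       SegmentList([Segment(0, 90)]))  # -> 'backup'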
Example #46
def get_cache(start, end, ifo, ftype, framecache=False, server=None,\
              verbose=False):

  """
    Queries the LSC datafind server and returns a glue.lal.Cache object
    containing the frame file paths in the given GPS (start, end) interval
    for the given ifo and type (can be lists).

    framecache=True returns a pylal.dq.dqFrameUtils.FrameCache object instead.

    Arguments:

      start : float
        GPS start time (s).
      end : float
        GPS end time (s).
      ifo : [ string | list ]
        ifo (or list of) to find, e.g. 'G1'.
      ftype : [ string | list ]
        frame data type (or list of) to find, e.g. 'G1_RDS_C01_L3'.

  """

  # set lists
  if isinstance(ftype, str):
    types = [ftype]
  else:
    types = ftype
  if isinstance(ifo, str):
    ifos = [ifo]
  else:
    ifos = ifo

  # construct span
  span = segments.segment(start,end)

  # set options
  cache = LALCache()
  entry_class = LALCacheEntry

  # try querying the ligo_data_find server
  if not server:
    server = _find_datafind_server()

  if verbose: sys.stdout.write("Opening connection to %s...\n" % server)

  if re.search(':', server):
    port = int(server.split(':')[-1])
  else:
    port = None

  cert, key = _get_grid_proxy()

  # if we have a credential then use it when setting up the connection
  if cert and key and port!=80:
    h = httplib.HTTPSConnection(server, key_file=key, cert_file=cert)
  else:
    h = httplib.HTTPConnection(server)

  if verbose: sys.stdout.write("Querying server for frames...\n")

  # loop over ifos and types
  for ifo in ifos:
    for t in types:
      # construct the URL for a simple data find query
      url = "/LDR/services/data/v1/gwf/%s/%s/%s,%s/file.json" % (ifo[0], t,\
                                                                 str(start),\
                                                                 str(end))
      # query the server
      h.request("GET", url)
      response = h.getresponse()
      _verify_response(response)
      # unravel the response
      urlList = cjson.decode(response.read())
      for url in urlList:
        cache.append(entry_class.from_T050017(url))

  # close the server connection
  h.close()
  if verbose: sys.stdout.write("Connection to %s closed.\n" % server)

  # convert to FrameCache if needed
  if framecache:
    cache = LALCachetoFrameCache(cache)

  return cache
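A hypothetical call to get_cache (the GPS times and frame type below are illustrative, not taken from the original):

# fetch one hour of L1 raw frames as a glue.lal.Cache
cache = get_cache(1126259462, 1126263062, 'L1', 'L1_R', verbose=True)
print('%d frame files found' % len(cache))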
Example #47
########## apply segment list (of good times)

segments = SegmentList.read('L1_ER7_segments.txt')

for cachefile in cachelist:
  ### open trigger cache
  # Make a tag for the saving the plot as well as the title
  # The tag is the name of the channel extracted from the path
  tag = cachefile.split('/')[-1]
  tag = tag.split('.')[0]
  tag = tag.replace('_Omicron','')
 
  print ('\n\nReading file: %s now ...\n' % tag)
  with open(cachefile, 'r') as fobj:
      cache = Cache.fromfile(fobj)


  ### read triggers
  # filter to select for triggers with frequency < 100
  #trigs = SnglBurstTable.read(cache, verbose=True, filt=lambda t: t.peak_frequency < 100)

  #filter to select for triggers with frequency <100 and snr <100
  trigs = get_triggers('L1:'+tag, 'sngl_burst', segments, cache=cache)


  ### check triggers read successfully
  if not trigs:
    print("    WARNING: No triggers for channel 'L1:%s'." % tag,
          file=sys.stderr)
  else:
    pass  # (the original example is truncated here)
Example #48
def process_options():
	"""
	Process options and check for required values.
	"""
	opt = OptionParser()
	opt.add_option( "-c", "--input-cache", help="Read triggers from the files in this cache." )
	opt.add_option( "-v", "--verbose", action="store_true", help="Be verbose." )
	veto_settings = OptionGroup( opt, "HVeto settings" )
	veto_settings.add_option( "-i", "--instrument", help="Instrument against which to veto. Required." )
	veto_settings.add_option( "-r", "--reference-channel", help="Channel against which to veto. Required." )
	veto_settings.add_option( "-t", "--reference-triggers", help="File path to load reference triggers. Required." )
	veto_settings.add_option( "-s", "--significance-threshold", type=float, default=15, help="Significance below which to terminate the rounds. Default is 15." )
	veto_settings.add_option( "--snr-thresh", action="append", help="Add an SNR threshold to use in veto round. Can be given multiple times for different values. WARNING: This will override the default settings, *not* append to them." )
	veto_settings.add_option( "--time-window", action="append", help="Add a time window to use in veto round. Can be given multiple times for different values. WARNING: This will override the default settings, *not* append to them." )
	veto_settings.add_option( "-S", "--min-ref-snr", type=float, default=8, help="Minimum SNR threshold to load a trigger in the reference channel." )
	# FIXME: Strictly speaking the ignore list is required because I'm not
	# sure what the function will do without one.
	veto_settings.add_option( "-I", "--ignore-list", help="Text file, one channel per line with a list of channels to ignore when loading triggers." )
	# FIXME:
	#veto_settings.add_option( "-C", "--ignore-channel", action="append", help="Ignore these channels. Given several times, will ignore several channels. Do not prepend instrument. E.g. -C LSC-DARM_CTRL." )
	veto_settings.add_option( "--write-coinc", action="store_true", default=False, help="If set, output table will include coinc tables indicating which triggers were coincided in the process of execution." )
	opt.add_option_group( veto_settings )

	livetime_settings = OptionGroup( opt, "livetime settings" )
	# FIXME:
	#livetime_settings.add_option( "-L", "--livetime-definer", action="append", help="Name of segment definer entry from which to draw live segments. See '-l' option. If none is indicated, use all segments. Provide several times for deveral different segment definers." )
	livetime_settings.add_option( "-l", "--livetime-segments", help="File from which to parse livetime segments. Will assume, at first, a LIGOLW XML file with valid segment definer and segment tables. If this fails, will try segwizard format. Required." )
	livetime_settings.add_option( "--segment-definer", help="In tandem with --livetime-segments will retrieve segments with this definer. If none is provided, all segments will be used. Note: this option is REQUIRED if querying a databse. Example: H1:DMT-SCIENCE:1 (version is required)" )
	livetime_settings.add_option( "--segment-database", help="Query this URL for segments. Takes precedence over providing a file." )
	livetime_settings.add_option( "--gps-start", type=int, help="GPS start of analysis." )
	livetime_settings.add_option( "--gps-end", type=int, help="GPS end of analysis." )
	opt.add_option_group( livetime_settings )

	opts, args = opt.parse_args()
	if opts.instrument is None:
		print >>sys.stderr, "Instrument must be indicated."
		exit()
	if opts.reference_channel is None:
		print >>sys.stderr, "Reference channel must be indicated."
		exit()
	if opts.reference_triggers is None:
		print >>sys.stderr, "Reference triggers must be present."
		exit()
	if opts.livetime_segments is None and opts.segment_database is None:
		print >>sys.stderr, "Must provide livetime segments file or segment database location."
		exit()
	if opts.segment_database and (opts.segment_definer is None):
		print >>sys.stderr, "Must provide definer for segment database querying."
		exit()
	if len(args) == 0 and opts.input_cache is None:
		print >>sys.stderr, "Must provide input arguments or set --input-cache."
		exit()
	if opts.input_cache is not None:
		with open(opts.input_cache) as cache:
			c = Cache.fromfile(cache)
			args.extend( c.pfnlist() )
	if opts.ignore_list is None:
		print >>sys.stderr, "Must provide a channel ignore list."
		exit()

	return opts, args
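A hedged example of driving process_options() from a script; every file path below is a placeholder, not from the original:

import sys
# simulate a command line (all paths are hypothetical)
sys.argv = ['hveto_runner', '-i', 'L1',
            '-r', 'L1:GDS-CALIB_STRAIN',
            '-t', 'reference_triggers.xml',
            '-l', 'livetime_segments.txt',
            '-I', 'ignore_channels.txt',
            'aux_triggers.xml']
opts, args = process_options()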
Example #49
def get_cache(start, end, ifo, channel, mask='DOWNSELECT', checkfilesexist=False,\
              **kwargs):
    """
    Returns a glue.lal.Cache containing CacheEntries for all omega online
    trigger files between the given start and end time for the given ifo.
    """
    cache = Cache()
    
    # verify host
    host = {'G1': 'atlas', 'H1': 'ligo-wa', 'H2': 'ligo-wa', 'L1': 'ligo-la'}
    if ('directory' not in kwargs and not re.search(host[ifo], getfqdn())):
        sys.stderr.write("warning: Omega online files are not available for "+\
                         "IFO=%s on this host." % ifo)
        sys.stderr.flush()
        return cache

    span = segments.segment(start,end)
    if ifo == 'G1':
        if channel:
            kwargs.setdefault('directory', '/home/omega/online/%s/segments' % channel.replace(':','_'))
        else:
            kwargs.setdefault('directory', '/home/omega/online/G1/segments')
        kwargs.setdefault('epoch', 0)
    else:
        kwargs.setdefault('directory',\
                            '/home/omega/online/%s/archive/S6/segments' % ifo)
        kwargs.setdefault('epoch', 931211808)
    kwargs.setdefault('duration', 64)
    kwargs.setdefault('overlap', 8)

    # optimise
    append       = cache.append
    splitext     = os.path.splitext
    isfile   = os.path.isfile
    intersects   = span.intersects
    segment      = segments.segment
    from_T050017 = CacheEntry.from_T050017
    basedir      = kwargs['directory']
    basetime     = kwargs['epoch']
    triglength   = kwargs['duration']
    overlap      = kwargs['overlap']

    # get times
    start_time = int(start-numpy.mod(start-basetime,triglength-overlap))
    t = start_time

    # loop over time segments constructing file paths and appending to the cache
    while t<end:
        if ifo == 'G1':
            trigfile = '%s/%.5d/%.10d-%.10d/%s-OMEGA_TRIGGERS_%s-%.10d-%d.txt'\
                % (basedir, t/100000, t, t+triglength, ifo, mask, t, triglength)
        else:
            trigfile = '%s/%.10d-%.10d/%s-OMEGA_TRIGGERS_%s-%.10d-%d.txt'\
                % (basedir, t, t+triglength, ifo, mask, t, triglength)
        if intersects(segment(t, t+triglength))\
        and (not checkfilesexist or isfile(trigfile)):
            append(from_T050017(trigfile))
        t+=triglength-overlap

    cache.sort(key=lambda e: e.path)

    return cache
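The path loop above walks fixed-length windows that overlap by a few seconds, with the first window aligned to an epoch-based grid. The generic pattern as a standalone sketch (the helper name is illustrative):

def windows(start, end, epoch, duration, overlap):
    """Yield window start times aligned to `epoch`, stepping by
    duration - overlap until `end` is passed."""
    step = duration - overlap
    t = int(start - (start - epoch) % step)  # snap to the epoch grid
    while t < end:
        yield t
        t += step

# e.g. list(windows(100, 300, epoch=0, duration=64, overlap=8))
# -> [56, 112, 168, 224, 280]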
Example #50
#
conditions = map(parse_specification, opts.specifier)

#
# Put the conditions together
#
channel_cond = defaultdict(list)
for inst, channel_name, op, threshold in conditions:
	channel = "%s:%s" % (inst, channel_name)
	channel_cond[channel].append((op, threshold))

#
# Read the datas and such
#
ifos = list(set([c[:2] for c in channel_cond.keys()]))
cache = Cache.fromfile(open(opts.frame_cache))
seg = cache.to_segmentlistdict()[ifos[0][0]][0]
if opts.verbose:
	print "Loaded %s, total coverage time: %f" % (opts.frame_cache, abs(seg))

#
# Set up the XML document
#
xmldoc = ligolw.Document()
xmldoc.appendChild(ligolw.LIGO_LW())
# Append the process information
procrow = utils.process.append_process(xmldoc, program=sys.argv[0])
utils.process.append_process_params(xmldoc, procrow, process.process_params_from_dict(opts.__dict__))

#
# Segment storage
Example #51
def find_online_cache(start, end, ifo, mask='DOWNSELECT',
                      check_files=False, **kwargs):
    """Find Omega Online files for the given GPS period.

    @param start
        GPS start time for search
    @param end
        GPS end time for search
    @param ifo
        observatory for search
    @param mask
        description tag of Omega ASCII to search
    @param check_files
        check that the returned files can be read on disk, default False
    @param kwargs UNDOCUMENTED
    """
    out = Cache()

    # verify host
    host = {'G1': 'atlas', 'H1': 'ligo-wa', 'H2': 'ligo-wa', 'L1': 'ligo-la'}
    if ('directory' not in kwargs and
            not re.search(host[ifo], getfqdn())):
        sys.stderr.write("WARNING: Omega online files are not available for "
                         "IFO=%s on this host." % ifo)
        sys.stderr.flush()
        return out

    span = segments.segment(start,end)
    # get parameters
    dt = kwargs.pop("duration", 64)
    overlap = kwargs.pop("overlap", 8)
    if ifo == "G1":
        directory = kwargs.pop("directory", "/home/omega/online/G1/segments")
        epoch = kwargs.pop("epoch", 983669456)
    else:
        directory = kwargs.pop("directory",
                               "/home/omega/online/%s/archive/S6/segments"
                               % ifo)
        # S6 archive epoch (same default as get_cache above)
        epoch = kwargs.pop("epoch", 931211808)

    # optimise
    append = out.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    intersects = span.intersects
    segment = segments.segment
    from_T050017 = CacheEntry.from_T050017

    # get times
    start_time = int(start-numpy.mod(start-epoch, dt-overlap))
    t = start_time

    if ifo == "G1":
        def _omega_file(gps, ifo):
            return ("%s/%.5d/%.10d-%.10d/%s-OMEGA_TRIGGERS_%s-%.10d-%d.txt"
                    % (directory, gps/100000, gps, gps+dt, ifo, mask, gps, dt))
    else:
        def _omega_file(gps, ifo):
            return ("%s/%s-%s/%s-OMEGA_TRIGGERS_%s-%s-%s.txt"
                    % (directory, gps, gps+dt, ifo, mask, gps, dt))

    # loop over time segments constructing file paths
    while t<end:
        fp = _omega_file(t, ifo)
        if (intersects(segment(t, t+dt)) and
               (not check_files or isfile(fp))):
            append(from_T050017(fp))
        t += dt - overlap
    out.sort(key=lambda e: e.path)

    return out
Example #52
def find_dmt_cache(start, end, ifo, check_files=False, **kwargs):
    """Find DMTOmega files for the given GPS period.

    @param start
        GPS start time for search
    @param end
        GPS end time for search
    @param ifo
        observatory for search
    @param check_files
        check that the returned files can be read on disk, default False
    @param kwargs UNDOCUMENTED
    """
    out = Cache()

    # verify host
    host = {'G1': 'atlas', 'H1': 'ligo-wa', 'H2': 'ligo-wa', 'L1': 'ligo-la'}
    if ('directory' not in kwargs and
            not re.search(host[ifo], getfqdn())):
        sys.stderr.write("WARNING: DMT-Omega files are not available for "
                         "IFO=%s on this host." % ifo)
        sys.stderr.flush()
        return out

    span = segments.segment(start,end)

    # set known epochs
    known_epochs = {1031340854:55, 1041657635:55, 1041669472:55,
                    1041682187:55, 1044093810:38, 1044111232:38, 1044111282:38,
                    1044112180:38, 1057700030:38, 1057722672:38}

    # get parameters
    epoch = kwargs.pop("epoch", sorted(known_epochs.keys()))
    dt = kwargs.pop("duration", 55)
    try:
        iter(epoch)
    except TypeError:
        epoch = [epoch]
    overlap = kwargs.pop("overlap", 0)
    directory = kwargs.pop("duration",
                           "/gds-%s/dmt/triggers/%s-Omega_Triggers"
                           % (ifo.lower(), ifo[0].upper()))

    # optimise
    append = out.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    intersects = span.intersects
    segment = segments.segment
    from_T050017 = CacheEntry.from_T050017

    # get times
    epoch_idx = bisect.bisect_right(epoch, start)-1
    try:
        dt = known_epochs[epoch[epoch_idx]]
    except KeyError:
        dt = 38
    next_epoch = len(epoch) >= epoch_idx+2  and epoch[epoch_idx+1] or 0
    start_time = int(start-numpy.mod(start-epoch[epoch_idx], dt-overlap))
    t = start_time

    def _omega_file(gps, ifo, deltaT):
        return ("%s/%s-OMEGA_TRIGGERS_CLUSTER-%.5s/"
                "%s-OMEGA_TRIGGERS_CLUSTER-%.10d-%d.xml"
                % (directory, ifo.upper(), gps, ifo.upper(), gps, deltaT))

    # loop over time segments constructing file paths
    while t<end:
        fp = _omega_file(t, ifo, dt)
        if (intersects(segment(t, t+dt)) and
               (not check_files or isfile(fp))):
            append(from_T050017(fp))
        t += dt - overlap
        if next_epoch and t > next_epoch:
            try:
                dt = known_epochs[next_epoch]
            except KeyError:
                dt = 55
            t = next_epoch
            epoch_idx += 1
            next_epoch = len(epoch) >= epoch_idx+2  and epoch[epoch_idx+1] or 0
    out.sort(key=lambda e: e.path)

    return out
Example #53
def find_dmt_cache(start, end, ifo, extension="xml", check_files=False,
                   **kwargs):
    """Find DMT KW files for the given GPS period.

    @param start
        GPS start time for search
    @param end
        GPS end time for search
    @param ifo
        observatory for search
    @param extension UNDOCUMENTED
    @param check_files
        check that the returned files can be read on disk, default False
    @param kwargs UNDOCUMENTED
    """
    out = Cache()

    # verify host
    host = {'G1': 'atlas', 'H1': 'ligo-wa', 'H2': 'ligo-wa', 'L1': 'ligo-la'}
    if ('directory' not in kwargs and
            not re.search(host[ifo], getfqdn())):
        sys.stderr.write("WARNING: KW online files are not available for "
                         "IFO=%s on this host." % ifo)
        sys.stderr.flush()
        return out

    span = segments.segment(start,end)

    # set known epochs
    known_epochs = [1026263104]

    # get parameters
    dt = int(kwargs.pop("duration", 64))
    epoch = kwargs.pop("epoch", known_epochs)
    filetag = kwargs.pop("filetag", "KW_TRIGGERS")
    dirtag = filetag.endswith("_TRENDS") and filetag[:-7] or filetag
    try:
        iter(epoch)
    except TypeError:
        epoch = [int(epoch)]
    overlap = int(kwargs.pop("overlap", 0))
    directory = kwargs.pop("duration",
                           "/gds-%s/dmt/triggers/%s-%s"
                           % (ifo.lower(), ifo[0].upper(), dirtag))

    # optimise
    append = out.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    intersects = span.intersects
    segment = segments.segment
    from_T050017 = CacheEntry.from_T050017

    # get times
    epoch_idx = bisect.bisect_right(epoch, start)-1
    next_epoch = len(epoch) >= epoch_idx+2  and epoch[epoch_idx+1] or 0
    start_time = int(start-numpy.mod(start-epoch[epoch_idx], dt-overlap))
    t = start_time

    def _kw_file(gps, ifo):
        return ("%s/%s-%s-%.5s/"
                "%s-%s-%.10d-%d.%s"
                % (directory, ifo.upper()[0], dirtag, gps,
                   ifo.upper()[0], filetag, gps, dt, extension))

    # loop over time segments constructing file paths
    while t<end:
        fp = _kw_file(t, ifo)
        if (intersects(segment(t, t+dt)) and
               (not check_files or isfile(fp))):
            append(from_T050017(fp))
        t += dt - overlap
        if next_epoch and t > next_epoch:
            t = next_epoch
            epoch_idx += 1
            next_epoch = len(epoch) >= epoch_idx+2  and epoch[epoch_idx+1] or 0
    out.sort(key=lambda e: e.path)

    return out
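A hypothetical invocation of this finder (file availability depends on the host the code runs on; the GPS times are illustrative):

# find one day of L1 KW trigger files, keeping only paths
# that actually exist on disk
cache = find_dmt_cache(1126224000, 1126310400, 'L1',
                       extension='xml', check_files=True)
print('%d KW trigger files found' % len(cache))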