Code Example #1
File: detchar.py Project: lpsinger/gwcelery
def create_cache(ifo, start, end):
    """Find .gwf files and create cache. Will first look in the llhoft, and
    if the frames have expired from llhoft, will call gwdatafind.

    Parameters
    ----------
    ifo : str
        Interferometer name (e.g. ``H1``).
    start, end: int or float
        GPS start and end times desired.

    Returns
    -------
    :class:`glue.lal.Cache`

    Example
    -------
    >>> create_cache('H1', 1198800018, 1198800618)
    [<glue.lal.CacheEntry at 0x7fbae6b71278>,
      <glue.lal.CacheEntry at 0x7fbae6ae5b38>,
      <glue.lal.CacheEntry at 0x7fbae6ae5c50>,
     ...
      <glue.lal.CacheEntry at 0x7fbae6b15080>,
      <glue.lal.CacheEntry at 0x7fbae6b15828>]

    """
    pattern = app.conf['llhoft_glob'].format(detector=ifo)
    filenames = glob.glob(pattern)
    cache = Cache.from_urls(filenames)

    try:
        cache_starttime = int(
            list(cache.to_segmentlistdict().values())[0][0][0])
    except IndexError:
        log.exception('Files do not exist in llhoft_glob')
        return cache  # returns empty cache

    if start >= cache_starttime:  # required data is in llhoft
        return cache

    # otherwise, required data has left llhoft
    high_latency = app.conf['high_latency_frame_types'][ifo]
    urls = find_urls(ifo[0], high_latency, start, end)
    if urls:
        return Cache.from_urls(urls)

    # required data not in high latency frames
    low_latency = app.conf['low_latency_frame_types'][ifo]
    urls = find_urls(ifo[0], low_latency, start, end)
    if not urls:  # required data not in low latency frames
        log.error('This data cannot be found, or does not exist.')

    return Cache.from_urls(urls)
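
Usage note: the cache returned by create_cache can be passed straight to gwpy readers, as in Code Example #4 below. A minimal sketch, assuming a hypothetical state-vector channel name and the GPS interval from the docstring:

from gwpy.timeseries import StateVector

cache = create_cache('H1', 1198800018, 1198800618)
# the channel name here is a placeholder; use whichever state-vector channel you need
state = StateVector.read(cache, 'H1:GDS-CALIB_STATE_VECTOR',
                         start=1198800018, end=1198800618)
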
Code Example #2
File: data.py Project: ligovirgo/pyomicron
def _find_frames_datafind(obs, frametype, start, end, **kwargs):
    kwargs.setdefault('urltype', 'file')
    cache = list(map(
        path_from_file_url,
        gwdatafind.find_urls(obs[0], frametype, start, end, **kwargs),
    ))

    # use latest frame to find more recent frames that aren't in
    # datafind yet, this is quite hacky, and isn't guaranteed to
    # work at any point, but it shouldn't break anything
    try:
        latest = cache[-1]
    except IndexError:  # no frames, `cache` is list()
        latestgps = start
    else:
        cache.extend(_find_more_files(latest))
        latestgps = file_segment(cache[-1])[1]

    # if we're searching for aggregated h(t), find more files
    # for the equivalent short h(t) type:
    if frametype in AGGREGATED_HOFT and latestgps < end:
        cache.extend(_find_frames_datafind(
            obs,
            AGGREGATED_HOFT[frametype],
            latestgps,
            end,
            **kwargs
        ))

    return cache
Code Example #3
def _find_frames_datafind(obs, frametype, start, end, **kwargs):
    kwargs.setdefault('urltype', 'file')
    cache = list(
        map(
            path_from_file_url,
            gwdatafind.find_urls(obs[0], frametype, start, end, **kwargs),
        ))

    # use latest frame to find more recent frames that aren't in
    # datafind yet, this is quite hacky, and isn't guaranteed to
    # work at any point, but it shouldn't break anything
    try:
        latest = cache[-1]
    except IndexError:  # no frames, `cache` is list()
        latestgps = start
    else:
        cache.extend(_find_more_files(latest))
        latestgps = file_segment(cache[-1])[1]

    # if we're searching for aggregated h(t), find more files
    # for the equivalent short h(t) type:
    if frametype in AGGREGATED_HOFT and latestgps < end:
        cache.extend(
            _find_frames_datafind(obs, AGGREGATED_HOFT[frametype], latestgps,
                                  end, **kwargs))

    return cache
Code Example #4
File: event.py Project: transientlunatic/asimov
def checkifo(event):
    server, repository = connect_gitlab()
    gitlab_events = gitlab.find_events(repository, subset=event)

    for event in gitlab_events:
        if "event time" not in event.event_object.meta:
            print(f"Time not found {event.event_object.name}")
        time = event.event_object.meta['event time']
        gpsstart = time - 600
        gpsend = time + 600
        bits = ['Bit 0', 'Bit 1', 'Bit 2']

        active_ifo = []
        for ifo in ["L1", "H1", "V1"]:
            frametypes = event.event_object.meta['data']['frame-types']
            urls = find_urls(site=f"{ifo[0]}",
                             frametype=frametypes[ifo],
                             gpsstart=gpsstart,
                             gpsend=gpsend)
            datacache = Cache.from_urls(urls)
            if len(datacache) == 0:
                print(f"No {ifo} data found.")
                continue

            if "state vector" in event.meta:
                state_vector_channel = event.meta['state vector']
            else:
                state_vector_channel = ast.literal_eval(
                    config.get("data", "state-vector"))

            state = gwpy.timeseries.StateVector.read(
                datacache,
                state_vector_channel[ifo],
                start=gpsstart,
                end=gpsend,
                pad=0,  # pad data so that errors are not raised even if found data are not continuous
            )
            if not np.issubdtype(state.dtype, np.unsignedinteger):
                # if data are not unsigned integers, cast to them now so that
                # we can determine the bit content for the flags
                state = state.astype(
                    "uint32",
                    casting="unsafe",
                    subok=True,
                    copy=False,
                )
            flags = state.to_dqflags()

            segments = flags[bits[0]].active
            for bit in bits:
                segments -= ~flags[bit].active

            if len(segments) > 0:
                active_ifo += [ifo]
        print(event.event_object.name)
        if event.event_object.meta['interferometers'] != active_ifo:
            print(f"Gitlab data\t{event.event_object.meta['interferometers']}")
            print(f"Recommended IFOS\t{active_ifo}")
        event.event_object.meta['interferometers'] = active_ifo
        event.update_data()
Code Example #5
def _data_exists(end, frametype_dict):
    """Check whether data at end time can be found with gwdatafind and return
    true it it is found.
    """
    return min(
        len(find_urls(ifo[0], frametype_dict[ifo], end, end + 1))
        for ifo in frametype_dict.keys()) > 0
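
Usage note: a minimal sketch of calling the helper above, assuming a hypothetical detector-to-frametype mapping and GPS end time:

# hypothetical mapping of detector prefix to frame type
frametypes = {'H1': 'H1_HOFT_C00', 'L1': 'L1_HOFT_C00'}
if _data_exists(1187008884, frametypes):
    print('data found for all detectors at the requested end time')
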
Code Example #6
def _discover_data_source(obs, frametype, start, end, preview):
    """Determine filepaths to local gravitational-wave frame files
    """
    # get paths to frame files
    cache1 = gwdatafind.find_urls(
        obs,
        frametype,
        start - preview,
        start,
    )
    cache2 = gwdatafind.find_urls(
        obs,
        frametype,
        end,
        end + 1,
    )
    return (cache1, cache2)
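
Usage note: a minimal sketch of calling the helper above; the observatory letter, frame type, GPS interval, and preview length are placeholders:

# frames covering [start - preview, start) and [end, end + 1)
cache_before, cache_after = _discover_data_source(
    'L', 'L1_R', 1187008882, 1187008884, preview=8)
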
Code Example #7
 def _query():
     return gwdatafind.find_urls(ifo[0].upper(),
                                 frametype,
                                 gpsstart,
                                 gpsend,
                                 urltype=urltype,
                                 on_gaps=gaps,
                                 match=match,
                                 host=host,
                                 port=port)
Code Example #8
File: daq.py Project: andrew-lundgren/gwdetchar
def _ligo_model_overflow_channels_gwf(dcuid, ifo, frametype, gpstime):
    try:
        framefile = find_urls(ifo[0], frametype, gpstime, gpstime)[0]
    except IndexError as e:
        e.args = ('No %s-%s frames found at GPS %d'
                  % (ifo[0], frametype, gpstime),)
        raise
    try:
        return _CHANNELS[framefile]
    except KeyError:
        _CHANNELS[framefile] = get_channel_names(framefile)
        return _CHANNELS[framefile]
Code Example #9
    def get_urls(self, host, args):

        urls = []

        # Parse the input arguments
        for arg in args:
            attr, value = arg.split("=")
            if attr == "observatory":
                observatory = value
            if attr == "type":
                type = value
            if attr == "s":
                start_frame = value
            if attr == "e":
                end_frame = value

        # If any input arguments are missing, report error and exit
        try:
            observatory, type, end_frame, start_frame
        except NameError:
            outfile_dict = get_error_dict(
                "gwdata:// urls must include arguments 'observatory', "
                "'type', 's' (start frame), 'e' (end frame)")
            with open(self.outfile_path, 'w') as outfile:
                outfile.write(str(classad.ClassAd(outfile_dict)))
            sys.exit(-1)

        # Retrieve the list of URLs
        try:
            urls = gwdatafind.find_urls(host=host,
                                        site=observatory,
                                        frametype=type,
                                        gpsstart=int(start_frame),
                                        gpsend=int(end_frame))
        except Exception as e:
            outfile_dict = get_error_dict(
                f"Error retrieving gwdatafind URLs: {sys.exc_info()[0]} ({e})")
            with open(self.outfile_path, 'w') as outfile:
                outfile.write(str(classad.ClassAd(outfile_dict)))
            sys.exit(-1)

        return urls
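
Usage note: the expected argument format follows the error message above, i.e. a list of "key=value" strings. A hypothetical invocation (the plugin instance and datafind host are placeholders):

args = ['observatory=H', 'type=H1_HOFT_C00', 's=1187008880', 'e=1187008884']
urls = plugin.get_urls('datafind.ligo.org', args)
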
Code Example #10
def check_data_availability(obs, frametype, start, end):
    """Check for the full data availability for this frame type

    Parameters
    ----------
    obs : `str`
        the initial for the observatory
    frametype : `str`
        the name of the frame type for which to search
    start : `int`
        the GPS start time of this search
    end : `int`
        the GPS end time of this search

    Raises
    ------
    ValueError
        if gaps are found in the frame archive for the given frame type
    """
    return gwdatafind.find_urls(obs[0], frametype, start, end, on_gaps='error')
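
Usage note: with on_gaps='error', an incomplete archive raises an exception rather than returning a partial list. A minimal sketch, assuming a hypothetical frame type and GPS interval; the docstring advertises ValueError, and catching RuntimeError as well is a defensive assumption here:

try:
    urls = check_data_availability('L1', 'L1_R', 1187008880, 1187008884)
except (ValueError, RuntimeError):  # raised when gaps are found (see docstring)
    print('gaps found in the L1_R archive for this interval')
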
Code Example #11
File: data.py Project: ligovirgo/pyomicron
def check_data_availability(obs, frametype, start, end):
    """Check for the full data availability for this frame type

    Parameters
    ----------
    obs : `str`
        the initial for the observatory
    frametype : `str`
        the name of the frame type for which to search
    start : `int`
        the GPS start time of this search
    end : `int`
        the GPS end time of this search

    Raises
    ------
    ValueError
        if gaps are found in the frame archive for the given frame type
    """
    return gwdatafind.find_urls(obs[0], frametype, start, end, on_gaps='error')
Code Example #12
File: gwdata_plugin.py Project: wiene/htcondor
    def get_urls(self, host, args):

        urls = []

        # Parse the input arguments
        for arg in args:
            attr, value = arg.split("=")
            if attr == "observatory": observatory = value
            if attr == "type": type = value
            if attr == "s": start_frame = value
            if attr == "e": end_frame = value

        # Retrieve the list of URLs
        try:
            urls = gwdatafind.find_urls(host=host,
                                        site=observatory,
                                        frametype=type,
                                        gpsstart=int(start_frame),
                                        gpsend=int(end_frame))
        except Exception as e:
            print("Error retrieving gwdatafind URLs: " +
                  str(sys.exc_info()[0]) + ": " + str(e))

        return urls
Code Example #13
def find_frames(ifo,
                frametype,
                gpsstart,
                gpsend,
                config=GWSummConfigParser(),
                urltype='file',
                gaps='warn',
                onerror='raise'):
    """Query the datafind server for GWF files for the given type

    Parameters
    ----------
    ifo : `str`
        prefix for the IFO of interest (either one or two characters)

    frametype : `str`
        name of the frametype to find

    gpsstart : `int`
        GPS start time of the query

    gpsend : `int`
        GPS end time of the query

    config : `~ConfigParser.ConfigParser`, optional
        configuration with `[datafind]` section containing `server`
        specification, otherwise taken from the environment

    urltype : `str`, optional
        what type of file paths to return, default: `file`

    gaps : `str`, optional
        what to do when gaps are detected, one of

        - `ignore` : do nothing
        - `warn` : display the existence of gaps but carry on
        - `raise` : raise an exception

    onerror : `str`, optional
        what to do when the `gwdatafind` query itself fails, same
        options as for ``gaps``

    Returns
    -------
    cache : `list` of `str`
        a list of file paths pointing at GWF files matching the request
    """
    vprint('    Finding %s-%s frames for [%d, %d)...' %
           (ifo[0], frametype, int(gpsstart), int(gpsend)))
    # find datafind host:port
    try:
        host = config.get('datafind', 'server')
    except (NoOptionError, NoSectionError):
        host = None
        port = None
    else:
        port = config.getint('datafind', 'port')

    # XXX HACK: LLO changed frame types on Dec 6 2013:
    LLOCHANGE = 1070291904
    if re.match(r'L1_[CRMT]', frametype) and gpsstart < LLOCHANGE:
        frametype = frametype[-1]

    # query frames
    ifo = ifo[0].upper()
    gpsstart = int(floor(gpsstart))
    gpsend = int(ceil(min(globalv.NOW, gpsend)))
    if gpsend <= gpsstart:
        return []

    # parse match
    try:
        frametype, match = frametype.split('|', 1)
    except ValueError:
        match = None

    def _query():
        return gwdatafind.find_urls(ifo[0].upper(),
                                    frametype,
                                    gpsstart,
                                    gpsend,
                                    urltype=urltype,
                                    on_gaps=gaps,
                                    match=match,
                                    host=host,
                                    port=port)

    try:
        cache = _query()
    except RuntimeError as e:
        sleep(1)
        try:
            cache = _query()
        except RuntimeError:
            if 'Invalid GPS times' in str(e):
                e.args = ('%s: %d ... %s' % (str(e), gpsstart, gpsend), )
            if onerror in ['ignore', None]:
                pass
            elif onerror in ['warn']:
                warnings.warn('Caught %s: %s' % (type(e).__name__, str(e)))
            else:
                raise
            cache = []

    # XXX: if querying for day of LLO frame type change, do both
    if (ifo[0].upper() == 'L' and frametype in ['C', 'R', 'M', 'T']
            and gpsstart < LLOCHANGE < gpsend):
        start = len(cache) and cache[-1].segment[1] or gpsstart
        if start < gpsend:
            cache.extend(
                gwdatafind.find_urls(ifo[0].upper(),
                                     'L1_%s' % frametype,
                                     start,
                                     gpsend,
                                     urltype=urltype,
                                     on_gaps=gaps,
                                     host=host,
                                     port=port)[1:])

    # extend cache beyond datafind's knowledge to reduce latency
    try:
        latest = cache[-1]
        ngps = len(
            re_gwf_gps_epoch.search(
                os.path.dirname(latest)).groupdict()['gpsepoch'])
    except (IndexError, AttributeError):
        pass
    else:
        while True:
            s, e = file_segment(latest)
            if s >= gpsend:
                break
            # replace GPS time of file basename
            new = latest.replace('-%d-' % s, '-%d-' % e)
            # replace GPS epoch in dirname
            new = new.replace('%s/' % str(s)[:ngps], '%s/' % str(e)[:ngps])
            if os.path.isfile(new):
                cache.append(new)
            else:
                break

    # validate files existing and return
    cache = list(filter(os.path.exists, map(_urlpath, cache)))
    vprint(' %d found.\n' % len(cache))
    return cache
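
Usage note: a hypothetical call to the function above, warning rather than raising on both gaps and query failures (frame type and GPS interval are placeholders):

cache = find_frames('H1', 'H1_R', 1187008822, 1187008882,
                    gaps='warn', onerror='warn')
print('%d frame files found' % len(cache))
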
Code Example #14
File: timeseries.py Project: gwpy/gwsumm
 def _query():
     return gwdatafind.find_urls(ifo[0].upper(), frametype, gpsstart,
                                 gpsend, urltype=urltype, on_gaps=gaps,
                                 match=match, host=host, port=port)
Code Example #15
def main(args=None):
    """Run the software saturation command-line interface
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    # get IFO
    ifo = args.ifo.upper()
    site = ifo[0]
    frametype = args.frametype or '%s_R' % ifo

    # let's go
    LOGGER.info('{} Software saturations {}-{}'.format(
        args.ifo, int(args.gpsstart), int(args.gpsend)))

    # get segments
    span = Segment(args.gpsstart, args.gpsend)
    if args.state_flag:
        state = DataQualityFlag.query(args.state_flag, int(args.gpsstart),
                                      int(args.gpsend),
                                      url=const.DEFAULT_SEGMENT_SERVER)
        for i, seg in enumerate(state.active):
            state.active[i] = type(seg)(seg[0], seg[1]-args.pad_state_end)
        segs = state.active.coalesce()
        LOGGER.debug("Recovered %d seconds of time for %s"
                     % (abs(segs), args.state_flag))
    else:
        segs = SegmentList([Segment(args.gpsstart, args.gpsend)])

    # find frames
    cache = gwdatafind.find_urls(
        site, frametype, int(args.gpsstart), int(args.gpsend))

    # find channels
    if not os.getenv('LIGO_DATAFIND_SERVER'):
        raise RuntimeError("No LIGO_DATAFIND_SERVER variable set, don't know "
                           "how to discover channels")
    else:
        LOGGER.debug("Identifying channels in frame files")
        if len(cache) == 0:
            raise RuntimeError(
                "No frames recovered for %s in interval [%s, %s)" %
                (frametype, int(args.gpsstart),
                 int(args.gpsend)))
        allchannels = get_channel_names(cache[0])
        LOGGER.debug("   Found %d channels" % len(allchannels))
        sys.stdout.flush()
        channels = core.find_limit_channels(allchannels, skip=args.skip)
        LOGGER.info(
            "   Parsed %d channels with '_LIMIT' and '_LIMEN' or '_SWSTAT'"
            % sum(map(len, channels)))

    # -- read channels and check limits -------------

    saturations = DataQualityDict()
    bad = set()

    # TODO: use multiprocessing to separate channel list into discrete chunks
    #       should give a factor of X for X processes

    # check limens
    for suffix, clist in zip(['LIMEN', 'SWSTAT'], channels):
        nchans = len(clist)
        # group channels in sets for batch processing
        #     min of <number of channels>, user group size (sensible number),
        #     and 512 Mb of RAM for single-precision EPICS
        try:
            dur = max([float(abs(s)) for s in segs])
        except ValueError:
            ngroup = args.group_size
        else:
            ngroup = int(
                min(nchans, args.group_size, 2 * 1024**3 / 4. / 16. / dur))
        LOGGER.info('Processing %s channels in groups of %d' % (
            suffix, ngroup))
        sys.stdout.flush()
        sets = core.grouper(clist, ngroup)
        for i, cset in enumerate(sets):
            # remove empty entries used to pad the list to the group size
            cset = list(cset)
            while cset[-1] is None:
                cset.pop(-1)
            for seg in segs:
                cache2 = sieve_cache(cache, segment=seg)
                if not len(cache2):
                    continue
                saturated = core.is_saturated(
                    cset, cache2, seg[0], seg[1], indicator=suffix,
                    nproc=args.nproc)
                for new in saturated:
                    try:
                        saturations[new.name] += new
                    except KeyError:
                        saturations[new.name] = new
            for j, c in enumerate(cset):
                try:
                    sat = saturations[c]
                except KeyError:
                    LOGGER.debug('%40s:      SKIP      [%d/%d]'
                                 % (c, i*ngroup + j + 1, nchans))
                else:
                    if abs(sat.active):
                        LOGGER.debug('%40s: ---- FAIL ---- [%d/%d]'
                                     % (c, i*ngroup + j + 1, nchans))
                        for seg in sat.active:
                            LOGGER.debug(" " * 42 + str(seg))
                        bad.add(c)
                    else:
                        LOGGER.debug('%40s:      PASS      [%d/%d]'
                                     % (c, i*ngroup + j + 1, nchans))
                sys.stdout.flush()

    # -- log results and exit -----------------------

    if len(bad):
        LOGGER.info("Saturations were found for all of the following:\n\n")
        for c in bad:
            print(c)
        print('\n\n')
    else:
        LOGGER.info("No software saturations were found in any channels")

    # write segments to file
    outfile = ('%s-SOFTWARE_SATURATIONS-%d-%d.h5'
               % (ifo, int(args.gpsstart),
                  int(args.gpsend) - int(args.gpsstart)))
    LOGGER.info("Writing saturation segments to %s" % outfile)
    saturations.write(outfile, path="segments", overwrite=True)

    if args.html:
        # get base path
        base = os.path.dirname(args.html)
        os.chdir(base)
        if args.plot:
            args.plot = os.path.curdir
        segfile = os.path.relpath(outfile, os.path.dirname(args.html))
        if os.path.basename(args.html) == 'index.html':
            links = [
                '%d-%d' % (int(args.gpsstart), int(args.gpsend)),
                ('Parameters', '#parameters'),
                ('Segments', [('Software saturations',
                               '#software-saturations')]),
                ('Results', '#results'),
            ]
            if args.state_flag:
                links[2][1].insert(0, ('State flag', '#state-flag'))
            (brand, class_) = htmlio.get_brand(ifo, 'Saturations',
                                               args.gpsstart)
            navbar = htmlio.navbar(links, class_=class_, brand=brand)
            page = htmlio.new_bootstrap_page(
                navbar=navbar, title='%s Saturations | %d-%d' % (
                    ifo, int(args.gpsstart), int(args.gpsend)))
        else:
            page = markup.page()
            page.div(class_='container')
        # -- header
        page.div(class_='pb-2 mt-3 mb-2 border-bottom')
        page.h1('%s Software Saturations: %d-%d'
                % (ifo, int(args.gpsstart), int(args.gpsend)))
        page.div.close()
        # -- parameters
        content = [
            ('State end padding', args.pad_state_end),
            ('Skip', ', '.join(map(repr, args.skip)))]
        page.h2('Parameters', class_='mt-4 mb-4', id_='parameters')
        page.div(class_='row')
        page.div(class_='col-md-9 col-sm-12')
        page.add(htmlio.parameter_table(
            content, start=args.gpsstart, end=args.gpsend,
            flag=args.state_flag))
        page.div.close()  # col-md-9 col-sm-12
        page.div(class_='col-md-3 col-sm-12')
        page.add(htmlio.download_btn(
            [('Segments (HDF)', segfile)],
            btnclass='btn btn-%s dropdown-toggle' % ifo.lower(),
        ))
        page.div.close()  # col-md-9 col-sm-12
        page.div.close()  # row
        page.h5('Command-line:')
        page.add(htmlio.get_command_line(about=False, prog=PROG))
        # -- segments
        page.h2('Segments', class_='mt-4', id_='segments')
        msg = ("This analysis searched {0} filter bank readback channels for "
               "time periods during which their OUTPUT value matched or "
               "exceeded the LIMIT value set in software. Signals that "
               "achieve saturation are shown below, and saturation segments "
               "are available by expanding a given panel.").format(
                   sum(map(len, channels)))
        page.add(htmlio.alert(msg, context=ifo.lower()))
        # record state segments
        if args.state_flag:
            page.h3('State flag', class_='mt-3', id_='state-flag')
            page.div(id_='accordion1')
            page.add(htmlio.write_flag_html(
                state, span, 'state', parent='accordion1', context='success',
                plotdir=args.plot, facecolor=(0.2, 0.8, 0.2),
                edgecolor='darkgreen', known={
                    'facecolor': 'red',
                    'edgecolor': 'darkred',
                    'height': 0.4},
            ))
            page.div.close()
        # record saturation segments
        if len(bad):
            page.h3('Software saturations', class_='mt-3',
                    id_='software-saturations')
            page.div(id_='accordion2')
            for i, (c, flag) in enumerate(saturations.items()):
                if abs(flag.active) > 0:
                    title = '%s [%d]' % (flag.name, len(flag.active))
                    page.add(htmlio.write_flag_html(
                        flag, span=span, id=i, parent='accordion2',
                        title=title, plotdir=args.plot))
            page.div.close()
        else:
            page.add(htmlio.alert('No software saturations were found in this '
                                  'analysis', context=ifo.lower(),
                                  dismiss=False))
        # -- results table
        page.h2('Results summary', class_='mt-4', id_='results')
        page.add(htmlio.alert('All channels for which the LIMIT setting was '
                              'active are shown below.', context=ifo.lower()))
        page.table(class_='table table-striped table-hover')
        # write table header
        page.thead()
        page.tr()
        for header in ['Channel', 'Result', 'Num. saturations']:
            page.th(header)
        page.thead.close()
        # write body
        page.tbody()
        for c, seglist in saturations.items():
            passed = abs(seglist.active) == 0
            if passed:
                page.tr()
            else:
                page.tr(class_='table-warning')
            page.td(c)
            page.td(passed and 'Pass' or 'Fail')
            page.td(len(seglist.active))
            page.tr.close()
        page.tbody.close()
        page.table.close()
        # close and write
        htmlio.close_page(page, args.html)
Code Example #16
def get_data(channel, start, end, frametype=None, source=None,
             nproc=1, verbose=False, **kwargs):
    """Retrieve data for given channels within a certain time range

    Parameters
    ----------
    channel : `str` or `list`
        either a single channel name, or a list of channel names

    start : `float`
        GPS start time of requested data

    end : `float`
        GPS end time of requested data

    frametype : `str`, optional
        name of frametype in which channel(s) are stored, default: `None`

    source : `str`, `list`, optional
        path(s) of a LAL-format cache file or individual data file,
        default: `None`

    nproc : `int`, optional
        number of parallel processes to use, uses serial process by default

    verbose : `bool`, optional
        print verbose output about NDS progress, default: False

    **kwargs : `dict`, optional
        additional keyword arguments to `~gwpy.timeseries.TimeSeries.read`
        or `~gwpy.timeseries.TimeSeries.get`

    Returns
    -------
    data : `~gwpy.timeseries.TimeSeries` or `~gwpy.timeseries.TimeSeriesDict`
        collection of data for the requested channels in the requested time
        range

    Notes
    -----
    If `channel` is a `str`, then a `TimeSeries` object will be returned, else
    the result is a `TimeSeriesDict`.

    The `frametype` argument should be used to read from archived frame files,
    while `source` should be used to read from a local cache or specific data
    file. If either fails, or if neither is passed, this function will attempt
    to get data over an NDS server.

    If `frametype` is used to read from the archive, any channels missing
    from the first or last frame file in the requested time range will be
    ignored.

    See Also
    --------
    remove_missing_channels
        a utility that removes channels missing from the frame archive
    gwpy.timeseries.TimeSeries.get
        the underlying method to read data over an NDS server
    gwpy.timeseries.TimeSeries.read
        the underlying method to read data from local files
    """
    # get TimeSeries class
    if isinstance(channel, (list, tuple)):
        series_class = TimeSeriesDict
    else:
        series_class = TimeSeries

    if frametype is not None:
        try:  # locate frame files
            ifo = re.search('[A-Z]1', frametype).group(0)
            obs = ifo[0]
            source = gwdatafind.find_urls(obs, frametype, start, end)
        except AttributeError:
            raise AttributeError(
                'Could not determine observatory from frametype')
        except (HTTPError, JSONDecodeError):  # frame files not found
            pass
    if isinstance(source, list) and isinstance(channel, (list, tuple)):
        channel = remove_missing_channels(channel, source)
    if source is not None:  # read from frame files
        return series_class.read(
            source, channel, start=start, end=end, nproc=nproc,
            verbose=verbose, **kwargs)

    # read single channel from NDS
    if not isinstance(channel, (list, tuple)):
        return series_class.get(
            channel, start, end, verbose=verbose, **kwargs)

    # if all else fails, process channels in groups of 60
    data = series_class()
    for group in [channel[i:i + 60] for i in range(0, len(channel), 60)]:
        data.append(series_class.get(
            group, start, end, verbose=verbose, **kwargs))
    return data
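
Usage note: a minimal sketch of calling get_data for a single channel read from the frame archive; the channel and frame-type names and GPS times are placeholders:

data = get_data('H1:GDS-CALIB_STRAIN', 1187008880, 1187008884,
                frametype='H1_HOFT_C00', nproc=2, verbose=True)
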
Code Example #17
File: mct.py Project: siddharth101/gwdetchar-1
def main(args=None):
    """Run the zero-crossing counter tool
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    span = Segment(args.gpsstart, args.gpsend)
    LOGGER.info('-- Processing channel %s over span %d - %d' %
                (args.channel, args.gpsstart, args.gpsend))

    if args.state_flag:
        state = DataQualityFlag.query(
            args.state_flag,
            int(args.gpsstart),
            int(args.gpsend),
            url=const.DEFAULT_SEGMENT_SERVER,
        )
        statea = state.active
    else:
        statea = SegmentList([span])

    duration = abs(span)

    # initialize output files for each threshold and store them in a dict
    outfiles = {}
    for thresh in args.threshold:
        outfiles[str(thresh)] = (os.path.join(
            args.output_path, '%s_%s_DAC-%d-%d.h5' %
            (args.channel.replace('-', '_').replace(':', '-'), str(
                int(thresh)).replace('-', 'n'), int(args.gpsstart), duration)))

    # get frame cache
    cache = gwdatafind.find_urls(args.ifo[0], args.frametype,
                                 int(args.gpsstart), int(args.gpsend))

    cachesegs = statea & cache_segments(cache)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    # initialize a ligolw table for each threshold and store them in a dict
    names = ("time", "frequency", "snr")
    dtypes = ("f8", ) * len(names)
    tables = {}
    for thresh in args.threshold:
        tables[str(thresh)] = EventTable(
            names=names,
            dtype=dtypes,
            meta={"channel": args.channel},
        )

    # for each science segment, read in the data from frames, check for
    # threshold crossings, and if the rate of crossings is less than
    # rate_thresh, write to a sngl_burst table
    for seg in cachesegs:
        LOGGER.debug("Processing {}:".format(seg))
        c = sieve_cache(cache, segment=seg)
        if not c:
            LOGGER.warning("    No {} data files for this segment, "
                           "skipping".format(args.frametype))
            continue
        data = get_data(args.channel,
                        seg[0],
                        seg[1],
                        nproc=args.nproc,
                        source=c,
                        verbose="Reading data:".rjust(30))
        for thresh in args.threshold:
            times = find_crossings(data, thresh)
            rate = float(times.size) / abs(seg) if times.size else 0
            LOGGER.info("    Found {0} crossings of {1}, rate: {2} Hz".format(
                times.size,
                thresh,
                rate,
            ))
            if times.size and rate < args.rate_thresh:
                existing = tables[str(thresh)]
                tables[str(thresh)] = vstack_tables(
                    (
                        existing,
                        table_from_times(times,
                                         snr=10.,
                                         frequency=100.,
                                         names=existing.colnames),
                    ),
                    join_type="exact",
                )

    n = max(map(len, tables.values()))
    for thresh, outfile in outfiles.items():
        tables[thresh].write(
            outfile,
            path="triggers",
            format="hdf5",
            overwrite=True,
        )
        LOGGER.info("{0} events written to {1}".format(
            str(len(tables[thresh])).rjust(len(str(n))),
            outfile,
        ))
Code Example #18
File: timeseries.py Project: gwpy/gwsumm
def find_frames(ifo, frametype, gpsstart, gpsend, config=GWSummConfigParser(),
                urltype='file', gaps='warn', onerror='raise'):
    """Query the datafind server for GWF files for the given type

    Parameters
    ----------
    ifo : `str`
        prefix for the IFO of interest (either one or two characters)

    frametype : `str`
        name of the frametype to find

    gpsstart : `int`
        GPS start time of the query

    gpsend : `int`
        GPS end time of the query

    config : `~ConfigParser.ConfigParser`, optional
        configuration with `[datafind]` section containing `server`
        specification, otherwise taken from the environment

    urltype : `str`, optional
        what type of file paths to return, default: `file`

    gaps : `str`, optional
        what to do when gaps are detected, one of

        - `ignore` : do nothing
        - `warn` : display the existence of gaps but carry on
        - `raise` : raise an exception

    onerror : `str`, optional
        what to do when the `gwdatafind` query itself fails, same
        options as for ``gaps``

    Returns
    -------
    cache : `list` of `str`
        a list of file paths pointing at GWF files matching the request
    """
    vprint('    Finding %s-%s frames for [%d, %d)...'
           % (ifo[0], frametype, int(gpsstart), int(gpsend)))
    # find datafind host:port
    try:
        host = config.get('datafind', 'server')
    except (NoOptionError, NoSectionError):
        host = None
        port = None
    else:
        port = config.getint('datafind', 'port')

    # XXX HACK: LLO changed frame types on Dec 6 2013:
    LLOCHANGE = 1070291904
    if re.match(r'L1_[CRMT]', frametype) and gpsstart < LLOCHANGE:
        frametype = frametype[-1]

    # query frames
    ifo = ifo[0].upper()
    gpsstart = int(floor(gpsstart))
    gpsend = int(ceil(min(globalv.NOW, gpsend)))
    if gpsend <= gpsstart:
        return []

    # parse match
    try:
        frametype, match = frametype.split('|', 1)
    except ValueError:
        match = None

    def _query():
        return gwdatafind.find_urls(ifo[0].upper(), frametype, gpsstart,
                                    gpsend, urltype=urltype, on_gaps=gaps,
                                    match=match, host=host, port=port)
    try:
        cache = _query()
    except RuntimeError as e:
        sleep(1)
        try:
            cache = _query()
        except RuntimeError:
            if 'Invalid GPS times' in str(e):
                e.args = ('%s: %d ... %s' % (str(e), gpsstart, gpsend),)
            if onerror in ['ignore', None]:
                pass
            elif onerror in ['warn']:
                warnings.warn('Caught %s: %s'
                              % (type(e).__name__, str(e)))
            else:
                raise
            cache = []

    # XXX: if querying for day of LLO frame type change, do both
    if (ifo[0].upper() == 'L' and frametype in ['C', 'R', 'M', 'T'] and
            gpsstart < LLOCHANGE < gpsend):
        start = len(cache) and cache[-1].segment[1] or gpsstart
        if start < gpsend:
            cache.extend(gwdatafind.find_urls(
                ifo[0].upper(), 'L1_%s' % frametype, start, gpsend,
                urltype=urltype, on_gaps=gaps, host=host, port=port)[1:])

    # extend cache beyond datafind's knowledge to reduce latency
    try:
        latest = cache[-1]
        ngps = len(re_gwf_gps_epoch.search(
            os.path.dirname(latest)).groupdict()['gpsepoch'])
    except (IndexError, AttributeError):
        pass
    else:
        while True:
            s, e = file_segment(latest)
            if s >= gpsend:
                break
            # replace GPS time of file basename
            new = latest.replace('-%d-' % s, '-%d-' % e)
            # replace GPS epoch in dirname
            new = new.replace('%s/' % str(s)[:ngps], '%s/' % str(e)[:ngps])
            if os.path.isfile(new):
                cache.append(new)
            else:
                break

    # validate files existing and return
    cache = list(filter(os.path.exists, map(_urlpath, cache)))
    vprint(' %d found.\n' % len(cache))
    return cache
Code Example #19
File: datafind.py Project: ligovirgo/gwdetchar
def get_data(channel, start, end, frametype=None, source=None,
             nproc=1, verbose=False, **kwargs):
    """Retrieve data for given channels within a certain time range

    Parameters
    ----------
    channel : `str` or `list`
        either a single channel name, or a list of channel names

    start : `float`
        GPS start time of requested data

    end : `float`
        GPS end time of requested data

    frametype : `str`, optional
        name of frametype in which channel(s) are stored, default: `None`

    source : `str`, `list`, optional
        path(s) of a LAL-format cache file or individual data file,
        default: `None`

    nproc : `int`, optional
        number of parallel processes to use, uses serial process by default

    verbose : `bool`, optional
        print verbose output about NDS progress, default: False

    **kwargs : `dict`, optional
        additional keyword arguments to `~gwpy.timeseries.TimeSeries.read`
        or `~gwpy.timeseries.TimeSeries.get`

    Returns
    -------
    data : `~gwpy.timeseries.TimeSeries` or `~gwpy.timeseries.TimeSeriesDict`
        collection of data for the requested channels in the requested time
        range

    Notes
    -----
    If `channel` is a `str`, then a `TimeSeries` object will be returned, else
    the result is a `TimeSeriesDict`.

    The `frametype` argument should be used to read from archived frame files,
    while `source` should be used to read from a local cache or specific data
    file. If either fails, or if neither is passed, this function will attempt
    to get data over an NDS server.

    If `frametype` is used to read from the archive, any channels missing
    from the first or last frame file in the requested time range will be
    ignored.

    See Also
    --------
    remove_missing_channels
        a utility that removes channels missing from the frame archive
    gwpy.timeseries.TimeSeries.get
        the underlying method to read data over an NDS server
    gwpy.timeseries.TimeSeries.read
        the underlying method to read data from local files
    """
    # get TimeSeries class
    if isinstance(channel, (list, tuple)):
        series_class = TimeSeriesDict
    else:
        series_class = TimeSeries

    if frametype is not None:
        try:  # locate frame files
            ifo = re.search('[A-Z]1', frametype).group(0)
            obs = ifo[0]
            source = gwdatafind.find_urls(obs, frametype, start, end)
        except AttributeError:
            raise AttributeError(
                'Could not determine observatory from frametype')
        except (HTTPError, JSONDecodeError):  # frame files not found
            pass
    if isinstance(source, list) and isinstance(channel, (list, tuple)):
        channel = remove_missing_channels(channel, source)
    if source is not None:  # read from frame files
        return series_class.read(
            source, channel, start=start, end=end, nproc=nproc,
            verbose=verbose, **kwargs)

    # read single channel from NDS
    if not isinstance(channel, (list, tuple)):
        return series_class.get(
            channel, start, end, verbose=verbose, **kwargs)

    # if all else fails, process channels in groups of 60
    data = series_class()
    for group in [channel[i:i + 60] for i in range(0, len(channel), 60)]:
        data.append(series_class.get(
            group, start, end, verbose=verbose, **kwargs))
    return data
Code Example #20
def framecache_from_event(gid,
                          observatories,
                          frame_types,
                          time_span=500,
                          outdir=".",
                          filename="frame.cache",
                          verbose=False):
    """Get the frame cache for an event given the gracedb event id.

	Args:
		gid (str): The gracedb event id.
		observatories (list): See gwdatafind.
		frame_types (list): See gwdatafind.
		time_span (float): The time span before and after the trigger time.
		outdir (str, default="."): The output directory.
		filename (str, default="frame.cache"): The output filename.
		verbose (bool): Be verbose.

	Returns:
		Dictionary of instruments, trigger_times, gps_start_time,
		gps_end_time, channels_name.

	"""
    assert time_span >= 500., "Please use a time_span larger than or equal to 500."

    obs2ifo = {"H": "H1", "L": "L1", "V": "V1"}

    observatories = set(observatories)
    frame_types = set(frame_types)

    if len(observatories) != len(frame_types):
        raise ValueError("Must have as many frame_types as observatories.")
    # FIXME: This is not reliable, have a better way to map frame_type to observatory?
    obs_type_dict = dict([(obs, frame_type) for obs in observatories
                          for frame_type in frame_types
                          if obs == frame_type[0]])

    gracedb_client = gracedb.GraceDb()
    coinc_xmldoc = lvalert_helper.get_coinc_xmldoc(gracedb_client, gid)
    eventid_trigger_dict = dict(
        (row.ifo, row)
        for row in lsctables.SnglInspiralTable.get_table(coinc_xmldoc))
    channel_names_dict = dict([
        (row.value.split("=")[0], row.value)
        for row in lsctables.ProcessParamsTable.get_table(coinc_xmldoc)
        if row.param == "--channel-name"
    ])

    gwdata_metavar_headers = [
        "instruments", "trigger_times", "gps_start_time", "gps_end_time",
        "channels_name"
    ]
    gwdata_metavar_values = []
    urls = []
    for observatory, frame_type in obs_type_dict.items():
        trigger_time = eventid_trigger_dict[obs2ifo[observatory]].end
        gps_start_time = int(trigger_time - time_span)
        gps_end_time = int(trigger_time + time_span)
        gwdata_metavar_values.append(
            (obs2ifo[observatory], trigger_time, gps_start_time, gps_end_time,
             channel_names_dict[obs2ifo[observatory]]))

        urls += gwdatafind.find_urls(observatory, frame_type, gps_start_time,
                                     gps_end_time)

    with open(os.path.join(outdir, "frame.cache"), "w") as cache:
        for url in urls:
            filename = str(CacheEntry.from_T050017(url))
            cache.write("%s\n" % filename)
            if verbose:
                sys.stderr.write(
                    "writing %s to %s\n" %
                    (filename, os.path.join(outdir, "frame.cache")))
        if verbose:
            sys.stderr.write("Done.\n")

    return dict(zip(gwdata_metavar_headers, zip(*gwdata_metavar_values)))
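
Usage note: a hypothetical call to the function above; a real GraceDB event id and credentials are required, and "G123456" and the frame types are placeholders:

metadata = framecache_from_event(
    'G123456',
    observatories=['H', 'L'],
    frame_types=['H1_HOFT_C00', 'L1_HOFT_C00'],
    time_span=500,
    verbose=True,
)
print(metadata['instruments'], metadata['gps_start_time'], metadata['gps_end_time'])
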
Code Example #21
File: overflow.py Project: siddharth101/gwdetchar-1
def main(args=None):
    """Run the online Guardian node visualization tool
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    fec_map = args.fec_map
    simulink = args.simulink
    daqsvn = args.daqsvn or ('https://daqsvn.ligo-la.caltech.edu/websvn/'
                             'listing.php?repname=daq_maps')
    if args.ifo == 'H1':
        if not fec_map:
            fec_map = 'https://lhocds.ligo-wa.caltech.edu/exports/detchar/fec/'
        if not simulink:
            simulink = 'https://lhocds.ligo-wa.caltech.edu/daq/simulink/'
    if args.ifo == 'L1':
        if not fec_map:
            fec_map = 'https://llocds.ligo-la.caltech.edu/exports/detchar/fec/'
        if not simulink:
            simulink = 'https://llocds.ligo-la.caltech.edu/daq/simulink/'

    span = Segment(args.gpsstart, args.gpsend)

    # let's go
    LOGGER.info('{} Overflows {}-{}'.format(args.ifo, int(args.gpsstart),
                                            int(args.gpsend)))

    # get segments
    if args.state_flag:
        state = DataQualityFlag.query(args.state_flag,
                                      int(args.gpsstart),
                                      int(args.gpsend),
                                      url=const.DEFAULT_SEGMENT_SERVER)
        tmp = type(state.active)()
        for i, seg in enumerate(state.active):
            if abs(seg) < args.segment_end_pad:
                continue
            tmp.append(type(seg)(seg[0], seg[1] - args.segment_end_pad))
        state.active = tmp.coalesce()
        statea = state.active
    else:
        statea = SegmentList([span])

    if not args.output_file:
        duration = abs(span)
        args.output_file = ('%s-OVERFLOWS-%d-%d.h5' %
                            (args.ifo, int(args.gpsstart), duration))
        LOGGER.debug("Set default output file as %s" % args.output_file)

    # set up container
    overflows = DataQualityDict()

    # prepare data access
    if args.nds:
        from gwpy.io import nds2 as io_nds2
        host, port = args.nds.rsplit(':', 1)
        ndsconnection = io_nds2.connect(host, port=int(port))
        if ndsconnection.get_protocol() == 1:
            cachesegs = SegmentList(
                [Segment(int(args.gpsstart), int(args.gpsend))])
        else:
            cachesegs = io_nds2.get_availability(
                ['{0}:FEC-1_DAC_OVERFLOW_ACC_0_0'.format(args.ifo)],
                int(args.gpsstart),
                int(args.gpsend),
            )
    else:  # get frame cache
        cache = gwdatafind.find_urls(args.ifo[0], args.frametype,
                                     int(args.gpsstart), int(args.gpsend))
        cachesegs = statea & cache_segments(cache)

    flag_desc = "ADC/DAC Overflow indicated by {0}"

    # get channel and find overflows
    for dcuid in args.dcuid:
        LOGGER.info("Processing DCUID %d" % dcuid)
        channel = daq.ligo_accum_overflow_channel(dcuid, args.ifo)
        overflows[channel] = DataQualityFlag(channel, known=cachesegs)
        if args.deep:
            LOGGER.debug(" -- Getting list of overflow channels")
            try:
                channels = daq.ligo_model_overflow_channels(dcuid,
                                                            args.ifo,
                                                            args.frametype,
                                                            gpstime=span[0],
                                                            nds=args.nds)
            except IndexError:  # no frame found for GPS start, try GPS end
                channels = daq.ligo_model_overflow_channels(dcuid,
                                                            args.ifo,
                                                            args.frametype,
                                                            gpstime=span[-1])
            for chan in channels:  # set up flags early
                overflows[chan] = DataQualityFlag(
                    chan,
                    known=cachesegs,
                    description=flag_desc.format(chan),
                    isgood=False,
                )
            LOGGER.debug(" -- %d channels found" % len(channel))
        for seg in cachesegs:
            LOGGER.debug(" -- Processing {}-{}".format(*seg))
            if args.nds:
                read_kw = dict(connection=ndsconnection)
            else:
                read_kw = dict(source=cache, nproc=args.nproc)
            msg = "Reading ACCUM_OVERFLOW data:".rjust(30)
            data = get_data(channel,
                            seg[0],
                            seg[1],
                            pad=0.,
                            verbose=msg,
                            **read_kw)
            new = daq.find_overflow_segments(
                data,
                cumulative=True,
            )
            overflows[channel] += new
            LOGGER.info(" -- {} overflows found".format(len(new.active)))
            if not new.active:
                continue
            # go deep!
            for s, e in tqdm.tqdm(new.active.protract(2),
                                  unit='ovfl',
                                  desc='Going deep'.rjust(30)):
                data = get_data(channels, s, e, **read_kw)
                for ch in channels:
                    try:
                        overflows[ch] += daq.find_overflow_segments(
                            data[ch],
                            cumulative=True,
                        )
                    except KeyError:
                        warnings.warn("Skipping {}".format(ch), UserWarning)
                        continue
        LOGGER.debug(" -- Search complete")

    # write output
    LOGGER.info("Writing segments to %s" % args.output_file)
    table = table_from_segments(
        overflows,
        sngl_burst=args.output_file.endswith((".xml", ".xml.gz")),
    )
    if args.integer_segments:
        for key in overflows:
            overflows[key] = overflows[key].round()
    if args.output_file.endswith((".h5", "hdf", ".hdf5")):
        with h5py.File(args.output_file, "w") as h5f:
            table.write(h5f, path="triggers")
            overflows.write(h5f, path="segments")
    else:
        table.write(args.output_file, overwrite=True)
        overflows.write(args.output_file, overwrite=True, append=True)

    # write HTML
    if args.html:
        # get base path
        base = os.path.dirname(args.html)
        os.chdir(base)
        if args.plot:
            args.plot = os.path.curdir
        if args.output_file:
            args.output_file = os.path.relpath(args.output_file,
                                               os.path.dirname(args.html))
        if os.path.basename(args.html) == 'index.html':
            links = [
                '%d-%d' % (int(args.gpsstart), int(args.gpsend)),
                ('Parameters', '#parameters'),
                ('Segments', [('Overflows', '#overflows')]),
                ('Results', '#results'),
            ]
            if args.state_flag:
                links[2][1].insert(0, ('State flag', '#state-flag'))
            (brand, class_) = htmlio.get_brand(args.ifo, 'Overflows',
                                               args.gpsstart)
            navbar = htmlio.navbar(links, class_=class_, brand=brand)
            page = htmlio.new_bootstrap_page(
                title='%s Overflows | %d-%d' %
                (args.ifo, int(args.gpsstart), int(args.gpsend)),
                navbar=navbar)
        else:
            page = htmlio.markup.page()
            page.div(class_='container')

        # -- header
        page.div(class_='pb-2 mt-3 mb-2 border-bottom')
        page.h1('%s ADC/DAC Overflows: %d-%d' %
                (args.ifo, int(args.gpsstart), int(args.gpsend)))
        page.div.close()

        # -- parameters
        content = [('DCUIDs', ' '.join(map(str, args.dcuid)))]
        if daqsvn:
            content.append(('FEC configuration', (
                '<a href="{0}" target="_blank" title="{1} FEC configuration">'
                '{0}</a>').format(daqsvn, args.ifo)))
        if fec_map:
            content.append(
                ('FEC map', '<a href="{0}" target="_blank" title="{1} FEC '
                 'map">{0}</a>'.format(fec_map, args.ifo)))
        if simulink:
            content.append(
                ('Simulink models', '<a href="{0}" target="_blank" title="{1} '
                 'Simulink models">{0}</a>'.format(simulink, args.ifo)))
        page.h2('Parameters', class_='mt-4 mb-4', id_='parameters')
        page.div(class_='row')
        page.div(class_='col-md-9 col-sm-12')
        page.add(
            htmlio.parameter_table(content,
                                   start=args.gpsstart,
                                   end=args.gpsend,
                                   flag=args.state_flag))
        page.div.close()  # col-md-9 col-sm-12

        # link to summary file
        if args.output_file:
            ext = ('HDF' if args.output_file.endswith(
                (".h5", "hdf", ".hdf5")) else 'XML')
            page.div(class_='col-md-3 col-sm-12')
            page.add(
                htmlio.download_btn(
                    [('Segments ({})'.format(ext), args.output_file)],
                    btnclass='btn btn-%s dropdown-toggle' % args.ifo.lower(),
                ))
            page.div.close()  # col-md-3 col-sm-12
        page.div.close()  # row

        # -- command-line
        page.h5('Command-line:')
        page.add(htmlio.get_command_line(about=False, prog=PROG))

        # -- segments
        page.h2('Segments', class_='mt-4', id_='segments')

        # give contextual information
        msg = ("This analysis searched for digital-to-analogue (DAC) or "
               "analogue-to-digital (ADC) conversion overflows in the {0} "
               "real-time controls system. ").format(
                   SITE_MAP.get(args.ifo, 'LIGO'))
        if args.deep:
            msg += (
                "A hierarchichal search was performed, with one cumulative "
                "overflow counter checked per front-end controller (FEC). "
                "For those models that indicated an overflow, the card- and "
                "slot-specific channels were then checked. ")
        msg += (
            "Consant overflow is shown as yellow, while transient overflow "
            "is shown as red. If a data-quality flag was loaded for this "
            "analysis, it will be displayed in green.")
        page.add(htmlio.alert(msg, context=args.ifo.lower()))
        # record state segments
        if args.state_flag:
            page.h3('State flag', class_='mt-3', id_='state-flag')
            page.div(id_='accordion1')
            page.add(
                htmlio.write_flag_html(state,
                                       span,
                                       'state',
                                       parent='accordion1',
                                       context='success',
                                       plotdir=args.plot,
                                       facecolor=(0.2, 0.8, 0.2),
                                       edgecolor='darkgreen',
                                       known={
                                           'facecolor': 'red',
                                           'edgecolor': 'darkred',
                                           'height': 0.4,
                                       }))
            page.div.close()
        # record overflow segments
        if sum(abs(s.active) for s in overflows.values()):
            page.h3('Overflows', class_='mt-3', id_='overflows')
            page.div(id_='accordion2')
            for i, (c, flag) in enumerate(list(overflows.items())):
                if abs(flag.active) == 0:
                    continue
                if abs(flag.active) == abs(cachesegs):
                    context = 'warning'
                else:
                    context = 'danger'
                try:
                    channel = cds.get_real_channel(flag.name)
                except Exception:
                    title = '%s [%d]' % (flag.name, len(flag.active))
                else:
                    title = '%s (%s) [%d]' % (flag.name, channel,
                                              len(flag.active))
                page.add(
                    htmlio.write_flag_html(flag,
                                           span,
                                           i,
                                           parent='accordion2',
                                           title=title,
                                           context=context,
                                           plotdir=args.plot))
            page.div.close()
        else:
            page.add(
                htmlio.alert('No overflows were found in this analysis',
                             context=args.ifo.lower(),
                             dismiss=False))

        # -- results table
        page.h2('Results summary', class_='mt-4', id_='results')
        page.table(class_='table table-striped table-hover')
        # write table header
        page.thead()
        page.tr()
        for header in ['Channel', 'Connected signal', 'Num. overflows']:
            page.th(header)
        page.thead.close()
        # write body
        page.tbody()
        for c, seglist in overflows.items():
            t = abs(seglist.active)
            if t == 0:
                page.tr()
            elif t == abs(cachesegs):
                page.tr(class_='table-warning')
            else:
                page.tr(class_='table-danger')
            page.td(c)
            try:
                page.td(cds.get_real_channel(str(c)))
            except Exception:
                page.td()
            page.td(len(seglist.active))
            page.tr.close()
        page.tbody.close()
        page.table.close()

        # -- close and write
        htmlio.close_page(page, args.html)
        LOGGER.info("HTML written to %s" % args.html)