Example #1
def check_flag(flag, gpstime, duration, pad):
    """Check that a state flag is active during an entire analysis segment

    Parameters
    ----------
    flag : `str`
        state flag to check

    gpstime : `float`
        GPS time of required data

    duration : `float`
        duration (in seconds) of required data

    pad : `float`
        amount of extra data to read in at the start and end for filtering

    Returns
    -------
    check : `bool`
        Boolean switch to pass (`True`) or fail (`False`) depending on whether
        the given flag is active
    """
    # set GPS start and end time
    start = gpstime - duration/2. - pad
    end = gpstime + duration/2. + pad
    seg = Segment(start, end)
    # query for state segments
    active = DataQualityFlag.query(flag, start, end,
                                   url=DEFAULT_SEGMENT_SERVER).active
    # check that state flag is active during the entire analysis
    if (not active.intersects_segment(seg)) or (abs(active[0]) < abs(seg)):
        return False
    return True
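
A hedged usage sketch for check_flag; the flag name and GPS time are illustrative, and the import assumes gwpy with DEFAULT_SEGMENT_SERVER defined as in the surrounding module:

# usage sketch (illustrative values; requires segment-database access)
from gwpy.segments import DataQualityFlag, Segment

ok = check_flag('L1:DMT-ANALYSIS_READY:1', gpstime=1186741861,
                duration=64., pad=8.)
print('data usable' if ok else 'flag not active over the full segment')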
Example #2
def query_state_segments(flag, start, end, url=DEFAULT_SEGMENT_SERVER,
                         pad=(0, 0)):
    """Query a segment database for active segments associated with a flag
    """
    # NOTE: DQF.pad pads forward in time at end
    return DataQualityFlag.query(
        flag, start-pad[0], end+pad[1], url=url,
    ).coalesce().pad(pad[0], -pad[1]).active
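
The pad handling above widens the query window before the database call and then re-pads the recovered segments with (pad[0], -pad[1]) to compensate (see the NOTE in the snippet). A hedged usage sketch with illustrative values:

# usage sketch (illustrative flag and GPS times; requires access to the
# segment database named by DEFAULT_SEGMENT_SERVER)
active = query_state_segments('L1:DMT-ANALYSIS_READY:1',
                              1186741850, 1186742850, pad=(8, 8))
print(active)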
Example #3
def main(args=None):
    """Run the software saturation command-line interface
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    # get IFO
    ifo = args.ifo.upper()
    site = ifo[0]
    frametype = args.frametype or '%s_R' % ifo

    # let's go
    LOGGER.info('{} Software saturations {}-{}'.format(
        args.ifo, int(args.gpsstart), int(args.gpsend)))

    # get segments
    span = Segment(args.gpsstart, args.gpsend)
    if args.state_flag:
        state = DataQualityFlag.query(args.state_flag, int(args.gpsstart),
                                      int(args.gpsend),
                                      url=const.DEFAULT_SEGMENT_SERVER)
        for i, seg in enumerate(state.active):
            state.active[i] = type(seg)(seg[0], seg[1]-args.pad_state_end)
        segs = state.active.coalesce()
        LOGGER.debug("Recovered %d seconds of time for %s"
                     % (abs(segs), args.state_flag))
    else:
        segs = SegmentList([Segment(args.gpsstart, args.gpsend)])

    # find frames
    cache = gwdatafind.find_urls(
        site, frametype, int(args.gpsstart), int(args.gpsend))

    # find channels
    if not os.getenv('LIGO_DATAFIND_SERVER'):
        raise RuntimeError("No LIGO_DATAFIND_SERVER variable set, don't know "
                           "how to discover channels")
    else:
        LOGGER.debug("Identifying channels in frame files")
        if len(cache) == 0:
            raise RuntimeError(
                "No frames recovered for %s in interval [%s, %s)" %
                (frametype, int(args.gpsstart),
                 int(args.gpsend)))
        allchannels = get_channel_names(cache[0])
        LOGGER.debug("   Found %d channels" % len(allchannels))
        sys.stdout.flush()
        channels = core.find_limit_channels(allchannels, skip=args.skip)
        LOGGER.info(
            "   Parsed %d channels with '_LIMIT' and '_LIMEN' or '_SWSTAT'"
            % sum(map(len, channels)))

    # -- read channels and check limits -------------

    saturations = DataQualityDict()
    bad = set()

    # TODO: use multiprocessing to separate channel list into discrete chunks
    #       should give a factor of X for X processes

    # check limens
    for suffix, clist in zip(['LIMEN', 'SWSTAT'], channels):
        nchans = len(clist)
        # group channels in sets for batch processing
        #     limited by <number of channels>, the user group size (a
        #     sensible number), and how many 16 Hz single-precision
        #     channels fit in 2 GiB of RAM
        try:
            dur = max([float(abs(s)) for s in segs])
        except ValueError:
            ngroup = args.group_size
        else:
            ngroup = int(
                min(nchans, args.group_size, 2 * 1024**3 / 4. / 16. / dur))
        LOGGER.info('Processing %s channels in groups of %d' % (
            suffix, ngroup))
        sys.stdout.flush()
        sets = core.grouper(clist, ngroup)
        for i, cset in enumerate(sets):
            # remove empty entries used to pad the list to 8 elements
            cset = list(cset)
            while cset[-1] is None:
                cset.pop(-1)
            for seg in segs:
                cache2 = sieve_cache(cache, segment=seg)
                if not len(cache2):
                    continue
                saturated = core.is_saturated(
                    cset, cache2, seg[0], seg[1], indicator=suffix,
                    nproc=args.nproc)
                for new in saturated:
                    try:
                        saturations[new.name] += new
                    except KeyError:
                        saturations[new.name] = new
            for j, c in enumerate(cset):
                try:
                    sat = saturations[c]
                except KeyError:
                    LOGGER.debug('%40s:      SKIP      [%d/%d]'
                                 % (c, i*ngroup + j + 1, nchans))
                else:
                    if abs(sat.active):
                        LOGGER.debug('%40s: ---- FAIL ---- [%d/%d]'
                                     % (c, i*ngroup + j + 1, nchans))
                        for seg in sat.active:
                            LOGGER.debug(" " * 42 + str(seg))
                        bad.add(c)
                    else:
                        LOGGER.debug('%40s:      PASS      [%d/%d]'
                                     % (c, i*ngroup + j + 1, nchans))
                sys.stdout.flush()

    # -- log results and exit -----------------------

    if len(bad):
        LOGGER.info("Saturations were found for all of the following:\n\n")
        for c in bad:
            print(c)
        print('\n\n')
    else:
        LOGGER.info("No software saturations were found in any channels")

    # write segments to file
    outfile = ('%s-SOFTWARE_SATURATIONS-%d-%d.h5'
               % (ifo, int(args.gpsstart),
                  int(args.gpsend) - int(args.gpsstart)))
    LOGGER.info("Writing saturation segments to %s" % outfile)
    saturations.write(outfile, path="segments", overwrite=True)

    if args.html:
        # get base path
        base = os.path.dirname(args.html)
        os.chdir(base)
        if args.plot:
            args.plot = os.path.curdir
        segfile = os.path.relpath(outfile, os.path.dirname(args.html))
        if os.path.basename(args.html) == 'index.html':
            links = [
                '%d-%d' % (int(args.gpsstart), int(args.gpsend)),
                ('Parameters', '#parameters'),
                ('Segments', [('Software saturations',
                               '#software-saturations')]),
                ('Results', '#results'),
            ]
            if args.state_flag:
                links[2][1].insert(0, ('State flag', '#state-flag'))
            (brand, class_) = htmlio.get_brand(ifo, 'Saturations',
                                               args.gpsstart)
            navbar = htmlio.navbar(links, class_=class_, brand=brand)
            page = htmlio.new_bootstrap_page(
                navbar=navbar, title='%s Saturations | %d-%d' % (
                    ifo, int(args.gpsstart), int(args.gpsend)))
        else:
            page = markup.page()
            page.div(class_='container')
        # -- header
        page.div(class_='pb-2 mt-3 mb-2 border-bottom')
        page.h1('%s Software Saturations: %d-%d'
                % (ifo, int(args.gpsstart), int(args.gpsend)))
        page.div.close()
        # -- parameters
        content = [
            ('State end padding', args.pad_state_end),
            ('Skip', ', '.join(map(repr, args.skip)))]
        page.h2('Parameters', class_='mt-4 mb-4', id_='parameters')
        page.div(class_='row')
        page.div(class_='col-md-9 col-sm-12')
        page.add(htmlio.parameter_table(
            content, start=args.gpsstart, end=args.gpsend,
            flag=args.state_flag))
        page.div.close()  # col-md-9 col-sm-12
        page.div(class_='col-md-3 col-sm-12')
        page.add(htmlio.download_btn(
            [('Segments (HDF)', segfile)],
            btnclass='btn btn-%s dropdown-toggle' % ifo.lower(),
        ))
        page.div.close()  # col-md-3 col-sm-12
        page.div.close()  # row
        page.h5('Command-line:')
        page.add(htmlio.get_command_line(about=False, prog=PROG))
        # -- segments
        page.h2('Segments', class_='mt-4', id_='segments')
        msg = ("This analysis searched {0} filter bank readback channels for "
               "time periods during which their OUTPUT value matched or "
               "exceeded the LIMIT value set in software. Signals that "
               "achieve saturation are shown below, and saturation segments "
               "are available by expanding a given panel.").format(
                   sum(map(len, channels)))
        page.add(htmlio.alert(msg, context=ifo.lower()))
        # record state segments
        if args.state_flag:
            page.h3('State flag', class_='mt-3', id_='state-flag')
            page.div(id_='accordion1')
            page.add(htmlio.write_flag_html(
                state, span, 'state', parent='accordion1', context='success',
                plotdir=args.plot, facecolor=(0.2, 0.8, 0.2),
                edgecolor='darkgreen', known={
                    'facecolor': 'red',
                    'edgecolor': 'darkred',
                    'height': 0.4},
            ))
            page.div.close()
        # record saturation segments
        if len(bad):
            page.h3('Software saturations', class_='mt-3',
                    id_='software-saturations')
            page.div(id_='accordion2')
            for i, (c, flag) in enumerate(saturations.items()):
                if abs(flag.active) > 0:
                    title = '%s [%d]' % (flag.name, len(flag.active))
                    page.add(htmlio.write_flag_html(
                        flag, span=span, id=i, parent='accordion2',
                        title=title, plotdir=args.plot))
            page.div.close()
        else:
            page.add(htmlio.alert('No software saturations were found in this '
                                  'analysis', context=ifo.lower(),
                                  dismiss=False))
        # -- results table
        page.h2('Results summary', class_='mt-4', id_='results')
        page.add(htmlio.alert('All channels for which the LIMIT setting was '
                              'active are shown below.', context=ifo.lower()))
        page.table(class_='table table-striped table-hover')
        # write table header
        page.thead()
        page.tr()
        for header in ['Channel', 'Result', 'Num. saturations']:
            page.th(header)
        page.thead.close()
        # write body
        page.tbody()
        for c, seglist in saturations.items():
            passed = abs(seglist.active) == 0
            if passed:
                page.tr()
            else:
                page.tr(class_='table-warning')
            page.td(c)
            page.td('Pass' if passed else 'Fail')
            page.td(len(seglist.active))
            page.tr.close()
        page.tbody.close()
        page.table.close()
        # close and write
        htmlio.close_page(page, args.html)
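
The state-segment trimming near the top of main() (subtracting pad_state_end from the end of each active segment) can be reproduced in isolation with plain gwpy segments; a minimal sketch with illustrative numbers:

# minimal sketch of the end-padding applied to state segments above
from gwpy.segments import Segment, SegmentList

pad_state_end = 30
active = SegmentList([Segment(0, 100), Segment(200, 260)])
trimmed = SegmentList(
    [type(seg)(seg[0], seg[1] - pad_state_end) for seg in active]).coalesce()
print(trimmed)  # [Segment(0, 70), Segment(200, 230)]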
Example #4
def get_triggers():

    # Obtain segments that are analysis ready
    analysis_ready = DataQualityFlag.query('{0}:DMT-ANALYSIS_READY:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)

    # Display segments for which this flag is true
    print "Segments for which the ANALYSIS READY Flag is active: {0}".format(analysis_ready.active)

    if opts.applyallDQ:
        print("We are finding all previously created DQ cuts")
        # Obtain segments of all DQ cuts if requested DQ list can be found
        # https://code.pycbc.phy.syr.edu/detchar/veto-definitions/blob/master/burst/O1/H1L1-HOFT_C02_O1_BURST.xml
        # First obtain those flags that are for both H1 and L1
        O1_MISSING_HOFT_C02 = DataQualityFlag.query('{0}:DCS-MISSING_{0}_HOFT_C02:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
        O1_ETMY_ESD_DAC_OVERFLOW = DataQualityFlag.query('{0}:DMT-ETMY_ESD_DAC_OVERFLOW:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
        O1_OMC_DCPD_A_SATURATION = DataQualityFlag.query('{0}:DCH-OMC_DCPD_A_SATURATION:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
        O1_OMC_DCPD_ADC_OVERFLOW = DataQualityFlag.query('{0}:DMT-OMC_DCPD_ADC_OVERFLOW:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
        O1_ETMY_SATURATION_SNR200 = DataQualityFlag.query('{0}:DCH-ETMY_SATURATION_SNR200:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
        O1_CW_INJECTION_TRANSITION = DataQualityFlag.query('{0}:DCH-CW_INJECTION_TRANSITION:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
        O1_BAD_KAPPA_BASIC_CUT_HOFT_C02 = DataQualityFlag.query('{0}:DCS-BAD_KAPPA_BASIC_CUT_{0}_HOFT_C02:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
        O1_PARTIAL_FRAME_LOSS_HOFT_C02 = DataQualityFlag.query('{0}:DCS-PARTIAL_FRAME_LOSS_{0}_HOFT_C02:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)

        # Obtain detector specific flags
        if opts.detector == "H1":
            O1_RF45_AM_STABILIZATION = DataQualityFlag.query('{0}:DCH-RF45_AM_STABILIZATION:4'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
            O1_ETMY_SATURATION = DataQualityFlag.query('{0}:DCH-ETMY_SATURATION:2'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
            O1_BAD_DATA_BEFORE_LOCKLOSS = DataQualityFlag.query('{0}:DCH-BAD_DATA_BEFORE_LOCKLOSS:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
            O1_ETMY_VIOLIN_MODE_2NDHARMONIC_RINGING = DataQualityFlag.query('{0}:DCH-ETMY_VIOLIN_MODE_2NDHARMONIC_RINGING:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
            O1_RF45_SEVERE_GLITCHING = DataQualityFlag.query('{0}:DCH-RF45_SEVERE_GLITCHING:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
            O1_EY_BECKHOFF_CHASSIS_PROBLEM = DataQualityFlag.query('{0}:DCH-EY_BECKHOFF_CHASSIS_PROBLEM:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
            O1_ASC_AS_B_RF36_GLITCHING = DataQualityFlag.query('{0}:DCH-ASC_AS_B_RF36_GLITCHING:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
            O1_BAD_STRAIN_HOFT_C02 = DataQualityFlag.query('{0}:DCS-BAD_STRAIN_{0}_HOFT_C02:2'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
            O1_TOGGLING_BAD_KAPPA_HOFT_C02 = DataQualityFlag.query('{0}:DCS-TOGGLING_BAD_KAPPA_{0}_HOFT_C02:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
        else:
            O1_ETMY_SATURATION = DataQualityFlag.query('{0}:DCH-ETMY_SATURATION:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
            O1_BAD_DATA_BEFORE_LOCKLOSS = DataQualityFlag.query('{0}:DCH-BAD_DATA_BEFORE_LOCKLOSS:2'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
            O1_PCAL_GLITCHES_GT_20P = DataQualityFlag.query('{0}:DCH-PCAL_GLITCHES_GT_20P:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
            O1_SUDDEN_PSD_CHANGE = DataQualityFlag.query('{0}:DCH-SUDDEN_PSD_CHANGE:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
            O1_BAD_VCO_OFFSET = DataQualityFlag.query('{0}:DCH-BAD_VCO_OFFSET:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)
            O1_SEVERE_60_200_HZ_NOISE = DataQualityFlag.query('{0}:DCH-SEVERE_60_200_HZ_NOISE:1'.format(opts.detector), opts.gpsStart, opts.gpsEnd)

    # Fetch raw omicron triggers and apply filter which is defined in a function above.
    omicrontriggers = SnglBurstTable.fetch(detchannelname, 'Omicron',
                                           opts.gpsStart, opts.gpsEnd,
                                           filt=threshold)

    print "List of available metadata information for a given glitch provided by omicron: {0}".format(omicrontriggers.columnnames)

    print "Number of triggers after SNR and Freq cuts but before ANALYSIS READY flag filtering: {0}".format(len(omicrontriggers))

    # Filter the raw omicron triggers against the ANALYSIS READY flag.
    omicrontriggers = omicrontriggers.vetoed(analysis_ready.active)
    # If requested filter out DQ flags
    if opts.applyallDQ:
        print("We are applying all previously created DQ cuts")
        # Obtain segments of all DQ cuts if requested DQ list can be found
        # https://code.pycbc.phy.syr.edu/detchar/veto-definitions/blob/master/burst/O1/H1L1-HOFT_C02_O1_BURST.xml
        # First obtain those flags that are for both H1 and L1
        omicrontriggers = omicrontriggers.veto(O1_MISSING_HOFT_C02.active)
        omicrontriggers = omicrontriggers.veto(O1_ETMY_ESD_DAC_OVERFLOW.active)
        omicrontriggers = omicrontriggers.veto(O1_OMC_DCPD_A_SATURATION.active)
        omicrontriggers = omicrontriggers.veto(O1_OMC_DCPD_ADC_OVERFLOW.active)
        omicrontriggers = omicrontriggers.veto(O1_ETMY_SATURATION_SNR200.active)
        omicrontriggers = omicrontriggers.veto(O1_CW_INJECTION_TRANSITION.active)
        omicrontriggers = omicrontriggers.veto(O1_BAD_KAPPA_BASIC_CUT_HOFT_C02.active)
        omicrontriggers = omicrontriggers.veto(O1_PARTIAL_FRAME_LOSS_HOFT_C02.active)

        # Obtain detector specific flags
        if opts.detector == "H1":
            omicrontriggers = omicrontriggers.veto(O1_RF45_AM_STABILIZATION.active)
            omicrontriggers = omicrontriggers.veto(O1_ETMY_SATURATION.active)
            omicrontriggers = omicrontriggers.veto(O1_BAD_DATA_BEFORE_LOCKLOSS.active)
            omicrontriggers = omicrontriggers.veto(O1_ETMY_VIOLIN_MODE_2NDHARMONIC_RINGING.active)
            omicrontriggers = omicrontriggers.veto(O1_RF45_SEVERE_GLITCHING.active)
            omicrontriggers = omicrontriggers.veto(O1_EY_BECKHOFF_CHASSIS_PROBLEM.active)
            omicrontriggers = omicrontriggers.veto(O1_ASC_AS_B_RF36_GLITCHING.active)
            omicrontriggers = omicrontriggers.veto(O1_BAD_STRAIN_HOFT_C02.active)
            omicrontriggers = omicrontriggers.veto(O1_TOGGLING_BAD_KAPPA_HOFT_C02.active)
        else:
            omicrontriggers = omicrontriggers.veto(O1_ETMY_SATURATION.active)
            omicrontriggers = omicrontriggers.veto(O1_BAD_DATA_BEFORE_LOCKLOSS.active)
            omicrontriggers = omicrontriggers.veto(O1_PCAL_GLITCHES_GT_20P.active)
            omicrontriggers = omicrontriggers.veto(O1_SUDDEN_PSD_CHANGE.active)
            omicrontriggers = omicrontriggers.veto(O1_BAD_VCO_OFFSET.active)
            omicrontriggers = omicrontriggers.veto(O1_SEVERE_60_200_HZ_NOISE.active)

    print("Final trigger length: {0}".format(len(omicrontriggers)))

    return omicrontriggers
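
The repeated query/veto pairs above can be collapsed into a loop over flag-name templates. A hedged refactor sketch using only the calls already present in the example (the helper name apply_dq_vetoes is ours, and the template list is abbreviated):

def apply_dq_vetoes(triggers, detector, gpsstart, gpsend, flag_templates):
    # query each flag and remove triggers that fall in its active segments
    for template in flag_templates:
        flag = DataQualityFlag.query(template.format(detector),
                                     gpsstart, gpsend)
        triggers = triggers.veto(flag.active)
    return triggers

common_flags = [
    '{0}:DCS-MISSING_{0}_HOFT_C02:1',
    '{0}:DMT-ETMY_ESD_DAC_OVERFLOW:1',
    # ... remaining flag names exactly as in the example above
]
omicrontriggers = apply_dq_vetoes(omicrontriggers, opts.detector,
                                  opts.gpsStart, opts.gpsEnd, common_flags)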
Example #5
def query(flag, start, end, url='https://segments.ligo.org'):
    """Query a segment database for active segments associated with a flag
    """
    return DataQualityFlag.query(flag, start, end, url=url)
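
A hedged usage sketch for this thin wrapper (requires LIGO.ORG authentication and network access to the segment database; flag and times are illustrative):

flag = query('L1:DMT-ANALYSIS_READY:1', 1186741850, 1186742850)
print(flag.known)   # segments the database has knowledge of
print(flag.active)  # segments during which the flag was active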
Example #6
def main(args=None):
    """Run the zero-crossing counter tool
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    span = Segment(args.gpsstart, args.gpsend)
    LOGGER.info('-- Processing channel %s over span %d - %d' %
                (args.channel, args.gpsstart, args.gpsend))

    if args.state_flag:
        state = DataQualityFlag.query(
            args.state_flag,
            int(args.gpsstart),
            int(args.gpsend),
            url=const.DEFAULT_SEGMENT_SERVER,
        )
        statea = state.active
    else:
        statea = SegmentList([span])

    duration = abs(span)

    # initialize output files for each threshold and store them in a dict
    outfiles = {}
    for thresh in args.threshold:
        outfiles[str(thresh)] = os.path.join(
            args.output_path,
            '%s_%s_DAC-%d-%d.h5' % (
                args.channel.replace('-', '_').replace(':', '-'),
                str(int(thresh)).replace('-', 'n'),
                int(args.gpsstart), duration))

    # get frame cache
    cache = gwdatafind.find_urls(args.ifo[0], args.frametype,
                                 int(args.gpsstart), int(args.gpsend))

    cachesegs = statea & cache_segments(cache)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    # initialize an EventTable for each threshold and store them in a dict
    names = ("time", "frequency", "snr")
    dtypes = ("f8", ) * len(names)
    tables = {}
    for thresh in args.threshold:
        tables[str(thresh)] = EventTable(
            names=names,
            dtype=dtypes,
            meta={"channel": args.channel},
        )

    # for each science segment, read in the data from frames, check for
    # threshold crossings, and if the rate of crossings is less than
    # rate_thresh, record the crossings in the corresponding EventTable
    for seg in cachesegs:
        LOGGER.debug("Processing {}:".format(seg))
        c = sieve_cache(cache, segment=seg)
        if not c:
            LOGGER.warning("    No {} data files for this segment, "
                           "skipping".format(args.frametype))
            continue
        data = get_data(args.channel,
                        seg[0],
                        seg[1],
                        nproc=args.nproc,
                        source=c,
                        verbose="Reading data:".rjust(30))
        for thresh in args.threshold:
            times = find_crossings(data, thresh)
            rate = float(times.size) / abs(seg) if times.size else 0
            LOGGER.info("    Found {0} crossings of {1}, rate: {2} Hz".format(
                times.size,
                thresh,
                rate,
            ))
            if times.size and rate < args.rate_thresh:
                existing = tables[str(thresh)]
                tables[str(thresh)] = vstack_tables(
                    (
                        existing,
                        table_from_times(times,
                                         snr=10.,
                                         frequency=100.,
                                         names=existing.colnames),
                    ),
                    join_type="exact",
                )

    n = max(map(len, tables.values()))
    for thresh, outfile in outfiles.items():
        tables[thresh].write(
            outfile,
            path="triggers",
            format="hdf5",
            overwrite=True,
        )
        LOGGER.info("{0} events written to {1}".format(
            str(len(tables[thresh])).rjust(len(str(n))),
            outfile,
        ))
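
The table accumulation above leans on the fact that gwpy's EventTable is an astropy Table, so astropy.table.vstack can concatenate tables with matching columns. A minimal standalone sketch of the same pattern (illustrative values):

from astropy.table import vstack as vstack_tables
from gwpy.table import EventTable

names = ("time", "frequency", "snr")
empty = EventTable(names=names, dtype=("f8",) * 3)
new = EventTable(rows=[(1186741861.5, 100., 10.)], names=names)
combined = vstack_tables((empty, new), join_type="exact")
print(len(combined))  # 1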
Example #7
data = TimeSeriesDict.fetch(
    list(map("%s:%s".__mod__, channel_list)), start=start, end=end)
if plot_front_end:
    front_end_data = TimeSeriesDict.fetch(
        list(map("%s:%s".__mod__, front_end_channel_list)),
        start=start,
        end=end)
if plot_additional_hoft:
    additional_hoft_data = TimeSeriesDict.read(
        options.additional_hoft_frames_cache,
        list(map("%s:%s".__mod__, additional_channel_list)),
        start=start,
        end=end)

print(list(map("%s:%s".__mod__, front_end_channel_list)))

segs = DataQualityFlag.query('%s:DMT-CALIBRATED:1' % ifo, start, end)

for n, channel in enumerate(channels):
    plot = data["%s:%s" % (ifo, channel)].plot()
    ax = plot.gca()
    if plot_front_end:
        ax.plot(front_end_data["%s:%s" % (ifo, front_end_channels[n])])
    if plot_additional_hoft:
        ax.plot(additional_hoft_data["%s:%s" % (ifo, additional_channels[n])])
    ax.set_ylabel('Correction value')
    plot.gca().legend()
    ax.set_title(channel.replace('_', r'\_'))
    if 'F_S_SQUARED' in channel:
        ax.set_ylim(-100, 100)
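
The list(map("%s:%s".__mod__, ...)) idiom above formats pairs into full channel names; a standalone illustration, assuming each element of the channel list is an (ifo, name) pair:

pairs = [('L1', 'GDS-CALIB_STRAIN'), ('L1', 'GDS-CALIB_KAPPA_TST_REAL')]
names = list(map("%s:%s".__mod__, pairs))
print(names)  # ['L1:GDS-CALIB_STRAIN', 'L1:GDS-CALIB_KAPPA_TST_REAL']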
Example #8
    def get_triggers(cls, start, end, channel,
                     dqflag, verbose=True, **kwargs):
        """Obtain omicron triggers to run gravityspy on

        Parameters:

            start (int): start of time to look for triggers
            end (int): end time to look for triggers
            channel (str): channel to look for triggers
            dqflag (str): name of segment during which to keep triggers

        Returns:
            `Events` table
        """
        duration_max = kwargs.pop('duration_max', None)
        duration_min = kwargs.pop('duration_min', None)
        frequency_max = kwargs.pop('frequency_max', 2048)
        frequency_min = kwargs.pop('frequency_min', 10)
        snr_max = kwargs.pop('snr_max', None)
        snr_min = kwargs.pop('snr_min', 7.5)

        detector = channel.split(':')[0]

        logger = log.Logger('Gravity Spy: Fetching Omicron Triggers')

        # Obtain segments that are analysis ready
        analysis_ready = DataQualityFlag.query(
            '{0}:{1}'.format(detector, dqflag), float(start), float(end))

        # Display segments for which this flag is true
        logger.info("Segments for which the {0} Flag "
                    "is active: {1}".format(dqflag, analysis_ready.active))

        # get Omicron triggers
        files = find_trigger_files(channel, 'Omicron',
                                   float(start), float(end))

        triggers = cls.read(files, tablename='sngl_burst', format='ligolw')

        logger.info("Number of triggers "
                    "before any filtering: {0}".format(len(triggers)))

        masks = numpy.ones(len(triggers), dtype=bool)

        logger.info("duration filter "
                    "[{0}, {1}]".format(duration_min, duration_max))

        logger.info("frequency filter "
                    "[{0}, {1}]".format(frequency_min, frequency_max))

        logger.info("snr filter "
                    "[{0}, {1}]".format(snr_min, snr_max))

        if duration_max is not None:
            masks &= (triggers['duration'] <= duration_max)
        if duration_min is not None:
            masks &= (triggers['duration'] >= duration_min)
        if frequency_max is not None:
            masks &= (triggers['peak_frequency'] <= frequency_max)
        if frequency_min is not None:
            masks &= (triggers['peak_frequency'] >= frequency_min)
        if snr_max is not None:
            masks &= (triggers['snr'] <= snr_max)
        if snr_min is not None:
            masks &= (triggers['snr'] >= snr_min)

        triggers = triggers[masks]

        logger.info("Number of triggers after "
                    "snr, frequency, and duration filters "
                    "cuts but before {0} flag filtering: "
                    "{1}".format(dqflag, len(triggers)))

        # Filter the raw omicron triggers against the ANALYSIS READY flag.
        vetoed = triggers['event_time'].in_segmentlist(analysis_ready.active)
        triggers = triggers[vetoed]

        logger.info("Final trigger length: {0}".format(len(triggers)))

        return triggers
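
A hypothetical call to the classmethod above, assuming it is defined on an Events table class (as in Gravity Spy) and that segment-database credentials are available; all values are illustrative:

trigs = Events.get_triggers(1126051217, 1126137617, 'L1:GDS-CALIB_STRAIN',
                            'DMT-ANALYSIS_READY:1',
                            snr_min=7.5, frequency_max=2048)
print(len(trigs))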
Example #9
def query(flag, start, end, url=DEFAULT_SEGMENT_SERVER):
    """Query a segment database for active segments associated with a flag
    """
    return DataQualityFlag.query(flag, start, end, url=url)
Example #10
    # Widely-applied cutoffs
    print('Grabbing env-cutoff info...')
    envchan1 = 'L1:ISI-GND_STS_ITMY_Z_BLRMS_3_10' # anthro
    envchan2 = 'L1:ISI-GND_STS_ITMY_Z_BLRMS_100M_300M' # micro
    envchan4 = 'L1:PEM-EY_WIND_WEATHER_MPS' # wind
    envchans = [envchan1, envchan2, envchan4]
    # env_series = [Grab_Series(start, end, chan, frame, procs) for chan in envchans]
    env_series = [Grab_Sfiles(start, end, obsrun, chan, frame) for chan in envchans]

    envstricts1 = [[(500, 'maxeq')]]
    envstricts2 = [[(1000, 'maxeq')]]
    envstricts4 = [[(5, 'maxeq')]]
    env_strictsl = [envstricts1, envstricts2, envstricts4]

    # Observing times
    observing = DataQualityFlag.query('L1:DMT-ANALYSIS_READY:1', start, end)
    base_flags = observing&Net_Flag(env_series, env_strictsl)
    transflags = base_flags&Group_Flags(transmodes, transtricts)
    EQflags = base_flags&Group_Flags(EQmodes, EQstricts)-transflags
    nomflags = base_flags-(EQflags|transflags)
    configs = [nomflags, transflags, EQflags]
    
# No need to edit beyond here unless debugging or tinkering.
# Do so at your own risk!

    for i in range(len(configs)):
        configs[i].write(File_Name(start, end, '{}_dqflag'.format(tablet[i]),
                                   'hdf5', extras=[skip_load_direct]))

    configsecs = [config.active for config in configs]
    
    # Conglomerate configsecs to define an all-inclusive SegmentList:
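
The flag combinations above (&, |, -) rely on DataQualityFlag's set-like arithmetic over known/active segments; a self-contained sketch with constructed flags:

from gwpy.segments import DataQualityFlag, Segment, SegmentList

known = SegmentList([Segment(0, 100)])
a = DataQualityFlag('X1:FLAG-A:1', known=known,
                    active=SegmentList([Segment(0, 60)]))
b = DataQualityFlag('X1:FLAG-B:1', known=known,
                    active=SegmentList([Segment(40, 100)]))
print((a & b).active)  # intersection: [Segment(40, 60)]
print((a | b).active)  # union:        [Segment(0, 100)]
print((a - b).active)  # difference:   [Segment(0, 40)]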
Example #11
def cluster_plotter(channels,
                    start,
                    stop,
                    prefix='.',
                    label='kmeans-labels',
                    groups=None,
                    filename=DEFAULT_FILENAME,
                    dqflag='L1:DMT-ANALYSIS_READY:1',
                    xscale=None,
                    unit=None,
                    progressbar=True,
                    **kwargs):
    """
    Plots data with clusters labeled by color in the working directory, or a relative path given by prefix.
    Requires a .hdf5 file produced with a clustering function defined in this module to be in the working directory.
    **kwargs are forwarded to TimeSeries.plot().

    :param prefix: relative path to output images.
    :param label: name attribute of labels TimeSeries saved in filename.
    :param groups: groups of channels to plot in the same figure. See the example.
    :param dqflag: data quality flag for segments bar.
    :param xscale: gps x-axis scale to use.
    :param unit: override y-axis unit.
    :param progressbar: show progress bar.

    >>> from gwpy.time import tconvert, from_gps
    >>> from datetime import timedelta
    >>> from cluster import cluster_plotter
    >>>
    >>> channels = [f'L1:ISI-GND_STS_ETMX_Z_BLRMS_1_3.mean,m-trend', 'L1:ISI-GND_STS_ETMY_Z_BLRMS_1_3.mean,m-trend']
    >>> groups = [[channels, ('ETMX', 'ETMY'), 'L1:ISI-GND_STS_BLRMS_1_3 Z-axis']] # plot on the same figure.
    >>>
    >>> stop = from_gps(60 * (int(tconvert('now')) // 60)) # gets nearest minute to now
    >>> start = stop - timedelta(days=1)  # cluster the past day
    >>> cluster_plotter(channels, start, stop, filename='my_kmeans.hdf5', groups=groups)

    """

    # some defaults.
    if not kwargs:
        kwargs['color'] = 'k'
        kwargs['alpha'] = 0.3
    if groups is None:
        groups = channels

    # read the data from the save file.
    data = TimeSeriesDict.read(filename,
                               channels + [label],
                               start=to_gps(start),
                               end=to_gps(stop))
    logger.info(f'Read {start} to {stop} from {filename}')

    # get segments for the duration specified. Note that this may require doing `ligo-proxy-init -p`.
    logger.debug(f'Getting segments for {dqflag} from {start} to {stop}...')
    dq = DataQualityFlag.query(dqflag, to_gps(start), to_gps(stop))
    logger.info(f'Got segments for {dqflag} from {start} to {stop}.')

    # plotting is slow, so show a nice progress bar.
    logger.debug('Initiating plotting routine...')
    with Progress('plotting', len(channels),
                  quiet=not progressbar) as progress:

        for p, (group, labels, title) in enumerate(groups):

            # plot the group in one figure.
            plt = Plot(*(data[channel] for channel in group),
                       separate=True,
                       sharex=True,
                       zorder=1,
                       **kwargs)

            # modify the axes one by one.
            axes = plt.get_axes()
            for i, ax in enumerate(axes):

                # namely, add a colored overlay that indicates clustering labels.
                ax.scatter(data[group[i]].times,
                           data[group[i]].value,
                           c=[colors[j] for j in data[label]],
                           edgecolor='',
                           s=4,
                           zorder=2)

                ax.set_ylabel(
                    f'{labels[i]} {data[group[i]].unit if unit is None else unit}'
                )
                setp(ax.get_xticklabels(), visible=False)

            # modify the figure as a whole.
            plt.add_segments_bar(dq, label='')
            if xscale is not None:
                plt.gca().set_xscale(xscale)
            plt.suptitle(title)

            # save to png.
            progress(plt.save, p, get_path(title, 'png', prefix=prefix))

    logger.info(f'Completed plotting for {start} to {stop} from {filename}')
Example #12
def segs_and_dur(starttime, endtime, ifo):
    segs = DataQualityFlag.query(
        '{0}:DMT-ANALYSIS_READY:1'.format(ifo), starttime, endtime).active
    dur = int(np.sum([i.end - i.start for i in segs]))

    return segs, dur
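
A hedged usage sketch (illustrative O1-era GPS times; assumes numpy is imported as np and the segment database is reachable):

segs, dur = segs_and_dur(1126051217, 1126137617, 'L1')
print('%d s of analysis-ready livetime in %d segments' % (dur, len(segs)))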
Example #13
trigPath = args.trigFile
analysis = args.analysis

#create directories to hold stuff
tag = '%d-%d-%s' % (start.seconds, end.seconds, analysis)
outdir = os.path.abspath(os.path.join(args.output_directory, tag))
mkdir(outdir)
os.chdir(outdir)

ALLSEGMENTS = DataQualityDict()

# -- get analysis flags ----------------------
allFlags = get_known_flags(start, end, url, ifo=ifo, badonly=None)
for flag in allFlags[:]:
    if 'ANALYSIS_READY' not in flag:
        new = DataQualityFlag.query(flag, start, end)
        # alternative: try get_segments from gwvet.segments
        ALLSEGMENTS[new.name] = new
    else:
        allFlags.remove(flag)

# download the veto-definer file if it was given as a URL
if urlparse(vetofile).netloc:
    tmp = urlopen(vetofile)
    vetofile = os.path.abspath(os.path.basename(vetofile))
    with open(vetofile, 'wb') as f:
        f.write(tmp.read())
    print('Downloaded veto definer file')
vdf = DataQualityDict.from_veto_definer_file(vetofile,
                                             format='ligolw',
                                             start=start,
                                             end=end,
Example #14
def main(args=None):
    """Run the online Guardian node visualization tool
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    fec_map = args.fec_map
    simulink = args.simulink
    daqsvn = args.daqsvn or ('https://daqsvn.ligo-la.caltech.edu/websvn/'
                             'listing.php?repname=daq_maps')
    if args.ifo == 'H1':
        if not fec_map:
            fec_map = 'https://lhocds.ligo-wa.caltech.edu/exports/detchar/fec/'
        if not simulink:
            simulink = 'https://lhocds.ligo-wa.caltech.edu/daq/simulink/'
    if args.ifo == 'L1':
        if not fec_map:
            fec_map = 'https://llocds.ligo-la.caltech.edu/exports/detchar/fec/'
        if not simulink:
            simulink = 'https://llocds.ligo-la.caltech.edu/daq/simulink/'

    span = Segment(args.gpsstart, args.gpsend)

    # let's go
    LOGGER.info('{} Overflows {}-{}'.format(args.ifo, int(args.gpsstart),
                                            int(args.gpsend)))

    # get segments
    if args.state_flag:
        state = DataQualityFlag.query(args.state_flag,
                                      int(args.gpsstart),
                                      int(args.gpsend),
                                      url=const.DEFAULT_SEGMENT_SERVER)
        tmp = type(state.active)()
        for i, seg in enumerate(state.active):
            if abs(seg) < args.segment_end_pad:
                continue
            tmp.append(type(seg)(seg[0], seg[1] - args.segment_end_pad))
        state.active = tmp.coalesce()
        statea = state.active
    else:
        statea = SegmentList([span])

    if not args.output_file:
        duration = abs(span)
        args.output_file = ('%s-OVERFLOWS-%d-%d.h5' %
                            (args.ifo, int(args.gpsstart), duration))
        LOGGER.debug("Set default output file as %s" % args.output_file)

    # set up container
    overflows = DataQualityDict()

    # prepare data access
    if args.nds:
        from gwpy.io import nds2 as io_nds2
        host, port = args.nds.rsplit(':', 1)
        ndsconnection = io_nds2.connect(host, port=int(port))
        if ndsconnection.get_protocol() == 1:
            cachesegs = SegmentList(
                [Segment(int(args.gpsstart), int(args.gpsend))])
        else:
            cachesegs = io_nds2.get_availability(
                ['{0}:FEC-1_DAC_OVERFLOW_ACC_0_0'.format(args.ifo)],
                int(args.gpsstart),
                int(args.gpsend),
            )
    else:  # get frame cache
        cache = gwdatafind.find_urls(args.ifo[0], args.frametype,
                                     int(args.gpsstart), int(args.gpsend))
        cachesegs = statea & cache_segments(cache)

    flag_desc = "ADC/DAC Overflow indicated by {0}"

    # get channel and find overflows
    for dcuid in args.dcuid:
        LOGGER.info("Processing DCUID %d" % dcuid)
        channel = daq.ligo_accum_overflow_channel(dcuid, args.ifo)
        overflows[channel] = DataQualityFlag(channel, known=cachesegs)
        if args.deep:
            LOGGER.debug(" -- Getting list of overflow channels")
            try:
                channels = daq.ligo_model_overflow_channels(dcuid,
                                                            args.ifo,
                                                            args.frametype,
                                                            gpstime=span[0],
                                                            nds=args.nds)
            except IndexError:  # no frame found for GPS start, try GPS end
                channels = daq.ligo_model_overflow_channels(dcuid,
                                                            args.ifo,
                                                            args.frametype,
                                                            gpstime=span[-1])
            for chan in channels:  # set up flags early
                overflows[chan] = DataQualityFlag(
                    chan,
                    known=cachesegs,
                    description=flag_desc.format(chan),
                    isgood=False,
                )
            LOGGER.debug(" -- %d channels found" % len(channel))
        for seg in cachesegs:
            LOGGER.debug(" -- Processing {}-{}".format(*seg))
            if args.nds:
                read_kw = dict(connection=ndsconnection)
            else:
                read_kw = dict(source=cache, nproc=args.nproc)
            msg = "Reading ACCUM_OVERFLOW data:".rjust(30)
            data = get_data(channel,
                            seg[0],
                            seg[1],
                            pad=0.,
                            verbose=msg,
                            **read_kw)
            new = daq.find_overflow_segments(
                data,
                cumulative=True,
            )
            overflows[channel] += new
            LOGGER.info(" -- {} overflows found".format(len(new.active)))
            if not new.active:
                continue
            # go deep!
            for s, e in tqdm.tqdm(new.active.protract(2),
                                  unit='ovfl',
                                  desc='Going deep'.rjust(30)):
                data = get_data(channels, s, e, **read_kw)
                for ch in channels:
                    try:
                        overflows[ch] += daq.find_overflow_segments(
                            data[ch],
                            cumulative=True,
                        )
                    except KeyError:
                        warnings.warn("Skipping {}".format(ch), UserWarning)
                        continue
        LOGGER.debug(" -- Search complete")

    # write output
    LOGGER.info("Writing segments to %s" % args.output_file)
    table = table_from_segments(
        overflows,
        sngl_burst=args.output_file.endswith((".xml", ".xml.gz")),
    )
    if args.integer_segments:
        for key in overflows:
            overflows[key] = overflows[key].round()
    if args.output_file.endswith((".h5", "hdf", ".hdf5")):
        with h5py.File(args.output_file, "w") as h5f:
            table.write(h5f, path="triggers")
            overflows.write(h5f, path="segments")
    else:
        table.write(args.output_file, overwrite=True)
        overflows.write(args.output_file, overwrite=True, append=True)

    # write HTML
    if args.html:
        # get base path
        base = os.path.dirname(args.html)
        os.chdir(base)
        if args.plot:
            args.plot = os.path.curdir
        if args.output_file:
            args.output_file = os.path.relpath(args.output_file,
                                               os.path.dirname(args.html))
        if os.path.basename(args.html) == 'index.html':
            links = [
                '%d-%d' % (int(args.gpsstart), int(args.gpsend)),
                ('Parameters', '#parameters'),
                ('Segments', [('Overflows', '#overflows')]),
                ('Results', '#results'),
            ]
            if args.state_flag:
                links[2][1].insert(0, ('State flag', '#state-flag'))
            (brand, class_) = htmlio.get_brand(args.ifo, 'Overflows',
                                               args.gpsstart)
            navbar = htmlio.navbar(links, class_=class_, brand=brand)
            page = htmlio.new_bootstrap_page(
                title='%s Overflows | %d-%d' %
                (args.ifo, int(args.gpsstart), int(args.gpsend)),
                navbar=navbar)
        else:
            page = htmlio.markup.page()
            page.div(class_='container')

        # -- header
        page.div(class_='pb-2 mt-3 mb-2 border-bottom')
        page.h1('%s ADC/DAC Overflows: %d-%d' %
                (args.ifo, int(args.gpsstart), int(args.gpsend)))
        page.div.close()

        # -- parameters
        content = [('DCUIDs', ' '.join(map(str, args.dcuid)))]
        if daqsvn:
            content.append(('FEC configuration', (
                '<a href="{0}" target="_blank" title="{1} FEC configuration">'
                '{0}</a>').format(daqsvn, args.ifo)))
        if fec_map:
            content.append(
                ('FEC map', '<a href="{0}" target="_blank" title="{1} FEC '
                 'map">{0}</a>'.format(fec_map, args.ifo)))
        if simulink:
            content.append(
                ('Simulink models', '<a href="{0}" target="_blank" title="{1} '
                 'Simulink models">{0}</a>'.format(simulink, args.ifo)))
        page.h2('Parameters', class_='mt-4 mb-4', id_='parameters')
        page.div(class_='row')
        page.div(class_='col-md-9 col-sm-12')
        page.add(
            htmlio.parameter_table(content,
                                   start=args.gpsstart,
                                   end=args.gpsend,
                                   flag=args.state_flag))
        page.div.close()  # col-md-9 col-sm-12

        # link to summary file
        if args.output_file:
            ext = ('HDF' if args.output_file.endswith(
                (".h5", "hdf", ".hdf5")) else 'XML')
            page.div(class_='col-md-3 col-sm-12')
            page.add(
                htmlio.download_btn(
                    [('Segments ({})'.format(ext), args.output_file)],
                    btnclass='btn btn-%s dropdown-toggle' % args.ifo.lower(),
                ))
            page.div.close()  # col-md-3 col-sm-12
        page.div.close()  # row

        # -- command-line
        page.h5('Command-line:')
        page.add(htmlio.get_command_line(about=False, prog=PROG))

        # -- segments
        page.h2('Segments', class_='mt-4', id_='segments')

        # give contextual information
        msg = ("This analysis searched for digital-to-analogue (DAC) or "
               "analogue-to-digital (ADC) conversion overflows in the {0} "
               "real-time controls system. ").format(
                   SITE_MAP.get(args.ifo, 'LIGO'))
        if args.deep:
            msg += (
                "A hierarchichal search was performed, with one cumulative "
                "overflow counter checked per front-end controller (FEC). "
                "For those models that indicated an overflow, the card- and "
                "slot-specific channels were then checked. ")
        msg += (
            "Consant overflow is shown as yellow, while transient overflow "
            "is shown as red. If a data-quality flag was loaded for this "
            "analysis, it will be displayed in green.")
        page.add(htmlio.alert(msg, context=args.ifo.lower()))
        # record state segments
        if args.state_flag:
            page.h3('State flag', class_='mt-3', id_='state-flag')
            page.div(id_='accordion1')
            page.add(
                htmlio.write_flag_html(state,
                                       span,
                                       'state',
                                       parent='accordion1',
                                       context='success',
                                       plotdir=args.plot,
                                       facecolor=(0.2, 0.8, 0.2),
                                       edgecolor='darkgreen',
                                       known={
                                           'facecolor': 'red',
                                           'edgecolor': 'darkred',
                                           'height': 0.4,
                                       }))
            page.div.close()
        # record overflow segments
        if sum(abs(s.active) for s in overflows.values()):
            page.h3('Overflows', class_='mt-3', id_='overflows')
            page.div(id_='accordion2')
            for i, (c, flag) in enumerate(list(overflows.items())):
                if abs(flag.active) == 0:
                    continue
                if abs(flag.active) == abs(cachesegs):
                    context = 'warning'
                else:
                    context = 'danger'
                try:
                    channel = cds.get_real_channel(flag.name)
                except Exception:
                    title = '%s [%d]' % (flag.name, len(flag.active))
                else:
                    title = '%s (%s) [%d]' % (flag.name, channel,
                                              len(flag.active))
                page.add(
                    htmlio.write_flag_html(flag,
                                           span,
                                           i,
                                           parent='accordion2',
                                           title=title,
                                           context=context,
                                           plotdir=args.plot))
            page.div.close()
        else:
            page.add(
                htmlio.alert('No overflows were found in this analysis',
                             context=args.ifo.lower(),
                             dismiss=False))

        # -- results table
        page.h2('Results summary', class_='mt-4', id_='results')
        page.table(class_='table table-striped table-hover')
        # write table header
        page.thead()
        page.tr()
        for header in ['Channel', 'Connected signal', 'Num. overflows']:
            page.th(header)
        page.thead.close()
        # write body
        page.tbody()
        for c, seglist in overflows.items():
            t = abs(seglist.active)
            if t == 0:
                page.tr()
            elif t == abs(cachesegs):
                page.tr(class_='table-warning')
            else:
                page.tr(class_='table-danger')
            page.td(c)
            try:
                page.td(cds.get_real_channel(str(c)))
            except Exception:
                page.td()
            page.td(len(seglist.active))
            page.tr.close()
        page.tbody.close()
        page.table.close()

        # -- close and write
        htmlio.close_page(page, args.html)
        LOGGER.info("HTML written to %s" % args.html)
Example #15
def main(args=None):
    """Run the hveto command-line interface
    """
    # declare global variables
    # this is needed for multiprocessing utilities
    global acache, analysis, areadkw, atrigfindkw, auxiliary, auxetg
    global auxfreq, counter, livetime, minsnr, naux, pchannel, primary
    global rnd, snrs, windows

    # parse command-line
    parser = create_parser()
    args = parser.parse_args(args=args)
    ifo = args.ifo
    start = int(args.gpsstart)
    end = int(args.gpsend)
    duration = end - start

    # log startup
    LOGGER.info("-- Welcome to Hveto --")
    LOGGER.info("GPS start time: %d" % start)
    LOGGER.info("GPS end time: %d" % end)
    LOGGER.info("Interferometer: %s" % ifo)

    # -- initialisation -------------------------

    # read configuration
    cp = config.HvetoConfigParser(ifo=ifo)
    cp.read(args.config_file)
    LOGGER.info("Parsed configuration file(s)")

    # format output directory
    outdir = _abs_path(args.output_directory)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    os.chdir(outdir)
    LOGGER.info("Working directory: %s" % outdir)
    segdir = 'segments'
    plotdir = 'plots'
    trigdir = 'triggers'
    omegadir = 'scans'
    for d in [segdir, plotdir, trigdir, omegadir]:
        if not os.path.isdir(d):
            os.makedirs(d)

    # prepare html variables
    htmlv = {
        'title': '%s Hveto | %d-%d' % (ifo, start, end),
        'config': None,
        'prog': PROG,
        'context': ifo.lower(),
    }

    # get segments
    aflag = cp.get('segments', 'analysis-flag')
    url = cp.get('segments', 'url')
    padding = tuple(cp.getfloats('segments', 'padding'))
    if args.analysis_segments:
        segs_ = DataQualityDict.read(args.analysis_segments, gpstype=float)
        analysis = segs_[aflag]
        span = SegmentList([Segment(start, end)])
        analysis.active &= span
        analysis.known &= span
        analysis.coalesce()
        LOGGER.debug("Segments read from disk")
    else:
        analysis = DataQualityFlag.query(aflag, start, end, url=url)
        LOGGER.debug("Segments recovered from %s" % url)
    if padding != (0, 0):
        mindur = padding[0] - padding[1]
        analysis.active = type(analysis.active)([s for s in analysis.active if
                                                 abs(s) >= mindur])
        analysis.pad(*padding, inplace=True)
        LOGGER.debug("Padding %s applied" % str(padding))
    livetime = int(abs(analysis.active))
    livetimepc = livetime / duration * 100.
    LOGGER.info("Retrieved %d segments for %s with %ss (%.2f%%) livetime"
                % (len(analysis.active), aflag, livetime, livetimepc))

    # apply vetoes from veto-definer file
    try:
        vetofile = cp.get('segments', 'veto-definer-file')
    except configparser.NoOptionError:
        vetofile = None
    else:
        try:
            categories = cp.getfloats('segments', 'veto-definer-categories')
        except configparser.NoOptionError:
            categories = None
        # read file
        vdf = read_veto_definer_file(vetofile, start=start, end=end, ifo=ifo)
        LOGGER.debug("Read veto-definer file from %s" % vetofile)
        # get vetoes from segdb
        vdf.populate(source=url, segments=analysis.active, on_error='warn')
        # coalesce flags from chosen categories
        vetoes = DataQualityFlag('%s:VDF-VETOES:1' % ifo)
        nflags = 0
        for flag in vdf:
            if not categories or vdf[flag].category in categories:
                vetoes += vdf[flag]
                nflags += 1
        try:
            deadtime = int(abs(vetoes.active)) / int(abs(vetoes.known)) * 100
        except ZeroDivisionError:
            deadtime = 0
        LOGGER.debug("Coalesced %ss (%.2f%%) of deadtime from %d veto flags"
                     % (abs(vetoes.active), deadtime, nflags))
        # apply to analysis segments
        analysis -= vetoes
        LOGGER.debug("Applied vetoes from veto-definer file")
        livetime = int(abs(analysis.active))
        livetimepc = livetime / duration * 100.
        LOGGER.info("%ss (%.2f%%) livetime remaining after vetoes"
                    % (livetime, livetimepc))

    snrs = cp.getfloats('hveto', 'snr-thresholds')
    minsnr = min(snrs)
    windows = cp.getfloats('hveto', 'time-windows')

    # record all segments
    segments = DataQualityDict()
    segments[analysis.name] = analysis

    # -- load channels --------------------------

    # get primary channel name
    pchannel = cp.get('primary', 'channel')

    # read auxiliary cache
    if args.auxiliary_cache is not None:
        acache = read_cache(args.auxiliary_cache)
    else:
        acache = None

    # load auxiliary channels
    auxetg = cp.get('auxiliary', 'trigger-generator')
    auxfreq = cp.getfloats('auxiliary', 'frequency-range')
    try:
        auxchannels = cp.get('auxiliary', 'channels').strip('\n').split('\n')
    except config.configparser.NoOptionError:
        auxchannels = find_auxiliary_channels(auxetg, (start, end), ifo=ifo,
                                              cache=acache)
        cp.set('auxiliary', 'channels', '\n'.join(auxchannels))
        LOGGER.debug("Auto-discovered %d "
                     "auxiliary channels" % len(auxchannels))
    else:
        auxchannels = sorted(set(auxchannels))
        LOGGER.debug("Read list of %d auxiliary channels" % len(auxchannels))

    # load unsafe channels list
    _unsafe = cp.get('safety', 'unsafe-channels')
    if os.path.isfile(_unsafe):  # from file
        unsafe = set()
        with open(_unsafe, 'r') as f:
            for c in f.read().rstrip('\n').split('\n'):
                if c.startswith('%(IFO)s'):
                    unsafe.add(c.replace('%(IFO)s', ifo))
                elif not c.startswith('%s:' % ifo):
                    unsafe.add('%s:%s' % (ifo, c))
                else:
                    unsafe.add(c)
    else:  # or from line-separated list
        unsafe = set(_unsafe.strip('\n').split('\n'))
    unsafe.add(pchannel)
    cp.set('safety', 'unsafe-channels', '\n'.join(sorted(unsafe)))
    LOGGER.debug("Read list of %d unsafe channels" % len(unsafe))

    # remove unsafe channels
    nunsafe = 0
    for i in range(len(auxchannels) - 1, -1, -1):
        if auxchannels[i] in unsafe:
            LOGGER.warning("Auxiliary channel %r identified as unsafe and has "
                           "been removed" % auxchannels[i])
            auxchannels.pop(i)
            nunsafe += 1
    LOGGER.debug("%d auxiliary channels identified as unsafe" % nunsafe)
    naux = len(auxchannels)
    LOGGER.info("Identified %d auxiliary channels to process" % naux)

    # record INI file in output HTML directory
    inifile = '%s-HVETO_CONFIGURATION-%d-%d.ini' % (ifo, start, duration)
    if os.path.isfile(inifile) and any(
            os.path.samefile(inifile, x) for x in args.config_file):
        LOGGER.debug("Cannot write INI file to %s, file was given as input")
    else:
        with open(inifile, 'w') as f:
            cp.write(f)
        LOGGER.info("Configuration recorded as %s" % inifile)
    htmlv['config'] = inifile

    # -- load primary triggers ------------------

    # read primary cache
    if args.primary_cache is not None:
        pcache = read_cache(args.primary_cache)
    else:
        pcache = None

    # load primary triggers
    petg = cp.get('primary', 'trigger-generator')
    psnr = cp.getfloat('primary', 'snr-threshold')
    pfreq = cp.getfloats('primary', 'frequency-range')
    preadkw = cp.getparams('primary', 'read-')
    if pcache is not None:  # auto-detect the file format
        LOGGER.debug('Unsetting the primary trigger file format')
        preadkw['format'] = None
        preadkw['path'] = 'triggers'
    ptrigfindkw = cp.getparams('primary', 'trigfind-')
    primary = get_triggers(pchannel, petg, analysis.active, snr=psnr,
                           frange=pfreq, cache=pcache, nproc=args.nproc,
                           trigfind_kwargs=ptrigfindkw, **preadkw)
    fcol, scol = primary.dtype.names[1:3]

    if len(primary):
        LOGGER.info("Read %d events for %s" % (len(primary), pchannel))
    else:
        message = "No events found for %r in %d seconds of livetime" % (
           pchannel, livetime)
        LOGGER.critical(message)

    # cluster primary triggers
    clusterkwargs = cp.getparams('primary', 'cluster-')
    if clusterkwargs:
        primary = primary.cluster(**clusterkwargs)
        LOGGER.info("%d primary events remain after clustering over %s" %
                    (len(primary), clusterkwargs['rank']))

    # -- bail out early -------------------------
    # the bail out is done here so that we can at least generate the eventual
    # configuration file, mainly for HTML purposes

    # no segments
    if livetime == 0:
        message = ("No active segments found for analysis flag %r in interval "
                   "[%d, %d)" % (aflag, start, end))
        LOGGER.critical(message)
        htmlv['context'] = 'info'
        index = html.write_null_page(ifo, start, end, message, **htmlv)
        LOGGER.info("HTML report written to %s" % index)
        sys.exit(0)

    # no primary triggers
    if len(primary) == 0:
        htmlv['context'] = 'danger'
        index = html.write_null_page(ifo, start, end, message, **htmlv)
        LOGGER.info("HTML report written to %s" % index)
        sys.exit(0)

    # otherwise write all primary triggers to ASCII
    trigfile = os.path.join(
        trigdir,
        '%s-HVETO_RAW_TRIGS_ROUND_0-%d-%d.txt' % (ifo, start, duration),
    )
    primary.write(trigfile, format='ascii', overwrite=True)

    # -- load auxiliary triggers ----------------

    LOGGER.info("Reading triggers for aux channels...")
    counter = multiprocessing.Value('i', 0)

    areadkw = cp.getparams('auxiliary', 'read-')
    if acache is not None:  # auto-detect the file format
        LOGGER.debug('Unsetting the auxiliary trigger file format')
        areadkw['format'] = None
        areadkw['path'] = 'triggers'
    atrigfindkw = cp.getparams('auxiliary', 'trigfind-')

    # map with multiprocessing
    if args.nproc > 1:
        pool = multiprocessing.Pool(processes=args.nproc)
        results = pool.map(_get_aux_triggers, auxchannels)
        pool.close()
    # map without multiprocessing
    else:
        results = map(_get_aux_triggers, auxchannels)

    LOGGER.info("All aux events loaded")

    auxiliary = dict(x for x in results if x is not None)
    auxchannels = sorted(auxiliary.keys())
    chanfile = '%s-HVETO_CHANNEL_LIST-%d-%d.txt' % (ifo, start, duration)
    with open(chanfile, 'w') as f:
        for chan in auxchannels:
            print(chan, file=f)
    LOGGER.info("Recorded list of valid auxiliary channels in %s" % chanfile)

    # -- execute hveto analysis -----------------

    minsig = cp.getfloat('hveto', 'minimum-significance')

    pevents = [primary]
    pvetoed = []

    auxfcol, auxscol = auxiliary[auxchannels[0]].dtype.names[1:3]
    slabel = plot.get_column_label(scol)
    flabel = plot.get_column_label(fcol)
    auxslabel = plot.get_column_label(auxscol)
    auxflabel = plot.get_column_label(auxfcol)

    rounds = []
    rnd = core.HvetoRound(1, pchannel, rank=scol)
    rnd.segments = analysis.active

    while True:
        LOGGER.info("-- Processing round %d --" % rnd.n)

        # write segments for this round
        segfile = os.path.join(
            segdir, '%s-HVETO_ANALYSIS_SEGS_ROUND_%d-%d-%d.txt'
                    % (ifo, rnd.n, start, duration))
        write_ascii_segments(segfile, rnd.segments)

        # calculate significances for this round
        if args.nproc > 1:  # multiprocessing
            # separate channel list into chunks and process each chunk
            pool = multiprocessing.Pool(
                processes=min(args.nproc, len(auxiliary.keys())))
            chunks = utils.channel_groups(list(auxiliary.keys()), args.nproc)
            results = pool.map(_find_max_significance, chunks)
            pool.close()
            winners, sigsets = zip(*results)
            # find winner of chunk winners
            winner = sorted(winners, key=lambda w: w.significance)[-1]
            # flatten sets of significances into one list
            newsignificances = sigsets[0]
            for subdict in sigsets[1:]:
                newsignificances.update(subdict)
        else:  # single process
            winner, newsignificances = core.find_max_significance(
                primary, auxiliary, pchannel, snrs, windows, rnd.livetime)

        LOGGER.info("Round %d winner: %s" % (rnd.n, winner.name))

        # plot significance drop here for the last round
        #   only now do we actually have the new data to
        #   calculate significance drop
        if rnd.n > 1:
            svg = (pngname % 'SIG_DROP').replace('.png', '.svg')  # noqa: F821
            plot.significance_drop(
                svg, oldsignificances, newsignificances,  # noqa: F821
                title=' | '.join([title, subtitle]),  # noqa: F821
                bbox_inches='tight')
            LOGGER.debug("Figure written to %s" % svg)
            svg = FancyPlot(svg, caption=plot.ROUND_CAPTION['SIG_DROP'])
            rounds[-1].plots.append(svg)
        oldsignificances = newsignificances  # noqa: F841

        # break out of the loop if the significance is below stopping point
        if winner.significance < minsig:
            LOGGER.info("Maximum signifiance below stopping point")
            LOGGER.debug("    (%.2f < %.2f)" % (winner.significance, minsig))
            LOGGER.info("-- Rounds complete! --")
            break

        # work out the vetoes for this round
        allaux = auxiliary[winner.name][
            auxiliary[winner.name][auxscol] >= winner.snr]
        winner.events = allaux
        coincs = allaux[core.find_coincidences(allaux['time'], primary['time'],
                                               dt=winner.window)]
        rnd.vetoes = winner.get_segments(allaux['time'])
        flag = DataQualityFlag(
            '%s:HVT-ROUND_%d:1' % (ifo, rnd.n), active=rnd.vetoes,
            known=rnd.segments,
            description="winner=%s, window=%s, snr=%s" % (
                winner.name, winner.window, winner.snr))
        segments[flag.name] = flag
        LOGGER.debug("Generated veto segments for round %d" % rnd.n)

        # link events before veto for plotting
        before = primary
        beforeaux = auxiliary[winner.name]

        # apply vetoes to primary
        primary, vetoed = core.veto(primary, rnd.vetoes)
        pevents.append(primary)
        pvetoed.append(vetoed)
        LOGGER.debug("Applied vetoes to primary")

        # record results
        rnd.winner = winner
        rnd.efficiency = (len(vetoed), len(primary) + len(vetoed))
        rnd.use_percentage = (len(coincs), len(winner.events))
        if rnd.n > 1:
            rnd.cum_efficiency = (
                len(vetoed) + rounds[-1].cum_efficiency[0],
                rounds[0].efficiency[1])
            rnd.cum_deadtime = (
                rnd.deadtime[0] + rounds[-1].cum_deadtime[0],
                livetime)
        else:
            rnd.cum_efficiency = rnd.efficiency
            rnd.cum_deadtime = rnd.deadtime
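        # worked example (illustrative numbers only): if round 1 vetoes 50 of
        # 500 primary events, efficiency = (50, 500), i.e. 10%; if round 2
        # then vetoes 30 of the remaining 450, cum_efficiency = (30 + 50, 500),
        # i.e. 16%, so the numerators accumulate while the denominator stays
        # fixed at the round-1 total. deadtime accumulates the same way
        # against the full analysis livetime.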

        # apply vetoes to auxiliary
        if args.nproc > 1:  # multiprocess
            # separate channel list into chunks and process each chunk
            pool = multiprocessing.Pool(
                processes=min(args.nproc, len(auxiliary.keys())))
            chunks = utils.channel_groups(list(auxiliary.keys()), args.nproc)
            results = pool.map(_veto, chunks)
            pool.close()
            auxiliary = results[0]
            for subdict in results[1:]:
                auxiliary.update(subdict)
        else:  # single process
            auxiliary = core.veto_all(auxiliary, rnd.vetoes)
        LOGGER.debug("Applied vetoes to auxiliary channels")

        # log results
        LOGGER.info("""Results for round %d:\n\n
    winner :          %s
    significance :    %s
    mu :              %s
    snr :             %s
    dt :              %s
    use_percentage :  %s
    efficiency :      %s
    deadtime :        %s
    cum. efficiency : %s
    cum. deadtime :   %s\n\n""" % (
            rnd.n, rnd.winner.name, rnd.winner.significance,
            rnd.winner.mu, rnd.winner.snr, rnd.winner.window,
            rnd.use_percentage, rnd.efficiency, rnd.deadtime,
            rnd.cum_efficiency, rnd.cum_deadtime))

        # write segments
        segfile = os.path.join(
            segdir,
            '%s-HVETO_VETO_SEGS_ROUND_%d-%d-%d.txt' % (
                ifo, rnd.n, start, duration))
        write_ascii_segments(segfile, rnd.vetoes)
        LOGGER.debug("Round %d vetoes written to %s" % (rnd.n, segfile))
        rnd.files['VETO_SEGS'] = (segfile,)
        # write triggers
        trigfile = os.path.join(
            trigdir,
            '%s-HVETO_%%s_TRIGS_ROUND_%d-%d-%d.txt' % (
                ifo, rnd.n, start, duration))
        for tag, arr in zip(
                ['WINNER', 'VETOED', 'RAW'],
                [winner.events, vetoed, primary]):
            f = trigfile % tag
            arr.write(f, format='ascii', overwrite=True)
            LOGGER.debug("Round %d %s events written to %s"
                         % (rnd.n, tag.lower(), f))
            rnd.files[tag] = f

        # record times to omega scan
        if args.omega_scans:
            N = len(vetoed)
            ind = random.sample(range(0, N), min(args.omega_scans, N))
            rnd.scans = vetoed[ind]
            LOGGER.debug("Collected %d events to omega scan:\n\n%s\n\n"
                         % (len(rnd.scans), rnd.scans))

        # -- make some plots --

        pngname = os.path.join(plotdir, '%s-HVETO_%%s_ROUND_%d-%d-%d.png' % (
            ifo, rnd.n, start, duration))
        wname = texify(rnd.winner.name)
        beforel = 'Before\n[%d]' % len(before)
        afterl = 'After\n[%d]' % len(primary)
        vetoedl = 'Vetoed\n(primary)\n[%d]' % len(vetoed)
        beforeauxl = 'All\n[%d]' % len(beforeaux)
        usedl = 'Used\n(aux)\n[%d]' % len(winner.events)
        coincl = 'Coinc.\n[%d]' % len(coincs)
        title = '%s Hveto round %d' % (ifo, rnd.n)
        ptitle = '%s: primary impact' % title
        atitle = '%s: auxiliary use' % title
        subtitle = 'winner: %s [%d-%d]' % (wname, start, end)

        # before/after histogram
        png = pngname % 'HISTOGRAM'
        plot.before_after_histogram(
            png, before[scol], primary[scol],
            label1=beforel, label2=afterl, xlabel=slabel,
            title=ptitle, subtitle=subtitle)
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['HISTOGRAM'])
        rnd.plots.append(png)

        # snr versus time
        png = pngname % 'SNR_TIME'
        plot.veto_scatter(
            png, before, vetoed, x='time', y=scol, label1=beforel,
            label2=vetoedl, epoch=start, xlim=[start, end], ylabel=slabel,
            title=ptitle, subtitle=subtitle, legend_title="Primary:")
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['SNR_TIME'])
        rnd.plots.append(png)

        # snr versus frequency
        png = pngname % 'SNR_%s' % fcol.upper()
        plot.veto_scatter(
            png, before, vetoed, x=fcol, y=scol, label1=beforel,
            label2=vetoedl, xlabel=flabel, ylabel=slabel, xlim=pfreq,
            title=ptitle, subtitle=subtitle, legend_title="Primary:")
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['SNR'])
        rnd.plots.append(png)

        # frequency versus time coloured by SNR
        png = pngname % '%s_TIME' % fcol.upper()
        plot.veto_scatter(
            png, before, vetoed, x='time', y=fcol, color=scol,
            label1=None, label2=None, ylabel=flabel,
            clabel=slabel, clim=[3, 100], cmap='YlGnBu',
            epoch=start, xlim=[start, end], ylim=pfreq,
            title=ptitle, subtitle=subtitle)
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['TIME'])
        rnd.plots.append(png)

        # aux used versus frequency
        png = pngname % 'USED_SNR_TIME'
        plot.veto_scatter(
            png, winner.events, vetoed, x='time', y=[auxscol, scol],
            label1=usedl, label2=vetoedl, ylabel=slabel, epoch=start,
            xlim=[start, end], title=atitle, subtitle=subtitle)
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['USED_SNR_TIME'])
        rnd.plots.append(png)

        # snr versus time
        png = pngname % 'AUX_SNR_TIME'
        plot.veto_scatter(
            png, beforeaux, (winner.events, coincs), x='time', y=auxscol,
            label1=beforeauxl, label2=(usedl, coincl), epoch=start,
            xlim=[start, end], ylabel=auxslabel, title=atitle,
            subtitle=subtitle)
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['AUX_SNR_TIME'])
        rnd.plots.append(png)

        # snr versus frequency
        png = pngname % 'AUX_SNR_FREQUENCY'
        plot.veto_scatter(
            png, beforeaux, (winner.events, coincs), x=auxfcol, y=auxscol,
            label1=beforeauxl, label2=(usedl, coincl), xlabel=auxflabel,
            ylabel=auxslabel, title=atitle, subtitle=subtitle,
            legend_title="Aux:")
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['AUX_SNR_FREQUENCY'])
        rnd.plots.append(png)

        # frequency versus time coloured by SNR
        png = pngname % 'AUX_FREQUENCY_TIME'
        plot.veto_scatter(
            png, beforeaux, (winner.events, coincs), x='time', y=auxfcol,
            color=auxscol, label1=None, label2=[None, None], ylabel=auxflabel,
            clabel=auxslabel, clim=[3, 100], cmap='YlGnBu', epoch=start,
            xlim=[start, end], title=atitle, subtitle=subtitle)
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['AUX_FREQUENCY_TIME'])
        rnd.plots.append(png)

        # move to the next round
        rounds.append(rnd)
        rnd = core.HvetoRound(rnd.n + 1, pchannel, rank=scol,
                              segments=rnd.segments-rnd.vetoes)

    # write file with all segments
    segfile = os.path.join(
        segdir, '%s-HVETO_SEGMENTS-%d-%d.h5' % (ifo, start, duration))
    segments.write(segfile, overwrite=True)
    LOGGER.debug("Segment summary written to %s" % segfile)

    LOGGER.debug("Making summary figures...")

    # -- exit early if no rounds above threshold

    if not rounds:
        message = ("No rounds completed above threshold. Analysis stopped "
                   "with %s achieving significance of %.2f"
                   % (winner.name, winner.significance))
        LOGGER.critical(message)
        message = message.replace(
            winner.name, cis_link(winner.name, class_='alert-link'))
        message += '<br>[T<sub>win</sub>: %ss, SNR: %s]' % (
            winner.window, winner.snr)
        htmlv['context'] = 'warning'
        index = html.write_null_page(ifo, start, end, message, **htmlv)
        LOGGER.info("HTML report written to %s" % index)
        sys.exit(0)

    # -- plot all rounds impact
    pngname = os.path.join(plotdir, '%s-HVETO_%%s_ALL_ROUNDS-%d-%d.png' % (
        ifo, start, duration))
    plots = []
    title = '%s Hveto all rounds' % args.ifo
    subtitle = '%d rounds | %d-%d' % (len(rounds), start, end)

    # before/after histogram
    png = pngname % 'HISTOGRAM'
    beforel = 'Before analysis [%d events]' % len(pevents[0])
    afterl = 'After %d rounds [%d]' % (len(pevents) - 1, len(pevents[-1]))
    plot.before_after_histogram(
        png, pevents[0][scol], pevents[-1][scol],
        label1=beforel, label2=afterl, xlabel=slabel,
        title=title, subtitle=subtitle)
    png = FancyPlot(png, caption=plot.HEADER_CAPTION['HISTOGRAM'])
    plots.append(png)
    LOGGER.debug("Figure written to %s" % png)

    # efficiency/deadtime curve
    png = pngname % 'ROC'
    plot.hveto_roc(png, rounds, title=title, subtitle=subtitle)
    png = FancyPlot(png, caption=plot.HEADER_CAPTION['ROC'])
    plots.append(png)
    LOGGER.debug("Figure written to %s" % png)

    # frequency versus time
    png = pngname % '%s_TIME' % fcol.upper()
    labels = [str(r.n) for r in rounds]
    legtitle = 'Vetoed at\nround'
    plot.veto_scatter(
        png, pevents[0], pvetoed,
        label1='', label2=labels, title=title,
        subtitle=subtitle, ylabel=flabel, x='time', y=fcol,
        epoch=start, xlim=[start, end], legend_title=legtitle)
    png = FancyPlot(png, caption=plot.HEADER_CAPTION['TIME'])
    plots.append(png)
    LOGGER.debug("Figure written to %s" % png)

    # snr versus time
    png = pngname % 'SNR_TIME'
    plot.veto_scatter(
        png, pevents[0], pvetoed, label1='', label2=labels, title=title,
        subtitle=subtitle, ylabel=slabel, x='time', y=scol,
        epoch=start, xlim=[start, end], legend_title=legtitle)
    png = FancyPlot(png, caption=plot.HEADER_CAPTION['SNR_TIME'])
    plots.append(png)
    LOGGER.debug("Figure written to %s" % png)

    # -- write summary states to ASCII table and JSON
    json_ = {
        'user': getuser(),
        'host': getfqdn(),
        'date': str(datetime.datetime.now()),
        'configuration': inifile,
        'ifo': ifo,
        'gpsstart': start,
        'gpsend': end,
        'call': ' '.join(sys.argv),
        'rounds': [],
    }
    with open('summary-stats.txt', 'w') as f:
        # print header
        print('#N winner window SNR significance nveto use-percentage '
              'efficiency deadtime cumulative-efficiency cumulative-deadtime',
              file=f)
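        # a data row then follows the header, e.g. (illustrative values only):
        #   1 L1:SUS-ETMY_M0_DAMP_L_IN1_DQ 0.2 7.9 150.3 50 35.0 10.0 0.8 10.0 0.8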
        for r in rounds:
            # extract relevant statistics
            results = [
                ('round', r.n),
                ('name', r.winner.name),
                ('window', r.winner.window),
                ('snr', r.winner.snr),
                ('significance', r.winner.significance),
                ('nveto', r.efficiency[0]),
                ('use-percentage',
                    r.use_percentage[0] / r.use_percentage[1] * 100.),
                ('efficiency', r.efficiency[0] / r.efficiency[1] * 100.),
                ('deadtime', r.deadtime[0] / r.deadtime[1] * 100.),
                ('cumulative-efficiency',
                    r.cum_efficiency[0] / r.cum_efficiency[1] * 100.),
                ('cumulative-deadtime',
                    r.cum_deadtime[0] / r.cum_deadtime[1] * 100.),
            ]
            # write to ASCII
            print(' '.join(map(str, list(zip(*results))[1])), file=f)
            # write to JSON
            results.append(('files', r.files))
            json_['rounds'].append(dict(results))
    LOGGER.debug("Summary table written to %s" % f.name)

    with open('summary-stats.json', 'w') as f:
        json.dump(json_, f, sort_keys=True)
    LOGGER.debug("Summary JSON written to %s" % f.name)

    # -- generate workflow for omega scans

    if args.omega_scans:
        omegatimes = list(map(str, sorted(numpy.unique(
            [t['time'] for r in rounds for t in r.scans]))))
        LOGGER.debug("Collected %d times to omega scan" % len(omegatimes))
        newtimes = [t for t in omegatimes if not
                    os.path.exists(os.path.join(omegadir, str(t)))]
        LOGGER.debug("%d scans already complete or in progress, %d remaining"
                     % (len(omegatimes) - len(newtimes), len(newtimes)))
        if len(newtimes) > 0:
            LOGGER.info('Creating workflow for omega scans')
            flags = batch.get_command_line_flags(
                ifo=ifo,
                ignore_state_flags=True)
            condorcmds = batch.get_condor_arguments(
                timeout=4,
                extra_commands=["request_disk='1G'"],
                gps=start)
            batch.generate_dag(
                newtimes,
                flags=flags,
                submit=True,
                outdir=omegadir,
                condor_commands=condorcmds)
            LOGGER.info('Launched {} omega scans to condor'.format(
                len(newtimes)))
        else:
            LOGGER.debug('Skipping omega scans')

    # -- write HTML and finish

    index = html.write_hveto_page(
        ifo, start, end, rounds, plots,
        winners=[r.winner.name for r in rounds], **htmlv)
    LOGGER.debug("HTML written to %s" % index)
    LOGGER.debug("Analysis completed in %d seconds" % (time.time() - JOBSTART))
    LOGGER.info("-- Hveto complete --")
Example #17
def process_channel(processor: PostProcessor,
                    start: datetime,
                    stop: datetime,
                    downloader=TimeSeriesDict.get) -> str:
    """
    Post-processes a channel using the given post-processor, and streams to a file in the working directory.
    The output .hdf5 file is named after the channel and the start time.
    This is because inserting into an existing file is unsupported: it would
    require reading out the whole database and re-writing it again, which is
    not a high priority to support.

    :return: filename of the generated post-processed channel.

    >>> from channeling import config, process_channel, PostProcessor
    >>> from util import config2dataclass
    >>>
    >>> for channel in eval(config['DEFAULT']['channels']):
    ...     for processor in config2dataclass(PostProcessor, config, channel):
    ...         process_channel(processor, start, stop)

    or even
    >>> from channeling import config, process_channel, PostProcessor
    >>> from util import config2dataclass
    >>> from multiprocessing import Pool
    >>>
    >>> p = lambda channel: [process_channel(processor, start, stop) for processor in
    ...      config2dataclass(PostProcessor, config, channel)]
    >>>
    >>> pool = Pool()
    >>> pool.map(p, eval(config['DEFAULT']['channels']))

    """

    # use h5py to make a mutable object pointing to a file on disk.
    channel_file, filename = path2h5file(
        get_path(f'{processor.channel} {start}', 'hdf5'))
    logger.debug(f'Initiated hdf5 stream to {filename}')

    # get the number of strides.
    num_strides = (stop - start) // processor.stride_length

    # create list of start and end times.
    strides = [[
        start + processor.stride_length * i,
        start + processor.stride_length * (i + 1)
    ] for i in range(num_strides)]
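    # note: the floor division above drops any remainder shorter than one
    # stride, e.g. a 10.5-hour span with a 1-hour stride_length gives
    # num_strides = 10 and the trailing half hour is never processed.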

    # stride loop.
    for stride_start, stride_stop in strides:

        if data_exists(processor.output_channels,
                       to_gps(stride_stop).seconds, channel_file):
            # data for this stride already exists on disk for all possible
            # output channels, so skip it.
            continue

        # get the data.
        logger.debug(
            f'Initiating data download for {processor.channel} ({stride_start} to {stride_stop})'
        )

        # separately download all observing segments within the stride, or one segment for the whole stride.
        # this is set by the processor.respect_segments: bool option.
        # it really should be processor.respect_segments: str = 'L1:DMT-ANALYSIS_READY:1' for generality.
        segments = [[int(s.start), int(s.end)] for s in DataQualityFlag.query(
            'L1:DMT-ANALYSIS_READY:1', to_gps(stride_start), to_gps(
                stride_stop)).active] if processor.respect_segments else [[
                    to_gps(stride_start).seconds,
                    to_gps(stride_stop).seconds
                ]]
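        # a possible generalisation (sketch only, not part of this module):
        # let the option carry the flag name itself, e.g.
        #   flag = (processor.respect_segments
        #           if isinstance(processor.respect_segments, str)
        #           else 'L1:DMT-ANALYSIS_READY:1')
        # and query that flag instead of the hard-coded one above.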

        raw_segments = list()
        for seg_start, seg_stop in segments:
            try:
                raw_segments.append([
                    downloader([processor.channel],
                               start=seg_start,
                               end=seg_stop + processor.extra_seconds),
                    seg_start
                ])
            except RuntimeError:  # sometimes the data does not exist on the server. The show must go on, though.
                logger.warning(
                    f'SKIPPING download for {processor.channel} ({stride_start} to {stride_stop}) !!'
                )

        logger.info(
            f'Completed data download for {processor.channel} ({stride_start} to {stride_stop})'
        )

        for raw, segment_start in raw_segments:
            # use the processor to compute each downloaded segment in the stride.
            finished_segment = processor.compute(raw[processor.channel])
            logger.info(
                f'Generated {processor.__class__.__name__} for {processor.channel}'
            )

            # write each computed segment to the channel file.
            write_to_disk(finished_segment, segment_start, channel_file)

        logger.info(f'Completed stride {stride_start} to {stride_stop}')

    logger.debug(f'Completed channel at {filename}')

    # for automated usage of the post-processed data, return the generated filename.
    return filename
Example #18
def validate_segments(ifos, start, end, cp, trigger_time=None):
    """determine analysis ready segments during requested analysis time
    Parameters
    ----------
    ifos : `str`
        list of ifos used in X-Pipeline analysis

    start : `float`, :class:`~gwpy.time.LIGOTimeGPS`

    end : `float`, `~gwpy.time.LIGOTimeGPS`

    cp : `object` ConfigParser object

    trigger_time

    Returns
    -------
    analysis_seg_files : `list` of `str`
        paths of the segment files written for each interferometer
    """
    analysis_seg_files = []

    # If simulating noise, skip all segment, veto
    # and network validation checks. The on-source and off-source data are
    # simulated and therefore have no DQ issues.
    if cp.has_option('parameters', 'makeSimulatedNoise'):
        for ifo in ifos:
            print('Making simulated noise, creating temp segment file')
            with open('segments_{0}.txt'.format(ifo), 'w') as f:
                f.write('0 {0} {1} {2}'.format(start, end, end - start))
            analysis_seg_files.append('segments_{0}.txt'.format(ifo))
    else:
        for ifo in ifos:
            if cp.has_option(ifo, 'segment-list'):
                if not os.path.isfile(cp.get(ifo, 'segment-list')):
                    raise ValueError('The segment file given by the '
                                     'segment-list option does not exist. '
                                     'Please supply a valid file, or comment '
                                     'out the option to query the segment '
                                     'database instead.')
                else:
                    analysis_seg_files.append(cp.get(ifo, 'segment-list'))
            else:
                # Query for veto definer file
                vdf = query_veto_definer_file(ifo, start, end, cp)

                # Filter for cat1 vetos
                segs = filter_for_cat_type(vdf, ifo, [1])

                # ---- Write out cat1 veto to text file.
                filename_cat1 = "input/" + ifo + "-veto-cat1.txt"
                segs.write(filename_cat1)

                # Compute analysis ready segments in order to
                # subtract out cat1 vetos
                analysis_ready = DataQualityFlag.query(
                    '{0}:DMT-ANALYSIS_READY:1'.format(ifo), start, end)

                # Subtract cat 1 veto from analysis_ready
                analysis_ready_minus_cat1 = analysis_ready.active - segs
                # Save new segment list to file
                filename_analysis_ready_minus_cat1 = "input/" + ifo + "_science_cat1.txt"
                analysis_ready_minus_cat1.write(
                    filename_analysis_ready_minus_cat1)
                analysis_seg_files.append(filename_analysis_ready_minus_cat1)

    return analysis_seg_files
Example #19
def main(args=None):
    """Run the cache_events tool
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    ifo = args.ifo
    start = int(args.gpsstart)
    end = int(args.gpsend)
    duration = end - start

    LOGGER.info("-- Welcome to Hveto --")
    LOGGER.info("GPS start time: %d" % start)
    LOGGER.info("GPS end time: %d" % end)
    LOGGER.info("Interferometer: %s" % ifo)

    # -- initialisation -------------------------------

    # read configuration
    cp = config.HvetoConfigParser(ifo=args.ifo)
    cp.read(map(str, args.config_file))
    LOGGER.info("Parsed configuration file(s)")

    # format output directory
    outdir = args.output_directory
    outdir.mkdir(parents=True, exist_ok=True)
    LOGGER.info("Working directory: {}".format(outdir))
    trigdir = outdir / 'triggers'
    trigdir.mkdir(parents=True, exist_ok=True)

    # get segments
    aflag = cp.get('segments', 'analysis-flag')
    url = cp.get('segments', 'url')
    padding = cp.getfloats('segments', 'padding')
    if args.analysis_segments:
        segs_ = DataQualityDict.read(args.analysis_segments, gpstype=float)
        analysis = segs_[aflag]
        span = SegmentList([Segment(start, end)])
        analysis.active &= span
        analysis.known &= span
        analysis.coalesce()
        LOGGER.debug("Segments read from disk")
    else:
        analysis = DataQualityFlag.query(aflag, start, end, url=url)
        LOGGER.debug("Segments recovered from %s" % url)
    analysis.pad(*padding)
    livetime = int(abs(analysis.active))
    livetimepc = livetime / duration * 100.
    LOGGER.info("Retrieved %d segments for %s with %ss (%.2f%%) livetime" %
                (len(analysis.active), aflag, livetime, livetimepc))

    snrs = cp.getfloats('hveto', 'snr-thresholds')
    minsnr = min(snrs)

    # -- utility methods ------------------------------

    def create_path(channel):
        ifo, name = channel.split(':', 1)
        name = name.replace('-', '_')
        return trigdir / "{}-{}-{}-{}.h5".format(ifo, name, start, duration)
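    # e.g. create_path('L1:GDS-CALIB_STRAIN') returns
    # <trigdir>/L1-GDS_CALIB_STRAIN-<start>-<duration>.h5
    # (channel name shown for illustration only).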

    def read_and_cache_events(channel,
                              etg,
                              cache=None,
                              trigfind_kw={},
                              **read_kw):
        cfile = create_path(channel)
        # read existing cached triggers and work out new segments to query
        if args.append and cfile.is_file():
            previous = DataQualityFlag.read(
                str(cfile),
                path='segments',
                format='hdf5',
            ).coalesce()
            new = analysis - previous
        else:
            new = analysis.copy()
        # get cache of files
        if cache is None:
            cache = find_trigger_files(channel, etg, new.active, **trigfind_kw)
        else:
            cache = list(
                filter(
                    lambda e: new.active.intersects_segment(file_segment(e)),
                    cache,
                ))
        # restrict 'active' segments to when we have data
        try:
            new.active &= cache_segments(cache)
        except IndexError:
            new.active = type(new.active)()
        # find new triggers
        try:
            trigs = get_triggers(channel,
                                 etg,
                                 new.active,
                                 cache=cache,
                                 raw=True,
                                 **read_kw)
        # catch error and continue
        except ValueError as e:
            warnings.warn('%s: %s' % (type(e).__name__, str(e)))
        else:
            path = write_events(channel, trigs, new)
            try:
                return path, len(trigs)
            except TypeError:  # None
                return

    def write_events(channel, tab, segments):
        """Write events to file with a given filename
        """
        # get filename
        path = create_path(channel)
        h5f = h5py.File(str(path), 'a')

        # read existing table from file
        try:
            old = tab.read(h5f["triggers"], format="hdf5")
        except KeyError:
            pass
        else:
            tab = vstack([old, tab])

        # append event table
        tab.write(h5f, path="triggers", append=True, overwrite=True)

        # write segments
        try:
            oldsegs = DataQualityFlag.read(h5f, path="segments", format="hdf5")
        except KeyError:
            pass
        else:
            segments = oldsegs + segments
        segments.write(h5f, path="segments", append=True, overwrite=True)

        # write file to disk
        h5f.close()
        return path
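    # note: write_events is effectively append-by-rewrite, i.e. any existing
    # triggers and segments are read back, merged with the new ones, and the
    # datasets are overwritten in place.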

    # -- load channels --------------------------------

    # get primary channel name
    pchannel = cp.get('primary', 'channel')

    # read auxiliary cache
    if args.auxiliary_cache is not None:
        acache = [e for c in args.auxiliary_cache for e in read_cache(str(c))]
    else:
        acache = None

    # load auxiliary channels
    auxetg = cp.get('auxiliary', 'trigger-generator')
    auxfreq = cp.getfloats('auxiliary', 'frequency-range')
    try:
        auxchannels = cp.get('auxiliary', 'channels').strip('\n').split('\n')
    except config.configparser.NoOptionError:
        auxchannels = find_auxiliary_channels(auxetg,
                                              start,
                                              ifo=args.ifo,
                                              cache=acache)

    # load unsafe channels list
    _unsafe = cp.get('safety', 'unsafe-channels')
    if os.path.isfile(_unsafe):  # from file
        unsafe = set()
        with open(_unsafe, 'r') as f:
            for c in f.read().rstrip('\n').split('\n'):
                if c.startswith('%(IFO)s'):
                    unsafe.add(c.replace('%(IFO)s', ifo))
                elif not c.startswith('%s:' % ifo):
                    unsafe.add('%s:%s' % (ifo, c))
                else:
                    unsafe.add(c)
    else:  # or from line-separated list
        unsafe = set(_unsafe.strip('\n').split('\n'))
    unsafe.add(pchannel)
    cp.set('safety', 'unsafe-channels', '\n'.join(sorted(unsafe)))
    LOGGER.debug("Read list of %d unsafe channels" % len(unsafe))

    # remove duplicates
    auxchannels = sorted(set(auxchannels))
    LOGGER.debug("Read list of %d auxiliary channels" % len(auxchannels))

    # remove unsafe channels
    nunsafe = 0
    for i in range(len(auxchannels) - 1, -1, -1):
        if auxchannels[i] in unsafe:
            LOGGER.warning("Auxiliary channel %r identified as unsafe and has "
                           "been removed" % auxchannels[i])
            auxchannels.pop(i)
            nunsafe += 1
    LOGGER.debug("%d auxiliary channels identified as unsafe" % nunsafe)
    naux = len(auxchannels)
    LOGGER.info("Identified %d auxiliary channels to process" % naux)

    # -- load primary triggers -------------------------

    LOGGER.info("Reading events for primary channel...")

    # read primary cache
    if args.primary_cache is not None:
        pcache = [e for c in args.primary_cache for e in read_cache(str(c))]
    else:
        pcache = None

    # get primary params
    petg = cp.get('primary', 'trigger-generator')
    psnr = cp.getfloat('primary', 'snr-threshold')
    pfreq = cp.getfloats('primary', 'frequency-range')
    preadkw = cp.getparams('primary', 'read-')
    ptrigfindkw = cp.getparams('primary', 'trigfind-')

    # load primary triggers
    out = read_and_cache_events(pchannel,
                                petg,
                                snr=psnr,
                                frange=pfreq,
                                cache=pcache,
                                trigfind_kw=ptrigfindkw,
                                **preadkw)
    try:
        e, n = out
    except TypeError:
        e = None
        n = 0
    if n:
        LOGGER.info("Cached %d new events for %s" % (n, pchannel))
    elif args.append and e is not None and e.is_file():
        LOGGER.info("Cached 0 new events for %s" % pchannel)
    else:
        message = "No events found for %r in %d seconds of livetime" % (
            pchannel, livetime)
        LOGGER.critical(message)

    # write primary to local cache
    pname = trigdir / '{}-HVETO_PRIMARY_CACHE-{}-{}.lcf'.format(
        ifo,
        start,
        duration,
    )
    write_lal_cache(str(pname), [e])
    LOGGER.info('Primary cache written to {}'.format(pname))

    # -- load auxiliary triggers -----------------------

    LOGGER.info("Reading triggers for aux channels...")
    counter = multiprocessing.Value('i', 0)

    areadkw = cp.getparams('auxiliary', 'read-')
    atrigfindkw = cp.getparams('auxiliary', 'trigfind-')

    def read_and_write_aux_triggers(channel):
        if acache is None:
            auxcache = None
        else:
            ifo, name = channel.split(':')
            match = "{}-{}".format(ifo, name.replace('-', '_'))
            auxcache = [e for e in acache if Path(e).name.startswith(match)]

        out = read_and_cache_events(channel,
                                    auxetg,
                                    cache=auxcache,
                                    snr=minsnr,
                                    frange=auxfreq,
                                    trigfind_kw=atrigfindkw,
                                    **areadkw)
        try:
            e, n = out
        except TypeError:
            e = None
            n = 0
        # log result of load
        with counter.get_lock():
            counter.value += 1
            tag = '[%d/%d]' % (counter.value, naux)
            if e is None:  # something went wrong
                LOGGER.critical("    %s Failed to read events for %s" %
                                (tag, channel))
            else:  # either read events or nothing new
                LOGGER.debug("    %s Cached %d new events for %s" %
                             (tag, n, channel))
        return e

    # map with multiprocessing
    if args.nproc > 1:
        pool = multiprocessing.Pool(processes=args.nproc)
        results = pool.map(read_and_write_aux_triggers, auxchannels)
        pool.close()
    # map without multiprocessing
    else:
        results = map(read_and_write_aux_triggers, auxchannels)

    acache = [x for x in results if x is not None]
    aname = trigdir / '{}-HVETO_AUXILIARY_CACHE-{}-{}.lcf'.format(
        ifo,
        start,
        duration,
    )
    write_lal_cache(str(aname), acache)
    LOGGER.info('Auxiliary cache written to {}'.format(aname))

    # -- finish ----------------------------------------

    LOGGER.info('Done, you can use these cache files in an hveto analysis by '
                'passing the following arguments:\n\n--primary-cache {} '
                '--auxiliary-cache {}\n'.format(pname, aname))
Example #20
def main(args=None):
    """Run the online Guardian node visualization tool
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    # parse command line options
    ifo = args.ifo
    if not args.ifo:
        parser.error('--ifo must be given if not obvious from the host')
    start = getattr(args, 'gpsstart')
    end = getattr(args, 'gpsend')
    duration = int(ceil(end) - floor(start))
    categories = args.categories.split(',')
    for i, c in enumerate(categories):
        try:
            categories[i] = int(c)
        except (TypeError, ValueError):
            pass
    vetofile = getattr(args, 'veto-definer-file')
    vetofile = (vetofile if urlparse(vetofile).netloc
                else os.path.abspath(vetofile))
    args.metric = args.metric or DEFAULT_METRICS

    # -- setup --------------------------------------

    tag = '%d-%d' % (start.seconds, end.seconds)
    outdir = os.path.abspath(os.path.join(args.output_directory, tag))
    mkdir(outdir)
    os.chdir(outdir)
    mkdir('etc', 'segments', 'condor')

    # -- segment handling ---------------------------

    os.chdir('segments')
    ALLSEGMENTS = DataQualityDict()

    # -- get analysis segments ----------------------

    aflags = args.analysis_segments
    asegments = DataQualityFlag('%s:VET-ANALYSIS_SEGMENTS:0' % ifo)
    for i, flag in enumerate(aflags):
        # use union of segments from a file
        if os.path.isfile(flag):
            asegments += DataQualityFlag.read(flag)
        # or intersection of segments from multiple flags
        else:
            new = DataQualityFlag.query(flag, start, end, url=args.segdb)
            if i:
                asegments.known &= new.known
                asegments.active &= new.active
            else:
                asegments.known = new.known
                asegments.active = new.active
    ALLSEGMENTS[asegments.name] = asegments

    if os.path.isfile(aflags[0]):
        asegments.filename = aflags

    # -- read veto definer and process --------------

    if urlparse(vetofile).netloc:
        tmp = urlopen(vetofile)
        vetofile = os.path.abspath(os.path.basename(vetofile))
        with open(vetofile, 'wb') as f:
            f.write(tmp.read())
        LOGGER.info('Downloaded veto definer file')
    vdf = DataQualityDict.from_veto_definer_file(vetofile,
                                                 format='ligolw',
                                                 start=start,
                                                 end=end,
                                                 ifo=ifo)
    LOGGER.info('Read %d flags from veto definer' % len(vdf.keys()))

    # populate veto definer file from database
    vdf.populate(source=args.segdb, on_error=args.on_segdb_error)
    ALLSEGMENTS += vdf

    # organise flags into categories
    flags = dict((c, DataQualityDict()) for c in categories)
    for name, flag in vdf.items():
        try:
            flags[flag.category][name] = flag
        except KeyError:
            pass

    # find the states and segments for each category
    states, after, oldtitle = (dict(), None, '')
    for i, category in enumerate(categories):
        title = 'Cat %d' % category if isinstance(category, int) else category
        tag = re_cchar.sub('_', str(title).upper())
        states[category] = SummaryState(
            'After %s' % oldtitle,
            key=tag,
            known=after.known,
            active=after.active,
            definition=after.name,
        ) if i else SummaryState(
            args.analysis_name,
            key=args.analysis_name,
            definition=asegments.name,
        )
        try:
            segs = flags[category].union()
        except TypeError:  # no flags
            segs = DataQualityFlag()
        segs.name = '%s:VET-ANALYSIS_%s:0' % (ifo, tag)
        ALLSEGMENTS[segs.name] = segs
        after = (after - segs) if i else (asegments - segs)
        after.name = '%s:VET-ANALYSIS_AFTER_%s:0' % (ifo, tag)
        ALLSEGMENTS[after.name] = after
        oldtitle = title
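    # e.g. with categories = [1, 2, 4] the cascade runs: the first state is
    # the raw analysis segments; 'After Cat 1' subtracts the union of
    # category-1 flags; 'After Cat 2' subtracts category-2 flags from what
    # remains; and so on, each state seeding the next subtraction.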

    # write all segments to disk
    segfile = os.path.abspath('%s-VET_SEGMENTS-%d-%d.xml.gz' %
                              (ifo, start.seconds, duration))
    ALLSEGMENTS.write(segfile)

    os.chdir(os.pardir)

    if args.verbose:
        LOGGER.debug("All segments accessed and written to\n%s" % segfile)

    # -- job preparation ----------------------------

    os.chdir('etc')

    configs = []
    for category in categories:
        title = ('Category %d' % category if isinstance(category, int)
                 else category)
        tab = 'tab-%s' % title
        config = ConfigParser()

        # add segment-database configuration
        add_config_section(config, 'segment-database', url=args.segdb)

        # add plot configurations
        pconfig = ConfigParser()
        pconfig.read(args.config_file)
        for section in pconfig.sections():
            if section.startswith('plot-'):
                config._sections[section] = pconfig._sections[section].copy()

        try:
            plots = pconfig.items('plots-%s' % category, raw=True)
        except NoSectionError:
            try:
                plots = pconfig.items('plots', raw=True)
            except NoSectionError:
                plots = []

        # add state
        if args.independent:
            state = states[categories[0]]
        else:
            state = states[category]
        sname = 'state-%s' % state.key
        add_config_section(config,
                           sname,
                           key=state.key,
                           name=state.name,
                           definition=state.definition,
                           filename=segfile)

        # add plugin
        add_config_section(config, 'plugins', **{'gwvet.tabs': ''})

        # define metrics
        if category == 1:
            metrics = ['Deadtime']
        else:
            metrics = args.metric

        # define summary tab
        if category == 1:
            tab = configure_veto_tab(config,
                                     title,
                                     title,
                                     state,
                                     flags[category].keys(),
                                     segfile,
                                     metrics,
                                     name='Summary',
                                     **{'veto-name': title})
        else:
            tab = configure_veto_tab(config,
                                     title,
                                     title,
                                     state,
                                     flags[category].keys(),
                                     segfile,
                                     metrics,
                                     name='Summary',
                                     **{
                                         'veto-name': title,
                                         'event-channel': args.event_channel,
                                         'event-generator':
                                         args.event_generator,
                                     })
        if len(categories) == 1:
            config.set(tab, 'index',
                       '%(gps-start-time)s-%(gps-end-time)s/index.html')
        for key, value in plots:
            if re.match(r'%\(flags\)s (?:plot-)?segments', value):
                config.set(tab, key, '%%(union)s,%s' % value)
                if '%s-labels' % key not in plots:
                    config.set(tab, '%s-labels' % key, 'Union,%(flags)s')
            else:
                config.set(tab, key, value)

        # now a tab for each flag
        for flag in flags[category]:
            if category == 1:
                tab = configure_veto_tab(config, flag, title, state, [flag],
                                         segfile, metrics)
            else:
                tab = configure_veto_tab(
                    config, flag, title, state, [flag], segfile, metrics, **{
                        'event-channel': args.event_channel,
                        'event-generator': args.event_generator
                    })
                if args.event_file:
                    config.set(tab, 'event-file', args.event_file)
            for key, value in plots:
                config.set(tab, key, value)

        if len(categories) > 1 and category != categories[-1]:
            with open('%s.ini' % re_cchar.sub('-', title.lower()), 'w') as f:
                config.write(f)
                configs.append(os.path.abspath(f.name))

    # configure summary job
    if len(categories) > 1:
        state = states[categories[0]]
        add_config_section(config,
                           'state-%s' % state.key,
                           key=state.key,
                           name=state.name,
                           definition=state.definition,
                           filename=segfile)
        try:
            plots = pconfig.items('plots', raw=True)
        except NoSectionError:
            plots = []
        flags = [f for c in categories for f in flags[c].keys()]
        tab = configure_veto_tab(
            config,
            'Impact of full veto definer file',
            None,
            state,
            flags,
            segfile,
            args.metric,
            shortname='Summary',
            index='%(gps-start-time)s-%(gps-end-time)s/index.html',
            **{
                'event-channel': args.event_channel,
                'event-generator': args.event_generator,
                'veto-name': 'All vetoes'
            })
        if args.event_file:
            config.set(tab, 'event-file', args.event_file)
        for key, value in plots:
            config.set(tab, key, value)
        with open('%s.ini' % re_cchar.sub('-', title.lower()), 'w') as f:
            config.write(f)
            configs.append(os.path.abspath(f.name))

    os.chdir(os.pardir)

    if args.verbose:
        LOGGER.debug("Generated configuration files for each category")

    # -- condor preparation -------------------------

    os.chdir(os.pardir)

    # get condor variables
    if getuser() == 'detchar':
        accgroup = 'ligo.prod.o1.detchar.dqproduct.gwpy'
    else:
        accgroup = 'ligo.dev.o1.detchar.dqproduct.gwpy'

    gwsumm_args = [
        '--gps-start-time',
        str(start.seconds),
        '--gps-end-time',
        str(end.seconds),
        '--ifo',
        ifo,
        '--file-tag',
        'gwpy-vet',
        '--condor-command',
        'accounting_group=%s' % accgroup,
        '--condor-command',
        'accounting_group_user=%s' % getuser(),
        '--on-segdb-error',
        args.on_segdb_error,
        '--output-dir',
        args.output_directory,
    ]
    for cf in args.global_config:
        gwsumm_args.extend(('--global-config', cf))
    for cf in configs:
        gwsumm_args.extend(('--config-file', cf))
    if args.verbose:
        gwsumm_args.append('--verbose')

    if args.verbose:
        LOGGER.debug('Generating summary DAG via:\n')
        LOGGER.debug(' '.join([batch.PROG] + gwsumm_args))

    # execute gwsumm in batch mode
    batch.main(args=gwsumm_args)
Example #21
def get_segments(flags,
                 segments,
                 cache=None,
                 url='https://segdb-er.ligo.caltech.edu',
                 **kwargs):
    """Fetch some segments from the segment database

    Parameters
    ----------
    flags : `str`, `list`
        one of more flags for which to query
    segments : `~gwpy.segments.DataQualityFlag`, `~gwpy.segments.SegmentList`
        span over which to query for flag segments
    cache : `~glue.lal.Cache`, optional
        cache of files to use as data source
    url : `str`
        URL of segment database, if ``cache`` is not given
    **kwargs
        other keyword arguments to pass to either
        `~gwpy.segments.DataQualityFlag.read` (if ``cache`` is given) or
        `~gwpy.segments.DataQualityFlag.query` (otherwise)

    Returns
    -------
    segments : `~gwpy.segments.DataQualityFlag`,
               `~gwpy.segments.DataQualityDict`
        a single `~gwpy.segments.DataQualityFlag` (if ``flags`` is given
        as a `str`), or a `~gwpy.segments.DataQualityDict` (if ``flags``
        is given as a `list`)
    """
    # format segments
    if isinstance(segments, DataQualityFlag):
        segments = segments.active
    elif isinstance(segments, tuple):
        segments = [Segment(to_gps(segments[0]), to_gps(segments[1]))]
    segments = SegmentList(segments)

    # get format for files
    if cache is not None and not isinstance(cache, Cache):
        kwargs.setdefault(
            'format',
            _get_valid_format('read', DataQualityFlag, None, None,
                              (cache[0], ), {}))

    # populate an existing set of flags
    if isinstance(flags, (DataQualityFlag, DataQualityDict)):
        return flags.populate(source=cache or url, segments=segments, **kwargs)
    # query one flag
    elif cache is None and isinstance(flags, str):
        return DataQualityFlag.query(flags, segments, url=url, **kwargs)
    # query lots of flags
    elif cache is None:
        return DataQualityDict.query(flags, segments, url=url, **kwargs)
    # read one flag
    elif flags is None or isinstance(flags, str):
        segs = DataQualityFlag.read(cache, flags, coalesce=False, **kwargs)
        if segs.known:
            segs.known &= segments
        else:
            segs.known = segments
        segs.active &= segments
        return segs
    # read lots of flags
    else:
        segs = DataQualityDict.read(cache, flags, coalesce=True, **kwargs)
        for name, flag in segs.items():
            flag.known &= segments
            flag.active &= segments
        return segs
Example #22
def main(args=None):
    """Run the primary scattering command-line tool
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    # set up logger
    logger = cli.logger(
        name=PROG.split('python -m ').pop(),
        level='DEBUG' if args.verbose else 'INFO',
    )

    # useful variables
    fthresh = (
        int(args.frequency_threshold) if args.frequency_threshold.is_integer()
        else args.frequency_threshold)
    multiplier = args.multiplier_for_threshold
    tstr = str(fthresh).replace('.', '_')
    gpsstr = '%s-%s' % (int(args.gpsstart), int(args.gpsend - args.gpsstart))
    args.optic = args.optic or list(OPTIC_MOTION_CHANNELS.keys())

    # go to working directory
    indir = os.getcwd()
    if not os.path.isdir(args.output_dir):
        os.makedirs(args.output_dir)
    os.chdir(args.output_dir)

    # set up output files
    summfile = '{}-SCATTERING_SUMMARY-{}.csv'.format(
        args.ifo, gpsstr)
    segfile = '{}-SCATTERING_SEGMENTS_{}_HZ-{}.h5'.format(
        args.ifo, tstr, gpsstr)

    # log start of process
    logger.info('{} Scattering {}-{}'.format(
        args.ifo, int(args.gpsstart), int(args.gpsend)))

    # -- get state segments -----------

    span = Segment(args.gpsstart, args.gpsend)

    # get segments
    if args.state_flag is not None:
        state = DataQualityFlag.query(
            args.state_flag, int(args.gpsstart), int(args.gpsend),
            url=DEFAULT_SEGMENT_SERVER,
        ).coalesce()
        statea = []
        padding = args.segment_start_pad + args.segment_end_pad
        for i, seg in enumerate(state.active):
            if abs(seg) > padding:
                statea.append(Segment(
                    seg[0] + args.segment_start_pad,
                    seg[1] - args.segment_end_pad,
                ))
            else:
                logger.debug(
                    "Segment length {} shorter than padding length {}, "
                    "skipping segment {}-{}".format(abs(seg), padding, *seg),
                )
        statea = SegmentList(statea)
        logger.debug("Downloaded %d segments for %s"
                     % (len(statea), args.state_flag))
    else:
        statea = SegmentList([span])
    livetime = float(abs(statea))
    logger.debug("Processing %.2f s of livetime" % livetime)

    # -- load h(t) --------------------

    args.main_channel = args.main_channel.format(IFO=args.ifo)
    logger.debug("Loading Omicron triggers for %s" % args.main_channel)

    if args.gpsstart >= 1230336018:  # Jan 1 2019
        ext = "h5"
        names = ["time", "frequency", "snr"]
        read_kw = {
            "columns": names,
            "selection": [
                "{0} < frequency < {1}".format(
                    args.fmin, multiplier * fthresh),
                ("time", in_segmentlist, statea),
            ],
            "format": "hdf5",
            "path": "triggers",
        }
    else:
        ext = "xml.gz"
        names = ['peak', 'peak_frequency', 'snr']
        read_kw = {
            "columns": names,
            "selection": [
                "{0} < peak_frequency < {1}".format(
                    args.fmin, multiplier * fthresh),
                ('peak', in_segmentlist, statea),
            ],
            "format": 'ligolw',
            "tablename": "sngl_burst",
        }
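    # note: the two branches differ only in trigger-file format and column
    # naming (time/frequency for HDF5 vs peak/peak_frequency for LIGO_LW XML);
    # the frequency and time selections applied are equivalent.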

    fullcache = []
    for seg in statea:
        cache = gwtrigfind.find_trigger_files(
            args.main_channel, 'omicron', seg[0], seg[1], ext=ext,
        )
        if len(cache) == 0:
            warnings.warn(
                "No Omicron triggers found for %s in segment [%d .. %d)"
                % (args.main_channel, seg[0], seg[1]),
            )
            continue
        fullcache.extend(cache)

    # read triggers
    if fullcache:
        trigs = EventTable.read(fullcache, nproc=args.nproc, **read_kw)
    else:  # no files (no livetime?)
        trigs = EventTable(names=names)

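    # keep only loud (SNR >= 8) triggers for the efficiency statistics below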
    highsnrtrigs = trigs[trigs['snr'] >= 8]
    logger.debug("%d read" % len(trigs))

    # -- prepare HTML -----------------

    links = [
        '%d-%d' % (int(args.gpsstart), int(args.gpsend)),
        ('Parameters', '#parameters'),
        ('Segments', (
            ('State flag', '#state-flag'),
            ('Optical sensors', '#osems'),
            ('Transmons', '#transmons'),
        )),
    ]
    if args.omega_scans:
        links.append(('Scans', '#omega-scans'))
    (brand, class_) = htmlio.get_brand(args.ifo, 'Scattering', args.gpsstart)
    navbar = htmlio.navbar(links, class_=class_, brand=brand)
    page = htmlio.new_bootstrap_page(
        title='%s Scattering | %d-%d' % (
            args.ifo, int(args.gpsstart), int(args.gpsend)),
        navbar=navbar)
    page.div(class_='pb-2 mt-3 mb-2 border-bottom')
    page.h1('%s Scattering: %d-%d'
            % (args.ifo, int(args.gpsstart), int(args.gpsend)))
    page.div.close()  # pb-2 mt-3 mb-2 border-bottom
    page.h2('Parameters', class_='mt-4 mb-4', id_='parameters')
    page.div(class_='row')
    page.div(class_='col-md-9 col-sm-12')
    page.add(htmlio.parameter_table(
        start=int(args.gpsstart), end=int(args.gpsend), flag=args.state_flag))
    page.div.close()  # col-md-9 col-sm-12

    # link to summary files
    page.div(class_='col-md-3 col-sm-12')
    page.add(htmlio.download_btn(
        [('Segments (HDF)', segfile),
         ('Triggers (CSV)', summfile)],
        btnclass='btn btn-%s dropdown-toggle' % args.ifo.lower(),
    ))
    page.div.close()  # col-md-3 col-sm-12
    page.div.close()  # row

    # command-line
    page.h5('Command-line:')
    page.add(htmlio.get_command_line(about=False, prog=PROG))

    # section header
    page.h2('Segments', class_='mt-4', id_='segments')

    if statea:  # contextual information
        paper = markup.oneliner.a(
            'Accadia et al. (2010)', target='_blank', class_='alert-link',
            href='http://iopscience.iop.org/article/10.1088/0264-9381/27'
                 '/19/194011')
        msg = (
            "Segments marked \"optical sensors\" below show evidence of beam "
            "scattering between {0} and {1} Hz based on the velocity of optic "
            "motion, with fringe frequencies projected using equation (3) of "
            "{2}. Segments marked \"transmons\" are based on whitened, "
            "band-limited RMS trends of transmon sensors. In both cases, "
            "yellow panels denote weak evidence for scattering, while red "
            "panels denote strong evidence."
        ).format(args.fmin, multiplier * fthresh, str(paper))
        page.add(htmlio.alert(msg, context=args.ifo.lower()))
    else:  # null segments
        page.add(htmlio.alert('No active analysis segments were found',
                              context='warning', dismiss=False))

    # record state segments
    if args.state_flag is not None:
        page.h3('State flag', class_='mt-3', id_='state-flag')
        page.div(id_='accordion1')
        page.add(htmlio.write_flag_html(
            state, span, 'state', parent='accordion1', context='success',
            plotdir='', facecolor=(0.2, 0.8, 0.2), edgecolor='darkgreen',
            known={'facecolor': 'red', 'edgecolor': 'darkred', 'height': 0.4}))
        page.div.close()

    # -- find scattering evidence -----

    # read data for OSEMs and transmons
    osems = ['%s:%s' % (args.ifo, c) for optic in args.optic
             for c in OPTIC_MOTION_CHANNELS[optic]]
    transmons = ['%s:%s' % (args.ifo, c) for c in TRANSMON_CHANNELS]
    allchannels = osems + transmons

    logger.info("Reading all timeseries data")
    alldata = []
    n = len(statea)
    for i, seg in enumerate(statea):
        msg = "{0}/{1} {2}:".rjust(30).format(
            str(i + 1).rjust(len(str(n))),
            n,
            str(seg),
        ) if args.verbose else False
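        # read all channels over this segment, downsampling to 128 Hz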
        alldata.append(
            get_data(allchannels, seg[0], seg[1],
                     frametype=args.frametype.format(IFO=args.ifo),
                     verbose=msg, nproc=args.nproc).resample(128))
    try:  # ensure that only available channels are analyzed
        osems = list(
            set(alldata[0].keys()) & set(alldata[-1].keys()) & set(osems))
        transmons = list(
            set(alldata[0].keys()) & set(alldata[-1].keys()) & set(transmons))
    except IndexError:
        osems = []
        transmons = []

    # initialize scattering segments
    scatter_segments = DataQualityDict()
    actives = SegmentList()

    # scattering based on OSEM velocity
    if statea:
        page.h3('Optical sensors (OSEMs)', class_='mt-3', id_='osems')
        page.div(id_='osems-group')
    logger.info('Searching for scatter based on OSEM velocity')

    for i, channel in enumerate(sorted(osems)):
        logger.info("-- Processing %s --" % channel)
        chanstr = re.sub('[:-]', '_', channel).replace('_', '-', 1)
        optic = channel.split('-')[1].split('_')[0]
        flag = '%s:DCH-%s_SCATTERING_GE_%s_HZ:1' % (args.ifo, optic, tstr)
        scatter_segments[channel] = DataQualityFlag(
            flag,
            isgood=False,
            description="Evidence for scattering above {0} Hz from {1} in "
                        "{2}".format(fthresh, optic, channel),
        )
        # set up plot(s)
        plot = Plot(figsize=[12, 12])
        axes = {}
        axes['position'] = plot.add_subplot(
            411, xscale='auto-gps', xlabel='')
        axes['fringef'] = plot.add_subplot(
            412, sharex=axes['position'], xlabel='')
        axes['triggers'] = plot.add_subplot(
            413, sharex=axes['position'], xlabel='')
        axes['segments'] = plot.add_subplot(
            414, projection='segments', sharex=axes['position'])
        plot.subplots_adjust(bottom=.07, top=.95)
        fringecolors = [None] * len(FREQUENCY_MULTIPLIERS)
        histdata = {x: numpy.empty((0,)) for x in FREQUENCY_MULTIPLIERS}
        linecolor = None
        # loop over state segments and find scattering fringes
        for j, seg in enumerate(statea):
            logger.debug("Processing segment [%d .. %d)" % seg)
            ts = alldata[j][channel]
            # get raw data and plot
            line = axes['position'].plot(ts, color=linecolor)[0]
            linecolor = line.get_color()
            # get fringe frequency and plot
            fringef = get_fringe_frequency(ts, multiplier=1)
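            # overlay harmonics of the fringe frequency, iterating in reverse
            # so the lowest harmonic is drawn last (on top), and accumulate
            # the samples for the cumulative histogram below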
            for k, m in list(enumerate(FREQUENCY_MULTIPLIERS))[::-1]:
                fm = fringef * m
                line = axes['fringef'].plot(
                    fm, color=fringecolors[k],
                    label=(r'$f\times%d$' % m if j == 0 else None))[0]
                fringecolors[k] = line.get_color()
                histdata[m] = numpy.resize(
                    histdata[m], (histdata[m].size + fm.size,))
                histdata[m][-fm.size:] = fm.value
            # get segments and plot
            scatter = get_segments(
                fringef * multiplier,
                fthresh,
                name=flag,
                pad=args.segment_padding
            )
            axes['segments'].plot(
                scatter, facecolor='red', edgecolor='darkred',
                known={'alpha': 0.6, 'facecolor': 'lightgray',
                       'edgecolor': 'gray', 'height': 0.4},
                height=0.8, y=0, label=' ',
            )
            scatter_segments[channel] += scatter
            logger.debug(
                "    Found %d scattering segments" % (len(scatter.active)))
        logger.debug("Completed channel %s, found %d segments in total"
                     % (channel, len(scatter_segments[channel].active)))

        # calculate efficiency and deadtime of veto
        deadtime = abs(scatter_segments[channel].active)
        try:
            deadtimepc = deadtime / livetime * 100
        except ZeroDivisionError:
            deadtimepc = 0.
        logger.info("Deadtime: %.2f%% (%.2f/%ds)"
                    % (deadtimepc, deadtime, livetime))
        efficiency = in_segmentlist(highsnrtrigs[names[0]],
                                    scatter_segments[channel].active).sum()
        try:
            efficiencypc = efficiency / len(highsnrtrigs) * 100
        except ZeroDivisionError:
            efficiencypc = 0.
        logger.info("Efficiency (SNR>=8): %.2f%% (%d/%d)"
                    % (efficiencypc, efficiency, len(highsnrtrigs)))
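        # the efficiency/deadtime ratio is used below to rank channels:
        # values above 2 are treated as strong evidence for scattering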
        if deadtimepc == 0.:
            effdt = 0
        else:
            effdt = efficiencypc/deadtimepc
        logger.info("Efficiency/Deadtime: %.2f" % effdt)

        if abs(scatter_segments[channel].active):
            actives.extend(scatter_segments[channel].active)

        # finalize plot
        logger.debug("Plotting")
        name = texify(channel)
        axes['position'].set_title("Scattering evidence in %s" % name)
        axes['position'].set_xlabel('')
        axes['position'].set_ylabel(r'Position [$\mu$m]')
        axes['position'].text(
            0.01, 0.95, 'Optic position',
            transform=axes['position'].transAxes, va='top', ha='left',
            bbox={'edgecolor': 'none', 'facecolor': 'white', 'alpha': .5})
        axes['fringef'].plot(
            span, [fthresh, fthresh], 'k--')
        axes['fringef'].set_xlabel('')
        axes['fringef'].set_ylabel(r'Frequency [Hz]')
        axes['fringef'].yaxis.tick_right()
        axes['fringef'].yaxis.set_label_position("right")
        axes['fringef'].set_ylim(0, multiplier * fthresh)
        axes['fringef'].text(
            0.01, 0.95, 'Calculated fringe frequency',
            transform=axes['fringef'].transAxes, va='top', ha='left',
            bbox={'edgecolor': 'none', 'facecolor': 'white', 'alpha': .5})
        handles, labels = axes['fringef'].get_legend_handles_labels()
        axes['fringef'].legend(handles[::-1], labels[::-1], loc='upper right',
                               borderaxespad=0, bbox_to_anchor=(-0.01, 1.),
                               handlelength=1)

        axes['triggers'].scatter(
            trigs[names[0]],
            trigs[names[1]],
            c=trigs[names[2]],
            edgecolor='none',
        )
        name = texify(args.main_channel)
        axes['triggers'].text(
            0.01, 0.95,
            '%s event triggers (Omicron)' % name,
            transform=axes['triggers'].transAxes, va='top', ha='left',
            bbox={'edgecolor': 'none', 'facecolor': 'white', 'alpha': .5})
        axes['triggers'].set_ylabel('Frequency [Hz]')
        axes['triggers'].set_ylim(args.fmin, multiplier * fthresh)
        axes['triggers'].colorbar(cmap='YlGnBu', clim=(3, 100), norm='log',
                                  label='Signal-to-noise ratio')
        axes['segments'].set_ylim(-.55, .55)
        axes['segments'].text(
            0.01, 0.95,
            r'Time segments with $f\times%d > %.2f$ Hz' % (
                multiplier, fthresh),
            transform=axes['segments'].transAxes, va='top', ha='left',
            bbox={'edgecolor': 'none', 'facecolor': 'white', 'alpha': .5})
        for ax in axes.values():
            ax.set_epoch(int(args.gpsstart))
            ax.set_xlim(*span)
        png = '%s_SCATTERING_%s_HZ-%s.png' % (chanstr, tstr, gpsstr)
        try:
            plot.save(png)
        except OverflowError as e:
            warnings.warn(str(e))
            plot.axes[1].set_ylim(0, multiplier * fthresh)
            plot.refresh()
            plot.save(png)
        plot.close()
        logger.debug("%s written." % png)

        # make histogram
        histogram = Plot(figsize=[12, 6])
        ax = histogram.gca()
        hrange = (0, multiplier * fthresh)
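        # weight each sample by the sample spacing (ts.dx) so the reversed
        # cumulative histogram measures time in seconds above each frequency;
        # note ts here is the series left over from the final state segment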
        for m, color in list(zip(histdata, fringecolors))[::-1]:
            if histdata[m].size:
                ax.hist(
                    histdata[m], facecolor=color, alpha=.6, range=hrange,
                    bins=50, histtype='stepfilled', label=r'$f\times%d$' % m,
                    cumulative=-1, weights=ts.dx.value, bottom=1e-100,
                    log=True)
            else:
                ax.plot(histdata[m], color=color, label=r'$f\times%d$' % m)
                ax.set_yscale('log')
        ax.set_ylim(.01, float(livetime))
        ax.set_ylabel('Time with fringe above frequency [s]')
        ax.set_xlim(*hrange)
        ax.set_xlabel('Frequency [Hz]')
        ax.set_title(axes['position'].get_title())
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles[::-1], labels[::-1], loc='upper right')
        hpng = '%s_SCATTERING_HISTOGRAM-%s.png' % (chanstr, gpsstr)
        histogram.save(hpng)
        histogram.close()
        logger.debug("%s written." % hpng)

        # write HTML
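        # only render channels with evidence: red ('danger') cards mark
        # efficiency/deadtime above 2, yellow ('warning') cards mark weaker
        # evidence, and quiet channels are skipped entirely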
        if deadtime != 0 and effdt > 2:
            context = 'danger'
        elif ((deadtime != 0 and effdt < 2) or
              (histdata[multiplier].size and
               histdata[multiplier].max() >= fthresh / 2.)):
            context = 'warning'
        else:
            continue
        page.div(class_='card border-%s mb-1 shadow-sm' % context)
        page.div(class_='card-header text-white bg-%s' % context)
        page.a(channel, class_='collapsed card-link cis-link',
               href='#osem%s' % i, **{'data-toggle': 'collapse'})
        page.div.close()  # card-header
        page.div(id_='osem%s' % i, class_='collapse',
                 **{'data-parent': '#osems-group'})
        page.div(class_='card-body')
        page.div(class_='row')
        img = htmlio.FancyPlot(
            png, caption=SCATTER_CAPTION.format(CHANNEL=channel))
        page.div(class_='col-md-10 offset-md-1')
        page.add(htmlio.fancybox_img(img))
        page.div.close()  # col-md-10 offset-md-1
        himg = htmlio.FancyPlot(
            hpng, caption=HIST_CAPTION.format(CHANNEL=channel))
        page.div(class_='col-md-10 offset-md-1')
        page.add(htmlio.fancybox_img(himg))
        page.div.close()  # col-md-10 offset-md-1
        page.div.close()  # row
        segs = StringIO()
        if deadtime:
            page.p("%d segments were found predicting a scattering fringe "
                   "above %.2f Hz." % (
                       len(scatter_segments[channel].active),
                       fthresh))
            page.table(class_='table table-sm table-hover')
            page.tbody()
            page.tr()
            page.th('Deadtime')
            page.td('%.2f/%d seconds' % (deadtime, livetime))
            page.td('%.2f%%' % deadtimepc)
            page.tr.close()
            page.tr()
            page.th('Efficiency<br><small>(SNR&ge;8 and '
                    '%.2f Hz &lt; f<sub>peak</sub> &lt; %.2f Hz)</small>'
                    % (args.fmin, multiplier * fthresh))
            page.td('%d/%d events' % (efficiency, len(highsnrtrigs)))
            page.td('%.2f%%' % efficiencypc)
            page.tr.close()
            page.tr()
            page.th('Efficiency/Deadtime')
            page.td()
            page.td('%.2f' % effdt)
            page.tr.close()
            page.tbody.close()
            page.table.close()
            scatter_segments[channel].active.write(segs, format='segwizard',
                                                   coltype=float)
            page.pre(segs.getvalue())
        else:
            page.p("No segments were found with scattering above %.2f Hz."
                   % fthresh)
        page.div.close()  # card-body
        page.div.close()  # collapse
        page.div.close()  # card

    if statea:  # close accordion
        page.div.close()  # osems-group

    # scattering based on transmon BLRMS
    if statea:
        page.h3('Transmons', class_='mt-3', id_='transmons')
        page.div(id_='transmons-group')
    logger.info('Searching for scatter based on band-limited RMS of transmons')

    for i, channel in enumerate(sorted(transmons)):
        logger.info("-- Processing %s --" % channel)
        optic = channel.split('-')[1][:6]
        flag = '%s:DCH-%s_SCATTERING_BLRMS:1' % (args.ifo, optic)
        scatter_segments[channel] = DataQualityFlag(
            flag,
            isgood=False,
            description="Evidence for scattering from whitened, band-limited "
                        "RMS trends of {0}".format(channel),
        )

        # loop over state segments and compute BLRMS
        for j, seg in enumerate(statea):
            logger.debug("Processing segment [%d .. %d)" % seg)
            wblrms = get_blrms(
                alldata[j][channel],
                flow=args.bandpass_flow,
                fhigh=args.bandpass_fhigh,
            )
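            # flag stretches where the whitened BLRMS exceeds its mean by
            # args.sigma standard deviations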
            scatter = get_segments(
                wblrms,
                numpy.mean(wblrms) + args.sigma * numpy.std(wblrms),
                name=flag,
            )
            scatter_segments[channel] += scatter
            logger.debug(
                "    Found %d scattering segments" % (len(scatter.active)))
        logger.debug("Completed channel %s, found %d segments in total"
                     % (channel, len(scatter_segments[channel].active)))

        # calculate efficiency and deadtime of veto
        deadtime = abs(scatter_segments[channel].active)
        try:
            deadtimepc = deadtime / livetime * 100
        except ZeroDivisionError:
            deadtimepc = 0.
        logger.info("Deadtime: %.2f%% (%.2f/%ds)"
                    % (deadtimepc, deadtime, livetime))
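        # NOTE: this re-selects triggers with SNR <= 200, superseding the
        # earlier SNR >= 8 cut, before measuring veto efficiency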
        highsnrtrigs = trigs[trigs['snr'] <= 200]
        efficiency = in_segmentlist(highsnrtrigs[names[0]],
                                    scatter_segments[channel].active).sum()
        try:
            efficiencypc = efficiency / len(highsnrtrigs) * 100
        except ZeroDivisionError:
            efficiencypc = 0.
        logger.info("Efficiency (SNR>=8): %.2f%% (%d/%d)"
                    % (efficiencypc, efficiency, len(highsnrtrigs)))
        if deadtimepc == 0.:
            effdt = 0
        else:
            effdt = efficiencypc/deadtimepc
        logger.info("Efficiency/Deadtime: %.2f" % effdt)

        if abs(scatter_segments[channel].active):
            actives.extend(scatter_segments[channel].active)

        # write HTML
        if deadtime != 0 and effdt > 2:
            context = 'danger'
        elif deadtime != 0 and effdt < 2:
            context = 'warning'
        else:
            continue
        page.add(htmlio.write_flag_html(
            scatter_segments[channel], span, i, parent='transmons-group',
            title=channel, context=context, plotdir=''))

    if statea:  # close accordion
        page.div.close()  # transmons-group

    actives = actives.coalesce()  # merge contiguous segments
    if statea and not actives:
        page.add(htmlio.alert(
            'No evidence of scattering found in the channels analyzed',
            context=args.ifo.lower(), dismiss=False))

    # identify triggers during active segments
    logger.debug('Writing a summary CSV record')
    ind = [i for i, trigtime in enumerate(highsnrtrigs[names[0]])
           if trigtime in actives]
    gps = highsnrtrigs[names[0]][ind]
    freq = highsnrtrigs[names[1]][ind]
    snr = highsnrtrigs[names[2]][ind]
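    # match each trigger to the active segment containing it (actives is
    # coalesced, so each trigger falls in at most one segment)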
    segs = [y for x in gps for y in actives if x in y]
    table = EventTable(
        [gps, freq, snr, [seg[0] for seg in segs], [seg[1] for seg in segs]],
        names=('trigger_time', 'trigger_frequency', 'trigger_snr',
               'segment_start', 'segment_end'))
    logger.info('The following {} triggers fell within active scattering '
                'segments:\n\n'.format(len(table)))
    print(table)
    print('\n\n')
    table.write(summfile, overwrite=True)

    # -- launch omega scans -----------

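    # scan at most args.omega_scans events, sampled at random from the
    # triggers that coincide with scattering segments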
    nscans = min(args.omega_scans, len(table))
    if nscans > 0:
        # launch scans
        scandir = 'scans'
        ind = random.sample(range(0, len(table)), nscans)
        omegatimes = [str(t) for t in table['trigger_time'][ind]]
        logger.debug('Collected {} event times to omega scan: {}'.format(
            nscans, ', '.join(omegatimes)))
        logger.info('Creating workflow for omega scans')
        flags = batch.get_command_line_flags(
            ifo=args.ifo, ignore_state_flags=True)
        condorcmds = batch.get_condor_arguments(timeout=4, gps=args.gpsstart)
        batch.generate_dag(omegatimes, flags=flags, submit=True,
                           outdir=scandir, condor_commands=condorcmds)
        logger.info('Launched {} omega scans to condor'.format(nscans))
        # render HTML
        page.h2('Omega scans', class_='mt-4', id_='omega-scans')
        msg = (
            'The following event times correspond to significant Omicron '
            'triggers that occur during the scattering segments found above. '
            'To compare these against fringe frequency projections, please '
            'use the "simple scattering" module:',
            markup.oneliner.pre(
                '$ python -m gwdetchar.scattering.simple --help',
            ),
        )
        page.add(htmlio.alert(msg, context=args.ifo.lower()))
        page.add(htmlio.scaffold_omega_scans(
            omegatimes, args.main_channel, scandir=scandir))
    elif args.omega_scans:
        logger.info('No events found during active scattering segments')

    # -- finalize ---------------------

    # write segments
    scatter_segments.write(segfile, path="segments", overwrite=True)
    logger.debug("%s written" % segfile)

    # write HTML
    htmlio.close_page(page, 'index.html')
    logger.info("-- index.html written, all done --")

    # return to original directory
    os.chdir(indir)