Example #1
File: losc.py Project: paulaltin/gwpy
def read_losc_state(filename, channel, group=None, start=None, end=None,
                    copy=False):
    """Read a `StateVector` from a LOSC-format HDF file.
    """
    h5file = open_hdf5(filename)
    if group:
        channel = '%s/%s' % (group, channel)
    # find data
    dataset = _find_dataset(h5file, '%s/DQmask' % channel)
    maskset = _find_dataset(h5file, '%s/DQDescriptions' % channel)
    # read data
    nddata = dataset.value
    bits = list(maskset.value)
    # read metadata
    try:
        epoch = dataset.attrs['Xstart']
    except KeyError:
        try:
            ce = CacheEntry.from_T050017(h5file.filename)
        except ValueError:
            epoch = None
        else:
            epoch = ce.segment[0]
    try:
        dt = dataset.attrs['Xspacing']
    except KeyError:
        dt = Quantity(1, 's')
    else:
        xunit = Unit(dataset.attrs['Xunit'])
        dt = Quantity(dt, xunit)
    return StateVector(nddata, bits=bits, epoch=epoch, name='Data quality',
                       sample_rate=(1/dt).to('Hertz'), copy=copy)
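A minimal usage sketch for the reader above; the file name and channel group are hypothetical, following the public LOSC layout in which the quality data sit under quality/simple:

# Hypothetical usage: the file name follows the T050017 convention, so the
# epoch can still be recovered from it when the 'Xstart' attribute is absent.
sv = read_losc_state('H-H1_LOSC_4_V1-1126259446-32.hdf5', 'quality/simple')
print(sv.epoch, sv.sample_rate, sv.bits)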
Example #2
File: triggers.py Project: pvasired/gwsumm
def find_dmt_omega(channel, start, end, base=None):
    """Find DMT-Omega trigger XML files
    """
    span = Segment(to_gps(start), to_gps(end))
    channel = get_channel(channel)
    ifo = channel.ifo
    if base is None and channel.name.split(':', 1)[-1] == 'GDS-CALIB_STRAIN':
        base = '/gds-%s/dmt/triggers/%s-HOFT_Omega' % (
            ifo.lower(), ifo[0].upper())
    elif base is None:
        raise NotImplementedError("This method doesn't know how to locate DMT "
                                  "Omega trigger files for %r" % str(channel))
    gps5 = int('%.5s' % start)
    end5 = int('%.5s' % end)
    out = Cache()
    append = out.append
    while gps5 <= end5:
        trigglob = os.path.join(
            base, str(gps5),
            '%s-%s_%s_%s_OmegaC-*-*.xml' % (
                ifo, channel.system, channel.subsystem, channel.signal))
        found = glob.glob(trigglob)
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
        gps5 += 1
    out.sort(key=lambda e: e.path)
    vprint("    Found %d files for %s (DMT-Omega)\n"
           % (len(out), channel.ndsname))
    return out
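A usage sketch for the finder above; the channel and GPS span are hypothetical, and the /gds-<ifo> trigger base is assumed to be mounted as on the LIGO clusters:

cache = find_dmt_omega('L1:GDS-CALIB_STRAIN', 1126259446, 1126260446)
for entry in cache:
    print(entry.path)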
Example #3
File: triggers.py Project: pvasired/gwsumm
def find_kw(channel, start, end, base=None):
    """Find KW trigger XML files
    """
    span = Segment(to_gps(start), to_gps(end))
    channel = get_channel(channel)
    ifo = channel.ifo
    if base is None and channel.name.split(':', 1)[-1] == 'GDS-CALIB_STRAIN':
        tag = '%s-KW_HOFT' % ifo[0].upper()
        base = '/gds-%s/dmt/triggers/%s' % (ifo.lower(), tag)
    elif base is None:
        tag = '%s-KW_TRIGGERS' % ifo[0].upper()
        base = '/gds-%s/dmt/triggers/%s' % (ifo.lower(), tag)
    gps5 = int('%.5s' % start)
    end5 = int('%.5s' % end)
    out = Cache()
    append = out.append
    while gps5 <= end5:
        trigglob = os.path.join(
            base, '%s-%d' % (tag, gps5), '%s-*-*.xml' % tag)
        found = glob.glob(trigglob)
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
        gps5 += 1
    out.sort(key=lambda e: e.path)
    vprint("    Found %d files for %s (KW)\n"
           % (len(out), channel.ndsname))
    return out
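Both finders bucket trigger files into directories keyed by the first five digits of the GPS time, so each directory covers 100,000 s (a little over a day). A standalone sketch of that bucketing, with hypothetical GPS times:

def gps_buckets(start, end):
    """Yield the 5-digit GPS directory prefixes covering [start, end)."""
    gps5 = int(str(int(start))[:5])
    end5 = int(str(int(end))[:5])
    while gps5 <= end5:
        yield gps5
        gps5 += 1

print(list(gps_buckets(1126259446, 1126400000)))  # [11262, 11263, 11264]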
Example #4
def read_losc_state(filename, channel, group=None, start=None, end=None):
    """Read a `StateVector` from a LOSC-format HDF file.
    """
    h5file = open_hdf5(filename)
    if group:
        channel = '%s/%s' % (group, channel)
    # find data
    dataset = _find_dataset(h5file, '%s/DQmask' % channel)
    maskset = _find_dataset(h5file, '%s/DQDescriptions' % channel)
    # read data
    nddata = dataset.value
    bits = list(maskset.value)
    # read metadata
    try:
        epoch = dataset.attrs['Xstart']
    except KeyError:
        try:
            from glue.lal import CacheEntry
        except ImportError:
            epoch = None
        else:
            ce = CacheEntry.from_T050017(h5file.filename)
            epoch = ce.segment[0]
    try:
        dt = dataset.attrs['Xspacing']
    except KeyError:
        dt = Quantity(1, 's')
    else:
        xunit = Unit(dataset.attrs['Xunit'])
        dt = Quantity(dt, xunit)
    return StateVector(nddata,
                       bits=bits,
                       epoch=epoch,
                       sample_rate=(1 / dt).to('Hertz'),
                       name='Data quality')
Example #5
def FrameCachetoLALCache(fcache):
    """Convert a FrameCache into a LALCache of LALCacheEntry objects."""
    lcache = LALCache()
    for f in fcache.get_files():
        lcache.append(LALCacheEntry.from_T050017(f))
    return lcache
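Every example on this page ultimately calls CacheEntry.from_T050017, which parses the LIGO T050017 file-naming convention OBSERVATORY-DESCRIPTION-GPSSTART-DURATION.ext. A minimal sketch with a hypothetical path:

from glue.lal import CacheEntry

ce = CacheEntry.from_T050017('/data/H-H1_LOSC_4_V1-1126259446-32.gwf')
print(ce.observatory)  # 'H'
print(ce.description)  # 'H1_LOSC_4_V1'
print(ce.segment)      # GPS [1126259446, 1126259478)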
Example #6
@classmethod
def setUpClass(cls):
    cls.FRAMES = {}
    cls._tempdir = tempfile.mkdtemp(prefix='gwsumm-test-data-')
    # download each remote frame file and cache the local copy
    for channel in LOSC_DATA:
        cls.FRAMES[channel] = Cache()
        for gwf in LOSC_DATA[channel]:
            target = os.path.join(cls._tempdir, os.path.basename(gwf))
            download(gwf, target)
            cls.FRAMES[channel].append(CacheEntry.from_T050017(target))
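The fixture downloads into a fresh temporary directory; a matching teardown (a sketch, assuming shutil is imported alongside tempfile) would remove it:

@classmethod
def tearDownClass(cls):
    # remove the temporary download directory created in setUpClass
    shutil.rmtree(cls._tempdir, ignore_errors=True)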
Example #7
File: losc.py Project: bfarr/gwpy
def read_losc_state(filename, channel, group=None, start=None, end=None,
                    copy=False):
    """Read a `StateVector` from a LOSC-format HDF file.

    Parameters
    ----------
    filename : `str`
        path to LOSC-format HDF5 file to read.
    channel : `str`
        name of HDF5 dataset to read.
    group : `str`, optional
        name of containing HDF5 group for ``channel``. If not given,
        the first dataset named ``channel`` will be assumed as the right
        one.
    start : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
        start GPS time of desired data
    end : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
        end GPS time of desired data
    copy : `bool`, default: `False`
        create a fresh-memory copy of the underlying array

    Returns
    -------
    data : :class:`~gwpy.timeseries.StateVector`
        a new `StateVector` containing the data read from disk
    """
    h5file = open_hdf5(filename)
    if group:
        channel = '%s/%s' % (group, channel)
    # find data
    dataset = _find_dataset(h5file, '%s/DQmask' % channel)
    maskset = _find_dataset(h5file, '%s/DQDescriptions' % channel)
    # read data
    nddata = dataset.value
    bits = list(maskset.value)
    # read metadata
    try:
        epoch = dataset.attrs['Xstart']
    except KeyError:
        try:
            ce = CacheEntry.from_T050017(h5file.filename)
        except ValueError:
            epoch = None
        else:
            epoch = ce.segment[0]
    try:
        dt = dataset.attrs['Xspacing']
    except KeyError:
        dt = Quantity(1, 's')
    else:
        xunit = parse_unit(dataset.attrs['Xunits'])
        dt = Quantity(dt, xunit)
    return StateVector(nddata, bits=bits, epoch=epoch, name='Data quality',
                       dx=dt, copy=copy)
Example #8
def find_trigger_urls(channel, etg, gpsstart, gpsend, verbose=False):
    """Find the paths of trigger files that represent the given
    observatory, channel, and ETG (event trigger generator) for a given
    GPS [start, end) segment.
    """
    if etg.lower().startswith('omicron'):
        etg = '?' + etg[1:]

    # construct search
    gpsstart = to_gps(gpsstart).seconds
    gpsend = to_gps(gpsend).seconds
    span = Segment(gpsstart, gpsend)
    ifo, channel = channel.split(':', 1)
    trigtype = "%s_%s" % (channel, etg.lower())
    epoch = '*'
    searchbase = os.path.join(TRIGFIND_BASE_PATH, epoch, ifo, trigtype)
    gpsdirs = range(int(str(gpsstart)[:5]), int(str(gpsend)[:5]) + 1)
    trigform = ('%s-%s_%s-%s-*.xml*' %
                (ifo, re_dash.sub('_', channel), etg.lower(), '[0-9]' * 10))

    # test for channel-level directory
    if not glob.glob(searchbase):
        raise ValueError("No channel-level directory found at %s. Either the "
                         "channel name or ETG names are wrong, or this "
                         "channel is not configured for this ETG." %
                         searchbase)

    # perform and cache results
    out = Cache()
    append = out.append
    for gpsdir in gpsdirs:
        gpssearchpath = os.path.join(searchbase, str(gpsdir), trigform)
        if verbose:
            gprint("Searching %s..." % os.path.split(gpssearchpath)[0],
                   end=' ')
        found = set(map(os.path.realpath, glob.glob(gpssearchpath)))
        n = 0
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
                n += 1
        if verbose:
            gprint("%d found" % n)
    out.sort(key=lambda e: e.path)

    return out
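A usage sketch, assuming the TRIGFIND_BASE_PATH hierarchy is present on the local cluster (channel and GPS times hypothetical):

cache = find_trigger_urls('L1:GDS-CALIB_STRAIN', 'omicron',
                          1126259446, 1126260446, verbose=True)
print(len(cache), 'trigger files')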
Example #9
File: trigfind.py Project: bfarr/gwpy
def find_trigger_urls(channel, etg, gpsstart, gpsend, verbose=False):
    """Find the paths of trigger files that represent the given
    observatory, channel, and ETG (event trigger generator) for a given
    GPS [start, end) segment.
    """
    if etg.lower().startswith('omicron'):
        etg = '?' + etg[1:]

    # construct search
    gpsstart = to_gps(gpsstart).seconds
    gpsend = to_gps(gpsend).seconds
    span = Segment(gpsstart, gpsend)
    ifo, channel = channel.split(':', 1)
    trigtype = "%s_%s" % (channel, etg.lower())
    epoch = '*'
    searchbase = os.path.join(TRIGFIND_BASE_PATH, epoch, ifo, trigtype)
    gpsdirs = range(int(str(gpsstart)[:5]), int(str(gpsend)[:5])+1)
    trigform = ('%s-%s_%s-%s-*.xml*'
                % (ifo, re_dash.sub('_', channel), etg.lower(), '[0-9]'*10))

    # test for channel-level directory
    if not glob.glob(searchbase):
        raise ValueError("No channel-level directory found at %s. Either the "
                         "channel name or ETG names are wrong, or this "
                         "channel is not configured for this ETG."
                         % searchbase)

    # perform and cache results
    out = Cache()
    append = out.append
    for gpsdir in gpsdirs:
        gpssearchpath = os.path.join(searchbase, str(gpsdir), trigform)
        if verbose:
            gprint("Searching %s..." % os.path.split(gpssearchpath)[0],
                   end=' ')
        found = set(map(os.path.realpath, glob.glob(gpssearchpath)))
        n = 0
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
                n += 1
        if verbose:
            gprint("%d found" % n)
    out.sort(key=lambda e: e.path)

    return out
Example #10
def get_files(self):
    """Find all files described by this FrameCacheEntry.
    """
    filenames = glob.glob(os.path.join(
        self.path,
        '%s-%s*-%s.*' % (self.observatory, self.description, self.duration)))
    cache = [e.path for e in
             LALCache([LALCacheEntry.from_T050017(f) for f in filenames])
             if e.observatory == self.observatory and
             e.description == self.description and
             self.segment.intersects(e.segment) and
             abs(e.segment) == self.duration]
    return cache
Example #11
def find_frames(ifo,
                frametype,
                gpsstart,
                gpsend,
                config=GWSummConfigParser(),
                urltype='file',
                gaps='warn',
                onerror='raise'):
    """Query the datafind server for GWF files for the given type

    Parameters
    ----------
    ifo : `str`
        prefix for the IFO of interest (either one or two characters)

    frametype : `str`
        name of the frametype to find

    gpsstart : `int`
        GPS start time of the query

    gpsend : `int`
        GPS end time of the query

    config : `~ConfigParser.ConfigParser`, optional
        configuration with `[datafind]` section containing `server`
        specification, otherwise taken from the environment

    urltype : `str`, optional
        what type of file paths to return, default: `file`

    gaps : `str`, optional
        what to do when gaps are detected, one of

        - `ignore` : do nothing
        - `warn` : display the existence of gaps but carry on
        - `raise` : raise an exception

    onerror : `str`, optional
        what to do when the `~glue.datafind` query itself fails, same
        options as for ``gaps``

    Returns
    -------
    cache : `~glue.lal.Cache`
        a list of structured frame file descriptions matching the ifo and
        frametype requested
    """
    vprint('    Finding %s-%s frames for [%d, %d)...' %
           (ifo[0], frametype, int(gpsstart), int(gpsend)))
    # find datafind host:port
    try:
        host = config.get('datafind', 'server')
    except (NoOptionError, NoSectionError):
        try:
            host = os.environ['LIGO_DATAFIND_SERVER']
        except KeyError:
            host = None
            port = None
        else:
            try:
                host, port = host.rsplit(':', 1)
            except ValueError:
                port = None
            else:
                port = int(port)
    else:
        port = config.getint('datafind', 'port')
    # get credentials
    if port == 80:
        cert = None
        key = None
    else:
        cert, key = datafind.find_credential()

    # XXX HACK: LLO changed frame types on Dec 6 2013:
    LLOCHANGE = 1070291904
    if re.match('L1_[CRMT]', frametype) and gpsstart < LLOCHANGE:
        frametype = frametype[-1]

    # query frames
    ifo = ifo[0].upper()
    gpsstart = int(floor(gpsstart))
    gpsend = int(ceil(min(globalv.NOW, gpsend)))
    if gpsend <= gpsstart:
        return Cache()

    # parse match
    try:
        frametype, match = frametype.split('|', 1)
    except ValueError:
        match = None

    def _query():
        if cert is not None:
            dfconn = datafind.GWDataFindHTTPSConnection(host=host,
                                                        port=port,
                                                        cert_file=cert,
                                                        key_file=key)
        else:
            dfconn = datafind.GWDataFindHTTPConnection(host=host, port=port)
        return dfconn.find_frame_urls(ifo[0].upper(),
                                      frametype,
                                      gpsstart,
                                      gpsend,
                                      urltype=urltype,
                                      on_gaps=gaps,
                                      match=match)

    try:
        cache = _query()
    except RuntimeError as e:
        sleep(1)
        try:
            cache = _query()
        except RuntimeError:
            if 'Invalid GPS times' in str(e):
                e.args = ('%s: %d ... %s' % (str(e), gpsstart, gpsend), )
            if onerror in ['ignore', None]:
                pass
            elif onerror in ['warn']:
                warnings.warn('Caught %s: %s' % (type(e).__name__, str(e)))
            else:
                raise
            cache = Cache()

    # XXX: if querying for day of LLO frame type change, do both
    if (ifo[0].upper() == 'L' and frametype in ['C', 'R', 'M', 'T']
            and gpsstart < LLOCHANGE < gpsend):
        start = len(cache) and cache[-1].segment[1] or gpsstart
        if start < gpsend:
            # _query() keeps its connection local, so build a fresh one here
            if cert is not None:
                dfconn = datafind.GWDataFindHTTPSConnection(
                    host=host, port=port, cert_file=cert, key_file=key)
            else:
                dfconn = datafind.GWDataFindHTTPConnection(host=host,
                                                           port=port)
            cache.extend(dfconn.find_frame_urls(ifo[0].upper(),
                                                'L1_%s' % frametype,
                                                start,
                                                gpsend,
                                                urltype=urltype,
                                                on_gaps=gaps)[1:])

    # extend cache beyond datafind's knowledge to reduce latency
    try:
        latest = cache[-1]
        ngps = len(
            re_gwf_gps_epoch.search(os.path.dirname(
                latest.path)).groupdict()['gpsepoch'])
    except (IndexError, AttributeError):
        pass
    else:
        while True:
            s, e = latest.segment
            if s >= gpsend:
                break
            # replace GPS time of file basename
            new = latest.path.replace('-%d-' % s, '-%d-' % e)
            # replace GPS epoch in dirname
            new = new.replace('%s/' % str(s)[:ngps], '%s/' % str(e)[:ngps])
            if os.path.isfile(new):
                latest = CacheEntry.from_T050017(new)
                cache.append(latest)
            else:
                break

    # validate files existing and return
    cache, _ = cache.checkfilesexist()
    vprint(' %d found.\n' % len(cache))
    return cache
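A usage sketch, assuming a reachable datafind server (via the config or the LIGO_DATAFIND_SERVER environment variable); the frametype and GPS span are hypothetical:

cache = find_frames('L1', 'R', 1126259446, 1126263046)
print(len(cache), 'frame files found')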
Example #12
def read_losc_state(filename,
                    channel,
                    group=None,
                    start=None,
                    end=None,
                    copy=False):
    """Read a `StateVector` from a LOSC-format HDF file.

    Parameters
    ----------
    filename : `str`
        path to LOSC-format HDF5 file to read.
    channel : `str`
        name of HDF5 dataset to read.
    group : `str`, optional
        name of containing HDF5 group for ``channel``. If not given,
        the first dataset named ``channel`` will be assumed as the right
        one.
    start : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
        start GPS time of desired data
    end : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
        end GPS time of desired data
    copy : `bool`, default: `False`
        create a fresh-memory copy of the underlying array

    Returns
    -------
    data : :class:`~gwpy.timeseries.StateVector`
        a new `StateVector` containing the data read from disk
    """
    h5file = open_hdf5(filename)
    if group:
        channel = '%s/%s' % (group, channel)
    # find data
    dataset = _find_dataset(h5file, '%s/DQmask' % channel)
    maskset = _find_dataset(h5file, '%s/DQDescriptions' % channel)
    # read data
    nddata = dataset.value
    bits = list(maskset.value)
    # read metadata
    try:
        epoch = dataset.attrs['Xstart']
    except KeyError:
        try:
            ce = CacheEntry.from_T050017(h5file.filename)
        except ValueError:
            epoch = None
        else:
            epoch = ce.segment[0]
    try:
        dt = dataset.attrs['Xspacing']
    except KeyError:
        dt = Quantity(1, 's')
    else:
        xunit = parse_unit(dataset.attrs['Xunits'])
        dt = Quantity(dt, xunit)
    return StateVector(nddata,
                       bits=bits,
                       epoch=epoch,
                       name='Data quality',
                       dx=dt,
                       copy=copy)
Example #13
File: hveto.py Project: pvasired/gwsumm
    def process(self, config=GWSummConfigParser(), **kwargs):

        # set params
        self.rounds = None

        if not os.path.isdir(self.directory):
            self.rounds = None
            return

        # get some basic info
        ifo = config.get('DEFAULT', 'ifo')

        # read the configuration
        d = os.path.realpath(self.directory).rstrip('/')
        self.conf = dict()
        confs = glob(os.path.join(d, '%s-HVETO_CONF-*-*.txt' % ifo))
        if len(confs) != 1:
            self.rounds = 'FAIL'
            return
        conffile = confs[0]
        try:
            with open(conffile) as f:
                self.conf = dict()
                lines = f.readlines()[3:]
                for line in lines:
                    try:
                        key, val = line.split(': ', 1)
                        self.conf[key.strip()] = eval(val)
                    except (ValueError, SyntaxError, NameError):
                        pass
        except IOError:
            self.rounds = 'FAIL'
            return
        else:
            etg = self.conf.pop('AUXtype', None)
            if 'DEfnm' in self.conf:
                name = re_quote.sub('', self.conf['DEfnm'])
                self.primary = '%s:%s' % (ifo, name)
                if 'DEtype' in self.conf:
                    hetg = re_quote.sub('', self.conf['DEtype'])
                    if re.search(r'_%s\Z' % hetg, self.primary, re.I):
                        self.primary = self.primary[:-len(hetg) - 1]
            else:
                self.primary = None

        # find the segments
        try:
            ce = CacheEntry.from_T050017(conffile)
        except ValueError:
            start = int(self.span[0])
            duration = int(abs(self.span))
            span = self.span
        else:
            start = int(ce.segment[0])
            duration = int(abs(ce.segment))
            span = ce.segment
        try:
            statefile = self.conf['dqfnm']
        except KeyError:
            statefile = '%s-HVETO_DQ_SEGS-%d-%d.txt' % (ifo, start, duration)
        if not os.path.isfile(os.path.join(self.directory, statefile)):
            self.rounds = 'NOSEGMENTS'
            return

        # find the results table
        resultsfile = os.path.join(self.directory, 'summary_stats.txt')
        if not os.path.isfile(resultsfile):
            self.rounds = 'FAIL'
            return

        # determine the Hveto state
        cache = Cache(
            [CacheEntry.from_T050017(os.path.join(self.directory, statefile))])
        segments = SegmentList.read(cache)
        globalv.SEGMENTS[self.states[0].definition] = DataQualityFlag(
            self.states[0].definition, known=[span], active=segments)
        self.finalize_states(config=config, query=False)

        # read results file
        self.rounds = []
        with open(resultsfile, 'r') as f:
            for line in f.readlines():
                self.rounds.append(
                    dict(zip(self.summaryrows,
                             line.split(' ')[1:])))
                # fix channel name
                c = '%s:%s' % (ifo, self.rounds[-1]['Winning channel'])
                if etg and re.search(r'_%s\Z' % etg, c, re.I):
                    c = c.rsplit('_', 1)[0]
                self.rounds[-1]['Winning channel'] = c

        # read starting triggers
        rawfile = ('%s-HVETO_RAW_TRIGS_ROUND_0-%d-%d.txt' %
                   (ifo, start, duration))
        cache = Cache(
            [CacheEntry.from_T050017(os.path.join(self.directory, rawfile))])
        get_triggers('%s:hveto_start' % ifo,
                     'hveto', [self.span],
                     config=config,
                     cache=cache,
                     tablename='sngl_burst',
                     return_=False)

        get_triggers('%s:hveto_vetoed_all' % ifo,
                     'hveto', [self.span],
                     config=config,
                     cache=Cache(),
                     tablename='sngl_burst')
        for r in range(1, len(self.rounds) + 1):
            # read round veto triggers
            rawfile = ('%s-HVETO_VETOED_TRIGS_ROUND_%d-%d-%d.txt' %
                       (ifo, r, start, duration))
            cache = Cache([
                CacheEntry.from_T050017(os.path.join(self.directory, rawfile))
            ])
            trigs = get_triggers('%s:hveto_vetoed_round %d' % (ifo, r),
                                 'hveto', [self.span],
                                 config=config,
                                 cache=cache,
                                 tablename='sngl_burst')
            globalv.TRIGGERS['%s:hveto_vetoed_all,hveto' % ifo].extend(trigs)
            # read round veto segments
            segfile = ('%s-HVETO_VETO_SEGS_ROUND_%d-%d-%d.txt' %
                       (ifo, r, start, duration))
            cache = Cache([
                CacheEntry.from_T050017(os.path.join(self.directory, segfile))
            ])
            get_segments('%s:hveto_veto_segs_round_%d' % (ifo, r), [self.span],
                         config=config,
                         cache=cache,
                         return_=False)

        for plot in self.plots:
            if isinstance(plot, HvetoSegmentSummaryPlot):
                plot.find_flags()

        kwargs['trigcache'] = Cache()
        kwargs['segmentcache'] = Cache()
        super(HvetoTab, self).process(config=config, **kwargs)

        # find some plots
        for plot in ['OVERAL_HISTOGRAM', 'OVERAL_EFF_DT'][::-1]:
            filename = ('%s-HVETO_%s-%d-%d.png' % (ifo, plot, start, duration))
            plotfile = os.path.join(self.directory, filename)
            if os.path.isfile(plotfile):
                p = SummaryPlot(os.path.join(self.url, filename), new=False)
                p.state = self.states[0]
                self.plots.insert(0, p)

        # delete data from archive
        del globalv.SEGMENTS[self.states[0].definition]
        for row in range(1, len(self.rounds) + 1):
            del globalv.SEGMENTS['%s:hveto_veto_segs_round_%s' % (ifo, row)]
Example #14
File: hveto.py Project: berkowitze/gwsumm
    def process(self, config=GWSummConfigParser(), **kwargs):

        # set params
        self.rounds = None

        if not os.path.isdir(self.directory):
            self.rounds = None
            return

        # get some basic info
        ifo = config.get('DEFAULT', 'ifo')

        # read the configuration
        d = os.path.realpath(self.directory).rstrip('/')
        self.conf = dict()
        confs = glob(os.path.join(d, '%s-HVETO_CONF-*-*.txt' % ifo))
        if len(confs) != 1:
            self.rounds = 'FAIL'
            return
        conffile = confs[0]
        try:
            with open(conffile) as f:
                self.conf = dict()
                lines = f.readlines()[3:]
                for line in lines:
                    try:
                        key, val = line.split(': ', 1)
                        self.conf[key.strip()] = eval(val)
                    except (ValueError, SyntaxError, NameError):
                        pass
        except IOError:
            self.rounds = 'FAIL'
            return
        else:
            etg = self.conf.pop('AUXtype', None)
            if 'DEfnm' in self.conf:
                name = re_quote.sub('', self.conf['DEfnm'])
                self.primary = '%s:%s' % (ifo, name)
                if 'DEtype' in self.conf:
                    hetg = re_quote.sub('', self.conf['DEtype'])
                    if re.search(r'_%s\Z' % hetg, self.primary, re.I):
                        self.primary = self.primary[:-len(hetg)-1]
            else:
                self.primary = None

        # find the segments
        try:
            ce = CacheEntry.from_T050017(conffile)
        except ValueError:
            start = int(self.span[0])
            duration = int(abs(self.span))
            span = self.span
        else:
            start = int(ce.segment[0])
            duration = int(abs(ce.segment))
            span = ce.segment
        try:
            statefile = self.conf['dqfnm']
        except KeyError:
            statefile = '%s-HVETO_DQ_SEGS-%d-%d.txt' % (ifo, start, duration)
        if not os.path.isfile(os.path.join(self.directory, statefile)):
            self.rounds = 'NOSEGMENTS'
            return

        # find the results table
        resultsfile = os.path.join(self.directory, 'summary_stats.txt')
        if not os.path.isfile(resultsfile):
            self.rounds = 'FAIL'
            return

        # determine the Hveto state
        cache = Cache([CacheEntry.from_T050017(
                           os.path.join(self.directory, statefile))])
        segments = SegmentList.read(cache)
        globalv.SEGMENTS[self.states[0].definition] = DataQualityFlag(
            self.states[0].definition, known=[span], active=segments)
        self.finalize_states(config=config, query=False)

        # read results file
        self.rounds = []
        with open(resultsfile, 'r') as f:
            for line in f.readlines():
                self.rounds.append(dict(zip(self.summaryrows,
                                            line.split(' ')[1:])))
                # fix channel name
                c = '%s:%s' % (ifo, self.rounds[-1]['Winning channel'])
                if etg and re.search(r'_%s\Z' % etg, c, re.I):
                    c = c.rsplit('_', 1)[0]
                self.rounds[-1]['Winning channel'] = c

        # read starting triggers
        rawfile = ('%s-HVETO_RAW_TRIGS_ROUND_0-%d-%d.txt'
                   % (ifo, start, duration))
        cache = Cache([CacheEntry.from_T050017(
                           os.path.join(self.directory, rawfile))])
        get_triggers('%s:hveto_start' % ifo, 'hveto', [self.span],
                     config=config, cache=cache, return_=False)
        get_triggers('%s:hveto_vetoed_all' % ifo, 'hveto', [self.span],
                     config=config, cache=Cache(), return_=False)

        for r in range(1, len(self.rounds) + 1):
            # read round veto triggers
            rawfile = ('%s-HVETO_VETOED_TRIGS_ROUND_%d-%d-%d.txt'
                       % (ifo, r, start, duration))
            cache = Cache([CacheEntry.from_T050017(
                               os.path.join(self.directory, rawfile))])
            trigs = get_triggers('%s:hveto_vetoed_round %d' % (ifo, r),
                                 'hveto', [self.span], config=config,
                                 cache=cache)
            add_triggers(trigs, '%s:hveto_vetoed_all,hveto' % ifo,
                         segments=SegmentList([self.span]))
            # read round veto segments
            segfile = ('%s-HVETO_VETO_SEGS_ROUND_%d-%d-%d.txt'
                       % (ifo, r, start, duration))
            cache = Cache([CacheEntry.from_T050017(
                               os.path.join(self.directory, segfile))])
            get_segments('%s:hveto_veto_segs_round_%d' % (ifo, r), [self.span],
                         config=config, cache=cache, return_=False)

        for plot in self.plots:
            if isinstance(plot, HvetoSegmentSummaryPlot):
                plot.find_flags()

        kwargs['trigcache'] = Cache()
        kwargs['segmentcache'] = Cache()
        super(HvetoTab, self).process(config=config, **kwargs)

        # find some plots
        for plot in ['OVERAL_HISTOGRAM', 'OVERAL_EFF_DT'][::-1]:
            filename = (
                '%s-HVETO_%s-%d-%d.png' % (ifo, plot, start, duration))
            plotfile = os.path.join(self.directory, filename)
            if os.path.isfile(plotfile):
                p = SummaryPlot(os.path.join(self.url, filename), new=False)
                p.state = self.states[0]
                self.plots.insert(0, p)

        # delete data from archive
        del globalv.SEGMENTS[self.states[0].definition]
        for row in range(1, len(self.rounds) + 1):
            del globalv.SEGMENTS['%s:hveto_veto_segs_round_%s' % (ifo, row)]
Example #15
#! /usr/bin/python
# Read T050017-named file paths from stdin (one per line) and print the
# corresponding LAL cache entries, e.g.: ls /data/*.gwf | python this_script.py

from __future__ import print_function

import sys

from glue.lal import CacheEntry

for line in sys.stdin:
    # strip the trailing newline so the T050017 basename parses cleanly
    c = CacheEntry.from_T050017(line.rstrip())
    print(str(c), end=' ')