Example #1
    def write_events(channel, tab, segments):
        """Write events to file with a given filename
        """
        # get filename
        path = create_path(channel)
        h5f = h5py.File(str(path), 'a')

        # read existing table from file
        try:
            old = tab.read(h5f["triggers"], format="hdf5")
        except KeyError:
            pass
        else:
            tab = vstack([old, tab])

        # append event table
        tab.write(h5f, path="triggers", append=True, overwrite=True)

        # write segments
        try:
            oldsegs = DataQualityFlag.read(h5f, path="segments", format="hdf5")
        except KeyError:
            pass
        else:
            segments = oldsegs + segments
        segments.write(h5f, path="segments", append=True, overwrite=True)

        # close the file, flushing changes to disk
        h5f.close()
        return path
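A hedged sketch of reading back what write_events stores, mirroring the 'triggers' and 'segments' paths used above (the filename and the EventTable import are illustrative assumptions):

    from gwpy.segments import DataQualityFlag
    from gwpy.table import EventTable

    # read the appended event table and its analysed segments back out
    events = EventTable.read('L1-CHANNEL-EVENTS.h5', path='triggers',
                             format='hdf5')
    segments = DataQualityFlag.read('L1-CHANNEL-EVENTS.h5', path='segments',
                                    format='hdf5')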
Example #2
 def test_read_segwizard(self):
     flag = DataQualityFlag.read(SEGWIZ, FLAG1, coalesce=False)
     self.assertTrue(
         flag.active == ACTIVE,
         'DataQualityFlag.read(segwizard) mismatch:\n\n%s\n\n%s' %
         (ACTIVE, flag.active))
     self.assertTrue(flag.known == flag.active)
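SEGWIZ, FLAG1, and ACTIVE are test fixtures; a SegWizard segment file is a plain-text table of index, start, stop, and duration columns, roughly like this (values illustrative):

    0 968631043 968632748 1705
    1 968632853 968634269 1416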
Example #3
 def test_read_ligolw(self):
     flag = DataQualityFlag.read(SEGXML, FLAG1, coalesce=False)
     self.assertTrue(flag.active == ACTIVE,
                     'DataQualityFlag.read(ligolw) mismatch:\n\n%s\n\n%s'
                     % (ACTIVE, flag.active))
     self.assertTrue(flag.known == KNOWN,
                     'DataQualityFlag.read(ligolw) mismatch:\n\n%s\n\n%s'
                     % (KNOWN, flag.known))
Example #4
 def test_read_ligolw(self):
     flag = DataQualityFlag.read(SEGXML, FLAG1, coalesce=False)
     self.assertTrue(
         flag.active == ACTIVE,
         'DataQualityFlag.read(ligolw) mismatch:\n\n%s\n\n%s' %
         (ACTIVE, flag.active))
     self.assertTrue(
         flag.known == KNOWN,
         'DataQualityFlag.read(ligolw) mismatch:\n\n%s\n\n%s' %
         (KNOWN, flag.known))
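The LIGO_LW round trip these tests exercise can be sketched as follows (flag name and filename are illustrative):

    from gwpy.segments import DataQualityFlag

    flag = DataQualityFlag(name='X1:TEST-FLAG:1',
                           known=[(0, 10)], active=[(2, 4), (6, 8)])
    flag.write('test-segments.xml.gz', format='ligolw')
    copy = DataQualityFlag.read('test-segments.xml.gz', 'X1:TEST-FLAG:1',
                                coalesce=False)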
Example #5
File: core.py Project: berkowitze/gwsumm
 def _read_segments(self, filename):
     segs = DataQualityFlag.read(filename, self.definition)
     # XXX HACK around malformed segment files with no segment_summary table
     if segs.active and not segs.known:
         segs.known = type(segs.active)(segs.active)
     if self.known:
         self.known = self.known & segs.known
         self.active = self.known & segs.active
     else:
         self.known = segs.known
         self.active = segs.active
     return self
Example #6
 def test_read_hdf5(self):
     try:
         hdfout = self.test_write_hdf5(delete=False)
     except ImportError as e:
         self.skipTest(str(e))
     else:
         flag = DataQualityFlag.read(hdfout)
         os.remove(hdfout)
         self.assertTrue(flag.active == ACTIVE,
                         'DataQualityFlag.read(hdf5) mismatch:\n\n%s\n\n%s'
                         % (ACTIVE, flag.active))
         self.assertTrue(flag.known == KNOWN,
                         'DataQualityFlag.read(hdf5) mismatch:\n\n%s\n\n%s'
                         % (KNOWN, flag.known))
Example #7
 def test_read_hdf5(self):
     try:
         hdfout = self.test_write_hdf5(delete=False)
     except ImportError as e:
         self.skipTest(str(e))
     else:
         flag = DataQualityFlag.read(hdfout)
         os.remove(hdfout)
         self.assertTrue(
             flag.active == ACTIVE,
             'DataQualityFlag.read(hdf5) mismatch:\n\n%s\n\n%s' %
             (ACTIVE, flag.active))
         self.assertTrue(
             flag.known == KNOWN,
             'DataQualityFlag.read(hdf5) mismatch:\n\n%s\n\n%s' %
             (KNOWN, flag.known))
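The equivalent HDF5 round trip, again with an illustrative filename; for HDF5 the dataset location is given via the path keyword:

    from gwpy.segments import DataQualityFlag

    flag = DataQualityFlag(name='X1:TEST-FLAG:1',
                           known=[(0, 10)], active=[(2, 4), (6, 8)])
    flag.write('test-segments.h5', path='segments')
    copy = DataQualityFlag.read('test-segments.h5', path='segments')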
Example #8
 def read_and_cache_events(channel,
                           etg,
                           cache=None,
                           trigfind_kw={},
                           **read_kw):
     cfile = create_path(channel)
     # read existing cached triggers and work out new segments to query
     if args.append and cfile.is_file():
         previous = DataQualityFlag.read(
             str(cfile),
             path='segments',
             format='hdf5',
         ).coalesce()
         new = analysis - previous
     else:
         new = analysis.copy()
     # get cache of files
     if cache is None:
         cache = find_trigger_files(channel, etg, new.active, **trigfind_kw)
     else:
         cache = list(
             filter(
                 lambda e: new.active.intersects_segment(file_segment(e)),
                 cache,
             ))
     # restrict 'active' segments to when we have data
     try:
         new.active &= cache_segments(cache)
     except IndexError:
         new.active = type(new.active)()
     # find new triggers
     try:
         trigs = get_triggers(channel,
                              etg,
                              new.active,
                              cache=cache,
                              raw=True,
                              **read_kw)
     # catch error and continue
     except ValueError as e:
         warnings.warn('%s: %s' % (type(e).__name__, str(e)))
     else:
         path = write_events(channel, trigs, new)
         try:
             return path, len(trigs)
         except TypeError:  # None
             return
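The .coalesce() calls above merge overlapping and contiguous segments before old and new are differenced; for example:

    from gwpy.segments import DataQualityFlag

    f = DataQualityFlag(known=[(0, 10)], active=[(0, 4), (3, 6), (6, 8)])
    f.coalesce()  # active is now the single segment [0, 8)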
Example #9
    def process(self, *args, **kwargs):
        # read the segment files
        if os.path.isfile(self.segmentfile):
            segs = DataQualityFlag.read(self.segmentfile, coalesce=False)
            self.states[0].known = segs.known
            self.states[0].active = segs.active
            self.states[0].ready = True
        else:
            warn('Segment file %s not found.' % self.segmentfile)
            return
        if len(self.states[0].active) == 0:
            warn('No segments analysed by daily ahope.')
            return
        # read the cache files
        if os.path.isfile(self.inspiralcachefile):
            with open(self.inspiralcachefile, 'r') as fobj:
                try:
                    self.inspiralcache = Cache.fromfile(fobj).sieve(
                        segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.inspiralcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.inspiralcachefile)
            return
        if os.path.isfile(self.tmpltbankcachefile):
            with open(self.tmpltbankcachefile, 'r') as fobj:
                try:
                    self.tmpltbankcache = Cache.fromfile(fobj).sieve(
                        segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.tmpltbankcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.tmpltbankcachefile)
            self.tmpltbankcache = Cache()

        # only process if the cache file was found
        super(DailyAhopeTab, self).process(*args, **kwargs)
Example #10
    def process(self, *args, **kwargs):
        # read the segment files
        if os.path.isfile(self.segmentfile):
            segs = DataQualityFlag.read(self.segmentfile, coalesce=False)
            self.states[0].known = segs.known
            self.states[0].active = segs.active
            self.states[0].ready = True
        else:
            warn('Segment file %s not found.' % self.segmentfile)
            return
        if len(self.states[0].active) == 0:
            warn('No segments analysed by daily ahope.')
            return
        # read the cache files
        if os.path.isfile(self.inspiralcachefile):
            with open(self.inspiralcachefile, 'r') as fobj:
                try:
                    self.inspiralcache = Cache.fromfile(fobj).sieve(
                                             segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.inspiralcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.inspiralcachefile)
            return
        if os.path.isfile(self.tmpltbankcachefile):
            with open(self.tmpltbankcachefile, 'r') as fobj:
                try:
                    self.tmpltbankcache = Cache.fromfile(fobj).sieve(
                                              segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.tmpltbankcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.tmpltbankcachefile)
            self.tmpltbankcache = Cache()

        # only process if the cache file was found
        super(DailyAhopeTab, self).process(*args, **kwargs)
Example #11
File: archive.py Project: tjma12/gwsumm
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source

    This method reads all found data into the data containers defined by
    the `gwsumm.globalv` module, then returns nothing.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:

        # -- channels ---------------------------

        try:
            ctable = Table.read(h5file['channels'])
        except KeyError:  # no channels table written
            pass
        else:
            for row in ctable:
                chan = get_channel(row['name'])
                for p in ctable.colnames[1:]:
                    if row[p]:
                        setattr(chan, p, row[p])

        # -- timeseries -------------------------

        for dataset in h5file.get('timeseries', {}).values():
            ts = TimeSeries.read(dataset, format='hdf5')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name)
                    and ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() != mode.Mode.day:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # -- statevector ------------------------

        for dataset in h5file.get('statevector', {}).values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # -- spectrogram ------------------------

        for tag, add_ in zip(
            ['spectrogram', 'coherence-components'],
            [add_spectrogram, add_coherence_component_spectrogram]):
            for key, dataset in h5file.get(tag, {}).items():
                key = key.rsplit(',', 1)[0]
                spec = Spectrogram.read(dataset, format='hdf5')
                spec.channel = get_channel(spec.channel)
                add_(spec, key=key)

        # -- segments ---------------------------

        for name, dataset in h5file.get('segments', {}).items():
            dqflag = DataQualityFlag.read(h5file,
                                          path=dataset.name,
                                          format='hdf5')
            globalv.SEGMENTS += {name: dqflag}

        # -- triggers ---------------------------

        for dataset in h5file.get('triggers', {}).values():
            load_table(dataset)
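read_data_archive returns nothing; everything lands in the gwsumm.globalv containers. A hedged sketch of retrieving a flag afterwards (archive filename and flag name are illustrative):

    from gwsumm import globalv

    read_data_archive('H1-GWSUMM_ARCHIVE-1126051217-86400.h5')
    flag = globalv.SEGMENTS['H1:DMT-ANALYSIS_READY:1']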
Example #12
File: segments.py Project: gwpy/gwsumm
def get_segments(flag, validity=None, config=ConfigParser(), cache=None,
                 query=True, return_=True, coalesce=True, padding=None,
                 segdb_error='raise', url=None):
    """Retrieve the segments for a given flag

    Segments will be loaded from global memory if already defined,
    otherwise they will be loaded from the given
    :class:`~glue.lal.Cache`, or finally from the segment database

    Parameters
    ----------
    flag : `str`, `list`
        either the name of one flag, or a list of names

    validity : `~gwpy.segments.SegmentList`
        the segments over which to search for other segments

    query : `bool`, optional, default: `True`
        actually execute a read/query operation (if needed), otherwise
        just retrieve segments that have already been cached

    config : `~configparser.ConfigParser`, optional
        the configuration for your analysis, if you have one. If
        present the ``[segment-database]`` section will be queried
        for the following options

        - ``gps-start-time``, and ``gps-end-time``, if ``validity`` is
          not given
        - ``url`` (the remote hostname for the segment database) if
          the ``url`` keyword is not given

    cache : :class:`glue.lal.Cache`, optional
        a cache of files from which to read segments, otherwise segments
        will be downloaded from the segment database

    coalesce : `bool`, optional, default: `True`
        coalesce all segmentlists before returning, otherwise just return
        segments as they were downloaded/read

    padding : `tuple`, or `dict` of `tuples`, optional
        `(start, end)` padding with which to pad segments that are
        downloaded/read

    segdb_error : `str`, optional, default: ``'raise'``
        how to handle errors returned from the segment database, one of

        - ``'raise'`` (default) : raise the exception as normal
        - ``'warn'`` : print the exception as a warning, but return no
          segments
        - ``'ignore'`` : silently ignore the error and return no segments

    url : `str`, optional
        the remote hostname for the target segment database

    return_ : `bool`, optional, default: `True`
        internal flag to enable (True) or disable (False) actually returning
        anything. This is useful if you want to download/read segments now
        but not use them until later (e.g. plotting)

    Returns
    -------
    flag : `~gwpy.segments.DataQualityFlag`
        the flag object representing segments for the given single flag, OR

    flagdict : `~gwpy.segments.DataQualityDict`
        the dict of `~gwpy.segments.DataQualityFlag` objects for multiple
        flags, if ``flag`` is given as a `list`, OR

    None
       if ``return_=False``
    """
    if isinstance(flag, str):
        flags = flag.split(',')
    else:
        flags = flag
    allflags = set([f for cf in flags for f in
                    re_flagdiv.split(str(cf))[::2] if f])

    if padding is None and isinstance(flag, DataQualityFlag):
        padding = {flag: flag.padding}
    elif padding is None:
        padding = dict((flag,
                        isinstance(flag, DataQualityFlag) and
                        flag.padding or None) for flag in flags)

    # check validity
    if validity is None:
        start = config.get(DEFAULTSECT, 'gps-start-time')
        end = config.get(DEFAULTSECT, 'gps-end-time')
        span = SegmentList([Segment(start, end)])
    elif isinstance(validity, DataQualityFlag):
        validity = validity.active
        try:
            span = SegmentList([validity.extent()])
        except ValueError:
            span = SegmentList()
    else:
        try:
            span = SegmentList([SegmentList(validity).extent()])
        except ValueError:
            span = SegmentList()
    validity = SegmentList(validity)

    # generate output object
    out = DataQualityDict()
    for f in flags:
        out[f] = DataQualityFlag(f, known=validity, active=validity)
    for f in allflags:
        globalv.SEGMENTS.setdefault(f, DataQualityFlag(f))

    # read segments from global memory and get the union of needed times
    try:
        old = reduce(
            operator.and_,
            (globalv.SEGMENTS.get(f, DataQualityFlag(f)).known for f in flags))
    except TypeError:
        old = SegmentList()
    newsegs = validity - old
    # load new segments
    query &= abs(newsegs) != 0
    query &= len(allflags) > 0
    if cache is not None:
        query &= len(cache) != 0
    if query:
        if cache is not None:
            try:
                new = DataQualityDict.read(cache, list(allflags))
            except IORegistryError as e:
                # can remove when astropy >= 1.2 is required
                if type(e) is not IORegistryError:
                    raise
                if len(allflags) == 1:
                    f = list(allflags)[0]
                    new = DataQualityDict()
                    new[f] = DataQualityFlag.read(cache, f, coalesce=False)
            for f in new:
                new[f].known &= newsegs
                new[f].active &= newsegs
                if coalesce:
                    new[f].coalesce()
                vprint("    Read %d segments for %s (%.2f%% coverage).\n"
                       % (len(new[f].active), f,
                          float(abs(new[f].known))/float(abs(newsegs))*100))
        else:
            if len(newsegs) >= 10:
                qsegs = span
            else:
                qsegs = newsegs
            # parse configuration for query
            kwargs = {}
            if url is not None:
                kwargs['url'] = url
            else:
                try:
                    kwargs['url'] = config.get('segment-database', 'url')
                except (NoSectionError, NoOptionError):
                    pass
            if kwargs.get('url', None) in SEGDB_URLS:
                query_func = DataQualityDict.query_segdb
            else:
                query_func = DataQualityDict.query_dqsegdb
            try:
                new = query_func(allflags, qsegs, on_error=segdb_error,
                                 **kwargs)
            except Exception as e:
                # ignore error from SegDB
                if segdb_error in ['ignore', None]:
                    pass
                # convert to warning
                elif segdb_error in ['warn']:
                    print('%sWARNING: %sCaught %s: %s [gwsumm.segments]'
                          % (WARNC, ENDC, type(e).__name__, str(e)),
                          file=sys.stderr)
                    warnings.warn('%s: %s' % (type(e).__name__, str(e)))
                # otherwise raise as normal
                else:
                    raise
                new = DataQualityDict()
            for f in new:
                new[f].known &= newsegs
                new[f].active &= newsegs
                if coalesce:
                    new[f].coalesce()
                vprint("    Downloaded %d segments for %s (%.2f%% coverage).\n"
                       % (len(new[f].active), f,
                          float(abs(new[f].known))/float(abs(newsegs))*100))
        # record new segments
        globalv.SEGMENTS += new
        for f in new:
            globalv.SEGMENTS[f].description = str(new[f].description)

    # return what was asked for
    if return_:
        for compound in flags:
            union, intersection, exclude, notequal = split_compound_flag(
                compound)
            if len(union + intersection) == 1:
                out[compound].description = globalv.SEGMENTS[f].description
                out[compound].padding = padding.get(f, (0, 0))
            for flist, op in zip([exclude, intersection, union, notequal],
                                 [operator.sub, operator.and_, operator.or_,
                                  not_equal]):
                for f in flist:
                    pad = padding.get(f, (0, 0))
                    segs = globalv.SEGMENTS[f].copy()
                    if isinstance(pad, (float, int)):
                        segs = segs.pad(pad, pad)
                    elif pad is not None:
                        segs = segs.pad(*pad)
                    if coalesce:
                        segs = segs.coalesce()
                    out[compound] = op(out[compound], segs)
            out[compound].known &= validity
            out[compound].active &= validity
            if coalesce:
                out[compound].coalesce()
        if isinstance(flag, str):
            return out[flag]
        else:
            return out
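A hedged usage sketch for get_segments with a single flag and an explicit validity span (flag name and GPS times illustrative):

    from gwpy.segments import Segment, SegmentList

    validity = SegmentList([Segment(1126051217, 1126137617)])
    flag = get_segments('H1:DMT-ANALYSIS_READY:1', validity=validity)
    print(flag.active)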
Example #13
                    type=str,
                    help='observatory',
                    required=False)

args = parser.parse_args()

# Load channel list; accept FrChannels format
channels = []
with open(args.channels, 'r') as fin:
    for line in fin:
        channels.append(line.split()[0])

# Get ifo and active sci segs
ifo = args.ifo
if args.sci_seg:
    sci_segs = DataQualityFlag.read(args.sci_seg,
                                    path='H1:DMT-ANALYSIS_READY:1')
    assert sci_segs.ifo == ifo
    segs = sci_segs.active
elif args.start_time and args.end_time:
    segs = [Segment(args.start_time, args.end_time)]
else:
    print "Either --segment-file, or both start and end time must be provided."
    sys.exit(2)

st = segs[0].start
et = segs[-1].end

###################
###DARM Channels###
###################
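The channel list read at the top of this example follows the FrChannels convention of one '<name> <sample rate>' pair per line, of which only the first column is kept; an illustrative input file:

    H1:GDS-CALIB_STRAIN 16384
    H1:PEM-CS_ACC_PSL_PERISCOPE_X_DQ 8192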
Example #14
 def test_read_segwizard(self):
     flag = DataQualityFlag.read(SEGWIZ, FLAG1, coalesce=False)
     self.assertTrue(flag.active == ACTIVE,
                     'DataQualityFlag.read(segwizard) mismatch:\n\n%s\n\n%s'
                     % (ACTIVE, flag.active))
     self.assertTrue(flag.known == flag.active)
Example #15
#! /usr/bin/env python
import sys

from gwpy.segments import DataQualityFlag


def add_job(the_file, job_type, job_number, **kwargs):
    job_id = "%s%.6u" % (job_type, job_number)
    the_file.write("JOB %s %s.sub\n" % (job_id, job_type))
    vars_line = " ".join(
        ['%s="%s"' % (arg, str(val)) for arg, val in kwargs.iteritems()])
    the_file.write("VARS %s %s\n" % (job_id, vars_line))
    the_file.write("\n")


if __name__ == "__main__":
    segment_file = sys.argv[1]
    sci_segs = DataQualityFlag.read(segment_file)

    ifo = sci_segs.ifo
    segs = sci_segs.active

    jobtypes = ['target', 'aux']

    fdag = open("my.dag", 'w')
    for idx, seg in enumerate(segs):
        for jobtype in jobtypes:
            add_job(fdag, jobtype, idx, ifo=ifo, st=seg.start, et=seg.end)
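For a flag with one active segment, the generated my.dag would look roughly like this (GPS values illustrative; the VARS ordering may differ because the kwargs dict is unordered):

    JOB target000000 target.sub
    VARS target000000 ifo="H1" st="1126051217" et="1126054817"

    JOB aux000000 aux.sub
    VARS aux000000 ifo="H1" st="1126051217" et="1126054817"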
Example #16
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:
        # read all time-series data
        try:
            group = h5file['timeseries']
        except KeyError:
            group = dict()
        for dataset in group.values():
            ts = TimeSeries.read(dataset, format='hdf')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name) and
                    ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() == mode.SUMMARY_MODE_DAY:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # read all state-vector data
        try:
            group = h5file['statevector']
        except KeyError:
            group = dict()
        for dataset in group.values():
            sv = StateVector.read(dataset, format='hdf')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # read all spectrogram data
        try:
            group = h5file['spectrogram']
        except KeyError:
            group = dict()
        for key, dataset in group.items():
            key = key.rsplit(',', 1)[0]
            spec = Spectrogram.read(dataset, format='hdf')
            spec.channel = get_channel(spec.channel)
            add_spectrogram(spec, key=key)

        try:
            group = h5file['segments']
        except KeyError:
            group = dict()
        for name, dataset in group.items():
            dqflag = DataQualityFlag.read(dataset, format='hdf')
            globalv.SEGMENTS += {name: dqflag}
Example #17
def get_segments(flag, validity=None, config=ConfigParser(), cache=None,
                 query=True, return_=True, coalesce=True, padding=None,
                 segdb_error='raise', url=None):
    """Retrieve the segments for a given flag

    Segments will be loaded from global memory if already defined,
    otherwise they will be loaded from the given
    :class:`~glue.lal.Cache`, or finally from the segment database

    Parameters
    ----------
    FIXME

    Returns
    -------
    FIXME
    """
    if isinstance(flag, str):
        flags = flag.split(',')
    else:
        flags = flag
    allflags = set([f for cf in flags for f in
                    re_flagdiv.split(str(cf))[::2] if f])

    if padding is None and isinstance(flag, DataQualityFlag):
        padding = {flag: flag.padding}
    elif padding is None:
        padding = dict((flag, isinstance(flag, DataQualityFlag) and
                              flag.padding or None) for flag in flags)

    # check validity
    if validity is None:
        start = config.get(DEFAULTSECT, 'gps-start-time')
        end = config.get(DEFAULTSECT, 'gps-end-time')
        span = SegmentList([Segment(start, end)])
    elif isinstance(validity, DataQualityFlag):
        validity = validity.active
        try:
            span = SegmentList([validity.extent()])
        except ValueError:
            span = SegmentList()
    else:
        try:
            span = SegmentList([SegmentList(validity).extent()])
        except ValueError:
            span = SegmentList()
    validity = SegmentList(validity)

    # generate output object
    out = DataQualityDict()
    for f in flags:
        out[f] = DataQualityFlag(f, known=validity, active=validity)
    for f in allflags:
        globalv.SEGMENTS.setdefault(f, DataQualityFlag(f))

    # read segments from global memory and get the union of needed times
    try:
        old = reduce(operator.and_, (globalv.SEGMENTS.get(
                                        f, DataQualityFlag(f)).known
                                    for f in flags))
    except TypeError:
        old = SegmentList()
    newsegs = validity - old
    # load new segments
    query &= abs(newsegs) != 0
    query &= len(allflags) > 0
    if cache is not None:
        query &= len(cache) != 0
    if query:
        if cache is not None:
            try:
                new = DataQualityDict.read(cache, list(allflags))
            except Exception as e:
                if type(e) is not Exception:
                    raise
                if len(allflags) == 1:
                    f = list(allflags)[0]
                    new = DataQualityDict()
                    new[f] = DataQualityFlag.read(cache, f, coalesce=False)
            for f in new:
                new[f].known &= newsegs
                new[f].active &= newsegs
                if coalesce:
                    new[f].coalesce()
                vprint("    Read %d segments for %s (%.2f%% coverage).\n"
                       % (len(new[f].active), f,
                          float(abs(new[f].known))/float(abs(newsegs))*100))
        else:
            if len(newsegs) >= 10:
                qsegs = span
            else:
                qsegs = newsegs
            # parse configuration for query
            kwargs = {}
            if url is not None:
                kwargs['url'] = url
            else:
                try:
                    kwargs['url'] = config.get('segment-database', 'url')
                except (NoSectionError, NoOptionError):
                    pass
            if kwargs.get('url', None) in SEGDB_URLS:
                query_func = DataQualityDict.query_segdb
            else:
                query_func = DataQualityDict.query_dqsegdb
            try:
                new = query_func(allflags, qsegs, on_error=segdb_error,
                                 **kwargs)
            except Exception as e:
                # ignore error from SegDB
                if segdb_error in ['ignore', None]:
                    pass
                # convert to warning
                elif segdb_error in ['warn']:
                    print('%sWARNING: %sCaught %s: %s [gwsumm.segments]'
                          % (WARNC, ENDC, type(e).__name__, str(e)),
                          file=sys.stderr)
                    warnings.warn('%s: %s' % (type(e).__name__, str(e)))
                # otherwise raise as normal
                else:
                    raise
                new = DataQualityDict()
            for f in new:
                new[f].known &= newsegs
                new[f].active &= newsegs
                if coalesce:
                    new[f].coalesce()
                vprint("    Downloaded %d segments for %s (%.2f%% coverage).\n"
                       % (len(new[f].active), f,
                          float(abs(new[f].known))/float(abs(newsegs))*100))
        # record new segments
        globalv.SEGMENTS += new
        for f in new:
            globalv.SEGMENTS[f].description = str(new[f].description)

    # return what was asked for
    if return_:
        for compound in flags:
            union, intersection, exclude, notequal = split_compound_flag(
                compound)
            if len(union + intersection) == 1:
                out[compound].description = globalv.SEGMENTS[f].description
                out[compound].padding = padding.get(f, (0, 0))
            for flist, op in zip([exclude, intersection, union, notequal],
                                 [operator.sub, operator.and_, operator.or_,
                                  not_equal]):
                for f in flist:
                    pad = padding.get(f, (0, 0))
                    segs = globalv.SEGMENTS[f].copy()
                    if isinstance(pad, (float, int)):
                        segs = segs.pad(pad, pad)
                    elif pad is not None:
                        segs = segs.pad(*pad)
                    if coalesce:
                        segs = segs.coalesce()
                    out[compound] = op(out[compound], segs)
                    out[compound].known &= segs.known
                    out[compound].active &= segs.known
            out[compound].known &= validity
            out[compound].active &= validity
            if coalesce:
                out[compound].coalesce()
        if isinstance(flag, str):
            return out[flag]
        else:
            return out
Example #18
                 # calculations are in progress.
skip_load = True  # Set to 'True' to reuse the secondary intermediate files
                  # that let you skip glitch rate generation. Use for
                  # repeated runs.
# Define the name of the directory within the directory
# '{year in 3rd millennium}/{month #}/{day #}' that will store
# script-generated files.
skip_load_direct = 'Wind_Thresh_5'
tablet = ['Observing', 'Transition', 'EQ']  # Names for the configurations
                                            # defined by their respective
                                            # entries in 'configsecs' below.
histbinwidths = [0.1, 0.1, 0.1]  # Desired bin width for glitch rate
                                 # categories in each configuration's
                                 # histogram plot.

if skip_load:
    partstarts = list(np.loadtxt(File_Name(
        start, end, 'partstarts', 'txt', extras=[skip_load_direct])))
    rates = list(np.loadtxt(File_Name(
        start, end, 'glitchrates', 'txt', extras=[skip_load_direct])))
    nomflags = DataQualityFlag.read(File_Name(
        start, end, '{}_dqflag'.format(tablet[0]), 'hdf5',
        extras=[skip_load_direct]))
    transflags = DataQualityFlag.read(File_Name(
        start, end, '{}_dqflag'.format(tablet[1]), 'hdf5',
        extras=[skip_load_direct]))
    EQflags = DataQualityFlag.read(File_Name(
        start, end, '{}_dqflag'.format(tablet[2]), 'hdf5',
        extras=[skip_load_direct]))
    configs = [nomflags, transflags, EQflags]
    configsecs = [config.active for config in configs]
else:
    # Category-specific cutoffs
    print('Grabbing cat-cutoff info...')
    indchan1 = 'L1:ISI-HAM6_SENSCOR_X_FADE_TIME_LEFT_MON'
    indchan2 = 'L1:ISI-HAM6_SENSCOR_X_FADE_CUR_CHAN_MON'
    # transmodes = Grab_Series(start, end, indchan1, frame, procs)
    transmodes = Grab_Sfiles(start, end, obsrun, indchan1, frame)
    # EQmodes = Grab_Series(start, end, indchan2, frame, procs)
    EQmodes = Grab_Sfiles(start, end, obsrun, indchan2, frame)

    transtricts = [[(0, 'min')]]
Example #19
print('\n--- Total {0}h {1}m ---'.format(
    int((time.time() - start_time) / 3600),
    int(((time.time() - start_time) / 3600 - int(
        (time.time() - start_time) / 3600)) * 60)))

# whole day file should be produced at the end of the day.
end_time = (datetime.now() + timedelta(hours=-9) +
            timedelta(minutes=-15)).strftime("%Y-%m-%d")

if utc_date != end_time:
    print("date changed.")
    for key in keys:
        for snr in snrs[key]:

            tmp = DataQualityFlag.read(filepath_xml[key + str(snr)])
            tmp.write(SEGMENT_DIR + 'K1-' + key + '_SNR' + str(snr) + '/' +
                      year + '/' + 'K1-' + key + '_SNR' + str(snr) +
                      '_SEGMENT_UTC_' + utc_date + '.xml',
                      overwrite=True)

            # Check if a missing part exists
            day = DataQualityFlag(
                known=[(end_gps_time - 86400, end_gps_time)],
                active=[(end_gps_time - 86400, end_gps_time)],
                name=key + str(snr))
            missing = day.known - tmp.known

            for seg in missing:
                mkSegment(seg[0], seg[1], utc_date, txt=False)
            tmp = DataQualityFlag.read(filepath_xml[key + str(snr)])
Example #20

params = parse_command_line()

st_dir = int(str(params.st)[0:5])
et_dir = int(str(params.et)[0:5])
dirs = np.arange(st_dir, et_dir + 1)
pipeline_dict = coh_io.read_pipeline_ini(params.ini)
env_params, run_params = coh_io.check_ini_params(pipeline_dict)
segs = []
for directory in dirs:
    seg_files = sorted(
        glob.glob('%s/SEGMENTS/%d/*' %
                  (env_params['base_directory'], directory)))
    for f in seg_files:
        temps = DataQualityFlag.read(f)
        segs.append(temps)

plots = sorted(glob.glob('%s/*.png' % params.directory))
notches_page = 'webpage/notches.html'
params_page = 'webpage/params.html'
diagnostic_plots_page = 'webpage/diagnostic_plots.html'
index_page = 'webpage/index.html'
os.system('mkdir -p webpage')
os.system('touch %s' % notches_page)
os.system('touch %s' % params_page)
os.system('touch %s' % diagnostic_plots_page)
os.system('touch %s' % index_page)
os.system('cp main.css webpage/')
make_index_page(index_page, params)
make_diagnostic_page(diagnostic_plots_page, params)
Example #21
def main(args=None):
    """Run the online Guardian node visualization tool
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    # parse command line options
    ifo = args.ifo
    if not args.ifo:
        parser.error('--ifo must be given if not obvious from the host')
    start = getattr(args, 'gpsstart')
    end = getattr(args, 'gpsend')
    duration = int(ceil(end) - floor(start))
    categories = args.categories.split(',')
    for i, c in enumerate(categories):
        try:
            categories[i] = int(c)
        except (TypeError, ValueError):
            pass
    vetofile = getattr(args, 'veto-definer-file')
    vetofile = (urlparse(vetofile).netloc or os.path.abspath(vetofile))
    args.metric = args.metric or DEFAULT_METRICS

    # -- setup --------------------------------------

    tag = '%d-%d' % (start.seconds, end.seconds)
    outdir = os.path.abspath(os.path.join(args.output_directory, tag))
    mkdir(outdir)
    os.chdir(outdir)
    mkdir('etc', 'segments', 'condor')

    # -- segment handling ---------------------------

    os.chdir('segments')
    ALLSEGMENTS = DataQualityDict()

    # -- get analysis segments ----------------------

    aflags = args.analysis_segments
    asegments = DataQualityFlag('%s:VET-ANALYSIS_SEGMENTS:0' % ifo)
    for i, flag in enumerate(aflags):
        # use union of segments from a file
        if os.path.isfile(flag):
            asegments += DataQualityFlag.read(flag)
        # or intersection of segments from multiple flags
        else:
            new = DataQualityFlag.query(flag, start, end, url=args.segdb)
            if i:
                asegments.known &= new.known
                asegments.active &= new.active
            else:
                asegments.known = new.known
                asegments.active = new.active
    ALLSEGMENTS[asegments.name] = asegments

    if os.path.isfile(aflags[0]):
        asegments.filename = aflags

    # -- read veto definer and process --------------

    if urlparse(vetofile).netloc:
        tmp = urlopen(vetofile)
        vetofile = os.path.abspath(os.path.basename(vetofile))
        with open(vetofile, 'w') as f:
            f.write(tmp.read())
        LOGGER.info('Downloaded veto definer file')
    vdf = DataQualityDict.from_veto_definer_file(vetofile,
                                                 format='ligolw',
                                                 start=start,
                                                 end=end,
                                                 ifo=ifo)
    LOGGER.info('Read %d flags from veto definer' % len(vdf.keys()))

    # populate veto definer file from database
    vdf.populate(source=args.segdb, on_error=args.on_segdb_error)
    ALLSEGMENTS += vdf

    # organise flags into categories
    flags = dict((c, DataQualityDict()) for c in categories)
    for name, flag in vdf.items():
        try:
            flags[flag.category][name] = flag
        except KeyError:
            pass

    # find the states and segments for each category
    states, after, oldtitle = (dict(), None, '')
    for i, category in enumerate(categories):
        title = isinstance(category, int) and 'Cat %d' % category or category
        tag = re_cchar.sub('_', str(title).upper())
        states[category] = SummaryState(
            'After %s' % oldtitle,
            key=tag,
            known=after.known,
            active=after.active,
            definition=after.name,
        ) if i else SummaryState(
            args.analysis_name,
            key=args.analysis_name,
            definition=asegments.name,
        )
        try:
            segs = flags[category].union()
        except TypeError:  # no flags
            segs = DataQualityFlag()
        segs.name = '%s:VET-ANALYSIS_%s:0' % (ifo, tag)
        ALLSEGMENTS[segs.name] = segs
        after = (after - segs) if i else (asegments - segs)
        after.name = '%s:VET-ANALYSIS_AFTER_%s:0' % (ifo, tag)
        ALLSEGMENTS[after.name] = after
        oldtitle = title

    # write all segments to disk
    segfile = os.path.abspath('%s-VET_SEGMENTS-%d-%d.xml.gz' %
                              (ifo, start.seconds, duration))
    ALLSEGMENTS.write(segfile)

    os.chdir(os.pardir)

    if args.verbose:
        LOGGER.debug("All segments accessed and written to\n%s" % segfile)

    # -- job preparation ----------------------------

    os.chdir('etc')

    configs = []
    for category in categories:
        title = (isinstance(category, int) and 'Category %d' % category
                 or category)
        tab = 'tab-%s' % title
        config = ConfigParser()

        # add segment-database configuration
        add_config_section(config, 'segment-database', url=args.segdb)

        # add plot configurations
        pconfig = ConfigParser()
        pconfig.read(args.config_file)
        for section in pconfig.sections():
            if section.startswith('plot-'):
                config._sections[section] = pconfig._sections[section].copy()

        try:
            plots = pconfig.items('plots-%s' % category, raw=True)
        except NoSectionError:
            try:
                plots = pconfig.items('plots', raw=True)
            except NoSectionError:
                plots = []

        # add state
        if args.independent:
            state = states[categories[0]]
        else:
            state = states[category]
        sname = 'state-%s' % state.key
        add_config_section(config,
                           sname,
                           key=state.key,
                           name=state.name,
                           definition=state.definition,
                           filename=segfile)

        # add plugin
        add_config_section(config, 'plugins', **{'gwvet.tabs': ''})

        # define metrics
        if category == 1:
            metrics = ['Deadtime']
        else:
            metrics = args.metric

        # define summary tab
        if category == 1:
            tab = configure_veto_tab(config,
                                     title,
                                     title,
                                     state,
                                     flags[category].keys(),
                                     segfile,
                                     metrics,
                                     name='Summary',
                                     **{'veto-name': title})
        else:
            tab = configure_veto_tab(config,
                                     title,
                                     title,
                                     state,
                                     flags[category].keys(),
                                     segfile,
                                     metrics,
                                     name='Summary',
                                     **{
                                         'veto-name': title,
                                         'event-channel': args.event_channel,
                                         'event-generator':
                                         args.event_generator,
                                     })
        if len(categories) == 1:
            config.set(tab, 'index',
                       '%(gps-start-time)s-%(gps-end-time)s/index.html')
        for key, value in plots:
            if re.match('%\(flags\)s (?:plot-)?segments', value):  # noqa: W605
                config.set(tab, key, '%%(union)s,%s' % value)
                if '%s-labels' % key not in plots:
                    config.set(tab, '%s-labels' % key, 'Union,%(flags)s')
            else:
                config.set(tab, key, value)

        # now a tab for each flag
        for flag in flags[category]:
            if category == 1:
                tab = configure_veto_tab(config, flag, title, state, [flag],
                                         segfile, metrics)
            else:
                tab = configure_veto_tab(
                    config, flag, title, state, [flag], segfile, metrics, **{
                        'event-channel': args.event_channel,
                        'event-generator': args.event_generator
                    })
                if args.event_file:
                    config.set(tab, 'event-file', args.event_file)
            for key, value in plots:
                config.set(tab, key, value)

        if len(categories) > 1 and category != categories[-1]:
            with open('%s.ini' % re_cchar.sub('-', title.lower()), 'w') as f:
                config.write(f)
                configs.append(os.path.abspath(f.name))

    # configure summary job
    if len(categories) > 1:
        state = states[categories[0]]
        add_config_section(config,
                           'state-%s' % state.key,
                           key=state.key,
                           name=state.name,
                           definition=state.definition,
                           filename=segfile)
        try:
            plots = pconfig.items('plots', raw=True)
        except NoSectionError:
            plots = []
        flags = [f for c in categories for f in flags[c].keys()]
        tab = configure_veto_tab(
            config,
            'Impact of full veto definer file',
            None,
            state,
            flags,
            segfile,
            args.metric,
            shortname='Summary',
            index='%(gps-start-time)s-%(gps-end-time)s/index.html',
            **{
                'event-channel': args.event_channel,
                'event-generator': args.event_generator,
                'veto-name': 'All vetoes'
            })
        if args.event_file:
            config.set(tab, 'event-file', args.event_file)
        for key, value in plots:
            config.set(tab, key, value)
        with open('%s.ini' % re_cchar.sub('-', title.lower()), 'w') as f:
            config.write(f)
            configs.append(os.path.abspath(f.name))

    os.chdir(os.pardir)

    if args.verbose:
        LOGGER.debug("Generated configuration files for each category")

    # -- condor preparation -------------------------

    os.chdir(os.pardir)

    # get condor variables
    if getuser() == 'detchar':
        accgroup = 'ligo.prod.o1.detchar.dqproduct.gwpy'
    else:
        accgroup = 'ligo.dev.o1.detchar.dqproduct.gwpy'

    gwsumm_args = [
        '--gps-start-time',
        str(start.seconds),
        '--gps-end-time',
        str(end.seconds),
        '--ifo',
        ifo,
        '--file-tag',
        'gwpy-vet',
        '--condor-command',
        'accounting_group=%s' % accgroup,
        '--condor-command',
        'accounting_group_user=%s' % getuser(),
        '--on-segdb-error',
        args.on_segdb_error,
        '--output-dir',
        args.output_directory,
    ]
    for cf in args.global_config:
        gwsumm_args.extend(('--global-config', cf))
    for cf in configs:
        gwsumm_args.extend(('--config-file', cf))
    if args.verbose:
        gwsumm_args.append('--verbose')

    if args.verbose:
        LOGGER.debug('Generating summary DAG via:\n')
        LOGGER.debug(' '.join([batch.PROG] + gwsumm_args))

    # execute gwsumm in batch mode
    batch.main(args=gwsumm_args)
Example #22
args = parser.parse_args()

freq = args.freq
year = args.year
month = args.month
day = args.day

if len(month) < 2:
    month = "0" + month
if len(day) < 2:
    day = "0" + day

#=============Get locked segments=============

locked = DataQualityFlag.read("/home/detchar/Segments/K1-DET_FOR_GRB200415A/" +
                              year + "/K1-DET_FOR_GRB200415A_UTC_" + year +
                              "-" + month + "-" + day + ".xml")

# Remove segments shorter than 94 sec

act = SegmentList()
for seg in locked.active:
    duration = seg[1] - seg[0]
    if duration >= 94:
        act.append(seg)

# Remove the last 30 sec, and a 2 sec margin at each end
act = act.contract(17)
act = act.shift(-15)
locked.active = act
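As a concrete check of the contract/shift arithmetic: contract(17) trims 17 s from each end, and shift(-15) moves the result back in time, so each segment loses 2 s at the start and 32 s (30 s plus the 2 s margin) at the end:

    from gwpy.segments import Segment, SegmentList

    s = SegmentList([Segment(0, 100)])
    s = s.contract(17)  # [Segment(17, 83)]
    s = s.shift(-15)    # [Segment(2, 68)]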
Example #23
def get_segments(flag, validity=None, config=ConfigParser(), cache=None,
                 query=True, return_=True, coalesce=True, padding=None,
                 segdb_error='raise', url=None, **read_kw):
    """Retrieve the segments for a given flag

    Segments will be loaded from global memory if already defined,
    otherwise they will be loaded from the given
    :class:`~glue.lal.Cache`, or finally from the segment database

    Parameters
    ----------
    flag : `str`, `list`
        either the name of one flag, or a list of names

    validity : `~gwpy.segments.SegmentList`
        the segments over which to search for other segments

    query : `bool`, optional, default: `True`
        actually execute a read/query operation (if needed), otherwise
        just retrieve segments that have already been cached

    config : `~configparser.ConfigParser`, optional
        the configuration for your analysis, if you have one. If
        present the ``[segment-database]`` section will be queried
        for the following options

        - ``gps-start-time``, and ``gps-end-time``, if ``validity`` is
          not given
        - ``url`` (the remote hostname for the segment database) if
          the ``url`` keyword is not given

    cache : :class:`glue.lal.Cache`, optional
        a cache of files from which to read segments, otherwise segments
        will be downloaded from the segment database

    coalesce : `bool`, optional, default: `True`
        coalesce all segmentlists before returning, otherwise just return
        segments as they were downloaded/read

    padding : `tuple`, or `dict` of `tuples`, optional
        `(start, end)` padding with which to pad segments that are
        downloaded/read

    segdb_error : `str`, optional, default: ``'raise'``
        how to handle errors returned from the segment database, one of

        - ``'raise'`` (default) : raise the exception as normal
        - ``'warn'`` : print the exception as a warning, but return no
          segments
        - ``'ignore'`` : silently ignore the error and return no segments

    url : `str`, optional
        the remote hostname for the target segment database

    return_ : `bool`, optional, default: `True`
        internal flag to enable (True) or disable (False) actually returning
        anything. This is useful if you want to download/read segments now
        but not use them until later (e.g. plotting)

    **read_kw : `dict`, optional
        additional keyword arguments to `~gwpy.segments.DataQualityDict.read`
        or `~gwpy.segments.DataQualityFlag.read`

    Returns
    -------
    flag : `~gwpy.segments.DataQualityFlag`
        the flag object representing segments for the given single flag, OR

    flagdict : `~gwpy.segments.DataQualityDict`
        the dict of `~gwpy.segments.DataQualityFlag` objects for multiple
        flags, if ``flag`` is given as a `list`, OR

    None
       if ``return_=False``
    """
    if isinstance(flag, str):
        flags = flag.split(',')
    else:
        flags = flag
    allflags = set([f for cf in flags for f in
                    re_flagdiv.split(str(cf))[::2] if f])

    if padding is None and isinstance(flag, DataQualityFlag):
        padding = {flag: flag.padding}
    elif padding is None:
        padding = dict((flag,
                        isinstance(flag, DataQualityFlag) and
                        flag.padding or None) for flag in flags)

    # check validity
    if validity is None:
        start = config.get(DEFAULTSECT, 'gps-start-time')
        end = config.get(DEFAULTSECT, 'gps-end-time')
        span = SegmentList([Segment(start, end)])
    elif isinstance(validity, DataQualityFlag):
        validity = validity.active
        try:
            span = SegmentList([validity.extent()])
        except ValueError:
            span = SegmentList()
    else:
        try:
            span = SegmentList([SegmentList(validity).extent()])
        except ValueError:
            span = SegmentList()
    validity = SegmentList(validity)

    # generate output object
    out = DataQualityDict()
    for f in flags:
        out[f] = DataQualityFlag(f, known=validity, active=validity)
    for f in allflags:
        globalv.SEGMENTS.setdefault(f, DataQualityFlag(f))

    # read segments from global memory and get the union of needed times
    try:
        old = reduce(
            operator.and_,
            (globalv.SEGMENTS.get(f, DataQualityFlag(f)).known for f in flags))
    except TypeError:
        old = SegmentList()
    newsegs = validity - old
    # load new segments
    query &= abs(newsegs) != 0
    query &= len(allflags) > 0
    if cache is not None:
        query &= len(cache) != 0
    if query:
        if cache is not None:
            if isinstance(cache, str) and cache.endswith(
                (".h5", ".hdf", ".hdf5")) and (
                    'path' not in read_kw):
                read_kw['path'] = config.get(
                    'DEFAULT', 'segments-hdf5-path', fallback='segments')
            try:
                new = DataQualityDict.read(cache, list(allflags), **read_kw)
            except IORegistryError as e:
                # can remove when astropy >= 1.2 is required
                if type(e) is not IORegistryError:
                    raise
                if len(allflags) == 1:
                    f = list(allflags)[0]
                    new = DataQualityDict()
                    new[f] = DataQualityFlag.read(
                        cache, f, coalesce=False, **read_kw)
            for f in new:
                new[f].known &= newsegs
                new[f].active &= newsegs
                if coalesce:
                    new[f].coalesce()
                vprint("    Read %d segments for %s (%.2f%% coverage).\n"
                       % (len(new[f].active), f,
                          float(abs(new[f].known))/float(abs(newsegs))*100))
        else:
            if len(newsegs) >= 10:
                qsegs = span
            else:
                qsegs = newsegs
            # parse configuration for query
            kwargs = {}
            if url is not None:
                kwargs['url'] = url
            else:
                try:
                    kwargs['url'] = config.get('segment-database', 'url')
                except (NoSectionError, NoOptionError):
                    pass
            if kwargs.get('url', None) in SEGDB_URLS:
                query_func = DataQualityDict.query_segdb
            else:
                query_func = DataQualityDict.query_dqsegdb
            try:
                new = query_func(allflags, qsegs, on_error=segdb_error,
                                 **kwargs)
            except Exception as e:
                # ignore error from SegDB
                if segdb_error in ['ignore', None]:
                    pass
                # convert to warning
                elif segdb_error in ['warn']:
                    print('%sWARNING: %sCaught %s: %s [gwsumm.segments]'
                          % (WARNC, ENDC, type(e).__name__, str(e)),
                          file=sys.stderr)
                    warnings.warn('%s: %s' % (type(e).__name__, str(e)))
                # otherwise raise as normal
                else:
                    raise
                new = DataQualityDict()
            for f in new:
                new[f].known &= newsegs
                new[f].active &= newsegs
                if coalesce:
                    new[f].coalesce()
                vprint("    Downloaded %d segments for %s (%.2f%% coverage).\n"
                       % (len(new[f].active), f,
                          float(abs(new[f].known))/float(abs(newsegs))*100))
        # record new segments
        globalv.SEGMENTS += new
        for f in new:
            globalv.SEGMENTS[f].description = str(new[f].description)

    # return what was asked for
    if return_:
        for compound in flags:
            union, intersection, exclude, notequal = split_compound_flag(
                compound)
            if len(union + intersection) == 1:
                out[compound].description = globalv.SEGMENTS[f].description
                out[compound].padding = padding.get(f, (0, 0))
            for flist, op in zip([exclude, intersection, union, notequal],
                                 [operator.sub, operator.and_, operator.or_,
                                  not_equal]):
                for f in flist:
                    pad = padding.get(f, (0, 0))
                    segs = globalv.SEGMENTS[f].copy()
                    if isinstance(pad, (float, int)):
                        segs = segs.pad(pad, pad)
                    elif pad is not None:
                        segs = segs.pad(*pad)
                    if coalesce:
                        segs = segs.coalesce()
                    out[compound] = op(out[compound], segs)
            out[compound].known &= validity
            out[compound].active &= validity
            if coalesce:
                out[compound].coalesce()
        if isinstance(flag, str):
            return out[flag]
        else:
            return out
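The compound-flag arithmetic above works because DataQualityFlag supports set-like operators. A minimal sketch of that behaviour (flag names and GPS times are invented for illustration):

from gwpy.segments import DataQualityFlag

# two hypothetical flags sharing the same known span
a = DataQualityFlag('X1:FLAG-A:1', known=[(0, 100)], active=[(10, 40), (60, 90)])
b = DataQualityFlag('X1:FLAG-B:1', known=[(0, 100)], active=[(30, 70)])

print((a & b).active)  # intersection: [30, 40) and [60, 70)
print((a | b).active)  # union: [10, 90)
print((a - b).active)  # subtraction: [10, 30) and [70, 90)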
Example #25
        mkSegment(start_gps_time, end_gps_time, utc_date)
        #print('    DQF segment file saved')
    except ValueError:
        print('    Cannot append discontiguous TimeSeries')
#------------------------------------------------------------

elapsed = time.time() - start_time
print('\n--- Total {0}h {1}m ---'.format(int(elapsed // 3600),
                                         int(elapsed % 3600 // 60)))


# The whole-day file should be produced at the end of the day.
end_time = (datetime.now() + timedelta(hours=-9)).strftime("%Y-%m-%d")

if utc_date != end_time:
    for key in keys:
        tmp = DataQualityFlag.read(filepath_xml[key])
        #tmp.write(SEGMENT_DIR +key+'/'+year+'/'+key+'_SEGMENT_UTC_' + utc_date + '.xml',overwrite=True)

        # Check if missing part exist
        day = DataQualityFlag(known=[(end_gps_time-86400,end_gps_time)],active=[(end_gps_time-86400,end_gps_time)],name=key)
        missing = day.known - tmp.known

        for seg in missing:
            mkSegment(seg[0], seg[1], utc_date, txt=False)
        tmp = DataQualityFlag.read(filepath_xml[key])

        tmp.write(SEGMENT_DIR +key+'/'+year+'/'+key+'_SEGMENT_UTC_' + utc_date + '.xml',overwrite=True)   
        with open(SEGMENT_DIR +key+'/'+year+'/'+key+'_SEGMENT_UTC_' + utc_date + '.txt', mode='w') as f:
            for seg in tmp.active :
                f.write('{0} {1}\n'.format(int(seg[0]), int(seg[1])))
        os.remove(filepath_xml[key])
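The gap-filling step above relies on known being a plain SegmentList, so set subtraction yields exactly the uncovered intervals. A short sketch with invented times:

from gwpy.segments import Segment, SegmentList

day = SegmentList([Segment(0, 86400)])  # a full UTC day
covered = SegmentList([Segment(0, 30000), Segment(50000, 86400)])

missing = day - covered
print(missing)  # [30000, 50000): the span still to be processed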
Example #26
import argparse

from gwpy.segments import DataQualityFlag, Segment, SegmentList

parser = argparse.ArgumentParser(description='Get locked segment including the given gps time.')
parser.add_argument('-t','--time',help='input gpstime.',required=True)
parser.add_argument('-d','--date',help='input date',required=True)

args = parser.parse_args()
time = int(args.time)
date = args.date

segmentfile="/users/DET/Segments/SegmentList_FPMI_UTC_"+date+".xml"

Locked = DataQualityFlag.read(segmentfile)

#triggerseg = DataQualityFlag(known=[time-30,time+30], active=[time-1,time+1])
triggerseg=SegmentList([Segment(time-1,time+1)])
IsLocked = Locked.active.intersects(triggerseg)

if IsLocked:
    for seg in Locked.active:
        if seg[0] < time < seg[1]:
            print(str(seg[0]) + " " + str(seg[1]))
            break

else:
    print("Not locked.")
Example #27
def mkSegment(gst, get, utc_date, txt=True) :

    chGRDLSC = 'K1:GRD-LSC_LOCK_STATE_N'
    chGRDIFO = 'K1:GRD-IFO_STATE_N'
    chGRDEQ = 'K1:GRD-PEM_EARTHQUAKE_STATE_N'
    chOMCADC = 'K1:FEC-32_ADC_OVERFLOW_0_0'

    channels = [chGRDLSC,chGRDIFO,chGRDEQ,chOMCADC]
    
    if getpass.getuser() == "controls":
        gwf_cache = '/users/DET/Cache/latest.cache'
        with open(gwf_cache, 'r') as fobj:
            cache = Cache.fromfile(fobj)
    else:
        # add a 1 s margin so the locked segments can be contracted later.
        cache = GetFilelist(gst-1, get+1)

    #------------------------------------------------------------

    #print('Reading {0} timeseries data...'.format(date))
    # add a 1 s margin so the locked segments can be contracted later.
    channeldata = TimeSeriesDict.read(cache, channels, start=gst-1, end=get+1, format='gwf.lalframe', gap='pad')
    channeldataGRDIFO = channeldata[chGRDIFO]
    channeldataGRDLSC = channeldata[chGRDLSC]
    channeldataGRDEQ = channeldata[chGRDEQ]
    channeldataOMCADC = channeldata[chOMCADC]

    sv={}
    sv['K1-GRD_SCIENCE_MODE'] = channeldataGRDIFO == 1000 
    # Locked is defined as the inverse of the unlocked segments for technical reasons.
    #sv['K1-GRD_LOCKED'] = channeldataGRDLSC == 1000 
    sv['K1-GRD_UNLOCKED'] = channeldataGRDLSC != 1000
    sv['K1-GRD_PEM_EARTHQUAKE'] = channeldataGRDEQ == 1000
    sv['K1-OMC_OVERFLOW_VETO'] = channeldataOMCADC != 0
    # OMC_OVERFLOW_OK is defined as the inverse of the veto segments for technical reasons.
    #sv['K1-OMC_OVERFLOW_OK'] = channeldataOMCADC == 0


    dqflag = {}
    for key in keys:
        if key == 'K1-GRD_LOCKED' or key == 'K1-OMC_OVERFLOW_OK':
            continue
        dqflag[key] = sv[key].to_dqflag(round=True)

    # round=True is inclusive by default; contract by 1 s to omit the
    # fractional edges of each segment.
    dqflag['K1-GRD_SCIENCE_MODE'].active = dqflag['K1-GRD_SCIENCE_MODE'].active.contract(1.0)

    dqflag['K1-GRD_LOCKED'] = ~dqflag['K1-GRD_UNLOCKED']
    dqflag['K1-GRD_LOCKED'].name = "K1:GRD-LSC_LOCK_STATE_N == 1000"

    dqflag['K1-OMC_OVERFLOW_OK'] = ~dqflag['K1-OMC_OVERFLOW_VETO']
    dqflag['K1-OMC_OVERFLOW_OK'].name = "K1:FEC-32_ADC_OVERFLOW_0_0 == 0"
    
    dqflag['K1-GRD_SCIENCE_MODE'].description = "Observation mode. K1:GRD-IFO_STATE_N == 1000"
    dqflag['K1-GRD_UNLOCKED'].description = "Interferometer is not locked. K1:GRD-LSC_LOCK_STATE_N != 1000"
    dqflag['K1-GRD_LOCKED'].description = "Interferometer is locked. K1:GRD-LSC_LOCK_STATE_N == 1000"
    dqflag['K1-OMC_OVERFLOW_VETO'].description = "OMC overflow happened. K1:FEC-32_ADC_OVERFLOW_0_0 != 0"
    dqflag['K1-OMC_OVERFLOW_OK'].description = "No OMC overflow happened. K1:FEC-32_ADC_OVERFLOW_0_0 == 0"

    for key in keys:

        # remove the 1 s margin that was added for the locked-segment contraction.
        margin = DataQualityFlag(known=[(gst,get)],active=[(gst-1,gst),(get,get+1)])
        dqflag[key] -= margin

        # write out the 15-minute segments.
        if txt:
            with open(filepath_txt[key], mode='w') as f:
                for seg in dqflag[key].active :
                    f.write('{0} {1}\n'.format(int(seg[0]), int(seg[1])))
        
        # if an accumulated file exists, merge it in.
        if os.path.exists(filepath_xml[key]):
            tmp = DataQualityFlag.read(filepath_xml[key])        
            dqflag[key] = dqflag[key] + tmp

        dqflag[key].write(filepath_xml[key],overwrite=True)
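The key idiom in mkSegment above is that comparing a TimeSeries against a value returns a boolean StateTimeSeries, whose to_dqflag() method turns the True runs into segments. A minimal sketch on a synthetic series (channel name and values invented):

import numpy
from gwpy.timeseries import TimeSeries

data = TimeSeries(numpy.array([0, 0, 1000, 1000, 0, 1000]),
                  sample_rate=1, t0=0, name='X1:TEST-STATE')

flag = (data == 1000).to_dqflag(round=True)
print(flag.active)  # segments where the channel equalled 1000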
Example #28
def mkSegment(gst, get, utc_date, txt=True):

    for key in keys:
        sources = GetFilelist(gst, get, key)

        first = True
        for source in sources:
            events = EventTable.read(
                source,
                tablename='sngl_burst',
                columns=['start_time', 'start_time_ns', 'duration', 'snr'])
            #events = EventTable.read(source, tablename='sngl_burst',columns=['peak_time', 'peak_time_ns','start_time', 'start_time_ns', 'duration', 'peak_frequency', 'central_freq', 'bandwidth', 'channel', 'amplitude', 'snr', 'confidence', 'chisq', 'chisq_dof', 'param_one_name', 'param_one_value'])
            col = events.get_column('start_time')
            if first:
                if len(col) > 0:
                    mergedevents = events
                    first = False
                else:
                    pass
            else:
                mergedevents = vstack([mergedevents, events])

        for snr in snrs[key]:
            Triggered = DataQualityFlag(name="K1:" + key,
                                        known=[(gst, get)],
                                        active=[],
                                        label="Glitch",
                                        description="Glitch veto segment K1:" +
                                        key + " >= SNR" + str(snr))
            #Triggered.ifo = "K1"

            if not first:

                fevents = mergedevents.filter(
                    ('snr', mylib.Islargerequal, snr))
                durations = fevents.get_column('duration')
                start_times = fevents.get_column('start_time')
                for start_time, duration in zip(start_times, durations):
                    tmpstart = int(start_time)
                    #tmpend = start_time + duration
                    tmpend = int(start_time + 1)

                    tmpTriggered = DataQualityFlag(known=[(gst, get)],
                                                   active=[(tmpstart, tmpend)])
                    Triggered |= tmpTriggered

            # write out the 15-minute segments.
            if txt:
                with open(filepath_txt[key + str(snr)], mode='w') as f:
                    for seg in Triggered.active:
                        f.write('{0} {1}\n'.format(int(seg[0]), int(seg[1])))

            # if an accumulated file exists, merge it in.
            if os.path.exists(filepath_xml[key + str(snr)]):
                tmp = DataQualityFlag.read(filepath_xml[key + str(snr)])
                Triggered = Triggered + tmp

            Triggered.write(filepath_xml[key + str(snr)], overwrite=True)
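EventTable.filter also accepts simple string conditions, which can replace the (column, function, value) tuple used above when no custom comparison function is needed. A sketch on a toy table (values invented):

from gwpy.table import EventTable

events = EventTable(rows=[(100, 5.0), (200, 12.0)],
                    names=('start_time', 'snr'))

loud = events.filter('snr >= 8')
print(loud['start_time'])  # only the row with snr 12.0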
Example #29
File: archive.py Project: gwpy/gwsumm
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source

    This method reads all found data into the data containers defined by
    the `gwsumm.globalv` module, then returns nothing.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:

        # -- channels ---------------------------

        try:
            ctable = Table.read(h5file['channels'])
        except KeyError:  # no channels table written
            pass
        else:
            for row in ctable:
                chan = get_channel(row['name'])
                for p in ctable.colnames[1:]:
                    if row[p]:
                        setattr(chan, p, row[p])

        # -- timeseries -------------------------

        for dataset in h5file.get('timeseries', {}).values():
            ts = TimeSeries.read(dataset, format='hdf5')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name) and
                    ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() != mode.Mode.day:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # -- statevector ------------------------

        for dataset in h5file.get('statevector', {}).values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # -- spectrogram ------------------------

        for tag, add_ in zip(
                ['spectrogram', 'coherence-components'],
                [add_spectrogram, add_coherence_component_spectrogram]):
            for key, dataset in h5file.get(tag, {}).items():
                key = key.rsplit(',', 1)[0]
                spec = Spectrogram.read(dataset, format='hdf5')
                spec.channel = get_channel(spec.channel)
                add_(spec, key=key)

        # -- segments ---------------------------

        for name, dataset in h5file.get('segments', {}).items():
            dqflag = DataQualityFlag.read(h5file, path=dataset.name,
                                          format='hdf5')
            globalv.SEGMENTS += {name: dqflag}

        # -- triggers ---------------------------

        for dataset in h5file.get('triggers', {}).values():
            load_table(dataset)
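The segments branch above reads each flag back from a named HDF5 group; the matching write call uses the same path argument. A hedged round-trip sketch (file and flag names invented):

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag('X1:TEST-FLAG:1', known=[(0, 10)], active=[(2, 5)])
flag.write('archive.h5', path='segments/X1:TEST-FLAG:1', format='hdf5')

copy = DataQualityFlag.read('archive.h5', path='segments/X1:TEST-FLAG:1',
                            format='hdf5')
print(copy.active)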
Example #30
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:
        # read all time-series data
        try:
            group = h5file['timeseries']
        except KeyError:
            group = dict()
        for dataset in group.values():
            ts = TimeSeries.read(dataset, format='hdf5')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name)
                    and ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() != mode.Mode.day:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # read all state-vector data
        try:
            group = h5file['statevector']
        except KeyError:
            group = dict()
        for dataset in group.values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # read all spectrogram data
        for tag in ['spectrogram', 'coherence-components']:
            if tag == 'coherence-components':
                add_ = add_coherence_component_spectrogram
            else:
                add_ = add_spectrogram
            try:
                group = h5file[tag]
            except KeyError:
                group = dict()
            for key, dataset in group.items():
                key = key.rsplit(',', 1)[0]
                spec = Spectrogram.read(dataset, format='hdf5')
                spec.channel = get_channel(spec.channel)
                add_(spec, key=key)

        # read all segments
        try:
            group = h5file['segments']
        except KeyError:
            group = dict()
        for name in group:
            dqflag = DataQualityFlag.read(group, path=name, format='hdf5')
            globalv.SEGMENTS += {name: dqflag}

        # read all triggers
        try:
            group = h5file['triggers']
        except KeyError:
            group = dict()
        for key in group:
            load_table(group[key])
Example #31
import argparse
import sys

from gwpy.segments import DataQualityFlag, Segment

import fir
from utils import chunk_segments

parser = argparse.ArgumentParser(description='Generate target features for machine learning on LIGO data.')
parser.add_argument('--ifo',type=str,required=True)
parser.add_argument('-f','--segment-file',type=str)
parser.add_argument('-s','--start-time',type=int)
parser.add_argument('-e','--end-time',type=int)
parser.add_argument('-p','--path', help='path to output directory', required=False)

args = parser.parse_args()

ifo=args.ifo

if args.segment_file:
    sci_segs=DataQualityFlag.read(args.segment_file, path='%s:DMT-ANALYSIS_READY:1' % ifo)
    assert sci_segs.ifo == ifo
    segs=sci_segs.active
elif args.start_time and args.end_time:
    segs=[Segment(args.start_time, args.end_time)]
else:
    print "Either --segment-file, or both start and end time must be provided."
    exit(2)

st=segs[0].start
et=segs[-1].end

chunk=4096
pad=256

target_chan=ifo+':GDS-CALIB_STRAIN'
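When no local segment file is available, the same analysis-ready segments can typically be fetched from the segment database instead; a sketch (GPS times invented, network access required):

from gwpy.segments import DataQualityFlag

sci = DataQualityFlag.query('H1:DMT-ANALYSIS_READY:1',
                            1187000000, 1187100000)
print(sci.active)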
Example #32
def get_segments(flags,
                 segments,
                 cache=None,
                 url='https://segdb-er.ligo.caltech.edu',
                 **kwargs):
    """Fetch some segments from the segment database

    Parameters
    ----------
    flags : `str`, `list`
        one or more flags for which to query
    segments : `~gwpy.segments.DataQualityFlag`, `~gwpy.segments.SegmentList`
        span over which to query for flag segments
    cache : `~glue.lal.Cache`, optional
        cache of files to use as data source
    url : `str`
        URL of segment database, if ``cache`` is not given
    **kwargs
        other keyword arguments to pass to either
        `~gwpy.segments.DataQualityFlag.read` (if ``cache`` is given) or
        `~gwpy.segments.DataQualityFlag.query` (otherwise)

    Returns
    -------
    segments : `~gwpy.segments.DataQualityFlag`,
               `~gwpy.segments.DataQualityDict`
        a single `~gwpy.segments.DataQualityFlag` (if ``flags`` is given
        as a `str`), or a `~gwpy.segments.DataQualityDict` (if ``flags``
        is given as a `list`)
    """
    # format segments
    if isinstance(segments, DataQualityFlag):
        segments = segments.active
    elif isinstance(segments, tuple):
        segments = [Segment(to_gps(segments[0]), to_gps(segments[1]))]
    segments = SegmentList(segments)

    # get format for files
    if cache is not None and not isinstance(cache, Cache):
        kwargs.setdefault(
            'format',
            _get_valid_format('read', DataQualityFlag, None, None,
                              (cache[0], ), {}))

    # populate an existing set of flags
    if isinstance(flags, (DataQualityFlag, DataQualityDict)):
        return flags.populate(source=cache or url, segments=segments, **kwargs)
    # query one flag
    elif cache is None and isinstance(flags, str):
        return DataQualityFlag.query(flags, segments, url=url, **kwargs)
    # query lots of flags
    elif cache is None:
        return DataQualityDict.query(flags, segments, url=url, **kwargs)
    # read one flag
    elif flags is None or isinstance(flags, str):
        segs = DataQualityFlag.read(cache, flags, coalesce=False, **kwargs)
        if segs.known:
            segs.known &= segments
        else:
            segs.known = segments
        segs.active &= segments
        return segs
    # read lots of flags
    else:
        segs = DataQualityDict.read(cache, flags, coalesce=True, **kwargs)
        for name, flag in segs.items():
            flag.known &= segments
            flag.active &= segments
        return segs
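The tuple branch above leans on gwpy.time.to_gps, which converts strings, datetimes, and similar inputs into GPS times before building the Segment. A quick sketch:

from datetime import datetime
from gwpy.time import to_gps

print(to_gps('Jan 1 2017'))          # 1167264018
print(to_gps(datetime(2017, 1, 1)))  # the same instant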
Example #33
def get_segments(flag, validity=None, config=ConfigParser(), cache=None,
                 query=True, return_=True, coalesce=True, padding=None,
                 segdb_error='raise', url=None):
    """Retrieve the segments for a given flag

    Segments will be loaded from global memory if already defined,
    otherwise they will be loaded from the given
    :class:`~glue.lal.Cache`, or finally from the segment database

    Parameters
    ----------
    FIXME

    Returns
    -------
    FIXME
    """
    if isinstance(flag, str):
        flags = flag.split(',')
    else:
        flags = flag
    allflags = set([f for cf in flags for f in
                    re_flagdiv.split(str(cf))[::2] if f])

    if padding is None and isinstance(flag, DataQualityFlag):
        padding = {flag: flag.padding}
    elif padding is None:
        padding = dict((flag, isinstance(flag, DataQualityFlag) and
                              flag.padding or None) for flag in flags)

    # check validity
    if validity is None:
        start = config.get(DEFAULTSECT, 'gps-start-time')
        end = config.get(DEFAULTSECT, 'gps-end-time')
        span = SegmentList([Segment(start, end)])
        validity = span
    elif isinstance(validity, DataQualityFlag):
        validity = validity.active
        try:
            span = SegmentList([validity.extent()])
        except ValueError:
            span = SegmentList()
    else:
        try:
            span = SegmentList([SegmentList(validity).extent()])
        except ValueError:
            span = SegmentList()
    validity = SegmentList(validity)

    # generate output object
    out = DataQualityDict()
    for f in flags:
        out[f] = DataQualityFlag(f, known=validity, active=validity)
    for f in allflags:
        globalv.SEGMENTS.setdefault(f, DataQualityFlag(f))

    # read segments from global memory and get the union of needed times
    try:
        old = reduce(operator.and_, (globalv.SEGMENTS.get(
                                        f, DataQualityFlag(f)).known
                                    for f in flags))
    except TypeError:
        old = SegmentList()
    newsegs = validity - old
    # load new segments
    query &= abs(newsegs) != 0
    query &= len(allflags) > 0
    if cache is not None:
        query &= len(cache) != 0
    if query:
        if cache is not None:
            try:
                new = DataQualityDict.read(cache, list(allflags))
            except IORegistryError as e:
                # can remove when astropy >= 1.2 is required
                if type(e) is not IORegistryError:
                    raise
                if len(allflags) == 1:
                    f = list(allflags)[0]
                    new = DataQualityDict()
                    new[f] = DataQualityFlag.read(cache, f, coalesce=False)
                else:
                    raise
            for f in new:
                new[f].known &= newsegs
                new[f].active &= newsegs
                if coalesce:
                    new[f].coalesce()
                vprint("    Read %d segments for %s (%.2f%% coverage).\n"
                       % (len(new[f].active), f,
                          float(abs(new[f].known))/float(abs(newsegs))*100))
        else:
            if len(newsegs) >= 10:
                qsegs = span
            else:
                qsegs = newsegs
            # parse configuration for query
            kwargs = {}
            if url is not None:
                kwargs['url'] = url
            else:
                try:
                    kwargs['url'] = config.get('segment-database', 'url')
                except (NoSectionError, NoOptionError):
                    pass
            if kwargs.get('url', None) in SEGDB_URLS:
                query_func = DataQualityDict.query_segdb
            else:
                query_func = DataQualityDict.query_dqsegdb
            try:
                new = query_func(allflags, qsegs, on_error=segdb_error,
                                 **kwargs)
            except Exception as e:
                # ignore error from SegDB
                if segdb_error in ['ignore', None]:
                    pass
                # convert to warning
                elif segdb_error in ['warn']:
                    print('%sWARNING: %sCaught %s: %s [gwsumm.segments]'
                          % (WARNC, ENDC, type(e).__name__, str(e)),
                          file=sys.stderr)
                    warnings.warn('%s: %s' % (type(e).__name__, str(e)))
                # otherwise raise as normal
                else:
                    raise
                new = DataQualityDict()
            for f in new:
                new[f].known &= newsegs
                new[f].active &= newsegs
                if coalesce:
                    new[f].coalesce()
                vprint("    Downloaded %d segments for %s (%.2f%% coverage).\n"
                       % (len(new[f].active), f,
                          float(abs(new[f].known))/float(abs(newsegs))*100))
        # record new segments
        globalv.SEGMENTS += new
        for f in new:
            globalv.SEGMENTS[f].description = str(new[f].description)

    # return what was asked for
    if return_:
        for compound in flags:
            union, intersection, exclude, notequal = split_compound_flag(
                compound)
            if len(union + intersection) == 1:
                f = (union + intersection)[0]
                out[compound].description = globalv.SEGMENTS[f].description
                out[compound].padding = padding.get(f, (0, 0))
            for flist, op in zip([exclude, intersection, union, notequal],
                                 [operator.sub, operator.and_, operator.or_,
                                  not_equal]):
                for f in flist:
                    pad = padding.get(f, (0, 0))
                    segs = globalv.SEGMENTS[f].copy()
                    if isinstance(pad, (float, int)):
                        segs = segs.pad(pad, pad)
                    elif pad is not None:
                        segs = segs.pad(*pad)
                    if coalesce:
                        segs = segs.coalesce()
                    out[compound] = op(out[compound], segs)
                    out[compound].known &= segs.known
                    out[compound].active &= segs.known
            out[compound].known &= validity
            out[compound].active &= validity
            if coalesce:
                out[compound].coalesce()
        if isinstance(flag, str):
            return out[flag]
        else:
            return out
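The per-flag padding applied in the loop above uses DataQualityFlag.pad, which shifts the start and end of every segment by the given amounts. A sketch with invented times:

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag('X1:TEST:1', known=[(0, 100)], active=[(20, 30)])
padded = flag.pad(-2, 2)  # widen each segment by 2 s on either side
print(padded.active)      # [18, 32)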