Example #1
def downtime(vetosegs, segments, channel):
  myflag = DataQualityFlag()
  myflag.active = vetosegs
  myflag.known = segments
  plot = myflag.plot()
  plot.set_title(r'Active and vetoed segments for %s' %channel)
  plot.savefig(r'%s_downtime.png'%channel)
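A minimal standalone sketch of the kind of inputs downtime() above expects; the flag name and segment values here are hypothetical:

from gwpy.segments import DataQualityFlag, Segment, SegmentList

# hypothetical analysed (known) time and veto (active) time
segments = SegmentList([Segment(0, 100)])
vetosegs = SegmentList([Segment(10, 20), Segment(40, 60)])

flag = DataQualityFlag('X1:TEST-VETO:1', active=vetosegs, known=segments)
print(flag.active)  # the two veto segments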
Example #2
File: core.py Project: duncanmmacleod/vet
def evaluate_flag(flag, triggers=None, metrics=['deadtime'], injections=None,
                  minduration=0, vetotag='', channel=None, etg=None):
    """Evaluate the performance of a set of a `~gwpy.segments.DataQualityFlag`

    Parameters
    ----------
    flag : `~gwpy.segments.DataQualityFlag`
        the data-quality flag to be tested
    triggers : `~glue.ligolw.table.Table`, optional
        the set of analysis event triggers against which to test
    metrics : `list`, optional
        the list of `Metrics <~gwvet.Metric>`
    injections : `~glue.ligolw.table.Table`, `~gwpy.segments.SegmentList`, optional
        a list of injections, or injection segments, against which to test
        flag safety
    minduration : `float`, optional
        the minimum duration of post-veto segments, if applicable, default: 0

    Returns
    -------
    results, after : `OrderedDict`, `~glue.ligolw.table.Table`
        the results of each metric test, and the triggers after vetoes have
        been applied (or `None` if not given)
    """
    # format as flag
    if not isinstance(flag, DataQualityFlag):
        flag = DataQualityFlag(active=flag)
    else:
        flag = flag.copy()
    # remove veto-free (post-veto) segments shorter than minduration
    if minduration:
        post = type(flag.known)([s for s in (flag.known - flag.active)
                                 if float(abs(s)) >= minduration])
        flag.active = flag.known - post

    # apply vetoes to triggers
    if triggers is not None:
        triggers.etg = etg
        after = veto(triggers, flag, tag=vetotag, channel=channel, etg=etg)
    else:
        after = None

    # test each metric
    out = OrderedDict()
    for metric in metrics:
        if isinstance(metric, Metric):
            _metric = metric
        else:
            _metric = get_metric(metric)
        if _metric.name.lower() == 'safety':
            out[metric] = _metric(flag, injections)
        elif _metric.needs_triggers:
            out[metric] = _metric(flag, triggers, after=after)
        else:
            out[metric] = _metric(flag)

    return out, after
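A hedged usage sketch: the flag name and segments below are hypothetical, and this assumes the gwvet package providing evaluate_flag and its metrics is installed:

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag('X1:TEST-VETO:1',
                       active=[(10, 20), (40, 60)],
                       known=[(0, 100)])

# with no triggers given, only segment-based metrics apply; this assumes
# the 'deadtime' metric does not require triggers
results, after = evaluate_flag(flag, triggers=None, metrics=['deadtime'])
print(results['deadtime'])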
Example #3
 def test_deprecated(self):
     with pytest.warns(DeprecationWarning):
         flag = DataQualityFlag(FLAG1, active=ACTIVE, valid=KNOWN)
     with pytest.warns(DeprecationWarning):
         flag.valid
     with pytest.warns(DeprecationWarning):
         flag.valid = flag.known
     with pytest.warns(DeprecationWarning):
         del flag.valid
Example #4
 def test_coalesce(self):
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     self.assertFalse(flag.regular,
                      'flag.regular test failed (should be False)')
     flag.coalesce()
     self.assertTrue(flag.known == KNOWN, 'flag.known changed by coalesce')
     self.assertTrue(flag.active == KNOWNACTIVE,
                     'flag.active misset by coalesce')
     self.assertTrue(flag.regular,
                     'flag.regular test failed (should be True)')
Example #5
 def test_pad(self):
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     # test without arguments (and no padding)
     padded = flag.pad()
     self.assertListEqual(padded.known, flag.known)
     self.assertListEqual(padded.active, flag.active)
     # test without arguments (using the flag's own padding attribute)
     flag.padding = PADDING
     padded = flag.pad()
     self.assertListEqual(padded.known, KNOWNPAD)
     self.assertListEqual(padded.active, ACTIVEPAD)
     # test with arguments
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     padded = flag.pad(*PADDING)
     self.assertListEqual(padded.known, KNOWNPAD)
     self.assertListEqual(padded.active, ACTIVEPAD)
     # test coalesce
     padded.coalesce()
     self.assertListEqual(padded.active, ACTIVEPADC)
     # test in-place
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     padded = flag.pad(*PADDING)
     self.assertIsNot(flag, padded)
     padded = flag.pad(*PADDING, inplace=True)
     self.assertIs(flag, padded)
     # test other kwargs fail
     self.assertRaises(TypeError, flag.pad, *PADDING, kwarg='test')
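A small standalone sketch of the pad() arithmetic exercised by this test, assuming the additive behaviour (each segment becomes (start + pad[0], end + pad[1])); the flag name and times are hypothetical:

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag('X1:TEST-FLAG:1',
                       active=[(10, 20)], known=[(0, 30)])
padded = flag.pad(-1, 1)        # widen each segment by 1 s on both sides
print(padded.active)            # roughly [(9, 21)]
print(flag.pad(-1, 1, inplace=True) is flag)   # True: modified in place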
Example #6
 def test_write_hdf5(self, delete=True):
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     hdfout = self.tmpfile % 'hdf'
     try:
         flag.write(hdfout)
     except ImportError as e:
         self.skipTest(str(e))
     else:
         if delete:
             os.remove(hdfout)
     return hdfout
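A round-trip sketch mirroring the write/read tests shown here (assumes h5py is installed; the file name and segments are arbitrary):

import os
from gwpy.segments import DataQualityFlag

flag = DataQualityFlag('X1:TEST-FLAG:1',
                       active=[(0, 1), (3, 4)], known=[(0, 5)])
flag.write('test-flag.h5')              # HDF5 format inferred from extension
readback = DataQualityFlag.read('test-flag.h5')
assert readback.active == flag.active
os.remove('test-flag.h5')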
Example #7
def summary_stats(statistics, bbh_trigs, omicron_trigs, channel, vetosegs, segments):
  # Metrics being considered
  eff = get_metric('efficiency')
  dt = get_metric('deadtime')
  eff_over_dt = get_metric('efficiency/deadtime')
  usep = get_metric('my use percentage')
  loudbysnr = get_metric('loudest event by snr')
  get_percentile = get_metric('percentile')
  myflag = DataQualityFlag()
  myflag.active = vetosegs
  myflag.known = segments
  statistics[i] = (channel, eff(myflag, bbh_trigs).value, dt(myflag).value,
                   eff_over_dt(myflag, bbh_trigs).value,
                   usep(myflag, omicron_trigs).value,
                   loudbysnr(myflag, bbh_trigs).value)
Example #8
def check_flag(flag, gpstime, duration, pad):
    """Check that a state flag is active during an entire analysis segment

    Parameters
    ----------
    flag : `str`
        state flag to check

    gpstime : `float`
        GPS time of required data

    duration : `float`
        duration (in seconds) of required data

    pad : `float`
        amount of extra data to read in at the start and end for filtering

    Returns
    -------
    check : `bool`
        Boolean switch to pass (`True`) or fail (`False`) depending on whether
        the given flag is active
    """
    # set GPS start and end time
    start = gpstime - duration/2. - pad
    end = gpstime + duration/2. + pad
    seg = Segment(start, end)
    # query for state segments
    active = DataQualityFlag.query(flag, start, end,
                                   url=DEFAULT_SEGMENT_SERVER).active
    # check that state flag is active during the entire analysis
    if (not active.intersects_segment(seg)) or (abs(active[0]) < abs(seg)):
        return False
    return True
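The duration test above relies on abs() of a Segment returning its length; a quick sketch of that arithmetic with hypothetical times:

from gwpy.segments import Segment

gpstime, duration, pad = 1126259462, 64, 1
seg = Segment(gpstime - duration/2. - pad, gpstime + duration/2. + pad)
print(abs(seg))   # 66.0: the full span that must be covered by the flag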
Example #9
 def test_read_ligolw(self):
     flag = DataQualityFlag.read(SEGXML, FLAG1, coalesce=False)
     self.assertTrue(flag.active == ACTIVE,
                     'DataQualityFlag.read(ligolw) mismatch:\n\n%s\n\n%s'
                     % (ACTIVE, flag.active))
     self.assertTrue(flag.known == KNOWN,
                     'DataQualityFlag.read(ligolw) mismatch:\n\n%s\n\n%s'
                     % (KNOWN, flag.known))
Example #10
def query_state_segments(flag, start, end, url=DEFAULT_SEGMENT_SERVER,
                         pad=(0, 0)):
    """Query a segment database for active segments associated with a flag
    """
    # NOTE: DQF.pad pads forward in time at end
    return DataQualityFlag.query(
        flag, start-pad[0], end+pad[1], url=url,
    ).coalesce().pad(pad[0], -pad[1]).active
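A local sketch of the query-wide-then-trim pattern used above, built from an in-memory flag instead of a database query; the flag name and segment values are hypothetical:

from gwpy.segments import DataQualityFlag

start, end, pad = 100, 200, (4, 4)
# pretend the database returned segments over the padded span
queried = DataQualityFlag('X1:TEST-STATE:1',
                          active=[(start - pad[0], end + pad[1])],
                          known=[(start - pad[0], end + pad[1])])
# trim back: pad() adds its first argument to each segment start and its
# second argument to each segment end
trimmed = queried.coalesce().pad(pad[0], -pad[1]).active
print(trimmed)    # roughly [(100, 200)]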
Example #11
 def test_query_dqsegdb(self):
     try:
         flag = DataQualityFlag.query_dqsegdb(
             QUERY_FLAG, QUERY_START, QUERY_END, url=QUERY_URL)
     except (ImportError, URLError) as e:
         self.skipTest(str(e))
     else:
         self.assertEqual(flag.known, QUERY_KNOWN)
         self.assertEqual(flag.active, QUERY_ACTIVE)
Example #12
 def test_query_segdb(self):
     flag = QUERY_FLAGS[0]
     try:
         result = DataQualityFlag.query_segdb(flag, QUERY_START, QUERY_END,
                                              url=QUERY_URL_SEGDB)
     except (SystemExit, LDBDClientException) as e:
         self.skipTest(str(e))
     self.assertEqual(result.known, QUERY_RESULT[flag].known)
     self.assertEqual(result.active, QUERY_RESULT[flag].active)
Example #13
 def test_query_dqsegdb_multi(self):
     querymid = int(QUERY_START + (QUERY_END - QUERY_START) /2.)
     segs = SegmentList([Segment(QUERY_START, querymid),
                         Segment(querymid, QUERY_END)])
     try:
         flag = DataQualityFlag.query_dqsegdb(
             QUERY_FLAG, segs, url=QUERY_URL)
     except (ImportError, URLError) as e:
         self.skipTest(str(e))
     else:
         self.assertEqual(flag.known, QUERY_KNOWN)
         self.assertEqual(flag.active, QUERY_ACTIVE)
Example #14
File: core.py Project: berkowitze/gwsumm
 def _read_segments(self, filename):
     segs = DataQualityFlag.read(filename, self.definition)
     # XXX HACK around malformed segment files with no segment_summary table
     if segs.active and not segs.known:
         segs.known = type(segs.active)(segs.active)
     if self.known:
         self.known = self.known & segs.known
         self.active = self.known & segs.active
     else:
         self.known = segs.known
         self.active = segs.active
     return self
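The & operations above are plain SegmentList intersections; a minimal sketch of that behaviour:

from gwpy.segments import Segment, SegmentList

known = SegmentList([Segment(0, 10)])
active = SegmentList([Segment(5, 15)])
print(known & active)   # [(5, 10)]: only the overlap survives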
Example #15
 def test_read_hdf5(self):
     try:
         hdfout = self.test_write_hdf5(delete=False)
     except ImportError as e:
         self.skipTest(str(e))
     else:
         flag = DataQualityFlag.read(hdfout)
         os.remove(hdfout)
         self.assertTrue(flag.active == ACTIVE,
                         'DataQualityFlag.read(hdf5) mismatch:\n\n%s\n\n%s'
                         % (ACTIVE, flag.active))
         self.assertTrue(flag.known == KNOWN,
                         'DataQualityFlag.read(hdf5) mismatch:\n\n%s\n\n%s'
                         % (KNOWN, flag.known))
Example #16
File: ihope.py Project: berkowitze/gwsumm
    def process(self, *args, **kwargs):
        # read the segment files
        if os.path.isfile(self.segmentfile):
            segs = DataQualityFlag.read(self.segmentfile, coalesce=False)
            self.states[0].known = segs.known
            self.states[0].active = segs.active
            self.states[0].ready = True
        else:
            warn('Segment file %s not found.' % self.segmentfile)
            return
        if len(self.states[0].active) == 0:
            warn('No segments analysed by daily ahope.')
            return
        # read the cache files
        if os.path.isfile(self.inspiralcachefile):
            with open(self.inspiralcachefile, 'r') as fobj:
                try:
                    self.inspiralcache = Cache.fromfile(fobj).sieve(
                                             segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.inspiralcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.inspiralcachefile)
            return
        if os.path.isfile(self.tmpltbankcachefile):
            with open(self.tmpltbankcachefile, 'r') as fobj:
                try:
                    self.tmpltbankcache = Cache.fromfile(fobj).sieve(
                                              segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.tmpltbankcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.tmpltbankcachefile)
            self.tmpltbankcache = Cache()

        # only process if the cache file was found
        super(DailyAhopeTab, self).process(*args, **kwargs)
Example #17
 def test_pad(self):
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     # test without arguments (and no padding)
     padded = flag.pad()
     self.assertListEqual(padded.known, flag.known)
     self.assertListEqual(padded.active, flag.active)
     # test without arguments (using the flag's own padding attribute)
     flag.padding = PADDING
     padded = flag.pad()
     self.assertListEqual(padded.known, KNOWNPAD)
     self.assertListEqual(padded.active, ACTIVEPAD)
     # test with arguments
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     padded = flag.pad(*PADDING)
     self.assertListEqual(padded.known, KNOWNPAD)
     self.assertListEqual(padded.active, ACTIVEPAD)
     # test coalesce
     padded.coalesce()
     self.assertListEqual(padded.active, ACTIVEPADC)
Example #18
 def test_pad(self):
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     # test without arguments (and no padding)
     padded = flag.pad()
     self.assertListEqual(padded.known, flag.known)
     self.assertListEqual(padded.active, flag.active)
     # test without arguments (using the flag's own padding attribute)
     flag.padding = PADDING
     padded = flag.pad()
     self.assertListEqual(padded.known, KNOWNPAD)
     self.assertListEqual(padded.active, ACTIVEPAD)
     # test with arguments
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     padded = flag.pad(*PADDING)
     self.assertListEqual(padded.known, KNOWNPAD)
     self.assertListEqual(padded.active, ACTIVEPAD)
     # test coalesce
     padded.coalesce()
     self.assertListEqual(padded.active, ACTIVEPADC)
     # test in-place
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     padded = flag.pad(*PADDING)
     self.assertIsNot(flag, padded)
     padded = flag.pad(*PADDING, inplace=True)
     self.assertIs(flag, padded)
     # test other kwargs fail
     self.assertRaises(TypeError, flag.pad, *PADDING, kwarg='test')
Example #19
def query(flag, start, end, url=DEFAULT_SEGMENT_SERVER):
    """Query a segment database for active segments associated with a flag
    """
    return DataQualityFlag.query(flag, start, end, url=url)
Example #20
 def flag():
     known = SegmentList([Segment(0, 3), Segment(6, 7)])
     active = SegmentList([Segment(1, 2), Segment(3, 4), Segment(5, 7)])
     return DataQualityFlag(name='Test segments',
                            known=known,
                            active=active)
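As in the test_coalesce example above, coalesce() sorts and merges the segment lists and restricts active time to known time; a standalone sketch using this fixture's segments (the exact output assumes that clipping behaviour):

from gwpy.segments import DataQualityFlag, Segment, SegmentList

known = SegmentList([Segment(0, 3), Segment(6, 7)])
active = SegmentList([Segment(1, 2), Segment(3, 4), Segment(5, 7)])
f = DataQualityFlag(name='Test segments', known=known, active=active)
print(f.regular)   # False: some active time falls outside known
f.coalesce()
print(f.active)    # only the active time inside known remains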
Example #21
import shutil
from unittest.mock import patch

from gwpy.segments import DataQualityFlag

from matplotlib import (use, rcParams)
use('agg')

from .. import plot

__author__ = 'Alex Urban <*****@*****.**>'

# global test objects

FLAG = DataQualityFlag(known=[(0, 66)],
                       active=[(16, 42)],
                       name='X1:TEST-FLAG:1')

# -- test utilities -----------------------------------------------------------


def test_texify():
    name = 'X1:TEST-CHANNEL_NAME'

    # test with LaTeX
    with patch.dict(rcParams, {'text.usetex': True}):
        assert plot.texify(name) == name.replace('_', r'\_')

    # test without LaTeX
    with patch.dict(rcParams, {'text.usetex': False}):
        assert plot.texify(name) == name
Example #22
 def test_repr_str(self):
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     repr(flag)
     str(flag)
Example #23
def get_segments(flag, validity=None, config=ConfigParser(), cache=None,
                 query=True, return_=True, coalesce=True, padding=None,
                 segdb_error='raise', url=None):
    """Retrieve the segments for a given flag

    Segments will be loaded from global memory if already defined,
    otherwise they will be loaded from the given
    :class:`~glue.lal.Cache`, or finally from the segment database

    Parameters
    ----------
    FIXME

    Returns
    -------
    FIXME
    """
    if isinstance(flag, (unicode, str)):
        flags = flag.split(',')
    else:
        flags = flag
    allflags = set([f for cf in flags for f in
                    re_flagdiv.split(str(cf))[::2] if f])

    if padding is None and isinstance(flag, DataQualityFlag):
        padding = {flag: flag.padding}
    elif padding is None:
        padding = dict((flag, isinstance(flag, DataQualityFlag) and
                              flag.padding or None) for flag in flags)

    # check validity
    if validity is None:
        start = config.get(DEFAULTSECT, 'gps-start-time')
        end = config.get(DEFAULTSECT, 'gps-end-time')
        span = SegmentList([Segment(start, end)])
    elif isinstance(validity, DataQualityFlag):
        validity = validity.active
        try:
            span = SegmentList([validity.extent()])
        except ValueError:
            span = SegmentList()
    else:
        try:
            span = SegmentList([SegmentList(validity).extent()])
        except ValueError:
            span = SegmentList()
    validity = SegmentList(validity)

    # generate output object
    out = DataQualityDict()
    for f in flags:
        out[f] = DataQualityFlag(f, known=validity, active=validity)
    for f in allflags:
        globalv.SEGMENTS.setdefault(f, DataQualityFlag(f))

    # read segments from global memory and get the union of needed times
    try:
        old = reduce(operator.and_, (globalv.SEGMENTS.get(
                                        f, DataQualityFlag(f)).known
                                    for f in flags))
    except TypeError:
        old = SegmentList()
    newsegs = validity - old
    # load new segments
    query &= abs(newsegs) != 0
    query &= len(allflags) > 0
    if cache is not None:
        query &= len(cache) != 0
    if query:
        if cache is not None:
            try:
                new = DataQualityDict.read(cache, list(allflags))
            except IORegistryError as e:
                # can remove when astropy >= 1.2 is required
                if type(e) is not IORegistryError:
                    raise
                if len(allflags) == 1:
                    f = list(allflags)[0]
                    new = DataQualityDict()
                    new[f] = DataQualityFlag.read(cache, f, coalesce=False)
            for f in new:
                new[f].known &= newsegs
                new[f].active &= newsegs
                if coalesce:
                    new[f].coalesce()
                vprint("    Read %d segments for %s (%.2f%% coverage).\n"
                       % (len(new[f].active), f,
                          float(abs(new[f].known))/float(abs(newsegs))*100))
        else:
            if len(newsegs) >= 10:
                qsegs = span
            else:
                qsegs = newsegs
            # parse configuration for query
            kwargs = {}
            if url is not None:
                kwargs['url'] = url
            else:
                try:
                    kwargs['url'] = config.get('segment-database', 'url')
                except (NoSectionError, NoOptionError):
                    pass
            if kwargs.get('url', None) in SEGDB_URLS:
                query_func = DataQualityDict.query_segdb
            else:
                query_func = DataQualityDict.query_dqsegdb
            try:
                new = query_func(allflags, qsegs, on_error=segdb_error,
                                 **kwargs)
            except Exception as e:
                # ignore error from SegDB
                if segdb_error in ['ignore', None]:
                    pass
                # convert to warning
                elif segdb_error in ['warn']:
                    print('%sWARNING: %sCaught %s: %s [gwsumm.segments]'
                          % (WARNC, ENDC, type(e).__name__, str(e)),
                          file=sys.stderr)
                    warnings.warn('%s: %s' % (type(e).__name__, str(e)))
                # otherwise raise as normal
                else:
                    raise
                new = DataQualityDict()
            for f in new:
                new[f].known &= newsegs
                new[f].active &= newsegs
                if coalesce:
                    new[f].coalesce()
                vprint("    Downloaded %d segments for %s (%.2f%% coverage).\n"
                       % (len(new[f].active), f,
                          float(abs(new[f].known))/float(abs(newsegs))*100))
        # record new segments
        globalv.SEGMENTS += new
        for f in new:
            globalv.SEGMENTS[f].description = str(new[f].description)

    # return what was asked for
    if return_:
        for compound in flags:
            union, intersection, exclude, notequal = split_compound_flag(
                compound)
            if len(union + intersection) == 1:
                out[compound].description = globalv.SEGMENTS[f].description
                out[compound].padding = padding.get(f, (0, 0))
            for flist, op in zip([exclude, intersection, union, notequal],
                                 [operator.sub, operator.and_, operator.or_,
                                  not_equal]):
                for f in flist:
                    pad = padding.get(f, (0, 0))
                    segs = globalv.SEGMENTS[f].copy()
                    if isinstance(pad, (float, int)):
                        segs = segs.pad(pad, pad)
                    elif pad is not None:
                        segs = segs.pad(*pad)
                    if coalesce:
                        segs = segs.coalesce()
                    out[compound] = op(out[compound], segs)
                    out[compound].known &= segs.known
                    out[compound].active &= segs.known
            out[compound].known &= validity
            out[compound].active &= validity
            if coalesce:
                out[compound].coalesce()
        if isinstance(flag, basestring):
            return out[flag]
        else:
            return out
Example #24
FLAG_HTML_WITH_PLOTS = FLAG_CONTENT.format(
    content='<pre># seg\tstart\tstop\tduration\n0\t0\t66\t66.0\n</pre>',
    plots='\n<div class="row scaffold">\n<div class="col-sm-12">\n<a '
          'href="plots/X1-TEST_FLAG-0-66.png" id="a_X1-TEST_FLAG_66" '
          'title="Known (small) and active (large) analysis segments for '
          'X1:TEST_FLAG" class="fancybox" target="_blank" data-caption='
          '"Known (small) and active (large) analysis segments for '
          'X1:TEST_FLAG" data-fancybox="gallery" data-fancybox-group="images"'
          '>\n<img id="img_X1-TEST_FLAG_66" alt="X1-TEST_FLAG-0-66.png" '
          'class="img-fluid w-100" src="plots/X1-TEST_FLAG-0-66.png" />\n</a>'
          '\n</div>\n</div>')

FLAG_HTML_NO_SEGMENTS = FLAG_CONTENT.format(
    content='<p>No segments were found.</p>', plots='')

FLAG = DataQualityFlag(known=[(0, 66)], active=[(0, 66)], name='X1:TEST_FLAG')

OMEGA_SCAFFOLD = """<div class="card card-x1">
<div class="card-header pb-0">
<h5 class="card-title"><a class="cis-link" href="https://cis.ligo.org/channel/byname/X1:STRAIN" title="CIS entry for X1:STRAIN" target="_blank">X1:STRAIN</a></h5>
</div>
<div class="card-body">
<ul class="list-group">
<li class="list-group-item">
<div class="container">
<div class="row">
<h6>
<a href="./1126259462" class="text-dark">1126259462</a>
</h6>
</div>
<div class="row scaffold">
Example #25
 def test_read_segwizard(self):
     flag = DataQualityFlag.read(SEGWIZ, FLAG1, coalesce=False)
     self.assertTrue(flag.active == ACTIVE,
                     'DataQualityFlag.read(segwizard) mismatch:\n\n%s\n\n%s'
                     % (ACTIVE, flag.active))
     self.assertTrue(flag.known == flag.active)
Example #26
 def test_plot(self):
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     plot = flag.plot()
     self.assertIsInstance(plot, SegmentPlot)
     self.assertIsInstance(plot.gca(), SegmentAxes)
     self.assertEqual(plot.gca().get_epoch(), flag.known[0][0])
Example #27
args = parser.parse_args()

freq = args.freq
year = args.year
month = args.month
day = args.day

if len(month) < 2:
    month = "0" + month
if len(day) < 2:
    day = "0" + day

#=============Get locked segments=============

locked = DataQualityFlag.read("/home/detchar/Segments/K1-DET_FOR_GRB200415A/" +
                              year + "/K1-DET_FOR_GRB200415A_UTC_" + year +
                              "-" + month + "-" + day + ".xml")

# Remove segments shorter than 94 sec

act = SegmentList()
for seg in locked.active:
    duration = seg[1] - seg[0]
    if duration >= 94:
        act.append(seg)

# Remove last 30 sec and margin 2 sec
act = act.contract(17)
act = act.shift(-15)
locked.active = act
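The duration filter above can be written more compactly with abs(); a standalone sketch with a hypothetical flag and the same 94 s threshold:

from gwpy.segments import DataQualityFlag, SegmentList

locked = DataQualityFlag('K1:TEST-LOCK:1',
                         active=[(0, 50), (100, 300)], known=[(0, 400)])
# keep only active segments at least 94 s long
act = SegmentList([seg for seg in locked.active if abs(seg) >= 94])
print(act)    # only the 200 s segment survives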
Example #28
 def test_protract(self):
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     flag.protract(.1)
     self.assertListEqual(flag.active, ACTIVE_PROTRACTED)
Example #29
def validate_segments(ifos, start, end, cp, trigger_time=None):
    """determine analysis ready segments during requested analysis time
    Parameters
    ----------
    ifos : `str`
        list of ifos used in X-Pipeline analysis

    start : `float`, :class:`~gwpy.time.LIGOTimeGPS`

    end : `float`, `~gwpy.time.LIGOTimeGPS`

    cp : `object` ConfigParser object

    trigger_time

    Returns
    -------
    DataQualityDict : ~gwpy.segments.DataQualityDict`
    """
    analysis_seg_files = []

    # If simulating noise, skip all segment, veto and network validation
    # checks: the on-source and off-source data are simulated and therefore
    # have no DQ issues.
    if cp.has_option('parameters', 'makeSimulatedNoise'):
        for ifo in ifos:
            print('Making simulated noise, creating temp segment file')
            f = open('segments_{0}.txt'.format(ifo), 'w')
            f.write('0 {0} {1} {2}'.format(start, end, end - start))
            f.close()
            analysis_seg_files.append('segments_{0}.txt'.format(ifo))
    else:
        for ifo in ifos:
            if cp.has_option(ifo, 'segment-list'):
                if not os.path.isfile(cp.get(ifo, 'segment-list')):
                    raise ValueError('The segment-list file given in the '
                                     'ini file does not exist; please '
                                     'provide a valid file, or remove the '
                                     'segment-list option to query for '
                                     'segments instead.')
                else:
                    analysis_seg_files.append(cp.get(ifo, 'segment-list'))
            else:
                # Query for veto definer file
                vdf = query_veto_definer_file(ifo, start, end, cp)

                # Filter for cat1 vetos
                segs = filter_for_cat_type(vdf, ifo, [1])

                # ---- Write out cat1 veto to text file.
                filename_cat1 = "input/" + ifo + "-veto-cat1.txt"
                segs.write(filename_cat1)

                # Compute analysis ready segments in order to
                # subtract out cat1 vetos
                analysis_ready = DataQualityFlag.query(
                    '{0}:DMT-ANALYSIS_READY:1'.format(ifo), start, end)

                # Subtract cat 1 veto from analysis_ready
                analysis_ready_minus_cat1 = analysis_ready.active - segs
                # Save new segment list to file
                filename_analysis_ready_minus_cat1 = "input/" + ifo + "_science_cat1.txt"
                analysis_ready_minus_cat1.write(
                    filename_analysis_ready_minus_cat1)
                analysis_seg_files.append(filename_analysis_ready_minus_cat1)

    return analysis_seg_files
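The subtraction step above is ordinary SegmentList arithmetic; a standalone sketch with hypothetical science and CAT1 segments:

from gwpy.segments import Segment, SegmentList

science = SegmentList([Segment(0, 1000)])
cat1 = SegmentList([Segment(200, 300), Segment(600, 650)])
analysis_ready_minus_cat1 = science - cat1
print(analysis_ready_minus_cat1)   # (0, 200), (300, 600), (650, 1000)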
Example #30
File: make_dag.py Project: tjma12/ligo_ml
#! /usr/bin/env python
import sys

from gwpy.segments import DataQualityFlag


def add_job(the_file, job_type, job_number, **kwargs):
    job_id = "%s%.6u" % (job_type, job_number)
    the_file.write("JOB %s %s.sub\n" % (job_id, job_type))
    vars_line = " ".join(
        ['%s="%s"' % (arg, str(val)) for arg, val in kwargs.iteritems()])
    the_file.write("VARS %s %s\n" % (job_id, vars_line))
    the_file.write("\n")


if __name__ == "__main__":
    segment_file = sys.argv[1]
    sci_segs = DataQualityFlag.read(segment_file)

    ifo = sci_segs.ifo
    segs = sci_segs.active

    jobtypes = ['target', 'aux']

    fdag = open("my.dag", 'w')
    for idx, seg in enumerate(segs):
        for jobtype in jobtypes:
            add_job(fdag, jobtype, idx, ifo=ifo, st=seg.start, et=seg.end)
Example #31
 def test_plot(self):
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     plot = flag.plot()
     self.assertIsInstance(plot, SegmentPlot)
     self.assertIsInstance(plot.gca(), SegmentAxes)
     self.assertEqual(plot.gca().get_epoch(), flag.known[0][0])
Example #32
__author__ = 'Duncan Macleod <*****@*****.**>'

# .. currentmodule:: gwpy.segments
#
# Getting the segments
# --------------------
#
# First, we need to fetch the Open Data timeline segments from LOSC.
# To do that we can call the :meth:`DataQualityFlag.fetch_open_data` method
# using ``'H1_DATA'`` as the flag (for an explanation of what this means,
# read up on `The S6 Data Release <https://losc.ligo.org/S6/>`__).

from gwpy.segments import DataQualityFlag

h1segs = DataQualityFlag.fetch_open_data('H1_DATA', 'Sep 16 2010',
                                         'Sep 17 2010')

# For sanity, let's plot these segments:

splot = h1segs.plot(figsize=[12, 3])
splot.show()
splot.close()  # hide

# We see that the LIGO Hanford Observatory detector was operating for the
# majority of the day, with a few outages of ~30 minutes or so.

# We can use the :func:`abs` function to display the total amount of time
# spent taking data:

print(abs(h1segs.active))
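# As a follow-on sketch (not part of the original tutorial), the same
# arithmetic gives a rough duty factor for the day, assuming the known
# segments cover the full queried interval:

print(abs(h1segs.active) / abs(h1segs.known))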
Example #33
 def test_protract(self):
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     flag.protract(.1)
     self.assertListEqual(flag.active, ACTIVE_PROTRACTED)
Example #34
def main(args=None):
    """Run the online Guardian node visualization tool
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    # parse command line options
    ifo = args.ifo
    if not args.ifo:
        parser.error('--ifo must be given if not obvious from the host')
    start = getattr(args, 'gpsstart')
    end = getattr(args, 'gpsend')
    duration = int(ceil(end) - floor(start))
    categories = args.categories.split(',')
    for i, c in enumerate(categories):
        try:
            categories[i] = int(c)
        except (TypeError, ValueError):
            pass
    vetofile = getattr(args, 'veto-definer-file')
    vetofile = (urlparse(vetofile).netloc or os.path.abspath(vetofile))
    args.metric = args.metric or DEFAULT_METRICS

    # -- setup --------------------------------------

    tag = '%d-%d' % (start.seconds, end.seconds)
    outdir = os.path.abspath(os.path.join(args.output_directory, tag))
    mkdir(outdir)
    os.chdir(outdir)
    mkdir('etc', 'segments', 'condor')

    # -- segment handling ---------------------------

    os.chdir('segments')
    ALLSEGMENTS = DataQualityDict()

    # -- get analysis segments ----------------------

    aflags = args.analysis_segments
    asegments = DataQualityFlag('%s:VET-ANALYSIS_SEGMENTS:0' % ifo)
    for i, flag in enumerate(aflags):
        # use union of segments from a file
        if os.path.isfile(flag):
            asegments += DataQualityFlag.read(flag)
        # or intersection of segments from multiple flags
        else:
            new = DataQualityFlag.query(flag, start, end, url=args.segdb)
            if i:
                asegments.known &= new.known
                asegments.active &= new.active
            else:
                asegments.known = new.known
                asegments.active = new.active
    ALLSEGMENTS[asegments.name] = asegments

    if os.path.isfile(aflags[0]):
        asegments.filename = aflags

    # -- read veto definer and process --------------

    if urlparse(vetofile).netloc:
        tmp = urlopen(vetofile)
        vetofile = os.path.abspath(os.path.basename(vetofile))
        with open(vetofile, 'w') as f:
            f.write(tmp.read())
        LOGGER.info('Downloaded veto definer file')
    vdf = DataQualityDict.from_veto_definer_file(vetofile,
                                                 format='ligolw',
                                                 start=start,
                                                 end=end,
                                                 ifo=ifo)
    LOGGER.info('Read %d flags from veto definer' % len(vdf.keys()))

    # populate veto definer file from database
    vdf.populate(source=args.segdb, on_error=args.on_segdb_error)
    ALLSEGMENTS += vdf

    # organise flags into categories
    flags = dict((c, DataQualityDict()) for c in categories)
    for name, flag in vdf.items():
        try:
            flags[flag.category][name] = flag
        except KeyError:
            pass

    # find the states and segments for each category
    states, after, oldtitle = (dict(), None, '')
    for i, category in enumerate(categories):
        title = isinstance(category, int) and 'Cat %d' % category or category
        tag = re_cchar.sub('_', str(title).upper())
        states[category] = SummaryState(
            'After %s' % oldtitle,
            key=tag,
            known=after.known,
            active=after.active,
            definition=after.name,
        ) if i else SummaryState(
            args.analysis_name,
            key=args.analysis_name,
            definition=asegments.name,
        )
        try:
            segs = flags[category].union()
        except TypeError:  # no flags
            segs = DataQualityFlag()
        segs.name = '%s:VET-ANALYSIS_%s:0' % (ifo, tag)
        ALLSEGMENTS[segs.name] = segs
        after = (after - segs) if i else (asegments - segs)
        after.name = '%s:VET-ANALYSIS_AFTER_%s:0' % (ifo, tag)
        ALLSEGMENTS[after.name] = after
        oldtitle = title

    # write all segments to disk
    segfile = os.path.abspath('%s-VET_SEGMENTS-%d-%d.xml.gz' %
                              (ifo, start.seconds, duration))
    ALLSEGMENTS.write(segfile)

    os.chdir(os.pardir)

    if args.verbose:
        LOGGER.debug("All segments accessed and written to\n%s" % segfile)

    # -- job preparation ----------------------------

    os.chdir('etc')

    configs = []
    for category in categories:
        title = (isinstance(category, int) and 'Category %d' % category
                 or category)
        tab = 'tab-%s' % title
        config = ConfigParser()

        # add segment-database configuration
        add_config_section(config, 'segment-database', url=args.segdb)

        # add plot configurations
        pconfig = ConfigParser()
        pconfig.read(args.config_file)
        for section in pconfig.sections():
            if section.startswith('plot-'):
                config._sections[section] = pconfig._sections[section].copy()

        try:
            plots = pconfig.items('plots-%s' % category, raw=True)
        except NoSectionError:
            try:
                plots = pconfig.items('plots', raw=True)
            except NoSectionError:
                plots = []

        # add state
        if args.independent:
            state = states[categories[0]]
        else:
            state = states[category]
        sname = 'state-%s' % state.key
        add_config_section(config,
                           sname,
                           key=state.key,
                           name=state.name,
                           definition=state.definition,
                           filename=segfile)

        # add plugin
        add_config_section(config, 'plugins', **{'gwvet.tabs': ''})

        # define metrics
        if category == 1:
            metrics = ['Deadtime']
        else:
            metrics = args.metric

        # define summary tab
        if category == 1:
            tab = configure_veto_tab(config,
                                     title,
                                     title,
                                     state,
                                     flags[category].keys(),
                                     segfile,
                                     metrics,
                                     name='Summary',
                                     **{'veto-name': title})
        else:
            tab = configure_veto_tab(config,
                                     title,
                                     title,
                                     state,
                                     flags[category].keys(),
                                     segfile,
                                     metrics,
                                     name='Summary',
                                     **{
                                         'veto-name': title,
                                         'event-channel': args.event_channel,
                                         'event-generator':
                                         args.event_generator,
                                     })
        if len(categories) == 1:
            config.set(tab, 'index',
                       '%(gps-start-time)s-%(gps-end-time)s/index.html')
        for key, value in plots:
            if re.match('%\(flags\)s (?:plot-)?segments', value):  # noqa: W605
                config.set(tab, key, '%%(union)s,%s' % value)
                if '%s-labels' % key not in plots:
                    config.set(tab, '%s-labels' % key, 'Union,%(flags)s')
            else:
                config.set(tab, key, value)

        # now a tab for each flag
        for flag in flags[category]:
            if category == 1:
                tab = configure_veto_tab(config, flag, title, state, [flag],
                                         segfile, metrics)
            else:
                tab = configure_veto_tab(
                    config, flag, title, state, [flag], segfile, metrics, **{
                        'event-channel': args.event_channel,
                        'event-generator': args.event_generator
                    })
                if args.event_file:
                    config.set(tab, 'event-file', args.event_file)
            for key, value in plots:
                config.set(tab, key, value)

        if len(categories) > 1 and category != categories[-1]:
            with open('%s.ini' % re_cchar.sub('-', title.lower()), 'w') as f:
                config.write(f)
                configs.append(os.path.abspath(f.name))

    # configure summary job
    if len(categories) > 1:
        state = states[categories[0]]
        add_config_section(config,
                           'state-%s' % state.key,
                           key=state.key,
                           name=state.name,
                           definition=state.definition,
                           filename=segfile)
        try:
            plots = pconfig.items('plots', raw=True)
        except NoSectionError:
            plots = []
        flags = [f for c in categories for f in flags[c].keys()]
        tab = configure_veto_tab(
            config,
            'Impact of full veto definer file',
            None,
            state,
            flags,
            segfile,
            args.metric,
            shortname='Summary',
            index='%(gps-start-time)s-%(gps-end-time)s/index.html',
            **{
                'event-channel': args.event_channel,
                'event-generator': args.event_generator,
                'veto-name': 'All vetoes'
            })
        if args.event_file:
            config.set(tab, 'event-file', args.event_file)
        for key, value in plots:
            config.set(tab, key, value)
        with open('%s.ini' % re_cchar.sub('-', title.lower()), 'w') as f:
            config.write(f)
            configs.append(os.path.abspath(f.name))

    os.chdir(os.pardir)

    if args.verbose:
        LOGGER.debug("Generated configuration files for each category")

    # -- condor preparation -------------------------

    os.chdir(os.pardir)

    # get condor variables
    if getuser() == 'detchar':
        accgroup = 'ligo.prod.o1.detchar.dqproduct.gwpy'
    else:
        accgroup = 'ligo.dev.o1.detchar.dqproduct.gwpy'

    gwsumm_args = [
        '--gps-start-time',
        str(start.seconds),
        '--gps-end-time',
        str(end.seconds),
        '--ifo',
        ifo,
        '--file-tag',
        'gwpy-vet',
        '--condor-command',
        'accounting_group=%s' % accgroup,
        '--condor-command',
        'accounting_group_user=%s' % getuser(),
        '--on-segdb-error',
        args.on_segdb_error,
        '--output-dir',
        args.output_directory,
    ]
    for cf in args.global_config:
        gwsumm_args.extend(('--global-config', cf))
    for cf in configs:
        gwsumm_args.extend(('--config-file', cf))
    if args.verbose:
        gwsumm_args.append('--verbose')

    if args.verbose:
        LOGGER.debug('Generating summary DAG via:\n')
        LOGGER.debug(' '.join([batch.PROG] + gwsumm_args))

    # execute gwsumm in batch mode
    batch.main(args=gwsumm_args)
Example #35
# NOTE TO SELF: if there were no active segments, do we still want to produce the DQ flag, or should we tell it to quit out?

known_start = [knownsegments[i, 0] for i in range(len(knownsegments))]
known_end = [knownsegments[i, 1] for i in range(len(knownsegments))]

# reading in trigger files
data = numpy.loadtxt('total_' + args.type_dq_flag + '_trigs.txt')

# get an array for the start_time and end_time of each segment
start_time = [data[i, 0] for i in range(len(data))]
end_time = [data[i, 1] for i in range(len(data))]

# create a data quality flag object
#zip will truncate the start and end time. is this OK?
flag = DataQualityFlag(flag_name,
                       active=zip(start_time, end_time),
                       known=zip(known_start, known_end))

# write flag
flag.write(name)

print("Created DQ Flag: " + flag_name + " in .xml form as: " + name)
###################################################
##############CREATING VET .INI FILE###############
###################################################

config = ConfigParser.RawConfigParser()

config.add_section('plugins')
config.set('plugins', 'gwvet.tabs', ' ')
Example #36

params = parse_command_line()

st_dir = int(str(params.st)[0:5])
et_dir = int(str(params.et)[0:5])
dirs = np.arange(st_dir, et_dir + 1)
pipeline_dict = coh_io.read_pipeline_ini(params.ini)
env_params, run_params = coh_io.check_ini_params(pipeline_dict)
segs = []
for directory in dirs:
    seg_files = sorted(
        glob.glob('%s/SEGMENTS/%d' %
                  (env_params['base_directory'], directory)))
    for f in seg_files:
        temps = DataQualityFlag.read(f)
        segs.append(temps)

plots = sorted(glob.glob('%s/*.png' % params.directory))
notches_page = 'webpage/notches.html'
params_page = 'webpage/params.html'
diagnostic_plots_page = 'webpage/diagnostic_plots.html'
index_page = 'webpage/index.html'
os.system('mkdir -p webpage')
os.system('touch %s' % notches_page)
os.system('touch %s' % params_page)
os.system('touch %s' % diagnostic_plots_page)
os.system('touch %s' % index_page)
os.system('cp main.css webpage/')
make_index_page(index_page, params)
make_diagnostic_page(diagnostic_plots_page, params)
Example #37
# set up channel name for grabbing data
channel = ['%s,m-trend' % args.channel]

# Threshold used to generate this flag
thresh = 190 #nm/s

# set up segment file name
chan = channel[0].replace(':', '_')
segment_file = 'L1_EARTHQUAKE_GT_%s_%s-%s.xml' % (thresh, args.start, abs(args.end-args.start))

# name the DQ flag
optic = channel[0].split('_')[2]
flag_name = 'L1:DCH-EQ_%s_GT_%s:1' % (optic, thresh)

# grab all observing (or whatever is defined) time
active = DataQualityFlag.query_dqsegdb(args.science, args.start, args.end).active

# grab only data for the STS channel in observing time
data = get_timeseries_dict(channel, active, frametype='L1_M')

# find times above threshold
time = [j.times[j > thresh] for j in data[channel[0]]]
times = numpy.concatenate(time)

# put all times above threshold in to segments
segs = segments.segmentlist()
segs.extend([segments.segment(int(t.value), int(t.value)+args.stride) for t in times])
segs = segs.coalesce()

# set up the xml file by making a list of the start and end times of the flag
start_time = []
Example #38
File: hveto.py Project: pvasired/gwsumm
    def process(self, config=GWSummConfigParser(), **kwargs):

        # set params
        self.rounds = None

        if not os.path.isdir(self.directory):
            self.rounds = None
            return

        # get some basic info
        ifo = config.get('DEFAULT', 'ifo')

        # read the configuration
        d = os.path.realpath(self.directory).rstrip('/')
        self.conf = dict()
        confs = glob(os.path.join(d, '%s-HVETO_CONF-*-*.txt' % ifo))
        if len(confs) != 1:
            self.rounds = 'FAIL'
            return
        conffile = confs[0]
        try:
            with open(conffile) as f:
                self.conf = dict()
                lines = f.readlines()[3:]
                for line in lines:
                    try:
                        key, val = line.split(': ', 1)
                        self.conf[key.strip()] = eval(val)
                    except (ValueError, SyntaxError, NameError):
                        pass
        except IOError:
            self.rounds = 'FAIL'
            return
        else:
            etg = self.conf.pop('AUXtype', None)
            if 'DEfnm' in self.conf:
                name = re_quote.sub('', self.conf['DEfnm'])
                self.primary = '%s:%s' % (ifo, name)
                if 'DEtype' in self.conf:
                    hetg = re_quote.sub('', self.conf['DEtype'])
                    if re.search('_%s\Z' % hetg, self.primary, re.I):
                        self.primary = self.primary[:-len(hetg) - 1]
            else:
                self.primary = None

        # find the segments
        try:
            ce = CacheEntry.from_T050017(conffile)
        except (ValueError):
            start = int(self.span[0])
            duration = int(abs(self.span))
            span = self.span
        else:
            start = int(ce.segment[0])
            duration = int(abs(ce.segment))
            span = ce.segment
        try:
            statefile = self.conf['dqfnm']
        except KeyError:
            statefile = '%s-HVETO_DQ_SEGS-%d-%d.txt' % (ifo, start, duration)
        if not os.path.isfile(os.path.join(self.directory, statefile)):
            self.rounds = 'NOSEGMENTS'
            return

        # find the results table
        resultsfile = os.path.join(self.directory, 'summary_stats.txt')
        if not os.path.isfile(resultsfile):
            self.rounds = 'FAIL'
            return

        # determine the Hveto state
        cache = Cache(
            [CacheEntry.from_T050017(os.path.join(self.directory, statefile))])
        segments = SegmentList.read(cache)
        globalv.SEGMENTS[self.states[0].definition] = DataQualityFlag(
            self.states[0].definition, known=[span], active=segments)
        self.finalize_states(config=config, query=False)

        # read results file
        self.rounds = []
        with open(resultsfile, 'r') as f:
            for line in f.readlines():
                self.rounds.append(
                    dict(zip(self.summaryrows,
                             line.split(' ')[1:])))
                # fix channel name
                c = '%s:%s' % (ifo, self.rounds[-1]['Winning channel'])
                if etg and re.search('_%s\Z' % etg, c, re.I):
                    c = c.rsplit('_', 1)[0]
                self.rounds[-1]['Winning channel'] = c

        # read starting triggers
        rawfile = ('%s-HVETO_RAW_TRIGS_ROUND_0-%d-%d.txt' %
                   (ifo, start, duration))
        cache = Cache(
            [CacheEntry.from_T050017(os.path.join(self.directory, rawfile))])
        get_triggers('%s:hveto_start' % ifo,
                     'hveto', [self.span],
                     config=config,
                     cache=cache,
                     tablename='sngl_burst',
                     return_=False)

        get_triggers('%s:hveto_vetoed_all' % ifo,
                     'hveto', [self.span],
                     config=config,
                     cache=Cache(),
                     tablename='sngl_burst')
        for r in range(1, len(self.rounds) + 1):
            # read round veto triggers
            rawfile = ('%s-HVETO_VETOED_TRIGS_ROUND_%d-%d-%d.txt' %
                       (ifo, r, start, duration))
            cache = Cache([
                CacheEntry.from_T050017(os.path.join(self.directory, rawfile))
            ])
            trigs = get_triggers('%s:hveto_vetoed_round %d' % (ifo, r),
                                 'hveto', [self.span],
                                 config=config,
                                 cache=cache,
                                 tablename='sngl_burst')
            globalv.TRIGGERS['%s:hveto_vetoed_all,hveto' % ifo].extend(trigs)
            # read round veto segments
            segfile = ('%s-HVETO_VETO_SEGS_ROUND_%d-%d-%d.txt' %
                       (ifo, r, start, duration))
            cache = Cache([
                CacheEntry.from_T050017(os.path.join(self.directory, segfile))
            ])
            get_segments('%s:hveto_veto_segs_round_%d' % (ifo, r), [self.span],
                         config=config,
                         cache=cache,
                         return_=False)

        for plot in self.plots:
            if isinstance(plot, HvetoSegmentSummaryPlot):
                plot.find_flags()

        kwargs['trigcache'] = Cache()
        kwargs['segmentcache'] = Cache()
        super(HvetoTab, self).process(config=config, **kwargs)

        # find some plots
        for plot in ['OVERAL_HISTOGRAM', 'OVERAL_EFF_DT'][::-1]:
            filename = ('%s-HVETO_%s-%d-%d.png' % (ifo, plot, start, duration))
            plotfile = os.path.join(self.directory, filename)
            if os.path.isfile(plotfile):
                p = SummaryPlot(os.path.join(self.url, filename), new=False)
                p.state = self.states[0]
                self.plots.insert(0, p)

        # delete data from archive
        del globalv.SEGMENTS[self.states[0].definition]
        for row in range(1, len(self.rounds) + 1):
            del globalv.SEGMENTS['%s:hveto_veto_segs_round_%s' % (ifo, row)]
Example #39
import numpy
from unittest import mock

from gwpy.timeseries import (TimeSeries, TimeSeriesDict)
from gwpy.segments import (Segment, DataQualityFlag)

from .. import datafind

__author__ = 'Alex Urban <*****@*****.**>'

# global test objects

HOFT = TimeSeries(numpy.random.normal(loc=1, scale=.5, size=16384 * 66),
                  sample_rate=16384,
                  epoch=0,
                  name='X1:TEST-STRAIN')

FLAG = DataQualityFlag(known=[(-33, 33)],
                       active=[(-33, 33)],
                       name='X1:TEST-FLAG:1')

# -- make sure data can be read -----------------------------------------------


@mock.patch('gwpy.segments.DataQualityFlag.query', return_value=FLAG)
def test_check_flag(segserver):
    # attempt to query segments database for an analysis flag
    flag = 'X1:TEST-FLAG:1'
    assert datafind.check_flag(flag, gpstime=0, duration=64, pad=1) is True
    assert datafind.check_flag(flag, gpstime=800, duration=64, pad=1) is False


@mock.patch('gwpy.io.gwf.iter_channel_names', return_value=['X1:TEST-STRAIN'])
def test_remove_missing_channels(io_gwf):
Example #40
File: archive.py Project: gwpy/gwsumm
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source

    This method reads all found data into the data containers defined by
    the `gwsumm.globalv` module, then returns nothing.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:

        # -- channels ---------------------------

        try:
            ctable = Table.read(h5file['channels'])
        except KeyError:  # no channels table written
            pass
        else:
            for row in ctable:
                chan = get_channel(row['name'])
                for p in ctable.colnames[1:]:
                    if row[p]:
                        setattr(chan, p, row[p])

        # -- timeseries -------------------------

        for dataset in h5file.get('timeseries', {}).values():
            ts = TimeSeries.read(dataset, format='hdf5')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name) and
                    ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() != mode.Mode.day:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # -- statevector -- ---------------------

        for dataset in h5file.get('statevector', {}).values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # -- spectrogram ------------------------

        for tag, add_ in zip(
                ['spectrogram', 'coherence-components'],
                [add_spectrogram, add_coherence_component_spectrogram]):
            for key, dataset in h5file.get(tag, {}).items():
                key = key.rsplit(',', 1)[0]
                spec = Spectrogram.read(dataset, format='hdf5')
                spec.channel = get_channel(spec.channel)
                add_(spec, key=key)

        # -- segments ---------------------------

        for name, dataset in h5file.get('segments', {}).items():
            dqflag = DataQualityFlag.read(h5file, path=dataset.name,
                                          format='hdf5')
            globalv.SEGMENTS += {name: dqflag}

        # -- triggers ---------------------------

        for dataset in h5file.get('triggers', {}).values():
            load_table(dataset)
Example #41
File: archive.py Project: pvasired/gwsumm
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:
        # read all time-series data
        try:
            group = h5file['timeseries']
        except KeyError:
            group = dict()
        for dataset in group.itervalues():
            ts = TimeSeries.read(dataset, format='hdf')
            if (re.search('\.(rms|min|mean|max|n)\Z', ts.channel.name) and
                    ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search('\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() == mode.SUMMARY_MODE_DAY:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # read all state-vector data
        try:
            group = h5file['statevector']
        except KeyError:
            group = dict()
        for dataset in group.itervalues():
            sv = StateVector.read(dataset, format='hdf')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # read all spectrogram data
        try:
            group = h5file['spectrogram']
        except KeyError:
            group = dict()
        for key, dataset in group.iteritems():
            key = key.rsplit(',', 1)[0]
            spec = Spectrogram.read(dataset, format='hdf')
            spec.channel = get_channel(spec.channel)
            add_spectrogram(spec, key=key)

        try:
            group = h5file['segments']
        except KeyError:
            group = dict()
        for name, dataset in group.iteritems():
            dqflag = DataQualityFlag.read(dataset, format='hdf')
            globalv.SEGMENTS += {name: dqflag}
Example #42
File: segments.py Project: gwpy/gwsumm
def get_segments(flag, validity=None, config=ConfigParser(), cache=None,
                 query=True, return_=True, coalesce=True, padding=None,
                 segdb_error='raise', url=None):
    """Retrieve the segments for a given flag

    Segments will be loaded from global memory if already defined,
    otherwise they will be loaded from the given
    :class:`~glue.lal.Cache`, or finally from the segment database

    Parameters
    ----------
    flag : `str`, `list`
        either the name of one flag, or a list of names

    validity : `~gwpy.segments.SegmentList`
        the segments over which to search for other segments

    query : `bool`, optional, default: `True`
        actually execute a read/query operation (if needed), otherwise
        just retrieve segments that have already been cached

    config : `~configparser.ConfigParser`, optional
        the configuration for your analysis, if you have one. If
        present, the ``[segment-database]`` section will be queried
        for the following options

        - ``gps-start-time``, and ``gps-end-time``, if ``validity`` is
          not given
        - ``url`` (the remote hostname for the segment database) if
          the ``url`` keyword is not given

    cache : :class:`glue.lal.Cache`, optional
        a cache of files from which to read segments, otherwise segments
        will be downloaded from the segment database

    coalesce : `bool`, optional, default: `True`
        coalesce all segmentlists before returning, otherwise just return
        segments as they were downloaded/read

    padding : `tuple`, or `dict` of `tuples`, optional
        `(start, end)` padding with which to pad segments that are
        downloaded/read

    segdb_error : `str`, optional, default: ``'raise'``
        how to handle errors returned from the segment database, one of

        - ``'raise'`` (default) : raise the exception as normal
        - ``'warn'`` : print the exception as a warning, but return no
          segments
        - ``'ignore'`` : silently ignore the error and return no segments

    url : `str`, optional
        the remote hostname for the target segment database

    return_ : `bool`, optional, default: `True`
        internal flag to enable (True) or disable (False) actually returning
        anything. This is useful if you want to download/read segments now
        but not use them until later (e.g. plotting)

    Returns
    -------
    flag : `~gwpy.segments.DataQualityFlag`
        the flag object representing segments for the given single flag, OR

    flagdict : `~gwpy.segments.DataQualityDict`
        the dict of `~gwpy.segments.DataQualityFlag` objects for multiple
        flags, if ``flag`` is given as a `list`, OR

    None
       if ``return_=False``
    """
    if isinstance(flag, str):
        flags = flag.split(',')
    else:
        flags = flag
    allflags = set([f for cf in flags for f in
                    re_flagdiv.split(str(cf))[::2] if f])

    if padding is None and isinstance(flag, DataQualityFlag):
        padding = {flag: flag.padding}
    elif padding is None:
        padding = dict((flag,
                        isinstance(flag, DataQualityFlag) and
                        flag.padding or None) for flag in flags)

    # check validity
    if validity is None:
        start = config.get(DEFAULTSECT, 'gps-start-time')
        end = config.get(DEFAULTSECT, 'gps-end-time')
        span = SegmentList([Segment(start, end)])
    elif isinstance(validity, DataQualityFlag):
        validity = validity.active
        try:
            span = SegmentList([validity.extent()])
        except ValueError:
            span = SegmentList()
    else:
        try:
            span = SegmentList([SegmentList(validity).extent()])
        except ValueError:
            span = SegmentList()
    validity = SegmentList(validity)

    # generate output object
    out = DataQualityDict()
    for f in flags:
        out[f] = DataQualityFlag(f, known=validity, active=validity)
    for f in allflags:
        globalv.SEGMENTS.setdefault(f, DataQualityFlag(f))

    # read segments from global memory and get the union of needed times
    try:
        old = reduce(
            operator.and_,
            (globalv.SEGMENTS.get(f, DataQualityFlag(f)).known for f in flags))
    except TypeError:
        old = SegmentList()
    newsegs = validity - old
    # load new segments
    query &= abs(newsegs) != 0
    query &= len(allflags) > 0
    if cache is not None:
        query &= len(cache) != 0
    if query:
        if cache is not None:
            try:
                new = DataQualityDict.read(cache, list(allflags))
            except IORegistryError as e:
                # can remove when astropy >= 1.2 is required
                if type(e) is not IORegistryError:
                    raise
                if len(allflags) == 1:
                    f = list(allflags)[0]
                    new = DataQualityDict()
                    new[f] = DataQualityFlag.read(cache, f, coalesce=False)
            for f in new:
                new[f].known &= newsegs
                new[f].active &= newsegs
                if coalesce:
                    new[f].coalesce()
                vprint("    Read %d segments for %s (%.2f%% coverage).\n"
                       % (len(new[f].active), f,
                          float(abs(new[f].known))/float(abs(newsegs))*100))
        else:
            if len(newsegs) >= 10:
                qsegs = span
            else:
                qsegs = newsegs
            # parse configuration for query
            kwargs = {}
            if url is not None:
                kwargs['url'] = url
            else:
                try:
                    kwargs['url'] = config.get('segment-database', 'url')
                except (NoSectionError, NoOptionError):
                    pass
            if kwargs.get('url', None) in SEGDB_URLS:
                query_func = DataQualityDict.query_segdb
            else:
                query_func = DataQualityDict.query_dqsegdb
            try:
                new = query_func(allflags, qsegs, on_error=segdb_error,
                                 **kwargs)
            except Exception as e:
                # ignore error from SegDB
                if segdb_error in ['ignore', None]:
                    pass
                # convert to warning
                elif segdb_error in ['warn']:
                    print('%sWARNING: %sCaught %s: %s [gwsumm.segments]'
                          % (WARNC, ENDC, type(e).__name__, str(e)),
                          file=sys.stderr)
                    warnings.warn('%s: %s' % (type(e).__name__, str(e)))
                # otherwise raise as normal
                else:
                    raise
                new = DataQualityDict()
            for f in new:
                new[f].known &= newsegs
                new[f].active &= newsegs
                if coalesce:
                    new[f].coalesce()
                vprint("    Downloaded %d segments for %s (%.2f%% coverage).\n"
                       % (len(new[f].active), f,
                          float(abs(new[f].known))/float(abs(newsegs))*100))
        # record new segments
        globalv.SEGMENTS += new
        for f in new:
            globalv.SEGMENTS[f].description = str(new[f].description)

    # return what was asked for
    if return_:
        for compound in flags:
            union, intersection, exclude, notequal = split_compound_flag(
                compound)
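            # combine the component flags of the compound expression in
            # order: subtract excluded flags, intersect required flags,
            # add union flags, then apply any not-equal operations,
            # padding and coalescing each piece as requested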
            if len(union + intersection) == 1:
                out[compound].description = globalv.SEGMENTS[f].description
                out[compound].padding = padding.get(f, (0, 0))
            for flist, op in zip([exclude, intersection, union, notequal],
                                 [operator.sub, operator.and_, operator.or_,
                                  not_equal]):
                for f in flist:
                    pad = padding.get(f, (0, 0))
                    segs = globalv.SEGMENTS[f].copy()
                    if isinstance(pad, (float, int)):
                        segs = segs.pad(pad, pad)
                    elif pad is not None:
                        segs = segs.pad(*pad)
                    if coalesce:
                        segs = segs.coalesce()
                    out[compound] = op(out[compound], segs)
            out[compound].known &= validity
            out[compound].active &= validity
            if coalesce:
                out[compound].coalesce()
        if isinstance(flag, str):
            return out[flag]
        else:
            return out
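A sketch of how this function might be called for a single flag over an explicit validity span; the flag name, GPS times, and database URL below are illustrative only:

from gwpy.segments import Segment, SegmentList

validity = SegmentList([Segment(1187000000, 1187086400)])  # illustrative span
sciseg = get_segments('H1:DMT-ANALYSIS_READY:1', validity=validity,
                      url='https://segments.ligo.org')
print(abs(sciseg.active))  # total livetime (seconds) of the active segments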
Example #43
 def setUp(self):
     self.segments = SegmentList([Segment(0, 3), Segment(6, 7)])
     active = SegmentList([Segment(1, 2), Segment(3, 4), Segment(5, 7)])
     self.flag = DataQualityFlag(name='Test segments',
                                 known=self.segments,
                                 active=active)
Example #44
 def test_fail_write_segwizard(self):
     flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
     self.assertRaises(ValueError, flag.write, StringIO, format='segwizard')
Example #45
    os.path.split(__file__)[0], 'data', 'X1-GWPY_TEST_SEGMENTS-0-10.xml.gz')
SEGWIZ = os.path.join(
    os.path.split(__file__)[0], 'data', 'X1-GWPY_TEST_SEGMENTS-0-10.txt')
FLAG1 = 'X1:GWPY-TEST_SEGMENTS:1'
FLAG2 = 'X1:GWPY-TEST_SEGMENTS:2'

QUERY_START = 968630415
QUERY_END = 968716815
QUERY_FLAGS = ['H1:DMT-SCIENCE:4', 'L1:DMT-SCIENCE:4']
QUERY_RESULT = DataQualityDict()
QUERY_RESULT['H1:DMT-SCIENCE:4'] = DataQualityFlag('H1:DMT-SCIENCE:4',
                                                   known=[(968630415,
                                                           968716815)],
                                                   active=[
                                                       (968632249, 968641010),
                                                       (968642871, 968644430),
                                                       (968646220, 968681205),
                                                       (968686357, 968686575),
                                                       (968688760, 968690950),
                                                       (968692881, 968714403)
                                                   ])
QUERY_RESULT['L1:DMT-SCIENCE:4'] = DataQualityFlag('L1:DMT-SCIENCE:4',
                                                   known=[(968630415,
                                                           968716815)],
                                                   active=[
                                                       (968630415, 968634911),
                                                       (968638548, 968644632),
                                                       (968646025, 968675387),
                                                       (968676835, 968679443),
                                                       (968680215, 968686803),
                                                       (968688905, 968691838),
Example #46
def query(flag, start, end, url='https://segments.ligo.org'):
    """Query a segment database for active segments associated with a flag
    """
    return DataQualityFlag.query(flag, start, end, url=url)
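A minimal usage sketch (the flag name and GPS interval are illustrative):

flag = query('L1:DMT-ANALYSIS_READY:1', 1187000000, 1187086400)
print(flag.active)  # segments during which the flag was active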
Example #47
def main(args=None):
    """Run the hveto command-line interface
    """
    # declare global variables
    # this is needed for multiprocessing utilities
    global acache, analysis, areadkw, atrigfindkw, auxiliary, auxetg
    global auxfreq, counter, livetime, minsnr, naux, pchannel, primary
    global rnd, snrs, windows

    # parse command-line
    parser = create_parser()
    args = parser.parse_args(args=args)
    ifo = args.ifo
    start = int(args.gpsstart)
    end = int(args.gpsend)
    duration = end - start

    # log startup
    LOGGER.info("-- Welcome to Hveto --")
    LOGGER.info("GPS start time: %d" % start)
    LOGGER.info("GPS end time: %d" % end)
    LOGGER.info("Interferometer: %s" % ifo)

    # -- initialisation -------------------------

    # read configuration
    cp = config.HvetoConfigParser(ifo=ifo)
    cp.read(args.config_file)
    LOGGER.info("Parsed configuration file(s)")

    # format output directory
    outdir = _abs_path(args.output_directory)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    os.chdir(outdir)
    LOGGER.info("Working directory: %s" % outdir)
    segdir = 'segments'
    plotdir = 'plots'
    trigdir = 'triggers'
    omegadir = 'scans'
    for d in [segdir, plotdir, trigdir, omegadir]:
        if not os.path.isdir(d):
            os.makedirs(d)

    # prepare html variables
    htmlv = {
        'title': '%s Hveto | %d-%d' % (ifo, start, end),
        'config': None,
        'context': ifo.lower(),
    }

    # get segments
    aflag = cp.get('segments', 'analysis-flag')
    url = cp.get('segments', 'url')
    padding = tuple(cp.getfloats('segments', 'padding'))
    if args.analysis_segments:
        segs_ = DataQualityDict.read(args.analysis_segments, gpstype=float)
        analysis = segs_[aflag]
        span = SegmentList([Segment(start, end)])
        analysis.active &= span
        analysis.known &= span
        analysis.coalesce()
        LOGGER.debug("Segments read from disk")
    else:
        analysis = DataQualityFlag.query(aflag, start, end, url=url)
        LOGGER.debug("Segments recovered from %s" % url)
    if padding != (0, 0):
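        # drop segments shorter than the net padding (they would be emptied
        # or inverted by the pad), then apply the padding in place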
        mindur = padding[0] - padding[1]
        analysis.active = type(
            analysis.active)([s for s in analysis.active if abs(s) >= mindur])
        analysis.pad(*padding, inplace=True)
        LOGGER.debug("Padding %s applied" % str(padding))
    livetime = int(abs(analysis.active))
    livetimepc = livetime / duration * 100.
    LOGGER.info("Retrieved %d segments for %s with %ss (%.2f%%) livetime" %
                (len(analysis.active), aflag, livetime, livetimepc))

    # apply vetoes from veto-definer file
    try:
        vetofile = cp.get('segments', 'veto-definer-file')
    except configparser.NoOptionError:
        vetofile = None
    else:
        try:
            categories = cp.getfloats('segments', 'veto-definer-categories')
        except configparser.NoOptionError:
            categories = None
        # read file
        vdf = read_veto_definer_file(vetofile, start=start, end=end, ifo=ifo)
        LOGGER.debug("Read veto-definer file from %s" % vetofile)
        # get vetoes from segdb
        vdf.populate(source=url, segments=analysis.active, on_error='warn')
        # coalesce flags from chosen categories
        vetoes = DataQualityFlag('%s:VDF-VETOES:1' % ifo)
        nflags = 0
        for flag in vdf:
            if not categories or vdf[flag].category in categories:
                vetoes += vdf[flag]
                nflags += 1
        try:
            deadtime = int(abs(vetoes.active)) / int(abs(vetoes.known)) * 100
        except ZeroDivisionError:
            deadtime = 0
        LOGGER.debug("Coalesced %ss (%.2f%%) of deadtime from %d veto flags" %
                     (abs(vetoes.active), deadtime, nflags))
        # apply to analysis segments
        analysis -= vetoes
        LOGGER.debug("Applied vetoes from veto-definer file")
        livetime = int(abs(analysis.active))
        livetimepc = livetime / duration * 100.
        LOGGER.info("%ss (%.2f%%) livetime remaining after vetoes" %
                    (livetime, livetimepc))

    snrs = cp.getfloats('hveto', 'snr-thresholds')
    minsnr = min(snrs)
    windows = cp.getfloats('hveto', 'time-windows')

    # record all segments
    segments = DataQualityDict()
    segments[analysis.name] = analysis

    # -- load channels --------------------------

    # get primary channel name
    pchannel = cp.get('primary', 'channel')

    # read auxiliary cache
    if args.auxiliary_cache is not None:
        acache = read_cache(args.auxiliary_cache)
    else:
        acache = None

    # load auxiliary channels
    auxetg = cp.get('auxiliary', 'trigger-generator')
    auxfreq = cp.getfloats('auxiliary', 'frequency-range')
    try:
        auxchannels = cp.get('auxiliary', 'channels').strip('\n').split('\n')
    except config.configparser.NoOptionError:
        auxchannels = find_auxiliary_channels(auxetg, (start, end),
                                              ifo=ifo,
                                              cache=acache)
        cp.set('auxiliary', 'channels', '\n'.join(auxchannels))
        LOGGER.debug("Auto-discovered %d "
                     "auxiliary channels" % len(auxchannels))
    else:
        auxchannels = sorted(set(auxchannels))
        LOGGER.debug("Read list of %d auxiliary channels" % len(auxchannels))

    # load unsafe channels list
    _unsafe = cp.get('safety', 'unsafe-channels')
    if os.path.isfile(_unsafe):  # from file
        unsafe = set()
        with open(_unsafe, 'r') as f:
            for c in f.read().rstrip('\n').split('\n'):
                if c.startswith('%(IFO)s'):
                    unsafe.add(c.replace('%(IFO)s', ifo))
                elif not c.startswith('%s:' % ifo):
                    unsafe.add('%s:%s' % (ifo, c))
                else:
                    unsafe.add(c)
    else:  # or from line-separated list
        unsafe = set(_unsafe.strip('\n').split('\n'))
    unsafe.add(pchannel)
    cp.set('safety', 'unsafe-channels', '\n'.join(sorted(unsafe)))
    LOGGER.debug("Read list of %d unsafe channels" % len(unsafe))

    # remove unsafe channels
    nunsafe = 0
    for i in range(len(auxchannels) - 1, -1, -1):
        if auxchannels[i] in unsafe:
            LOGGER.warning("Auxiliary channel %r identified as unsafe and has "
                           "been removed" % auxchannels[i])
            auxchannels.pop(i)
            nunsafe += 1
    LOGGER.debug("%d auxiliary channels identified as unsafe" % nunsafe)
    naux = len(auxchannels)
    LOGGER.info("Identified %d auxiliary channels to process" % naux)

    # record INI file in output HTML directory
    inifile = '%s-HVETO_CONFIGURATION-%d-%d.ini' % (ifo, start, duration)
    if os.path.isfile(inifile) and any(
            os.path.samefile(inifile, x) for x in args.config_file):
        LOGGER.debug("Cannot write INI file to %s, file was given as input")
    else:
        with open(inifile, 'w') as f:
            cp.write(f)
        LOGGER.info("Configuration recorded as %s" % inifile)
    htmlv['config'] = inifile

    # -- load primary triggers ------------------

    # read primary cache
    if args.primary_cache is not None:
        pcache = read_cache(args.primary_cache)
    else:
        pcache = None

    # load primary triggers
    petg = cp.get('primary', 'trigger-generator')
    psnr = cp.getfloat('primary', 'snr-threshold')
    pfreq = cp.getfloats('primary', 'frequency-range')
    preadkw = cp.getparams('primary', 'read-')
    if pcache is not None:  # auto-detect the file format
        LOGGER.debug('Unsetting the primary trigger file format')
        preadkw['format'] = None
        preadkw['path'] = 'triggers'
    ptrigfindkw = cp.getparams('primary', 'trigfind-')
    primary = get_triggers(pchannel,
                           petg,
                           analysis.active,
                           snr=psnr,
                           frange=pfreq,
                           cache=pcache,
                           nproc=args.nproc,
                           trigfind_kwargs=ptrigfindkw,
                           **preadkw)
    fcol, scol = primary.dtype.names[1:3]

    if len(primary):
        LOGGER.info("Read %d events for %s" % (len(primary), pchannel))
    else:
        message = "No events found for %r in %d seconds of livetime" % (
            pchannel, livetime)
        LOGGER.critical(message)

    # cluster primary triggers
    clusterkwargs = cp.getparams('primary', 'cluster-')
    if clusterkwargs:
        primary = primary.cluster(**clusterkwargs)
        LOGGER.info("%d primary events remain after clustering over %s" %
                    (len(primary), clusterkwargs['rank']))

    # -- bail out early -------------------------
    # the bail out is done here so that we can at least generate the eventual
    # configuration file, mainly for HTML purposes

    # no segments
    if livetime == 0:
        message = ("No active segments found for analysis flag %r in interval "
                   "[%d, %d)" % (aflag, start, end))
        LOGGER.critical(message)
        htmlv['context'] = 'info'
        index = html.write_null_page(ifo, start, end, message, **htmlv)
        LOGGER.info("HTML report written to %s" % index)
        sys.exit(0)

    # no primary triggers
    if len(primary) == 0:
        htmlv['context'] = 'danger'
        index = html.write_null_page(ifo, start, end, message, **htmlv)
        LOGGER.info("HTML report written to %s" % index)
        sys.exit(0)

    # otherwise write all primary triggers to ASCII
    trigfile = os.path.join(
        trigdir,
        '%s-HVETO_RAW_TRIGS_ROUND_0-%d-%d.txt' % (ifo, start, duration),
    )
    primary.write(trigfile, format='ascii', overwrite=True)

    # -- load auxiliary triggers ----------------

    LOGGER.info("Reading triggers for aux channels...")
    counter = multiprocessing.Value('i', 0)

    areadkw = cp.getparams('auxiliary', 'read-')
    if acache is not None:  # auto-detect the file format
        LOGGER.debug('Unsetting the auxiliary trigger file format')
        areadkw['format'] = None
        areadkw['path'] = 'triggers'
    atrigfindkw = cp.getparams('auxiliary', 'trigfind-')

    # map with multiprocessing
    if args.nproc > 1:
        pool = multiprocessing.Pool(processes=args.nproc)
        results = pool.map(_get_aux_triggers, auxchannels)
        pool.close()
    # map without multiprocessing
    else:
        results = map(_get_aux_triggers, auxchannels)

    LOGGER.info("All aux events loaded")

    auxiliary = dict(x for x in results if x is not None)
    auxchannels = sorted(auxiliary.keys())
    chanfile = '%s-HVETO_CHANNEL_LIST-%d-%d.txt' % (ifo, start, duration)
    with open(chanfile, 'w') as f:
        for chan in auxchannels:
            print(chan, file=f)
    LOGGER.info("Recorded list of valid auxiliary channels in %s" % chanfile)

    # -- execute hveto analysis -----------------

    minsig = cp.getfloat('hveto', 'minimum-significance')

    pevents = [primary]
    pvetoed = []

    auxfcol, auxscol = auxiliary[auxchannels[0]].dtype.names[1:3]
    slabel = plot.get_column_label(scol)
    flabel = plot.get_column_label(fcol)
    auxslabel = plot.get_column_label(auxscol)
    auxflabel = plot.get_column_label(auxfcol)

    rounds = []
    rnd = core.HvetoRound(1, pchannel, rank=scol)
    rnd.segments = analysis.active

    while True:
        LOGGER.info("-- Processing round %d --" % rnd.n)

        # write segments for this round
        segfile = os.path.join(
            segdir, '%s-HVETO_ANALYSIS_SEGS_ROUND_%d-%d-%d.txt' %
            (ifo, rnd.n, start, duration))
        write_ascii_segments(segfile, rnd.segments)

        # calculate significances for this round
        if args.nproc > 1:  # multiprocessing
            # separate channel list into chunks and process each chunk
            pool = multiprocessing.Pool(
                processes=min(args.nproc, len(auxiliary.keys())))
            chunks = utils.channel_groups(list(auxiliary.keys()), args.nproc)
            results = pool.map(_find_max_significance, chunks)
            pool.close()
            winners, sigsets = zip(*results)
            # find winner of chunk winners
            winner = sorted(winners, key=lambda w: w.significance)[-1]
            # flatten sets of significances into one list
            newsignificances = sigsets[0]
            for subdict in sigsets[1:]:
                newsignificances.update(subdict)
        else:  # single process
            winner, newsignificances = core.find_max_significance(
                primary, auxiliary, pchannel, snrs, windows, rnd.livetime)

        LOGGER.info("Round %d winner: %s" % (rnd.n, winner.name))

        # plot significance drop here for the last round
        #   only now do we actually have the new data to
        #   calculate significance drop
        if rnd.n > 1:
            svg = (pngname % 'SIG_DROP').replace('.png', '.svg')  # noqa: F821
            plot.significance_drop(
                svg,
                oldsignificances,
                newsignificances,  # noqa: F821
                title=' | '.join([title, subtitle]),  # noqa: F821
                bbox_inches='tight')
            LOGGER.debug("Figure written to %s" % svg)
            svg = FancyPlot(svg, caption=plot.ROUND_CAPTION['SIG_DROP'])
            rounds[-1].plots.append(svg)
        oldsignificances = newsignificances  # noqa: F841

        # break out of the loop if the significance is below stopping point
        if winner.significance < minsig:
            LOGGER.info("Maximum signifiance below stopping point")
            LOGGER.debug("    (%.2f < %.2f)" % (winner.significance, minsig))
            LOGGER.info("-- Rounds complete! --")
            break

        # work out the vetoes for this round
        allaux = auxiliary[winner.name][
            auxiliary[winner.name][auxscol] >= winner.snr]
        winner.events = allaux
        coincs = allaux[core.find_coincidences(allaux['time'],
                                               primary['time'],
                                               dt=winner.window)]
        rnd.vetoes = winner.get_segments(allaux['time'])
        flag = DataQualityFlag('%s:HVT-ROUND_%d:1' % (ifo, rnd.n),
                               active=rnd.vetoes,
                               known=rnd.segments,
                               description="winner=%s, window=%s, snr=%s" %
                               (winner.name, winner.window, winner.snr))
        segments[flag.name] = flag
        LOGGER.debug("Generated veto segments for round %d" % rnd.n)

        # link events before veto for plotting
        before = primary
        beforeaux = auxiliary[winner.name]

        # apply vetoes to primary
        primary, vetoed = core.veto(primary, rnd.vetoes)
        pevents.append(primary)
        pvetoed.append(vetoed)
        LOGGER.debug("Applied vetoes to primary")

        # record results
        rnd.winner = winner
        rnd.efficiency = (len(vetoed), len(primary) + len(vetoed))
        rnd.use_percentage = (len(coincs), len(winner.events))
        if rnd.n > 1:
            rnd.cum_efficiency = (len(vetoed) + rounds[-1].cum_efficiency[0],
                                  rounds[0].efficiency[1])
            rnd.cum_deadtime = (rnd.deadtime[0] + rounds[-1].cum_deadtime[0],
                                livetime)
        else:
            rnd.cum_efficiency = rnd.efficiency
            rnd.cum_deadtime = rnd.deadtime

        # apply vetoes to auxiliary
        if args.nproc > 1:  # multiprocess
            # separate channel list into chunks and process each chunk
            pool = multiprocessing.Pool(
                processes=min(args.nproc, len(auxiliary.keys())))
            chunks = utils.channel_groups(list(auxiliary.keys()), args.nproc)
            results = pool.map(_veto, chunks)
            pool.close()
            auxiliary = results[0]
            for subdict in results[1:]:
                auxiliary.update(subdict)
        else:  # single process
            auxiliary = core.veto_all(auxiliary, rnd.vetoes)
        LOGGER.debug("Applied vetoes to auxiliary channels")

        # log results
        LOGGER.info("""Results for round %d:\n\n
    winner :          %s
    significance :    %s
    mu :              %s
    snr :             %s
    dt :              %s
    use_percentage :  %s
    efficiency :      %s
    deadtime :        %s
    cum. efficiency : %s
    cum. deadtime :   %s\n\n""" %
                    (rnd.n, rnd.winner.name, rnd.winner.significance,
                     rnd.winner.mu, rnd.winner.snr, rnd.winner.window,
                     rnd.use_percentage, rnd.efficiency, rnd.deadtime,
                     rnd.cum_efficiency, rnd.cum_deadtime))

        # write segments
        segfile = os.path.join(
            segdir, '%s-HVETO_VETO_SEGS_ROUND_%d-%d-%d.txt' %
            (ifo, rnd.n, start, duration))
        write_ascii_segments(segfile, rnd.vetoes)
        LOGGER.debug("Round %d vetoes written to %s" % (rnd.n, segfile))
        rnd.files['VETO_SEGS'] = (segfile, )
        # write triggers
        trigfile = os.path.join(
            trigdir, '%s-HVETO_%%s_TRIGS_ROUND_%d-%d-%d.txt' %
            (ifo, rnd.n, start, duration))
        for tag, arr in zip(['WINNER', 'VETOED', 'RAW'],
                            [winner.events, vetoed, primary]):
            f = trigfile % tag
            arr.write(f, format='ascii', overwrite=True)
            LOGGER.debug("Round %d %s events written to %s" %
                         (rnd.n, tag.lower(), f))
            rnd.files[tag] = f

        # record times to omega scan
        if args.omega_scans:
            N = len(vetoed)
            ind = random.sample(range(0, N), min(args.omega_scans, N))
            rnd.scans = vetoed[ind]
            LOGGER.debug("Collected %d events to omega scan:\n\n%s\n\n" %
                         (len(rnd.scans), rnd.scans))

        # -- make some plots --

        pngname = os.path.join(
            plotdir,
            '%s-HVETO_%%s_ROUND_%d-%d-%d.png' % (ifo, rnd.n, start, duration))
        wname = texify(rnd.winner.name)
        beforel = 'Before\n[%d]' % len(before)
        afterl = 'After\n[%d]' % len(primary)
        vetoedl = 'Vetoed\n(primary)\n[%d]' % len(vetoed)
        beforeauxl = 'All\n[%d]' % len(beforeaux)
        usedl = 'Used\n(aux)\n[%d]' % len(winner.events)
        coincl = 'Coinc.\n[%d]' % len(coincs)
        title = '%s Hveto round %d' % (ifo, rnd.n)
        ptitle = '%s: primary impact' % title
        atitle = '%s: auxiliary use' % title
        subtitle = 'winner: %s [%d-%d]' % (wname, start, end)

        # before/after histogram
        png = pngname % 'HISTOGRAM'
        plot.before_after_histogram(png,
                                    before[scol],
                                    primary[scol],
                                    label1=beforel,
                                    label2=afterl,
                                    xlabel=slabel,
                                    title=ptitle,
                                    subtitle=subtitle)
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['HISTOGRAM'])
        rnd.plots.append(png)

        # snr versus time
        png = pngname % 'SNR_TIME'
        plot.veto_scatter(png,
                          before,
                          vetoed,
                          x='time',
                          y=scol,
                          label1=beforel,
                          label2=vetoedl,
                          epoch=start,
                          xlim=[start, end],
                          ylabel=slabel,
                          title=ptitle,
                          subtitle=subtitle,
                          legend_title="Primary:")
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['SNR_TIME'])
        rnd.plots.append(png)

        # snr versus frequency
        png = pngname % 'SNR_%s' % fcol.upper()
        plot.veto_scatter(png,
                          before,
                          vetoed,
                          x=fcol,
                          y=scol,
                          label1=beforel,
                          label2=vetoedl,
                          xlabel=flabel,
                          ylabel=slabel,
                          xlim=pfreq,
                          title=ptitle,
                          subtitle=subtitle,
                          legend_title="Primary:")
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['SNR'])
        rnd.plots.append(png)

        # frequency versus time coloured by SNR
        png = pngname % '%s_TIME' % fcol.upper()
        plot.veto_scatter(png,
                          before,
                          vetoed,
                          x='time',
                          y=fcol,
                          color=scol,
                          label1=None,
                          label2=None,
                          ylabel=flabel,
                          clabel=slabel,
                          clim=[3, 100],
                          cmap='YlGnBu',
                          epoch=start,
                          xlim=[start, end],
                          ylim=pfreq,
                          title=ptitle,
                          subtitle=subtitle)
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['TIME'])
        rnd.plots.append(png)

        # aux used versus frequency
        png = pngname % 'USED_SNR_TIME'
        plot.veto_scatter(png,
                          winner.events,
                          vetoed,
                          x='time',
                          y=[auxscol, scol],
                          label1=usedl,
                          label2=vetoedl,
                          ylabel=slabel,
                          epoch=start,
                          xlim=[start, end],
                          title=atitle,
                          subtitle=subtitle)
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['USED_SNR_TIME'])
        rnd.plots.append(png)

        # snr versus time
        png = pngname % 'AUX_SNR_TIME'
        plot.veto_scatter(png,
                          beforeaux, (winner.events, coincs),
                          x='time',
                          y=auxscol,
                          label1=beforeauxl,
                          label2=(usedl, coincl),
                          epoch=start,
                          xlim=[start, end],
                          ylabel=auxslabel,
                          title=atitle,
                          subtitle=subtitle)
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['AUX_SNR_TIME'])
        rnd.plots.append(png)

        # snr versus frequency
        png = pngname % 'AUX_SNR_FREQUENCY'
        plot.veto_scatter(png,
                          beforeaux, (winner.events, coincs),
                          x=auxfcol,
                          y=auxscol,
                          label1=beforeauxl,
                          label2=(usedl, coincl),
                          xlabel=auxflabel,
                          ylabel=auxslabel,
                          title=atitle,
                          subtitle=subtitle,
                          legend_title="Aux:")
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['AUX_SNR_FREQUENCY'])
        rnd.plots.append(png)

        # frequency versus time coloured by SNR
        png = pngname % 'AUX_FREQUENCY_TIME'
        plot.veto_scatter(png,
                          beforeaux, (winner.events, coincs),
                          x='time',
                          y=auxfcol,
                          color=auxscol,
                          label1=None,
                          label2=[None, None],
                          ylabel=auxflabel,
                          clabel=auxslabel,
                          clim=[3, 100],
                          cmap='YlGnBu',
                          epoch=start,
                          xlim=[start, end],
                          title=atitle,
                          subtitle=subtitle)
        LOGGER.debug("Figure written to %s" % png)
        png = FancyPlot(png, caption=plot.ROUND_CAPTION['AUX_FREQUENCY_TIME'])
        rnd.plots.append(png)

        # move to the next round
        rounds.append(rnd)
        rnd = core.HvetoRound(rnd.n + 1,
                              pchannel,
                              rank=scol,
                              segments=rnd.segments - rnd.vetoes)

    # write file with all segments
    segfile = os.path.join(
        segdir, '%s-HVETO_SEGMENTS-%d-%d.h5' % (ifo, start, duration))
    segments.write(segfile, overwrite=True)
    LOGGER.debug("Segment summary written to %s" % segfile)

    LOGGER.debug("Making summary figures...")

    # -- exit early if no rounds above threshold

    if not rounds:
        message = ("No rounds completed above threshold. Analysis stopped "
                   "with %s achieving significance of %.2f" %
                   (winner.name, winner.significance))
        LOGGER.critical(message)
        message = message.replace(winner.name,
                                  cis_link(winner.name, class_='alert-link'))
        message += '<br>[T<sub>win</sub>: %ss, SNR: %s]' % (winner.window,
                                                            winner.snr)
        htmlv['context'] = 'warning'
        index = html.write_null_page(ifo, start, end, message, **htmlv)
        LOGGER.info("HTML report written to %s" % index)
        sys.exit(0)

    # -- plot all rounds impact
    pngname = os.path.join(
        plotdir, '%s-HVETO_%%s_ALL_ROUNDS-%d-%d.png' % (ifo, start, duration))
    plots = []
    title = '%s Hveto all rounds' % args.ifo
    subtitle = '%d rounds | %d-%d' % (len(rounds), start, end)

    # before/after histogram
    png = pngname % 'HISTOGRAM'
    beforel = 'Before analysis [%d events]' % len(pevents[0])
    afterl = 'After %d rounds [%d]' % (len(pevents) - 1, len(pevents[-1]))
    plot.before_after_histogram(png,
                                pevents[0][scol],
                                pevents[-1][scol],
                                label1=beforel,
                                label2=afterl,
                                xlabel=slabel,
                                title=title,
                                subtitle=subtitle)
    png = FancyPlot(png, caption=plot.HEADER_CAPTION['HISTOGRAM'])
    plots.append(png)
    LOGGER.debug("Figure written to %s" % png)

    # efficiency/deadtime curve
    png = pngname % 'ROC'
    plot.hveto_roc(png, rounds, title=title, subtitle=subtitle)
    png = FancyPlot(png, caption=plot.HEADER_CAPTION['ROC'])
    plots.append(png)
    LOGGER.debug("Figure written to %s" % png)

    # frequency versus time
    png = pngname % '%s_TIME' % fcol.upper()
    labels = [str(r.n) for r in rounds]
    legtitle = 'Vetoed at\nround'
    plot.veto_scatter(png,
                      pevents[0],
                      pvetoed,
                      label1='',
                      label2=labels,
                      title=title,
                      subtitle=subtitle,
                      ylabel=flabel,
                      x='time',
                      y=fcol,
                      epoch=start,
                      xlim=[start, end],
                      legend_title=legtitle)
    png = FancyPlot(png, caption=plot.HEADER_CAPTION['TIME'])
    plots.append(png)
    LOGGER.debug("Figure written to %s" % png)

    # snr versus time
    png = pngname % 'SNR_TIME'
    plot.veto_scatter(png,
                      pevents[0],
                      pvetoed,
                      label1='',
                      label2=labels,
                      title=title,
                      subtitle=subtitle,
                      ylabel=slabel,
                      x='time',
                      y=scol,
                      epoch=start,
                      xlim=[start, end],
                      legend_title=legtitle)
    png = FancyPlot(png, caption=plot.HEADER_CAPTION['SNR_TIME'])
    plots.append(png)
    LOGGER.debug("Figure written to %s" % png)

    # -- write summary states to ASCII table and JSON
    json_ = {
        'user': getuser(),
        'host': getfqdn(),
        'date': str(datetime.datetime.now()),
        'configuration': inifile,
        'ifo': ifo,
        'gpsstart': start,
        'gpsend': end,
        'call': ' '.join(sys.argv),
        'rounds': [],
    }
    with open('summary-stats.txt', 'w') as f:
        # print header
        print(
            '#N winner window SNR significance nveto use-percentage '
            'efficiency deadtime cumulative-efficiency cumulative-deadtime',
            file=f)
        for r in rounds:
            # extract relevant statistics
            results = [
                ('round', r.n),
                ('name', r.winner.name),
                ('window', r.winner.window),
                ('snr', r.winner.snr),
                ('significance', r.winner.significance),
                ('nveto', r.efficiency[0]),
                ('use-percentage',
                 r.use_percentage[0] / r.use_percentage[1] * 100.),
                ('efficiency', r.efficiency[0] / r.efficiency[1] * 100.),
                ('deadtime', r.deadtime[0] / r.deadtime[1] * 100.),
                ('cumulative-efficiency',
                 r.cum_efficiency[0] / r.cum_efficiency[1] * 100.),
                ('cumulative-deadtime',
                 r.cum_deadtime[0] / r.cum_deadtime[1] * 100.),
            ]
            # write to ASCII
            print(' '.join(map(str, list(zip(*results))[1])), file=f)
            # write to JSON
            results.append(('files', r.files))
            json_['rounds'].append(dict(results))
    LOGGER.debug("Summary table written to %s" % f.name)

    with open('summary-stats.json', 'w') as f:
        json.dump(json_, f, sort_keys=True)
    LOGGER.debug("Summary JSON written to %s" % f.name)

    # -- generate workflow for omega scans

    if args.omega_scans:
        omegatimes = list(
            map(
                str,
                sorted(
                    numpy.unique([t['time'] for r in rounds
                                  for t in r.scans]))))
        LOGGER.debug("Collected %d times to omega scan" % len(omegatimes))
        newtimes = [
            t for t in omegatimes
            if not os.path.exists(os.path.join(omegadir, str(t)))
        ]
        LOGGER.debug("%d scans already complete or in progress, %d remaining" %
                     (len(omegatimes) - len(newtimes), len(newtimes)))
        if len(newtimes) > 0:
            LOGGER.info('Creating workflow for omega scans')
            flags = batch.get_command_line_flags(ifo=ifo,
                                                 ignore_state_flags=True)
            condorcmds = batch.get_condor_arguments(timeout=4, gps=start)
            batch.generate_dag(newtimes,
                               flags=flags,
                               submit=True,
                               outdir=omegadir,
                               condor_commands=condorcmds)
            LOGGER.info('Launched {} omega scans to condor'.format(
                len(newtimes)))
        else:
            LOGGER.debug('Skipping omega scans')

    # -- write HTML and finish

    index = html.write_hveto_page(ifo,
                                  start,
                                  end,
                                  rounds,
                                  plots,
                                  winners=[r.winner.name for r in rounds],
                                  **htmlv)
    LOGGER.debug("HTML written to %s" % index)
    LOGGER.debug("Analysis completed in %d seconds" % (time.time() - JOBSTART))
    LOGGER.info("-- Hveto complete --")
Example #48
		else:
			print filename + " does not exist. Looking for the segment file in next time increment."
			break
f.close()

#construct flag and filename
flag_name = 'H1:UPVh-RND:1' #NEEDS TO BE CHANGED
name =  'segments_UPVh_RND.xml' #NEEDS TO BE CHANGED


try:
    knownsegments = numpy.loadtxt('total_UPVh_segs.txt')
except IOError:
    print('No total_UPVh_segs.txt file in current working directory. '
          'It should have been produced from last loop.')

known_start = [knownsegments[i,0] for i in range(len(knownsegments))]
known_end = [knownsegments[i,1] for i in range(len(knownsegments))]
# read the data
data = numpy.loadtxt('total_UPVh_trigs.txt', dtype=float)

# get an array for the start_time and end_time of each segment
start_time = [data[i,0] for i in range(len(data))]
end_time = [data[i,1] for i in range(len(data))]

# create a data quality flag object
# (start_time and end_time come from the same array, so zip pairs them one-to-one)
flag = DataQualityFlag(flag_name, active=list(zip(start_time, end_time)),
                       known=list(zip(known_start, known_end)))

# write flag
flag.write(name)
Example #49
#xarm = mylib.GetDQFlag(gpsstart,gpsend,config="xarm",kamioka=kamioka)
#LSC = mylib.GetDQFlag(gpsstart,gpsend,config="LSC",kamioka=kamioka)
#FPMI = mylib.GetDQFlag(gpsstart,gpsend,config="FPMI",kamioka=kamioka)
Observation = mylib.GetDQFlag(gpsstart,
                              gpsend,
                              config="Observation",
                              kamioka=kamioka)
IFO = mylib.GetDQFlag(gpsstart, gpsend, config="IFO", kamioka=kamioka)
IMC = mylib.GetDQFlag(gpsstart, gpsend, config="IMC", kamioka=kamioka)
PMC = mylib.GetDQFlag(gpsstart, gpsend, config="PMC", kamioka=kamioka)
FSS = mylib.GetDQFlag(gpsstart, gpsend, config="FSS", kamioka=kamioka)
ISS = mylib.GetDQFlag(gpsstart, gpsend, config="ISS", kamioka=kamioka)
#DAQ = mylib.GetDQFlag(gpsstart,gpsend,config="DAQ",kamioka=kamioka)
#MICH = mylib.GetDQFlag(gpsstart,gpsend,config="MICH",kamioka=kamioka)
trigger = DataQualityFlag(name="Trigger",
                          known=IFO.known,
                          active=[(triggertime, triggertime + duration)])

untriggered = ~trigger
plot = untriggered.plot(figsize=(12, 8))
ax = plot.gca()
ax.set_title("Interferometer state summary")
#ax.plot(LSC)
#ax.plot(FPMI)
ax.plot(Observation)
ax.plot(IFO)
#ax.plot(xarm)
#ax.plot(MICH)
ax.plot(ISS)
ax.plot(IMC)
ax.plot(PMC)
Example #50
def get_triggers():

    # Obtain segments that are analysis ready
    analysis_ready = DataQualityFlag.query('{0}:DMT-ANALYSIS_READY:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)

    # Display segments for which this flag is true
    print "Segments for which the ANALYSIS READY Flag is active: {0}".format(analysis_ready.active)

    if opts.applyallDQ:
        print("We are finding all previously created DQ cuts")
        # Obtain segments of all DQ cuts if requested DQ list can be found
        # https://code.pycbc.phy.syr.edu/detchar/veto-definitions/blob/master/burst/O1/H1L1-HOFT_C02_O1_BURST.xml
        # First obtain those flags that are for both H1 and L1
        O1_MISSING_HOFT_C02 = DataQualityFlag.query('{0}:DCS-MISSING_{0}_HOFT_C02:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
        O1_ETMY_ESD_DAC_OVERFLOW = DataQualityFlag.query('{0}:DMT-ETMY_ESD_DAC_OVERFLOW:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
        O1_OMC_DCPD_A_SATURATION = DataQualityFlag.query('{0}:DCH-OMC_DCPD_A_SATURATION:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
        O1_OMC_DCPD_ADC_OVERFLOW = DataQualityFlag.query('{0}:DMT-OMC_DCPD_ADC_OVERFLOW:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
        O1_ETMY_SATURATION_SNR200 = DataQualityFlag.query('{0}:DCH-ETMY_SATURATION_SNR200:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
        O1_CW_INJECTION_TRANSITION = DataQualityFlag.query('{0}:DCH-CW_INJECTION_TRANSITION:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
        O1_BAD_KAPPA_BASIC_CUT_HOFT_C02 = DataQualityFlag.query('{0}:DCS-BAD_KAPPA_BASIC_CUT_{0}_HOFT_C02:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
        O1_PARTIAL_FRAME_LOSS_HOFT_C02 = DataQualityFlag.query('{0}:DCS-PARTIAL_FRAME_LOSS_{0}_HOFT_C02:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)

        # Obtain detector specific flags
        if opts.detector == "H1":
            O1_RF45_AM_STABILIZATION = DataQualityFlag.query('{0}:DCH-RF45_AM_STABILIZATION:4'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
            O1_ETMY_SATURATION = DataQualityFlag.query('{0}:DCH-ETMY_SATURATION:2'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
            O1_BAD_DATA_BEFORE_LOCKLOSS = DataQualityFlag.query('{0}:DCH-BAD_DATA_BEFORE_LOCKLOSS:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
            O1_ETMY_VIOLIN_MODE_2NDHARMONIC_RINGING = DataQualityFlag.query('{0}:DCH-ETMY_VIOLIN_MODE_2NDHARMONIC_RINGING:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
            O1_RF45_SEVERE_GLITCHING = DataQualityFlag.query('{0}:DCH-RF45_SEVERE_GLITCHING:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
            O1_EY_BECKHOFF_CHASSIS_PROBLEM = DataQualityFlag.query('{0}:DCH-EY_BECKHOFF_CHASSIS_PROBLEM:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
            O1_ASC_AS_B_RF36_GLITCHING = DataQualityFlag.query('{0}:DCH-ASC_AS_B_RF36_GLITCHING:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
            O1_BAD_STRAIN_HOFT_C02 = DataQualityFlag.query('{0}:DCS-BAD_STRAIN_{0}_HOFT_C02:2'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
            O1_TOGGLING_BAD_KAPPA_HOFT_C02= DataQualityFlag.query('{0}:DCS-TOGGLING_BAD_KAPPA_{0}_HOFT_C02:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
        else:
            O1_ETMY_SATURATION = DataQualityFlag.query('{0}:DCH-ETMY_SATURATION:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
            O1_BAD_DATA_BEFORE_LOCKLOSS = DataQualityFlag.query('{0}:DCH-BAD_DATA_BEFORE_LOCKLOSS:2'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
            O1_PCAL_GLITCHES_GT_20P = DataQualityFlag.query('{0}:DCH-PCAL_GLITCHES_GT_20P:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
            O1_SUDDEN_PSD_CHANGE = DataQualityFlag.query('{0}:DCH-SUDDEN_PSD_CHANGE:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
            O1_BAD_VCO_OFFSET = DataQualityFlag.query('{0}:DCH-BAD_VCO_OFFSET:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)
            O1_SEVERE_60_200_HZ_NOISE = DataQualityFlag.query('{0}:DCH-SEVERE_60_200_HZ_NOISE:1'.format(opts.detector),opts.gpsStart,opts.gpsEnd)

    # Fetch raw omicron triggers and apply filter which is defined in a function above.
    omicrontriggers = SnglBurstTable.fetch(detchannelname, 'Omicron',
                                           opts.gpsStart, opts.gpsEnd,
                                           filt=threshold)

    print("List of available metadata information for a given glitch "
          "provided by omicron: {0}".format(omicrontriggers.columnnames))

    print("Number of triggers after SNR and Freq cuts but before "
          "ANALYSIS READY flag filtering: {0}".format(len(omicrontriggers)))

    # Filter the raw omicron triggers against the ANALYSIS READY flag.
    omicrontriggers = omicrontriggers.vetoed(analysis_ready.active)
    # If requested filter out DQ flags
    if opts.applyallDQ:
        print("We are applying all previously created DQ cuts")
        # Obtain segments of all DQ cuts if requested DQ list can be found
        # https://code.pycbc.phy.syr.edu/detchar/veto-definitions/blob/master/burst/O1/H1L1-HOFT_C02_O1_BURST.xml
        # First obtain those flags that are for both H1 and L1
        omicrontriggers = omicrontriggers.veto(O1_MISSING_HOFT_C02.active)
        omicrontriggers = omicrontriggers.veto(O1_ETMY_ESD_DAC_OVERFLOW.active)
        omicrontriggers = omicrontriggers.veto(O1_OMC_DCPD_A_SATURATION.active)
        omicrontriggers = omicrontriggers.veto(O1_OMC_DCPD_ADC_OVERFLOW.active)
        omicrontriggers = omicrontriggers.veto(O1_ETMY_SATURATION_SNR200.active)
        omicrontriggers = omicrontriggers.veto(O1_CW_INJECTION_TRANSITION.active)
        omicrontriggers = omicrontriggers.veto(O1_BAD_KAPPA_BASIC_CUT_HOFT_C02.active)
        omicrontriggers = omicrontriggers.veto(O1_PARTIAL_FRAME_LOSS_HOFT_C02.active)

        # Obtain detector specific flags
        if opts.detector == "H1":
            omicrontriggers = omicrontriggers.veto(O1_RF45_AM_STABILIZATION.active)
            omicrontriggers = omicrontriggers.veto(O1_ETMY_SATURATION.active)
            omicrontriggers = omicrontriggers.veto(O1_BAD_DATA_BEFORE_LOCKLOSS.active)
            omicrontriggers = omicrontriggers.veto(O1_ETMY_VIOLIN_MODE_2NDHARMONIC_RINGING.active)
            omicrontriggers = omicrontriggers.veto(O1_RF45_SEVERE_GLITCHING.active)
            omicrontriggers = omicrontriggers.veto(O1_EY_BECKHOFF_CHASSIS_PROBLEM.active)
            omicrontriggers = omicrontriggers.veto(O1_ASC_AS_B_RF36_GLITCHING.active)
            omicrontriggers = omicrontriggers.veto(O1_BAD_STRAIN_HOFT_C02.active)
            omicrontriggers = omicrontriggers.veto(O1_TOGGLING_BAD_KAPPA_HOFT_C02.active)
        else:
            omicrontriggers = omicrontriggers.veto(O1_ETMY_SATURATION.active)
            omicrontriggers = omicrontriggers.veto(O1_BAD_DATA_BEFORE_LOCKLOSS.active)
            omicrontriggers = omicrontriggers.veto(O1_PCAL_GLITCHES_GT_20P.active)
            omicrontriggers = omicrontriggers.veto(O1_SUDDEN_PSD_CHANGE.active)
            omicrontriggers = omicrontriggers.veto(O1_BAD_VCO_OFFSET.active)
            omicrontriggers = omicrontriggers.veto(O1_SEVERE_60_200_HZ_NOISE.active)
        


    print "Final trigger length: {0}".format(len(omicrontriggers))

    return omicrontriggers
Example #51
import pytest

from unittest import mock

from gwpy.segments import (Segment, SegmentList, DataQualityFlag,
                           DataQualityDict)

from .. import segments

TEST_SEGMENTS = SegmentList([
    Segment(0.1, 1.234567),
    Segment(5.64321, 6.234567890),
])
TEST_SEGMENTS_2 = SegmentList(
    [Segment(round(a, 6), round(b, 6)) for a, b in TEST_SEGMENTS])

TEST_FLAG = DataQualityFlag(known=SegmentList([Segment(0, 7)]),
                            active=TEST_SEGMENTS,
                            name='X1:TEST-FLAG')
TEST_DICT = DataQualityDict({TEST_FLAG.name: TEST_FLAG})

# -- unit tests ---------------------------------------------------------------


@mock.patch('gwpy.segments.DataQualityFlag.query', return_value=TEST_FLAG)
def test_query(dqflag):
    flag = segments.query('X1:TEST-FLAG', 0, 7)
    assert flag.known == TEST_FLAG.known
    assert flag.active == SegmentList(
        [Segment((int(seg[0]), int(seg[1]))) for seg in TEST_FLAG.active])


@pytest.mark.parametrize('ncol', (2, 4))
Example #52
K1:AUX-INVALID_DATA,17.0,103.0,69.0,103.5999984741211,14.4,5.0,1.445966601520126,-55
"""  # noqa: E501

# -- test data

GPS = 17

SIGNAL = TimeSeries(
    gausspulse(numpy.arange(-1, 1, 1./4096), bw=100),
    sample_rate=4096,
    epoch=GPS - 1,
)

TEST_FLAG = DataQualityFlag(
    name="K1:DCH-TEST_FLAG:1",
    active=SegmentList(),
    known=SegmentList([Segment(0, 34)]),
)

K1_DATA = TimeSeriesDict({
    "K1:GW-PRIMARY_CHANNEL": TimeSeries(
        numpy.random.normal(loc=1, scale=.5, size=4096 * GPS * 2),
        sample_rate=4096,
        epoch=0,
    ).zpk([], [0], 1).inject(SIGNAL),
    "K1:AUX-HIGH_SIGNIFICANCE": TimeSeries(
        numpy.random.normal(loc=1, scale=.5, size=4096 * GPS * 2),
        sample_rate=4096,
        epoch=0,
    ).zpk([], [0], 1).inject(SIGNAL),
    "K1:AUX-LOW_SIGNIFICANCE": TimeSeries(
Example #53
 def test_round(self):
     flag = DataQualityFlag(FLAG1, active=ACTIVE_CONTRACTED, known=KNOWN)
     flag2 = flag.round()
     self.assertListEqual(flag2.active, ACTIVE & KNOWN)
Example #54
"""

__author__ = 'Duncan Macleod <*****@*****.**>'

# .. currentmodule:: gwpy.segments
#
# Getting the segments
# --------------------
#
# First, we need to fetch the Open Data timeline segments from LOSC.
# To do that we can call the :meth:`DataQualityFlag.fetch_open_data` method
# using ``'H1_DATA'`` as the flag (for an explanation of what this means,
# read up on `The S6 Data Release <https://losc.ligo.org/S6/>`__).

from gwpy.segments import DataQualityFlag
h1segs = DataQualityFlag.fetch_open_data('H1_DATA',
                                         'Sep 16 2010', 'Sep 17 2010')

# For sanity, let's plot these segments:

splot = h1segs.plot(figsize=[12, 3])
splot.show()
splot.close()  # hide

# We see that the LIGO Hanford Observatory detector was operating for the
# majority of the day, with a few outages of ~30 minutes or so.

# We can use the :func:`abs` function to display the total amount of time
# spent taking data:

print(abs(h1segs.active))
Example #55
import fir
from utils import chunk_segments

parser = argparse.ArgumentParser(description='Generate target features for machine learning on LIGO data.')
parser.add_argument('--ifo',type=str,required=True)
parser.add_argument('-f','--segment-file',type=str)
parser.add_argument('-s','--start-time',type=int)
parser.add_argument('-e','--end-time',type=int)
parser.add_argument('-p','--path', help='path to output directory', required=False)

args = parser.parse_args()

ifo=args.ifo

if args.segment_file:
    sci_segs=DataQualityFlag.read(args.segment_file, path='%s:DMT-ANALYSIS_READY:1' % ifo)
    assert sci_segs.ifo == ifo
    segs=sci_segs.active
elif args.start_time and args.end_time:
    segs=[Segment(args.start_time, args.end_time)]
else:
    print "Either --segment-file, or both start and end time must be provided."
    exit(2)

st=segs[0].start
et=segs[-1].end

chunk=4096
pad=256

target_chan=ifo+':GDS-CALIB_STRAIN'
Example #56
File: open-data.py Project: rngeorge/gwpy
`Observing Run 1 (O1) <https://www.gw-openscience.org/O1/>`__
have been released by |GWOSC|_.

This example demonstrates how to download segment information into a
:class:`~gwpy.segments.DataQualityFlag`, and then plot them.
"""

__author__ = 'Duncan Macleod <*****@*****.**>'
__currentmodule__ = 'gwpy.segments'

# All we need to do is import the `DataQualityFlag` object, and then call
# the :meth:`DataQualityFlag.fetch_open_data` method to query for, and download
# the segments for all of O1:

from gwpy.segments import DataQualityFlag
h1segs = DataQualityFlag.fetch_open_data('H1_DATA', 'Sep 12 2015',
                                         'Jan 19 2016')

# We can then generate a plot of the times when LIGO-Hanford was operating:

plot = h1segs.plot(color='gwpy:ligo-hanford')
plot.show()

# That's a lot of segments. We can pare down the list a little to display
# only the segments from the first month of the run:

h1month1 = DataQualityFlag.fetch_open_data('H1_DATA', 'Sep 12 2015',
                                           'Oct 12 2015')

# We can also download the LIGO-Livingston segments from the same period
# and display them alongside, as well as those segments during which both
# interferometers were operating at the same time
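
# The original snippet is cut off here; the following is a minimal sketch of
# that comparison, reusing only calls shown above and assuming the GWOSC flag
# name 'L1_DATA' for LIGO-Livingston over the same first-month interval:

l1month1 = DataQualityFlag.fetch_open_data('L1_DATA', 'Sep 12 2015',
                                           'Oct 12 2015')

# the & operator intersects two flags, giving the times when both
# interferometers were operating simultaneously
bothon = h1month1 & l1month1

print(abs(bothon.active))  # total coincident observing time, in seconds
plot = bothon.plot()
plot.show()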
Example #57
0
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:
        # read all time-series data
        try:
            group = h5file['timeseries']
        except KeyError:
            group = dict()
        for dataset in group.values():
            ts = TimeSeries.read(dataset, format='hdf5')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name)
                    and ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() != mode.Mode.day:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # read all state-vector data
        try:
            group = h5file['statevector']
        except KeyError:
            group = dict()
        for dataset in group.values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # read all spectrogram data
        for tag in ['spectrogram', 'coherence-components']:
            if tag == 'coherence-components':
                add_ = add_coherence_component_spectrogram
            else:
                add_ = add_spectrogram
            try:
                group = h5file[tag]
            except KeyError:
                group = dict()
            for key, dataset in group.items():
                key = key.rsplit(',', 1)[0]
                spec = Spectrogram.read(dataset, format='hdf5')
                spec.channel = get_channel(spec.channel)
                add_(spec, key=key)

        # read all segments
        try:
            group = h5file['segments']
        except KeyError:
            group = dict()
        for name in group:
            dqflag = DataQualityFlag.read(group, path=name, format='hdf5')
            globalv.SEGMENTS += {name: dqflag}

        # read all triggers
        try:
            group = h5file['triggers']
        except KeyError:
            group = dict()
        for key in group:
            load_table(group[key])
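
# A minimal usage sketch, assuming the module-level helpers referenced above
# (globalv, get_channel, add_timeseries, add_spectrogram, load_table, mode,
# ...) are defined alongside this function, and using a hypothetical path:
if __name__ == '__main__':
    read_data_archive('summary-archive.h5')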
Example #59
0
	# determine times when f2 above some threshold
	idxhighscat = argwhere(scatf2>=thresh)
	highscatf2 = scatf2[idxhighscat]
	highscattimes = times[idxhighscat]
	highscattimesgps = highscattimes+start_time

	# save text file with values above threshold [GPS f2 index]
	outdata = hstack((highscattimesgps,highscatf2,idxhighscat))
	savetxt('%s-ALL-TIMES-SCATTER-GT%dHZ-%d-%d.txt' % (ifo,thresh,start_time,dur),outdata,fmt='%f %f %i')

	# save segments XML file with segments (based off code from Duncan Macleod)
	from math import (floor, ceil)
	from gwpy.segments import (Segment, DataQualityFlag)

	flag = '%s:DCH-SCATTERED_LIGHT_GT%dHZ:1' % (ifo,thresh) 
	flag = DataQualityFlag(flag)
	segs = []
	append = segs.append

	for gps in highscattimesgps:
		if len(segs) and gps in segs[-1]:
			continue
		seg = Segment(floor(gps), ceil(gps))
		append(seg)

	flag.active = segs
	flag.known = [Segment(start_time, end_time)]
	flag.coalesce()
	flag.write('%s-%s_%d-%d-%d.xml.gz' % (flag.ifo, flag.tag.replace('-', '_'), flag.version,start_time, dur))

#EOF
Example #60
0
def main(args=None):
    """Run the zero-crossing counter tool
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    span = Segment(args.gpsstart, args.gpsend)
    LOGGER.info('-- Processing channel %s over span %d - %d' %
                (args.channel, args.gpsstart, args.gpsend))

    if args.state_flag:
        state = DataQualityFlag.query(
            args.state_flag,
            int(args.gpsstart),
            int(args.gpsend),
            url=const.DEFAULT_SEGMENT_SERVER,
        )
        statea = state.active
    else:
        statea = SegmentList([span])

    duration = abs(span)

    # initialize output files for each threshold and store them in a dict
    outfiles = {}
    for thresh in args.threshold:
        outfiles[str(thresh)] = (os.path.join(
            args.output_path, '%s_%s_DAC-%d-%d.h5' %
            (args.channel.replace('-', '_').replace(':', '-'), str(
                int(thresh)).replace('-', 'n'), int(args.gpsstart), duration)))

    # get frame cache
    cache = gwdatafind.find_urls(args.ifo[0], args.frametype,
                                 int(args.gpsstart), int(args.gpsend))

    cachesegs = statea & cache_segments(cache)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    # initialize a ligolw table for each threshold and store them in a dict
    names = ("time", "frequency", "snr")
    dtypes = ("f8", ) * len(names)
    tables = {}
    for thresh in args.threshold:
        tables[str(thresh)] = EventTable(
            names=names,
            dtype=dtypes,
            meta={"channel": args.channel},
        )

    # for each science segment, read in the data from frames, check for
    # threshold crossings, and if the rate of crossings is less than
    # rate_thresh, write to a sngl_burst table
    for seg in cachesegs:
        LOGGER.debug("Processing {}:".format(seg))
        c = sieve_cache(cache, segment=seg)
        if not c:
            LOGGER.warning("    No {} data files for this segment, "
                           "skipping".format(args.frametype))
            continue
        data = get_data(args.channel,
                        seg[0],
                        seg[1],
                        nproc=args.nproc,
                        source=c,
                        verbose="Reading data:".rjust(30))
        for thresh in args.threshold:
            times = find_crossings(data, thresh)
            rate = float(times.size) / abs(seg) if times.size else 0
            LOGGER.info("    Found {0} crossings of {1}, rate: {2} Hz".format(
                times.size,
                thresh,
                rate,
            ))
            if times.size and rate < args.rate_thresh:
                existing = tables[str(thresh)]
                tables[str(thresh)] = vstack_tables(
                    (
                        existing,
                        table_from_times(times,
                                         snr=10.,
                                         frequency=100.,
                                         names=existing.colnames),
                    ),
                    join_type="exact",
                )

    n = max(map(len, tables.values()))
    for thresh, outfile in outfiles.items():
        tables[thresh].write(
            outfile,
            path="triggers",
            format="hdf5",
            overwrite=True,
        )
        LOGGER.info("{0} events written to {1}".format(
            str(len(tables[thresh])).rjust(len(str(n))),
            outfile,
        ))