Example No. 1
# Module-level names assumed by this excerpt (not shown in the source):
# a logger `log`, numpy as `np`, and gwpy's StateVector.
import logging

import numpy as np
from gwpy.timeseries import StateVector

log = logging.getLogger(__name__)


def check_vector(cache, channel, start, end, bits, logic_type='all'):
    """Check timeseries of decimals against a bitmask.
    This is inclusive of the start time and exclusive of the end time, i.e.
    [start, ..., end).

    Parameters
    ----------
    cache : :class:`glue.lal.Cache`
        Cache from which to check.
    channel : str
        Channel to look at, e.g. ``H1:DMT-DQ_VECTOR``.
    start, end : int or float
        GPS start and end times desired.
    bits : :class:`gwpy.timeseries.Bits`
        Definitions of the bits in the channel.
    logic_type : str, optional
        Type of logic to apply for vetoing.
        If ``all``, then all samples in the window must pass the bitmask.
        If ``any``, then one or more samples in the window must pass.

    Returns
    -------
    dict
        Maps each bit in channel to its state.

    Example
    -------
    >>> check_vector(cache, 'H1:GDS-CALIB_STATE_VECTOR', 1216496260,
    ...              1216496262, ligo_state_vector_bits)
    {'H1:HOFT_OK': True,
     'H1:OBSERVATION_INTENT': True,
     'H1:NO_STOCH_HW_INJ': True,
     'H1:NO_CBC_HW_INJ': True,
     'H1:NO_BURST_HW_INJ': True,
     'H1:NO_DETCHAR_HW_INJ': True}
    """
    if logic_type not in ('any', 'all'):
        raise ValueError("logic_type must be either 'all' or 'any'.")
    bitname = '{}:{}'
    if cache:
        try:
            statevector = StateVector.read(cache,
                                           channel,
                                           start=start,
                                           end=end,
                                           bits=bits)
            return {
                bitname.format(channel.split(':')[0], key):
                bool(getattr(np, logic_type)(value.value))
                for key, value in statevector.get_bit_series().items()
            }
        except IndexError:
            log.exception('Failed to read from low-latency frame files')
    # FIXME: figure out how to get access to low-latency frames outside
    # of the cluster. Until we figure that out, actual I/O errors have
    # to be non-fatal.
    return {
        bitname.format(channel.split(':')[0], key): None
        for key in bits if key is not None
    }
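
# The `logic_type` switch above reduces each bit's boolean time series with
# numpy.all or numpy.any. A minimal sketch of that reduction on a toy
# four-sample window (values are illustrative):
import numpy as np

window = np.array([True, True, False, True])
print(bool(np.all(window)))  # False: one sample failed the bitmask
print(bool(np.any(window)))  # True: at least one sample passed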
Example No. 2
# Assumed module-level imports for this excerpt (not shown in the source):
#   from math import ceil, floor
#   from gwpy.segments import Segment, SegmentList
#   from gwpy.timeseries import StateVector
# plus project helpers: data.find_frames, cache_segments,
# gwf_data_segments and RAW_TYPE_REGEX.
def get_state_segments(channel, frametype, start, end, bits=[0], nproc=1,
                       pad=(0, 0)):
    """Read state segments from a state-vector channel in the frames
    """
    ifo = channel[:2]
    pstart = start - pad[0]
    pend = end + pad[1]

    # find frame cache
    cache = data.find_frames(ifo, frametype, pstart, pend)

    # optimise I/O based on type and library
    io_kw = {}
    try:
        from LDAStools import frameCPP  # noqa: F401
    except ImportError:
        pass
    else:
        io_kw['format'] = 'gwf.framecpp'
        if RAW_TYPE_REGEX.match(frametype):
            io_kw['type'] = 'adc'
        elif channel.endswith('GDS-CALIB_STATE_VECTOR'):
            io_kw['type'] = 'proc'

    bits = list(map(str, bits))
    # FIXME: need to read from cache with single segment but doesn't match
    # [start, end)

    # Virgo drops the state vector regularly, so need to sieve the files
    if channel == "V1:DQ_ANALYSIS_STATE_VECTOR":
        span = gwf_data_segments(cache, channel)
    else:
        span = SegmentList([Segment(pstart, pend)])

    # read data segments
    segs = SegmentList()
    try:
        csegs = cache_segments(cache)
    except KeyError:
        return segs
    for seg in csegs & span:
        sv = StateVector.read(cache, channel, nproc=nproc, start=seg[0],
                              end=seg[1], bits=bits, gap='pad', pad=0,
                              **io_kw).astype('uint32')
        segs += sv.to_dqflags().intersection().active

    # truncate to integers, and apply padding
    for i, seg in enumerate(segs):
        segs[i] = type(seg)(int(ceil(seg[0])) + pad[0],
                            int(floor(seg[1])) - pad[1])
    return segs.coalesce()
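
# The final loop above shrinks each segment to integer GPS boundaries and
# then re-applies the requested padding. A sketch of that arithmetic with
# illustrative values:
from math import ceil, floor

from gwpy.segments import Segment

pad = (4, 4)  # illustrative padding, seconds
seg = Segment(1186740000.3, 1186740100.7)  # illustrative raw segment
print(type(seg)(int(ceil(seg[0])) + pad[0],
                int(floor(seg[1])) - pad[1]))
# [1186740005 ... 1186740096)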
Example No. 3
# Assumed module-level imports for this excerpt (not shown in the source):
#   from math import ceil, floor
#   from gwpy.segments import Segment, SegmentList
#   from gwpy.timeseries import StateTimeSeries, StateVector, TimeSeriesDict
# plus project helpers: data.find_frames and cache_segments.
def get_guardian_segments(node, frametype, start, end, nproc=1, pad=(0, 0),
                          strict=False):
    """Determine state segments for a given guardian node
    """
    ifo, node = node.split(':', 1)
    if node.startswith('GRD-'):
        node = node[4:]
    pstart = start - pad[0]
    pend = end + pad[1]

    # find frame cache
    cache = data.find_frames(ifo, frametype, pstart, pend)

    # pre-format data segments
    span = SegmentList([Segment(pstart, pend)])
    segs = SegmentList()
    csegs = cache_segments(cache)
    if not csegs:
        return csegs

    # read data
    stub = "{}:GRD-{}".format(ifo, node)
    if strict:
        channels = ["{}_OK".format(stub)]
    else:
        state = "{}_STATE_N".format(stub)
        nominal = "{}_NOMINAL_N".format(stub)
        active = "{}_ACTIVE".format(stub)
        channels = [state, nominal, active]
    for seg in csegs & span:
        if strict:
            sv = StateVector.read(
                cache, channels[0], nproc=nproc, start=seg[0], end=seg[1],
                bits=[0], gap='pad', pad=0).astype('uint32')
            segs += sv.to_dqflags().intersection().active
        else:
            gdata = TimeSeriesDict.read(
                cache, channels, nproc=nproc, start=seg[0], end=seg[1],
                gap='pad', pad=0)
            ok = ((gdata[state].value == gdata[nominal].value) &
                  (gdata[active].value == 1)).view(StateTimeSeries)
            ok.t0 = gdata[state].t0
            ok.dt = gdata[state].dt
            segs += ok.to_dqflag().active

    # truncate to integers, and apply padding
    for i, seg in enumerate(segs):
        segs[i] = type(seg)(int(ceil(seg[0])) + pad[0],
                            int(floor(seg[1])) - pad[1])
    return segs.coalesce()
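
# In the non-strict branch, a guardian node counts as "OK" wherever its
# requested state matches its nominal state while the node is active. A
# minimal numpy sketch of that element-wise test (values are illustrative):
import numpy as np

state = np.array([100, 100, 52, 100])
nominal = np.array([100, 100, 100, 100])
active = np.array([1, 1, 1, 0])
print((state == nominal) & (active == 1))  # [ True  True False False]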
Example No. 4
# Assumed module-level imports for this excerpt: numpy as `np` and
# gwpy's StateVector.
import numpy as np
from gwpy.timeseries import StateVector


def retrieve_bits(params, channel, segment):
    """Retrieve the state-vector timeseries for a given channel and segment.

    @param params
        seismon params dictionary
    @param channel
        seismon channel structure
    @param segment
        [start,end] gps
    """

    gpsStart = segment[0]
    gpsEnd = segment[1]

    # set the times
    duration = np.ceil(gpsEnd-gpsStart)

    # label the 16 bits by index
    bits = [str(i) for i in range(16)]

    # make timeseries (modern gwpy selects bits with the ``bits`` keyword;
    # older releases used ``bitmask``)
    dataFull = StateVector.read(params["frame"], channel.station,
                                start=gpsStart, end=gpsEnd, bits=bits)

    return dataFull
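
# A hedged usage sketch: given a hypothetical frame file and channel, each
# labelled bit can then be pulled out as a boolean StateTimeSeries via
# StateVector.get_bit_series():
from gwpy.timeseries import StateVector

sv = StateVector.read('L-L1_R-1186740000-64.gwf',  # hypothetical file
                      'L1:SEI-STATE_VECTOR',  # hypothetical channel
                      start=1186740000, end=1186740064,
                      bits=[str(i) for i in range(16)])
for name, bit in sv.get_bit_series().items():
    print(name, bool(bit.value.all()))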
Example No. 5
def get_state_segments(channel,
                       frametype,
                       start,
                       end,
                       bits=[0],
                       nproc=1,
                       pad=(0, 0)):
    """Read state segments from a state-vector channel in the frames
    """
    ifo = channel[:2]
    pstart = start - pad[0]
    pend = end + pad[1]

    # find frame cache
    cache = data.find_frames(ifo, frametype, pstart, pend)

    # optimise I/O based on type and library
    io_kw = {}
    try:
        from LDAStools import frameCPP  # noqa: F401
    except ImportError:
        pass
    else:
        io_kw['format'] = 'gwf.framecpp'
        if RAW_TYPE_REGEX.match(frametype):
            io_kw['type'] = 'adc'
        elif channel.endswith('GDS-CALIB_STATE_VECTOR'):
            io_kw['type'] = 'proc'

    bits = list(map(str, bits))
    # FIXME: need to read from cache with single segment but doesn't match
    # [start, end)

    # Virgo drops the state vector regularly, so need to sieve the files
    if channel == "V1:DQ_ANALYSIS_STATE_VECTOR":
        span = gwf_data_segments(cache, channel)
    else:
        span = SegmentList([Segment(pstart, pend)])

    # read data segments
    segs = SegmentList()
    try:
        csegs = cache_segments(cache)
    except KeyError:
        return segs
    for seg in csegs & span:
        sv = StateVector.read(cache,
                              channel,
                              nproc=nproc,
                              start=seg[0],
                              end=seg[1],
                              bits=bits,
                              gap='pad',
                              pad=0,
                              **io_kw).astype('uint32')
        segs += sv.to_dqflags().intersection().active

    # truncate to integers, and apply padding
    for i, seg in enumerate(segs):
        segs[i] = type(seg)(int(ceil(seg[0])) + pad[0],
                            int(floor(seg[1])) - pad[1])
    return segs.coalesce()
Example No. 6
# Assumes gwsumm module-level imports (not shown): re, warnings, globalv,
# mode, get_channel, add_timeseries, add_spectrogram,
# add_coherence_component_spectrogram, load_table, and the gwpy/astropy
# classes used below.
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source

    This method reads all found data into the data containers defined by
    the `gwsumm.globalv` module, then returns nothing.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:

        # -- channels ---------------------------

        try:
            ctable = Table.read(h5file['channels'])
        except KeyError:  # no channels table written
            pass
        else:
            for row in ctable:
                chan = get_channel(row['name'])
                for p in ctable.colnames[1:]:
                    if row[p]:
                        setattr(chan, p, row[p])

        # -- timeseries -------------------------

        for dataset in h5file.get('timeseries', {}).values():
            ts = TimeSeries.read(dataset, format='hdf5')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name) and
                    ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() != mode.Mode.day:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # -- statevector ------------------------

        for dataset in h5file.get('statevector', {}).values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # -- spectrogram ------------------------

        for tag, add_ in zip(
                ['spectrogram', 'coherence-components'],
                [add_spectrogram, add_coherence_component_spectrogram]):
            for key, dataset in h5file.get(tag, {}).items():
                key = key.rsplit(',', 1)[0]
                spec = Spectrogram.read(dataset, format='hdf5')
                spec.channel = get_channel(spec.channel)
                add_(spec, key=key)

        # -- segments ---------------------------

        for name, dataset in h5file.get('segments', {}).items():
            dqflag = DataQualityFlag.read(h5file, path=dataset.name,
                                          format='hdf5')
            globalv.SEGMENTS += {name: dqflag}

        # -- triggers ---------------------------

        for dataset in h5file.get('triggers', {}).values():
            load_table(dataset)
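
# The reader above walks a fixed set of top-level HDF5 groups. A quick
# sketch of inspecting what a given archive actually contains (assuming a
# file 'archive.h5' written in this layout):
import h5py

with h5py.File('archive.h5', 'r') as h5f:
    print(list(h5f))  # e.g. ['channels', 'segments', 'timeseries', ...]
    print(list(h5f.get('timeseries', {})))  # one dataset per channel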
Example No. 7
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:
        # read all time-series data
        try:
            group = h5file['timeseries']
        except KeyError:
            group = dict()
        for dataset in group.values():
            ts = TimeSeries.read(dataset, format='hdf5')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name) and
                    ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() != mode.SUMMARY_MODE_DAY:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # read all state-vector data
        try:
            group = h5file['statevector']
        except KeyError:
            group = dict()
        for dataset in group.values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # read all spectrogram data
        try:
            group = h5file['spectrogram']
        except KeyError:
            group = dict()
        for key, dataset in group.items():
            key = key.rsplit(',', 1)[0]
            spec = Spectrogram.read(dataset, format='hdf5')
            spec.channel = get_channel(spec.channel)
            add_spectrogram(spec, key=key)

        try:
            group = h5file['segments']
        except KeyError:
            group = dict()
        for name, dataset in group.items():
            dqflag = DataQualityFlag.read(dataset, format='hdf5')
            globalv.SEGMENTS += {name: dqflag}
Example No. 8
Alongside the strain *h(t)* data, |GWOSC|_ also
releases a *Data Quality* :ref:`state vector <gwpy-statevector>`.
We can use this to check on the quality of the data from the LIGO Livingston
detector around |GW170817|_.
"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = "gwpy.timeseries"

# First, we can import the `StateVector` class:
from gwpy.timeseries import StateVector

# and download the state information surrounding GW170817:
data = StateVector.fetch_open_data(
    "L1",
    1187008882 - 100,
    1187008882 + 100,
    verbose=True,
)

# Finally, we make a :meth:`~StateVector.plot`, passing `insetlabels=True` to
# display the bit names inside the axes:
plot = data.plot(insetlabels=True)
ax = plot.gca()
ax.set_xscale('seconds', epoch=1187008882)
ax.axvline(1187008882, color='orange', linestyle='--')
ax.set_title('LIGO-Livingston data quality around GW170817')
plot.show()

# This plot shows that for a short time exactly overlapping with GW170817
# there was a data quality issue recorded that would negatively impact a
# search for generic gravitational-wave transients (bursts).
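
# To see exactly which flags dropped out, each bit can (as a sketch) be
# converted to a `DataQualityFlag` and its active segments printed:
flags = data.to_dqflags()
for name, flag in flags.items():
    print(name, flag.active)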
Example No. 9
Confident detection of gravitational-wave signals is critically dependent
on understanding the quality of the data searched.
Alongside the strain *h(t)* data, the GW Open Science Center (GWOSC) also
releases a *Data Quality* :ref:`state vector <gwpy-statevector>`.
We can use this to check on the quality of the data from the LIGO Livingston
detector around `GW170817 <https://losc.ligo.org/events/GW170817/>`__.
"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = 'gwpy.timeseries'

# First, we can import the `StateVector` class:
from gwpy.timeseries import StateVector

# and download the state information surrounding GW170817:
data = StateVector.fetch_open_data('L1', 1187008882-100, 1187008882+100,
                                   verbose=True, tag='C00')

# Finally, we make a :meth:`~StateVector.plot`, passing `insetlabels=True` to
# display the bit names inside the axes:
plot = data.plot(insetlabels=True)
ax = plot.gca()
ax.set_xscale('seconds', epoch=1187008882)
ax.axvline(1187008882, color='orange', linestyle='--')
ax.set_title('LIGO-Livingston data quality around GW170817')
plot.show()

# This plot shows that for a short time exactly overlapping with GW170817
# there was a data quality issue recorded that would negatively impact a
# search for generic gravitational-wave transients (bursts).
# For more details on this *glitch*, and on how it was excised, please see
# `Abbott et al. 2017 <https://doi.org/10.1103/PhysRevLett.119.161101>`__.
from gwpy.timeseries import (TimeSeries, StateVector)

print(TimeSeries.fetch_open_data('H1', 1126259446, 1126259478))
# TimeSeries([  2.17704028e-19,  2.08763900e-19,  2.39681183e-19,
# ...,   3.55365541e-20,  6.33533516e-20,
# 7.58121195e-20]
# unit: Unit(dimensionless),
# t0: 1126259446.0 s,
# dt: 0.000244140625 s,
# name: Strain,
# channel: None)
print(StateVector.fetch_open_data('H1', 1126259446, 1126259478))
# StateVector([127,127,127,127,127,127,127,127,127,127,127,127,
# 127,127,127,127,127,127,127,127,127,127,127,127,
# 127,127,127,127,127,127,127,127]
# unit: Unit(dimensionless),
# t0: 1126259446.0 s,
# dt: 1.0 s,
# name: Data quality,
# channel: None,
# bits: Bits(0: data present
# 1: passes cbc CAT1 test
# 2: passes cbc CAT2 test
# 3: passes cbc CAT3 test
# 4: passes burst CAT1 test
# 5: passes burst CAT2 test
# 6: passes burst CAT3 test,
# channel=None,
# epoch=1126259446.0))

# For the `StateVector`, the naming of the bits will be ``format``-dependent,
# because they are recorded differently by LOSC.
Example No. 11
from gwpy.timeseries import StateVector

# define bitmask
bits = [
    'Summary state',
    'State 1 damped',
    'Stage 1 isolated',
    'Stage 2 damped',
    'Stage 2 isolated',
    'Master switch ON',
    'Stage 1 WatchDog OK',
    'Stage 2 WatchDog OK',
]

# get data
data = StateVector.fetch('L1:ISI-ETMX_ODC_CHANNEL_OUT_DQ',
                         'May 22 2014 14:00', 'May 22 2014 15:00',
                         bits=bits)
data = data.resample(16)

# make a plot
plot = data.plot(add_label='inset')
plot.set_title('LLO ETMX internal seismic isolation state')
plot.add_bitmask('0b11101110')

if __name__ == '__main__':
    try:
        outfile = __file__.replace('.py', '.png')
    except NameError:
        pass
    else:
        plot.save(outfile)
        print("Example output saved as\n%s" % outfile)
Example No. 12
parser.add_option("--ifo", metavar = "name", help = "Name of the IFO")
parser.add_option("--hoft-frames-cache", metavar = "name", help = "Frame cache file for h(t) data to be analyzed")
#parser.add_option("--raw-frames-cache", metavar = "name", help = "Frame cache for raw data.")
parser.add_option("--calib-state-vector-channel-name", metavar = "name", default = "GDS-CALIB_STATE_VECTOR", help = "Calibration state vector channel name (default = GDS-CALIB_STATE_VECTOR")
#parser.add_option("--analyze-calcs-hoft", action = "store_true", help = "Set this to analyze CALCS h(t) data")
#parser.add_option("--calcs-deltal-channel-name", metavar = "name", default = "CAL-DELTAL_EXTERNAL_DQ", help = "CALCS \delta L channel name (default = CAL-DELTAL_EXTERNAL_DQ)")

options, filenames = parser.parse_args()

start = int(options.gps_start_time)
end = int(options.gps_end_time)
ifo = options.ifo
hoft_frames_cache = options.hoft_frames_cache
calib_state_channel_name = options.calib_state_vector_channel_name

calib_state_vector = StateVector.read(hoft_frames_cache, "%s:%s" % (ifo, calib_state_channel_name), start = start, end = end)

# define list of labels
labels = [
	'h(t) OK',
	'Obs. intent',
	'Obs. ready',
	'Filters OK',
	'No gap',
	'No Stoch. inj.',
	'No CBC inj.',
	'No Burst inj.',
	'No DetChar inj.',
	'ktst smooth',
	'kpum smooth',
	'kuim smooth',
]  # further labels truncated in the original excerpt

from gwpy.timeseries import (TimeSeries, StateVector)
print(TimeSeries.fetch_open_data('H1', 1126259446, 1126259478))
# TimeSeries([  2.17704028e-19,  2.08763900e-19,  2.39681183e-19,
# ...,   3.55365541e-20,  6.33533516e-20,
# 7.58121195e-20]
# unit: Unit(dimensionless),
# t0: 1126259446.0 s,
# dt: 0.000244140625 s,
# name: Strain,
# channel: None)
print(StateVector.fetch_open_data('H1', 1126259446, 1126259478))
# StateVector([127,127,127,127,127,127,127,127,127,127,127,127,
# 127,127,127,127,127,127,127,127,127,127,127,127,
# 127,127,127,127,127,127,127,127]
# unit: Unit(dimensionless),
# t0: 1126259446.0 s,
# dt: 1.0 s,
# name: Data quality,
# channel: None,
# bits: Bits(0: data present
# 1: passes cbc CAT1 test
# 2: passes cbc CAT2 test
# 3: passes cbc CAT3 test
# 4: passes burst CAT1 test
# 5: passes burst CAT2 test
# 6: passes burst CAT3 test,
# channel=None,
# epoch=1126259446.0))

# For the `StateVector`, the naming of the bits will be
# ``format``-dependent, because they are recorded differently by LOSC.
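
# As a sketch, the same query made with the ``format`` keyword of
# `fetch_open_data` (e.g. format='hdf5' rather than 'gwf') can be used to
# compare how each file format labels the bits:
sv_hdf = StateVector.fetch_open_data('H1', 1126259446, 1126259478,
                                     format='hdf5')
print(sv_hdf.bits)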
Example No. 14
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source

    This method reads all found data into the data containers defined by
    the `gwsumm.globalv` module, then returns nothing.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:

        # -- channels ---------------------------

        try:
            ctable = Table.read(h5file['channels'])
        except KeyError:  # no channels table written
            pass
        else:
            for row in ctable:
                chan = get_channel(row['name'])
                for p in ctable.colnames[1:]:
                    if row[p]:
                        setattr(chan, p, row[p])

        # -- timeseries -------------------------

        for dataset in h5file.get('timeseries', {}).values():
            ts = TimeSeries.read(dataset, format='hdf5')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name)
                    and ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() != mode.Mode.day:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # -- statevector ------------------------

        for dataset in h5file.get('statevector', {}).values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # -- spectrogram ------------------------

        for tag, add_ in zip(
            ['spectrogram', 'coherence-components'],
            [add_spectrogram, add_coherence_component_spectrogram]):
            for key, dataset in h5file.get(tag, {}).items():
                key = key.rsplit(',', 1)[0]
                spec = Spectrogram.read(dataset, format='hdf5')
                spec.channel = get_channel(spec.channel)
                add_(spec, key=key)

        # -- segments ---------------------------

        for name, dataset in h5file.get('segments', {}).items():
            dqflag = DataQualityFlag.read(h5file,
                                          path=dataset.name,
                                          format='hdf5')
            globalv.SEGMENTS += {name: dqflag}

        # -- triggers ---------------------------

        for dataset in h5file.get('triggers', {}).values():
            load_table(dataset)
Example No. 15
Alongside the strain *h(t)* data, the GW Open Science Center (GWOSC) also
releases a *Data Quality* :ref:`state vector <gwpy-statevector>`.
We can use this to check on the quality of the data from the LIGO Livingston
detector around `GW170817 <https://losc.ligo.org/events/GW170817/>`__.
"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = 'gwpy.timeseries'

# First, we can import the `StateVector` class:
from gwpy.timeseries import StateVector

# and download the state information surrounding GW170817:
data = StateVector.fetch_open_data('L1',
                                   1187008882 - 100,
                                   1187008882 + 100,
                                   verbose=True,
                                   tag='C00')

# Finally, we make a :meth:`~StateVector.plot`, passing `insetlabels=True` to
# display the bit names inside the axes:
plot = data.plot(insetlabels=True)
ax = plot.gca()
ax.set_xscale('seconds', epoch=1187008882)
ax.axvline(1187008882, color='orange', linestyle='--')
ax.set_title('LIGO-Livingston data quality around GW170817')
plot.show()

# This plot shows that for a short time exactly overlapping with GW170817
# there was a data quality issue recorded that would negatively impact a
# search for generic gravitational-wave transients (bursts).
Example No. 16
def get_guardian_segments(node,
                          frametype,
                          start,
                          end,
                          nproc=1,
                          pad=(0, 0),
                          strict=False):
    """Determine state segments for a given guardian node
    """
    ifo, node = node.split(':', 1)
    if node.startswith('GRD-'):
        node = node[4:]
    pstart = start - pad[0]
    pend = end + pad[1]

    # find frame cache
    cache = data.find_frames(ifo, frametype, pstart, pend)

    # pre-format data segments
    span = SegmentList([Segment(pstart, pend)])
    segs = SegmentList()
    csegs = cache_segments(cache)
    if not csegs:
        return csegs

    # read data
    stub = "{}:GRD-{}".format(ifo, node)
    if strict:
        channels = ["{}_OK".format(stub)]
    else:
        state = "{}_STATE_N".format(stub)
        nominal = "{}_NOMINAL_N".format(stub)
        active = "{}_ACTIVE".format(stub)
        channels = [state, nominal, active]
    for seg in csegs & span:
        if strict:
            sv = StateVector.read(
                cache,
                channels[0],
                nproc=nproc,
                start=seg[0],
                end=seg[1],
                bits=[0],
                gap='pad',
                pad=0,
            ).astype('uint32')
            segs += sv.to_dqflags().intersection().active
        else:
            gdata = TimeSeriesDict.read(cache,
                                        channels,
                                        nproc=nproc,
                                        start=seg[0],
                                        end=seg[1],
                                        gap='pad',
                                        pad=0)
            ok = ((gdata[state].value == gdata[nominal].value) &
                  (gdata[active].value == 1)).view(StateTimeSeries)
            ok.t0 = gdata[state].t0
            ok.dt = gdata[state].dt
            segs += ok.to_dqflag().active

    # truncate to integers, and apply padding
    for i, seg in enumerate(segs):
        segs[i] = type(seg)(int(ceil(seg[0])) + pad[0],
                            int(floor(seg[1])) - pad[1])
    return segs.coalesce()
Example No. 17
# First, we can import the `StateVector` class:
from gwpy.timeseries import StateVector

# Next, we define which bits we want to use, and can then
# :meth:`~StateVector.get` the data:
bits = [
    'Summary state',
    'State 1 damped',
    'Stage 1 isolated',
    'Stage 2 damped',
    'Stage 2 isolated',
    'Master switch ON',
    'Stage 1 WatchDog OK',
    'Stage 2 WatchDog OK',
]

data = StateVector.get('L1:ISI-ETMX_ODC_CHANNEL_OUT_DQ', 'May 22 2014 14:00', 'May 22 2014 15:00', bits=bits)
data = data.astype('uint32')  # hide

# For this example, we wish to :meth:`~StateVector.resample` the data to a
# much lower rate, to make visualising the state much easier:
data = data.resample(16)

# Finally, we make a :meth:`~StateVector.plot`, passing `insetlabels=True` to
# display the bit names inside the axes:
plot = data.plot(insetlabels=True)
plot.set_title('LLO ETMX internal seismic isolation state')
plot.add_bitmask('0b11101110')
plot.show()
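
# The '0b11101110' string is a binary mask over the eight bits defined
# above; decoding it with plain-Python bit arithmetic (a sketch):
mask = int('0b11101110', 2)
print([i for i in range(8) if mask & (1 << i)])
# [1, 2, 3, 5, 6, 7]: every bit except 0 ('Summary state') and 4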
Example No. 18
def check_vector(cache, channel, start, end, bits, logic_type='all'):
    """Check timeseries of decimals against a bitmask.
    This is inclusive of the start time and exclusive of the end time, i.e.
    [start, ..., end).

    Parameters
    ----------
    cache : :class:`glue.lal.Cache`
        Cache from which to check.
    channel : str
        Channel to look at, e.g. ``H1:DMT-DQ_VECTOR``.
    start, end : int or float
        GPS start and end times desired.
    bits : :class:`gwpy.timeseries.Bits`
        Definitions of the bits in the channel.
    logic_type : str, optional
        Type of logic to apply for vetoing.
        If ``all``, then all samples in the window must pass the bitmask.
        If ``any``, then one or more samples in the window must pass.

    Returns
    -------
    dict
        Maps each bit in channel to its state.

    Example
    -------
    >>> check_vector(cache, 'H1:GDS-CALIB_STATE_VECTOR', 1216496260,
    ...              1216496262, ligo_state_vector_bits)
    {'H1:HOFT_OK': True,
     'H1:OBSERVATION_INTENT': True,
     'H1:NO_STOCH_HW_INJ': True,
     'H1:NO_CBC_HW_INJ': True,
     'H1:NO_BURST_HW_INJ': True,
     'H1:NO_DETCHAR_HW_INJ': True}

    """
    if logic_type not in ('any', 'all'):
        raise ValueError("logic_type must be either 'all' or 'any'.")
    logic_map = {'any': np.any, 'all': np.all}
    bitname = '{}:{}'
    if cache:
        try:
            statevector = StateVector.read(cache, channel,
                                           start=start, end=end, bits=bits)
        except (IndexError, TypeError):
            # FIXME: TypeError above is due to
            # https://github.com/gwpy/gwpy/issues/1211
            #
            # FIXME: Change from log.exception to log.warning until this fixed,
            # because it's saturating Sentry.
            log.warning('Failed to read from low-latency frame files')
        else:
            # FIXME: In the playground environment, the Virgo state vector
            # channel is stored as a float. Is this also the case in the
            # production environment?
            statevector = statevector.astype(np.uint32)
            if len(statevector) > 0:  # statevector must not be empty
                return {bitname.format(channel.split(':')[0], key):
                        bool(logic_map[logic_type](
                            value.value if len(value.value) > 0 else None))
                        for key, value in statevector.get_bit_series().items()}
    # FIXME: figure out how to get access to low-latency frames outside
    # of the cluster. Until we figure that out, actual I/O errors have
    # to be non-fatal.
    return {bitname.format(channel.split(':')[0], key):
            None for key in bits if key is not None}
Example No. 19
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:
        # read all time-series data
        try:
            group = h5file['timeseries']
        except KeyError:
            group = dict()
        for dataset in group.values():
            ts = TimeSeries.read(dataset, format='hdf5')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name)
                    and ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() != mode.Mode.day:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # read all state-vector data
        try:
            group = h5file['statevector']
        except KeyError:
            group = dict()
        for dataset in group.values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # read all spectrogram data
        for tag in ['spectrogram', 'coherence-components']:
            if tag == 'coherence-components':
                add_ = add_coherence_component_spectrogram
            else:
                add_ = add_spectrogram
            try:
                group = h5file[tag]
            except KeyError:
                group = dict()
        for key, dataset in group.items():
                key = key.rsplit(',', 1)[0]
                spec = Spectrogram.read(dataset, format='hdf5')
                spec.channel = get_channel(spec.channel)
                add_(spec, key=key)

        # read all segments
        try:
            group = h5file['segments']
        except KeyError:
            group = dict()
        for name in group:
            dqflag = DataQualityFlag.read(group, path=name, format='hdf5')
            globalv.SEGMENTS += {name: dqflag}

        # read all triggers
        try:
            group = h5file['triggers']
        except KeyError:
            group = dict()
        for key in group:
            load_table(group[key])
Example No. 20
from gwosc.datasets import event_gps
from gwpy.timeseries import StateVector
gps = event_gps("GW200105_162426")
start = int(gps) - 1000
end = int(gps) + 1000
gw200105_state = StateVector.fetch_open_data("L1", start, end)
print(gw200105_state)
# StateVector([127, 127, 127, ..., 127, 127, 127]
# unit: dimensionless,
# t0: 1262275684.0 s,
# dt: 1.0 s,
# name: Data quality,
# channel: None,
# bits: Bits(0: Passes DATA test
# 1: Passes CBC_CAT1 test
# 2: Passes CBC_CAT2 test
# 3: Passes CBC_CAT3 test
# 4: Passes BURST_CAT1 test
# 5: Passes BURST_CAT2 test
# 6: Passes BURST_CAT3 test,
# channel=None,
# epoch=1262274636.0))
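
# Each sample value of 127 is 0b1111111: all seven defined bits are set,
# so every test passes throughout the window. A quick check of that
# decoding with plain bit arithmetic:
sample = 127
print(bin(sample))  # 0b1111111
print(bool(sample & (1 << 2)))  # True: bit 2 (CBC_CAT2) passes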