Example #1
def fftgram(timeseries, stride, pad=False):
    """
    calculates fourier-gram with automatic
    50% overlapping hann windowing.
    Parameters
    ----------
        timeseries : gwpy TimeSeries
            time series to take fftgram of
        stride : `int`
            number of seconds in single PSD
    Returns
    -------
        fftgram : gwpy spectrogram
            a fourier-gram
    """

    fftlength = stride
    dt = stride
    df = 1. / fftlength
    # number of samples in a step
    stride = int(stride * timeseries.sample_rate.value)
    # number of steps
    nsteps = 2 * int(timeseries.size // stride) - 1
    # only get positive frequencies
    if pad:
        nfreqs = int(fftlength * timeseries.sample_rate.value)
        df = df / 2
        f0 = df
    else:
        nfreqs = int(fftlength * timeseries.sample_rate.value) // 2
    dtype = complex  # np.complex was removed in recent NumPy
    # initialize the spectrogram
    out = Spectrogram(np.zeros((nsteps, nfreqs), dtype=dtype),
                      name=timeseries.name, epoch=timeseries.epoch,
                      f0=df, df=df, dt=dt, copy=True,
                      unit=1 / u.Hz**0.5, dtype=dtype)
    # stride through TimeSeries, recording FFTs as columns of Spectrogram
    for step in range(nsteps):
        # indices for this step
        idx = (stride // 2) * step
        idx_end = idx + stride
        stepseries = timeseries[idx:idx_end]
        # zeropad, window, fft, shift zero to center, normalize
        # window
        stepseries = np.multiply(stepseries,
                                 np.hanning(stepseries.value.size))
        # zero-pad if requested, then take the FFT
        if pad:
            stepseries = TimeSeries(np.hstack((stepseries.value,
                                               np.zeros(stepseries.size))),
                                    name=stepseries.name, x0=stepseries.x0,
                                    dx=timeseries.dx)
        tempfft = stepseries.fft(stepseries.size)
        tempfft.override_unit(out.unit)

        out[step] = tempfft[1:]

    return out
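A minimal usage sketch for fftgram (hedged: the snippet omits its imports, so the ones below are assumptions about what the body relies on):

# Hypothetical usage, with the imports the function body appears to need.
import numpy as np
from astropy import units as u
from gwpy.spectrogram import Spectrogram
from gwpy.timeseries import TimeSeries

ts = TimeSeries(np.random.normal(size=4096 * 8), sample_rate=4096, name='demo')
fg = fftgram(ts, stride=2)  # 2-second FFTs with 50% overlap
print(fg.shape)             # (nsteps, nfreqs)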
Example #2
def dump_calibrated_data(fname):
    data = numpy.load(fname)

    # Figure out the times covered by the file from the filename
    # I should start using HDF5 so I can store metadata
    temp = fname.split('.')[0]
    temp = temp.split('-')
    ifo = temp[0]
    st, dur = int(temp[-2]), int(temp[-1])
    et = st + dur

    maxidx = len(data)
    width = 45

    weights = 1. - ((numpy.arange(-width, width) / float(width))**2)

    # The VCO frequencies are integers so we could dither them
    # to avoid quantization error if we wanted to be fancy
    # but it seems to make no difference
    if False:
        from numpy.random import triangular
        data[:, 1] += triangular(-1., 0., 1., size=len(data))

    # Just fit the whole thing at once, to get a single coefficient
    a, b = numpy.polyfit(data[:, 0], data[:, 1], 1)
    print("%.1f %u" % (a, b))

    # Slide through the data fitting PSL to IMC for data around each sample
    coeffs = []
    for idx in range(maxidx):
        idx1 = max(0, idx - width)
        idx2 = min(idx + width, maxidx)
        coeffs.append(numpy.polyfit(data[idx1:idx2, 0], data[idx1:idx2, 1], 1,
                                    w=weights[idx1 - idx + width:idx2 - idx + width]))
    coeffs = numpy.array(coeffs)
    times = numpy.arange(len(coeffs)) + 0.5
    connection = datafind.GWDataFindHTTPConnection()
    cache = connection.find_frame_urls(
        ifo[0], '%s_R' % ifo, st, et, urltype='file')

    imc = TimeSeries.read(cache, "%s:IMC-F_OUT_DQ" % ifo, st, et)
    imc = imc[::16384 // 256]
    print(imc)
    samp_times = numpy.arange(len(imc)) / 256.

    coeffs0 = numpy.interp(samp_times, times, coeffs[:, 0])
    coeffs1 = numpy.interp(samp_times, times, coeffs[:, 1]) - 7.6e7

    vco_interp = coeffs0 * imc.data + coeffs1

    chan = "%s:IMC-VCO_PREDICTION" % (ifo,)
    vco_data = TimeSeries(vco_interp, epoch=st,
                          sample_rate=imc.sample_rate.value,
                          name=chan, channel=chan)
    vco_data.write("%s-vcoprediction-%u-%u.hdf" % (ifo, st, dur), format='hdf')
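The heart of this function is the weighted sliding linear fit; a self-contained sketch of just that step on synthetic stand-in data (all values hypothetical):

import numpy as np

width = 45
x = np.random.normal(size=1000)  # stand-in for the IMC-F samples
y = 2. * x + 5. + np.random.normal(scale=.1, size=1000)  # stand-in for the VCO
weights = 1. - ((np.arange(-width, width) / float(width))**2)

coeffs = []
for idx in range(len(x)):
    idx1 = max(0, idx - width)
    idx2 = min(idx + width, len(x))
    coeffs.append(np.polyfit(x[idx1:idx2], y[idx1:idx2], 1,
                             w=weights[idx1 - idx + width:idx2 - idx + width]))
coeffs = np.array(coeffs)  # slopes ~2 and intercepts ~5, one pair per sample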
Example #3
def noise_from_psd(length, sample_rate, psd, seed=0, name=None, unit=u.m):
    """ Create noise with a given psd.

    Return noise with a given psd. Note that if unique noise is desired
    a unique seed should be provided.
    Parameters
    ----------
    length : int
        The length of noise to generate in seconds.
    sample_rate : float
        The sample rate of the data.
    psd : FrequencySeries
        The noise weighting to color the noise.
    seed : {0, int}
        The seed to generate the noise.
    name : str, optional
        Name for the output series (default: 'noise').
    unit : astropy unit, optional
        Unit for the output series (default: meters).

    Returns
    -------
    noise : TimeSeries
        A TimeSeries containing Gaussian noise colored by the given psd.
    """
    if name is None:
        name = 'noise'
    length = int(length * sample_rate)

    noise_ts = TimeSeries(np.zeros(length),
            sample_rate=sample_rate, name=name, unit=unit)

    randomness = lal.gsl_rng("ranlux", seed)

    N = int(sample_rate / psd.df.value)
    n = N // 2 + 1
    stride = N // 2

    if n > len(psd):
        raise ValueError("PSD not compatible with requested delta_t")
    psd = (psd[0:n]).to_lal()
    psd.data.data[n-1] = 0
    segment = TimeSeries(np.zeros(N), sample_rate=sample_rate).to_lal()
    length_generated = 0

    SimNoise(segment, 0, psd, randomness)
    while (length_generated < length):
        if (length_generated + stride) < length:
            noise_ts.data[length_generated:length_generated+stride] = segment.data.data[0:stride]
        else:
            noise_ts.data[length_generated:length] = segment.data.data[0:length-length_generated]

        length_generated += stride
        SimNoise(segment, stride, psd, randomness)
    return noise_ts
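A hedged usage sketch for noise_from_psd; it assumes SimNoise is lalsimulation.SimNoise and adds the imports the snippet leaves implicit (the .data indexing above targets an older gwpy API):

# Hypothetical usage -- requires lal, lalsimulation and gwpy.
import numpy as np
from astropy import units as u
from gwpy.frequencyseries import FrequencySeries
from gwpy.timeseries import TimeSeries
import lal
from lalsimulation import SimNoise

# flat PSD out to 2048 Hz with df = 0.25 Hz (8193 bins); value illustrative
flat = FrequencySeries(np.full(8193, 1e-46), df=0.25)
noise = noise_from_psd(16, 4096, flat, seed=1)
print(noise.duration)  # 16 s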
Example #4
    def refresh(self):
        lines = [l for ax in self._fig.axes for l in ax.lines]
        axes = cycle(self._fig.get_axes(self.AXES_CLASS.name))
        params = self.params['draw']
        for i, channel in enumerate(self.channels):
            try:
                line = lines[i]
            except IndexError:
                # haven't plotted this channel before
                ax = next(axes)
                label = (hasattr(channel, 'label') and channel.label or
                         channel.texname)
                pparams = {}
                for key in params:
                    try:
                        if params[key][i]:
                            pparams[key] = params[key][i]
                    except IndexError:
                        pass

                ts = self.data[channel][0].copy()
                for t2 in self.data[channel][1:]:
                    ts.append(t2, pad=self.buffer.pad, gap=self.buffer.gap)
                l = ax.plot(ts, label=label, **pparams)
                ax.legend()
            else:
                # TODO: remove .copy() as soon as copy=True is fixed in gwpy
                ts = TimeSeries(line.get_ydata(), times=line.get_xdata(),
                                copy=True).copy()
                for t2 in self.buffer.get((ts.span[1], self.epoch), channel,
                                          fetch=False):
                    ts.append(t2, pad=self.buffer.pad, gap=self.buffer.gap)
                line.set_xdata(ts.times.value)
                line.set_ydata(ts.value)

        # format figure
        if 'ylim' not in self.params['refresh']:
            for ax in self._fig.get_axes(self.AXES_CLASS.name):
                ax.relim()
                ax.autoscale_view(scalex=False)
        self.logger.info('Figure data updated')
        for ax in self._fig.get_axes(self.AXES_CLASS.name):
            if float(self.epoch) > (self.gpsstart + self.duration):
                ax.set_xlim(float(self.epoch - self.duration),
                            float(self.epoch))
            else:
                ax.set_xlim(float(self.gpsstart),
                            float(self.gpsstart) + self.duration)
            ax.set_epoch(self.epoch)
        self.set_params('refresh')
        self._fig.refresh()
        self.logger.debug('Figure refreshed')
Example #5
 def _next(self):
     uchannels = self._unique_channel_names(self.channels)
     new = TimeSeriesDict()
     span = 0
     epoch = 0
     self.logger.debug('Waiting for next NDS2 packet...')
     while span < self.interval:
         try:
             buffers = next(self.iterator)
         except RuntimeError as e:
             self.logger.error('RuntimeError caught: %s' % str(e))
             self.restart()
             break
         for buff, c in zip(buffers, uchannels):
             ts = TimeSeries.from_nds2_buffer(buff)
             try:
                 new.append({c: ts}, gap=self.gap, pad=self.pad)
             except ValueError as e:
                 if 'discontiguous' in str(e):
                     e.args = ('NDS connection dropped data between %d and '
                               '%d' % (epoch, ts.span[0]),)
                 raise
             span = abs(new[c].span)
             epoch = new[c].span[-1]
             self.logger.debug('%ds data for %s received'
                               % (abs(ts.span), str(c)))
     out = type(new)()
     for chan in self.channels:
         out[chan] = new[self._channel_basename(chan)].copy()
     return out
Example #6
def get_data(channel, start_time, length):
    print('Starting data transfer for channel: ' + str(channel))
    connection = datafind.GWDataFindHTTPConnection()
    cache = connection.find_frame_urls(ifo[0], ifo + '_R', start_time,
                                       start_time + length, urltype='file')
    data = TimeSeries.read(cache, channel)
    print("Got data for channel: " + str(channel))
    return data
Example #7
 def _next(self):
     uchannels = self._unique_channel_names(self.channels)
     new = TimeSeriesDict()
     span = 0
     epoch = 0
     att = 0
     self.logger.debug('Waiting for next NDS2 packet...')
     while span < self.interval:
         try:
             buffers = next(self.iterator)
         except RuntimeError as e:
             self.logger.error('RuntimeError caught: %s' % str(e))
             if att < self.attempts:
                 att += 1
                 wait_time = att / 3 + 1
                 self.logger.warning(
                     'Attempting to reconnect to the nds server... %d/%d'
                     % (att, self.attempts))
                 self.logger.warning('Next attempt in minimum %d seconds' %
                                     wait_time)
                 self.restart()
                 sleep(wait_time - tconvert('now') % wait_time)
                 continue
             else:
                 self.logger.critical(
                     'Maximum number of attempts reached, exiting')
                 break
         att = 0
         for buff, c in zip(buffers, uchannels):
             ts = TimeSeries.from_nds2_buffer(buff)
             try:
                 new.append({c: ts}, gap=self.gap, pad=self.pad)
             except ValueError as e:
                 if 'discontiguous' in str(e):
                     e.message = (
                         'NDS connection dropped data between %d and '
                         '%d, restarting building the buffer from %d ') \
                         % (epoch, ts.span[0], ts.span[0])
                     self.logger.warning(str(e))
                     new = TimeSeriesDict()
                     new[c] = ts.copy()
                 elif ('starts before' in str(e)) or \
                         ('overlapping' in str(e)):
                     e.message = (
                         'Overlap between old data and new data in the '
                         'nds buffer, only the new data will be kept.')
                     self.logger.warning(str(e))
                     new = TimeSeriesDict()
                     new[c] = ts.copy()
                 else:
                     raise
             span = abs(new[c].span)
             epoch = new[c].span[-1]
             self.logger.debug('%ds data for %s received'
                               % (abs(ts.span), str(c)))
     out = type(new)()
     for chan in self.channels:
         out[chan] = new[self._channel_basename(chan)].copy()
     return out
Example #8
File: test_astro.py, Project: stefco/gwpy
def psd():
    h5path = os.path.join(os.path.dirname(__file__), 'data',
                          'HLV-HW100916-968654552-1.hdf')
    try:
        data = TimeSeries.read(h5path, 'L1:LDAS-STRAIN', format='hdf5')
    except ImportError as e:
        pytest.skip(str(e))
    return data.psd(.4, overlap=.2, window=('kaiser', 24))
Example #9
 def test_add_timeseries(self):
     a = TimeSeries([1, 2, 3, 4, 5], name='test name', epoch=0,
                    sample_rate=1)
     # test simple add using 'name'
     data.add_timeseries(a)
     self.assertIn('test name', globalv.DATA)
     self.assertEqual(globalv.DATA['test name'], [a])
     # test add using key kwarg
     data.add_timeseries(a, key='test key')
     self.assertIn('test key', globalv.DATA)
     self.assertEqual(globalv.DATA['test key'], [a])
     # test add to existing key with coalesce
     b = TimeSeries([6, 7, 8, 9, 10], name='test name 2', epoch=5,
                    sample_rate=1)
     data.add_timeseries(b, key='test key', coalesce=True)
     self.assertEqual(globalv.DATA['test key'],
                      [a.append(b, inplace=False)])
Example #10
def calibrate_imc_pslvco(ifo, start_time, dur, cache=None):
    st, et = start_time, start_time + dur
    if cache:
        pslvco = TimeSeries.read(cache, chan1_pat % ifo, start=st, end=et)
        imc = TimeSeries.read(cache, chan2_pat % ifo, start=st, end=et)
    else:
        imc = TimeSeries.fetch(chan2_pat % ifo, st, et)
        pslvco = TimeSeries.fetch(chan1_pat % ifo, st, et)

    arr_psl = pslvco[8::16]
    arr_imc = imc

    tmp1 = (arr_imc[8192::16384])[:-1]
    tmp2 = arr_psl[1:]
    a, b = numpy.polyfit(tmp1, tmp2, 1)

    return a, b
Example #11
def test_save_loudest_tile_features():
    # prepare input data
    channel = GW.channels[0]
    noise = TimeSeries(
        numpy.random.normal(loc=1, scale=.5, size=16384 * 68),
        sample_rate=16384, epoch=-34).zpk([], [0], 1)
    glitch = TimeSeries(
        signal.gausspulse(numpy.arange(-1, 1, 1./16384), bw=100),
        sample_rate=16384, epoch=-1) * 1e-4
    in_ = noise.inject(glitch)
    _, _, _, qgram, _, _, _ = core.scan(
        gps=0, channel=channel, xoft=in_, resample=4096, fftlength=8)

    # test loudest tiles
    channel.save_loudest_tile_features(qgram, correlate=glitch)
    assert channel.Q == numpy.around(qgram.plane.q, 1)
    assert channel.energy == numpy.around(qgram.peak['energy'], 1)
    assert channel.snr == numpy.around(qgram.peak['snr'], 1)
    assert channel.t == numpy.around(qgram.peak['time'], 3)
    assert channel.f == numpy.around(qgram.peak['frequency'], 1)
    assert channel.corr == numpy.around(glitch.max().value, 1)
    assert channel.delay == 0.0
    assert channel.stdev == glitch.std().value
Example #12
def _read_data(channel, st, et, frames=False):
    """
    get data, either from frames or from nds2
    """

    ifo = channel.split(':')[0]
    if frames:
        # read from frames
        connection = datafind.GWDataFindHTTPConnection()
        print(ifo[0])
        if channel.split(':')[1] == 'GDS-CALIB_STRAIN':
            cache = connection.find_frame_urls(ifo[0], ifo + '_HOFT_C00',
                                               st, et, urltype='file')
        else:
            cache = connection.find_frame_urls(ifo[0], ifo + '_C',
                                               st, et, urltype='file')
        try:
            data = TimeSeries.read(cache, channel, st, et)
        except IndexError:
            cache = connection.find_frame_urls(ifo[0], ifo + '_R',
                                               st, et, urltype='file')
            data = TimeSeries.read(cache, channel, st, et)
    else:
        data = TimeSeries.fetch(channel, st, et)

    return data
Example #13
def generate_fast_vco(ifo, segment, frames=False, fit=True):
    """
    Parameters:
    -----------
        ifo : str
            interferometer, e.g. 'L1'
        segment : array like
            time segment. first entry start second entry end
        frames : bool
            read from frames or nds2
        fit : bool
            fit from imc-f (default)
            or spline interpolation

    Returns:
    --------
        vco_data : TimeSeries
            the '%s:IMC-VCO_PREDICTION' prediction time series
    """
    st = segment[0]
    et = segment[1]
    chan1_pat = '%s:SYS-TIMING_C_FO_A_PORT_11_SLAVE_CFC_FREQUENCY_5'
    chan2_pat = '%s:IMC-F_OUT_DQ'
    if frames:
        connection = datafind.GWDataFindHTTPConnection()
        cache = connection.find_frame_urls(
            ifo[0], '%s_R' % ifo, st, et + 1, urltype='file')
        if fit:
            imc = TimeSeries.read(cache, chan2_pat % ifo, st, et)
        else:
            imc = TimeSeries.read(cache, chan2_pat % ifo, st, st + 1)
        pslvco = TimeSeries.read(cache, chan1_pat % ifo, st, et + 1)
    else:
        if fit:
            imc = TimeSeries.fetch(chan2_pat % ifo, st, et)
        else:
            print('HI BEFORE LOADING IMC')
            imc = TimeSeries.fetch(chan2_pat % ifo, st, st + 1)
        print('HI BEFORE LOADING PSL')
        pslvco = TimeSeries.fetch(chan1_pat % ifo, st, et + 1)
        print('HI AFTER LOADING')

    pslvco = pslvco[16 + 8::16]

    if fit:
        imc_srate = int(imc.sample_rate.value)
        imc2 = imc[imc_srate // 2::imc_srate]
        data = np.array((imc2.value, pslvco.value)).T
        vco_interp = fit_with_imc(data, imc)
    else:
        vco_interp = interp_spline(pslvco)

    chan = "%s:IMC-VCO_PREDICTION" % (ifo,)
    vco_data = TimeSeries(vco_interp, epoch=st,
                          sample_rate=256,
                          name=chan, channel=chan)

    return vco_data
Example #14
def _get_vco_data(vco_file, times):
    print('Finding mean values of %s' % vco_file)
    s = times.size
    amp = numpy.zeros(s)
    vco = TimeSeries.from_hdf5(vco_file)
    for i, t in enumerate(times):
        t = t.seconds + t.nanoseconds / 1e9
        idx1 = int(
            (t - vco.times.value[0] - 0.01) * vco.sample_rate.value)
        idx2 = int(
            (t - vco.times.value[0] + 0.01) * vco.sample_rate.value)
        temp = vco[idx1:idx2]
        amp[i] = (temp.mean().value - 3e6) / 1.e3
        print('    Processed trigger %d/%d' % (i + 1, s), end='\r')
    print('    Processed trigger %d/%d' % (s, s))
    return amp
Example #15
 def test_fetch(self):
     try:
         nds_buffer = mockutils.mock_nds2_buffer(
             'X1:TEST', self.data, 1000000000, self.data.shape[0], 'm')
     except ImportError as e:
         self.skipTest(str(e))
     nds_connection = mockutils.mock_nds2_connection([nds_buffer])
     with mock.patch('nds2.connection') as mock_connection, \
          mock.patch('nds2.buffer', nds_buffer):
         mock_connection.return_value = nds_connection
         # use verbose=True to hit more lines
         ts = TimeSeries.fetch('X1:TEST', 1000000000, 1000000001,
                               verbose=True)
     nptest.assert_array_equal(ts.value, self.data)
     self.assertEqual(ts.sample_rate, self.data.shape[0] * units.Hz)
     self.assertTupleEqual(ts.span, (1000000000, 1000000001))
     self.assertEqual(ts.unit, units.meter)
Example #16
        x1500_ns = read_gif('X1500_TR240posNS', start, end, write=True)
        x1500_ud = read_gif('X1500_TR240posUD', start, end, write=True)
        strain = read_gif('CALC_STRAIN', start, end, write=True)
        print('read')
        x1500_ew.write(_gwf_fmt.format(sensor='x1500_ew'),
                       format='gwf.lalframe')
        x1500_ns.write(_gwf_fmt.format(sensor='x1500_ns'),
                       format='gwf.lalframe')
        x1500_ud.write(_gwf_fmt.format(sensor='x1500_ud'),
                       format='gwf.lalframe')
        strain.write(gwf_fmt.format(sensor='strain'), format='gwf.lalframe')
        print('wrote')
        #exit()
    else:
        x1500_ew = TimeSeries.read(_gwf_fmt.format(sensor='x1500_ew'),
                                   'X1500_TR240posEW',
                                   format='gwf.lalframe')
        x1500_ns = TimeSeries.read(_gwf_fmt.format(sensor='x1500_ns'),
                                   'X1500_TR240posNS',
                                   format='gwf.lalframe')
        x1500_ud = TimeSeries.read(_gwf_fmt.format(sensor='x1500_ud'),
                                   'X1500_TR240posUD',
                                   format='gwf.lalframe')
        strain = TimeSeries.read(gwf_fmt.format(sensor='strain'),
                                 'CALC_STRAIN',
                                 format='gwf.lalframe')

    if True:
        # rotate
        x1500 = SeismoMeter(x1500_ew, x1500_ns, x1500_ud)
        x1500.rotate(-30)
Example #17
File: get_data.py, Project: tjma12/detchar
from gwpy.timeseries import TimeSeries

ifo = 'H1'
st_lst = [1160194829, 1160194829 + 16 * 60]
dur = 300

chan_lst = []
names = []
for pd in ['AS_A', 'AS_B', 'REFL_A', 'REFL_B']:
    for quad in ['I', 'Q']:
        for dof in ['PIT', 'YAW']:
            chan_lst.append('ASC-%s_RF45_%s_%s_OUT_DQ' % (pd, quad, dof))
            names.append(pd + '45' + quad + dof[0])

chan_lst.append('LSC-MOD_RF45_AM_CTRL_OUT_DQ')
names.append('RF45AM')

chan_lst.extend([
    'LSC-DARM_IN1_DQ', 'LSC-POP_A_RF45_I_ERR_DQ', 'LSC-POP_A_RF45_Q_ERR_DQ',
    'LSC-POP_A_RF9_I_ERR_DQ', 'LSC-POP_A_RF9_Q_ERR_DQ'
])
names.extend(['DARM', 'POP45I', 'POP45Q', 'POP9I', 'POP9Q'])

for st in st_lst:
    for chan, name in zip(chan_lst, names):
        data = TimeSeries.fetch('%s:%s' % (ifo, chan), st, st + dur)
        data.write('%s-%s-%u-%u.hdf' % (ifo, name, st, dur))
Example #18
start_time = end_time - duration

roll_off = 0.4  # smoothness in a tukey window, default is 0.4s
# This determines the time window used to fetch open data
psd_duration = 32 * duration
psd_start_time = start_time - psd_duration
psd_end_time = start_time

filter_freq = None  # low pass filter frequency to cut signal content above
# Nyquist frequency. The condition is 2 * filter_freq >= sampling_frequency

ifo_list = bilby.gw.detector.InterferometerList([])
for det in interferometer_names:
    logger.info("Downloading analysis data for ifo {}".format(det))
    ifo = bilby.gw.detector.get_empty_interferometer(det)
    data = TimeSeries.fetch_open_data(det, start_time, end_time)
    ifo.set_strain_data_from_gwpy_timeseries(data)
    # Additional arguments you might need to pass to TimeSeries.fetch_open_data:
    # - sample_rate = 4096, most data are stored by LOSC at this frequency;
    #   there may be event-related data releases with a 16384 Hz rate.
    # - tag = 'CLN' for clean data; C00/C01 for raw data (different releases)
    #   note that for O2 events a "tag" is required to download the data.
    # - channel = {'H1': 'H1:DCS-CALIB_STRAIN_C02',
    #              'L1': 'L1:DCS-CALIB_STRAIN_C02',
    #              'V1': 'V1:FAKE_h_16384Hz_4R'}
    #   for some events one can specify the channel: the source data stream
    #   for LOSC data.
    logger.info("Downloading psd data for ifo {}".format(det))
    psd_data = TimeSeries.fetch_open_data(det, psd_start_time, psd_end_time)
    psd_alpha = 2 * roll_off / duration  # shape parameter of tukey window
    psd = psd_data.psd(fftlength=duration,
                       overlap=0,
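The example is truncated at the PSD estimate; a hedged sketch of the usual continuation, attaching the PSD to the interferometer via bilby's PowerSpectralDensity (the psd() arguments here are illustrative):

    # Hedged continuation: finish the PSD estimate and attach it to the ifo.
    psd = psd_data.psd(fftlength=duration, overlap=0,
                       window=('tukey', psd_alpha), method='median')
    ifo.power_spectral_density = bilby.gw.detector.PowerSpectralDensity(
        frequency_array=psd.frequencies.value, psd_array=psd.value)
    ifo_list.append(ifo)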
Example #19
import numpy
from numpy import testing as nptest

from gwpy.testing.compat import mock
from gwpy.timeseries import (TimeSeries, TimeSeriesDict)
from gwpy.segments import (Segment, DataQualityFlag)

from .. import datafind

__author__ = 'Alex Urban <*****@*****.**>'

# global test objects

HOFT = TimeSeries(numpy.random.normal(loc=1, scale=.5, size=16384 * 66),
                  sample_rate=16384,
                  epoch=0,
                  name='X1:TEST-STRAIN')

FLAG = DataQualityFlag(known=[(-33, 33)],
                       active=[(-33, 33)],
                       name='X1:TEST-FLAG:1')

# -- make sure data can be read -----------------------------------------------


@mock.patch('gwpy.segments.DataQualityFlag.query', return_value=FLAG)
def test_check_flag(segserver):
    # attempt to query segments database for an analysis flag
    flag = 'X1:TEST-FLAG:1'
    assert datafind.check_flag(flag, gpstime=0, duration=64, pad=1) is True
    assert datafind.check_flag(flag, gpstime=800, duration=64, pad=1) is False
Example #20
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:
        # read all time-series data
        try:
            group = h5file['timeseries']
        except KeyError:
            group = dict()
        for dataset in group.values():
            ts = TimeSeries.read(dataset, format='hdf')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name)
                    and ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() == mode.SUMMARY_MODE_DAY:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # read all state-vector data
        try:
            group = h5file['statevector']
        except KeyError:
            group = dict()
        for dataset in group.values():
            sv = StateVector.read(dataset, format='hdf')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # read all spectrogram data
        try:
            group = h5file['spectrogram']
        except KeyError:
            group = dict()
        for key, dataset in group.items():
            key = key.rsplit(',', 1)[0]
            spec = Spectrogram.read(dataset, format='hdf')
            spec.channel = get_channel(spec.channel)
            add_spectrogram(spec, key=key)

        try:
            group = h5file['segments']
        except KeyError:
            group = dict()
        for name, dataset in group.items():
            dqflag = DataQualityFlag.read(dataset, format='hdf')
            globalv.SEGMENTS += {name: dqflag}
Example #21
# Demodulation is useful when trying to examine steady sinusoidal
# signals we know to be contained within data. For instance,
# we can download some data from LOSC to look at trends of the
# amplitude and phase of LIGO Livingston's calibration line at 331.3 Hz:

from gwpy.timeseries import TimeSeries

data = TimeSeries.fetch_open_data('L1', 1131350417, 1131357617)

# We can demodulate the `TimeSeries` at 331.3 Hz with a stride of one
# minute:

amp, phase = data.demodulate(331.3, stride=60)

# We can then plot these trends to visualize fluctuations in the
# amplitude of the calibration line:

from gwpy.plot import Plot

plot = Plot(amp)
ax = plot.gca()
ax.set_ylabel('Strain Amplitude at 331.3 Hz')
plot.show()
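# The phase trend from the same demodulation can be plotted the same way;
# a short follow-on sketch:

plot = Plot(phase)
ax = plot.gca()
ax.set_ylabel('Phase at 331.3 Hz [deg]')
plot.show()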
Example #22
def load_gw(t0, detector):
    strain = TimeSeries.fetch_open_data(detector, t0-14, t0+14, cache=False)
    return strain
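A one-line usage sketch (GPS time illustrative, near GW150914):

strain = load_gw(1126259462.4, 'H1')  # 28 seconds of open data around the event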
Example #23
from gwpy.timeseries import TimeSeries
data = TimeSeries.fetch_open_data('L1', start, end)
Example #24
from gwpy.timeseries import TimeSeries
h1 = TimeSeries.fetch_open_data('H1', 1187006834, 1187010930)
l1 = TimeSeries.fetch_open_data('L1', 1187006834, 1187010930)
Example #25
File: wscan.py, Project: hagabbar/PyOmega
def main():
    # Parse commandline arguments

    opts = parse_commandline()

    ###########################################################################
    #                                   Parse Ini File                        #
    ###########################################################################

    # ---- Create configuration-file-parser object and read parameters file.
    cp = ConfigParser.ConfigParser()
    cp.read(opts.inifile)

    # ---- Read needed variables from [parameters] and [channels] sections.
    alwaysPlotFlag = cp.getint('parameters', 'alwaysPlotFlag')
    sampleFrequency = cp.getint('parameters', 'sampleFrequency')
    blockTime = cp.getint('parameters', 'blockTime')
    searchFrequencyRange = json.loads(
        cp.get('parameters', 'searchFrequencyRange'))
    searchQRange = json.loads(cp.get('parameters', 'searchQRange'))
    searchMaximumEnergyLoss = cp.getfloat('parameters',
                                          'searchMaximumEnergyLoss')
    searchWindowDuration = cp.getfloat('parameters', 'searchWindowDuration')
    whiteNoiseFalseRate = cp.getfloat('parameters', 'whiteNoiseFalseRate')
    plotTimeRanges = json.loads(cp.get('parameters', 'plotTimeRanges'))
    plotFrequencyRange = json.loads(cp.get('parameters', 'plotFrequencyRange'))
    plotNormalizedERange = json.loads(
        cp.get('parameters', 'plotNormalizedERange'))
    frameCacheFile = cp.get('channels', 'frameCacheFile')
    frameTypes = cp.get('channels', 'frameType').split(',')
    channelNames = cp.get('channels', 'channelName').split(',')
    detectorName = channelNames[0].split(':')[0]
    det = detectorName.split('1')[0]

    ###########################################################################
    #                           create output directory                       #
    ###########################################################################

    # if outputDirectory not specified, make one based on center time
    if opts.outDir is None:
        outDir = './scans'
    else:
        outDir = opts.outDir
    outDir += '/'

    # report status
    if not os.path.isdir(outDir):
        if opts.verbose:
            print('creating event directory')
        os.makedirs(outDir)
    if opts.verbose:
        print('outputDirectory:  {0}'.format(outDir))

    ########################################################################
    #     Determine if this is a normal omega scan or a Gravityspy         #
    #    omega scan with unique ID. If Gravity spy then additional         #
    #    files and what not must be generated                              #
    ########################################################################

    IDstring = "{0:.2f}".format(opts.eventTime)

    ###########################################################################
    #               Process Channel Data                                      #
    ###########################################################################

    # find closest sample time to event time
    centerTime = np.floor(opts.eventTime) + np.round(
        (opts.eventTime - np.floor(opts.eventTime)) *
        sampleFrequency) / sampleFrequency

    # determine segment start and stop times
    startTime = round(centerTime - blockTime / 2)
    stopTime = startTime + blockTime

    # This is for ordering the output page by SNR
    loudestEnergyAll = []
    channelNameAll = []
    peakFreqAll = []
    mostSignQAll = []

    for channelName in channelNames:
        if 'STRAIN' in channelName:
            frameType = frameTypes[0]
        else:
            frameType = frameTypes[1]

        # Read in the data
        if opts.NSDF:
            data = TimeSeries.fetch(channelName, startTime, stopTime)
        else:
            connection = datafind.GWDataFindHTTPConnection()
            cache = connection.find_frame_urls(det,
                                               frameType,
                                               startTime,
                                               stopTime,
                                               urltype='file')
            data = TimeSeries.read(cache,
                                   channelName,
                                   format='gwf',
                                   start=startTime,
                                   end=stopTime)

        # resample data
        if data.sample_rate.decompose().value != sampleFrequency:
            data = data.resample(sampleFrequency)

        # Crop the results before interpolation to save time and memory,
        # then perform the q-transform
        try:
            specsgrams = []
            for iTimeWindow in plotTimeRanges:
                durForPlot = iTimeWindow / 2
                try:
                    outseg = Segment(centerTime - durForPlot,
                                     centerTime + durForPlot)
                    qScan = data.q_transform(qrange=(4, 64),
                                             frange=(10, 2048),
                                             gps=centerTime,
                                             search=0.5,
                                             tres=0.002,
                                             fres=0.5,
                                             outseg=outseg,
                                             whiten=True)
                    qValue = qScan.q
                    qScan = qScan.crop(centerTime - iTimeWindow / 2,
                                       centerTime + iTimeWindow / 2)
                except Exception:
                    outseg = Segment(centerTime - 2 * durForPlot,
                                     centerTime + 2 * durForPlot)
                    qScan = data.q_transform(qrange=(4, 64),
                                             frange=(10, 2048),
                                             gps=centerTime,
                                             search=0.5,
                                             tres=0.002,
                                             fres=0.5,
                                             outseg=outseg,
                                             whiten=True)
                    qValue = qScan.q
                    qScan = qScan.crop(centerTime - iTimeWindow / 2,
                                       centerTime + iTimeWindow / 2)
                specsgrams.append(qScan)

            loudestEnergyAll.append(qScan.max().value)
            peakFreqAll.append(qScan.yindex[np.where(
                qScan.value == qScan.max().value)[1]].value[0])
            mostSignQAll.append(qValue)
            channelNameAll.append(channelName)

        except Exception:
            print('bad channel {0}: skipping qScan'.format(channelName))
            continue

        if opts.make_webpage:
            # Set some plotting params
            myfontsize = 15
            mylabelfontsize = 20
            myColor = 'k'
            if detectorName == 'H1':
                title = "Hanford"
            elif detectorName == 'L1':
                title = "Livingston"
            else:
                title = "VIRGO"

            if 1161907217 < startTime < 1164499217:
                title = title + ' - ER10'
            elif startTime > 1164499217:
                title = title + ' - O2a'
            elif 1126400000 < startTime < 1137250000:
                title = title + ' - O1'
            else:
                raise ValueError("Time outside science or engineering run, "
                                 "or more likely code not updated to reflect "
                                 "the new science run")

            # Create one image containing all spectogram grams
            superFig = Plot(figsize=(27, 6))
            superFig.add_subplot(141, projection='timeseries')
            superFig.add_subplot(142, projection='timeseries')
            superFig.add_subplot(143, projection='timeseries')
            superFig.add_subplot(144, projection='timeseries')
            iN = 0

            for iAx, spec in zip(superFig.axes, specsgrams):
                iAx.plot(spec)

                iAx.set_yscale('log', base=2)
                iAx.set_xscale('linear')

                xticks = np.linspace(spec.xindex.min().value,
                                     spec.xindex.max().value, 5)
                dur = float(plotTimeRanges[iN])
                xticklabels = [str(i)
                               for i in np.linspace(-dur / 2, dur / 2, 5)]
                iAx.set_xticks(xticks)
                iAx.set_xticklabels(xticklabels)

                iAx.set_xlabel('Time (s)',
                               labelpad=0.1,
                               fontsize=mylabelfontsize,
                               color=myColor)
                iAx.set_ylim(10, 2048)
                iAx.yaxis.set_major_formatter(ScalarFormatter())
                iAx.ticklabel_format(axis='y', style='plain')
                iN = iN + 1

                superFig.add_colorbar(ax=iAx,
                                      cmap='viridis',
                                      label='Normalized energy',
                                      clim=plotNormalizedERange,
                                      pad="3%",
                                      width="5%")

            superFig.suptitle(title,
                              fontsize=mylabelfontsize,
                              color=myColor,
                              x=0.51)
            superFig.save(outDir + channelName.replace(':', '-') + '_' +
                          IDstring + '_spectrogram_' + '.png')

    if opts.make_webpage:

        channelNameAll = [i.replace(':', '-') for i in channelNameAll]
        loudestEnergyAll = [str(i) for i in loudestEnergyAll]
        peakFreqAll = [str(i) for i in peakFreqAll]
        mostSignQAll = [str(i) for i in mostSignQAll]

        # Zip SNR with channelName
        loudestEnergyAll = dict(zip(channelNameAll, loudestEnergyAll))
        peakFreqAll = dict(zip(channelNameAll, peakFreqAll))
        mostSignQAll = dict(zip(channelNameAll, mostSignQAll))

        plots = glob.glob(outDir + '*.png')
        plots = [i.split('/')[-1] for i in plots]
        channelPlots = dict(zip(channelNameAll, plots))

        f1 = open(outDir + 'index.html', 'w')
        env = Environment(loader=FileSystemLoader('../'))
        template = env.get_template('webpage/omegatemplate.html')
        f1.write(template.render(channelNames=channelNameAll,
                                 SNR=loudestEnergyAll,
                                 Q=mostSignQAll,
                                 FREQ=peakFreqAll,
                                 ID=IDstring,
                                 plots=channelPlots))
        f1.close()

        for channelName in channelNameAll:
            f2 = open(outDir + '%s.html' % channelName, 'w')
            template = env.get_template('webpage/channeltemplate.html')
            # List plots for given channel
            f2.write(template.render(channelNames=channelNameAll,
                                     thisChannel=channelName,
                                     plots=channelPlots))
            f2.close()
Example #26
would happen if a binary black hole merger signal occurred at or near
the time of a glitch. In LIGO data analysis, this procedure is referred
to as an _injection_.

In the example below, we will create a stream of random, white Gaussian
noise, then inject a simulation of GW150914 into it at a known time.
"""

__author__ = "Alex Urban <*****@*****.**>"
__currentmodule__ = 'gwpy.timeseries'

# First, we prepare one second of Gaussian noise:

from numpy import random
from gwpy.timeseries import TimeSeries
noise = TimeSeries(random.normal(scale=.1, size=16384), sample_rate=16384)

# Then we can download a simulation of the GW150914 signal from LOSC:

from astropy.utils.data import get_readable_fileobj
source = 'https://losc.ligo.org/s/events/GW150914/P150914/'
url = '%s/fig2-unfiltered-waveform-H.txt' % source
with get_readable_fileobj(url) as f:
    signal = TimeSeries.read(f, format='txt')
signal.t0 = .5  # make sure this intersects with noise time samples

# Note, since this simulation cuts off before a certain time, it is
# important to taper its ends to zero to avoid ringing artifacts.
# We can accomplish this using the
# :meth:`~gwpy.timeseries.TimeSeries.taper` method.
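# The example stops here; the natural continuation (sketched from gwpy's
# documented `taper` and `inject` methods) would be:

signal = signal.taper()
data = noise.inject(signal)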
Example #27
It is used to measure the 'Gaussianity' of those data, where a value of 1
indicates Gaussian behaviour, less than 1 indicates coherent variations,
and greater than 1 indicates incoherent variation.
It is a useful measure of the quality of the strain data being generated
and recorded at a LIGO site.
"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = 'gwpy.frequencyseries'

# To demonstrate this, we can load some data from the LIGO Livingston
# interferometer around the time of the GW151226 gravitational-wave detection:

from gwpy.timeseries import TimeSeries
gwdata = TimeSeries.fetch_open_data('L1',
                                    'Dec 26 2015 03:37',
                                    'Dec 26 2015 03:47',
                                    verbose=True)

# Next, we can calculate a Rayleigh statistic `FrequencySeries` using the
# :meth:`~gwpy.timeseries.TimeSeries.rayleigh_spectrum` method of the
# `~gwpy.timeseries.TimeSeries` with a 2-second FFT and 1-second overlap (50%):

rayleigh = gwdata.rayleigh_spectrum(2, 1)

# For easy comparison, we can calculate the spectral sensitivity ASD of the
# strain data and plot both on the same figure:

from gwpy.plot import Plot
plot = Plot(gwdata.asd(2, 1),
            rayleigh,
            geometry=(2, 1),
Example #28
def make_q_scans(event_time, **kwargs):
    """Generate Q-scan spectrograms around an event time.

    Parameters
    ----------
    event_time : GPS time to scan around; the data source, channel and
        other options are supplied as keyword arguments (parsed below).

    Returns
    -------
    specsgrams, q_value : the cropped Q-transform spectrograms and the
        most significant Q value.
    """
    # Parse Keyword Arguments
    config = kwargs.pop('config', GravitySpyConfigFile())
    timeseries = kwargs.pop('timeseries', None)
    source = kwargs.pop('source', None)
    channel_name = kwargs.pop('channel_name', None)
    frametype = kwargs.pop('frametype', None)
    verbose = kwargs.pop('verbose', False)

    if verbose:
        logger = log.Logger('Gravity Spy: Making Q Scans')

    if (timeseries is None) and (channel_name is None):
        raise ValueError("If not directly passing a timeseries, then "
                         "the user must pass channel_name")

    ###########################################################################
    #                                   Parse Ini File                        #
    ###########################################################################
    sample_frequency = config.sample_frequency
    block_time = config.block_time
    search_frequency_range = config.search_frequency_range
    search_q_range = config.search_q_range
    plot_time_ranges = config.plot_time_ranges
    plot_normalized_energy_range = config.plot_normalized_energy_range

    if verbose:
        logger.info("""
                    You have chosen the following parameters

                        Sample Frequency : {0}
                        Block Time : {1}
                        Frequency Range : {2}
                        Q Range : {3}
                        Plot Time Ranges : {4}
                        Spectrogram Colorbar Range : {5}

                    """.format(sample_frequency, block_time,
                               search_frequency_range, search_q_range,
                               plot_time_ranges, plot_normalized_energy_range))

    # find closest sample time to event time
    center_time = (
                   numpy.floor(event_time) +
                   numpy.round((event_time - numpy.floor(event_time)) *
                   sample_frequency) / sample_frequency
                  )

    # determine segment start and stop times
    start_time = round(center_time - block_time / 2)
    stop_time = start_time + block_time

    # Read in the data
    if timeseries:
        data = timeseries.crop(start_time, stop_time)
    elif source:
        if verbose:
            logger.info('Reading Data From Source ...')
        data = TimeSeries.read(source=source, channel=channel_name,
                               start=start_time, end=stop_time, verbose=verbose)
    else:
        if verbose:
            logger.info('Fetching Data...')
        data = TimeSeries.get(channel_name, start_time, stop_time,
                              frametype=frametype, verbose=verbose).astype('float64')

    # resample data
    if verbose:
        logger.info('Resampling Data...')
    if data.sample_rate.decompose().value != sample_frequency:
        data = data.resample(sample_frequency)

    # Cropping the results before interpolation to save on time and memory
    # perform the q-transform
    if verbose:
        logger.info('Processing Q Scans...')

    specsgrams = []
    for time_window in plot_time_ranges:
        duration_for_plot = time_window/2
        try:
            outseg = Segment(center_time - duration_for_plot,
                             center_time + duration_for_plot)
            q_scan = data.q_transform(qrange=tuple(search_q_range),
                                      frange=tuple(search_frequency_range),
                                      gps=center_time,
                                      search=0.5, tres=0.002,
                                      fres=0.5, outseg=outseg, whiten=True)
            q_value = q_scan.q
            q_scan = q_scan.crop(center_time-time_window/2,
                                 center_time+time_window/2)
        except Exception:
            outseg = Segment(center_time - 2*duration_for_plot,
                             center_time + 2*duration_for_plot)
            q_scan = data.q_transform(qrange=tuple(search_q_range),
                                      frange=tuple(search_frequency_range),
                                      gps=center_time, search=0.5,
                                      tres=0.002,
                                      fres=0.5, outseg=outseg, whiten=True)
            q_value = q_scan.q
            q_scan = q_scan.crop(center_time-time_window/2,
                                 center_time+time_window/2)
        specsgrams.append(q_scan)

    if verbose:
        logger.info('The most significant q value is {0}'.format(q_value))

    return specsgrams, q_value
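A hedged usage sketch for make_q_scans (channel and frame-type names are illustrative, not prescribed by the snippet):

# Hypothetical call: scan around a GPS time, fetching data by channel name.
specsgrams, q_value = make_q_scans(
    1126259462.4,
    channel_name='L1:GDS-CALIB_STRAIN',  # illustrative channel
    frametype='L1_HOFT_C00',             # illustrative frame type
    verbose=True)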
Example #29
# download a 128 second chunk of data (the whole data length will be used
# to generate the power spectral density to get a good average)
duration = 128  # number of seconds of data to download
gpsstart = 1187008884  # start time
gpsend = gpsstart + duration
samplerate = 16384

detector = "L1"

# fetch the open data from GWOSC
data = TimeSeries.fetch_open_data(
    detector,
    gpsstart,
    gpsend,
    sample_rate=samplerate,
    format='hdf5',
    host='https://www.gw-openscience.org',
    verbose=False,
    cache=True,
)

# convert the data to a PyCBC time series
pycbcdata = PyCBCTimeSeries(data.data, delta_t=(1 / data.sample_rate.value))

# high-pass filter the data to only include the frequencies we're interested in
lowcutoff = 1000
buffer = 50  # just allow a bit of a buffer at the edges
pycbcdata = pycbcdata.highpass_fir(lowcutoff - buffer,
                                   8)  # 8 is the "order" of the filter

# create the template bank
Example #30
import gwpy
from gwosc.datasets import event_gps
gps = event_gps('GW150914')
print(gps)
segment = (int(gps) - 5, int(gps) + 5)
print(segment)
from gwpy.timeseries import TimeSeries
ldata = TimeSeries.fetch_open_data('L1', *segment, verbose=True)
print(ldata)
Example #31
def read_frame(filename,
               ifo,
               readstrain=True,
               strain_chan=None,
               dq_chan=None,
               inj_chan=None):
    """
    Helper function to read frame files
    """

    from gwpy.timeseries import TimeSeries

    if ifo is None:
        raise TypeError("To read GWF data, ifo must be 'H1', 'H2', or 'L1'.")

    #-- Read strain channel
    if strain_chan is None:
        strain_chan = ifo + ':LOSC-STRAIN'

    if readstrain:
        try:
            sd = TimeSeries.read(filename, strain_chan)
            strain = sd.value
            gpsStart = sd.t0.value
            ts = sd.dt.value
        except Exception:
            print("ERROR reading file {0} with strain channel {1}".format(
                filename, strain_chan))
            raise
    else:
        ts = 1
        strain = 0

    #-- Read DQ channel
    if dq_chan is None:
        dq_chan = ifo + ':LOSC-DQMASK'

    try:
        qd = TimeSeries.read(str(filename), str(dq_chan))
        gpsStart = qd.t0.value
        qmask = np.array(qd.value)
        dq_ts = qd.dt.value
        shortnameList_wbit = str(qd.unit).split()
        shortnameList = [name.split(':')[1] for name in shortnameList_wbit]
    except Exception:
        print("ERROR reading DQ channel '{0}' from file: {1}".format(
            dq_chan, filename))
        raise

    #-- Read Injection channel
    if inj_chan is None:
        inj_chan = ifo + ':LOSC-INJMASK'

    try:
        injdata = TimeSeries.read(str(filename), str(inj_chan))
        injmask = injdata.value
        injnamelist_bit = str(injdata.unit).split()
        injnamelist = [name.split(':')[1] for name in injnamelist_bit]
    except Exception:
        print("ERROR reading injection channel '{0}' from file: {1}".format(
            inj_chan, filename))
        raise

    return strain, gpsStart, ts, qmask, shortnameList, injmask, injnamelist
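A hedged usage sketch; the filename follows the GWOSC frame-naming pattern and is purely illustrative:

# Hypothetical usage: unpack all seven return values.
strain, gps_start, dt, dqmask, dqnames, injmask, injnames = read_frame(
    'H-H1_LOSC_4_V1-1126259446-32.gwf', 'H1')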
Example #32
"""Tests for :mod:`gwdetchar.daq`
"""

import numpy
import pytest

from numpy.testing import assert_array_equal
from unittest import mock

from gwpy.timeseries import TimeSeries
from gwpy.segments import (Segment, SegmentList)
from gwpy.testing.utils import assert_segmentlist_equal

from .. import daq

OVERFLOW_SERIES = TimeSeries([0, 0, 0, 1, 1, 0, 0, 1, 0, 1], dx=.5)
CUMULATIVE_SERIES = TimeSeries([0, 0, 0, 1, 2, 2, 2, 3, 3, 4], dx=.5)
OVERFLOW_TIMES = numpy.array([1.5, 3.5, 4.5])
OVERFLOW_SEGMENTS = SegmentList([
    Segment(1.5, 2.5),
    Segment(3.5, 4),
    Segment(4.5, 5),
])
CROSSING_TIMES = numpy.array([1.5, 2.5, 3.5, 4, 4.5])

CHANNELS = [
    'X1:TEST-CHANNEL_1',
    'X1:FEC-1_ADC_OVERFLOW_ACC_0_1',
    'X1:FEC-1_ADC_OVERFLOW_ACC_0_2',
    'X1:FEC-1_ADC_OVERFLOW_ACC_0_3',
    'X1:FEC-1_ADC_OVERFLOW_ACC_0_4',
Example #33
	sec_per_t_unit = 60.0
else:
	t_unit = 'seconds'
	sec_per_t_unit = 1.0

for i in range(0, num_points):
	times[i] = times[i] / sec_per_t_unit

# Collect range data in arrays
ranges = []
medians = []
stds = []
for i in range(0, len(channel_list)):
	ranges.append([])
	for j in range(0, num_points):
		data = TimeSeries.read(frame_cache_list[i], "%s:%s" % (ifo, channel_list[i]), start = gps_start_time + j * stride, end = gps_start_time + j * stride + integration_time)
		PSD = data.psd(8, 4, method = 'lal_median')
		BNS_range = float(inspiral_range(PSD, fmin=10).value)
		ranges[i].append(BNS_range)
	medians.append(numpy.median(ranges[i]))
	stds.append(numpy.std(ranges[i]))
# Make plots
colors = ["blue", "green", "limegreen", "red", "yellow", "purple", "pink"] # Hopefully the user will not want to plot more than 7 datasets on one plot.
plt.figure(figsize = (12, 8))
for i in range(0, len(channel_list)):
	plt.gcf().subplots_adjust(bottom=0.15)
	plt.plot(times, ranges[i], colors[i % len(colors)], linewidth = 1.5, label = r'%s:%s [median = %0.1f Mpc, $\sigma$ = %0.1f Mpc]' % (ifo, channel_list[i].replace('_', r'\_'), medians[i], stds[i]))
	if options.make_title:
		plt.title("%s binary neutron star inspiral range" % ifo)
	plt.ylabel('Angle-averaged range [Mpc]')
	plt.xlabel('Time [%s] from %s UTC' % (t_unit, time.strftime("%b %d %Y %H:%M:%S", time.gmtime(gps_start_time + 315964782))))
Example #34
 def frame_read(self, format=None):
     ts = TimeSeries.read(self.framefile, 'L1:LDAS-STRAIN', format=format)
     self.assertTrue(ts.epoch == Time(968654552, format='gps', scale='utc'))
     self.assertTrue(ts.sample_rate == units.Quantity(16384, 'Hz'))
     self.assertTrue(ts.unit == units.Unit('strain'))
Example #35
potential gravitational-wave signals.

This algorithm was used to visualise the first ever gravitational-wave
detection GW150914, so we can reproduce `that result (bottom panel of figure 1)
<https://doi.org/10.1103/PhysRevLett.116.061102>`_ here.
"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = 'gwpy.timeseries'

# First, we need to download the `TimeSeries` record for the H1 strain
# measurement from |GWOSC|_:

from gwpy.timeseries import TimeSeries

data = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)

# Next, we generate the `~TimeSeries.q_transform` of these data:
qspecgram = data.q_transform(outseg=(1126259462.2, 1126259462.5))

# .. note::
#    We can save memory by focusing on a specific window around the
#    interesting time. The ``outseg`` keyword argument returns a `Spectrogram`
#    that is only as long as we need it to be.

# Now, we can plot the resulting `~gwpy.spectrogram.Spectrogram`:

plot = qspecgram.plot(figsize=[8, 4])
ax = plot.gca()
ax.set_xscale('seconds')
ax.set_yscale('log')
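# The snippet ends before the figure is finished; a short continuation
# sketch in gwpy's plotting idiom:

ax.colorbar(label='Normalized energy')
plot.show()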
Example #36
File: inject.py, Project: gwpy/gwpy
would happen if a binary black hole merger signal occurred at or near
the time of a glitch. In LIGO data analysis, this procedure is referred
to as an _injection_.

In the example below, we will create a stream of random, white Gaussian
noise, then inject a simulation of GW150914 into it at a known time.
"""

__author__ = "Alex Urban <*****@*****.**>"
__currentmodule__ = 'gwpy.timeseries'

# First, we prepare one second of Gaussian noise:

from numpy import random
from gwpy.timeseries import TimeSeries
noise = TimeSeries(random.normal(scale=.1, size=16384), sample_rate=16384)

# Then we can download a simulation of the GW150914 signal from GWOSC:

from astropy.utils.data import get_readable_fileobj
url = ("https://www.gw-openscience.org/s/events/GW150914/P150914/"
       "fig2-unfiltered-waveform-H.txt")
with get_readable_fileobj(url) as f:
    signal = TimeSeries.read(f, format='txt')
signal.t0 = .5  # make sure this intersects with noise time samples

# Note, since this simulation cuts off before a certain time, it is
# important to taper its ends to zero to avoid ringing artifacts.
# We can accomplish this using the
# :meth:`~gwpy.timeseries.TimeSeries.taper` method.
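# As in example #26, the continuation (sketched from gwpy's documented
# methods) tapers the signal and injects it into the noise:

signal = signal.taper()
data = noise.inject(signal)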
Example #37
"""Filtering a `TimeSeries` with a ZPK filter

Several data streams read from the LIGO detectors are whitened before being
recorded to prevent numerical errors when using single-precision data
storage.
In this example we read such a `channel <gwpy.detector.Channel>` and undo the
whitening to show the physical content of these data.

"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = 'gwpy.timeseries'

# First, we import the `TimeSeries` and :meth:`~TimeSeries.get` the data:
from gwpy.timeseries import TimeSeries
white = TimeSeries.get('L1:OAF-CAL_DARM_DQ', 'March 2 2015 12:00',
                       'March 2 2015 12:30')

# Now, we can re-calibrate these data into displacement units by first applying
# a `highpass <TimeSeries.highpass>` filter to remove the low-frequency noise,
# and then applying our de-whitening filter in `ZPK <TimeSeries.zpk>` format
# with five zeros at 100 Hz and five poles at 1 Hz (giving an overall DC
# gain of 10 :sup:`-10`):
hp = white.highpass(4)
displacement = hp.zpk([100] * 5, [1] * 5, 1e-10)

# We can visualise the impact of the whitening by calculating the ASD
# `~gwpy.frequencyseries.FrequencySeries` before and after the filter,

whiteasd = white.asd(8, 4)
dispasd = displacement.asd(8, 4)
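# A sketch of the comparison plot the text describes (keyword choices
# illustrative):

from gwpy.plot import Plot
plot = Plot(whiteasd, dispasd, separate=True, sharex=True,
            xscale='log', yscale='log')
plot.show()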
Example #38
from gwpy.timeseries import TimeSeries

llo = TimeSeries.fetch('L1:LDAS-STRAIN,rds', 'August 1 2010', 'August 1 2010 00:10')
variance = llo.spectral_variance(10, fftlength=1, log=True, low=1e-24, high=1e-19, nbins=100)


plot = variance.plot(norm='log', vmin=0.5, vmax=100)
ax = plot.gca()
ax.grid()
ax.set_xlim(40, 4096)
ax.set_ylim(1e-24, 1e-19)
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel(r'GW ASD [strain/\rtHz]')
ax.set_title('LIGO Livingston Observatory sensitivity variance')
plot.save('variance.png')

Example #39
                               format='gwf.lalframe',
                               nproc=nproc)
    ixv1 = data['K1:PEM-SEIS_IXV_GND_X_OUT_DQ']
    exv = data['K1:PEM-SEIS_EXV_GND_X_OUT_DQ']
    gif = data['K1:GIF-X_STRAIN_OUT16'] * 3e3 * 1e6
    ixv_press = data['K1:PEM-WEATHER_IXV_FIELD_PRES_OUT16']
    diff13 = (ixv1 - exv) / np.sqrt(2)
    comm13 = (ixv1 + exv) / np.sqrt(2)

if tplot and readgwf:
    plot = data.plot()
    plot.savefig('img_timeseries.png')
    plot.close()

if True:
    x500_press = TimeSeries.read('2019May03_6hours_x500_press.gwf',
                                 'X500_BARO', start, end)
    plot = x500_press.plot(ylabel='Pressure')
    plot.savefig('img_x500_press.png')
    plot.close()

# -----------------------------------------------
# Calc Coherence
# -----------------------------------------------
import matplotlib.pyplot as plt
if plot_coherence and readgwf:
    print('Calc Coherence ')
    coh13 = ixv1.coherence(exv, fftlength, fftlength * ovlp, window=window)
    deg13 = ixv1.csd(exv, fftlength, fftlength / 2).angle().rad2deg()
    coh1 = gif.coherence(ixv_press, fftlength, fftlength * ovlp, window=window)
    deg1 = gif.csd(ixv_press, fftlength, fftlength / 2).angle().rad2deg()
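
# A possible follow-up (a sketch, reusing this script's conventions): plot
# one of the coherence spectra and save it alongside the other figures.
if plot_coherence and readgwf:
    plot = coh13.plot()
    ax = plot.gca()
    ax.set_xscale('log')
    ax.set_ylabel('Coherence')
    ax.set_ylim(0, 1)
    plot.savefig('img_coherence13.png')
    plot.close()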
Example #40
File: fetch_HPI.py Project: nkij/LIGO
from matplotlib.ticker import MultipleLocator, FormatStrFormatter

from gwpy.time import Time
from gwpy.timeseries import (TimeSeries, TimeSeriesDict)

print "Import Modules Success"

#Start-End Time                                                                 

start = Time('2014-06-17 04:00:00', format='iso', scale='utc')
end = Time('2014-06-17 15:00:00', format='iso', scale='utc')
print(start.iso, start.gps)
print(end.iso, end.gps)

data = TimeSeries.fetch('L1:HPI-ETMY_BLND_IPS_Z_IN1_DQ.mean,m-trend', start, end, verbose=True)

plot_data = data.plot()
ax = plot_data.gca()
#plot_data.show()                                                               

ax.set_epoch(start.gps)
ax.set_xlim(start.gps, end.gps)
ax.set_ylabel('Amplitude[ ]')
ax.set_title(data.channel.texname)
#ax.set_ylim([20000,30000])
                                                                                

plot_data.save('L1-HPI-ETMY_BLND_IPS_Z_IN1_DQ_JUN14.png')
print "PLOT SAVED"

Example #41
### Set the channel that witnesses fringes (to plot specgram for) 
witness_base = "GDS-CALIB_STRAIN"
#witness_base = "LSC-MICH_IN1_DQ"
#witness_base = '%s:ASC-Y_TR_A_NSUM_OUT_DQ' % ifo
#witness_base = 'LSC-SRCL_IN1_DQ'
#witness_base = "LSC-REFL_A_RF9_Q_ERR_DQ"
#witness_base = "ASC-AS_A_RF45_Q_PIT_OUT_DQ" 
#witness_base = "ASC-AS_B_RF36_Q_PIT_OUT_DQ"
#witness_base = "OMC-LSC_SERVO_OUT_DQ"

if plotspec == 1:
    witness_chan = ifo + ':' + witness_base
    print(witness_chan)
    # load timeseries for witness channel
    witness = TimeSeries.fetch(witness_chan, start_time, start_time + dur,
                               verbose=True)
    if witness_base == "GDS-CALIB_STRAIN":
        witness = witness.highpass(20, gpass=3)  # highpass the witness data
    elif witness_base in ("ASC-AS_B_RF45_I_PIT_OUT_DQ",
                          "ASC-AS_B_RF36_Q_PIT_OUT_DQ",
                          "LSC-MICH_IN1_DQ",
                          "ASC-AS_A_RF45_Q_PIT_OUT_DQ"):
        witness = witness.highpass(10, gpass=3)  # highpass the witness data
    # Calculate DARM spectrogram
    secsPerFFT = .75  # seconds of data per FFT
    overlap = 0.9  # fractional overlap
    Fs = witness.sample_rate.value
    NFFT = int(round(Fs * secsPerFFT))
    noverlap = int(round(overlap * NFFT))
    Pxx, freq, t, im = specgram(witness.value, NFFT=NFFT, Fs=Fs,
                                noverlap=noverlap, scale_by_freq='magnitude',
                                detrend=mlab.detrend_linear,
                                window=mlab.window_hanning)

### Known SUS displacement w/r/t sus frame channels (in microns)
position_chans = [\
'SUS-BS_M1_DAMP_L_IN1_DQ',\
Example #42
from urllib.request import urlopen
from numpy import asarray
from gwpy.timeseries import TimeSeries

data = urlopen('http://www.ligo.org/science/GW100916/'
               'L-strain_hp30-968654552-10.txt').read().decode()
ts = TimeSeries(asarray(data.splitlines(), dtype=float),
                epoch=968654552,
                sample_rate=16384)
plot = ts.plot()
plot.set_ylabel('Gravitational-wave strain amplitude')
plot.set_title('LIGO Livingston Observatory data for GW100916')
plot.show()
Example #43
File: test_plotter.py Project: bfarr/gwpy
    def setUp(self):
        self.ts = TimeSeries.read(TEST_HDF_FILE, 'H1:LDAS-STRAIN')
        self.sg = self.ts.spectrogram2(0.5, 0.49)
        self.mmm = [self.ts, self.ts * 0.9, self.ts * 1.1]
Example #44
File: test_plotter.py Project: bfarr/gwpy
    def setUp(self):
        self.ts = TimeSeries.read(TEST_HDF_FILE, 'H1:LDAS-STRAIN')
Example #45
    def getTimeSeries(self, arg_list):
        """Verify and interpret arguments to get all
        TimeSeries objects defined"""

        # retrieve channel data from NDS as a TimeSeries
        for chans in arg_list.chan:
            for chan in chans:
                if chan not in self.chan_list:
                    self.chan_list.append(chan)

        if len(self.chan_list) < self.min_timeseries:
            raise ArgumentError('A minimum of %d channels must be '
                                'specified for this product' %
                                self.min_timeseries)

        if len(arg_list.start) > 0:
            for start_arg in arg_list.start:
                if isinstance(start_arg, list):
                    for starts in start_arg:
                        # each entry may be a single string or a list of them
                        if isinstance(starts, list):
                            startis = [int(start_str) for start_str in starts]
                        else:
                            startis = [int(starts)]
                        # ignore duplicates (to make it easy for ldvw)
                        for starti in startis:
                            if starti not in self.start_list:
                                self.start_list.append(starti)
                else:
                    self.start_list.append(int(start_arg))
        else:
            raise ArgumentError('No start times specified')

        # Verify the number of datasets specified is valid for this plot
        self.n_datasets = len(self.chan_list) * len(self.start_list)
        if self.n_datasets < self.get_min_datasets():
            raise ArgumentError('%d datasets are required for this '
                                'plot but only %d are supplied' %
                                (self.get_min_datasets(), self.n_datasets))

        if self.n_datasets > self.get_max_datasets():
            raise ArgumentError('A maximum of %d datasets allowed for '
                                'this plot but %d specified' %
                                (self.get_max_datasets(), self.n_datasets))

        if arg_list.duration:
            self.dur = int(arg_list.duration)
        else:
            self.dur = 10

        verb = self.verbose > 1

        # determine how we're supposed get our data
        source = 'NDS2'
        frame_cache = False

        if arg_list.framecache:
            source = 'frames'
            frame_cache = arg_list.framecache

        # set up filter parameters for all channels
        highpass = 0
        if arg_list.highpass:
            highpass = float(arg_list.highpass)
            self.filter += "highpass(%.1f) " % highpass

        # Get the data from NDS or Frames
        # time_groups is a list of timeseries index grouped by
        # start time for coherence like plots
        self.time_groups = []
        for start in self.start_list:
            time_group = []
            for chan in self.chan_list:
                if verb:
                    print('Fetching %s %d, %d using %s' %
                          (chan, start, self.dur, source))
                if frame_cache:
                    data = TimeSeries.read(frame_cache, chan, start=start,
                                           end=start+self.dur)
                else:
                    data = TimeSeries.fetch(chan, start, start+self.dur,
                                            verbose=verb)

                if highpass > 0:
                    data = data.highpass(highpass)

                self.timeseries.append(data)
                time_group.append(len(self.timeseries)-1)
            self.time_groups.append(time_group)

        # report what we have if they asked for it
        self.log(3, ('Channels: %s' % self.chan_list))
        self.log(3, ('Start times: %s, duration: %s' %
                     (self.start_list, self.dur)))
        self.log(3, ('Number of time series: %d' % len(self.timeseries)))

        if len(self.timeseries) != self.n_datasets:
            self.log(0, ('%d datasets requested but only %d transferred' %
                         (self.n_datasets, len(self.timeseries))))
            if len(self.timeseries) > self.get_min_datasets():
                self.log(0, 'Proceeding with the data that was transferred.')
            else:
                self.log(0, 'Not enough data for requested plot.')
                from sys import exit
                exit(2)
        return
Example #46
File: test_archive.py Project: gwpy/gwsumm
import h5py

from numpy import (random, testing as nptest)

from gwpy.table import EventTable
from gwpy.timeseries import (TimeSeries, StateVector)
from gwpy.spectrogram import Spectrogram
from gwpy.segments import (Segment, SegmentList)

from gwsumm import (archive, data, globalv, channels, triggers)

__author__ = 'Duncan Macleod <*****@*****.**>'

TEST_DATA = TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], epoch=100,
                       unit='meter', sample_rate=1, channel='X1:TEST-CHANNEL',
                       name='TEST DATA')
TEST_DATA.channel = channels.get_channel(TEST_DATA.channel)


# -- utilities ----------------------------------------------------------------

def empty_globalv():
    globalv.DATA = type(globalv.DATA)()
    globalv.SPECTROGRAMS = type(globalv.SPECTROGRAMS)()
    globalv.SEGMENTS = type(globalv.SEGMENTS)()
    globalv.TRIGGERS = type(globalv.TRIGGERS)()


def create(data, **metadata):
    SeriesClass = metadata.pop('series_class', TimeSeries)
Example #47
# Demodulation is useful when trying to examine steady sinusoidal
# signals we know to be contained within data. For instance,
# we can download some data from LOSC to look at trends of the
# amplitude and phase of Livingston's calibration line at 331.3 Hz:

from gwpy.timeseries import TimeSeries
data = TimeSeries.fetch_open_data('L1', 1131350417, 1131357617)

# We can demodulate the `TimeSeries` at 331.3 Hz with a stride of once
# per minute:

amp, phase = data.demodulate(331.3, stride=60)

# We can then plot these trends to visualize changes in the amplitude
# and phase of the calibration line:

from gwpy.plotter import TimeSeriesPlot
plot = TimeSeriesPlot(amp, phase, sep=True)
plot.show()
Example #48
# We can design an arbitrarily complicated filter using
# :mod:`gwpy.signal.filter_design`

from gwpy.signal import filter_design
bp = filter_design.bandpass(50, 250, 4096.)
notches = [filter_design.notch(f, 4096.) for f in (60, 120, 180)]
zpk = filter_design.concatenate_zpks(bp, *notches)

# We can then download some data from LOSC to apply it using
# `TimeSeries.filter`:

from gwpy.timeseries import TimeSeries
data = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
filtered = data.filter(zpk, filtfilt=True)

# We can plot the original signal, and the filtered version, cutting
# off either end of the filtered data to remove filter-edge artefacts

from gwpy.plotter import TimeSeriesPlot
plot = TimeSeriesPlot(data, filtered[128:-128], sep=True)
plot.show()
Example #49
# (The start of this example is missing; the option name below is a
# reconstruction inferred from the later use of ``args.gpsstart``.)
parser.add_argument(
    '--gpsstart',
    help=
    'GPS start time or datetime of analysis. Default: 5 minutes prior to current time'
)
parser.add_argument(
    '--duration',
    type=int,
    default=60,
    required=False,
    help='Duration of of analysis in seconds. Default: 60 seconds')

args = parser.parse_args()

gpsend = args.gpsstart + args.duration

#Timeseries Data
TS = TimeSeries.fetch(channel, args.gpsstart, gpsend)
specgram = TS.spectrogram(2, fftlength=1, overlap=.5)**(1 / 2.)
normalised = specgram.ratio('median')

#Plot QSpectrogram
qspecgram = TS.q_transform(qrange=(4, 150),
                           frange=(10, 100),
                           outseg=(args.gpsstart, gpsend),
                           fres=.01)
plot = qspecgram.imshow(figsize=[8, 4])
from matplotlib import cm  # needed for cm.get_cmap below
cmap = cm.get_cmap('viridis')
ax = plot.gca()
ax.set_title('Q Transform')
ax.set_xscale('seconds')
ax.set_yscale('log')
ax.set_ylim(10, 100)
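
# One way to finish the figure (a sketch; the output file name is
# illustrative):
ax.colorbar(label='Normalised energy')
plot.savefig('qspecgram.png')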
Example #50
# NOTE: `fir_helper` is defined earlier in the original module (not shown);
# the imports below are added so the rest of the snippet is self-contained.
from numpy import (arange, cos, sin, pi, float32)

from gwpy.timeseries import TimeSeries


def osem_velocity(osem_data, pad_sec=64, new_sample_rate=None):
    nyquist = osem_data.sample_rate.value / 2.
    assert new_sample_rate is not None and new_sample_rate >= 16
    result = fir_helper(osem_data, [0., 0.04, 0.08, 7.2, nyquist],
                        [0., 1.e-4, 2. * pi * 0.08, 2. * pi * 7.2, 0.],
                        pad_sec=pad_sec,
                        new_sample_rate=new_sample_rate,
                        deriv=True)
    return result


if __name__ == '__main__':
    times = arange(1024 * 256, dtype=float32) / 256.
    test = TimeSeries(
        cos(2. * pi * 0.4 * times) + 2. * sin(2. * pi * 1. * times) -
        cos(2. * pi * 3. * times) - 10. * sin(2. * pi * 10. * times),
        sample_rate=256,
        name='test')
    dtest = TimeSeries(
        2. * pi *
        (-0.4 * sin(2. * pi * 0.4 * times) + 2. * cos(2. * pi * 1. * times) +
         3. * sin(2. * pi * 3. * times)),
        sample_rate=256,
        name='deriv')

    result1 = fir_helper(
        test, [0., 0.01, 0.02, 0.4, 7., 8., 128.],
        [0., 0., 2 * pi * 0.02, 2 * pi * 0.4, 2 * pi * 7., 1.e-4, 0.],
        pad_sec=64)

    result = fir_helper(
Example #51
parts of the detector, closely monitoring mechanical subsystems and
environmental conditions. We can cross-correlate data from these sensors with
the primary gravitational wave data to look for evidence of terrestrial noise.

We demonstrate below a prominent 'whistle glitch' in the gravitational wave
channel, which is also witnessed by a photodiode in the Pre-Stabilized Laser
(PSL) chamber. This example uses data from the LIGO Livingston detector during
Advanced LIGO's second observing run.
"""

__author__ = "Alex Urban <*****@*****.**>"
__currentmodule__ = 'gwpy.timeseries'

# First, we import the `TimeSeries` and :meth:`~TimeSeries.get` the data:
from gwpy.timeseries import TimeSeries
hoft = TimeSeries.get('L1:GDS-CALIB_STRAIN', 1172489751, 1172489815)
aux = TimeSeries.get('L1:PSL-ISS_PDA_REL_OUT_DQ', 1172489751, 1172489815)

# Next, we should `~TimeSeries.whiten` the data to enhance the higher-frequency
# content and make a more faithful comparison between data streams.
whoft = hoft.whiten(8, 4)
waux = aux.whiten(8, 4)

# We can now cross-correlate these channels:
mfilter = waux.crop(1172489782.57, 1172489783.57)
snr = whoft.correlate(mfilter).abs()

# and plot the resulting normalised signal-to-noise ratio:
plot = snr.crop(1172489782.07, 1172489784.07).plot()
plot.axes[0].set_epoch(1172489783.07)
plot.axes[0].set_ylabel('Signal-to-noise ratio', fontsize=16)
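
# and display the figure (assuming an interactive backend):
plot.show()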
Example #52
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source

    This method reads all found data into the data containers defined by
    the `gwsumm.globalv` module, then returns nothing.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:

        # -- channels ---------------------------

        try:
            ctable = Table.read(h5file['channels'])
        except KeyError:  # no channels table written
            pass
        else:
            for row in ctable:
                chan = get_channel(row['name'])
                for p in ctable.colnames[1:]:
                    if row[p]:
                        setattr(chan, p, row[p])

        # -- timeseries -------------------------

        for dataset in h5file.get('timeseries', {}).values():
            ts = TimeSeries.read(dataset, format='hdf5')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name)
                    and ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() != mode.Mode.day:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # -- statevector ------------------------

        for dataset in h5file.get('statevector', {}).values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # -- spectrogram ------------------------

        for tag, add_ in zip(
            ['spectrogram', 'coherence-components'],
            [add_spectrogram, add_coherence_component_spectrogram]):
            for key, dataset in h5file.get(tag, {}).items():
                key = key.rsplit(',', 1)[0]
                spec = Spectrogram.read(dataset, format='hdf5')
                spec.channel = get_channel(spec.channel)
                add_(spec, key=key)

        # -- segments ---------------------------

        for name, dataset in h5file.get('segments', {}).items():
            dqflag = DataQualityFlag.read(h5file,
                                          path=dataset.name,
                                          format='hdf5')
            globalv.SEGMENTS += {name: dqflag}

        # -- triggers ---------------------------

        for dataset in h5file.get('triggers', {}).values():
            load_table(dataset)
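
# A hypothetical usage sketch (the archive file name is illustrative):
#
#     read_data_archive('L1-GWSUMM_ARCHIVE-1172361618-86400.h5')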
Example #53
File: test_plotter.py Project: bfarr/gwpy
    def setUp(self):
        self.ts = TimeSeries.read(TEST_HDF_FILE, 'H1:LDAS-STRAIN')
        self.asd = self.ts.asd(1)
        self.mmm = [self.asd, self.asd * 0.9, self.asd * 1.1]
Example #54
File: hoff.py Project: Phatom/gwpy
The LIGO Laboratory has publicly released the strain data around the time of
the GW150914 gravitational-wave detection; we can use these to calculate
and display the spectral sensitivity of each of the detectors at that time.
"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = 'gwpy.frequencyseries'

# In order to generate a `FrequencySeries` we need to import the
# `~gwpy.timeseries.TimeSeries` and use
# :meth:`~gwpy.timeseries.TimeSeries.fetch_open_data` to download the strain
# records:

from gwpy.timeseries import TimeSeries
lho = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
llo = TimeSeries.fetch_open_data('L1', 1126259446, 1126259478)

# We can then call the :meth:`~gwpy.timeseries.TimeSeries.asd` method to
# calculate the amplitude spectral density for each
# `~gwpy.timeseries.TimeSeries`:
lhoasd = lho.asd(4, 2)
lloasd = llo.asd(4, 2)

# We can then :meth:`~FrequencySeries.plot` the spectra using the 'standard'
# colour scheme:

plot = lhoasd.plot(label='LIGO-Hanford', color='gwpy:ligo-hanford')
ax = plot.gca()
ax.plot(lloasd, label='LIGO-Livingston', color='gwpy:ligo-livingston')
ax.set_xlim(10, 2000)
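
# and render the legend before displaying the figure (a sketch):
ax.legend()
plot.show()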
Example #55
from gwpy.timeseries import TimeSeries
white = TimeSeries.get(
    'L1:OAF-CAL_DARM_DQ', 'March 2 2015 12:00', 'March 2 2015 12:30')
Example #56
# load data
from gwpy.timeseries import TimeSeries
raw = TimeSeries.fetch_open_data('L1', 1126259446, 1126259478)

# calculate filtered timeseries, and Q-transform spectrogram
data = raw.bandpass(50, 300).notch(60)
qtrans = raw.q_transform()

# plot
from matplotlib import pyplot
plot, axes = pyplot.subplots(nrows=2, sharex=True, figsize=(8, 6))
tax, qax = axes
tax.plot(data.crop(1126259462, 1126259463), color='gwpy:ligo-livingston')
qax.imshow(qtrans.crop(1126259462, 1126259463))
tax.set_xlabel('')
tax.set_xscale('auto-gps')
tax.set_xlim(1126259462.2, 1126259462.5)
tax.set_ylabel('Strain amplitude')
qax.set_yscale('log')
qax.set_ylabel('Frequency [Hz]')
qax.colorbar(clim=(0, 35), label='Normalised energy')
Example #57
from gwpy.timeseries import TimeSeries
gwdata = TimeSeries.fetch('H1:LDAS-STRAIN', 'September 16 2010 06:40',
                          'September 16 2010 06:50')
specgram = gwdata.spectrogram(20, fftlength=8, overlap=4) ** (1/2.)
plot = specgram.plot(norm='log', vmin=1e-23, vmax=1e-19)
ax = plot.gca()
ax.set_ylim(40, 4000)
ax.set_yscale('log')
plot.add_colorbar(label=r'Gravitational-wave strain [m/$\sqrt{\mathrm{Hz}}$]')
plot.show()
Example #58
# Heterodyning can be useful in analysing quasi-monochromatic signals
# with a known phase evolution, such as continuous-wave signals
# from rapidly rotating neutron stars. These sources radiate at a
# frequency that slowly decreases over time, and is Doppler modulated
# due to the Earth's rotational and orbital motion.

# To see an example of heterodyning in action, we can simulate a signal
# whose phase evolution is described by the frequency and its first
# derivative with respect to time. We can download some O1 era
# LIGO-Livingston data from GWOSC, inject the simulated signal, and
# recover its amplitude.

from gwpy.timeseries import TimeSeries
data = TimeSeries.fetch_open_data('L1', 1131350417, 1131354017)

# We now need to set the signal parameters, generate the expected
# phase evolution, and create the signal:

import numpy
f0 = 123.456789  # signal frequency (Hz)
fdot = -9.87654321e-7  # signal frequency derivative (Hz/s)
fepoch = 1131350417  # phase epoch (GPS seconds)
amp = 1.5e-22  # signal amplitude
phase0 = 0.4  # signal phase at the phase epoch
times = data.times.value - fepoch
phase = 2 * numpy.pi * (f0 * times + 0.5 * fdot * times**2)
signal = TimeSeries(amp * numpy.cos(phase + phase0),
                    sample_rate=data.sample_rate,
                    t0=data.t0)
data = data.inject(signal)
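
# To recover the injected amplitude, we can mix the data down at the known
# phase and average over one-minute strides (a sketch, assuming the
# :meth:`~gwpy.timeseries.TimeSeries.heterodyne` method of gwpy >= 2.0):

het = data.heterodyne(phase + phase0, stride=60, singlesided=True)
amp = het.abs()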
Example #59
from numpy import zeros

from gwpy.timeseries import TimeSeries

# (The opening of this example is missing; the first function's name is a
# hypothetical reconstruction patterned on `step_like` below.)
def impulse_like(data):
    temp = zeros(len(data))
    temp[len(temp) // 2] = 1.
    return TimeSeries(temp, epoch=data.epoch, sample_rate=data.sample_rate)

def step_like(data):
    temp = zeros(len(data))
    temp[len(temp) // 2:] = 1.
    return TimeSeries(temp, epoch=data.epoch, sample_rate=data.sample_rate)

if __name__ == '__main__':
    import sys
    if len(sys.argv) < 6:
        print("Args: chan t_PSD dur_PSD seglen t_glitch")
        sys.exit()
    chan = sys.argv[1]
    t1_psd = int(sys.argv[2])
    dur_psd = int(sys.argv[3])
    seglen = int(sys.argv[4])
    tt = float(sys.argv[5])

    st = int(tt) - seglen // 2
    
    data_for_psd = TimeSeries.fetch(chan, t1_psd, t1_psd+dur_psd)
    data = TimeSeries.fetch(chan, st, st+seglen)
    invasd = build_whitener(data_for_psd, seglen,  method='median-mean')
    data = step_like(data)
    temp = apply_whitening(data, invasd)
    #plot = data.plot()
    plot = temp.plot()
    plot.show()

Example #60
import numpy
from scipy import signal

from gwpy.timeseries import TimeSeries

from .. import (config, core)

from matplotlib import use
use('agg')  # noqa

# backend-dependent import
from .. import plot  # noqa: E402

__author__ = 'Alex Urban <*****@*****.**>'

# global test objects

FFTLENGTH = 8

NOISE = TimeSeries(numpy.random.normal(loc=1, scale=.5, size=16384 * 68),
                   sample_rate=16384,
                   epoch=-34).zpk([], [0], 1)
GLITCH = TimeSeries(signal.gausspulse(numpy.arange(-1, 1, 1. / 16384), bw=100),
                    sample_rate=16384,
                    epoch=-1) * 1e-4
INPUT = NOISE.inject(GLITCH)

CONFIGURATION = {
    'q-range': '3.3166,150',
    'frequency-range': '4.0,Inf',
    'plot-time-durations': '4',
    'always-plot': 'True',
}
CHANNEL = config.OmegaChannel(channelname='L1:TEST-STRAIN',
                              section='test',
                              **CONFIGURATION)