Example #1
    def fetch(self, channels, t0, duration, fs, nproc=4):
        """ Fetch data """
        # if ``channels`` is a path to a file, read one channel name per line
        if isinstance(channels, str):
            with open(channels) as chanfile:
                channels = chanfile.read().splitlines()
        target_channel = channels[0]

        # get data and resample
        data = TimeSeriesDict.get(channels,
                                  t0,
                                  t0 + duration,
                                  nproc=nproc,
                                  allow_tape=True)
        data = data.resample(fs)

        # sorted by channel name
        data = OrderedDict(sorted(data.items()))

        # reset attributes
        self.data = []
        self.channels = []
        for chan, ts in data.items():
            self.data.append(ts.value)
            self.channels.append(chan)
        self.data = np.stack(self.data)
        self.channels = np.array(self.channels)
        self.t0 = t0
        self.fs = fs
        self.target_idx = np.where(self.channels == target_channel)[0][0]
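
# Hypothetical usage sketch (not part of the original snippet): the class
# owning ``fetch`` is not shown, so ``loader`` stands in for an instance of
# it, and the channel names and GPS time are placeholders.
loader.fetch(
    ['L1:GDS-CALIB_STRAIN', 'L1:PEM-CS_ACC_PSL_PERISCOPE_X_DQ'],
    t0=1186741861,  # GPS start time
    duration=64,    # seconds of data
    fs=2048,        # target sample rate [Hz]
)
print(loader.data.shape)  # (n_channels, duration * fs)
print(loader.target_idx)  # row index of the target (first) channel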
Example #2
    pyplot.ion()

# Before anything else, we import the objects we will need:
from gwpy.time import tconvert
from gwpy.timeseries import TimeSeriesDict
from gwpy.plotter import BodePlot

# and set the times of our query, and the channels we want:
start = tconvert('May 27 2014 04:00')
end = start + 1800
gndchannel = 'L1:ISI-GND_STS_ITMY_Z_DQ'
hpichannel = 'L1:HPI-ITMY_BLND_L4C_Z_IN1_DQ'

# We can call the :meth:`~TimeSeriesDict.get` method of the `TimeSeriesDict`
# to retrieve all data in a single operation:
data = TimeSeriesDict.get([gndchannel, hpichannel], start, end, verbose=True)
gnd = data[gndchannel]
hpi = data[hpichannel]

# Next, we can call the :meth:`~TimeSeries.average_fft` method to calculate
# an averaged, complex-valued FFT for each `TimeSeries`:
gndfft = gnd.average_fft(100, 50, window='hamming')
hpifft = hpi.average_fft(100, 50, window='hamming')

# Finally, we can divide one by the other to get the transfer function
# (up to the lower of the two Nyquist frequencies):
size = min(gndfft.size, hpifft.size)
tf = hpifft[:size] / gndfft[:size]

# The `~gwpy.plotter.BodePlot` knows how to separate a complex-valued
# `~gwpy.spectrum.Spectrum` into magnitude and phase:
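# The snippet is truncated here; a plausible completion, assuming the
# legacy ``gwpy.plotter`` BodePlot API, would be:
plot = BodePlot(tf)
plot.maxes.set_title(
    r'L1 ISI ground $\rightarrow$ HPI transfer function')
plot.show()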
Example #3
from gwpy.timeseries import TimeSeriesDict
alldata = TimeSeriesDict.get(
    ['H1:PSL-PWR_PMC_TRANS_OUT16', 'H1:IMC-PWR_IN_OUT16'],
    'Feb 1 00:00', 'Feb 1 02:00')
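# The result is dict-like, keyed by channel name; e.g. (a usage sketch):
pslpwr = alldata['H1:PSL-PWR_PMC_TRANS_OUT16']
imcpwr = alldata['H1:IMC-PWR_IN_OUT16']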
Example #4
(`~gwpy.spectrum.Spectrum`) giving a time-averaged measure of coherence.

The `TimeSeries` method :meth:`~TimeSeries.coherence_spectrogram` performs the
same coherence calculation every ``stride``, giving a time-varying coherence
measure.

"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = 'gwpy.timeseries'

# First, we import the `TimeSeriesDict`
from gwpy.timeseries import TimeSeriesDict

# and then :meth:`~TimeSeriesDict.get` both data sets:
data = TimeSeriesDict.get(['L1:LSC-SRCL_IN1_DQ', 'L1:LSC-CARM_IN1_DQ'],
                          'Feb 13 2015', 'Feb 13 2015 00:15')

# We can then use the :meth:`~TimeSeries.coherence_spectrogram` method
# of one `TimeSeries` to calculate the time-varying coherence with
# respect to the other, using a 0.5-second FFT length, a 0.45-second
# (90%) overlap, and an 8-second stride:
coh = data['L1:LSC-SRCL_IN1_DQ'].coherence_spectrogram(
    data['L1:LSC-CARM_IN1_DQ'], 8, 0.5, 0.45)

# Finally, we can :meth:`~gwpy.spectrogram.Spectrogram.plot` the
# resulting data
plot = coh.plot()
ax = plot.gca()
ax.set_ylabel('Frequency [Hz]')
ax.set_yscale('log')
ax.set_ylim(10, 8000)
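# The scraped example ends here; a typical finishing step (an assumption,
# based on the legacy ``Plot.add_colorbar`` API) would be:
plot.add_colorbar(label='Coherence', clim=[0, 1])
plot.show()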
Example #5
# We need two imports: one for data access,
from gwpy.timeseries import TimeSeriesDict
# and one for plotting the data:
from gwpy.plotter import TimeSeriesPlot

# Next we define the channels we want, namely the 0.03-0.1 Hz ground motion
# band-limited RMS channels (1-second average trends).
# We do this using string-replacement so we can substitute the interferometer
# prefix easily when we need to:
channels = [
    '%s:ISI-BS_ST1_SENSCOR_GND_STS_X_BLRMS_30M_100M.mean,s-trend',
    '%s:ISI-BS_ST1_SENSCOR_GND_STS_Y_BLRMS_30M_100M.mean,s-trend',
    '%s:ISI-BS_ST1_SENSCOR_GND_STS_Z_BLRMS_30M_100M.mean,s-trend',
]

# At last we can :meth:`~TimeSeriesDict.get` 12 hours of data for each
# interferometer:
lho = TimeSeriesDict.get([c % 'H1' for c in channels], 'Feb 13 2015 16:00',
                         'Feb 14 2015 04:00')
llo = TimeSeriesDict.get([c % 'L1' for c in channels], 'Feb 13 2015 16:00',
                         'Feb 14 2015 04:00')

# Next we can plot the data, with a separate `~gwpy.plotter.Axes` for each
# instrument:
plot = TimeSeriesPlot(lho, llo)
ax1, ax2 = plot.axes
for ifo, ax in zip(('Hanford', 'Livingston'), (ax1, ax2)):
    ax.legend(['X', 'Y', 'Z'])
    ax.set_yscale('log')
    ax.text(1.01,
            0.5,
            ifo,
            ha='left',
            va='center',
            transform=ax.transAxes,
            fontsize=18)
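# assumed continuation (a later example in this collection completes the
# same figure with a shared y-label, a title, and show()):
ax1.set_ylabel(r'$0.03$-$0.1$\,Hz motion [nm/s]', y=-0.1)
ax2.set_ylabel('')
ax1.set_title('Magnitude 7.1 earthquake impact on LIGO')
plot.show()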
Example #6
        continue
    ### Get triggers
    print("Collecting triggers for labelling")
    trig_files = idq.get_all_files_in_range(trigger_dir,
                                            seg[0],
                                            seg[1],
                                            pad=0,
                                            suffix='.trg')
    trigger_dict = event.loadkwm(trig_files)
    trigger_dict.include([[seg[0], seg[1]]])
    if trigger_dict[gwchannel]:
        trigger_dict.apply_signif_threshold(threshold=signif_threshold,
                                            channels=[gwchannel])
        darmtrg = trigger_dict.get_triggers_from_channel(gwchannel)
        auxdata = TimeSeriesDict.get(
            channels, seg[0], seg[1], frametype='L1_R',
            verbose=True)  # generate a dictionary of aux data for each segment
        for key, value in auxdata.items():
            # whiten the data; whiten() returns a new TimeSeries
            auxdata[key] = value.whiten(fftlength, overlap)
            # convert all channels to the same sampling rate
            if auxdata[key].sample_rate.value != samplerate:
                auxdata[key] = auxdata[key].resample(samplerate)
                assert auxdata[key].sample_rate.value == samplerate

        datapath = '/home/kyle.rose/RNN/runs/%d/version%d/Lockseg%d_%d-%d_Data' % (
            run, version, (i + 10), seg[0], seg[1])
        if not os.path.exists(os.path.dirname(datapath)):
            try:
                os.makedirs(os.path.dirname(datapath))
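            except OSError:
                # hypothetical completion: the snippet is cut off inside the
                # try block; ignoring the error covers the race where another
                # process creates the directory first
                pass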
Example #7
    pyplot.ion()

# Before anything else, we import the objects we will need:
from gwpy.time import tconvert
from gwpy.timeseries import TimeSeriesDict
from gwpy.plot import BodePlot

# and set the times of our query, and the channels we want:
start = tconvert('May 27 2014 04:00')
end = start + 1800
gndchannel = 'L1:ISI-GND_STS_ITMY_Z_DQ'
hpichannel = 'L1:HPI-ITMY_BLND_L4C_Z_IN1_DQ'

# We can call the :meth:`~TimeSeriesDict.get` method of the `TimeSeriesDict`
# to retrieve all data in a single operation:
data = TimeSeriesDict.get([gndchannel, hpichannel], start, end, verbose=True)
gnd = data[gndchannel]
hpi = data[hpichannel]

# Next, we can call the :meth:`~TimeSeries.average_fft` method to calculate
# an averaged, complex-valued FFT for each `TimeSeries`:
gndfft = gnd.average_fft(100, 50, window='hamming')
hpifft = hpi.average_fft(100, 50, window='hamming')

# Finally, we can divide one by the other to get the transfer function
# (up to the lower of the two Nyquist frequencies):
size = min(gndfft.size, hpifft.size)
tf = hpifft[:size] / gndfft[:size]

# The `~gwpy.plot.BodePlot` knows how to separate a complex-valued
# `~gwpy.frequencyseries.FrequencySeries` into magnitude and phase:
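# The snippet is truncated here; a plausible completion, assuming gwpy's
# current ``BodePlot`` API, would be:
plot = BodePlot(tf)
plot.maxes.set_title(
    r'L1 ISI ground $\rightarrow$ HPI transfer function')
plot.show()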
Example #8
from gwpy.timeseries import TimeSeriesDict
data = TimeSeriesDict.get(
    [
        "H1:ISI-GND_STS_ITMY_Z_BLRMS_30M_100M.rms,s-trend",
        "H1:ISI-GND_STS_ETMY_Z_BLRMS_30M_100M.rms,s-trend"
    ],
    "July 22 2021 12:00",
    "July 22 2021 14:00",
)
plot = data.plot(ylabel="Ground motion [nm/s]")
plot.show()
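# A small follow-on sketch (not part of the original): each entry is a
# regular `TimeSeries`, so per-channel statistics come for free:
for name, ts in data.items():
    print(name, ts.max())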
Example #9
# We need two imports: one for data access,
from gwpy.timeseries import TimeSeriesDict
# and one for plotting the data:
from gwpy.plot import Plot

# Next we define the channels we want, namely the 0.03-0.1 Hz ground motion
# band-limited RMS channels (1-second average trends).
# We do this using string-replacement so we can substitute the interferometer
# prefix easily when we need to:
channels = [
    '{ifo}:ISI-BS_ST1_SENSCOR_GND_STS_X_BLRMS_30M_100M.mean,s-trend',
    '{ifo}:ISI-BS_ST1_SENSCOR_GND_STS_Y_BLRMS_30M_100M.mean,s-trend',
    '{ifo}:ISI-BS_ST1_SENSCOR_GND_STS_Z_BLRMS_30M_100M.mean,s-trend',
]

# At last we can :meth:`~TimeSeriesDict.get` 12 hours of data for each
# interferometer:
lho = TimeSeriesDict.get([c.format(ifo='H1') for c in channels],
                         'Feb 13 2015 16:00', 'Feb 14 2015 04:00')
llo = TimeSeriesDict.get([c.format(ifo='L1') for c in channels],
                         'Feb 13 2015 16:00', 'Feb 14 2015 04:00')

# Next we can plot the data, with a separate `~gwpy.plot.Axes` for each
# instrument:
plot = Plot(lho, llo, figsize=(12, 6), sharex=True, yscale='log')
ax1, ax2 = plot.axes
for ifo, ax in zip(('Hanford', 'Livingston'), (ax1, ax2)):
    ax.legend(['X', 'Y', 'Z'])
    ax.text(1.01, 0.5, ifo, ha='left', va='center', transform=ax.transAxes,
            fontsize=18)
ax1.set_ylabel(r'$0.03$-$0.1$\,Hz motion [nm/s]', y=-0.1)
ax2.set_ylabel('')
ax1.set_title('Magnitude 7.1 earthquake impact on LIGO')
plot.show()
Example #10
def timeseriesdict(start, end):
    # ``args`` is an argparse namespace defined elsewhere in the script
    return TimeSeriesDict.get([args.channel1, args.channel2],
                              start,
                              end,
                              verbose=True,
                              frametype='H1_R')
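
# Hypothetical usage (the GPS times are placeholders):
data = timeseriesdict(1126259446, 1126259446 + 600)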
Example #11
# grabbing individual data from TimeSeriesDict
c1_b = data[args.channel1]
c2_b = data[args.channel2]

plt_type = 'TimeSeries'
c1_b_name = c1_b.name
c2_b_name = c2_b.name
c = c1_b_name.replace("_", r"\_")
d = c2_b_name.replace("_", r"\_")
plot_2(c1_b, c2_b, EQ_before, EQ_after, c, d, plt_type,
       args.date_str + ", time(sec)", "velocity (nm/s)",
       "total" + plt_type + str(EQ_before) + r"\_" + str(EQ_after))
'''
dict_data = []
for start, end in time_segs:
    data = TimeSeriesDict.get([args.channel1, args.channel2],
                              start,
                              end,
                              verbose=True,
                              frametype='H1_R')
    dict_data.append(data)
print(dict_data[0][args.channel1])
print(dict_data[1][args.channel1])
print(time_segs[0][0])
# plotting before, during, after the EQ

# plt_type = ASD, BLRMS, TimeSeries, AVG_FFT
# data = TimeSeriesDict.get([args.channel1, args.channel2], start, end, verbose=True, frametype='H1_R')
# grabbing individual data from TimeSeriesDict
c1_b = dict_data[0][args.channel1]
c2_b = dict_data[0][args.channel2]
c1_d = dict_data[1][args.channel1]
Example #12
def compute_all(channels,
                start,
                stop,
                history=timedelta(hours=2),
                filename=DEFAULT_FILENAME,
                **kwargs):
    # set up duration: minute-trend data has dt=1min, so reject intervals
    # that are not a whole number of minutes.
    duration = (stop - start).total_seconds() / 60
    assert duration == int(duration), 'start/stop must be minute-aligned'
    duration = int(duration)
    logger.info(
        f'Clustering data from {start} to {stop} ({duration} minutes).')

    # download data using TimeSeriesDict.get(), including ``history`` of
    # data before ``start``.
    logger.debug(
        f'Initiating download from {start} to {stop} with history={history}...'
    )
    dl = TimeSeriesDict.get(channels,
                            start=to_gps(start - history),
                            end=to_gps(stop))
    logger.info(f'Downloaded from {start} to {stop} with history={history}.')

    if exists('input.npy'):
        input_data = np.load('input.npy')
        logger.info('Loaded input matrix.')
    else:
        # generate input matrix of the form [sample1;...;sampleN] with sampleK = [feature1,...,featureN]
        # for sklearn.cluster algorithms. This is the slow part of the function, so a progress bar is shown.
        logger.debug(f'Initiating input matrix generation...')
        with Progress('building input', (duration * 60)) as progress:
            input_data = stack([
                concatenate([
                    progress(dl[channel].crop,
                             t,
                             start=to_gps(start + timedelta(seconds=t) -
                                          history),
                             end=to_gps(start + timedelta(seconds=t))).value
                    for channel in channels
                ]) for t in range(0, int(duration * 60), 60)
            ])

        # verify input matrix dimensions.
        assert input_data.shape == (
            duration, int(len(channels) * history.total_seconds() / 60))
        np.save('input.npy', input_data)
        logger.info('Completed input matrix generation.')

    params = {
        'quantile': .3,
        'eps': .3,
        'damping': .9,
        'preference': -200,
        'n_neighbors': 10,
        'n_clusters': 15,
        'min_samples': 20,
        'xi': 0.05,
        'min_cluster_size': 0.1
    }

    if exists('X.npy'):
        X = np.load('X.npy')
        logger.info('Loaded X')
    else:
        # normalize dataset for easier parameter selection
        X = StandardScaler().fit_transform(input_data)
        np.save('X.npy', X)
        logger.info('Generated X')

    if exists('bandwidth.npy'):
        bandwidth = np.load('bandwidth.npy')
        logger.info('Loaded bandwidth')
    else:
        # estimate bandwidth for mean shift
        bandwidth = cluster.estimate_bandwidth(X, quantile=params['quantile'])
        np.save('bandwidth.npy', bandwidth)
        logger.info('Generated bandwidth')

    if exists('connectivity.npy'):
        connectivity = np.load('connectivity.npy', allow_pickle=True)
        logger.info('Loaded connectivity')
    else:
        # connectivity matrix for structured Ward
        connectivity = kneighbors_graph(X,
                                        n_neighbors=params['n_neighbors'],
                                        include_self=False)
        # make connectivity symmetric
        connectivity = 0.5 * (connectivity + connectivity.T)
        np.save('connectivity.npy', connectivity)
        logger.info('Generated connectivity')

    ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
    two_means = cluster.MiniBatchKMeans(n_clusters=params['n_clusters'])
    ward = cluster.AgglomerativeClustering(n_clusters=params['n_clusters'],
                                           linkage='ward',
                                           connectivity=connectivity)
    spectral = cluster.SpectralClustering(n_clusters=params['n_clusters'],
                                          eigen_solver='arpack',
                                          affinity="nearest_neighbors")
    dbscan = cluster.DBSCAN(eps=params['eps'])
    optics = cluster.OPTICS(min_samples=params['min_samples'],
                            xi=params['xi'],
                            min_cluster_size=params['min_cluster_size'])
    affinity_propagation = cluster.AffinityPropagation(
        damping=params['damping'], preference=params['preference'])
    average_linkage = cluster.AgglomerativeClustering(
        linkage="average",
        affinity="cityblock",
        n_clusters=params['n_clusters'],
        connectivity=connectivity)
    birch = cluster.Birch(n_clusters=params['n_clusters'])
    gmm = mixture.GaussianMixture(n_components=params['n_clusters'],
                                  covariance_type='full')

    clustering_algorithms = (
        ('MiniBatchKMeans', two_means),
        ('AffinityPropagation', affinity_propagation), ('MeanShift', ms),
        ('SpectralClustering', spectral), ('DBSCAN', dbscan),
        ('OPTICS', optics), ('Birch', birch), ('GaussianMixture', gmm)
        # ('Ward', ward),
        # ('AgglomerativeClustering', average_linkage),
    )

    for name, algorithm in clustering_algorithms:
        if exists(f'part-{name}-{filename}'):
            labels = TimeSeries.read(f'part-{name}-{filename}',
                                     f'{name}-labels')
            logger.debug(f'LOADED {name}.')
        else:
            logger.debug(f'doing {name}...')
            # catch warnings related to kneighbors_graph
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    "ignore",
                    message="the number of connected components of the " +
                    "connectivity matrix is [0-9]{1,2}" +
                    " > 1. Completing it to avoid stopping the tree early.",
                    category=UserWarning)
                warnings.filterwarnings(
                    "ignore",
                    message="Graph is not fully connected, spectral embedding"
                    + " may not work as expected.",
                    category=UserWarning)
                algorithm.fit(X)

            if hasattr(algorithm, 'labels_'):
                y_pred = algorithm.labels_.astype(int)
            else:
                y_pred = algorithm.predict(X)
            # cast the output labels to a TimeSeries so that cropping is easy later on.
            labels = TimeSeries(
                y_pred,
                times=dl[channels[0]].crop(start=to_gps(start),
                                           end=to_gps(stop)).times,
                name=f'{name}-labels')

            labels.write(f'part-{name}-{filename}')
        # put labels in data download dictionary for easy saving.
        dl[labels.name] = labels

    # write data download and labels to specified filename.
    cache_file = abspath(filename)
    if exists(cache_file):
        remove(cache_file)
    dl.write(cache_file)
    logger.info(f'Wrote cache to {filename}')
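
# Hypothetical invocation (channel names and dates are placeholders; the
# function expects datetimes and minute-trend channel names):
from datetime import datetime
compute_all(
    ['L1:ISI-GND_STS_ITMY_Z_BLRMS_30M_100M.mean,m-trend',
     'L1:ISI-GND_STS_ETMY_Z_BLRMS_30M_100M.mean,m-trend'],
    start=datetime(2015, 2, 13, 16, 0),
    stop=datetime(2015, 2, 14, 4, 0),
)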
Example #13
resultName = "./Results/{}_{}.txt".format(IFO, ID)

channels = [
    'L1:ISI-GND_STS_ITMY_X_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ITMY_Y_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ITMY_Z_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMY_X_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMY_Y_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMY_Z_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMX_X_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMX_Y_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMX_Z_BLRMS_{}.mean,s-trend'.format(blrms_band),
]

print("Fetching data......")
DATA = TimeSeriesDict.get([c for c in channels], tstart, tstop)

# all channels cover the same span, so take the length from any of them
for i in DATA:
    LEN = len(DATA[i].value)

dataX = np.zeros((len(channels), LEN))

count = 0
for i in DATA:
    if eqBandpass:
        # crude brick-wall bandpass: zero FFT coefficients outside
        # 20-100 mHz (assuming scipy.fftpack's packed rfft format, for
        # which rfftfreq, not fftfreq, is the matching frequency helper)
        W = fftfreq(DATA[i].value.size, d=DATA[i].dt.value)
        f_signal = rfft(DATA[i].value)
        cut_f_signal = f_signal.copy()
        cut_f_signal[W < 20.0e-3] = 0
        cut_f_signal[W > 100.0e-3] = 0
        cut_signal = irfft(cut_f_signal)
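        # Aside (an assumption, not in the original): gwpy's TimeSeries
        # offers a one-line alternative to the manual FFT round-trip:
        #     cut_signal = DATA[i].bandpass(20.0e-3, 100.0e-3).value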
Example #14
of coherence.

The `TimeSeries` method :meth:`~TimeSeries.coherence_spectrogram` performs the
same coherence calculation every ``stride``, giving a time-varying coherence
measure.

"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = "gwpy.timeseries"

# First, we import the `TimeSeriesDict`
from gwpy.timeseries import TimeSeriesDict

# and then :meth:`~TimeSeriesDict.get` both data sets:
data = TimeSeriesDict.get(
    ["L1:LSC-SRCL_IN1_DQ", "L1:LSC-CARM_IN1_DQ"],
    "Feb 13 2015", "Feb 13 2015 00:15")

# We can then use the :meth:`~TimeSeries.coherence_spectrogram` method
# of one `TimeSeries` to calculate the time-varying coherence with
# respect to the other, using a 0.5-second FFT length, a 0.45-second
# (90%) overlap, and an 8-second stride:
coh = data["L1:LSC-SRCL_IN1_DQ"].coherence_spectrogram(
    data["L1:LSC-CARM_IN1_DQ"], 8, 0.5, 0.45)

# Finally, we can :meth:`~gwpy.spectrogram.Spectrogram.plot` the
# resulting data
plot = coh.plot()
ax = plot.gca()
ax.set_ylabel("Frequency [Hz]")
ax.set_yscale("log")
ax.set_ylim(10, 8000)
ax.set_title("Coherence between SRCL and CARM for L1")
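# assumed continuation, mirroring the other coherence spectrogram examples
# in this collection:
ax.colorbar(label="Coherence", clim=[0, 1])
plot.show()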
Example #15
same coherence calculation every ``stride``, giving a time-varying coherence
measure.

"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = 'gwpy.timeseries'

# First, we import the `TimeSeriesDict`
from gwpy.timeseries import TimeSeriesDict

# and then :meth:`~TimeSeriesDict.get` the data for the strain output
# (``H1:GDS-CALIB_STRAIN``) and the PSL periscope accelerometer
# (``H1:PEM-CS_ACC_PSL_PERISCOPE_X_DQ``):
data = TimeSeriesDict.get(['H1:GDS-CALIB_STRAIN',
                           'H1:PEM-CS_ACC_PSL_PERISCOPE_X_DQ'],
                          1126260017, 1126260617)
hoft = data['H1:GDS-CALIB_STRAIN']
acc = data['H1:PEM-CS_ACC_PSL_PERISCOPE_X_DQ']

# We can then calculate the time-varying :meth:`~TimeSeries.coherence` of
# one `TimeSeries` with respect to the other, using a 10-second stride, a
# 0.5-second FFT length, and a 0.25-second (50%) overlap:
coh = hoft.coherence_spectrogram(acc, 10, fftlength=.5, overlap=.25)

# Finally, we can :meth:`~gwpy.spectrogram.Spectrogram.plot` the
# resulting data
plot = coh.plot()
ax = plot.gca()
ax.set_ylabel('Frequency [Hz]')
ax.set_yscale('log')
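# assumed continuation (based on the legacy ``Plot.add_colorbar`` API,
# mirroring the other coherence spectrogram examples in this collection):
plot.add_colorbar(label='Coherence', clim=[0, 1])
plot.show()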
Example #16
# We need two imports: one for data access,
from gwpy.timeseries import TimeSeriesDict
# and one for plotting the data:
from gwpy.plotter import TimeSeriesPlot

# Next we define the channels we want, namely the 0.03-0.1 Hz ground motion
# band-limited RMS channels (1-second average trends).
# We do this using string-replacement so we can substitute the interferometer
# prefix easily when we need to:
channels = [
    '%s:ISI-BS_ST1_SENSCOR_GND_STS_X_BLRMS_30M_100M.mean,s-trend',
    '%s:ISI-BS_ST1_SENSCOR_GND_STS_Y_BLRMS_30M_100M.mean,s-trend',
    '%s:ISI-BS_ST1_SENSCOR_GND_STS_Z_BLRMS_30M_100M.mean,s-trend',
]

# At last we can :meth:`~TimeSeriesDict.get` 12 hours of data for each
# interferometer:
lho = TimeSeriesDict.get([c % 'H1' for c in channels],
                         'Feb 13 2015 16:00', 'Feb 14 2015 04:00', verbose=True)
llo = TimeSeriesDict.get([c % 'L1' for c in channels],
                         'Feb 13 2015 16:00', 'Feb 14 2015 04:00', verbose=True)

# Next we can plot the data, with a separate `~gwpy.plotter.Axes` for each
# instrument:
plot = TimeSeriesPlot(lho, llo)
for ifo, ax in zip(['H1', 'L1'], plot.axes):
    ax.legend(['X', 'Y', 'Z'])
    ax.yaxis.set_label_position('right')
    ax.set_ylabel(ifo, rotation=0, va='center', ha='left')
    ax.set_yscale('log')
plot.text(0.1, 0.5, r'$0.03$-$0.1$\,Hz motion [nm/s]', rotation=90, fontsize=24,
          ha='center', va='center')
plot.axes[0].set_title('Magnitude 7.1 earthquake impact on LIGO', fontsize=24)
plot.show()