Example #1
 def _next(self):
     uchannels = self._unique_channel_names(self.channels)
     new = TimeSeriesDict()
     span = 0
     epoch = 0
     self.logger.debug('Waiting for next NDS2 packet...')
     while span < self.interval:
         try:
             buffers = next(self.iterator)
         except RuntimeError as e:
             self.logger.error('RuntimeError caught: %s' % str(e))
             self.restart()
             break
         for buff, c in zip(buffers, uchannels):
             ts = TimeSeries.from_nds2_buffer(buff)
             try:
                 new.append({c: ts}, gap=self.gap, pad=self.pad)
             except ValueError as e:
                 if 'discontiguous' in str(e):
                     e.args = ('NDS connection dropped data between %d and '
                               '%d' % (epoch, ts.span[0]),)
                 raise
             span = abs(new[c].span)
             epoch = new[c].span[-1]
             self.logger.debug('%ds data for %s received'
                               % (abs(ts.span), str(c)))
     out = type(new)()
     for chan in self.channels:
         out[chan] = new[self._channel_basename(chan)].copy()
     return out
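
For context, a minimal standalone sketch (with fabricated data) of the
`TimeSeriesDict.append` gap/pad behaviour the loop above relies on:

from gwpy.timeseries import TimeSeries, TimeSeriesDict
import numpy as np

a = TimeSeries(np.zeros(16), t0=0, sample_rate=16, name='X1:TEST')
b = TimeSeries(np.ones(16), t0=1, sample_rate=16, name='X1:TEST')
buf = TimeSeriesDict()
buf.append({'X1:TEST': a})
buf.append({'X1:TEST': b}, gap='pad', pad=0.)  # pad would fill any gap
print(abs(buf['X1:TEST'].span))  # 2.0 -- the accumulated span in seconds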
Example #2
 def _next(self):
     uchannels = self._unique_channel_names(self.channels)
     new = TimeSeriesDict()
     span = 0
     epoch = 0
     att = 0
     self.logger.debug('Waiting for next NDS2 packet...')
     while span < self.interval:
         try:
             buffers = next(self.iterator)
         except RuntimeError as e:
             self.logger.error('RuntimeError caught: %s' % str(e))
             if att < self.attempts:
                 att += 1
                 wait_time = att / 3 + 1
                 self.logger.warning(
                     'Attempting to reconnect to the NDS server... %d/%d'
                     % (att, self.attempts))
                 self.logger.warning('Next attempt within %d seconds' %
                                     wait_time)
                 self.restart()
                 sleep(wait_time - tconvert('now') % wait_time)
                 continue
             else:
                 self.logger.critical(
                     'Maximum number of attempts reached, exiting')
                 break
         att = 0
         for buff, c in zip(buffers, uchannels):
             ts = TimeSeries.from_nds2_buffer(buff)
             try:
                 new.append({c: ts}, gap=self.gap, pad=self.pad)
             except ValueError as e:
                 if 'discontiguous' in str(e):
                     e.args = (
                         'NDS connection dropped data between %d and '
                         '%d, restarting the buffer from %d'
                         % (epoch, ts.span[0], ts.span[0]),)
                     self.logger.warning(str(e))
                     new = TimeSeriesDict()
                     new[c] = ts.copy()
                 elif ('starts before' in str(e)) or \
                         ('overlapping' in str(e)):
                     e.args = (
                         'Overlap between old data and new data in the '
                         'NDS buffer; only the new data will be kept.',)
                     self.logger.warning(str(e))
                     new = TimeSeriesDict()
                     new[c] = ts.copy()
                 else:
                     raise
             span = abs(new[c].span)
             epoch = new[c].span[-1]
             self.logger.debug('%ds data for %s received'
                               % (abs(ts.span), str(c)))
     out = type(new)()
     for chan in self.channels:
         out[chan] = new[self._channel_basename(chan)].copy()
     return out
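
The retry wait above aligns wake-ups to multiples of `wait_time`. As a
standalone sketch, assuming `tconvert('now')` yields the current GPS time:

from time import sleep
from gwpy.time import tconvert

def wait_aligned(period):
    # sleep until the next GPS time that is an exact multiple of `period`
    now = float(tconvert('now'))
    sleep(period - now % period)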
Example #3
File: sei.py Project: berkowitze/gwsumm
    def process(self):

        # data span
        start = self.gpstime - self.duration / 2.
        end = self.gpstime + self.duration / 2.

        # get data
        if self.use_nds:
            data = TimeSeriesDict.fetch(self.chanlist, start, end)
        else:
            from glue.datafind import GWDataFindHTTPConnection
            conn = GWDataFindHTTPConnection()
            cache = conn.find_frame_urls(self.ifo[0], '%s_C' % self.ifo,
                                         self.start, self.end, urltype='file')
            if len(cache) == 0:
                data = {}
            else:
                data = TimeSeriesDict.read(cache, self.chanlist, start=start,
                                           end=end, nproc=self.nproc)

        # make plot
        plot, axes = subplots(nrows=self.geometry[0], ncols=self.geometry[1],
                              sharex=True,
                              subplot_kw={'projection': 'timeseries'},
                              FigureClass=TimeSeriesPlot, figsize=[12, 6])
        axes[0,0].set_xlim(start, end)
        for channel, ax in zip(self.chanlist, axes.flat):
            ax.set_epoch(self.gpstime)
            # plot data
            try:
                ax.plot(data[channel])
            except KeyError:
                ax.text(self.gpstime, 0.5, "No data", va='center', ha='center',
                        transform=ax.transData)
            # plot trip indicator
            ylim = ax.get_ylim()
            ax.plot([self.gpstime, self.gpstime], ylim, linewidth=0.5,
                    linestyle='--', color='red')
            ax.set_ylim(*ylim)
            ax.set_xlabel('')
            ax.set_title(channel.texname, fontsize=10)
            ax.xaxis.set_minor_locator(NullLocator())
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(10)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(16)
        plot.text(0.5, 0.04, 'Time [seconds] from trip (%s)' % self.gpstime,
                  ha='center', va='bottom', fontsize=24)
        plot.text(0.01, 0.5, 'Amplitude %s' % self.unit, ha='left', va='center',
                  rotation='vertical', fontsize=24)

        plot.suptitle('%s %s %s watchdog trip: %s'
                      % (self.ifo, self.chamber, self.sensor, self.gpstime),
                      fontsize=24)

        plot.save(self.outputfile)
        plot.close()
        return self.outputfile
Example #4
 def __init__(self,
              channels,
              filename,
              start=default_start,
              end=default_end):
     self.channels = channels
     if path.exists('./data/{}.hdf5'.format(filename)):
         self.data = TimeSeriesDict.read('./data/{}.hdf5'.format(filename))
     else:
         self.data = TimeSeriesDict.fetch(channels, start, end)
         self.data.write('./data/{}.hdf5'.format(filename))
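
A hypothetical use of this read-or-fetch cache, assuming the `__init__`
above belongs to a class named `ChannelCache` and that `./data/` exists:

cache = ChannelCache(['L1:GDS-CALIB_STRAIN'], 'strain_test',
                     start=1126259446, end=1126259478)
print(cache.data)  # TimeSeriesDict: fetched once, reloaded from HDF5 after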
Example #5
File: sei.py Project: pvasired/gwsumm
    def process(self):

        # data span
        start = self.gpstime - self.duration / 2.
        end = self.gpstime + self.duration / 2.

        # get data
        if self.use_nds:
            data = TimeSeriesDict.fetch(self.chanlist, start, end)
        else:
            from glue.datafind import GWDataFindHTTPConnection
            conn = GWDataFindHTTPConnection()
            cache = conn.find_frame_urls(self.ifo[0], '%s_C' % self.ifo,
                                         self.start, self.end, urltype='file')
            data = TimeSeriesDict.read(cache, self.chanlist, start=start,
                                       end=end, nproc=self.nproc)

        # make plot
        plot, axes = subplots(nrows=self.geometry[0], ncols=self.geometry[1],
                              sharex=True,
                              subplot_kw={'projection': 'timeseries'},
                              FigureClass=TimeSeriesPlot, figsize=[12, 6])
        axes[0,0].set_xlim(start, end)
        for channel, ax in zip(self.chanlist, axes.flat):
            ax.set_epoch(self.gpstime)
            # plot data
            ax.plot(data[channel])
            # plot trip indicator
            ylim = ax.get_ylim()
            ax.plot([self.gpstime, self.gpstime], ylim, linewidth=0.5,
                    linestyle='--', color='red')
            ax.set_ylim(*ylim)
            ax.set_xlabel('')
            ax.set_title(channel.texname, fontsize=10)
            ax.xaxis.set_minor_locator(NullLocator())
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(10)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(16)
        plot.text(0.5, 0.04, 'Time [seconds] from trip (%s)' % self.gpstime,
                  ha='center', va='bottom', fontsize=24)
        plot.text(0.01, 0.5, 'Amplitude %s' % self.unit, ha='left', va='center',
                  rotation='vertical', fontsize=24)

        plot.suptitle('%s %s %s watchdog trip: %s'
                      % (self.ifo, self.chamber, self.sensor, self.gpstime),
                      fontsize=24)

        plot.save(self.outputfile)
        plot.close()
        return self.outputfile
Example #6
 def _next(self):
     uchannels = self._unique_channel_names(self.channels)
     new = TimeSeriesDict()
     span = 0
     epoch = 0
     att = 0
     self.logger.debug('Waiting for next NDS2 packet...')
     while span < self.interval:
         try:
             buffers = next(self.iterator)
         except RuntimeError as e:
             self.logger.error('RuntimeError caught: %s' % str(e))
             if att < self.attempts:
                 att += 1
                 wait_time = att / 3 + 1
                 self.logger.warning(
                     'Attempting to reconnect to the NDS server... %d/%d' %
                     (att, self.attempts))
                 self.logger.warning('Next attempt within %d seconds' %
                                     wait_time)
                 self.restart()
                 sleep(wait_time - tconvert('now') % wait_time)
                 continue
             else:
                 self.logger.critical(
                     'Maximum number of attempts reached, exiting')
                 break
         att = 0
         for buff, c in zip(buffers, uchannels):
             ts = TimeSeries.from_nds2_buffer(buff)
             try:
                 new.append({c: ts}, gap=self.gap, pad=self.pad)
             except ValueError as e:
                 if 'discontiguous' in str(e):
                     e.args = (
                         'NDS connection dropped data between %d and '
                         '%d, restarting the buffer from %d'
                         % (epoch, ts.span[0], ts.span[0]),)
                     self.logger.warning(str(e))
                     new = TimeSeriesDict()
                     new[c] = ts.copy()
                 elif ('starts before' in str(e)) or \
                         ('overlapping' in str(e)):
                     e.args = (
                         'Overlap between old data and new data in the '
                         'NDS buffer; only the new data will be kept.',)
                     self.logger.warning(str(e))
                     new = TimeSeriesDict()
                     new[c] = ts.copy()
                 else:
                     raise
             span = abs(new[c].span)
             epoch = new[c].span[-1]
             self.logger.debug('%ds data for %s received' %
                               (abs(ts.span), str(c)))
     out = type(new)()
     for chan in self.channels:
         out[chan] = new[self._channel_basename(chan)].copy()
     return out
Example #7
    def channeling_read(out_channels: List[str], **kwargs) -> TimeSeriesDict:
        out = TimeSeriesDict()

        for channel in out_channels:
            for prefix in search_dirs:
                for in_channel in in_channels:
                    try:
                        # lock the target file
                        h5file, _ = path2h5file(
                            get_path(f'{in_channel} {generation_start}',
                                     'hdf5', prefix=prefix),
                            mode='r')
                        # read off the dataset
                        out[channel] = TimeSeries.read(h5file, channel,
                                                       **kwargs)
                    except (FileNotFoundError, KeyError, OSError):
                        # file not found / hdf5 can't open file (OSError), channel not in file (KeyError)
                        continue
                    break
                else:
                    continue
                break
            else:
                # tried all search dirs but didn't find it
                raise FileNotFoundError(f'CANNOT FIND {channel}!!')
                # out[channel] = TimeSeries.get(channel, **kwargs)  # download instead; slow
        return out
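
The nested `for`/`else`/`break` above is the standard multi-level search
idiom: an `else` clause on a loop runs only when the loop finished without
a `break`. Reduced to a standalone sketch with placeholder names:

for prefix in ('dirA', 'dirB'):                   # outer search space
    for name in ('x.hdf5', 'y.hdf5'):             # inner search space
        if (prefix, name) == ('dirB', 'x.hdf5'):  # pretend this one opens
            break                  # success: stop the inner loop
    else:
        continue                   # inner loop never broke: try next prefix
    break                          # inner loop broke: stop searching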
Example #8
def timeseries(start, end, plot=True):
    kwargs = {'verbose': True, 'host': '10.68.10.121', 'port': 8088}
    #kwargs = {'verbose':True,'host':'localhost','port':8088}
    data = TimeSeriesDict.fetch(channels, start, end, **kwargs)
    c = 299792458  # m/sec
    lam = 1064e-9  # m
    #gif = data['K1:VIS-ETMX_GIF_ARM_L_OUT16']
    gif = data['K1:GIF-X_STRAIN_OUT16'] * 3000 * 1e6
    xarm = data['K1:CAL-CS_PROC_XARM_FILT_AOM_OUT16'] * 3000.0 / (
        c / lam) * 1e6  # [um]
    #xarm = data['K1:CAL-CS_PROC_XARM_FILT_TM_OUT16']*3000.0/(c/lam)*1e6 # [um]
    _etmx_seis = data['K1:PEM-SEIS_EXV_GND_X_OUT16']
    _itmx_seis = data['K1:PEM-SEIS_IXV_GND_X_OUT16']
    #etmx_seis = data['K1:PEM-SEIS_EXV_GND_X_OUT_DQ']
    #itmx_seis = data['K1:PEM-SEIS_IXV_GND_X_OUT_DQ']
    diff_seis = _etmx_seis - _itmx_seis
    #diff_seis = etmx_seis - itmx_seis
    #etmx_lvdt = data['K1:VIS-ETMX_IP_BLEND_LVDTL_IN1_DQ']
    #itmx_lvdt = data['K1:VIS-ITMX_IP_BLEND_LVDTL_IN1_DQ']
    #diff_lvdt = etmx_lvdt + itmx_lvdt
    comm_seis = _etmx_seis + _itmx_seis
    #comm_seis = diff_seis
    if plot:
        plt.plot(gif)
        plt.savefig('timeseries.png')
        plt.close()
    return gif, xarm, diff_seis, comm_seis  #,diff_lvdt
Example #9
    def fetch(self, channels, t0, duration, fs, nproc=4):
        """ Fetch data """
        # if channels is a file
        if isinstance(channels, str):
            channels = open(channels).read().splitlines()
        target_channel = channels[0]

        # get data and resample
        data = TimeSeriesDict.get(channels,
                                  t0,
                                  t0 + duration,
                                  nproc=nproc,
                                  allow_tape=True)
        data = data.resample(fs)

        # sorted by channel name
        data = OrderedDict(sorted(data.items()))

        # reset attributes
        self.data = []
        self.channels = []
        for chan, ts in data.items():
            self.data.append(ts.value)
            self.channels.append(chan)
        self.data = np.stack(self.data)
        self.channels = np.stack(self.channels)
        self.t0 = t0
        self.fs = fs
        self.target_idx = np.where(self.channels == target_channel)[0][0]
Example #10
def huge(start, end):
    source = filelist(start, end, trend='full', place='kashiwa')
    data = TimeSeriesDict.read(source, channels, start=start, end=end, nproc=4)
    c = 299792458  # m/sec
    lam = 1064e-9  # m
    gif = data['K1:VIS-ETMX_GIF_ARM_L_OUT16']
    xarm = data['K1:CAL-CS_PROC_XARM_FILT_AOM_OUT16'] * 3000.0 / (
        c / lam) * 1e6  # [um]
    etmx_seis = data['K1:PEM-SEIS_EXV_GND_X_OUT16']
    itmx_seis = data['K1:PEM-SEIS_IXV_GND_X_OUT16']
    diff_seis = etmx_seis - itmx_seis
    comm_seis = etmx_seis + itmx_seis

    # Coherence
    coh_gif2xarm = gif.coherence(xarm, fftlength=fftlen, overlap=ovlp)
    coh_gif2seis = gif.coherence(diff_seis, fftlength=fftlen, overlap=ovlp)
    coh_xarm2seiscomm = xarm.coherence(comm_seis,
                                       fftlength=fftlen,
                                       overlap=ovlp)

    # ASD
    gif = gif.asd(fftlength=fftlen, overlap=ovlp)
    xarm = xarm.asd(fftlength=fftlen, overlap=ovlp)
    diff_seis = diff_seis.asd(fftlength=fftlen, overlap=ovlp)
    comm_seis = comm_seis.asd(fftlength=fftlen, overlap=ovlp)
    w = 2.0 * np.pi * (diff_seis.frequencies.value)
    diff_seis = diff_seis / w
    comm_seis = comm_seis / w
    # NOTE: only the X-arm ASD is returned; the coherences above are unused
    return xarm
Example #11
def getData(channels, start, stop, filename, fftl=4, ovlp=2):
    if path.exists('{}.hdf5'.format(filename)):
        data = TimeSeriesDict.read('{}.hdf5'.format(filename))
    else:
        data = TimeSeriesDict.fetch(channels, start, stop)
        data.write('{}.hdf5'.format(filename), overwrite=True)
    spec = {}
    for i in channels:
        spec[i] = {}
        spec[i]['sp'], spec[i]['norm'] = specgram(data[i], fftl, ovlp)
        spec[i]['sp_asd'] = spec[i]['sp'].percentile(50)
        if channels.index(i) == 0:
            spec[i]['sp_asd'] = calDARM(spec[i]['sp_asd'][1:])
        if i[10:13] == 'ACC':
            spec[i]['sp_asd'] = calAccel(spec[i]['sp_asd'][1:])
    np.save(filename, spec)
    return spec
Example #12
 def compute(self, raw: TimeSeries) -> TimeSeriesDict:
     out = TimeSeriesDict()
     times = unique([60 * (t.value // 60) for t in raw.times])
     raw.name = raw.name + '.mean'
     out[raw.name] = TimeSeries(
         [raw.crop(t - 60, t).mean().value for t in times[1:]],
         times=times[1:])  # lengths must match: one mean per completed minute
     out[raw.name].__metadata_finalize__(raw)
     return out
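
An equivalent per-minute mean can be sketched with plain numpy reshaping,
assuming the input covers whole minutes at a fixed sample rate:

import numpy as np
from gwpy.timeseries import TimeSeries

def minute_means(ts):
    n = int(60 * ts.sample_rate.value)   # samples per minute
    whole = ts.value[:len(ts) // n * n]  # drop any trailing partial minute
    means = whole.reshape(-1, n).mean(axis=1)
    return TimeSeries(means, t0=ts.t0, dt=60, name='%s.mean' % ts.name)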
Example #13
def getData(channels,start,stop,filename):
    data = TimeSeriesDict.fetch(channels,start,stop)
    spec = {}
    for i in channels:
        spec[i] = {}
        spec[i]['sp'],spec[i]['norm'] = specgram(data[i])
        spec[i]['sp_asd'] = spec[i]['sp'].percentile(50)
    data.write('{}.hdf5'.format(filename),overwrite=True)
    np.save(filename,spec)
    return spec 
Example #14
def loadHDF5(filename):
    data = TimeSeriesDict.read('{}.hdf5'.format(filename))
    spec = {}
    channels = data.keys()
    for i in channels:
        spec[i] = {}
        spec[i]['sp'],spec[i]['norm'] = specgram(data[i])
        spec[i]['sp_asd'] = spec[i]['sp'].percentile(50)
    np.save(filename,spec)
    return spec
Example #15
    def update_data(self, new, gap='pad', pad=0):
        """Update the `SpectrogramMonitor` data

        This method only applies a ratio, if configured
        """
        # data buffer will return dict of 1-item lists, so reform to tsd
        new = TimeSeriesDict((key, val[0]) for key, val in new.items())
        self.spectrograms.append(self.spectrograms.from_timeseriesdict(new))
        self.spectrograms.crop(self.epoch - self.duration)
        self.data = type(self.spectrograms.data)()
        for channel in self.channels:
            self.data[channel] = type(self.spectrograms.data[channel])(
                *self.spectrograms.data[channel])
            if hasattr(channel, 'ratio') and channel.ratio is not None:
                for i in range(len(self.data[channel])):
                    self.data[channel][i] = (
                        self.spectrograms.data[channel][i].ratio(
                            channel.ratio))
        self.epoch = self.data[self.channels[0]][-1].span[-1]
        return self.data
Example #16
def get_array2d(start,end,axis='X',prefix='./data',**kwargs):
    '''
    '''
    nproc = kwargs.pop('nproc',4)
    bandpass = kwargs.pop('bandpass',None)
    blrms = kwargs.pop('blrms',None)
    fftlen = kwargs.pop('fftlen',2**8)
    overlap = fftlen/2

    # check existence of the spectrogram data
    fname_hdf5 = fname_hdf5_asd(start,end,prefix,axis)
    if os.path.exists(fname_hdf5):
        specgram = Spectrogram.read(fname_hdf5)
        if blrms:
            timeseries = specgram.crop_frequencies(blrms[0],blrms[1]).sum(axis=1)
            return timeseries
        return specgram
    
    # If the spectrogram does not exist, calculate it from timeseries data.
    try:
        fname = fname_gwf(start,end,prefix='./data')
        chname = get_seis_chname(start,end,axis=axis)
        # check existence of the timeseries data
        if os.path.exists(fname):
            data = TimeSeries.read(fname,chname,nproc=nproc)
        else:
            # when timeseries data does not exist
            fnamelist = existedfilelist(start,end)
            chname = get_seis_chname(start,end)
            datadict = TimeSeriesDict.read(fnamelist,chname,nproc=nproc)
            datadict = datadict.resample(32)
            datadict = datadict.crop(start,end)
            chname = get_seis_chname(start,end,axis=axis)
            datadict.write(fname,format='gwf.lalframe')
            data = TimeSeries.read(fname,chname,nproc=nproc)
            # If data broken, raise Error.
            if data.value.shape[0] != 131072:
                log.debug(data.value.shape)
                log.debug('####### {0} {1}'.format(start,end))
                raise ValueError('data broken')
    except:
        log.debug(traceback.format_exc())
        raise ValueError('!!!')

    # if data broken, raise Error.
    if data.value.shape[0] != 131072: # (131072 = 2**17 = 2**12[sec] * 2**5[Hz] )
        log.debug(data.value.shape)
        log.debug('!!!!!!!! {0} {1}'.format(start,end))
        raise ValueError('data broken')

    # calculate from timeseries data
    specgram = data.spectrogram2(fftlength=fftlen,overlap=overlap,nproc=nproc)
    specgram.write(fname_hdf5,format='hdf5',overwrite=True)
    return specgram
Example #17
def loadHDF5(filename, fftl=4, ovlp=2):
    data = TimeSeriesDict.read('{}.hdf5'.format(filename))
    spec = {}
    for i in data.keys():
        spec[i] = {}
        spec[i]['sp'], spec[i]['norm'] = specgram(data[i], fftl, ovlp)
        spec[i]['sp_asd'] = spec[i]['sp'].percentile(50)
        if i[10:13] == 'ACC':
            spec[i]['sp_asd'] = calAccel(spec[i]['sp_asd'][1:])
    np.save(filename, spec)
    return spec
Example #18
def get_guardian_segments(node, frametype, start, end, nproc=1, pad=(0, 0),
                          strict=False):
    """Determine state segments for a given guardian node
    """
    ifo, node = node.split(':', 1)
    if node.startswith('GRD-'):
        node = node[4:]
    pstart = start - pad[0]
    pend = end + pad[1]

    # find frame cache
    cache = data.find_frames(ifo, frametype, pstart, pend)

    # pre-format data segments
    span = SegmentList([Segment(pstart, pend)])
    segs = SegmentList()
    csegs = cache_segments(cache)
    if not csegs:
        return csegs

    # read data
    stub = "{}:GRD-{}".format(ifo, node)
    if strict:
        channels = ["{}_OK".format(stub)]
    else:
        state = "{}_STATE_N".format(stub)
        nominal = "{}_NOMINAL_N".format(stub)
        active = "{}_ACTIVE".format(stub)
        channels = [state, nominal, active]
    for seg in csegs & span:
        if strict:
            sv = StateVector.read(
                cache, channels[0], nproc=nproc, start=seg[0], end=seg[1],
                bits=[0], gap='pad', pad=0,).astype('uint32')
            segs += sv.to_dqflags().intersection().active
        else:
            gdata = TimeSeriesDict.read(
                cache, channels, nproc=nproc, start=seg[0], end=seg[1],
                gap='pad', pad=0)
            ok = ((gdata[state].value == gdata[nominal].value) &
                  (gdata[active].value == 1)).view(StateTimeSeries)
            ok.t0 = gdata[state].t0
            ok.dt = gdata[state].dt
            segs += ok.to_dqflag().active

    # truncate to integers, and apply padding
    for i, seg in enumerate(segs):
        segs[i] = type(seg)(int(ceil(seg[0])) + pad[0],
                            int(floor(seg[1])) - pad[1])
    segs.coalesce()

    return segs.coalesce()
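
A minimal sketch (with fabricated samples) of the boolean-to-segments step
used in the non-strict branch above:

from gwpy.timeseries import StateTimeSeries

ok = StateTimeSeries([0, 1, 1, 0, 1, 1, 1, 0], sample_rate=1, t0=0)
print(ok.to_dqflag().active)  # active segments [1, 3) and [4, 7)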
Example #19
def timeseries(start, end, plot=True):
    kwargs = {'verbose': True, 'host': '10.68.10.121', 'port': 8088}
    #kwargs = {'verbose':True,'host':'localhost','port':8088}
    data = TimeSeriesDict.fetch(channels, start, end, **kwargs)
    c = 299792458  # m/sec
    lam = 1064e-9  # m
    gif = data['K1:GIF-X_STRAIN_OUT16'] * 3000 * 1e6
    xarm = data['K1:CAL-CS_PROC_XARM_FILT_AOM_OUT16'] * 3000.0 / (
        c / lam) * 1e6  # [um]
    # Seismometer
    etmx_seis = data['K1:PEM-SEIS_EXV_GND_X_OUT16']
    itmx_seis = data['K1:PEM-SEIS_IXV_GND_X_OUT16']
    diff_seis = etmx_seis - itmx_seis
    comm_seis = etmx_seis + itmx_seis
    # # ACC
    # etmx_acc_h1 = data['K1:VIS-ETMX_IP_ACCINF_H1_OUT16']
    # etmx_acc_h2 = data['K1:VIS-ETMX_IP_ACCINF_H2_OUT16']
    # etmx_acc_h3 = data['K1:VIS-ETMX_IP_ACCINF_H3_OUT16']
    # P = etmx_acc_mat
    # etmx_acc_l = P[0][0]*etmx_acc_h1 + P[0][1]*etmx_acc_h2 + P[0][2]*etmx_acc_h3 # L
    # etmx_acc_t = P[1][0]*etmx_acc_h1 + P[1][1]*etmx_acc_h2 + P[1][2]*etmx_acc_h3 # T
    # itmx_acc_h1 = data['K1:VIS-ITMX_IP_ACCINF_H1_OUT16']
    # itmx_acc_h2 = data['K1:VIS-ITMX_IP_ACCINF_H2_OUT16']
    # itmx_acc_h3 = data['K1:VIS-ITMX_IP_ACCINF_H3_OUT16']
    # P = itmx_acc_mat
    # itmx_acc_l = P[0][0]*itmx_acc_h1 + P[0][1]*itmx_acc_h2 + P[0][2]*itmx_acc_h3 # L
    # itmx_acc_t = P[1][0]*itmx_acc_h1 + P[1][1]*itmx_acc_h2 + P[1][2]*itmx_acc_h3 # T
    # diff_acc_l = etmx_acc_l + itmx_acc_l
    # diff_acc_t = etmx_acc_t + itmx_acc_t
    # print np.abs(etmx_acc_mat[0,:]).sum()
    # IP ACT
    etmx_act_l = data['K1:VIS-ETMX_IP_DAMP_L_OUT16']
    itmx_act_l = data['K1:VIS-ITMX_IP_DAMP_L_OUT16']
    diff_act_l = -etmx_act_l - itmx_act_l
    diff_acc_l = diff_act_l
    #diff_acc_l = etmx_act_l
    # GAS ACT
    #etmx_gas_f0 = data['K1:VIS-ETMX_F0_SUMOUT_GAS_OUT16']
    #diff_acc_l = etmx_gas_f0
    #
    #pr3 = data['K1:VIS-PR3_TM_OPLEV_SERVO_YAW_OUT16']
    #pr2 = data['K1:VIS-PR3_TM_OPLEV_SERVO_YAW_OUT16']
    pr3 = data['K1:VIS-ETMX_IP_DAMP_Y_OUT16']
    pr2 = data['K1:VIS-ITMX_IP_DAMP_Y_OUT16']

    if plot:
        plt.plot(gif)
        plt.savefig('timeseries.png')
        plt.close()
    return gif, xarm, diff_seis, comm_seis, diff_acc_l, pr3, pr2
Example #20
# assumed imports: astropy constants and the gwpy classes used below
from astropy.constants import G, c
from gwpy.timeseries import TimeSeries, TimeSeriesDict


def get_signals(r, rdot, phi, dt, mass):
    """Create a `~gwpy.timeseries.TimeSeriesDict` based on orbital trajectory
    """
    rconv = G * mass / c**2   # geometrized length scale GM/c^2
    tconv = G * mass / c**3   # geometrized time scale GM/c^3
    dt *= tconv
    r = TimeSeries(rconv * r,
                   dt=dt.value,
                   name="radial coordinate along geodesic")
    rdot = TimeSeries(c * rdot,
                      dt=dt.value,
                      name="radial component of 4-velocity")
    phi = TimeSeries(phi,
                     dt=dt.value,
                     name="azimuthal coordinate along geodesic")
    return TimeSeriesDict({'r': r, 'rdot': rdot, 'phi': phi})
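
A hypothetical call, with a fabricated trajectory in geometrized units and
the mass converted to kilograms so the unit arithmetic reduces cleanly:

import numpy as np
from astropy import units

r, rdot, phi = (np.linspace(10, 6, 1000), np.full(1000, -1e-3),
                np.linspace(0, 50, 1000))
signals = get_signals(r, rdot, phi, dt=0.5,
                      mass=(10 * units.solMass).to(units.kg))
print(signals['r'].name)  # 'radial coordinate along geodesic'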
Example #21
def test_get_data_dict_from_cache(tsdget, remove, find_data):
    # set return values
    tsdget.return_value = TimeSeriesDict({'X1:TEST-STRAIN': HOFT.crop(16, 48)})
    remove.return_value = ['X1:TEST-STRAIN']
    find_data.return_value = ['test.gwf']
    # retrieve test frame
    start = 16
    end = start + 32
    channels = ['X1:TEST-STRAIN']
    data = datafind.get_data(channels, start, end, source=True)

    # test data products
    assert isinstance(data, TimeSeriesDict)
    assert data[channels[0]].duration.value == 32
    assert data[channels[0]].span == Segment(start, end)
    nptest.assert_array_equal(data[channels[0]].value,
                              HOFT.crop(start, end).value)
Example #22
    def __call__(self, idx):
        start = idx * self._update_size
        stop = (idx + 1) * self._update_size

        if self.data is None or stop > self.data.shape[1]:
            # try to load in the next second's worth of data;
            # if it doesn't appear within a few seconds,
            # assume the worst and raise an error
            start_time = time.time()
            path = self.path_pattern.format(self.t0)
            while time.time() - start_time < 3:
                try:
                    data = TimeSeriesDict.read(path, self.channels)
                    break
                except FileNotFoundError:
                    continue
            else:
                raise ValueError(f"Couldn't find next timestep file {path}")
            self._latency_t0 = _get_file_gps_timestamp(path)

            # resample the data and turn it into a numpy array
            data.resample(self.sample_rate)
            data = np.stack([data[channel].value
                             for channel in self.channels]).astype("float32")

            if self.data is not None and start < self.data.shape[1]:
                leftover = self.data[:, start:]
                data = np.concatenate([leftover, data], axis=1)
            self.data = data
            self.t0 += 1

            # raising an index error will get the DataGenerator's
            # _get_data fn to reset the index
            raise IndexError

        # return the next piece of data
        x = self.data[:, start:stop]

        # offset the frame's initial time by the time
        # corresponding to the first sample of stream
        t0 = self._latency_t0 + idx * self.kernel_stride
        return Package(x=x, t0=t0)
Example #23
    def update_data(self, new):
        """Update the `SpectrogramMonitor` data

        This method only applies a ratio, if configured
        """
        # check that the stored epoch is bigger than the first buffered data
        if new[self.channels[0]][0].span[0] > self.epoch:
            s = ('The available data starts at gps %d, '
                 'which is after the end of the last spectrogram (gps %d)'
                 ': a segment is missing and will be skipped!')
            self.logger.warning(s, new[self.channels[0]][0].span[0],
                                self.epoch)
            self.epoch = new[self.channels[0]][0].span[0]
        # be sure that the first cycle is synchronized with the buffer
        if not self.spectrograms.data:
            self.epoch = new[self.channels[0]][0].span[0]
        self.olepoch = self.epoch
        while int(new[self.channels[0]][0].span[-1]) >=\
                int(self.epoch + self.stride):
            # data buffer will return dict of 1-item lists, so reform to tsd
            _new = TimeSeriesDict((key, val[0].crop(self.epoch, self.epoch +
                                                    self.stride))
                                  for key, val in new.items())
            self.logger.debug('Computing spectrogram from epoch %d',
                              self.epoch)
            self.spectrograms.append(
                self.spectrograms.from_timeseriesdict(_new))
            self.epoch += self.stride
        self.spectrograms.crop(self.epoch - self.duration)
        self.data = type(self.spectrograms.data)()
        for channel in self.channels:
            self.data[channel] = type(self.spectrograms.data[channel])(
                *self.spectrograms.data[channel])
            if hasattr(channel, 'ratio') and channel.ratio is not None:
                for i in range(len(self.data[channel])):
                    self.data[channel][i] = (
                        self.spectrograms.data[channel][i].ratio(
                            channel.ratio))
        self.epoch = self.data[self.channels[0]][-1].span[-1]
        return self.data
Example #24
def _check_baddata(segment, data=None, prefix='./data', **kwargs):
    ''' Check whether the given segment is good or not.

    1. Read timeseries data from frame files saved locally.
       If the data cannot be read, return the "no data" flag.
    2. Check for missing data, calibration problems, and big earthquakes.

    Status bits (OR-ed into a single integer):
      2  : no data
      4  : lack of data
      8  : missed calibration
      16 : big earthquake
    '''
    start, end = segment
    fname = iofunc.fname_gwf(start, end, prefix)
    fname = existedfilelist(start, end)
    chname = get_seis_chname(start, end)
    try:
        data = TimeSeriesDict.read(fname, chname, verbose=False, **kwargs)
        lack_of_data = any([0.0 in d.value for d in data.values()]) * 4
        miss_calib = any([1000.0 < d.mean().value for d in data.values()]) * 8
        bigeq = any([
            any((d.std() * 6).value < (d - d.mean()).abs().value)
            for d in data.values()
        ]) * 16
        return data, (lack_of_data + miss_calib + bigeq)
    except IOError as e:
        nodata = (True) * 2
        return None, nodata
    except ValueError as e:
        if 'Cannot append discontiguous TimeSeries' in e.args[0]:
            log.debug(e)
            nodata = (True) * 2
            return None, nodata
        else:
            log.debug(traceback.format_exc())
            raise ValueError('!!')
    except:
        log.debug(traceback.format_exc())
        raise ValueError('!!!')
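
A sketch of decoding the combined status value returned above (the segment
bounds here are placeholders):

data, bad = _check_baddata((start, end))
for bit, label in [(2, 'no data'), (4, 'lack of data'),
                   (8, 'missed calibration'), (16, 'big earthquake')]:
    if bad & bit:
        print(label)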
Example #25
def save_spectrogram(segmentlist, fftlength=2**10, overlap=2**9, **kwargs):
    '''Compute and save spectrograms for each segment in ``segmentlist``.
    '''
    log.debug('Save spectrograms')
    lackofdata = SegmentList()
    prefix = kwargs.pop('prefix', './data')
    write = kwargs.pop('write', True)
    skip = kwargs.pop('skip', False)

    fnames = [fname_png_asd(start, end, prefix) for start, end in segmentlist]
    not_checked = _check_skip(segmentlist, fnames)

    log.debug('{0}(/{1}) are not checked'.format(len(not_checked),
                                                 len(segmentlist)))
    log.debug('Save spectrograms..')
    for i, segment in enumerate(not_checked):
        try:
            #fname = fname_gwf(start,end,prefix)
            fname = existedfilelist(segment[0], segment[1])
            chname = get_seis_chname(segment[0], segment[1])
            # drop any caller-supplied fftlength/overlap; they are re-set below
            kwargs.pop('fftlength', None)
            kwargs.pop('overlap', None)
            data = TimeSeriesDict.read(fname, chname, **kwargs)
            data = data.resample(32)
            data = data.crop(segment[0], segment[1])
        except:
            log.debug(traceback.format_exc())
            raise ValueError('No such data {0}'.format(fname))
        # plot
        kwargs['fftlength'] = fftlength
        kwargs['overlap'] = overlap
        sglist = _calc_spectrogram(data, segment, **kwargs)
        #asdlist = [sg.percentile(50) for sg in sglist]
        fname = fname_png_asd(segment[0], segment[1], prefix)
        #plot_asd(asdlist,fname,**kwargs)
        log.debug('{0:03d}/{1:03d} {2} '.format(i, len(segmentlist), fname) +
                  'Plot')
Example #26
chname = [
    'K1:GIF-X_ANGLE_IN1_DQ',
    'K1:GIF-X_LAMP_IN1_DQ',
    'K1:GIF-X_PHASE_IN1_DQ',
    'K1:GIF-X_PPOL_IN1_DQ',
    'K1:GIF-X_P_AMP_IN1_DQ',
    'K1:GIF-X_P_OFFSET_IN1_DQ',
    'K1:GIF-X_ROTATION_IN1_DQ',
    'K1:GIF-X_SPOL_IN1_DQ',
    'K1:GIF-X_STRAIN_IN1_DQ',
    'K1:GIF-X_S_AMP_IN1_DQ',
    'K1:GIF-X_S_OFFSET_IN1_DQ',
    'K1:GIF-X_ZABS_IN1_DQ',
    ]
    
data = TimeSeriesDict.fetch(chname,start,end,
                            host='10.68.10.121',port=8088)

N = 128
angle = data['K1:GIF-X_ANGLE_IN1_DQ']
angle = angle.value[:N]
angle = np.unwrap(angle)
angle = np.rad2deg(angle)
ppol = data['K1:GIF-X_PPOL_IN1_DQ']
p_ave = np.average(ppol.value)
ppol = ppol.value[:N]
spol = data['K1:GIF-X_SPOL_IN1_DQ']
s_ave = np.average(spol.value)
spol = spol.value[:N]
time = np.arange(len(angle))/2048.0
print(time)
Example #27
    # 
    end = start + 1

    # 
    filterbank = True
    if filterbank:
        # Read filter names
        with open('./filtername.txt','r') as f:
            channels = [x.replace('\n', '') for x in f.readlines()]
        #
        source = filelist(start,end,trend='full',place='kamioka')
        f = open('./results/{0}.txt'.format(hoge), mode='w')
        f.write('# NAME,STATUS,[FILTER_NUMBER],GAIN,OFFSET,LIMIT'+'\n')
        for name in channels:
            names = [name+_suffix for _suffix in ['_SWSTAT','_GAIN','_OFFSET','_LIMIT']]
            data = TimeSeriesDict.read(source,names,start=start,end=end,nproc=4,
                                       format='gwf.lalframe')
            swstat = int(data[name+'_SWSTAT'].mean())
            gain = data[name+'_GAIN'].mean()
            offset = data[name+'_OFFSET'].mean()
            limit = data[name+'_LIMIT'].mean()
            txt = '{0},{2:3.5e},{3:3.5e},{4:3.5e},{1}'.format(name,filt_status(swstat),
                                                              gain,offset,limit)
            print(txt)
            f.write(txt+'\n')
        f.close()
    #
    matrix = False
    if matrix:
        # Read filter names
        names = np.loadtxt('matrixname.txt', dtype=str)
        source = filelist(start,end,trend='full',place='kashiwa')
Example #28
DATA = TimeSeries([1, 2, 3, 4, 5, 5, 5, 4, 5, 4], dx=.5, name='X1:TEST_OUTPUT')
SATURATIONS = numpy.array([2., 4.])
SEGMENTS = SegmentList([
    Segment(2., 3.5),
    Segment(4., 4.5),
])

CHANNELS = [
    'X1:TEST_LIMIT',
    'X1:TEST_LIMEN',
    'X1:TEST_SWSTAT',
]
TSDICT = TimeSeriesDict({
    'X1:TEST_LIMEN':
    TimeSeries(numpy.ones(10), dx=.5, name='X1:TEST_LIMEN'),
    'X1:TEST_OUTPUT':
    DATA,
    'X1:TEST_LIMIT':
    TimeSeries(5 * numpy.ones(10), dx=.5, name='X1:TEST_LIMIT'),
})

# -- unit tests ---------------------------------------------------------------


def test_find_saturations():
    sats = saturation.find_saturations(DATA, limit=5., segments=False)
    assert_array_equal(sats, SATURATIONS)
    segs = saturation.find_saturations(DATA,
                                       limit=5. * DATA.unit,
                                       segments=True)
    assert_segmentlist_equal(segs.active, SEGMENTS)
Example #29
File: coherence.py Project: e-q/gwpy
of coherence.

The `TimeSeries` method :meth:`~TimeSeries.coherence_spectrogram` performs the
same coherence calculation every ``stride``, giving a time-varying coherence
measure.

"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = "gwpy.timeseries"

# First, we import the `TimeSeriesDict`
from gwpy.timeseries import TimeSeriesDict

# and then :meth:`~TimeSeriesDict.get` both data sets:
data = TimeSeriesDict.get(["L1:LSC-SRCL_IN1_DQ", "L1:LSC-CARM_IN1_DQ"], "Feb 13 2015", "Feb 13 2015 00:15")

# We can then use the :meth:`~TimeSeries.coherence_spectrogram` method
# of one `TimeSeries` to calculate the time-varying coherence with
# respect to the other, using a 0.5-second FFT length, a
# 0.45-second (90%) overlap, and an 8-second stride:
coh = data["L1:LSC-SRCL_IN1_DQ"].coherence_spectrogram(data["L1:LSC-CARM_IN1_DQ"], 8, 0.5, 0.45)

# Finally, we can :meth:`~gwpy.spectrogram.Spectrogram.plot` the
# resulting data
plot = coh.plot()
ax = plot.gca()
ax.set_ylabel("Frequency [Hz]")
ax.set_yscale("log")
ax.set_ylim(10, 8000)
ax.set_title("Coherence between SRCL and CARM for L1")
Example #30
        print(dof2, 'QUAD')
        for trend in TRENDS:
            print(trend, 'QUAD')
            l1_channels.append('L1:SUS-%s_L1_WIT_%s_DQ.%s,m-trend' % (optic, dof2, trend))
            l2_channels.append('L1:SUS-%s_L2_WIT_%s_DQ.%s,m-trend' % (optic, dof2, trend))
            print(l1_channels)
            print(l2_channels)
    for dof3 in DEGREE_OF_FREEDOM:
        print(dof3, 'QUAD')
        for trend in TRENDS:
            print(trend, 'QUAD')
            l3_channels.append('L1:SUS-%s_L3_OPLEV_%s_OUT_DQ.%s,m-trend' % (optic, dof3, trend))
            print(l3_channels)

data = dict()
data[topstage] = TimeSeriesDict.fetch(m1_channels, start, end, verbose=True)

if optic in TRIPLE:
    data['M2'] = TimeSeriesDict.fetch(m2_channels, start, end, verbose=True)
    data['M3'] = TimeSeriesDict.fetch(m3_channels, start, end, verbose=True)

else:
    data['L1'] = TimeSeriesDict.fetch(l1_channels, start, end, verbose=True)
    data['L2'] = TimeSeriesDict.fetch(l2_channels, start, end, verbose=True)
    data['L3'] = TimeSeriesDict.fetch(l3_channels, start, end, verbose=True)


for dof in TOPSTAGE_DOFS:
    if optic in QUAD:
        print "%s  QUAD" %(dof)
        stub = 'L1:SUS-%s_%s_DAMP_%s_INMON.%s,m-trend' % (optic, topstage, dof, '%s')
Example #31
from gwpy.timeseries import TimeSeriesDict
alldata = TimeSeriesDict.get(['H1:PSL-PWR_PMC_TRANS_OUT16','H1:IMC-PWR_IN_OUT16'], 'Feb 1 00:00', 'Feb 1 02:00')
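
As a quick follow-up sketch, the returned `TimeSeriesDict` can be plotted
directly:

plot = alldata.plot()
plot.show()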
Example #32
resultName = "./Results/{}_{}.txt".format(IFO,ID)

channels = [
    'L1:ISI-GND_STS_ITMY_X_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ITMY_Y_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ITMY_Z_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMY_X_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMY_Y_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMY_Z_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMX_X_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMX_Y_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMX_Z_BLRMS_{}.mean,s-trend'.format(blrms_band),
]

print("Fetching data......")
DATA = TimeSeriesDict.get(channels, tstart, tstop)

for i in DATA:
    LEN = len(DATA[i].value)

dataX  = np.array(np.zeros([len(channels), LEN] ))

count = 0
for i in DATA:
    if eqBandpass:
        W = fftfreq(DATA[i].value.size, d=np.array(DATA[i].dt.value))
        f_signal = rfft(np.array(DATA[i].value))
        cut_f_signal = f_signal.copy()
        cut_f_signal[(W<20.0e-3)] = 0
        cut_f_signal[(W>100.0e-3)] = 0
        cut_signal = irfft(cut_f_signal)
Example #33
if fft > stride / 2.:
    print('Warning: stride is shorter than fft length. Set stride=fft*2.')
    stride = fft * 2.

# Get data from frame files
if kamioka:
    sources = mylib.GetFilelist_Kamioka(gpsstart, gpsend)
else:
    sources = mylib.GetFilelist(gpsstart, gpsend)

channels = [refchannel, channel]

data = TimeSeriesDict.read(sources,
                           channels,
                           format='gwf.lalframe',
                           start=int(float(gpsstart)),
                           end=int(float(gpsend)) + 1)

ref = data[refchannel]
com = data[channel]

# Use same sampling rate
if com.dt.value < ref.dt.value:
    com = com.resample(1. / ref.dt.value)
if com.dt.value > ref.dt.value:
    ref = ref.resample(1. / com.dt.value)

ref = ref.crop(float(gpsstart), float(gpsend))
com = com.crop(float(gpsstart), float(gpsend))
Example #34
from gwpy.timeseries import TimeSeriesDict
import phasespace as ps

chan1 = 'H1:ASC-DSOFT_P_OUT_DQ'
chan2 = 'H1:ASC-DSOFT_Y_OUT_DQ'
startgps = 1128411017
duration = 300
startgps2 = 1128453317

time1 = TimeSeriesDict.fetch([chan1, chan2],
                             startgps,
                             startgps + duration,
                             verbose=True)
time2 = TimeSeriesDict.fetch([chan1, chan2],
                             startgps2,
                             startgps2 + duration,
                             verbose=True)

pit_yaw = ps.phase_space(y_ts=time1[chan1],
                         x_ts=time1[chan2],
                         y_ts_comp=time2[chan1],
                         x_ts_comp=time2[chan2])
scatterhist = pit_yaw.plot_2d_scatter_hist_comparison(timer=32,
                                                      median=False,
                                                      flip=True)
scatterhist.savefig('test.png')
Example #35
resultName = "./Results/{}_{}.txt".format(IFO, ID)

channels = [
    'L1:ISI-GND_STS_ITMY_X_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ITMY_Y_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ITMY_Z_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMY_X_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMY_Y_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMY_Z_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMX_X_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMX_Y_BLRMS_{}.mean,s-trend'.format(blrms_band),
    'L1:ISI-GND_STS_ETMX_Z_BLRMS_{}.mean,s-trend'.format(blrms_band),
]

print("Fetching data......")
DATA = TimeSeriesDict.get(channels, tstart, tstop)

for i in DATA:
    LEN = len(DATA[i].value)

dataX = np.array(np.zeros([len(channels), LEN]))

count = 0
for i in DATA:
    if eqBandpass:
        W = fftfreq(DATA[i].value.size, d=np.array(DATA[i].dt.value))
        f_signal = rfft(np.array(DATA[i].value))
        cut_f_signal = f_signal.copy()
        cut_f_signal[(W < 20.0e-3)] = 0
        cut_f_signal[(W > 100.0e-3)] = 0
        cut_signal = irfft(cut_f_signal)
Example #36
File: driftmon_mean.py Project: nkij/LIGO
            channels_M0.append('L1:SUS-%s_M0_DAMP_%s_INMON.%s,m-trend' % (optic_m0, dof, trend))

for optic_m1 in OPTICS_M1:
    for dof in DOFS:
        for trend in TRENDS:
            channels_M1.append('L1:SUS-%s_M1_DAMP_%s_INMON.%s,m-trend' % (optic_m1, dof, trend))

for optic_m2 in OPTICS_M2:
    for dof2 in DOFS2:
        for trend in TRENDS:
            channels_M2.append('L1:SUS-%s_M2_WIT_%s_DQ.%s,m-trend' % (optic_m2, dof2, trend))
            channels_M3.append('L1:SUS-%s_M3_WIT_%s_DQ.%s,m-trend' % (optic_m2, dof2, trend))



data_m0 = TimeSeriesDict.fetch(channels_M0, start, end, verbose=True)
data_m1 = TimeSeriesDict.fetch(channels_M1, start, end, verbose=True)
data_m2 = TimeSeriesDict.fetch(channels_M2, start, end, verbose=True)
data_m3 = TimeSeriesDict.fetch(channels_M3, start, end, verbose=True)

for optic_m1 in OPTICS_M1:
    print "%s " %(optic_m1)
    for dof in DOFS:
        print "%s  " %(dof)
        data_m1_mean = data_m1['L1:SUS-%s_M1_DAMP_%s_INMON.mean,m-trend' % (optic_m1, dof)]-data_m1['L1:SUS-%s_M1_DAMP_%s_INMON.mean,m-trend' % (optic_m1, dof)].mean().value
        plot_m1_mean = data_m1_mean.plot()
        axP = plot_m1_mean.gca()    
        axP.set_ylabel('Amplitude - Mean Value (urad)')
        axP.set_title('Mean %s M1 %s' %(optic_m1, dof))
        pylab.ylim([-200,200])
#        L = axP.legend(loc='upper right', ncol=1, fancybox=True, shadow=True)
Example #37
# and one for plotting the data:
from gwpy.plot import Plot

# Next we define the channels we want, namely the 0.03Hz-1Hz ground motion
# band-limited RMS channels (1-second average trends).
# We do this using string-replacement so we can substitute the interferometer
# prefix easily when we need to:
channels = [
    '{ifo}:ISI-BS_ST1_SENSCOR_GND_STS_X_BLRMS_30M_100M.mean,s-trend',
    '{ifo}:ISI-BS_ST1_SENSCOR_GND_STS_Y_BLRMS_30M_100M.mean,s-trend',
    '{ifo}:ISI-BS_ST1_SENSCOR_GND_STS_Z_BLRMS_30M_100M.mean,s-trend',
]

# At last we can :meth:`~TimeSeriesDict.get` 12 hours of data for each
# interferometer:
lho = TimeSeriesDict.get([c.format(ifo='H1') for c in channels],
                         'Feb 13 2015 16:00', 'Feb 14 2015 04:00')
llo = TimeSeriesDict.get([c.format(ifo='L1') for c in channels],
                         'Feb 13 2015 16:00', 'Feb 14 2015 04:00')

# Next we can plot the data, with a separate `~gwpy.plot.Axes` for each
# instrument:
plot = Plot(lho, llo, figsize=(12, 6), sharex=True, yscale='log')
ax1, ax2 = plot.axes
for ifo, ax in zip(('Hanford', 'Livingston'), (ax1, ax2)):
    ax.legend(['X', 'Y', 'Z'])
    ax.text(1.01, 0.5, ifo, ha='left', va='center', transform=ax.transAxes,
            fontsize=18)
ax1.set_ylabel('$1-3$\,Hz motion [nm/s]', y=-0.1)
ax2.set_ylabel('')
ax1.set_title('Magnitude 7.1 earthquake impact on LIGO')
plot.show()
Example #38
7: 'BS',
8: 'PR2',
9: 'SR2',
10: 'MC2'}

# read in channel lists to populate each input matrix

PD_DOF_chans=np.loadtxt('PD_DOF_MTRX_chans',dtype=str)
ARM_INPUT_chans=np.loadtxt('ARM_INPUT_MTRX_chans',dtype=str)
OUTPUT_chans=np.loadtxt('OUTPUT_MTRX_chans',dtype=str)
ARM_OUTPUT_chans=np.loadtxt('ARM_OUTPUT_MTRX_chans',dtype=str)

# make a timeseries dictionary for each input matrix
print('Fetching a bunch of data from frames')

PD_DOF_data = TimeSeriesDict.read(cache,PD_DOF_chans,start=start_gps,end=end_gps)
ARM_INPUT_data = TimeSeriesDict.read(cache,ARM_INPUT_chans,start=start_gps,end=end_gps)
OUTPUT_data = TimeSeriesDict.read(cache,OUTPUT_chans,start=start_gps,end=end_gps)
ARM_OUTPUT_data = TimeSeriesDict.read(cache,ARM_OUTPUT_chans,start=start_gps,end=end_gps)

# main workhorse function of this script
#
# requires a dictionary containing matrix element time series and two dictionaries
# that map these matrix elements to IO channels
#
# grab the first sample of each channel - if it's non-zero, add it to the list of active channels
# 
# process the active channels and return a set of tuples indicating the matrix entries
# 
# send through a search function that lines up the inputs and outputs
# uses the set of tuples generated from active channels a dictionary maps them to PDs and DOFs
Example #39
File: blrms.py Project: Maple-Wang/gwpy
# and one for plotting the data:
from gwpy.plotter import TimeSeriesPlot

# Next we define the channels we want, namely the 0.03Hz-1Hz ground motion
# band-limited RMS channels (1-second average trends).
# We do this using string-replacement so we can substitute the interferometer
# prefix easily when we need to:
channels = [
    '%s:ISI-BS_ST1_SENSCOR_GND_STS_X_BLRMS_30M_100M.mean,s-trend',
    '%s:ISI-BS_ST1_SENSCOR_GND_STS_Y_BLRMS_30M_100M.mean,s-trend',
    '%s:ISI-BS_ST1_SENSCOR_GND_STS_Z_BLRMS_30M_100M.mean,s-trend',
]

# At last we can :meth:`~TimeSeriesDict.get` 12 hours of data for each
# interferometer:
lho = TimeSeriesDict.get([c % 'H1' for c in channels],
                         'Feb 13 2015 16:00', 'Feb 14 2015 04:00', verbose=True)
llo = TimeSeriesDict.get([c % 'L1' for c in channels],
                         'Feb 13 2015 16:00', 'Feb 14 2015 04:00', verbose=True)

# Next we can plot the data, with a separate `~gwpy.plotter.Axes` for each
# instrument:
plot = TimeSeriesPlot(lho, llo)
for ifo, ax in zip(['H1', 'L1'], plot.axes):
   ax.legend(['X', 'Y', 'Z'])
   ax.yaxis.set_label_position('right')
   ax.set_ylabel(ifo, rotation=0, va='center', ha='left')
   ax.set_yscale('log')
plot.text(0.1, 0.5, '$1-3$\,Hz motion [nm/s]', rotation=90, fontsize=24,
          ha='center', va='center')
plot.axes[0].set_title('Magnitude 7.1 earthquake impact on LIGO', fontsize=24)
plot.show()
Example #40
# and one for plotting the data:
from gwpy.plot import Plot

# Next we define the channels we want, namely the 0.03Hz-1Hz ground motion
# band-limited RMS channels (1-second average trends).
# We do this using string-replacement so we can substitute the interferometer
# prefix easily when we need to:
channels = [
    '{ifo}:ISI-BS_ST1_SENSCOR_GND_STS_X_BLRMS_30M_100M.mean,s-trend',
    '{ifo}:ISI-BS_ST1_SENSCOR_GND_STS_Y_BLRMS_30M_100M.mean,s-trend',
    '{ifo}:ISI-BS_ST1_SENSCOR_GND_STS_Z_BLRMS_30M_100M.mean,s-trend',
]

# At last we can :meth:`~TimeSeriesDict.get` 12 hours of data for each
# interferometer:
lho = TimeSeriesDict.get([c.format(ifo='H1') for c in channels],
                         'Feb 13 2015 16:00', 'Feb 14 2015 04:00')
llo = TimeSeriesDict.get([c.format(ifo='L1') for c in channels],
                         'Feb 13 2015 16:00', 'Feb 14 2015 04:00')

# Next we can plot the data, with a separate `~gwpy.plot.Axes` for each
# instrument:
plot = Plot(lho, llo, figsize=(12, 6), sharex=True, yscale='log')
ax1, ax2 = plot.axes
for ifo, ax in zip(('Hanford', 'Livingston'), (ax1, ax2)):
    ax.legend(['X', 'Y', 'Z'])
    ax.text(1.01,
            0.5,
            ifo,
            ha='left',
            va='center',
            transform=ax.transAxes,
Example #41
(`~gwpy.spectrum.Spectrum`) giving a time-averaged measure of coherence.

The `TimeSeries` method :meth:`~TimeSeries.coherence_spectrogram` performs the
same coherence calculation every ``stride``, giving a time-varying coherence
measure.

"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = 'gwpy.timeseries'

# First, we import the `TimeSeriesDict`
from gwpy.timeseries import TimeSeriesDict

# and then :meth:`~TimeSeriesDict.get` both data sets:
data = TimeSeriesDict.get(['L1:LSC-SRCL_IN1_DQ', 'L1:LSC-CARM_IN1_DQ'],
                           'Feb 13 2015', 'Feb 13 2015 00:15')

# We can then use the :meth:`~TimeSeries.coherence_spectrogram` method
# of one `TimeSeries` to calculate the time-varying coherence with
# respect to the other, using a 0.5-second FFT length, a
# 0.45-second (90%) overlap, and an 8-second stride:
coh = data['L1:LSC-SRCL_IN1_DQ'].coherence_spectrogram(
    data['L1:LSC-CARM_IN1_DQ'], 8, 0.5, 0.45)

# Finally, we can :meth:`~gwpy.spectrogram.Spectrogram.plot` the
# resulting data
plot = coh.plot()
ax = plot.gca()
ax.set_ylabel('Frequency [Hz]')
ax.set_yscale('log')
ax.set_ylim(10, 8000)
Example #42
        unit = 'Humidity [\%]'
    elif channel.find('ACC') != -1:
        unit = r'Acceleration [$m/s^2$]'
    elif channel.find('MIC') != -1:
        unit = 'Sound [Pa]'

    if mtrend:
        for x in suffix:
            chnames.append(channel + '.' + x)
            latexchnames.append(channel.replace('_', '\_') + '.' + x)

    # Time series
    if mtrend:
        data = TimeSeriesDict.read(sources,
                                   chnames,
                                   format='gwf.lalframe',
                                   nproc=2,
                                   start=int(start),
                                   pad=0.0)
        #data = data.crop(send)
        t0 = data['K1:PEM-EXV_SEIS_WE_SENSINF_OUT_DQ.max'].t0
    if full:
        data = TimeSeries.read(sources,
                               channel,
                               format='gwf.lalframe',
                               nproc=2,
                               start=int(start),
                               pad=0.0)
        t0 = data.t0
        _max = data.max()
        _min = data.min()
        fs = 1. / data.dt
Example #43
20: 'OM3',
21: 'TMSX',
22: 'TMSY',
23: 'PM1'}

# read in channel lists to populate each input matrix

INMATRIX_chans_PIT=np.loadtxt('ASC_INMATRIX_P_chans.txt',dtype=str)
INMATRIX_chans_YAW=np.loadtxt('ASC_INMATRIX_Y_chans.txt',dtype=str)
OUTMATRIX_chans_PIT=np.loadtxt('ASC_OUTMATRIX_P_chans.txt',dtype=str)
OUTMATRIX_chans_YAW=np.loadtxt('ASC_OUTMATRIX_Y_chans.txt',dtype=str)

# make a timeseries dictionary for each input matrix
print('Fetching a bunch of data from frames')

INMATRIX_PIT_data = TimeSeriesDict.read(cache,INMATRIX_chans_PIT,start=start_gps,end=end_gps)
INMATRIX_YAW_data = TimeSeriesDict.read(cache,INMATRIX_chans_YAW,start=start_gps,end=end_gps)
OUTMATRIX_PIT_data = TimeSeriesDict.read(cache,OUTMATRIX_chans_PIT,start=start_gps,end=end_gps)
OUTMATRIX_YAW_data = TimeSeriesDict.read(cache,OUTMATRIX_chans_YAW,start=start_gps,end=end_gps)

# main workhorse function of this script
#
# requires a dictionary containing matrix element time series and two dictionaries
# that map these matrix elements to IO channels
#
# grab the first sample of each channel - if it's non-zero, add it to the list of active channels
# 
# process the active channels and return a set of tuples indicating the matrix entries
# 
# send through a search function that lines up the inputs and outputs
# uses the set of tuples generated from active channels a dictionary maps them to PDs and DOFs
Example #44
    pyplot.ion()

# Before anything else, we import the objects we will need:
from gwpy.time import tconvert
from gwpy.timeseries import TimeSeriesDict
from gwpy.plot import BodePlot

# and set the times of our query, and the channels we want:
start = tconvert('May 27 2014 04:00')
end = start + 1800
gndchannel = 'L1:ISI-GND_STS_ITMY_Z_DQ'
hpichannel = 'L1:HPI-ITMY_BLND_L4C_Z_IN1_DQ'

# We can call the :meth:`~TimeSeriesDict.get` method of the `TimeSeriesDict`
# to retrieve all data in a single operation:
data = TimeSeriesDict.get([gndchannel, hpichannel], start, end, verbose=True)
gnd = data[gndchannel]
hpi = data[hpichannel]

# Next, we can call the :meth:`~TimeSeries.average_fft` method to calculate
# an averaged, complex-valued FFT for each `TimeSeries`:
gndfft = gnd.average_fft(100, 50, window='hamming')
hpifft = hpi.average_fft(100, 50, window='hamming')

# Finally, we can divide one by the other to get the transfer function
# (up to the lower Nyquist)
size = min(gndfft.size, hpifft.size)
tf = hpifft[:size] / gndfft[:size]

# The `~gwpy.plot.BodePlot` knows how to separate a complex-valued
# `~gwpy.frequencyseries.FrequencySeries` into magnitude and phase:
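
# A minimal continuation sketch (the original snippet is truncated here);
# the complex transfer function can be handed to BodePlot directly:
plot = BodePlot(tf)
plot.show()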
Example #45
File: fetch_HPI_all.py Project: nkij/LIGO
'L1:HPI-ETMX_BLND_IPS_RZ_IN1_DQ.mean, m-trend',
'L1:HPI-ETMX_BLND_IPS_VP_IN1_DQ.mean, m-trend',
'L1:HPI-ETMX_BLND_IPS_X_IN1_DQ.mean, m-trend', 
'L1:HPI-ETMX_BLND_IPS_Y_IN1_DQ.mean, m-trend',
'L1:HPI-ETMX_BLND_IPS_Z_IN1_DQ.mean, m-trend',
'L1:HPI-ETMY_BLND_IPS_HP_IN1_DQ.mean, m-trend',
'L1:HPI-ETMY_BLND_IPS_RX_IN1_DQ.mean, m-trend',
'L1:HPI-ETMY_BLND_IPS_RY_IN1_DQ.mean, m-trend',
'L1:HPI-ETMY_BLND_IPS_RZ_IN1_DQ.mean, m-trend',
'L1:HPI-ETMY_BLND_IPS_VP_IN1_DQ.mean, m-trend',
'L1:HPI-ETMY_BLND_IPS_X_IN1_DQ.mean, m-trend',
'L1:HPI-ETMY_BLND_IPS_Y_IN1_DQ.mean, m-trend',
'L1:HPI-ETMY_BLND_IPS_Z_IN1_DQ.mean, m-trend']

#data = dict()
data = TimeSeriesDict.fetch(channels, start, end, verbose=True)

print "DONE"

#for data in channles:
#    plot_data = data.plot()
#    ax = plot_data.gca()

#    ax.set_epoch(start.gps)
#    ax.set_xlim(start.gps, end.gps)
#    ax.set_ylabel('Amplitude[ ]')
#    ax.set_title(data.channel.texname)
#    ax.set_ylim([20000,30000])
 
#    print "DONE"
#    plot_data.save(data.channel.text)
Example #46
    pyplot.ion()

# Before anything else, we import the objects we will need:
from gwpy.time import tconvert
from gwpy.timeseries import TimeSeriesDict
from gwpy.plotter import BodePlot

# and set the times of our query, and the channels we want:
start = tconvert('May 27 2014 04:00')
end = start + 1800
gndchannel = 'L1:ISI-GND_STS_ITMY_Z_DQ'
hpichannel = 'L1:HPI-ITMY_BLND_L4C_Z_IN1_DQ'

# We can call the :meth:`~TimeSeriesDict.fetch` method of the `TimeSeriesDict`
# to retrieve all data in a single operation:
data = TimeSeriesDict.fetch([gndchannel, hpichannel], start, end, verbose=True)
gnd = data[gndchannel]
hpi = data[hpichannel]

# Next, we can call the :meth:`~TimeSeries.average_fft` method to calculate
# an averaged, complex-valued FFT for each `TimeSeries`:
gndfft = gnd.average_fft(100, 50, window='hamming')
hpifft = hpi.average_fft(100, 50, window='hamming')

# Finally, we can divide one by the other to get the transfer function
# (up to the lower Nyquist)
size = min(gndfft.size, hpifft.size)
tf = hpifft[:size] / gndfft[:size]

# The `~gwpy.plotter.BodePlot` knows how to separate a complex-valued
# `~gwpy.spectrum.Spectrum` into magnitude and phase:
Example #47
File: blrms.py Project: WanduiAlbert/gwpy
# and one for plotting the data:
from gwpy.plotter import TimeSeriesPlot

# Next we define the channels we want, namely the 0.03Hz-1Hz ground motion
# band-limited RMS channels (1-second average trends).
# We do this using string-replacement so we can substitute the interferometer
# prefix easily when we need to:
channels = [
    '%s:ISI-BS_ST1_SENSCOR_GND_STS_X_BLRMS_30M_100M.mean,s-trend',
    '%s:ISI-BS_ST1_SENSCOR_GND_STS_Y_BLRMS_30M_100M.mean,s-trend',
    '%s:ISI-BS_ST1_SENSCOR_GND_STS_Z_BLRMS_30M_100M.mean,s-trend',
]

# At last we can fetch 12 hours of data for each interferometer using the
# `TimeSeriesDict.fetch` method:
lho = TimeSeriesDict.fetch([c % 'H1' for c in channels],
                           'Feb 13 2015 16:00', 'Feb 14 2015 04:00')
llo = TimeSeriesDict.fetch([c % 'L1' for c in channels],
                           'Feb 13 2015 16:00', 'Feb 14 2015 04:00')

# Next we can plot the data, with a separate `~gwpy.plotter.Axes` for each
# instrument:
plot = TimeSeriesPlot(lho, llo)
for ifo, ax in zip(['H1', 'L1'], plot.axes):
   ax.legend(['X', 'Y', 'Z'])
   ax.yaxis.set_label_position('right')
   ax.set_ylabel(ifo, rotation=0, va='center', ha='left')
   ax.set_yscale('log')
plot.text(0.1, 0.5, '$1-3$\,Hz motion [nm/s]', rotation=90, fontsize=24,
          ha='center', va='center')
plot.axes[0].set_title('Magnitude 7.1 earthquake impact on LIGO', fontsize=24)
plot.show()
Example #48
same coherence calculation every ``stride``, giving a time-varying coherence
measure.

"""

__author__ = "Duncan Macleod <*****@*****.**>"
__currentmodule__ = 'gwpy.timeseries'

# First, we import the `TimeSeriesDict`
from gwpy.timeseries import TimeSeriesDict

# and then :meth:`~TimeSeriesDict.get` the data for the strain output
# (``H1:GDS-CALIB_STRAIN``) and the PSL periscope accelerometer
# (``H1:PEM-CS_ACC_PSL_PERISCOPE_X_DQ``):
data = TimeSeriesDict.get(['H1:GDS-CALIB_STRAIN',
                           'H1:PEM-CS_ACC_PSL_PERISCOPE_X_DQ'],
                           1126260017, 1126260617)
hoft = data['H1:GDS-CALIB_STRAIN']
acc = data['H1:PEM-CS_ACC_PSL_PERISCOPE_X_DQ']

# We can then calculate the :meth:`~TimeSeries.coherence_spectrogram` of one
# `TimeSeries` with respect to the other, using a 10-second stride, a
# 0.5-second FFT length, and a 0.25-second (50%) overlap:
coh = hoft.coherence_spectrogram(acc, 10, fftlength=.5, overlap=.25)

# Finally, we can :meth:`~gwpy.spectrogram.Spectrogram.plot` the
# resulting data
plot = coh.plot()
ax = plot.gca()
ax.set_ylabel('Frequency [Hz]')
ax.set_yscale('log')
Example #49
print(end.iso, end.gps)



channels_M1 = []
OPTICS_M1 = ['MC1', 'MC2', 'MC3']
DOFS = ['P', 'R', 'Y']


for optic_m1 in OPTICS_M1:
    for dof in DOFS:
        channels_M1.append('L1:SUS-%s_M1_DAMP_%s_INMON.mean,m-trend' % (optic_m1, dof))



data_m1 = TimeSeriesDict.fetch(channels_M1, start, end, verbose=True)


for dof in DOFS:
    print "DOF = %s  " %(dof)
    data_mc1_mean = data_m1['L1:SUS-MC1_M1_DAMP_%s_INMON.mean,m-trend' % (dof)]-data_m1['L1:SUS-MC1_M1_DAMP_%s_INMON.mean,m-trend' % (dof)].mean().value
    data_mc2_mean = data_m1['L1:SUS-MC2_M1_DAMP_%s_INMON.mean,m-trend' % (dof)]-data_m1['L1:SUS-MC2_M1_DAMP_%s_INMON.mean,m-trend' % (dof)].mean().value
    data_mc3_mean = data_m1['L1:SUS-MC3_M1_DAMP_%s_INMON.mean,m-trend' % (dof)]-data_m1['L1:SUS-MC3_M1_DAMP_%s_INMON.mean,m-trend' % (dof)].mean().value
    plot_mc1_mean = data_mc1_mean.plot()
    ax = plot_mc1_mean.gca()
    ax.plot(data_mc2_mean, label='MC2')
    ax.plot(data_mc3_mean, label='MC3')
    ax.set_ylabel('Mean amplitude - Mean Value (urad)')
    ax.set_title('%s' %(dof))
    pylab.ylim([-200,200])
    L = ax.legend(loc='upper right', ncol=1, fancybox=True, shadow=True)