Example #1
def huge(start, end):
    source = filelist(start, end, trend='full', place='kashiwa')
    data = TimeSeriesDict.read(source, channels, start=start, end=end, nproc=4)
    c = 299792458  # m/sec
    lam = 1064e-9  # m
    gif = data['K1:VIS-ETMX_GIF_ARM_L_OUT16']
    xarm = data['K1:CAL-CS_PROC_XARM_FILT_AOM_OUT16'] * 3000.0 / (
        c / lam) * 1e6  # dx = L*(df/f), L = 3000 m, f = c/lam -> [um]
    etmx_seis = data['K1:PEM-SEIS_EXV_GND_X_OUT16']
    itmx_seis = data['K1:PEM-SEIS_IXV_GND_X_OUT16']
    diff_seis = etmx_seis - itmx_seis
    comm_seis = etmx_seis + itmx_seis

    # Coherence
    coh_gif2xarm = gif.coherence(xarm, fftlength=fftlen, overlap=ovlp)
    coh_gif2seis = gif.coherence(diff_seis, fftlength=fftlen, overlap=ovlp)
    coh_xarm2seiscomm = xarm.coherence(comm_seis,
                                       fftlength=fftlen,
                                       overlap=ovlp)

    # ASD
    gif = gif.asd(fftlength=fftlen, overlap=ovlp)
    xarm = xarm.asd(fftlength=fftlen, overlap=ovlp)
    diff_seis = diff_seis.asd(fftlength=fftlen, overlap=ovlp)
    comm_seis = comm_seis.asd(fftlength=fftlen, overlap=ovlp)
    w = 2.0 * np.pi * (diff_seis.frequencies.value)
    diff_seis = diff_seis / w
    comm_seis = comm_seis / w
    return xarm
Example #2
File: sei.py Project: berkowitze/gwsumm
    def process(self):

        # data span
        start = self.gpstime - self.duration / 2.
        end = self.gpstime + self.duration / 2.

        # get data
        if self.use_nds:
            data = TimeSeriesDict.fetch(self.chanlist, start, end)
        else:
            from glue.datafind import GWDataFindHTTPConnection
            conn = GWDataFindHTTPConnection()
            cache = conn.find_frame_urls(self.ifo[0], '%s_C' % self.ifo,
                                         self.start, self.end, urltype='file')
            if len(cache) == 0:
                data = {}
            else:
                data = TimeSeriesDict.read(cache, self.chanlist, start=start,
                                           end=end, nproc=self.nproc)

        # make plot
        plot, axes = subplots(nrows=self.geometry[0], ncols=self.geometry[1],
                              sharex=True,
                              subplot_kw={'projection': 'timeseries'},
                              FigureClass=TimeSeriesPlot, figsize=[12, 6])
        axes[0,0].set_xlim(start, end)
        for channel, ax in zip(self.chanlist, axes.flat):
            ax.set_epoch(self.gpstime)
            # plot data
            try:
                ax.plot(data[channel])
            except KeyError:
                ax.text(self.gpstime, 0.5, "No data", va='center', ha='center',
                        transform=ax.transData)
            # plot trip indicator
            ylim = ax.get_ylim()
            ax.plot([self.gpstime, self.gpstime], ylim, linewidth=0.5,
                    linestyle='--', color='red')
            ax.set_ylim(*ylim)
            ax.set_xlabel('')
            ax.set_title(channel.texname, fontsize=10)
            ax.xaxis.set_minor_locator(NullLocator())
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(10)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(16)
        plot.text(0.5, 0.04, 'Time [seconds] from trip (%s)' % self.gpstime,
                  ha='center', va='bottom', fontsize=24)
        plot.text(0.01, 0.5, 'Amplitude %s' % self.unit, ha='left', va='center',
                  rotation='vertical', fontsize=24)

        plot.suptitle('%s %s %s watchdog trip: %s'
                      % (self.ifo, self.chamber, self.sensor, self.gpstime),
                      fontsize=24)

        plot.save(self.outputfile)
        plot.close()
        return self.outputfile
Example #3
def loadHDF5(filename):
    data = TimeSeriesDict.read('{}.hdf5'.format(filename))
    spec = {}
    channels = data.keys()
    for i in channels:
        spec[i] = {}
        spec[i]['sp'],spec[i]['norm'] = specgram(data[i])
        spec[i]['sp_asd'] = spec[i]['sp'].percentile(50)
    np.save(filename,spec)
    return spec
Example #4
def loadHDF5(filename, fftl=4, ovlp=2):
    data = TimeSeriesDict.read('{}.hdf5'.format(filename))
    spec = {}
    for i in data.keys():
        spec[i] = {}
        spec[i]['sp'], spec[i]['norm'] = specgram(data[i], fftl, ovlp)
        spec[i]['sp_asd'] = spec[i]['sp'].percentile(50)
        if i[10:13] == 'ACC':
            spec[i]['sp_asd'] = calAccel(spec[i]['sp_asd'][1:])
    np.save(filename, spec)
    return spec
Example #5
 def __init__(self,
              channels,
              filename,
              start=default_start,
              end=default_end):
     self.channels = channels
     if path.exists('./data/{}.hdf5'.format(filename)):
         self.data = TimeSeriesDict.read('./data/{}.hdf5'.format(filename))
     else:
         self.data = TimeSeriesDict.fetch(channels, start, end)
         self.data.write('./data/{}.hdf5'.format(filename))
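A minimal usage sketch of the caching pattern above, assuming the method belongs to a wrapper class (hypothetically named ChannelData) and that a ./data/ directory exists:

channels = ['K1:PEM-EXV_GND_TR120Q_X_OUT_DQ']  # hypothetical channel
d = ChannelData(channels, 'run1', start=1260000000, end=1260003600)
print(d.data[channels[0]])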
Example #6
def get_array2d(start,end,axis='X',prefix='./data',**kwargs):
    '''Read a cached seismometer spectrogram, or compute and cache one.

    If a blrms band is given, return its band-limited sum as a timeseries
    instead of the spectrogram.
    '''
    nproc = kwargs.pop('nproc',4)
    bandpass = kwargs.pop('bandpass',None)
    blrms = kwargs.pop('blrms',None)
    fftlen = kwargs.pop('fftlen',2**8)
    overlap = fftlen/2

    # check existence of the spectrogram data
    fname_hdf5 = fname_hdf5_asd(start,end,prefix,axis)
    if os.path.exists(fname_hdf5):
        specgram = Spectrogram.read(fname_hdf5)
        if blrms:
            timeseries = specgram.crop_frequencies(blrms[0],blrms[1]).sum(axis=1)
            return timeseries
        return specgram
    
    # If the spectrogram does not exist, calculate it from the timeseries data.
    try:
        fname = fname_gwf(start,end,prefix='./data')
        chname = get_seis_chname(start,end,axis=axis)
        # check existence of the timeseries data
        if os.path.exists(fname):
            data = TimeSeries.read(fname,chname,nproc=nproc)
        else:
            # when the timeseries data does not exist
            fnamelist = existedfilelist(start,end)
            chname = get_seis_chname(start,end)
            datadict = TimeSeriesDict.read(fnamelist,chname,nproc=nproc)
            datadict = datadict.resample(32)
            datadict = datadict.crop(start,end)
            chname = get_seis_chname(start,end,axis=axis)
            datadict.write(fname,format='gwf.lalframe')
            data = TimeSeries.read(fname,chname,nproc=nproc)
            # If the data is broken, raise an error.
            if data.value.shape[0] != 131072:
                log.debug(data.value.shape)
                log.debug('####### {0} {1}'.format(start,end))
                raise ValueError('data broken')
    except:
        log.debug(traceback.format_exc())
        raise ValueError('!!!')

    # if the data is broken, raise an error.
    if data.value.shape[0] != 131072: # (131072 = 2**17 = 2**12[sec] * 2**5[Hz] )
        log.debug(data.value.shape)
        log.debug('!!!!!!!! {0} {1}'.format(start,end))
        raise ValueError('data broken')

    # calculate from timeseries data
    specgram = data.spectrogram2(fftlength=fftlen,overlap=overlap,nproc=nproc)
    specgram.write(fname_hdf5,format='hdf5',overwrite=True)
    return specgram
Example #7
def get_guardian_segments(node, frametype, start, end, nproc=1, pad=(0, 0),
                          strict=False):
    """Determine state segments for a given guardian node
    """
    ifo, node = node.split(':', 1)
    if node.startswith('GRD-'):
        node = node[4:]
    pstart = start - pad[0]
    pend = end + pad[1]

    # find frame cache
    cache = data.find_frames(ifo, frametype, pstart, pend)

    # pre-format data segments
    span = SegmentList([Segment(pstart, pend)])
    segs = SegmentList()
    csegs = cache_segments(cache)
    if not csegs:
        return csegs

    # read data
    stub = "{}:GRD-{}".format(ifo, node)
    if strict:
        channels = ["{}_OK".format(stub)]
    else:
        state = "{}_STATE_N".format(stub)
        nominal = "{}_NOMINAL_N".format(stub)
        active = "{}_ACTIVE".format(stub)
        channels = [state, nominal, active]
    for seg in csegs & span:
        if strict:
            sv = StateVector.read(
                cache, channels[0], nproc=nproc, start=seg[0], end=seg[1],
                bits=[0], gap='pad', pad=0,).astype('uint32')
            segs += sv.to_dqflags().intersection().active
        else:
            gdata = TimeSeriesDict.read(
                cache, channels, nproc=nproc, start=seg[0], end=seg[1],
                gap='pad', pad=0)
            ok = ((gdata[state].value == gdata[nominal].value) &
                  (gdata[active].value == 1)).view(StateTimeSeries)
            ok.t0 = gdata[state].t0
            ok.dt = gdata[state].dt
            segs += ok.to_dqflag().active

    # truncate to integers, and apply padding
    for i, seg in enumerate(segs):
        segs[i] = type(seg)(int(ceil(seg[0])) + pad[0],
                            int(floor(seg[1])) - pad[1])
    segs.coalesce()

    return segs.coalesce()
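A hypothetical call, assuming an L1 guardian node and raw frametype; it needs access to frame data through data.find_frames:

segs = get_guardian_segments('L1:GRD-ISC_LOCK', 'L1_R',
                             1187000000, 1187003600, pad=(4, 4))
print(segs)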
Example #8
File: sei.py Project: pvasired/gwsumm
    def process(self):

        # data span
        start = self.gpstime - self.duration / 2.
        end = self.gpstime + self.duration / 2.

        # get data
        if self.use_nds:
            data = TimeSeriesDict.fetch(self.chanlist, start, end)
        else:
            from glue.datafind import GWDataFindHTTPConnection
            conn = GWDataFindHTTPConnection()
            cache = conn.find_frame_urls(self.ifo[0], '%s_C' % self.ifo,
                                         self.start, self.end, urltype='file')
            data = TimeSeriesDict.read(cache, self.chanlist, start=start,
                                       end=end, nproc=self.nproc)

        # make plot
        plot, axes = subplots(nrows=self.geometry[0], ncols=self.geometry[1],
                              sharex=True,
                              subplot_kw={'projection': 'timeseries'},
                              FigureClass=TimeSeriesPlot, figsize=[12, 6])
        axes[0,0].set_xlim(start, end)
        for channel, ax in zip(self.chanlist, axes.flat):
            ax.set_epoch(self.gpstime)
            # plot data
            ax.plot(data[channel])
            # plot trip indicator
            ylim = ax.get_ylim()
            ax.plot([self.gpstime, self.gpstime], ylim, linewidth=0.5,
                    linestyle='--', color='red')
            ax.set_ylim(*ylim)
            ax.set_xlabel('')
            ax.set_title(channel.texname, fontsize=10)
            ax.xaxis.set_minor_locator(NullLocator())
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(10)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(16)
        plot.text(0.5, 0.04, 'Time [seconds] from trip (%s)' % self.gpstime,
                  ha='center', va='bottom', fontsize=24)
        plot.text(0.01, 0.5, 'Amplitude %s' % self.unit, ha='left', va='center',
                  rotation='vertical', fontsize=24)

        plot.suptitle('%s %s %s watchdog trip: %s'
                      % (self.ifo, self.chamber, self.sensor, self.gpstime),
                      fontsize=24)

        plot.save(self.outputfile)
        plot.close()
        return self.outputfile
Example #9
def getData(channels, start, stop, filename, fftl=4, ovlp=2):
    if path.exists('{}.hdf5'.format(filename)):
        data = TimeSeriesDict.read('{}.hdf5'.format(filename))
    else:
        data = TimeSeriesDict.fetch(channels, start, stop)
        data.write('{}.hdf5'.format(filename), overwrite=True)
    spec = {}
    for i in channels:
        spec[i] = {}
        spec[i]['sp'], spec[i]['norm'] = specgram(data[i], fftl, ovlp)
        spec[i]['sp_asd'] = spec[i]['sp'].percentile(50)
        if channels.index(i) == 0:
            spec[i]['sp_asd'] = calDARM(spec[i]['sp_asd'][1:])
        if i[10:13] == 'ACC':
            spec[i]['sp_asd'] = calAccel(spec[i]['sp_asd'][1:])
    np.save(filename, spec)
    return spec
Example #10
    def __call__(self, idx):
        start = idx * self._update_size
        stop = (idx + 1) * self._update_size

        if self.data is None or stop > self.data.shape[1]:
            # try to load in the next second's worth of data;
            # if it takes more than a few seconds to get created,
            # then assume the worst and raise an error
            start_time = time.time()
            path = self.path_pattern.format(self.t0)
            while time.time() - start_time < 3:
                try:
                    data = TimeSeriesDict.read(path, self.channels)
                    break
                except FileNotFoundError:
                    continue
            else:
                raise ValueError(f"Couldn't find next timestep file {path}")
            self._latency_t0 = _get_file_gps_timestamp(path)

            # resample the data and turn it into a numpy array
            data.resample(self.sample_rate)
            data = np.stack([data[channel].value
                             for channel in self.channels]).astype("float32")

            if self.data is not None and start < self.data.shape[1]:
                leftover = self.data[:, start:]
                data = np.concatenate([leftover, data], axis=1)
            self.data = data
            self.t0 += 1

            # raising an index error will get the DataGenerator's
            # _get_data fn to reset the index
            raise IndexError

        # return the next piece of data
        x = self.data[:, start:stop]

        # offset the frame's initial time by the time
        # corresponding to the first sample of stream
        t0 = self._latency_t0 + idx * self.kernel_stride
        return Package(x=x, t0=t0)
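A sketch of the consumer loop implied by the IndexError protocol above, assuming loader is an instance of the class shown here:

def process(package):  # hypothetical downstream handler
    print(package.t0, package.x.shape)

idx = 0
while True:
    try:
        package = loader(idx)  # calls the __call__ above
        idx += 1
        process(package)
    except IndexError:
        idx = 0  # a new file was loaded; restart at its first chunk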
Example #11
def _check_baddata(segment, data=None, prefix='./data', **kwargs):
    '''Check whether the given segment is good or not.

    1. Read timeseries data from frame files saved locally.
       If the data cannot be read, return the "no data" flag.
    2. Check for lack of data.

    Returned status bits:
      bit 1 (value  2) : no data
      bit 2 (value  4) : lack of data
      bit 3 (value  8) : missed calibration
      bit 4 (value 16) : big earthquake
    '''
    start, end = segment
    fname = iofunc.fname_gwf(start, end, prefix)
    fname = existedfilelist(start, end)
    chname = get_seis_chname(start, end)
    try:
        data = TimeSeriesDict.read(fname, chname, verbose=False, **kwargs)
        lack_of_data = any([0.0 in d.value for d in data.values()]) * 4
        miss_calib = any([1000.0 < d.mean().value for d in data.values()]) * 8
        bigeq = any([
            any((d.std() * 6).value < (d - d.mean()).abs().value)
            for d in data.values()
        ]) * 16
        return data, (lack_of_data + miss_calib + bigeq)
    except IOError as e:
        nodata = 2  # bit 1: no data
        return None, nodata
    except ValueError as e:
        if 'Cannot append discontiguous TimeSeries' in e.args[0]:
            log.debug(e)
            nodata = 2  # bit 1: no data
            return None, nodata
        else:
            log.debug(traceback.format_exc())
            raise ValueError('!!')
    except:
        log.debug(traceback.format_exc())
        raise ValueError('!!!')
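A minimal sketch for decoding the combined status returned above, assuming the bit values listed in the docstring:

FLAGS = {2: 'no data', 4: 'lack of data', 8: 'missed calibration', 16: 'big earthquake'}

def describe_status(status):
    """Return the names of all flags set in *status*."""
    return [name for bit, name in FLAGS.items() if status & bit]

# e.g. describe_status(12) -> ['lack of data', 'missed calibration']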
Example #12
def save_spectrogram(segmentlist, fftlength=2**10, overlap=2**9, **kwargs):
    '''Compute and save a spectrogram for each segment in segmentlist.
    '''
    log.debug('Save spectrograms')
    lackofdata = SegmentList()
    prefix = kwargs.pop('prefix', './data')
    write = kwargs.pop('write', True)
    skip = kwargs.pop('skip', False)

    fnames = [fname_png_asd(start, end, prefix) for start, end in segmentlist]
    not_checked = _check_skip(segmentlist, fnames)

    log.debug('{0}(/{1}) are not checked'.format(len(not_checked),
                                                 len(segmentlist)))
    log.debug('Save spectrograms..')
    for i, segment in enumerate(not_checked):
        try:
            #fname = fname_gwf(start,end,prefix)
            fname = existedfilelist(segment[0], segment[1])
            chname = get_seis_chname(segment[0], segment[1])
            kwargs.pop('fftlength', None)  # drop any stale value; re-set below
            kwargs.pop('overlap', None)
            data = TimeSeriesDict.read(fname, chname, **kwargs)
            data = data.resample(32)
            data = data.crop(segment[0], segment[1])
        except:
            log.debug(traceback.format_exc())
            raise ValueError('No such data {0}'.format(fname))
        # plot
        kwargs['fftlength'] = fftlength
        kwargs['overlap'] = overlap
        sglist = _calc_spectrogram(data, segment, **kwargs)
        #asdlist = [sg.percentile(50) for sg in sglist]
        fname = fname_png_asd(segment[0], segment[1], prefix)
        #plot_asd(asdlist,fname,**kwargs)
        log.debug('{0:03d}/{1:03d} {2} '.format(i, len(segmentlist), fname) +
                  'Plot')
Example #13
    # Read timeseries data of Trillium120
    from Kozapy.utils import filelist
    from lib.channel import get_seis_chname
    m31 = True
    if m31:
        start = tconvert('May31 2019 00:00:00')
        end = start + 2**13
        fname = filelist(start,end)
        chname = get_seis_chname(start,end,place='EXV')
        print(chname)
    else:
        fname = fname_gwf_tr120(dataname)
        chname = frtools.get_channels(fname)        
    try:
        data = TimeSeriesDict.read(fname,chname,**kwargs)
    except:
        print(fname)
        raise ValueError('!')

    exv,ixv,ixv2,eyv = check_channel_name(chname)
    exv = check_data(data,exv)
    ixv = check_data(data,ixv)
    ixv2 = check_data(data,ixv2)
    eyv = check_data(data,eyv)
    if ixv2 is not None:
        d12 = ixv-ixv2
        c12 = ixv+ixv2
    d31 = exv-ixv
    c31 = exv+ixv
    t0 = exv.t0.value
Example #14
def get_data(channels,
             gpstime,
             duration,
             pad,
             frametype=None,
             source=None,
             dtype='float64',
             nproc=1,
             verbose=False):
    """Retrieve data for a given channel, centered at a given time

    Parameters
    ----------
    channels : `list`
        required data channels
    gpstime : `float`
        GPS time of required data
    duration : `float`
        duration (in seconds) of required data
    pad : `float`
        amount of extra data to read in at the start and end for filtering
    frametype : `str`, optional
        name of frametype in which this channel is stored, by default will
        search for all required frame types
    source : `str`, `list`, optional
        `str` path of a LAL-format cache file or single data file, will
        supersede `frametype` if given, defaults to `None`
    dtype : `str` or `dtype`, optional
        typecode or data-type to which the output `TimeSeries` is cast
    nproc : `int`, optional
        number of parallel processes to use, uses serial process by default
    verbose : `bool`, optional
        print verbose output about NDS progress, default: False

    See Also
    --------
    gwpy.timeseries.TimeSeries.get
        for the underlying method to read from frames or NDS
    gwpy.timeseries.TimeSeries.read
        for the underlying method to read from a local file cache
    """
    # set GPS start and end time
    start = gpstime - duration / 2. - pad
    end = gpstime + duration / 2. + pad
    # construct file cache if none is given
    if source is None:
        source = find_frames(frametype[0], frametype, start, end)
    # read from frames or NDS
    if source:
        return TimeSeriesDict.read(source,
                                   channels,
                                   start=start,
                                   end=end,
                                   nproc=nproc,
                                   verbose=verbose,
                                   dtype=dtype)
    else:
        return TimeSeriesDict.fetch(channels,
                                    start,
                                    end,
                                    verbose=verbose,
                                    dtype=dtype)
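A hypothetical call, assuming an L1 strain channel and frametype:

data = get_data(['L1:GDS-CALIB_STRAIN'], gpstime=1187008882, duration=64,
                pad=8, frametype='L1_HOFT_C00')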
Example #15
comparison_ixv1_diff12 = True
comparison_diff12_diff13 = True
comparison_comm13_diff13 = True
comparison_comm12_diff12 = True
comparison_seis_gif = True
plot_coherence = True
tplot = True
write = True

# -----------------------------------------------
# Timeseries data
# -----------------------------------------------
if readgwf:
    data = TimeSeriesDict.read('2019Dec10_3hours.gwf',
                               chnames,
                               start,
                               end,
                               format='gwf.lalframe',
                               nproc=nproc)
    ixv1 = data['K1:PEM-IXV_GND_TR120Q_X_OUT_DQ'] * 2  # klog8746
    ixv2 = data['K1:PEM-IXV_GND_TR120QTEST_X_OUT_DQ'] * 2  # klog8746
    exv = data['K1:PEM-EXV_GND_TR120Q_X_OUT_DQ'] * 2  # klog8746
    diff12 = (ixv1 - ixv2) / np.sqrt(2)
    diff13 = (ixv1 - exv) / np.sqrt(2)
    comm12 = (ixv1 + ixv2) / np.sqrt(2)
    comm13 = (ixv1 + exv) / np.sqrt(2)

if tplot and readgwf:
    plot = data.plot()
    plot.savefig('img_timeseries.png')
    plot.close()
Example #16
def cropHDF5(readfile, writefile, starttime, stoptime):
    data = TimeSeriesDict.read(readfile)
    data = data.crop(start=starttime, end=stoptime)
    data.write(writefile, overwrite=True)
Example #17
if fft > stride / 2.:
    print('Warning: stride is shorter than twice the fft length. Setting stride=fft*2.')
    stride = fft * 2.

# Get data from frame files
if kamioka:
    sources = mylib.GetFilelist_Kamioka(gpsstart, gpsend)
else:
    sources = mylib.GetFilelist(gpsstart, gpsend)

channels = [refchannel, channel]

data = TimeSeriesDict.read(sources,
                           channels,
                           format='gwf.lalframe',
                           start=int(float(gpsstart)),
                           end=int(float(gpsend)) + 1)

ref = data[refchannel]
com = data[channel]

# Use same sampling rate
if com.dt.value < ref.dt.value:
    com = com.resample(1. / ref.dt.value)
if com.dt.value > ref.dt.value:
    ref = ref.resample(1. / com.dt.value)

ref = ref.crop(float(gpsstart), float(gpsend))
com = com.crop(float(gpsstart), float(gpsend))
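A plausible continuation, computing the coherence of the rate-matched series with gwpy's TimeSeries.coherence (fft and stride as defined above):

coh = ref.coherence(com, fftlength=fft, overlap=fft / 2.)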
Example #18
    # 
    end = start + 1

    # 
    filterbank = True
    if filterbank:
        # Read filter names
        with open('./filtername.txt','r') as f:
            channels = map(lambda x:x.replace('\n',''),f.readlines())
        #
        source = filelist(start,end,trend='full',place='kamioka')
        f = open('./results/{0}.txt'.format(hoge), mode='w')
        f.write('# NAME,STATUS,[FILTER_NUMBER],GAIN,OFFSET,LIMIT'+'\n')
        for name in channels:
            names = [name+_suffix for _suffix in ['_SWSTAT','_GAIN','_OFFSET','_LIMIT']]
            data = TimeSeriesDict.read(source,names,start=start,end=end,nproc=4,
                                       format='gwf.lalframe')
            swstat = int(data[name+'_SWSTAT'].mean())
            gain = data[name+'_GAIN'].mean()
            offset = data[name+'_OFFSET'].mean()
            limit = data[name+'_LIMIT'].mean()
            txt = '{0},{1},{2:3.5e},{3:3.5e},{4:3.5e}'.format(
                name, filt_status(swstat), gain, offset, limit)
            print(txt)
            f.write(txt+'\n')
        f.close()
    #
    matrix = False
    if matrix:
        # Read filter names
        names = np.loadtxt('matrixname.txt', dtype=str)
        source = filelist(start,end,trend='full',place='kashiwa')
Example #19
def threshold_table(start,
                    stop,
                    reading_channels,
                    channels,
                    bands,
                    label='kmeans-labels',
                    filename=DEFAULT_FILENAME,
                    prefix='.'):
    """
    Makes a html table of 'percent increase' from the largest cluster by band and channel.
    """
    data = TimeSeriesDict.read(filename,
                               reading_channels + [label],
                               start=to_gps(start),
                               end=to_gps(stop))
    labels = data[label]

    clusters = list(range(max(labels.value) + 1))
    cluster_counts = list(
        len(labels.value[labels.value == c]) for c in clusters)
    largest_cluster = cluster_counts.index(max(cluster_counts))
    clusters.remove(largest_cluster)

    logger.info(
        f'Largest cluster found to be Nº{largest_cluster} ({100 * max(cluster_counts) // len(labels.value)}%). Doing {clusters}.'
    )
    cluster_counts.remove(max(cluster_counts))

    def amplitude(channel, cluster):
        """return median amplitude for channel in cluster."""
        try:
            chan = data[channel]
        except KeyError:
            return 0.0
        return median([
            chan.value[i] for i, c in enumerate(labels.value) if c == cluster
        ])

    def threshold(cluster, channel, band) -> str:
        f_channel = f'{channel}_BLRMS_{band}.mean'
        base = amplitude(f_channel, largest_cluster)
        if base != 0.0:
            return str(int(
                100 * (amplitude(f_channel, cluster) - base) / base)) + '%'
        else:
            return str(amplitude(f_channel, cluster))

    range_chan = 'L1:DMT-SNSH_EFFECTIVE_RANGE_MPC.mean'
    if range_chan in reading_channels:
        base_range = amplitude(range_chan, largest_cluster)
        if base_range != 0.0:
            snsh = lambda c: 'SNSH: ' + str(
                int(100 * (amplitude(range_chan, c) - base_range) / base_range)
            ) + '%'
        else:
            snsh = lambda c: 'SNSH: 0.0'
    else:
        snsh = lambda c: ''

    with Progress('taking thresholds', len(clusters)) as progress:
        for i, cluster in enumerate(clusters):
            buffer = [[''] + bands]
            for channel in channels:
                buffer.append([channel] + [
                    progress(threshold, i, cluster, channel, band)
                    for band in bands
                ])
            html_table(
                f'cluster {cluster} ({colors[cluster]}) {snsh(cluster)}',
                csv_writer(buffer, get_path(f'{cluster}', 'csv',
                                            prefix=prefix)),
                get_path(f'{cluster}', 'html', prefix=prefix))
    html_table(
        'Index',
        csv_writer(
            [['clusters:']] +
            [[f'<a href="{cluster}.html">Nº{cluster} ({colors[cluster]})</a>']
             for cluster in clusters], get_path('idx', 'csv', prefix=prefix)),
        get_path('index', 'html', prefix=prefix))
Example #20
]
start = tconvert('Dec 10 2018 00:00:00')
end = tconvert('Dec 10 2018 03:00:00')

nproc = 10

# Read Data
cache = True
dump = False
if cache and not dump:
    print('Read using cache')
    from glue import lal
    from pylal import frutils
    cachefname = './full_2018_Dec10-Dec20.cache'
    source = lal.Cache.fromfile(open(cachefname))
    data = TimeSeriesDict.read(source,chname,start=start,end=end,format='gwf.lalframe',nproc=nproc)
elif dump:
    print('Read using dumped gwf file')
    source = 'dump.gwf'
    data = TimeSeriesDict.read(source,chname,start=start,end=end,format='gwf.lalframe')
else:
    print('Read using single gwf full file')
    source = 'K-K1_C-1231133824-32.gwf'
    source = '/data/full/12284/K-K1_C-1228435200-32.gwf'
    data = TimeSeries.read(source,chname,format='gwf.lalframe')    

# some treatment
#data.override_unit('um/s') # buggy when using the lalframe reader


# plot
Example #21
7: 'BS',
8: 'PR2',
9: 'SR2',
10: 'MC2'}

# read in channel lists to populate each input matrix

PD_DOF_chans=np.loadtxt('PD_DOF_MTRX_chans',dtype=str)
ARM_INPUT_chans=np.loadtxt('ARM_INPUT_MTRX_chans',dtype=str)
OUTPUT_chans=np.loadtxt('OUTPUT_MTRX_chans',dtype=str)
ARM_OUTPUT_chans=np.loadtxt('ARM_OUTPUT_MTRX_chans',dtype=str)

# make a timeseries dictionary for each input matrix
print('Fetching a bunch of data from frames')

PD_DOF_data = TimeSeriesDict.read(cache, PD_DOF_chans, start=start_gps, end=end_gps)
ARM_INPUT_data = TimeSeriesDict.read(cache, ARM_INPUT_chans, start=start_gps, end=end_gps)
OUTPUT_data = TimeSeriesDict.read(cache, OUTPUT_chans, start=start_gps, end=end_gps)
ARM_OUTPUT_data = TimeSeriesDict.read(cache, ARM_OUTPUT_chans, start=start_gps, end=end_gps)

# main workhorse function of this script
#
# requires a dictionary containing matrix-element time series and two dictionaries
# that map these matrix elements to IO channels
#
# grab the first sample of each channel - if it's non-zero, add it to the list of active channels
#
# process the active channels and return a set of tuples indicating the matrix entries
#
# send them through a search function that lines up the inputs and outputs;
# a dictionary maps the tuples generated from the active channels to PDs and DOFs
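A minimal sketch of the scan described above, assuming hypothetical input_map/output_map dictionaries keyed by matrix-element channel name:

def active_entries(matrix_data, input_map, output_map):
    """Return (input, output) tuples for matrix elements whose first sample is non-zero."""
    active = set()
    for name, series in matrix_data.items():
        if series.value[0] != 0:  # non-zero first sample -> element is in use
            active.add((input_map[name], output_map[name]))
    return active

# e.g. entries = active_entries(PD_DOF_data, pd_map, dof_map)  # pd_map/dof_map are hypothetical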
Example #22
def mkSegment(gst, get, utc_date, txt=True):

    chGRDLSC = 'K1:GRD-LSC_LOCK_STATE_N'
    chGRDIFO = 'K1:GRD-IFO_STATE_N'
    chGRDEQ = 'K1:GRD-PEM_EARTHQUAKE_STATE_N'
    chOMCADC    = 'K1:FEC-32_ADC_OVERFLOW_0_0'

    channels = [chGRDLSC,chGRDIFO,chGRDEQ,chOMCADC]
    
    if getpass.getuser() == "controls":
        gwf_cache = '/users/DET/Cache/latest.cache'
        with open(gwf_cache, 'r') as fobj:
            cache = Cache.fromfile(fobj)
    else:
        # add a 1 s margin so that locked segments can be contracted later.
        cache = GetFilelist(gst-1, get+1)

    #------------------------------------------------------------

    #print('Reading {0} timeseries data...'.format(date))
    # add a 1 s margin so that locked segments can be contracted later.
    channeldata = TimeSeriesDict.read(cache, channels, start=gst-1, end=get+1, format='gwf.lalframe', gap='pad')
    channeldataGRDIFO = channeldata[chGRDIFO]
    channeldataGRDLSC = channeldata[chGRDLSC]
    channeldataGRDEQ = channeldata[chGRDEQ]
    channeldataOMCADC = channeldata[chOMCADC]

    sv = {}
    sv['K1-GRD_SCIENCE_MODE'] = channeldataGRDIFO == 1000
    # Locked will be defined as the inverse of the unlocked segments, for technical reasons.
    #sv['K1-GRD_LOCKED'] = channeldataGRDLSC == 1000
    sv['K1-GRD_UNLOCKED'] = channeldataGRDLSC != 1000
    sv['K1-GRD_PEM_EARTHQUAKE'] = channeldataGRDEQ == 1000
    sv['K1-OMC_OVERFLOW_VETO'] = channeldataOMCADC != 0
    # OMC_OVERFLOW_OK will be defined as the inverse of the veto segments, for technical reasons.
    #sv['K1-OMC_OVERFLOW_OK'] = channeldataOMCADC == 0


    dqflag = {}
    for key in keys:
        if key == 'K1-GRD_LOCKED' or key == 'K1-OMC_OVERFLOW_OK':
            continue
        dqflag[key] = sv[key].to_dqflag(round=True)

    # Omit fractional seconds; the round=True option is inclusive by default.

    dqflag['K1-GRD_SCIENCE_MODE'].active = dqflag['K1-GRD_SCIENCE_MODE'].active.contract(1.0)

    dqflag['K1-GRD_LOCKED'] = ~dqflag['K1-GRD_UNLOCKED']
    dqflag['K1-GRD_LOCKED'].name = "K1:GRD-LSC_LOCK_STATE_N == 1000"

    dqflag['K1-OMC_OVERFLOW_OK'] = ~dqflag['K1-OMC_OVERFLOW_VETO']
    dqflag['K1-OMC_OVERFLOW_OK'].name = "K1:FEC-32_ADC_OVERFLOW_0_0 == 0"
    
    dqflag['K1-GRD_SCIENCE_MODE'].description = "Observation mode. K1:GRD-IFO_STATE_N == 1000"
    dqflag['K1-GRD_UNLOCKED'].description = "Interferometer is not locked. K1:GRD-LSC_LOCK_STATE_N != 1000"
    dqflag['K1-GRD_LOCKED'].description = "Interferometer is locked. K1:GRD-LSC_LOCK_STATE_N == 1000"
    dqflag['K1-OMC_OVERFLOW_VETO'].description = "OMC overflow happened. K1:FEC-32_ADC_OVERFLOW_0_0 != 0"
    dqflag['K1-OMC_OVERFLOW_OK'].description = "OMC overflow did not happen. K1:FEC-32_ADC_OVERFLOW_0_0 == 0"

    for key in keys:

        # remove the 1 s margin that was added for the locked-segment contraction.
        margin = DataQualityFlag(known=[(gst,get)],active=[(gst-1,gst),(get,get+1)])
        dqflag[key] -= margin

        # write out 15-minute segments.
        if txt:
            with open(filepath_txt[key], mode='w') as f:
                for seg in dqflag[key].active :
                    f.write('{0} {1}\n'.format(int(seg[0]), int(seg[1])))
        
        # if an accumulated file exists, merge it in.
        if os.path.exists(filepath_xml[key]):
            tmp = DataQualityFlag.read(filepath_xml[key])        
            dqflag[key] = dqflag[key] + tmp

        dqflag[key].write(filepath_xml[key],overwrite=True)
Example #23
#channels = [channel for channel in allchannels if channel not in ignore]
channels = []
while allchannels:
    e = allchannels.pop()
    if e not in ignore:
        channels.append(e)
channels.append(refchannel)

# Get data from frame files                                                                                 
if not Qonly:       
    if kamioka:
        sources = mylib.GetFilelist_Kamioka(gpsstart,gpsend)
    else:
        sources = mylib.GetFilelist(gpsstart,gpsend)

    data = TimeSeriesDict.read(sources,channels,format='gwf.lalframe',start=float(gpsstart),end=float(gpsend))

if kamioka:
    sources = mylib.GetFilelist_Kamioka(gpsstartT,gpsendT)
else:
    sources = mylib.GetFilelist(gpsstartT,gpsendT)

dataT = TimeSeriesDict.read(sources,channels,format='gwf.lalframe',start=float(gpsstartT),end=float(gpsendT))

margin = 4

gpsstartmargin = float(gpsstartQ) - margin
gpsendmargin = float(gpsendQ) + margin

if kamioka:
    sources = mylib.GetFilelist_Kamioka(gpsstartmargin,gpsendmargin)
Example #24
def _check_nodata(segment,
                  sample_freq=32,
                  headder='',
                  prefix='./data',
                  **kwargs):
    '''Check that timeseries data for the given segment can be read.

    Parameters
    ----------
    segment : tuple of float
        (start, end) GPS times.
    sample_freq : int
        Sampling rate, in Hz, to resample the data to.
    headder : str
        Prefix for the returned status message.
    prefix : str
        Directory in which the output gwf file is written.
    '''
    start, end = segment
    fname = iofunc.fname_gwf(start, end, prefix)

    if lack_gwf(start, end):
        data, ans = None, 'Nodata [noGWF]'
        ans = '{0} {1} {2}'.format(headder, fname, ans)
        log.debug(ans)
        return data, ans

    if broken_gwf(start, end):
        data, ans = None, 'Nodata [Broken]'
        return data, ans

    chname = get_seis_chname(start, end)
    sources = existedfilelist(start, end)
    try:
        data = TimeSeriesDict.read(sources,
                                   chname,
                                   format='gwf.lalframe',
                                   **kwargs)
        data = data.resample(sample_freq)
        data = data.crop(start, end)
        [d.override_unit('ct') for d in data.values()]
        assert None not in [d.name for d in data.values()], 'not exist!'
        _ans = write(data, fname)
        ans = 'OK. {0}'.format(_ans)
    except ValueError as e:
        data = None
        if 'need more than 0 values to unpack' in e.args[0]:
            ans = 'NoData [ValueError1]'
            log.debug('{0}, {1}'.format(start, end))
            log.debug(traceback.format_exc())
            exit()
        else:
            log.debug(traceback.format_exc())
            ans = 'NoData [ValueError]'
            raise ValueError('Unknown error. Please confirm.')
    except RuntimeError as e:
        data = None
        if 'Internal function call failed: I/O error' in e.args[0]:
            ans = 'NoData [Broken]'
            ''' Could not read because of broken file!!!!
            $ FrChannels /data/full/12132/K-K1_C-1213232928-32.gwf
            *** Error reading frame from file /data/full/12132/K-K1_C-1213232928-32.gwf
            *** FrError: in FrVectRead : Record length error: nBytes=3136930 nBytesR=2989154 length=147813           
            *** FrError: in FrameRead  missing dictionary
            *** FrError: in FrameRead Read Error
            '''
        elif 'Wrong name' in e.args[0]:
            ans = 'NoData [noChannel?]'
        else:
            log.debug(traceback.format_exc())
            ans = 'NoData [RuntimeError]'
            raise RuntimeError('Unknown error. Please confirm.')
    except TypeError as e:
        data = None
        if "'NoneType' object is not iterable" in e.args[0]:
            ans = 'NoData [noChannel]'
        else:
            log.debug(traceback.format_exc())
            ans = 'NoData [TypeError]'
            raise TypeError('Unknown error. Please confirm.')
    except:
        log.debug(traceback.format_exc())
        raise RuntimeError('Unknown error. Please confirm.')

    ans = '{0} {1} {2}'.format(headder, fname, ans)
    log.debug(ans)
    return data, ans
Example #25
    "orange", "royalblue", "limegreen", "red", "gold", "magenta",
    "lightskyblue", "black", "aquamarine", "darkorchid", "saddlebrown",
    "salmon", "greenyellow", "navy"
]
colorindex = 0

for gpsstart, gpsend in zip(gpsstarts, gpsends):
    if kamioka:
        sources = mylib.GetFilelist_Kamioka(gpsstart, gpsend)
    else:
        sources = mylib.GetFilelist(gpsstart, gpsend)

    allchannel = channels + refchannels
    data = TimeSeriesDict.read(sources,
                               allchannel,
                               format='gwf.lalframe',
                               start=float(gpsstart),
                               end=float(gpsend))

    for refchannel, channel in zip(refchannels, channels):

        ref = data[refchannel]
        com = data[channel]

        if fft > float(gpsend) - float(gpsstart):
            tmpfft = float(gpsend) - float(gpsstart)
            tmpol = tmpfft / 2.
            print(
                "Given FFT length is too long. Automatically modified to given time duration."
            )
            print("FFT length = " + str(tmpfft))
Example #26
                     'and startgps>={0} and endgps<={1}'.format(args.start,args.end))

    log.info('# ----------------------------------------')
    log.info('# Start SeismicNoise                      ')
    log.info('# ----------------------------------------')    

    random.seed(3434)    
    segments = use
    segments = random.sample(use,10)
    n = len(segments)
    import traceback
    for i,(start,end) in enumerate(segments,1):
        sources = existedfilelist(start,end)
        channels = get_seis_chname(start,end,place=['IXV','EXV'],axis=['Z'])
        try:
            data = TimeSeriesDict.read(sources,channels,nproc=nproc)
            data.crop(start,end)
            data.resample(32)
            status = 'OK'
        except ValueError as e:
            if 'Failed to read' in e.args[0]:
                status = 'LACK_OF_FILE'
            elif 'Cannot append discontiguous TimeSeries' in e.args[0]:
                status = 'LACK_OF_FILE'
            else:
                log.debug(traceback.format_exc())
                status = 'Unknown'
                exit()
        except TypeError as e:
            if 'NoneType' in e.args[0]:
                status = 'LACK_OF_FILE'
Example #27
def representative_spectra(channels,
                           start,
                           stop,
                           rate,
                           label='kmeans-labels',
                           filename=DEFAULT_FILENAME,
                           prefix='.',
                           downloader=TimeSeriesDict.get,
                           cluster_numbers=None,
                           groups=None,
                           **kwargs):
    """
    Make representative spectra for each cluster based on the median psd for minutes in that cluster.
    Downloads only the raw minutes in the cluster to save.
    """
    if groups is None:
        groups = channels

    # read the labels from the save file.
    labels = TimeSeries.read(filename,
                             label,
                             start=to_gps(start),
                             end=to_gps(stop))
    logger.info(f'Read labels {start} to {stop} from {filename}')

    if cluster_numbers is None:
        clusters = list(range(max(labels.value) + 1))

        cluster_counts = list(
            len(labels.value[labels.value == c]) for c in clusters)
        largest_cluster = cluster_counts.index(max(cluster_counts))
        clusters.remove(largest_cluster)

        logger.info(
            f'Largest cluster found to be Nº{largest_cluster} ({100 * max(cluster_counts) // len(labels.value)}%). Doing {clusters}.'
        )
        cluster_counts.remove(max(cluster_counts))
    else:
        clusters = cluster_numbers
        cluster_counts = list(
            len(labels.value[labels.value == c]) for c in clusters)

    t, v, d = labels.times, labels.value, diff(labels.value)

    pairs = list(
        zip([t[0]] + list(t[:-1][d != 0]),
            list(t[1:][d != 0]) + [t[-1]]))
    values = list(v[:-1][d != 0]) + [v[-1]]
    assert len(pairs) == len(values)  # need to include start-| and |-end
    # l|r l|r l|r l|r
    # l,r l,r l,r l,r
    # l r,l r,l r,l r # zip(start + l[1:], r[:-1] + stop)

    print(pairs)
    for pair in pairs:
        print(int(pair[1].value) - int(pair[0].value))
    print(values)

    # use h5py to make a mutable object pointing to a file on disk.
    save_file, filename = path2h5file(
        get_path(f'spectra-cache {start}', 'hdf5', prefix=prefix))
    logger.debug(f'Initiated hdf5 stream to {filename}')

    logger.info(f'Patching {filename}...')
    for i, (dl_start, end) in enumerate(pairs):
        if values[i] in clusters:
            if not data_exists(channels, to_gps(end).seconds, save_file):
                logger.debug(
                    f'Downloading Nº{values[i]} from {dl_start} to {end}...')
                try:
                    dl = downloader(channels,
                                    start=to_gps(dl_start) - LIGOTimeGPS(60),
                                    end=to_gps(end) + LIGOTimeGPS(seconds=1))
                    out = TimeSeriesDict()
                    for n in dl:
                        out[n] = dl[n].resample(**better_aa_opts(dl[n], rate))
                    write_to_disk(out, to_gps(dl_start).seconds, save_file)
                except RuntimeError:  # Cannot find all relevant data on any known server
                    logger.warning(
                        f"SKIPPING Nº{values[i]} from {dl_start} to {end} !!")

    logger.info('Reading data...')
    data = TimeSeriesDict.read(save_file, channels)

    logger.info('Starting PSD generation...')

    f = data[channels[0]].crop(
        start=to_gps(data[channels[0]].times[-1]) - LIGOTimeGPS(60),
        end=to_gps(data[channels[0]].times[-1])).psd().frequencies

    d = (to_gps(labels.times[-1]).seconds - to_gps(labels.times[1]).seconds)
    for i, cluster in enumerate(clusters):
        try:
            psds = {
                channel: FrequencySeries.read(filename, f'{cluster}-{channel}')
                for channel in channels
            }
            logger.info(f'Loaded Nº{cluster}.')

        except KeyError:

            logger.info(
                f'Doing Nº{cluster} ({100 * cluster_counts[i] / len(labels.value):.2f}% of data)...'
            )
            with Progress(f'psd Nº{cluster} ({i + 1}/{len(clusters)})',
                          len(channels) * d) as progress:
                psds = {
                    channel: FrequencySeries(median(stack([
                        progress(data[channel].crop,
                                 pc * d + (to_gps(time).seconds -
                                           to_gps(labels.times[1]).seconds),
                                 start=to_gps(time) - LIGOTimeGPS(60),
                                 end=to_gps(time)).psd().value
                        for c, time in zip(labels.value, labels.times)
                        if c == cluster
                    ]),
                                                    axis=0),
                                             frequencies=f,
                                             name=f'{cluster}-{channel}')
                    for pc, channel in enumerate(channels)
                }
            for name in psds.keys():
                psds[name].write(filename, **writing_opts)

        # plotting is slow, so show a nice progress bar.
        logger.debug('Initiating plotting routine...')
        with Progress('plotting', len(groups)) as progress:

            for p, (group, lbls, title) in enumerate(groups):
                # plot the group in one figure.
                plt = Plot(*(psds[channel] for channel in group),
                           separate=False,
                           sharex=True,
                           zorder=1,
                           **kwargs)
                # plt.gca().set_xlim((30,60))
                # modify the figure as a whole.
                # plt.add_segments_bar(dq, label='')
                plt.gca().set_xscale('log')
                plt.gca().set_yscale('log')
                plt.suptitle(title)
                plt.legend(lbls)

                # save to png.
                progress(
                    plt.save, p,
                    get_path(f'{cluster}-{title}',
                             'png',
                             prefix=f'{prefix}/{cluster}'))
Example #28
def get_guardian_segments(node,
                          frametype,
                          start,
                          end,
                          nproc=1,
                          pad=(0, 0),
                          strict=False):
    """Determine state segments for a given guardian node
    """
    ifo, node = node.split(':', 1)
    if node.startswith('GRD-'):
        node = node[4:]
    pstart = start - pad[0]
    pend = end + pad[1]

    # find frame cache
    cache = data.find_frames(ifo, frametype, pstart, pend)

    # pre-format data segments
    span = SegmentList([Segment(pstart, pend)])
    segs = SegmentList()
    csegs = cache_segments(cache)
    if not csegs:
        return csegs

    # read data
    stub = "{}:GRD-{}".format(ifo, node)
    if strict:
        channels = ["{}_OK".format(stub)]
    else:
        state = "{}_STATE_N".format(stub)
        nominal = "{}_NOMINAL_N".format(stub)
        active = "{}_ACTIVE".format(stub)
        channels = [state, nominal, active]
    for seg in csegs & span:
        if strict:
            sv = StateVector.read(
                cache,
                channels[0],
                nproc=nproc,
                start=seg[0],
                end=seg[1],
                bits=[0],
                gap='pad',
                pad=0,
            ).astype('uint32')
            segs += sv.to_dqflags().intersection().active
        else:
            gdata = TimeSeriesDict.read(cache,
                                        channels,
                                        nproc=nproc,
                                        start=seg[0],
                                        end=seg[1],
                                        gap='pad',
                                        pad=0)
            ok = ((gdata[state].value == gdata[nominal].value) &
                  (gdata[active].value == 1)).view(StateTimeSeries)
            ok.t0 = gdata[state].t0
            ok.dt = gdata[state].dt
            segs += ok.to_dqflag().active

    # truncate to integers, and apply padding
    for i, seg in enumerate(segs):
        segs[i] = type(seg)(int(ceil(seg[0])) + pad[0],
                            int(floor(seg[1])) - pad[1])
    segs.coalesce()

    return segs.coalesce()
Example #29
print(datanames)
for dataname in datanames:
    hdf5_fmt = './{dataname}/Xaxis_{sensor}_50pct.hdf5'.replace(
        "{dataname}", dataname)
    hdf5_fmt_csd = './{dataname}/{name}.hdf5'.replace("{dataname}", dataname)
    t0 = _t0[dataname]

    if specgram:
        gwf_fmt = './{dataname}/X.gwf'.replace("{dataname}", dataname)
        chname = frtools.get_channels(gwf_fmt)

        data = TimeSeriesDict.read(
            './{dataname}/X.gwf'.format(dataname=dataname),
            chname,
            format='gwf.lalframe',
            pad=np.nan,
            nproc=2,
            verbose=True)

        # Override unit of data from count to voltage.
        c2v = (10.0 / 2**15) * u.V / u.ct  # [1]
        print(data)
        exit()

    # Load FrequencySeries from saved hdf5 files.
    if True:
        exv0 = read(hdf5_fmt.format(sensor='exv'))
        ixv1 = read(hdf5_fmt.format(sensor='ixv1'))
        ixv2 = read(hdf5_fmt.format(sensor='ixv2'))
        d12 = read(hdf5_fmt.format(sensor='diff12')) * 2
Example #30
    def draw(self):

        # data span
        start = self.gpstime - self.duration / 2.
        end = self.gpstime + self.duration / 2.

        # get data
        if self.use_nds:
            data = TimeSeriesDict.fetch(self.chanlist, start, end)
        else:
            from glue.datafind import GWDataFindHTTPConnection
            conn = GWDataFindHTTPConnection()
            cache = conn.find_frame_urls(self.ifo[0],
                                         '%s_R' % self.ifo,
                                         self.start,
                                         self.end,
                                         urltype='file')
            if len(cache) == 0:
                data = {}
            else:
                data = TimeSeriesDict.read(cache,
                                           self.chanlist,
                                           start=start,
                                           end=end)

        # make plot
        plot, axes = subplots(nrows=self.geometry[0],
                              ncols=self.geometry[1],
                              sharex=True,
                              subplot_kw={'xscale': 'auto-gps'},
                              FigureClass=Plot,
                              figsize=[12, 6])
        axes[0, 0].set_xlim(start, end)
        for channel, ax in zip(self.chanlist, axes.flat):
            ax.set_epoch(self.gpstime)
            # plot data
            try:
                ax.plot(data[channel])
            except KeyError:
                ax.text(self.gpstime,
                        0.5,
                        "No data",
                        va='center',
                        ha='center',
                        transform=ax.transData)
            # plot trip indicator
            ax.axvline(self.gpstime,
                       linewidth=0.5,
                       linestyle='--',
                       color='red')
            ax.set_xlabel('')
            ax.set_ylabel('')
            ax.set_title(usetex_tex(channel.name), fontsize=10)
            ax.xaxis.set_minor_locator(NullLocator())
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(10)
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(16)
        plot.text(0.5,
                  0.02,
                  'Time [seconds] from trip (%s)' % self.gpstime,
                  ha='center',
                  va='bottom',
                  fontsize=24)
        plot.text(0.01,
                  0.5,
                  'Amplitude %s' % self.unit,
                  ha='left',
                  va='center',
                  rotation='vertical',
                  fontsize=24)

        plot.suptitle('%s %s %s watchdog trip: %s' %
                      (self.ifo, self.chamber, self.sensor, self.gpstime),
                      fontsize=24)

        plot.save(self.outputfile)
        plot.close()
        return self.outputfile
Example #31
        unit = r'Humidity [\%]'
    elif channel.find('ACC') != -1:
        unit = r'Acceleration [$m/s^2$]'
    elif channel.find('MIC') != -1:
        unit = 'Sound [Pa]'

    if mtrend:
        for x in suffix:
            chnames.append(channel + '.' + x)
            latexchnames.append(channel.replace('_', r'\_') + '.' + x)

    # Time series
    if mtrend:
        data = TimeSeriesDict.read(sources,
                                   chnames,
                                   format='gwf.lalframe',
                                   nproc=2,
                                   start=int(start),
                                   pad=0.0)
        #data = data.crop(send)
        t0 = data['K1:PEM-EXV_SEIS_WE_SENSINF_OUT_DQ.max'].t0
    if full:
        data = TimeSeries.read(sources,
                               channel,
                               format='gwf.lalframe',
                               nproc=2,
                               start=int(start),
                               pad=0.0)
        t0 = data.t0
        _max = data.max()
        _min = data.min()
        fs = 1. / data.dt
Example #32
    'K1:PEM-IY0_SENSOR9_OUT_DQ', 'K1:PEM-IY0_SENSOR10_OUT_DQ',
    'K1:PEM-IY0_SENSOR11_OUT_DQ', 'K1:FEC-99_STATE_WORD_FE',
    'K1:FEC-121_STATE_WORD_FE'
]

cache = './WEATHER_IY0.cache'
kwargs = {}
kwargs['verbose'] = True
kwargs['pad'] = np.nan
kwargs['format'] = 'gwf.lalframe'
kwargs['nproc'] = 6
kwargs['start'] = start
kwargs['end'] = end

if False:
    data = TimeSeriesDict.read(cache, chlst, **kwargs)
    data.write('./weather_iy0.gwf', format='gwf.lalframe')
if True:
    data = TimeSeriesDict.read('./weather_iy0.gwf', chlst, **kwargs)
    #data = TimeSeriesDict.read('./weather_iy0_long.gwf',chlst,**kwargs)
    print('loaded')

daq_iy0 = data['K1:FEC-99_STATE_WORD_FE']
daq_ix1 = data['K1:FEC-121_STATE_WORD_FE']
no5_temp = data['K1:PEM-IY0_SENSOR5_OUT_DQ']  # ct
no5_humd = data['K1:PEM-IY0_SENSOR6_OUT_DQ']  # ct
no5_baro = data['K1:PEM-IY0_SENSOR7_OUT_DQ']  # ct
no6_temp = data['K1:PEM-IY0_SENSOR9_OUT_DQ']  # ct
no6_humd = data['K1:PEM-IY0_SENSOR10_OUT_DQ']  # ct
no6_baro = data['K1:PEM-IY0_SENSOR11_OUT_DQ']  # ct
no5_temp.override_unit('ct')
Example #33
20: 'OM3',
21: 'TMSX',
22: 'TMSY',
23: 'PM1'}

# read in channel lists to populate each input matrix

INMATRIX_chans_PIT=np.loadtxt('ASC_INMATRIX_P_chans.txt',dtype=str)
INMATRIX_chans_YAW=np.loadtxt('ASC_INMATRIX_Y_chans.txt',dtype=str)
OUTMATRIX_chans_PIT=np.loadtxt('ASC_OUTMATRIX_P_chans.txt',dtype=str)
OUTMATRIX_chans_YAW=np.loadtxt('ASC_OUTMATRIX_Y_chans.txt',dtype=str)

# make a timeseries dictionary for each input matrix
print('Fetching a bunch of data from frames')

INMATRIX_PIT_data = TimeSeriesDict.read(cache, INMATRIX_chans_PIT, start=start_gps, end=end_gps)
INMATRIX_YAW_data = TimeSeriesDict.read(cache, INMATRIX_chans_YAW, start=start_gps, end=end_gps)
OUTMATRIX_PIT_data = TimeSeriesDict.read(cache, OUTMATRIX_chans_PIT, start=start_gps, end=end_gps)
OUTMATRIX_YAW_data = TimeSeriesDict.read(cache, OUTMATRIX_chans_YAW, start=start_gps, end=end_gps)

# main workhorse function of this script
#
# requires a dictionary containing matrix-element time series and two dictionaries
# that map these matrix elements to IO channels
#
# grab the first sample of each channel - if it's non-zero, add it to the list of active channels
#
# process the active channels and return a set of tuples indicating the matrix entries
#
# send them through a search function that lines up the inputs and outputs;
# a dictionary maps the tuples generated from the active channels to PDs and DOFs
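As in Example #21, a minimal sketch of the matching step, reusing the hypothetical active_entries helper sketched there with hypothetical sensor_map/dof_map dictionaries:

entries = active_entries(INMATRIX_PIT_data, sensor_map, dof_map)
for sensor, dof in sorted(entries):
    print('{0} -> {1}'.format(sensor, dof))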
Example #34
    start = tconvert(
        'Sep 06 2019 03:22:00 JST'
    )  # no good: control signal saturated because of the output limiter.
    end = tconvert('Sep 06 2019 04:00:00 JST')
elif hoge == 'sc1_5':
    # SC1_5 : Strainmeter 4th trial (IPdcdamp + GIFsc, mat=1,gain=-1)
    start = tconvert('Sep 06 2019 03:42:00 JST')
    end = tconvert('Sep 06 2019 04:42:00 JST')

# setting
fftlen = 2**6
ovlp = fftlen / 2.0

# Timeseries
source = filelist(start, end, trend='full', place='kashiwa')
data = TimeSeriesDict.read(source, channels, start=start, end=end, nproc=4)
c = 299792458  # m/sec
lam = 1064e-9  # m
gif = data['K1:VIS-ETMX_GIF_ARM_L_OUT16']
xarm = data['K1:CAL-CS_PROC_XARM_FILT_AOM_OUT16'] * 3000.0 / (
    c / lam) * 1e6  # dx = L*(df/f), L = 3000 m, f = c/lam -> [um]
etmx_seis = data['K1:PEM-SEIS_EXV_GND_X_OUT16']
itmx_seis = data['K1:PEM-SEIS_IXV_GND_X_OUT16']
diff_seis = etmx_seis - itmx_seis
comm_seis = etmx_seis + itmx_seis

# Coherence
coh_gif2xarm = gif.coherence(xarm, fftlength=fftlen, overlap=ovlp)
coh_gif2seis = gif.coherence(diff_seis, fftlength=fftlen, overlap=ovlp)
coh_xarm2seiscomm = xarm.coherence(comm_seis, fftlength=fftlen, overlap=ovlp)