Example #1
def _save_longterm_spectrogram(axis, idnum, fnamelist, **kwargs):
    '''Combine per-segment spectrograms and save them as one long-term file.
    '''
    prefix = kwargs.pop('prefix', './data')
    specgrams = Spectrogram.read(fnamelist[0], format='hdf5')
    for fname in fnamelist[1:]:
        try:
            specgrams.append(Spectrogram.read(fname, format='hdf5'),
                             gap='ignore')
        except Exception:
            log.debug(traceback.format_exc())
            raise ValueError('Could not append {0}'.format(fname))
    fname_hdf5 = prefix + '/SG_LongTerm_{0}_{1}.hdf5'.format(axis, idnum)
    log.debug('{0} Combined'.format(fname_hdf5))
    specgrams.write(fname_hdf5, format='hdf5', overwrite=True)
    log.debug('{0} Saved'.format(fname_hdf5))
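
A minimal usage sketch for the helper above; the glob pattern and file naming scheme are illustrative, assuming the per-segment spectrograms already exist under ./data:

import glob

# combine all X-axis segment spectrograms into ./data/SG_LongTerm_X_0.hdf5
fnamelist = sorted(glob.glob('./data/SG_X_*.hdf5'))  # naming scheme assumed
_save_longterm_spectrogram('X', 0, fnamelist, prefix='./data')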
Example #2
def get_specgram(*chname, **kwargs):
    n = len(chname)
    if n == 2:
        chname1, chname2 = chname
        return get_csd_specgram(chname1, chname2, **kwargs)
    elif n == 1:
        chname = chname[0]
    else:
        raise ValueError('get_specgram takes one or two channel names')

    # fftlength is required when remake=True
    fftlength = kwargs.pop('fftlength', None)
    remake = kwargs.pop('remake', False)
    overlap = kwargs.pop('overlap', None)
    hdf5fname = to_hdf5fname(chname, **kwargs)
    if remake:
        if os.path.exists(hdf5fname):
            os.remove(hdf5fname)
        timeseries = get_timeseries(chname, **kwargs)
        specgram = timeseries.spectrogram(stride=fftlength*2,
                                          fftlength=fftlength,
                                          overlap=overlap,
                                          window='hanning')
        specgram.write(hdf5fname)
        return specgram
    else:
        # read the cached spectrogram; fftlength and overlap are ignored here
        specgram = Spectrogram.read(hdf5fname)
        return specgram
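
Because get_specgram dispatches on the number of channel names, one entry point covers both the auto- and cross-spectrogram. A usage sketch, with channel names and GPS keyword arguments purely illustrative:

# one channel -> PSD spectrogram; two channels -> CSD spectrogram
sg_auto = get_specgram('K1:PEM-SEIS_EXV_GND_X_OUT_DQ',
                       start=1211817600, end=1211821200,
                       fftlength=2**6, remake=True)
sg_cross = get_specgram('K1:PEM-SEIS_EXV_GND_X_OUT_DQ',
                        'K1:PEM-SEIS_IXV_GND_X_OUT_DQ',
                        start=1211817600, end=1211821200)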
Example #3
def get_array2d(start, end, axis='X', prefix='./data', **kwargs):
    '''Load a seismometer spectrogram, computing it from time series if needed.
    '''
    nproc = kwargs.pop('nproc', 4)
    bandpass = kwargs.pop('bandpass', None)
    blrms = kwargs.pop('blrms', None)
    fftlen = kwargs.pop('fftlen', 2**8)
    overlap = fftlen / 2

    # check existence of the spectrogram data
    fname_hdf5 = fname_hdf5_asd(start, end, prefix, axis)
    if os.path.exists(fname_hdf5):
        specgram = Spectrogram.read(fname_hdf5)
        if blrms:
            timeseries = specgram.crop_frequencies(blrms[0], blrms[1]).sum(axis=1)
            return timeseries
        return specgram

    # if the spectrogram does not exist, calculate it from time series data
    try:
        fname = fname_gwf(start, end, prefix='./data')
        chname = get_seis_chname(start, end, axis=axis)
        # check existence of the time series data
        if os.path.exists(fname):
            data = TimeSeries.read(fname, chname, nproc=nproc)
        else:
            # when the time series data do not exist, build them from raw frames
            fnamelist = existedfilelist(start, end)
            chname = get_seis_chname(start, end)
            datadict = TimeSeriesDict.read(fnamelist, chname, nproc=nproc)
            datadict = datadict.resample(32)
            datadict = datadict.crop(start, end)
            chname = get_seis_chname(start, end, axis=axis)
            datadict.write(fname, format='gwf.lalframe')
            data = TimeSeries.read(fname, chname, nproc=nproc)
    except Exception:
        log.debug(traceback.format_exc())
        raise ValueError('Could not load time series: {0} {1}'.format(start, end))

    # if the data are broken, raise an error
    # (131072 = 2**17 = 2**12 [sec] * 2**5 [Hz])
    if data.value.shape[0] != 131072:
        log.debug(data.value.shape)
        log.debug('Broken data: {0} {1}'.format(start, end))
        raise ValueError('data broken')

    # calculate the spectrogram from the time series data
    specgram = data.spectrogram2(fftlength=fftlen, overlap=overlap, nproc=nproc)
    specgram.write(fname_hdf5, format='hdf5', overwrite=True)
    return specgram
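
A usage sketch: with blrms set, the function returns a band-limited RMS time series instead of the full spectrogram. GPS times and band edges are illustrative; the span matches the 2**12-second chunks the length check assumes:

# full spectrogram for one 4096-second chunk of the X axis
sg = get_array2d(1211817600, 1211821696, axis='X', prefix='./data')

# band-limited RMS between 0.1 and 0.3 Hz over the same chunk
ts = get_array2d(1211817600, 1211821696, axis='X', blrms=(0.1, 0.3))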
Example #4
def get_spectrogram(start, end, axis='X', seis='EXV', **kwargs):
    '''Get spectrogram.

    Parameters
    ----------
    start : `int`
        start GPS time.
    end : `int`
        end GPS time.

    Returns
    -------
    specgram : `gwpy.spectrogram.Spectrogram`
        spectrogram.
    '''
    nproc = kwargs.pop('nproc', 3)
    bandpass = kwargs.pop('bandpass', None)
    fftlen = kwargs.pop('fftlen', 2**8)
    diff = kwargs.pop('diff', False)
    fs = kwargs.pop('fs', 256)
    fname_hdf5 = fname_specgram(start, end, prefix=seis, axis=axis)

    # Load specgram from hdf5 file
    if os.path.exists(fname_hdf5):
        specgram = Spectrogram.read(fname_hdf5, format='hdf5')
        return specgram

    # If no file, make specgram from timeseries data
    try:
        chname = get_seis_chname(start, end, axis=axis, seis=seis)[0]
        fnamelist = existedfilelist(start, end)
        data = TimeSeries.read(fnamelist, chname, nproc=nproc)
        data = data.resample(fs)
        data = data.crop(start, end)
    except Exception:
        log.debug(traceback.format_exc())
        raise ValueError('Could not load time series: {0} {1}'.format(start, end))

    # calculate specgram
    specgram = data.spectrogram2(fftlength=fftlen,
                                 overlap=fftlen / 2,
                                 nproc=nproc)
    try:
        # make the output directory if needed
        fname_dir = '/'.join(fname_hdf5.split('/')[:4])
        if not os.path.exists(fname_dir):
            os.makedirs(fname_dir)
        specgram.write(fname_hdf5, format='hdf5', overwrite=True)
        log.debug('Make {0}'.format(fname_hdf5))
    except Exception:
        log.debug(traceback.format_exc())
        raise ValueError('Could not write {0}'.format(fname_hdf5))
    return specgram
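
A hedged call example; GPS times are illustrative, and the keyword defaults above apply when omitted:

# spectrogram of the EXV seismometer X axis, cached under the seis prefix
sg = get_spectrogram(1211817600, 1211821696, axis='X', seis='EXV',
                     fftlen=2**8, fs=256)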
Example #5
def save_asd(axis, available, percentile=50, **kwargs):
    prefix = kwargs.pop('prefix', './data')
    write = kwargs.pop('write', None)
    write_gwf = kwargs.pop('write_gwf', None)
    skip = kwargs.pop('skip', None)

    asd_fmt = '{0}/{1}_{2:02d}_LongTerm.hdf5'.format(prefix, axis, percentile)
    if os.path.exists(asd_fmt):
        return FrequencySeries.read(asd_fmt, format='hdf5')

    log.debug(asd_fmt + ' Saving {0:02d} percentile'.format(percentile))
    fnamelist = [
        prefix + '/{0}_{1}_{2}.hdf5'.format(axis, start, end)
        for start, end in available
    ]
    specgrams = Spectrogram.read(fnamelist[0], format='hdf5')
    # append the remaining files; the original re-appended the first one
    for fname in fnamelist[1:]:
        specgrams.append(Spectrogram.read(fname, format='hdf5'), gap='ignore')
    asd = specgrams.percentile(percentile)
    asd.write(asd_fmt, format='hdf5', overwrite=True)
    return asd
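
Percentile ASDs can be stacked to summarize long-term motion; a sketch assuming `available` holds (start, end) GPS pairs that match the per-chunk file names:

available = [(1211817600, 1211821696), (1211821696, 1211825792)]  # illustrative
asd_10 = save_asd('X', available, percentile=10)
asd_50 = save_asd('X', available, percentile=50)
asd_90 = save_asd('X', available, percentile=90)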
Example #6
def get_csd_specgram(chname1, chname2, remake=True, fftlength=2**6,
                     overlap=0.0, **kwargs):
    hdf5fname = to_hdf5fname(chname1, chname2)
    if remake:
        if os.path.exists(hdf5fname):
            os.remove(hdf5fname)
        timeseries1 = get_timeseries(chname1, **kwargs)
        timeseries2 = get_timeseries(chname2, **kwargs)
        specgram = timeseries1.csd_spectrogram(timeseries2,
                                               stride=fftlength*2,
                                               fftlength=fftlength,
                                               overlap=overlap,
                                               window='hanning',
                                               nproc=2)
        specgram.write(hdf5fname)
        return specgram
    else:
        # read the cached spectrogram; fftlength and overlap are ignored here
        specgram = Spectrogram.read(hdf5fname)
        return specgram
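
A usage sketch for the cross-spectral-density helper; the channel pair and time span are illustrative, and extra kwargs are forwarded to get_timeseries:

csd_sg = get_csd_specgram('K1:PEM-SEIS_EXV_GND_X_OUT_DQ',
                          'K1:PEM-SEIS_IXV_GND_X_OUT_DQ',
                          fftlength=2**6,
                          start=1211817600, end=1211821200)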
Example #7
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source

    This method reads all found data into the data containers defined by
    the `gwsumm.globalv` module, then returns nothing.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:

        # -- channels ---------------------------

        try:
            ctable = Table.read(h5file['channels'])
        except KeyError:  # no channels table written
            pass
        else:
            for row in ctable:
                chan = get_channel(row['name'])
                for p in ctable.colnames[1:]:
                    if row[p]:
                        setattr(chan, p, row[p])

        # -- timeseries -------------------------

        for dataset in h5file.get('timeseries', {}).values():
            ts = TimeSeries.read(dataset, format='hdf5')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name) and
                    ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() != mode.Mode.day:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # -- statevector ------------------------

        for dataset in h5file.get('statevector', {}).values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # -- spectrogram ------------------------

        for tag, add_ in zip(
                ['spectrogram', 'coherence-components'],
                [add_spectrogram, add_coherence_component_spectrogram]):
            for key, dataset in h5file.get(tag, {}).items():
                key = key.rsplit(',', 1)[0]
                spec = Spectrogram.read(dataset, format='hdf5')
                spec.channel = get_channel(spec.channel)
                add_(spec, key=key)

        # -- segments ---------------------------

        for name, dataset in h5file.get('segments', {}).items():
            dqflag = DataQualityFlag.read(h5file, path=dataset.name,
                                          format='hdf5')
            globalv.SEGMENTS += {name: dqflag}

        # -- triggers ---------------------------

        for dataset in h5file.get('triggers', {}).values():
            load_table(dataset)
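
A minimal driver for the reader above; the archive path is illustrative, and the populated containers (globalv.DATA, globalv.SEGMENTS, and friends) live in gwsumm.globalv:

# repopulate the gwsumm.globalv containers from a day archive (path assumed)
read_data_archive('/home/user/summary/archive/gwsumm-day-20180601.h5')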
Example #8
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:
        # read all time-series data
        try:
            group = h5file['timeseries']
        except KeyError:
            group = dict()
        for dataset in group.values():
            ts = TimeSeries.read(dataset, format='hdf')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name) and
                    ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() == mode.SUMMARY_MODE_DAY:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # read all state-vector data
        try:
            group = h5file['statevector']
        except KeyError:
            group = dict()
        for dataset in group.values():
            sv = StateVector.read(dataset, format='hdf')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # read all spectrogram data
        try:
            group = h5file['spectrogram']
        except KeyError:
            group = dict()
        for key, dataset in group.items():
            key = key.rsplit(',', 1)[0]
            spec = Spectrogram.read(dataset, format='hdf')
            spec.channel = get_channel(spec.channel)
            add_spectrogram(spec, key=key)

        try:
            group = h5file['segments']
        except KeyError:
            group = dict()
        for name, dataset in group.items():
            dqflag = DataQualityFlag.read(dataset, format='hdf')
            globalv.SEGMENTS += {name: dqflag}
Example #9
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:
        # read all time-series data
        try:
            group = h5file['timeseries']
        except KeyError:
            group = dict()
        for dataset in group.values():
            ts = TimeSeries.read(dataset, format='hdf5')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name)
                    and ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() != mode.Mode.day:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # read all state-vector data
        try:
            group = h5file['statevector']
        except KeyError:
            group = dict()
        for dataset in group.values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # read all spectrogram data
        for tag in ['spectrogram', 'coherence-components']:
            if tag == 'coherence-components':
                add_ = add_coherence_component_spectrogram
            else:
                add_ = add_spectrogram
            try:
                group = h5file[tag]
            except KeyError:
                group = dict()
            for key, dataset in group.items():
                key = key.rsplit(',', 1)[0]
                spec = Spectrogram.read(dataset, format='hdf5')
                spec.channel = get_channel(spec.channel)
                add_(spec, key=key)

        # read all segments
        try:
            group = h5file['segments']
        except KeyError:
            group = dict()
        for name in group:
            dqflag = DataQualityFlag.read(group, path=name, format='hdf5')
            globalv.SEGMENTS += {name: dqflag}

        # read all triggers
        try:
            group = h5file['triggers']
        except KeyError:
            group = dict()
        for key in group:
            load_table(group[key])
Example #10
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source

    This method reads all found data into the data containers defined by
    the `gwsumm.globalv` module, then returns nothing.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:

        # -- channels ---------------------------

        try:
            ctable = Table.read(h5file['channels'])
        except KeyError:  # no channels table written
            pass
        else:
            for row in ctable:
                chan = get_channel(row['name'])
                for p in ctable.colnames[1:]:
                    if row[p]:
                        setattr(chan, p, row[p])

        # -- timeseries -------------------------

        for dataset in h5file.get('timeseries', {}).values():
            ts = TimeSeries.read(dataset, format='hdf5')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name)
                    and ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() != mode.Mode.day:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # -- statevector ------------------------

        for dataset in h5file.get('statevector', {}).values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # -- spectrogram ------------------------

        for tag, add_ in zip(
            ['spectrogram', 'coherence-components'],
            [add_spectrogram, add_coherence_component_spectrogram]):
            for key, dataset in h5file.get(tag, {}).items():
                key = key.rsplit(',', 1)[0]
                spec = Spectrogram.read(dataset, format='hdf5')
                spec.channel = get_channel(spec.channel)
                add_(spec, key=key)

        # -- segments ---------------------------

        for name, dataset in h5file.get('segments', {}).items():
            dqflag = DataQualityFlag.read(h5file,
                                          path=dataset.name,
                                          format='hdf5')
            globalv.SEGMENTS += {name: dqflag}

        # -- triggers ---------------------------

        for dataset in h5file.get('triggers', {}).values():
            load_table(dataset)
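
As a sanity check before handing a file to any of the readers above, the top-level group layout they all expect can be listed directly with h5py (file name illustrative):

from h5py import File

# the readers look for: channels, timeseries, statevector, spectrogram,
# coherence-components, segments, triggers
with File('gwsumm-archive.h5', 'r') as h5file:
    for name in h5file:
        print(name, len(h5file[name]))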