Example No. 1
def read_frame(frame, channel, st=None, et=None, cfac=1.589459e-9):
    """
    Reads LIGO frames.

    Parameters
    ----------
    frame : `str`
        filepath to a frame
    channel : `str`
        channel in the frame to load
    st : `int`, date string, optional
        optional start time. defaults to beginning
        of frame
    et : `int`, date string, optional
        optional end time. defaults to end
        of frame
    cfac : `float`, optional
        calibration factor applied to the raw trace

    Returns
    -------
    TS : `Trace`
        time series trace
    """

    if st is not None and et is not None:
        d1 = cfac * Trace.read(frame, channel, st, et).detrend()
        d2 = TimeSeries.read(frame, channel, st, et, format='lalframe')
    else:
        d1 = cfac * Trace.read(frame, channel).detrend()
        d2 = TimeSeries.read(frame, channel, format='lalframe')
    d1 = Trace(d1.value)
    d1.__dict__ = d2.copy_metadata()
    d1.location = d1.get_location()
    return d1
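# A minimal usage sketch of the function above; the frame path, channel,
# and GPS times here are hypothetical placeholders.
tr = read_frame('L-L1_R-1126259446-64.gwf', 'L1:IMC-F_OUT_DQ',
                st=1126259446, et=1126259478)
print(tr.location)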
Example No. 2
def read_frame(filename, ifo, readstrain=True, strain_chan=None, dq_chan=None, inj_chan=None):
    """
    Helper function to read frame files
    """

    import numpy as np
    from gwpy.timeseries import TimeSeries

    if ifo is None:
        raise TypeError("""To read GWF data, ifo must be 'H1', 'H2', or 'L1'.
        def loaddata(filename, ifo=None):""")

    #-- Read strain channel
    if strain_chan is None:
        strain_chan = ifo + ':LOSC-STRAIN'

    if readstrain:
        try:
            sd = TimeSeries.read(filename, strain_chan)
            strain = sd.value
            gpsStart = sd.t0.value
            ts = sd.dt.value
        except:
            print("ERROR reading file {0} with strain channel {1}".format(filename, strain_chan))
            raise
    else:
        ts = 1
        strain = 0

    #-- Read DQ channel
    if dq_chan is None:
        dq_chan = ifo + ':LOSC-DQMASK'

    try:
        qd = TimeSeries.read(str(filename), str(dq_chan))
        gpsStart = qd.t0.value
        qmask = np.array(qd.value)
        dq_ts = qd.dt.value
        shortnameList_wbit = str(qd.unit).split()
        shortnameList = [name.split(':')[1] for name in shortnameList_wbit]
    except:
        print("ERROR reading DQ channel '{0}' from file: {1}".format(dq_chan, filename))
        raise

    #-- Read Injection channel
    if inj_chan is None:
        inj_chan = ifo + ':LOSC-INJMASK'

    try:
        injdata = TimeSeries.read(str(filename), str(inj_chan))
        injmask = injdata.value
        injnamelist_bit = str(injdata.unit).split()
        injnamelist     = [name.split(':')[1] for name in injnamelist_bit]
    except:
        print("ERROR reading injection channel '{0}' from file: {1}".format(inj_chan, filename))
        raise

    return strain, gpsStart, ts, qmask, shortnameList, injmask, injnamelist
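# A sketch of calling this helper on a GWOSC-style frame file; the
# filename below is a hypothetical placeholder.
strain, gps_start, ts, qmask, dq_names, injmask, inj_names = read_frame(
    'H-H1_LOSC_4_V1-1126259446-32.gwf', 'H1')
print(gps_start, ts, dq_names)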
Example No. 3
def generate_fast_vco(ifo, segment, frames=False, fit=True):
    """
    Parameters:
    -----------
        ifo : str
            interferometer, e.g. 'L1'
        segment : array like
            time segment. first entry start second entry end
        frames : bool
            read from frames or nds2
        fit : bool
            fit from imc-f (default)
            or spline interpolation

    Returns:
    --------
        vco_data : TimeSeries
            VCO prediction, suitable for saving as
            'L1:IMC-VCO_PREDICTION-st-dur.hdf'
    """
    st = segment[0]
    et = segment[1]
    chan1_pat = '%s:SYS-TIMING_C_FO_A_PORT_11_SLAVE_CFC_FREQUENCY_5'
    chan2_pat = '%s:IMC-F_OUT_DQ'
    if frames:
        connection = datafind.GWDataFindHTTPConnection()
        cache = connection.find_frame_urls(
            ifo[0], '%s_R' % ifo, st, et + 1, urltype='file')
        if fit:
            imc = TimeSeries.read(cache, chan2_pat % ifo, st, et)
        else:
            imc = TimeSeries.read(cache, chan2_pat % ifo, st, st + 1)
        pslvco = TimeSeries.read(cache, chan1_pat % ifo, st, et + 1)
    else:
        if fit:
            imc = TimeSeries.fetch(chan2_pat % ifo, st, et)
        else:
            print('HI BEFORE LOADING IMC')
            imc = TimeSeries.fetch(chan2_pat % ifo, st, st + 1)
        print('HI BEFORE LOADING PSL')
        pslvco = TimeSeries.fetch(chan1_pat % ifo, st, et + 1)
        print('HI AFTER LOADING')

    pslvco = pslvco[16 + 8::16]

    if fit:
        imc_srate = int(imc.sample_rate.value)
        imc2 = imc[imc_srate // 2::imc_srate]
        data = np.array((imc2.value, pslvco.value)).T
        vco_interp = fit_with_imc(data, imc)
    else:
        vco_interp = interp_spline(pslvco)

    chan = "%s:IMC-VCO_PREDICTION" % (ifo,)
    vco_data = TimeSeries(vco_interp, epoch=st,
                          sample_rate=256,
                          name=chan, channel=chan)

    return vco_data
Example No. 4
def generate_fast_vco(ifo, segment, frames=False, fit=True):
    """
    Parameters:
    -----------
        ifo : str
            interferometer, e.g. 'L1'
        segment : array like
            time segment. first entry start second entry end
        frames : bool
            read from frames or nds2
        fit : bool
            fit from imc-f (default)
            or spline interpolation

    Returns:
    --------
        vco_data : TimeSeries
            VCO prediction, suitable for saving as
            'L1:IMC-VCO_PREDICTION-st-dur.hdf'
    """
    st = segment[0]
    et = segment[1]
    chan1_pat = '%s:SYS-TIMING_C_FO_A_PORT_11_SLAVE_CFC_FREQUENCY_5'
    chan2_pat = '%s:IMC-F_OUT_DQ'

    if frames:
        connection = datafind.GWDataFindHTTPConnection()
        cache = connection.find_frame_urls(
            ifo[0], '%s_R' % ifo, st, et + 1, urltype='file')
        if fit:
            imc = TimeSeries.read(cache, chan2_pat % ifo, st, et)
        else:
            imc = TimeSeries.read(cache, chan2_pat % ifo, st, st + 1)
        pslvco = TimeSeries.read(cache, chan1_pat % ifo, st, et + 1)
    else:
        if fit:
            imc = TimeSeries.fetch(chan2_pat % ifo, st, et)
        else:
            imc = TimeSeries.fetch(chan2_pat % ifo, st, st + 1)
        pslvco = TimeSeries.fetch(chan1_pat % ifo, st, et + 1)

    pslvco = pslvco[16 + 8::16]

    if fit:
        imc_srate = int(imc.sample_rate.value)
        imc2 = imc[imc_srate // 2::imc_srate]
        data = np.array((imc2.value, pslvco.value)).T
        vco_interp = fit_with_imc(data, imc)
    else:
        vco_interp = interp_spline(pslvco)

    chan = "%s:IMC-VCO_PREDICTION" % (ifo,)
    vco_data = TimeSeries(vco_interp, epoch=st,
                          sample_rate=256,
                          name=chan, channel=chan)

    return vco_data
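# A sketch of how generate_fast_vco might be invoked; the GPS segment
# values are placeholders, and the output filename follows the pattern
# named in the docstring (HDF5 format inferred from the extension).
segment = (1135136350, 1135136950)
vco = generate_fast_vco('L1', segment, frames=False, fit=True)
vco.write('L1-IMC-VCO_PREDICTION-%d-%d.hdf' % (segment[0], segment[1] - segment[0]))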
Example No. 5
def get_array2d(start,end,axis='X',prefix='./data',**kwargs):
    '''
    Get a 2-D spectrogram of seismometer data for the given span.
    If blrms is given, return the band-limited power timeseries
    (the spectrogram summed over that frequency band) instead.
    '''
    nproc = kwargs.pop('nproc',4)
    bandpass = kwargs.pop('bandpass',None)
    blrms = kwargs.pop('blrms',None)
    fftlen = kwargs.pop('fftlen',2**8)
    overlap = fftlen/2

    # check existence of the spectrogram data
    fname_hdf5 = fname_hdf5_asd(start,end,prefix,axis)
    if os.path.exists(fname_hdf5):
        specgram = Spectrogram.read(fname_hdf5)
        if blrms:
            timeseries = specgram.crop_frequencies(blrms[0],blrms[1]).sum(axis=1)
            return timeseries
        return specgram
    
    # If spectrogram does not exist, calculate it from timeseries data.
    try:
        fname = fname_gwf(start,end,prefix='./data')
        chname = get_seis_chname(start,end,axis=axis)
        # check existence of the timeseries data
        if os.path.exists(fname):
            data = TimeSeries.read(fname,chname,nproc=nproc)
        else:
            # when timeseries data does not exist
            fnamelist = existedfilelist(start,end)
            chname = get_seis_chname(start,end)
            datadict = TimeSeriesDict.read(fnamelist,chname,nproc=nproc)
            datadict = datadict.resample(32)
            datadict = datadict.crop(start,end)
            chname = get_seis_chname(start,end,axis=axis)
            datadict.write(fname,format='gwf.lalframe')
            data = TimeSeries.read(fname,chname,nproc=nproc)
            # If data broken, raise Error.
            if data.value.shape[0] != 131072:
                log.debug(data.value.shape)
                log.debug('####### {0} {1}'.format(start,end))
                raise ValueError('data broken')
    except:
        log.debug(traceback.format_exc())
        raise ValueError('!!!')

    # if data broken, raise Error.
    if data.value.shape[0] != 131072: # (131072 = 2**17 = 2**12[sec] * 2**5[Hz] )
        log.debug(data.value.shape)
        log.debug('!!!!!!!! {0} {1}'.format(start,end))
        raise ValueError('data broken')

    # calculate from timeseries data
    specgram = data.spectrogram2(fftlength=fftlen,overlap=overlap,nproc=nproc)
    specgram.write(fname_hdf5,format='hdf5',overwrite=True)
    return specgram
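# A usage sketch, assuming the helper functions above (fname_hdf5_asd,
# existedfilelist, get_seis_chname, ...) are importable; the GPS times
# and the frequency band are placeholders.
specgram = get_array2d(1211817600, 1211821696, axis='X')
blp = get_array2d(1211817600, 1211821696, axis='X', blrms=(0.1, 0.3))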
Example No. 6
    def channeling_read(out_channels: List[str], **kwargs) -> TimeSeriesDict:
        out = TimeSeriesDict()

        for channel in out_channels:
            for prefix in search_dirs:
                for in_channel in in_channels:
                    try:
                        # lock the target file
                        h5file, _ = path2h5file(get_path(
                            f'{in_channel} {generation_start}',
                            'hdf5',
                            prefix=prefix),
                                                mode='r')
                        # read off the dataset
                        out[channel] = TimeSeries.read(h5file, channel,
                                                       **kwargs)
                    except (FileNotFoundError, KeyError, OSError):
                        # file not found / hdf5 can't open file (OSError), channel not in file (KeyError)
                        continue
                    break
                else:
                    # the in_channel loop ended without a break:
                    # channel not found under this prefix, try the next one
                    continue
                # the inner break fired: channel was read, stop searching dirs
                break
            else:
                # tried all search dirs but didn't find it. Attempt to download.
                raise FileNotFoundError(f'CANNOT FIND {channel}!!')
                # out[channel] = TimeSeries.get(channel, **kwargs) # slow.
        return out
Example No. 7
 def test_ascii_read(self):
     fp = self.test_ascii_write(delete=False)
     try:
         ts = TimeSeries.read(fp)
     finally:
         if os.path.isfile(fp):
             os.remove(fp)
Example No. 8
def check_idq(cache, channel, start, end):
    """Looks for iDQ frame and reads them.

    Parameters
    ----------
    cache : :class:`glue.lal.Cache`
        Cache from which to check.
    channel : str
        which idq channel (pglitch)
    start, end: int or float
        GPS start and end times desired.

    Returns
    -------
    tuple
        Tuple mapping iDQ channel to its maximum P(glitch).

    Example
    -------
    >>> check_idq(cache, 'H1:IDQ-PGLITCH-OVL-100-1000',
                  1216496260, 1216496262)
    ('H1:IDQ-PGLITCH-OVL-100-1000', 0.87)
    """
    if cache:
        try:
            idq_prob = TimeSeries.read(cache, channel, start=start, end=end)
            return (channel, float(idq_prob.max()))
        except RuntimeError:
            log.exception('Failed to read from low-latency iDQ frame files')
    # FIXME: figure out how to get access to low-latency frames outside
    # of the cluster. Until we figure that out, actual I/O errors have
    # to be non-fatal.
    return (channel, None)
Example No. 9
def get_data(channel, start_time, length):
    print('Starting data transfer for channel: ' + str(channel))
    connection = datafind.GWDataFindHTTPConnection()
    cache = connection.find_frame_urls(ifo[0], ifo + '_R', start_time, start_time + length, urltype='file')
    data = TimeSeries.read(cache, channel)
    print('Got data for channel: ' + str(channel))
    return data
Example No. 10
def get_timeseries(chname,**kwargs):
    start = kwargs.pop('start',None)
    end = kwargs.pop('end',None)
    nds = kwargs.pop('nds',None)
    fname = to_gwffname(chname,**kwargs)

    if os.path.exists(fname):
        print('{0} exists.'.format(fname))
        print('Skip fetch from nds {0}'.format(fname))
        data = TimeSeries.read(fname,chname,start,end,
                                format='gwf.lalframe',
                                verbose=True,
                                pad=np.nan)
        return data
    elif nds and not os.path.exists(fname):
        print('{0} does not exist.'.format(fname))
        data = TimeSeries.fetch(chname, start, end,
                                verbose=True,
                                host='10.68.10.121', port=8088,
                                pad=np.nan,
                                verify=True,type=1,dtype=np.float32)
        data.write(fname,format='gwf.lalframe')
        print('wrote data in '+fname)
        return data
    else:
        print(nds)
        print(os.path.exists(fname))
        print(fname)
        raise ValueError('! Must use nds or load files')
Example No. 11
def generate_gauss_ifo_data(ifoparams, generalparams):
    # generate gaussian data
    asd1 = MagSpectrum.read(ifoparams['gw_noise_asd'])
    if asd1.f0.value != 0:
        raise ValueError('Initial frequency of supplied ASDs must be zero')

    # get psd
    psd1 = MagSpectrum(asd1.value**2,
                       df=asd1.df,
                       f0=asd1.f0,
                       name=ifoparams['name'] + ':gauss_data')
    fname = (generalparams['output_prefix'] + '/' + 'gaussian_frames/' +
             '%s-GAUSSIAN-DATA-%d-DAYS.gwf')
    ndays = float(generalparams['ndays'])
    # generate gaussian noise
    print('Generating noise for %s:' % ifoparams['name'])
    print('Writing it to ' + fname % (ifoparams['name'], float(ndays)))
    print('============================')
    ts1 = psd1.generate_gaussian_timeseries(float(ndays) * 86400,
                                            float(
                                                generalparams['sample_rate']),
                                            name=psd1.name)
    ts1.write(str(fname % (ifoparams['name'], ndays)))  # , overwrite=True)
    ts1_read = TimeSeries.read(str(fname % (ifoparams['name'], ndays)),
                               ifoparams['name'] + ':gauss_data')
    plot = ts1.asd(10).plot()
    ax = plot.gca()
    ax.plot(asd1)
    ax.plot(ts1_read.asd(10))
    plot.savefig('gauss_data_plot_test')
    print('>>>>>>> DONE GENERATING GAUSSIAN NOISE')
Example No. 12
def main(centerTime,duration,frameTypes,channelNames,detectors,rightascension,declination,FFTlength,sampleFrequency):

    #----- Start and stop time for this event.
    startTime = centerTime - duration / 2
    stopTime = centerTime + duration / 2

    # zip frameTypes and detectors, and channel names and detectors
    frameType   = dict(zip(detectors,frameTypes))
    channelName = dict(zip(detectors,channelNames))
    data        = dict()
    white_data  = dict()

    # Read in the data
    for iDet in detectors:
        connection = datafind.GWDataFindHTTPConnection()
        cache      = connection.find_frame_urls(iDet.strip('1'), frameType[iDet], startTime, stopTime, urltype='file')
        data[iDet] = TimeSeries.read(cache,channelName[iDet], format='gwf',start=startTime,end=stopTime)

    for (iDet, iSeries) in data.items():
        # resample data
        if iSeries.sample_rate.decompose().value != sampleFrequency:
            iSeries = iSeries.resample(sampleFrequency)
        asd = iSeries.asd(FFTlength, FFTlength/2., method='median-mean')
        # Apply ASD to the data to whiten it
        whitened = iSeries.whiten(FFTlength, FFTlength/2., asd=asd) 
        white_data[iDet] = whitened.fft()
Example No. 13
def main_compare_gif_gotic2():
    start = tconvert('Oct 15 2018 00:00:00')
    end = tconvert('Oct 21 2018 00:00:00')

    # gif data
    pfx = '/Users/miyo/Dropbox/KagraData/gif/'
    segments = GifData.findfiles(start, end, 'CALC_STRAIN', prefix=pfx)
    allfiles = [path for files in segments for path in files]
    strain = TimeSeries.read(source=allfiles,
                             name='CALC_STRAIN',
                             format='gif',
                             pad=numpy.nan,
                             nproc=2)
    strain = strain.detrend('linear')

    # gotic data
    source = '201805010000_201811010000.gotic'
    gifx = KagraGoticStrain.read(source, start=start, end=end).x
    gifx = gifx.detrend('linear')
    gifx = gifx * 0.9

    # plot
    plot = Plot(gifx, strain, xscale='auto-gps')
    plot.legend()
    plot.subplots_adjust(right=.86)
    plot.savefig('result.png')
    plot.close()
Example No. 14
 def load_data(self, gps_start, gps_end, ifos, fs, channel='GATED'):
     """
     return dict(ifo : gwStrain)
     """
     #print(self._contents)
     gps_seg = TimeSegment(gps_start, gps_end)
     ret = dict()
     for ifo in ifos:
         channel_name = f'{ifo}_{channel}'
          if channel_name in channel_dict:
             channel_ifo = channel_dict[channel_name]
         else:
             channel_ifo = channel_name
         value = np.array([])
         epoch = gps_start
         check_epoch = True
         for frame, timeseg, fname in self._iter_segment(ifo):
             seg = gps_seg.check_overlap(timeseg)
             if seg is None:
                 continue
             if check_epoch:
                 epoch = seg.start
                 check_epoch = False
             data = TimeSeries.read(fname, channel_ifo)
             srate = data.sample_rate.value
             idx = seg.get_idx(timeseg.start, srate)
             data = data[idx]
             if fs != srate:
                 data = resample(data, srate, fs)
             value = np.insert(value, len(value), data)
         ret[ifo] = gwStrain(value, epoch, ifo, fs, info=f'{ifo}_strain')
     return ret
Example No. 15
def load_data_manual(gpsstart, gpsend, ifo, channel, fs=4096):
    # This method will reset self.value & self.epoch & self.duration
    filelist, glist = find_data_path(ifo, gpsstart, gpsend)
    if len(filelist) == 0:
        return CEV.PROCESS_FAIL
    argsrt = np.argsort(np.asarray(glist))
    value = np.array([])
    epoch = int(gpsstart)
    for idx in argsrt:
        fname = filelist[idx]
        data = TimeSeries.read(fname, channel)
        srate = data.sample_rate.value
        gf0, gf1 = parse_datafile(fname.name)
        if epoch < gf0:
            dataidx0 = 0
            if len(value) == 0:
                epoch = gf0
        else:
            dataidx0 = int((epoch - gf0) * srate)
        if gpsend > gf1:
            dataidx1 = data.size
        else:
            dataidx1 = int((gpsend - gf0) * srate) + 1
        value = np.concatenate([value, data.value[dataidx0:dataidx1]])
    ret = gwStrain(value, epoch, ifo, srate, info=f'{ifo}_strain')
    if fs != srate:
        return ret.resample(fs)
    return ret
Example No. 16
def raw_to_bandpass(strain_file, t_event, detector, bp_lo, bp_hi):
    strain = TimeSeries.read(strain_file, format='hdf5.losc')
    center = int(t_event)
    strain = strain.crop(center - 16, center + 16)
    white_data = strain.whiten()
    white_data_bp = white_data.bandpass(bp_lo, bp_hi)

    return white_data_bp
Example No. 17
def calibrate_imc_pslvco(ifo, start_time, dur, cache=None):
    st, et = start_time, start_time + dur
    if cache:
        pslvco = TimeSeries.read(cache, chan1_pat % ifo, start=st, end=et)
        imc = TimeSeries.read(cache, chan2_pat % ifo, start=st, end=et)
    else:
        imc = TimeSeries.fetch(chan2_pat % ifo, st, et)
        pslvco = TimeSeries.fetch(chan1_pat % ifo, st, et)

    arr_psl = pslvco[8::16]
    arr_imc = imc

    tmp1 = (arr_imc[8192::16384])[:-1]
    tmp2 = arr_psl[1:]
    a, b = numpy.polyfit(tmp1, tmp2, 1)

    return a, b
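# The returned (a, b) are the linear-fit coefficients mapping decimated
# IMC-F samples to the PSL VCO frequency. A hypothetical downstream use,
# assuming chan1_pat/chan2_pat are defined as in the surrounding module
# and imc_value holds new IMC-F data:
a, b = calibrate_imc_pslvco('L1', 1135136350, 600)
vco_prediction = a * imc_value + b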
Example No. 18
def psd():
    h5path = os.path.join(os.path.dirname(__file__), 'data',
                          'HLV-HW100916-968654552-1.hdf')
    try:
        data = TimeSeries.read(h5path, 'L1:LDAS-STRAIN', format='hdf5')
    except ImportError as e:
        pytest.skip(str(e))
    return data.psd(.4, overlap=.2, window=('kaiser', 24))
Example No. 19
def calibrate_imc_pslvco(ifo, start_time, dur, cache=None):
    st, et = start_time, start_time + dur
    if cache:
        pslvco = TimeSeries.read(cache, chan1_pat % ifo, start=st, end=et)
        imc = TimeSeries.read(cache, chan2_pat % ifo, start=st, end=et)
    else:
        imc = TimeSeries.fetch(chan2_pat % ifo, st, et)
        pslvco = TimeSeries.fetch(chan1_pat % ifo, st, et)

    arr_psl = pslvco[8::16]
    arr_imc = imc

    tmp1 = (arr_imc[8192::16384])[:-1]
    tmp2 = arr_psl[1:]
    a, b = numpy.polyfit(tmp1, tmp2, 1)

    return a, b
Example No. 20
def psd():
    h5path = os.path.join(os.path.dirname(__file__), 'data',
                          'HLV-GW100916-968654552-1.hdf')
    try:
        data = TimeSeries.read(h5path, 'L1:LDAS-STRAIN', format='hdf5')
    except ImportError as e:
        pytest.skip(str(e))
    return data.psd(.4, overlap=.2, window=('kaiser', 24))
Example No. 21
 def append(self, filename, duration):
     try:
         data = TimeSeries.read(filename, self._channel)
         self._value = np.concatenate([self._value, data])
     except Exception:
         # on a failed read, pad with zeros and count the gap as broken time
         data = np.zeros(self._fs * duration)
         self._value = np.concatenate([self._value, data])
         self._broken_time += duration
     self._duration += duration
Example No. 22
def Grab_Sfiles(start, end, run, channel, frame):
    """Return a list of Time Series pulled from files stored in this script's directory.
    Arguments:
    run -- String representing an observing run (e.g. O1, O2, O3a, etc.).
    Otherwise as for Grab_Series().
    
    Returns:
    As for Grab_Series()."""
    modesl = []
    entries = os.listdir('Local_Data/{}/{}'.format(run, channel))
    for ends in Seg_Split(start, end, frame):
        for entry in entries:
            file = 'Local_Data/{}/{}/{}'.format(run, channel, entry)
            series = TimeSeries.read(file, channel)
            if series.times[0].value<=ends[0]<series.times[-1].value:
                modes = TimeSeries.read(file, channel, ends[0], ends[-1], verbose=True)
                modesl.append(modes)
                break
    return modesl
Example No. 23
def read_gif(chname, start, end, write=False):
    segments = GifData.findfiles(start, end, chname, prefix=pfx)
    allfiles = [path for files in segments for path in files]
    data = TimeSeries.read(source=allfiles,
                           name=chname,
                           format='gif',
                           pad=np.nan,
                           nproc=2)
    data.name = chname
    return data
Example No. 24
def load_inject_condition(t_i, t_f, t_inj, inj_type, inj_params=None, local=False, Tc=16, To=2, fw=2048, window='tukey', detector='H', 
						  qtrans=False, qsplit=False, dT=2.0, hp=None, save=False, data_path=None):
	"""Fucntion to load a chunk, inject a waveform and condition, created to enable parallelizing.
	"""
	if local:
		files = get_files(detector)
		try:
			data = TimeSeries.read(files, start=t_i, end=t_f, format='hdf5.losc') # load data locally
		except:
			return

	else:
		# load data from losc
		try:
			data = TimeSeries.fetch_open_data(detector + '1', *(t_i, t_f), sample_rate=fw, verbose=False, cache=True)
		except:
			return

	if np.isnan(data.value).any():
		return

	wf_times = data.times.value

	if inj_type == 'ccsn':
		shift = int((t_inj - (wf_times[0] + Tc/2)) * fw)
		hp = np.roll(hp.value, shift)
		
		hp = TimeSeries(hp, t0=wf_times[0], dt=data.dt)
		try:
			hp = hp.taper()
		except:
			pass

		injected_data = data.inject(hp)

	else:
		injected_data = inject(data, t_inj, inj_type, inj_params)

	cond_data = condition_data(injected_data, To, fw, window, qtrans, qsplit, dT)

	x = []
	times = []

	for dat in cond_data:
		x.append(dat.values)
		times.append(dat.t0)

	x = np.asarray(x)
	times = np.asarray(times)

	idx = find_closest_index(t_inj, times)

	x = x[idx]
	times = times[idx]
	return x, times
Example No. 25
def dump_calibrated_data(fname):
    data = numpy.load(fname)

    # Figure out the times covered by the file from the filename
    # I should start using HDF5 so I can store metadata
    temp = fname.split('.')[0]
    temp = temp.split('-')
    ifo = temp[0]
    st, dur = int(temp[-2]), int(temp[-1])
    et = st + dur

    maxidx = len(data)
    width = 45

    weights = 1. - ((numpy.arange(-width, width) / float(width))**2)

    # The VCO frequencies are integers so we could dither them
    # to avoid quantization error if we wanted to be fancy
    # but it seems to make no difference
    if False:
        from numpy.random import triangular
        data[:, 1] += triangular(-1., 0., 1., size=len(data))

    # Just fit the whole thing at once, to get a single coefficient
    a, b = numpy.polyfit(data[:, 0], data[:, 1], 1)
    print "%.1f %u" % (a, b)

    # Slide through the data fitting PSL to IMC for data around each sample
    coeffs = []
    for idx in range(maxidx):
        idx1 = max(0, idx - width)
        idx2 = min(idx + width, maxidx)
        coeffs.append(numpy.polyfit(data[idx1:idx2, 0], data[idx1:idx2, 1], 1,
                                    w=weights[idx1 - idx + width:idx2 - idx + width]))
    coeffs = numpy.array(coeffs)
    times = numpy.arange(len(coeffs)) + 0.5
    connection = datafind.GWDataFindHTTPConnection()
    cache = connection.find_frame_urls(
        ifo[0], '%s_R' % ifo, st, et, urltype='file')

    imc = TimeSeries.read(cache, "%s:IMC-F_OUT_DQ" % ifo, st, et)
    imc = imc[::16384 / 256]
    print(imc)
    samp_times = numpy.arange(len(imc)) / 256.

    coeffs0 = numpy.interp(samp_times, times, coeffs[:, 0])
    coeffs1 = numpy.interp(samp_times, times, coeffs[:, 1]) - 7.6e7

    vco_interp = coeffs0 * imc.data + coeffs1

    chan = "%s:IMC-VCO_PREDICTION" % (ifo,)
    vco_data = TimeSeries(vco_interp, epoch=st,
                          sample_rate=imc.sample_rate.value,
                          name=chan, channel=chan)
    vco_data.write("%s-vcoprediction-%u-%u.hdf" % (ifo, st, dur), format='hdf')
Example No. 26
def get_open_strain_data(
        name, start_time, end_time, outdir, cache=False, buffer_time=0, **kwargs):
    """ A function which accesses the open strain data

    This uses `gwpy` to download the open data and then saves a cached copy for
    later use

    Parameters
    ==========
    name: str
        The name of the detector to get data for
    start_time, end_time: float
        The GPS time of the start and end of the data
    outdir: str
        The output directory to place data in
    cache: bool
        If true, cache the data
    buffer_time: float
        Time to add to the beginning and end of the segment.
    **kwargs:
        Passed to `gwpy.timeseries.TimeSeries.fetch_open_data`

    Returns
    =======
    strain: gwpy.timeseries.TimeSeries
        The object containing the strain data. If the connection to the open-data server
        fails, this function returns `None`.

    """
    from gwpy.timeseries import TimeSeries
    filename = '{}/{}_{}_{}.txt'.format(outdir, name, start_time, end_time)

    if buffer_time < 0:
        raise ValueError("buffer_time < 0")
    start_time = start_time - buffer_time
    end_time = end_time + buffer_time

    if os.path.isfile(filename) and cache:
        logger.info('Using cached data from {}'.format(filename))
        strain = TimeSeries.read(filename)
    else:
        logger.info('Fetching open data from {} to {} with buffer time {}'
                    .format(start_time, end_time, buffer_time))
        try:
            strain = TimeSeries.fetch_open_data(name, start_time, end_time, **kwargs)
            logger.info('Saving cache of data to {}'.format(filename))
            strain.write(filename)
        except Exception as e:
            logger.info("Unable to fetch open data, see debug for detailed info")
            logger.info("Call to gwpy.timeseries.TimeSeries.fetch_open_data returned {}"
                        .format(e))
            strain = None

    return strain
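# A sketch of calling this helper; the detector name, GPS times, and
# output directory are example values.
strain = get_open_strain_data('H1', 1126259446, 1126259478,
                              outdir='/tmp', cache=True, buffer_time=1)
if strain is not None:
    print(strain.sample_rate)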
Example No. 27
def make_omegascan(ifo, t0, durs):
    """Helper function to create a single omegascan image, with
    multiple durations.

    Parameters
    ----------
    ifo : str
        'H1', 'L1', or 'V1'
    t0 : int or float
        Central time of the omegascan.
    durs : list of floats/ints
        List of three durations which will be scanned symmetrically about t0.
        Example: [0.5, 2, 10]

    Returns
    -------
    bytes or None
        bytes of png of the omegascan, or None if no omegascan created.

    """
    # Explicitly use a non-interactive Matplotlib backend.
    plt.switch_backend('agg')

    # Collect data
    longest = max(durs)
    long_start, long_end = t0 - longest, t0 + longest
    cache = create_cache(ifo, long_start, long_end)
    strain_name = app.conf['strain_channel_names'][ifo]
    try:
        ts = TimeSeries.read(cache, strain_name,
                             start=long_start, end=long_end).astype('float64')
        # Do q_transforms for the different durations
        qgrams = [ts.q_transform(
            frange=(20, 4096), gps=t0, outseg=(t0 - dur, t0 + dur), logf=True)
            for dur in durs]
    except (IndexError, FloatingPointError, ValueError):
        # data from cache can't be properly read, or data is weird
        fig = plt.figure()
        plt.axis("off")
        plt.text(0.1, 0.45, f"Failed to create {ifo} omegascan", fontsize=17)
    else:
        fig = Plot(*qgrams,
                   figsize=(10 * len(durs), 5),
                   geometry=(1, len(durs)),
                   yscale='log',
                   method='pcolormesh')
        for ax in fig.axes:
            fig.colorbar(ax=ax, label='Normalized energy', clim=(0, 30))
            ax.set_epoch(t0)
        fig.suptitle(f'Omegascans of {strain_name} at {t0}', fontweight="bold")

    outfile = io.BytesIO()
    fig.savefig(outfile, format='png', dpi=300)
    return outfile.getvalue()
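# make_omegascan returns raw PNG bytes, so a caller might write them
# straight to disk; the GPS time below is a placeholder.
png = make_omegascan('L1', 1187008882, [0.5, 2, 10])
if png is not None:
    with open('omegascan.png', 'wb') as f:
        f.write(png)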
Example No. 28
 def test_hdf5_read(self):
     try:
         hdfout = self.test_hdf5_write(delete=False)
     except ImportError as e:
         raise unittest.SkipTest(str(e))
     else:
         try:
             ts = TimeSeries.read(hdfout, 'TEST CASE')
         finally:
             if os.path.isfile(hdfout):
                 os.remove(hdfout)
Example No. 29
    def set_from_csv(self, filename):
        """ Set the strain data from a csv file

        Parameters
        ==========
        filename: str
            The path to the file to read in

        """
        from gwpy.timeseries import TimeSeries
        timeseries = TimeSeries.read(filename, format='csv')
        self.set_from_gwpy_timeseries(timeseries)
Example No. 30
    def generate_noise_from_file(cls,
                                 file_name,
                                 event_time,
                                 block_time,
                                 channel_names,
                                 sample_frequency,
                                 verbose=False):
        """Obtain data for either on source, off source, or injections.

        This uses the gwpy `TimeSeriesDict.get` method

        Parameters
        ----------
        event_time : (`float`)
            trigger time of event to be processed

        block_time : (`int`)
            length of data to be processed

        channel_names (`list`) :
            required data channels.

        sample_frequency (`int`):
            sample rate of the data desired

        verbose : `bool`, optional
            print verbose output about NDS progress.

        Returns
        -------
            `TimeSeriesDict`
        """
        #----- Start and stop time for this event.
        start_time = event_time - block_time / 2
        stop_time = event_time + block_time / 2

        # Retrieve data and then resample and set epoch
        data = {}
        for det in channel_names:
            data[det] = TimeSeries.read(file_name,
                                        path='/noise/aLIGOZeroDetHighPower')

        data = XTimeSeries(data)

        for (idet, iseries) in data.items():
            # set epoch of timeseries to the event_time
            iseries.t0 = start_time

        # set one of the detectors to be the reference detector
        # for any future coherent combinations

        return data
Example No. 31
def get_spectrogram(start, end, axis='X', seis='EXV', **kwargs):
    ''' Get Spectrogram    

    Parameters
    ----------
    start : `int`
        start GPS time.
    end : `int`
       end GPS time.

    Returns
    -------
    specgram : `gwpy.spectrogram.Spectrogram`
        spectrogram.
    '''
    nproc = kwargs.pop('nproc', 3)
    bandpass = kwargs.pop('bandpass', None)
    fftlen = kwargs.pop('fftlen', 2**8)
    diff = kwargs.pop('diff', False)
    fs = kwargs.pop('fs', 256)
    fname_hdf5 = fname_specgram(start, end, prefix=seis, axis=axis)

    # Load specgram from hdf5 file
    if os.path.exists(fname_hdf5):
        specgram = Spectrogram.read(fname_hdf5, format='hdf5')
        return specgram

    # If no file, make specgram from timeseries data
    try:
        chname = get_seis_chname(start, end, axis=axis, seis=seis)[0]
        fnamelist = existedfilelist(start, end)
        data = TimeSeries.read(fnamelist, chname, nproc=nproc)
        data = data.resample(fs)
        data = data.crop(start, end)
    except:
        log.debug(traceback.format_exc())
        raise ValueError('!!! {0} {1}'.format(start, end))

    # calculate specgram
    specgram = data.spectrogram2(fftlength=fftlen,
                                 overlap=fftlen / 2,
                                 nproc=nproc)
    try:
        fname_dir = '/'.join(fname_hdf5.split('/')[:4])
        if not os.path.exists(fname_dir):
            os.makedirs(fname_dir)
        specgram.write(fname_hdf5, format='hdf5', overwrite=True)
        log.debug('Make {0}'.format(fname_hdf5))
    except:
        log.debug(traceback.format_exc())
        raise ValueError('!!!')
    return specgram
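# A usage sketch; the GPS times are placeholders and the helpers above
# (get_seis_chname, existedfilelist, fname_specgram) are assumed importable.
# Taking the square root of the 50th-percentile power gives a median ASD.
specgram = get_spectrogram(1211817600, 1211821696, axis='X', seis='EXV')
median_asd = specgram.percentile(50) ** 0.5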
Example No. 32
def _read_data(channel, st, et, frames=False):
    """
    get data, either from frames or from nds2
    """

    ifo = channel.split(':')[0]
    if frames:
        # read from frames
        connection = datafind.GWDataFindHTTPConnection()
        print(ifo[0])
        if channel.split(':')[1] == 'GDS-CALIB_STRAIN':
            cache = connection.find_frame_urls(ifo[0], ifo + '_HOFT_C00', st, et, urltype='file')
        else:
            cache = connection.find_frame_urls(ifo[0], ifo + '_C', st, et, urltype='file')
        try:
            data = TimeSeries.read(cache, channel, st, et)
        except IndexError:
            cache = connection.find_frame_urls(ifo[0], ifo+'_R', st, et, urltype='file')
            data = TimeSeries.read(cache, channel, st, et)
    else:
        data = TimeSeries.fetch(channel, st, et)

    return data
Example No. 33
def get_primary_ts(channel, start, end, filepath=None,
                   frametype=None, cache=None, nproc=1):
    """Retrieve primary channel timeseries
    by either reading a .gwf file or querying
    """
    if filepath is not None:
        LOGGER.info('Reading primary channel file')
        return TimeSeries.read(filepath, channel=channel, start=start, end=end,
                               format='gwf', nproc=nproc)
    else:
        LOGGER.info('Querying primary channel')
        return get_data(channel, start, end,
                        verbose='Reading primary:'.rjust(30),
                        frametype=frametype, source=cache, nproc=nproc)
Example No. 34
def get_lsd(cache,
            channel,
            GPSstart,
            duration,
            stride,
            overlap,
            pltflg=0,
            filename='',
            yunit=''):
    '''
    This function calculates the linear spectral density.
    If pltflg==1, makes a plot.
    Return: lsd, freq, mu (mean value)
    '''
    data = TimeSeries.read(cache,
                           channel,
                           GPSstart,
                           GPSstart + duration,
                           format='lalframe')
    fs = 1. / data.dt.value  # sampling frequency
    mu = np.mean(data.value)  # mean value of data
    # Define parameters for FFT
    st = stride  # FFT stride in seconds
    ov = st * overlap  # overlap in seconds
    nfft = int(st * fs)
    freq, t, Sxx = signal.spectrogram(data.value - mu,
                                      window=signal.hann(nfft),
                                      nperseg=nfft,
                                      fs=fs,
                                      noverlap=int(ov * fs))
    lspg = np.sqrt(Sxx)
    lsd = np.mean(lspg, axis=1)
    if pltflg == 1:
        plt.figure()
        plt.loglog(freq, lsd)
        plt.grid(True)
        plt.title(channel)
        plt.xlim(freq[1:].min(), freq[1:].max())
        plt.xlabel('Frequency [Hz]')
        plt.ylabel('LSD [' + yunit + '/rtHz]')
        plt.tight_layout()
        fname = filename
        plt.savefig(fname)
        plt.close()
    return lsd, freq, mu
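# A sketch of calling get_lsd; 'cache' stands for a frame cache or file
# path readable with format='lalframe', and the channel, GPS time, and
# units are placeholders.
lsd, freq, mu = get_lsd(cache, 'L1:IMC-F_OUT_DQ', 1135136350, 4096,
                        stride=8, overlap=0.5, pltflg=1,
                        filename='lsd.png', yunit='counts')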
Example No. 35
def get_spectrogram(cache,
                    channel,
                    GPSstart,
                    duration,
                    stride,
                    overlap,
                    pltflg=0,
                    filename='',
                    rtnflg=0):
    '''
    This function calculates a linear spectrogram.
    If pltflg==1, makes a plot.
    If rtnflg==1, returns lspg, t, freq, mu
    '''
    data = TimeSeries.read(cache,
                           channel,
                           GPSstart,
                           GPSstart + duration,
                           format='lalframe')
    fs = 1. / data.dt.value  # sampling frequency
    mu = np.mean(data.value)  # mean value of data
    # Define parameters for FFT
    st = stride  # FFT stride in seconds
    ov = st * overlap  # overlap in seconds
    nfft = int(st * fs)
    freq, t, Sxx = signal.spectrogram(data.value - mu,
                                      window=signal.hann(nfft),
                                      nperseg=nfft,
                                      fs=fs,
                                      noverlap=int(ov * fs))
    lspg = np.sqrt(Sxx)
    if pltflg == 1:
        T, FREQ = np.meshgrid(t, freq)
        fig2, ax = plt.subplots()
        pc = ax.pcolor(T, FREQ, 10 * np.log10(lspg), cmap='rainbow')
        cb = fig2.colorbar(pc)
        fname = filename
        plt.savefig(fname)
        plt.close()
    if rtnflg == 1:
        return lspg, t, freq, mu
Example No. 36
def hoge(gps):
    if np.isnan(gps):
        return None
    from gwpy.time import tconvert
    #print tconvert(gps)
    from gwpy.timeseries import TimeSeries
    from glue.lal import Cache
    channel = 'K1:PEM-EXV_SEIS_Z_SENSINF_OUT_DQ'
    start = gps #- 5*60
    end = gps + 30*60
    gwf_cache = 'full_Sep01-Nov01.cache'
    with open(gwf_cache, 'r') as fobj:
        cache = Cache.fromfile(fobj)
    #print cache
    data = TimeSeries.read(cache,channel,start=start,end=end,verbose=True,nproc=2,pad=np.nan,format='gwf.lalframe')    
    plot = data.plot(
        title = 'hoge'
        #ylabel='Strain amplitude',
    )
    plot.savefig('{0}.png'.format(gps))
    plot.close()
Example No. 37
parser.add_option("--condor-cluster", type="float",help="Cluster ID")
parser.add_option("--proc", type="float", help="Job ID")
(opts, args) = parser.parse_args()

starttime = opts.starttime
endtime   = opts.endtime
h0_max    = opts.h0
wm = 'all/fg500'

#-------- Importing, filtering and timeshifting data ----------#
durationH    = endtime - starttime
oldstarttime = starttime
oldendtime   = endtime
Xspacing     = 2.44140625E-4
pathtoinput  = "/home/spxha/"
strainH      = TimeSeries.read(pathtoinput+'S6framesH1.lcf',channel='H1:LDAS-STRAIN', start=starttime, end=endtime)
strainL      = TimeSeries.read(pathtoinput+'S6framesL1.lcf',channel='L1:LDAS-STRAIN', start=starttime, end=endtime)
num_points   = int(durationH/Xspacing)
h0_min       = 0.0000001
h0_vals_num  = 30
#----------------------------
# Applying a bandpass filter
#----------------------------
coefsL  = get_filter_coefs('L1')
coefsH  = get_filter_coefs('H1')
strainL = filter_data(strainL,coefsL)
strainH = filter_data(strainH,coefsH)
timeH   = np.arange(starttime, endtime, Xspacing)
timeL   = timeH
detMap  = {'H1': lal.LALDetectorIndexLHODIFF,
           'L1': lal.LALDetectorIndexLLODIFF}
Example No. 38
def validity_test(starttime, endtime, h0_max=0.001):
	#------- Packages ---------#
	print 'Importing packages'
	import numpy as np
	import astropy, gwpy, h5py, lal, sys, os
	from astropy.coordinates import get_sun
	import astropy.time as Time
	from scipy.signal import butter, filtfilt
	from gwpy.timeseries import TimeSeries
	from antres import antenna_response as ant_res
	from scipy.misc import logsumexp
	from notchfilt import get_filter_coefs, filter_data
	from optparse import OptionParser

	#-------- Importing, filtering and timeshifting data ----------#
	print 'Reading data'
	durationH = endtime - starttime 	# same for durationL, but not used anyway
	oldstarttime = starttime  			# for use later
	oldendtime = endtime 				# for use later
	Xspacing = 2.44140625E-4
	gpsTime = np.linspace(starttime,endtime,int(1/Xspacing))
	pathtoinput = "/home/spxha/"
	strainH = TimeSeries.read(pathtoinput+'S6framesH1.lcf',channel='H1:LDAS-STRAIN', start=starttime, end=endtime)
	strainL = TimeSeries.read(pathtoinput+'S6framesL1.lcf',channel='L1:LDAS-STRAIN', start=starttime, end=endtime)
	num_points = int(durationH/Xspacing)
	h0_min=0.00005
	h0_vals_num=30

	# Adding in a fake signal
	# frequency in the middle of the range we're interested in 125 Hz
	omega = 250.0*np.pi
	amplitude = 0.5e-24 # (hopefully) middle of h0 values
	# the signal to introduce is of the form amplitude*np.sin(omega*t)
	# first get timeL and timeH in synch with the sun's position

	print 'Getting the detectors in sync'
	timeH = np.arange(starttime, endtime, Xspacing)
	timeL = timeH
	detMap = {'H1': lal.LALDetectorIndexLHODIFF, 'L1':
	lal.LALDetectorIndexLLODIFF}
	detH1 = lal.CachedDetectors[detMap['H1']]
	detL1 = lal.CachedDetectors[detMap['L1']]
	tgps = lal.LIGOTimeGPS(starttime, 0)

	#---------- Get right ascension and declination of source in radians ----------#
	numseg30 = int((endtime-starttime)/30.)
	seg30 = starttime + 30*np.linspace(1,numseg30,numseg30) # 30 second update rate
	tdelay = [[0] for _ in range(numseg30)]
	for i in range(numseg30-1):
		for j in range(int(len(timeL)*Xspacing)):
			if ((timeL[int(j/Xspacing)]>seg30[i])&(timeL[int(j/Xspacing)]<seg30[i+1])):
				coordstime=seg30[i]
				coords = get_sun(Time.Time(coordstime,format='gps'))
				tdelay[i] = lal.ArrivalTimeDiff(detH1.location, detL1.location, coords.ra.hour*np.pi/12, coords.dec.hour*np.pi/12, tgps)
	
	coordstime = seg30[-1]
	coords     = get_sun(Time.Time(coordstime,format='gps'))
	tdelay[-1] = lal.ArrivalTimeDiff(detH1.location, detL1.location, coords.ra.hour*np.pi/12, coords.dec.hour*np.pi/12, tgps)
	tdelay     = np.array(tdelay)
	tdelay     = np.repeat(tdelay,int(30/Xspacing))

	# make sure tdelay and timeL are of same length in case integer-ing caused slight inconsistency.
	b = np.ones(len(timeL)-len(tdelay))*tdelay[-1]
	tdelay = np.append(tdelay,b)
	timeL = timeL - tdelay
	

	#------------ Down-sampling ------------#
	print 'Down-sampling'
	Xspacing = Xspacing*8
	num_points = int(durationH/Xspacing)
	newtimeL, newtimeH, newstrainH, newstrainL, newtdelay = [[0 for _ in range(num_points)] for _ in range(5)]
	for i in range(num_points):
		j = 8*i + 4
		newstrainH[i] = np.mean(strainH[j-4:j+4])
		newstrainL[i] = np.mean(strainL[j-4:j+4])
		newtimeH[i]   = timeH[j]
		newtimeL[i]   = timeL[j]
		newtdelay[i]  = tdelay[j]

	newstrainH = newstrainH[76800:len(newstrainH)]
	newstrainL = newstrainL[76800:len(newstrainL)]
	newtimeH   = newtimeH[76800:len(newtimeH)]
	newtimeL   = newtimeL[76800:len(newtimeL)]
	newtdelay = newtdelay[76800:len(newtdelay)]
	starttime  = starttime + 150
	durationH  = int(newtimeH[-1]) - int(newtimeH[0])
	num_points = int(durationH/Xspacing)
	del timeL, timeH, tdelay, strainH, strainL
	#----------- add in fake signal ------------#
	newtimeH = newtimeH[0:num_points]
	newtimeL = newtimeL[0:num_points]
	newtdelay = newtdelay[0:num_points]
	print 'Insert fake signal'
	numseg = int((durationH)/600)
	segs = np.linspace(0,numseg,numseg+1)*600
	segs = segs + newtimeL[0]
	psi1 = np.pi/4
	ra,dec,fp,fc = [[0 for _ in range(numseg+1)] for _ in range(4)]
	for i in range(numseg+1):
		coordstime = segs[i]
		coords = get_sun(Time.Time(coordstime,format='gps'))
		ra[i] = coords.ra.hour*np.pi/12
		dec[i] = coords.dec.hour*np.pi/12
	FcX0, FpX0, FcY0, FpY0 = [[0 for _ in range(num_points)] for _ in range(4)]
	for i in range(num_points):
		FpX0[i], FcX0[i] = ant_res(gpsTime[int(i*Xspacing/600.)], ra[int(i*Xspacing/600.)], dec[int(i*Xspacing/600.)], 0, 'H1')
		FpY0[i], FcY0[i] = ant_res(gpsTime[int(i*Xspacing/600.)], ra[int(i*Xspacing/600.)], dec[int(i*Xspacing/600.)], 0, 'L1')
	cos2pi1 = np.cos(2*psi1)
	sin2pi1 = np.sin(2*psi1)
	fakeH, fakeL, FpX, FcX, FpY, FcY = [[0 for _ in range(num_points)] for _ in range(6)]
	for i in range(num_points):
		FpX[i] = FpX0[i]*cos2pi1 + FcX0[i]*sin2pi1
		FcX[i] = FcX0[i]*cos2pi1 - FpX0[i]*sin2pi1
		FpY[i] = FpY0[i]*cos2pi1 + FcY0[i]*sin2pi1
		FcY[i] = FcY0[i]*cos2pi1 - FpY0[i]*sin2pi1
	for i in range(len(newtimeH)):
		fakeH[i] = amplitude*(FpX[i]*np.sin(omega*newtimeH[i]) + FcX[i]*np.cos(omega*newtimeH[i]))
		fakeL[i] = amplitude*(FpY[i]*np.sin(omega*newtimeL[i]) + FcY[i]*np.cos(omega*newtimeL[i]))
	tdelayidx  = [0 for _ in range(len(newtdelay))]
	for i in range(len(newtdelay)):
		tdelayidx[i] = int(newtdelay[i]/Xspacing)  # index shift = delay / sample spacing
	idxmax= np.max(tdelayidx)
	print len(newtdelay), len(newstrainL)
	newstrainL0,newstrainH0 = [[0 for _ in range(len(newtdelay)-idxmax)] for _ in range(2)]
	for i in range(idxmax,len(newtdelay)):
		newstrainL0[i-idxmax]=newstrainL[i-tdelayidx[i]]
	newstrainH0=newstrainH[0:len(newstrainL0)]

	newstrainH0 = newstrainH0 + fakeH
	newstrainL0 = newstrainL0 + fakeL
	del newstrainL, newstrainH, fakeL, fakeH
	# H1 and L1 are now in sync and filtered between 100 and 150 Hz.
	del FcX,FcX0,FpX,FpX0,FcY,FcY0,FpY,FpY0
	#----- Applying a bandpass filter -----#
	print 'Filtering data'
	coefsL = get_filter_coefs('L1')
	coefsH = get_filter_coefs('H1')
	newstrainL0 = np.array(newstrainL0)
	newstrainH0 = np.array(newstrainH0)
	newstrainL = filter_data(newstrainL0,coefsL)
	newstrainH = filter_data(newstrainH0,coefsH)
	del coefsL, coefsH
	############################################################
	#------------ Finding probability distribution ------------#
	#   ------------ Defining some stuff for p -------------   #
	print 'Finding likelihood Part 1/2'
	psi_array = np.linspace(0,np.pi,10)
	dpsi = psi_array[1]-psi_array[0]
	sigmaA = 10.0
	h0min = h0_min*np.std(newstrainH)
	h0max = h0_max*np.std(newstrainH)
	h0_array = np.linspace(h0min,h0max,h0_vals_num)
	invSigma0 = np.array([[(1./sigmaA**2), 0.], [0., (1./sigmaA**2)]])
	detSigma0 = sigmaA**4
	dX = newstrainH
	dY = newstrainL
	del newstrainH, newstrainL, newstrainH0, newstrainL0, newtimeL, newtimeH
	FcX0, FpX0, FcY0, FpY0 = [[0 for _ in range(num_points)] for _ in range(4)]
	for i in range(num_points):
		FpX0[i], FcX0[i] = ant_res(gpsTime[int(i*Xspacing/600.)], ra[int(i*Xspacing/600.)], dec[int(i*Xspacing/600.)], 0, 'H1')
		FpY0[i], FcY0[i] = ant_res(gpsTime[int(i*Xspacing/600.)], ra[int(i*Xspacing/600.)], dec[int(i*Xspacing/600.)], 0, 'L1')
	p = [0  for _ in range(len(h0_array))]
	ppsi = [0 for _ in range(len(psi_array))]
	logdpsi_2 = np.log(0.5*dpsi)

	cos2pi, sin2pi = [[0 for _ in range(len(psi_array))] for _ in range(2)]
	FpX, FcX, FpY, FcY = [[[0 for _ in range(num_points)] for _ in range(len(psi_array))] for _ in range(4)]
	for k in range(len(psi_array)):
		cos2pi[k] = np.cos(2*psi_array[k])
		sin2pi[k] = np.sin(2*psi_array[k])
		for i in range(num_points):
			FpX[k][i] = FpX0[i]*cos2pi[k] + FcX0[i]*sin2pi[k]
			FcX[k][i] = FcX0[i]*cos2pi[k] - FpX0[i]*sin2pi[k]
			FpY[k][i] = FpY0[i]*cos2pi[k] + FcY0[i]*sin2pi[k]
			FcY[k][i] = FcY0[i]*cos2pi[k] - FpY0[i]*sin2pi[k]
	print 'Finding likelihood Part 2/2. This will take a while... '
	for i in range(num_points):
		d = np.array([dX[i], dY[i]])
		d.shape = (2,1)
		if (i + int(60/Xspacing)<num_points):
			int1 = i + int(60/Xspacing)
		else:
			int1 = i
		if (i - int(60/Xspacing)>0):
			int0 = i - int(60/Xspacing)
		else:
			int0 = 0
		sigmaX = np.std(dX[int0:int1])
		sigmaY = np.std(dY[int0:int1])
		C = np.array([[sigmaX**2, 0.], [0., sigmaY**2]])
		invC = np.array([[(1./sigmaX**2), 0.], [0., (1/sigmaY**2)]])
		detC = sigmaX**2 * sigmaY**2
		for j in range(len(h0_array)):
			for k in range(len(psi_array)):
				M = h0_array[j]*np.array([[FpX[k][i], FpY[k][i]], [FcX[k][i], FcY[k][i]]])
				M = np.array([[M[0][0][0],M[0][1][0]],[M[1][0][0], M[1][1][0]]])
				invSigma = np.dot(M.T, np.dot(invC, M)) + invSigma0
				Sigma = np.linalg.inv(invSigma)
				detSigma = np.linalg.det(Sigma)
				chi = np.dot(Sigma, np.dot(M.T, np.dot(invC, d)))
				ppsi[k]    = 0.5*np.log(detSigma) - 0.5*np.log(16.*np.pi**4*detSigma0*detC) -  0.5*(np.vdot(d.T, np.dot(invC, d)) + np.vdot(chi.T, np.dot(invSigma, chi)))
			p[j] += logdpsi_2 + logsumexp([logsumexp(ppsi[:-1]), logsumexp(ppsi[1:])])


	# Write into a file.
	wm = 'all/siginj'
	if os.path.exists(wm)==False:
		os.mkdir(wm)
	else:
		pass
	np.savetxt(wm+'/p'+str(oldstarttime)+'.txt',p)
	np.savetxt(wm+'/h0'+str(oldstarttime)+'.txt',h0_array)
Example No. 39
    def getTimeSeries(self, arg_list):
        """Verify and interpret arguments to get all
        TimeSeries objects defined"""

        # retrieve channel data from NDS as a TimeSeries
        for chans in arg_list.chan:
            for chan in chans:
                if chan not in self.chan_list:
                    self.chan_list.append(chan)

        if len(self.chan_list) < self.min_timeseries:
            raise ArgumentError('A minimum of %d channels must be '
                                'specified for this product' %
                                self.min_timeseries)

        if len(arg_list.start) > 0:
            for start_arg in arg_list.start:
                if type(start_arg) is list:
                    for starts in start_arg:
                        if isinstance(starts, basestring):
                            starti = int(starts)

                        elif isinstance(starts, list):
                            for start_str in starts:
                                starti = int(start_str)
                        # ignore duplicates (to make it easy for ldvw)
                        if starti not in self.start_list:
                            self.start_list.append(starti)
                else:
                    self.start_list.append(int(start_arg))
        else:
            raise ArgumentError('No start times specified')

        # Verify the number of datasets specified is valid for this plot
        self.n_datasets = len(self.chan_list) * len(self.start_list)
        if self.n_datasets < self.get_min_datasets():
            raise ArgumentError('%d datasets are required for this '
                                'plot but only %d are supplied' %
                                (self.get_min_datasets(), self.n_datasets))

        if self.n_datasets > self.get_max_datasets():
            raise ArgumentError('A maximum of %d datasets allowed for '
                                'this plot but %d specified' %
                                (self.get_max_datasets(), self.n_datasets))

        if arg_list.duration:
            self.dur = int(arg_list.duration)
        else:
            self.dur = 10

        verb = self.verbose > 1

        # determine how we're supposed get our data
        source = 'NDS2'
        frame_cache = False

        if arg_list.framecache:
            source = 'frames'
            frame_cache = arg_list.framecache

        # set up filter parameters for all channels
        highpass = 0
        if arg_list.highpass:
            highpass = float(arg_list.highpass)
            self.filter += "highpass(%.1f) " % highpass

        # Get the data from NDS or Frames
        # time_groups is a list of timeseries index grouped by
        # start time for coherence like plots
        self.time_groups = []
        for start in self.start_list:
            time_group = []
            for chan in self.chan_list:
                if verb:
                    print 'Fetching %s %d, %d using %s' % \
                          (chan, start, self.dur, source)
                if frame_cache:
                    data = TimeSeries.read(frame_cache, chan, start=start,
                                           end=start+self.dur)
                else:
                    data = TimeSeries.fetch(chan, start, start+self.dur,
                                            verbose=verb)

                if highpass > 0:
                    data = data.highpass(highpass)

                self.timeseries.append(data)
                time_group.append(len(self.timeseries)-1)
            self.time_groups.append(time_group)

        # report what we have if they asked for it
        self.log(3, ('Channels: %s' % self.chan_list))
        self.log(3, ('Start times: %s, duration' % self.start_list, self.dur))
        self.log(3, ('Number of time series: %d' % len(self.timeseries)))

        if len(self.timeseries) != self.n_datasets:
            self.log(0, ('%d datasets requested but only %d transfered' %
                         (self.n_datasets, len(self.timeseries))))
            if len(self.timeseries) > self.get_min_datasets():
                self.log(0, 'Proceeding with the data that was transferred.')
            else:
                self.log(0, 'Not enough data for requested plot.')
                from sys import exit
                exit(2)
        return
Example No. 40
File: inject.py  Project: gwpy/gwpy
__author__ = "Alex Urban <*****@*****.**>"
__currentmodule__ = 'gwpy.timeseries'

# First, we prepare one second of Gaussian noise:

from numpy import random
from gwpy.timeseries import TimeSeries
noise = TimeSeries(random.normal(scale=.1, size=16384), sample_rate=16384)

# Then we can download a simulation of the GW150914 signal from GWOSC:

from astropy.utils.data import get_readable_fileobj
url = ("https://www.gw-openscience.org/s/events/GW150914/P150914/"
       "fig2-unfiltered-waveform-H.txt")
with get_readable_fileobj(url) as f:
    signal = TimeSeries.read(f, format='txt')
signal.t0 = .5  # make sure this intersects with noise time samples

# Note, since this simulation cuts off before a certain time, it is
# important to taper its ends to zero to avoid ringing artifacts.
# We can accomplish this using the
# :meth:`~gwpy.timeseries.TimeSeries.taper` method.

signal = signal.taper()

# Since the time samples overlap, we can inject this into our noise data
# using :meth:`~gwpy.types.series.Series.inject`:

data = noise.inject(signal)

# Finally, we can visualize the full process in the time domain:
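# (The original listing ends here. A minimal sketch of that final
# plotting step, using the gwpy plotting API:)

from gwpy.plot import Plot
plot = Plot(noise, signal, data, separate=True, sharex=True, sharey=True)
plot.gca().set_epoch(0)
plot.show()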
Example No. 41
ifo = str(sys.argv[5])
frames = ifo + '1_M'
channel_list = str(sys.argv[3])
out_file = str(sys.argv[4])

fP = open(out_file,'w')

chan_list = []
chan_read = open(channel_list)
for line in chan_read.readlines():
    chan_list.append(line)



connection = datafind.GWDataFindHTTPConnection()
cache = connection.find_frame_urls(ifo, frames, start_gps, end_gps, urltype='file')

for chan in chan_list:

    chan1 = chan[:-1]
    data1=TimeSeries.read(cache, chan1, start=start_gps, end=end_gps)

    if any(diff(data1.value)>0):
        print >> fP, chan1


fP.close()



Example No. 42
def generate_triggers(channel,gps_start,gps_end,ifo,frames,outdir,segments,padding):
    # generate frame cache and connection
    pad_gps_end = gps_end - padding
    print "Processing segment: " + str(gps_start) + " - " + str(pad_gps_end)
    connection = datafind.GWDataFindHTTPConnection()
    cache = connection.find_frame_urls(ifo, frames, int(gps_start), int(gps_end), urltype='file')
    data=TimeSeries.read(cache, channel, start=int(gps_start), end=int(pad_gps_end))

    time_vec=data.times.value


    '''

    We are interested in times when the channels switch from a normal state to an overflowing
    state or vice versa. We're not checking the first and last data point of each set because it's not 
    possible to tell whether or not a channel has just started overflowing at our first data
    point or if it had been overflowing beforehand. 

    This big loop will test every data point (that isn't an endpoint) and record it in the
    trigger vector if it's an overflow transition.

    '''

#    trig_segs = seg.segmentlist()
#
#    for j in np.arange(np.size(data,0)):
#        if (0 < j < (np.size(data,0) - 1)):
#            if cumu_seg_test(data[j-1],data[j],data[j+1]):
#                trig_segs |= seg.segmentlist([seg.segment(time_vec[j+1],time_vec[j+1]+1)])
#
#    trig_segs = trig_segs.coalesce()

    trigger_vec = []
    # scan every interior sample for an overflow transition
    for j in range(1, np.size(data, 0) - 1):
        if cumu_seg_test(data[j-1], data[j], data[j+1]):
            trigger_vec.append(time_vec[j+1])


    if np.size(trigger_vec) == 0:
        print("No triggers found for " + str(channel))
        return
    else:
        print("Found triggers for " + str(channel))
        
    # map trigger times to floats, then convert them to LIGOTimeGPS notation
    trig_times = [LIGOTimeGPS(float(t)) for t in trigger_vec]

    # create mocked up frequency and SNR vectors to fill in XML tables
    freqs = np.empty(np.size(trigger_vec))
    freqs.fill(100)
    snrs = np.empty(np.size(trigger_vec))
    snrs.fill(10)


    sngl_burst_table_up = lsctables.New(lsctables.SnglBurstTable, ["peak_time", "peak_time_ns","peak_frequency","snr"])

    for t,f,s in zip(trig_times, freqs, snrs):
        row = sngl_burst_table_up.RowType()
        row.set_peak(t)
        row.peak_frequency = f
        row.snr = s
        sngl_burst_table_up.append(row)
        

    xmldoc_up = ligolw.Document()
    xmldoc_up.appendChild(ligolw.LIGO_LW())
    xmldoc_up.childNodes[0].appendChild(sngl_burst_table_up)

    directory_up = (outdir + '/' + channel[:2] + "/" + 
    channel[3:] + "/" + str(gps_start)[:5] + "/")

    if not os.path.exists(directory_up):
        os.makedirs(directory_up)
        
    utils.write_filename(xmldoc_up, directory_up + channel[:2] + "-" + channel[3:6] +
    "_" + channel[7:] + "_ADC-" + str(gps_start) + "-" + str(gps_end - gps_start) + 
    ".xml.gz", gz=True)
Example no. 43
def setUp(self):
    self.ts = TimeSeries.read(TEST_HDF_FILE, 'H1:LDAS-STRAIN')
    self.asd = self.ts.asd(1)
    self.mmm = [self.asd, self.asd * 0.9, self.asd * 1.1]
Example no. 44
    def getTimeSeries(self, arg_list):
        """Verify and interpret arguments to get all
        TimeSeries objects defined"""

        # retrieve channel data from NDS as a TimeSeries
        for chans in arg_list.chan:
            for chan in chans:
                if chan not in self.chan_list:
                    self.chan_list.append(chan)

        if len(self.chan_list) < self.min_timeseries:
            raise ArgumentError("A minimum of %d channels must be specified for this product" % self.min_timeseries)

        if len(arg_list.start) > 0:
            self.start_list = list(set(map(int, arg_list.start)))
        else:
            raise ArgumentError("No start times specified")

        # Verify the number of datasets specified is valid for this plot
        self.n_datasets = len(self.chan_list) * len(self.start_list)
        if self.n_datasets < self.get_min_datasets():
            raise ArgumentError(
                "%d datasets are required for this plot but only %d are "
                "supplied" % (self.get_min_datasets(), self.n_datasets)
            )

        if self.n_datasets > self.get_max_datasets():
            raise ArgumentError(
                "A maximum of %d datasets allowed for this plot but %d "
                "specified" % (self.get_max_datasets(), self.n_datasets)
            )

        if arg_list.duration:
            self.dur = int(arg_list.duration)
        else:
            self.dur = 10

        verb = self.verbose > 1

        # determine how we're supposed to get our data
        source = "NDS2"
        frame_cache = False

        if arg_list.framecache:
            source = "frames"
            frame_cache = arg_list.framecache

        # set up filter parameters for all channels
        highpass = 0
        if arg_list.highpass:
            highpass = float(arg_list.highpass)
            self.filter += "highpass(%.1f) " % highpass

        # Get the data from NDS or Frames
        # time_groups is a list of timeseries index grouped by
        # start time for coherence like plots
        self.time_groups = []
        for start in self.start_list:
            time_group = []
            for chan in self.chan_list:
                if verb:
                    print "Fetching %s %d, %d using %s" % (chan, start, self.dur, source)
                if frame_cache:
                    data = TimeSeries.read(frame_cache, chan, start=start, end=start + self.dur)
                else:
                    data = TimeSeries.fetch(chan, start, start + self.dur, verbose=verb)

                if highpass > 0:
                    data = data.highpass(highpass)

                self.timeseries.append(data)
                time_group.append(len(self.timeseries) - 1)
            self.time_groups.append(time_group)

        # report what we have if they asked for it
        self.log(3, ("Channels: %s" % self.chan_list))
        self.log(3, ("Start times: %s, duration" % self.start_list, self.dur))
        self.log(3, ("Number of time series: %d" % len(self.timeseries)))

        if len(self.timeseries) != self.n_datasets:
            self.log(0, ("%d datasets requested but only %d transfered" % (self.n_datasets, len(self.timeseries))))
            if len(self.timeseries) > self.get_min_datasets():
                self.log(0, "Proceeding with the data that was transferred.")
            else:
                self.log(0, "Not enough data for requested plot.")
                sys.exit(2)
        return
Example no. 45
a, b = polyfit(data[:, 0], data[:, 1], 1)
print("%.1f %u" % (a, b))

# Slide through the data fitting PSL to IMC for data around each sample
coeffs = []
for idx in range(maxidx):
    idx1 = max(0, idx - width)
    idx2 = min(idx + width, maxidx)
    coeffs.append(polyfit(data[idx1:idx2, 0], data[idx1:idx2, 1], 1,
                          w=weights[idx1 - idx + width:idx2 - idx + width]))
coeffs = array(coeffs)
times = arange(len(coeffs)) + 0.5
connection = datafind.GWDataFindHTTPConnection()
cache = connection.find_frame_urls(
    ifo[0], '%s_R' % ifo, st, et, urltype='file')

imc = TimeSeries.read(cache, "%s:IMC-F_OUT_DQ" % ifo, st, et)
imc = imc[::16384 // 256]  # downsample to 256 Hz
print(imc)
samp_times = arange(len(imc)) / 256.

coeffs0 = interp(samp_times, times, coeffs[:, 0])
coeffs1 = interp(samp_times, times, coeffs[:, 1]) - 7.6e7

vco_interp = coeffs0 * imc.data + coeffs1

chan = "%s:IMC-VCO_PREDICTION" % (ifo,)
vco_data = TimeSeries(vco_interp, epoch=st, sample_rate=imc.sample_rate.value,
                      name=chan, channel=chan)
vco_data.write("%s-vcoprediction-%u-%u.hdf" % (ifo, st, dur), format='hdf')
Example no. 46
def setUp(self):
    # read data
    self.data = TimeSeries.read(self.framefile, 'L1:LDAS-STRAIN')
    # calculate PSD
    self.psd = self.data.psd(0.4, 0.2, window=('kaiser', 24))
Example no. 47
# plotting all the data
from gwpy.timeseries import TimeSeries
import matplotlib.pyplot as plt
import numpy as np
import gwpy
import glue


data = np.loadtxt('/home/spxha/solarGW/intersect6.txt',dtype='f8')
TimesStart = data[:,0]
TimesEnd = data[:,1]
Xspacing = 2.44140625E-4
for i in range(len(TimesStart)):
	print(i)
	duration = TimesEnd[i] - TimesStart[i]
	timeseriesi = np.linspace(TimesStart[i], TimesEnd[i], int(duration/Xspacing))
	straini = TimeSeries.read('/home/spxha/S6framesH1.lcf', channel='H1:PSL-ODC_CHANNEL_OUT_DQ', start=TimesStart[i], end=TimesEnd[i], format='lcf')

	if i==0:
		strain = straini
		timeseries = timeseriesi
	else:
		strain = np.append(strain, straini)
		timeseries = np.append(timeseries, timeseriesi)
	del duration, timeseriesi, straini

plt.figure()
plt.plot(timeseries,strain)
plt.savefig('/home/spxha/Timeseries.png')
Example no. 48
    vco_data = TimeSeries(vco_interp, epoch=st,
                          sample_rate=imc.sample_rate.value,
                          name=chan, channel=chan)
    vco_data.write("%s-vcoprediction-%u-%u.hdf" % (ifo, st, dur), format='hdf')

if __name__ == '__main__':
    ifo = sys.argv[1]
    st = int(sys.argv[2])
    dur = int(sys.argv[3])
    et = st + dur

    connection = datafind.GWDataFindHTTPConnection()
    cache = connection.find_frame_urls(
        ifo[0], '%s_R' % ifo, st, et, urltype='file')

    print "Get IMC"
    imc = TimeSeries.read(cache, chan2_pat % ifo, st, et)
    print "Get psl"
    pslvco = TimeSeries.read(cache, chan1_pat % ifo, st, et + 1)
    print "Downsample psl"
    pslvco = pslvco[16 + 8::16]
    print "Downsample imc"
    imc_srate = int(imc.sample_rate.value)
    imc = imc[imc_srate / 2::imc_srate]

    print "Saving"
    data = numpy.array((imc.data, pslvco.data)).T
    fname = "%s-imc-vco-%u-%u.npy" % (ifo, st, dur)
    numpy.save("%s-imc-vco-%u-%u.npy" % (ifo, st, dur), data)
    dump_calibrated_data(fname)
Example no. 49
def setUp(self):
    self.ts = TimeSeries.read(TEST_HDF_FILE, 'H1:LDAS-STRAIN')
    self.sg = self.ts.spectrogram2(0.5, 0.49)
    self.mmm = [self.ts, self.ts * 0.9, self.ts * 1.1]
Example no. 50
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:
        # read all time-series data
        try:
            group = h5file['timeseries']
        except KeyError:
            group = dict()
        for dataset in group.values():
            ts = TimeSeries.read(dataset, format='hdf')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name) and
                    ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() == mode.SUMMARY_MODE_DAY:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # read all state-vector data
        try:
            group = h5file['statevector']
        except KeyError:
            group = dict()
        for dataset in group.values():
            sv = StateVector.read(dataset, format='hdf')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # read all spectrogram data
        try:
            group = h5file['spectrogram']
        except KeyError:
            group = dict()
        for key, dataset in group.items():
            key = key.rsplit(',', 1)[0]
            spec = Spectrogram.read(dataset, format='hdf')
            spec.channel = get_channel(spec.channel)
            add_spectrogram(spec, key=key)

        try:
            group = h5file['segments']
        except KeyError:
            group = dict()
        for name, dataset in group.iteritems():
            dqflag = DataQualityFlag.read(dataset, format='hdf')
            globalv.SEGMENTS += {name: dqflag}
Example no. 51
def setUp(self):
    self.ts = TimeSeries.read(TEST_HDF_FILE, 'H1:LDAS-STRAIN')
Example no. 52
# for now let's just assume you're not looking at a lock
# longer than a day...
for i in range(int(86400 / t)):
    new_times = []
    new_freqs = []
    for time, snr, freq in zip(times, snrs, freqs):
        if time > st and time < st + t:
            new_times.append(time)
            new_freqs.append(freq)
    if len(new_times) == 0:
        st += t
        continue
    connection = datafind.GWDataFindHTTPConnection()
    cache = connection.find_frame_urls(
        'L', 'L1_C', st, st + t, urltype='file')
    data = TimeSeries.read(cache, channel, st, st + t)
    data2 = detrend(data)
    data = TimeSeries(
        data2, dx=data.dx, sample_rate=data.sample_rate, x0=data.x0)
    specgram = data.spectrogram2(fftlength=.1, overlap=0.1 * 0.9)
    specgram = specgram.ratio('median')
    plot = specgram.plot(vmin=1, vmax=10, norm='log')
    plot.add_colorbar(label='amplitude relative to median')
    ax = plot.gca()
    ax.set_ylim(40, 7e4)
    ax.set_yscale('log')
    # ax.scatter(new_times,new_freqs,'x',color='r')
    for time, freq in zip(new_times, new_freqs):
        ax.scatter(time, freq, marker='x', color='r', s=160)
    ax.set_title('expect %d triggers' % (len(new_times)))
    plotfile = '%s/%s/%s/%d/%d' % (dir2, extra_dir,
Example no. 53
# test whether three consecutive samples of a cumulative overflow counter
# mark a transition; the function name and signature below are assumed from
# the calls to cumu_seg_test(data[j-1], data[j], data[j+1]) elsewhere in
# this document
def cumu_seg_test(x, y, z):
    if (x == y != z) and (z != 0):
        return True
    else:
        return False


# check if an overflow channel is cumulative by crosschecking ndcuid_list and model_list
# model list lines are recorded as <model_name.mdl> <cumulative status> <ndcuid=num>
def checkCumulative(chan_name, model_list, ndcuid_list):
    ID = 'ndcuid=' + str(chan_name)[str(chan_name).find('-')+1:str(chan_name).find('_')]
    if model_list[ndcuid_list.index(ID)][1] == 'cumulative':
        return True
    else:
        return False
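
# A hypothetical illustration of the input structure checkCumulative expects;
# every value below is a placeholder, not taken from the original source.
model_list = [['l1susetmx.mdl', 'cumulative', 'ndcuid=20']]
ndcuid_list = ['ndcuid=20']
# 'L1:FEC-20_ADC_OVERFLOW_0_0' parses to ID 'ndcuid=20', so this prints True
print(checkCumulative('L1:FEC-20_ADC_OVERFLOW_0_0', model_list, ndcuid_list))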
		

data = TimeSeries.read(cache, channel, start=gps_start, end=gps_end)

# 16 Hz sample times spanning the segment
time_vec = linspace(gps_start, gps_end, (gps_end - gps_start) * 16, endpoint=False)


'''
We are interested in times when the channels switch from a normal state
to an overflowing state, or vice versa. We do not check the first and
last data points of each set, because it is impossible to tell whether a
channel had just started overflowing at our first data point or had been
overflowing beforehand.

The loop below tests every interior data point and records it in the
trigger vector if it marks an overflow transition.
'''
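
# The snippet ends before the loop its comment describes; a minimal sketch
# of that scan, modeled on the generate_triggers version earlier in this
# document, might be:
trigger_vec = []
for j in range(1, len(data) - 1):
    if cumu_seg_test(data[j-1], data[j], data[j+1]):
        trigger_vec.append(time_vec[j+1])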
Example no. 54
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source

    This method reads all found data into the data containers defined by
    the `gwsumm.globalv` module, then returns nothing.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    with File(sourcefile, 'r') as h5file:

        # -- channels ---------------------------

        try:
            ctable = Table.read(h5file['channels'])
        except KeyError:  # no channels table written
            pass
        else:
            for row in ctable:
                chan = get_channel(row['name'])
                for p in ctable.colnames[1:]:
                    if row[p]:
                        setattr(chan, p, row[p])

        # -- timeseries -------------------------

        for dataset in h5file.get('timeseries', {}).values():
            ts = TimeSeries.read(dataset, format='hdf5')
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name) and
                    ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                if mode.get_mode() != mode.Mode.day:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)

        # -- statevector ------------------------

        for dataset in h5file.get('statevector', {}).values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)

        # -- spectrogram ------------------------

        for tag, add_ in zip(
                ['spectrogram', 'coherence-components'],
                [add_spectrogram, add_coherence_component_spectrogram]):
            for key, dataset in h5file.get(tag, {}).items():
                key = key.rsplit(',', 1)[0]
                spec = Spectrogram.read(dataset, format='hdf5')
                spec.channel = get_channel(spec.channel)
                add_(spec, key=key)

        # -- segments ---------------------------

        for name, dataset in h5file.get('segments', {}).items():
            dqflag = DataQualityFlag.read(h5file, path=dataset.name,
                                          format='hdf5')
            globalv.SEGMENTS += {name: dqflag}

        # -- triggers ---------------------------

        for dataset in h5file.get('triggers', {}).values():
            load_table(dataset)
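
# A hypothetical call with a placeholder path (not from the original);
# read_data_archive fills gwsumm's global data containers as a side effect
# and returns nothing.
read_data_archive('/path/to/archive/day.h5')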