from gwpy.timeseries import TimeSeries


def _read_data(channel, st, et):
    """Fetch data for ``channel`` between ``st`` and ``et``, either from frames or from NDS2."""
    # first character of the IFO prefix, e.g. 'L' from 'L1:...'
    ifo = channel.split(':')[0][0]
    if channel.split(':')[1] == 'GDS-CALIB_STRAIN':
        # calibrated strain lives in the h(t) frames
        print(ifo + '1_HOFT_C00')
        data = TimeSeries.find(channel, st, et, frametype=ifo + '1_HOFT_C00')
    else:
        # everything else is read from the raw frames
        data = TimeSeries.find(channel, st, et, frametype=ifo + '1_R')
    return data
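# A minimal usage sketch for ``_read_data`` (not part of the original snippet):
# the channel name and GPS times below are placeholders, and the call only
# succeeds where the corresponding frame files are discoverable.
strain = _read_data('L1:GDS-CALIB_STRAIN', 1186741850, 1186741882)
print(strain.sample_rate)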
import bilby
import numpy as np
from gwpy.timeseries import TimeSeries


def gen_real_noise(duration, sampling_frequency, det, ref_geocent_time,
                   psd_files=[], real_noise_seg=[None, None]):
    """ pull real noise samples """

    # compute the number of time domain samples
    Nt = int(sampling_frequency * duration)

    # Get ifos bilby variable
    ifos = bilby.gw.detector.InterferometerList(det)

    start_open_seg, end_open_seg = real_noise_seg  # 1 sec noise segments
    for ifo_idx, ifo in enumerate(ifos):  # iterate over interferometers
        time_series = TimeSeries.find('%s:GDS-CALIB_STRAIN' % det[ifo_idx],
                                      start_open_seg, end_open_seg)  # pull timeseries data using gwpy
        ifo.set_strain_data_from_gwpy_timeseries(time_series=time_series)  # input new ts into bilby ifo

    noise_sample = ifos[0].strain_data.frequency_domain_strain  # get frequency domain strain
    noise_sample /= ifos[0].amplitude_spectral_density_array  # assume default psd from bilby
    noise_sample = np.sqrt(2.0 * Nt) * np.fft.irfft(noise_sample)  # convert frequency to time domain

    return noise_sample
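# Usage sketch (not from the original code): the detector list, GPS segment and
# reference time are placeholder assumptions, and real strain frames must be
# accessible for the ``TimeSeries.find`` call inside to succeed.
noise = gen_real_noise(duration=1.0,
                       sampling_frequency=1024,
                       det=['H1'],
                       ref_geocent_time=1126259642,
                       real_noise_seg=[1126259042, 1126259142])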
from gwpy.timeseries import TimeSeries


def load_data_from_gwpy(gpsstart, gpsend, ifo, channel, frame, fs=4096):
    """Read ``channel`` from frames and wrap it in a ``gwStrain``, resampled to ``fs``."""
    try:
        data = TimeSeries.find(channel, gpsstart, gpsend, frametype=frame, allow_tape=False)
        value = data.value
        srate = data.sample_rate.value
        epoch = data.epoch.value
        # duration = data.duration.value
        ret = gwStrain(value, epoch, ifo, srate, info=f'{ifo}_strain')
        if srate != fs:
            return ret.resample(fs)
        return ret
    except Exception:
        # signal failure to the caller rather than raising
        return CEV.PROCESS_FAIL
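# Usage sketch with placeholder values (not from the original code); the return
# value is a ``gwStrain`` resampled to 4096 Hz, or ``CEV.PROCESS_FAIL`` if the
# frame data cannot be read.
strain = load_data_from_gwpy(1186741850, 1186741882, 'H1',
                             'H1:GDS-CALIB_STRAIN', 'H1_HOFT_C00', fs=4096)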
from numpy import array, concatenate
from gwpy.timeseries import TimeSeries


def aux_feat_get(params):
    # ``fir``, ``t1`` and ``t2`` are expected to be defined at module scope
    chan, ifo = params[0], params[1]
    print(chan)
    full_data = []
    position_chunks = []
    velocity_chunks = []
    try:
        data = TimeSeries.find(chan, t1, t2, frametype=ifo + '_R', verbose=True)
        tmp_pos = fir.osem_position(data, new_sample_rate=16)
        tmp_vel = fir.osem_velocity(data, new_sample_rate=16)
        position_chunks.append(tmp_pos.value)
        velocity_chunks.append(tmp_vel.value)
        full_data.append(concatenate(position_chunks))
        full_data.append(concatenate(velocity_chunks))
    except RuntimeError as err:
        return err
    full_data = array(full_data)
    return (chan, full_data)
def coherence(cls, channel1, channel2, st, et, overlap=None, pad=False,
              stride=1, resamplerate1=None, resamplerate2=None):
    """Class method that calculates the coherence between two channels
    and returns a coherence segment.

    Parameters
    ----------
    channel1 : `str`
        Name of first channel
    channel2 : `str`
        Name of second channel
    st : `int`
        Start time
    et : `int`
        End time
    stride : `int`, optional, default=1 second
        Length of ffts
    overlap : `int`, optional, default=0.5 seconds
        Amount of overlap between ffts
    pad : `bool`
        Determines whether or not to zero-pad the data when taking ffts
    resamplerate1 : `float`, optional
        Rate to resample channel1 to before taking ffts
    resamplerate2 : `float`, optional
        Rate to resample channel2 to before taking ffts

    Returns
    -------
    segment : :class:`PEMCoherenceSegment`
        Coherence segment for these two channels
    """
    # read in data
    if isinstance(channel1, Channel):
        data1 = TimeSeries.find(channel1.name, st, et, frametype=channel1.frametype)
    else:
        data1 = TimeSeries.get(channel1, st, et)
    if resamplerate1 is not None and resamplerate1 < data1.sample_rate.value:
        data1 = data1.resample(resamplerate1)
    if isinstance(channel2, Channel):
        data2 = TimeSeries.find(channel2.name, st, et, frametype=channel2.frametype)
    else:
        data2 = TimeSeries.get(channel2, st, et)
    if resamplerate2 is not None and resamplerate2 < data2.sample_rate.value:
        data2 = data2.resample(resamplerate2)
    # get fft spectrograms
    fftgram1 = cf.fftgram(data1, stride, overlap=overlap, pad=pad)
    fftgram2 = cf.fftgram(data2, stride, overlap=overlap, pad=pad)
    # cut things down if frequency arrays are too long
    # TODO: eventually port this over to `gwpy.spectrogram.Spectrogram.crop()`
    # in some way using the `gwpy.detector.Channel.frequency_range()` specified,
    # but we want to keep backwards compatibility in case strings are supplied
    maxlen = min(fftgram1.shape[1], fftgram2.shape[1])
    # take csd
    csd12 = cf.csdgram(fftgram1[:, :maxlen], fftgram2[:, :maxlen],
                       stride, overlap=overlap, pad=pad)
    # get number of segments analyzed
    N = fftgram1.shape[0]
    # take mean of csd
    csd12 = np.mean(csd12, 0)
    # take mean of fftgrams, take abs to get psds
    psd1 = np.mean(np.abs(fftgram1)**2, 0)
    psd2 = np.mean(np.abs(fftgram2)**2, 0)
    # return the segment
    return PEMCoherenceSegment(channel1, channel2, csd12, psd1, psd2, N, st, et)
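# Usage sketch, assuming this method is exposed as a classmethod of
# ``PEMCoherenceSegment`` (the channel names and GPS times are placeholders,
# not from the original code):
seg = PEMCoherenceSegment.coherence('H1:GDS-CALIB_STRAIN',
                                    'H1:PEM-EY_MAG_EBAY_SUSRACK_X_DQ',
                                    1186741850, 1186741882,
                                    stride=1, overlap=0.5)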
import numpy as np
from gwpy.timeseries import TimeSeries

# ``get_science_segments``, ``chunk_segments`` and ``fir`` come from elsewhere
# in this project
segs = get_science_segments(ifo, st, et)
for seg in segs:
    print(seg.start, seg.end)

full_data = []
darm_data = []
for chan in chan_lst:
    print('Getting data for', chan)
    position_chunks = []
    velocity_chunks = []
    q_chunks = []
    for t1, t2 in chunk_segments(segs, chunk, pad):
        print('Getting chunk', t1, t2)
        data = TimeSeries.find(chan, t1, t2, frametype=ifo + '_R', nproc=6, verbose=True)
        tmp_pos = fir.osem_position(data, pad, new_sample_rate=1)
        tmp_vel = fir.osem_velocity(data, pad, new_sample_rate=1)
        position_chunks.append(tmp_pos.value)
        velocity_chunks.append(tmp_vel.value)
        # Calculate q_transform chunks
        tmp_q = fir.q_scan(data, pad, new_sample_rate=1)
        q_tstep = round(tmp_q.shape[0] / float(len(data))) * data.sample_rate.value
        for q_v in range(0 + (pad * 1000), len(tmp_q) - (pad * 1000), 1000):
            if q_v == 0 + (pad * 1000):
                small_chunksq = tmp_q[q_v, :]
                small_chunksq = np.asarray(small_chunksq)
            else:
                small_chunksq = np.vstack((small_chunksq, tmp_q[q_v, :]))
        q_chunks.append(small_chunksq)