import numpy as np
from math import ceil
from gwpy.timeseries import TimeSeries


def BLWNB(f, df, dt, fs):
    """BLWNB - Generate a random signal of given duration with constant power
    in a given band (and zero power out of band).

        x = blwnb(f, df, dt, fs)

    f   Scalar. Minimum signal frequency [Hz].
    df  Scalar. Full signal bandwidth [Hz].
    dt  Scalar. Signal duration [s].
    fs  Scalar. Signal sample rate [Hz].

    Power is restricted to the band (f, f+df).
    Note that fs must be greater than 2*(f+df).

    original version: L. S. Finn, 2004.08.03
    $Id: BLWNB.m 4992 2015-07-25 18:59:12Z [email protected] $
    """
    # ---- Check that fs > 2*(f+df), otherwise the sampling rate is not high enough
    #      to cover the requested frequency range of the signal.
    if fs <= abs(2 * (f + df)):
        raise ValueError('Sampling rate fs is too small, fs = ' + str(fs) +
                         ' must be greater than 2*(f+df) = ' + str(np.abs(2 * (f + df))))
    if f < 0 or df <= 0 or fs <= 0 or dt <= 0:
        raise ValueError('f must be non-negative and df, dt, fs must be greater than zero')

    # ---- Generate white noise with duration dt at sample rate df. This will be
    #      white over the band [-df/2, df/2].
    nSamp = ceil(dt * df)
    x = TimeSeries(np.random.randn(nSamp), sample_rate=df)

    # ---- Resample to the desired sample rate fs (the MATLAB original uses
    #      resample(x, p, q) with p/q = fs/df).
    x = x.resample(fs)

    # ---- The MATLAB rat() function returns p, q values that give the desired
    #      ratio to a default accuracy of 1e-6. This is a big enough error that
    #      x may be a few samples too short or too long. If too long, truncate
    #      to duration dt; if too short, zero-pad to duration dt.
    nSamp = round(dt * fs)
    if len(x) > nSamp:
        x = x[0:nSamp]
    elif len(x) < nSamp:
        x = np.hstack((np.array(x), np.zeros(nSamp - len(x))))

    # ---- Heterodyne up by f+df/2 (moves zero frequency to the center of the desired band).
    fup = f + df / 2.
    x = x * np.exp(-2 * np.pi * 1j * fup / fs * np.arange(1, len(x) + 1))

    # ---- Take the real part and adjust the amplitude.
    x = np.array(np.real(x) / np.sqrt(2))

    # ---- Done.
    return x
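# Minimal usage sketch (not part of the original module): generate 1 s of
# band-limited white noise in the 100-200 Hz band at a 4096 Hz sample rate and
# check that the power is confined to the requested band. Assumes BLWNB and its
# imports above are available in the current namespace; the printed values are
# only expectations, not guaranteed outputs.
if __name__ == '__main__':
    fs = 4096                                   # sample rate [Hz]
    x = BLWNB(f=100, df=100, dt=1.0, fs=fs)
    print(len(x), len(x) / fs)                  # expected: 4096 samples, ~1.0 s

    # Quick sanity check of the band limitation with an FFT-based power estimate.
    X = np.abs(np.fft.rfft(x)) ** 2
    freqs = np.fft.rfftfreq(len(x), d=1 / fs)
    in_band = X[(freqs >= 100) & (freqs <= 200)].sum()
    print(in_band / X.sum())                    # should be close to 1 for in-band power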
from scipy import signal as scisig
from gwpy.timeseries import TimeSeries


def prep_ccsn(h, sim_times, Tc, fw):
    """Prepare a single polarization of a simulated CCSN waveform:
    resample to the working frequency, high-pass filter, window, and
    zero-pad to the chunk length Tc.
    """
    dt = sim_times[1] - sim_times[0]
    h = TimeSeries(h, t0=sim_times[0], dt=dt)
    h = h.resample(rate=fw, ftype='iir', n=20)      # downsample to working frequency fw
    h = h.highpass(frequency=11, filtfilt=True)     # filter out frequencies below ~11 Hz
    inj_window = scisig.tukey(M=len(h), alpha=0.08, sym=True)  # Tukey window to taper the edges
    h = h * inj_window
    h = h.pad(int((fw * Tc - len(h)) / 2))          # zero-pad to the full chunk duration Tc
    return h
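# Usage sketch (illustrative, not from the original repo): run prep_ccsn on a toy
# waveform. A short damped sinusoid sampled at 16384 Hz stands in for a simulated
# CCSN strain; Tc=16 s and fw=2048 Hz mirror the defaults used elsewhere in this code.
import numpy as np

sim_dt = 1.0 / 16384
sim_times_toy = np.arange(0.0, 1.0, sim_dt)          # 1 s of simulation time
toy_h = 1e-22 * np.exp(-sim_times_toy / 0.2) * np.sin(2 * np.pi * 300 * sim_times_toy)
h_ready = prep_ccsn(toy_h, sim_times_toy, Tc=16, fw=2048)
print(h_ready.sample_rate, h_ready.duration)         # expected: 2048 Hz sample rate, ~16 s duration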
    # Note: this method relies on names defined elsewhere in the module, e.g.
    # `logger`, `better_aa_opts`, `spectrogram` (scipy.signal), and numpy functions
    # imported directly (median, zeros_like, arange, where, logical_and, setdiff1d,
    # sqrt, isnan, interp, NaN, sum).
    def compute(self, raw: TimeSeries) -> TimeSeriesDict:
        debug = f'compute_blrms ({self.channel}) : '

        # Resample to the specified frequency.
        raw = raw.resample(**better_aa_opts(raw, self.fs))

        # Compute the spectrogram. Set up kwargs for creation of output TimeSeries.
        F, T, Sh = spectrogram(raw.value,
                               nperseg=self.fs * self.tn,
                               noverlap=int(self.fs * self.to),
                               fs=self.fs)
        ts_kwargs = dict(t0=raw.t0, dt=T[1] - T[0], unit=raw.unit)
        logger.debug(debug + 'Computed scipy.spectrogram.')

        # Identify lines by comparing the PSD to an estimated background.
        Sh_m = median(Sh, axis=1)       # median over time of the PSD in each frequency bin
        Nf = int(self.df / F[1])        # median window width, converted to number of bins
        Sb_m = zeros_like(Sh_m)         # empty background vector
        # Compute a windowed median of the PSD.
        for i, f in enumerate(F):
            # Select the bins in the current window.
            idx = arange(max(i - Nf, 0), min(i + Nf, F.shape[0]))
            # Estimate the background (without lines) as the median in the current window.
            Sb_m[i] = median(Sh_m[idx])
        logger.debug(debug + 'Estimated PSD of background.')

        # Find all lines, i.e. all bins where the PSD is larger than thr times the background.
        line_idx = where(logical_and(F > 10, Sh_m / Sb_m > self.thr))[0]
        logger.debug(debug + 'Located the line frequencies.')

        # Compute BLRMS for all bands.
        out = TimeSeriesDict()
        for band_start, band_stop in self.bands:
            channel_prefix = f'{self.channel}_BLRMS_{band_start}_{band_stop}'
            # Select frequency bins for this band.
            idx = arange(int(band_start / F[1]), int(band_stop / F[1]))
            # Full BLRMS, using all bins.
            out[channel_prefix] = TimeSeries(sum(Sh[idx, :], axis=0), **ts_kwargs)
            # Remove the indices of all lines.
            idx = setdiff1d(idx, line_idx)
            # BLRMS excluding lines.
            out[f'{channel_prefix}_nolines'] = TimeSeries(sum(Sh[idx, :], axis=0), **ts_kwargs)

            # Time-domain median smoothing and glitch removal.
            for prefix in channel_prefix, f'{channel_prefix}_nolines':
                blrms = out[prefix].value
                # Convert the time-domain median window size from seconds to BLRMS samples.
                NT = int(self.dt / T[1])
                # Empty vectors for running median and running 'median-stdev'.
                blrms_m = zeros_like(blrms)
                blrms_rms = zeros_like(blrms)
                # Loop over all time samples.
                for i, x in enumerate(blrms):
                    # Select samples in the current window.
                    idx = arange(max(i - NT, 0), min(i + NT, T.shape[0]))
                    # Median of the BLRMS in the current window.
                    blrms_m[i] = median(blrms[idx])
                    # Median-based equivalent of the variance.
                    blrms_rms[i] = median((blrms[idx] - blrms_m[i]) ** 2)
                # Identify glitch times: samples where the BLRMS deviates from the running
                # median by more than 3 times the median-based standard deviation.
                glitch_idx = where((blrms - blrms_m) > 3 * sqrt(blrms_rms))[0]
                # Remove the glitchy times.
                blrms_noglitch = blrms.copy()
                # First set glitchy times to NaN ...
                blrms_noglitch[glitch_idx] = NaN
                # ... then find the samples that are glitchy ...
                idx = isnan(blrms_noglitch)
                # ... and replace them by linear interpolation over the surrounding values.
                blrms_noglitch[idx] = interp(T[idx], T[~idx], blrms[~idx])

                # Save results to the output dictionary.
                out[f'{prefix}_smooth'] = TimeSeries(blrms_m, **ts_kwargs)
                out[f'{prefix}_noglitch'] = TimeSeries(blrms_noglitch, **ts_kwargs)

        # F_lines = F[line_idx]
        # lines = {'F_lines': F_lines, 'F': F, 'Smedian': Sh_m, 'Sbg': Sb_m, 'line_idx': line_idx}

        # Fix channel names.
        for i in out:
            out[i].name = i
        return out
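# Standalone sketch (an illustration, not part of the class above) of the
# windowed-median line identification used in compute(): the background PSD is
# estimated as a running median of the per-bin median PSD, and bins exceeding
# `thr` times that background are flagged as lines. The function name and the
# toy inputs are made up for this example.
import numpy as np


def find_lines(F, Sh_m, df_window=1.0, thr=5.0):
    """F: frequency bins [Hz]; Sh_m: median PSD per bin; df_window: half-width of
    the median window [Hz]; thr: line threshold relative to the background."""
    Nf = int(df_window / F[1])                       # window half-width in bins
    Sb_m = np.zeros_like(Sh_m)
    for i in range(len(F)):
        idx = np.arange(max(i - Nf, 0), min(i + Nf, len(F)))
        Sb_m[i] = np.median(Sh_m[idx])               # background estimate without lines
    return np.where((F > 10) & (Sh_m / Sb_m > thr))[0]


# Toy example: flat background with one strong "line" at 60 Hz.
F = np.linspace(0, 128, 257)
Sh_m = np.ones_like(F)
Sh_m[np.argmin(np.abs(F - 60))] = 50.0
print(F[find_lines(F, Sh_m)])                        # expected to print ~[60.]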
# Note: relies on helpers defined elsewhere in the repo (get_files, condition_data,
# find_closest_index, git_path) and on pycbc.detector.Detector, pathlib.Path,
# os.path.join, numpy (np), scipy.signal (scisig) and gwpy's TimeSeries.
def load_inject_condition_ccsn(t_i, t_f, t_inj, ra, dec, ccsn_paper, ccsn_file,
                               D_kpc=10, local=False, Tc=16, To=2, fw=2048,
                               window='tukey', detector='H', qtrans=False,
                               qsplit=False, dT=2.0, save=False, data_path=None):
    """Function to load a chunk of data, inject a CCSN waveform and condition it;
    written this way to enable parallelizing over chunks.
    """
    if local:
        files = get_files(detector)
        try:
            # load data from local files
            data = TimeSeries.read(files, start=t_i, end=t_f, format='hdf5.losc')
        except Exception:
            return
    else:
        # load open data from LOSC/GWOSC
        try:
            data = TimeSeries.fetch_open_data(detector + '1', *(t_i, t_f),
                                              sample_rate=fw, verbose=False, cache=True)
        except Exception:
            return

    if np.isnan(data.value).any():
        return

    # Project the waveform onto the detector: time delay and antenna response.
    det_obj = Detector(detector + '1')
    delay = det_obj.time_delay_from_detector(Detector('H1'), ra, dec, t_inj)
    t_inj += delay
    fp, fc = det_obj.antenna_pattern(ra, dec, 0, t_inj)

    # Read the simulated CCSN waveform.
    wfs_path = Path(git_path + '/shared/ccsn_wfs/' + ccsn_paper)
    sim_data = [i.strip().split() for i in open(join(wfs_path, ccsn_file)).readlines()]
    if ccsn_paper == 'radice':
        line_s = 1  # skip the header line
    else:
        line_s = 0

    D = D_kpc * 3.086e+21  # source distance in cm
    sim_times = np.asarray([float(dat[0]) for dat in sim_data[line_s:]])
    hp = np.asarray([float(dat[1]) for dat in sim_data[line_s:]]) / D
    if ccsn_paper == 'abdikamalov':
        hc = np.zeros(hp.shape)  # no cross polarization provided
    else:
        hc = np.asarray([float(dat[2]) for dat in sim_data[line_s:]]) / D

    dt = sim_times[1] - sim_times[0]
    h = fp * hp + fc * hc
    h = TimeSeries(h, t0=sim_times[0], dt=dt)
    h = h.resample(rate=fw, ftype='iir', n=20)   # downsample to working frequency fw
    h = h.highpass(frequency=11, filtfilt=True)  # filter out frequencies below ~11 Hz
    inj_window = scisig.tukey(M=len(h), alpha=0.08, sym=True)
    h = h * inj_window
    h = h.pad(int((fw * Tc - len(h)) / 2))

    # Shift the waveform so that it lands at time t_inj within the data chunk.
    wf_times = data.times.value
    shift = int((t_inj - (wf_times[0] + Tc / 2)) * fw)
    h = np.roll(h.value, shift)

    h = TimeSeries(h, t0=wf_times[0], dt=data.dt)
    try:
        h = h.taper()
    except Exception:
        pass

    injected_data = data.inject(h)

    cond_data = condition_data(injected_data, To, fw, window, qtrans, qsplit, dT)

    x = []
    times = []
    for dat in cond_data:
        x.append(dat.values)
        times.append(dat.t0)
    x = np.asarray(x)
    times = np.asarray(times)

    # Keep only the conditioned segment closest to the injection time.
    idx = find_closest_index(t_inj, times)
    x = x[idx]
    times = times[idx]
    return x, times
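# Hypothetical usage sketch (placeholder values, not from the original repo): inject a
# waveform from one of the supported CCSN catalogues into a 16 s chunk of open H1 data.
# The GPS times, sky location and file name below are illustrative only; whether the
# call succeeds depends on data availability and on the waveform file existing under
# git_path/shared/ccsn_wfs/.
result = load_inject_condition_ccsn(
    t_i=1186725888,            # chunk start GPS time (placeholder)
    t_f=1186725888 + 16,       # chunk end GPS time (Tc = 16 s)
    t_inj=1186725896,          # injection GPS time near the chunk centre (placeholder)
    ra=1.5, dec=-0.3,          # sky location [rad] (placeholder)
    ccsn_paper='radice',       # catalogue name used to locate the waveform file
    ccsn_file='s13.dat',       # waveform file name (placeholder)
    D_kpc=10, detector='H')
if result is not None:         # the function returns None if the data load fails
    x, times = result
    print(x.shape, times)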
# Strain
# -----------
if False:
    chname = 'CALC_STRAIN'
    segments = findfiles(start, end, chname,
                         prefix='/Users/miyo/Dropbox/KagraData/gif')
    source = [path for files in segments for path in files]
    strain = TimeSeries.read(source=source, name=chname, format='gif',
                             pad=numpy.nan, nproc=1)
    strain = TimeSeries(strain.value, times=strain.times.value, name=chname)
    strain = strain.resample(8.0)
    #strain.write('Dec10_3hours_strain.gwf', format='gwf.lalframe')

# -----------
# Pressure
# -----------
if True:
    chname = 'X500_BARO'
    segments = findfiles(start, end, chname,
                         prefix='/Users/miyo/Dropbox/KagraData/gif')
    source = [path for files in segments for path in files]
    x500_baro = TimeSeries.read(source=source, name=chname, format='gif',