def getMergerStrain(self):
    """Uses the pycbc library to load the strain data for use with the
    matched filtering method. It does not go through the gwpy library
    that we would normally use for the FFT and spectrogram work.
    """
    merger = Merger(self.event_id)
    return merger.strain(self.detector_id)
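A standalone equivalent of what getMergerStrain wraps (a minimal sketch, assuming pycbc is installed and the event/detector IDs are valid catalog keys such as 'GW150914' and 'H1'):

from pycbc.catalog import Merger

# fetch the public strain data for an event/detector pair
strain = Merger('GW150914').strain('H1')
print(strain.duration, strain.delta_t)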
def setUp(self, *args):
    self.context = _context
    self.scheme = _scheme
    self.tolerance = 1e-6
    xr = numpy.random.uniform(low=-1, high=1.0, size=2**20)
    xi = numpy.random.uniform(low=-1, high=1.0, size=2**20)
    self.x = Array(xr + xi * 1.0j, dtype=complex64)
    self.z = zeros(2**20, dtype=float32)
    for i in range(0, 4):
        trusted_accum(self.z, self.x)

    m = Merger("GW170814")
    ifos = ['H1', 'L1', 'V1']
    data = {}
    psd = {}
    for ifo in ifos:
        # Read in and condition the data and measure PSD
        ts = m.strain(ifo).highpass_fir(15, 512)
        data[ifo] = resample_to_delta_t(ts, 1.0 / 2048).crop(2, 2)
        p = data[ifo].psd(2)
        p = interpolate(p, data[ifo].delta_f)
        p = inverse_spectrum_truncation(p, int(2 * data[ifo].sample_rate),
                                        low_frequency_cutoff=15.0)
        psd[ifo] = p

    hp, _ = get_fd_waveform(approximant="IMRPhenomD",
                            mass1=31.36, mass2=31.36,
                            f_lower=20.0, delta_f=data[ifo].delta_f)
    hp.resize(len(psd[ifo]))

    # For each ifo use this template to calculate the SNR time series
    snr = {}
    snr_unnorm = {}
    norm = {}
    corr = {}
    for ifo in ifos:
        snr_unnorm[ifo], corr[ifo], norm[ifo] = \
            matched_filter_core(hp, data[ifo], psd=psd[ifo],
                                low_frequency_cutoff=20)
        snr[ifo] = snr_unnorm[ifo] * norm[ifo]

    self.snr = snr
    self.snr_unnorm = snr_unnorm
    self.norm = norm
    self.corr = corr
    self.hp = hp
    self.data = data
    self.psd = psd
    self.ifos = ifos
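An illustrative follow-on, not taken from the source tests: reading off the peak SNR and its time for each detector from the snr dict built above (the .numpy().argmax() pattern appears again later in this section):

for ifo in ifos:
    s = abs(snr[ifo])                 # SNR amplitude time series
    peak_idx = s.numpy().argmax()     # index of the loudest sample
    print(ifo, s[peak_idx], s.sample_times[peak_idx])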
def setUp(self, *args):
    self.context = _context
    self.scheme = _scheme
    self.tolerance = 1e-6
    xr = numpy.random.uniform(low=-1, high=1.0, size=2**20)
    xi = numpy.random.uniform(low=-1, high=1.0, size=2**20)
    self.x = Array(xr + xi * 1.0j, dtype=complex64)
    self.z = zeros(2**20, dtype=float32)
    for i in range(0, 4):
        trusted_accum(self.z, self.x)

    m = Merger("GW170814")
    ifos = ['H1', 'L1', 'V1']
    data = {}
    psd = {}
    for ifo in ifos:
        # Read in and condition the data and measure PSD
        ts = m.strain(ifo).highpass_fir(15, 512)
        data[ifo] = resample_to_delta_t(ts, 1.0 / 2048).crop(2, 2)
        p = data[ifo].psd(2)
        p = interpolate(p, data[ifo].delta_f)
        # cast to int: the truncation length is a sample count
        p = inverse_spectrum_truncation(p, int(2 * data[ifo].sample_rate),
                                        low_frequency_cutoff=15.0)
        psd[ifo] = p

    hp, _ = get_fd_waveform(approximant="IMRPhenomD",
                            mass1=31.36, mass2=31.36,
                            f_lower=20.0, delta_f=data[ifo].delta_f)
    hp.resize(len(psd[ifo]))

    # For each ifo use this template to calculate the SNR time series
    snr = {}
    snr_unnorm = {}
    norm = {}
    corr = {}
    for ifo in ifos:
        snr_unnorm[ifo], corr[ifo], norm[ifo] = \
            matched_filter_core(hp, data[ifo], psd=psd[ifo],
                                low_frequency_cutoff=20)
        snr[ifo] = snr_unnorm[ifo] * norm[ifo]

    self.snr = snr
    self.snr_unnorm = snr_unnorm
    self.norm = norm
    self.corr = corr
    self.hp = hp
    self.data = data
    self.psd = psd
    self.ifos = ifos
def setUp(self):
    ###### Get data for the reference analysis of GW170817
    m = Merger("GW170817")
    ifos = ['H1', 'V1', 'L1']
    self.psds = {}
    self.data = {}

    for ifo in ifos:
        print("Processing {} data".format(ifo))

        # Download the gravitational wave data for GW170817
        url = "https://dcc.ligo.org/public/0146/P1700349/001/"
        url += "{}-{}1_LOSC_CLN_4_V1-1187007040-2048.gwf"
        fname = download_file(url.format(ifo[0], ifo[0]), cache=True)
        ts = read_frame(fname, "{}:LOSC-STRAIN".format(ifo),
                        start_time=int(m.time - 260),
                        end_time=int(m.time + 40))
        ts = highpass(ts, 15.0)
        ts = resample_to_delta_t(ts, 1.0 / 2048)
        ts = ts.time_slice(m.time - 112, m.time + 16)
        self.data[ifo] = ts.to_frequencyseries()

        psd = interpolate(ts.psd(4), ts.delta_f)
        psd = inverse_spectrum_truncation(psd, int(4 * psd.sample_rate),
                                          trunc_method='hann',
                                          low_frequency_cutoff=20.0)
        self.psds[ifo] = psd

    self.static = {
        'mass1': 1.3757,
        'mass2': 1.3757,
        'f_lower': 20.0,
        'approximant': "TaylorF2",
        'polarization': 0,
        'ra': 3.44615914,
        'dec': -0.40808407,
        'tc': 1187008882.42840,
    }
    self.variable = ('distance', 'inclination')
    self.flow = {'H1': 25, 'L1': 25, 'V1': 25}
    inclination_prior = SinAngle(inclination=None)
    distance_prior = Uniform(distance=(10, 100))
    # tc_prior is defined here but not included in the joint prior below
    tc_prior = Uniform(tc=(m.time - 0.1, m.time + 0.1))
    self.prior = JointDistribution(self.variable,
                                   inclination_prior, distance_prior)

    ###### Expected answers
    # Answer taken from the marginalized Gaussian model
    self.q1 = {'distance': 42.0, 'inclination': 2.5}
    self.a1 = 541.8235746138382

    # Answer taken from brute-force marginalization over polarization + phase
    self.a2 = 542.581
    self.pol_samples = 200
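A hedged sketch of drawing prior samples from the JointDistribution built in this setUp (assuming pycbc.distributions; as I understand it, rvs returns a field array indexable by parameter name):

from pycbc.distributions import SinAngle, Uniform, JointDistribution

variable = ('distance', 'inclination')
prior = JointDistribution(variable,
                          SinAngle(inclination=None),
                          Uniform(distance=(10, 100)))
samples = prior.rvs(size=5)  # draw 5 prior samples
print(samples['distance'], samples['inclination'])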
def summary(
        m: Merger,
        parameters: typing.List[MergerParameters] = DEFAULT_SUMMARY_PARAMETERS
) -> str:
    """A summary string of a Merger object.

    Args:
        m: Merger, the merger to be summarized
        parameters: the catalog parameters to include in the summary

    Returns:
        str, the summary string of the merger

    Sample:
        Merger[GW150914](Mass1=35.6, Mass2=30.6, FinalSpin=0.69)
    """
    return 'Merger[{name}]({parameters})'.format(
        name=name(m),
        parameters=', '.join(
            '{}={}'.format(p.name, m.median1d(p)) for p in parameters))
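Hypothetical usage, assuming MergerParameters and DEFAULT_SUMMARY_PARAMETERS are defined elsewhere in the same module as in the snippet above:

from pycbc.catalog import Merger

print(summary(Merger('GW150914')))
# e.g. Merger[GW150914](Mass1=35.6, Mass2=30.6, FinalSpin=0.69)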
def generate_psd(
    data_dir: Union[str, os.PathLike],
    static_args_ini: str,
    gps_time: int = GPS_TIME,
    psd_window: int = 1024,
    ifos: Union[str, List[str]] = ['H1', 'L1'],
    out_dir: Optional[str] = None,
    verbose: bool = False,
    validate: bool = False,
):
    """Generates Power Spectral Densities (PSDs) using a Welch estimate.

    Future work:
        To do: check PSD generation for V1.
        To do: enable multiple PSDs for the same ifo.
    """
    # load static argument file
    _, static_args = read_ini_config(static_args_ini)

    # specify output directory
    data_dir = Path(data_dir)
    out_dir = Path(out_dir) if out_dir is not None else data_dir
    assert not out_dir.is_file(), (
        f"{out_dir} is a file. It should either not exist or be a directory.")
    out_dir.mkdir(parents=True, exist_ok=True)

    if verbose:
        print(f"[{datetime.now().strftime('%H:%M:%S')}] "
              f"Saving {ifos} PSDs to {out_dir}/")

    # retrieve strain data from valid windows from .hdf files
    timeline = NoiseTimeline(data_dir, ifos)
    strains = timeline.get_strains(gps_time, psd_window)

    psds = {}
    for ifo in strains:
        psds[ifo] = pycbc.psd.estimate.welch(
            strains[ifo],
            avg_method='median',
            seg_len=static_args['td_length'],
            seg_stride=static_args['td_length'],
            window=get_tukey_window(
                static_args['waveform_length'],
                static_args['target_sampling_rate'],
            ))

        out_file = out_dir / f'{ifo}_PSD.npy'
        psds[ifo].save(out_file)

        if verbose:
            print(f"[{datetime.now().strftime('%H:%M:%S')}] "
                  f"Saved {ifo} PSD to {str(out_file)}.")

    if validate:
        validate_psds(strains, psds, out_dir=out_dir / 'figures',
                      gps_time=Merger('GW150914').time)
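The saved .npy files can be read back with the repo's own helper, as done in generate_gw150914_context later in this section (a sketch; delta_f is taken from the same static args file):

psd = load_psd_from_file(out_dir / 'H1_PSD.npy', delta_f=static_args['delta_f'])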
from pycbc.filter import highpass_fir, lowpass_fir
from pycbc.psd import welch, interpolate
from pycbc.catalog import Merger
import pylab

for ifo in ['H1', 'L1']:
    # Read data and remove low frequency content
    h1 = Merger("GW150914").strain(ifo)
    h1 = highpass_fir(h1, 15, 8)

    # Calculate the noise spectrum
    psd = interpolate(welch(h1), 1.0 / h1.duration)

    # whiten
    white_strain = (h1.to_frequencyseries() / psd ** 0.5).to_timeseries()

    # remove some of the high and low frequency content
    smooth = highpass_fir(white_strain, 35, 8)
    smooth = lowpass_fir(smooth, 300, 8)  # lowpass the highpassed series, not the raw whitened strain

    # time shift and flip L1
    if ifo == 'L1':
        smooth *= -1
        smooth.roll(int(.007 / smooth.delta_t))

    pylab.plot(smooth.sample_times, smooth, label=ifo)

pylab.legend()
pylab.xlim(1126259462.21, 1126259462.45)
pylab.ylim(-150, 150)
pylab.ylabel('Smoothed-Whitened Strain')
def mathched_filtering(self, m1, m2,
                       f_highPass=15,
                       fft_crop=2,
                       psd_interval=4,
                       genWave_f_lowerbound=20,
                       snrCrop=4):
    # done to avoid loading the data every time when used in a loop
    if self.mergerStrain is None:
        # this method takes in a duration instead of a time interval;
        # it automatically pulls strain data centered around the
        # GPS time stamp instead of you specifying it yourself
        self.mergerStrain = self.getMergerStrain()

    merger = Merger(self.event_id)

    '''
    There is an issue with how the strain data is read by this method
    when it is used with the filter.highpass method.
    Need to find a conversion so that a custom time interval can be used
    when selecting a data set.
    '''
    # changing from the class-wide strain array to a local one at the same
    # time as performing the highpass filtering
    strain = filter.highpass(self.mergerStrain, f_highPass)
    strain = filter.resample_to_delta_t(strain, 1.0 / 2048)

    # removing discontinuity errors that form at the ends due to resampling;
    # crops off the first and last two seconds
    conditioned = strain.crop(fft_crop, fft_crop)

    # generating the PSD that is used in the matched filtering method;
    # the PSD is used to weight "the frequency components of the
    # potential signal and data by the noise amplitude"
    psd = conditioned.psd(psd_interval)

    # this matches the PSD to our conditioned strain data
    psd = pycbc.psd.interpolate(psd, conditioned.delta_f)

    # this generates a 1/psd that is used to further filter the data
    psd = pycbc.psd.inverse_spectrum_truncation(
        psd,
        int(psd_interval * conditioned.sample_rate),
        low_frequency_cutoff=f_highPass)

    # generating the matched-filtering waveform
    hp, hc = get_td_waveform(approximant="SEOBNRv4_opt",
                             mass1=m1,
                             mass2=m2,
                             delta_t=conditioned.delta_t,
                             f_lower=genWave_f_lowerbound)

    # resizing the matched-filtering waveform to the size of our data
    hp.resize(len(conditioned))

    # shifting the modeled waveform to the approximate location of the
    # merger event
    template = hp.cyclic_time_shift(hp.start_time)

    # generating the signal-to-noise ratio data set
    snr = filter.matched_filter(template, conditioned,
                                psd=psd,
                                low_frequency_cutoff=genWave_f_lowerbound)

    # cropping out the problematic data points; there are discontinuity
    # errors at the ends of the interval
    snr = snr.crop(snrCrop + psd_interval, snrCrop)
    snrPeakIndex = abs(snr).numpy().argmax()
    snrPeak = abs(snr)[snrPeakIndex]
    snrPeakTime = snr.sample_times[snrPeakIndex]

    # # Shift the template to the peak time
    # dt = snrPeakTime - conditioned.start_time
    # aligned = template.cyclic_time_shift(dt)
    #
    # # scale the template so that it would have SNR 1 in this data
    # aligned /= sigma(aligned, psd=psd, low_frequency_cutoff=20.0)
    #
    # # scale the template amplitude and phase to the peak value
    # aligned = (aligned.to_frequencyseries() * snrPeak).to_timeseries()
    # aligned.start_time = conditioned.start_time

    # We do it this way so that we can whiten both the template and the data
    white_data = (conditioned.to_frequencyseries() / psd**0.5).to_timeseries()
    white_data = white_data.highpass_fir(f_highPass * 2, 512).lowpass_fir(230, 512)

    # Select the time around the merger
    white_data = white_data.time_slice(merger.time - .1, merger.time + .1)

    outputFormater = namedtuple(
        'signal_to_noise_ratio_data',
        ['snr', 'snrPeakIndex', 'snrPeak', 'snrPeakTime', 'white_data'])

    # returning the signal-to-noise ratio data
    return outputFormater(snr, snrPeakIndex, snrPeak, snrPeakTime, white_data)
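A hypothetical driver for the method above (the class name and constructor are illustrative; only event_id, detector_id, and mergerStrain are assumed, as used inside the method):

analysis = EventAnalysis(event_id='GW150914', detector_id='H1')  # hypothetical class holding this method
result = analysis.mathched_filtering(m1=36, m2=29)
print(result.snrPeak, result.snrPeakTime)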
def tensorboard_writer(
    queue: mp.Queue,
    log_dir: str,
    parameters: List[str],
    labels: List[str],
    static_args_ini: str,
    basis_dir: str,
    num_basis: int,
    val_coefficients: Optional[torch.Tensor] = None,
    val_gts: Optional[torch.Tensor] = None,
    figure_titles: Optional[List[str]] = None,
):
    # suppress luminosity distance debug messages
    logger = logging.getLogger('bilby')
    logger.propagate = False
    logger.setLevel(logging.WARNING)

    if log_dir is None:
        tb = SummaryWriter()
    else:
        tb = SummaryWriter(log_dir)

    _, static_args = read_ini_config(static_args_ini)
    ifos = ('H1', 'L1')
    interferometers = {
        'H1': 'Hanford',
        'L1': 'Livingston',
        'V1': 'Virgo',
        'K1': 'KAGRA',
    }

    basis_dir = Path(basis_dir)
    basis = SVDBasis(basis_dir, static_args_ini, ifos, file=None, preload=False)
    basis.load(time_translations=False, verbose=False)
    basis.truncate(num_basis)

    val_coefficients = val_coefficients.numpy()
    for j in range(val_coefficients.shape[0]):
        fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(16, 4))
        for i, ifo in enumerate(ifos):
            Vh = basis.Vh[0] if basis.Vh.shape[0] == 1 else basis.Vh[i]
            reconstruction = val_coefficients[j, i] @ Vh
            reconstruction = FrequencySeries(reconstruction,
                                             delta_f=static_args['delta_f'])
            strain = reconstruction.to_timeseries(delta_t=static_args['delta_t'])
            ax.plot(strain.sample_times, strain,
                    label=interferometers[ifo], alpha=0.6)

        ax.set_title(f'Reconstructed {figure_titles[j]} Strain')
        ax.set_xlabel('Time (s)')
        ax.set_ylabel('Strain')  # units?
        ax.set_xlim((static_args['seconds_before_event'] - 1,
                     static_args['seconds_before_event'] + 1))
        ax.legend(loc='upper left')
        ax.grid('both')

        # ax.axvline(static_args['seconds_before_event'], color='r', linestyle='--')  # merger time marker
        # ax.set_xticks([static_args['seconds_before_event']], minor=True)  # add low frequency cutoff to ticks
        # ax.set_xticklabels(['$t_{c}$'], minor=True, color='r')

        tb.add_figure(f'reconstructions/{figure_titles[j]}', fig)

    del reconstruction
    del val_coefficients
    del basis

    # bilby setup - specify the output directory and the name of the bilby run
    result = bilby.result.read_in_result(outdir='bilby_runs/GW150914',
                                         label='GW150914')
    bilby_parameters = [
        'mass_1', 'mass_2', 'phase', 'geocent_time', 'luminosity_distance',
        'a_1', 'a_2', 'tilt_1', 'tilt_2', 'phi_12', 'phi_jl',
        'theta_jn', 'psi', 'ra', 'dec'
    ]
    bilby_samples = result.posterior[bilby_parameters].values

    # shift the time of coalescence by the trigger time
    bilby_samples[:, 3] = bilby_samples[:, 3] - Merger('GW150914').time

    bilby_df = pd.DataFrame(bilby_samples.astype(np.float32),
                            columns=bilby_parameters)
    bilby_df = bilby_df.rename(columns={
        'luminosity_distance': 'distance',
        'geocent_time': 'time',
    })
    bilby_df = bilby_df.loc[:, parameters]

    domain = [
        [10, 80],                 # mass_1
        [10, 80],                 # mass_2
        [0, 2 * np.pi],           # phase
        [0, 1],                   # a_1
        [0, 1],                   # a_2
        [0, np.pi],               # tilt_1
        [0, np.pi],               # tilt_2
        [0, 2 * np.pi],           # phi_12
        [0, 2 * np.pi],           # phi_jl
        [0, np.pi],               # theta_jn
        [0, np.pi],               # psi
        [0, 2 * np.pi],           # ra
        [-np.pi / 2, np.pi / 2],  # dec
        # [0.005, 0.055],         # tc
        [100, 800],               # distance
    ]

    cosmoprior = bilby.gw.prior.UniformSourceFrame(
        name='luminosity_distance',
        minimum=1e2,
        maximum=1e3,
    )

    while True:
        try:
            epoch, scalars, samples = queue.get()

            if samples is not None:
                # requires (batch, samples, parameters)
                assert len(samples.shape) == 3, (
                    "samples must be of shape (batch, samples, parameters)")

                if figure_titles is not None:
                    # to do - better handling of passing figure info through queue
                    assert samples.shape[0] == len(figure_titles), (
                        "sample.shape[0] and figure_titles must have matching lengths")
                else:
                    figure_titles = [''] * samples.shape[0]

            for key, value in scalars.items():
                tb.add_scalar(key, value, epoch)

            if samples is not None:
                assert isinstance(samples, torch.Tensor)
                for i in range(samples.shape[0]):
                    fig = plt.figure(figsize=(20, 21))

                    if i == 0:
                        # GW150914 ONLY - hardcoded to first position
                        samples_df = pd.DataFrame(samples[i].numpy(),
                                                  columns=parameters)
                        weights = cosmoprior.prob(samples_df['distance'])
                        weights = weights / np.mean(weights)

                        corner.corner(
                            bilby_df,
                            fig=fig,
                            labels=labels,
                            levels=[0.5, 0.9],
                            quantiles=[0.25, 0.75],
                            color='tab:orange',
                            scale_hist=True,
                            plot_datapoints=False,
                        )
                        corner.corner(
                            samples_df,
                            fig=fig,
                            levels=[0.5, 0.9],
                            quantiles=[0.25, 0.75],
                            color='tab:blue',
                            scale_hist=True,
                            plot_datapoints=False,
                            show_titles=True,
                            weights=weights * len(bilby_samples) / len(samples_df),
                            range=domain,
                        )
                        fig.legend(
                            handles=[
                                mpatches.Patch(color='tab:blue',
                                               label='Neural Spline Flow'),
                                mpatches.Patch(color='tab:orange',
                                               label='Bilby (dynesty)'),
                            ],
                            loc='upper right',
                            fontsize=16,
                        )
                    else:
                        samples_df = pd.DataFrame(samples[i].numpy(),
                                                  columns=parameters)
                        weights = cosmoprior.prob(samples_df['distance'])
                        weights = weights / np.mean(weights)

                        corner.corner(
                            samples_df,
                            fig=fig,
                            labels=labels,
                            levels=[0.5, 0.9],
                            quantiles=[0.25, 0.75],
                            color='tab:blue',
                            truth_color='tab:orange',
                            scale_hist=True,
                            plot_datapoints=False,
                            show_titles=True,
                            truths=val_gts[i].numpy() if val_gts is not None else None,
                            weights=weights * len(bilby_samples) / len(samples_df),
                            range=domain,
                        )
                        fig.legend(
                            handles=[
                                mpatches.Patch(color='tab:blue',
                                               label='Neural Spline Flow'),
                                mpatches.Patch(color='tab:orange',
                                               label='Ground Truth'),
                            ],
                            loc='upper right',
                            fontsize=16,
                        )

                    fig.suptitle(f'{figure_titles[i]} Parameter Estimation',
                                 fontsize=18)
                    # fig.savefig(f'gwpe/figures/{figure_titles[i]}.png')
                    tb.add_figure(f'posteriors/{figure_titles[i]}', fig, epoch)

            tb.flush()

        except Exception as e:
            # warning: assertions may not trigger exception to exit process
            traceback.print_exc()
            os.kill(os.getpid(), signal.SIGSTOP)  # to do: check kill command
from pycbc.frame import read_frame
from pycbc.filter import highpass_fir, lowpass_fir
from pycbc.waveform import get_fd_waveform
from pycbc.psd import welch, interpolate
from pycbc.catalog import Merger
import pylab

for ifo in ['H1', 'L1']:
    # Read data and remove low frequency content
    h1 = Merger("GW150914").strain(ifo)
    h1 = highpass_fir(h1, 15, 8)

    # Calculate the noise spectrum
    psd = interpolate(welch(h1), 1.0 / h1.duration)

    # whiten
    white_strain = (h1.to_frequencyseries() / psd ** 0.5).to_timeseries()

    # remove some of the high and low frequency content
    smooth = highpass_fir(white_strain, 35, 8)
    smooth = lowpass_fir(smooth, 300, 8)  # lowpass the highpassed series, not the raw whitened strain

    # time shift and flip L1
    if ifo == 'L1':
        smooth *= -1
        smooth.roll(int(.007 / smooth.delta_t))

    pylab.plot(smooth.sample_times, smooth, label=ifo)

pylab.legend()
pylab.xlim(1126259462.21, 1126259462.45)
def generate_gw150914_context(
    n: int,
    noise_dir: Union[str, os.PathLike],
    psd_dir: Union[str, os.PathLike],
    basis_dir: Union[str, os.PathLike],
    static_args_ini: str,
    ifos: List[str] = ['H1', 'L1'],
    verbose: bool = False,
):
    """Loads the GW150914 segment from the O1 dataset, applies signal
    processing steps such as whitening and filtering, then projects onto
    reduced basis coefficients."""
    _, static_args = read_ini_config(static_args_ini)

    basis = SVDBasis(basis_dir, static_args_ini, ifos, file=None, preload=False)
    basis.load(time_translations=False, verbose=verbose)
    if n is not None:
        basis.truncate(n)

    # get GW150914 test data
    timeline = NoiseTimeline(data_dir=noise_dir, ifos=ifos)
    strains = timeline.get_strains(
        int(Merger('GW150914').time - static_args['seconds_before_event'] - 1),
        int(static_args['waveform_length'] + 2))

    psds = {}
    for ifo in ifos:
        # coloured noise from psd
        psd_file = Path(psd_dir) / f'{ifo}_PSD.npy'
        assert psd_file.is_file(), f"{psd_file} does not exist."
        psds[ifo] = load_psd_from_file(psd_file, delta_f=static_args['delta_f'])

    start_time = Merger('GW150914').time - static_args['seconds_before_event']
    end_time = Merger('GW150914').time + static_args['seconds_after_event']

    coefficients = []
    for i, ifo in enumerate(strains):
        strains[ifo] = strains[ifo].time_slice(start_time, end_time)

        # whiten with settings associated to longer strain
        strains[ifo] = strains[ifo] * get_tukey_window(
            static_args['sample_length'])  # tukey window
        strains[ifo] = strains[ifo].to_frequencyseries(
            delta_f=static_args['delta_f'])  # fft
        strains[ifo] /= psds[ifo]**0.5  # whiten
        strains[ifo][:int(static_args['f_lower'] / static_args['delta_f'])] = 0.  # zero bins below f_lower (a 20 Hz high-pass)
        strains[ifo] = strains[ifo][:static_args['fd_length']]  # truncate to 1024 Hz

        # project GW150914 strain onto the reduced basis
        V = basis.V[0] if basis.V.shape[0] == 1 else basis.V[i]
        coefficients.append(strains[ifo] @ V)

    coefficients = np.stack(coefficients)

    # flatten for 1-d residual network input
    # coefficients = np.concatenate([coefficients.real, coefficients.imag], axis=0)
    # coefficients = coefficients.reshape(coefficients.shape[0]*coefficients.shape[1])

    return coefficients
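A hypothetical call (directory paths and ini file name are illustrative, following the layout suggested by generate_psd above):

coefficients = generate_gw150914_context(
    n=100,                                     # reduced-basis elements to keep
    noise_dir='data/noise',                    # hypothetical paths
    psd_dir='data/psds',
    basis_dir='data/basis',
    static_args_ini='config/static_args.ini',
)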
# IPython log file

from pycbc.catalog import Merger
m = Merger('GW150914')
data = {ifo: m.strain(ifo) for ifo in ('H1', 'L1')}
ligo_data = dat['L1']  # NameError: typo for `data`, corrected on the next line
ligo_data = data['L1']
ligo_data
type(ligo_data)
import pycbc
get_ipython().run_line_magic('pinfo', 'pycbc.types.timeseries.TimeSeries')
pycbc.types.timeseries.TimeSeries.__bases__
pycbc.types.timeseries.Array.__bases__
get_ipython().run_line_magic('pinfo2', 'pycbc.types.timeseries.TimeSeries')
get_ipython().run_line_magic('pinfo2', 'pycbc.types.timeseries.Array')
get_ipython().run_line_magic('pinfo', 'ligo_data._data')
type(ligo_data._data)
get_ipython().run_line_magic('pinfo', 'ligo_data._data')
type(ligo_data._data)
pycbc.types.aligned.ArrayWithAligned.__bases__
get_ipython().run_line_magic('pinfo2', 'ligo_data.highpass_fir')
get_ipython().run_line_magic('pinfo', 'pycbc.filter.highpass_fir')
import pycbc.filter
get_ipython().run_line_magic('pinfo', 'pycbc.filter.highpass_fir')
get_ipython().run_line_magic('pinfo2', 'pycbc.filter.highpass_fir')
get_ipython().run_line_magic('pinfo2', 'ligo_data.highpass_fir')
freq = 15
get_ipython().run_line_magic('pinfo2', 'ligo_data.highpass_fir')
get_ipython().run_line_magic('pinfo2', 'pycbc.filter.highpass_fir')
"""The data used were fetched online. All the events downloaded from these websites. Later on, we will look at only one of the events.""" !wget -nc https://dcc.ligo.org/public/0146/P1700349/001/H-H1_LOSC_CLN_4_V1-1187007040-2048.gwf !wget -nc https://dcc.ligo.org/public/0146/P1700349/001/L-L1_LOSC_CLN_4_V1-1187007040-2048.gwf """Pick the event with the code *GW170817*. We pick the time frame from 224 seconds before merging and end 32 seconds after merging.""" # Commented out IPython magic to ensure Python compatibility. # %matplotlib inline import pylab from pycbc.filter import highpass from pycbc.catalog import Merger from pycbc.frame import read_frame merger = Merger("GW170817") # the merging part of the event, event = two black holes merging and causing gravitational wave strain, stilde = {}, {} for ifo in ['L1', 'H1']: ts = read_frame("{}-{}_LOSC_CLN_4_V1-1187007040-2048.gwf".format(ifo[0], ifo), '{}:LOSC-STRAIN'.format(ifo), start_time=merger.time - 224, # merger.time = the time of merging end_time=merger.time + 32, check_integrity=False) """Cleaning and applying highpass filter. Downsample to 2048 Hz to make the data analysis more convenient. Power density of the noise is higher than the signal. At higher frequency the amplitude of the noise is lower. And then graph.""" from pycbc.catalog import Merger from pycbc.filter import resample_to_delta_t, highpass from pycbc.catalog import Merger from pycbc.filter import resample_to_delta_t, highpass
print(fd_approximants())

# Commented out IPython magic to ensure Python compatibility.
!wget -nc https://dcc.ligo.org/public/0146/P1700349/001/H-H1_LOSC_CLN_4_V1-1187007040-2048.gwf
!wget -nc https://dcc.ligo.org/public/0146/P1700349/001/L-L1_LOSC_CLN_4_V1-1187007040-2048.gwf

# %matplotlib inline
import pylab
from pycbc.filter import highpass
from pycbc.catalog import Merger
from pycbc.frame import read_frame

merger = Merger("GW170817")

import numpy as np
import pycbc.types

strain, stilde = {}, {}
for ifo in ['H1', 'L1']:
    # We'll download the data and select 256 seconds that include the event time
    ts = read_frame("{}-{}_LOSC_CLN_4_V1-1187007040-2048.gwf".format(ifo[0], ifo),
                    '{}:LOSC-STRAIN'.format(ifo),
                    start_time=merger.time - 224,
                    end_time=merger.time + 32,
                    check_integrity=False)

    # Read the detector data and remove low frequency content
    strain[ifo] = highpass(ts, 15)

    # Remove time corrupted by the high pass filter
    strain[ifo] = strain[ifo].crop(4, 4)

    # Also create a frequency domain version of the data
    stilde[ifo] = strain[ifo].to_frequencyseries()

# print(strain['H1'].delta_t)
pylab.plot(strain['H1'].sample_times, strain['H1'])