def read(cls, subsys, filename):
    """Read from a file

    Parameters
    ----------
    subsys : `str`
        Channel subsystem
    filename : `str`
        File to read from
    """
    # open file
    if isinstance(filename, str):
        f = h5py.File(filename, 'r')
    else:
        f = filename
    chandict = {subsys: f[subsys].keys()}
    darm_channel = f['psd1'].keys()[0].split()[0].strip()
    ss = PEMSubsystem(subsys, darm_channel, chandict)
    psd1 = FrequencySeries.read(f['psd1'][f['psd1'].keys()[0]])
    for channel in chandict[subsys]:
        N = int(f[subsys][channel]['metadata'][0, 1])
        st = int(f[subsys][channel]['metadata'][1, 1])
        et = int(f[subsys][channel]['metadata'][2, 1])
        csd12 = FrequencySeries.read(f[subsys][channel]['csd mean'])
        psd2 = FrequencySeries.read(f[subsys][channel]['%s mean' % channel])
        ss[channel] = PEMCoherenceSegment(darm_channel, channel, csd12,
                                          psd1, psd2, N, st, et)
    ss.failed_channels = f['failed_channels'][:]
    f.close()
    return ss
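# Hedged usage sketch for the read() classmethod above: FrequencySeries.read
# accepts either a filename or an already-open h5py object, which is what the
# loop above relies on. 'coherence.hdf5' and the 'psd1' group layout are
# hypothetical examples, not taken from the original code.
import h5py
from gwpy.frequencyseries import FrequencySeries

with h5py.File('coherence.hdf5', 'r') as f:            # hypothetical file
    darm_name = list(f['psd1'].keys())[0]               # first dataset under 'psd1'
    psd1 = FrequencySeries.read(f['psd1'][darm_name])   # read from the open handle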
def test_read_ligolw(self):
    with tempfile.NamedTemporaryFile(mode='w+') as fobj:
        fobj.write(LIGO_LW_ARRAY)
        array = FrequencySeries.read(
            fobj, 'psd', match={'channel': 'X1:TEST-CHANNEL_1'})
        utils.assert_array_equal(array, list(range(1, 11)) / units.Hz)
        utils.assert_array_equal(array.frequencies,
                                 list(range(10)) * units.Hz)
        assert numpy.isclose(array.epoch.gps, 1000000000)  # precision gah!
        assert array.unit == units.Hz ** -1

        array2 = FrequencySeries.read(
            fobj, 'psd', match={'channel': 'X1:TEST-CHANNEL_2'})
        assert array2.epoch is None

        # assert errors
        with pytest.raises(ValueError):
            FrequencySeries.read(fobj, 'blah')
        with pytest.raises(ValueError):
            FrequencySeries.read(fobj, 'psd')
        with pytest.raises(ValueError):
            FrequencySeries.read(
                fobj, 'psd',
                match={'channel': 'X1:TEST-CHANNEL_1', 'blah': 'blah'})
def _percentile(axis, pctl=50, unit='um', suffix='', _dir='', **kwargs):
    fname = './data2/{3}/{0}_{1}.hdf5'.format(axis, pctl, suffix, _dir)
    model = kwargs.pop('model', None)
    _asd = FrequencySeries.read(fname)**0.5
    if model == '120QA':
        amp = 10**(30.0 / 20.0)
    elif model == 'compact':
        amp = 10**(45.0 / 20.0)
    seis = Trillium(model)
    v2vel = seis.v2vel
    c2v = 20.0 / 2**15
    _asd = v2vel(_asd) * c2v / amp * 1e6
    if unit == 'um':
        asd = _asd / (2.0 * np.pi * _asd.frequencies.value)
        #asd.write('./LongTerm_{0}_{1}_DISP.txt'.format(axis,pctl),format='txt')
    elif unit == 'm':
        asd = _asd / (2.0 * np.pi * _asd.frequencies.value) * 1e-6
        asd.write('./{0}_{1}_DISP.txt'.format(axis, pctl), format='txt')
    elif unit == 'um/sec':
        asd = _asd
        asd.write('./{0}_{1}_VELO.txt'.format(axis, pctl), format='txt')
    elif unit == 'um/sec/sec':
        asd = _asd * (2.0 * np.pi * _asd.frequencies.value)
        #asd.write('./LongTerm_{0}_{1}_ACC.txt'.format(axis,pctl),format='txt')
    elif unit == 'm/sec/sec':
        asd = _asd * (2.0 * np.pi * _asd.frequencies.value) * 1e-6
    else:
        raise ValueError('!!1', unit)
    return asd
def read(fname):
    try:
        data = FrequencySeries.read(fname, format='hdf5')
    except IOError as ioe:
        print(ioe)
        exit()
    return data
def read_frequencyseries(filename):
    """Read a `~gwpy.frequencyseries.FrequencySeries` from a file

    If using HDF5, the filename can be given as a combined filename/path,
    i.e. ``test.hdf5/path/to/dataset``.

    Parameters
    ----------
    filename : `str`
        path of file to read

    Returns
    -------
    series : `~gwpy.frequencyseries.FrequencySeries`
        the data as read

    Raises
    ------
    astropy.io.registry.IORegistryError
        if the input format cannot be identified or is not registered
    """
    # try and parse path in HDF5 file if given
    try:
        ext = HDF5_FILENAME.search(filename).groupdict()['ext']
    except AttributeError:  # no match
        kwargs = {}
    else:
        kwargs = {'path': filename.rsplit(ext, 1)[1]}

    # read file
    try:
        return FrequencySeries.read(filename, **kwargs)
    except IORegistryError:
        if filename.endswith('.gz'):
            fmt = os.path.splitext(filename[:-3])[-1]
        else:
            fmt = os.path.splitext(filename)[-1]
        return FrequencySeries.read(filename, format=fmt.lstrip('.'), **kwargs)
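# Usage sketch for read_frequencyseries() (the filenames are made-up examples):
# the HDF5 form can carry the in-file dataset path after the file extension, as
# the docstring describes; other extensions fall back to extension-based format
# detection in the except branch above.
series = read_frequencyseries('spectrum.hdf5/path/to/dataset')
reference = read_frequencyseries('reference-asd.txt')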
def kagra_seis(axis='X', pctl=90):
    if axis in ['X', 'Y', 'Z']:
        prefix = '/Users/miyo/Git/miyopy/miyopy/seismodel/JGW-T1910436-v5/'
        fname = 'LongTerm_{axis}_{pctl}_VELO.txt'.format(axis=axis, pctl=pctl)
        vel_asd = FrequencySeries.read(prefix + fname)
        return vel_asd
    elif axis == 'H':
        vel_x = kagra_seis('X', pctl)
        vel_y = kagra_seis('Y', pctl)
        vel_h = (vel_x**2 + vel_y**2)**(1. / 2)
        return vel_h
    elif axis == 'V':
        return kagra_seis('Z', pctl)
    else:
        raise ValueError('unknown axis: {0}'.format(axis))
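# Example call (sketch): the horizontal ('H') axis is the quadrature sum of the
# X and Y percentile spectra, as implemented above; 90 selects the 90th-percentile
# long-term velocity ASD.
vel_h_90 = kagra_seis(axis='H', pctl=90)
print(vel_h_90.unit, vel_h_90.frequencies[:3])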
def _percentile(axis, pctl=50, unit='um/sec', **kwargs):
    _asd = FrequencySeries.read(
        './data2/LongTerm_{0}_{1}.hdf5'.format(axis, pctl))**0.5
    amp = 10**(30.0 / 20.0)
    c2v = 20.0 / 2**15
    _asd = v2vel(_asd) * c2v / amp * 1e6
    if unit == 'um':
        asd = _asd / (2.0 * np.pi * _asd.frequencies.value)
        #asd.write('./LongTerm_{0}_{1}_DISP.txt'.format(axis,pctl),format='txt')
    elif unit == 'um/sec':
        asd = _asd
        #asd.write('./LongTerm_{0}_{1}_VELO.txt'.format(axis,pctl),format='txt')
    else:
        raise ValueError('!!1')
    return asd
def plot_spectra(clusters, channel, unit='cts', xlog=True, legend=None,
                 xlim=None, **kwargs):
    from glob import glob
    from gwpy.frequencyseries import FrequencySeries
    from gwpy.plot import Plot

    title = channel
    psds = {}
    for cluster in clusters:
        for filename in glob('*.hdf5'):
            try:
                psds[cluster] = FrequencySeries.read(filename,
                                                     f'{cluster}-{channel}')
                print(f'found in {filename}')
                break
            except KeyError:
                continue
        else:
            raise KeyError(f'Could not find Nº{cluster}')

    if legend is None:
        legend = clusters

    # plot the group in one figure.
    plt = Plot(*(psds[cluster] for cluster in psds),
               separate=False, sharex=True, zorder=1, **kwargs)
    if xlim is not None:
        plt.gca().set_xlim(xlim)
    plt.gca().set_ylim((1e-48, 1e-37))

    # modify the figure as a whole.
    # plt.add_segments_bar(dq, label='')
    # plt.gca().set_color_cycle(['red', 'green', 'blue', 'yellow'])
    if xlog:
        plt.gca().set_xscale('log')
    plt.gca().set_yscale('log')
    plt.gca().set_ylabel(f'Power Spectral Density [{unit}^2/Hz]')
    plt.suptitle(title)
    plt.legend(legend, prop={'size': 15})

    # save to png.
    plt.save(f'{title}.png')
def _percentile(axis, pctl=50, unit='um/sec', **kwargs):
    suffix = '_{0}_{1}'.format(start, end)
    fname = fname_hdf5_longasd(axis, pctl, suffix=suffix, prefix='./tmp')
    _asd = FrequencySeries.read(fname)**0.5
    amp = 10**(30.0 / 20.0)
    c2v = 20.0 / 2**15
    _asd = v2vel(_asd) * c2v / amp * 1e6
    if unit == 'um':
        asd = _asd / (2.0 * np.pi * _asd.frequencies.value)
        asd.write('./LongTerm_{0}_{1}_DISP.txt'.format(axis, pctl),
                  format='txt')
    elif unit == 'um/sec':
        asd = _asd
        asd.write('./LongTerm_{0}_{1}_VELO.txt'.format(axis, pctl),
                  format='txt')
    else:
        raise ValueError('!!1')
    return asd
def save_asd(axis, available, percentile=50, **kwargs):
    prefix = kwargs.pop('prefix', './data')
    write = kwargs.pop('write', None)
    write_gwf = kwargs.pop('write_gwf', None)
    skip = kwargs.pop('skip', None)

    asd_fmt = '{0}/{1}_{2:02d}_LongTerm.hdf5'.format(prefix, axis, percentile)
    if os.path.exists(asd_fmt):
        #log.debug(asd_fmt+' Read')
        return FrequencySeries.read(asd_fmt, format='hdf5')

    log.debug(asd_fmt + ' Saving {0:02d} percentile'.format(percentile))
    fnamelist = [prefix + '/{0}_{1}_{2}.hdf5'.format(axis, start, end)
                 for start, end in available]
    # read the first segment, then append the remaining ones
    specgrams = Spectrogram.read(fnamelist[0], format='hdf5')
    for fname in fnamelist[1:]:
        specgrams.append(Spectrogram.read(fname, format='hdf5'), gap='ignore')
    asd = specgrams.percentile(percentile)
    asd.write(asd_fmt, format='hdf5', overwrite=True)
    return asd
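# Side note (sketch on synthetic data, names local to this example):
# Spectrogram.percentile(), as used in save_asd() above, collapses the time
# axis into a single FrequencySeries at the requested percentile.
import numpy as np
from gwpy.spectrogram import Spectrogram

sg = Spectrogram(np.abs(np.random.randn(100, 50)), dt=1, df=1)
asd_median = sg.percentile(50)   # bin-wise median over the 100 time steps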
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from gwpy.frequencyseries import FrequencySeries


def act_lim(fmax, width, mass):
    vel = np.sqrt(2 * fmax * width / mass)  # kinetic energy
    return vel.decompose()


if __name__ == '__main__':
    axis = 'Y'
    asd_10 = FrequencySeries.read(
        './LongTermSeismicNoise/LongTerm_{0}_10_VELO.txt'.format(axis))  # um/sec/rtHz
    asd_50 = FrequencySeries.read(
        './LongTermSeismicNoise/LongTerm_{0}_50_VELO.txt'.format(axis))  # um/sec/rtHz
    asd_90 = FrequencySeries.read(
        './LongTermSeismicNoise/LongTerm_{0}_90_VELO.txt'.format(axis))  # um/sec/rtHz
    fig, ax = plt.subplots(1, 1, figsize=(10, 10))
    df = asd_10.df.value
    blrms = lambda asd, l, h: np.sqrt((asd**2).crop(l, h).sum() * df * 1.5)
    for asd, name in zip([asd_50, asd_90], ['50', '90']):
        print('--- {1} {0}th'.format(name, axis))
        for l, h in zip([0.03, 0.10, 0.30, 1.0, 3.0, 10.0],
                        [0.10, 0.30, 1.0, 3.0, 10.0]):
            a = blrms(asd, l, h)
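# Quick self-check (not part of the original script) of the blrms() definition
# above: for a flat ASD of 1 (arbitrary units)/rtHz the band-limited RMS reduces
# to sqrt(bandwidth * 1.5), the 1.5 being the window factor copied from above.
import numpy as np
from gwpy.frequencyseries import FrequencySeries

flat = FrequencySeries(np.ones(1000), f0=0.01, df=0.01)
_df = flat.df.value
_blrms = lambda asd, l, h: np.sqrt((asd**2).crop(l, h).sum() * _df * 1.5)
print(_blrms(flat, 0.1, 1.0))   # ~ sqrt(0.9 * 1.5) ~ 1.16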
from gwpy.frequencyseries import FrequencySeries
import matplotlib.pyplot as plt
import numpy as np

asd_no5_temp = FrequencySeries.read('asd_no5_temp.hdf5')
asd_no5_humd = FrequencySeries.read('asd_no5_humd.hdf5')
asd_no5_baro = FrequencySeries.read('asd_no5_baro.hdf5')
asd_no6_temp = FrequencySeries.read('asd_no6_temp.hdf5')
asd_no6_humd = FrequencySeries.read('asd_no6_humd.hdf5')
asd_no6_baro = FrequencySeries.read('asd_no6_baro.hdf5')

from astropy import units as u
from astropy.constants import k_B

Vnn = 3e-6 * u.V / (u.Hz)**(1 / 2.)
fcv = 2.7 * u.Hz
fcc = 140.0 * u.Hz
Inn = 0.4e-12 * u.A / (u.Hz)**(1 / 2.)
R1 = 10e3 * u.ohm
R2 = 10e3 * u.ohm
#R3 = 100e3*u.ohm
Rs = 10e3 * u.ohm
T = (273 + 28) * u.K
e1 = np.sqrt(4 * np.pi * k_B * T * R1) * (1 + R2 / R1)
e2 = np.sqrt(4 * np.pi * k_B * T * R2)
es = np.sqrt(4 * np.pi * k_B * T * Rs) * (R2 / R1)
inn_s = Inn * Rs * (1 + R2 / R1)
inn_2 = Inn * R2
inn = Inn * Rs
vnn = Vnn * (1 + R2 / R1)
# ax1.set_ylabel('Displacement [um]')
# ax1.set_ylim(-8,2)
# ax1.set_yticks(range(-8,3,2))
# ax2.plot(xarm,color='k',label='X-Arm Feedback Signal')
# ax2.set_xscale('auto-gps')
# ax2.set_ylabel('Displacement [um]')
# ax2.set_ylim(-24,-14)
# ax2.set_yticks(range(-24,-13,2))
# ax1.legend(fontsize=20,loc='upper left')
# ax2.legend(fontsize=20,loc='upper left')
# plt.savefig('timeseries.png')
# plt.close()
# exit()

# simulation
total = FrequencySeries.read('../vismodel/total_wsc.hdf5')
#total = FrequencySeries.read('../vismodel/total_wosc.hdf5')

# Timeseries when No control
gif, xarm, diff_seis, comm_seis = timeseries(start, end)
coh_gif2xarm, coh_gif2seis, coh_xarm2seiscomm = coherence(
    gif, xarm, diff_seis, comm_seis, fftlen=fftlen, ovlp=ovlp)
gif, xarm, diff_seis, comm_seis = asd(gif, xarm, diff_seis, comm_seis,
                                      fftlen=fftlen,
def representative_spectra(channels, start, stop, rate, label='kmeans-labels',
                           filename=DEFAULT_FILENAME, prefix='.',
                           downloader=TimeSeriesDict.get,
                           cluster_numbers=None, groups=None, **kwargs):
    """
    Make representative spectra for each cluster based on the median psd
    for minutes in that cluster. Downloads only the raw minutes in the
    cluster to save.
    """
    if groups is None:
        groups = channels

    # read the labels from the save file.
    labels = TimeSeries.read(filename, label,
                             start=to_gps(start), end=to_gps(stop))
    logger.info(f'Read labels {start} to {stop} from {filename}')

    if cluster_numbers is None:
        clusters = list(range(max(labels.value) + 1))
        cluster_counts = list(
            len(labels.value[labels.value == c]) for c in clusters)
        largest_cluster = cluster_counts.index(max(cluster_counts))
        clusters.remove(largest_cluster)
        logger.info(
            f'Largest cluster found to be Nº{largest_cluster} '
            f'({100 * max(cluster_counts) // len(labels.value)}%). '
            f'Doing {clusters}.')
        cluster_counts.remove(max(cluster_counts))
    else:
        clusters = cluster_numbers
        cluster_counts = list(
            len(labels.value[labels.value == c]) for c in clusters)

    t, v, d = labels.times, labels.value, diff(labels.value)
    pairs = list(zip([t[0]] + list(t[:-1][d != 0]),
                     list(t[1:][d != 0]) + [t[-1]]))
    values = list(v[:-1][d != 0]) + [v[-1]]
    assert len(pairs) == len(values)  # need to include start-| and |-end
    # l|r l|r l|r l|r
    # l,r l,r l,r l,r
    # l r,l r,l r,l r
    # zip(start + l[1:], r[:-1] + stop)
    print(pairs)
    for pair in pairs:
        print(int(pair[1].value) - int(pair[0].value))
    print(values)

    # use h5py to make a mutable object pointing to a file on disk.
    save_file, filename = path2h5file(
        get_path(f'spectra-cache {start}', 'hdf5', prefix=prefix))
    logger.debug(f'Initiated hdf5 stream to {filename}')

    logger.info(f'Patching {filename}...')
    for i, (dl_start, end) in enumerate(pairs):
        if values[i] in clusters:
            if not data_exists(channels, to_gps(end).seconds, save_file):
                logger.debug(
                    f'Downloading Nº{values[i]} from {dl_start} to {end}...')
                try:
                    dl = downloader(channels,
                                    start=to_gps(dl_start) - LIGOTimeGPS(60),
                                    end=to_gps(end) + LIGOTimeGPS(seconds=1))
                    out = TimeSeriesDict()
                    for n in dl:
                        out[n] = dl[n].resample(**better_aa_opts(dl[n], rate))
                    write_to_disk(out, to_gps(dl_start).seconds, save_file)
                except RuntimeError:
                    # Cannot find all relevant data on any known server
                    logger.warning(
                        f"SKIPPING Nº{values[i]} from {dl_start} to {end} !!")

    logger.info('Reading data...')
    data = TimeSeriesDict.read(save_file, channels)
    logger.info('Starting PSD generation...')

    f = data[channels[0]].crop(
        start=to_gps(data[channels[0]].times[-1]) - LIGOTimeGPS(60),
        end=to_gps(data[channels[0]].times[-1])).psd().frequencies
    d = (to_gps(labels.times[-1]).seconds - to_gps(labels.times[1]).seconds)

    for i, cluster in enumerate(clusters):
        try:
            psds = {channel: FrequencySeries.read(filename,
                                                  f'{cluster}-{channel}')
                    for channel in channels}
            logger.info(f'Loaded Nº{cluster}.')
        except KeyError:
            logger.info(
                f'Doing Nº{cluster} '
                f'({100 * cluster_counts[i] / len(labels.value):.2f}% of data)...')
            with Progress(f'psd Nº{cluster} ({i + 1}/{len(clusters)})',
                          len(channels) * d) as progress:
                psds = {
                    channel: FrequencySeries(
                        median(stack([
                            progress(data[channel].crop,
                                     pc * d + (to_gps(time).seconds
                                               - to_gps(labels.times[1]).seconds),
                                     start=to_gps(time) - LIGOTimeGPS(60),
                                     end=to_gps(time)).psd().value
                            for c, time in zip(labels.value, labels.times)
                            if c == cluster]), axis=0),
                        frequencies=f, name=f'{cluster}-{channel}')
                    for pc, channel in enumerate(channels)}
            for name in psds.keys():
                psds[name].write(filename, **writing_opts)

        # plotting is slow, so show a nice progress bar.
        logger.debug('Initiating plotting routine...')
        with Progress('plotting', len(groups)) as progress:
            for p, (group, lbls, title) in enumerate(groups):
                # plot the group in one figure.
                plt = Plot(*(psds[channel] for channel in group),
                           separate=False, sharex=True, zorder=1, **kwargs)
                # plt.gca().set_xlim((30,60))
                # modify the figure as a whole.
                # plt.add_segments_bar(dq, label='')
                plt.gca().set_xscale('log')
                plt.gca().set_yscale('log')
                plt.suptitle(title)
                plt.legend(lbls)
                # save to png.
                progress(plt.save, p,
                         get_path(f'{cluster}-{title}', 'png',
                                  prefix=f'{prefix}/{cluster}'))
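# Minimal, self-contained sketch (synthetic data, illustrative names only) of
# the median-PSD idea used by representative_spectra() above: stack the
# per-segment PSDs belonging to one cluster and take the bin-wise median as
# that cluster's representative spectrum.
import numpy as np
from numpy import median, stack
from gwpy.timeseries import TimeSeries
from gwpy.frequencyseries import FrequencySeries

ts = TimeSeries(np.random.randn(256 * 60), sample_rate=256)   # 60 s of noise
segment_psds = [ts.crop(10 * k, 10 * (k + 1)).psd(fftlength=5)
                for k in range(6)]
representative = FrequencySeries(
    median(stack([p.value for p in segment_psds]), axis=0),
    frequencies=segment_psds[0].frequencies,
    name='median-psd')
print(representative.max())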
def act_lim(fmax, width, mass):
    vel = np.sqrt(2 * fmax * width / mass)  # kinetic energy
    return vel.decompose()


if __name__ == '__main__':
    # Ground Velocity
    finess = 50                            #[m] Green Finess
    width = 532e-9 * u.m / (2 * finess)    #[m] Green PDH Linewidth
    fmax = 1.5e-4 * u.N                    #[N] Max force on IM
    mass = (22.8 + 20.5) * u.kg            #[kg] Mass of (Mirror + IM)
    req_vel = act_lim(fmax, width, mass)
    #print 'Required Ground Velocity : {0:2.1e}'.format(vel)
    # req_vel = req_vel.value * 1e6

    x_10 = FrequencySeries.read(
        './LongTermSeismicNoise/LongTerm_X_10_VELO.txt')  # um/sec/rtHz
    x_50 = FrequencySeries.read(
        './LongTermSeismicNoise/LongTerm_X_50_VELO.txt')  # um/sec/rtHz
    x_90 = FrequencySeries.read(
        './LongTermSeismicNoise/LongTerm_X_90_VELO.txt')  # um/sec/rtHz
    w = 2.0 * np.pi * x_10.frequencies.value
    x_10 = x_10 / w
    x_50 = x_50 / w
    x_90 = x_90 / w
    x_10 = x_10 * np.sqrt(2)
    x_50 = x_50 * np.sqrt(2)
    x_90 = x_90 * np.sqrt(2)
    x_rms_10 = x_10.rms()
    x_rms_50 = x_50.rms()
    x_rms_90 = x_90.rms()
    fig, ax = plt.subplots(1, 1, figsize=(10, 10))
if readgif:
    #strain = TimeSeries.read('Dec10_4days_strain.gwf','CALC_STRAIN',start,end)
    strain = TimeSeries.read('2019Dec10_3hours_strain.gwf', 'CALC_STRAIN',
                             start, end)
    gif = strain * 3e3 * 1e6
    plot = gif.plot(ylabel='Strain')
    plot.savefig('img_gif.png')
    plot.close()
    _, gif, _ = asd(gif, fftlength=fftlength)

# -----------------------------------------------
# Calibrated ASD
# -----------------------------------------------
if readhdf5:
    print('Read from hdf5')
    ixv1 = FrequencySeries.read('./fs_ixv1.hdf5', format='hdf5')
    exv = FrequencySeries.read('./fs_exv.hdf5', format='hdf5')
    diff12 = FrequencySeries.read('./fs_diff12.hdf5', format='hdf5')
    diff13 = FrequencySeries.read('./fs_diff13.hdf5', format='hdf5')
    comm12 = FrequencySeries.read('./fs_comm12.hdf5', format='hdf5')
    comm13 = FrequencySeries.read('./fs_comm13.hdf5', format='hdf5')
    tr120_selfnoise = FrequencySeries.read('./fs_selfnoise.hdf5',
                                           format='hdf5')
    df = ixv1.df
else:
    _, ixv1, _ = asd(ixv1, fftlength=fftlength)
    _, exv, _ = asd(exv, fftlength=fftlength)
    _, diff12, _ = asd(diff12, fftlength=fftlength, window='hanning', ovlp=0.5)
    _, diff13, _ = asd(diff13, fftlength=fftlength, window='hanning', ovlp=0.5)
    _, comm12, _ = asd(comm12, fftlength=fftlength, window='hanning', ovlp=0.5)
    _, comm13, _ = asd(comm13, fftlength=fftlength, window='hanning', ovlp=0.5)
etmx_acc_mat = [[ 0.6500, -0.1483, -0.4185],  # L
                [-0.0099, -0.5598,  0.9110],  # T
                [ 0.8679,  0.4580,  1.0462]]  # Y
etmx_acc_mat = np.array(etmx_acc_mat)
#                 H1,      H2,      H3,
itmx_acc_mat = [[ 0.5152, -0.1943, -0.3234],  # L
                [ 0.0858, -0.4984,  0.4062],  # T
                [ 0.5617,  0.5664,  0.5876]]  # Y
itmx_acc_mat = np.array(itmx_acc_mat)

if on:
    # 5. GIF injection
    start = 'Sep 17 2019 05:39:00 JST'
    end = 'Sep 17 2019 05:49:00 JST'
    onoff = 'ON'
    total = FrequencySeries.read('../vismodel/total_wsc.hdf5')
else:
    # 4. Gain 0.5
    start = 'Sep 17 2019 05:26:00 JST'
    end = 'Sep 17 2019 05:36:00 JST'
    onoff = 'OFF'
    total = FrequencySeries.read('../vismodel/total_wosc_1.0.hdf5')
    total = FrequencySeries.read('../vismodel/total_wosc_1.4.hdf5')
    #total = FrequencySeries.read('../vismodel/total_wosc.hdf5')

# setting
fftlen = 2**6
ovlp = fftlen / 2.0


def timeseries(start, end, plot=True):
    kwargs = {'verbose': True, 'host': '10.68.10.121', 'port': 8088}
         va='bottom')
ax2.text(150, -180,
         'BW : {0:3.2e}, Window : {1}, AVE : {2:3d}'.format(df, window, ave),
         rotation=90, ha='left', va='bottom')
plt.savefig('img_coherence.png')

# -----------------------------------------------
# Calibrated ASD
# -----------------------------------------------
if readhdf5:
    print('Read from hdf5')
    ixv1 = FrequencySeries.read('./fs_ixv1.hdf5', format='hdf5')
    diff13 = FrequencySeries.read('./fs_diff13.hdf5', format='hdf5')
    comm13 = FrequencySeries.read('./fs_comm13.hdf5', format='hdf5')
    tr120_selfnoise = FrequencySeries.read('./fs_selfnoise.hdf5',
                                           format='hdf5')
    gif = FrequencySeries.read('./fs_gif.hdf5', format='hdf5')
    #x500_press = FrequencySeries.read('./fs_x500_press.hdf5',format='hdf5')
    df = ixv1.df
else:
    _, ixv1, _ = asd(ixv1, fftlength=fftlength, window='hanning', ovlp=0.5)
    _, gif, _ = asd(gif, fftlength=fftlength, window='hanning', ovlp=0.5)
    _, x500_press, _ = asd(x500_press, fftlength=fftlength,
                           window='hanning', ovlp=0.5)
    _, ixv_press, _ = asd(ixv_press,
def _draw(self):
    """Load all data, and generate this `SpectrumDataPlot`
    """
    plot = self.plot = FrequencySeriesPlot(
        figsize=self.pargs.pop('figsize', [12, 6]))
    ax = plot.gca()

    if self.state:
        self.pargs.setdefault(
            'suptitle',
            '[%s-%s, state: %s]' % (self.span[0], self.span[1],
                                    label_to_latex(str(self.state))))
    suptitle = self.pargs.pop('suptitle', None)
    if suptitle:
        plot.suptitle(suptitle, y=0.993, va='top')

    # parse plotting arguments
    cmap = self.pargs.pop('cmap', None)
    varargs = self.parse_variance_kwargs()
    plotargs = self.parse_plot_kwargs()[0]
    legendargs = self.parse_legend_kwargs()

    # get reference arguments
    refs = []
    refkey = 'None'
    for key in sorted(self.pargs.keys()):
        if key == 'reference' or re.match('reference\d+\Z', key):
            refs.append(dict())
            refs[-1]['source'] = self.pargs.pop(key)
            refkey = key
        if re.match('%s[-_]' % refkey, key):
            refs[-1][key[len(refkey)+1:]] = self.pargs.pop(key)

    # get channel arguments
    if hasattr(self.channels[0], 'asd_range'):
        low, high = self.channels[0].asd_range
        varargs.setdefault('low', low)
        varargs.setdefault('high', high)

    # calculate spectral variance and plot
    # pad data request to over-fill plots (no gaps at the end)
    if self.state and not self.all_data:
        valid = self.state.active
    else:
        valid = SegmentList([self.span])
    livetime = float(abs(valid))

    if livetime:
        plotargs.setdefault('vmin', 1/livetime)
    plotargs.setdefault('vmax', 1.)
    plotargs.pop('label')

    specgram = get_spectrogram(self.channels[0], valid, query=False,
                               format='asd').join(gap='ignore')

    if specgram.size:
        asd = specgram.median(axis=0)
        asd.name = None
        variance = specgram.variance(**varargs)
        # normalize the variance
        variance /= livetime / specgram.dt.value
        # undo demodulation
        variance = undo_demodulation(variance, self.channels[0],
                                     self.pargs.get('xlim', None))
        # plot
        ax.plot(asd, color='grey', linewidth=0.3)
        m = ax.plot_variance(variance, cmap=cmap, **plotargs)
    #else:
    #    ax.scatter([1], [1], c=[1], visible=False, vmin=plotargs['vmin'],
    #               vmax=plotargs['vmax'], cmap=plotargs['cmap'])
    #plot.add_colorbar(ax=ax, log=True, label='Fractional time at amplitude')

    # allow channel data to set parameters
    if getattr(self.channels[0], 'frequency_range', None) is not None:
        self.pargs.setdefault('xlim', self.channels[0].frequency_range)
        if isinstance(self.pargs['xlim'], Quantity):
            self.pargs['xlim'] = self.pargs['xlim'].value
    if hasattr(self.channels[0], 'asd_range'):
        self.pargs.setdefault('ylim', self.channels[0].asd_range)

    # display references
    for i, ref in enumerate(refs):
        if 'source' in ref:
            source = ref.pop('source')
            try:
                refspec = FrequencySeries.read(source)
            except IOError as e:
                warnings.warn('IOError: %s' % str(e))
            except Exception as e:
                # hack for old versions of GWpy
                # TODO: remove me when GWSumm requires GWpy > 0.1
                if 'Format could not be identified' in str(e):
                    refspec = FrequencySeries.read(source, format='dat')
                else:
                    raise
            else:
                if 'filter' in ref:
                    refspec = refspec.filter(*ref.pop('filter'))
                if 'scale' in ref:
                    refspec *= ref.pop('scale', 1)
                ax.plot(refspec, **ref)

    # customise
    hlines = list(self.pargs.pop('hline', []))
    self.apply_parameters(ax, **self.pargs)

    # add horizontal lines to add
    if hlines:
        if not isinstance(hlines[-1], float):
            lineparams = hlines.pop(-1)
        else:
            lineparams = {'color': 'r', 'linestyle': '--'}
    for yval in hlines:
        try:
            yval = float(yval)
        except ValueError:
            continue
        else:
            ax.plot(ax.get_xlim(), [yval, yval], **lineparams)

    # set grid
    ax.grid(b=True, axis='both', which='both')

    if not plot.colorbars:
        plot.add_colorbar(ax=ax, visible=False)
    return self.finalize()
def _draw(self):
    """Load all data, and generate this `SpectrumDataPlot`
    """
    plot = self.plot = FrequencySeriesPlot(
        figsize=self.pargs.pop('figsize', [12, 6]))
    ax = plot.gca()
    ax.grid(b=True, axis='both', which='both')

    if self.state:
        self.pargs.setdefault(
            'suptitle',
            '[%s-%s, state: %s]' % (self.span[0], self.span[1],
                                    label_to_latex(str(self.state))))
    suptitle = self.pargs.pop('suptitle', None)
    if suptitle:
        plot.suptitle(suptitle, y=0.993, va='top')

    # get spectrum format: 'amplitude' or 'power'
    sdform = self.pargs.pop('format')
    if sdform == 'rayleigh':
        method = 'rayleigh'
    else:
        method = None
    use_percentiles = str(
        self.pargs.pop('no-percentiles')).lower() == 'false'

    # parse plotting arguments
    plotargs = self.parse_plot_kwargs()
    legendargs = self.parse_legend_kwargs()

    # get reference arguments
    refs = []
    refkey = 'None'
    for key in sorted(self.pargs.keys()):
        if key == 'reference' or re.match('reference\d+\Z', key):
            refs.append(dict())
            refs[-1]['source'] = self.pargs.pop(key)
            refkey = key
        if re.match('%s[-_]' % refkey, key):
            refs[-1][key[len(refkey)+1:]] = self.pargs.pop(key)

    # add data
    if self.type == 'coherence-spectrum':
        iterator = zip(self.channels[0::2], self.channels[1::2], plotargs)
    else:
        iterator = zip(self.channels, plotargs)

    for chantuple in iterator:
        channel = chantuple[0]
        channel2 = chantuple[1]
        pargs = chantuple[-1]

        if self.state and not self.all_data:
            valid = self.state
        else:
            valid = SegmentList([self.span])

        if self.type == 'coherence-spectrum':
            data = get_coherence_spectrum(
                [str(channel), str(channel2)], valid, query=False)
        else:
            data = get_spectrum(str(channel), valid, query=False,
                                format=sdform, method=method)

        # undo demodulation
        for spec in data:
            spec = undo_demodulation(spec, channel,
                                     self.pargs.get('xlim', None))

        # anticipate log problems
        if self.pargs['logx']:
            data = [s[1:] for s in data]
        if self.pargs['logy']:
            for sp in data:
                sp.value[sp.value == 0] = 1e-100

        if use_percentiles:
            ax.plot_spectrum_mmm(*data, **pargs)
        else:
            pargs.pop('alpha', None)
            ax.plot_spectrum(data[0], **pargs)

        # allow channel data to set parameters
        if getattr(channel, 'frequency_range', None) is not None:
            self.pargs.setdefault('xlim', channel.frequency_range)
            if isinstance(self.pargs['xlim'], Quantity):
                self.pargs['xlim'] = self.pargs['xlim'].value
        if (sdform in ['amplitude', 'asd'] and
                hasattr(channel, 'asd_range')):
            self.pargs.setdefault('ylim', channel.asd_range)
        elif hasattr(channel, 'psd_range'):
            self.pargs.setdefault('ylim', channel.psd_range)

    # display references
    for i, ref in enumerate(refs):
        if 'source' in ref:
            source = ref.pop('source')
            try:
                refspec = FrequencySeries.read(source)
            except IOError as e:
                warnings.warn('IOError: %s' % str(e))
            except Exception as e:
                # hack for old versions of GWpy
                # TODO: remove me when GWSumm requires GWpy > 0.1
                if 'Format could not be identified' in str(e):
                    refspec = FrequencySeries.read(source, format='dat')
                else:
                    raise
            else:
                ref.setdefault('zorder', -len(refs) + 1)
                if 'filter' in ref:
                    refspec = refspec.filter(*ref.pop('filter'))
                if 'scale' in ref:
                    refspec *= ref.pop('scale', 1)
                ax.plot(refspec, **ref)

    # customise
    hlines = list(self.pargs.pop('hline', []))
    for key, val in self.pargs.iteritems():
        try:
            getattr(ax, 'set_%s' % key)(val)
        except AttributeError:
            setattr(ax, key, val)

    # add horizontal lines to add
    if hlines:
        if not isinstance(hlines[-1], float):
            lineparams = hlines.pop(-1)
        else:
            lineparams = {'color': 'r', 'linestyle': '--'}
    for yval in hlines:
        try:
            yval = float(yval)
        except ValueError:
            continue
        else:
            ax.plot(ax.get_xlim(), [yval, yval], **lineparams)

    if len(self.channels) > 1 or ax.legend_ is not None:
        plot.add_legend(ax=ax, **legendargs)
    if not plot.colorbars:
        plot.add_colorbar(ax=ax, visible=False)
    return self.finalize()
def draw(self):
    # initialise
    (plot, axes) = self.init_plot()
    ax = axes[0]
    ax.grid(b=True, axis='y', which='major')
    channel = self.channels[0]

    # parse data arguments
    sdform = self.pargs.pop('format')
    clim = self.pargs.pop('clim')
    clog = self.pargs.pop('logcolor')
    clabel = self.pargs.pop('colorlabel')
    rasterized = self.pargs.pop('rasterized', True)
    ratio = self.ratio

    # get cmap
    if ratio in ['median', 'mean'] or (
            isinstance(ratio, str) and os.path.isfile(ratio)):
        self.pargs.setdefault('cmap', 'Spectral_r')
    cmap = self.pargs.pop('cmap', None)

    # get data
    if self.state and not self.all_data:
        valid = self.state.active
    else:
        valid = SegmentList([self.span])
    if self.type == 'coherence-spectrogram':
        specgrams = get_coherence_spectrogram(self.channels, valid,
                                              query=False)
    else:
        specgrams = get_spectrogram(channel, valid, query=False,
                                    format=sdform)

    # calculate ratio spectrum
    if len(specgrams) and (ratio in ['median', 'mean'] or
                           isinstance(ratio, int)):
        try:
            allspec = specgrams.join(gap='ignore')
        except ValueError as e:
            if 'units do not match' in str(e):
                warnings.warn(str(e))
                for spec in specgrams[1:]:
                    spec.unit = specgrams[0].unit
                allspec = specgrams.join(gap='ignore')
            else:
                raise
        if isinstance(ratio, int):
            ratio = allspec.percentile(ratio)
        else:
            ratio = getattr(allspec, ratio)(axis=0)
    elif isinstance(ratio, str) and os.path.isfile(ratio):
        try:
            ratio = FrequencySeries.read(ratio)
        except IOError as e:
            warnings.warn('IOError: %s' % str(e))
        except Exception as e:
            # hack for old versions of GWpy
            # TODO: remove me when GWSumm requires GWpy > 0.1
            if 'Format could not be identified' in str(e):
                ratio = FrequencySeries.read(ratio, format='dat')
            else:
                raise

    # allow channel data to set parameters
    if getattr(channel, 'frequency_range', None) is not None:
        self.pargs.setdefault('ylim', channel.frequency_range)
        if isinstance(self.pargs['ylim'], Quantity):
            self.pargs['ylim'] = self.pargs['ylim'].value
    if (ratio is None and sdform in ['amplitude', 'asd'] and
            hasattr(channel, 'asd_range') and clim is None):
        clim = channel.asd_range
    elif (ratio is None and hasattr(channel, 'psd_range') and
          clim is None):
        clim = channel.psd_range

    # plot data
    for specgram in specgrams:
        # undo demodulation
        specgram = undo_demodulation(specgram, channel,
                                     self.pargs.get('ylim', None))
        # calculate ratio
        if ratio is not None:
            specgram = specgram.ratio(ratio)
        # plot
        ax.plot_spectrogram(specgram, cmap=cmap, rasterized=rasterized)

    # add colorbar
    if len(specgrams) == 0:
        ax.scatter([1], [1], c=[1], visible=False, cmap=cmap)
    plot.add_colorbar(ax=ax, clim=clim, log=clog, label=clabel, cmap=cmap)

    # customise and finalise
    for key, val in self.pargs.iteritems():
        if key == 'ratio':
            continue
        try:
            getattr(ax, 'set_%s' % key)(val)
        except AttributeError:
            setattr(ax, key, val)
    if self.state:
        self.add_state_segments(ax)
    return self.finalize()
from control import matlab
import matplotlib.pyplot as plt
import numpy as np
from gwpy.frequencyseries import FrequencySeries

from vismodel.utils import times, rms, _bode, degwrap, mybode
from vismodel.lvdt import lvdt
from vismodel.geophone import geo, geo_tf
from vismodel.ip import Ps, Pa
from vismodel.filt import blendfilter
from vismodel.filt import servo, servo2, servo3, servo4
from seismodel.trillium import tr120, tr120_u, tr120_selfnoise

# Seismic Noise
#seis = FrequencySeries.read('./seismodel/data1_ixv_x_50pct.hdf5')*1e6
print('temporal data')
seisETMX = FrequencySeries.read('./seismodel/2018Dec10_exv.hdf5')
seisITMX = FrequencySeries.read('./seismodel/2018Dec10_ixv.hdf5')
seisETMX = seisETMX.crop(1e-3, 20)
seisITMX = seisITMX.crop(1e-3, 20)
freq = seisETMX.frequencies.value
df = seisETMX.df.value

# Run
plot_stage_motion = True
plot_servo = True
plot_control = True
compare_disp_noise = True
plot_supersensor_noise = True
plot_ST = True

# LVDT Noise