def avg_data(files):  # Finds mean and median power spectrum profiles given a list of files covering the same frequency range
    data = []
    for file in files:
        fil = Filterbank(file)
        power_vals = fil.data[0][0]
        data.append(power_vals)
    data = np.array(data)
    # Turns data into an array with one row per channel, each row holding that channel's values across the input files
    stacked_data = np.dstack(data)[0]
    mean_data = []
    median_data = []
    for channel in stacked_data:
        mean_data.append(np.mean(channel))
        median_data.append(np.median(channel))
    mean_data = np.array(mean_data)
    median_data = np.array(median_data)
    freqs = np.array(Filterbank(files[0]).freqs)
    return {'freqs': freqs, 'mean_data': mean_data, 'median_data': median_data}
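# Usage sketch for avg_data() above: the .fil paths are hypothetical placeholders
# for any set of filterbank files covering the same band; matplotlib is assumed
# to be available for the plot.
import matplotlib.pyplot as plt

files = ['obs_0001.fil', 'obs_0002.fil', 'obs_0003.fil']
profiles = avg_data(files)

plt.plot(profiles['freqs'], profiles['mean_data'], label='mean')
plt.plot(profiles['freqs'], profiles['median_data'], label='median')
plt.xlabel('Frequency (MHz)')
plt.ylabel('Power')
plt.legend()
plt.show()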
def test_voyager_fix_header():
    filename = '/workdata/bl/data/voyager_f1032192_t300_v2.fil'
    new_filename = 'voyager_ext.fil'
    fb = Filterbank(filename, f_start=8420.1, f_stop=8420.3)
    fb.write_to_filterbank(new_filename)
    fb = Filterbank(new_filename)
    filename = new_filename

    assert read_header(filename)['ibeam'] == 1
    fix_header(filename, 'ibeam', 7)
    assert read_header(filename)['ibeam'] == 7
    fix_header(filename, 'ibeam', 1)
    assert read_header(filename)['ibeam'] == 1
    fix_header(filename, 'ibeam', 13)
    assert read_header(filename)['ibeam'] == 13
    pprint(read_header(filename))

    fix_header(filename, 'rawdatafile', './blc3_9bit_guppi_57386_VOYAGER1_0004.0000.raw')
    assert read_header(filename)['rawdatafile'] == './blc3_9bit_guppi_57386_VOYAGER1_0004.0000.raw'
    fix_header(filename, 'rawdatafile', './blc3_2bit_guppi_57386_VOYAGER1_0004.0000.raw')
    assert read_header(filename)['rawdatafile'] == './blc3_2bit_guppi_57386_VOYAGER1_0004.0000.raw'

    os.remove(new_filename)
def test_select_frequency_range_reversed(self):
    """ Initialize a 16-bit filterbank and check that values fall within the frequency range even when start/stop are reversed """
    filename = './pspm16.fil'
    fil = Filterbank(filename)
    data = fil.select_data(freq_start=432, freq_stop=431)
    self.assertTrue(all(430.5 < i < 432.4 for i in data[0]))
def test_filterbank_time_range(self):
    """ Initialize an 8-bit filterbank and check that the returned values fall within the requested time range """
    filename = './pspm8.fil'
    time_range = (10, 30)
    time_delt = abs(time_range[0] - time_range[1])
    fil = Filterbank(filename)
    data = fil.select_data(time_start=time_range[0], time_stop=time_range[1])
    self.assertEqual(len(data[1]), time_delt)
def test_voyager():
    filename = '/workdata/bl/data/voyager_f1032192_t300_v2.fil'
    fb = Filterbank(filename)
    fb.info()
    fb.plot_spectrum()
    plt.show()

    fb = Filterbank(filename, f_start=8420, f_stop=8420.5)
    fb.info()
    fb.plot_spectrum()
    plt.show()
def __init__(self, source, b, a):
    # Automatically duplicate mono input to fit the desired output shape
    if b.shape[0] != source.nchannels:
        if source.nchannels != 1:
            raise ValueError('Can only automatically duplicate source channels for mono sources, use RestructureFilterbank.')
        source = RestructureFilterbank(source, b.shape[0])
    Filterbank.__init__(self, source)
    # Weave version of filtering requires Fortran ordering of filter params
    if len(b.shape) == 2 and len(a.shape) == 2:
        b = reshape(b, b.shape + (1,))
        a = reshape(a, a.shape + (1,))
    self.filt_b = array(b, order='F')
    self.filt_a = array(a, order='F')
    self.filt_state = zeros((b.shape[0], b.shape[1] - 1, b.shape[2]), order='F')
def test_filterbank_parameters(self):
    """ Initialize a 32-bit filterbank and check that all selection parameters work together """
    filename = './pspm32.fil'
    freq_range = (433, 435)
    time_range = (10, 20)
    time_delt = abs(time_range[0] - time_range[1])
    fil = Filterbank(filename, freq_range=freq_range, time_range=time_range)
    data = fil.select_data()
    self.assertTrue(all(432.5 < i < 435.4 for i in data[0]))
    self.assertEqual(len(data[1]), time_delt)
def test_wrong_filename_raises_error(self):
    """ Initialize filterbank with an incorrect filename """
    filename = './thispathdoesnotexist'
    with self.assertRaises(FileNotFoundError):
        Filterbank(filename)
def AA(file):  # Returns AltAz coordinates for a given observation
    fil = Filterbank(file)
    MJD = fil.header['tstart']
    ra = fil.header['src_raj']
    dec = fil.header['src_dej']
    target = SkyCoord(ra, dec)
    altaz = target.transform_to(AltAz(location=GreenBank, obstime=Time(MJD, format='mjd')))
    return {'alt': altaz.alt.degree, 'az': altaz.az.degree}
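# Sketch of the context AA() relies on: GreenBank is assumed to be an astropy
# EarthLocation defined at module level (the coordinates below are approximate
# and only illustrative), and 'observation.fil' is a placeholder path.
from astropy.coordinates import SkyCoord, AltAz, EarthLocation
from astropy.time import Time
import astropy.units as u

GreenBank = EarthLocation(lat=38.4331 * u.deg, lon=-79.8397 * u.deg, height=808 * u.m)

pointing = AA('observation.fil')
print(pointing['alt'], pointing['az'])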
def __init__(self, source, b, a):
    # Automatically duplicate mono input to fit the desired output shape
    if b.shape[0] != source.nchannels:
        if source.nchannels != 1:
            raise ValueError('Can only automatically duplicate source channels for mono sources, use RestructureFilterbank.')
        source = RestructureFilterbank(source, b.shape[0])
    Filterbank.__init__(self, source)
    # Weave version of filtering requires Fortran ordering of filter params
    if len(b.shape) == 2 and len(a.shape) == 2:
        b = reshape(b, b.shape + (1,))
        a = reshape(a, a.shape + (1,))
    self.filt_b = array(b, order='F')
    self.filt_a = array(a, order='F')
    self.filt_state = zeros((b.shape[0], b.shape[1], b.shape[2]), order='F')
    self.use_weave = get_global_preference('useweave')
    if self.use_weave:
        log_info('brian.hears.filtering.linearfilterbank', 'Using weave')
        self.cpp_compiler = get_global_preference('weavecompiler')
        self.extra_compile_args = ['-O3']
        if self.cpp_compiler == 'gcc':
            self.extra_compile_args += get_global_preference('gcc_options')
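# Minimal sketch (not part of the class above) of how the 2-D coefficient arrays
# this constructor accepts could be built: one row of Butterworth band-pass
# coefficients per output channel, stacked into shape (nchannels, m). The sample
# rate, centre frequencies, and bandwidth are illustrative assumptions, and the
# commented-out call uses a hypothetical class name and source.
import numpy as np
from scipy.signal import butter

samplerate = 44100.0                              # assumed sample rate in Hz
centre_freqs = np.array([500.0, 1000.0, 2000.0])  # one centre frequency per channel
bandwidth = 100.0                                 # Hz, illustrative only

b_rows, a_rows = [], []
for cf in centre_freqs:
    low = (cf - bandwidth / 2) / (samplerate / 2)
    high = (cf + bandwidth / 2) / (samplerate / 2)
    b_ch, a_ch = butter(2, [low, high], btype='band')
    b_rows.append(b_ch)
    a_rows.append(a_ch)

b = np.vstack(b_rows)  # shape (nchannels, m); reshaped internally to (nchannels, m, 1)
a = np.vstack(a_rows)  # shape (nchannels, m)
# fb = LinearFilterbank(source, b, a)  # hypothetical usage with a brian.hears source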
def band_finder(filename):  # Classifies a filterbank file into a band according to its middle frequency
    fil = Filterbank(filename)
    fmax = fil.header['fch1']
    nchans = fil.header['nchans']
    ch_bandwidth = fil.header['foff']
    fmid = fmax + (nchans * ch_bandwidth) / 2.0
    if 1000 <= fmid < 2000:
        return 'L'
    elif 2000 <= fmid < 4000:
        return 'S'
    elif 4000 <= fmid < 8000:
        return 'C'
    elif 8000 <= fmid < 12000:
        return 'X'
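# Hypothetical usage of band_finder(): the paths below are placeholders for real
# .fil files; note that files whose middle frequency falls outside 1000-12000 MHz
# fall through and return None.
for path in ['scan_L.fil', 'scan_C.fil', 'scan_X.fil']:
    print(path, band_finder(path))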
def totalpower(file, fmin, fmax):
    fil = Filterbank(file)
    maxfreq = fil.header['fch1']
    nchans = fil.header['nchans']
    ch_bandwidth = fil.header['foff']
    minfreq = maxfreq + nchans * ch_bandwidth
    if fmin < minfreq or fmax > maxfreq:
        raise ValueError("One of the freq constraints is out of the freq range of this filterbank file.")
    freqs = np.array(fil.freqs)
    data = np.array(fil.data[0][0])
    newfreqs = np.array([x for x in freqs if fmin <= x <= fmax])
    # Indices of where newfreqs values occur in freqs
    idx = np.where(np.in1d(freqs, newfreqs))[0]
    newdata = data[idx]
    totalpower = simps(x=newfreqs, y=newdata)
    return totalpower
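# Usage sketch for totalpower(): 'observation.fil' is a placeholder path assumed
# to cover the requested 8400-8410 MHz window; simps is scipy.integrate's
# composite Simpson's rule integrator.
power = totalpower('observation.fil', fmin=8400.0, fmax=8410.0)
print('Integrated power: %g' % power)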
def buffer_init(self):
    Filterbank.buffer_init(self)
    self.filt_state.set(zeros(self.filt_state.shape, dtype=self.filt_state.dtype))
def test_voyager_extract():
    filename = '/workdata/bl/data/voyager_f1032192_t300_v2.fil'
    new_filename = 'voyager_ext.fil'
    fb = Filterbank(filename, f_start=8420.1, f_stop=8420.3)
    fb.info()
    fb.plot_spectrum()
    plt.show()

    fb.write_to_filterbank(new_filename)
    fb2 = Filterbank(new_filename)
    fb2.info()
    fb2.plot_spectrum()
    plt.show()

    os.remove(new_filename)
def buffer_init(self):
    Filterbank.buffer_init(self)
    self.filt_state[:] = 0
from __future__ import print_function
import sys
sys.path.append('breakthrough/GBT/filterbank_tools/')

import numpy as np
from astropy.io import fits
from filterbank import Filterbank as FB, db

extrema = lambda a: (a.min(), a.max())
scalef = lambda a, ex: (a - ex[0]) / (ex[1] - ex[0])
iscalef = lambda a, ex: ex[0] + (a * (ex[1] - ex[0]))

if __name__ == '__main__':
    infilf = '../data/blc3_2bit_guppi_57386_VOYAGER1_0002.gpuspec.0002.fil'
    synf = '../data/test_data'
    outf = 'synandskysig.fits'
    mixcoef = 0.5

    fbin = FB(infilf)
    f, data = fbin.grab_data()
    data = db(data)
    imgshape = [32, 1]  # plot_data.shape
    datext = extrema(data)

    with open(synf, 'rb') as fid:
        syndata = np.fromfile(fid, count=np.prod(imgshape), dtype='<f4')

    # scale syn data into range of data, mix
    syndata = iscalef(scalef(syndata, extrema(syndata)), datext)
    data = (1.0 - mixcoef) * data + mixcoef * syndata.reshape(imgshape)
    fits.writeto(outf, data)
def __init__(self, source, b, a, samplerate=None,
             precision='double', forcesync=True, pagelocked_mem=True, unroll_filterorder=None):
    # Automatically duplicate mono input to fit the desired output shape
    if b.shape[0] != source.nchannels:
        if source.nchannels != 1:
            raise ValueError('Can only automatically duplicate source channels for mono sources, use RestructureFilterbank.')
        source = RestructureFilterbank(source, b.shape[0])
    Filterbank.__init__(self, source)
    if pycuda.context is None:
        set_gpu_device(0)
    self.precision = precision
    if self.precision == 'double':
        self.precision_dtype = float64
    else:
        self.precision_dtype = float32
    self.forcesync = forcesync
    self.pagelocked_mem = pagelocked_mem
    n, m, p = b.shape
    self.filt_b = b
    self.filt_a = a
    filt_b_gpu = array(b, dtype=self.precision_dtype)
    filt_a_gpu = array(a, dtype=self.precision_dtype)
    filt_state = zeros((n, m - 1, p), dtype=self.precision_dtype)
    if pagelocked_mem:
        filt_y = drv.pagelocked_zeros((n,), dtype=self.precision_dtype)
        self.pre_x = drv.pagelocked_zeros((n,), dtype=self.precision_dtype)
    else:
        filt_y = zeros(n, dtype=self.precision_dtype)
        self.pre_x = zeros(n, dtype=self.precision_dtype)
    # Transform to Fortran order for better GPU memory access speeds
    self.filt_b_gpu = gpuarray.to_gpu(filt_b_gpu.T.flatten())
    self.filt_a_gpu = gpuarray.to_gpu(filt_a_gpu.T.flatten())
    self.filt_state = gpuarray.to_gpu(filt_state.T.flatten())
    self.unroll_filterorder = unroll_filterorder
    if unroll_filterorder is None:
        if m <= 32:
            unroll_filterorder = True
        else:
            unroll_filterorder = False
    # TODO: improve code, check memory access patterns, maybe use local memory
    code = '''
    #define x(s,i) _x[(s)*n+(i)]
    #define y(s,i) _y[(s)*n+(i)]
    #define a(i,j,k) _a[(i)+(j)*n+(k)*n*m]
    #define b(i,j,k) _b[(i)+(j)*n+(k)*n*m]
    #define zi(i,j,k) _zi[(i)+(j)*n+(k)*n*(m-1)]
    __global__ void filt(SCALAR *_b, SCALAR *_a, SCALAR *_x, SCALAR *_zi, SCALAR *_y, int numsamples)
    {
        int j = blockIdx.x * blockDim.x + threadIdx.x;
        if(j>=n) return;
        for(int s=0; s<numsamples; s++)
        {
    '''
    for k in range(p):
        loopcode = '''
        y(s,j) = b(j,0,k)*x(s,j) + zi(j,0,k);
        '''
        if unroll_filterorder:
            for i in range(m - 2):
                loopcode += re.sub('\\bi\\b', str(i), '''
                zi(j,i,k) = b(j,i+1,k)*x(s,j) + zi(j,i+1,k) - a(j,i+1,k)*y(s,j);
                ''')
        else:
            loopcode += '''
            for(int i=0;i<m-2;i++)
                zi(j,i,k) = b(j,i+1,k)*x(s,j) + zi(j,i+1,k) - a(j,i+1,k)*y(s,j);
            '''
        loopcode += '''
        zi(j,m-2,k) = b(j,m-1,k)*x(s,j) - a(j,m-1,k)*y(s,j);
        '''
        if k < p - 1:
            loopcode += '''
            x(s,j) = y(s,j);
            '''
        loopcode = re.sub('\\bk\\b', str(k), loopcode)
        code += loopcode
    code += '''
        }
    }
    '''
    code = code.replace('SCALAR', self.precision)
    # Replace the size variables by their values
    code = re.sub("\\bp\\b", str(p), code)
    code = re.sub("\\bm\\b", str(m), code)
    code = re.sub("\\bn\\b", str(n), code)
    self.gpu_mod = pycuda.compiler.SourceModule(code)
    self.gpu_filt_func = self.gpu_mod.get_function("filt")
    blocksize = 256
    if n < blocksize:
        blocksize = n
    if n % blocksize == 0:
        gridsize = n / blocksize
    else:
        gridsize = n / blocksize + 1
    self.block = (blocksize, 1, 1)
    self.grid = (gridsize, 1)
    self.gpu_filt_func.prepare((intp, intp, intp, intp, intp, int32), self.block)
    self._has_run_once = False
def maxfreq(file):  # Returns the maximum frequency in a .fil file
    fil = Filterbank(file)
    return fil.header['fch1']
import sys
from os.path import splitext

import numpy as np
import pylab as pl
from filterbank import Filterbank as FB, db
#from astropy.io import fits

if __name__ == '__main__':
    argv = sys.argv
    if len(argv) not in (2, 3):
        print("usage: %s input.fil [output.fits]" % argv[0])
        sys.exit(1)
    infilf = argv[1]
    outf = argv[2] if len(argv) == 3 else splitext(infilf)[0] + '.fits'

    fbin = FB(infilf)
    f, data = fbin.grab_data()
    data = db(data)

    # dump to fits for fun
    #fits.writeto(outf, data, clobber=True)
    #infits = fits.open(outf, memmap=True)
    #data = infits[0].data

    print(data.shape)
    print(f.shape)

    pl.imshow(data)
    f_step = int(f.shape[0] / 25)
    t_step = int(data.shape[0] / 3)
    xt = (f[::f_step] * 10).astype(int) / 10.0
    pl.xticks(np.arange(len(xt)), xt, rotation=90)
def maxfreq(file):
    fil = Filterbank(file)
    maxfreq = float(fil.header['fch1'])
    return maxfreq
from filterbank import Filterbank
import numpy as np
import matplotlib.pyplot as plt
import os, fnmatch, random, re, csv, argparse

parser = argparse.ArgumentParser(description='Extract params from fils')
parser.add_argument('filename', type=str, help='Path to fil file')
args = parser.parse_args()

filename = args.filename
fil = Filterbank(filename)
source = fil.header['source_name']
file_data = fil.data[0][0]

#~~~~~~~~~~Functions~~~~~~~~~~

def band_finder(filename):  # Classifies a filterbank file into a band according to its middle frequency
    fil = Filterbank(filename)
    fmax = fil.header['fch1']
    nchans = fil.header['nchans']
    ch_bandwidth = fil.header['foff']
    fmid = fmax + (nchans * ch_bandwidth) / 2.0
    if 1000 <= fmid < 2000:
        return 'L'
    elif 2000 <= fmid < 4000:
        return 'S'
    elif 4000 <= fmid < 8000:
        return 'C'