def __init__(self, filename, f_start=None, f_stop=None, t_start=None, t_stop=None, load_data=True, max_load=1.):
    """ Constructor.

    Opens a sigproc filterbank file, parses its header, sets up the
    frequency/time selection, and (optionally) loads the selected data.

    Args:
        filename (str): filename of blimpy file.
        f_start (float): start frequency, in MHz
        f_stop (float): stop frequency, in MHz
        t_start (int): start time bin
        t_stop (int): stop time bin
        load_data (bool): when False, only the header is read and the
            data selection is left empty.
        max_load (float): data-size budget as a multiple of
            MAX_DATA_ARRAY_SIZE_UNIT (log messages report sizes in GB);
            None falls back to one unit.

    Raises:
        IOError: if `filename` is empty or is not an existing file.
    """
    super(FilReader, self).__init__()
    self.header_keywords_types = sigproc.header_keyword_types

    if filename and os.path.isfile(filename):
        self.filename = filename
        self.load_data = load_data
        self.header = self.read_header()
        self.file_size_bytes = os.path.getsize(self.filename)
        # Data starts right after the sigproc header.
        self.idx_data = sigproc.len_header(self.filename)
        self.n_channels_in_file = self.header[b'nchans']
        self.n_beams_in_file = self.header[b'nifs']  #Placeholder for future development.
        self.n_pols_in_file = 1  #Placeholder for future development.
        self._n_bytes = int(self.header[b'nbits'] / 8)  #number of bytes per digit.
        self._d_type = self._setup_dtype()
        self._setup_n_ints_in_file()
        self.file_shape = (self.n_ints_in_file, self.n_beams_in_file, self.n_channels_in_file)

        # A negative channel width (foff) means fch1 is the highest
        # frequency; compute the absolute frequency extent either way.
        if self.header[b'foff'] < 0:
            self.f_end = self.header[b'fch1']
            self.f_begin = self.f_end + self.n_channels_in_file * self.header[b'foff']
        else:
            self.f_begin = self.header[b'fch1']
            self.f_end = self.f_begin + self.n_channels_in_file * self.header[b'foff']

        self.t_begin = 0
        self.t_end = self.n_ints_in_file

        #Taking care all the frequencies are assigned correctly.
        self._setup_selection_range(f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop, init=True)
        #Convert input frequencies into what their corresponding channel number would be.
        self._setup_chans()
        #Update frequencies ranges from channel number.
        self._setup_freqs()

        # Axis order of the data cube: (time, beam/IF, channel).
        self.freq_axis = 2
        self.time_axis = 0
        self.beam_axis = 1  # Place holder

        #EE ie.
        #        spec = np.squeeze(fil_file.data)

        # set start of data, at real length of header  (future development.)
        #        self.datastart=self.hdrraw.find('HEADER_END')+len('HEADER_END')+self.startsample*self.channels

        #Applying data size limit to load.
        if max_load is not None:
            if max_load > 1.0:
                logger.warning('Setting data limit != 1GB, please handle with care!')
            self.MAX_DATA_ARRAY_SIZE = max_load * MAX_DATA_ARRAY_SIZE_UNIT
        else:
            self.MAX_DATA_ARRAY_SIZE = MAX_DATA_ARRAY_SIZE_UNIT

        if self.file_size_bytes > self.MAX_DATA_ARRAY_SIZE:
            self.large_file = True
        else:
            self.large_file = False

        if self.load_data:
            if self.large_file:
                # Large files are only loaded when the caller requested a
                # selection (self.f_start etc. are presumably set by
                # _setup_selection_range above — TODO confirm) and that
                # selection fits within the budget.
                if self.f_start or self.f_stop or self.t_start or self.t_stop:
                    if self.isheavy():
                        logger.warning("Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded, please try another (t,v) selection." % (self._calc_selection_size() / (1024. ** 3), self.MAX_DATA_ARRAY_SIZE / (1024. ** 3)))
                        self._init_empty_selection()
                    else:
                        self.read_data()
                else:
                    logger.warning("The file is of size %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded. You could try another (t,v) selection." % (self.file_size_bytes / (1024. ** 3), self.MAX_DATA_ARRAY_SIZE / (1024. ** 3)))
                    self._init_empty_selection()
            else:
                self.read_data()
        else:
            logger.info("Skipping loading data ...")
            self._init_empty_selection()
    else:
        raise IOError("Need a file to open, please give me one!")
def __init__(self, filename, f_start=None, f_stop=None, t_start=None, t_stop=None, load_data=True, max_load=1.):
    """ Constructor.

    Reads the sigproc header of a filterbank file, establishes the
    frequency/time selection, and loads the data if it fits the budget.

    Args:
        filename (str): filename of blimpy file.
        f_start (float): start frequency, in MHz
        f_stop (float): stop frequency, in MHz
        t_start (int): start time bin
        t_stop (int): stop time bin
        load_data (bool): when False, only the header is parsed.
        max_load (float): data-size budget as a multiple of
            MAX_DATA_ARRAY_SIZE_UNIT; None means one unit.

    Raises:
        IOError: if `filename` is empty or not an existing file.
    """
    super(FilReader, self).__init__()
    self.header_keywords_types = sigproc.header_keyword_types

    # Guard clause: nothing else makes sense without a real file on disk.
    if not (filename and os.path.isfile(filename)):
        raise IOError("Need a file to open, please give me one!")

    self.filename = filename
    self.load_data = load_data
    self.header = self.read_header()
    self.file_size_bytes = os.path.getsize(self.filename)
    self.idx_data = sigproc.len_header(self.filename)
    self.n_channels_in_file = self.header[b'nchans']
    self.n_beams_in_file = self.header[b'nifs']  #Placeholder for future development.
    self.n_pols_in_file = 1  #Placeholder for future development.
    self._n_bytes = int(self.header[b'nbits'] / 8)  #number of bytes per digit.
    self._d_type = self._setup_dtype()
    self._setup_n_ints_in_file()
    self.file_shape = (self.n_ints_in_file, self.n_beams_in_file, self.n_channels_in_file)

    # Absolute frequency extent of the file. A negative channel width
    # (foff) means the first channel, fch1, is the highest frequency.
    channel_width = self.header[b'foff']
    band_span = self.n_channels_in_file * channel_width
    if channel_width < 0:
        self.f_end = self.header[b'fch1']
        self.f_begin = self.f_end + band_span
    else:
        self.f_begin = self.header[b'fch1']
        self.f_end = self.f_begin + band_span

    self.t_begin = 0
    self.t_end = self.n_ints_in_file

    # Resolve the requested (frequency, time) window, translate the
    # frequencies to channel numbers, then refresh the frequency range
    # from those channels so everything stays consistent.
    self._setup_selection_range(f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop, init=True)
    self._setup_chans()
    self._setup_freqs()

    # Axis order of the data cube: (time, beam/IF, channel).
    self.time_axis = 0
    self.beam_axis = 1  # Place holder
    self.freq_axis = 2

    #EE ie.
    #        spec = np.squeeze(fil_file.data)

    # set start of data, at real length of header  (future development.)
    #        self.datastart=self.hdrraw.find('HEADER_END')+len('HEADER_END')+self.startsample*self.channels

    # Work out the in-memory data budget.
    if max_load is None:
        self.MAX_DATA_ARRAY_SIZE = MAX_DATA_ARRAY_SIZE_UNIT
    else:
        if max_load > 1.0:
            logger.warning('Setting data limit != 1GB, please handle with care!')
        self.MAX_DATA_ARRAY_SIZE = max_load * MAX_DATA_ARRAY_SIZE_UNIT

    self.large_file = self.file_size_bytes > self.MAX_DATA_ARRAY_SIZE

    if not self.load_data:
        logger.info("Skipping loading data ...")
        self._init_empty_selection()
        return

    if not self.large_file:
        self.read_data()
        return

    # Large file: load only when a selection was requested and it fits.
    if not (self.f_start or self.f_stop or self.t_start or self.t_stop):
        logger.warning("The file is of size %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded. You could try another (t,v) selection." % (self.file_size_bytes / (1024. ** 3), self.MAX_DATA_ARRAY_SIZE / (1024. ** 3)))
        self._init_empty_selection()
    elif self.isheavy():
        logger.warning("Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded, please try another (t,v) selection." % (self._calc_selection_size() / (1024. ** 3), self.MAX_DATA_ARRAY_SIZE / (1024. ** 3)))
        self._init_empty_selection()
    else:
        self.read_data()
#://github.com/UCBerkeleySETI/blimpy/blob/master/blimpy/io/fil_reader.py
# plots filterbank file and extracts candidates
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
from scipy import stats
from pathlib import Path
import sigproc
import sys


def integrate_spectra(fname):
    """Sum every spectrum of a 32-bit filterbank file into one array.

    Args:
        fname (str): path to the sigproc filterbank file.

    Returns:
        tuple: (header dict from sigproc.read_header, 1-D float array of
        per-channel power summed over all spectra).
    """
    fhead = sigproc.read_header(fname)
    headlen = sigproc.len_header(fname)
    nchans = int(fhead['nchans'])
    # 4 bytes per sample because the data are read as uint32
    # (assumes the header's nbits is 32 — TODO confirm).
    n_spec = int((Path(fname).stat().st_size - headlen) // (4 * nchans))

    spec = np.zeros(nchans)
    # Open the file once and read spectra sequentially. The original
    # called np.fromfile(fname, ..., offset=headlen + 4*k*nchans) inside
    # the loop, reopening the file and seeking from the start for every
    # spectrum — O(file size) of redundant work per iteration.
    with open(fname, 'rb') as fh:
        fh.seek(headlen)
        for k in range(n_spec):
            print(str(k + 1) + ' / ' + str(n_spec))
            spec += np.fromfile(fh, dtype=np.uint32, count=nchans)
    return fhead, spec


def plot_spectrum(fhead, spec):
    """Plot integrated power (dB) versus frequency (MHz) and show it."""
    freqs = np.linspace(fhead['fch1'], fhead['fch1'] + fhead['foff'] * fhead['nchans'], int(fhead['nchans']))
    plt.figure()
    plt.plot(freqs, 10. * np.log10(spec))
    plt.grid()
    plt.xlabel('frequency [MHz]')
    plt.ylabel('power [dB]')
    plt.suptitle(fhead['source_name'])
    plt.show()


def main():
    """Entry point: integrate and plot the file named on the command line."""
    #fname = '/datax2/devfil/beam_data/20201110104110_HAT-P-44C.fil'
    fname = sys.argv[1]
    fhead, spec = integrate_spectra(fname)
    plot_spectrum(fhead, spec)


if __name__ == '__main__':
    main()
# NOTE(review): this chunk starts mid-way through a list literal (the
# `fnames = [` opening lives before this view) and ends with an
# unterminated `header_keyword_types = {` that continues past it.
         '/mnt/data/dsa110/T3/corr07/03dec20/corr07_BF.fil',\
         '/mnt/data/dsa110/T3/corr08/03dec20/corr08_BF.fil',\
         '/mnt/data/dsa110/T3/corr09/03dec20/corr09_BF.fil',\
         '/mnt/data/dsa110/T3/corr10/03dec20/corr10_BF.fil',\
         '/mnt/data/dsa110/T3/corr11/03dec20/corr11_BF.fil',\
         '/mnt/data/dsa110/T3/corr12/03dec20/corr12_BF.fil',\
         '/mnt/data/dsa110/T3/corr13/03dec20/corr13_BF.fil',\
         '/mnt/data/dsa110/T3/corr14/03dec20/corr14_BF.fil',\
         '/mnt/data/dsa110/T3/corr15/03dec20/corr15_BF.fil',\
         '/mnt/data/dsa110/T3/corr16/03dec20/corr16_BF.fil']

# Channels per filterbank file — presumably fixed for this instrument;
# TODO confirm against the file headers.
nChans = 384

# Read the first file to seed `alldata`.
fname = fnames[0]
print('processing file ' + fname)
header_len = sigproc.len_header(fname)
filsize = int(Path(fname).stat().st_size)
nSam = int((filsize - header_len) / nChans)  # samples per channel, given 1 byte (uint8) each
data = np.fromfile(fname, dtype=np.uint8, count=-1, offset=header_len)
# Fortran (column-major) order: consecutive bytes fill axis 0 (channels) first.
alldata = np.reshape(data, (nChans, nSam), order='F')

# Append each remaining file's block along axis 0 (the channel axis).
for fname in fnames[1:]:
    print('processing file ' + fname)
    header_len = sigproc.len_header(fname)
    filsize = int(Path(fname).stat().st_size)
    nSam = int((filsize - header_len) / nChans)
    data = np.fromfile(fname, dtype=np.uint8, count=-1, offset=header_len)
    data = np.reshape(data, (nChans, nSam), order='F')
    alldata = np.concatenate((alldata, data), axis=0)

header_keyword_types = {