def readBlock(self, start, nsamps, as_filterbankBlock=True):
    """Read a block of filterbank data.

    Parameters
    ----------
    start : int
        first time sample of the block to be read
    nsamps : int
        number of samples in the block (i.e. block will be nsamps*nchans
        in size)
    as_filterbankBlock : bool, optional
        whether to read data as filterbankBlock or numpy array,
        by default True

    Returns
    -------
    :class:`~sigpyproc.Filterbank.FilterbankBlock` or :py:obj:`numpy.ndarray`
        2-D array of filterbank data
    """
    # Position the file just past the header, at the first wanted sample.
    offset = self.header.hdrlen + start * self.sampsize
    self._file.seek(offset)
    raw = self._file.cread(self.header.nchans * nsamps)
    # A short read near EOF yields fewer complete samples than requested,
    # so derive the sample count from what was actually read.
    samples_read = raw.size // self.header.nchans
    block = raw.reshape(samples_read, self.header.nchans).transpose()
    # Re-stamp the header start epoch for the block's first sample.
    new_header = self.header.newHeader(
        {"tstart": self.header.mjdAfterNsamps(start)}
    )
    if not as_filterbankBlock:
        return block
    return FilterbankBlock(block, new_header)
def readBlock(self, start, nsamps):
    """Read a block of filterbank data.

    :param start: first time sample of the block to be read
    :type start: int
    :param nsamps: number of samples in the block (i.e. block will be
        nsamps*nchans in size)
    :type nsamps: int
    :return: 2-D array of filterbank data
    :rtype: :class:`~sigpyproc.Filterbank.FilterbankBlock`
    """
    # Seek past the header to the first requested time sample.
    self._file.seek(self.header.hdrlen + start * self.sampsize)
    data = self._file.cread(self.header.nchans * nsamps)
    # The read may come up short near end-of-file; reshape to the number
    # of complete samples actually obtained instead of assuming `nsamps`
    # (a fixed reshape raises ValueError on a truncated read).
    nsamps_read = data.size // self.header.nchans
    data = data.reshape(nsamps_read, self.header.nchans).transpose()
    # Update the header start epoch to the block's first sample.
    start_mjd = self.header.mjdAfterNsamps(start)
    new_header = self.header.newHeader({'tstart': start_mjd})
    return FilterbankBlock(data, new_header)
def readBlock(self, start, nsamps, as_filterbankBlock=True, apply_weights=False,
              apply_scales=False, apply_offsets=False):
    """Read a block of PSRFits data (and return in filterbank).

    :param start: first time sample of the block to be read
    :type start: int
    :param nsamps: number of samples in the block (i.e. block will be
        nsamps*nchans in size)
    :type nsamps: int
    :param as_filterbankBlock: return a FilterbankBlock rather than a bare
        array, defaults to True
    :type as_filterbankBlock: bool, optional
    :param apply_weights: apply the subint weights while reading,
        defaults to False
    :type apply_weights: bool, optional
    :param apply_scales: apply the subint scales while reading,
        defaults to False
    :type apply_scales: bool, optional
    :param apply_offsets: apply the subint offsets while reading,
        defaults to False
    :type apply_offsets: bool, optional
    :return: 2-D array of filterbank data
    :rtype: :class:`~sigpyproc.Filterbank.FilterbankBlock`
    """
    # Starting and ending subints covering the requested interval.
    # Floor division keeps the arithmetic exact for large sample indices
    # (int(a / b) goes through a float and can lose precision).
    startsub = int(start // self.specinfo.spectra_per_subint)
    skip = int(start - (startsub * self.specinfo.spectra_per_subint))
    endsub = int((start + nsamps - 1) // self.specinfo.spectra_per_subint)
    trunc = int(((endsub + 1) * self.specinfo.spectra_per_subint) - (start + nsamps))

    # sort of cread (#TODO if possible)
    # Read full subints (need to be more fast #TODO)
    data = self.get_data(
        startsub,
        endsub,
        apply_weights=apply_weights,
        apply_scales=apply_scales,
        apply_offsets=apply_offsets,
    )  # data shape is (nsample, nchan)

    # Truncate data to desired interval
    if trunc > 0:
        data = data[skip:-trunc]
    elif trunc == 0:
        data = data[skip:]
    else:
        raise ValueError("Number of bins to truncate is negative: %d" % trunc)

    # Transpose the data (return as nchan, nsamp)
    data = data.transpose()
    # Re-stamp the header start epoch for the block's first sample.
    start_mjd = self.header.mjdAfterNsamps(start)
    new_header = self.header.newHeader({'tstart': start_mjd})
    if as_filterbankBlock:
        return FilterbankBlock(data, new_header)
    return data
def readDedispersedBlock(
    self, start, nsamps, dm, as_filterbankBlock=True, small_reads=True
):
    """Read a block of dedispersed filterbank data, best used in cases where
    I/O time dominates reading a block of data.

    Parameters
    ----------
    start : int
        first time sample of the block to be read
    nsamps : int
        number of samples in the block (i.e. block will be nsamps*nchans
        in size)
    dm : float
        dispersion measure to dedisperse at
    as_filterbankBlock : bool, optional
        whether to read data as filterbankBlock or numpy array,
        by default True
    small_reads : bool, optional
        if the datum size is greater than 1 byte, only read the data needed
        instead of every frequency of every sample, by default True

    Returns
    -------
    :class:`~sigpyproc.Filterbank.FilterbankBlock` or :py:obj:`numpy.ndarray`
        2-D array of filterbank data
    """
    # Output block, one row per channel, filled column-by-column below.
    data = np.zeros((self.header.nchans, nsamps), dtype=self._file.dtype)
    # Per-channel window of absolute sample indices to read, shifted by the
    # DM delay of each channel: [min_sample, max_sample) per channel.
    min_sample = start + self.header.getDMdelays(dm)
    max_sample = min_sample + nsamps
    # How many samples have been stored so far for each channel.
    curr_sample = np.zeros(self.header.nchans, dtype=int)
    start_mjd = self.header.mjdAfterNsamps(start)
    new_header = self.header.newHeader({"tstart": start_mjd})
    lowest_chan, highest_chan, sample_offset = (0, 0, start)
    with tqdm(total=nsamps * self.header.nchans) as progress:
        # Iterate until the last channel has collected all of its samples
        # (assumes getDMdelays is monotonic so the last channel finishes
        # last -- NOTE(review): confirm against Header.getDMdelays).
        while curr_sample[-1] < nsamps:
            # Channels whose dedispersed window covers this time sample.
            relevant_channels = np.argwhere(
                np.logical_and(
                    max_sample > sample_offset, min_sample <= sample_offset
                )
            ).flatten()
            lowest_chan = np.min(relevant_channels)
            highest_chan = np.max(relevant_channels)
            # Treat the relevant channels as one contiguous run.
            sampled_chans = np.arange(lowest_chan, highest_chan + 1, dtype=int)
            read_length = sampled_chans.size
            if self.bitfact == 1 and small_reads:
                # Whole-byte data: seek directly to the first wanted channel
                # of this sample and read only the contiguous run of channels.
                next_offset = (
                    sample_offset * self.sampsize + lowest_chan * self.itemsize
                )
                self._file.seek(self.header.hdrlen + next_offset)
                data[sampled_chans, curr_sample[sampled_chans]] = self._file.cread(
                    read_length
                )
            else:
                # Sub-byte data (or small reads disabled): read the whole
                # spectrum for this sample, then select the wanted channels.
                next_offset = sample_offset * self.sampsize
                self._file.seek(self.header.hdrlen + next_offset)
                sample = self._file.cread(self.sampsize)
                data[sampled_chans, curr_sample[sampled_chans]] = sample[
                    sampled_chans
                ]
            curr_sample[sampled_chans] += 1
            if curr_sample[highest_chan] > nsamps:
                # Skip ahead to where the next channel's window begins.
                # NOTE(review): indexes min_sample[highest_chan + 1]; assumes
                # highest_chan is never the final channel when this branch is
                # taken, otherwise this is out of bounds -- verify.
                sample_offset = min_sample[highest_chan + 1]
            else:
                sample_offset += 1
            progress.update(read_length)
    if as_filterbankBlock:
        data = FilterbankBlock(data, new_header)
        data.dm = dm
        return data
    return data