def __init__(self, your_object):
    """
    Derive the DADA ring-buffer geometry for *your_object*.

    Sets ``dada_size`` (buffer size), ``data_step`` (samples consumed per
    read) and a random 16-bit ``dada_key`` (hex string). For PSRFITS input
    the step is expressed in whole subints; for filterbank input it is a
    divisor of the total number of spectra capped near 2**18 samples.

    Args:
        your_object: input data object exposing ``isfits``, ``your_header``
            and (for FITS data) ``specinfo``.
    """
    self.your_object = your_object
    header = self.your_object.your_header
    if self.your_object.isfits:
        logger.debug("Calculating dada size and data step for the fits files")
        spec = self.your_object.specinfo
        self.list_of_subints = spec.num_subint.astype("int")
        if len(self.list_of_subints) > 1:
            # Several files: the GCD of their subint counts is the largest
            # step that splits every file evenly.
            self.subint_steps = int(find_gcd(self.list_of_subints))
        else:
            # A single large file: derive a chunk size from its prime
            # factorisation so the file is read in equal pieces.
            unique_factors = np.unique(primes(self.list_of_subints))
            self.subint_steps = int(np.max(np.prod(unique_factors)))
        # NOTE(review): unlike the filterbank branch below, the nbits/8
        # factor is commented out here, so this value is in samples rather
        # than bytes — confirm what the downstream DADA consumer expects.
        self.dada_size = (
            self.subint_steps * header.nchans * spec.spectra_per_subint
        )  # * self.your_object.nbits / 8 # bytes
        self.data_step = int(self.subint_steps * spec.spectra_per_subint)
    else:
        nsamp_gulp = 2 ** 18
        logger.debug("Calculating dada size and data step for the filterbank file")
        if header.nspectra < nsamp_gulp:
            # Small file: take everything in a single read.
            self.dada_size = header.nspectra * header.nchans * header.nbits / 8  # bytes
            self.data_step = int(header.nspectra)
        else:
            # Large file: pick the divisor of nspectra closest to the
            # preferred gulp so reads tile the file exactly.
            self.data_step = int(closest_divisor(header.nspectra, nsamp_gulp))
            self.dada_size = self.data_step * header.nchans * header.nbits / 8  # bytes
    # Random 4-hex-digit key identifying the DADA buffer.
    self.dada_key = hex(np.random.randint(0, 16 ** 4))
def __init__(
    self,
    your_object,
    nstart=0,
    nsamp=None,
    c_min=None,
    c_max=None,
    outdir=None,
    outname=None,
    flag_rfi=False,
    progress=True,
    spectral_kurtosis_sigma=4,
    savgol_frequency_window=15,
    savgol_sigma=4,
    gulp=None,
    zero_dm_subt=False,
    time_decimation_factor=1,
    frequency_decimation_factor=1,
    replacement_policy="mean",
):
    """
    Collect the writing parameters and derive sensible defaults.

    Args:
        your_object: input data object; ``your_object.your_header`` must
            provide ``nspectra`` and ``filename``.
        nstart (int): index of the first spectrum to write.
        nsamp (int): number of spectra to write; defaults to all spectra.
        c_min: lowest channel index to keep (None = no lower cut).
        c_max: highest channel index to keep (None = no upper cut).
        outdir (str): output directory; defaults to the input file's directory.
        outname (str): output file stem; defaults to "<input>_converted".
        flag_rfi (bool): run RFI flagging on each gulp.
        progress (bool): show a progress bar.
        spectral_kurtosis_sigma: sigma threshold for spectral-kurtosis flagging.
        savgol_frequency_window: Savitzky–Golay filter frequency window.
        savgol_sigma: sigma threshold for Savitzky–Golay flagging.
        gulp (int): samples read per iteration; derived from the prime
            factorisation of ``nsamp`` when not given.
        zero_dm_subt (bool): enable zero-DM RFI subtraction.
        time_decimation_factor (int): must be 1 (not implemented yet).
        frequency_decimation_factor (int): must be 1 (not implemented yet).
        replacement_policy (str): 'mean', 'median' or 'zero' — value used
            to replace flagged samples.

    Raises:
        NotImplementedError: if either decimation factor is greater than 1.
        ValueError: if ``replacement_policy`` is not one of the allowed values.
    """
    self.your_object = your_object
    self.nstart = nstart
    # Default to writing every spectrum in the input.
    if nsamp is None:
        self.nsamp = self.your_object.your_header.nspectra
    else:
        self.nsamp = nsamp
    self.c_min = c_min
    self.c_max = c_max
    self.time_decimation_factor = time_decimation_factor
    self.frequency_decimation_factor = frequency_decimation_factor
    if self.time_decimation_factor > 1:
        raise NotImplementedError("We have not implemented this feature yet.")
    if self.frequency_decimation_factor > 1:
        raise NotImplementedError("We have not implemented this feature yet.")
    self.replacement_policy = replacement_policy
    if self.replacement_policy not in ["mean", "median", "zero"]:
        # Message unchanged; dropped a pointless f-prefix (no placeholders).
        raise ValueError(
            "replacement_policy can only be 'mean', 'median' or 'zero'."
        )
    self.outdir = outdir
    self.outname = outname
    self.flag_rfi = flag_rfi
    self.progress = progress
    self.sk_sig = spectral_kurtosis_sigma
    self.sg_fw = savgol_frequency_window
    self.sg_sig = savgol_sigma
    self.zero_dm_subt = zero_dm_subt
    self.data = None
    self.dada_is_set = False
    if gulp is not None:
        self.gulp = gulp
    else:
        # Pick a gulp that divides nsamp exactly: factorise nsamp, sort the
        # prime factors in descending order and take the middle cumulative
        # product. If nsamp is prime (single factor) this degenerates to
        # gulp == nsamp, i.e. the whole input is read in one go.
        p = np.sort(primes(self.nsamp))[::-1]
        if len(p) > 1:
            cumprods = np.cumprod(p)
            self.gulp = int(cumprods[len(cumprods) // 2])
        else:
            self.gulp = self.nsamp
    if self.gulp > self.nsamp:
        self.gulp = self.nsamp
    original_dir, orig_basename = os.path.split(
        self.your_object.your_header.filename
    )
    if not self.outname:
        name, ext = os.path.splitext(orig_basename)
        if ext == ".fits":
            # PSRFITS names often end in a running file number
            # (e.g. *_0001.fits); strip it before adding the suffix.
            temp = name.split("_")
            if len(temp) > 1:
                self.outname = "_".join(temp[:-1]) + "_converted"
            else:
                self.outname = name + "_converted"
        else:
            self.outname = name + "_converted"
    if self.outdir is None:
        self.outdir = original_dir
    # Consistency fix: use the module-level logger (as the dada-setup code
    # above does) instead of the root logger via logging.debug.
    logger.debug("Writer Attributes:-")
    for arg, value in sorted(vars(self).items()):
        logger.debug("Attribute %s: %r", arg, value)