def prepare_file(self, file):
    """
    Prepares the PSRFITS file in the correct format for the program to use.
    """
    try:
        hdul = fits.open(file)
    except OSError:
        return -1

    name = hdul[0].header['SRC_NAME']
    fe = hdul[0].header['FRONTEND']

    # Reject files that are not pulsar observations of the expected source / frontend.
    if hdul[0].header['OBS_MODE'] != "PSR" or name != self.psr_name or fe != self.frontend:
        hdul.close()
        return -1
    hdul.close()

    ar = Archive(file, verbose=self.verbose)
    ar.tscrunch(nsubint=1)
    ar.fscrunch(nchan=self.subbands)
    nchan = ar.getNchan()
    nbin = ar.getNbin()
    data = ar.getData()

    return np.copy(data), nchan, nbin
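# A minimal usage sketch for the method above (the enclosing object and the
# file name are hypothetical; note the -1 sentinel must be checked before
# unpacking):
#
#     result = self.prepare_file("puppi_58000_J1234+5678_0001.fits")
#     if result == -1:
#         pass  # header vetting failed or the file could not be opened; skip it
#     else:
#         data, nchan, nbin = result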
def apply_cal_factor_to_prof(dir, psr_file, cal_factor, rfi_mitigation=False,
                             threshold=3, setdata=True):
    file = dir + psr_file
    ar = Archive(file, verbose=False)
    if rfi_mitigation:
        rfi_mit = RFIMitigator(ar)
        rfi_mit.zap_minmax(threshold=threshold)
    nsub = ar.getNsubint()
    data = ar.getData()

    # Convert the IQUV profiles to the AA / BB polarization basis.
    converted_data_aa, converted_data_bb = IQUV_to_AABB(data, basis="cartesian")

    new_converted_aa = np.empty(nsub, dtype=object)
    new_converted_bb = np.empty(nsub, dtype=object)
    new_converted = np.empty(nsub, dtype=object)
    new_data = np.empty(nsub, dtype=object)

    # Scale each sub-integration by the per-channel calibration factors,
    # then convert back to IQUV.
    for i in np.arange(nsub):
        new_converted_aa[i] = row_multiply(converted_data_aa[i], cal_factor[0])
        new_converted_bb[i] = row_multiply(converted_data_bb[i], cal_factor[1])
        new_converted[i] = np.array((new_converted_aa[i], new_converted_bb[i]))
        new_data[i] = AABB_to_IQUV(new_converted[i], basis="cartesian")

    if setdata:
        ar.setData(new_data)

    return new_data, ar
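# The helpers row_multiply, IQUV_to_AABB, and AABB_to_IQUV are defined
# elsewhere in this package. A minimal sketch of the per-channel scaling that
# row_multiply is assumed to perform (each row is one frequency channel):
#
#     def row_multiply(arr, factors):
#         return arr * np.asarray(factors)[:, np.newaxis]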
def load_archive(file, tscrunch=False):
    ar = Archive(file, verbose=False)
    if tscrunch:
        ar.tscrunch(nsubint=4)
    name = ar.getName()
    mjd = int(ar.getMJD())
    fe = ar.getFrontend()
    nbin = ar.getNbin()
    data = ar.getData().reshape((ar.getNchan() * ar.getNsubint(), nbin))
    return name, mjd, fe, nbin, data
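# Usage sketch (the file name is invented for illustration):
#
#     name, mjd, fe, nbin, data = load_archive("puppi_58000_J1234+5678_0001.fits",
#                                              tscrunch=True)
#     # data is a 2D (subint * channel, phase bin) array, ready for plotting or masking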
def apply_cal_factor_to_prof_matrix(dir, psr_file, cal_factor, rfi_mitigation=False,
                                    threshold=3, setdata=True):
    """
    Matrix-form variant of apply_cal_factor_to_prof: applies the per-channel
    calibration factors as diagonal matrices via a dot product rather than
    row-wise multiplication.
    """
    cal_fa = vector_to_diagonal(cal_factor[0])
    cal_fb = vector_to_diagonal(cal_factor[1])
    file = dir + psr_file
    ar = Archive(file, verbose=False)
    if rfi_mitigation:
        rfi_mit = RFIMitigator(ar)
        rfi_mit.zap_minmax(threshold=threshold)
    data = ar.getData(squeeze=False)
    converted_data = IQUV_to_AABB(data, basis="cartesian")

    new_converted = []
    new_data = []
    # Assumes channels lie along the rows of each (nchan, nbin) sub-integration,
    # so left-multiplying by the diagonal calibration matrix scales each channel.
    for sub in converted_data:
        aa = np.dot(cal_fa, sub[0])
        bb = np.dot(cal_fb, sub[1])
        new_converted.append((aa, bb))
        new_data.append(AABB_to_IQUV(np.array((aa, bb)), basis="cartesian"))
    new_data = np.array(new_data)

    if setdata:
        ar.setData(new_data)
    return new_data, ar
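# vector_to_diagonal is defined elsewhere in this package; it is assumed to
# behave like np.diag on a 1-D vector, e.g.:
#
#     def vector_to_diagonal(v):
#         return np.diag(np.asarray(v).ravel())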
def get_Jy_per_count(dir, psr_cal_file, fitAA, fitBB):
    file = dir + psr_cal_file
    ar = Archive(file, verbose=False)
    ar.tscrunch()

    s_duty = ar.getValue("CAL_PHS")
    duty = ar.getValue("CAL_DCYC")
    nchan = ar.getNchan()
    nbin = ar.getNbin()
    BW = ar.getBandwidth()
    data = ar.getData()
    CTR_FREQ = ar.getCenterFrequency(weighted=True)

    converted_data = IQUV_to_AABB(data, basis="cartesian")
    frequencies = chan_to_freq(CTR_FREQ, BW, nchan)

    psr_cal = np.zeros((2, nchan, nbin))
    high_psr = np.zeros((2, nchan))
    low_psr = np.zeros((2, nchan))

    # Split each noise-diode profile into its on ("high") and off ("low") levels.
    for i in np.arange(2):
        for j in np.arange(nchan):
            psr_cal[i][j], high_psr[i][j], low_psr[i][j] = prepare_cal_profile(
                converted_data[0][i][j], s_duty, duty)

    # Calculate jy_per_count{p, f}: the fitted continuum flux density divided by
    # the height of the cal square wave in counts.
    jy_per_count_factor = np.zeros_like(high_psr)
    for j in np.arange(nchan):
        jy_per_count_factor[0][j] = fitAA(frequencies[j]) / (
            high_psr[0][j] - low_psr[0][j])  # AA has units Jy / count
        jy_per_count_factor[1][j] = fitBB(frequencies[j]) / (
            high_psr[1][j] - low_psr[1][j])  # BB has units Jy / count

    return jy_per_count_factor
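# prepare_cal_profile is defined elsewhere in this package. A minimal sketch of
# the contract it is assumed to satisfy, given the CAL_PHS start phase and
# CAL_DCYC duty cycle of the noise diode (the rolling logic is illustrative,
# not the actual implementation):
def prepare_cal_profile_sketch(profile, start_phase, duty_cycle):
    nbin = len(profile)
    # Rotate so the cal "on" portion starts at bin 0, then split on / off.
    rolled = np.roll(profile, -int(start_phase * nbin))
    width = int(duty_cycle * nbin)
    high = rolled[:width].mean()   # mean level with the noise diode on
    low = rolled[width:].mean()    # mean level with the noise diode off
    return profile, high, low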
def get_AABB_Fcal(dir, continuum_on, continuum_off, args, G=10.0, T0=1.0):
    ON, OFF = dir + continuum_on, dir + continuum_off

    if args.freq_zap is not None:
        for i, arg in enumerate(args.freq_zap):
            args.freq_zap[i] = int(args.freq_zap[i])

    ar_on, ar_off = Archive(ON, verbose=False), Archive(OFF, verbose=False)
    rfi_on, rfi_off = RFIMitigator(ar_on), RFIMitigator(ar_off)

    s_duty_on, s_duty_off = ar_on.getValue("CAL_PHS"), ar_off.getValue("CAL_PHS")
    duty_on, duty_off = ar_on.getValue("CAL_DCYC"), ar_off.getValue("CAL_DCYC")
    nchan_on, nchan_off = ar_on.getNchan(), ar_off.getNchan()
    nbin_on, nbin_off = ar_on.getNbin(), ar_off.getNbin()
    BW_on, BW_off = ar_on.getBandwidth(), ar_off.getBandwidth()
    CTR_FREQ_on, CTR_FREQ_off = (ar_on.getCenterFrequency(weighted=True),
                                 ar_off.getCenterFrequency(weighted=True))

    ar_on.tscrunch()
    ar_off.tscrunch()

    # Zap channels either by channel index or by frequency range.
    if args.freq_zap is not None:
        if len(args.freq_zap) == 1:
            if args.channel_space:
                rfi_on.zap_channels(args.freq_zap)
                rfi_off.zap_channels(args.freq_zap)
            else:
                print("No zapping occurred (tried to zap channels in frequency "
                      "space). Carrying on with calibration...")
        elif len(args.freq_zap) == 2 and not args.channel_space:
            rfi_on.zap_frequency_range(args.freq_zap[0], args.freq_zap[1])
            rfi_off.zap_frequency_range(args.freq_zap[0], args.freq_zap[1])
        else:
            rfi_on.zap_channels(args.freq_zap)
            rfi_off.zap_channels(args.freq_zap)

    data_on, data_off = ar_on.getData(squeeze=True), ar_off.getData(squeeze=True)
    converted_data_on = IQUV_to_AABB(data_on, basis="cartesian")
    converted_data_off = IQUV_to_AABB(data_off, basis="cartesian")

    # Initialize the continuum data (axes: polarization, channel[, bin]).
    continuum_on_source = np.zeros((2, nchan_on, nbin_on))
    high_on_mean, low_on_mean = np.zeros((2, nchan_on)), np.zeros((2, nchan_on))
    continuum_off_source = np.zeros((2, nchan_off, nbin_off))
    high_off_mean, low_off_mean = np.zeros((2, nchan_off)), np.zeros((2, nchan_off))
    f_on, f_off = np.zeros_like(high_on_mean), np.zeros_like(high_off_mean)
    C0 = np.zeros_like(high_off_mean)
    T_sys = np.zeros_like(C0)
    F_cal = np.zeros_like(T_sys)

    # Load the continuum data
    for i in np.arange(2):
        for j in np.arange(nchan_on):
            continuum_on_source[i][j], high_on_mean[i][j], low_on_mean[i][j] = \
                prepare_cal_profile(converted_data_on[0][i][j], s_duty_on, duty_on)
            continuum_off_source[i][j], high_off_mean[i][j], low_off_mean[i][j] = \
                prepare_cal_profile(converted_data_off[0][i][j], s_duty_off, duty_off)

            f_on[i][j] = (high_on_mean[i][j] / low_on_mean[i][j]) - 1
            f_off[i][j] = (high_off_mean[i][j] / low_off_mean[i][j]) - 1
            if np.isnan(f_on[i][j]):
                f_on[i][j] = 1
            if np.isnan(f_off[i][j]):
                f_off[i][j] = 1

            C0[i][j] = T0 / ((1 / f_on[i][j]) - (1 / f_off[i][j]))
            T_sys[i][j] = C0[i][j] / f_off[i][j]
            F_cal[i][j] = (T_sys[i][j] * f_off[i][j]) / G  # F_cal has units Jy / cal
            if np.isnan(F_cal[i][j]):
                F_cal[i][j] = 0

    frequencies_on_off = chan_to_freq(CTR_FREQ_on, BW_on, nchan_on)

    # Interpolate F_cal across frequency for each polarization.
    f1 = interp1d(frequencies_on_off, F_cal[0], kind='cubic',
                  fill_value='extrapolate')
    f2 = interp1d(frequencies_on_off, F_cal[1], kind='cubic',
                  fill_value='extrapolate')
    return f1, f2
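# A worked numeric sketch of the calibration algebra above (the values are
# invented for illustration): with f = high/low - 1 measured on and off source,
# C0 = T0 / (1/f_on - 1/f_off), T_sys = C0 / f_off, and F_cal = T_sys * f_off / G.
# Note that T_sys * f_off reduces back to C0, so F_cal is simply C0 / G.
def fcal_example():
    T0, G = 1.0, 10.0          # cal temperature and gain, as in the defaults above
    f_on, f_off = 0.25, 1.0    # invented cal fractions on / off source
    C0 = T0 / ((1 / f_on) - (1 / f_off))   # = 1/3
    T_sys = C0 / f_off                     # = 1/3
    F_cal = (T_sys * f_off) / G            # = 1/30 Jy per cal deflection
    return F_cal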
class Zap():
    """
    Master class for zapping data.

    Requires:
        file     - .FITS (must be PSRFITS v5+ format)

    Optional:
        template  - ASCII format: BIN# Flux (required if not doing NN excision)
        method    - Either 'chauvenet', 'DMAD' or 'NN'
        verbose   - Prints more information to the console
        **kwargs  - Passed to plot.histogram_and_curves()
    """

    def __init__(self, file, template, method='chauvenet', nn_params=None,
                 verbose=False, **kwargs):
        self.file = file
        if "cal" in self.file:
            raise ValueError(f"File {self.file} is not in PSR format.")
        elif "59071" in self.file:
            raise ValueError("Not doing 59071...")
        self.method = method
        self.verbose = verbose
        self.ar = Archive(file, verbose=False)
        if method != 'NN':
            _, self.template = u.get_data_from_asc(template)
            self.opw = u.get_1D_OPW_mask(self.template, windowsize=128)
            self.omit, self.rms_mu, self.rms_sigma = self.get_omission_matrix(**kwargs)
            unique, counts = np.unique(self.omit, return_counts=True)
            print(f"Good channels: {100*(counts[0]/sum(counts)):.3f}%")
            print(f"Bad channels: {100*(counts[1]/sum(counts)):.3f}%")
        elif nn_params is not None:
            # Flatten the data cube to (profiles, bins) and scale each profile
            # to [0, 1] before feeding it to the neural network.
            df = pd.DataFrame(
                np.reshape(self.ar.getData(),
                           (self.ar.getNsubint() * self.ar.getNchan(),
                            self.ar.getNbin())))
            scaler = MinMaxScaler()
            scaled_df = scaler.fit_transform(df.iloc[:, :])
            scaled_df = pd.DataFrame(scaled_df)
            self.x = scaled_df.iloc[:, :].values.transpose()
            self.nn = NeuralNet(self.x, np.array([[0], [0]]))
            self.nn.dims = [self.ar.getNbin(), 512, 10, 13, 8, 6, 6, 4, 4, 1]
            self.nn.threshold = 0.5
            self.nn.load_params(root=nn_params)
            self.omit = self.nn_get_omission()
            np.set_printoptions(threshold=sys.maxsize)
            unique, counts = np.unique(self.omit, return_counts=True)
            print(f"Good channels: {100*(counts[0]/sum(counts)):.3f}%")
            print(f"Bad channels: {100*(counts[1]/sum(counts)):.3f}%")
        else:
            sys.exit()

    def nn_get_omission(self):
        pred = np.around(np.squeeze(self.nn.pred_data(self.x, False)),
                         decimals=0).astype(int)
        pred = np.reshape(pred, (self.ar.getNsubint(), self.ar.getNchan()))
        return pred

    def get_omission_matrix(self, **kwargs):
        # Needs a 2D array as input
        rms, lin_rms, mu, sigma = u.rms_arr_properties(
            self.ar.getData(), self.opw, 1.0)

        # Creates the histogram
        plot.histogram_and_curves(
            lin_rms, mean=mu, std_dev=sigma,
            bins=(self.ar.getNchan() * self.ar.getNsubint()) // 4,
            x_axis='Root Mean Squared', y_axis='Frequency Density',
            title=r'$M={},\ \sigma={}$'.format(mu, sigma), **kwargs)

        if self.method == 'chauvenet':
            rej_arr = physics.chauvenet(rms, median=mu, std_dev=sigma, threshold=2.0)
        elif self.method == 'DMAD':
            rej_arr = physics.DMAD(lin_rms, threshold=3.5)
            rej_arr = np.reshape(rej_arr,
                                 (self.ar.getNsubint(), self.ar.getNchan()))

        if self.verbose:
            print("Rejection criterion created.")

        return rej_arr, mu, sigma

    def plot_mask(self, **kwargs):
        fig = plt.figure(figsize=(7, 7))
        ax = fig.add_subplot(111)
        ax.imshow(self.omit.T, cmap=plt.cm.gray, interpolation='nearest',
                  aspect='auto')
        plt.show()

    def save_training_set(self, val_size=0.2):
        """
        Saves training / validation sets built from the Chauvenet or DMAD
        omission matrix. In the saved sets, 1 marks a bad channel.
        """
        with open(f'{self.ar.getName()}_{int(self.ar.getMJD())}_{self.ar.getFrontend()}_{self.ar.getNbin()}.training', 'w') as t:
            t.write(f'# Training set for {self.ar.getName()} taken on {int(self.ar.getMJD())} at {self.ar.getFrontend()}\n')
        with open(f'{self.ar.getName()}_{int(self.ar.getMJD())}_{self.ar.getFrontend()}_{self.ar.getNbin()}.validation', 'w') as t:
            t.write(f'# Validation set for {self.ar.getName()} taken on {int(self.ar.getMJD())} at {self.ar.getFrontend()}\n')

        # Seed rows: profile bins plus one label column (assumes 2048-bin profiles).
        ps_0 = np.zeros(2049)[np.newaxis, :]
        ps_1 = np.zeros(2049)[np.newaxis, :]

        d = self.ar.getData().reshape(
            (self.ar.getNchan() * self.ar.getNsubint(), self.ar.getNbin()))
        omission = self.omit.reshape((self.ar.getNchan() * self.ar.getNsubint()))

        i = 1
        for omit, profile in zip(omission, d):
            try:
                # Flip the omission flag so that 1 marks a bad channel.
                choice = int(omit)
                if choice == 1:
                    choice = 0
                elif choice == 0:
                    choice = 1
            except ValueError:
                choice = -1
            print(i, end='\r')
            if choice != -1:
                # Creates the profile / choice pairs.
                p = np.append(profile, choice)
                if choice == 0:
                    ps_0 = np.append(ps_0, p[np.newaxis, :], axis=0)
                else:
                    ps_1 = np.append(ps_1, p[np.newaxis, :], axis=0)
            i += 1

        # Remove the all-zero seed rows.
        ps_0, ps_1 = np.delete(ps_0, 0, 0), np.delete(ps_1, 0, 0)

        # Sort into training / validation sets
        train, validation = train_test_split(ps_0, test_size=val_size)
        ones_t, ones_v = train_test_split(ps_1, test_size=val_size)
        train = np.append(train, ones_t, axis=0)
        validation = np.append(validation, ones_v, axis=0)
        np.random.shuffle(train), np.random.shuffle(validation)

        for k in train:
            with open(f'{self.ar.getName()}_{int(self.ar.getMJD())}_{self.ar.getFrontend()}_{self.ar.getNbin()}.training', 'a') as t:
                np.savetxt(t, k, fmt='%1.5f ', newline='')
                t.write("\n")

        for k in validation:
            with open(f'{self.ar.getName()}_{int(self.ar.getMJD())}_{self.ar.getFrontend()}_{self.ar.getNbin()}.validation', 'a') as t:
                np.savetxt(t, k, fmt='%1.5f ', newline='')
                t.write("\n")

    # Save as ASCII text file
    def save(self, outroot="zap_out", ext='.ascii'):
        outfile = outroot + ext
        with open(outfile, 'w+') as f:
            for i, t in enumerate(self.omit):
                for j, rej in enumerate(t):
                    if rej:
                        f.write(str(i) + " " + str(self.ar.freq[i][j]) + "\n")
        return outfile
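# A hypothetical usage sketch for the Zap class (the file and template names
# are invented):
#
#     z = Zap("puppi_58000_J1234+5678_0001.fits", "J1234+5678_template.asc",
#             method='chauvenet', verbose=True)
#     z.plot_mask()                                   # inspect the omission matrix
#     outfile = z.save(outroot="J1234+5678_58000")    # write channel/frequency pairs to zap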
class DataCull:
    '''
    Main class for data culling pulsar fits files to get a less noisy data set.
    '''

    def __init__(self, filename, template, directory=None, SNLim=3000, verbose=False):
        '''
        Initializes all archives and parameters in the data cube for a given
        file. Also requires a template to be passed in.

        A custom signal / noise lower bound can be set on initialization, but
        the default is 3000; the current archive is flagged for rejection if
        its SNR is below this threshold. One can also set whether long arrays
        and other bits of console text are printed in full or in shorthand.
        '''
        if verbose:
            print("Initializing DataCull object...")

        self.SNError = False

        # Parse directory as a string, or choose the CWD if no directory is given
        if directory is None:
            self.directory = str(os.getcwd())
        else:
            self.directory = str(directory)

        # Parse filename
        if os.path.isfile(self.directory + filename):
            self.filename = str(filename)
        else:
            raise FileNotFoundError(
                "File {} not found in this directory...".format(filename))

        # Load the template
        self.template = self._loadTemplate(template)

        # Parse verbose option
        self.verbose = verbose

        # Parse SNLim
        self.SNLim = SNLim

        # Load the file in the archive
        self.ar = Archive(self.__str__(), verbose=self.verbose)

        # Togglable print options
        if self.verbose:
            np.set_printoptions(threshold=np.inf)

        # Check if Signal / Noise is too low
        if self.ar.getSN() < SNLim:
            if self.verbose:
                print("Signal / Noise ratio is way too low. (Below {})".format(SNLim))
                print("Data set to be thrown out...")
            self.SNError = True

        # Load the data cube for the file
        self.data = self.ar.getData()

    def __repr__(self):
        return "DataCull( filename = {}, template = {}, directory = {}, SNLim = {}, verbose = {} )".format(
            self.filename, self.templateName, self.directory, self.SNLim,
            self.verbose)

    def __str__(self):
        return self.directory + self.filename

    def _loadTemplate(self, templateFilename):
        '''
        Loads a template specified by the user. If no extension is given, the
        extension .npy will be used. Note that this code is designed for numpy
        arrays, so it would be wise to use them.

        Returns the template.
        '''
        # Parse the template's filename into a string and ensure the correct extension
        self.templateName = str(templateFilename)
        self.templateName = u.addExtension(self.templateName, 'npy')

        # Load the template
        template = np.load(self.templateName)

        return template

    def reject(self, criterion='chauvenet', iterations=1, fourier=True,
               rms=True, binShift=True, showPlots=False):
        '''
        Performs the rejection algorithm until the number of iterations has
        been reached or the data culling is complete, whichever comes first.
        The default number of iterations is 1.

        Requires the criterion to be set, with the default being Chauvenet's
        criterion. This is the function to use to reject all outliers fully.
        '''
        if self.verbose:
            print("Beginning data rejection for {}...".format(self.filename))

        # Initialize the completion flag to false
        self.rejectionCompletionFlag = False

        if fourier:
            if self.verbose:
                print("Beginning FFT data rejection...")
            for i in np.arange(iterations):
                self.fourierTransformRejection(criterion, showPlots, showPlots)

                # If all possible outliers have been found and the flag is set
                # to true, don't bother doing any more iterations.
                if self.rejectionCompletionFlag:
                    generation = i + 1
                    if self.verbose:
                        print("FFT data rejection for {} complete after {} generations..."
                              .format(self.filename, generation))
                    break

            # If the completion flag is still false, the cycles finished before full excision
            if self.verbose and not self.rejectionCompletionFlag:
                print("Maximum number of iterations ({}) completed...".format(iterations))

            # Re-initialize the completion flag to false
            self.rejectionCompletionFlag = False

        if rms:
            if self.verbose:
                print("Beginning RMS data rejection...")
            for i in np.arange(iterations):
                self.rmsRejection(criterion, showPlots)

                # If all possible outliers have been found and the flag is set
                # to true, don't bother doing any more iterations.
                if self.rejectionCompletionFlag:
                    generation = i + 1
                    if self.verbose:
                        print("RMS data rejection for {} complete after {} generations..."
                              .format(self.filename, generation))
                    break

            # If the completion flag is still false, the cycles finished before full excision
            if self.verbose and not self.rejectionCompletionFlag:
                print("Maximum number of iterations ({}) completed...".format(iterations))

            # Re-initialize the completion flag to false
            self.rejectionCompletionFlag = False

        if binShift:
            if self.verbose:
                print("Beginning bin shift data rejection...")
            for i in np.arange(iterations):
                self.binShiftRejection(showPlots)

                # If all possible outliers have been found and the flag is set
                # to true, don't bother doing any more iterations.
                if self.rejectionCompletionFlag:
                    generation = i + 1
                    if self.verbose:
                        print("Bin shift data rejection for {} complete after {} generations..."
                              .format(self.filename, generation))
                    break

            # If the completion flag is still false, the cycles finished before full excision
            if self.verbose and not self.rejectionCompletionFlag:
                print("Maximum number of iterations ({}) completed...".format(iterations))

        # Re-load the data cube for the file
        self.data = self.ar.getData()

    def rmsRejection(self, criterion, showPlot=False):
        '''
        Rejects outlier root mean squared values for off-pulse regions and
        re-weights the data cube in the loaded archive.
        '''
        # Re-load the data cube for the file
        self.data = self.ar.getData()

        templateMask = pu.binMaskFromTemplate(self.template)

        rmsArray, linearRmsArray, mu, sigma = u.getRMSArrayProperties(
            self.data, templateMask)

        if showPlot:
            # Creates the histogram
            pltu.histogram_and_curves(
                linearRmsArray, mean=mu, std_dev=sigma,
                x_axis='Root Mean Squared', y_axis='Frequency Density',
                title=r'$\mu={},\ \sigma={}$'.format(mu, sigma), show=True,
                curve_list=[spyst.norm.pdf, mathu.test_dist.test_pdf])

        # Determine which criterion to use to reject data
        if criterion == 'chauvenet':  # Chauvenet's Criterion
            rejectionCriterion = mathu.chauvenet(rmsArray, mu, sigma, 3)
        elif criterion == 'DMAD':  # Double Median Absolute Deviation
            rejectionCriterion = mathu.doubleMAD(linearRmsArray)
            rejectionCriterion = np.reshape(
                rejectionCriterion, (self.ar.getNsubint(), self.ar.getNchan()))
        else:
            raise ValueError(
                "Allowed rejection criteria are either 'chauvenet' or 'DMAD'. "
                "Please use one of these...")

        # Set the weights of potential noise in each profile to 0
        u.zeroWeights(rejectionCriterion, self.ar, self.verbose)

        # Check whether there were any data to reject. If this array has
        # length 0, all data was good and the completion flag is set to true.
        if len(np.where(rejectionCriterion)[0]) == 0:
            self.rejectionCompletionFlag = True

        if self.verbose:
            print("Data rejection cycle complete...")

    def fourierTransformRejection(self, criterion, showTempPlot=False,
                                  showOtherPlots=False):
        '''
        Uses the FFT (Fast Fourier Transform) to get the breakdown of signals
        in the profile and compares it to the template.
        '''
        # Re-load the data cube
        data = self.ar.getData()
        tempData = self.template

        # Initialize guess parameters and the curve to fit
        guess_params = [100, 100, 1024]
        curve = mathu.FFT_dist._pdf

        # Set up arrays for FFT
        profFFT = np.zeros_like(data)
        tempFFT = fft(tempData)

        # Normalize the template array w.r.t. the max value and shift to the middle
        tempFFT = abs(mathu.normalizeToMax(abs(tempFFT.T)))
        tempFFT = fftshift(tempFFT)

        # Create template FFT mask
        fftTempMask = pu.binMaskFromTemplate(tempFFT)

        rmsArray, linearRmsArray, mu, sigma = u.getRMSArrayProperties(
            data, fftTempMask)

        tempParams = opt.curve_fit(curve, np.arange(len(tempFFT)), tempFFT,
                                   p0=guess_params)

        t = np.arange(0, len(tempFFT), 0.01)
        temp_fit = mathu.normalizeToMax(curve(t, *tempParams[0]))

        if showTempPlot:
            pltu.plotAndShow(tempFFT, t, temp_fit)

        # Loop over the time and frequency indices (subints and channels)
        for time in np.arange(self.ar.getNsubint()):
            for frequency in np.arange(self.ar.getNchan()):
                # FFT, then normalize and center the FFT'd profile
                profFFT[time][frequency] = fft(data[time][frequency])
                profFFT[time][frequency] = abs(
                    mathu.normalizeToMax(abs(profFFT[time][frequency].T)))
                profFFT[time][frequency] = fftshift(profFFT[time][frequency])

                # Skip profiles that are all zeros (e.g. already zero-weighted)
                if np.all(profFFT[time][frequency] == 0):
                    continue

                # Get optimization parameters for each profile for the same
                # curve used to fit the template.
                params = opt.curve_fit(curve, np.arange(len(tempFFT)),
                                       profFFT[time][frequency], p0=guess_params)

                # Normalize the curve with the fitted parameters
                prof_fit = mathu.normalizeToMax(curve(t, *params[0]))

                if showOtherPlots:
                    pltu.plotAndShow(profFFT[time][frequency], t, prof_fit,
                                     temp_fit)

                # NOTE: an alternative rejection based on Chauvenet / DMAD of
                # the FT RMS (as in rmsRejection) is disabled here; profiles
                # are instead flagged on the sign of the fitted width parameter.
                if params[0][1] < 0:
                    print("Not similar")
                    if self.verbose:
                        print("Setting the weight of (subint: {}, channel: {}) to 0"
                              .format(time, frequency))
                    self.ar.setWeights(0, t=time, f=frequency)
                else:
                    print("Similar")

        # Re-load the data cube
        self.data = self.ar.getData()

    def binShiftRejection(self, showPlot=False):
        '''
        Gets the bin shift and bin shift error of each profile in the file and
        plots both quantities as histograms. Then rejects outliers based on
        Chauvenet's criterion.
        '''
        nBinShift, nBinError = self.getBinShifts()

        # Reshape the bin shift and bin shift error arrays to be linear
        linearNBinShift = np.reshape(
            nBinShift, (self.ar.getNchan() * self.ar.getNsubint()))
        linearNBinError = np.reshape(
            nBinError, (self.ar.getNchan() * self.ar.getNsubint()))

        # Mean and standard deviation of the bin shift
        muS, sigmaS = np.nanmean(linearNBinShift), np.nanstd(linearNBinShift)

        # Mean and standard deviation of the bin shift error
        muE, sigmaE = np.nanmean(linearNBinError), np.nanstd(linearNBinError)

        if showPlot:
            # Create the histograms as two subplots
            pltu.histogram_and_curves(
                linearNBinShift, mean=muS, std_dev=sigmaS,
                x_axis=r'Bin Shift from Template, $\hat{\tau}$',
                y_axis='Frequency Density',
                title=r'$\mu={},\ \sigma={}$'.format(muS, sigmaS), show=True,
                curve_list=[spyst.norm.pdf])
            pltu.histogram_and_curves(
                linearNBinError, mean=muE, std_dev=sigmaE,
                x_axis=r'Bin Shift Error, $\sigma_{\tau}$',
                y_axis='Frequency Density',
                title=r'$\mu={},\ \sigma={}$'.format(muE, sigmaE), show=True,
                curve_list=[spyst.maxwell.pdf])

        rejectionCriterionS = mathu.chauvenet(nBinShift, muS, sigmaS)
        rejectionCriterionE = mathu.chauvenet(nBinError, muE, sigmaE)

        # Set the weights of potential noise in each profile to 0
        u.zeroWeights(rejectionCriterionS, self.ar, self.verbose)
        u.zeroWeights(rejectionCriterionE, self.ar, self.verbose)

        # Check whether there were any data to reject. If these arrays have
        # length 0, all data was good and the completion flag is set to true.
        if len(np.where(rejectionCriterionS)[0]) == 0 and len(
                np.where(rejectionCriterionE)[0]) == 0:
            self.rejectionCompletionFlag = True

        if self.verbose:
            print("Data rejection cycle complete...")

    def getBinShifts(self):
        '''
        Returns the bin shift and bin shift error.
        '''
        if self.verbose:
            print("Getting bin shifts and errors from the template...")

        # Re-load the data cube
        self.data = self.ar.getData()

        templateMask = pu.binMaskFromTemplate(self.template)

        # Return the array of RMS values for each profile
        rmsArray = mathu.rmsMatrix2D(self.data, mask=templateMask, nanmask=True)

        # Initialize the bin shifts and bin shift errors
        nBinShift = np.zeros((self.ar.getNsubint(), self.ar.getNchan()),
                             dtype=float)
        nBinError = np.zeros((self.ar.getNsubint(), self.ar.getNchan()),
                             dtype=float)

        # Use the PyPulse utility get_toa3 to obtain tauhat and sigma_tau for
        # each profile and feed them into the two arrays.
        for time in np.arange(self.ar.getNsubint()):
            for frequency in np.arange(self.ar.getNchan()):
                if all(amp == 0 for amp in self.data[time][frequency]):
                    nBinShift[time][frequency] = np.nan
                    nBinError[time][frequency] = np.nan
                else:
                    # Attempt to calculate the bin shift and error. If this is
                    # not possible, set the profile's weight to 0.
                    try:
                        tauccf, tauhat, bhat, sigma_tau, sigma_b, snr, rho = get_toa3(
                            self.template, self.data[time][frequency],
                            rmsArray[time][frequency], dphi_in=0.1,
                            snrthresh=0., nlagsfit=5, norder=2)
                        nBinShift[time][frequency] = tauhat
                        nBinError[time][frequency] = sigma_tau
                    except Exception:
                        if self.verbose:
                            print("Setting the weight of (subint: {}, channel: {}) to 0"
                                  .format(time, frequency))
                        self.ar.setWeights(0, t=time, f=frequency)
                        nBinShift[time][frequency] = np.nan
                        nBinError[time][frequency] = np.nan

        # Mask the nan values in the arrays so that histogram_and_curves
        # doesn't malfunction
        nBinShift = np.ma.array(nBinShift, mask=np.isnan(nBinShift))
        nBinError = np.ma.array(nBinError, mask=np.isnan(nBinError))

        return nBinShift, nBinError
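# A hypothetical usage sketch for DataCull (the file and template names are
# invented):
#
#     cull = DataCull("puppi_58000_J1234+5678_0001.fits", "J1234+5678_template.npy",
#                     directory="/data/", SNLim=3000, verbose=True)
#     if not cull.SNError:
#         cull.reject(criterion='chauvenet', iterations=3, showPlots=False)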
# Plots pulsar time series
import sys
import math

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as clr

from pypulse.archive import Archive
from physics import calculate_rms_matrix

if __name__ == "__main__":

    file = sys.argv[1]
    ar = Archive(file, prepare=True)
    ch = ar.getNsubint()
    chan = ar.getNchan()

    def on_key(event):
        # 'z' zaps the single (subint, channel) cell under the cursor;
        # 'r' zaps the cursor's channel across all sub-integrations.
        print(event.key, math.floor(event.xdata), math.floor(event.ydata))
        if event.key == 'z':
            with open(f'../Zap/{file[6:20]}_lbw_{file[-9:-5]}_2048.zap', 'a+') as t:
                t.write(f'{math.floor(event.xdata)} '
                        f'{ar.freq[math.floor(event.xdata)][math.floor(event.ydata)]}\n')
        elif event.key == 'r':
            with open(f'../Zap/{file[6:20]}_lbw_{file[-9:-5]}_2048.zap', 'a+') as t:
                for n in range(ch):
                    t.write(f'{n} {ar.freq[n][math.floor(event.ydata)]}\n')
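    # A minimal display sketch (an assumption; the original display code is
    # not shown here): plot a per-(subint, channel) statistic so that cursor
    # positions map onto subint (x) and channel (y), as the key handler above
    # expects, and connect the handler so 'z' / 'r' presses actually fire.
    # The call signature of calculate_rms_matrix is assumed.
    rms = calculate_rms_matrix(ar.getData())  # assumed shape: (nsubint, nchan)
    fig, ax = plt.subplots()
    ax.imshow(rms.T, aspect='auto', origin='lower')  # x: subint, y: channel
    fig.canvas.mpl_connect('key_press_event', on_key)
    plt.show()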