def parse_and_run(args):
    """Dispatch spectrum operations based on parsed command-line arguments.

    Handles four mutually exclusive modes, exiting after each:
      --asc:       convert a spectrum file to a simple "bin,count" ASC file
      --smear:     read, smear, and rewrite a spectrum
      --scale:     read, scale, and rewrite a spectrum
      --sum_files: element-wise sum several histograms into --output_file

    args: argparse.Namespace with chan, smear, scale, asc, input_filename,
          sum_files and output_file attributes.
    """
    my_chan = args.chan
    myspectrum = spectrum(my_chan, args.smear, args.scale)

    if args.asc is not None:
        # (Fixed: the original re-constructed an identical spectrum object
        # here; the one built above is used directly.)
        myspectrum.read_spectrum(args.asc)
        # NOTE(review): assumes the input filename carries a 5-character
        # suffix which [:-5] strips before appending ".asc" -- confirm.
        with open(args.asc[:-5] + ".asc", "w") as f:
            for bin_num, count in enumerate(
                    myspectrum.spectrum[str(my_chan)], start=1):
                f.write(str(bin_num) + "," + str(round(count)) + '\n')
        exit(0)

    if (args.smear is not None) and (args.scale is None):
        myspectrum.read_spectrum(args.input_filename)
        myspectrum.smear_spectrum()
        myspectrum.write_spectrum()
        exit(0)

    if args.scale is not None:
        myspectrum.read_spectrum(args.input_filename)
        myspectrum.scale_spectrum()
        myspectrum.write_spectrum()
        exit(0)

    if args.sum_files is not None:
        if args.output_file is None:
            print("Must specify the output file via --output_file")
            exit(1)
        hist_size = 8192
        combined_hist_pd = pd.DataFrame(0, index=np.arange(hist_size),
                                        columns=[str(my_chan)])
        for my_hist in args.sum_files:
            print("Summing:", my_hist)
            myinput = input_handler(my_hist)
            data_pd = myinput.read_in_data()
            if data_pd.size < hist_size:
                # Pad the shorter histogram with zero rows so the column
                # lengths match before element-wise addition.
                zero_hist = pd.DataFrame(
                    0, index=np.arange(hist_size - data_pd.size),
                    columns=[str(my_chan)])
                # pd.concat replaces DataFrame.append, which was removed
                # in pandas 2.0.
                data_pd = pd.concat([data_pd, zero_hist], ignore_index=True)
            combined_hist_pd = combined_hist_pd + data_pd
        print("Writing sum to:", args.output_file)
        combined_hist_pd.to_csv(args.output_file, sep='|', header=True,
                                index=False, chunksize=50000, mode='w',
                                encoding='utf-8')
    return
def parse_and_run(args):
    """Read the input data file and hand it off to hist_gen for plotting.

    args: argparse.Namespace carrying file names, plot cosmetics, the
    channel selection and the optional zoom window.
    """
    zoom_xmin = None
    zoom_xmax = None
    # With no explicit channel list, all channels are summed together.
    sum_all = args.channels is None

    mydata_df = input_handler(args.input_filename).read_in_data()

    # Fall back to the input file name when no title was supplied.
    title = args.input_filename if args.plot_title is None else args.plot_title

    if args.zoom is True:
        # Clamp the requested zoom window to the valid pulse-height range.
        xmin_req = args.zoom_xmin
        if xmin_req is None or xmin_req < args.min_pulse_height:
            zoom_xmin = args.min_pulse_height
        else:
            zoom_xmin = xmin_req
        xmax_req = args.zoom_xmax
        if xmax_req is None or xmax_req > args.max_pulse_height:
            zoom_xmax = args.max_pulse_height
        else:
            zoom_xmax = xmax_req

    # NOTE(review): assumes the input filename ends in a 5-character suffix.
    input_file_wo_suffix = args.input_filename[:-5]
    myhist = hist_gen(input_filename=input_file_wo_suffix,
                      save_all=args.save_all,
                      overlay_files=args.overlay_files,
                      max_pulse_height=args.max_pulse_height,
                      min_pulse_height=args.min_pulse_height,
                      title=title,
                      xlabel=args.xlabel,
                      ylabel=args.ylabel,
                      energy_labels=args.energy_labels,
                      y_axis_min=args.y_axis_min,
                      y_axis_max=args.y_axis_max,
                      zoom=args.zoom,
                      zoom_xmin=zoom_xmin,
                      zoom_xmax=zoom_xmax,
                      zoom_ymin=args.zoom_ymin,
                      zoom_ymax=args.zoom_ymax,
                      ylog_zoom=args.ylog_zoom,
                      overlay_multipliers=args.overlay_multipliers,
                      output_filename=args.output_filename,
                      ylog=args.ylog)
    myhist.grapher(mydata_df, args.channels, sum_all)
    return
def __init__(self, sort_type, event_length, event_extra_gap,
             max_hits_per_event, calibrate, cal_file,
             ppg_data_file=None, ppg_value_range=None):
    """Store the sorter configuration and load optional support files.

    sort_type: sorting mode; "histo" additionally creates the per-channel
        histogram dict self.histo_data_dict.
    event_length / event_extra_gap / max_hits_per_event: event-building
        parameters, stored as-is for later use.
    calibrate: when truthy, cal_file is read via energy_calibration.
    cal_file: path to the energy-calibration file (only used if calibrate).
    ppg_data_file: optional CSV of PPG (pulse generator) state changes;
        when given, it is read here and scanned for the first "start"
        entry's per-ADC timestamps.
    ppg_value_range: stored as-is; not used within this constructor.
    """
    self.sort_type = sort_type
    self.event_length = event_length
    self.event_extra_gap = event_extra_gap
    self.max_hits_per_event = max_hits_per_event
    self.calibrate = calibrate
    self.ppg_data_file = ppg_data_file
    self.ppg_value_range = ppg_value_range
    self.total_count = 0
    # NOTE(review): presumably the 16-bit ADC full scale (2**16) -- confirm.
    self.max_pulse_height = 65536
    if sort_type == "histo":
        self.histo_data_dict = {}
    if self.calibrate:
        self.energy_calibration = energy_calibration(cal_file)
        self.energy_calibration.read_in_calibration_file()
    # events = event_handler(self.sort_type, event_queue, self.EVENT_LENGTH, self.EVENT_EXTRA_GAP, self.MAX_HITS_PER_EVENT)
    if self.ppg_data_file is not None:
        # Counters for hits that fall outside the usable PPG time window.
        self.missed_hits_later = 0
        self.missed_hits_begin = 0
        self.first_change_timestamp = {}
        self.first_change_timestamp['mdpp16_timestamp'] = None
        self.first_change_timestamp['grif16_timestamp'] = None
        print("Reading in PPG Data file:", self.ppg_data_file)
        csv_reader_object = input_handler(self.ppg_data_file)
        ppg_data_list_tmp = csv_reader_object.read_in_data(separator=',')
        # Keep the data as a list of dicts, not a DataFrame.
        self.ppg_data_list = ppg_data_list_tmp.to_dict('records')
        ppg_data_list_tmp = 0  # Just cleaning up the memory from the pandas df
        for ppg_entry in self.ppg_data_list:  # Find first usable entry
            if ppg_entry[
                    'ppg_action'] == "start":  # Make sure we are starting with a real start entry..
                # Record the first timestamp listed in the ppg data file
                # for each ADC.
                self.first_change_timestamp['mdpp16_timestamp'] = ppg_entry[
                    'mdpp16_timestamp']
                self.first_change_timestamp[
                    'grif16_timestamp'] = ppg_entry['grif16_timestamp']
                break
def graph_with_overlays(self, my_axis, energy_axis, my_hist, zoomed):
    # Main graphing function, determines if there is a zoomed region or multipliers
    # it also handles overlays which is just data from a file to add to the main graph or
    # zoomed region.
    #
    # my_axis: matplotlib-style axis the steps are drawn on.
    # energy_axis: x values passed straight to my_axis.step.
    # my_hist: base histogram counts; overlays are added on top of it.
    # zoomed: overlays are only drawn when this is True (see NOTE below).
    if (self.overlay_files is not None) and (zoomed is True):
        for my_overlay_file in self.overlay_files:
            # Default to a single unity multiplier if none were configured.
            if self.overlay_multipliers is None:
                self.overlay_multipliers = [1]
            for my_multiplier in self.overlay_multipliers:
                myinput = input_handler(my_overlay_file)
                my_overlay_df = myinput.read_in_data()
                # NOTE(review): the outer guard already requires
                # zoomed is True, so this else branch is unreachable --
                # confirm whether overlays were ever meant to draw unzoomed.
                if zoomed is True:
                    # Slice the overlay to the zoom window and scale it.
                    my_overlay_df = (my_overlay_df[self.zoom_xmin - 1:
                                                   self.zoom_xmax - 1]
                                     * my_multiplier)
                else:
                    my_overlay_df = my_overlay_df * my_multiplier
                # Zero-pad the overlay so it matches the base histogram length.
                length_diff = len(my_hist) - len(my_overlay_df[str(
                    self.overlay_chan)])
                my_zeros = np.zeros(length_diff)
                my_overlay_df = np.concatenate(
                    (my_overlay_df[str(self.overlay_chan)], my_zeros))
                # NOTE(review): this recomputed length_diff is never used.
                length_diff = len(my_hist) - len(my_overlay_df)
                if self.smear_overlay is True:
                    my_overlay_df = self.gaussian_smearing(
                        my_overlay_df, 1)
                combined_hist = 0
                combined_hist = my_hist + my_overlay_df
                # A zero multiplier means "background only"; otherwise label
                # with the multiplier in scientific notation.
                if my_multiplier == 0:
                    my_label = "Background"
                else:
                    my_label = "NEEC " + self.sci_notation(my_multiplier)
                # Thicker line in the zoomed view.
                linewidth = 1
                if zoomed is True:
                    linewidth = 2
                my_axis.step(energy_axis, combined_hist, where='mid',
                             label=my_label, linewidth=linewidth)
                # Optionally fit the peak, but only when the fit window lies
                # entirely inside the zoom window.
                if self.fit_peak is True and (
                    self.fit_peak_xmin >= self.zoom_xmin
                ) and (
                    self.fit_peak_xmax <= self.zoom_xmax
                ):  # make sure zoom is also between the two fit values
                    #self.gothere = self.gothere + 1
                    if my_multiplier > 0:
                        print("Overlay Multiplier:",
                              self.sci_notation(my_multiplier))
                        # Fit coordinates are shifted into the zoomed frame.
                        self.peak_fitting(
                            combined_hist, my_axis,
                            self.fit_peak_xmin - self.zoom_xmin,
                            self.fit_peak_xmax - self.zoom_xmin)
    else:
        # No overlays: optionally smear the base histogram, then draw it.
        if self.smearing is True:
            my_hist = self.gaussian_smearing(my_hist)
        my_axis.step(energy_axis, my_hist, where='mid')
def read_spectrum(self, input_filename):
    """Load spectrum data from *input_filename* into self.spectrum.

    Also remembers the file name on self.input_filename for later use.
    """
    self.input_filename = input_filename
    reader = input_handler(input_filename)
    self.spectrum = reader.read_in_data()