# If your data is elsewhere, pass it as an argument
datapath = sys.argv[1] if len(sys.argv) > 1 else os.environ['BEACON_DATA']
runs = numpy.array([793])  # numpy.array([734,735,736,737,739,740,746,747,757,757,762,763,764,766,767,768,769,770,781,782,783,784,785,786,787,788,789,790,792,793])  # Selects which run to examine
event_limit = 1
save_fig = True

for run_index, run in enumerate(runs):
    reader = Reader(datapath, run)
    eventids = cc.getTimes(reader)[3]
    if event_limit is not None:
        if event_limit < len(eventids):
            eventids = eventids[0:event_limit]

    waveform_times, templates = loadTemplates(template_dirs['run%i' % run]['dir'])
    original_wf_len = int(reader.header().buffer_length)
    upsample_wf_len = original_wf_len * template_dirs['run%i' % run]['resample_factor']
    corr_delay_times = numpy.arange(-upsample_wf_len + 1, upsample_wf_len)

    # Setup filter
    freqs = numpy.fft.rfftfreq(len(waveform_times), d=(waveform_times[1] - waveform_times[0]) / 1.0e9)
    b, a = scipy.signal.butter(template_dirs['run%i' % run]['filter_order'], template_dirs['run%i' % run]['crit_freq_low_pass_MHz'] * 1e6, 'low', analog=True)
    d, c = scipy.signal.butter(template_dirs['run%i' % run]['filter_order'], template_dirs['run%i' % run]['crit_freq_high_pass_MHz'] * 1e6, 'high', analog=True)

    filter_x_low_pass, filter_y_low_pass = scipy.signal.freqs(b, a, worN=freqs)
    filter_x_high_pass, filter_y_high_pass = scipy.signal.freqs(d, c, worN=freqs)
    filter_x = freqs
    filter_y = numpy.multiply(filter_y_low_pass, filter_y_high_pass)  # combined band-pass response
    filter_y = numpy.tile(filter_y, (8, 1))  # one copy per channel

    templates_scaled = {}  # precomputing
    len_template = len(waveform_times)
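# The makeFilter helper called further down in these scripts is not defined in this excerpt.
# The function below is a minimal sketch of what such a helper could look like, assuming it
# mirrors the Butterworth setup above (analog low-pass and high-pass responses evaluated on
# the rfft frequency grid and multiplied together).  The name makeFilterSketch and its exact
# signature are assumptions for illustration, not the project's actual implementation.
def makeFilterSketch(waveform_times, crit_freq_low_pass_MHz, crit_freq_high_pass_MHz, filter_order, plot_filter=False):
    import numpy
    import scipy.signal
    import matplotlib.pyplot as plt

    # waveform_times is assumed to be in ns, matching the d = dt/1.0e9 conversion used above.
    freqs = numpy.fft.rfftfreq(len(waveform_times), d=(waveform_times[1] - waveform_times[0]) / 1.0e9)
    b, a = scipy.signal.butter(filter_order, crit_freq_low_pass_MHz * 1e6, 'low', analog=True)
    d, c = scipy.signal.butter(filter_order, crit_freq_high_pass_MHz * 1e6, 'high', analog=True)
    _, h_low_pass = scipy.signal.freqs(b, a, worN=freqs)
    _, h_high_pass = scipy.signal.freqs(d, c, worN=freqs)
    filter_y = numpy.multiply(h_low_pass, h_high_pass)  # combined band-pass response
    if plot_filter:
        plt.figure()
        plt.plot(freqs / 1e6, numpy.abs(filter_y))
        plt.xlabel('Freq (MHz)')
        plt.ylabel('|H(f)|')
    return filter_y, freqs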
wf = reader.wf(0)
wf, waveform_times = scipy.signal.resample(wf, len(wf) * resample_factor, t=reader.t())
dt = waveform_times[1] - waveform_times[0]
filter_y, freqs = makeFilter(waveform_times, crit_freq_low_pass_MHz, crit_freq_high_pass_MHz, filter_order, plot_filter=plot_filter)

try:
    for channel in range(8):
        waveforms_upsampled['ch%i' % channel] = numpy.zeros((len(eventids), reader.header().buffer_length * resample_factor))
        waveforms_raw['ch%i' % channel] = numpy.zeros((len(eventids), reader.header().buffer_length))

    print('Loading Waveforms_upsampled')
    plt.figure()
    ax = plt.subplot(4, 2, 1)
    good_events = []
    for waveform_index, eventid in enumerate(eventids):
        sys.stdout.write('(%i/%i)\r' % (waveform_index, len(eventids)))
        sys.stdout.flush()
        reader.setEntry(eventid)
        raw_wfs, upsampled_wfs = loadSignals(reader, eventid, filter_y)
        good_events.append(~numpy.any(
def getSpectData(datapath, run, event_limit, bin_size=10, trigger_type=1, group_fft=False):
    '''
    This function obtains the data for a spectrogram.

    Parameters
    ----------
    datapath : str
        The path to the data where the runs are stored.  This is the same as the input to
        the reader class.
    run : int
        The run number to be loaded.
    event_limit : int
        This limits the number of events to load.  Loads from beginning of run to end, so
        reducing this speeds up the calculation by cutting off the later portions of the run.
    bin_size : int
        This is the number of seconds to include in each time slice of the spectrogram.  The
        average spectra will be computed per bin.  Default is 10.
    trigger_type : int
        This is the trigger type of events included in the spectrogram.  The default is 1.
    group_fft : bool
        This enables the fft calculation to be performed simultaneously for all events, rather
        than per waveform as they are loaded in.  This may be faster but requires more memory.
        Default is False.

    Returns
    -------
    reader : examples.beacon_data_reader.Reader
        This is the reader for the selected run.
    freqs : numpy.ndarray of floats
        This is the list of frequencies corresponding to the y-axis of the spectrogram data.
    spectra_dbish_binned : dict
        This is the data corresponding to the spectrogram.  Each entry in the dictionary
        contains the spectrogram data for a particular channel.  These are returned in dB-like
        units, i.e. they are calculated as if the waveforms were in volts, but in reality the
        waveforms are in adu, so there is some offset from these values to true dB units.
    '''
    reader = Reader(datapath, run)
    N = reader.N() if event_limit == None else min(reader.N(), abs(event_limit))

    print('\nReader:')
    d = tools.interpret.getReaderDict(reader)
    pprint(d)
    print('\nHeader:')
    h = tools.interpret.getHeaderDict(reader)
    pprint(h)
    print('\nStatus:')
    s = tools.interpret.getStatusDict(reader)
    pprint(s)

    if reader.N() == 0:
        print('No events found in the selected run.')
    else:
        def rfftWrapper(channel, waveform_times, *args, **kwargs):
            spec = numpy.fft.rfft(*args, **kwargs)
            real_power_multiplier = 2.0 * numpy.ones_like(spec)  # The factor of 2 accounts for the power in the negative-frequency bins that rfft drops, except for the DC and Nyquist bins (handled below).
            if len(numpy.shape(spec)) != 1:
                real_power_multiplier[:, [0, -1]] = 1.0
            else:
                real_power_multiplier[[0, -1]] = 1.0
            spec_dbish = 10.0 * numpy.log10(real_power_multiplier * spec * numpy.conj(spec) / len(waveform_times))  # 10 because doing power in log.  Dividing by N to match monutau.
            return channel, spec_dbish

        waveform_times = reader.t()
        freq_step = 1.0 / (len(waveform_times) * (numpy.diff(waveform_times)[0] * 1e-9))
        freqs = numpy.arange(len(waveform_times) // 2 + 1) * freq_step
        freq_nyquist = 1 / (2.0 * numpy.diff(waveform_times)[0] * 1e-9)

        if group_fft == True:
            waveforms = {}
        spectra_dbish = {}
        readout_times = []

        for channel in range(8):
            if group_fft == True:
                waveforms['ch%i' % channel] = numpy.zeros((N, reader.header().buffer_length), dtype=int)
            spectra_dbish['ch%i' % channel] = numpy.zeros((N, reader.header().buffer_length // 2 + 1), dtype=float)

        print('')
        for event_index, eventid in enumerate(range(N if event_limit == None else event_limit)):
            sys.stdout.write('\r(%i/%i)' % (eventid + 1, N))
            sys.stdout.flush()
            reader.setEntry(eventid)
            readout_times.append(getattr(reader.header(), 'readout_time'))
            for channel in range(8):
                if group_fft == True:
                    waveforms['ch%i' % channel][event_index] = reader.wf(channel)
                else:
                    spectra_dbish['ch%i' % channel][event_index] = rfftWrapper('ch%i' % channel, waveform_times, reader.wf(channel))[1]

        if group_fft == True:
            with concurrent.futures.ThreadPoolExecutor(max_workers=cpu_count()) as executor:
                thread_results = []
                for channel in range(8):
                    thread_results.append(executor.submit(rfftWrapper, 'ch%i' % channel, waveform_times, waveforms['ch%i' % channel]))

                print('Weaving threads')
                sys.stdout.flush()
                for index, future in enumerate(concurrent.futures.as_completed(thread_results)):
                    spectra_dbish[future.result()[0]] = future.result()[1]
                    print('%i/8 Channel FFTs Completed' % (index + 1))

        bin_edges = numpy.arange(min(readout_times), max(readout_times) + bin_size, bin_size)
        bin_L_2d = numpy.tile(bin_edges[:-1], (len(readout_times), 1))
        bin_R_2d = numpy.tile(numpy.roll(bin_edges, -1)[:-1], (len(readout_times), 1))
        readout_times_2d = numpy.tile(readout_times, (len(bin_edges) - 1, 1)).T

        cut_2d = numpy.logical_and(readout_times_2d >= bin_L_2d, readout_times_2d < bin_R_2d).T

        del bin_L_2d
        del bin_R_2d
        del readout_times_2d

        spectra_dbish_binned = {}
        for channel in range(8):
            spectra_dbish_binned['ch%i' % channel] = numpy.zeros((len(freqs), len(bin_edges) - 1))
            for index, cut in enumerate(cut_2d):
                spectra_dbish_binned['ch%i' % channel][:, index] = numpy.mean(spectra_dbish['ch%i' % channel][cut], axis=0)
            spectra_dbish_binned['ch%i' % channel] = numpy.flipud(numpy.ma.array(spectra_dbish_binned['ch%i' % channel], mask=numpy.isnan(spectra_dbish_binned['ch%i' % channel])))

        return reader, freqs, spectra_dbish_binned
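# The lines below are a minimal usage sketch for getSpectData, not part of the original
# script.  The datapath, run number, event limit, and plotting layout are assumptions chosen
# for illustration; only the call signature and the returned
# (reader, freqs, spectra_dbish_binned) structure come from the function above.
if __name__ == '__main__':
    import os
    import matplotlib.pyplot as plt

    example_datapath = os.environ['BEACON_DATA']  # assumed environment variable, as used elsewhere in these scripts
    example_run = 793                             # hypothetical run number for illustration
    example_bin_size = 10                         # seconds per spectrogram time slice
    reader, freqs, spectra_dbish_binned = getSpectData(example_datapath, example_run, event_limit=1000, bin_size=example_bin_size)

    plt.figure()
    for channel in range(8):
        plt.subplot(4, 2, channel + 1)
        # Each entry is a (len(freqs), n_time_bins) masked array; the flipud in getSpectData
        # puts the highest frequencies in the first row, so imshow's default origin draws
        # them at the top of each panel.
        data = spectra_dbish_binned['ch%i' % channel]
        plt.imshow(data, aspect='auto', extent=[0, data.shape[1] * example_bin_size, 0, freqs[-1] / 1e6])
        plt.ylabel('Freq (MHz)')
        plt.title('ch%i' % channel)
    plt.show()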
eventid = numpy.array([numpy.random.randint(reader.N())])
eventid = [2401]
for eid in eventid:
    reader.setEntry(eid)

    ## dump the headers and status, just to show they're there
    if verbose == True:
        print('\nReader:')
        pprint(tools.interpret.getReaderDict(reader))
        print('\nHeader:')
        pprint(tools.interpret.getHeaderDict(reader))
        print('\nStatus:')
        pprint(tools.interpret.getStatusDict(reader))
        reader.header().Dump()
        reader.status().Dump()
        #print reader.N()

    # plot all waveforms
    plt.figure()
    for i in range(4):
        if i == 0:
            ax = plt.subplot(4, 1, i + 1)
            plt.plot(reader.t() - 5200, reader.wf(2 * i))
        else:
            plt.subplot(4, 1, i + 1, sharex=ax, sharey=ax)
            plt.plot(reader.t() - 5200, reader.wf(2 * i))
        if i in [0, 1, 2, 3]:
            plt.ylabel('V (adu)')
        if i == 3:
exclude_eventids = []
waveforms_corr = {}
upsampled_waveforms = {}
for channel in channels:
    channel = int(channel)
    waveforms_corr['ch%i' % channel] = numpy.zeros((len(eventids), len(waveform_times_corr)))
    upsampled_waveforms['ch%i' % channel] = numpy.zeros((len(eventids), final_corr_length // 2))

for event_index, eventid in enumerate(eventids):
    sys.stdout.write('(%i/%i)\r' % (event_index, len(eventids)))
    sys.stdout.flush()
    reader.setEntry(eventid)
    event_times = reader.t()
    for channel in channels:
        channel = int(channel)
        waveforms_corr['ch%i' % channel][event_index][0:reader.header().buffer_length] = reader.wf(channel)
        # Below are the actual time-domain waveforms_corr and should not have the factor-of-2
        # padding.  The small rounding padding sticks around, so the
        # waveform_times_padded_to_power2 times are used when slicing here.
        if use_filter:
            upsampled_waveforms['ch%i' % channel][event_index] = numpy.fft.irfft(numpy.multiply(filter_y_wf, numpy.fft.rfft(waveforms_corr['ch%i' % channel][event_index][0:len(waveform_times_padded_to_power2)])), n=final_corr_length // 2) * ((final_corr_length // 2) / len(waveform_times_padded_to_power2))
        else:
            upsampled_waveforms['ch%i' % channel][event_index] = numpy.fft.irfft(numpy.fft.rfft(waveforms_corr['ch%i' % channel][event_index][0:len(waveform_times_padded_to_power2)]), n=final_corr_length // 2) * ((final_corr_length // 2) / len(waveform_times_padded_to_power2))
        #upsampled_waveforms['ch%i'%channel][event_index], upsampled_times = scipy.signal.resample(waveforms_corr['ch%i'%channel][event_index][0:len(waveform_times_padded_to_power2)], final_corr_length//2, t=waveform_times_padded_to_power2)
'''
print('Upsampling waveforms_corr')
for channel in channels:
    print(channel)
    channel = int(channel)
    upsampled_waveforms['ch%i'%channel], upsampled_times = scipy.signal.resample(waveforms_corr['ch%i'%channel], 2*(final_corr_length//2 + 1), t=waveform_times_corr, axis=1)
'''
print('\n')
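# Standalone sketch of the frequency-domain upsampling used above, included for clarity and
# not part of the original script: asking numpy.fft.irfft for more output samples than the
# rfft length zero-pads the spectrum, and the trailing (n_out / n_in) factor undoes irfft's
# 1/n normalization so the amplitude of the original samples is preserved.  The toy signal
# and the upsampling factor of 4 below are illustrative assumptions.
import numpy

n_original = 64
n_upsampled = 4 * n_original
toy_wf = numpy.sin(2.0 * numpy.pi * numpy.arange(n_original) / 16.0)

spectrum = numpy.fft.rfft(toy_wf)
toy_wf_upsampled = numpy.fft.irfft(spectrum, n=n_upsampled) * (n_upsampled / n_original)

# For this band-limited toy signal the upsampled waveform passes through the original samples.
assert numpy.allclose(toy_wf_upsampled[::4], toy_wf)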
#Prepare filter
reader.setEntry(eventids[0])
wf = reader.wf(0)
if use_raw:
    if resample_factor != 1:
        print('\n!!!\nUsing raw waveforms for alignment.  Setting the resample factor to 1.\n!!!\n')
        resample_factor = 1
wf, waveform_times = scipy.signal.resample(wf, len(wf) * resample_factor, t=reader.t())
dt = waveform_times[1] - waveform_times[0]
filter_y, freqs = makeFilter(waveform_times, crit_freq_low_pass_MHz, crit_freq_high_pass_MHz, filter_order, plot_filter=plot_filter)

try:
    for channel in range(8):
        waveforms_upsampled['ch%i' % channel] = numpy.zeros((len(eventids), reader.header().buffer_length * resample_factor))
        waveforms_raw['ch%i' % channel] = numpy.zeros((len(eventids), reader.header().buffer_length))

    print('Loading Waveforms_upsampled')
    #plt.figure()
    #ax = plt.subplot(4,2,1)
    for waveform_index, eventid in enumerate(eventids):
        sys.stdout.write('(%i/%i)\r' % (waveform_index, len(eventids)))
        sys.stdout.flush()
        reader.setEntry(eventid)
        raw_wfs, upsampled_wfs = loadSignals(reader, eventid, filter_y)
        for channel in range(8):