def getKnownPlaneTracks(ignore_planes=[]):
    '''
    Given the plane information outlined in loadKnownPlaneDict, this will return
    a composite dtype array with lat, lon, alt, and timing information for
    each unique reported position of each plane in loadKnownPlaneDict.

    This can then be interpolated by the user at timestamps to get the expected
    position corresponding to events in a run.
    '''
    try:
        known_planes = info.loadKnownPlaneDict(ignore_planes=ignore_planes)
        output_tracks = {}
        calibrated_trigtime = {}
        for key in list(known_planes.keys()):
            runs = numpy.unique(known_planes[key]['eventids'][:, 0])
            calibrated_trigtime[key] = numpy.zeros(
                len(known_planes[key]['eventids'][:, 0]))
            for run in runs:
                run_cut = known_planes[key]['eventids'][:, 0] == run
                reader = Reader(os.environ['BEACON_DATA'], run)
                eventids = known_planes[key]['eventids'][run_cut, 1]
                try:
                    filename = createFile(
                        reader
                    )  #Creates an analysis file if one does not exist.  Returns filename to load file.
                    with h5py.File(filename, 'r') as file:
                        calibrated_trigtime[key][run_cut] = file[
                            'calibrated_trigtime'][...][eventids]
                except Exception as e:
                    print(e)
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    fname = os.path.split(
                        exc_tb.tb_frame.f_code.co_filename)[1]
                    print(exc_type, fname, exc_tb.tb_lineno)
                    print('Calculating calibrated trig times.')
                    calibrated_trigtime[key][run_cut] = getEventTimes(
                        reader, plot=False, smooth_window=101)[eventids]

            known_flight = known_planes[key]['known_flight']
            all_vals = getTracks(min(calibrated_trigtime[key]),
                                 max(calibrated_trigtime[key]),
                                 1000,
                                 hour_window=12)[1]
            vals = all_vals[all_vals['names'] == known_flight]
            vals = vals[numpy.unique(vals['timestamps'], return_index=True)[1]]
            output_tracks[key] = vals
        return known_planes, calibrated_trigtime, output_tracks
    except Exception as e:
        print('Error in getKnownPlaneTracks.')
        print(e)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
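
# Usage sketch (not part of the original module): interpolate each known plane's track at
# the calibrated trigger times returned above.  The field names 'lat', 'lon', and 'alt' are
# assumed from the docstring; only 'timestamps' and 'names' appear explicitly in this code.
def _exampleInterpolateKnownPlaneTracks():
    import scipy.interpolate
    known_planes, calibrated_trigtime, output_tracks = getKnownPlaneTracks()
    interpolated = {}
    for key, track in output_tracks.items():
        interpolated[key] = {}
        for field in ['lat', 'lon', 'alt']:  # Assumed field names of the composite dtype.
            f = scipy.interpolate.interp1d(track['timestamps'], track[field],
                                           bounds_error=False, fill_value='extrapolate')
            interpolated[key][field] = f(calibrated_trigtime[key])
    return interpolated
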
            if farm_mode == True:
                print('Farm Mode = True, no plots will be available')
                calculate_correlation_values = True #If True then the values will be newly calculated, if False then they will be loaded from the existing files
            else:
                print('Farm Mode = False')
                calculate_correlation_values = False #If True then the values will be newly calculated, if False then they will be loaded from the existing files
            #Parameters:
            #Curve choice is a parameter in the bi-delta template model that changes the timing of the input delta signal.
            curve_choice = 0
            upsample_factor = 4
            save_data = True

            if farm_mode == False:
                plt.close('all')
            run = int(sys.argv[1])
            reader = Reader(datapath,run)

            #Prepare for Correlations
            reader.setEntry(0)
            waveform_times = reader.t()
            waveform_sample = reader.wf(0)
            waveform_sample, waveform_times = scipy.signal.resample(waveform_sample,len(waveform_sample)*upsample_factor,t=waveform_times) #upsample times to desired amount.

            cr_gen = crt.CosmicRayGenerator(waveform_times,t_offset=800.0,model='bi-delta')
            template_t, template_E = cr_gen.eFieldGenerator(plot=True,curve_choice=curve_choice)
            
            len_t = len(template_t)
            template_E = template_E/(numpy.std(template_E)*len_t) #Pre dividing to handle normalization of cross correlation.
            

            if calculate_correlation_values == True:
Example #3
def getSpectData(datapath,
                 run,
                 event_limit,
                 bin_size=10,
                 trigger_type=1,
                 group_fft=False):
    '''
    This function obtains the data for a spectrogram.

    Parameters
    ----------
    datapath : str
        The path to the data where the runs are stored.  This is the same as the input to
        the reader class.
    run : int
        The run number to be loaded.
    event_limit : int
        This limits the number of events to load.  Loads from beginning of run to end, so
        reducing this speeds up the calculation by cutting off the later portions of the
        run.
    bin_size : int
        This is the number of seconds to include in each time slice of the spectrogram.  The
        average spectra will be computed per bin.  Default is 10.
    trigger_type : int
        This is the trigger type of events included in the spectrogram.  The default is 1.
    group_fft : bool
        This enables the fft calculation to be performed simultaneously for all events, rather
        than per waveform as they are loaded in.  This may be faster but requires more memory.
        Default is False.

    Returns
    -------
    reader : examples.beacon_data_reader.Reader
        This is the reader for the selected run.
    freqs : numpy.ndarray of floats
        This is the list of frequencies corresponding to the y-axis of the spectrogram data.
    spectra_dbish_binned : dict
        This is the data corresponding to the spectrogram.  Each entry in the dictionary contains
        the spectrogram data for a particular channel.  These are returned in dB-like units, i.e.
        they are calculated as if the waveforms were in volts, but in reality the waveforms are in
        adu, so there is some offset between these values and true dB units.
    '''
    reader = Reader(datapath, run)
    N = reader.N() if event_limit == None else min(reader.N(),
                                                   abs(event_limit))

    print('\nReader:')
    d = tools.interpret.getReaderDict(reader)
    pprint(d)
    print('\nHeader:')
    h = tools.interpret.getHeaderDict(reader)
    pprint(h)
    print('\nStatus:')
    s = tools.interpret.getStatusDict(reader)
    pprint(s)

    if reader.N() == 0:
        print('No events found in the selected run.')
    else:

        def rfftWrapper(channel, waveform_times, *args, **kwargs):
            spec = numpy.fft.rfft(*args, **kwargs)
            real_power_multiplier = 2.0 * numpy.ones_like(
                spec
            )  #The factor of 2 is because rfft drops half of the power except for the DC and Nyquist bins (handled below).
            if len(numpy.shape(spec)) != 1:
                real_power_multiplier[:, [0, -1]] = 1.0
            else:
                real_power_multiplier[[0, -1]] = 1.0
            spec_dbish = 10.0 * numpy.log10(
                real_power_multiplier * spec * numpy.conj(spec) /
                len(waveform_times)
            )  #Factor of 10 because this is power in log space.  Dividing by N to match monutau.
            return channel, spec_dbish
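
        # In equation form, the conversion above is P_dBish(f) = 10*log10(m(f)*|X(f)|^2 / N),
        # where m(f) = 2 for all bins except the DC and Nyquist bins (which rfft does not
        # duplicate, so m(f) = 1 there) and N = len(waveform_times).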

        waveform_times = reader.t()
        freq_step = 1.0 / (len(waveform_times) *
                           (numpy.diff(waveform_times)[0] * 1e-9))
        freqs = numpy.arange(len(waveform_times) // 2 + 1) * freq_step
        freq_nyquist = 1 / (2.0 * numpy.diff(waveform_times)[0] * 1e-9)

        if group_fft == True:
            waveforms = {}
        spectra_dbish = {}
        readout_times = []

        for channel in range(8):
            if group_fft == True:
                waveforms['ch%i' % channel] = numpy.zeros(
                    (N, reader.header().buffer_length), dtype=int)
            spectra_dbish['ch%i' % channel] = numpy.zeros(
                (N, reader.header().buffer_length // 2 + 1), dtype=float)

        print('')

        for event_index, eventid in enumerate(range(N)):
            sys.stdout.write('\r(%i/%i)' % (eventid + 1, N))
            sys.stdout.flush()
            reader.setEntry(eventid)
            readout_times.append(getattr(reader.header(), 'readout_time'))
            for channel in range(8):
                if group_fft == True:
                    waveforms['ch%i' %
                              channel][event_index] = reader.wf(channel)
                else:
                    spectra_dbish['ch%i' % channel][event_index] = rfftWrapper(
                        'ch%i' % channel, waveform_times,
                        reader.wf(channel))[1]
        if group_fft == True:
            with concurrent.futures.ThreadPoolExecutor(
                    max_workers=cpu_count()) as executor:
                thread_results = []
                for channel in range(8):
                    thread_results.append(
                        executor.submit(rfftWrapper, 'ch%i' % channel,
                                        waveform_times,
                                        waveforms['ch%i' % channel]))

            print('Weaving threads')
            sys.stdout.flush()

            for index, future in enumerate(
                    concurrent.futures.as_completed(thread_results)):
                spectra_dbish[future.result()[0]] = future.result()[1]
                print('%i/8 Channel FFTs Completed' % (index + 1))

        bin_edges = numpy.arange(min(readout_times),
                                 max(readout_times) + bin_size, bin_size)
        bin_L_2d = numpy.tile(bin_edges[:-1], (len(readout_times), 1))
        bin_R_2d = numpy.tile(
            numpy.roll(bin_edges, -1)[:-1], (len(readout_times), 1))
        readout_times_2d = numpy.tile(readout_times, (len(bin_edges) - 1, 1)).T

        cut_2d = numpy.logical_and(readout_times_2d >= bin_L_2d,
                                   readout_times_2d < bin_R_2d).T
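        # Each row of cut_2d is a boolean mask over events selecting those whose readout time
        # falls in that time bin; numpy.digitize(readout_times, bin_edges) would give the same
        # bin assignment more compactly, but the explicit masks are used directly below.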

        del bin_L_2d
        del bin_R_2d
        del readout_times_2d

        spectra_dbish_binned = {}
        for channel in range(8):
            spectra_dbish_binned['ch%i' % channel] = numpy.zeros(
                (len(freqs), len(bin_edges) - 1))
            for index, cut in enumerate(cut_2d):
                spectra_dbish_binned['ch%i' % channel][:, index] = numpy.mean(
                    spectra_dbish['ch%i' % channel][cut], axis=0)
            spectra_dbish_binned['ch%i' % channel] = numpy.flipud(
                numpy.ma.array(spectra_dbish_binned['ch%i' % channel],
                               mask=numpy.isnan(
                                   spectra_dbish_binned['ch%i' % channel])))

        return reader, freqs, spectra_dbish_binned
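
# Usage sketch (not part of the original module): render the binned spectrograms returned by
# getSpectData for one run.  The run number is supplied by the caller, and matplotlib is
# assumed to be imported as plt, as it is elsewhere in this file.
def _examplePlotSpectrograms(datapath, run, bin_size=10):
    reader, freqs, spectra_dbish_binned = getSpectData(datapath, run, event_limit=None,
                                                       bin_size=bin_size, trigger_type=1)
    for channel in range(8):
        spec = spectra_dbish_binned['ch%i' % channel]
        plt.figure()
        plt.title('Run %i, ch%i' % (run, channel))
        # Rows were flipped with numpy.flipud, so the highest frequency sits in the first row
        # and is drawn at the top edge given the extent below.
        plt.imshow(spec, aspect='auto',
                   extent=[0, spec.shape[1] * bin_size, freqs[0] / 1e6, freqs[-1] / 1e6])
        plt.xlabel('Time since first readout (s)')
        plt.ylabel('Frequency (MHz)')
        plt.colorbar(label='dB-like power')
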
Example #4
matplotlib.rcParams['figure.figsize'] = [10, 11]
matplotlib.rcParams.update({'font.size': 16})

if __name__ == "__main__":
    if len(sys.argv) == 2:
        run = int(sys.argv[1])
    else:
        run = 1701

    try:

        template_filenames = numpy.array(
            glob.glob(os.environ['BEACON_ANALYSIS_DIR'] + 'templates/*.csv'))
        run = int(run)
        plot = False
        reader = Reader(datapath, run)
        try:
            print(reader.status())
        except Exception as e:
            print('Status Tree not present.  Returning Error.')
            print('\nError in %s' % inspect.stack()[0][3])
            print(e)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
            sys.exit(1)
        filename = createFile(
            reader
        )  #Creates an analysis file if one does not exist.  Returns filename to load file.
        if filename is not None:
            with h5py.File(filename, 'a') as file:
Example #5
    #Output
    save_templates = False

    #General Prep
    channels = numpy.arange(8,dtype=int)
    
    #Main loop
    for run_index, run in enumerate(runs):
        if 'run%i'%run in list(known_pulser_ids.keys()):
            try:
                if 'run%i'%run in list(ignorable_pulser_ids.keys()):
                    eventids = numpy.sort(known_pulser_ids['run%i'%run][~numpy.isin(known_pulser_ids['run%i'%run],ignorable_pulser_ids['run%i'%run])])
                else:
                    eventids = numpy.sort(known_pulser_ids['run%i'%run])

                reader = Reader(datapath,run)
                reader.setEntry(eventids[0])
                
                waveform_times = reader.t()
                dt = waveform_times[1]-waveform_times[0]
                waveform_times_padded_to_power2 = numpy.arange(2**(numpy.ceil(numpy.log2(len(waveform_times)))))*dt #Rounding the waveform length up to the next power of 2.  USED FOR WAVEFORMS
                waveform_times_corr = numpy.arange(2*len(waveform_times_padded_to_power2))*dt #Doubling the length for cross correlation later.  USED FOR CORRELATIONS
                
                if use_filter:
                    filter_y_corr,freqs_corr = makeFilter(waveform_times_corr,crit_freq_low_pass_MHz, crit_freq_high_pass_MHz, filter_order,plot_filter=True)
                    filter_y_wf,freqs_wf = makeFilter(waveform_times_padded_to_power2,crit_freq_low_pass_MHz, crit_freq_high_pass_MHz, filter_order,plot_filter=False)
                else:
                    freqs_corr = numpy.fft.rfftfreq(len(waveform_times_corr), d=(waveform_times_corr[1] - waveform_times_corr[0])/1.0e9)
                    freqs_wf = numpy.fft.rfftfreq(len(waveform_times_padded_to_power2), d=(waveform_times_padded_to_power2[1] - waveform_times_padded_to_power2[0])/1.0e9)

                df_corr = freqs_corr[1] - freqs_corr[0] #Note that this is the df for the padded correlation ffts and would not be the same as the one for the normal waveform ffts which have not been doubled in length. 
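
                # Sketch (not part of the original source): with the padded lengths prepared
                # above, a frequency-domain cross correlation of two channels would look
                # roughly like this.  Zero padding out to len(waveform_times_corr) keeps the
                # circular correlation computed by the FFT from wrapping around.
                example_fft_0 = numpy.fft.rfft(reader.wf(0), n=len(waveform_times_corr))
                example_fft_2 = numpy.fft.rfft(reader.wf(2), n=len(waveform_times_corr))
                example_corr = numpy.fft.fftshift(numpy.fft.irfft(example_fft_0 * numpy.conj(example_fft_2)))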
Example #6
        sine_subtract = True
        sine_subtract_min_freq_GHz = 0.03
        sine_subtract_max_freq_GHz = 0.09
        sine_subtract_percent = 0.03

        final_corr_length = 2**14

        run = int(run)
        time_window = 5 * 60  #seconds
        frequency_bin_edges_MHz = numpy.arange(0, 150, 5)
        antennas_of_interest = numpy.array([4, 5])
        trigger_types_of_interest = numpy.array([1, 3])
        save = True
        plot = False
        reader = Reader(datapath, run)
        try:
            print(reader.status())
        except Exception as e:
            print('Status Tree not present.  Returning Error.')
            print('\nError in %s' % inspect.stack()[0][3])
            print(e)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
            sys.exit(1)

        filename = createFile(
            reader
        )  #Creates an analysis file if one does not exist.  Returns filename to load file.
Example #7
    save_time_delays = True
    save_template = False


    waveform_index_range = (None,500)


    hpol_pairs  = numpy.array(list(itertools.combinations((0,2,4,6), 2)))
    vpol_pairs  = numpy.array(list(itertools.combinations((1,3,5,7), 2)))
    pairs       = numpy.vstack((hpol_pairs,vpol_pairs)) 

    try:
        run = int(run)

        reader = Reader(datapath,run)

        try:
            print(reader.status())
        except Exception as e:
            print('Status Tree not present.  Returning Error.')
            print('\nError in %s'%inspect.stack()[0][3])
            print(e)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
            sys.exit(1)
        
        tct = TemplateCompareTool(reader, final_corr_length=final_corr_length, crit_freq_low_pass_MHz=crit_freq_low_pass_MHz, crit_freq_high_pass_MHz=crit_freq_high_pass_MHz, low_pass_filter_order=low_pass_filter_order, high_pass_filter_order=high_pass_filter_order, waveform_index_range=(None,None), plot_filters=False,apply_phase_response=False)
        
        filename = createFile(reader) #Creates an analysis file if one does not exist.  Returns filename to load file.
    use_known_ids = True

    resample_factor = 1

    #Filter settings
    crit_freq_low_pass_MHz = 75
    crit_freq_high_pass_MHz = 35
    filter_order = 6
    plot_filter = True
    power_sum_cut_location = 50  #index
    power_sum_cut_value = 13000  #Events with a larger power sum than this are ignored.
    peak_cut = 60  #At least one channel has to have a signal cross this thresh.

    for run_index, run in enumerate(runs):
        eventids = known_pulser_ids['run%i' % run]
        reader = Reader(datapath, run)
        waveform_times = reader.t()
        waveforms_upsampled = {}
        waveforms_raw = {}

        #Prepare filter
        reader.setEntry(98958)
        wf = reader.wf(0)
        wf, waveform_times = scipy.signal.resample(wf,
                                                   len(wf) * resample_factor,
                                                   t=reader.t())
        dt = waveform_times[1] - waveform_times[0]
        filter_y, freqs = makeFilter(waveform_times,
                                     crit_freq_low_pass_MHz,
                                     crit_freq_high_pass_MHz,
                                     filter_order,
Example #9
    def alignSelectedEvents(self, plot_aligned_wf=False,save_template=False,plot_timedelays=True):
        '''
        My plan is for this to be called when some events are circled in the plot.
        It will take those waveforms, align them, and plot the averaged waveforms.  No
        filters will be applied.
        '''
        if plot_timedelays == True:
            runs, counts = numpy.unique(self.id[self.ind][:,0],return_counts=True)
            run = runs[numpy.argmax(counts)]
            print('Only calculating template from run with most points circled: run %i with %i events circled'%(run,max(counts)))
            eventids = self.id[self.ind][:,1][self.id[self.ind][:,0] == run]
            coords = self.xys[self.ind]

            self.plotTimeDelays(self.xys[self.ind][:,0]*60,self.total_hpol_delays[self.ind],self.total_vpol_delays[self.ind])

        _reader = Reader(datapath,run)
        
        crit_freq_low_pass_MHz = None
        low_pass_filter_order = None
        
        crit_freq_high_pass_MHz = None# 45
        high_pass_filter_order = None# 12
        
        waveform_index_range = (None,None)
        
        final_corr_length = 2**18

        tct = TemplateCompareTool(_reader, final_corr_length=final_corr_length, crit_freq_low_pass_MHz=crit_freq_low_pass_MHz, crit_freq_high_pass_MHz=crit_freq_high_pass_MHz, low_pass_filter_order=low_pass_filter_order, high_pass_filter_order=high_pass_filter_order, waveform_index_range=waveform_index_range, plot_filters=False,apply_phase_response=True)
        tdc = TimeDelayCalculator(_reader, final_corr_length=final_corr_length, crit_freq_low_pass_MHz=crit_freq_low_pass_MHz, crit_freq_high_pass_MHz=crit_freq_high_pass_MHz, low_pass_filter_order=low_pass_filter_order, high_pass_filter_order=high_pass_filter_order, waveform_index_range=waveform_index_range, plot_filters=False,apply_phase_response=True)
        self.cor = Correlator(_reader,  upsample=2**15, n_phi=360, n_theta=360, waveform_index_range=(None,None),crit_freq_low_pass_MHz=crit_freq_low_pass_MHz, crit_freq_high_pass_MHz=crit_freq_high_pass_MHz, low_pass_filter_order=low_pass_filter_order, high_pass_filter_order=high_pass_filter_order, plot_filter=False,apply_phase_response=True)
        
        if True:
            print('TRYING TO MAKE CORRELATOR PLOT.')
            print(eventids)
            self.cor.animatedMap(eventids, 'both', '', plane_zenith=None,plane_az=None,hilbert=False, max_method=None,center_dir='E',save=False,dpi=300)

        times, averaged_waveforms = tct.averageAlignedSignalsPerChannel( eventids, align_method=0, template_eventid=eventids[0], plot=plot_aligned_wf,event_type=None)
        
        resampled_averaged_waveforms_original_length = numpy.zeros((8,len(_reader.t())))
        for channel in range(8):
            resampled_averaged_waveforms_original_length[channel] = scipy.interpolate.interp1d(times,averaged_waveforms[channel],kind='cubic',bounds_error=False,fill_value=0)(_reader.t())

        if False:
            for channel in range(8):
                plt.figure()
                plt.title(str(channel))
                for eventid in eventids:
                    tct.setEntry(eventid)
                    plt.plot(tct.t(),tct.wf(channel),label=str(eventid),alpha=0.8)
                plt.legend()
                plt.xlabel('t (ns)')
                plt.ylabel('adu')


        if save_template == True:
            filename_index = 0 
            filename = './generated_event_template_%i.csv'%filename_index
            existing_files = numpy.array(glob.glob('./*.csv'))

            while numpy.isin(filename,existing_files):
                filename_index += 1
                filename = './generated_event_template_%i.csv'%filename_index
            numpy.savetxt(filename,resampled_averaged_waveforms_original_length, delimiter=",")
            print('Generated template saved as:\n%s'%filename)



        tdc.calculateMultipleTimeDelays(eventids, align_method=8,hilbert=False,plot=True, colors=numpy.array(coords)[:,0])

        return resampled_averaged_waveforms_original_length
Example #10
        except Exception as e:
            print('\nError in %s' % inspect.stack()[0][3])
            print(e)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)


if __name__ == '__main__':
    try:
        plt.close('all')
        #Get timing info from real BEACON data for testing.
        run = 1509
        known_pulser_ids = info.loadPulserEventids(remove_ignored=True)
        eventid = known_pulser_ids['run%i' % run]['hpol'][0]
        reader = Reader(datapath, run)
        reader.setEntry(eventid)
        test_t = reader.t()
        test_pulser_adu = reader.wf(0)

        #Creating test signal
        cr_gen = CosmicRayGenerator(test_t, t_offset=800.0, model='bi-delta')
        for curve_choice in range(4):
            out_t, out_E = cr_gen.eFieldGenerator(plot=True,
                                                  curve_choice=curve_choice)

        plt.figure()
        plt.subplot(2, 1, 1)
        plt.plot(test_t, test_pulser_adu, label='Pulser Signal')
        plt.ylabel('E (adu)')
        plt.xlabel('t (ns)')
                numpy.rad2deg(
                    numpy.arctan2(sources_ENU[source_key][1],
                                  sources_ENU[source_key][0])))

        sort_cut = numpy.argsort(azimuths)[::-1]
        for index in sort_cut:
            print('%s : %0.3f' % (keys[index], azimuths[index]))

        run = 1650

        colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]

        #DETERMINE THE TIME DELAYS TO BE USED IN THE ACTUAL CALCULATION

        print('Calculating time delays from info.py')
        reader = Reader(datapath, run)


        ds = dataSlicerSingleRun(reader, impulsivity_dset_key, time_delays_dset_key, map_direction_dset_key,\
                    curve_choice=0, trigger_types=[2],included_antennas=[0,1,2,3,4,5,6,7],include_test_roi=False,\
                    cr_template_n_bins_h=200,cr_template_n_bins_v=200,\
                    impulsivity_n_bins_h=200,impulsivity_n_bins_v=200,\
                    time_delays_n_bins_h=150,time_delays_n_bins_v=150,min_time_delays_val=-200,max_time_delays_val=200,\
                    std_n_bins_h=200,std_n_bins_v=200,max_std_val=9,\
                    p2p_n_bins_h=128,p2p_n_bins_v=128,max_p2p_val=128,\
                    snr_n_bins_h=200,snr_n_bins_v=200,max_snr_val=35)

        ds.addROI(
            'Simple Template V > 0.7', {'cr_template_search_v': [0.7, 1.0]}
        )  # Adding 2 ROI in different rows and appending as below allows for "OR" instead of "AND"
        ds.addROI('Simple Template H > 0.7',
Example #12
    plot_td = False
    plot_original_length_templates = True

    save_time_delays = True
    save_template = False

    waveform_index_range = (None, 500)

    hpol_pairs = numpy.array(list(itertools.combinations((0, 2, 4, 6), 2)))
    vpol_pairs = numpy.array(list(itertools.combinations((1, 3, 5, 7), 2)))
    pairs = numpy.vstack((hpol_pairs, vpol_pairs))

    try:
        run = int(run)

        reader = Reader(datapath, run)

        try:
            print(reader.status())
        except Exception as e:
            print('Status Tree not present.  Returning Error.')
            print('\nError in %s' % inspect.stack()[0][3])
            print(e)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
            sys.exit(1)

        tct = TemplateCompareTool(
            reader,
            final_corr_length=final_corr_length,
Example #13
        ]  #Filters here are attempting to correct for differences in signals from pulsers.
        low_pass_filter_order = [0, 8, 8, 8, 10, 8, 3, 8]

        crit_freq_high_pass_MHz = 65
        high_pass_filter_order = 12

        apply_phase_response = True
        hilbert = False

        #Load antenna position information from the info.py script
        origin = info.loadAntennaZeroLocation()  #Assuming default_deploy
        antennas_physical, antennas_phase_hpol, antennas_phase_vpol = info.loadAntennaLocationsENU(
        )  #Assuming default_deploy

        #Create a Reader object for the specific run.
        reader = Reader(datapath, run)
        print('The run associated with this reader is:')
        print(reader.run)
        print('This run has %i events' % (reader.N()))

        #Create a TimeDelayCalculator object for the specified run. Note that if the above parameters haven't been changed
        tdc_raw = TimeDelayCalculator(reader,
                                      final_corr_length=final_corr_length,
                                      crit_freq_low_pass_MHz=None,
                                      crit_freq_high_pass_MHz=None,
                                      low_pass_filter_order=None,
                                      high_pass_filter_order=None,
                                      plot_filters=False,
                                      apply_phase_response=False)
        #Plot raw event
        tdc_raw.plotEvent(eventids[0],
Example #14
if __name__ == '__main__':
    plt.close(
        'all'
    )  #Uncomment this if you want figures to be closed before this is run (helps if running multiple times in a row to avoid plot congestion).
    '''
    Here we pick a run, create a reader, get the eventids, get which eventids correspond to which trigger type,
    then plot 1 event from each trigger type.
    '''
    if True:
        plot_N_per_type = 2  #The number of events to plot per trigger type.  Meant to demonstrate what looping over events might look like.

        #Get run and events you want to look at.
        run = 1650
        #Create a Reader object for the specific run.
        reader = Reader(datapath, run)
        print('The run associated with this reader is:')
        print(reader.run)
        print('This run has %i events' % (reader.N()))
        eventids = numpy.arange(reader.N())
        trigger_type = loadTriggerTypes(reader)

        times = reader.t()  #The times of a waveform in ns.  Not upsampled.

        for trig_type in [1, 2, 3]:
            print('Plotting %i eventids of trig type %i' %
                  (plot_N_per_type, trig_type))
            trig_eventids = eventids[
                trigger_type == trig_type]  #All eventids of this trig type
            trig_eventids = numpy.sort(
                numpy.random.choice(trig_eventids, 2)
Example #15
                if type(known_pulser_ids['run%i' % run]) is not dict:
                    eventids = numpy.sort(known_pulser_ids['run%i' % run])
                else:
                    eventids = {}
                    eventids['hpol'] = numpy.sort(
                        known_pulser_ids['run%i' % run]['hpol'])
                    eventids['vpol'] = numpy.sort(
                        known_pulser_ids['run%i' % run]['vpol'])

                if ignore_eventids == True:
                    if 'run%i' % run in list(ignorable_pulser_ids.keys()):
                        if type(eventids) is not dict:
                            eventids = eventids[~numpy.isin(
                                eventids, ignorable_pulser_ids['run%i' % run])]

                reader = Reader(datapath, run)
                reader.setEntry(eventids[0])
                tdc = TimeDelayCalculator(
                    reader,
                    final_corr_length=final_corr_length,
                    crit_freq_low_pass_MHz=crit_freq_low_pass_MHz,
                    crit_freq_high_pass_MHz=crit_freq_high_pass_MHz,
                    low_pass_filter_order=low_pass_filter_order,
                    high_pass_filter_order=high_pass_filter_order)
                time_shifts, corrs, pairs = tdc.calculateMultipleTimeDelays(
                    eventids)

                for pair_index, pair in enumerate(pairs):
                    if pair in hpol_pairs:
                        all_hpol_delays[str(pair)] = numpy.append(
                            all_hpol_delays[str(pair)],
Example #16
    datapath = os.environ['BEACON_DATA']

    crit_freq_low_pass_MHz = None  #95 #This new pulser seems to peak in the region of 85 MHz or so
    low_pass_filter_order = None  #10

    crit_freq_high_pass_MHz = None  #50#None
    high_pass_filter_order = None  #4#None

    apply_phase_response = False
    hilbert = False

    hpol_beam_delays = info.loadBeamDelays()[0]

    try:
        run = int(run)
        reader = Reader(datapath, run)

        N = reader.head_tree.Draw("Entry$", "trigger_type==%i" % 2, "goff")
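        # TTree::Draw with the "goff" option evaluates the selection without drawing and fills
        # ROOT's internal value buffer; GetV1() then exposes the selected "Entry$" values,
        # i.e. the eventids passing the trigger_type cut.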
        eventids = numpy.frombuffer(reader.head_tree.GetV1(),
                                    numpy.dtype('float64'), N).astype(int)

        filename = createFile(
            reader
        )  #Creates an analysis file if one does not exist.  Returns filename to load file.

        #choose_eventid = 5194 #If none will run the full analysis, otherwise will just print plots relevant to that eventid.
        for choose_eventid in [None, 5194]:
            if filename is not None:
                with h5py.File(filename, 'r') as file:
                    if choose_eventid is None:
                        rf_cut = file['trigger_type'][...] == 2
Example #17
from pprint import pprint
plt.ion()

if __name__ == '__main__':
    plt.close('all')
    printCredit()
    # If your data is elsewhere, pass it as an argument
    datapath = sys.argv[1] if len(sys.argv) > 1 else os.environ['BEACON_DATA']
    run = 1509  #Selects which run to examine
    eventids = numpy.array(
        [2401])  #numpy.array([90652,90674,90718,90766,90792,91019,91310])

    for eventid in eventids:
        #eventid = None#numpy.array([1,2,3]) #If None then a random event id is selected. Can be array of eventids as well.

        reader = Reader(datapath, run)
        verbose = True

        # this is a random event
        if eventid is None:
            eventid = numpy.array([numpy.random.randint(reader.N())])
        elif type(eventid) == int:
            eventid = numpy.array([eventid])
        elif type(eventid) == list:
            eventid = numpy.array(eventid)
        elif type(eventid) == numpy.ndarray:
            pass
        else:
            print('event id not set in valid way, setting to random')
            eventid = numpy.array([numpy.random.randint(reader.N())])
Example #18
    flagged_runs = {}
    for day_label in list(clean_days.keys()):
        flagged_runs[day_label] = []
    flagged_runs_cfg = {}
    for day_label in list(clean_days.keys()):
        flagged_runs_cfg[day_label] = []


    min_ts = []
    max_ts = []
    run_ids = []
    print('')
    for run_index,run_label in enumerate(run_labels):
        if 'run' in run_label:
            run = int(run_label.split('run')[-1])
            reader = Reader(datapath,run)
            if reader.N() == 0:
                continue

            sys.stdout.write('\r%i/%i'%(run_index+1,len(run_labels)))
            sys.stdout.flush()
            min_t = reader.head_tree.GetMinimum('readout_time')#utc.localize(datetime.fromtimestamp(reader.head_tree.GetMinimum('readout_time')))
            max_t = reader.head_tree.GetMaximum('readout_time')#utc.localize(datetime.fromtimestamp(reader.head_tree.GetMaximum('readout_time')))
            min_ts.append(min_t)
            max_ts.append(max_t)
            run_ids.append(run)

            for day_label in list(clean_days.keys()):
                cfg = numpy.array([],dtype=int)

                #Leading end in window.
Example #19
            if farm_mode == True:
                print('Farm Mode = True, no plots will be available')
                calculate_correlation_values = True  #If True then the values will be newly calculated, if False then they will be loaded from the existing files
            else:
                print('Farm Mode = False')
                calculate_correlation_values = False  #If True then the values will be newly calculated, if False then they will be loaded from the existing files
            #Parameters:
            #Curve choice is a parameter in the bi-delta template model that changes the timing of the input delta signal.
            curve_choice = 0
            upsample_factor = 4
            save_data = True

            if farm_mode == False:
                plt.close('all')
            run = int(sys.argv[1])
            reader = Reader(datapath, run)

            #Prepare for Correlations
            reader.setEntry(0)
            waveform_times = reader.t()
            waveform_sample = reader.wf(0)
            waveform_sample, waveform_times = scipy.signal.resample(
                waveform_sample,
                len(waveform_sample) * upsample_factor,
                t=waveform_times)  #upsample times to desired amount.

            cr_gen = crt.CosmicRayGenerator(waveform_times,
                                            t_offset=800.0,
                                            model='bi-delta')
            template_t, template_E = cr_gen.eFieldGenerator(
                plot=True, curve_choice=curve_choice)
Example #20
                line_count += 1
            waveforms['ch%i'%channel] = numpy.array(y)
    return x,waveforms


if __name__ == '__main__':
    #plt.close('all')
    # If your data is elsewhere, pass it as an argument
    datapath = sys.argv[1] if len(sys.argv) > 1 else os.environ['BEACON_DATA']
    runs = numpy.array([793])#numpy.array([734,735,736,737,739,740,746,747,757,757,762,763,764,766,767,768,769,770,781,782,783,784,785,786,787,788,789,790,792,793]) #Selects which run to examine
    event_limit = 1
    save_fig = True

    for run_index, run in enumerate(runs):
        reader = Reader(datapath,run)
        eventids = cc.getTimes(reader)[3]

        if event_limit is not None:
            if event_limit < len(eventids):
                eventids = eventids[0:event_limit]

        waveform_times, templates = loadTemplates(template_dirs['run%i'%run]['dir'])
        original_wf_len = int(reader.header().buffer_length)
        upsample_wf_len = original_wf_len*template_dirs['run%i'%run]['resample_factor']
        corr_delay_times = numpy.arange(-upsample_wf_len+1,upsample_wf_len)
        #Setup Filter
        freqs = numpy.fft.rfftfreq(len(waveform_times), d=(waveform_times[1] - waveform_times[0])/1.0e9)
        b, a = scipy.signal.butter(template_dirs['run%i'%run]['filter_order'], template_dirs['run%i'%run]['crit_freq_low_pass_MHz']*1e6, 'low', analog=True)
        d, c = scipy.signal.butter(template_dirs['run%i'%run]['filter_order'], template_dirs['run%i'%run]['crit_freq_high_pass_MHz']*1e6, 'high', analog=True)
import itertools
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
plt.ion()

if __name__ == '__main__':
    plt.close('all')
    # If your data is elsewhere, pass it as an argument
    datapath = sys.argv[1] if len(sys.argv) > 1 else os.environ['BEACON_DATA']
    runs = numpy.array([1509])

    event_min = 0

    for run_index, run in enumerate(runs):
        print('Run %i' % run)
        reader = Reader(datapath, run)
        raw_approx_trigger_time, raw_approx_trigger_time_nsecs, trig_time, eventids = cc.getTimes(
            reader, trigger_type=3)
        rtimes = raw_approx_trigger_time - raw_approx_trigger_time[0]

        if False:
            #USE THIS TO SAVE ONCE YOU HAVE SELECTED THE CORRECT EVENT RANGE.
            #cut = numpy.logical_and(eventids > 4157, eventids < 5794)
            cut = numpy.logical_and(eventids > 4208, eventids < 6033)
            extra_text = 'site_2_bicone_vpol_17dB'
            numpy.savetxt('./run%i_pulser_eventids_%s.csv' % (run, extra_text),
                          numpy.sort(eventids[cut]),
                          delimiter=",")

        meas = {}
        for channel in range(8):
Example #22
        known_pulser_ids = info.loadPulserEventids(remove_ignored=True)

        #Prepare eventids

        eventids = {}
        eventids['hpol'] = numpy.sort(known_pulser_ids['run%i' % run]['hpol'])
        eventids['vpol'] = numpy.sort(known_pulser_ids['run%i' % run]['vpol'])
        all_eventids = numpy.sort(
            numpy.append(eventids['hpol'], eventids['vpol']))

        hpol_eventids_cut = numpy.isin(all_eventids, eventids['hpol'])
        vpol_eventids_cut = numpy.isin(all_eventids, eventids['vpol'])

        #Set up template compare tool used for making averaged waveforms for first pass alignment.
        reader = Reader(datapath, run)
        tct = TemplateCompareTool(
            reader,
            final_corr_length=final_corr_length,
            crit_freq_low_pass_MHz=crit_freq_low_pass_MHz,
            crit_freq_high_pass_MHz=crit_freq_high_pass_MHz,
            low_pass_filter_order=low_pass_filter_order,
            high_pass_filter_order=high_pass_filter_order,
            waveform_index_range=waveform_index_range,
            plot_filters=plot_filters,
            apply_phase_response=apply_phase_response)

        #First pass alignment to make templates.
        times, hpol_waveforms = tct.averageAlignedSignalsPerChannel(
            eventids['hpol'],
            align_method=0,
    resample_factor = 100

    #Filter settings
    crit_freq_low_pass_MHz = 75
    crit_freq_high_pass_MHz = 15
    filter_order = 6
    plot_filter = True
    use_envelopes = False
    use_raw = True
    bins = 200
    expected_timing_pm_tol = 20 #ns
    corr_plot = True

    for run_index, run in enumerate(runs):
        eventids = numpy.sort(known_pulser_ids['run%i'%run])
        reader = Reader(datapath,run)

        waveform_times = reader.t()
        waveforms_upsampled = {}
        waveforms_raw = {}

        #Prepare filter
        reader.setEntry(eventids[0])
        wf = reader.wf(0)
        if use_raw:
            if resample_factor != 1:
                print('\n!!!\nUsing raw waveforms for alignment.  Setting the resample factor to 1.\n!!!\n') 
                resample_factor = 1

        wf , waveform_times = scipy.signal.resample(wf,len(wf)*resample_factor,t=reader.t())
        dt = waveform_times[1] - waveform_times[0]