def setUpClass(cls):
    """Create the analysis library wrappers once and pre-convert all unit-test
    raw data files, so the individual tests can compare the produced output
    files against stored reference data.

    NOTE(review): unittest requires this hook to be a ``@classmethod`` --
    confirm the decorator is present directly above this chunk.
    """
    cls.interpreter = PyDataInterpreter()
    cls.histogram = PyDataHistograming()
    cls.clusterizer = PyDataClusterizer()

    def _enable_full_interpretation(analyze_raw_data):
        # Switch on every optional interpretation output; all of these
        # settings default to False.
        analyze_raw_data.create_hit_table = True  # can be set to false to omit hit table creation
        analyze_raw_data.create_cluster_hit_table = True  # adds the cluster id and seed info to each hit
        analyze_raw_data.create_cluster_table = True  # enables the creation of a table with all clusters
        analyze_raw_data.create_trigger_error_hist = True  # creates a histogram summing up the trigger errors
        analyze_raw_data.create_cluster_size_hist = True  # enables cluster size histogramming, can save some time
        analyze_raw_data.create_cluster_tot_hist = True  # enables cluster ToT histogramming per cluster size
        analyze_raw_data.create_meta_word_index = True  # stores the start and stop raw data word index for every event
        analyze_raw_data.create_meta_event_index = True  # stores the event number for each readout in an additional meta data array

    # Analyze the digital scan raw data, do not show any feedback (no prints to console, no plots).
    with AnalyzeRawData(raw_data_file=tests_data_folder + 'unit_test_data_1.h5', analyzed_data_file=tests_data_folder + 'unit_test_data_1_interpreted.h5', create_pdf=False) as analyze_raw_data:
        analyze_raw_data.chunk_size = 2999999
        _enable_full_interpretation(analyze_raw_data)
        analyze_raw_data.interpret_word_table(use_settings_from_file=False, fei4b=False)  # the actual start conversion command

    # Analyze the fast threshold scan raw data, do not show any feedback (no prints to console, no plots).
    with AnalyzeRawData(raw_data_file=tests_data_folder + 'unit_test_data_2.h5', analyzed_data_file=tests_data_folder + 'unit_test_data_2_interpreted.h5', create_pdf=False) as analyze_raw_data:
        analyze_raw_data.chunk_size = 2999999
        analyze_raw_data.create_threshold_hists = True  # makes only sense if threshold scan data is analyzed
        analyze_raw_data.interpret_word_table(use_settings_from_file=False, fei4b=False)  # the actual start conversion command

    # Analyze the digital scan hit data (produced by the first conversion above),
    # do not show any feedback (no prints to console, no plots).
    with AnalyzeRawData(raw_data_file=None, analyzed_data_file=tests_data_folder + 'unit_test_data_1_interpreted.h5', create_pdf=False) as analyze_raw_data:
        analyze_raw_data.chunk_size = 2999999
        analyze_raw_data.create_cluster_hit_table = True
        analyze_raw_data.create_cluster_table = True
        analyze_raw_data.create_cluster_size_hist = True
        analyze_raw_data.create_cluster_tot_hist = True
        analyze_raw_data.analyze_hit_table(analyzed_data_out_file=tests_data_folder + 'unit_test_data_1_analyzed.h5')

    # Analyze the digital scan raw data per scan parameter, do not show any feedback
    # (no prints to console, no plots).
    with AnalyzeRawData(raw_data_file=tests_data_folder + 'unit_test_data_3.h5', analyzed_data_file=tests_data_folder + 'unit_test_data_3_interpreted.h5', create_pdf=False) as analyze_raw_data:
        analyze_raw_data.chunk_size = 2999999
        _enable_full_interpretation(analyze_raw_data)
        analyze_raw_data.interpret_word_table(use_settings_from_file=False, fei4b=False)  # the actual start conversion command

    # Convert the fast threshold scan raw data again, this time also keeping the hit table.
    with AnalyzeRawData(raw_data_file=tests_data_folder + 'unit_test_data_2.h5', analyzed_data_file=tests_data_folder + 'unit_test_data_2_hits.h5', create_pdf=False) as analyze_raw_data:
        analyze_raw_data.chunk_size = 2999999
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.create_threshold_hists = True  # makes only sense if threshold scan data is analyzed
        analyze_raw_data.interpret_word_table(use_settings_from_file=False, fei4b=False)  # the actual start conversion command

    # Re-analyze the threshold scan from the stored hit table.
    with AnalyzeRawData(raw_data_file=None, analyzed_data_file=tests_data_folder + 'unit_test_data_2_hits.h5', create_pdf=False) as analyze_raw_data:
        analyze_raw_data.chunk_size = 2999999
        analyze_raw_data.create_threshold_hists = True
        analyze_raw_data.analyze_hit_table(analyzed_data_out_file=tests_data_folder + 'unit_test_data_2_analyzed.h5')

    # Convert a single raw data file ...
    with AnalyzeRawData(raw_data_file=tests_data_folder + 'unit_test_data_4.h5', analyzed_data_file=tests_data_folder + 'unit_test_data_4_interpreted.h5', create_pdf=False) as analyze_raw_data:
        analyze_raw_data.chunk_size = 2999999
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.interpret_word_table(use_settings_from_file=False, fei4b=False)  # the actual start conversion command

    # ... and the same data split over two files with a scan parameter, for comparison.
    with AnalyzeRawData(raw_data_file=[tests_data_folder + 'unit_test_data_4_parameter_128.h5', tests_data_folder + 'unit_test_data_4_parameter_256.h5'], analyzed_data_file=tests_data_folder + 'unit_test_data_4_interpreted_2.h5', scan_parameter_name='parameter', create_pdf=False) as analyze_raw_data:
        analyze_raw_data.chunk_size = 2999999
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.interpret_word_table(use_settings_from_file=False, fei4b=False)  # the actual start conversion command
def test_libraries_stability(self):
    """Construct and destroy the analysis library wrappers 50 times in a row
    to verify that repeated creation/deletion does not crash."""
    bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.ETA()], maxval=50, term_width=80)
    bar.start()
    for iteration in range(50):
        interpreter = PyDataInterpreter()
        histogram = PyDataHistograming()
        clusterizer = PyDataClusterizer()
        # Drop all three wrappers so their destructors run before the next round.
        del interpreter, histogram, clusterizer
        bar.update(iteration)
    bar.finish()