def test_plot_hdf5(self):
    """Check that all histogram plot functions run on an HDF5 data source without raising."""
    # Fill the histogram context with a few identical batches of counts
    repetitions = 4
    count_rows = [
        [4, 1, 0],
        [5, 2, 0],
        [6, 3, 0],
        [7, 4, 0],
    ]
    for _ in range(repetitions):
        with self.h:
            for row in count_rows:
                self.h.append(row)

    with temp_dir():
        # Dump the dataset manager contents into an HDF5 archive
        file_name = 'result.h5'
        with h5py.File(file_name, 'w') as archive:
            self.managers.dataset_mgr.write_hdf5(archive)

        # Construct an analyzer from the file on disk
        analyzer = HistogramAnalyzer(file_name, self.s.detection.get_state_detection_threshold())

        # Exercise every plot function; the test passes if none of them raise
        analyzer.plot_all_histograms()
        analyzer.plot_all_probabilities()
        analyzer.plot_all_mean_counts()
        analyzer.plot_all_state_probabilities()
def test_hdf5_read(self):
    """Verify that analyzers built from an HDF5 file and from the object source agree."""
    self._generate_hdf5_data()

    with temp_dir():
        # Dump the dataset manager contents into an HDF5 archive
        file_name = 'result.h5'
        with h5py.File(file_name, 'w') as archive:
            self.managers.dataset_mgr.write_hdf5(archive)

        # Analyzer sourced from the file on disk must match the histogram context
        threshold = self.s.detection.get_state_detection_threshold()
        from_file = HistogramAnalyzer(file_name, threshold)
        self._compare_analyzer_to_context(from_file)

        # Analyzer sourced directly from the system object must match the file-based one
        from_obj = HistogramAnalyzer(self.s, threshold)
        self.assertListEqual(from_file.keys, from_obj.keys, 'Keys did not match')
        for key in from_file.keys:
            for x, y in zip(from_file.histograms[key], from_obj.histograms[key]):
                self.assertListEqual(list(x), list(y), 'Histograms did not match')
            for x, y in zip(from_file.probabilities[key], from_obj.probabilities[key]):
                self.assertListEqual(list(x), list(y), 'Probabilities did not match')
            for x, y in zip(from_file.mean_counts[key], from_obj.mean_counts[key]):
                self.assertListEqual(list(x), list(y), 'Mean counts did not match')
            for x, y in zip(from_file.stdev_counts[key], from_obj.stdev_counts[key]):
                self.assertListEqual(list(x), list(y), 'Stdev counts did not match')
            for x, y in zip(from_file.raw[key], from_obj.raw[key]):
                self.assertListEqual(x.tolist(), y.tolist(), 'Raw counts did not match')
def test_hdf5_read_no_histogram_data(self):
    """Opening an archive without histogram data must raise a KeyError."""
    # Generate an archive that contains neither raw nor legacy histogram datasets
    self._generate_hdf5_data(keep_raw=False, add_legacy=False)

    with temp_dir():
        # Dump the dataset manager contents into an HDF5 archive
        file_name = 'result.h5'
        with h5py.File(file_name, 'w') as archive:
            self.managers.dataset_mgr.write_hdf5(archive)

        with self.assertRaises(KeyError, msg='Absence of histogram data did not raise'):
            HistogramAnalyzer(file_name)
def test_plot(self):
    """Check that the trace plot function runs on an object data source without raising."""
    # Append a single batch of time-resolved data to the context
    bin_width = 1 * us
    bin_spacing = 1 * ns
    offset = 5 * ns
    samples = [[16, 25, 56], [66, 84, 83], [45, 77, 96], [88, 63, 79]]
    with self.t:
        self.t.append(samples, bin_width, bin_spacing, offset)

    with temp_dir():
        # Build the analyzer straight from the system object
        analyzer = TimeResolvedAnalyzer(self.s)
        # The test passes if plotting does not raise
        analyzer.plot_all_traces()
def setUp(self) -> None:
    """Enter a temporary directory and construct a simulated test system."""
    # Keep a handle to the context manager so it can be exited during teardown
    self._temp_dir = temp_dir()
    self._temp_dir.__enter__()

    # Build the system against a simulated device DB with VCD output enabled
    device_db = enable_dax_sim(_DEVICE_DB.copy(), enable=True, output='vcd', moninj_service=False)
    self.managers = get_managers(device_db)
    self.sys = _TestSystem(self.managers)

    # VCD output selects the VCD signal manager; verify that is what we got
    self.sm: DaxSignalManager = typing.cast(VcdSignalManager, get_signal_manager())
    self.assertIsInstance(self.sm, VcdSignalManager)
def test_gtk_wave_save_generator(self):
    """GTKWave save generation must succeed when the VCD signal manager is active."""
    with temp_dir():
        # VCD output selects the VCD signal manager required by the generator
        device_db = enable_dax_sim(ddb=_DEVICE_DB.copy(), enable=True, output='vcd', moninj_service=False)
        with get_managers(device_db) as managers:
            system = _TestSystem(managers)
            self.assertTrue(system.dax_sim_enabled)
            # Constructing the generator immediately writes the waves file
            GTKWSaveGenerator(system)
        # Manually close the signal manager before leaving the temp dir
        get_signal_manager().close()
def test_hdf5_read_raw_and_legacy(self):
    """An archive containing both raw and legacy data must expose the `raw` attribute."""
    self._generate_hdf5_data(keep_raw=True, add_legacy=True)

    with temp_dir():
        # Dump the dataset manager contents into an HDF5 archive
        file_name = 'result.h5'
        with h5py.File(file_name, 'w') as archive:
            self.managers.dataset_mgr.write_hdf5(archive)

        # Construct an analyzer from the file on disk
        analyzer = HistogramAnalyzer(file_name, self.s.detection.get_state_detection_threshold())
        # Raw data was kept, so the analyzer must expose it
        self.assertTrue(hasattr(analyzer, 'raw'), 'Expected attribute `raw`')
        # The analyzer contents must match the histogram context
        self._compare_analyzer_to_context(analyzer)
def test_gtk_wave_save_generator_invalid_signal_manager(self):
    """GTKWave save generation must fail when the VCD signal manager is not in use."""
    with temp_dir():
        # 'null' output selects a non-VCD signal manager, which the generator rejects
        device_db = enable_dax_sim(ddb=_DEVICE_DB.copy(), enable=True, output='null', moninj_service=False)
        with get_managers(device_db) as managers:
            system = _TestSystem(managers)
            self.assertTrue(system.dax_sim_enabled)
            with self.assertRaises(RuntimeError, msg='Not using VCD signal manager did not raise'):
                # Constructing the generator immediately writes the waves file
                GTKWSaveGenerator(system)
        # Manually close the signal manager before leaving the temp dir
        get_signal_manager().close()
def test_hdf5_read(self):
    """Verify that analyzers built from an HDF5 file and from the object source agree.

    Fix: assertion messages read "Did not found ..."; corrected to "Did not find ...".
    The duplicated column-comparison loop is factored into a nested helper.
    """
    # Append two batches of time-resolved data, switching datasets in between
    bin_width = 1 * us
    bin_spacing = 1 * ns
    offset = 5 * ns
    data_0 = [[1, 2], [3, 4], [2, 6], [4, 5], [9, 9], [9, 7], [7, 8]]
    data_1 = [[16, 25, 56], [66, 84, 83], [45, 77, 96], [88, 63, 79],
              [62, 93, 49], [29, 25, 7], [6, 17, 80]]
    with self.t:
        self.t.append(data_0, bin_width, bin_spacing, offset)
    self.t.config_dataset('foo')
    with self.t:
        self.t.append(data_1, bin_width, bin_spacing, offset)

    def compare_traces(traces_a, traces_b):
        # Every trace must contain all expected columns with identical data
        for v, w in zip(traces_a, traces_b):
            for c in TimeResolvedContext.DATASET_COLUMNS:
                self.assertIn(c, v, 'Did not find expected dataset columns')
                self.assertIn(c, w, 'Did not find expected dataset columns')
                self.assertTrue(np.array_equal(v[c], w[c]), f'Column/data "{c}" of trace did not match')

    with temp_dir():
        # Write data to HDF5 file
        file_name = 'result.h5'
        with h5py.File(file_name, 'w') as f:
            self.managers.dataset_mgr.write_hdf5(f)

        # Read the file with TimeResolvedAnalyzer and compare against the context
        a = TimeResolvedAnalyzer(file_name)
        self.assertListEqual(a.keys, self.t.get_keys(), 'Keys did not match')
        for k in a.keys:
            compare_traces(a.traces[k], self.t.get_traces(k))

        # Compare to an analyzer built from the object source
        b = TimeResolvedAnalyzer(self.s)
        self.assertListEqual(a.keys, b.keys, 'Keys did not match')
        for k in a.keys:
            compare_traces(a.traces[k], b.traces[k])
def test_plot_hdf5(self):
    """Check that the trace plot function runs on an HDF5 data source without raising."""
    # Append a single batch of time-resolved data to the context
    bin_width = 1 * us
    bin_spacing = 1 * ns
    offset = 5 * ns
    samples = [[16, 25, 56], [66, 84, 83], [45, 77, 96], [88, 63, 79]]
    with self.t:
        self.t.append(samples, bin_width, bin_spacing, offset)

    with temp_dir():
        # Dump the dataset manager contents into an HDF5 archive
        file_name = 'result.h5'
        with h5py.File(file_name, 'w') as archive:
            self.managers.dataset_mgr.write_hdf5(archive)

        # The test passes if plotting from the file does not raise
        analyzer = TimeResolvedAnalyzer(file_name)
        analyzer.plot_all_traces()
def test_scan_reader(self):
    """DaxScanReader must produce identical data from file and object sources."""
    self.scan.run()

    with temp_dir():
        # Dump the dataset manager contents into an HDF5 archive
        file_name = 'result.h5'
        with h5py.File(file_name, 'w') as archive:
            self.managers.dataset_mgr.write_hdf5(archive)

        # Reader sourced from the file on disk
        reader = DaxScanReader(file_name)

        # The reader must reproduce the scan object's scannables and scan points
        scannables = self.scan.get_scannables()
        scan_points = self.scan.get_scan_points()
        keys = list(scannables.keys())
        self.assertSetEqual(set(reader.keys), set(keys), 'Keys in reader did not match object keys')
        for k in keys:
            self.assertListEqual(scannables[k], list(reader.scannables[k]),
                                 'Scannable in reader did not match object scannable')
            self.assertListEqual(scan_points[k], list(reader.scan_points[k]),
                                 'Scan points in reader did not match object scan points')

        # A reader built from the scan object itself must match the file-based reader
        obj_reader = DaxScanReader(self.scan)
        self.assertSetEqual(set(reader.keys), set(obj_reader.keys), 'Keys in readers did not match')
        for k in reader.keys:
            self.assertListEqual(list(obj_reader.scannables[k]), list(reader.scannables[k]),
                                 'Scannable in readers did not match')
            self.assertListEqual(list(obj_reader.scan_points[k]), list(reader.scan_points[k]),
                                 'Scan points in readers did not match')
def test_plot(self):
    """Check that all histogram plot functions run on an object data source without raising."""
    # Fill the histogram context with a few identical batches of counts
    repetitions = 4
    count_rows = [
        [4, 1, 0],
        [5, 2, 0],
        [6, 3, 0],
        [7, 4, 0],
    ]
    for _ in range(repetitions):
        with self.h:
            for row in count_rows:
                self.h.append(row)

    with temp_dir():
        # Build the analyzer straight from the system object
        analyzer = HistogramAnalyzer(self.s, self.s.detection.get_state_detection_threshold())

        # Exercise every plot function; the test passes if none of them raise
        analyzer.plot_all_histograms()
        analyzer.plot_all_probabilities()
        analyzer.plot_all_mean_counts()
        analyzer.plot_all_state_probabilities()
def test_relation_graphviz(self):
    """Constructing a relation graph must yield a graphviz Digraph."""
    # Graph contents are not verified here, only the resulting object type
    with temp_dir():
        graph = dax.util.introspect.RelationGraphviz(self.sys)
        self.assertIsInstance(graph, graphviz.Digraph)
def test_histogram_analyzer_module(self):
    """Constructing an analyzer from a histogram context must not raise."""
    # The analyzer requests an output file, which triggers creation of an experiment
    # output dir; run inside a temp dir to avoid leaving directories behind
    with temp_dir():
        HistogramAnalyzer(self.h)
def test_analyzer_system(self):
    """Constructing an analyzer from a system object must not raise."""
    # The analyzer requests an output file, which triggers creation of an experiment
    # output dir; run inside a temp dir to avoid leaving directories behind
    with temp_dir():
        TimeResolvedAnalyzer(self.s)