def test_template(self):
    """Table.from_template builds a split 'Hits' table from dicts of arrays."""
    n = 10
    dict_source = {
        'channel_id': np.arange(n),
        'dom_id': np.arange(n),
        'time': np.arange(n),
        'tot': np.arange(n),
        'triggered': np.ones(n),
        'group_id': 0,  # event_id
    }
    array_source = {
        'channel_id': np.ones(n, dtype=int),
        'dom_id': np.ones(n, dtype=int),
        'time': np.ones(n, dtype=float),
        'tot': np.ones(n, dtype=float),
        'triggered': np.ones(n, dtype=bool),
        'group_id': np.ones(n, dtype=int),
    }
    for source in (dict_source, array_source):
        tab = Table.from_template(source, 'Hits')
        assert tab.name == 'Hits'
        assert tab.split_h5 is True
        assert isinstance(tab, Table)
def test_fromcolumns(self):
    """Column lists need Table.from_columns; the plain constructor rejects them."""
    n = 5
    columns = [
        np.ones(n, dtype=int),
        np.zeros(n, dtype=float),
        0,
    ]
    dt = np.dtype([('a', float), ('b', float), ('c', float)])
    with pytest.raises(ValueError):
        tab = Table(columns, dtype=dt)
    tab = Table.from_columns(columns, dtype=dt)
    print(tab.dtype)
    print(tab.shape)
    print(tab)
    assert isinstance(tab, Table)
    assert tab.h5loc == DEFAULT_H5LOC
    tab = Table.from_columns(columns, dtype=dt, h5loc='/foo')
    print(tab.dtype)
    print(tab.shape)
    print(tab)
    assert isinstance(tab, Table)
    assert tab.h5loc == '/foo'
    # A dtype with more fields than columns must be rejected.
    bad_dt = [('a', float), ('b', float), ('c', float), ('d', int)]
    with pytest.raises(ValueError):
        tab = Table.from_columns(columns, dtype=bad_dt)
        print(tab.dtype)
        print(tab.shape)
        print(tab)
def test_fromrows(self):
    """Row lists need Table.from_rows; the plain constructor rejects them."""
    rows = [
        [1, 2, 3],
        [4, 5, 6],
    ]
    dt = np.dtype([('a', float), ('b', float), ('c', float)])
    with pytest.raises(ValueError):
        tab = Table(rows, dtype=dt)
    tab = Table.from_rows(rows, dtype=dt)
    for piece in (tab.dtype, tab.shape, tab):
        print(piece)
    assert tab.h5loc == DEFAULT_H5LOC
    assert isinstance(tab, Table)
    tab = Table.from_rows(rows, dtype=dt, h5loc='/foo')
    for piece in (tab.dtype, tab.shape, tab):
        print(piece)
    assert tab.h5loc == '/foo'
    assert isinstance(tab, Table)
    # A dtype with an extra field cannot match two-column rows.
    bad_dt = [('a', float), ('b', float), ('c', float), ('d', int)]
    with pytest.raises(ValueError):
        tab = Table.from_rows(rows, dtype=bad_dt)
        print(tab.dtype)
        print(tab.shape)
        print(tab)
def test_fromcolumns(self):
    """from_columns accepts mixed arrays/scalars and honours h5loc."""
    n = 5
    data = [np.ones(n, dtype=int), np.zeros(n, dtype=float), 0]
    dt = np.dtype([("a", float), ("b", float), ("c", float)])
    with pytest.raises(ValueError):
        tab = Table(data, dtype=dt)
    tab = Table.from_columns(data, dtype=dt)
    print(tab.dtype)
    print(tab.shape)
    print(tab)
    assert isinstance(tab, Table)
    assert tab.h5loc == DEFAULT_H5LOC
    tab = Table.from_columns(data, dtype=dt, h5loc="/foo")
    print(tab.dtype)
    print(tab.shape)
    print(tab)
    assert isinstance(tab, Table)
    assert tab.h5loc == "/foo"
    bad_dt = [("a", float), ("b", float), ("c", float), ("d", int)]
    with pytest.raises(ValueError):
        tab = Table.from_columns(data, dtype=bad_dt)
        print(tab.dtype)
        print(tab.shape)
        print(tab)
def test_name(self):
    """A Table gets DEFAULT_NAME unless a name is passed explicitly."""
    for tab in (self.arr.view(Table), Table(self.arr)):
        assert tab.name == DEFAULT_NAME
    named = Table(self.arr, name="foo")
    assert named.name == "foo"
def test_incomplete_template(self):
    """from_template must raise KeyError when a required column is missing.

    The 'time' column is deliberately omitted from both sources.  The
    original version also asserted ``tab is not None`` *inside* the
    ``pytest.raises`` blocks; those asserts were unreachable (the
    exception fires on the preceding line) and have been removed.
    """
    n = 10
    d_hits = {
        'channel_id': np.arange(n),
        'dom_id': np.arange(n),
        # 'time' intentionally missing
        'tot': np.arange(n),
        'triggered': np.ones(n),
        'group_id': 0,  # event_id
    }
    with pytest.raises(KeyError):
        Table.from_template(d_hits, 'Hits')
    ar_hits = {
        'channel_id': np.ones(n, dtype=int),
        'dom_id': np.ones(n, dtype=int),
        # 'time' intentionally missing
        'tot': np.ones(n, dtype=float),
        'triggered': np.ones(n, dtype=bool),
        'group_id': np.ones(n, dtype=int),
    }
    with pytest.raises(KeyError):
        Table.from_template(ar_hits, 'Hits')
def test_split(self):
    """split_h5 defaults to False and can be enabled via the constructor."""
    assert self.arr.view(Table).split_h5 is False
    assert Table(self.arr).split_h5 is False
    assert Table(self.arr, split_h5=True).split_h5
def test_reorder_dtypes_w_matching_names_but_different_types_raise(self):
    """Rebuilding with a dtype whose field *types* differ must raise."""
    dtype = np.dtype([("a", "<i8"), ("c", "<i8"), ("b", "<f8")])
    conflicting_dtype = np.dtype([("a", "<f8"), ("c", "<i8"), ("b", "<f8")])
    tab = Table({"a": 1, "b": 2.5, "c": 3}, dtype=dtype)
    with self.assertRaises(ValueError):
        tab2 = Table(tab, dtype=conflicting_dtype)
def test_apply_without_affecting_primary_hit_table(self):
    """Calibration.apply must leave the original hit table untouched."""
    calib = Calibration(filename=DETX_FILENAME)
    hits = Table({'pmt_id': [1, 2, 1], 'time': [10.1, 11.2, 12.3]})
    snapshot = hits.copy()
    calib.apply(hits)
    for original_row, row_after in zip(snapshot, hits):
        self.assertAlmostEqual(original_row, row_after)
def test_apply_without_affecting_primary_hit_table(self):
    """Applying a calibration must not mutate the hit table in place."""
    calib = Calibration(filename=data_path("detx/detx_v1.detx"))
    hits = Table({"pmt_id": [1, 2, 1], "time": [10.1, 11.2, 12.3]})
    before = hits.copy()
    calib.apply(hits, correct_slewing=False)
    for row_before, row_after in zip(before, hits):
        self.assertAlmostEqual(row_before, row_after)
def process(self, blob):
    """Attach a table, a split table and an ndarray to the blob, then count up."""
    counter = self.i
    blob["Tab"] = Table({"a": counter}, h5loc="/tab")
    blob["SplitTab"] = Table({"b": counter}, h5loc="/split_tab", split_h5=True)
    blob["Arr"] = NDArray(np.arange(counter + 1), h5loc="/arr")
    self.i = counter + 1
    return blob
def test_pos_setter(self):
    """Assigning to ``pos`` updates the pos_x/pos_y/pos_z columns."""
    tab = Table(
        {"pos_x": [1, 0, 0], "pos_y": [0, 1, 0], "pos_z": [0, 0, 1]}
    )
    replacement = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    tab.pos = replacement
    assert np.allclose(replacement, tab.pos)
def test_dir_setter(self):
    """Assigning to ``dir`` updates the dir_x/dir_y/dir_z columns."""
    tab = Table(
        {"dir_x": [1, 0, 0], "dir_y": [0, 1, 0], "dir_z": [0, 0, 1]}
    )
    replacement = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    tab.dir = replacement
    assert np.allclose(replacement, tab.dir)
def test_crash_repr(self):
    """str/repr must work even for a 0-d record array that has no len()."""
    arr = np.array("", dtype=[("a", "<U1")])
    with pytest.raises(TypeError):
        print(len(arr))
    tab = Table(arr)
    assert tab.__str__() is not None
    assert tab.__repr__() is not None
def test_crash_repr(self):
    """Printing a Table built from a 0-d array must not crash."""
    zero_d = np.array('', dtype=[('a', '<U1')])
    with pytest.raises(TypeError):
        print(len(zero_d))
    tab = Table(zero_d)
    text = tab.__str__()
    assert text is not None
    representation = tab.__repr__()
    assert representation is not None
def test_pos_setter(self):
    """The ``pos`` property setter writes through to the position columns."""
    tab = Table({
        'pos_x': [1, 0, 0],
        'pos_y': [0, 1, 0],
        'pos_z': [0, 0, 1],
    })
    updated = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    tab.pos = updated
    assert np.allclose(updated, tab.pos)
def test_dir_setter(self):
    """The ``dir`` property setter writes through to the direction columns."""
    tab = Table({
        'dir_x': [1, 0, 0],
        'dir_y': [0, 1, 0],
        'dir_z': [0, 0, 1],
    })
    updated = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    tab.dir = updated
    assert np.allclose(updated, tab.dir)
def test_merge(self):
    """merge() concatenates rows and keeps the first table's metadata."""
    tables = [
        Table({"a": [1]}, h5loc="/a", h5singleton=True),
        Table({"a": [2]}),
        Table({"a": [3]}),
    ]
    merged_tab = Table.merge(tables)
    assert len(merged_tab) == 3
    self.assertListEqual([1, 2, 3], list(merged_tab.a))
    assert merged_tab.h5loc == "/a"
    assert merged_tab.h5singleton
def process_event(self, data, blob):
    """Decode one raw DAQ event payload and attach Hits/EventInfo tables to *blob*.

    Returns None (and leaves *blob* untouched) for events without snapshot hits.
    """
    data_io = BytesIO(data)
    preamble = DAQPreamble(file_obj=data_io)  # noqa
    event = DAQEvent(file_obj=data_io)
    header = event.header
    hits = event.snapshot_hits
    n_hits = event.n_snapshot_hits
    if n_hits == 0:
        # Empty event: nothing to record.
        return
    dom_ids, channel_ids, times, tots = zip(*hits)
    triggereds = np.zeros(n_hits)
    triggered_map = {}
    # Index triggered hits by their identity tuple for O(1) membership below.
    for triggered_hit in event.triggered_hits:
        dom_id, pmt_id, time, tot, _ = triggered_hit
        triggered_map[(dom_id, pmt_id, time, tot)] = True
    for idx, hit in enumerate(hits):
        # NOTE(review): assumes snapshot hit tuples share the
        # (dom_id, pmt_id, time, tot) layout of triggered hits — confirm.
        triggereds[idx] = hit in triggered_map
    hit_series = Table.from_template({
        'channel_id': channel_ids,
        'dom_id': dom_ids,
        'time': times,
        'tot': tots,
        'triggered': triggereds,
        'group_id': self.event_id,
    }, 'Hits')
    blob['Hits'] = hit_series
    event_info = Table.from_template(
        {
            'det_id': header.det_id,
            # 'frame_index': self.index,  # header.time_slice,
            'frame_index': header.time_slice,
            'livetime_sec': 0,
            'mc_id': 0,
            'mc_t': 0,
            'n_events_gen': 0,
            'n_files_gen': 0,
            'overlays': event.overlays,
            'trigger_counter': event.trigger_counter,
            'trigger_mask': event.trigger_mask,
            'utc_nanoseconds': header.ticks * 16,  # one tick = 16 ns
            'utc_seconds': header.time_stamp,
            'weight_w1': 0,
            'weight_w2': 0,
            'weight_w3': 0,  # MC weights
            'run_id': header.run,  # run id
            'group_id': self.event_id,
        }, 'EventInfo'
    )
    blob['EventInfo'] = event_info
    self.event_id += 1
    self.index += 1
def test_drop_columns(self):
    """Dropped columns must no longer be accessible as attributes."""
    tab = Table({"a": 1, "b": 2, "c": 3})
    print(tab.dtype)
    tab = tab.drop_columns(["a", "b"])
    print(tab.dtype)
    for removed in ("a", "b"):
        with pytest.raises(AttributeError):
            print(getattr(tab, removed))
    print(tab.c)
def test_drop_columns(self):
    """drop_columns removes the named fields but keeps the rest."""
    tab = Table({'a': 1, 'b': 2, 'c': 3})
    print(tab.dtype)
    remaining = tab.drop_columns(['a', 'b'])
    tab = remaining
    print(tab.dtype)
    with pytest.raises(AttributeError):
        print(tab.a)
    with pytest.raises(AttributeError):
        print(tab.b)
    print(tab.c)
def test_sort(self):
    """sorted('b') reorders rows by column b."""
    dtype = np.dtype([('a', int), ('b', float), ('c', int)])
    rows = [(0, 1.0, 2), (3, 7.0, 5), (6, 4.0, 8)]
    tab = Table(np.array(rows, dtype=dtype))
    by_b = tab.sorted('b')
    assert_array_equal(by_b['a'], np.array([0, 6, 3]))
def test_append_columns(self):
    """append_columns handles scalars, broadcast lists and multi-column input."""
    tab = Table(self.arr)
    print(tab)
    # Length mismatch is rejected.
    with pytest.raises(ValueError):
        tab = tab.append_columns("new", [1, 2, 3, 4])
    tab = tab.append_columns("new", [1, 2, 3])
    print(tab)
    assert tab.new[0] == 1
    assert tab.new[-1] == 3
    # Scalars are broadcast over all rows.
    tab = tab.append_columns("bar", 0)
    print(tab)
    assert tab.bar[0] == 0
    assert tab.bar[-1] == 0
    # A single-element list is broadcast too.
    tab = tab.append_columns("lala", [1])
    print(tab)
    assert tab.lala[0] == 1
    assert tab.lala[-1] == 1
    for bad_values in ([1, 2], [[1], [2]]):
        with pytest.raises(ValueError):
            tab = tab.append_columns(["m", "n"], bad_values)
    tab = tab.append_columns(["m", "n"], [[1, 1, 2], [2, 4, 5]])
    print(tab)
    assert tab.m[0] == 1
    assert tab.m[-1] == 2
    assert tab.n[0] == 2
    assert tab.n[-1] == 5
def test_flat_raises(self):
    """Flat 1-d input without structure must be rejected in every form."""
    for kwargs in ({"dtype": int}, {"dtype": float}, {"dtype": None}, {}):
        with pytest.raises(ValueError):
            t = Table([1, 2, 3], **kwargs).dtype
    with pytest.raises(ValueError):
        t = Table([1, 2, 3], colnames=["a", "b", "c"])  # noqa
def test_add_tables_with_same_colnames_but_different_dtype_order(self):
    """Addition aligns columns by name and keeps the left table's field order."""
    first_cols = ("b", "a")
    tab1 = Table.from_columns([[100, 200], [1, 2]], colnames=first_cols)
    self.assertTupleEqual(first_cols, tab1.dtype.names)
    tab2 = Table.from_columns(
        [[3, 4, 5], [300, 400, 500]], colnames=("a", "b")
    )
    combined = tab1 + tab2
    self.assertListEqual([1, 2, 3, 4, 5], list(combined.a))
    self.assertListEqual([100, 200, 300, 400, 500], list(combined.b))
    self.assertListEqual(list(combined.dtype.names), list(tab1.dtype.names))
def test_adding_preserves_metadata(self):
    """h5loc, singleton flag, split flag and name survive addition."""
    tab1 = Table(
        {"a": [1, 2]},
        h5loc="/a",
        h5singleton=True,
        split_h5=True,
        name="FooTable",
    )
    combined = tab1 + Table({"a": [3, 4, 5]})
    assert tab1.h5loc == "/a"
    assert combined.h5singleton
    assert combined.split_h5
    assert combined.name == "FooTable"
def test_add_tables_with_same_colnames_but_different_dtype_order(self):
    """Column order of the right operand must not matter when adding tables."""
    cols1 = ('b', 'a')
    tab1 = Table.from_columns([[100, 200], [1, 2]], colnames=cols1)
    self.assertTupleEqual(cols1, tab1.dtype.names)
    cols2 = ('a', 'b')
    tab2 = Table.from_columns([[3, 4, 5], [300, 400, 500]], colnames=cols2)
    added_tab = tab1 + tab2
    expected = {'a': [1, 2, 3, 4, 5], 'b': [100, 200, 300, 400, 500]}
    self.assertListEqual(expected['a'], list(added_tab.a))
    self.assertListEqual(expected['b'], list(added_tab.b))
    self.assertListEqual(list(added_tab.dtype.names), list(tab1.dtype.names))
def test_cherenkov_from_Table(self):
    """cherenkov() accepts Table inputs and reproduces the reference values."""
    arr = cherenkov(Table(self.calib_hits), Table(self.track))
    expected = {
        "d_photon_closest": 24.049593557846112,
        "d_photon": 35.80244420413484,
        "d_track": 45.88106599210481,
        "t_photon": 70311759.26448613,
        "cos_photon_PMT": -0.98123942583677,
        "dir_x_photon": 0.45964884122649263,
        "dir_y_photon": -0.8001372907490844,
        "dir_z_photon": -0.3853612055096594,
    }
    for field, reference in expected.items():
        self.assertAlmostEqual(arr[field][0], reference)
def test_sort(self):
    """Rows come back ordered by the requested column."""
    dtype = np.dtype([("a", int), ("b", float), ("c", int)])
    data = np.array(
        [(0, 1.0, 2), (3, 7.0, 5), (6, 4.0, 8)],
        dtype=dtype,
    )
    result = Table(data).sorted("b")
    assert_array_equal(result["a"], np.array([0, 6, 3]))
def test_append_columns(self):
    """Columns can be appended one at a time or several at once."""
    tab = Table(self.arr)
    print(tab)
    with pytest.raises(ValueError):
        tab = tab.append_columns('new', [1, 2, 3, 4])
    tab = tab.append_columns('new', [1, 2, 3])
    print(tab)
    assert tab.new[0] == 1 and tab.new[-1] == 3
    tab = tab.append_columns('bar', 0)
    print(tab)
    assert tab.bar[0] == 0 and tab.bar[-1] == 0
    tab = tab.append_columns('lala', [1])
    print(tab)
    assert tab.lala[0] == 1 and tab.lala[-1] == 1
    with pytest.raises(ValueError):
        tab = tab.append_columns(['m', 'n'], [1, 2])
    with pytest.raises(ValueError):
        tab = tab.append_columns(['m', 'n'], [[1], [2]])
    tab = tab.append_columns(['m', 'n'], [[1, 1, 2], [2, 4, 5]])
    print(tab)
    assert tab.m[0] == 1 and tab.m[-1] == 2
    assert tab.n[0] == 2 and tab.n[-1] == 5
def test_from_2d(self):
    """Plain 2-d lists are ambiguous and must be rejected in every form."""
    l2d = [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]
    names = ["a", "origin", "pmt_id", "time", "group_id"]
    dta = inflate_dtype(l2d, names)
    for kwargs in (
        {},
        {"dtype": None},
        {"colnames": names},
        {"dtype": dta},
        {"dtype": dta, "colnames": ["a", "b", "c", "d"]},
    ):
        with pytest.raises(ValueError):
            t = Table(l2d, **kwargs)  # noqa
def extract_event(self, event_number):
    """Read event *event_number* from the reader and return *blob* with Hits and EventInfo tables."""
    blob = self._current_blob
    r = self.event_reader
    hits = r.events.snapshot_hits[event_number]
    trg_hits = r.events.triggered_hits[event_number]
    raw_event_info = r.events.headers[event_number]
    # Marks which snapshot hits were also triggered.
    trigger_mask = self._get_trigger_mask(hits, trg_hits)
    hit_series = Table(
        {
            "channel_id": hits.channel_id,
            "dom_id": hits.dom_id,
            "time": hits.time,
            "tot": hits.tot,
            "triggered": trigger_mask,
        },
        name="Hits",
        h5loc="/hits",
        split_h5=True,
    )
    event_info = Table(
        {
            "det_id": raw_event_info["detector_id"],
            "frame_index": raw_event_info["frame_index"],
            # MC-related fields are zeroed/NaN for real data.
            "livetime_sec": 0,
            "mc_id": 0,
            "mc_t": 0,
            "n_events_gen": 0,
            "n_files_gen": 0,
            "overlays": raw_event_info["overlays"],
            "trigger_counter": raw_event_info["trigger_counter"],
            "trigger_mask": raw_event_info["trigger_mask"],
            # One UTC cycle is 16 ns.
            "utc_nanoseconds": raw_event_info["UTC_16nanosecondcycles"] * 16.0,
            "utc_seconds": raw_event_info["UTC_seconds"],
            "weight_w1": np.nan,
            "weight_w2": np.nan,
            "weight_w3": np.nan,
            "run_id": raw_event_info["run"],
        },
        name="EventInfo",
        h5loc="/event_info",
    )
    self.event_index += 1
    blob["EventInfo"] = event_info
    blob["Hits"] = hit_series
    return blob
def setUp(self):
    """Create one bare table and one with position/direction columns."""
    base = {"a": [1, 2, 3], "b": [3, 4, 5]}
    self.arr_bare = Table(dict(base))
    geometry = {
        "pos_x": [10, 20, 30],
        "pos_y": [40, 50, 60],
        "pos_z": [70, 80, 90],
        "dir_x": [10.0, 20.0, 30.0],
        "dir_y": [40.0, 50.0, 60.0],
        "dir_z": [70.0, 80.0, 90.0],
    }
    self.arr_wpos = Table({**base, **geometry})
def process(self, blob):
    """Attach a fixed set of raw hits and MC hits to the blob."""
    self.index += 1
    raw_hits = Table({
        "dom_id": [2, 3, 3],
        "channel_id": [0, 1, 2],
        "time": [10.1, 11.2, 12.3],
        "tot": [0, 10, 255],
    })
    blob["Hits"] = raw_hits
    blob["McHits"] = Table({"pmt_id": [1, 2, 1], "time": [10.1, 11.2, 12.3]})
    return blob
def test_reorder_dtypes(self):
    """A Table can be rebuilt with the same fields in a different order."""
    original_dtype = np.dtype([("a", "<i8"), ("c", "<i8"), ("b", "<f8")])
    shuffled_dtype = np.dtype([("b", "<f8"), ("c", "<i8"), ("a", "<i8")])
    tab = Table({"a": 1, "b": 2.5, "c": 3}, dtype=original_dtype)
    assert tab.dtype == original_dtype
    assert tab.a[0] == 1
    assert tab.b[0] == 2.5
    assert tab.c[0] == 3
    shuffled = Table(tab, dtype=shuffled_dtype)
    assert shuffled.dtype == shuffled_dtype
    assert shuffled.a[0] == 1
    assert shuffled.b[0] == 2.5
    assert shuffled.c[0] == 3
def test_merge_other_different_columns_fills_nan_when_fillna(self):
    """Columns missing from one table are NaN-padded when fillna=True."""
    left = Table({"a": [1.1, 1.2], "b": [10.1, 10.2]})
    right = Table({"a": [2.1, 2.2], "c": [100.1, 100.2]})
    merged = Table.merge([left, right], fillna=True)
    assert len(merged) == 4
    self.assertListEqual([1.1, 1.2, 2.1, 2.2], list(merged.a))
    self.assertListEqual([10.1, 10.2], list(merged.b[:2]))
    self.assertListEqual([100.1, 100.2], list(merged.c[2:]))
    for padded in (merged.c[0], merged.c[1], merged.b[2], merged.b[3]):
        assert np.isnan(padded)
def extract_event(self):
    """Advance the reader by one event and return *blob* with Hits and EventInfo tables."""
    blob = self._current_blob
    r = self.event_reader
    r.retrieve_next_event()  # do it at the beginning!
    n = r.number_of_snapshot_hits
    if n > self.buf_size:
        # Grow buffers by 1.5x to amortise future resizes.
        self._resize_buffers(int(n * 3 / 2))
    # Fills the preallocated buffers in place.
    r.get_hits(
        self._channel_ids, self._dom_ids, self._times, self._tots,
        self._triggereds
    )
    hit_series = Table.from_template({
        'channel_id': self._channel_ids[:n],
        'dom_id': self._dom_ids[:n],
        'time': self._times[:n],
        'tot': self._tots[:n],
        'triggered': self._triggereds[:n],
        'group_id': self.event_index,
    }, 'Hits')
    event_info = Table.from_template({
        'det_id': r.det_id,
        'frame_index': r.frame_index,
        # MC-related fields are not available from the online reader.
        'livetime_sec': 0,
        'mc_id': 0,
        'mc_t': 0,
        'n_events_gen': 0,
        'n_files_gen': 0,
        'overlays': r.overlays,
        'trigger_counter': r.trigger_counter,
        'trigger_mask': r.trigger_mask,
        'utc_nanoseconds': r.utc_nanoseconds,
        'utc_seconds': r.utc_seconds,
        'weight_w1': np.nan,
        'weight_w2': np.nan,
        'weight_w3': np.nan,
        'run_id': 0,
        'group_id': self.event_index,
    }, 'EventInfo')
    self.event_index += 1
    blob['EventInfo'] = event_info
    blob['Hits'] = hit_series
    return blob
def test_triggered_keeps_attrs(self):
    """Selecting triggered rows must preserve name, h5loc and split flag."""
    n = 5
    hits = Table(
        {
            "channel_id": np.arange(n),
            "dom_id": np.arange(n),
            "time": np.arange(n),
            "tot": np.arange(n),
            "triggered": np.array([0, 1, 1, 0, 1]),
            "group_id": 0,  # event_id
        },
        name="hits",
        h5loc="/foo",
        split_h5=True,
    )
    selection = hits.triggered_rows
    assert len(selection) == 3
    assert selection.split_h5
    assert selection.name == "hits"
    assert selection.h5loc == "/foo"
def test_from_dict_with_unordered_columns_wrt_to_dtype_fields(self):
    """from_dict matches columns to dtype fields by name, not by order."""
    data = {"b": [1, 2], "c": [3, 4], "a": [5, 6]}
    fields = [("a", float), ("b", float), ("c", float)]
    tab = Table.from_dict(data, dtype=fields)
    for column, expected in (("b", [1, 2]), ("c", [3, 4]), ("a", [5, 6])):
        assert np.allclose(expected, getattr(tab, column))
def test_from_dict_with_unordered_columns_wrt_to_dtype_fields(self):
    """Key order in the source dict must not matter when a dtype is given."""
    data = {'b': [1, 2], 'c': [3, 4], 'a': [5, 6]}
    dt = [('a', float), ('b', float), ('c', float)]
    tab = Table.from_dict(data, dtype=dt)
    assert np.allclose([5, 6], tab.a)
    assert np.allclose([1, 2], tab.b)
    assert np.allclose([3, 4], tab.c)
def process_online_reco(self, data, blob):
    """Unpack an online-reconstruction payload and attach reco objects to *blob*."""
    data_io = BytesIO(data)
    preamble = DAQPreamble(file_obj=data_io)  # noqa
    # Header: det_id, run_id, frame_index (i32 each), trigger_counter (u64),
    # utc_seconds (u32) — little-endian, read in this exact order.
    _data = unpack("<iiiQI", data_io.read(4 + 4 + 4 + 8 + 4))
    det_id, run_id, frame_index, trigger_counter, utc_seconds = _data
    # Each reco block is 9 doubles followed by 3 ints of metadata.
    shower_reco = unpack("9d", data_io.read(9 * 8))
    shower_meta = unpack("3i", data_io.read(12))
    track_reco = unpack("9d", data_io.read(9 * 8))
    track_meta = unpack("3i", data_io.read(12))
    print("Shower: x/y/z/dx/dy/dz/E/Q/t (type/status/ndf): ", shower_reco, shower_meta)
    print("Track: x/y/z/dx/dy/dz/E/Q/t (type/status/ndf): ", track_reco, track_meta)
    blob["ReconstructionInfo"] = Table(
        {
            "det_id": det_id,
            "run_id": run_id,
            "frame_index": frame_index,
            "trigger_counter": trigger_counter,
            "utc_seconds": utc_seconds,
        },
        h5loc="reco",
        split_h5=True,
        name="Reconstructions",
    )
    args = track_reco + track_meta
    blob["RecoTrack"] = RecoTrack(*args)
    args = shower_reco + shower_meta
    blob["RecoShower"] = RecoShower(*args)
def summaryslice_generator(self):
    """Yield one blob per summaryslice, mapping DOM id -> per-DOM summary data."""
    slice_id = 0
    while self.r.has_next:
        summary_slice = {}
        self.r.retrieve_next_summaryslice()
        blob = Blob()
        summaryslice_info = Table.from_template({
            'frame_index': self.r.frame_index,
            'slice_id': slice_id,
            'timestamp': self.r.utc_seconds,
            'nanoseconds': self.r.utc_nanoseconds,
            'n_frames': self.r.n_frames,
        }, 'SummarysliceInfo')
        blob['SummarysliceInfo'] = summaryslice_info
        while self.r.has_next_frame:
            # One entry per PMT channel; 31 channels per DOM.
            rates = np.zeros(31, dtype='f8')
            hrvs = np.zeros(31, dtype='i4')
            fifos = np.zeros(31, dtype='i4')
            # Reader fills the arrays in place.
            self.r.get_rates(rates)
            self.r.get_hrvs(hrvs)
            self.r.get_fifos(fifos)
            summary_slice[self.r.dom_id] = {
                'rates': rates,
                'hrvs': hrvs.astype(bool),
                'fifos': fifos.astype(bool),
                'n_udp_packets': self.r.number_of_received_packets,
                'max_sequence_number': self.r.max_sequence_number,
                'has_udp_trailer': self.r.has_udp_trailer,
                'high_rate_veto': self.r.high_rate_veto,
                'fifo_status': self.r.fifo_status,
            }
            self.r.retrieve_next_frame()
        blob['Summaryslice'] = summary_slice
        slice_id += 1
        yield blob
def process(self, blob):
    """Attach a growing ndarray and a table to the blob, then advance the index."""
    idx = self.index
    blob["Arr"] = NDArray(np.arange(idx + 1), h5loc="/arr")
    blob["Tab"] = Table({"a": np.arange(idx + 1), "i": idx}, h5loc="/tab")
    self.index = idx + 1
    return blob
def test_assert_apply_adds_pmt_id_to_hits(self):
    """apply() resolves (dom_id, channel_id) pairs to PMT ids."""
    calib = Calibration(filename=data_path("detx/detx_v1.detx"))
    hits = Table({
        "dom_id": [2, 3, 3],
        "channel_id": [0, 1, 2],
        "time": [10.1, 11.2, 12.3],
    })
    calibrated = calib.apply(hits, correct_slewing=False)
    self.assertListEqual([4, 8, 9], list(calibrated.pmt_id))
def test_apply_to_hits_with_dom_id_and_channel_id_with_wrong_calib_raises(self):
    """Calibrating hits with an unknown DOM id must raise KeyError."""
    calib = Calibration(filename=data_path("detx/detx_v1.detx"))
    bogus_hits = Table({"dom_id": [999], "channel_id": [0], "time": [10.1]})
    with self.assertRaises(KeyError):
        calib.apply(bogus_hits, correct_slewing=False)
def _extract_hits(self):
    """Copy the reader's current hits out of the internal buffers into a Table."""
    total_hits = self.r.number_of_hits
    if total_hits > self.buf_size:
        # Grow buffers by 1.5x to amortise future resizes.
        buf_size = int(total_hits * 3 / 2)
        self._resize_buffers(buf_size)
    # Reader fills the preallocated buffers in place.
    self.r.get_hits(
        self._channel_ids, self._dom_ids, self._times, self._tots
    )
    # An empty list keeps the resulting table empty when there are no hits.
    group_id = 0 if total_hits > 0 else []
    hits = Table.from_template(
        {
            'channel_id': self._channel_ids[:total_hits],
            'dom_id': self._dom_ids[:total_hits],
            'time': self._times[:total_hits].astype('f8'),
            'tot': self._tots[:total_hits],
            # 'triggered': self._triggereds[:total_hits],  # dummy
            'group_id': group_id,  # slice_id will be set afterwards
        }, 'TimesliceHits'
    )
    return hits
def test_correct_slewing(self):
    """The 'correct_slewing' service must shift hit times by the ToT-dependent slew."""
    hits = Table(
        {
            "dom_id": [2, 3, 3],
            "channel_id": [0, 1, 2],
            "time": [10.1, 11.2, 12.3],
            "tot": [0, 10, 255],
        }
    )
    # Keep a handle on the test case: inside the Module, `self` is the module.
    tester = self

    class HitCalibrator(Module):
        def process(self, blob):
            # The service corrects the hit times in place.
            self.services["correct_slewing"](hits)
            a_hit = hits[0]
            tester.assertAlmostEqual(10.1 - slew(a_hit.tot), a_hit.time)
            a_hit = hits[1]
            tester.assertAlmostEqual(11.2 - slew(a_hit.tot), a_hit.time)
            return blob

    pipe = Pipeline()
    pipe.attach(CalibrationService, filename=data_path("detx/detx_v1.detx"))
    pipe.attach(HitCalibrator)
    pipe.drain(1)
def test_merge_different_columns_with_no_nan_compatible_dtype_even_if_fillna(
    self
):
    """fillna cannot help when the column dtypes have no NaN representation."""
    tables = [
        Table({'a': [1]}, h5loc='/a', h5singleton=True),
        Table({'b': [2]}),
        Table({'c': [3]}),
    ]
    with self.assertRaises(ValueError):
        merged_tab = Table.merge(tables, fillna=True)
def test_from_columns_with_colnames(self):
    """from_columns pairs each column with the matching name."""
    columns = [[1 + 3 * i, 2 + 3 * i, 3 + 3 * i] for i in range(7)]
    t = Table.from_columns(
        columns, colnames=['a', 'b', 'c', 'd', 'e', 'f', 'g']
    )
    print("t.a: {}".format(t.a))
    assert np.allclose([1, 2, 3], t.a)
    print("t.b: {}".format(t.b))
    assert np.allclose([4, 5, 6], t.b)
def test_merge(self):
    """Merging concatenates rows and inherits metadata from the first table."""
    first = Table({'a': [1]}, h5loc='/a', h5singleton=True)
    merged_tab = Table.merge([first, Table({'a': [2]}), Table({'a': [3]})])
    assert len(merged_tab) == 3
    self.assertListEqual([1, 2, 3], list(merged_tab.a))
    assert merged_tab.h5loc == '/a'
    assert merged_tab.h5singleton
def test_from_mixed_dict(self):
    """from_dict handles a mix of scalars and arrays once a dtype is given."""
    dmap = {'a': 1, 'b': 0., 'c': np.zeros(4)}
    dt = [('a', float), ('b', float), ('c', float)]
    tab = Table.from_dict(dmap, dtype=dt)
    assert isinstance(tab, Table)
    assert tab.h5loc == DEFAULT_H5LOC
    tab = Table.from_dict(dmap, dtype=dt, h5loc='/foo')
    assert isinstance(tab, Table)
    assert tab.h5loc == '/foo'
    # A dtype field without matching data must raise.
    bad_dt = [('a', float), ('b', float), ('c', float), ('d', int)]
    with pytest.raises(KeyError):
        tab = Table.from_dict(dmap, dtype=bad_dt)
def test_init_from_template_with_differently_ordered_dicts(self):
    """from_template must be insensitive to the key order of the source dict."""
    fields = {
        'frame_index': 1,
        'slice_id': 2,
        'timestamp': 3,
        'nanoseconds': 4,
        'n_frames': 5,
    }
    reordered = {
        'n_frames': 5,
        'timestamp': 3,
        'nanoseconds': 4,
        'slice_id': 2,
        'frame_index': 1,
    }
    t1 = Table.from_template(fields, 'TimesliceInfo')
    t2 = Table.from_template(reordered, 'TimesliceInfo')
    assert t1.dtype == t2.dtype
    for key in fields:
        assert getattr(t1, key)[0] == getattr(t2, key)[0]
def test_adhoc_noname_template(self):
    """A template dict without a name falls back to DEFAULT_NAME."""
    a_template = {
        'dtype': np.dtype([('a', '<u4'), ('b', 'f4')]),
        'h5loc': '/yat',
        'split_h5': True,
        'h5singleton': True,
    }
    arr = np.array([(1, 3), (2, 4)], dtype=a_template['dtype'])
    tab = Table.from_template(arr, a_template)
    self.assertListEqual([1, 2], list(tab.a))
    self.assertListEqual([3.0, 4.0], list(tab.b))
    assert tab.name == DEFAULT_NAME
    assert tab.h5singleton
def test_apply_to_timeslice_hits(self):
    """Calibrating TimesliceHits adds t0 and shifts the hit times."""
    tshits = Table.from_template({
        'channel_id': [0, 1, 2],
        'dom_id': [2, 3, 3],
        'time': [10.1, 11.2, 12.3],
        'tot': np.ones(3, dtype=float),
        'group_id': 0,
    }, 'TimesliceHits')
    calib = Calibration(filename=DETX_FILENAME)
    calibrated = calib.apply(tshits)
    assert len(calibrated) == len(tshits)
    assert np.allclose([40, 80, 90], calibrated.t0)
    # TimesliceHits is using int4 for times, so it's truncated when we pass in float64
    assert np.allclose([50.1, 91.2, 102.3], calibrated.time, atol=0.1)
def test_df(self):
    """Round-trip a DataFrame through Table and back.

    Uses ``pandas.testing.assert_frame_equal``: the old
    ``pandas.util.testing`` module was deprecated in pandas 1.0 and
    removed in pandas 2.x.
    """
    from pandas.testing import assert_frame_equal
    import pandas as pd

    dt = np.dtype([('a', int), ('b', float), ('c', int)])
    arr = np.array([
        (0, 1.0, 2),
        (3, 7.0, 5),
        (6, 4.0, 8),
    ], dtype=dt)
    print(dir(Table))
    df = pd.DataFrame(arr)
    tab = Table.from_dataframe(df, h5loc='/bla')
    df2 = tab.to_dataframe()
    assert_frame_equal(df, df2)
def test_expand_scalars(self):
    """_expand_scalars broadcasts scalar entries alongside array entries."""
    cases = [
        {'a': 1, 'b': 0., 'c': 0},
        {'a': [1, 2, 1], 'b': 0., 'c': [0, 1]},
        {'a': [1, 2, 1], 'b': [0.], 'c': [0, 1]},
        {'a': [1, 2, 1], 'b': np.array(0.), 'c': [0, 1]},
        {'a': [1, 2, 1], 'b': np.array([1]), 'c': [0, 1]},
    ]
    for dmap in cases:
        expanded = Table._expand_scalars(dmap)
        assert len(expanded) > 0
def test_merge_other_different_columns_fills_nan_when_fillna(self):
    """Missing columns are padded with NaN when fillna is requested."""
    tab1 = Table({'a': [1.1, 1.2], 'b': [10.1, 10.2]})
    tab2 = Table({'a': [2.1, 2.2], 'c': [100.1, 100.2]})
    merged_tab = Table.merge([tab1, tab2], fillna=True)
    assert len(merged_tab) == 4
    self.assertListEqual([1.1, 1.2, 2.1, 2.2], list(merged_tab.a))
    self.assertListEqual([10.1, 10.2], list(merged_tab.b[:2]))
    self.assertListEqual([100.1, 100.2], list(merged_tab.c[2:]))
    padded = (merged_tab.c[0], merged_tab.c[1], merged_tab.b[2], merged_tab.b[3])
    for value in padded:
        assert np.isnan(value)
def setUp(self):
    """Build a minimal table and one that also carries pos_*/dir_* columns."""
    core = {'a': [1, 2, 3], 'b': [3, 4, 5]}
    self.arr_bare = Table(dict(core))
    extended = dict(core)
    extended.update({
        'pos_x': [10, 20, 30],
        'pos_y': [40, 50, 60],
        'pos_z': [70, 80, 90],
        'dir_x': [10.0, 20.0, 30.0],
        'dir_y': [40.0, 50.0, 60.0],
        'dir_z': [70.0, 80.0, 90.0],
    })
    self.arr_wpos = Table(extended)
def get_blob(self, index):
    """Return the blob for timeslice *index* (the index is the slice ID)."""
    blob = self._current_blob
    self.r.retrieve_timeslice(index)
    timeslice_info = Table.from_template({
        'frame_index': self.r.frame_index,
        'slice_id': index,
        'timestamp': self.r.utc_seconds,
        'nanoseconds': self.r.utc_nanoseconds,
        'n_frames': self.r.n_frames,
    }, 'TimesliceInfo')
    hits = self._extract_hits()
    # Tag every hit with the slice it came from.
    hits.group_id = index
    blob['TimesliceInfo'] = timeslice_info
    blob[self._hits_blob_key] = hits
    return blob
def _to_array(self, data, name=None):
    """Convert *data* into something table-like for writing, or None to skip.

    Scalars are wrapped into a single-row Table under /misc, DataFrames
    are converted via Table.from_dataframe, everything else is passed
    through unchanged.
    """
    if data is None:
        return
    if np.isscalar(data):
        self.log.debug('toarray: is a scalar')
        return Table(
            {name: np.asarray(data).reshape((1, ))},
            h5loc='/misc/{}'.format(decamelise(name)),
            name=name
        )
    # Skip empty containers.  The original checked ``hasattr(data, 'len')``,
    # which is never true — the length protocol attribute is ``__len__`` —
    # so empty data slipped through this guard.
    if hasattr(data, '__len__') and len(data) <= 0:
        self.log.debug('toarray: data has no length')
        return
    # istype instead isinstance, to avoid heavy pandas import (hmmm...)
    if istype(data, 'DataFrame'):  # noqa
        self.log.debug('toarray: pandas dataframe')
        data = Table.from_dataframe(data)
    return data