def test_to_short_data_field(self):
    """A hitlet data field too small for the hit waveform must raise a ValueError."""
    too_short = np.zeros(len(self.hitlets), dtype=strax.hitlet_with_data_dtype(2))
    strax.copy_to_buffer(self.hitlets, too_short, '_refresh_hit_to_hitlet')
    with self.assertRaises(ValueError):
        strax.get_hitlets_data(too_short, self.records, np.ones(3000))
def compute(self, records_nv, start, end):
    """Build nVETO hitlets from the records of one chunk.

    :param records_nv: Structured array of nVETO records.
    :param start: Chunk start time; passed to concat_overlapping_hits.
    :param end: Chunk end time; passed to concat_overlapping_hits.
    :return: Structured array with strax.hitlet_dtype() (data field dropped).
    """
    # Search again for hits in records:
    hits = strax.find_hits(
        records_nv, min_amplitude=self.config['hit_min_amplitude_nv'])

    # Merge concatenate overlapping within a channel. This is important
    # in case hits were split by record boundaries. In case we
    # accidentally concatenate two PMT signals we split them later again.
    hits = strax.concat_overlapping_hits(
        hits,
        self.config['save_outside_hits_nv'],
        self.config['channel_map']['nveto'],
        start,
        end)
    hits = strax.sort_by_time(hits)

    # Now convert hits into temp_hitlets including the data field:
    # nsamples sizes the data field to the longest hit (0 if no hits).
    if len(hits):
        nsamples = hits['length'].max()
    else:
        nsamples = 0
    temp_hitlets = np.zeros(
        len(hits), strax.hitlet_with_data_dtype(n_samples=nsamples))

    # Generating hitlets and copying relevant information from hits to hitlets.
    # These hitlets are not stored in the end since this array also contains a data
    # field which we will drop later.
    strax.refresh_hit_to_hitlets(hits, temp_hitlets)
    del hits

    # Get hitlet data and split hitlets:
    # NOTE(review): return value ignored here — presumably this strax version
    # fills temp_hitlets in place; newer call sites reassign the result. Confirm
    # against the installed strax API.
    strax.get_hitlets_data(temp_hitlets, records_nv, to_pe=self.to_pe)
    temp_hitlets = strax.split_peaks(
        temp_hitlets,
        records_nv,
        self.to_pe,
        data_type='hitlets',
        algorithm='local_minimum',
        min_height=self.config['min_split_nv'],
        min_ratio=self.config['min_split_ratio_nv'])

    # Compute other hitlet properties:
    # We have to loop here 3 times over all hitlets...
    strax.hitlet_properties(temp_hitlets)
    entropy = strax.conditional_entropy(temp_hitlets, template='flat',
                                        square_data=False)
    temp_hitlets['entropy'][:] = entropy
    strax.compute_widths(temp_hitlets)

    # Remove data field:
    hitlets = np.zeros(len(temp_hitlets), dtype=strax.hitlet_dtype())
    drop_data_field(temp_hitlets, hitlets)
    return hitlets
def test_inputs_are_empty(self):
    """Empty hitlets yield empty output; non-empty hitlets with empty records raise."""
    empty_hitlets = np.zeros(0, dtype=strax.hitlet_with_data_dtype(2))
    empty_records = np.zeros(0, dtype=strax.record_dtype(10))

    # Empty hitlets against real records:
    result = strax.get_hitlets_data(empty_hitlets, self.records, np.ones(3000))
    assert len(result) == 0, 'get_hitlet_data returned result for empty hitlets'

    # Empty hitlets against empty records:
    result = strax.get_hitlets_data(empty_hitlets, empty_records, np.ones(3000))
    assert len(result) == 0, 'get_hitlet_data returned result for empty hitlets'

    # Real hitlets but no records must fail:
    with self.assertRaises(ValueError):
        strax.get_hitlets_data(self.hitlets, empty_records, np.ones(3000))
def compute(self, records_nv, start, end):
    """Build nVETO hitlets from the records of one chunk.

    :param records_nv: Structured array of nVETO records.
    :param start: Chunk start time forwarded to create_hitlets_from_hits.
    :param end: Chunk end time forwarded to create_hitlets_from_hits.
    :return: Structured array with strax.hitlet_dtype() (data field dropped).
    """
    hits = strax.find_hits(records_nv,
                           min_amplitude=self.config['hit_min_amplitude_nv'])
    hits = remove_switched_off_channels(hits, self.to_pe)

    hitlets_with_data = strax.create_hitlets_from_hits(
        hits,
        self.config['save_outside_hits_nv'],
        self.channel_range,
        chunk_start=start,
        chunk_end=end)
    del hits

    # Fill the data field, then split hitlets at prominent local minima:
    hitlets_with_data = strax.get_hitlets_data(hitlets_with_data,
                                               records_nv,
                                               to_pe=self.to_pe)
    hitlets_with_data = strax.split_peaks(
        hitlets_with_data,
        records_nv,
        self.to_pe,
        data_type='hitlets',
        algorithm='local_minimum',
        min_height=self.config['min_split_nv'],
        min_ratio=self.config['min_split_ratio_nv'])

    # Compute other hitlet properties:
    # We have to loop here 3 times over all hitlets...
    strax.hitlet_properties(hitlets_with_data)
    hitlets_with_data['entropy'][:] = strax.conditional_entropy(
        hitlets_with_data, template='flat', square_data=False)

    # Remove data field:
    result = np.zeros(len(hitlets_with_data), dtype=strax.hitlet_dtype())
    strax.copy_to_buffer(hitlets_with_data, result, '_copy_hitlets')
    return result
def test_get_hitlets_data_without_data_field(self):
    """Hitlets without a data field must be accepted and filled correctly."""
    bare_hitlets = np.zeros(len(self.hitlets), strax.hitlet_dtype())
    strax.copy_to_buffer(self.hitlets, bare_hitlets,
                         '_copy_hitlets_to_hitlets_without_data')
    filled = strax.get_hitlets_data(bare_hitlets, self.records, np.ones(3000))
    self._test_data_is_identical(filled, [self.test_data_truth])
def test_get_hitlets_data():
    """End-to-end check of get_hitlets_data on hand-crafted fake records."""
    fake_records = [
        # Contains Hitlet #:
        [[1, 3, 2, 1, 0, 0]],   # 0
        [[0, 0, 0, 0, 1, 3],    # 1
         [2, 1, 0, 0, 0, 0]],
        [[0, 0, 0, 0, 1, 3],    # 2
         [2, 1, 0, 1, 3, 2]],   # 3
        [[0, 0, 0, 0, 1, 2],    # 4
         [2, 2, 2, 2, 2, 2],
         [2, 1, 0, 0, 0, 0]],
        [[2, 1, 0, 1, 3, 2]],   # 5, 6
        [[2, 2, 2, 2, 2, 2]],   # 7
    ]

    # Expected properties of the resulting hitlets:
    expected_areas = [7, 7, 7, 6, 18, 3, 6, 12]
    expected_times = [10, 28, 46, 51, 68, 88, 91, 104]
    expected_waveforms = [[1, 3, 2, 1],
                          [1, 3, 2, 1],
                          [1, 3, 2, 1],
                          [1, 3, 2],
                          [1, 2, 2, 2, 2, 2, 2, 2, 2, 1],
                          [2, 1],
                          [1, 3, 2],
                          [2, 2, 2, 2, 2, 2]]

    records = _make_fake_records(fake_records)
    hits = strax.find_hits(records, min_amplitude=2)
    hits = strax.concat_overlapping_hits(hits, (1, 1), (0, 1), 0, float('inf'))

    hitlets = np.zeros(
        len(hits),
        strax.hitlet_with_data_dtype(n_samples=np.max(hits['length'])))
    strax.refresh_hit_to_hitlets(hits, hitlets)
    strax.get_hitlets_data(hitlets, records, np.array([1, 1]))

    for i in range(len(expected_areas)):
        h = hitlets[i]
        assert h['area'] == expected_areas[i], f'Hitlet {i} has the wrong area'
        assert np.all(h['data'][:h['length']] == expected_waveforms[i]), \
            f'Hitlet {i} has the wrong waveform'
        assert h['time'] == expected_times[i], f'Hitlet {i} has the wrong starttime'
def test_get_hitlets_data(self):
    """End-to-end check of get_hitlets_data on hand-crafted fake records."""
    fake_records = [
        # Contains Hitlet #:
        [[1, 3, 2, 1, 0, 0]],   # 0
        [[0, 0, 0, 0, 1, 3],    # 1
         [2, 1, 0, 0, 0, 0]],
        [[0, 0, 0, 0, 1, 3],    # 2
         [2, 1, 0, 1, 3, 2]],   # 3
        [[0, 0, 0, 0, 1, 2],    # 4
         [2, 2, 2, 2, 2, 2],
         [2, 1, 0, 0, 0, 0]],
        [[2, 1, 0, 1, 3, 2]],   # 5, 6
        [[2, 2, 2, 2, 2, 2]],   # 7
    ]

    # Expected properties of the resulting hitlets:
    expected_areas = [7, 7, 7, 6, 18, 3, 6, 12]
    expected_times = [10, 28, 46, 51, 68, 88, 91, 104]
    expected_waveforms = [[1, 3, 2, 1],
                          [1, 3, 2, 1],
                          [1, 3, 2, 1],
                          [1, 3, 2],
                          [1, 2, 2, 2, 2, 2, 2, 2, 2, 1],
                          [2, 1],
                          [1, 3, 2],
                          [2, 2, 2, 2, 2, 2]]

    records, hitlets = self.make_records_and_hitlets(fake_records)
    hitlets = strax.get_hitlets_data(hitlets, records, np.ones(2))

    for i in range(len(expected_areas)):
        h = hitlets[i]
        assert h['area'] == expected_areas[i], f'Hitlet {i} has the wrong area'
        assert np.all(h['data'][:h['length']] == expected_waveforms[i]), \
            f'Hitlet {i} has the wrong waveform'
        assert h['time'] == expected_times[i], f'Hitlet {i} has the wrong starttime'
def test_empty_overlap(self):
    """Hitlet overlapping only empty records of another channel must be truncated.

    Create fake records for which the hitlet overlaps with channel 0
    although the hit is in channel 1.
    See also github.com/AxFoundation/strax/pull/549
    """
    records = np.zeros(3, strax.record_dtype(10))
    records['channel'] = (0, 1, 1)
    records['length'] = (10, 3, 10)
    records['time'] = (0, 0, 5)
    records['dt'] = 1
    records['data'][-1] = np.ones(10)

    # Assume we extend our hits by 1 sample hence hitlet starts at 4
    hitlet = np.zeros(1, strax.hitlet_with_data_dtype(11))
    hitlet['time'] = 4
    hitlet['dt'] = 1
    hitlet['length'] = 11
    hitlet['channel'] = 1

    hitlet = strax.get_hitlets_data(hitlet, records, np.ones(10))

    # Hitlet must have been clipped to the non-empty record in channel 1:
    assert hitlet['time'] == 5
    assert hitlet['length'] == 10
    assert np.sum(hitlet['data']) == 10
    assert hitlet['data'][0, 0] == 1
def test_data_field_is_empty(self):
    """Filling hitlets whose data field is already populated must raise."""
    filled = strax.get_hitlets_data(self.hitlets, self.records, np.ones(3000))
    with self.assertRaises(ValueError):
        strax.get_hitlets_data(filled, self.records, np.ones(3000))
    self._test_data_is_identical(filled, [self.test_data_truth])
def test_get_hitlets_data_for_single_hitlet(self):
    """A single hitlet (scalar record) must be handled like an array."""
    single = strax.get_hitlets_data(self.hitlets[0], self.records, np.ones(3000))
    self._test_data_is_identical(single, [self.test_data_truth])
def test_to_pe_wrong_shape(self):
    """A to_pe array too short for the hitlet channels must raise a ValueError."""
    self.hitlets['channel'] = 2000
    with self.assertRaises(ValueError):
        strax.get_hitlets_data(self.hitlets, self.records, np.ones(10))
def __call__(self, peaks, hits, records, rlinks, to_pe, data_type,
             do_iterations=1, min_area=0, **kwargs):
    """Split peaks or hitlets at the points returned by self.find_split_points.

    Recurses up to `do_iterations` times on the newly created fragments.

    :param peaks: Structured array of peaks/hitlets to split.
    :param hits: Hits belonging to the peaks (used for 'peaks' waveform sums).
    :param records: Records backing the peaks; records[0]['dt'] sets orig_dt.
    :param rlinks: Record links forwarded to strax.sum_waveform.
    :param to_pe: Gain conversion array.
    :param data_type: Either 'peaks' or 'hitlets'; selects post-split handling.
    :param do_iterations: Remaining split iterations; 0 returns peaks unchanged.
    :param min_area: Minimum area forwarded to the numba splitter.
    :param kwargs: Overrides for entries of self.find_split_args_defaults.
    :return: Time-sorted array of unsplit peaks plus all split fragments.
    :raises TypeError: On a kwarg not listed in find_split_args_defaults.
    :raises ValueError: If splitting produced a zero-length peak.
    """
    # Nothing to do for empty input or exhausted iteration budget:
    if not len(records) or not len(peaks) or not do_iterations:
        return peaks

    # Build the *args tuple for self.find_split_points from kwargs
    # since numba doesn't support **kwargs
    args_options = []
    for i, (k, value) in enumerate(self.find_split_args_defaults):
        if k in kwargs:
            value = kwargs[k]
        if k == 'threshold':
            # The 'threshold' option is a user-specified function
            value = value(peaks)
        args_options.append(value)
    args_options = tuple(args_options)

    # Check for spurious options
    argnames = [k for k, _ in self.find_split_args_defaults]
    for k in kwargs:
        if k not in argnames:
            raise TypeError(f"Unknown argument {k} for {self.__class__}")

    # is_split[i] is set by _split_peaks when peak i was broken up:
    is_split = np.zeros(len(peaks), dtype=np.bool_)

    new_peaks = self._split_peaks(
        # Numba doesn't like self as argument, but it's ok with functions...
        split_finder=self.find_split_points,
        peaks=peaks,
        is_split=is_split,
        orig_dt=records[0]['dt'],
        min_area=min_area,
        args_options=tuple(args_options),
        result_dtype=peaks.dtype)

    if is_split.sum() != 0:
        # Found new peaks: compute basic properties
        if data_type == 'peaks':
            strax.sum_waveform(new_peaks, hits, records, rlinks, to_pe)
            strax.compute_widths(new_peaks)
        elif data_type == 'hitlets':
            # Add record fields here
            new_peaks = strax.sort_by_time(
                new_peaks)  # Hitlets are not necessarily sorted after splitting
            new_peaks = strax.get_hitlets_data(new_peaks, records, to_pe)

        # ... and recurse (if needed)
        new_peaks = self(new_peaks, hits, records, rlinks, to_pe, data_type,
                         do_iterations=do_iterations - 1, min_area=min_area,
                         **kwargs)
        if np.any(new_peaks['length'] == 0):
            raise ValueError(
                'Want to add a new zero-length peak after splitting!')

        # Replace the split peaks by their fragments, keeping time order:
        peaks = strax.sort_by_time(
            np.concatenate([peaks[~is_split], new_peaks]))

    return peaks