def to_operation(self, inputs, conditions=None):
    """Build the graph node performing a linear regression on (x, y) inputs.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        A one-element list containing the gn.Map node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()

    def fit(x, y):
        # Least-squares line fit; also return the fitted line evaluated at x.
        slope, intercept, r_value, p_value, stderr = stats.linregress(x, y)
        return slope, intercept, r_value, p_value, stderr, slope * x + intercept

    nodes = [
        gn.Map(name=self.name() + "_operation", condition_needs=conditions,
               inputs=inputs, outputs=outputs, func=fit, parent=self.name())
    ]
    return nodes
def to_operation(self, inputs, conditions=None):
    """Build a node evaluating the quadratic c0 + c1*x + c2*x**2 on its input.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        The gn.Map node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    c0 = self.values['c0']
    c1 = self.values['c1']
    c2 = self.values['c2']

    def poly(x):
        # polyval expects coefficients ordered from degree 0 upward
        coeffs = [c0, c1, c2]
        return np.polynomial.polynomial.polyval(x, coeffs)

    node = gn.Map(name=self.name() + "_operation", condition_needs=conditions,
                  inputs=inputs, outputs=outputs, func=poly, parent=self.name())
    return node
def to_operation(self, inputs, conditions=None):
    """Build a node slicing a 1-D array between 'origin' and 'extent'.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        The gn.Map node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    origin = self.values['origin']
    extent = self.values['extent']
    # Sort so the slice is valid whichever bound is larger;
    # sorted() already returns a list, no extra list() needed.
    size = sorted([origin, extent])

    def func(arr):
        return arr[slice(*size)]

    node = gn.Map(name=self.name() + "_operation", condition_needs=conditions,
                  inputs=inputs, outputs=outputs, func=func, parent=self.name())
    return node
def to_operation(self, inputs, conditions=None):
    """Build a node running the LOC processor on its inputs.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        The gn.Map node wrapping a configured LOCProc callable.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    # Translate the UI value names into the LOCProc keyword arguments.
    locpars = {'num_bunches': self.values['num bunches'],
               'snr_filter': self.values['snr filter'],
               'roi_expand': self.values['roi expand'],
               'roi_fraction': self.values['roi fraction'],
               'island_split_method': self.values['island split method'],
               'island_split_par1': self.values['island split par1'],
               'island_split_par2': self.values['island split par2']}
    node = gn.Map(name=self.name()+"_operation", condition_needs=conditions,
                  inputs=inputs, outputs=outputs, func=LOCProc(**locpars),
                  parent=self.name())
    return node
def to_operation(self, inputs, outputs, **kwargs):
    """Buffer the last N input values and emit them unzipped per input.

    NOTE(review): the ``outputs`` argument is intentionally replaced by
    names derived from ``inputs`` — confirm callers rely on this.
    """
    outputs = [self.name() + '.' + key for key in inputs.keys()]
    buffered = [self.name()]

    def unzip(rows):
        # turn a sequence of tuples into one sequence per input
        return zip(*rows)

    buffer_node = gn.RollingBuffer(name=self.name() + "_buffer",
                                   N=self.values['Num Points'],
                                   unique=self.values['Unique'],
                                   inputs=inputs, outputs=buffered, **kwargs)
    unzip_node = gn.Map(name=self.name() + "_operation", inputs=buffered,
                        outputs=outputs, func=unzip, **kwargs)
    return [buffer_node, unzip_node]
def to_operation(self, inputs, conditions=None):
    """Build a node cropping a 2-D image to [ox:ox+ex, oy:oy+ey].

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        The gn.Map node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    ox = self.values['origin x']
    ex = self.values['extent x']
    oy = self.values['origin y']
    ey = self.values['extent y']

    def func(img):
        # extents are sizes relative to the origin, not absolute ends
        return img[slice(ox, ox + ex), slice(oy, oy + ey)]

    node = gn.Map(name=self.name() + "_operation", condition_needs=conditions,
                  inputs=inputs, outputs=outputs, func=func, parent=self.name())
    return node
def to_operation(self, inputs, conditions=None):
    """Buffer the last N values of each input and emit them per input.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the buffer node.

    Returns:
        A two-element list: the RollingBuffer and the unzip Map node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = [self.name() + '.' + i for i in inputs.keys()]
    buffer_output = [self.name()]
    nodes = [
        gn.RollingBuffer(name=self.name() + "_buffer",
                         N=self.values['Num Points'],
                         unique=self.values['Unique'],
                         condition_needs=conditions,
                         inputs=inputs, outputs=buffer_output,
                         parent=self.name()),
        # unzip the buffered tuples back into one sequence per input
        gn.Map(name=self.name() + "_operation", inputs=buffer_output,
               outputs=outputs, func=lambda a: zip(*a), parent=self.name())
    ]
    return nodes
def to_operation(self, inputs, outputs, **kwargs):
    """Emit whether more than Count array elements exceed Threshold."""
    map_outputs = [self.name() + "_map"]
    threshold = self.values['Threshold']
    count = self.values['Count']

    def exceeds(arr):
        # boolean: number of elements above threshold is greater than count
        return len(arr[arr > threshold]) > count

    map_node = gn.Map(name=self.name() + "_operation", inputs=inputs,
                      outputs=map_outputs, **kwargs, func=exceeds)
    pick_node = gn.PickN(name=self.name() + "_pickN", inputs=map_outputs,
                         outputs=outputs, **kwargs)
    return [map_node, pick_node]
def to_operation(self, inputs, conditions=None):
    """Build a node computing the RMS of each input array.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        The gn.Map node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    if len(inputs) == 1:
        def func(arr):
            return np.sqrt(np.mean(np.square(arr)))
    else:
        def func(*arr):
            # one RMS value per input array
            return list(map(lambda a: np.sqrt(np.mean(np.square(a))), arr))
    node = gn.Map(name=self.name() + "_operation", condition_needs=conditions,
                  inputs=inputs, outputs=outputs, func=func, parent=self.name())
    return node
def to_operation(self, inputs, conditions=None):
    """Build a node running constant-fraction discrimination on a waveform.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        The gn.Map node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    # Snapshot the control values once so the closure is self-contained.
    sampleInterval = self.values['Sample Interval']
    horpos = self.values['horpos']
    gain = self.values['gain']
    offset = self.values['offset']
    delay = self.values['delay']
    walk = self.values['walk']
    threshold = self.values['threshold']
    fraction = self.values['fraction']

    def cfd_func(waveform):
        return cfd.cfd(sampleInterval, horpos, gain, offset, waveform,
                       delay, walk, threshold, fraction)

    node = gn.Map(name=self.name()+"_operation", condition_needs=conditions,
                  inputs=inputs, outputs=outputs, func=cfd_func,
                  parent=self.name())
    return node
def to_operation(self, inputs, conditions=None):
    """Build a node running the psf hit finder over per-channel peak data.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        The gn.Map node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    # NOTE(review): HF is created once and shared by every call of func,
    # so the hit finder carries state across events — confirm intended.
    HF = psfHitFinder.HitFinder(self.values)

    def func(nhits, pktsec):
        # nhits[i] bounds the valid samples of channel i in pktsec;
        # channel ordering follows the HitFinder API (4 first, then 0-3).
        HF.FindHits(pktsec[4, :nhits[4]],
                    pktsec[0, :nhits[0]],
                    pktsec[1, :nhits[1]],
                    pktsec[2, :nhits[2]],
                    pktsec[3, :nhits[3]])
        return HF.GetXYT()

    node = gn.Map(name=self.name()+"_operation", condition_needs=conditions,
                  inputs=inputs, outputs=outputs, func=func,
                  parent=self.name())
    return node
def to_operation(self, inputs, conditions=None):
    """Build nodes flagging whether more than Count values exceed Threshold.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the map node.

    Returns:
        A two-element list: the thresholding Map and a PickN node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    map_outputs = [self.name() + "_map"]
    outputs = [self.name()]
    threshold = self.values['Threshold']
    count = self.values['Count']
    nodes = [
        gn.Map(name=self.name() + "_operation", condition_needs=conditions,
               inputs=inputs, outputs=map_outputs,
               func=lambda arr: len(arr[arr > threshold]) > count,
               parent=self.name()),
        gn.PickN(name=self.name() + "_pickN", inputs=map_outputs,
                 outputs=outputs, parent=self.name())
    ]
    return nodes
def to_operation(self, inputs, conditions=None):
    """Build a node summing each input along the configured axis.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        The gn.Map node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    axis = self.values['axis']
    if len(inputs) == 1:
        def func(arr):
            return np.sum(arr, axis=axis)
    else:
        def func(*arr):
            # one sum per input array
            return list(map(lambda a: np.sum(a, axis=axis), arr))
    node = gn.Map(name=self.name() + "_operation", condition_needs=conditions,
                  inputs=inputs, outputs=outputs, func=func, parent=self.name())
    return node
def to_operation(self, inputs, conditions=None):
    """Build a node producing all length-`length` combinations of its input.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        The gn.Map node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    length = self.values['length']

    def func(*args):
        # NOTE(review): combinations(*args, length) only works when exactly
        # one input iterable is wired in; more would raise TypeError — confirm.
        r = list(map(np.array, zip(*itertools.combinations(*args, length))))
        if r:
            return r
        # no combinations (input shorter than `length`): emit empty arrays
        return [np.array([])] * length

    node = gn.Map(name=self.name() + "_operation", condition_needs=conditions,
                  inputs=inputs, outputs=outputs, func=func, parent=self.name())
    return node
def to_operation(self, inputs, conditions=None):
    """Build a node splitting an array into equal sections along `axis`.

    The number of sections equals the number of declared outputs.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        The gn.Map node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    axis = self.values['axis']
    sections = len(outputs)

    def split(arr):
        splits = np.split(arr, sections, axis=axis)
        # drop the singleton split axis so each section comes out 1-D
        if axis == 0:
            splits = map(lambda a: a[0, :], splits)
        elif axis == 1:
            splits = map(lambda a: a[:, 0], splits)
        return list(splits)

    node = gn.Map(name=self.name() + "_operation", condition_needs=conditions,
                  inputs=inputs, outputs=outputs, func=split, parent=self.name())
    return node
def to_operation(self, inputs, outputs, **kwargs):
    """Accumulate N (x, y) pairs, then fit a line through them.

    Emits the x values, the y values, the fitted line evaluated at x,
    and the correlation coefficient.
    """
    def fit(pairs):
        data = np.array(pairs)
        xs = data[:, 0]
        ys = data[:, 1]
        slope, intercept, r_value, p_value, stderr = stats.linregress(xs, ys)
        return xs, ys, slope * xs + intercept, r_value

    picked_outputs = [self.name() + "_accumulated"]
    pick_node = gn.PickN(name=self.name() + "_picked", inputs=inputs,
                         outputs=picked_outputs, N=self.values['N'], **kwargs)
    fit_node = gn.Map(name=self.name() + "_operation", inputs=picked_outputs,
                      outputs=outputs, func=fit, **kwargs)
    return [pick_node, fit_node]
def to_operation(self, inputs, outputs, **kwargs):
    """Accumulate N values, then emit their mean and RMS."""
    def mean_and_rms(values):
        average = np.mean(values)
        root_mean_square = np.sqrt(np.mean(np.square(values)))
        return average, root_mean_square

    accumulated = [self.name() + '_accumulated_events']
    return [
        gn.PickN(name=self.name() + '_picked', N=self.values['N'],
                 inputs=inputs, outputs=accumulated, **kwargs),
        gn.Map(name=self.name() + '_operation', inputs=accumulated,
               outputs=outputs, func=mean_and_rms, **kwargs),
    ]
def to_operation(self, inputs, conditions=None):
    """Build a peak-finder Map node from the configured parameters.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        The gn.Map node wrapping a PeakfinderAlgos callable.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    constructor_params = {'npix_min': self.values['npix min'],
                          'npix_max': self.values['npix max'],
                          'amax_thr': self.values['amax thr'],
                          'atot_thr': self.values['atot thr'],
                          'son_min': self.values['son min']}
    call_params = {'thr_low': self.values['thr low'],
                   'thr_high': self.values['thr high'],
                   'rank': self.values['rank'],
                   'r0': self.values['r0'],
                   'dr': self.values['dr']}
    node = gn.Map(name=self.name()+"_operation", condition_needs=conditions,
                  inputs=inputs, outputs=outputs,
                  func=PeakfinderAlgos(constructor_params, call_params,
                                       list(self.outputs().keys())),
                  parent=self.name())
    return node
def test_projection(qtbot):
    """Check that the axis control updates node.values and that
    to_operation wires up a Map node with the expected name/inputs/outputs."""
    node = Projection('projection')
    widget = node.ctrlWidget()
    # showing windows steals focus after the tests exit; it's not necessary
    # for the test, and is annoying
    # widget.show()
    qtbot.addWidget(widget)
    # default axis is 0; one key-up on the axis control should bump it to 1
    assert node.values['axis'] == 0
    qtbot.keyPress(node.ctrls['axis'], QtCore.Qt.Key_Up)
    assert node.values['axis'] == 1
    inputs = {"In": node.name()}
    op = node.to_operation(inputs=inputs, outputs=["projection.Out"])
    # reference node with the expected wiring (func itself is not compared)
    mop = gn.Map(name="projection_operation", inputs=list(inputs.values()),
                 outputs=[node.name() + '.Out'],
                 func=lambda a: np.sum(a, axis=1))
    assert op.name == mop.name
    assert op.inputs == mop.inputs
    assert op.outputs == mop.outputs
def to_operation(self, inputs, conditions=None):
    """Build a node cropping an image together with its x/y coordinate axes.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        The gn.Map node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    ox = self.values['origin x']
    ex = self.values['extent x']
    oy = self.values['origin y']
    ey = self.values['extent y']

    def func(x, y, img):
        # locate the origin within the coordinate arrays, then crop by extent
        xstart = np.digitize(ox, x)
        ystart = np.digitize(oy, y)
        xs = slice(xstart, xstart + ex)
        ys = slice(ystart, ystart + ey)
        return x[xs], y[ys], img[xs, ys]

    node = gn.Map(name=self.name() + "_operation", condition_needs=conditions,
                  inputs=inputs, outputs=outputs, func=func, parent=self.name())
    return node
def to_operation(self, inputs, conditions=None):
    """Build nodes histogramming the input and accumulating counts over events.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the map node.

    Returns:
        A two-element list: the histogram Map and an Accumulator node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    map_outputs = [self.name() + "_bins", self.name() + "_counts"]
    nbins = self.values['bins']
    density = self.values['density']
    # Renamed from `range`/`bin` to avoid shadowing the builtins.
    hist_range = None
    if not self.values['auto range']:
        hist_range = (self.values['range min'], self.values['range max'])

    def histogram(arr, weights=None):
        counts, bins = np.histogram(arr, bins=nbins, range=hist_range,
                                    density=density, weights=weights)
        return bins, counts

    def reduction(res, *rest):
        # keep the latest bin edges, accumulate the counts
        res[0] = rest[0]
        res[1] = res[1] + rest[1]
        return res

    node = [
        gn.Map(name=self.name() + "_map", condition_needs=conditions,
               inputs=inputs, outputs=map_outputs, func=histogram,
               parent=self.name()),
        gn.Accumulator(name=self.name() + "_accumulated", inputs=map_outputs,
                       outputs=outputs, res_factory=lambda: [None, 0],
                       reduction=reduction, parent=self.name())
    ]
    return node
def to_operation(self, **kwargs):
    """Build a peak-finder Map node; parameters come from the control values."""
    vals = self.values
    constructor_params = dict(npix_min=vals['npix min'],
                              npix_max=vals['npix max'],
                              amax_thr=vals['amax thr'],
                              atot_thr=vals['atot thr'],
                              son_min=vals['son min'])
    call_params = dict(thr_low=vals['thr low'],
                       thr_high=vals['thr high'],
                       rank=vals['rank'],
                       r0=vals['r0'],
                       dr=vals['dr'])
    finder = PeakfinderAlgos(constructor_params, call_params,
                             list(self.outputs().keys()))
    return gn.Map(name=self.name() + "_operation", **kwargs, func=finder)
def to_operation(self, inputs, conditions=None):
    """Build a node evaluating the user-entered expression over its inputs.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        The gn.Map node wrapping a CalcProc callable.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    args = []
    expr = self.values['operation']
    # sympy doesn't like symbol names like Sum.0.Out, so strip dots,
    # colons and spaces from the variable names used in the expression.
    for arg in self.input_vars().values():
        rarg = arg.replace('.', '').replace(':', '').replace(' ', '')
        args.append(rarg)
        expr = expr.replace(arg, rarg)
    params = {'args': args, 'expr': expr}
    node = gn.Map(name=self.name() + "_operation", condition_needs=conditions,
                  inputs=inputs, outputs=outputs, func=CalcProc(params),
                  parent=self.name())
    return node
def to_operation(self, inputs, outputs, **kwargs):
    """Buffer the last N values of each input.

    With multiple inputs the buffered tuples are unzipped into one output
    per input; a single input is buffered directly.

    NOTE(review): the ``outputs`` argument is replaced by names derived
    from ``inputs`` — confirm callers rely on this.
    """
    outputs = [self.name() + '.' + i for i in inputs.keys()]
    buffer_output = [self.name()]
    # len(inputs) instead of len(inputs.values()) — same count, no extra view
    if len(inputs) > 1:
        node = [
            gn.RollingBuffer(name=self.name() + "_buffer",
                             N=self.values['Num Points'],
                             inputs=inputs, outputs=buffer_output, **kwargs),
            gn.Map(name=self.name() + "_operation", inputs=buffer_output,
                   outputs=outputs, func=lambda a: zip(*a), **kwargs)
        ]
    else:
        node = gn.RollingBuffer(name=self.name(), N=self.values['Num Points'],
                                inputs=inputs, outputs=outputs, **kwargs)
    return node
def to_operation(self, inputs, conditions=None):
    """Build a node extracting digitizer peaks and generating time axes.

    Args:
        inputs: mapping of input terminal names to upstream graph variables.
        conditions: optional condition-needs for the node (default: none).

    Returns:
        The gn.Map node.
    """
    # Use None instead of a mutable {} default (shared-dict pitfall).
    conditions = {} if conditions is None else conditions
    outputs = self.output_vars()
    digitizer = self.values['digitizer']
    channel = self.values['channel']

    def func(d):
        # payload layout appears to be (start positions, peak waveforms) —
        # verify against the digitizer data format.
        peaks = d[digitizer][channel]
        start_pos = peaks[0]
        peaks = peaks[1]
        # sample indices covering each peak, starting at its start position
        times = [np.arange(start, start + len(peak))
                 for start, peak in zip(start_pos, peaks)]
        return start_pos, times, peaks, len(peaks)

    node = gn.Map(name=self.name() + "_operation", condition_needs=conditions,
                  inputs=inputs, outputs=outputs, func=func, parent=self.name())
    return node
def to_operation(self, **kwargs):
    """Pass every input through unchanged, bundled as a tuple."""
    def passthrough(*values):
        return values

    return gn.Map(name=self.name()+"_operation", **kwargs, func=passthrough)
def to_operation(self, inputs, outputs, **kwargs):
    """Build nodes computing per-key mean/stddev/standard-error statistics.

    NOTE(review): the ``outputs`` parameter is immediately replaced by
    self.output_vars() — confirm callers expect this.
    """
    outputs = self.output_vars()

    def reduction(cv, v):
        # ReduceByKey reduction: accumulate every value seen for a key
        cv.extend(v)
        return cv

    if self.values['binned']:
        # Fixed bin edges over [min, max); keys are digitized into these bins.
        bins = np.histogram_bin_edges(np.arange(self.values['min'], self.values['max']), bins=self.values['bins'], range=(self.values['min'], self.values['max']))
        map_outputs = [self.name()+'_bin', self.name()+'_map_count']
        reduce_outputs = [self.name()+'_reduce_count']

        def func(k, v):
            # key -> bin index; value wrapped in a list for the reduction
            return np.digitize(k, bins), [v]

        def stats(d):
            # start every bin at (mean=0, stddev=0, error=0) so empty bins
            # still appear in the output
            res = {bins[i]: (0, 0, 0) for i in range(0, bins.size)}
            for k, v in d.items():
                try:
                    stddev = np.std(v)
                    res[bins[k]] = (np.mean(v), stddev, stddev/np.sqrt(len(v)))
                except IndexError:
                    # presumably guards out-of-range digitize indices;
                    # such entries are silently dropped
                    pass
            keys, values = zip(*sorted(res.items()))
            mean, stddev, error = zip(*values)
            return np.array(keys), np.array(mean), np.array(stddev), np.array(error)

        nodes = [
            gn.Map(name=self.name()+'_map', inputs=inputs, outputs=map_outputs,
                   func=func, **kwargs),
            gn.ReduceByKey(name=self.name()+'_reduce', inputs=map_outputs,
                           outputs=reduce_outputs, reduction=reduction, **kwargs),
            gn.Map(name=self.name()+'_stats', inputs=reduce_outputs,
                   outputs=outputs, func=stats, **kwargs)
        ]
    else:
        # Unbinned: the 'Bin' input itself serves as the reduction key.
        map_outputs = [self.name()+'_map_count']
        reduce_outputs = [self.name()+'_reduce_count']

        def stats(d):
            res = {}
            for k, v in d.items():
                stddev = np.std(v)
                res[k] = (np.mean(v), stddev, stddev/np.sqrt(len(v)))
            keys, values = zip(*sorted(res.items()))
            mean, stddev, error = zip(*values)
            return np.array(keys), np.array(mean), np.array(stddev), np.array(error)

        nodes = [
            gn.Map(name=self.name()+'_map', inputs=[inputs['Value']],
                   outputs=map_outputs, func=lambda a: [a], **kwargs),
            gn.ReduceByKey(name=self.name()+'_reduce',
                           inputs=[inputs['Bin']]+map_outputs,
                           outputs=reduce_outputs, reduction=reduction, **kwargs),
            gn.Map(name=self.name()+'_stats', inputs=reduce_outputs,
                   outputs=outputs, func=stats, **kwargs)
        ]
    return nodes
def to_operation(self, **kwargs):
    """Build a node that always emits the configured constant value."""
    value = self.values['constant']

    def emit():
        return value

    return gn.Map(name=self.name()+"_operation", **kwargs, func=emit)
def to_operation(self, **kwargs):
    """Build a Map node running the edge finder with default settings."""
    edge_finder = EdgeFinderProc({})
    return gn.Map(name=self.name() + "_operation", **kwargs, func=edge_finder)
def to_operation(self, **kwargs):
    """Build a node summing its input with a float64 accumulator."""
    def total(arr):
        return np.sum(arr, dtype=np.float64)

    return gn.Map(name=self.name() + "_operation", **kwargs, func=total)