def __init__(self, measure, beta, kappa, normed, coords, check_input):
    """Validate the measure name and construct the underlying Measure.

    Raises a ValueError when a discontinued measure family ('efpm' or
    'efm') is requested.
    """
    # these measure families were removed; reject them explicitly
    for retired in ('efpm', 'efm'):
        if retired in measure:
            raise ValueError("'{}' no longer supported".format(retired))

    # delegate all option handling to the Measure object
    self._measure = Measure(measure, beta, kappa, normed, coords, check_input)
def __init__(self, kwargs):
    """Build the internal Measure from a dict of measure-related kwargs.

    The dict must contain a 'measure' key selecting the measure type;
    the remaining entries configure it.
    """
    # ensure only recognized measure options were passed
    kwargs_check('EFBase', kwargs, allowed=MEASURE_KWARGS)

    # 'measure' names the measure; everything else is forwarded as options
    measure_name = kwargs.pop('measure')
    self._measure = Measure(measure_name, **kwargs)
class EFPBase(with_metaclass(ABCMeta, object)):
    """Abstract base providing measure handling and parallel batch evaluation."""

    def __init__(self, measure, beta, kappa, normed, coords, check_input):
        """Validate the measure name and store the Measure object."""
        # discontinued measure families are rejected up front
        for retired in ('efpm', 'efm'):
            if retired in measure:
                raise ValueError("'{}' no longer supported".format(retired))

        # store measure object
        self._measure = Measure(measure, beta, kappa, normed, coords, check_input)

    def get_zs_thetas_dict(self, event, zs, thetas):
        """Return (zs, {w: thetas**w for needed weights w}).

        If `event` is given, zs and thetas are (re)computed from it via the
        measure; otherwise both `zs` and `thetas` must be provided.
        """
        if event is not None:
            zs, thetas = self._measure.evaluate(event)
        elif zs is None or thetas is None:
            raise TypeError(
                'if event is None then zs and/or thetas cannot also be None')

        # precompute each required power of the angular quantities
        thetas_dict = {}
        for w in self._weight_set:
            thetas_dict[w] = thetas**w
        return zs, thetas_dict

    @abstractproperty
    def _weight_set(self):
        # set of edge weights whose powers of thetas are needed
        pass

    @property
    def measure(self):
        """Name of the measure in use."""
        return self._measure.measure

    @property
    def beta(self):
        """Angular weighting exponent of the measure."""
        return self._measure.beta

    @property
    def kappa(self):
        """Energy weighting exponent of the measure."""
        return self._measure.kappa

    @property
    def normed(self):
        """Whether energies are normalized by the measure."""
        return self._measure.normed

    @property
    def coords(self):
        """Coordinate system anticipated for input events."""
        return self._measure.coords

    @property
    def check_input(self):
        """Whether inputs are validated on each evaluation."""
        return self._measure.check_input

    def _batch_compute_func(self, event):
        # worker-side helper; marks the invocation as part of a batch call
        return self.compute(event, batch_call=True)

    @abstractmethod
    def compute(self, *args, **kwargs):
        """Compute the observable on a single event."""
        pass

    def batch_compute(self, events, n_jobs=None):
        """Computes the value of the EFP on several events.

        **Arguments**

        - **events** : array_like or `fastjet.PseudoJet`
            - The events as an array of arrays of particles in coordinates
            matching those anticipated by `coords`.
        - **n_jobs** : _int_ or `None`
            - The number of worker processes to use. A value of `None` will
            use as many processes as there are CPUs on the machine.

        **Returns**

        - _1-d numpy.ndarray_
            - A vector of the EFP value for each event.
        """
        if n_jobs is None:
            n_jobs = multiprocessing.cpu_count() or 1

        chunksize = max(len(events) // n_jobs, 1)

        # setup processor pool and map the per-event compute over all events
        with create_pool(n_jobs) as pool:
            mapped = pool.map(self._batch_compute_func, events, chunksize)
            results = np.asarray(list(mapped))

        return results
class EFPBase(with_metaclass(ABCMeta, object)):
    """Abstract base class for EFP-like observables.

    Builds and stores the underlying Measure, exposes its options as
    read-only properties, and provides parallel batch evaluation of events.
    """

    def __init__(self, measure, beta, kappa, normed, coords, check_input):
        """Validate the measure name and store the Measure object.

        Raises ValueError for the discontinued 'efpm' and 'efm' measures.
        """
        if 'efpm' in measure:
            raise ValueError('\'efpm\' no longer supported')
        if 'efm' in measure:
            raise ValueError('\'efm\' no longer supported')

        # store measure object
        self._measure = Measure(measure, beta, kappa, normed, coords, check_input)

    def get_zs_thetas_dict(self, event, zs, thetas):
        """Return (zs, {w: thetas**w}) from an event or from provided arrays.

        If `event` is given it takes precedence and zs/thetas are computed
        from it; otherwise both `zs` and `thetas` must be supplied.
        """
        if event is not None:
            zs, thetas = self._measure.evaluate(event)
        elif zs is None or thetas is None:
            raise TypeError(
                'if event is None then zs and/or thetas cannot also be None')

        return zs, {w: thetas**w for w in self._weight_set}

    @abstractproperty
    def _weight_set(self):
        # set of edge weights whose powers of thetas are needed
        pass

    @property
    def measure(self):
        """Name of the measure in use."""
        return self._measure.measure

    @property
    def beta(self):
        """Angular weighting exponent of the measure."""
        return self._measure.beta

    @property
    def kappa(self):
        """Energy weighting exponent of the measure."""
        return self._measure.kappa

    @property
    def normed(self):
        """Whether energies are normalized by the measure."""
        return self._measure.normed

    @property
    def check_input(self):
        """Whether inputs are validated on each evaluation."""
        return self._measure.check_input

    @property
    def subslicing(self):
        """Whether the measure supports subslicing."""
        return self._measure.subslicing

    def _batch_compute_func(self, event):
        # worker-side helper; marks the invocation as part of a batch call
        return self.compute(event, batch_call=True)

    @abstractmethod
    def compute(self, *args, **kwargs):
        """Compute the observable on a single event."""
        pass

    def batch_compute(self, events, n_jobs=-1):
        """Computes the value of the EFP on several events.

        **Arguments**

        - **events** : array_like or `fastjet.PseudoJet`
            - The events as an array of arrays of particles in coordinates
            matching those anticipated by `coords`.
        - **n_jobs** : _int_
            - The number of worker processes to use. A value of `-1` will
            attempt to use as many processes as there are CPUs on the machine.

        **Returns**

        - _1-d numpy.ndarray_
            - A vector of the EFP value for each event.
        """
        # BUG FIX: resolve the worker count into the local n_jobs rather than
        # only assigning self.n_jobs in the -1 branch — previously an explicit
        # n_jobs raised AttributeError because self.n_jobs was never set.
        if n_jobs == -1:
            try:
                n_jobs = multiprocessing.cpu_count()
            # cpu_count may raise NotImplementedError on unusual platforms;
            # the old bare `except:` also swallowed KeyboardInterrupt etc.
            except NotImplementedError:
                n_jobs = 4  # choose reasonable value

        # keep the attribute for backward compatibility with external readers
        self.n_jobs = n_jobs

        # setup processor pool
        chunksize = max(len(events) // n_jobs, 1)

        # Pool is not a context manager in python 2; try/finally works on both
        # and, unlike the old code, closes the pool even if imap raises
        pool = multiprocessing.Pool(n_jobs)
        try:
            results = np.asarray(
                list(pool.imap(self._batch_compute_func, events, chunksize)))
        finally:
            pool.close()

        return results