def __exit__(self, exc_type, exc_value, traceback):
    '''
    Check the results.

    If the managed block raised (any of *exc_type*, *exc_value* or
    *traceback* is not None), all checks are skipped and the exception
    propagates. Otherwise, unless a failure was expected (*self.fails*),
    the fit result is printed and validated, and the parameter values of
    the PDF(s) are reset to their initial values.
    '''
    # An exception occurred inside the "with" block; skip the checks.
    # Returning None (falsy) lets the exception propagate.
    if any(map(lambda e: e is not None, (exc_type, exc_value, traceback))):
        return
    if not self.fails:
        # Print the result
        print(self.__fmin)
        print(self.__results)
        # Check that the fit run fine
        assert not self.__fmin.hesse_failed
        # Check the values of the parameters
        if self.simultaneous:
            # Collect the arguments from every category of the
            # simultaneous fit into a single registry
            results = minkit.Registry()
            for c in self.proxy:
                results += c.pdf.all_real_args
        else:
            results = self.proxy.all_real_args
        # Each fitted value must lie within "nsigma" errors of the
        # value used to generate the sample
        for n, v in self.initials.items():
            rv = results.get(n)
            assert np.allclose(v, rv.value, atol=self.nsigma * rv.error)
    # Reset the values of the PDF(s)
    # NOTE(review): source formatting was collapsed; the reset is assumed
    # to run regardless of *self.fails* — confirm against upstream history.
    if self.simultaneous:
        for c in self.proxy:
            c.pdf.set_values(**self.initials)
    else:
        self.proxy.set_values(**self.initials)
def test_evaluation_grid():
    ''' Test the "evaluation_grid" function. '''
    # NOTE(review): this function is shadowed by a later definition of the
    # same name in this file, so pytest never collects this version. It also
    # calls "evaluation_grid" with an older signature (no "aop" argument,
    # raw bounds instead of "bounds_for_range") — presumably dead code left
    # from an API migration; confirm and delete.
    x = minkit.Parameter('x', bounds=(0, 20))
    y = minkit.Parameter('y', bounds=(0, 20))
    n = 100
    # Test single range
    g = dataset.evaluation_grid(minkit.Registry([x]), x.bounds, n)
    assert len(g) == n
    # Test multi-range
    g = dataset.evaluation_grid(minkit.Registry(
        [x, y]), np.concatenate([x.bounds, y.bounds]), n)
    assert len(g) == n**2
def test_evaluation_grid():
    ''' Test the "evaluation_grid" function. '''
    xpar = minkit.Parameter('x', bounds=(0, 20))
    ypar = minkit.Parameter('y', bounds=(40, 60))

    npoints = 100

    def build_grid(registry):
        # Take the bounds of the full range and build the grid on them
        bounds = minkit.base.parameters.bounds_for_range(registry, 'full')[0]
        return dataset.evaluation_grid(aop, registry, bounds, npoints)

    # Test single range
    single = minkit.Registry([xpar])
    grid = build_grid(single)
    assert len(grid.values.ua) == npoints

    # Test multi-range
    double = minkit.Registry([xpar, ypar])
    grid = build_grid(double)
    assert len(grid.values.ua) == len(double) * npoints**2
def test_uniform_sample():
    '''
    Test the "uniform_sample" function.

    Checks that the sample has the expected length, that every generated
    value lies within the parameter bounds and, for the multi-dimensional
    case, that the sample is roughly uniform (half of the entries above
    the mid-point of each range, within a 5% relative tolerance).
    '''
    x = minkit.Parameter('x', bounds=(0, 20))
    y = minkit.Parameter('y', bounds=(0, 20))
    n = 10000
    # Test single range
    p = minkit.Registry([x])
    b = minkit.base.parameters.bounds_for_range(p, 'full')[0]
    g = dataset.uniform_sample(aop, p, b, n)
    assert len(g.values.ua) == n
    a = g['x']
    lb, ub = x.bounds
    # All values must fall inside the bounds
    assert aop.count_nonzero(aop.ge(a, lb) & aop.le(a, ub)) == len(g)
    # Test multi-range
    p = minkit.Registry([x, y])
    b = minkit.base.parameters.bounds_for_range(p, 'full')[0]
    g = dataset.uniform_sample(aop, p, b, n)
    assert len(g.values.ua) == len(p) * n
    # Loop variable renamed from "p" so it no longer shadows the registry
    for par in (x, y):
        a = g[par.name]
        lb, ub = par.bounds
        assert aop.count_nonzero(aop.ge(a, lb) & aop.le(a, ub)) == len(g)
        # Roughly half of the entries must lie above the mid-point
        r = aop.count_nonzero(aop.ge(a, 0.5 * (lb + ub))) / len(a)
        assert np.allclose(r, 0.5, rtol=0.05)
def test_registry(tmpdir):
    '''
    Test the "Registry" class.

    Checks the JSON serialization round-trip, that copies hold new
    parameter objects, that adding a *different* object with an already
    registered name raises :class:`ValueError`, and that re-adding the
    *same* object is a no-op that leaves the registry unmodified.
    '''
    a = minkit.Parameter('a', 1., (-5, +5), None, 0.1, False)
    b = minkit.Parameter('b', 0., (-10, +10), None, 2., True)
    f = minkit.Registry([a, b])
    # JSON serialization round-trip must preserve names and parameters
    with open(os.path.join(tmpdir, 'r.json'), 'wt') as fi:
        json.dump(f.to_json_object(), fi)
    with open(os.path.join(tmpdir, 'r.json'), 'rt') as fi:
        s = minkit.Registry.from_json_object(json.load(fi))
    assert f.names == s.names
    for fv, sv in zip(f, s):
        check_parameters(fv, sv)
    # A copy must contain new objects, not aliases ("o is not p" replaces
    # the unidiomatic "not o is p")
    assert all(o is not p for o, p in zip(s, s.copy()))
    # Must raise errors if different objects with the same names are added
    # to the registry
    a2 = minkit.Parameter('a', 1., (-5, +5), None, 0.1, False)
    with pytest.raises(ValueError):
        f.append(a2)
    with pytest.raises(ValueError):
        f.insert(0, a2)
    with pytest.raises(ValueError):
        _ = f + [a2, a2]
    with pytest.raises(ValueError):
        f += [a2, a2]
    # These operations do not raise an error, and the registry is not
    # modified
    pl = len(f)
    f.append(a)
    f.insert(0, a)
    f += [a, a]
    _ = f + [a, b]
    assert len(f) == pl