Esempio n. 1
0
def _load_new(obj):
    """Read measurements from a YAML stream or file that are compatible
    with the format generated by the `get_yaml` method of
    `flavio.classes.Measurement`.

    Returns a list with the names of the loaded measurements.
    """
    # Use safe_load: the measurement format is plain data (no Python
    # object tags), and yaml.load without an explicit Loader is
    # deprecated (and a TypeError in PyYAML >= 6).
    measurements = yaml.safe_load(obj)
    if isinstance(measurements, dict):
        # a single measurement given as one mapping
        measurements = [Measurement.from_yaml_dict(measurements, pname='observables')]
    else:
        # a list of measurement mappings
        measurements = [Measurement.from_yaml_dict(m, pname='observables') for m in measurements]
    return [m.name for m in measurements]
Esempio n. 2
0
def np_measurement(name, w, observables, covariance):
    """Measurement instance of `observables` measured with `covariance`
    assuming the central values to be equal to the NP predictions given
    the `Wilson` instance `w`."""
    central_values = []
    for obs in observables:
        # normalize the observable to a dict of the form
        # {'name': ..., 'arg1': ...} and predict it under `w`
        obs_dict = Observable.argument_format(obs, 'dict')
        obs_name = obs_dict.pop('name')
        central_values.append(flavio.np_prediction(obs_name, w, **obs_dict))

    dist = MultivariateNormalDistribution(central_values, covariance=covariance)
    measurement = Measurement(name)
    measurement.add_constraint(observables, dist)
    return measurement
Esempio n. 3
0
def _load_new(obj):
    """Read measurements from a YAML stream or file that are compatible
    with the format generated by the `get_yaml` method of
    `flavio.classes.Measurement`.

    Returns a list with the names of the loaded measurements.
    """
    # Use safe_load: the measurement format is plain data (no Python
    # object tags), and yaml.load without an explicit Loader is
    # deprecated (and a TypeError in PyYAML >= 6).
    measurements = yaml.safe_load(obj)
    if isinstance(measurements, dict):
        # a single measurement given as one mapping
        measurements = [
            Measurement.from_yaml_dict(measurements, pname='observables')
        ]
    else:
        # a list of measurement mappings
        measurements = [
            Measurement.from_yaml_dict(m, pname='observables')
            for m in measurements
        ]
    return [m.name for m in measurements]
Esempio n. 4
0
 def test_exp_combo(self):
     """Test `flavio.combine_measurements` for a single observable."""
     o = Observable('test_obs')
     o.arguments = ['x']
     m = Measurement('test_obs measurement 1')
     # NOTE(review): a 2D multivariate constraint is attached to a single
     # observable here — presumably to exercise how combine_measurements
     # deals with non-univariate constraints; confirm intent.
     m.add_constraint([('test_obs', 1)], MultivariateNormalDistribution([1, 2], np.eye(2)))
     # error: no measurement
     with self.assertRaises(ValueError):
         flavio.combine_measurements('test_obs', x=1, include_measurements=['bla'])
     # replace with a univariate constraint: the combination is trivial
     m.add_constraint([('test_obs', 1)], NormalDistribution(2, 3))
     combo = flavio.combine_measurements('test_obs', x=1)
     self.assertEqual(combo.central_value, 2)
     self.assertEqual(combo.standard_deviation, 3)
     # add a second measurement with equal uncertainty:
     # expect the weighted average 2.5 with error 3/sqrt(2)
     m2 = Measurement('test_obs measurement 2')
     m2.add_constraint([('test_obs', 1)], NormalDistribution(3, 3))
     combo = flavio.combine_measurements('test_obs', x=1)
     self.assertAlmostEqual(combo.central_value, 2.5)
     self.assertAlmostEqual(combo.standard_deviation, sqrt(9 / 2))
     # clean up the global Observable instance
     # NOTE(review): the two Measurement instances are not deleted and stay
     # in the global registry — consider del_instance for them as well.
     Observable.del_instance('test_obs')
Esempio n. 5
0
    def test_profiler(self):
        """Test the 1D and 2D likelihood profilers on a dummy fit."""
        # defining some dummy parameters and observables
        Parameter('tmp a')
        Parameter('tmp b')
        Parameter('tmp c')
        Parameter('tmp d')
        p = ParameterConstraints()
        p.set_constraint('tmp b', '2+-0.3')
        p.set_constraint('tmp c', '0.2+-0.1')
        p.set_constraint('tmp d', '1+-0.5')

        def prediction(wc_obj, par):
            # simple smooth function of all four dummy parameters
            return par['tmp a']**2 + par['tmp b'] + par['tmp c'] + par[
                'tmp d']**2

        flavio.Observable('tmp obs')
        Prediction('tmp obs', prediction)
        m = Measurement('tmp measurement')
        m.add_constraint(['tmp obs'],
                         flavio.statistics.probability.NormalDistribution(
                             1, 0.2))
        # test 1D profiler
        fit_1d = FrequentistFit('test profiler 1d', p, ['tmp a'],
                                ['tmp b', 'tmp c', 'tmp d'], ['tmp obs'])
        profiler_1d = profiler.Profiler1D(fit_1d, -10, 10)
        x, z, n = profiler_1d.run(steps=4)
        self.assertEqual(x.shape, (4, ))
        self.assertEqual(z.shape, (4, ))
        self.assertEqual(n.shape, (3, 4))
        npt.assert_array_equal(x, profiler_1d.x)
        npt.assert_array_equal(z, profiler_1d.log_profile_likelihood)
        npt.assert_array_equal(n, profiler_1d.profile_nuisance)
        pdat = profiler_1d.pvalue_prob_plotdata()
        npt.assert_array_equal(pdat['x'], x)
        # test 2D profiler
        # BUGFIX: the parameter is named 'tmp d', not 'd'; its constraint
        # must be removed since it becomes a fit parameter of the 2D fit
        p.remove_constraint('tmp d')
        fit_2d = FrequentistFit('test profiler 2d', p, ['tmp a', 'tmp d'],
                                ['tmp b', 'tmp c'], ['tmp obs'])
        profiler_2d = profiler.Profiler2D(fit_2d, -10, 10, -10, 10)
        x, y, z, n = profiler_2d.run(steps=(3, 4))
        self.assertEqual(x.shape, (3, ))
        self.assertEqual(y.shape, (4, ))
        self.assertEqual(z.shape, (3, 4))
        self.assertEqual(n.shape, (2, 3, 4))
        npt.assert_array_equal(x, profiler_2d.x)
        npt.assert_array_equal(y, profiler_2d.y)
        npt.assert_array_equal(z, profiler_2d.log_profile_likelihood)
        npt.assert_array_equal(n, profiler_2d.profile_nuisance)
        pdat = profiler_2d.contour_plotdata()
        npt.assert_array_almost_equal(pdat['z'], -2 * (z - np.max(z)))
        # delete dummy instances; a distinct loop variable avoids
        # shadowing the ParameterConstraints object `p`
        for par_name in ['tmp a', 'tmp b', 'tmp c', 'tmp d']:
            Parameter.del_instance(par_name)
        FrequentistFit.del_instance('test profiler 1d')
        # BUGFIX: also delete the 2D fit so no global state is left behind
        FrequentistFit.del_instance('test profiler 2d')
        Observable.del_instance('tmp obs')
        Measurement.del_instance('tmp measurement')
Esempio n. 6
0
def _load(obj):
    """Read measurements from a YAML stream or file.

    For every measurement defined in the stream, a `Measurement`
    instance is created, optional metadata attributes are copied, and
    the constraints listed under 'values' are attached. Returns the
    list of measurement names read.
    """
    # NOTE(review): yaml.load without an explicit Loader is deprecated
    # (and raises a TypeError in PyYAML >= 6); consider yaml.safe_load.
    measurements = yaml.load(obj)
    for m_name, m_data in measurements.items():
        m = Measurement(m_name)
        # copy optional metadata attributes if present
        for arg in ['inspire', 'hepdata', 'experiment', 'url']:
            if arg in m_data:
                setattr(m, arg, m_data[arg])
        if 'observables' in m_data:
            # for multivariate constraints
            pd = probability.dict2dist(m_data['values'])
            pd = probability.convolve_distributions(pd)
            # for observables without arguments (i.e. strings), this is trivial;
            obs_list = [
                obs if isinstance(obs, str)
                # for obs. with arguments, need to convert dict of the form
                # {'name': myname, 'arg1': v1, ...} to a tuple of the form
                # (myname, v1, ...)
                else
                tuple([obs['name']] +
                      [obs[arg] for arg in Observable[obs['name']].arguments])
                for obs in m_data['observables']
            ]
            m.add_constraint(obs_list, pd)
        elif 'correlation' not in m_data:
            # for univariate constraints
            if isinstance(m_data['values'], list):
                for value_dict in m_data['values']:
                    args = Observable[value_dict['name']].arguments
                    # numerical values of arguments, e.g. [1, 6]
                    args_num = [value_dict[a] for a in args]
                    # insert string name in front of argument values and turn it
                    # into a tuple, e.g. ('FL(B0->K*mumu)', 1, 6)
                    args_num.insert(0, value_dict['name'])
                    obs_tuple = tuple(args_num)
                    if isinstance(value_dict['value'], dict):
                        m.set_constraint(obs_tuple,
                                         constraint_dict=value_dict['value'])
                    else:
                        m.set_constraint(obs_tuple, value_dict['value'])
            else:  # otherwise, 'values' is a dict just containing name: constraint_string
                for obs, value in m_data['values'].items():
                    if isinstance(value, dict) or isinstance(value, list):
                        m.set_constraint(obs, constraint_dict=value)
                    else:
                        m.set_constraint(obs, value)
        else:
            # for multivariate normal constraints
            observables = []
            central_values = []
            errors = []
            if isinstance(m_data['values'], list):
                for value_dict in m_data['values']:
                    # if "value" is a list, it contains the values of observable
                    # arguments (like q^2)
                    args = Observable[value_dict['name']].arguments
                    args_num = [value_dict[a] for a in args]
                    error_dict = errors_from_string(value_dict['value'])
                    args_num.insert(0, value_dict['name'])
                    obs_tuple = tuple(args_num)
                    observables.append(obs_tuple)
                    central_values.append(error_dict['central_value'])
                    # combine all errors in quadrature; asymmetric errors
                    # contribute the product of their two sides
                    squared_error = 0.
                    for sym_err in error_dict['symmetric_errors']:
                        squared_error += sym_err**2
                    for asym_err in error_dict['asymmetric_errors']:
                        squared_error += asym_err[0] * asym_err[1]
                    errors.append(sqrt(squared_error))
            else:  # otherwise, 'values' is a dict just containing name: constraint_string
                for obs, value in m_data['values'].items():
                    observables.append(obs)
                    error_dict = errors_from_string(value)
                    central_values.append(error_dict['central_value'])
                    squared_error = 0.
                    for sym_err in error_dict['symmetric_errors']:
                        squared_error += sym_err**2
                    for asym_err in error_dict['asymmetric_errors']:
                        squared_error += asym_err[0] * asym_err[1]
                    errors.append(sqrt(squared_error))
            correlation = _fix_correlation_matrix(m_data['correlation'],
                                                  len(observables))
            covariance = np.outer(np.asarray(errors),
                                  np.asarray(errors)) * correlation
            if not np.all(np.linalg.eigvals(covariance) > 0):
                # if the covariance matrix is not positive definite, try a dirty trick:
                # multiply all the correlations by 0.99.
                n_dim = len(correlation)
                correlation = (correlation -
                               np.eye(n_dim)) * 0.99 + np.eye(n_dim)
                covariance = np.outer(np.asarray(errors),
                                      np.asarray(errors)) * correlation
                # if it still isn't positive definite, give up.
                assert np.all(
                    np.linalg.eigvals(covariance) > 0
                ), "The covariance matrix is not positive definite!" + str(
                    covariance)
            m.add_constraint(
                observables,
                probability.MultivariateNormalDistribution(
                    central_values, covariance))
    return list(measurements.keys())
Esempio n. 7
0
def _load(obj):
    """Read measurements from a YAML stream or file.

    For every measurement defined in the stream, a `Measurement`
    instance is created, optional metadata attributes are copied, and
    the constraints listed under 'values' are attached. Returns the
    list of measurement names read.
    """
    # safe_load suffices for this plain data format; yaml.load without
    # an explicit Loader is deprecated (TypeError in PyYAML >= 6)
    measurements = yaml.safe_load(obj)
    for m_name, m_data in measurements.items():
        m = Measurement(m_name)
        # copy optional metadata attributes if present
        for arg in ['inspire', 'hepdata', 'experiment', 'url']:
            if arg in m_data:
                setattr(m, arg, m_data[arg])
        if 'correlation' not in m_data:
            # uncorrelated: one univariate normal constraint per observable
            if isinstance(m_data['values'], list):
                for value_dict in m_data['values']:
                    # each entry is a dict also containing the values of
                    # observable arguments (like q^2)
                    args = Observable.get_instance(value_dict['name']).arguments
                    args_num = [value_dict[a] for a in args]
                    error_dict = errors_from_string(value_dict['value'])
                    central_value = error_dict['central_value']
                    # combine all errors in quadrature; asymmetric errors
                    # contribute the product of their two sides
                    squared_error = 0.
                    for sym_err in error_dict['symmetric_errors']:
                        squared_error += sym_err**2
                    for asym_err in error_dict['asymmetric_errors']:
                        squared_error += asym_err[0]*asym_err[1]
                    # observable tuple: name followed by the argument values
                    args_num.insert(0, value_dict['name'])
                    obs_tuple = tuple(args_num)
                    m.add_constraint([obs_tuple], probability.NormalDistribution(central_value, sqrt(squared_error)))
            else: # otherwise, 'values' is a dict just containing name: constraint_string
                for obs, value in m_data['values'].items():
                    error_dict = errors_from_string(value)
                    central_value = error_dict['central_value']
                    squared_error = 0.
                    for sym_err in error_dict['symmetric_errors']:
                        squared_error += sym_err**2
                    for asym_err in error_dict['asymmetric_errors']:
                        squared_error += asym_err[0]*asym_err[1]
                    m.add_constraint([obs], probability.NormalDistribution(central_value, sqrt(squared_error)))

        else:
            # correlated: build one multivariate normal constraint
            observables = []
            central_values = []
            errors = []
            if isinstance(m_data['values'], list):
                for value_dict in m_data['values']:
                    # each entry is a dict also containing the values of
                    # observable arguments (like q^2)
                    args = Observable.get_instance(value_dict['name']).arguments
                    args_num = [value_dict[a] for a in args]
                    error_dict = errors_from_string(value_dict['value'])
                    args_num.insert(0, value_dict['name'])
                    obs_tuple = tuple(args_num)
                    observables.append(obs_tuple)
                    central_values.append(error_dict['central_value'])
                    squared_error = 0.
                    for sym_err in error_dict['symmetric_errors']:
                        squared_error += sym_err**2
                    for asym_err in error_dict['asymmetric_errors']:
                        squared_error += asym_err[0]*asym_err[1]
                    errors.append(sqrt(squared_error))
            else: # otherwise, 'values' is a dict just containing name: constraint_string
                for obs, value in m_data['values'].items():
                    observables.append(obs)
                    error_dict = errors_from_string(value)
                    central_values.append(error_dict['central_value'])
                    squared_error = 0.
                    for sym_err in error_dict['symmetric_errors']:
                        squared_error += sym_err**2
                    for asym_err in error_dict['asymmetric_errors']:
                        squared_error += asym_err[0]*asym_err[1]
                    # BUGFIX: append exactly once per observable, *after*
                    # summing all asymmetric errors (this line was inside the
                    # loop above, so observables without asymmetric errors got
                    # no error entry and the covariance dimensions broke)
                    errors.append(sqrt(squared_error))
            correlation = _fix_correlation_matrix(m_data['correlation'], len(observables))
            covariance = np.outer(np.asarray(errors), np.asarray(errors))*correlation
            if not np.all(np.linalg.eigvals(covariance) > 0):
                # if the covariance matrix is not positive definite, try a dirty trick:
                # multiply all the correlations by 0.99.
                n_dim = len(correlation)
                correlation = (correlation - np.eye(n_dim))*0.99 + np.eye(n_dim)
                covariance = np.outer(np.asarray(errors), np.asarray(errors))*correlation
                # if it still isn't positive definite, give up.
                assert np.all(np.linalg.eigvals(covariance) > 0), "The covariance matrix is not positive definite!" + str(covariance)
            m.add_constraint(observables, probability.MultivariateNormalDistribution(central_values, covariance))
    # return the names of the measurements read, consistent with the
    # other loader variants
    return list(measurements.keys())
Esempio n. 8
0
def _load(obj):
    """Read measurements from a YAML stream or file.

    For every measurement defined in the stream, a `Measurement`
    instance is created, optional metadata attributes are copied, and
    the constraints listed under 'values' are attached. Returns the
    list of measurement names read.
    """
    # safe_load suffices for this plain data format; yaml.load without
    # an explicit Loader is deprecated (TypeError in PyYAML >= 6)
    measurements = yaml.safe_load(obj)
    for m_name, m_data in measurements.items():
        m = Measurement(m_name)
        # copy optional metadata attributes if present
        for arg in ['inspire', 'hepdata', 'experiment', 'url']:
            if arg in m_data:
                setattr(m, arg, m_data[arg])
        if 'correlation' not in m_data:
            # uncorrelated: one univariate normal constraint per observable
            if isinstance(m_data['values'], list):
                for value_dict in m_data['values']:
                    # each entry is a dict also containing the values of
                    # observable arguments (like q^2)
                    args = Observable.get_instance(value_dict['name']).arguments
                    args_num = [value_dict[a] for a in args]
                    # (removed a dead `constraints_from_string` call whose
                    # result was never used in this branch)
                    error_dict = errors_from_string(value_dict['value'])
                    central_value = error_dict['central_value']
                    # combine all errors in quadrature; asymmetric errors
                    # contribute the product of their two sides
                    squared_error = 0.
                    for sym_err in error_dict['symmetric_errors']:
                        squared_error += sym_err**2
                    for asym_err in error_dict['asymmetric_errors']:
                        squared_error += asym_err[0]*asym_err[1]
                    # observable tuple: name followed by the argument values
                    args_num.insert(0, value_dict['name'])
                    obs_tuple = tuple(args_num)
                    m.add_constraint([obs_tuple], probability.NormalDistribution(central_value, sqrt(squared_error)))
            else: # otherwise, 'values' is a dict just containing name: constraint_string
                for obs, value in m_data['values'].items():
                    constraints = constraints_from_string(value)
                    for constraint in constraints:
                        m.add_constraint([obs], constraint)
        else:
            # correlated: build one multivariate normal constraint
            observables = []
            central_values = []
            errors = []
            if isinstance(m_data['values'], list):
                for value_dict in m_data['values']:
                    # each entry is a dict also containing the values of
                    # observable arguments (like q^2)
                    args = Observable.get_instance(value_dict['name']).arguments
                    args_num = [value_dict[a] for a in args]
                    error_dict = errors_from_string(value_dict['value'])
                    args_num.insert(0, value_dict['name'])
                    obs_tuple = tuple(args_num)
                    observables.append(obs_tuple)
                    central_values.append(error_dict['central_value'])
                    squared_error = 0.
                    for sym_err in error_dict['symmetric_errors']:
                        squared_error += sym_err**2
                    for asym_err in error_dict['asymmetric_errors']:
                        squared_error += asym_err[0]*asym_err[1]
                    errors.append(sqrt(squared_error))
            else: # otherwise, 'values' is a dict just containing name: constraint_string
                for obs, value in m_data['values'].items():
                    observables.append(obs)
                    error_dict = errors_from_string(value)
                    central_values.append(error_dict['central_value'])
                    squared_error = 0.
                    for sym_err in error_dict['symmetric_errors']:
                        squared_error += sym_err**2
                    for asym_err in error_dict['asymmetric_errors']:
                        squared_error += asym_err[0]*asym_err[1]
                    # BUGFIX: append exactly once per observable, *after*
                    # summing all asymmetric errors (this line was inside the
                    # loop above, so observables without asymmetric errors got
                    # no error entry and the covariance dimensions broke)
                    errors.append(sqrt(squared_error))
            correlation = _fix_correlation_matrix(m_data['correlation'], len(observables))
            covariance = np.outer(np.asarray(errors), np.asarray(errors))*correlation
            if not np.all(np.linalg.eigvals(covariance) > 0):
                # if the covariance matrix is not positive definite, try a dirty trick:
                # multiply all the correlations by 0.99.
                n_dim = len(correlation)
                correlation = (correlation - np.eye(n_dim))*0.99 + np.eye(n_dim)
                covariance = np.outer(np.asarray(errors), np.asarray(errors))*correlation
                # if it still isn't positive definite, give up.
                assert np.all(np.linalg.eigvals(covariance) > 0), "The covariance matrix is not positive definite!" + str(covariance)
            m.add_constraint(observables, probability.MultivariateNormalDistribution(central_values, covariance))
    # return the names of the measurements read, consistent with the
    # other loader variants
    return list(measurements.keys())
Esempio n. 9
0
def _load(obj):
    """Read measurements from a YAML stream or file.

    For every measurement defined in the stream, a `Measurement`
    instance is created, optional metadata attributes are copied, and
    the constraints listed under 'values' are attached. Returns the
    list of measurement names read.
    """
    # NOTE(review): yaml.load without an explicit Loader is deprecated
    # (and raises a TypeError in PyYAML >= 6); consider yaml.safe_load.
    measurements = yaml.load(obj)
    for m_name, m_data in measurements.items():
        m = Measurement(m_name)
        # copy optional metadata attributes if present
        for arg in ['inspire', 'hepdata', 'experiment', 'url', 'description']:
            if arg in m_data:
                setattr(m, arg, m_data[arg])
        if 'observables' in m_data:
            # for multivariate constraints
            pd = probability.dict2dist(m_data['values'])
            pd = probability.convolve_distributions(pd)
            # for observables without arguments (i.e. strings), this is trivial;
            obs_list = [obs if isinstance(obs, str)
            # for obs. with arguments, need to convert dict of the form
            # {'name': myname, 'arg1': v1, ...} to a tuple of the form
            # (myname, v1, ...)
                        else tuple(
                                [obs['name']]
                              + [obs[arg] for arg in Observable[obs['name']].arguments])
                        for obs in m_data['observables']]
            m.add_constraint(obs_list, pd)
        elif 'correlation' not in m_data:
            # for univariate constraints
            if isinstance(m_data['values'], list):
                for value_dict in m_data['values']:
                    args = Observable[value_dict['name']].arguments
                    # numerical values of arguments, e.g. [1, 6]
                    args_num = [value_dict[a] for a in args]
                    # insert string name in front of argument values and turn it
                    # into a tuple, e.g. ('FL(B0->K*mumu)', 1, 6)
                    args_num.insert(0, value_dict['name'])
                    obs_tuple = tuple(args_num)
                    if isinstance(value_dict['value'], dict):
                        m.set_constraint(obs_tuple, constraint_dict=value_dict['value'])
                    else:
                        m.set_constraint(obs_tuple, value_dict['value'])
            else: # otherwise, 'values' is a dict just containing name: constraint_string
                for obs, value in m_data['values'].items():
                    if isinstance(value, dict) or isinstance(value, list):
                        m.set_constraint(obs, constraint_dict=value)
                    else:
                        m.set_constraint(obs, value)
        else:
            # for multivariate normal constraints
            observables = []
            central_values = []
            errors = []
            if isinstance(m_data['values'], list):
                for value_dict in m_data['values']:
                    # if "value" is a list, it contains the values of observable
                    # arguments (like q^2)
                    args = Observable[value_dict['name']].arguments
                    args_num = [value_dict[a] for a in args]
                    error_dict = errors_from_string(value_dict['value'])
                    args_num.insert(0, value_dict['name'])
                    obs_tuple = tuple(args_num)
                    observables.append(obs_tuple)
                    central_values.append(error_dict['central_value'])
                    # combine all errors in quadrature; asymmetric errors
                    # contribute the product of their two sides
                    squared_error = 0.
                    for sym_err in error_dict['symmetric_errors']:
                        squared_error += sym_err**2
                    for asym_err in error_dict['asymmetric_errors']:
                        squared_error += asym_err[0]*asym_err[1]
                    errors.append(sqrt(squared_error))
            else: # otherwise, 'values' is a dict just containing name: constraint_string
                for obs, value in m_data['values'].items():
                    observables.append(obs)
                    error_dict = errors_from_string(value)
                    central_values.append(error_dict['central_value'])
                    squared_error = 0.
                    for sym_err in error_dict['symmetric_errors']:
                        squared_error += sym_err**2
                    for asym_err in error_dict['asymmetric_errors']:
                        squared_error += asym_err[0]*asym_err[1]
                    errors.append(sqrt(squared_error))
            correlation = _fix_correlation_matrix(m_data['correlation'], len(observables))
            covariance = np.outer(np.asarray(errors), np.asarray(errors))*correlation
            m.add_constraint(observables, probability.MultivariateNormalDistribution(central_values, covariance))
    return list(measurements.keys())
Esempio n. 10
0
flavio.default_parameters.set_constraint('Delta M_S', '20.01 ± 1.25')


def myDeltaMS(wc_obj, par):
    """Prediction for Delta M_S: the SM parameter value rescaled by the
    contribution of the 'CVLL_bsbs' Wilson coefficient of `wc_obj`
    (returned unchanged when no Wilson coefficients are set)."""
    dms_sm = par['Delta M_S']
    if wc_obj.wc is None:
        return dms_sm
    # normalization of the NP contribution relative to the SM
    prefactor = -sq2 / (4 * GF * ckm.xi('t', 'bs')(par)**2)
    c_bs = prefactor * wc_obj.wc['CVLL_bsbs']
    return dms_sm * abs(1 + c_bs / 1.3397e-3)


# register the observable and tie it to the prediction function myDeltaMS
Observable('DMs')
Prediction('DMs', myDeltaMS)
# experimental constraint on DMs
m = Measurement('DMs exp')
m.add_constraint(['DMs'], NormalDistribution(17.757, 0.021))
# experimental constraint on S_psiphi (per the name, an HFLAV 2018 average)
m2 = Measurement('SLAC HFLAV 2018')
m2.add_constraint(['S_psiphi'], NormalDistribution(0.021,
                                                   0.030144582822607187))


def wc_Z(lambdaQ, MZp):
    "Wilson coefficients as functions of Z' couplings"
    if MZp < 100:
        return {}
    alpha = get_alpha(par, MZp)['alpha_e']
    return {
        'C9_bsmumu':
        -pi / (sq2 * GF * MZp**2 * alpha) * lambdaQ / ckm.xi('t', 'bs')(par),
        'C10_bsmumu':
Esempio n. 11
0
 def test_profiler(self):
     """Test the 1D and 2D likelihood profilers, including the
     multiprocessing code path."""
     # defining some dummy parameters and observables
     Parameter('tmp a')
     Parameter('tmp b')
     Parameter('tmp c')
     Parameter('tmp d')
     p = ParameterConstraints()
     p.set_constraint('tmp b', '2+-0.3')
     p.set_constraint('tmp c', '0.2+-0.1')
     p.set_constraint('tmp d', '1+-0.5')

     def prediction(wc_obj, par):
         # simple smooth function of all four dummy parameters
         return par['tmp a']**2 + par['tmp b'] + par['tmp c'] + par['tmp d']**2

     flavio.Observable('tmp obs')
     Prediction('tmp obs', prediction)
     m = Measurement('tmp measurement')
     m.add_constraint(['tmp obs'],
                 flavio.statistics.probability.NormalDistribution(1, 0.2))
     # test 1D profiler
     fit_1d = FrequentistFit('test profiler 1d',
                                 p, ['tmp a'], ['tmp b', 'tmp c', 'tmp d'], ['tmp obs'])
     profiler_1d = profiler.Profiler1D(fit_1d, -10, 10)
     x, z, n = profiler_1d.run(steps=4)
     self.assertEqual(x.shape, (4,))
     self.assertEqual(z.shape, (4,))
     self.assertEqual(n.shape, (3, 4))
     npt.assert_array_equal(x, profiler_1d.x)
     npt.assert_array_equal(z, profiler_1d.log_profile_likelihood)
     npt.assert_array_equal(n, profiler_1d.profile_nuisance)
     pdat = profiler_1d.pvalue_prob_plotdata()
     npt.assert_array_equal(pdat['x'], x)
     # test multiprocessing: results must agree with the serial run
     for threads in [2, 3, 4]:
         xt, zt, nt = profiler_1d.run(steps=4, threads=threads)
         npt.assert_array_almost_equal(x, xt, decimal=4)
         npt.assert_array_almost_equal(z, zt, decimal=4)
         npt.assert_array_almost_equal(n, nt, decimal=4)
     # more threads than grid points must raise
     with self.assertRaises(ValueError):
         profiler_1d.run(steps=4, threads=5)
     # test 2D profiler
     # BUGFIX: the parameter is named 'tmp d', not 'd'; its constraint
     # must be removed since it becomes a fit parameter of the 2D fit
     p.remove_constraint('tmp d')
     fit_2d = FrequentistFit('test profiler 2d',
                                 p, ['tmp a', 'tmp d'], ['tmp b', 'tmp c'], ['tmp obs'])
     profiler_2d = profiler.Profiler2D(fit_2d, -10, 10, -10, 10)
     x, y, z, n = profiler_2d.run(steps=(3, 4))
     self.assertEqual(x.shape, (3,))
     self.assertEqual(y.shape, (4,))
     self.assertEqual(z.shape, (3, 4))
     self.assertEqual(n.shape, (2, 3, 4))
     npt.assert_array_equal(x, profiler_2d.x)
     npt.assert_array_equal(y, profiler_2d.y)
     npt.assert_array_equal(z, profiler_2d.log_profile_likelihood)
     npt.assert_array_equal(n, profiler_2d.profile_nuisance)
     pdat = profiler_2d.contour_plotdata()
     npt.assert_array_almost_equal(pdat['z'], -2 * (z - np.max(z)))
     # test multiprocessing
     for threads in [2, 5, 12]:
         # BUGFIX: actually pass `threads` — the original called run()
         # without it, so the parallel path was never exercised
         xt, yt, zt, nt = profiler_2d.run(steps=(3, 4), threads=threads)
         npt.assert_array_almost_equal(x, xt, decimal=4)
         npt.assert_array_almost_equal(y, yt, decimal=4)
         npt.assert_array_almost_equal(z, zt, decimal=4)
         npt.assert_array_almost_equal(n, nt, decimal=4)
     # more threads than grid points (3 * 4 = 12) must raise
     with self.assertRaises(ValueError):
         profiler_2d.run(steps=(3, 4), threads=13)
     # delete dummy instances; a distinct loop variable avoids
     # shadowing the ParameterConstraints object `p`
     for par_name in ['tmp a', 'tmp b', 'tmp c', 'tmp d']:
         Parameter.del_instance(par_name)
     FrequentistFit.del_instance('test profiler 1d')
     # BUGFIX: also delete the 2D fit so no global state is left behind
     FrequentistFit.del_instance('test profiler 2d')
     Observable.del_instance('tmp obs')
     Measurement.del_instance('tmp measurement')