Example #1
    def __init__(self, bounds):
        super(TwoComponentDissociationModel, self).__init__(bounds)

        r = self.make_parameter("r", value=0.5, min=0, max=1)
        k1 = self.make_parameter("k1")
        k2 = self.make_parameter("k2")
        t = self.make_variable("t")
        y = self.make_variable("y")

        self.sf_model = Model({y: (r * exp(-k1 * t) + (1 - r) * exp(-k2 * t))})
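Below is a minimal self-contained sketch (not from the original project) of fitting the same two-component dissociation curve directly with symfit; the ground-truth values and noise level are assumptions for illustration:

import numpy as np
from symfit import Fit, Model, Parameter, Variable, exp

# Rebuild the model without the wrapper class (hedged reconstruction).
r = Parameter('r', value=0.5, min=0, max=1)
k1 = Parameter('k1', value=1.0, min=0)
k2 = Parameter('k2', value=0.1, min=0)
t, y = Variable('t'), Variable('y')
model = Model({y: r * exp(-k1 * t) + (1 - r) * exp(-k2 * t)})

# Synthetic data; the true values r=0.7, k1=2.0, k2=0.2 are assumed.
tdata = np.linspace(0, 10, 201)
ydata = model(t=tdata, r=0.7, k1=2.0, k2=0.2).y
ydata += np.random.normal(0, 0.01, ydata.shape)

fit = Fit(model, t=tdata, y=ydata)
print(fit.execute())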
Example #3
    def test_known_solution(self):
        p, c1 = parameters('p, c1')
        y, t = variables('y, t')
        p.value = 3.0

        model_dict = {
            D(y, t): - p * y,
        }

        # Let's say we know the exact solution to this problem
        sol = Model({y: exp(- p * t)})

        # Generate some data
        tdata = np.linspace(0, 3, 10001)
        ydata = sol(t=tdata, p=3.22)[0]
        ydata += np.random.normal(0, 0.005, ydata.shape)

        ode_model = ODEModel(model_dict, initial={t: 0.0, y: ydata[0]})
        fit = Fit(ode_model, t=tdata, y=ydata)
        ode_result = fit.execute()

        c1.value = ydata[0]
        fit = Fit(sol, t=tdata, y=ydata)
        fit_result = fit.execute()

        self.assertAlmostEqual(ode_result.value(p) / fit_result.value(p), 1, 2)
        self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)
        self.assertAlmostEqual(ode_result.stdev(p) / fit_result.stdev(p), 1, 3)
Example #4
def test_fixed_parameters():
    """
    Make sure fixed parameters don't change on fitting
    """
    a, b, c, d = parameters('a, b, c, d')
    x, y = variables('x, y')

    c.value = 4.0
    a.min, a.max = 1.0, 5.0  # Bounds are needed for DifferentialEvolution
    b.min, b.max = 1.0, 5.0
    c.min, c.max = 1.0, 5.0
    d.min, d.max = 1.0, 5.0
    c.fixed = True

    model = Model({y: a * exp(-(x - b)**2 / (2 * c**2)) + d})
    # Generate data
    xdata = np.linspace(0, 100)
    ydata = model(xdata, a=2, b=3, c=2, d=2).y

    for minimizer in subclasses(BaseMinimizer):
        if minimizer is ChainedMinimizer:
            continue
        else:
            fit = Fit(model, x=xdata, y=ydata, minimizer=minimizer)
            fit_result = fit.execute()
            # Should still be 4.0, not 2.0!
            assert 4.0 == fit_result.params['c']
Example #6
    def __init__(self, bounds):
        super(OneComponentAssociationModel, self).__init__(bounds)
        k1 = self.make_parameter("k1")
        t = self.make_variable("t")
        y = self.make_variable("y")

        self.sf_model = Model({y: (1 - exp(-k1 * t))})
Example #7
    def __init__(self, bounds):
        super(OneComponentDissociationModel, self).__init__(bounds)
        k1 = self.make_parameter('k1')
        t = self.make_variable('t')
        y = self.make_variable('y')

        self.sf_model = Model({y: exp(-k1*t)})
Example #8
def test_multi_indep():
    '''
    Tests the case with multiple components, multiple parameters and
    multiple independent variables
    '''
    w, x, y, z = sf.variables('w, x, y, z')
    a, b, c = sf.parameters('a, b, c')
    model = sf.Model({
        y: 3 * a * x**2 + b * x * w - c,
        z: sf.exp(a * x - b) + c * w
    })
    x_data = np.arange(10) / 10
    w_data = np.arange(10)

    exact = model.eval_jacobian(x=x_data, w=w_data, a=3.5, b=2, c=5)
    approx = model.finite_difference(x=x_data, w=w_data, a=3.5, b=2, c=5)
    _assert_equal(exact, approx)

    exact = model.eval_jacobian(x=0.3, w=w_data, a=3.5, b=2, c=5)
    approx = model.finite_difference(x=0.3, w=w_data, a=3.5, b=2, c=5)
    _assert_equal(exact, approx)

    exact = model.eval_jacobian(x=0.3, w=5, a=3.5, b=2, c=5)
    approx = model.finite_difference(x=0.3, w=5, a=3.5, b=2, c=5)
    _assert_equal(exact, approx)
Example #9
def model_gauss2d(a_val,
                  x_mu_val,
                  y_mu_val,
                  sig_x_val,
                  sig_y_val,
                  base,
                  has_base=True):
    a = Parameter(name='a', value=a_val)
    sig_x = Parameter(name='sig_x', value=sig_x_val)
    sig_y = Parameter(name='sig_y', value=sig_y_val)
    x_mu = Parameter(name='x_mu', value=x_mu_val)
    y_mu = Parameter(name='y_mu', value=y_mu_val)

    if has_base:
        b = Parameter(name='b', value=base)
    else:
        b = base
    x_var = Variable(name='x_var')
    y_var = Variable(name='y_var')
    z_var = Variable(name='z_var')

    model = {
        z_var:
        a * exp(-(((x_var - x_mu)**2 / (2 * sig_x**2)) + ((y_var - y_mu)**2 /
                                                          (2 * sig_y**2)))) + b
    }
    return model
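A hedged usage sketch (not part of the original source) showing how the returned model dict could be fitted; the grid, ground-truth values, and noise level are assumptions:

import numpy as np
from symfit import Fit

# Initial guesses here are assumptions for illustration.
model = model_gauss2d(a_val=1.0, x_mu_val=0.0, y_mu_val=0.0,
                      sig_x_val=1.0, sig_y_val=1.0, base=0.1)

# Synthetic 2D Gaussian data on a grid (assumed ground truth).
xx, yy = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-3, 3, 50))
zz = 2.0 * np.exp(-((xx - 0.5)**2 / (2 * 1.2**2) +
                    (yy + 0.3)**2 / (2 * 0.8**2))) + 0.2
zz = zz + np.random.normal(0, 0.01, zz.shape)

# Fit accepts a plain {dependent: expression} dict; the keyword names
# match the Variable names used inside model_gauss2d.
fit = Fit(model, x_var=xx, y_var=yy, z_var=zz)
print(fit.execute())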
Example #10
def test_LogLikelihood_global():
    """
    This is a test for global likelihood fitting to multiple data sets.
    Based on SO question 56006357.
    """
    # creating the data
    mu1, mu2 = .05, -.05
    sigma1, sigma2 = 3.5, 2.5
    n1, n2 = 80, 90
    np.random.seed(42)
    x1 = np.random.vonmises(mu1, sigma1, n1)
    x2 = np.random.vonmises(mu2, sigma2, n2)

    n = 2  # number of components
    xs = variables('x,' + ','.join('x_{}'.format(i) for i in range(1, n + 1)))
    x, xs = xs[0], xs[1:]
    ys = variables(','.join('y_{}'.format(i) for i in range(1, n + 1)))
    mu, kappa = parameters('mu, kappa')
    kappas = parameters(','.join('k_{}'.format(i) for i in range(1, n + 1)),
                        min=0,
                        max=10)
    mu.min, mu.max = -np.pi, np.pi

    template = exp(kappa * cos(x - mu)) / (2 * pi * besseli(0, kappa))

    model = Model({
        y_i: template.subs({
            kappa: k_i,
            x: x_i
        })
        for y_i, x_i, k_i in zip(ys, xs, kappas)
    })

    all_data = {xs[0]: x1, xs[1]: x2, ys[0]: None, ys[1]: None}
    all_params = {'mu': 1}
    all_params.update({k_i.name: 1 for k_i in kappas})

    # Evaluate the loglikelihood and its jacobian and hessian
    logL = LogLikelihood(model, data=all_data)
    eval_numerical = logL(**all_params)
    jac_numerical = logL.eval_jacobian(**all_params)
    hess_numerical = logL.eval_hessian(**all_params)

    # Test the types and shapes of the components.
    assert isinstance(eval_numerical, float)
    assert isinstance(jac_numerical, np.ndarray)
    assert isinstance(hess_numerical, np.ndarray)

    assert eval_numerical.shape == tuple()  # Empty tuple -> scalar
    assert jac_numerical.shape == (3, )
    assert hess_numerical.shape == (3, 3)
Example #11
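    # Note: self.data and self.get_params() are assumed to be provided by the
    # enclosing class in the original project.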
    def do_glob_fit(self):
        """This method performs a global fit on the CCPS."""
        # create parameters for symfit
        dist = self.data.keys()
        v = sf.parameters('v_', value=500, min=0, max=1000)[0]
        d = sf.parameters('D_', value=50, min=0, max=100)[0]
        y0_p = sf.parameters(', '.join('y0_{}'.format(key)
                                       for key in self.data.keys()),
                             min=0,
                             max=1)
        b_p = sf.parameters(', '.join('b_{}'.format(key)
                                      for key in self.data.keys()),
                            value=50,
                            min=0,
                            max=100)
        # create variables for symfit
        x = sf.variables('x')[0]
        y_var = sf.variables(', '.join('y_{}'.format(key)
                                       for key in self.data.keys()))
        # get fixed & shared params
        dx, a, w2, a2, tau, s, wz2 = self.get_params()
        # create model
        model = sf.Model({
            y: y0 + b * sf.exp(-(dst * dx - v * sf.cos(a) * x)**2 /
                               (w2 + 0.5 * a2 + 4 * d * x)) *
            sf.exp(-(x**2) * (v * sf.sin(a) - dx / tau)**2 /
                   (w2 + 0.5 * a2 + a * d * x)) / (4 * d * x + w2 + 0.5 * a2)
            for y, y0, b, dst in zip(y_var, y0_p, b_p, dist)
        })
        # dependent variables dict
        data = {y.name: self.data[dst] for y, dst in zip(y_var, dist)}
        # independent variable x
        max_time = len(self.data[20]) * tau
        x_data = np.linspace(0, max_time, len(self.data[20]))
        # fit
        fit = sf.Fit(model, x=x_data, **data)
        res = fit.execute()
        return res
Example #12
def test_1_multi_model():
    '''Tests the case with 1 component and multiple parameters'''
    x, y = sf.variables('x, y')
    a, b = sf.parameters('a, b')
    model = sf.Model({y: 3 * a * x**2 - sf.exp(b) * x})
    x_data = np.arange(10)

    exact = model.eval_jacobian(x=x_data, a=3.5, b=2)
    approx = model.finite_difference(x=x_data, a=3.5, b=2)
    _assert_equal(exact, approx)

    exact = model.eval_jacobian(x=3, a=3.5, b=2)
    approx = model.finite_difference(x=3, a=3.5, b=2)
    _assert_equal(exact, approx)
Example #13
    def test_1_multi_model(self):
        '''Tests the case with 1 component and multiple parameters'''
        x, y = sf.variables('x, y')
        a, b = sf.parameters('a, b')
        model = sf.Model({y: 3 * a * x**2 - sf.exp(b) * x})
        x_data = np.arange(10)

        exact = model.eval_jacobian(x=x_data, a=3.5, b=2)
        approx = model.finite_difference(x=x_data, a=3.5, b=2)
        np.testing.assert_allclose(exact, approx)

        exact = model.eval_jacobian(x=3, a=3.5, b=2)
        approx = model.finite_difference(x=3, a=3.5, b=2)
        np.testing.assert_allclose(exact, approx)
Example #14
def test_multi_1_model():
    '''Tests the case with multiple components and one parameter'''
    x, y, z = sf.variables('x, y, z')
    a, = sf.parameters('a')
    model = sf.Model({y: 3 * a * x**2, z: sf.exp(a * x)})
    x_data = np.arange(10)

    exact = model.eval_jacobian(x=x_data, a=3.5)
    approx = model.finite_difference(x=x_data, a=3.5)
    _assert_equal(exact, approx)

    exact = model.eval_jacobian(x=3, a=3.5)
    approx = model.finite_difference(x=3, a=3.5)
    _assert_equal(exact, approx)
Example #15
def test_multi_multi_model():
    '''Tests the case with multiple components and multiple parameters'''
    x, y, z = sf.variables('x, y, z')
    a, b, c = sf.parameters('a, b, c')
    model = sf.Model({y: 3 * a * x**2 + b * x - c, z: sf.exp(a * x - b) * c})
    x_data = np.arange(10)

    exact = model.eval_jacobian(x=x_data, a=3.5, b=2, c=5)
    approx = model.finite_difference(x=x_data, a=3.5, b=2, c=5)
    _assert_equal(exact, approx, rel=1e-3)

    exact = model.eval_jacobian(x=3, a=3.5, b=2, c=5)
    approx = model.finite_difference(x=3, a=3.5, b=2, c=5)
    _assert_equal(exact, approx, rel=1e-3)
Example #16
    def test_multi_multi_model(self):
        '''Tests the case with multiple components and multiple parameters'''
        x, y, z = sf.variables('x, y, z')
        a, b, c = sf.parameters('a, b, c')
        model = sf.Model({y: 3 * a * x**2 + b * x - c,
                          z: sf.exp(a*x - b) * c})
        x_data = np.arange(10)

        exact = model.eval_jacobian(x=x_data, a=3.5, b=2, c=5)
        approx = model.finite_difference(x=x_data, a=3.5, b=2, c=5)
        np.testing.assert_allclose(exact, approx, rtol=1e-5)

        exact = model.eval_jacobian(x=3, a=3.5, b=2, c=5)
        approx = model.finite_difference(x=3, a=3.5, b=2, c=5)
        np.testing.assert_allclose(exact, approx, rtol=1e-5)
Example #17
    def test_multi_1_model(self):
        '''Tests the case with multiple components and one parameter'''
        x, y, z = sf.variables('x, y, z')
        a, = sf.parameters('a')
        model = sf.Model({y: 3 * a * x**2,
                          z: sf.exp(a*x)})
        x_data = np.arange(10)

        exact = model.eval_jacobian(x=x_data, a=3.5)
        approx = model.finite_difference(x=x_data, a=3.5)
        np.testing.assert_allclose(exact, approx)

        exact = model.eval_jacobian(x=3, a=3.5)
        approx = model.finite_difference(x=3, a=3.5)
        np.testing.assert_allclose(exact, approx)
Example #18
def fit_model(df_avg, frequency):
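    # Note: x, y, a, b, c, phi, delta and real_time are assumed to be symfit
    # symbols and data defined at module scope in the original source.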

    model = Model({
        y: a * exp(-b * x) + c * sin(2 * pi * frequency * x + phi) + delta
    })
    avg_osc = df_avg.groupby("time").mean()["psc"]
    argmax_ind = avg_osc.values.argmax()
    ydata = avg_osc.values[argmax_ind:]
    xdata = real_time[argmax_ind:]
    sigma_y = df_avg.groupby("time").std()["psc"].values[argmax_ind:]

    fit = Fit(model, x=xdata, y=ydata, sigma_y=sigma_y)
    fit_result = fit.execute()
    yfit = model(x=xdata, **fit_result.params).y
    return avg_osc, xdata, ydata, fit_result, yfit
Example #19
    def test_fixed_parameters(self):
        """
        Make sure fixed parameters don't change on fitting
        """
        xdata = np.arange(100)
        ydata = np.arange(100)

        a, b, c, d = parameters('a, b, c, d')
        x, y = variables('x, y')

        c.value = 4.0
        c.fixed = True

        model_dict = {y: a * exp(-(x - b)**2 / (2 * c**2)) + d}
        fit = Fit(model_dict, x=xdata, y=ydata)
        fit_result = fit.execute()
        self.assertEqual(4.0, fit_result.params['c'])
Example #21
    def run(
            self,
            angle=0.02472,
            pixel_to_micron_x=9.81e-8,
            beam_waist_xy=0.5e-6,
            beam_waist_z=2e-6,
            rbc_radius=4e-6,
            s=1,
            tau=0.001,
    ):
        # create parameters for symfit
        distances = self.data.keys()
        v = sf.parameters('v_', value=500, min=0, max=1000)[0]
        d = sf.parameters('D_', value=50, min=0, max=100)[0]
        y0_p = sf.parameters(
            self.create_distance_strings('y0'),
            min=0,
            max=1,
        )
        b_p = sf.parameters(
            self.create_distance_strings('b'),
            value=50,
            min=0,
            max=100,
        )
        # create variables for symfit
        x = sf.variables('x')[0]
        y_var = sf.variables(self.create_distance_strings('y'))

        # create model
        # pixel_to_micron_x
        model = sf.Model({
            y:
            y0 + b * sf.exp(-(dst * pixel_to_micron_x -
                              v * sf.cos(angle) * x)**2 /
                            (beam_waist_xy + 0.5 * rbc_radius**2 + 4 * d * x)) *
            sf.exp(-(x**2) * (v * sf.sin(angle) - pixel_to_micron_x / tau)**2 /
                   (beam_waist_xy + 0.5 * rbc_radius**2 + angle * d * x)) /
            (4 * d * x + beam_waist_xy + 0.5 * rbc_radius**2)
            for y, y0, b, dst in zip(y_var, y0_p, b_p, distances)
        })
        # dependent variables dict
        data = {y.name: self.data[dst] for y, dst in zip(y_var, distances)}
        # independent variable x
        n_data_points = len(list(self.data.values())[0])
        max_time = n_data_points * tau
        x_data = np.linspace(0, max_time, n_data_points)
        # fit
        fit = sf.Fit(model, x=x_data, **data)
        res = fit.execute()
        return res
Example #22
def laplace_dataset():
    """Sample pytest fixture.

    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    t, f, s, F = sf.variables('t, f, s, F')
    model = sf.Model({f: t * sf.exp(-t)})
    laplace_model = sf.Model(
        {F: sf.laplace_transform(model[f], t, s, noconds=True)})

    epsilon = 0.01  # 1 percent noise
    s_data = np.linspace(0, 10, 101)[1:]
    F_data = laplace_model(s=s_data).F
    F_sigma = epsilon * F_data
    np.random.seed(42)
    F_data = np.random.normal(F_data, F_sigma)
    # Reshape to matrices
    F_data = F_data[:, None]
    F_sigma = F_sigma[:, None]
    M_mat = 1 / (s_data[None, :] + s_data[:, None])
    delta = np.atleast_2d(np.linalg.norm(F_sigma)**2)
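    # M_y, y, y_stdev and d are assumed to be symbols defined at module scope
    # in the original test suite.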
    return {M_y: M_mat, y: F_data, y_stdev: F_sigma, d: delta}
Example #23
    def test_known_solution(self):
        p, c1, c2 = parameters('p, c1, c2')
        y, t = variables('y, t')
        p.value = 3.0

        model_dict = {
            D(y, t): -p * y,
        }

        # Let's say we know the exact solution to this problem
        sol = c1 * exp(-p * t)

        # Generate some data
        tdata = np.linspace(0, 3, 101)
        ydata = sol(t=tdata, c1=1.0, p=3.22)

        ode_model = ODEModel(model_dict, initial={t: 0.0, y: 1.0})
        fit = Fit(ode_model, t=tdata, y=ydata)
        fit_result = fit.execute()
        y_sol, = ode_model(tdata, **fit_result.params)

        self.assertAlmostEqual(3.22, fit_result.value(p))
Example #25
    def test_multi_indep(self):
        '''
        Tests the case with multiple components, multiple parameters and
        multiple independent variables
        '''
        w, x, y, z = sf.variables('w, x, y, z')
        a, b, c = sf.parameters('a, b, c')
        model = sf.Model({y: 3 * a * x**2 + b * x * w - c,
                          z: sf.exp(a*x - b) + c*w})
        x_data = np.arange(10)/10
        w_data = np.arange(10)

        exact = model.eval_jacobian(x=x_data, w=w_data, a=3.5, b=2, c=5)
        approx = model.finite_difference(x=x_data, w=w_data, a=3.5, b=2, c=5)
        np.testing.assert_allclose(exact, approx, rtol=1e-5)

        exact = model.eval_jacobian(x=0.3, w=w_data, a=3.5, b=2, c=5)
        approx = model.finite_difference(x=0.3, w=w_data, a=3.5, b=2, c=5)
        np.testing.assert_allclose(exact, approx, rtol=1e-5)

        exact = model.eval_jacobian(x=0.3, w=5, a=3.5, b=2, c=5)
        approx = model.finite_difference(x=0.3, w=5, a=3.5, b=2, c=5)
        np.testing.assert_allclose(exact, approx, rtol=1e-5)
Example #26
    def test_bounds(self):
        """
        The bounds of an object should always be such that lower < upper.
        :return:
        """
        a = Parameter(value=-2.482092e-01, fixed=True)
        # a = Parameter()
        try:
            b = Parameter(value=5.0, min=6.0, max=4.0)
        except ValueError:
            b = Parameter(value=5.0, min=4.0, max=6.0)
        c = Parameter(value=2.219756e+02, fixed=True)
        x = Variable()

        # build the model
        model = Model(a + b * (1 - exp(-c / x)))
        print(model.bounds)
        for bounds in model.bounds:
            if None in bounds:
                pass
            else:
                # Both are set
                min, max = bounds
                self.assertGreaterEqual(max, min)
Example #27
    def test_single_param_model(self):
        """
        Added after #161, this tests if models with a single additive parameter
        are fitted properly. The problem with these models is that their
        Jacobian is in principle just the integer 1, which does not have the
        correct shape.

        No news is good news.
        :return:
        """
        T = Variable('T')
        l = Variable('l')
        s = Parameter('s', value=300)
        a = Parameter('a', value=300)
        model = {l: s + a + 1 / (1 + exp(- T))}

        temp_data = [270, 280, 285, 290, 295, 300, 310, 320]
        length_data = [8.33, 8.41, 8.45, 8.5, 8.54, 9.13, 9.27, 9.4]
        fit = Fit(model, l=length_data, T=temp_data)
        fit_result = fit.execute()

        # Raise the stakes by increasing the dimensionality of the data
        TT, LL = np.meshgrid(temp_data, length_data)
        fit = Fit(model, l=LL, T=TT)
        fit_result = fit.execute()
Example #29
def distr(x, k, x0):
    kbT = 4.11
    return exp(-k*(x-x0)**2/kbT)
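A hedged sketch (not from the original source) of how distr could be used to build and fit a symfit model; it assumes the exp inside distr is symfit's exp (as in this listing), and the ground-truth values are illustrative:

import numpy as np
from symfit import Fit, Model, Parameter, Variable

x = Variable('x')
y = Variable('y')
k = Parameter('k', value=1.0, min=0)
x0 = Parameter('x0', value=0.0)

# Calling distr with symfit symbols yields a symbolic expression.
model = Model({y: distr(x, k, x0)})

# Synthetic data; the true values k=2.0, x0=0.5 are assumptions.
xdata = np.linspace(-3, 3, 101)
ydata = model(x=xdata, k=2.0, x0=0.5).y
ydata += np.random.normal(0, 0.01, ydata.shape)

fit = Fit(model, x=xdata, y=ydata)
print(fit.execute())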
Example #30
from __future__ import print_function
from symfit import Parameter, Variable, Fit, exp
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

palette = sns.color_palette()


x = Variable()
A = Parameter()
sig = Parameter(name='sig', value=1.4, min=1.0, max=2.0)
x0 = Parameter(name='x0', value=15.0, min=0.0)
# Gaussian distribution
model = A*exp(-((x - x0)**2/(2 * sig**2)))

# Sample 10000 points from a N(15.0, 1.5) distribution
sample = np.random.normal(loc=15.0, scale=1.5, size=(10000,))
ydata, bin_edges = np.histogram(sample, 100)
xdata = (bin_edges[1:] + bin_edges[:-1])/2

fit = Fit(model, xdata, ydata)
fit_result = fit.execute()
print(fit_result)
print(model)

y = model(x=xdata, **fit_result.params)
sns.regplot(xdata, ydata, fit_reg=False)
plt.plot(xdata, y, color=palette[2])
plt.ylim(0, 400)
plt.show()
Example #31
from symfit import parameters, variables, Fit, Piecewise, exp, Eq, Model
import numpy as np
import matplotlib.pyplot as plt

t, y = variables('t, y')
a, b, d, k, t0 = parameters('a, b, d, k, t0')

# Make a piecewise model
y1 = a * t + b
y2 = d * exp(-k * t)
model = Model({y: Piecewise((y1, t <= t0), (y2, t > t0))})

# As a constraint, we demand that the derivatives of the two branches are equal
# at the switch point: substitute t -> t0 into each and impose equality via `Eq`
constraints = [Eq(y1.diff(t).subs({t: t0}), y2.diff(t).subs({t: t0}))]

# Generate example data
tdata = np.linspace(0, 4., 200)
ydata = model(t=tdata, a=63, b=300, d=2205, k=3, t0=0.65).y
ydata = np.random.normal(ydata, 0.05 * ydata)  # add 5% noise

# Help the fit by bounding the switchpoint between the models and giving initial
# guesses
t0.min = 0.5
t0.max = 0.8
b.value = 320

fit = Fit(model, t=tdata, y=ydata, constraints=constraints)
fit_result = fit.execute()
print(fit_result)
Example #32
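# Note: this fragment assumes earlier definitions from the original script:
# d (the dataset object), B, B_center, R, sigma, A (symbols), s as an alias
# for sympy, plus plot, logger, strm and thisguess.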
plt.figure()
plt.title('linewidth for 1mM TEMPOL')
plot(d)
d = d['$B_0$':(-9, 9)]
plot(d, '--', alpha=0.5, linewidth=4)
d.setaxis(
    '$B_0$', lambda x: x + 1
)  # for a positive B_center, b/c the interactive guess doesn't deal well with negative parameters
s_integral = d.C.run_nopop(np.cumsum, '$B_0$')
#{{{fitting with voigt
if not os.path.exists('dVoigt.pickle'):
    with open('dVoigt.pickle', 'wb') as fp:
        # cache the expression, which takes some time to generate
        print("no pickle file found -- generating")
        z = ((B - B_center) + s.I * R) / sigma / s.sqrt(2)
        faddeeva = s.simplify(s.exp(-z**2) * s.erfc(-s.I * z))
        voigt = A * s.re(faddeeva) / sigma / s.sqrt(2 * s.pi)
        voigt *= sigma * R  # so adjusting linewidth doesn't change amplitude
        voigt = voigt.simplify()
        # add real below b/c lambdify was giving complex answer
        dVoigt = s.re(s.re(voigt.diff(B)).simplify())
        pickle.dump(dVoigt, fp)
else:
    with open('dVoigt.pickle', 'rb') as fp:
        print("reading expression from pickle")
        dVoigt = pickle.load(fp)
plt.figure()
plt.title('plot guess')
logger.info(strm(A.value, "A value"))
# {{{ need to re-do b/c defaults are stored in pickle
for k, v in thisguess.items():
Example #35
import numpy as np
from symfit import variables, parameters, Fit, exp, Model
from symfit.core.objectives import LogLikelihood

# Draw samples from two independent exponential distributions
np.random.seed(42)
data1 = np.random.exponential(5.5, 1000)
data2 = np.random.exponential(6, 2000)

# Define the model for an exponential distribution (numpy style)
a, b = parameters('a, b')
x1, y1, x2, y2 = variables('x1, y1, x2, y2')
model = Model({y1: (1 / a) * exp(-x1 / a), y2: (1 / b) * exp(-x2 / b)})
print(model)

fit = Fit(model, x1=data1, x2=data2, objective=LogLikelihood)
fit_result = fit.execute()
print(fit_result)

# Instead, we could also fit with only one parameter to see which works best
model = Model({y1: (1 / a) * exp(-x1 / a), y2: (1 / a) * exp(-x2 / a)})

fit = Fit(model, x1=data1, x2=data2, objective=LogLikelihood)
fit_result = fit.execute()
print(fit_result)