def test_convergence():
    """Check that Euler's method shows the expected linear convergence in h.

    The global error of the explicit Euler method scales as O(h); sampled on a
    uniform grid of step sizes, the error-versus-h curve should therefore be a
    straight line, i.e. its gradient should be (nearly) constant.
    """
    number_sampled_h = 20

    # integration parameters
    h_array = np.linspace(start=1.0e-05, stop=1.0e-03,
                          num=number_sampled_h)  # tested time steps
    t_0 = 0.0  # initial time
    t_final = 10.0  # final time (limit time interval just to avoid computing time issues)
    y_0 = 0.1  # initial condition
    Lambda = 1.0

    # exponential growth ODE: dy/dt = Lambda * y
    # (the previous comment mislabelled this as "logistic growth")
    model = lambda t, x: Lambda * x
    exponential_model = euler.euler(model)

    error = np.empty(shape=number_sampled_h)
    for id_h, h in enumerate(h_array):
        times = np.arange(start=t_0, stop=t_final, step=h)
        analytic = exponential_growth(times, Lambda=Lambda)
        numeric = exponential_model.integrate(h=h,
                                              t_0=t_0,
                                              t_final=t_final,
                                              y_0=y_0)[1, :]
        # global error at the final time point
        error[id_h] = abs(analytic[-1] - numeric[-1])

    # Differentiate the error with respect to h (not the array index — pass
    # h_array as spacing) and make sure the gradient doesn't vary too much,
    # i.e. linear scaling.
    gradients = np.gradient(error, h_array)
    median_gradient = np.median(gradients)

    assert np.allclose(a=gradients, b=median_gradient, rtol=5.0e-02)
    def test_euler_Lambda(self, Lambda):
        """Test the range of valid lambda values for the logistic growth model.

        Arguments:
            Lambda {float} -- growth factor.
        """
        # integration parameters
        step_size = 0.001
        start_time, end_time = 0.0, 10.0  # short interval to keep runtime low
        initial_value = 0.1

        time_grid = np.arange(start=start_time, stop=end_time, step=step_size)
        exact = logistic_growth_dimensionless(time_grid, Lambda=Lambda)

        # dimensionless logistic growth ODE
        rhs = lambda t, x: Lambda * x * (1 - x)
        solver = euler.euler(rhs)
        approx = solver.integrate(h=step_size,
                                  t_0=start_time,
                                  t_final=end_time,
                                  y_0=initial_value)[1, :]

        # point-wise comparison against the analytic solution
        tolerance = 0.01
        assert np.all((exact - approx)**2 <= tolerance**2)
    def test_euler_initial_y(self, y_0):
        """testing the valid realm of y_0. Here the logistic function can only assume values in (0, 1).

        Arguments:
            y_0 {float} -- initial value of the state variable.
        """
        # model parameters
        Lambda = 1.0

        # integration parameters
        h = 0.001  # time steps
        t_0 = 0.0  # initial time
        t_final = 10.0  # final time (limit time interval just to avoid computing time issues)
        # BUGFIX: the parametrised y_0 was previously overwritten here by a
        # hard-coded `y_0 = 0.1`, so the test never exercised its input.

        times = np.arange(start=t_0, stop=t_final, step=h)
        # Pass the initial condition to the analytic solution as well so both
        # curves start from the same value.
        # NOTE(review): assumes logistic_growth_dimensionless accepts a y_0
        # keyword (sibling test passes t_0 analogously) — confirm signature.
        analytic = logistic_growth_dimensionless(times, y_0=y_0, Lambda=Lambda)

        # logistic growth ODE
        model = lambda t, x: Lambda * x * (1 - x)
        logistic_model = euler.euler(model)
        numeric = logistic_model.integrate(h=h,
                                           t_0=t_0,
                                           t_final=t_final,
                                           y_0=y_0)[1, :]
        squared_distance = (analytic - numeric)**2

        tolerance = 0.01
        assert np.all(squared_distance <= tolerance**2)
    def test_euler_intial_time(self, t_0):
        """Testing valid values for t_0. Within the physical limits of the computer any number t_0 shoud work. We just choose a rather
        small range [-100, 100] for efficiency of the test.

        Arguments:
            t_0 {float} -- initial time of integration.
        """
        # model parameters
        growth_rate = 1.0

        # integration parameters
        step = 0.001
        final_time = 10.0  # keep the interval short for speed
        initial_state = 0.1

        grid = np.arange(start=t_0, stop=final_time, step=step)
        exact = logistic_growth_dimensionless(grid, t_0=t_0, Lambda=growth_rate)

        # dimensionless logistic growth ODE
        rhs = lambda t, x: growth_rate * x * (1 - x)
        solver = euler.euler(rhs)
        approx = solver.integrate(h=step,
                                  t_0=t_0,
                                  t_final=final_time,
                                  y_0=initial_state)[1, :]

        # point-wise comparison against the analytic solution
        tolerance = 0.01
        assert np.all((exact - approx)**2 <= tolerance**2)
# Example #5
    def plot(self) -> None:
        """Visualise the success/failure of the parameter inference.

        Scatters the raw data and overlays the ODE solution obtained with the
        optimal parameters found by the inference.
        """
        # Integrate the model at the optimal parameters; by convention the
        # last entry of the optimal parameter vector is the initial condition.
        fitted_rhs = lambda t, x: self.model(t, x, self.optimal_parameters[0])
        solver = euler(fitted_rhs)
        solution = solver.integrate(h=self.h,
                                    t_0=self.data_time[0],
                                    t_final=self.data_time[-1],
                                    y_0=self.optimal_parameters[-1])

        # Generate plot: data as scatter, fitted model as line.
        plt.figure(figsize=(6, 6))
        plt.scatter(x=self.data_time,
                    y=self.data_y,
                    color='gray',
                    edgecolors='darkgreen',
                    alpha=0.5,
                    label='data')
        plt.plot(solution[0, :],
                 solution[1, :],
                 color='black',
                 label='model')

        plt.xlabel('time')
        plt.ylabel('state variable')
        plt.legend()

        plt.show()
# Example #6
    def _objective_function(self, parameters: np.ndarray) -> float:
        """Least squares objective function to be minimised in the process of parameter inference.

        Arguments:
            parameters {np.ndarray} -- Set of parameters that are used to solve the ODE model.
                The last parameter i.e. parameters[-1] is assumed to be the initial value of the
                state variable.
        Return:
            Point-wise squared distance between the data and the ODE model.
        """
        # Bind the current parameter guess into the ODE right-hand side.
        rhs = lambda t, x: self.model(t, x, parameters[0])
        solver = euler(rhs)
        solution = solver.integrate(h=self.h,
                                    t_0=self.data_time[0],
                                    t_final=self.data_time[-1],
                                    y_0=parameters[-1])
        # Interpolate the numerical solution onto the data time points and
        # accumulate the squared residuals.
        interpolated = self._interpolate_numerical_solution(solution)
        residuals = self.data_y - interpolated[1, :]

        return np.sum(residuals**2)
def test_euler():
    '''
    Testing the numerical solution of the implemented Euler method.
    '''
    # model parameters
    growth_rate = 1.0
    capacity = 100.0

    # integration parameters
    step = 0.001  # time steps
    start, stop = 0.0, 10.0  # initial / final time
    initial_state = 1.0  # initial condition

    grid = np.arange(start=start, stop=stop, step=step)
    exact = logistic_growth(grid, C=capacity, Lambda=growth_rate)

    # logistic growth ODE with carrying capacity
    rhs = lambda t, N: growth_rate * N * (1 - N / capacity)
    solver = euler.euler(rhs)
    approx = solver.integrate(h=step, t_0=start, t_final=stop,
                              y_0=initial_state)[1, :]

    # point-wise comparison against the analytic solution
    tolerance = 0.1
    assert np.all((exact - approx)**2 < tolerance**2)
def test_interpolate_numerical_solution():
    """Test whether function is capable of producing linear interpolations between solution that matches input the data time points.

    Test case 1: Non-dynamic model.
    Test case 2: Model that is linear in time.
    """
    ### exact parameters and integration step size
    y_0 = 0.1
    t_0 = 0.0
    t_final = 10.0

    h = 0.001

    # BUGFIX: seed the random sampling so the test is deterministic and
    # reproducible (previously unseeded np.random.choice made it flaky).
    rng = np.random.default_rng(seed=0)
    """test case 1:"""
    ### generate data: constant solution y(t) = y_0
    data_time = np.linspace(0, 10, 3000)
    data_time = rng.choice(a=data_time,
                           size=100)  # random sample of times.
    exact_solution = np.full(shape=100, fill_value=y_0)

    data_y = exact_solution
    data = np.vstack(tup=(data_time, data_y))

    ### solve IVP: zero derivative keeps the state at y_0
    ODEmodel = lambda t, x: 0
    model = euler(ODEmodel)
    numerical_solution = model.integrate(h=h,
                                         t_0=t_0,
                                         t_final=t_final,
                                         y_0=y_0)

    ### Instantiating inverse problem
    ODEmodel = lambda t, x, Lambda: 0
    inv_problem = InferenceProblem(ODEmodel, data)

    interpolated_solution = inv_problem._interpolate_numerical_solution(
        numerical_solution)

    assert np.allclose(a=interpolated_solution, b=data, rtol=5.0e-02)
    """test case 2:"""
    ### generate data: linear solution y(t) = y_0 + t
    data_time = np.linspace(0, 10, 3000)
    data_time = rng.choice(a=data_time,
                           size=100)  # random sample of times.
    exact_solution = np.full(shape=100, fill_value=y_0) + data_time

    data_y = exact_solution
    data = np.vstack(tup=(data_time, data_y))

    ### solve IVP: unit derivative gives slope 1
    ODEmodel = lambda t, x: 1.0
    model = euler(ODEmodel)
    numerical_solution = model.integrate(h=h,
                                         t_0=t_0,
                                         t_final=t_final,
                                         y_0=y_0)

    ### Instantiating inverse problem
    ODEmodel = lambda t, x, Lambda: 1.0
    inv_problem = InferenceProblem(ODEmodel, data)

    interpolated_solution = inv_problem._interpolate_numerical_solution(
        numerical_solution)

    assert np.allclose(a=interpolated_solution, b=data, rtol=5.0e-02)
# Example #9
    def plot(self) -> None:
        """Visualise the inferred model.

        Plots the data together with the model solved at the inferred
        (optimal) parameters, plus one panel per parameter showing its
        posterior distribution.

        Returns:
            None
        """
        fig = plt.figure(figsize=(18, 8), tight_layout=True)
        # 2-row grid: top row for the data/fit plot, bottom row holds one
        # posterior panel per parameter.
        gs = gridspec.GridSpec(2, self.number_parameters)

        ### plot data and fit.
        # top-row axes span all columns
        ax = fig.add_subplot(gs[0, :])
        # Solve the ODE model with the optimal parameters.
        ODEmodel = lambda t, x: self.model(t, x, self.optimal_parameters[0])
        # Parameter-vector convention used here: [-2] is the initial value of
        # the state variable, [-1] is a spread used as the +/- band below
        # (presumably the inferred noise std — confirm against the inference).
        y_0 = self.optimal_parameters[-2]
        std = self.optimal_parameters[-1]
        # instantiate ODE model
        model = euler(ODEmodel)
        # integrate over the time span covered by the data
        t_0 = self.data_time[0]
        t_final = self.data_time[-1]
        numerical_estimate = model.integrate(h=self.h,
                                             t_0=t_0,
                                             t_final=t_final,
                                             y_0=y_0)

        # Scatter plot data.
        ax.scatter(x=self.data_time,
                   y=self.data_y,
                   color='gray',
                   edgecolors='darkgreen',
                   alpha=0.5,
                   label='data')
        # Line plot fitted model; row 0 of the integrator output is time,
        # row 1 is the state variable.
        ax.plot(numerical_estimate[0, :],
                numerical_estimate[1, :],
                color='black',
                label='model')
        # Shade a band of width `std` below and above the fitted curve.
        ax.fill_between(numerical_estimate[0, :],
                        numerical_estimate[1, :],
                        numerical_estimate[1, :] - std,
                        color='grey',
                        alpha=0.3)
        ax.fill_between(numerical_estimate[0, :],
                        numerical_estimate[1, :],
                        numerical_estimate[1, :] + std,
                        color='grey',
                        alpha=0.3)

        ax.set_xlabel('time')
        ax.set_ylabel('state variable')
        ax.legend()

        # One bottom-row panel per parameter: posterior histogram plus a
        # vertical line marking the optimal value.
        for id_p, posterior in enumerate(self.posteriors):
            ax = fig.add_subplot(gs[1, id_p])
            ax.set_ylabel('posterior of parameter %d [# counts]' % id_p)
            ax.set_xlabel('parameter %d [dimensionless]' % id_p)
            # each posterior is a (histogram counts, parameter values) pair
            hist, param_values = posterior
            ax.plot(param_values, hist, color='black', label='histogram')
            ax.axvline(x=self.optimal_parameters[id_p],
                       color='darkgreen',
                       label='optimum')

            ax.legend()

        plt.show()