Example 1
    def test_scaled_log_likelihood(self):
        import pints
        import pints.toy as toy
        import numpy as np

        model = toy.LogisticModel()
        real_parameters = [0.015, 500]
        test_parameters = [0.014, 501]
        sigma = 0.001
        times = np.linspace(0, 1000, 100)
        values = model.simulate(real_parameters, times)

        # Create an object with links to the model and time series
        problem = pints.SingleSeriesProblem(model, times, values)

        # Create scaled and unscaled log-likelihoods
        log_likelihood_not_scaled = pints.KnownNoiseLogLikelihood(
            problem, sigma)
        log_likelihood_scaled = pints.ScaledLogLikelihood(
            log_likelihood_not_scaled)

        eval_not_scaled = log_likelihood_not_scaled(test_parameters)
        eval_scaled = log_likelihood_scaled(test_parameters)

        self.assertEqual(int(eval_not_scaled), -20959169232)
        self.assertAlmostEqual(eval_scaled * len(times), eval_not_scaled)
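
The assertion above encodes the fact that ScaledLogLikelihood divides the wrapped log-likelihood by the number of sample points. A minimal standalone sketch of the same check using the current class names (GaussianKnownSigmaLogLikelihood, per the deprecation checks in Example 6, and SingleOutputProblem, assumed here as the replacement for SingleSeriesProblem):

import numpy as np
import pints
import pints.toy as toy

model = toy.LogisticModel()
times = np.linspace(0, 1000, 100)
values = model.simulate([0.015, 500], times)
problem = pints.SingleOutputProblem(model, times, values)

# Fixed-sigma Gaussian log-likelihood and its scaled wrapper
unscaled = pints.GaussianKnownSigmaLogLikelihood(problem, 0.001)
scaled = pints.ScaledLogLikelihood(unscaled)

x = [0.014, 501]
# Multiplying the scaled value by the number of samples recovers the unscaled one
print(np.isclose(scaled(x) * len(times), unscaled(x)))
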
Example 2
    def sample(self, x, parallel=False):
        """
        Runs the sampler, this method:
            (1) generates simulated data and adds noise
            (2) sets up the sampler with the method given,
                using an KnownNoiseLogLikelihood, and a UniformLogPrior
            (3) runs the sampler
            (4) returns:
                - the calculated rhat value
                - the average of ess across all chains, returning the
                  minimum result across all parameters
                - the total time taken by the sampler
        """

        the_model = self.model()
        values = the_model.simulate(self.real_parameters, self.times)
        value_range = np.max(values) - np.min(values)
        values += np.random.normal(0, self.noise * value_range, values.shape)
        problem = pints.MultiOutputProblem(the_model, self.times, values)
        log_likelihood = pints.KnownNoiseLogLikelihood(
            problem, value_range * self.noise)
        # lower = list(self.lower) + \
        #     [value_range * self.noise / 10.0] * the_model.n_outputs()
        # upper = list(self.upper) + \
        #     [value_range * self.noise * 10] * the_model.n_outputs()
        lower = list(self.lower)
        upper = list(self.upper)
        middle = [0.5 * (u + l) for l, u in zip(lower, upper)]
        sigma = [u - l for l, u in zip(lower, upper)]
        log_prior = pints.UniformLogPrior(lower, upper)
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)
        n_chains = int(x[-1])
        xs = [[
            np.random.uniform() * (u - l) + l for l, u in zip(lower, upper)
        ] for c in range(n_chains)]
        mcmc = pints.MCMCSampling(log_posterior,
                                  n_chains,
                                  xs,
                                  method=self.method)
        for sampler in mcmc.samplers():
            sampler.set_hyper_parameters(x[:-1])
        if parallel:
            mcmc.set_parallel(int(os.environ['OMP_NUM_THREADS']))

        mcmc.set_log_interval(1000)

        start = timer()
        chains = mcmc.run()
        end = timer()

        rhat = np.max(pints._diagnostics.rhat_all_params(chains))
        ess = np.zeros(chains[0].shape[1])
        for chain in chains:
            ess += np.array(pints._diagnostics.effective_sample_size(chain))
        ess /= n_chains
        ess = np.min(ess)
        print('rhat:', rhat)
        print('ess:', ess)
        print('time:', end - start)
        return rhat, ess, end - start
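
The recipe in the docstring can be reproduced on a toy problem without the surrounding benchmark class. A minimal sketch under assumptions not in the original: PINTS' default sampling method, an arbitrary 5% noise level and 2000 iterations, with pints._diagnostics imported explicitly (the same private module used above):

import numpy as np
import pints
import pints._diagnostics
import pints.toy as toy
from timeit import default_timer as timer

# (1) Generate simulated data and add noise
model = toy.LogisticModel()
times = np.linspace(0, 1000, 100)
values = model.simulate([0.015, 500], times)
value_range = np.max(values) - np.min(values)
values += np.random.normal(0, 0.05 * value_range, values.shape)

# (2) Known-noise likelihood, uniform prior, posterior and starting points
problem = pints.SingleOutputProblem(model, times, values)
log_likelihood = pints.KnownNoiseLogLikelihood(problem, 0.05 * value_range)
log_prior = pints.UniformLogPrior([0.01, 400], [0.02, 600])
log_posterior = pints.LogPosterior(log_likelihood, log_prior)
n_chains = 3
xs = [[np.random.uniform(0.01, 0.02), np.random.uniform(400, 600)]
      for c in range(n_chains)]

# (3) Run the sampler
mcmc = pints.MCMCSampling(log_posterior, n_chains, xs)
mcmc.set_max_iterations(2000)
start = timer()
chains = mcmc.run()
end = timer()

# (4) rhat, chain-averaged ESS (minimum over parameters), and run time
rhat = np.max(pints._diagnostics.rhat_all_params(chains))
ess = np.zeros(chains[0].shape[1])
for chain in chains:
    ess += np.array(pints._diagnostics.effective_sample_size(chain))
ess /= n_chains
print('rhat:', rhat, 'min ess:', np.min(ess), 'time:', end - start)
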
Example 3
    def test_known_unknown_log_likelihood(self):
        import pints
        import pints.toy as toy
        import numpy as np

        model = toy.LogisticModel()
        parameters = [0.015, 500]
        sigma = 0.1
        times = np.linspace(0, 1000, 100)
        values = model.simulate(parameters, times)
        problem = pints.SingleSeriesProblem(model, times, values)

        # Test if known/unknown give same result
        l1 = pints.KnownNoiseLogLikelihood(problem, sigma)
        l2 = pints.UnknownNoiseLogLikelihood(problem)
        self.assertAlmostEqual(l1(parameters), l2(parameters + [sigma]))
Example 4
def run_model(model,
              cell,
              protocol,
              time,
              voltage,
              current,
              plot='unfold',
              label=None,
              axes=None):

    # Select protocol file
    protocol_file = os.path.join(root, protocol + '.mmt')
    print(protocol_file)
    myokit_protocol = myokit.load_protocol(protocol_file)

    # Estimate noise from start of data
    sigma_noise = np.std(current[:2000], ddof=1)

    # Fetch the CMA-ES-fitted parameters
    obtained_parameters = model.fetch_parameters()

    # Cell-specific parameters
    temperature = model.temperature(cell)
    lower_conductance = model.conductance_limit(cell)

    # Apply capacitance filter based on protocol
    print('Applying capacitance filtering')
    time, voltage, current = model.capacitance(myokit_protocol, 0.1, time,
                                               voltage, current)

    forward_model = model.ForwardModel(myokit_protocol,
                                       temperature,
                                       sine_wave=False)
    problem = pints.SingleOutputProblem(forward_model, time, current)
    log_likelihood = pints.KnownNoiseLogLikelihood(problem, sigma_noise)
    log_prior = model.LogPrior(lower_conductance)
    log_posterior = pints.LogPosterior(log_likelihood, log_prior)

    # Show obtained parameters and score
    obtained_log_posterior = log_posterior(obtained_parameters)
    print('Kylie sine-wave parameters:')
    for x in obtained_parameters:
        print(pints.strfloat(x))
    print('Final log-posterior:')
    print(pints.strfloat(obtained_log_posterior))

    # Simulate
    simulated = forward_model.simulate(obtained_parameters, time)

    if plot == 'unfold':
        axes[0].plot(time, voltage, color='red')  #, label='voltage')
        #axes[0].legend(loc='upper right')
        axes[1].plot(time, current, alpha=0.3,
                     color='red')  #, label='measured current')

        # Model name and plot colour for each label
        # (labels 2 and 3 both use 'seagreen' here)
        model_styles = {
            0: ('circularCOIIC', 'blue'),
            1: ('linearCOI', 'magenta'),
            2: ('linearCCOI', 'seagreen'),
            3: ('linearCCCOI', 'seagreen'),
        }
        if label in model_styles:
            model_name, color = model_styles[label]
            axes[1].plot(time,
                         simulated,
                         alpha=1,
                         color=color,
                         label=model_name)
    #axes.subplot(2,1,1)

    else:

        IkrModel.fold_plot(protocol, time, voltage, [current, simulated])
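
The sigma passed to KnownNoiseLogLikelihood above is estimated from the quiet start of the recording. A small synthetic sketch of that estimator (the trace below is made up; only the np.std(current[:2000], ddof=1) line comes from run_model):

import numpy as np

# Synthetic current trace: a quiet 2000-sample baseline followed by a signal
baseline = np.random.normal(0.0, 0.05, 2000)
signal = np.sin(np.linspace(0, 20, 8000)) + np.random.normal(0.0, 0.05, 8000)
current = np.concatenate([baseline, signal])

# Same estimator as in run_model: sample standard deviation of the baseline
sigma_noise = np.std(current[:2000], ddof=1)
print(sigma_noise)  # close to the true noise level of 0.05
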
Example 5
                                          temperature,
                                          myo_model,
                                          rate_dict,
                                          transform,
                                          sine_wave=sw)
        n_params = model.n_params

        #
        # Define problem
        #
        problem = pints.SingleOutputProblem(model, time, current)

        #
        # Define log-posterior
        #
        log_likelihood = pints.KnownNoiseLogLikelihood(problem, sigma_noise)
        log_prior = prior.LogPrior(rate_dict, lower_conductance, n_params,
                                   transform)
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)
        rate_checker = Rates.ratesPrior(transform, lower_conductance)

        # Define the parameter set from the best ones found so far.
        # Only refresh these on the first sine-wave fit protocol
        if protocol_name == 'sine-wave':
            parameter_set = np.loadtxt(cmaes_result_files + model_name +
                                       '-cell-' + str(cell) + '-cmaes.txt')
            ll_score = log_likelihood(parameter_set)
            print('CMAES model parameters start point: ', parameter_set)
            print('LogLikelihood (proportional to square error): ', ll_score)

            mcmc_best_param_file = mcmc_result_files + model_name + '-cell-' + str(
Example 6
    def test_gaussian_log_likelihoods_single_output(self):
        # Single-output test for known/unknown noise log-likelihood methods

        model = pints.toy.LogisticModel()
        parameters = [0.015, 500]
        sigma = 0.1
        times = np.linspace(0, 1000, 100)
        values = model.simulate(parameters, times)
        values += np.random.normal(0, sigma, values.shape)
        problem = pints.SingleOutputProblem(model, times, values)

        # Test if known/unknown give same result
        l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
        l2 = pints.GaussianLogLikelihood(problem)
        self.assertAlmostEqual(l1(parameters), l2(parameters + [sigma]))

        # Test invalid constructors
        self.assertRaises(ValueError, pints.GaussianKnownSigmaLogLikelihood,
                          problem, 0)
        self.assertRaises(ValueError, pints.GaussianKnownSigmaLogLikelihood,
                          problem, -1)

        # known noise value checks
        model = pints.toy.ConstantModel(1)
        times = np.linspace(0, 10, 10)
        values = model.simulate([2], times)
        org_values = np.arange(10) / 5.0
        problem = pints.SingleOutputProblem(model, times, org_values)
        log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, 1.5)
        self.assertAlmostEqual(log_likelihood([-1]), -21.999591968683927)
        l, dl = log_likelihood.evaluateS1([3])
        self.assertAlmostEqual(l, -23.777369746461702)
        self.assertAlmostEqual(dl[0], -9.3333333333333321)
        self.assertEqual(len(dl), 1)

        # unknown noise value checks
        log_likelihood = pints.GaussianLogLikelihood(problem)
        self.assertAlmostEqual(log_likelihood([-3, 1.5]), -47.777369746461702)

        # unknown noise check sensitivity
        model = pints.toy.ConstantModel(1)
        times = np.linspace(0, 10, 10)
        values = model.simulate([2], times)
        org_values = np.arange(10) / 5.0
        problem = pints.SingleOutputProblem(model, times, org_values)
        log_likelihood = pints.GaussianLogLikelihood(problem)
        l, dl = log_likelihood.evaluateS1([7, 2.0])
        self.assertAlmostEqual(l, -63.04585713764618)
        self.assertAlmostEqual(dl[0], -15.25)
        self.assertAlmostEqual(dl[1], 41.925000000000004)

        # Test deprecated aliases
        l1 = pints.KnownNoiseLogLikelihood(problem, sigma)
        self.assertIsInstance(l1, pints.GaussianKnownSigmaLogLikelihood)

        l2 = pints.UnknownNoiseLogLikelihood(problem)
        self.assertIsInstance(l2, pints.GaussianLogLikelihood)

        # test multiple output unknown noise
        model = pints.toy.ConstantModel(3)
        parameters = [0, 0, 0]
        times = [1, 2, 3, 4]
        values = model.simulate([0, 0, 0], times)
        org_values = [[10.7, 3.5, 3.8], [1.1, 3.2, -1.4], [9.3, 0.0, 4.5],
                      [1.2, -3, -10]]
        problem = pints.MultiOutputProblem(model, times, org_values)
        log_likelihood = pints.GaussianLogLikelihood(problem)
        # Test Gaussian_logpdf((10.7, 1.1, 9.3, 1.2)|mean=0, sigma=3.5) +
        #      Gaussian_logpdf((3.5, 3.2, 0.0, -3)|mean=0, sigma=1) +
        #      Gaussian_logpdf((3.8, -1.4, 4.5, -10)|mean=0, sigma=12)
        #      = -50.5088...
        self.assertAlmostEqual(log_likelihood(parameters + [3.5, 1, 12]),
                               -50.508848609684783)
        l, dl = log_likelihood.evaluateS1(parameters + [3.5, 1, 12])
        self.assertAlmostEqual(l, -50.508848609684783)
        self.assertAlmostEqual(dl[0], 1.820408163265306)
        self.assertAlmostEqual(dl[1], 3.7000000000000002)
        self.assertAlmostEqual(dl[2], -0.021527777777777774)
        self.assertAlmostEqual(dl[3], 3.6065306122448981)
        self.assertAlmostEqual(dl[4], 27.490000000000002)
        self.assertAlmostEqual(dl[5], -0.25425347222222222)

        # test multiple output model dimensions of sensitivities
        d = 20
        model = pints.toy.ConstantModel(d)
        parameters = [0 for i in range(d)]
        times = [1, 2, 3, 4]
        values = model.simulate(parameters, times)
        org_values = np.ones((len(times), d))
        extra_params = np.ones(d).tolist()
        problem = pints.MultiOutputProblem(model, times, org_values)
        log_likelihood = pints.GaussianLogLikelihood(problem)
        l = log_likelihood(parameters + extra_params)
        l1, dl = log_likelihood.evaluateS1(parameters + extra_params)
        self.assertEqual(len(dl), len(parameters + extra_params))
        self.assertEqual(l, l1)
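
The -50.5088... reference value in the multi-output block can be reproduced directly from the Gaussian log-density; a quick numpy cross-check (not part of the test itself):

import numpy as np

def gaussian_logpdf_sum(xs, sigma):
    # Sum of log N(x | mean=0, sigma^2) over the given values
    xs = np.asarray(xs, dtype=float)
    return np.sum(
        -0.5 * np.log(2 * np.pi * sigma ** 2) - xs ** 2 / (2 * sigma ** 2))

total = (gaussian_logpdf_sum([10.7, 1.1, 9.3, 1.2], 3.5)
         + gaussian_logpdf_sum([3.5, 3.2, 0.0, -3], 1.0)
         + gaussian_logpdf_sum([3.8, -1.4, 4.5, -10], 12.0))
print(total)  # approximately -50.5088, matching the assertion above
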
Example 7
    model_ap = forwardModel.ForwardModel(protocol_ap,
                                         temperature,
                                         myo_model,
                                         rate_dict,
                                         transform,
                                         sine_wave=False)
    npar = model.n_params
    #
    # Define problem
    #
    problem_sine = pints.SingleOutputProblem(model, time_sine, current_sine)
    problem_ap = pints.SingleOutputProblem(model_ap, time_ap, current_ap)
    #
    # Define log-posterior
    #
    log_likelihood = pints.KnownNoiseLogLikelihood(problem_sine,
                                                   sigma_noise_sine)
    log_likelihood_ap = pints.KnownNoiseLogLikelihood(problem_ap,
                                                      sigma_noise_ap)
    log_prior = prior.LogPrior(rate_dict, lower_conductance, npar, transform)
    log_posterior = pints.LogPosterior(log_likelihood, log_prior)
    log_posterior_ap = pints.LogPosterior(log_likelihood_ap, log_prior)
    rate_checker = Rates.ratesPrior(transform, lower_conductance)

    if args.mcmc:
        model_metrics = np.zeros((5, 7))
        root = os.path.abspath('mcmc_results')
        param_filename = os.path.join(
            root, model_name + '-cell-' + str(cell) + '-mcmc_traces.p')
        trace = cPickle.load(open(param_filename, 'rb'))

        burnin = 70000
Example 8
    def load_problem(problem_dict):
        """
        Returns a copy of problem_dict, extended with an instantiated PINTS
        problem, log-likelihood, log-prior and log-posterior
        """
        problem_instance = copy.deepcopy(problem_dict)

        model = problem_dict["model"]()
        parameters = problem_dict['parameters']

        # simulate problem
        if 'simulation_noise_percent' in problem_dict:
            values, times, noise_stds = emutils.simulate(
                model,
                parameters=problem_dict['parameters'],
                times=problem_dict['times'],
                noise_range_percent=problem_dict['simulation_noise_percent'],
            )
        else:
            values, times = emutils.simulate(
                model,
                parameters=problem_dict['parameters'],
                times=problem_dict['times'],
                noise_range_percent=None,
            )
            noise_stds = None

        # Create a single- or multi-output PINTS problem
        if problem_dict['n_outputs'] == 1:
            problem = pints.SingleOutputProblem(model, times, values)
        else:
            problem = pints.MultiOutputProblem(model, times, values)

        # Create the likelihood; the known-noise form used here requires
        # noise_stds, i.e. 'simulation_noise_percent' must be given above
        # log_likelihood = pints.UnknownNoiseLogLikelihood(problem)
        log_likelihood = pints.KnownNoiseLogLikelihood(problem, noise_stds)

        # Either a percentage range around the parameters or an explicit
        # parameter range must be provided
        if 'param_range_percent' in problem_dict:
            param_range_percent = problem_dict['param_range_percent']
            params_lower = parameters - param_range_percent * np.abs(parameters)
            params_upper = parameters + param_range_percent * np.abs(parameters)
        else:
            params_lower, params_upper = problem_dict['param_range']

        # add noise
        # noise_lower, noise_upper = problem_dict['noise_bounds']

        bounds = pints.RectangularBoundaries(
            lower=params_lower,
            upper=params_upper,
        )

        log_prior = problem_dict['prior'](bounds)
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)

        # extend the dictionary with created variables
        problem_instance.update({
            'model': model,
            'values': values,
            'times': times,
            'noise_stds': noise_stds,
            'problem': problem,
            'bounds': bounds,
            'log_likelihood': log_likelihood,
            'log_prior': log_prior,
            'log_posterior': log_posterior
        })

        return problem_instance
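
For reference, the same pipeline can be set up without the project-specific emutils helper. A self-contained sketch with a toy model and hand-rolled noise (the 5% noise level and the 20% parameter box are arbitrary choices):

import numpy as np
import pints
import pints.toy as toy

# Simulate a toy single-output problem and add Gaussian noise
# (stands in for emutils.simulate)
model = toy.LogisticModel()
parameters = np.array([0.015, 500.0])
times = np.linspace(0, 1000, 100)
values = model.simulate(parameters, times)
noise_std = 0.05 * (np.max(values) - np.min(values))
values += np.random.normal(0, noise_std, values.shape)

# Problem, known-noise likelihood, rectangular bounds, prior and posterior
problem = pints.SingleOutputProblem(model, times, values)
log_likelihood = pints.KnownNoiseLogLikelihood(problem, noise_std)
params_lower = parameters - 0.2 * np.abs(parameters)
params_upper = parameters + 0.2 * np.abs(parameters)
bounds = pints.RectangularBoundaries(lower=params_lower, upper=params_upper)
log_prior = pints.UniformLogPrior(bounds)
log_posterior = pints.LogPosterior(log_likelihood, log_prior)

print(log_posterior(parameters))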