Example No. 1
    def test_no_output(self):
        # Repeat without any output
        with StreamCapture() as c:
            log = pints.Logger()
            log.set_stream(None)
            log.add_counter('#', width=2)
            log.add_float('Lat.', width=1)
            log.add_long_float('Number', file_only=True)
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)
            log.log(*data)
        self.assertOutput(expected='', returned=c.text())

        # Repeat on stderr
        with StreamCapture(stdout=True, stderr=True) as c:
            with TemporaryDirectory() as d:
                filename = d.path('test.csv')
                log = pints.Logger()
                log.set_filename(filename, csv=False)
                log.set_stream(sys.stderr)
                log.add_counter('#', width=2)
                log.add_float('Lat.', width=1)
                log.add_long_float('Number', file_only=True)
                log.add_int('Val', width=4)
                log.add_counter('Count', max_value=12345)
                log.add_time('Time')
                log.add_string('Q', 3)
                log.log(*data)
                with open(filename, 'r') as f:
                    out = f.read()
        self.assertOutput(expected='', returned=c.text()[0])
        self.assertOutput(expected=out2, returned=c.text()[1])
        self.assertOutput(expected=out3, returned=out)
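
The `StreamCapture` and `TemporaryDirectory` helpers used throughout these tests are not shown. As a rough guide, here is a hypothetical sketch of a `StreamCapture`-like context manager, consistent with how `c.text()` is used above (a plain string when only stdout is captured, an indexable pair when both streams are captured); the real helper in the pints test suite may well differ.

import io
import sys

class StreamCapture(object):
    # Hypothetical sketch of the test helper used above, assuming it
    # redirects sys.stdout (and optionally sys.stderr) into in-memory
    # buffers; the real helper may differ.
    def __init__(self, stdout=True, stderr=False):
        self._stdout_enabled = stdout
        self._stderr_enabled = stderr
        self._stdout_text = self._stderr_text = None

    def __enter__(self):
        if self._stdout_enabled:
            self._old_stdout, sys.stdout = sys.stdout, io.StringIO()
        if self._stderr_enabled:
            self._old_stderr, sys.stderr = sys.stderr, io.StringIO()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self._stdout_enabled:
            self._stdout_text = sys.stdout.getvalue()
            sys.stdout = self._old_stdout
        if self._stderr_enabled:
            self._stderr_text = sys.stderr.getvalue()
            sys.stderr = self._old_stderr

    def text(self):
        # The tests index the result when both streams are captured,
        # so return a (stdout, stderr) tuple in that case.
        if self._stdout_enabled and self._stderr_enabled:
            return (self._stdout_text, self._stderr_text)
        return self._stdout_text
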
Example No. 2
    def test_file_writing_no_screen_txt(self):
        # Repeat without screen output, outside of csv mode
        with StreamCapture() as c:
            with TemporaryDirectory() as d:
                filename = d.path('test.csv')
                log = pints.Logger()
                log.set_filename(filename, csv=False)
                log.set_stream(None)
                log.add_counter('#', width=2)
                log.add_float('Lat.', width=1)
                log.add_long_float('Number', file_only=True)
                log.add_int('Val', width=4)
                log.add_counter('Count', max_value=12345)
                log.add_time('Time')
                log.add_string('Q', 3)
                log.log(*data)
                with open(filename, 'r') as f:
                    out = f.read()
        self.assertOutput(expected='', returned=c.text())
        self.assertOutput(expected=out3, returned=out)

        # Unset file output
        with StreamCapture() as c:
            with TemporaryDirectory() as d:
                filename = d.path('test.csv')
                log = pints.Logger()
                log.set_filename(filename, csv=False)
                log.set_filename(None)
                log.set_stream(None)
                log.add_counter('#', width=2)
                log.log(1)
                self.assertFalse(os.path.isfile(filename))
        self.assertOutput(expected='', returned=c.text())
        self.assertOutput(expected=out3, returned=out)
Example No. 3
    def _initialise_logger(self):
        """
        Initialises logger.
        """
        # Start logging
        self._logging = self._log_to_screen or self._log_filename
        if self._logging:

            if self._log_to_screen:
                # Show current settings
                print('Running ' + self._sampler.name())
                print('Number of active points: ' + str(self._n_active_points))
                print('Total number of iterations: ' + str(self._iterations))
                print('Total number of posterior samples: ' +
                      str(self._posterior_samples))

            # Set up logger
            self._logger = pints.Logger()
            if not self._log_to_screen:
                self._logger.set_stream(None)
            if self._log_filename:
                self._logger.set_filename(self._log_filename,
                                          csv=self._log_csv)

            # Add fields to log
            self._logger.add_counter('Iter.', max_value=self._iterations)
            self._logger.add_counter('Eval.', max_value=self._iterations * 10)
            self._logger.add_time('Time m:s')
            self._logger.add_float('Delta_log(z)')
            self._logger.add_float('Acceptance rate')
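
The method above follows the configure-then-log contract that `pints.Logger` enforces: all fields are added up front, and values are then fed in field order. A minimal standalone sketch of the same pattern, using only Logger calls that appear in these examples:

import time
import pints

# Minimal sketch of the configure-then-log pattern shown above.
logger = pints.Logger()
logger.add_counter('Iter.', max_value=1000)
logger.add_float('Delta_log(z)')
logger.add_time('Time m:s')

# One value per field, in the order the fields were added; a row is
# emitted once every field in it has a value.
start = time.time()
for i in range(3):
    logger.log(i, 0.5 / (1 + i), time.time() - start)
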
Example No. 4
    def test_method(self):

        # Create log pdf
        log_pdf = pints.toy.GaussianLogPDF([5, 5], [[4, 1], [1, 3]])

        # Create mcmc
        x0 = np.array([2, 2])
        sigma = [[3, 0], [0, 3]]
        mcmc = pints.NoUTurnMCMC(x0, sigma)

        # This method needs sensitivities
        self.assertTrue(mcmc.needs_sensitivities())

        # Perform short run, test logging while we are at it
        logger = pints.Logger()
        logger.set_stream(None)
        mcmc._log_init(logger)
        chain = []
        for i in range(2 * mcmc.number_adaption_steps()):
            x = mcmc.ask()
            fx, gr = log_pdf.evaluateS1(x)
            reply = mcmc.tell((fx, gr))
            mcmc._log_write(logger)
            if reply is not None:
                y, fy, ac = reply
                chain.append(y)
                recalc = log_pdf.evaluateS1(y)
                self.assertEqual(fy[0], recalc[0])
                self.assertTrue(np.all(fy[1] == recalc[1]))

        chain = np.array(chain)
        self.assertGreater(chain.shape[0], 1)
        self.assertEqual(chain.shape[1], len(x0))
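
The loop above is an instance of the generic ask/tell interface: `ask()` yields a point, the caller evaluates the log-pdf (with sensitivities here, since `needs_sensitivities()` is true), and `tell()` returns either None for an intermediate step or a (sample, evaluation, accepted) triple. A hedged standalone sketch of the same loop:

import numpy as np
import pints
import pints.toy

# Hedged sketch of the generic ask/tell loop from the test above.
log_pdf = pints.toy.GaussianLogPDF([0, 0], [[1, 0], [0, 1]])
mcmc = pints.NoUTurnMCMC(np.array([1.0, 1.0]))

chain = []
while len(chain) < 100:
    x = mcmc.ask()                    # point to evaluate
    fx, grad = log_pdf.evaluateS1(x)  # log-pdf value and gradient
    reply = mcmc.tell((fx, grad))     # None on intermediate steps
    if reply is not None:
        y, fy, accepted = reply       # sample, evaluation, accept flag
        chain.append(y)
chain = np.array(chain)
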
Example No. 5
    def test_all_simultaneously(self):
        # Normal use, all data at once
        with StreamCapture() as c:
            # Test logger with no fields
            log = pints.Logger()
            self.assertRaises(ValueError, log.log, 1)

            # Test logging output
            log.add_counter('#', width=2)
            log.add_float('Latitude', width=1)
            log.add_long_float('Number')
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)

            # Add all data in one go
            log.log(*data)
        self.assertOutput(expected=out1, returned=c.text())

        # Can't configure once logging
        self.assertRaises(RuntimeError, log.add_counter, 'a')
        self.assertRaises(RuntimeError, log.add_int, 'a')
        self.assertRaises(RuntimeError, log.add_float, 'a')
        self.assertRaises(RuntimeError, log.add_long_float, 'a')
        self.assertRaises(RuntimeError, log.add_time, 'a')
        self.assertRaises(RuntimeError, log.add_string, 'a', 3)
        self.assertRaises(RuntimeError, log.set_filename, 'a')
        self.assertRaises(RuntimeError, log.set_stream, sys.stdout)
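
The RuntimeError checks at the end verify that a Logger cannot be reconfigured once it has started logging. A toy illustration of that contract (not the pints implementation):

class LockOnFirstLog(object):
    # Toy illustration of the configure-then-log contract tested above;
    # not the pints implementation.
    def __init__(self):
        self._started = False
        self._fields = []

    def add_field(self, name):
        if self._started:
            raise RuntimeError('Cannot configure after logging has begun.')
        self._fields.append(name)

    def log(self, *values):
        if not self._fields:
            raise ValueError('Log must have at least one field.')
        self._started = True
        # ... buffer values and emit complete rows ...
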
Example No. 6
    def test_file_only_fields_hidden_on_screen(self):
        # Log with file-only fields, and shorter name
        with StreamCapture() as c:
            log = pints.Logger()
            log.add_counter('#', width=2)
            log.add_float('Lat.', width=1)
            log.add_long_float('Number', file_only=True)
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)
            log.log(*data)
        self.assertOutput(expected=out2, returned=c.text())
Example No. 7
    def test_partial_row_not_shown(self):
        # Normal use, all data at once, plus extra bit
        with StreamCapture() as c:
            log = pints.Logger()
            log.add_counter('#', width=2)
            log.add_float('Latitude', width=1)
            log.add_long_float('Number')
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)

            log.log(*data)
            log.log(1, 2, 3)  # not enough for more output!
        self.assertOutput(expected=out1, returned=c.text())
Example No. 8
    def test_field_by_field(self):
        # Normal use, data field by field
        with StreamCapture() as c:
            log = pints.Logger()
            log.add_counter('#', width=2)
            log.add_float('Latitude', width=1)
            log.add_long_float('Number')
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)

            # Add data cell by cell
            for d in data:
                log.log(d)
        self.assertOutput(expected=out1, returned=c.text())
Example No. 9
    def test_row_by_row(self):
        # Normal use, data row by row
        with StreamCapture() as c:
            log = pints.Logger()
            log.add_counter('#', width=2)
            log.add_float('Latitude', width=1)
            log.add_long_float('Number')
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)

            # Add data row by row
            n = 7
            for i in range(len(data) // n):
                log.log(*data[i * n:(i + 1) * n])
        self.assertOutput(expected=out1, returned=c.text())
Example No. 10
    def test_file_writing_csv(self):
        # Repeat in csv mode
        with StreamCapture() as c:
            with TemporaryDirectory() as d:
                filename = d.path('test.csv')
                log = pints.Logger()
                log.set_filename(filename, csv=True)
                log.add_counter('#', width=2)
                log.add_float('Lat.', width=1)
                log.add_long_float('Number', file_only=True)
                log.add_int('Val', width=4)
                log.add_counter('Count', max_value=12345)
                log.add_time('Time')
                log.add_string('Q', 3)
                log.log(*data)
                with open(filename, 'r') as f:
                    out = f.read()
        self.assertOutput(expected=out2, returned=c.text())
        self.assertOutput(expected=out4, returned=out)
Example No. 11
    def test_various_chunks(self):
        # Log in different sized chunks
        order = [3, 2, 1, 1, 4, 6, 3, 2, 6]
        self.assertEqual(sum(order), len(data))
        with StreamCapture() as c:
            log = pints.Logger()
            log.add_counter('#', width=2)
            log.add_float('Latitude', width=1)
            log.add_long_float('Number')
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)

            # Add data in different sized chunks
            offset = 0
            for n in order:
                log.log(*data[offset:offset + n])
                offset += n
        self.assertOutput(expected=out1, returned=c.text())
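
Examples 7 through 11 all exercise the same behaviour: values accumulate in an internal buffer, and a row is only written once every one of its fields has a value, so data can arrive cell by cell, row by row, or in arbitrary chunks. A toy sketch of that buffering (not the pints implementation):

class RowBuffer(object):
    # Toy sketch of row buffering, not the pints implementation: values
    # accumulate until one value per field is available, then a complete
    # row is emitted; leftovers wait for the next call.
    def __init__(self, n_fields):
        self._n_fields = n_fields
        self._buffer = []

    def log(self, *values):
        self._buffer.extend(values)
        while len(self._buffer) >= self._n_fields:
            row = self._buffer[:self._n_fields]
            self._buffer = self._buffer[self._n_fields:]
            print(' '.join(str(v) for v in row))

buf = RowBuffer(3)
buf.log(1, 2)     # partial row: nothing emitted yet
buf.log(3, 4, 5)  # emits '1 2 3'; 4 and 5 wait for the next value
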
Example No. 12
    def run(self):
        """
        Runs the MCMC sampler(s) and returns a number of Markov chains, each
        representing the distribution of the given log-pdf.
        """
        # Check stopping criteria
        has_stopping_criterion = False
        has_stopping_criterion |= (self._max_iterations is not None)
        if not has_stopping_criterion:
            raise ValueError('At least one stopping criterion must be set.')

        # Iteration and evaluation counting
        iteration = 0
        evaluations = 0

        # Choose method to evaluate
        f = self._log_pdf
        if self._needs_sensitivities:
            f = f.evaluateS1

        # Create evaluator object
        if self._parallel:
            # Use at most n_workers workers
            n_workers = min(self._n_workers, self._chains)
            evaluator = pints.ParallelEvaluator(f, n_workers=n_workers)
        else:
            evaluator = pints.SequentialEvaluator(f)

        # Initial phase
        if self._needs_initial_phase:
            for sampler in self._samplers:
                sampler.set_initial_phase(True)

        # Write chains to disk
        chain_loggers = []
        if self._chain_files:
            for filename in self._chain_files:
                cl = pints.Logger()
                cl.set_stream(None)
                cl.set_filename(filename, True)
                for k in range(self._n_parameters):
                    cl.add_float('p' + str(k))
                chain_loggers.append(cl)

        # Write evaluations to disk
        eval_loggers = []
        if self._evaluation_files:
            # Bayesian inference on a log-posterior? Then separate out the
            # prior so we can calculate the loglikelihood
            prior = None
            if isinstance(self._log_pdf, pints.LogPosterior):
                prior = self._log_pdf.log_prior()

            # Set up loggers
            for filename in self._evaluation_files:
                cl = pints.Logger()
                cl.set_stream(None)
                cl.set_filename(filename, True)
                if prior:
                    # Logposterior in first column, to be consistent with the
                    # non-Bayesian case
                    cl.add_float('logposterior')
                    cl.add_float('loglikelihood')
                    cl.add_float('logprior')
                else:
                    cl.add_float('logpdf')
                eval_loggers.append(cl)

            # Store last accepted logpdf, per chain
            current_logpdf = np.zeros(self._chains)
            current_prior = np.zeros(self._chains)

        # Set up progress reporting
        next_message = 0

        # Start logging
        logging = self._log_to_screen or self._log_filename
        if logging:
            if self._log_to_screen:
                print('Using ' + str(self._samplers[0].name()))
                print('Generating ' + str(self._chains) + ' chains.')
                if self._parallel:
                    print('Running in parallel with ' + str(n_workers) +
                          ' worker processes.')
                else:
                    print('Running in sequential mode.')
                if self._chain_files:
                    print('Writing chains to ' + self._chain_files[0] +
                          ' etc.')
                if self._evaluation_files:
                    print('Writing evaluations to ' +
                          self._evaluation_files[0] + ' etc.')

            # Set up logger
            logger = pints.Logger()
            if not self._log_to_screen:
                logger.set_stream(None)
            if self._log_filename:
                logger.set_filename(self._log_filename, csv=self._log_csv)

            # Add fields to log
            max_iter_guess = max(self._max_iterations or 0, 10000)
            max_eval_guess = max_iter_guess * self._chains
            logger.add_counter('Iter.', max_value=max_iter_guess)
            logger.add_counter('Eval.', max_value=max_eval_guess)
            for sampler in self._samplers:
                sampler._log_init(logger)
            logger.add_time('Time m:s')

        # Create chains
        # TODO Pre-allocate?
        chains = []

        # Start sampling
        timer = pints.Timer()
        running = True
        while running:
            # Initial phase
            # Note: self._initial_phase_iterations is None when no initial
            # phase is needed
            if iteration == self._initial_phase_iterations:
                for sampler in self._samplers:
                    sampler.set_initial_phase(False)
                if self._log_to_screen:
                    print('Initial phase completed.')

            # Get points
            if self._single_chain:
                xs = [sampler.ask() for sampler in self._samplers]
            else:
                xs = self._samplers[0].ask()

            # Calculate logpdfs
            fxs = evaluator.evaluate(xs)

            # Update evaluation count
            evaluations += len(fxs)

            # Update chains
            intermediate_step = False
            if self._single_chain:
                samples = np.array(
                    [s.tell(fxs[i]) for i, s in enumerate(self._samplers)])

                none_found = [x is None for x in samples]
                if any(none_found):
                    assert all(none_found)  # Can't mix None with samples
                    intermediate_step = True
            else:
                samples = self._samplers[0].tell(fxs)
                intermediate_step = samples is None

            # If no new samples were added, then no MCMC iteration was
            # performed, and so the iteration count shouldn't be updated,
            # logging shouldn't be triggered, and stopping criteria shouldn't
            # be checked
            if intermediate_step:
                continue

            # Add new samples to the chains
            chains.append(samples)

            # Write samples to disk
            for k, chain_logger in enumerate(chain_loggers):
                chain_logger.log(*samples[k])

            # Write evaluations to disk
            if self._evaluation_files:
                for k, eval_logger in enumerate(eval_loggers):
                    if np.all(xs[k] == samples[k]):
                        current_logpdf[k] = fxs[k]
                        if prior is not None:
                            current_prior[k] = prior(xs[k])
                    eval_logger.log(current_logpdf[k])
                    if prior is not None:
                        eval_logger.log(current_logpdf[k] - current_prior[k])
                        eval_logger.log(current_prior[k])

            # Show progress
            if logging and iteration >= next_message:
                # Log state
                logger.log(iteration, evaluations)
                for sampler in self._samplers:
                    sampler._log_write(logger)
                logger.log(timer.time())

                # Choose next logging point
                if iteration < self._message_warm_up:
                    next_message = iteration + 1
                else:
                    next_message = self._message_interval * (
                        1 + iteration // self._message_interval)

            # Update iteration count
            iteration += 1

            #
            # Check stopping criteria
            #

            # Maximum number of iterations
            if (self._max_iterations is not None
                    and iteration >= self._max_iterations):
                running = False
                halt_message = ('Halting: Maximum number of iterations (' +
                                str(iteration) + ') reached.')

            # TODO Add more stopping criteria

        # Log final state and show halt message
        if logging:
            logger.log(iteration, evaluations)
            for sampler in self._samplers:
                sampler._log_write(logger)
            logger.log(timer.time())
            if self._log_to_screen:
                print(halt_message)

        # Swap axes in chains, to get indices
        #  [chain, iteration, parameter]
        chains = np.array(chains)
        chains = chains.swapaxes(0, 1)

        # Return generated chains
        return chains
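
The progress-reporting arithmetic in run() logs every iteration during a short warm-up and afterwards snaps to the next multiple of the message interval. A standalone trace of that schedule, with illustrative values for the warm-up and interval:

# Standalone trace of the next_message schedule used above: log every
# iteration during warm-up, then at every multiple of the interval.
message_warm_up = 3
message_interval = 20

logged = []
next_message = 0
for iteration in range(100):
    if iteration >= next_message:
        logged.append(iteration)
        if iteration < message_warm_up:
            next_message = iteration + 1
        else:
            next_message = message_interval * (
                1 + iteration // message_interval)

print(logged)  # [0, 1, 2, 3, 20, 40, 60, 80]
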
Example No. 13
    def run(self):
        """ See :meth:`pints.MCMC.run()`. """

        # Check if settings are sensible
        max_post = 0.25 * (self._iterations + self._active_points)
        if self._posterior_samples > max_post:
            raise ValueError(
                'Number of posterior samples must not exceed 0.25 times (the'
                ' number of iterations + the number of active points).')

        # Set up progress reporting
        next_message = 0
        message_warm_up = 3
        message_interval = 20

        # Start logging
        logging = self._log_to_screen or self._log_filename
        if logging:
            # Create timer
            timer = pints.Timer()

            if self._log_to_screen:
                # Show current settings
                print('Running nested rejection sampling')
                print('Number of active points: ' + str(self._active_points))
                print('Total number of iterations: ' + str(self._iterations))
                print('Total number of posterior samples: ' + str(
                    self._posterior_samples))

            # Set up logger
            logger = pints.Logger()
            if not self._log_to_screen:
                logger.set_stream(None)
            if self._log_filename:
                logger.set_filename(self._log_filename, csv=self._log_csv)

            # Add fields to log
            logger.add_counter('Iter.', max_value=self._iterations)
            logger.add_counter('Eval.', max_value=self._iterations * 10)
            # TODO: Add other informative fields ?
            logger.add_time('Time m:s')

        # Problem dimension
        d = self._n_parameters

        # Generate initial random points by sampling from the prior
        m_active = np.zeros((self._active_points, d + 1))
        m_initial = self._log_prior.sample(self._active_points)
        for i in range(0, self._active_points):
            # Calculate likelihood
            m_active[i, d] = self._log_likelihood(m_initial[i, :])
            self._n_evals += 1

            # Show progress
            if logging and i >= next_message:
                # Log state
                logger.log(0, self._n_evals, timer.time())

                # Choose next logging point
                if i > message_warm_up:
                    next_message = message_interval * (
                        1 + i // message_interval)

        m_active[:, :-1] = m_initial

        # store all inactive points, along with their respective
        # log-likelihoods (hence, d+1)
        m_inactive = np.zeros((self._iterations, d + 1))

        # store weights
        w = np.zeros(self._active_points + self._iterations)

        # store X values (defined in [1])
        X = np.zeros(self._iterations + 1)
        X[0] = 1

        # log marginal likelihood holder
        v_log_Z = np.zeros(self._iterations + 1)

        # Run
        i_message = self._active_points - 1
        for i in range(0, self._iterations):
            a_running_log_likelihood = np.min(m_active[:, d])
            a_min_index = np.argmin(m_active[:, d])
            X[i + 1] = np.exp(-(i + 1) / self._active_points)
            w[i] = X[i] - X[i + 1]
            v_log_Z[i] = a_running_log_likelihood
            m_inactive[i, :] = m_active[a_min_index, :]

            # Independently sample params from the prior until
            # log_likelihood(params) > threshold.
            # Note: a_running_log_likelihood can be -inf, in which case the
            # while loop below is never entered.
            proposed = self._log_prior.sample()[0]
            log_likelihood = self._log_likelihood(proposed)
            self._n_evals += 1
            while log_likelihood < a_running_log_likelihood:
                proposed = self._log_prior.sample()[0]
                log_likelihood = self._log_likelihood(proposed)
                self._n_evals += 1
            m_active[a_min_index, :] = np.concatenate(
                (proposed, np.array([log_likelihood])))

            # Show progress
            if logging:
                i_message += 1
                if i_message >= next_message:
                    # Log state
                    logger.log(i_message, self._n_evals, timer.time())

                    # Choose next logging point
                    if i_message > message_warm_up:
                        next_message = message_interval * (
                            1 + i_message // message_interval)

        v_log_Z[self._iterations] = logsumexp(m_active[:, d])
        w[self._iterations:] = float(X[self._iterations]) / float(
            self._active_points)
        m_samples_all = np.vstack((m_inactive, m_active))
        log_Z = logsumexp(v_log_Z, b=w[0:(self._iterations + 1)])

        vP = np.exp(m_samples_all[:, d] - log_Z) * w
        m_theta = m_samples_all[:, :-1]
        vIndex = np.random.choice(
            range(0, self._iterations + self._active_points),
            self._posterior_samples, p=vP)
        m_posterior_samples = m_theta[vIndex, :]

        return m_posterior_samples, log_Z
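
The closing lines estimate the log marginal likelihood: the prior volume after i iterations is taken as X_i = exp(-i / n_active), each discarded point gets the shell weight w_i = X_i - X_{i+1}, and log Z is a weighted logsumexp of the recorded log-likelihoods. A simplified standalone sketch of that estimate (stand-in likelihood values; the final contribution of the surviving active points is ignored for brevity):

import numpy as np
from scipy.special import logsumexp

# Simplified sketch of the evidence estimate above; the log-likelihoods
# are stand-in values.
n_active = 100
n_iterations = 500
log_likelihoods = np.sort(np.random.randn(n_iterations))

X = np.exp(-np.arange(n_iterations + 1) / n_active)  # X[0] = 1
w = X[:-1] - X[1:]                                   # shell weights
log_Z = logsumexp(log_likelihoods, b=w)
print(log_Z)
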
Example No. 14
    def run(self):
        """ See :meth:`pints.MCMC.run()`. """

        # Reset total number of log_likelihood evaluations
        self._n_evals = 0

        # Check if settings make sense
        max_post = 0.25 * (self._iterations + self._active_points)
        if self._posterior_samples > max_post:
            raise ValueError(
                'Number of posterior samples must not exceed 0.25 times (the'
                ' number of iterations + the number of active points).')
        if self._rejection_samples > self._iterations:
            raise ValueError(
                'Number of rejection samples must not exceed number of'
                ' iterations.')

        # Set up progress reporting
        next_message = 0
        message_warm_up = 3
        message_interval = 20

        # Start logging
        logging = self._log_to_screen or self._log_filename
        if logging:
            # Create timer
            timer = pints.Timer()

            if self._log_to_screen:
                # Show current settings
                print('Running nested rejection sampling')
                print('Number of active points: ' + str(self._active_points))
                print('Total number of iterations: ' + str(self._iterations))
                print('Enlargement factor: ' + str(self._enlargement_factor))
                print('Total number of posterior samples: ' + str(
                    self._posterior_samples))

            # Set up logger
            logger = pints.Logger()
            if not self._log_to_screen:
                logger.set_stream(None)
            if self._log_filename:
                logger.set_filename(self._log_filename, csv=self._log_csv)

            # Add fields to log
            logger.add_counter('Iter.', max_value=self._iterations)
            logger.add_counter('Eval.', max_value=self._iterations * 10)
            # TODO: Add other informative fields ?
            logger.add_time('Time m:s')

        # Problem dimension
        d = self._n_parameters

        # Generate initial random points by sampling from the prior
        m_active = np.zeros((self._active_points, d + 1))
        m_initial = self._log_prior.sample(self._active_points)
        for i in range(0, self._active_points):
            # Evaluate log likelihood
            m_active[i, d] = self._log_likelihood(m_initial[i, :])
            self._n_evals += 1

            # Show progress
            if logging and i >= next_message:
                # Log state
                logger.log(0, self._n_evals, timer.time())

                # Choose next logging point
                if i > message_warm_up:
                    next_message = message_interval * (
                        1 + i // message_interval)

        m_active[:, :-1] = m_initial

        # store all inactive points, along with their respective
        # log-likelihoods (hence, d+1)
        m_inactive = np.zeros((self._iterations, d + 1))

        # store weights
        w = np.zeros(self._active_points + self._iterations)

        # store X values (defined in [1])
        X = np.zeros(self._iterations + 1)
        X[0] = 1

        # log marginal likelihood holder
        v_log_Z = np.zeros(self._iterations + 1)

        # Run
        i_message = self._active_points - 1
        for i in range(0, self._iterations):

            a_running_log_likelihood = np.min(m_active[:, d])
            a_min_index = np.argmin(m_active[:, d])
            X[i + 1] = np.exp(-(i + 1.0) / self._active_points)
            w[i] = X[i] - X[i + 1]
            v_log_Z[i] = a_running_log_likelihood
            m_inactive[i, :] = m_active[a_min_index, :]

            if (i + 1) % self._rejection_samples == 0:
                A, centroid = self._minimum_volume_ellipsoid(m_active[:, :d])

            if i > self._rejection_samples:
                if ((i + 1 - self._rejection_samples)
                        % self._ellipsoid_update_gap == 0):
                    A, centroid = self._minimum_volume_ellipsoid(
                        m_active[:, :d])

            if i < self._rejection_samples:
                # Start off with rejection sampling, while this is still very
                # efficient.
                m_active[a_min_index, :] = self._reject_sample_prior(
                    a_running_log_likelihood)
            else:
                # After a number of samples, switch to ellipsoid sampling.
                m_active[a_min_index, :] = \
                    self._reject_ellipsoid_sample_faster(
                        a_running_log_likelihood, m_active[:, :d],
                        self._enlargement_factor, A, centroid)

            # Show progress
            if logging:
                i_message += 1
                if i_message >= next_message:
                    # Log state
                    logger.log(i_message, self._n_evals, timer.time())

                    # Choose next logging point
                    if i_message > message_warm_up:
                        next_message = message_interval * (
                            1 + i_message // message_interval)

        v_log_Z[self._iterations] = logsumexp(m_active[:, d])
        w[self._iterations:] = \
            float(X[self._iterations]) / float(self._active_points)
        m_samples_all = np.vstack((m_inactive, m_active))
        logZ = logsumexp(v_log_Z, b=w[0:(self._iterations + 1)])

        vP = np.exp(m_samples_all[:, d] - logZ) * w
        mTheta = m_samples_all[:, :-1]
        vIndex = np.random.choice(
            range(0, self._iterations + self._active_points),
            self._posterior_samples, p=vP)
        m_posterior_samples = mTheta[vIndex, :]

        return m_posterior_samples, logZ
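
Here the bounding ellipsoid is fitted once `rejection_samples` draws have been made, and thereafter refitted every `ellipsoid_update_gap` iterations; note that both branches can fire on the same iteration. A standalone trace of the schedule above, with illustrative values:

# Standalone trace of the ellipsoid refit schedule used above.
rejection_samples = 100
ellipsoid_update_gap = 20

refits = []
for i in range(200):
    if (i + 1) % rejection_samples == 0:
        refits.append(i)
    if i > rejection_samples:
        if (i + 1 - rejection_samples) % ellipsoid_update_gap == 0:
            refits.append(i)

print(refits)  # [99, 119, 139, 159, 179, 199, 199]: both branches at 199
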
Example No. 15
    def run(self):
        """
        Runs the ABC sampler.
        """
        if self._max_iterations is None:
            raise ValueError("At least one stopping criterion must be set.")

        # Iteration and evaluation counting
        iteration = 0
        evaluations = 0
        accepted_count = 0

        # Choose method to evaluate
        f = self._error_measure

        # Create evaluator
        if self._parallel:
            n_workers = self._n_workers
            evaluator = pints.ParallelEvaluator(f, n_workers=n_workers)
        else:
            evaluator = pints.SequentialEvaluator(f)

        # Set up progress reporting
        next_message = 0

        # Start logging
        logging = self._log_to_screen or self._log_filename
        if logging:
            if self._log_to_screen:
                print('Using ' + str(self._sampler.name()))
                if self._parallel:
                    print('Running in parallel with ' + str(n_workers) +
                          ' worker processes.')
                else:
                    print('Running in sequential mode.')

            # Set up logger
            logger = pints.Logger()
            if not self._log_to_screen:
                logger.set_stream(None)
            if self._log_filename:
                logger.set_filename(self._log_filename, csv=self._log_csv)

            # Add fields to log
            max_iter_guess = max(self._max_iterations or 0, 10000)
            max_eval_guess = max_iter_guess
            logger.add_counter('Iter.', max_value=max_iter_guess)
            logger.add_counter('Eval.', max_value=max_eval_guess)
            logger.add_float('Acceptance rate')
            self._sampler._log_init(logger)
            logger.add_time('Time m:s')

        # Start sampling
        timer = pints.Timer()
        running = True

        # Specify the number of samples we want to get from the prior at
        # once; this depends on whether we are using parallelisation and
        # how many workers are being used.
        if self._parallel:
            n_requested_samples = self._n_workers
        else:
            n_requested_samples = 1

        samples = []
        # Sample until we find an acceptable sample
        while running:
            accepted_vals = None
            while accepted_vals is None:
                # Get points from prior
                xs = self._sampler.ask(n_requested_samples)

                # Simulate and get error
                fxs = evaluator.evaluate(xs)
                evaluations += self._n_workers

                # Tell sampler errors and get list of acceptable parameters
                accepted_vals = self._sampler.tell(fxs)

            accepted_count += len(accepted_vals)
            for val in accepted_vals:
                samples.append(val)

            iteration += 1

            # Log progress
            if logging and iteration >= next_message:
                # Log state
                logger.log(iteration, evaluations,
                           (accepted_count / evaluations))
                self._sampler._log_write(logger)
                logger.log(timer.time())

                # Choose next logging point
                if iteration < self._message_warm_up:
                    next_message = iteration + 1
                else:
                    next_message = self._message_interval * (
                        1 + iteration // self._message_interval)

            if iteration >= self._max_iterations:
                running = False
                halt_message = ('Halting: Maximum number of iterations (' +
                                str(iteration) + ') reached. Only ' +
                                str(accepted_count) + ' samples were' +
                                ' obtained.')
            elif accepted_count >= self._n_samples:
                running = False
                halt_message = ('Halting: target number of samples (' +
                                str(accepted_count) + ') reached.')

        # Log final state and show halt message
        if logging:
            logger.log(iteration, evaluations,
                       (accepted_count / evaluations))
            self._sampler._log_write(logger)
            logger.log(timer.time())
            if self._log_to_screen:
                print(halt_message)
        samples = np.array(samples)
        return samples
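
This sampler halts on whichever criterion triggers first: the iteration cap or the target number of accepted samples. Because acceptances arrive in batches from tell(), the target can be overshot, so slightly more than n_samples values may be returned. A toy trace of that interplay:

import random

# Toy trace of the dual stopping rule above: stop at max_iterations or
# once accepted_count reaches n_samples, whichever comes first.
random.seed(1)
max_iterations, n_samples = 1000, 10

accepted_count = iteration = 0
while True:
    accepted_count += random.randint(0, 3)  # stand-in for a tell() batch
    iteration += 1
    if iteration >= max_iterations:
        print('Halting: iteration cap at', iteration)
        break
    if accepted_count >= n_samples:
        print('Halting: got', accepted_count, 'samples in',
              iteration, 'iterations')
        break
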
Example No. 16
    def run(self):
        """
        Runs the optimisation, returns a tuple ``(x_best, f_best)``.

        An optional ``callback`` function can be passed in that will be called
        at the end of every iteration. The callback should take the arguments
        ``(iteration, optimiser)``, where ``iteration`` is the iteration count
        (an integer) and ``optimiser`` is the optimiser object.
        """
        # Can only run once for each controller instance
        if self._has_run:
            raise RuntimeError("Controller is valid for single use only")
        self._has_run = True

        # Check stopping criteria
        has_stopping_criterion = False
        has_stopping_criterion |= (self._max_iterations is not None)
        has_stopping_criterion |= (self._unchanged_max_iterations is not None)
        has_stopping_criterion |= (self._max_evaluations is not None)
        has_stopping_criterion |= (self._threshold is not None)
        if not has_stopping_criterion:
            raise ValueError('At least one stopping criterion must be set.')

        # Iterations and function evaluations
        iteration = 0
        evaluations = 0

        # Unchanged iterations count (used for stopping or just for
        # information)
        unchanged_iterations = 0

        # Choose method to evaluate
        f = self._function
        if self._needs_sensitivities:
            f = f.evaluateS1

        # Create evaluator object
        if self._parallel:
            # Get number of workers
            n_workers = self._n_workers

            # For population based optimisers, don't use more workers than
            # particles!
            if isinstance(self._optimiser, PopulationBasedOptimiser):
                n_workers = min(n_workers, self._optimiser.population_size())
            evaluator = pints.ParallelEvaluator(f, n_workers=n_workers)
        else:
            evaluator = pints.SequentialEvaluator(f)

        # Keep track of current best and best-guess scores.
        fb = fg = float('inf')

        # Internally we always minimise! Keep a 2nd value to show the user.
        fb_user, fg_user = (fb, fg) if self._minimising else (-fb, -fg)

        # Keep track of the last significant change
        f_sig = float('inf')

        # Set up progress reporting
        next_message = 0

        # Start logging
        logging = self._log_to_screen or self._log_filename
        if logging:
            if self._log_to_screen:
                # Show direction
                if self._minimising:
                    print('Minimising error measure')
                else:
                    print('Maximising LogPDF')

                # Show method
                print('Using ' + str(self._optimiser.name()))

                # Show parallelisation
                if self._parallel:
                    print('Running in parallel with ' + str(n_workers) +
                          ' worker processes.')
                else:
                    print('Running in sequential mode.')

            # Show population size
            pop_size = 1
            if isinstance(self._optimiser, PopulationBasedOptimiser):
                pop_size = self._optimiser.population_size()
                if self._log_to_screen:
                    print('Population size: ' + str(pop_size))

            # Set up logger
            logger = pints.Logger()
            if not self._log_to_screen:
                logger.set_stream(None)
            if self._log_filename:
                logger.set_filename(self._log_filename, csv=self._log_csv)

            # Add fields to log
            max_iter_guess = max(self._max_iterations or 0, 10000)
            max_eval_guess = max(
                self._max_evaluations or 0, max_iter_guess * pop_size)
            logger.add_counter('Iter.', max_value=max_iter_guess)
            logger.add_counter('Eval.', max_value=max_eval_guess)
            logger.add_float('Best')
            logger.add_float('Current')
            self._optimiser._log_init(logger)
            logger.add_time('Time m:s')

        # Start searching
        timer = pints.Timer()
        running = True
        try:
            while running:
                # Get points
                xs = self._optimiser.ask()

                # Calculate scores
                fs = evaluator.evaluate(xs)

                # Perform iteration
                self._optimiser.tell(fs)

                # Update current scores
                fb = self._optimiser.f_best()
                fg = self._optimiser.f_guessed()
                fb_user, fg_user = (fb, fg) if self._minimising else (-fb, -fg)

                # Check for significant changes
                f_new = fg if self._use_f_guessed else fb
                if np.abs(f_new - f_sig) >= self._unchanged_threshold:
                    unchanged_iterations = 0
                    f_sig = f_new
                else:
                    unchanged_iterations += 1

                # Update evaluation count
                evaluations += len(fs)

                # Show progress
                if logging and iteration >= next_message:
                    # Log state
                    logger.log(iteration, evaluations, fb_user, fg_user)
                    self._optimiser._log_write(logger)
                    logger.log(timer.time())

                    # Choose next logging point
                    if iteration < self._message_warm_up:
                        next_message = iteration + 1
                    else:
                        next_message = self._message_interval * (
                            1 + iteration // self._message_interval)

                # Update iteration count
                iteration += 1

                #
                # Check stopping criteria
                #

                # Maximum number of iterations
                if (self._max_iterations is not None and
                        iteration >= self._max_iterations):
                    running = False
                    halt_message = ('Maximum number of iterations ('
                                    + str(iteration) + ') reached.')

                # Maximum number of iterations without significant change
                halt = (self._unchanged_max_iterations is not None and
                        unchanged_iterations >= self._unchanged_max_iterations)
                if running and halt:
                    running = False
                    halt_message = ('No significant change for ' +
                                    str(unchanged_iterations) + ' iterations.')

                # Maximum number of evaluations
                if (self._max_evaluations is not None and
                        evaluations >= self._max_evaluations):
                    running = False
                    halt_message = (
                        'Maximum number of evaluations ('
                        + str(self._max_evaluations) + ') reached.')

                # Threshold value
                halt = (self._threshold is not None
                        and f_new < self._threshold)
                if running and halt:
                    running = False
                    halt_message = ('Objective function crossed threshold: '
                                    + str(self._threshold) + '.')

                # Error in optimiser
                error = self._optimiser.stop()
                if error:   # pragma: no cover
                    running = False
                    halt_message = str(error)

                elif self._callback is not None:
                    self._callback(iteration - 1, self._optimiser)

        except (Exception, SystemExit, KeyboardInterrupt):  # pragma: no cover
            # Unexpected end!
            # Show last result and exit
            print('\n' + '-' * 40)
            print('Unexpected termination.')
            print('Current score: ' + str(fg_user))
            print('Current position:')

            # Show current parameters
            x_user = self._optimiser.x_guessed()
            if self._transformation is not None:
                x_user = self._transformation.to_model(x_user)
            for p in x_user:
                print(pints.strfloat(p))
            print('-' * 40)
            raise

        # Stop timer
        self._time = timer.time()

        # Log final values and show halt message
        if logging:
            if iteration - 1 < next_message:
                logger.log(iteration, evaluations, fb_user, fg_user)
                self._optimiser._log_write(logger)
                logger.log(self._time)
            if self._log_to_screen:
                print('Halting: ' + halt_message)

        # Save post-run statistics
        self._evaluations = evaluations
        self._iterations = iteration

        # Get best parameters
        if self._use_f_guessed:
            x = self._optimiser.x_guessed()
            f = self._optimiser.f_guessed()
        else:
            x = self._optimiser.x_best()
            f = self._optimiser.f_best()

        # Inverse transform search parameters
        if self._transformation is not None:
            x = self._transformation.to_model(x)

        # Return best position and score
        return x, f if self._minimising else -f
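
For context, this run() method is typically driven through the controller's public interface rather than called in isolation. A hedged usage sketch, assuming the usual pints entry points (exact names may vary between versions):

import pints
import pints.toy

# Hedged usage sketch; assumes the usual pints entry points.
error = pints.toy.ParabolicError()    # toy error measure
x0 = [1.0, 1.0]                       # starting point

opt = pints.OptimisationController(error, x0)
opt.set_max_iterations(100)           # at least one stopping criterion
opt.set_log_to_screen(False)
x_best, f_best = opt.run()
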
Example No. 17
    def run(self):
        """
        Runs the MCMC sampler(s) and returns the result.

        By default, this method returns an array of shape ``(n_chains,
        n_iterations, n_parameters)``.
        If storing chains to memory has been disabled with
        :meth:`set_chain_storage`, then ``None`` is returned instead.
        """
        # Check stopping criteria
        has_stopping_criterion = False
        has_stopping_criterion |= (self._max_iterations is not None)
        if not has_stopping_criterion:
            raise ValueError('At least one stopping criterion must be set.')

        # Iteration and evaluation counting
        iteration = 0
        n_evaluations = 0

        # Choose method to evaluate
        f = self._log_pdf
        if self._needs_sensitivities:
            f = f.evaluateS1

        # Create evaluator object
        if self._parallel:
            # Use at most n_workers workers
            n_workers = min(self._n_workers, self._n_chains)
            evaluator = pints.ParallelEvaluator(f, n_workers=n_workers)
        else:
            evaluator = pints.SequentialEvaluator(f)

        # Initial phase
        if self._needs_initial_phase:
            for sampler in self._samplers:
                sampler.set_initial_phase(True)

        # Storing evaluations to memory or disk
        prior = None
        store_evaluations = \
            self._evaluations_in_memory or self._evaluation_files
        if store_evaluations:
            # Bayesian inference on a log-posterior? Then separate out the
            # prior so we can calculate the loglikelihood
            if isinstance(self._log_pdf, pints.LogPosterior):
                prior = self._log_pdf.log_prior()

            # Store last accepted logpdf, per chain
            current_logpdf = np.zeros(self._n_chains)
            current_prior = np.zeros(self._n_chains)

        # Write chains to disk
        chain_loggers = []
        if self._chain_files:
            for filename in self._chain_files:
                cl = pints.Logger()
                cl.set_stream(None)
                cl.set_filename(filename, True)
                for k in range(self._n_parameters):
                    cl.add_float('p' + str(k))
                chain_loggers.append(cl)

        # Write evaluations to disk
        eval_loggers = []
        if self._evaluation_files:
            for filename in self._evaluation_files:
                cl = pints.Logger()
                cl.set_stream(None)
                cl.set_filename(filename, True)
                if prior:
                    # Logposterior in first column, to be consistent with the
                    # non-Bayesian case
                    cl.add_float('logposterior')
                    cl.add_float('loglikelihood')
                    cl.add_float('logprior')
                else:
                    cl.add_float('logpdf')
                eval_loggers.append(cl)

        # Set up progress reporting
        next_message = 0

        # Start logging
        logging = self._log_to_screen or self._log_filename
        if logging:
            if self._log_to_screen:
                print('Using ' + str(self._samplers[0].name()))
                print('Generating ' + str(self._n_chains) + ' chains.')
                if self._parallel:
                    print('Running in parallel with ' + str(n_workers) +
                          ' worker processes.')
                else:
                    print('Running in sequential mode.')
                if self._chain_files:
                    print('Writing chains to ' + self._chain_files[0] +
                          ' etc.')
                if self._evaluation_files:
                    print('Writing evaluations to ' +
                          self._evaluation_files[0] + ' etc.')

            # Set up logger
            logger = pints.Logger()
            if not self._log_to_screen:
                logger.set_stream(None)
            if self._log_filename:
                logger.set_filename(self._log_filename, csv=self._log_csv)

            # Add fields to log
            max_iter_guess = max(self._max_iterations or 0, 10000)
            max_eval_guess = max_iter_guess * self._n_chains
            logger.add_counter('Iter.', max_value=max_iter_guess)
            logger.add_counter('Eval.', max_value=max_eval_guess)
            for sampler in self._samplers:
                sampler._log_init(logger)
            logger.add_time('Time m:s')

        # Pre-allocate arrays for chain storage
        if self._chains_in_memory:
            # Store full chains
            samples = np.zeros(
                (self._n_chains, self._max_iterations, self._n_parameters))
        else:
            # Store only the current iteration
            samples = np.zeros((self._n_chains, self._n_parameters))

        # Pre-allocate arrays for evaluation storage
        if self._evaluations_in_memory:
            if prior:
                # Store posterior, likelihood, prior
                evaluations = np.zeros(
                    (self._n_chains, self._max_iterations, 3))
            else:
                # Store pdf
                evaluations = np.zeros((self._n_chains, self._max_iterations))

        # Some samplers need intermediate steps, where None is returned instead
        # of a sample. Samplers can run asynchronously, so that one returns
        # None while another returns a sample.
        # To deal with this, we maintain a list of 'active' samplers that have
        # not reached `max_iterations` yet, and store the number of samples we
        # have in each chain.
        if self._single_chain:
            active = list(range(self._n_chains))
            n_samples = [0] * self._n_chains

        # Start sampling
        timer = pints.Timer()
        running = True
        while running:
            # Initial phase
            # Note: self._initial_phase_iterations is None when no initial
            # phase is needed
            if iteration == self._initial_phase_iterations:
                for sampler in self._samplers:
                    sampler.set_initial_phase(False)
                if self._log_to_screen:
                    print('Initial phase completed.')

            # Get points
            if self._single_chain:
                xs = [self._samplers[i].ask() for i in active]
            else:
                xs = self._samplers[0].ask()

            # Calculate logpdfs
            fxs = evaluator.evaluate(xs)

            # Update evaluation count
            n_evaluations += len(fxs)

            # Update chains
            if self._single_chain:
                # Single chain

                # Check and update the individual chains
                xs_iterator = iter(xs)
                fxs_iterator = iter(fxs)
                for i in list(active):  # new list: active may be modified
                    x = next(xs_iterator)
                    fx = next(fxs_iterator)
                    y = self._samplers[i].tell(fx)

                    if y is not None:
                        # Store sample in memory
                        if self._chains_in_memory:
                            samples[i][n_samples[i]] = y
                        else:
                            samples[i] = y

                        # Update current evaluations
                        if store_evaluations:
                            # Check if accepted; if so, update the logpdf
                            # and prior values to be logged
                            accepted = np.all(y == x)
                            if accepted:
                                current_logpdf[i] = fx
                                if prior is not None:
                                    current_prior[i] = prior(y)

                            # Calculate evaluations to log
                            e = current_logpdf[i]
                            if prior is not None:
                                e = [
                                    e, current_logpdf[i] - current_prior[i],
                                    current_prior[i]
                                ]

                        # Store evaluations in memory
                        if self._evaluations_in_memory:
                            evaluations[i][n_samples[i]] = e

                        # Write evaluations to disk
                        if self._evaluation_files:
                            if prior is None:
                                eval_loggers[i].log(e)
                            else:
                                eval_loggers[i].log(*e)

                        # Stop adding samples if maximum number reached
                        n_samples[i] += 1
                        if n_samples[i] == self._max_iterations:
                            active.remove(i)

                # This is an intermediate step until the slowest sampler has
                # produced a new sample since the last `iteration`.
                intermediate_step = min(n_samples) <= iteration

            else:
                # Multi-chain methods

                # Get all chains samples at once
                ys = self._samplers[0].tell(fxs)
                intermediate_step = ys is None

                if not intermediate_step:
                    # Store samples in memory
                    if self._chains_in_memory:
                        samples[:, iteration] = ys
                    else:
                        samples = ys

                    # Update current evaluations
                    if store_evaluations:
                        es = []
                        for i, y in enumerate(ys):
                            # Check if accepted; if so, update the logpdf
                            # and prior values to be logged
                            accepted = np.all(xs[i] == y)
                            if accepted:
                                current_logpdf[i] = fxs[i]
                                if prior is not None:
                                    current_prior[i] = prior(ys[i])

                            # Calculate evaluations to log
                            e = current_logpdf[i]
                            if prior is not None:
                                e = [
                                    e, current_logpdf[i] - current_prior[i],
                                    current_prior[i]
                                ]
                            es.append(e)

                    # Write evaluations to memory
                    if self._evaluations_in_memory:
                        for i, e in enumerate(es):
                            evaluations[i, iteration] = e

                    # Write evaluations to disk
                    if self._evaluation_files:
                        if prior is None:
                            for i, eval_logger in enumerate(eval_loggers):
                                eval_logger.log(es[i])
                        else:
                            for i, eval_logger in enumerate(eval_loggers):
                                eval_logger.log(*es[i])

            # If no new samples were added, then no MCMC iteration was
            # performed, and so the iteration count shouldn't be updated,
            # logging shouldn't be triggered, and stopping criteria shouldn't
            # be checked
            if intermediate_step:
                continue

            # Write samples to disk
            if self._chains_in_memory:
                for i, chain_logger in enumerate(chain_loggers):
                    chain_logger.log(*samples[i][iteration])
            else:
                for i, chain_logger in enumerate(chain_loggers):
                    chain_logger.log(*samples[i])

            # Show progress
            if logging and iteration >= next_message:
                # Log state
                logger.log(iteration, n_evaluations)
                for sampler in self._samplers:
                    sampler._log_write(logger)
                logger.log(timer.time())

                # Choose next logging point
                if iteration < self._message_warm_up:
                    next_message = iteration + 1
                else:
                    next_message = self._message_interval * (
                        1 + iteration // self._message_interval)

            # Update iteration count
            iteration += 1

            # Check requested number of samples
            if (self._max_iterations is not None
                    and iteration >= self._max_iterations):
                running = False
                halt_message = ('Halting: Maximum number of iterations (' +
                                str(iteration) + ') reached.')

        # Log final state and show halt message
        if logging:
            logger.log(iteration, n_evaluations)
            for sampler in self._samplers:
                sampler._log_write(logger)
            logger.log(timer.time())
            if self._log_to_screen:
                print(halt_message)

        # Store generated chains in memory
        if self._chains_in_memory:
            self._samples = samples

        # Store evaluations in memory
        if self._evaluations_in_memory:
            self._evaluations = evaluations

        # Return generated chains
        return samples if self._chains_in_memory else None
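
A note on the three-column evaluation rows built above: they rely on the
identity log-posterior = log-likelihood + log-prior (up to a normalising
constant), so the likelihood column is recovered by subtracting the prior
from the logged pdf value. A minimal standalone sketch with made-up numbers:

    # Hypothetical values for one chain at one iteration
    current_logpdf = -12.5   # log-posterior reported by the sampler
    current_prior = -1.5     # log-prior at the same point

    # log-likelihood = log-posterior - log-prior, exactly as in
    # ``current_logpdf[i] - current_prior[i]`` above
    row = [current_logpdf, current_logpdf - current_prior, current_prior]
    print(row)  # [-12.5, -11.0, -1.5]
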
Example no. 18
    def run(self, returnLL=False):
        """
        Runs the MCMC sampler(s) and returns a number of Markov chains, each
        representing the distribution of the given log-pdf. If ``returnLL``
        is ``True``, the log-pdf values evaluated at the returned samples
        are returned alongside the chains.
        """
        # Check stopping criteria
        has_stopping_criterion = False
        has_stopping_criterion |= (self._max_iterations is not None)
        if not has_stopping_criterion:
            raise ValueError('At least one stopping criterion must be set.')

        # Iteration and evaluation counting
        iteration = 0
        evaluations = 0

        # Create evaluator object
        if self._parallel:
            # Use at most n_workers workers
            n_workers = min(self._n_workers, self._chains)
            evaluator = pints.ParallelEvaluator(self._log_pdf,
                                                n_workers=n_workers)
        else:
            evaluator = pints.SequentialEvaluator(self._log_pdf)

        # Initial phase
        if self._needs_initial_phase:
            for sampler in self._samplers:
                sampler.set_initial_phase(True)

        # Set up progress reporting
        next_message = 0

        # Start logging
        logging = self._log_to_screen or self._log_filename
        if logging:
            if self._log_to_screen:
                print('Using ' + str(self._samplers[0].name()))
                print('Generating ' + str(self._chains) + ' chains.')
                if self._parallel:
                    print('Running in parallel with ' + str(n_workers) +
                          ' worker processes.')
                else:
                    print('Running in sequential mode.')

            # Set up logger
            logger = pints.Logger()
            if not self._log_to_screen:
                logger.set_stream(None)
            if self._log_filename:
                logger.set_filename(self._log_filename, csv=self._log_csv)

            # Add fields to log
            max_iter_guess = max(self._max_iterations or 0, 10000)
            max_eval_guess = max_iter_guess * self._chains
            logger.add_counter('Iter.', max_value=max_iter_guess)
            logger.add_counter('Eval.', max_value=max_eval_guess)
            for sampler in self._samplers:
                sampler._log_init(logger)
            logger.add_time('Time m:s')

        # Create chains
        # TODO Pre-allocate?
        # TODO Thinning
        # TODO Advanced logging
        LLs = []
        chains = []

        # Start sampling
        timer = pints.Timer()
        running = True
        while running:
            # Initial phase
            if (self._needs_initial_phase
                    and iteration == self._initial_phase_iterations):
                for sampler in self._samplers:
                    sampler.set_initial_phase(False)
                if self._log_to_screen:
                    print('Initial phase completed.')

            # Get points
            if self._single_chain:
                xs = [sampler.ask() for sampler in self._samplers]
            else:
                xs = self._samplers[0].ask()

            # Calculate scores
            fxs = evaluator.evaluate(xs)

            # Perform iteration(s)
            if self._single_chain:
                samples = np.array(
                    [s.tell(fxs[i]) for i, s in enumerate(self._samplers)])
            else:
                samples = self._samplers[0].tell(fxs)
            if returnLL:
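                # Evaluate the log-pdf at the points returned by tell();
                # note these extra evaluations are not included in the
                # evaluation count updated below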
                LLs.append(evaluator.evaluate(samples))
            chains.append(samples)

            # Update evaluation count
            evaluations += len(fxs)

            # Show progress
            if logging and iteration >= next_message:
                # Log state
                logger.log(iteration, evaluations)
                for sampler in self._samplers:
                    sampler._log_write(logger)
                logger.log(timer.time())

                # Choose next logging point
                if iteration < self._message_warm_up:
                    next_message = iteration + 1
                else:
                    next_message = self._message_rate * (
                        1 + iteration // self._message_rate)

            # Update iteration count
            iteration += 1

            #
            # Check stopping criteria
            #

            # Maximum number of iterations
            if (self._max_iterations is not None
                    and iteration >= self._max_iterations):
                running = False
                halt_message = ('Halting: Maximum number of iterations (' +
                                str(iteration) + ') reached.')

            # TODO Add more stopping criteria

        # Log final state and show halt message
        if logging:
            logger.log(iteration, evaluations)
            for sampler in self._samplers:
                sampler._log_write(logger)
            logger.log(timer.time())
            if self._log_to_screen:
                print(halt_message)

        # Swap axes in chains, to get indices
        #  [chain, iteration, parameter]
        chains = np.array(chains)
        chains = chains.swapaxes(0, 1)
        if returnLL:
            # Swap axes in LLs too, from [iteration, chain] to
            # [chain, iteration], for consistency with the returned chains
            LLs = np.array(LLs)
            LLs = LLs.swapaxes(0, 1)
            return chains, LLs

        # Return generated chains
        return chains
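
Example no. 18 appends one array of per-chain samples per iteration, so
``np.array(chains)`` has shape [iteration, chain, parameter], and the final
``swapaxes(0, 1)`` reorders it to the [chain, iteration, parameter]
convention used by the return value. A small standalone illustration with
toy shapes (no sampler involved):

    import numpy as np

    n_iterations, n_chains, n_parameters = 100, 3, 2

    # Mimic the run loop: one (n_chains, n_parameters) array per iteration
    chains = [np.random.randn(n_chains, n_parameters)
              for _ in range(n_iterations)]

    chains = np.array(chains)       # (100, 3, 2): [iteration, chain, param]
    chains = chains.swapaxes(0, 1)  # (3, 100, 2): [chain, iteration, param]
    print(chains.shape)             # (3, 100, 2)
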
Example no. 19
    def run(self):
        """
        Runs the optimisation, returns a tuple ``(xbest, fbest)``.
        """
        # Check stopping criteria
        has_stopping_criterion = False
        has_stopping_criterion |= (self._max_iterations is not None)
        has_stopping_criterion |= (self._max_unchanged_iterations is not None)
        has_stopping_criterion |= (self._threshold is not None)
        if not has_stopping_criterion:
            raise ValueError('At least one stopping criterion must be set.')

        # Iterations and function evaluations
        iteration = 0
        evaluations = 0

        # Unchanged iterations count (used for stopping or just for
        # information)
        unchanged_iterations = 0

        # Create evaluator object
        if self._parallel:
            # Get number of workers
            n_workers = self._n_workers

            # For population based optimisers, don't use more workers than
            # particles!
            if isinstance(self._optimiser, PopulationBasedOptimiser):
                n_workers = min(n_workers, self._optimiser.population_size())
            evaluator = pints.ParallelEvaluator(self._function,
                                                n_workers=n_workers)
        else:
            evaluator = pints.SequentialEvaluator(self._function)

        # Keep track of best position and score
        fbest = float('inf')

        # Internally we always minimise! Keep a 2nd value to show the user
        fbest_user = fbest if self._minimising else -fbest

        # Set up progress reporting
        next_message = 0

        # Start logging
        logging = self._log_to_screen or self._log_filename
        if logging:
            if self._log_to_screen:
                # Show direction
                if self._minimising:
                    print('Minimising error measure')
                else:
                    print('Maximising LogPDF')

                # Show method
                print('Using ' + str(self._optimiser.name()))

                # Show parallelisation
                if self._parallel:
                    print('Running in parallel with ' + str(n_workers) +
                          ' worker processes.')
                else:
                    print('Running in sequential mode.')

            # Show population size
            pop_size = 1
            if isinstance(self._optimiser, PopulationBasedOptimiser):
                pop_size = self._optimiser.population_size()
                if self._log_to_screen:
                    print('Population size: ' + str(pop_size))

            # Set up logger
            logger = pints.Logger()
            if not self._log_to_screen:
                logger.set_stream(None)
            if self._log_filename:
                logger.set_filename(self._log_filename, csv=self._log_csv)

            # Add fields to log
            max_iter_guess = max(self._max_iterations or 0, 10000)
            max_eval_guess = max_iter_guess * pop_size
            logger.add_counter('Iter.', max_value=max_iter_guess)
            logger.add_counter('Eval.', max_value=max_eval_guess)
            logger.add_float('Best')
            self._optimiser._log_init(logger)
            logger.add_time('Time m:s')

        # Start searching
        timer = pints.Timer()
        running = True
        try:
            while running:
                # Get points
                xs = self._optimiser.ask()

                # Calculate scores
                fs = evaluator.evaluate(xs)

                # Perform iteration
                self._optimiser.tell(fs)

                # Check if new best found
                fnew = self._optimiser.fbest()
                if fnew < fbest:
                    # Check if this counts as a significant change
                    if np.abs(fnew - fbest) < self._min_significant_change:
                        unchanged_iterations += 1
                    else:
                        unchanged_iterations = 0

                    # Update best
                    fbest = fnew

                    # Update user value of fbest
                    fbest_user = fbest if self._minimising else -fbest
                else:
                    unchanged_iterations += 1

                # Update evaluation count
                evaluations += len(fs)

                # Show progress
                if logging and iteration >= next_message:
                    # Log state
                    logger.log(iteration, evaluations, fbest_user)
                    self._optimiser._log_write(logger)
                    logger.log(timer.time())

                    # Choose next logging point
                    if iteration < self._message_warm_up:
                        next_message = iteration + 1
                    else:
                        next_message = self._message_interval * (
                            1 + iteration // self._message_interval)

                # Update iteration count
                iteration += 1

                #
                # Check stopping criteria
                #

                # Maximum number of iterations
                if (self._max_iterations is not None
                        and iteration >= self._max_iterations):
                    running = False
                    halt_message = ('Halting: Maximum number of iterations (' +
                                    str(iteration) + ') reached.')

                # Maximum number of iterations without significant change
                halt = (self._max_unchanged_iterations is not None and
                        unchanged_iterations >= self._max_unchanged_iterations)
                if halt:
                    running = False
                    halt_message = ('Halting: No significant change for ' +
                                    str(unchanged_iterations) + ' iterations.')

                # Threshold value
                if self._threshold is not None and fbest < self._threshold:
                    running = False
                    halt_message = ('Halting: Objective function crossed'
                                    ' threshold: ' + str(self._threshold) +
                                    '.')

                # Error in optimiser
                error = self._optimiser.stop()
                if error:  # pragma: no cover
                    running = False
                    halt_message = ('Halting: ' + str(error))

        except (Exception, SystemExit, KeyboardInterrupt):  # pragma: no cover
            # Unexpected end!
            # Show last result and exit
            print('\n' + '-' * 40)
            print('Unexpected termination.')
            print('Current best score: ' + str(fbest))
            print('Current best position:')
            for p in self._optimiser.xbest():
                print(pints.strfloat(p))
            print('-' * 40)
            raise
        time_taken = timer.time()

        # Log final values and show halt message
        if logging:
            logger.log(iteration, evaluations, fbest_user)
            self._optimiser._log_write(logger)
            logger.log(time_taken)
            if self._log_to_screen:
                print(halt_message)

        # Save post-run statistics
        self._evaluations = evaluations
        self._iterations = iteration
        self._time = time_taken

        # Return best position and score
        return self._optimiser.xbest(), fbest_user
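
The 'no significant change' criterion above only resets its counter when the
best score improves by at least the significant-change threshold; smaller
improvements still count as unchanged iterations. A standalone sketch of
this bookkeeping on a made-up score sequence (the threshold and limit are
illustrative values, not defaults):

    import numpy as np

    min_significant_change = 1e-6   # illustrative threshold
    max_unchanged = 3               # illustrative stopping limit

    fbest = float('inf')
    unchanged_iterations = 0

    # Made-up best scores: two real improvements, then stagnation
    for fnew in [10.0, 5.0, 5.0 - 1e-9, 5.0 - 2e-9, 5.0 - 3e-9]:
        if fnew < fbest:
            # Tiny improvements do not reset the counter
            if np.abs(fnew - fbest) < min_significant_change:
                unchanged_iterations += 1
            else:
                unchanged_iterations = 0
            fbest = fnew
        else:
            unchanged_iterations += 1
        if unchanged_iterations >= max_unchanged:
            print('Halting: No significant change for ' +
                  str(unchanged_iterations) + ' iterations.')
            break
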
Example no. 20
    def test_logger(self):
        # Normal use, all data at once
        with StreamCapture() as c:
            # Test logger with no fields
            log = pints.Logger()
            self.assertRaises(ValueError, log.log, 1)

            # Test logging output
            log.add_counter('#', width=2)
            log.add_float('Latitude', width=1)
            log.add_long_float('Number')
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)

            # Add all data in one go
            log.log(*data)
        self.assertOutput(expected=out1, returned=c.text())

        # Can't configure once logging
        self.assertRaises(RuntimeError, log.add_counter, 'a')
        self.assertRaises(RuntimeError, log.add_int, 'a')
        self.assertRaises(RuntimeError, log.add_float, 'a')
        self.assertRaises(RuntimeError, log.add_long_float, 'a')
        self.assertRaises(RuntimeError, log.add_time, 'a')
        self.assertRaises(RuntimeError, log.add_string, 'a', 3)
        self.assertRaises(RuntimeError, log.set_filename, 'a')
        self.assertRaises(RuntimeError, log.set_stream, sys.stdout)

        # Normal use, all data at once, plus extra bit
        with StreamCapture() as c:
            log = pints.Logger()
            log.add_counter('#', width=2)
            log.add_float('Latitude', width=1)
            log.add_long_float('Number')
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)

            log.log(*data)
            log.log(1, 2, 3)  # not enough for more output!
        self.assertOutput(expected=out1, returned=c.text())

        # Normal use, data row by row
        with StreamCapture() as c:
            log = pints.Logger()
            log.add_counter('#', width=2)
            log.add_float('Latitude', width=1)
            log.add_long_float('Number')
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)

            # Add data row by row
            n = 7
            for i in range(len(data) // n):
                log.log(*data[i * n:(i + 1) * n])
        self.assertOutput(expected=out1, returned=c.text())

        # Normal use, data field by field
        with StreamCapture() as c:
            log = pints.Logger()
            log.add_counter('#', width=2)
            log.add_float('Latitude', width=1)
            log.add_long_float('Number')
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)

            # Add data cell by cell
            for d in data:
                log.log(d)
        self.assertOutput(expected=out1, returned=c.text())

        # Log in different sized chunks
        order = [3, 2, 1, 1, 4, 6, 3, 2, 6]
        self.assertEqual(sum(order), len(data))
        with StreamCapture() as c:
            log = pints.Logger()
            log.add_counter('#', width=2)
            log.add_float('Latitude', width=1)
            log.add_long_float('Number')
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)

            # Add data in different sized chunks
            offset = 0
            for n in order:
                log.log(*data[offset:offset + n])
                offset += n
        self.assertOutput(expected=out1, returned=c.text())

        # Log with file-only fields, and shorter name
        with StreamCapture() as c:
            log = pints.Logger()
            log.add_counter('#', width=2)
            log.add_float('Lat.', width=1)
            log.add_long_float('Number', file_only=True)
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)
            log.log(*data)
        self.assertOutput(expected=out2, returned=c.text())

        # Log with file-only fields, and shorter name, and file
        with StreamCapture() as c:
            with TemporaryDirectory() as d:
                filename = d.path('test.txt')
                log = pints.Logger()
                log.set_filename(filename)
                log.add_counter('#', width=2)
                log.add_float('Lat.', width=1)
                log.add_long_float('Number', file_only=True)
                log.add_int('Val', width=4)
                log.add_counter('Count', max_value=12345)
                log.add_time('Time')
                log.add_string('Q', 3)
                log.log(*data)
                with open(filename, 'r') as f:
                    out = f.read()
        self.assertOutput(expected=out2, returned=c.text())
        self.assertOutput(expected=out3, returned=out)

        # Repeat in csv mode
        with StreamCapture() as c:
            with TemporaryDirectory() as d:
                filename = d.path('test.csv')
                log = pints.Logger()
                log.set_filename(filename, csv=True)
                log.add_counter('#', width=2)
                log.add_float('Lat.', width=1)
                log.add_long_float('Number', file_only=True)
                log.add_int('Val', width=4)
                log.add_counter('Count', max_value=12345)
                log.add_time('Time')
                log.add_string('Q', 3)
                log.log(*data)
                with open(filename, 'r') as f:
                    out = f.read()
        self.assertOutput(expected=out2, returned=c.text())
        self.assertOutput(expected=out4, returned=out)

        # Repeat without screen output
        with StreamCapture() as c:
            with TemporaryDirectory() as d:
                filename = d.path('test.csv')
                log = pints.Logger()
                log.set_filename(filename, csv=True)
                log.set_stream(None)
                log.add_counter('#', width=2)
                log.add_float('Lat.', width=1)
                log.add_long_float('Number', file_only=True)
                log.add_int('Val', width=4)
                log.add_counter('Count', max_value=12345)
                log.add_time('Time')
                log.add_string('Q', 3)
                log.log(*data)
                with open(filename, 'r') as f:
                    out = f.read()
        self.assertOutput(expected='', returned=c.text())
        self.assertOutput(expected=out4, returned=out)

        # Repeat without screen output, outside of csv mode
        with StreamCapture() as c:
            with TemporaryDirectory() as d:
                filename = d.path('test.csv')
                log = pints.Logger()
                log.set_filename(filename, csv=False)
                log.set_stream(None)
                log.add_counter('#', width=2)
                log.add_float('Lat.', width=1)
                log.add_long_float('Number', file_only=True)
                log.add_int('Val', width=4)
                log.add_counter('Count', max_value=12345)
                log.add_time('Time')
                log.add_string('Q', 3)
                log.log(*data)
                with open(filename, 'r') as f:
                    out = f.read()
        self.assertOutput(expected='', returned=c.text())
        self.assertOutput(expected=out3, returned=out)

        # Unset file output
        with StreamCapture() as c:
            with TemporaryDirectory() as d:
                filename = d.path('test.csv')
                log = pints.Logger()
                log.set_filename(filename, csv=False)
                log.set_filename(None)
                log.set_stream(None)
                log.add_counter('#', width=2)
                log.log(1)
                self.assertFalse(os.path.isfile(filename))
        self.assertOutput(expected='', returned=c.text())

        # Repeat without any output
        with StreamCapture() as c:
            log = pints.Logger()
            log.set_stream(None)
            log.add_counter('#', width=2)
            log.add_float('Lat.', width=1)
            log.add_long_float('Number', file_only=True)
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)
            log.log(*data)
        self.assertOutput(expected='', returned=c.text())

        # Repeat on stderr
        with StreamCapture(stdout=True, stderr=True) as c:
            with TemporaryDirectory() as d:
                filename = d.path('test.csv')
                log = pints.Logger()
                log.set_filename(filename, csv=False)
                log.set_stream(sys.stderr)
                log.add_counter('#', width=2)
                log.add_float('Lat.', width=1)
                log.add_long_float('Number', file_only=True)
                log.add_int('Val', width=4)
                log.add_counter('Count', max_value=12345)
                log.add_time('Time')
                log.add_string('Q', 3)
                log.log(*data)
                with open(filename, 'r') as f:
                    out = f.read()
        self.assertOutput(expected='', returned=c.text()[0])
        self.assertOutput(expected=out2, returned=c.text()[1])
        self.assertOutput(expected=out3, returned=out)
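
All of these tests exercise the same small ``pints.Logger`` workflow:
declare the columns first, then feed values in any chunk size, and a row is
emitted whenever every field in it has been filled. A minimal usage sketch,
using only calls that appear in the tests above (column names and data are
made up):

    import pints

    log = pints.Logger()
    log.add_counter('Iter.', max_value=1000)  # right-aligned counter column
    log.add_float('Score')                    # short float column
    log.add_time('Time m:s')                  # formatted elapsed time

    # Values may arrive row by row or cell by cell; a line is printed
    # once every field in the current row has a value
    log.log(0, 1.23, 0.1)
    log.log(1)
    log.log(4.56, 0.2)
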