def test_logging(self):
    """
    Tests logging for PSO and other optimisers.

    Covers three scenarios: no logging at all, logging to screen with
    parallelisation enabled, and logging to a file.
    """
    # Use a LogPDF to test if it shows the maximising message!
    r = pints.toy.TwistedGaussianLogPDF(2, 0.01)
    x = np.array([0, 1.01])
    b = pints.RectangularBoundaries([-0.01, 0.95], [0.01, 1.05])
    s = 0.01
    # Note: a redundant controller construction was removed here; the
    # object was created and immediately overwritten below, unused.

    # No logging: with both screen and file logging off, nothing at all
    # should be written to stdout.
    opt = pints.OptimisationController(r, x, s, b, method)
    opt.set_max_iterations(10)
    opt.set_log_to_screen(False)
    opt.set_log_to_file(False)
    with StreamCapture() as c:
        opt.run()
    self.assertEqual(c.text(), '')

    # Log to screen - using a LogPDF and parallelisation
    opt = pints.OptimisationController(r, x, s, b, method)
    opt.set_parallel(2)
    opt.set_max_iterations(10)
    opt.set_log_to_screen(True)
    opt.set_log_to_file(False)
    with StreamCapture() as c:
        opt.run()
    lines = c.text().splitlines()

    # Expect 5 header lines, 5 data rows, and a halting message
    self.assertEqual(len(lines), 11)
    self.assertEqual(lines[0], 'Maximising LogPDF')
    self.assertEqual(lines[1], 'Using Particle Swarm Optimisation (PSO)')
    self.assertEqual(
        lines[2], 'Running in parallel with 2 worker processes.')
    self.assertEqual(lines[3], 'Population size: 6')
    self.assertEqual(lines[4], 'Iter. Eval. Best Time m:s')

    # Each data row: iteration count, evaluation count, best score, time
    pint = '[0-9]+[ ]+'
    pflt = '[0-9.-]+[ ]+'
    ptim = '[0-9]{1}:[0-9]{2}.[0-9]{1}'
    pattern = re.compile(pint * 2 + pflt + ptim)
    for line in lines[5:-1]:
        self.assertTrue(pattern.match(line))
    self.assertEqual(
        lines[-1], 'Halting: Maximum number of iterations (10) reached.')

    # Log to file: screen stays silent, file gets extra per-particle
    # columns f0..f5.
    opt = pints.OptimisationController(r, x, s, b, method=method)
    opt.set_max_iterations(10)
    with StreamCapture() as c:
        with TemporaryDirectory() as d:
            filename = d.path('test.txt')
            opt.set_log_to_screen(False)
            opt.set_log_to_file(filename)
            opt.run()
            with open(filename, 'r') as f:
                lines = f.read().splitlines()
        self.assertEqual(c.text(), '')
    self.assertEqual(len(lines), 6)
    self.assertEqual(
        lines[0],
        'Iter. Eval. Best f0 f1 f2 f3 '
        ' f4 f5 Time m:s')
    # 7 float columns: Best plus f0..f5
    pattern = re.compile(pint * 2 + pflt * 7 + ptim)
    for line in lines[1:]:
        self.assertTrue(pattern.match(line))
def test_logging(self):
    """
    Tests the controller's screen log, line by line, against values
    recorded from a reference run: first maximising a LogPDF with xNES,
    then minimising an error measure with SNES.
    """
    # Test with logpdf
    r = pints.toy.TwistedGaussianLogPDF(2, 0.01)
    x = np.array([0, 1.01])
    b = pints.RectangularBoundaries([-0.01, 0.95], [0.01, 1.05])
    s = 0.01
    opt = pints.OptimisationController(r, x, s, b, method=method)
    opt.set_log_to_screen(True)
    # Disable the unchanged-iterations criterion so the run always hits
    # the iteration limit
    opt.set_max_unchanged_iterations(None)
    opt.set_log_interval(3)
    opt.set_max_iterations(10)
    with StreamCapture() as c:
        opt.run()
    lines = c.text().splitlines()
    self.assertEqual(lines[0], 'Maximising LogPDF')
    self.assertEqual(
        lines[1], 'Using Exponential Natural Evolution Strategy (xNES)')
    self.assertEqual(lines[2], 'Running in sequential mode.')
    self.assertEqual(lines[3], 'Population size: 6')
    # NOTE(review): this literal was reconstructed from a garbled line
    # break in the source; confirm exact column spacing.
    self.assertEqual(lines[4], 'Iter. Eval. Best Time m:s')
    # [:-3] strips the variable trailing digits of the time column
    self.assertEqual(lines[5][:-3], '0 6 -4.140462 0:0')
    self.assertEqual(lines[6][:-3], '1 12 -4.140462 0:0')
    self.assertEqual(lines[7][:-3], '2 18 -4.140462 0:0')
    self.assertEqual(lines[8][:-3], '3 24 -4.140462 0:0')
    self.assertEqual(lines[9][:-3], '6 42 -4.140462 0:0')
    self.assertEqual(lines[10][:-3], '9 60 -4.140462 0:0')
    self.assertEqual(lines[11][:-3], '10 60 -4.140462 0:0')
    self.assertEqual(
        lines[12], 'Halting: Maximum number of iterations (10) reached.')

    # Invalid log interval
    self.assertRaises(ValueError, opt.set_log_interval, 0)

    # Test with error measure
    r = pints.toy.RosenbrockError()
    x = np.array([1.01, 1.01])
    opt = pints.OptimisationController(r, x, method=pints.SNES)
    opt.set_log_to_screen(True)
    opt.set_max_unchanged_iterations(None)
    opt.set_log_interval(4)
    opt.set_max_iterations(11)
    opt.optimiser().set_population_size(4)
    with StreamCapture() as c:
        opt.run()
    lines = c.text().splitlines()
    self.assertEqual(lines[0], 'Minimising error measure')
    # 'Seperable' [sic]: matches the optimiser's own name string, do not
    # correct it here
    self.assertEqual(lines[1],
                     'Using Seperable Natural Evolution Strategy (SNES)')
    self.assertEqual(lines[2], 'Running in sequential mode.')
    self.assertEqual(lines[3], 'Population size: 4')
    self.assertEqual(lines[4], 'Iter. Eval. Best Time m:s')
    self.assertEqual(lines[5][:-3], '0 4 6.471867 0:0')
    self.assertEqual(lines[6][:-3], '1 8 6.471867 0:0')
    self.assertEqual(lines[7][:-3], '2 12 0.0949 0:0')
    self.assertEqual(lines[8][:-3], '3 16 0.0949 0:0')
    self.assertEqual(lines[9][:-3], '4 20 0.0949 0:0')
    self.assertEqual(lines[10][:-3], '8 36 0.0165 0:0')
    self.assertEqual(lines[11][:-3], '11 44 0.0165 0:0')
    self.assertEqual(
        lines[12], 'Halting: Maximum number of iterations (11) reached.')

    # Invalid log interval
    self.assertRaises(ValueError, opt.set_log_interval, 0)
def test_rosenbrock(self):
    # Tests the actions of the optimiser against a stored result
    r = pints.toy.RosenbrockError()
    x0 = [-0.75, 3.5]
    opt = pints.OptimisationController(r, x0, method=method)
    opt.set_log_to_screen(True)
    with StreamCapture() as c:
        x, f = opt.run()
        log = c.text()

    # The optimiser should find the exact global minimum at (1, 1)
    self.assertTrue(np.all(x == np.array([1, 1])))
    self.assertEqual(f, 0)

    # Expected console log, recorded from a reference run
    exp_lines = (
        'Minimising error measure',
        'Using Nelder-Mead',
        'Running in sequential mode.',
        'Iter. Eval. Best Time m:s',
        '0 3 865.9531 0:00.0',
        '1 4 832.5452 0:00.0',
        '2 5 832.5452 0:00.0',
        '3 6 628.243 0:00.0',
        '20 23 4.95828 0:00.0',
        '40 43 3.525867 0:00.0',
        '60 63 2.377579 0:00.0',
        '80 83 1.114115 0:00.0',
        '100 103 0.551 0:00.0',
        '120 123 0.237 0:00.0',
        '140 143 0.0666 0:00.0',
        '160 163 0.00181 0:00.0',
        '180 183 6.96e-06 0:00.0',
        '200 203 2.66e-08 0:00.0',
        '220 223 5.06e-11 0:00.0',
        '240 243 2.43e-15 0:00.0',
        '260 263 5.58e-18 0:00.0',
        '280 283 7.74e-20 0:00.0',
        '300 303 6.66e-23 0:00.0',
        '320 323 1.86e-25 0:00.0',
        '340 343 3.16e-28 0:00.0',
        '360 364 3.08e-31 0:00.0',
        '380 390 0 0:00.0',
        '400 416 0 0:00.0',
        '420 443 0 0:00.0',
        '422 444 0 0:00.0',
        'Halting: No significant change for 200 iterations.',
    )

    # Compare length of log
    log_lines = [line.rstrip() for line in log.splitlines()]
    self.assertEqual(len(log_lines), len(exp_lines))

    # Compare log lines, ignoring time bit (unless it's way too slow)
    for i, line1 in enumerate(log_lines):
        line2 = exp_lines[i]
        if line2[-6:] == '0:00.0':
            line1 = line1[:-6]
            line2 = line2[:-6]
        self.assertEqual(line1, line2)
def test_logging(self):
    """
    Checks the controller's full screen log against known output, for a
    maximisation (LogPDF) run and a minimisation (error measure) run.
    """
    # Case 1: maximising a LogPDF
    log_pdf = pints.toy.TwistedGaussianLogPDF(2, 0.01)
    x0 = np.array([0, 1.01])
    bounds = pints.RectangularBoundaries([-0.01, 0.95], [0.01, 1.05])
    sigma = 0.01
    controller = pints.OptimisationController(
        log_pdf, x0, sigma, bounds, method)
    controller.set_log_to_screen(True)
    controller.set_max_unchanged_iterations(None)
    controller.set_log_interval(3)
    controller.set_max_iterations(10)
    self.assertEqual(controller.max_iterations(), 10)
    with StreamCapture() as capture:
        controller.run()

    # Build the expected output from its individual lines
    expected = '\n'.join([
        'Maximising LogPDF',
        'Using Exponential Natural Evolution Strategy (xNES)',
        'Running in sequential mode.',
        'Population size: 6',
        'Iter. Eval. Best Time m:s',
        '0 6 -4.140462 0:00.0',
        '1 12 -4.140462 0:00.0',
        '2 18 -4.140462 0:00.0',
        '3 24 -4.140462 0:00.0',
        '6 42 -4.140462 0:00.0',
        '9 60 -4.140462 0:00.0',
        '10 60 -4.140462 0:00.0',
        'Halting: Maximum number of iterations (10) reached.',
    ]) + '\n'
    self.assertEqual(capture.text(), expected)

    # A log interval of zero is invalid
    self.assertRaises(ValueError, controller.set_log_interval, 0)

    # Case 2: minimising an error measure
    error = pints.toy.RosenbrockError()
    x0 = np.array([1.01, 1.01])
    controller = pints.OptimisationController(error, x0, method=method)
    controller.set_log_to_screen(True)
    controller.set_max_unchanged_iterations(None)
    controller.set_log_interval(4)
    controller.set_max_iterations(10)
    self.assertEqual(controller.max_iterations(), 10)
    with StreamCapture() as capture:
        controller.run()

    expected = '\n'.join([
        'Minimising error measure',
        'Using Exponential Natural Evolution Strategy (xNES)',
        'Running in sequential mode.',
        'Population size: 6',
        'Iter. Eval. Best Time m:s',
        '0 6 0.888 0:00.0',
        '1 12 0.888 0:00.0',
        '2 18 0.29 0:00.0',
        '3 24 0.29 0:00.0',
        '4 30 0.0813 0:00.0',
        '8 54 0.0652 0:00.0',
        '10 60 0.0431 0:00.0',
        'Halting: Maximum number of iterations (10) reached.',
    ]) + '\n'
    self.assertEqual(capture.text(), expected)

    # A log interval of zero is invalid
    self.assertRaises(ValueError, controller.set_log_interval, 0)
def test_logger(self):
    """
    Tests the ``pints.Logger`` class: field configuration, screen and
    file output, csv mode, and submitting data in differently sized
    chunks. Relies on module-level fixtures ``data`` (input values) and
    ``out1``..``out4`` (expected rendered output).
    """
    # Normal use, all data at once
    with StreamCapture() as c:
        # Test logger with no fields: logging must raise
        log = pints.Logger()
        self.assertRaises(ValueError, log.log, 1)
        # Test logging output
        log.add_counter('#', width=2)
        log.add_float('Latitude', width=1)
        log.add_long_float('Number')
        log.add_int('Val', width=4)
        log.add_counter('Count', max_value=12345)
        log.add_time('Time')
        log.add_string('Q', 3)
        # Add all data in one go
        log.log(*data)
    self.assertOutput(expected=out1, returned=c.text())

    # Can't configure once logging has started
    self.assertRaises(RuntimeError, log.add_counter, 'a')
    self.assertRaises(RuntimeError, log.add_int, 'a')
    self.assertRaises(RuntimeError, log.add_float, 'a')
    self.assertRaises(RuntimeError, log.add_long_float, 'a')
    self.assertRaises(RuntimeError, log.add_time, 'a')
    self.assertRaises(RuntimeError, log.add_string, 'a', 3)
    self.assertRaises(RuntimeError, log.set_filename, 'a')
    self.assertRaises(RuntimeError, log.set_stream, sys.stdout)

    # Normal use, all data at once, plus extra bit
    with StreamCapture() as c:
        log = pints.Logger()
        log.add_counter('#', width=2)
        log.add_float('Latitude', width=1)
        log.add_long_float('Number')
        log.add_int('Val', width=4)
        log.add_counter('Count', max_value=12345)
        log.add_time('Time')
        log.add_string('Q', 3)
        log.log(*data)
        log.log(1, 2, 3)  # not enough for more output!
    self.assertOutput(expected=out1, returned=c.text())

    # Normal use, data row by row
    with StreamCapture() as c:
        log = pints.Logger()
        log.add_counter('#', width=2)
        log.add_float('Latitude', width=1)
        log.add_long_float('Number')
        log.add_int('Val', width=4)
        log.add_counter('Count', max_value=12345)
        log.add_time('Time')
        log.add_string('Q', 3)
        # Add data row by row (7 fields per row)
        n = 7
        for i in range(len(data) // n):
            log.log(*data[i * n:(i + 1) * n])
    self.assertOutput(expected=out1, returned=c.text())

    # Normal use, data field by field
    with StreamCapture() as c:
        log = pints.Logger()
        log.add_counter('#', width=2)
        log.add_float('Latitude', width=1)
        log.add_long_float('Number')
        log.add_int('Val', width=4)
        log.add_counter('Count', max_value=12345)
        log.add_time('Time')
        log.add_string('Q', 3)
        # Add data cell by cell
        for d in data:
            log.log(d)
    self.assertOutput(expected=out1, returned=c.text())

    # Log in different sized chunks; chunk sizes must cover all of data
    order = [3, 2, 1, 1, 4, 6, 3, 2, 6]
    self.assertEqual(sum(order), len(data))
    with StreamCapture() as c:
        log = pints.Logger()
        log.add_counter('#', width=2)
        log.add_float('Latitude', width=1)
        log.add_long_float('Number')
        log.add_int('Val', width=4)
        log.add_counter('Count', max_value=12345)
        log.add_time('Time')
        log.add_string('Q', 3)
        # Add data in different sized chunks
        offset = 0
        for n in order:
            log.log(*data[offset:offset + n])
            offset += n
    self.assertOutput(expected=out1, returned=c.text())

    # Log with file-only fields, and shorter name
    with StreamCapture() as c:
        log = pints.Logger()
        log.add_counter('#', width=2)
        log.add_float('Lat.', width=1)
        log.add_long_float('Number', file_only=True)
        log.add_int('Val', width=4)
        log.add_counter('Count', max_value=12345)
        log.add_time('Time')
        log.add_string('Q', 3)
        log.log(*data)
    self.assertOutput(expected=out2, returned=c.text())

    # Log with file-only fields, and shorter name, and file
    with StreamCapture() as c:
        with TemporaryDirectory() as d:
            filename = d.path('test.txt')
            log = pints.Logger()
            log.set_filename(filename)
            log.add_counter('#', width=2)
            log.add_float('Lat.', width=1)
            log.add_long_float('Number', file_only=True)
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)
            log.log(*data)
            with open(filename, 'r') as f:
                out = f.read()
        # Screen shows the short form, the file gets the full form
        self.assertOutput(expected=out2, returned=c.text())
        self.assertOutput(expected=out3, returned=out)

    # Repeat in csv mode
    with StreamCapture() as c:
        with TemporaryDirectory() as d:
            filename = d.path('test.csv')
            log = pints.Logger()
            log.set_filename(filename, csv=True)
            log.add_counter('#', width=2)
            log.add_float('Lat.', width=1)
            log.add_long_float('Number', file_only=True)
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)
            log.log(*data)
            with open(filename, 'r') as f:
                out = f.read()
        self.assertOutput(expected=out2, returned=c.text())
        self.assertOutput(expected=out4, returned=out)

    # Repeat without screen output
    with StreamCapture() as c:
        with TemporaryDirectory() as d:
            filename = d.path('test.csv')
            log = pints.Logger()
            log.set_filename(filename, csv=True)
            log.set_stream(None)
            log.add_counter('#', width=2)
            log.add_float('Lat.', width=1)
            log.add_long_float('Number', file_only=True)
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)
            log.log(*data)
            with open(filename, 'r') as f:
                out = f.read()
        self.assertOutput(expected='', returned=c.text())
        self.assertOutput(expected=out4, returned=out)

    # Repeat without screen output, outside of csv mode
    with StreamCapture() as c:
        with TemporaryDirectory() as d:
            filename = d.path('test.csv')
            log = pints.Logger()
            log.set_filename(filename, csv=False)
            log.set_stream(None)
            log.add_counter('#', width=2)
            log.add_float('Lat.', width=1)
            log.add_long_float('Number', file_only=True)
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)
            log.log(*data)
            with open(filename, 'r') as f:
                out = f.read()
        self.assertOutput(expected='', returned=c.text())
        self.assertOutput(expected=out3, returned=out)

    # Unset file output
    with StreamCapture() as c:
        with TemporaryDirectory() as d:
            filename = d.path('test.csv')
            log = pints.Logger()
            log.set_filename(filename, csv=False)
            log.set_filename(None)
            log.set_stream(None)
            log.add_counter('#', width=2)
            log.log(1)
            self.assertFalse(os.path.isfile(filename))
        self.assertOutput(expected='', returned=c.text())
        # NOTE(review): `out` here is left over from the previous
        # section, so this re-checks stale data -- confirm intended.
        self.assertOutput(expected=out3, returned=out)

    # Repeat without any output
    with StreamCapture() as c:
        log = pints.Logger()
        log.set_stream(None)
        log.add_counter('#', width=2)
        log.add_float('Lat.', width=1)
        log.add_long_float('Number', file_only=True)
        log.add_int('Val', width=4)
        log.add_counter('Count', max_value=12345)
        log.add_time('Time')
        log.add_string('Q', 3)
        log.log(*data)
    self.assertOutput(expected='', returned=c.text())

    # Repeat on stderr: stdout stays empty, stderr gets the screen form
    with StreamCapture(stdout=True, stderr=True) as c:
        with TemporaryDirectory() as d:
            filename = d.path('test.csv')
            log = pints.Logger()
            log.set_filename(filename, csv=False)
            log.set_stream(sys.stderr)
            log.add_counter('#', width=2)
            log.add_float('Lat.', width=1)
            log.add_long_float('Number', file_only=True)
            log.add_int('Val', width=4)
            log.add_counter('Count', max_value=12345)
            log.add_time('Time')
            log.add_string('Q', 3)
            log.log(*data)
            with open(filename, 'r') as f:
                out = f.read()
        self.assertOutput(expected='', returned=c.text()[0])
        self.assertOutput(expected=out2, returned=c.text()[1])
        self.assertOutput(expected=out3, returned=out)
def test_live_chain_and_eval_logging(self):
    """
    Tests live writing of MCMC chains and/or evaluations to disk: each
    scenario checks which per-chain files appear, that their contents
    match the returned chains, and what the screen log announces.
    """
    np.random.seed(1)
    xs = []
    for i in range(3):
        f = 0.9 + 0.2 * np.random.rand()
        xs.append(np.array(self.real_parameters) * f)
    nchains = len(xs)

    # Test writing chains - not evals to disk (using LogPosterior)
    mcmc = pints.MCMCController(self.log_posterior, nchains, xs)
    mcmc.set_initial_phase_iterations(5)
    mcmc.set_max_iterations(20)
    mcmc.set_log_to_screen(True)
    mcmc.set_log_to_file(False)
    with StreamCapture() as c:
        with TemporaryDirectory() as d:
            # cpath/epath are templates; the controller writes one file
            # per chain (chain_0.csv etc.)
            cpath = d.path('chain.csv')
            p0 = d.path('chain_0.csv')
            p1 = d.path('chain_1.csv')
            p2 = d.path('chain_2.csv')
            epath = d.path('evals.csv')
            p3 = d.path('evals_0.csv')
            p4 = d.path('evals_1.csv')
            p5 = d.path('evals_2.csv')

            # Test files aren't created before mcmc runs
            mcmc.set_chain_filename(cpath)
            mcmc.set_log_pdf_filename(None)
            self.assertFalse(os.path.exists(cpath))
            self.assertFalse(os.path.exists(epath))
            self.assertFalse(os.path.exists(p0))
            self.assertFalse(os.path.exists(p1))
            self.assertFalse(os.path.exists(p2))
            self.assertFalse(os.path.exists(p3))
            self.assertFalse(os.path.exists(p4))
            self.assertFalse(os.path.exists(p5))

            # Test files are created afterwards
            chains1 = mcmc.run()
            self.assertFalse(os.path.exists(cpath))
            self.assertFalse(os.path.exists(epath))
            self.assertTrue(os.path.exists(p0))
            self.assertTrue(os.path.exists(p1))
            self.assertTrue(os.path.exists(p2))
            self.assertFalse(os.path.exists(p3))
            self.assertFalse(os.path.exists(p4))
            self.assertFalse(os.path.exists(p5))

            # Test files contain the correct chains
            import pints.io as io
            chains2 = np.array(io.load_samples(cpath, nchains))
            self.assertTrue(np.all(chains1 == chains2))

        text = c.text()
        self.assertIn('Writing chains to', text)
        self.assertIn('chain_0.csv', text)
        self.assertNotIn('Writing evaluations to', text)
        self.assertNotIn('evals_0.csv', text)

    # Test writing evals - not chains to disk (using LogPosterior)
    mcmc = pints.MCMCController(self.log_posterior, nchains, xs)
    mcmc.set_initial_phase_iterations(5)
    mcmc.set_max_iterations(20)
    mcmc.set_log_to_screen(True)
    mcmc.set_log_to_file(False)
    with StreamCapture() as c:
        with TemporaryDirectory() as d:
            cpath = d.path('chain.csv')
            p0 = d.path('chain_0.csv')
            p1 = d.path('chain_1.csv')
            p2 = d.path('chain_2.csv')
            epath = d.path('evals.csv')
            p3 = d.path('evals_0.csv')
            p4 = d.path('evals_1.csv')
            p5 = d.path('evals_2.csv')

            # Test files aren't created before mcmc runs
            mcmc.set_chain_filename(None)
            mcmc.set_log_pdf_filename(epath)
            self.assertFalse(os.path.exists(cpath))
            self.assertFalse(os.path.exists(epath))
            self.assertFalse(os.path.exists(p0))
            self.assertFalse(os.path.exists(p1))
            self.assertFalse(os.path.exists(p2))
            self.assertFalse(os.path.exists(p3))
            self.assertFalse(os.path.exists(p4))
            self.assertFalse(os.path.exists(p5))

            # Test files are created afterwards
            chains1 = mcmc.run()
            self.assertFalse(os.path.exists(cpath))
            self.assertFalse(os.path.exists(epath))
            self.assertFalse(os.path.exists(p0))
            self.assertFalse(os.path.exists(p1))
            self.assertFalse(os.path.exists(p2))
            self.assertTrue(os.path.exists(p3))
            self.assertTrue(os.path.exists(p4))
            self.assertTrue(os.path.exists(p5))

            # Test files contain the correct values: for a LogPosterior
            # each row is (posterior, likelihood, prior)
            import pints.io as io
            evals2 = np.array(io.load_samples(epath, nchains))
            evals1 = []
            for chain in chains1:
                logpdfs = np.array([self.log_posterior(x) for x in chain])
                logpriors = np.array([self.log_prior(x) for x in chain])
                loglikelihoods = logpdfs - logpriors
                evals = np.array([logpdfs, loglikelihoods, logpriors]).T
                evals1.append(evals)
            evals1 = np.array(evals1)
            self.assertTrue(np.all(evals1 == evals2))

        text = c.text()
        self.assertNotIn('Writing chains to', text)
        self.assertNotIn('chain_0.csv', text)
        self.assertIn('Writing evaluations to', text)
        self.assertIn('evals_0.csv', text)

    # Test writing chains and evals to disk (with LogPosterior)
    mcmc = pints.MCMCController(self.log_posterior, nchains, xs)
    mcmc.set_initial_phase_iterations(5)
    mcmc.set_max_iterations(20)
    mcmc.set_log_to_screen(True)
    mcmc.set_log_to_file(False)
    with StreamCapture() as c:
        with TemporaryDirectory() as d:
            cpath = d.path('chain.csv')
            p0 = d.path('chain_0.csv')
            p1 = d.path('chain_1.csv')
            p2 = d.path('chain_2.csv')
            epath = d.path('evals.csv')
            p3 = d.path('evals_0.csv')
            p4 = d.path('evals_1.csv')
            p5 = d.path('evals_2.csv')

            # Test files aren't created before mcmc runs
            mcmc.set_chain_filename(cpath)
            mcmc.set_log_pdf_filename(epath)
            self.assertFalse(os.path.exists(cpath))
            self.assertFalse(os.path.exists(epath))
            self.assertFalse(os.path.exists(p0))
            self.assertFalse(os.path.exists(p1))
            self.assertFalse(os.path.exists(p2))
            self.assertFalse(os.path.exists(p3))
            self.assertFalse(os.path.exists(p4))
            self.assertFalse(os.path.exists(p5))

            # Test files are created afterwards
            chains1 = mcmc.run()
            self.assertFalse(os.path.exists(cpath))
            self.assertFalse(os.path.exists(epath))
            self.assertTrue(os.path.exists(p0))
            self.assertTrue(os.path.exists(p1))
            self.assertTrue(os.path.exists(p2))
            self.assertTrue(os.path.exists(p3))
            self.assertTrue(os.path.exists(p4))
            self.assertTrue(os.path.exists(p5))

            # Test chain files contain the correct values
            import pints.io as io
            chains2 = np.array(io.load_samples(cpath, nchains))
            self.assertTrue(np.all(chains1 == chains2))

            # Test eval files contain the correct values
            evals2 = np.array(io.load_samples(epath, nchains))
            evals1 = []
            for chain in chains1:
                logpdfs = np.array([self.log_posterior(x) for x in chain])
                logpriors = np.array([self.log_prior(x) for x in chain])
                loglikelihoods = logpdfs - logpriors
                evals = np.array([logpdfs, loglikelihoods, logpriors]).T
                evals1.append(evals)
            evals1 = np.array(evals1)
            self.assertTrue(np.all(evals1 == evals2))

        text = c.text()
        self.assertIn('Writing chains to', text)
        self.assertIn('chain_0.csv', text)
        self.assertIn('Writing evaluations to', text)
        self.assertIn('evals_0.csv', text)

    # Test writing chains and evals to disk (with LogLikelihood)
    mcmc = pints.MCMCController(self.log_likelihood, nchains, xs)
    mcmc.set_initial_phase_iterations(5)
    mcmc.set_max_iterations(20)
    mcmc.set_log_to_screen(True)
    mcmc.set_log_to_file(False)
    with StreamCapture() as c:
        with TemporaryDirectory() as d:
            cpath = d.path('chain.csv')
            p0 = d.path('chain_0.csv')
            p1 = d.path('chain_1.csv')
            p2 = d.path('chain_2.csv')
            epath = d.path('evals.csv')
            p3 = d.path('evals_0.csv')
            p4 = d.path('evals_1.csv')
            p5 = d.path('evals_2.csv')

            # Test files aren't created before mcmc runs
            mcmc.set_chain_filename(cpath)
            mcmc.set_log_pdf_filename(epath)
            self.assertFalse(os.path.exists(cpath))
            self.assertFalse(os.path.exists(epath))
            self.assertFalse(os.path.exists(p0))
            self.assertFalse(os.path.exists(p1))
            self.assertFalse(os.path.exists(p2))
            self.assertFalse(os.path.exists(p3))
            self.assertFalse(os.path.exists(p4))
            self.assertFalse(os.path.exists(p5))

            # Test files are created afterwards
            chains1 = mcmc.run()
            self.assertFalse(os.path.exists(cpath))
            self.assertFalse(os.path.exists(epath))
            self.assertTrue(os.path.exists(p0))
            self.assertTrue(os.path.exists(p1))
            self.assertTrue(os.path.exists(p2))
            self.assertTrue(os.path.exists(p3))
            self.assertTrue(os.path.exists(p4))
            self.assertTrue(os.path.exists(p5))

            # Test chain files contain the correct values
            import pints.io as io
            chains2 = np.array(io.load_samples(cpath, nchains))
            self.assertTrue(np.all(chains1 == chains2))

            # Test eval files contain the correct values: for a plain
            # LogLikelihood only a single column is written
            evals2 = np.array(io.load_samples(epath, nchains))
            evals1 = []
            for chain in chains1:
                evals1.append(
                    np.array([self.log_likelihood(x) for x in chain]).T)
            evals1 = np.array(evals1).reshape(3, 20, 1)
            self.assertTrue(np.all(evals1 == evals2))

        text = c.text()
        self.assertIn('Writing chains to', text)
        self.assertIn('chain_0.csv', text)
        self.assertIn('Writing evaluations to', text)
        self.assertIn('evals_0.csv', text)

    # Test logging can be disabled again
    mcmc = pints.MCMCController(self.log_posterior, nchains, xs)
    mcmc.set_initial_phase_iterations(5)
    mcmc.set_max_iterations(20)
    mcmc.set_log_to_screen(True)
    mcmc.set_log_to_file(False)
    with StreamCapture() as c:
        with TemporaryDirectory() as d:
            cpath = d.path('chain.csv')
            p0 = d.path('chain_0.csv')
            p1 = d.path('chain_1.csv')
            p2 = d.path('chain_2.csv')
            epath = d.path('evals.csv')
            p3 = d.path('evals_0.csv')
            p4 = d.path('evals_1.csv')
            p5 = d.path('evals_2.csv')

            # Test files aren't created before mcmc runs
            mcmc.set_chain_filename(cpath)
            mcmc.set_log_pdf_filename(epath)
            self.assertFalse(os.path.exists(cpath))
            self.assertFalse(os.path.exists(epath))
            self.assertFalse(os.path.exists(p0))
            self.assertFalse(os.path.exists(p1))
            self.assertFalse(os.path.exists(p2))
            self.assertFalse(os.path.exists(p3))
            self.assertFalse(os.path.exists(p4))
            self.assertFalse(os.path.exists(p5))

            # Test files are not created afterwards
            mcmc.set_chain_filename(None)
            mcmc.set_log_pdf_filename(None)
            mcmc.run()
            self.assertFalse(os.path.exists(cpath))
            self.assertFalse(os.path.exists(epath))
            self.assertFalse(os.path.exists(p0))
            self.assertFalse(os.path.exists(p1))
            self.assertFalse(os.path.exists(p2))
            self.assertFalse(os.path.exists(p3))
            self.assertFalse(os.path.exists(p4))
            self.assertFalse(os.path.exists(p5))

        text = c.text()
        self.assertNotIn('Writing chains to', text)
        self.assertNotIn('chain_0.csv', text)
        self.assertNotIn('Writing evaluations to', text)
        self.assertNotIn('evals_0.csv', text)

    # Test with a single chain
    nchains = 1
    mcmc = pints.MCMCController(self.log_posterior, nchains, xs[:1])
    mcmc.set_initial_phase_iterations(5)
    mcmc.set_max_iterations(20)
    mcmc.set_log_to_screen(True)
    mcmc.set_log_to_file(False)
    with StreamCapture() as c:
        with TemporaryDirectory() as d:
            cpath = d.path('chain.csv')
            p0 = d.path('chain_0.csv')
            p1 = d.path('chain_1.csv')
            p2 = d.path('chain_2.csv')
            epath = d.path('evals.csv')
            p3 = d.path('evals_0.csv')
            p4 = d.path('evals_1.csv')
            p5 = d.path('evals_2.csv')

            # Test files aren't created before mcmc runs
            mcmc.set_chain_filename(cpath)
            mcmc.set_log_pdf_filename(epath)
            self.assertFalse(os.path.exists(cpath))
            self.assertFalse(os.path.exists(epath))
            self.assertFalse(os.path.exists(p0))
            self.assertFalse(os.path.exists(p1))
            self.assertFalse(os.path.exists(p2))
            self.assertFalse(os.path.exists(p3))
            self.assertFalse(os.path.exists(p4))
            self.assertFalse(os.path.exists(p5))

            # Test files are created afterwards: only the _0 files
            chains1 = mcmc.run()
            self.assertFalse(os.path.exists(cpath))
            self.assertFalse(os.path.exists(epath))
            self.assertTrue(os.path.exists(p0))
            self.assertFalse(os.path.exists(p1))
            self.assertFalse(os.path.exists(p2))
            self.assertTrue(os.path.exists(p3))
            self.assertFalse(os.path.exists(p4))
            self.assertFalse(os.path.exists(p5))

            # Test chain files contain the correct values
            import pints.io as io
            chains2 = np.array(io.load_samples(cpath, nchains))
            self.assertTrue(np.all(chains1 == chains2))

            # Test eval files contain the correct values
            evals2 = np.array(io.load_samples(epath, nchains))
            evals1 = []
            for chain in chains1:
                logpdfs = np.array([self.log_posterior(x) for x in chain])
                logpriors = np.array([self.log_prior(x) for x in chain])
                loglikelihoods = logpdfs - logpriors
                evals = np.array([logpdfs, loglikelihoods, logpriors]).T
                evals1.append(evals)
            evals1 = np.array(evals1)
            self.assertTrue(np.all(evals1 == evals2))

        text = c.text()
        self.assertIn('Writing chains to', text)
        self.assertIn('chain_0.csv', text)
        self.assertIn('Writing evaluations to', text)
        self.assertIn('evals_0.csv', text)
def test_logging(self):
    """
    Tests MCMCController logging: silent mode, screen output checked
    against the LOG_SCREEN fixture, and file output checked against the
    LOG_FILE fixture.
    """
    np.random.seed(1)
    xs = []
    for i in range(3):
        f = 0.9 + 0.2 * np.random.rand()
        xs.append(np.array(self.real_parameters) * f)
    nchains = len(xs)

    # No output
    with StreamCapture() as capture:
        mcmc = pints.MCMCController(self.log_posterior, nchains, xs)
        mcmc.set_initial_phase_iterations(5)
        mcmc.set_max_iterations(10)
        mcmc.set_log_to_screen(False)
        mcmc.set_log_to_file(False)
        mcmc.run()
    self.assertEqual(capture.text(), '')

    # With output to screen; reset the seed so the run is reproducible
    np.random.seed(1)
    with StreamCapture() as capture:
        mcmc = pints.MCMCController(self.log_posterior, nchains, xs)
        mcmc.set_initial_phase_iterations(5)
        mcmc.set_max_iterations(10)
        mcmc.set_log_to_screen(True)
        mcmc.set_log_to_file(False)
        mcmc.run()
    lines = capture.text().splitlines()
    for i, line in enumerate(lines):
        self.assertLess(i, len(LOG_SCREEN))
        # Chop off time bit before comparison
        if LOG_SCREEN[i][-6:] == '0:00.0':
            self.assertEqual(line[:-6], LOG_SCREEN[i][:-6])
        else:
            self.assertEqual(line, LOG_SCREEN[i])
    self.assertEqual(len(lines), len(LOG_SCREEN))

    # With output to file
    np.random.seed(1)
    with StreamCapture() as capture:
        with TemporaryDirectory() as d:
            filename = d.path('test.txt')
            mcmc = pints.MCMCController(self.log_posterior, nchains, xs)
            mcmc.set_initial_phase_iterations(5)
            mcmc.set_max_iterations(10)
            mcmc.set_log_to_screen(False)
            mcmc.set_log_to_file(filename)
            mcmc.run()
            with open(filename, 'r') as f:
                lines = f.read().splitlines()
            for i, line in enumerate(lines):
                self.assertLess(i, len(LOG_FILE))
                # Chop off time bit before comparison
                # (a redundant duplicate of the prefix assertion was
                # removed here; it was implied by either branch)
                if LOG_FILE[i][-6:] == '0:00.0':
                    self.assertEqual(line[:-6], LOG_FILE[i][:-6])
                else:
                    self.assertEqual(line, LOG_FILE[i])
            self.assertEqual(len(lines), len(LOG_FILE))
        # Nothing should have gone to the screen
        self.assertEqual(capture.text(), '')

    # Invalid log interval
    self.assertRaises(ValueError, mcmc.set_log_interval, 0)