Code Example #1
File: test_inference.py  Project: gbaydin/pyprob
# Imports inferred from the identifiers used below; the variables samples,
# training_traces, perf_score_importance_sampling and
# perf_score_inference_compilation are presumably module-level values set by
# the test cases defined elsewhere in test_inference.py.
import sys
import time
import unittest

from termcolor import colored

import pyprob
from pyprob import util

if __name__ == '__main__':
    # if torch.cuda.is_available():
    # pyprob.set_cuda(True)
    pyprob.set_verbosity(1)
    tests = []
    # tests.append('MVNWithUnknownMeanTestCase')
    tests.append('GaussianWithUnknownMeanTestCase')
    # tests.append('GaussianWithUnknownMeanMarsagliaTestCase')
    # tests.append('HiddenMarkovModelTestCase')

    time_start = time.time()
    success = unittest.main(defaultTest=tests, verbosity=2,
                            exit=False).result.wasSuccessful()
    print('\nDuration             : {}'.format(
        util.days_hours_mins_secs_str(time.time() - time_start)))
    print('Models run           : {}'.format(' '.join(tests)))
    print('Samples              : {}'.format(samples))
    print('Training traces      : {}\n'.format(training_traces))
    print('\nTotal inference performance scores\n')
    print(colored('Importance sampling  : ', 'yellow', attrs=['bold']), end='')
    print(
        colored('{:+.6e}'.format(perf_score_importance_sampling),
                'white',
                attrs=['bold']))
    print(colored('Inference compilation: ', 'yellow', attrs=['bold']), end='')
    print(
        colored('{:+.6e}\n'.format(perf_score_inference_compilation),
                'white',
                attrs=['bold']))
    sys.exit(0 if success else 1)
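
Both examples on this page drive the test cases with the same pattern: passing exit=False to unittest.main keeps it from calling sys.exit itself, so the returned TestProgram object can be inspected and the process exit code set manually. Below is a minimal, self-contained sketch of just that pattern using only the standard library; the SmokeTestCase class and its single test are invented for illustration and stand in for the pyprob test cases.

import sys
import unittest


class SmokeTestCase(unittest.TestCase):
    # A trivial test case standing in for the pyprob test cases above.
    def test_truth(self):
        self.assertTrue(True)


if __name__ == '__main__':
    tests = ['SmokeTestCase']
    # exit=False stops unittest.main from terminating the interpreter, so the
    # TestProgram instance it returns can be queried for the overall result.
    program = unittest.main(defaultTest=tests, verbosity=2, exit=False)
    success = program.result.wasSuccessful()
    # Translate the boolean result into a conventional process exit code.
    sys.exit(0 if success else 1)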
Code Example #2
        # Tail of a test method (the computation of kl_divergence is omitted
        # from this excerpt); the test passes when the KL divergence between
        # the inferred posterior and the reference posterior stays below 0.75.
        self.assertLess(kl_divergence, 0.75)


if __name__ == '__main__':
    # The *_samples, *_kl_divergence and *_duration variables printed below are
    # presumably module-level values accumulated by the test cases above.
    # pyprob.seed(123)
    pyprob.set_verbosity(1)
    tests = []
    tests.append('GaussianWithUnknownMeanTestCase')
    tests.append('GaussianWithUnknownMeanMarsagliaWithReplacementTestCase')
    tests.append('HiddenMarkovModelTestCase')
    tests.append('BranchingTestCase')

    time_start = time.time()
    success = unittest.main(defaultTest=tests, verbosity=2,
                            exit=False).result.wasSuccessful()
    print('\nDuration                   : {}'.format(
        util.days_hours_mins_secs_str(time.time() - time_start)))
    print('Models run                 : {}'.format(' '.join(tests)))
    print('\nTotal inference performance:\n')
    print(colored('                                       Samples        KL divergence  Duration (s) ', 'yellow', attrs=['bold']))
    print(colored('Importance sampling                  : ', 'yellow', attrs=['bold']), end='')
    print(colored('{:+.6e}  {:+.6e}  {:+.6e}'.format(
        importance_sampling_samples,
        importance_sampling_kl_divergence,
        importance_sampling_duration), 'white', attrs=['bold']))
    print(colored('Importance sampling w/ inference net.: ', 'yellow', attrs=['bold']), end='')
    print(colored('{:+.6e}  {:+.6e}  {:+.6e}'.format(
        importance_sampling_with_inference_network_samples,
        importance_sampling_with_inference_network_kl_divergence,
        importance_sampling_with_inference_network_duration), 'white', attrs=['bold']))
    print(colored('Lightweight Metropolis Hastings      : ', 'yellow', attrs=['bold']), end='')
    print(colored('{:+.6e}  {:+.6e}  {:+.6e}'.format(
        lightweight_metropolis_hastings_samples,
        lightweight_metropolis_hastings_kl_divergence,
        lightweight_metropolis_hastings_duration), 'white', attrs=['bold']))
    print(colored('Random-walk Metropolis Hastings      : ', 'yellow', attrs=['bold']), end='')
    print(colored('{:+.6e}  {:+.6e}  {:+.6e}\n'.format(
        random_walk_metropolis_hastings_samples,
        random_walk_metropolis_hastings_kl_divergence,
        random_walk_metropolis_hastings_duration), 'white', attrs=['bold']))

    # for container in docker_containers:
    #     print('Killing Docker container {}'.format(container.name))
    #     container.kill()
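
The assertions in Code Example #2 bound the KL divergence between the posterior produced by each inference engine and a known reference posterior. As a hedged illustration of what such a check computes, and not pyprob's actual implementation, the sketch below evaluates the closed-form KL divergence between two univariate Gaussians; the gaussian_kl_divergence helper and the distribution parameters are invented for the example.

import math


def gaussian_kl_divergence(mean_p, stddev_p, mean_q, stddev_q):
    # Closed-form KL(P || Q) for univariate Gaussians
    # P = N(mean_p, stddev_p^2) and Q = N(mean_q, stddev_q^2).
    return (math.log(stddev_q / stddev_p)
            + (stddev_p ** 2 + (mean_p - mean_q) ** 2) / (2 * stddev_q ** 2)
            - 0.5)


if __name__ == '__main__':
    # Hypothetical numbers: an inferred posterior vs. an analytical posterior.
    kl_divergence = gaussian_kl_divergence(7.3, 1.25, 7.25, 1.12)
    print('KL divergence: {:.6f}'.format(kl_divergence))
    # Mirrors the style of the assertion shown in Code Example #2.
    assert kl_divergence < 0.75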