# Example #1
        # Dump the named locals for debugging; pyprob's util.eval_print
        # presumably evaluates these names in the caller's frame — so the
        # variable names above must match these strings exactly (verify).
        util.eval_print('input_shape', 'output_shape', 'batch_size',
                        'input_batch_shape', 'output_batch_shape',
                        'output_batch_shape_correct')

        # The network must emit exactly the expected batched output shape.
        self.assertEqual(output_batch_shape, output_batch_shape_correct)

    def test_nn_EmbeddingCNN3D5C(self):
        """A 3D input batch run through EmbeddingCNN3D5C must come out with
        the expected embedding shape [batch_size] + output_shape."""
        batch_size = 32
        input_shape = [2, 25, 25, 25]
        output_shape = [128]
        # NOTE: these names are looked up by util.eval_print below — keep them.
        input_batch_shape = [batch_size, *input_shape]
        output_batch_shape_correct = [batch_size, *output_shape]

        # Build a fresh network and push an all-zeros batch through it;
        # only the resulting shape matters here, not the values.
        net = EmbeddingCNN3D5C(input_shape=torch.Size(input_shape),
                               output_shape=torch.Size(output_shape))
        zeros_batch = torch.zeros(input_batch_shape)
        embedded = net(zeros_batch)
        output_batch_shape = list(embedded.size())

        util.eval_print('input_shape', 'output_shape', 'batch_size',
                        'input_batch_shape', 'output_batch_shape',
                        'output_batch_shape_correct')

        self.assertEqual(output_batch_shape, output_batch_shape_correct)


if __name__ == '__main__':
    # Fix the RNG seed so runs are reproducible, keep pyprob output terse
    # (verbosity 1), and let unittest report per-test results (verbosity 2).
    pyprob.set_random_seed(123)
    pyprob.set_verbosity(1)
    unittest.main(verbosity=2)
# Example #2
            # MAP estimate: mode of the deduplicated posterior, mapped back to
            # a letter of the model's alphabet.
            map_estimates.append(self._model._alphabet[int(posterior.combine_duplicates().mode)])
        add_random_walk_metropolis_hastings_duration(time.time() - start)

        # Fraction of test letters whose MAP estimate matches ground truth.
        accuracy = sum([1 if map_estimates[i] == test_letters[i] else 0 for i in range(len(test_letters))])/len(test_letters)
        # Total KL divergence between each empirical posterior (converted to a
        # categorical over the alphabet indices) and its true posterior.
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(util.empirical_to_categorical(p, max_val=len(self._model._alphabet)-1), tp) for (p, tp) in zip(posteriors, self._true_posteriors)]))

        util.eval_print('samples', 'test_letters', 'map_estimates', 'accuracy', 'kl_divergence')
        add_random_walk_metropolis_hastings_kl_divergence(kl_divergence)

        # Quality thresholds for this inference engine on this model.
        self.assertGreater(accuracy, 0.9)
        self.assertLess(kl_divergence, 0.25)


if __name__ == '__main__':
    # Reproducible runs, full pyprob output.
    pyprob.set_random_seed(123)
    pyprob.set_verbosity(2)
    # Test cases to run; BranchingTestCase is currently excluded.
    tests = [
        'GaussianWithUnknownMeanTestCase',
        'GaussianWithUnknownMeanMarsagliaTestCase',
        'HiddenMarkovModelTestCase',
        # 'BranchingTestCase',
        'MiniCaptchaTestCase',
    ]

    # Run the selected suites without exiting, so we can print a summary after.
    time_start = time.time()
    result = unittest.main(defaultTest=tests, verbosity=2, exit=False).result
    success = result.wasSuccessful()
    elapsed = time.time() - time_start
    print('\nDuration                   : {}'.format(util.days_hours_mins_secs_str(elapsed)))
    print('Models run                 : {}'.format(' '.join(tests)))
    print('\nTotal inference performance:\n')
    print(colored('                                       Samples        KL divergence  Duration (s) ', 'yellow', attrs=['bold']))
    print(colored('Importance sampling                  : ', 'yellow', attrs=['bold']), end='')
    print(colored('{:+.6e}  {:+.6e}  {:+.6e}'.format(importance_sampling_samples, importance_sampling_kl_divergence, importance_sampling_duration), 'white', attrs=['bold']))