Example no. 1
    def test_accred_fitter(self):
        """ Test the fitter with some saved result data"""
        # ideal results
        with open(
                os.path.join(os.path.dirname(__file__),
                             'accred_ideal_results.json'), "r") as saved_file:
            ideal_results = json.load(saved_file)
        all_results = [
            Result.from_dict(result) for result in ideal_results['all_results']
        ]
        all_postp_list = ideal_results['all_postp_list']
        all_v_zero = ideal_results['all_v_zero']
        test_1 = accred.AccreditationFitter()
        for a, b, c in zip(all_results, all_postp_list, all_v_zero):
            test_1.AppendResults(a, b, c)
        (_, bnd, conf) = test_1.FullAccreditation(0.95)
        self.assertEqual(test_1._Nruns, test_1._Nacc,
                         "Error: Ideal outcomes not passing accred")

        # Re-derive the bound FullAccreditation should report (see the
        # sketch after this example).
        theta = np.sqrt(np.log(2 / (1 - conf)) / (2 * len(all_postp_list)))
        bound = 1.7 / len(all_postp_list[0])
        bound = bound / (1.0 - theta)
        self.assertAlmostEqual(
            bound, bnd, msg="Error: Ideal outcomes not giving correct bound")
        # noisy results
        with open(
                os.path.join(os.path.dirname(__file__),
                             'accred_noisy_results.json'), "r") as saved_file:
            noisy_results = json.load(saved_file)
        all_strings = noisy_results['all_strings']
        all_postp_list = noisy_results['all_postp_list']
        all_v_zero = noisy_results['all_v_zero']
        confidence = noisy_results['confidence']
        accred_full = noisy_results['accred_full']
        accred_mean = noisy_results['accred_mean']

        test_1 = accred.AccreditationFitter()
        for a, b, c in zip(all_strings, all_postp_list, all_v_zero):
            test_1.AppendStrings(a, b, c)

        accred_full_test = test_1.FullAccreditation(confidence)
        accred_mean_test = test_1.MeanAccreditation(confidence)
        self.assertEqual(accred_full_test[1], accred_full[1],
                         "Error: Noisy outcomes fail full accred")

        self.assertEqual(accred_mean[1], accred_mean_test[1],
                         "Error: Noisy outcomes fail mean accred")
Example no. 2

    def test_accred_fitter(self):
        """Test the fitter with some saved result data."""
        # Assumes the same imports as Example no. 1 (os, json, Result,
        # and the accreditation module as `accred`); this example uses
        # the older per-run interface instead of AppendResults.
        # ideal results
        with open(
                os.path.join(os.path.dirname(__file__),
                             'accred_ideal_results.json'), "r") as saved_file:
            ideal_results = json.load(saved_file)
        all_results = [
            Result.from_dict(result) for result in ideal_results['all_results']
        ]
        all_postp_list = ideal_results['all_postp_list']
        all_v_zero = ideal_results['all_v_zero']
        test_1 = accred.AccreditationFitter()
        for a, b, c in zip(all_results, all_postp_list, all_v_zero):
            test_1.single_protocol_run(a, b, c)
            self.assertEqual(test_1.flag, 'accepted',
                             "Error: Ideal outcomes not passing accred")
        # noisy results
        with open(
                os.path.join(os.path.dirname(__file__),
                             'accred_noisy_results.json'), "r") as saved_file:
            noisy_results = json.load(saved_file)
        all_results = [
            Result.from_dict(result) for result in noisy_results['all_results']
        ]
        all_postp_list = noisy_results['all_postp_list']
        all_v_zero = noisy_results['all_v_zero']
        all_acc = noisy_results['all_acc']
        test_1 = accred.AccreditationFitter()
        for a, b, c, d in zip(all_results, all_postp_list, all_v_zero,
                              all_acc):
            test_1.single_protocol_run(a, b, c)
            self.assertEqual(test_1.flag, d,
                             "Error: Noisy outcomes not correct accred")
        test_1.bound_variation_distance(noisy_results['theta'])
        bound = test_1.bound
        self.assertEqual(bound, noisy_results['bound'],
                         "Error: Incorrect bound for noisy outcomes")
Example no. 3
    def test_accred_fitter(self):
        """Test the fitter with some pickled result data."""
        # Assumes the test module imports os, pickle, and the
        # accreditation module as `accred` (imports are not shown in
        # this excerpt).
        # ideal results
        with open(
                os.path.join(os.path.dirname(__file__),
                             'accred_ideal_results.pkl'), 'rb') as saved_file:
            ideal_results = pickle.load(saved_file)
        all_results = ideal_results['all_results']
        all_postp_list = ideal_results['all_postp_list']
        all_v_zero = ideal_results['all_v_zero']
        test_1 = accred.AccreditationFitter()
        for a, b, c in zip(all_results, all_postp_list, all_v_zero):
            test_1.single_protocol_run(a, b, c)
            self.assertEqual(test_1.flag, 'accepted',
                             "Error: Ideal outcomes not passing accred")
        # noisy results
        with open(
                os.path.join(os.path.dirname(__file__),
                             'accred_noisy_results.pkl'), 'rb') as saved_file:
            noisy_results = pickle.load(saved_file)
        all_results = noisy_results['all_results']
        all_postp_list = noisy_results['all_postp_list']
        all_v_zero = noisy_results['all_v_zero']
        all_acc = noisy_results['all_acc']
        test_1 = accred.AccreditationFitter()
        for a, b, c, d in zip(all_results, all_postp_list, all_v_zero,
                              all_acc):
            test_1.single_protocol_run(a, b, c)
            self.assertEqual(test_1.flag, d,
                             "Error: Noisy outcomes not correct accred")
        test_1.bound_variation_distance(noisy_results['theta'])
        bound = test_1.bound
        self.assertEqual(bound, noisy_results['bound'],
                         "Error: Incorrect bound for noisy outcomes")