Example #1
    def test_empty_belief_update(self):

        prior = IndependentNormals(features=['pink'])
        empty_df = pd.DataFrame()
        posterior = prior.literal_update(empty_df)

        self.assertDictEqual(prior.beliefs, posterior.beliefs)
Example #2
    def test_pragmatic_update(self):

        prior = IndependentNormals(features=['pink', 'circle'],
                                   precision=PRIOR_PRECISION)

        obs_mean = 2
        obs_precision = PRIOR_PRECISION
        obs = pd.DataFrame.from_records([{
            "feature": "pink",
            "mean": obs_mean,
            "precision": obs_precision
        }])

        literal_posterior = prior.literal_update(obs)

        pragmatic_precision = 2
        pragmatic_posterior = prior.pseudopragmatic_gaussian_update(
            obs, pragmatic_precision)

        # Literal and pragmatic posteriors should agree on the means of both features and on the precision of pink
        self.assertEqual(literal_posterior["pink"]["mean"],
                         pragmatic_posterior["pink"]["mean"])
        self.assertEqual(literal_posterior["pink"]["precision"],
                         pragmatic_posterior["pink"]["precision"])
        self.assertEqual(literal_posterior["circle"]["mean"],
                         pragmatic_posterior["circle"]["mean"])

        # Should *differ* for precision of unmentioned (circle) feature
        self.assertNotEqual(literal_posterior["circle"]["precision"],
                            pragmatic_posterior["circle"]["precision"])
        self.assertEqual(pragmatic_posterior["circle"]["precision"],
                         PRIOR_PRECISION + pragmatic_precision)
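
The test above pins down the behaviour only through its assertions, so here is a minimal sketch of what a pseudopragmatic update could look like, consistent with them: mentioned features get the ordinary literal (conjugate) update, while unmentioned features keep their mean and simply gain pragmatic_precision worth of extra confidence. The function name pseudopragmatic_update_sketch and the plain-dict belief format are assumptions for illustration, not the library's actual implementation.

import copy

def pseudopragmatic_update_sketch(beliefs, obs, pragmatic_precision):
    # beliefs: {feature: {"mean": float, "precision": float}} (assumed format)
    # obs: DataFrame with columns "feature", "mean", "precision"
    posterior = copy.deepcopy(beliefs)
    mentioned = set(obs["feature"])

    # Mentioned features: standard known-variance Gaussian conjugate update.
    for _, row in obs.iterrows():
        b = posterior[row["feature"]]
        new_precision = b["precision"] + row["precision"]
        b["mean"] = (b["precision"] * b["mean"]
                     + row["precision"] * row["mean"]) / new_precision
        b["precision"] = new_precision

    # Unmentioned features: silence is treated as an implicit observation at
    # the current mean, so the mean is unchanged and only the precision grows.
    for feature, b in posterior.items():
        if feature not in mentioned:
            b["precision"] += pragmatic_precision

    return posterior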
Example #3
    def test_belief_samples(self):

        mean = 0
        var = 2

        prior = IndependentNormals(features=['pink'],
                                   mean=mean,
                                   precision=1 / var)
        samples = prior.sample_beliefs(n=100000)

        self.assertAlmostEqual(mean, abs(samples['pink'].mean()), 1)
        self.assertAlmostEqual(var, samples['pink'].var(), 1)
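
This sampling test only requires that each feature be drawn independently from a normal with the stored mean and variance 1 / precision. A minimal sketch of such a sampler, assuming the same plain-dict belief format as above (the name sample_beliefs_sketch is hypothetical):

import numpy as np
import pandas as pd

def sample_beliefs_sketch(beliefs, n, rng=None):
    # One column per feature; each column holds n i.i.d. draws from
    # N(mean, 1 / precision).
    rng = np.random.default_rng() if rng is None else rng
    return pd.DataFrame({
        feature: rng.normal(b["mean"], np.sqrt(1.0 / b["precision"]), size=n)
        for feature, b in beliefs.items()
    })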
Example #4
    def test_generate_gaussian_priors(self):

        prior = IndependentNormals(features=['pink'])
        self.assertDictEqual(
            {"pink": {
                "mean": PRIOR_MEAN,
                "precision": PRIOR_PRECISION
            }}, prior.beliefs)
Example #5
    def test_literal_update_precision(self):

        prior = IndependentNormals(features=['pink', 'circle'],
                                   precision=PRIOR_PRECISION)

        obs_precision = 1
        obs = pd.DataFrame.from_records([{
            "feature": "pink",
            "mean": 0,
            "precision": obs_precision
        }])

        posterior = prior.literal_update(obs)

        self.assertEqual(posterior["pink"]["precision"],
                         PRIOR_PRECISION + obs_precision)
        self.assertEqual(posterior["circle"]["precision"], PRIOR_PRECISION)
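
The assertions here (and in the multi-feature test further below) match the standard known-variance Gaussian conjugate update: precisions add, and the posterior mean is the precision-weighted average of the prior mean and the observed mean. A one-feature sketch, purely for illustration (literal_update_sketch is a hypothetical name, not the library's API):

def literal_update_sketch(prior_mean, prior_precision, obs_mean, obs_precision):
    # Known-variance Gaussian conjugate update for a single feature.
    post_precision = prior_precision + obs_precision
    post_mean = (prior_precision * prior_mean
                 + obs_precision * obs_mean) / post_precision
    return post_mean, post_precision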
Example #6
    def __init__(self, name="Ensemble Feedforward NN", precision=2):

        priors = IndependentNormals(features=CONJUNCTIONS)

        super().__init__(CONJUNCTIONS, name=name + " (P{})".format(precision), prior=priors)

        # Load one trained model per cross-validation fold to form the ensemble
        self.models = [TrajectoryFeedbackRewardPredictor.from_cv_fold(fold) for fold in range(10)]
        self.precision = precision
Example #7
    def __init__(self, features=CONJUNCTIONS, name="Stateful Feedforward Neural Net", precision=2, fold=4):

        priors = IndependentNormals(features=features)

        super().__init__(features, name=name + " (P{})".format(precision), prior=priors)

        # Choose to load a specific model from a training fold
        self.nn = TrajectoryFeedbackRewardPredictor.from_cv_fold(fold)
        self.precision = precision
Example #8
    def __init__(self, features=PILOT_FEATURES, prior=None, name=None):

        self._name = name
        self.features = sorted(features)

        if prior is None:
            prior = IndependentNormals(self.features)

        self.initial_beliefs = prior
        self.belief_state = copy.deepcopy(self.initial_beliefs)
Example #9
    def test_optimal_action_selection(self):

        env = Environment.from_pilot_config(PILOT_FIRST_LEVEL_CONFIG)

        # Generate a belief that all objects except pink circles are terrible
        belief = IndependentNormals(mean=-10, precision=50)
        belief['pink|circle']['mean'] = 150

        belief_samples = belief.sample_beliefs(n=10)

        object_choices = optimal_choices_under_hypotheses(env, belief_samples)

        # Validate that we only ever chose pink circles
        for object_locations in object_choices.locations.values:
            self.assertSetEqual(PINK_CIRCLE_LOCATIONS, set(object_locations),
                                msg="Should only have chosen pink circles.")

        # Reset beliefs, resample, validate we only choose yellow squares.
        belief = IndependentNormals(mean=-10, precision=50)
        belief['yellow|square']['mean'] = 150

        belief_samples = belief.sample_beliefs(n=10)
        object_choices = optimal_choices_under_hypotheses(env, belief_samples)

        for object_locations in object_choices.locations.values:
            self.assertSetEqual(YELLOW_SQUARE_LOCATIONS, set(object_locations),
                                msg="Should only have chosen yellow squares.")
Example #10
    def test_multiple_literal_updates(self):

        prior = IndependentNormals(features=['pink', 'circle'],
                                   precision=PRIOR_PRECISION)

        obs_mean = 2
        obs_precision = PRIOR_PRECISION
        obs = pd.DataFrame.from_records([
            {
                "feature": "pink",
                "mean": obs_mean,
                "precision": obs_precision
            },
            {
                "feature": "circle",
                "mean": obs_mean,
                "precision": obs_precision
            },
        ])

        posterior = prior.literal_update(obs)

        self.assertAlmostEqual(posterior["pink"]["mean"],
                               (PRIOR_MEAN + obs_mean) / 2)
        self.assertAlmostEqual(posterior["circle"]["mean"],
                               (PRIOR_MEAN + obs_mean) / 2)
        self.assertAlmostEqual(posterior["circle"]["precision"],
                               PRIOR_PRECISION + obs_precision)

        posterior = posterior.literal_update(obs)

        self.assertAlmostEqual(posterior["pink"]["mean"],
                               (PRIOR_MEAN + 2 * obs_mean) / 3)
        self.assertAlmostEqual(posterior["circle"]["mean"],
                               (PRIOR_MEAN + 2 * obs_mean) / 3)
        self.assertAlmostEqual(posterior["circle"]["precision"],
                               PRIOR_PRECISION + 2 * obs_precision)
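
Since obs_precision equals PRIOR_PRECISION here, the precision-weighted mean collapses to a running average: the first update gives mean (PRIOR_MEAN + obs_mean) / 2 with precision 2 * PRIOR_PRECISION, and feeding the same observation in again gives (2 * (PRIOR_MEAN + obs_mean) / 2 + obs_mean) / 3 = (PRIOR_MEAN + 2 * obs_mean) / 3, which is exactly what the second block of assertions checks.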
Example #11
    @classmethod
    def from_json(cls, data):

        new_agent = cls()
        new_agent.belief_state = IndependentNormals.from_json(data)

        return new_agent