    def extract_training_observations(
        self,
        game_samples: SampleData,
        test_fraction: float,
    ) -> Tuple[SampleData, SampleData]:
        train_samples, test_samples = split_train_test(game_samples,
                                                       test_fraction)

        # Build policy samples as (features, labels) pairs
        # - Note that this step scrubs the meta info
        print("\nBuilding policy training observations. Sit tight.")
        train_features, train_labels = self.extract_policy_observations(
            train_samples)
        train_samples = SampleData(
            features=train_features,
            labels=train_labels,
        )

        test_features, test_labels = self.extract_policy_observations(
            test_samples)
        test_samples = SampleData(
            features=test_features,
            labels=test_labels,
        )

        return train_samples, test_samples

    def extract_training_observations(
        self,
        samples: SampleData,
        test_fraction: float,
    ) -> Tuple[SampleData, SampleData]:
        train_samples, test_samples = split_train_test(samples, test_fraction)

        if self.weighting_strat:
            train_samples.weights = calculate_weights(train_samples,
                                                      self.highest_generation,
                                                      self.weighting_strat)
            test_samples.weights = calculate_weights(test_samples,
                                                     self.highest_generation,
                                                     self.weighting_strat)

        return train_samples, test_samples
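
    # The weighting logic itself lives in calculate_weights, which is not shown
    # in this excerpt. As a rough, hypothetical illustration only (not the real
    # calculate_weights), a generation-based strategy could decay sample weights
    # geometrically with age relative to highest_generation:
    @staticmethod
    def _example_generation_decay_weights(generations, highest_generation, decay=0.9):
        # The newest generation gets weight 1.0; each older generation is
        # down-weighted by another factor of `decay`.
        return [decay ** (highest_generation - g) for g in generations]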

    def train(self, samples, test_fraction=0.2):
        # NOTE: this trainer is currently disabled; the guard below stops
        # execution before the tabular training and evaluation underneath.
        raise RuntimeError("Broken")
        train_set, test_set = split_train_test(samples, test_fraction, "value")

        # "Train"
        self.state_visits = defaultdict(int)
        self.state_wins = defaultdict(int)
        for features, label in train_set:
            key = tuple(features)
            self.state_visits[key] += 1
            self.state_wins[key] += label

        # Convert them to dicts to maintain consistency with load
        self.state_visits = dict(self.state_visits)
        self.state_wins = dict(self.state_wins)

        # Delete any keys that are too infrequent
        to_delete = [k for k, v in self.state_visits.items() if v <= 5]
        for k in to_delete:
            del self.state_visits[k]
            del self.state_wins[k]

        # "Test"
        absolute_error = 0
        absolute_error_random = 0
        for features, label in test_set:
            value = self.predict(features)
            # Random baseline: uniform value in [-1.0, 1.0)
            random_value = -1.0 + (2.0 * random.random())
            absolute_error += abs(label - value)
            absolute_error_random += abs(label - random_value)
        mean_absolute_error = absolute_error / len(test_set)
        mean_absolute_error_random = absolute_error_random / len(test_set)

        print("MAE:", mean_absolute_error)
        print("MAE (random):", mean_absolute_error_random)