def continue_sampling(self, matrix):
    """Decide whether more samples of the game are needed.

    Re-runs replicator dynamics from the previously found equilibria and
    from a set of fresh starting mixtures; sampling continues (True) when
    an old equilibrium has moved by more than ``compare_threshold``, when
    a new low-regret equilibrium appears, or when no candidate meets the
    regret threshold at all.

    Side effects: ``self.old_equilibria`` is replaced by the distinct
    low-regret equilibria found (or, if none qualify, by the single
    lowest-regret candidate).

    Args:
        matrix: sampled-payoff object exposing ``profile_dict`` and
            ``toGame()`` (project type — structure assumed from usage).

    Returns:
        bool: True if sampling should continue, False otherwise.
    """
    # No data yet: trivially keep sampling.
    if not matrix.profile_dict:
        return True
    game = matrix.toGame()
    decision = False
    equilibria = []  # distinct low-regret equilibria found this round
    all_eq = []      # every candidate, regardless of regret/distance
    for old_eq in self.old_equilibria:
        new_eq = Nash.replicator_dynamics(game, old_eq, self.iters,
                                          self.converge_threshold)
        # If the equilibrium moved noticeably, estimates are unstable.
        decision = decision or \
            linalg.norm(new_eq - old_eq, 2) > self.compare_threshold
        distances = [linalg.norm(e - new_eq, 2) for e in equilibria]
        # Keep only low-regret candidates not already represented.
        if Regret.regret(game, new_eq) <= self.regret_threshold and \
                all(d >= self.dist_threshold for d in distances):
            equilibria.append(new_eq)
        all_eq.append(new_eq)
    # Restart search from biased, uniform, and random mixtures to look
    # for equilibria the previous round missed.
    for m in game.biasedMixtures() + [game.uniformMixture()] + \
            [game.randomMixture() for __ in range(self.random_restarts)]:
        eq = Nash.replicator_dynamics(game, m, self.iters,
                                      self.converge_threshold)
        distances = [linalg.norm(e - eq, 2) for e in equilibria]
        if Regret.regret(game, eq) <= self.regret_threshold and \
                all(d >= self.dist_threshold for d in distances):
            # A genuinely new equilibrium means the picture is still
            # changing, so keep sampling.
            equilibria.append(eq)
            decision = True
        all_eq.append(eq)
    if not equilibria:
        # Nothing met the regret threshold; track the best candidate and
        # keep sampling.
        decision = True
        self.old_equilibria = [min(all_eq,
                                   key=lambda e: Regret.regret(game, e))]
    else:
        self.old_equilibria = equilibria
    return decision
def continue_sampling(self, matrix):
    """Decide whether more samples are needed, via confidence intervals.

    Runs replicator dynamics from biased, uniform, and random starting
    mixtures; any candidate equilibrium whose one-sided confidence
    interval is already tighter than ``self.delta`` is recorded in
    ``self.eq`` and sampling is considered finished for it.

    Side effects: appends accepted equilibria to ``self.eq``.

    Args:
        matrix: sampled-payoff object exposing ``profile_dict`` and
            ``toGame()`` (project type — structure assumed from usage).

    Returns:
        bool: True if sampling should continue (no candidate's interval
        was below ``self.delta``), False once at least one was accepted.
    """
    # No data yet: trivially keep sampling.
    if not matrix.profile_dict:
        return True
    game = matrix.toGame()
    decision = True
    for m in game.biasedMixtures() + [game.uniformMixture()] + \
            [game.randomMixture() for __ in range(self.random_restarts)]:
        eq = Nash.replicator_dynamics(game, m, self.iters,
                                      self.converge_threshold)
        confidence_interval = self.ci_calculator.one_sided_interval(
            matrix, eq, self.alpha)
        # Interval tight enough at this significance level: accept the
        # equilibrium and stop requesting samples.
        if confidence_interval < self.delta:
            self.eq.append(eq)
            decision = False
    return decision