def test_average(self):
  """Test the average of policies.

  Here we test that the average of values is the value of the average policy.
  """
  game = crowd_modelling.MFGCrowdModellingGame()
  uniform_policy = policy.UniformRandomPolicy(game)
  mfg_dist = distribution.DistributionPolicy(game, uniform_policy)
  br_value = best_response_value.BestResponse(game, mfg_dist)
  py_value = policy_value.PolicyValue(game, mfg_dist, uniform_policy)
  greedy_pi = greedy_policy.GreedyPolicy(game, None, br_value)
  greedy_pi = greedy_pi.to_tabular()
  merged_pi = fictitious_play.MergedPolicy(
      game, list(range(game.num_players())),
      [uniform_policy, greedy_pi],
      [mfg_dist, distribution.DistributionPolicy(game, greedy_pi)],
      [0.5, 0.5])
  merged_pi_value = policy_value.PolicyValue(game, mfg_dist, merged_pi)
  self.assertAlmostEqual(
      merged_pi_value(game.new_initial_state()),
      (br_value(game.new_initial_state()) +
       py_value(game.new_initial_state())) / 2)

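# A minimal harness sketch for completeness: the test method above presumably
# sits inside an absltest.TestCase in the original test file (the class name
# below is an assumption, not taken from the source):
#
#   from absl.testing import absltest
#
#   class MergedPolicyTest(absltest.TestCase):
#     ...  # test_average defined here
#
#   if __name__ == '__main__':
#     absltest.main()
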
def main(argv: Sequence[str]) -> None:
  # TODO(perolat): move to an example directory.
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  mfg_game = pyspiel.load_game(FLAGS.game, GAME_SETTINGS.get(FLAGS.game, {}))
  mfg_state = mfg_game.new_initial_state()
  print('Playing a single arbitrary trajectory')
  while not mfg_state.is_terminal():
    print('State obs string:', mfg_state.observation_string(0))
    if mfg_state.current_player() == pyspiel.PlayerId.CHANCE:
      # Sample a chance outcome according to its probability.
      action_list, prob_list = zip(*mfg_state.chance_outcomes())
      action = np.random.choice(action_list, p=prob_list)
      mfg_state.apply_action(action)
    elif mfg_state.current_player() == pyspiel.PlayerId.MEAN_FIELD:
      # At mean-field nodes, register a uniform distribution over the support.
      dist_to_register = mfg_state.distribution_support()
      n_states = len(dist_to_register)
      dist = [1.0 / n_states for _ in range(n_states)]
      mfg_state.update_distribution(dist)
    else:
      # Otherwise, pick a legal action uniformly at random.
      legal_list = mfg_state.legal_actions()
      action = np.random.choice(legal_list)
      mfg_state.apply_action(action)

  print('compute nashconv')
  uniform_policy = policy.UniformRandomPolicy(mfg_game)
  nash_conv_fp = nash_conv.NashConv(mfg_game, uniform_policy)
  print('Nashconv:', nash_conv_fp.nash_conv())

  print('compute distribution')
  mfg_dist = distribution.DistributionPolicy(mfg_game, uniform_policy)
  br_value = best_response_value.BestResponse(
      mfg_game, mfg_dist, value.TabularValueFunction(mfg_game))
  py_value = policy_value.PolicyValue(mfg_game, mfg_dist, uniform_policy,
                                      value.TabularValueFunction(mfg_game))
  print(
      'Value of a best response policy to a uniform policy '
      '(computed with best_response_value)',
      br_value(mfg_game.new_initial_state()))
  print('Value of the uniform policy:', py_value(mfg_game.new_initial_state()))
  greedy_pi = greedy_policy.GreedyPolicy(mfg_game, None, br_value)
  greedy_pi = greedy_pi.to_tabular()
  pybr_value = policy_value.PolicyValue(mfg_game, mfg_dist, greedy_pi,
                                        value.TabularValueFunction(mfg_game))
  print(
      'Value of a best response policy to a uniform policy (computed at the '
      'value of the greedy policy of the best response value)',
      pybr_value(mfg_game.new_initial_state()))

  print('merge')
  # Average the uniform policy and the greedy best response, weighting each by
  # its own distribution; the merged value should match the average of values.
  merged_pi = fictitious_play.MergedPolicy(
      mfg_game, list(range(mfg_game.num_players())),
      [uniform_policy, greedy_pi],
      [mfg_dist, distribution.DistributionPolicy(mfg_game, greedy_pi)],
      [0.5, 0.5])
  merged_pi_value = policy_value.PolicyValue(
      mfg_game, mfg_dist, merged_pi, value.TabularValueFunction(mfg_game))
  print(br_value(mfg_game.new_initial_state()))
  print(py_value(mfg_game.new_initial_state()))
  print(merged_pi_value(mfg_game.new_initial_state()))
  print((br_value(mfg_game.new_initial_state()) +
         py_value(mfg_game.new_initial_state())) / 2)

  print('fp')
  fp = fictitious_play.FictitiousPlay(mfg_game)
  for j in range(100):
    print('Iteration', j, 'of fictitious play')
    fp.iteration()
    fp_policy = fp.get_policy()
    nash_conv_fp = nash_conv.NashConv(mfg_game, fp_policy)
    print('Nashconv of the current FP policy', nash_conv_fp.nash_conv())

  print('md')
  md = mirror_descent.MirrorDescent(mfg_game,
                                    value.TabularValueFunction(mfg_game))
  for j in range(10):
    print('Iteration', j, 'of mirror descent')
    md.iteration()
    md_policy = md.get_policy()
    nash_conv_md = nash_conv.NashConv(mfg_game, md_policy)
    print('Nashconv of the current MD policy', nash_conv_md.nash_conv())

def main(argv: Sequence[str]) -> None:
  # TODO(perolat): move to an example directory.
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  # Custom game settings: reward depends only on the distribution; the
  # forbidden states and initial distribution are given in the game's string
  # encoding.
  game_settings = {
      'only_distribution_reward': True,
      'forbidden_states': '[0|0;0|1]',
      'initial_distribution': '[0|2;0|3]',
      'initial_distribution_value': '[0.5;0.5]',
  }
  mfg_game = pyspiel.load_game(FLAGS.game, game_settings)
  mfg_state = mfg_game.new_initial_state()
  # Play a single arbitrary trajectory.
  while not mfg_state.is_terminal():
    print(mfg_state.observation_string(0))
    if mfg_state.current_player() == pyspiel.PlayerId.CHANCE:
      # Sample a chance outcome according to its probability.
      action_list, prob_list = zip(*mfg_state.chance_outcomes())
      action = np.random.choice(action_list, p=prob_list)
      mfg_state.apply_action(action)
    elif mfg_state.current_player() == pyspiel.PlayerId.MEAN_FIELD:
      # At mean-field nodes, register a uniform distribution over the support.
      dist_to_register = mfg_state.distribution_support()
      n_states = len(dist_to_register)
      dist = [1.0 / n_states for _ in range(n_states)]
      mfg_state.update_distribution(dist)
    else:
      # Otherwise, pick a legal action uniformly at random.
      legal_list = mfg_state.legal_actions()
      action = np.random.choice(legal_list)
      mfg_state.apply_action(action)

  print('compute nashconv')
  uniform_policy = policy.UniformRandomPolicy(mfg_game)
  nash_conv_fp = nash_conv.NashConv(mfg_game, uniform_policy)
  print(nash_conv_fp.nash_conv())

  print('compute distribution')
  mfg_dist = distribution.DistributionPolicy(mfg_game, uniform_policy)
  br_value = best_response_value.BestResponse(mfg_game, mfg_dist)
  py_value = policy_value.PolicyValue(mfg_game, mfg_dist, uniform_policy)
  print(br_value(mfg_game.new_initial_state()))
  print(py_value(mfg_game.new_initial_state()))
  greedy_pi = greedy_policy.GreedyPolicy(mfg_game, None, br_value)
  greedy_pi = greedy_pi.to_tabular()
  pybr_value = policy_value.PolicyValue(mfg_game, mfg_dist, greedy_pi)
  print(pybr_value(mfg_game.new_initial_state()))

  print('merge')
  merged_pi = fictitious_play.MergedPolicy(
      mfg_game, list(range(mfg_game.num_players())),
      [uniform_policy, greedy_pi],
      [mfg_dist, distribution.DistributionPolicy(mfg_game, greedy_pi)],
      [0.5, 0.5])
  merged_pi_value = policy_value.PolicyValue(mfg_game, mfg_dist, merged_pi)
  print(br_value(mfg_game.new_initial_state()))
  print(py_value(mfg_game.new_initial_state()))
  print(merged_pi_value(mfg_game.new_initial_state()))
  print((br_value(mfg_game.new_initial_state()) +
         py_value(mfg_game.new_initial_state())) / 2)

  print('fp')
  fp = fictitious_play.FictitiousPlay(mfg_game)
  for j in range(100):
    print(j)
    fp.iteration()
    fp_policy = fp.get_policy()
    nash_conv_fp = nash_conv.NashConv(mfg_game, fp_policy)
    print(nash_conv_fp.nash_conv())
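
# --- Assumed module preamble (not part of the snippets above) ---
# A minimal sketch of the imports these snippets rely on. Module paths follow
# OpenSpiel's Python layout and may differ between versions; FLAGS.game and
# GAME_SETTINGS are defined at module level in the original example files and
# are not reproduced here.
from typing import Sequence

from absl import app
from absl import flags

import numpy as np

from open_spiel.python import policy
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import best_response_value
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import fictitious_play
from open_spiel.python.mfg.algorithms import greedy_policy
from open_spiel.python.mfg.algorithms import mirror_descent
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.games import crowd_modelling
import pyspiel

FLAGS = flags.FLAGS
# Each example script presumably declares its own game flag, e.g.
# flags.DEFINE_string('game', ..., 'Game to load.'), and ends with the
# standard absl entry point:
# if __name__ == '__main__':
#   app.run(main)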