Example #1
class TestQlearning(TestCase):
    def setUp(self):
        self.real_player = RealPlayer(path=TEST_DATA_PATH, model=None)

    def test_q_table_values_q_learning(self):
        # given
        self.real_player.model = Qlearning()
        parameters = np.array([1, 0.1])
        expected = [-0.1, 0, 0.1, 0, 0.1, 0]
        # when
        self.real_player.log_likelihood_function(params=parameters)
        actual = self.real_player.model.Q_table
        # then
        assert_array_equal(actual, expected)

    def test_q_table_values_rescorla_wagner(self):
        # given
        self.real_player.model = RescorlaWagner()
        parameters = np.array([1, 1, 2])
        expected = [-2, 0, 1, 0, 1, 0]
        # when
        self.real_player.log_likelihood_function(params=parameters)
        actual = self.real_player.model.Q_table
        # then
        assert_array_equal(actual, expected)
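For reference, the expected Q_table above is consistent with a single delta-rule update per trial from all-zero values: each stimulus pair in the fixture occurs once, the fixture rewards are (1, 1, -1), and with learning rate alpha = 0.1 the chosen stimulus moves by alpha * (reward - Q) = +/-0.1 while every unchosen stimulus stays at 0. A quick arithmetic check (the delta rule itself is an assumption about the Qlearning implementation, inferred from the test values):

# Assumed update rule: Q <- Q + alpha * (reward - Q), starting from Q = 0.
alpha = 0.1
for reward in (1, 1, -1):
    q = 0.0
    print(q + alpha * (reward - q))  # 0.1, 0.1, -0.1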
Example #2
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection


def make_plot(data_dir):
    model = Qlearning()
    rp = RealPlayer(data_dir, model=model)
    game = GameSession()
    player = VirtualPlayer(10,
                           0.1,
                           game_skeleton=game.game_skeleton,
                           model=model)
    game.play(player=player)
    game._create_result()

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # Parameter grid: temperature T and learning rate alpha.
    T = np.arange(-1, 1, 0.1)
    alpha = np.arange(0, 1, 0.05)

    X, Y = np.meshgrid(T, alpha)

    # Evaluate the log-likelihood at every (T, alpha) pair.
    zs = np.array([
        rp.log_likelihood_function(list(x))
        for x in zip(np.ravel(X), np.ravel(Y))
    ])
    Z = zs.reshape(X.shape)

    ax.plot_surface(X, Y, Z)

    ax.set_xlabel('T')
    ax.set_ylabel('alpha')
    ax.set_zlabel('log likelihood')
    plt.show()
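The core of make_plot is the meshgrid -> ravel -> reshape idiom for evaluating a scalar function over a 2-D parameter grid. A self-contained sketch of just that idiom, with a dummy function standing in for log_likelihood_function:

import numpy as np

def dummy_likelihood(params):
    # Stand-in for rp.log_likelihood_function.
    return params[0] ** 2 + params[1]

T = np.arange(-1, 1, 0.1)        # 20 temperature values
alpha = np.arange(0, 1, 0.05)    # 20 learning-rate values
X, Y = np.meshgrid(T, alpha)     # both shaped (20, 20)

zs = np.array([dummy_likelihood(list(p))
               for p in zip(np.ravel(X), np.ravel(Y))])
Z = zs.reshape(X.shape)          # back to (20, 20) for plot_surface
print(Z.shape)                   # (20, 20)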
Example #3
 def test_read_real_player_excel(self):
     expected = pd.DataFrame({
         'StimulusLeft': [5, 4, 1],
         'StimulusRight': [6, 3, 2],
         'Action': [1, 0, 1],
         'Was the Action Correct?': [1, 1, 1],
         'Reward': [1, 1, -1]
     }).sort_index()
     actual = RealPlayer._read_real_player_excel(TEST_DATA_PATH).sort_index()
     pd.testing.assert_frame_equal(actual, expected)
Example #4
def save_all_real_players_parameters_to_csv(data_dir_path, new_filename, model,
                                            get_parameters):
    # type: (str, str, Qlearning, function) -> None
    all_filenames = os.listdir(data_dir_path)
    with open('{}.csv'.format(new_filename), 'w', newline='') as file:
        writer = csv.writer(file, delimiter=',')
        writer.writerow(get_header(model))
        for filename in all_filenames:
            if filename.endswith('xls'):
                row = []
                rp = RealPlayer(os.path.join(data_dir_path, filename),
                                get_model(model))
                name = os.path.splitext(os.path.basename(filename))[0][:-8]
                player_parameters, starting_points = get_parameters(
                    real_player=rp)
                criteria = rp.model_selection()
                row.append(name)
                row.extend(player_parameters)
                row.extend(starting_points)
                row.extend(criteria)
                writer.writerow(row)
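A hypothetical driver for the function above, assuming a directory of per-player .xls logs and the get_optimized_parameters/start_points API from the surrounding examples; the directory name, CSV name, and the value passed through to get_model/get_header are all placeholders:

def get_parameters(real_player):
    # Matches the call site above: returns (player_parameters, starting_points).
    return real_player.get_optimized_parameters(), real_player.start_points

save_all_real_players_parameters_to_csv('data', 'fitted_parameters',
                                        'Qlearning', get_parameters)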
Example #5
 def test_get_default_optimization_start_points_for_rescorla_wagner(self):
     real_player = RealPlayer(TEST_DATA_PATH, model=RescorlaWagner())
     assert_array_equal(real_player.start_points, np.array([1, 0.1, 0.1]))
Example #6
 def test_get_default_optimization_start_points_for_Q_learning(self):
     real_player = RealPlayer(TEST_DATA_PATH, model=Qlearning())
     assert_array_equal(real_player.start_points, np.array([1, 0.1]))
Example #7
 def setUp(self):
     self.real_player = RealPlayer(TEST_DATA_PATH, model=Mock())
Example #8
class TestRealPlayer(TestCase):
    def setUp(self):
        self.real_player = RealPlayer(TEST_DATA_PATH, model=Mock())

    def test_read_real_player_excel(self):
        expected = pd.DataFrame({
            'StimulusLeft': [5, 4, 1],
            'StimulusRight': [6, 3, 2],
            'Action': [1, 0, 1],
            'Was the Action Correct?': [1, 1, 1],
            'Reward': [1, 1, -1]
        }).sort_index()
        actual = RealPlayer._read_real_player_excel(TEST_DATA_PATH).sort_index()
        pd.testing.assert_frame_equal(actual, expected)

    @patch('player.minimize')
    def test_max_log_likelihood(self, mock_minimize):
        # given
        start_points = [3, 0.2]
        self.real_player.model = Qlearning()
        # when
        self.real_player.max_log_likelihood(start_points=start_points)
        # then
        mock_minimize.assert_called_once_with(
            self.real_player.log_likelihood_function,
            x0=start_points,
            method='Nelder-Mead')

    @patch('player.minimize')
    def test_max_log_likelihood_with_default_starting_points(
            self, mock_minimize):
        # given
        default_start_points = np.array([1, 0.1])
        self.real_player.model = Qlearning()
        # when
        self.real_player.max_log_likelihood()
        # then
        mock_minimize.assert_called_once()
        assert_array_equal(mock_minimize.call_args[1]['x0'],
                           default_start_points)

    def test_get_optimized_parameters_for_q_learning(self):
        # given
        self.real_player.model = Qlearning()
        # when
        actual = self.real_player.get_optimized_parameters()
        # then
        self.assertIsInstance(actual, np.ndarray)
        self.assertEqual(len(actual), 2)

    def test_get_optimized_parameters_for_rescorla_wagner(self):
        # given
        self.real_player.model = RescorlaWagner()
        # when
        actual = self.real_player.get_optimized_parameters()
        # then
        self.assertIsInstance(actual, np.ndarray)
        self.assertEqual(len(actual), 3)

    def test_log_likelihood_function_q_learning(self):
        # given
        self.real_player.model = Qlearning()
        parameters = np.array([1, 0.1])
        expected = -1 * (3 * np.log(1 / 2))
        # when
        actual = self.real_player.log_likelihood_function(params=parameters)
        # then
        self.assertEqual(actual, expected)

    def test_log_likelihood_function_rescorla_wagner(self):
        # given
        self.real_player.model = RescorlaWagner()
        parameters = np.array([1, 1, 2])
        expected = -1 * (3 * np.log(1 / 2))
        # when
        actual = self.real_player.log_likelihood_function(params=parameters)
        # then
        self.assertEqual(actual, expected)

    def test_get_default_optimization_start_points_for_Q_learning(self):
        real_player = RealPlayer(TEST_DATA_PATH, model=Qlearning())
        assert_array_equal(real_player.start_points, np.array([1, 0.1]))

    def test_get_default_optimization_start_points_for_rescorla_wagner(self):
        real_player = RealPlayer(TEST_DATA_PATH, model=RescorlaWagner())
        assert_array_equal(real_player.start_points, np.array([1, 0.1, 0.1]))
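The expected value in the two log-likelihood tests follows from the fixture: each of the three stimulus pairs occurs only once, so both Q-values are still zero at every choice and the softmax assigns probability 1/2 to whichever action was taken (this reading of the model is an inference from the tests, not from the implementation). The negative log-likelihood is therefore:

import numpy as np

# NLL = -(ln(1/2) + ln(1/2) + ln(1/2)) = 3 * ln 2
print(-1 * (3 * np.log(1 / 2)))  # 2.0794415416798357
print(3 * np.log(2))             # identical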
Example #9
 def setUp(self):
     self.real_player = RealPlayer(path=TEST_DATA_PATH, model=None)
Example #10
from scripts.models import Qlearning, RescorlaWagner
from scripts.player import RealPlayer

if __name__ == '__main__':
    model = RescorlaWagner()
    rp = RealPlayer(
        path="C:\\Users\\Marlena\\PycharmProjects\\ZPI\\ZPI\\data\\MarlenaDudalearning.xls",
        model=model)
    print(rp.get_optimized_parameters())
    print(rp.model_selection())