Example #1
def test_reset(
        self, env: CounterpointEnv, actions: List[int],
        expected: np.ndarray
) -> None:
    """Test `reset` method."""
    for action in actions:
        env.step(action)
    observation = env.reset()
    np.testing.assert_equal(observation, expected)
    assert env.piece.current_time_in_eighths == 8
Example #2
def test_estimate_number_of_trials(env: CounterpointEnv, actions: List[int],
                                   n_trials_estimation_depth: int,
                                   n_trials_estimation_width: int,
                                   n_trials_factor: float) -> None:
    """Test `estimate_number_of_trials` function."""
    for action in actions:
        env.step(action)
    result = estimate_number_of_trials(env, n_trials_estimation_depth,
                                       n_trials_estimation_width,
                                       n_trials_factor)
    assert result > 0
Example #3
def main() -> None:
    """Parse CLI arguments, train agent, and test it."""
    cli_args = parse_cli_args()

    default_config_path = 'configs/default_config.yml'
    default_config_path = resource_filename(__name__, default_config_path)
    config_path = cli_args.config_path or default_config_path
    with open(config_path) as config_file:
        settings = yaml.safe_load(config_file)

    results_dir = settings['piece']['rendering_params']['dir']
    if not os.path.isdir(results_dir):
        os.mkdir(results_dir)

    piece = Piece(**settings['piece'])
    env = CounterpointEnv(piece, **settings['environment'])
    best_action_sequences = optimize_with_monte_carlo_beam_search(
        env, **settings['agent'])

    env.verbose = True
    for i_episode, action_sequence in enumerate(best_action_sequences):
        print(f"\nPiece #{i_episode}:")
        env.reset()
        for action in action_sequence:
            observation, reward, done, info = env.step(action)
        env.render()
        print(f"Reward is {reward}.")
Example #4
def roll_in(env: CounterpointEnv, actions: List[int]) -> EnvWithActions:
    """
    Do roll-in actions.

    :param env:
        environment
    :param actions:
        sequence of roll-in actions
    :return:
        environment after roll-in actions
    """
    env.reset()
    for action in actions:
        env.step(action)
    env_with_actions = EnvWithActions(env, actions)
    return env_with_actions
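A minimal usage sketch, assuming `env` is a `CounterpointEnv` configured as in the fixtures shown elsewhere in this listing and that the action prefix is valid for it:

# Reset the environment, replay a fixed prefix of actions, and keep the
# replayed actions bundled with the resulting state.
env_with_actions = roll_in(env, actions=[14, 6, 8])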
Example #5
def test_reward(
        self, env: CounterpointEnv, actions: List[int], expected: float
) -> None:
    """Test that `step` method returns proper reward."""
    for action in actions:
        observation, reward, done, info = env.step(action)
    assert done
    assert round(reward, 4) == expected
Example #6
def test_observation(
        self, env: CounterpointEnv, actions: List[int],
        expected: np.ndarray
) -> None:
    """Test that `step` method returns proper observation."""
    for action in actions:
        observation, reward, done, info = env.step(action)
    assert not done
    np.testing.assert_equal(observation, expected)
Example #7
def test_info(
        self, env: CounterpointEnv, actions: List[int],
        expected: np.ndarray
) -> None:
    """Test that `step` method returns proper info about next actions."""
    for action in actions:
        observation, reward, done, info = env.step(action)
    result = info['next_actions']
    assert result == expected
Example #8
class TestCounterpointEnv:
    """Tests for `CounterpointEnv` class."""

    @pytest.mark.parametrize(
        "env, actions, expected",
        [
            (
                # `env`
                CounterpointEnv(
                    piece=Piece(
                        tonic='C',
                        scale_type='major',
                        cantus_firmus=['C4', 'D4', 'E4', 'D4', 'C4'],
                        counterpoint_specifications={
                            'start_note': 'E4',
                            'end_note': 'E4',
                            'lowest_note': 'G3',
                            'highest_note': 'G4',
                            'start_pause_in_eighths': 4,
                            'max_skip_in_degrees': 2,
                        },
                        rules={
                            'names': ['rearticulation_stability'],
                            'params': {}
                        },
                        rendering_params={}
                    ),
                    scoring_coefs={'number_of_skips': 1},
                    scoring_fn_params={
                        'number_of_skips': {'rewards': {1: 1}}
                    },
                    reward_for_dead_end=-100,
                ),
                # `actions`
                [14, 6, 8],
                # `expected`
                np.array([
                    [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                ])
            ),
        ]
    )
    def test_observation(
            self, env: CounterpointEnv, actions: List[int],
            expected: np.ndarray
    ) -> None:
        """Test that `step` method returns proper observation."""
        for action in actions:
            observation, reward, done, info = env.step(action)
        assert not done
        np.testing.assert_equal(observation, expected)

    @pytest.mark.parametrize(
        "env, actions, expected",
        [
            (
                # `env`
                CounterpointEnv(
                    piece=Piece(
                        tonic='C',
                        scale_type='major',
                        cantus_firmus=['C4', 'D4', 'E4', 'D4', 'C4'],
                        counterpoint_specifications={
                            'start_note': 'E4',
                            'end_note': 'E4',
                            'lowest_note': 'G3',
                            'highest_note': 'G4',
                            'start_pause_in_eighths': 4,
                            'max_skip_in_degrees': 2,
                        },
                        rules={
                            'names': [
                                'rhythmic_pattern_validity',
                                'rearticulation_stability',
                                'consonance_on_strong_beat',
                                'resolution_of_suspended_dissonance',
                            ],
                            'params': {}
                        },
                        rendering_params={}
                    ),
                    scoring_coefs={'number_of_skips': 1},
                    scoring_fn_params={
                        'number_of_skips': {'rewards': {1: 1}}
                    },
                    reward_for_dead_end=-100,
                ),
                # `actions`
                [13, 15],
                # `expected`
                [6, 11]
            ),
        ]
    )
    def test_info(
            self, env: CounterpointEnv, actions: List[int],
            expected: np.ndarray
    ) -> None:
        """Test that `step` method returns proper info about next actions."""
        for action in actions:
            observation, reward, done, info = env.step(action)
        result = info['next_actions']
        assert result == expected

    @pytest.mark.parametrize(
        "env, actions, expected",
        [
            (
                # `env`
                CounterpointEnv(
                    piece=Piece(
                        tonic='C',
                        scale_type='major',
                        cantus_firmus=['C4', 'D4', 'E4', 'D4', 'C4'],
                        counterpoint_specifications={
                            'start_note': 'E4',
                            'end_note': 'E4',
                            'lowest_note': 'G3',
                            'highest_note': 'G4',
                            'start_pause_in_eighths': 4,
                            'max_skip_in_degrees': 2,
                        },
                        rules={
                            'names': ['rearticulation_stability'],
                            'params': {}
                        },
                        rendering_params={}
                    ),
                    scoring_coefs={'number_of_skips': 1},
                    scoring_fn_params={
                        'number_of_skips': {'rewards': {1: 1}}
                    },
                    reward_for_dead_end=-100,
                ),
                # `actions`
                [14, 6, 8, 11, 5, 15, 9],
                # `expected`
                0
            ),
            (
                # `env`
                CounterpointEnv(
                    piece=Piece(
                        tonic='C',
                        scale_type='major',
                        cantus_firmus=['C4', 'D4', 'E4', 'D4', 'C4'],
                        counterpoint_specifications={
                            'start_note': 'E4',
                            'end_note': 'E4',
                            'lowest_note': 'G3',
                            'highest_note': 'G4',
                            'start_pause_in_eighths': 4,
                            'max_skip_in_degrees': 2,
                        },
                        rules={
                            'names': ['rearticulation_stability'],
                            'params': {}
                        },
                        rendering_params={}
                    ),
                    scoring_coefs={'number_of_skips': 1},
                    scoring_fn_params={
                        'number_of_skips': {'rewards': {4: 1}}
                    },
                    reward_for_dead_end=-100,
                ),
                # `actions`
                [14, 6, 8, 11, 5, 15, 9],
                # `expected`
                1
            ),
            (
                # `env`
                CounterpointEnv(
                    piece=Piece(
                        tonic='C',
                        scale_type='major',
                        cantus_firmus=['C4', 'C4', 'C3', 'C4', 'C4'],
                        counterpoint_specifications={
                            'start_note': 'E4',
                            'end_note': 'E4',
                            'lowest_note': 'G3',
                            'highest_note': 'G4',
                            'start_pause_in_eighths': 4,
                            'max_skip_in_degrees': 2,
                        },
                        rules={
                            'names': ['absence_of_large_intervals'],
                            'params': {
                                'absence_of_large_intervals': {
                                    'max_n_semitones': 7
                                }
                            }
                        },
                        rendering_params={}
                    ),
                    scoring_coefs={'number_of_skips': 1},
                    scoring_fn_params={
                        'number_of_skips': {'rewards': {1: 1}}
                    },
                    reward_for_dead_end=-100,
                ),
                # `actions`
                [14, 12],
                # `expected`
                -100
            ),
        ]
    )
    def test_reward(
            self, env: CounterpointEnv, actions: List[int], expected: float
    ) -> None:
        """Test that `step` method returns proper reward."""
        for action in actions:
            observation, reward, done, info = env.step(action)
        assert done
        assert round(reward, 4) == expected

    @pytest.mark.parametrize(
        "env, actions, expected",
        [
            (
                # `env`
                CounterpointEnv(
                    piece=Piece(
                        tonic='C',
                        scale_type='major',
                        cantus_firmus=['C4', 'D4', 'E4', 'D4', 'C4'],
                        counterpoint_specifications={
                            'start_note': 'E4',
                            'end_note': 'E4',
                            'lowest_note': 'G3',
                            'highest_note': 'G4',
                            'start_pause_in_eighths': 4,
                            'max_skip_in_degrees': 2,
                        },
                        rules={
                            'names': ['rearticulation_stability'],
                            'params': {}
                        },
                        rendering_params={}
                    ),
                    scoring_coefs={'number_of_skips': 1},
                    scoring_fn_params={
                        'number_of_skips': {'rewards': {1: 1}}
                    },
                    reward_for_dead_end=-100,
                ),
                # `actions`
                [14, 6, 8, 11, 5, 15],
                # `expected`
                np.array([
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                ])
            ),
        ]
    )
    def test_reset(
            self, env: CounterpointEnv, actions: List[int],
            expected: np.ndarray
    ) -> None:
        """Test `reset` method."""
        for action in actions:
            env.step(action)
        observation = env.reset()
        np.testing.assert_equal(observation, expected)
        assert env.piece.current_time_in_eighths == 8
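
The tests above also show the Gym-style contract of `step`: it returns an observation, a reward, a `done` flag, and an `info` dict whose `'next_actions'` entry lists the currently valid actions. Below is a hedged exploration sketch built on that contract, assuming `env` is configured as in the `test_info` fixture above; it is not part of the original tests.

# Replay a known prefix (taken from the `test_info` fixture), then greedily
# follow the first valid action until the episode ends or hits a dead end.
env.reset()
for action in [13, 15]:
    observation, reward, done, info = env.step(action)
while not done and info['next_actions']:
    observation, reward, done, info = env.step(info['next_actions'][0])
print(f"Final reward is {reward}.")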
Example #9
 "env, actions, n_trials_estimation_depth, n_trials_estimation_width, "
 "n_trials_factor",
 [
     (
         # `env`
         CounterpointEnv(
             piece=Piece(tonic='C',
                         scale_type='major',
                         cantus_firmus=['C4', 'D4', 'E4', 'D4', 'C4'],
                         counterpoint_specifications={
                             'start_note': 'E4',
                             'end_note': 'E4',
                             'lowest_note': 'G3',
                             'highest_note': 'G4',
                             'start_pause_in_eighths': 4,
                             'max_skip_in_degrees': 2,
                         },
                         rules={
                             'names': ['rearticulation_stability'],
                             'params': {}
                         },
                         rendering_params={}),
             reward_for_dead_end=-100,
             scoring_coefs={'entropy': 1},
             scoring_fn_params={},
         ),
         # `actions`
         [1],
         # `n_trials_estimation_depth`
         2,
         # `n_trials_estimation_width`