def test_source_wet_is_delayed_by_one_k():
    """The wet source player should lag the dry source and the target by one frame."""
    env = CrossAdaptiveEnv()
    action = env.action_space.sample()
    n_steps = 2
    for _ in range(n_steps):
        env.step(action)
    # Dry source and target advance in lockstep; the wet source trails by one.
    assert env.source_dry.player.k == n_steps
    assert env.target.player.k == n_steps
    assert env.source_wet.player.k == n_steps - 1
def test_non_debug_mode_does_not_define_debug_channels():
    """With debug disabled, no debug-channel declarations appear in the rendered csd."""
    config = {**CROSS_ADAPTIVE_DEFAULT_CONFIG, "debug": False}
    env = CrossAdaptiveEnv(config)
    debug_channels = [
        f"{param.name}{DEBUG_SUFFIX}" for param in env.effect.parameters
    ]
    env.step(env.action_space.sample())
    source = env.render()
    assert all(f'chn_k "{ch}"' not in source.csd for ch in debug_channels)
def test_debug_mode_renders_channels_to_debug_wave_file():
    """With debug enabled, the csd should write the debug channels out via fout."""
    config = {**CROSS_ADAPTIVE_DEFAULT_CONFIG, "debug": True}
    env = CrossAdaptiveEnv(config)
    debug_channels = [
        f"{param.name}{DEBUG_SUFFIX}" for param in env.effect.parameters
    ]
    env.step(env.action_space.sample())
    source = env.render()
    assert "fout" in source.csd
    for channel in debug_channels:
        assert f"upsamp(k_{channel})" in source.csd
def test_debug_mode_sets_debug_channels():
    """With debug enabled, every debug channel should hold a value inside the action bounds."""
    config = {**CROSS_ADAPTIVE_DEFAULT_CONFIG, "debug": True}
    env = CrossAdaptiveEnv(config)
    debug_channels = [
        f"{param.name}{DEBUG_SUFFIX}" for param in env.effect.parameters
    ]
    env.step(env.action_space.sample())
    source = env.render()
    debug_values = source.player.get_channels(debug_channels)
    for v in debug_values:
        # Use index 0 for BOTH bounds. The original compared against
        # low[0] but high[1], which looks like a typo and only worked
        # because the bounds are presumably uniform across dimensions.
        assert env.action_space.low[0] < v < env.action_space.high[0]
def test_source_wet_wraps_correctly_at_the_end_of_the_sound():
    """The wet source should wrap one frame after the dry source does."""
    config = {
        **CROSS_ADAPTIVE_DEFAULT_CONFIG,
        "eval_interval": None,  # episode is done at the end of the source
    }
    env = CrossAdaptiveEnv(config)
    action = env.action_space.sample()
    assert env.should_delay_source_wet_one_frame is True
    # Run a full episode: the dry source wraps back to frame 0 at the end,
    # while the delayed wet source is still one frame behind.
    done = False
    while not done:
        _, _, done, _ = env.step(action)
    assert env.source_dry.player.k == 0
    assert env.source_wet.player.k > 0
    assert env.should_delay_source_wet_one_frame is False
    # One more step: now the wet source wraps while the dry source advances.
    _, _, done, _ = env.step(action)
    assert env.source_wet.player.k == 0
    assert env.source_dry.player.k == 1
def test_source_wet_is_equal_to_previous_source_dry_when_effect_is_thru():
    """With a pass-through effect, wet features should equal the dry features of the previous step."""
    env = CrossAdaptiveEnv({**CROSS_ADAPTIVE_DEFAULT_CONFIG, "effect": "thru"})

    env.step(env.action_space.sample())
    dry_after_first = env.source_dry_features.copy()
    wet_after_first = env.source_wet_features.copy()
    # The wet path is delayed one frame, so after the first step it is still all zeros.
    empty = np.zeros(shape=len(env.analyser.analysis_features))
    assert np.array_equal(wet_after_first, empty)

    env.step(env.action_space.sample())
    dry_after_second = env.source_dry_features.copy()
    wet_after_second = env.source_wet_features.copy()
    # "thru" means the wet features are exactly the previous step's dry features.
    assert np.array_equal(wet_after_second, dry_after_first)
    # The dry features themselves should have moved on.
    assert not np.array_equal(dry_after_second, dry_after_first)
def run_offline_inference(agent: Trainer, env: CrossAdaptiveEnv):
    """Roll out a single full episode in `env`, letting `agent` choose every action."""
    # NOTE: something is wrong here — for some reason all the action values
    # end up too close to the bound.
    obs = env.reset()
    done = False
    while not done:
        action = agent.compute_action(obs)
        # TODO: standardize the action. That may be hard to do in live mode,
        # but for offline inference it should essentially work.
        obs, _, done, _ = env.step(action)
def test_multiple_targets_are_allowed():
    """The env should cycle through the configured targets, one per episode."""
    targets = ["amen_5s.wav", "drums_5s.wav"]
    config = {
        # NOTE: the `.copy()` the original called here was redundant —
        # `**` unpacking into a new dict literal already copies.
        **CROSS_ADAPTIVE_DEFAULT_CONFIG,
        "targets": targets,
        "eval_interval": None,
    }
    env = CrossAdaptiveEnv(config)
    assert Path(env.target.input).name == targets[0]

    def run_episode():
        # Step with random actions until the episode terminates.
        done = False
        while not done:
            _, _, done, _ = env.step(env.action_space.sample())

    # After each episode the env should advance to the next target,
    # wrapping back to the first one at the end of the list.
    run_episode()
    assert Path(env.target.input).name == targets[1]
    run_episode()
    assert Path(env.target.input).name == targets[0]
def test_env_inits_and_makes_first_step_correctly():
    """A fresh env starts from an all-zero state and produces a non-trivial state after one step."""
    env = CrossAdaptiveEnv()
    empty_features = np.zeros(shape=len(env.analyser.analysis_features))
    expected_initial = np.concatenate((empty_features, empty_features))
    initial_state = env.get_state()
    assert np.array_equal(initial_state, expected_initial)

    state, reward, done, _ = env.step(env.action_space.sample())
    assert done is True
    assert reward == 0.0
    # The step should have produced real (non-zero) analysis features.
    assert not np.array_equal(state, initial_state)
    assert np.abs(state.mean()) > 0.0