Example #1
def test_standardization_of_new_mfcc_extraction():
    analyser = Analyser(["rms", "mel"])
    sound = Sound("amen_5s.wav")
    standardizer = get_standardizer(sounds=[sound], analyser=analyser)

    sound.prepare_to_render(analyser=analyser)
    sound.features = []

    done = False

    while not done:
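        # render one frame at a time and standardize each raw feature value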
        done = done or sound.render()
        raw_features = sound.player.get_channels(analyser.analysis_features)
        standardized_features = np.array([
            standardizer.get_standardized_value(analyser.analysis_features[i],
                                                feature_value)
            for i, feature_value in enumerate(raw_features)
        ])
        sound.features.append(standardized_features)

    mel_features = np.array(sound.features)[:, 1:]  # ignore RMS
    assert -1.0 < mel_features.mean() < 1.0
    assert -1.0 < mel_features.std() < 1.0
    assert mel_features.max() <= 4.0
    assert mel_features.min() >= -4.0
Example #2
def test_dry_and_wet_are_not_the_same():
    amen = "amen_trim.wav"
    feature_extractors = ["rms"]
    analyser = Analyser(feature_extractors)
    analysis_channels = analyser.analysis_features

    for effect_name in [
            "bandpass",
            "formant",
            "dist_lpf",
            "freeverb",
            "distortion",
            "gain",
    ]:
        effect = Effect(effect_name)
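        # render the same file dry and wet and compare the analysis channels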
        dry = Sound(amen)
        dry.prepare_to_render(analyser=analyser)
        wet = Sound(amen)
        wet.prepare_to_render(effect=effect, analyser=analyser)

        for i in range(10):
            dry.render()
            wet.render()
            dry_chans = dry.player.get_channels(analysis_channels)
            wet_chans = wet.player.get_channels(analysis_channels)
            assert (dry_chans != wet_chans
                    ), f"Dry and wet should not be equal for {effect_name}"
Example #3
def test_osc_route_is_included():
    features = ["rms", "spectral"]
    osc_route = "/test/test/test"
    analyser = Analyser(features, osc_route=osc_route)
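    # the generated Csound code should OSC-send a timestamp plus every analysis feature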
    assert "OSCsend" in analyser.analyser_csd
    dfff = f"\"d{'f' * len(analyser.analysis_features)}\""
    assert (f'{dfff}, ktimestamp, {", ".join(analyser.analysis_features)}'
            in analyser.analyser_csd)
    assert osc_route in analyser.analyser_csd
Example #4
def test_feature_extractors_output_something():
    feature_extractors = ["pitch", "spectral", "mfcc"]
    audio_to_analyse = "aSig"
    for fe in feature_extractors:
        # the other extractors depend on RMS for now
        analyser = Analyser(["rms", fe], audio_to_analyse=audio_to_analyse)
        analysis_features = analyser.analysis_features
        ksmps = 64
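        # minimal test orchestra: a 220 Hz sine tone routed through the analyser code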
        orc = f"""
        sr=44100
        ksmps={ksmps}
        nchnls=1
        0dbfs=1

        gifftsize = {ksmps * 2}

        instr 1
        {audio_to_analyse} poscil 1.0, 220
        out {audio_to_analyse}
        {analyser.analyser_csd}
        endin
        """

        sco = """
        i1 0 3
        """

        cs = ctcsound.Csound()
        cs.setOption("--nosound")

        cs.compileOrc(orc)
        cs.readScore(sco)

        cs.start()
        features = []

        while cs.performBuffer() == 0:
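            # read each extractor's value from its Csound control channel for this buffer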
            features.append([
                cs.controlChannel(feature)[0] for feature in analysis_features
            ])
        features = np.array(features)
        for i in range(len(analysis_features)):
            assert features[:, i].mean() > 0.0
        cs.cleanup()
        cs.reset()
        del cs
Example #5
def test_two_dry_signals_yield_the_same_features():
    amen = "amen_trim.wav"
    feature_extractors = ["rms"]
    analyser = Analyser(feature_extractors)
    analysis_channels = analyser.analysis_features
    dry1 = Sound(amen)
    dry1.prepare_to_render(analyser=analyser)
    dry2 = Sound(amen)
    dry2.prepare_to_render(analyser=analyser)

    for i in range(100):
        dry1.render()
        dry2.render()
        dry1_chans = dry1.player.get_channels(analysis_channels)
        dry2_chans = dry2.player.get_channels(analysis_channels)
        assert (dry1_chans == dry2_chans
                ), "Two dry signals should yield the same features"
Example #6
def test_new_mel():
    feature_extractors = ["rms", "mel"]
    analyser = Analyser(feature_extractors)
    sound = Sound("amen.wav")
    sound.prepare_to_render(analyser=analyser)

    feature_matrix = []
    done = False
    while not done:
        done = sound.render()
        feature_matrix.append(
            sound.player.get_channels(analyser.analysis_features))
    feature_matrix = np.array(feature_matrix[1:])  # drop the first analysis frame

    # skip RMS (column 0) and check that every mel bin carries some signal
    for i, feature in enumerate(analyser.analysis_features[1:], start=1):
        assert feature_matrix[:, i].mean() != 0
        assert feature_matrix[:, i].std() != 0
Example #7
def test_mediator_receives_values_from_musician():
    dur_s = get_duration(INPUT_SOURCE)
    analyser = Analyser(["rms", "mfcc"], osc_route=OSC_SOURCE_FEATURES_ROUTE)
    mediator = Mediator(run=False)
    effect = Effect("bandpass")
    musician = Musician(
        effect=effect,
        analyser=analyser,
        input_source=INPUT_SOURCE,
        output_source=NO_SOUND,
        duration=dur_s,
    )
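    # the musician analyses the input and sends its features over OSC for the mediator to collect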
    mediator.run()
    musician.start()
    mediator.terminate()

    source_features = mediator.get_source_features(blocking=False)
    assert np.array(source_features).mean() > 0
    assert (np.array(source_features).size == len(analyser.analysis_features) +
            1)  # account for timestamp
Example #8
def test_spectral_extractor_updates_new_values_every_frame_except_the_first():
    feature_extractors = ["rms", "spectral"]
    analyser = Analyser(feature_extractors)
    sound = Sound("amen.wav")
    sound.prepare_to_render(analyser=analyser)

    feature_matrix = []
    N = 10
    for i in range(N):
        sound.render()
        feature_matrix.append(
            sound.player.get_channels(analyser.analysis_features))
    feature_matrix = np.array(feature_matrix)
    rms = feature_matrix[:, 0]
    spread = feature_matrix[:, 2]
    flatness = feature_matrix[:, 3]

    # RMS values should update every k-rate frame
    assert len(set(rms)) == len(rms)

    # spread and flatness update every k-rate frame except the first
    assert len(set(spread)) == N - 1
    assert len(set(flatness)) == N - 1
Example #9
def main():
    args = arguments()
    config = parse_config_file(args.config_file)

    # NOTE: practical constants to use while developing
    BLACKHOLE = "dac2"

    if args.is_target:
        osc_route = OSC_TARGET_FEATURES_ROUTE
        # NOTE: temporary hack to loop the target sound
        input_source = "amen_loop.wav"
        effect = None
        output_source = DAC
    else:
        osc_route = OSC_SOURCE_FEATURES_ROUTE
        # NOTE: temporary hack to loop the source sound
        input_source = "noise_loop.wav"
        effect = Effect(config["env"]["effect"])
        output_source = BLACKHOLE

    if args.live_mode:
        input_source = LIVE

    if args.output:
        output_source = args.output

    analyser = Analyser(config["env"]["feature_extractors"],
                        osc_route=osc_route)
    musician = Musician(
        analyser,
        effect=effect,
        input_source=input_source,
        output_source=output_source,
        duration=1000,
        is_target=args.is_target,
    )
    musician.start()
Example #10
            if std == 0.0:
                standardized_reward = reward - mean
            else:
                standardized_reward = (reward - mean) / std
        else:
            # We don't have enough reward samples to standardize
            standardized_reward = reward

        # append the new reward after mean and std calculation
        self.reward_norm_batch.append(reward)
        return standardized_reward


if __name__ == "__main__":
    sounds = [Sound("noise_5s.wav"), Sound("drums_5s.wav")]
    a = Analyser(["rms", "pitch", "spectral"])
    s = Standardizer(sounds, a)
    print(s.stats)

    # new_sound = Sound("drums_7s.wav")
    # features = np.empty(shape=(len(a.analysis_features),))
    # new_sound.prepare_to_render(analyser=a)
    # done = False
    # while not done:
    #     done = new_sound.render()
    #     frame_features = np.array(new_sound.player.get_channels(a.analysis_features))
    #     if (frame_features > 1.0).any() or (frame_features < 0.0).any():
    #         # NOTE: hacky way of filtering out outliers since Csound params are supposed to be limited (?)
    #         # TODO: log how often this happens and try to debug it
    #         continue
    #     else:
Example #11
def test_analyser_initialisation():
    features = ["rms", "pitch"]
    analyser = Analyser(features)
    for feature in features:
        assert f"START {feature}" in analyser.analyser_csd
        assert f"END {feature}" in analyser.analyser_csd
Example #12
def test_globals_are_included():
    features = ["spectral"]
    analyser = Analyser(features)
    global_variables = ["giFftTabSize", "gifna", "gifnf"]
    for gv in global_variables:
        assert gv in analyser.analyser_csd
Example #13
def test_all_extractors_use_same_audio_input():
    features = ["rms", "pitch"]
    analyser = Analyser(features)
    assert analyser.analyser_csd.count("aAnalyserInput") == len(features) + 1
Example #14
def test_analyser_raises_error_on_unknown_extractor():
    features = ["unkwown_extractor"]
    with pytest.raises(ValueError):
        Analyser(features)
Example #15
def get_standardizer(sounds=None,
                     analyser=None,
                     reward_norm_batch_size=100):
    # build the defaults per call to avoid shared mutable default arguments
    if sounds is None:
        sounds = [Sound("noise.wav")]
    if analyser is None:
        analyser = Analyser(["rms"])
    return Standardizer(sounds,
                        analyser,
                        reward_norm_batch_size=reward_norm_batch_size)
Example #16
    def __init__(self, config=CROSS_ADAPTIVE_DEFAULT_CONFIG):
        self._reset_internal_state()

        self.source_input = config["source"]
        self.target_inputs = config["targets"]
        assert type(self.target_inputs) is list, "Targets should be provided as a list"
        self.target_index = 0
        self.effect = Effect(config["effect"])
        self.metric = metric_from_name(config["metric"])
        self.feature_extractors = config["feature_extractors"]
        self.render_to_dac = config["render_to_dac"]
        self.debug = config["debug"]
        self.standardize_rewards = config[
            "standardize_rewards"
        ]  # NOTE: experimental feature

        # how often the model should evaluate
        self.eval_interval = config["eval_interval"]
        if self.eval_interval is not None:
            self.step_index = 0

        # analyzer
        if not len(self.feature_extractors) > 0:
            raise ValueError(
                "The environment doesn't work without any feature extractors"
            )
        self.analyser = Analyser(self.feature_extractors)

        # standardizer
        self.standardizer = Standardizer(
            [
                Sound(sound_input)
                for sound_input in [self.source_input, *self.target_inputs]
            ],
            self.analyser,
            reward_norm_batch_size=100 if self.standardize_rewards else None,
        )

        # an observation = analysis of one source frame + one target frame
        self.observation_space = gym.spaces.Box(
            low=-DEVIATION_LIMIT,
            high=DEVIATION_LIMIT,
            shape=(len(self.analyser.analysis_features) * 2,),
        )

        # an action = a combination of effect parameters
        self.action_space = gym.spaces.Box(
            low=-1.0, high=1.0, shape=(len(self.effect.parameters),)
        )

        # initialize sound source
        self.source_dry = Sound(self.source_input)
        self.source_wet = Sound(self.source_input)
        self.target = Sound(self.target_inputs[self.target_index])

        self.source_dry.prepare_to_render(effect=None, analyser=self.analyser)
        self.source_wet.prepare_to_render(effect=self.effect, analyser=self.analyser)
        self.target.prepare_to_render(effect=None, analyser=self.analyser)

        self.source_dry_features = np.zeros(shape=len(self.analyser.analysis_features))
        self.source_wet_features = np.zeros(shape=len(self.analyser.analysis_features))
        self.target_features = np.zeros(shape=len(self.analyser.analysis_features))
        self.should_delay_source_wet_one_frame = True
Example #17
    def __init__(self):
        self.analyser = Analyser(["rms", "pitch", "spectral"])
        self.effect = Effect("dist_lpf")
        self.sound = Sound("noise.wav")
        self.sound.prepare_to_render(self.effect, self.analyser)
Example #18
def test_output_channels_exist():
    features = ["rms", "pitch", "spectral"]
    analyser = Analyser(features)
    for extractor in analyser.feature_extractors:
        for channel in extractor["channels"]:
            assert f'"{channel}"' in analyser.analyser_csd