def prepare_to_render(
    self,
    effect: Effect = None,
    analyser: Analyser = None,
    add_debug_channels=False,
    receive_mapping_over_osc=False,
):
    """
    Prepares the Sound to be rendered by compiling the CSD templates.

    Args:
        effect: which Effect to apply, potentially None if no effect is desired
        analyser: an Analyser object, potentially None if the Sound doesn't need to be analysed
        add_debug_channels: if True, renders a multichannel audio file with control channels upsampled to audio rate
        receive_mapping_over_osc: if True, receives mapping over OSC instead of using channels
    """
    effect_csd = effect.to_csd() if effect is not None else None
    base = TemplateHandler(EFFECT_BASE, template_dir=EFFECT_TEMPLATE_DIR)
    channels = effect.get_csd_channels() if effect is not None else []

    save_to_path = os.path.join(
        CSD_DIR,
        f"{timestamp()}_{self.save_to}.csd",
    )
    save_to_debug_path = os.path.join(
        AUDIO_OUTPUT_DIR,
        f"{timestamp()}_{self.save_to}{DEBUG_SUFFIX}.wav",
    )

    self.csd = base.compile(
        input=f"-i{self.input}",
        output=f"-o{self.output}" if self.output != NO_SOUND else self.output,
        channels=channels,
        sample_rate=SAMPLE_RATE,
        ksmps=KSMPS,
        flags=self.flags,
        effect=effect_csd,
        analyser=analyser.analyser_csd if analyser is not None else "",
        duration=self.duration,
        add_debug_channels=add_debug_channels,
        debug_file_name=save_to_debug_path,
        receive_mapping_over_osc=receive_mapping_over_osc,
        osc_mapping_port=OSC_MAPPING_PORT,
        osc_mapping_route=OSC_MAPPING_ROUTE,
        debug_suffix=DEBUG_SUFFIX,
    )

    base.save_to_file(save_to_path)
    return save_to_path
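# NOTE: a minimal usage sketch (not part of the codebase) showing the intended
# call sequence: compile the CSD, then render and read the analysis channels.
# It assumes Sound.render() and Sound.player.get_channels() behave as in the
# tests below.
effect = Effect("bandpass")
analyser = Analyser(["rms"])
sound = Sound("amen_trim.wav")
csd_path = sound.prepare_to_render(effect=effect, analyser=analyser)
sound.render()
features = sound.player.get_channels(analyser.analysis_features)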
def test_dry_and_wet_are_not_the_same():
    amen = "amen_trim.wav"
    feature_extractors = ["rms"]
    analyser = Analyser(feature_extractors)
    analysis_channels = analyser.analysis_features

    for effect_name in [
        "bandpass",
        "formant",
        "dist_lpf",
        "freeverb",
        "distortion",
        "gain",
    ]:
        effect = Effect(effect_name)
        dry = Sound(amen)
        dry.prepare_to_render(analyser=analyser)
        wet = Sound(amen)
        wet.prepare_to_render(effect=effect, analyser=analyser)

        for _ in range(10):
            dry.render()
            wet.render()
            dry_chans = dry.player.get_channels(analysis_channels)
            wet_chans = wet.player.get_channels(analysis_channels)
            assert (
                dry_chans != wet_chans
            ), f"Dry and wet should not be equal for {effect_name}"
class EffectAndAnalysis(AbstractTimeable):
    def __init__(self):
        self.analyser = Analyser(["rms", "pitch", "spectral"])
        self.effect = Effect("dist_lpf")
        self.sound = Sound("noise.wav")
        self.sound.prepare_to_render(self.effect, self.analyser)

    def call(self):
        mapping = self.effect.random_mapping()
        self.sound.render(mapping)
        return self.sound.player.get_channels(self.analyser.analysis_features)
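# NOTE: hedged benchmarking sketch for the timeable above. AbstractTimeable's
# interface is not shown, so using the standard-library timeit module here is
# an assumption for illustration only.
import timeit

timeable = EffectAndAnalysis()
seconds = timeit.timeit(timeable.call, number=10)
print(f"10 render+analysis calls took {seconds:.3f}s")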
def test_effect_initialisation():
    effect = Effect("bandpass")

    assert effect.parameters[0].name == "cutoff_freq"
    assert effect.parameters[0].mapping.min_value == 50.0
    assert effect.parameters[0].mapping.max_value == 5000.0
    assert effect.parameters[0].mapping.skew_factor == 0.3

    assert effect.parameters[1].name == "bandwidth"
    assert effect.parameters[1].mapping.min_value == 0.01
    assert effect.parameters[1].mapping.max_value == 1.0
    assert effect.parameters[1].mapping.skew_factor == 1.0
def test_mediator_receives_values_from_musician():
    dur_s = get_duration(INPUT_SOURCE)
    analyser = Analyser(["rms", "mfcc"], osc_route=OSC_SOURCE_FEATURES_ROUTE)
    mediator = Mediator(run=False)
    effect = Effect("bandpass")
    musician = Musician(
        effect=effect,
        analyser=analyser,
        input_source=INPUT_SOURCE,
        output_source=NO_SOUND,
        duration=dur_s,
    )

    mediator.run()
    musician.start()
    mediator.terminate()

    source_features = mediator.get_source_features(blocking=False)
    assert np.array(source_features).mean() > 0
    # account for timestamp
    assert np.array(source_features).size == len(analyser.analysis_features) + 1
def main():
    args = arguments()
    config = parse_config_file(args.config_file)

    # NOTE: practical constant to use while developing
    BLACKHOLE = "dac2"

    if args.is_target:
        osc_route = OSC_TARGET_FEATURES_ROUTE
        # NOTE: temporary hack to loop the target sound
        input_source = "amen_loop.wav"
        effect = None
        output_source = DAC
    else:
        osc_route = OSC_SOURCE_FEATURES_ROUTE
        # NOTE: temporary hack to loop the source sound
        input_source = "noise_loop.wav"
        effect = Effect(config["env"]["effect"])
        output_source = BLACKHOLE

    if args.live_mode:
        input_source = LIVE
    if args.output:
        output_source = args.output

    analyser = Analyser(config["env"]["feature_extractors"], osc_route=osc_route)
    musician = Musician(
        analyser,
        effect=effect,
        input_source=input_source,
        output_source=output_source,
        duration=1000,
        is_target=args.is_target,
    )
    musician.start()
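# NOTE: arguments() is referenced above but not shown. This is a minimal
# argparse sketch consistent with the attributes main() reads (config_file,
# is_target, live_mode, output); the real implementation may differ.
import argparse


def arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument("--config-file", dest="config_file", type=str)
    parser.add_argument("--is-target", dest="is_target", action="store_true")
    parser.add_argument("--live-mode", dest="live_mode", action="store_true")
    parser.add_argument("--output", type=str, default=None)
    return parser.parse_args()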
def test_formant_have_the_right_channels():
    effect = Effect("formant")
    channels = effect.get_csd_channels()
    channel_names = [channel.name for channel in channels]
    assert channel_names == ["freq"]
def test_gain_have_the_right_csd_channels():
    effect = Effect("gain")
    channels = effect.get_csd_channels()
    channel_names = [channel.name for channel in channels]
    assert channel_names == ["gain"]
def test_bandpass_have_the_right_csd_channels():
    effect = Effect("bandpass")
    channels = effect.get_csd_channels()
    channel_names = [channel.name for channel in channels]
    assert channel_names == ["cutoff_freq", "bandwidth"]
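# NOTE: the three channel tests above could be consolidated with
# pytest.mark.parametrize. A sketch; the expected channel lists are taken
# directly from the tests above.
import pytest


@pytest.mark.parametrize(
    "effect_name,expected_channels",
    [
        ("formant", ["freq"]),
        ("gain", ["gain"]),
        ("bandpass", ["cutoff_freq", "bandwidth"]),
    ],
)
def test_effect_has_the_right_csd_channels(effect_name, expected_channels):
    effect = Effect(effect_name)
    channel_names = [channel.name for channel in effect.get_csd_channels()]
    assert channel_names == expected_channels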
def __init__(self, config=CROSS_ADAPTIVE_DEFAULT_CONFIG):
    self._reset_internal_state()
    self.source_input = config["source"]
    self.target_inputs = config["targets"]
    assert type(self.target_inputs) is list, "Targets should be provided as a list"
    self.target_index = 0
    self.effect = Effect(config["effect"])
    self.metric = metric_from_name(config["metric"])
    self.feature_extractors = config["feature_extractors"]
    self.render_to_dac = config["render_to_dac"]
    self.debug = config["debug"]
    # NOTE: experimental feature
    self.standardize_rewards = config["standardize_rewards"]

    # how often the model should evaluate
    self.eval_interval = config["eval_interval"]
    if self.eval_interval is not None:
        self.step_index = 0

    # analyser
    if not len(self.feature_extractors) > 0:
        raise ValueError(
            "The environment doesn't work without any feature extractors"
        )
    self.analyser = Analyser(self.feature_extractors)

    # standardizer
    self.standardizer = Standardizer(
        [
            Sound(sound_input)
            for sound_input in [self.source_input, *self.target_inputs]
        ],
        self.analyser,
        reward_norm_batch_size=100 if self.standardize_rewards else None,
    )

    # an observation = analysis of one source frame + one target frame
    self.observation_space = gym.spaces.Box(
        low=-DEVIATION_LIMIT,
        high=DEVIATION_LIMIT,
        shape=(len(self.analyser.analysis_features) * 2,),
    )

    # an action = a combination of effect parameters
    self.action_space = gym.spaces.Box(
        low=-1.0, high=1.0, shape=(len(self.effect.parameters),)
    )

    # initialize sound source
    self.source_dry = Sound(self.source_input)
    self.source_wet = Sound(self.source_input)
    self.target = Sound(self.target_inputs[self.target_index])
    self.source_dry.prepare_to_render(effect=None, analyser=self.analyser)
    self.source_wet.prepare_to_render(effect=self.effect, analyser=self.analyser)
    self.target.prepare_to_render(effect=None, analyser=self.analyser)

    self.source_dry_features = np.zeros(shape=len(self.analyser.analysis_features))
    self.source_wet_features = np.zeros(shape=len(self.analyser.analysis_features))
    self.target_features = np.zeros(shape=len(self.analyser.analysis_features))

    self.should_delay_source_wet_one_frame = True
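# NOTE: hypothetical interaction sketch. Only __init__ is shown above; this
# assumes the class (called CrossAdaptiveEnv here, which is an assumption)
# implements the classic gym reset()/step() API implied by its
# observation_space and action_space definitions.
env = CrossAdaptiveEnv()
obs = env.reset()
for _ in range(10):
    action = env.action_space.sample()  # a random combination of effect parameters
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()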