def model_predict(model, input_track, sample_size, trans_fw=None, trans_bw=None):
    """Run ``model`` over fixed-size fragments of ``input_track`` and return
    the cross-faded mix of two half-fragment-shifted prediction streams.

    ``trans_fw``/``trans_bw`` are optional forward/backward transforms (e.g.
    FFT and inverse FFT) applied before and after prediction; when ``trans_bw``
    is omitted the backward transform is the identity.
    """
    dim = sample_size
    half = dim // 2
    n_batches = int(len(input_track) / dim) - 1

    straight = input_track[0:n_batches * dim].reshape((-1, dim))
    shifted = input_track[half:n_batches * dim + half].reshape((-1, dim))

    print("FFT transforming...")
    if trans_fw is not None:
        # NOTE(review): axis=0 applies the transform across batches rather
        # than within each fragment — confirm axis=1 is not intended.
        straight = parallel_apply_along_axis(trans_fw, 0, straight)
        shifted = parallel_apply_along_axis(trans_fw, 0, shifted)
    if trans_bw is None:
        trans_bw = lambda x: x

    xfp = x_fade_profile(dim)

    print("Predicting and transforming...")
    straight = parallel_apply_along_axis(trans_bw, 0, model.predict(straight))
    shifted = parallel_apply_along_axis(trans_bw, 0, model.predict(shifted))

    print("Applying x-fade and mixing...")
    x0 = np.array([xfp * frag for frag in straight]).reshape(-1)
    x1 = np.array([xfp * frag for frag in shifted]).reshape(-1)
    return mix_at(x0, x1, half)
def test_allpass_comb_reverb(lead):
    """Listening test: run a comb all-pass reverb over ``lead`` and play it.

    Side-effect-only test; produces audio via ``play_array``.
    """
    allpass = digital_filters.CombAllPassReverb(
        np.linspace(0.9, 0.95, 10), np.linspace(500, 3000, 10))
    fx = allpass.apply(lead)
    wet = signalproc.mix_at(lead, fx, 0)
    # NOTE(review): `wet` (dry + fx mix) is computed but the fx-only signal
    # is what gets played — possibly play_array(wet) was intended; confirm.
    play_array(fx, norm=1)
def test_iir_filter(lead):
    """Listening test: run an IIR filter over ``lead`` and play the result.

    Side-effect-only test; produces audio via ``play_array``.
    """
    iir = digital_filters.IirFilter([-0.8], [8000], [0.8], [8000])
    fx = iir.apply(lead)
    wet = signalproc.mix_at(lead, -fx, 0)
    # NOTE(review): `wet` (dry minus fx mix) is computed but the fx-only
    # signal is what gets played — possibly play_array(wet) was intended.
    play_array(fx, norm=1)
def apply(self, x):
    """Mix a gain-scaled, delayed copy of ``x`` into the signal for every
    (gain, delay) tap pair configured on this filter; return the result."""
    mixed = np.array(x)
    for gain, lag in zip(self.gain_coefs, self.delays):
        mixed = signalproc.mix_at(mixed, x * gain, lag)
    return mixed
def mixdown(audio_tracks):
    """Sum all tracks together at offset 0, then normalize the amplitude.

    Normalization centers the mix on its mean and scales by 1.25x the
    5th-to-95th percentile spread.
    """
    mixed = copy(audio_tracks[0])
    for track in audio_tracks[1:]:
        mixed = mix_at(mixed, track, at=0)
    mixed = mixed - np.mean(mixed)
    spread = np.percentile(mixed, 95) - np.percentile(mixed, 5)
    mixed /= 1.25 * spread
    return mixed
def model_predict(model, input_track, fragment_length):
    """Predict over fixed-length fragments of ``input_track`` and cross-fade
    two half-fragment-shifted prediction streams into one track."""
    dim = fragment_length
    half = dim // 2
    n_batches = int(len(input_track) / dim) - 1

    straight = input_track[0:n_batches * dim].reshape((-1, dim))
    shifted = input_track[half:n_batches * dim + half].reshape((-1, dim))

    fade = x_fade_profile(dim)
    x0 = np.array([fade * frag for frag in model.predict(straight)]).reshape(-1)
    x1 = np.array([fade * frag for frag in model.predict(shifted)]).reshape(-1)
    return mix_at(x0, x1, half)
def apply(self, x):
    """Apply the feed-forward taps, then the recursive (feedback) taps.

    The feedback pass walks the signal sample by sample and reads back
    already-updated samples of ``result``, so tap order and the in-place
    update are both load-bearing.
    """
    result = np.array(x)
    # Feed-forward: mix in delayed, gain-scaled copies of the input.
    for gain, lag in zip(self.gain_coefs_fwd, self.delays_fwd):
        result = signalproc.mix_at(result, x * gain, lag)
    # Feedback: each output sample accumulates scaled earlier outputs.
    for i in range(len(result)):
        for gain, lag in zip(self.gain_coefs_bwd, self.delays_bwd):
            j = i - lag
            if j >= 0:
                result[i] = result[i] + gain * result[j]
    return result
def generate_track(self, track: Track):
    """Render every positioned note of ``track`` into a single PCM buffer.

    A silence buffer long enough for the latest-ending note is created
    first; each note is then synthesized and mixed in at its offset.
    """
    positioned_notes = np.array(track.generate_notes())
    max_duration = get_max_duration(positioned_notes)
    pcm = self.generator.sampling_info.generate_silence(
        max_duration.seconds)
    for note in positioned_notes:
        assert isinstance(note, PositionedNote)
        # Note velocity is scaled by this track generator's own velocity.
        y = self.generate_note(note.note, note.duration,
                               self.velocity * note.velocity)
        # pcm is rebound to mix_at's return value on every iteration
        # (the previous comment incorrectly named the function "max_at"
        # and described the update as in-place).
        pcm = mix_at(
            pcm, y,
            note.offset.samples(self.generator.sampling_info.sample_rate))
    return pcm
def predict(self, input_track):
    """Predict over half-overlapping fragments of ``input_track`` and
    return the cross-faded mix of the two prediction streams."""
    dim = self.fragment_length
    half = dim // 2
    n_batches = int(len(input_track) / dim) - 1

    straight = input_track[0:n_batches * dim].reshape((-1, dim))
    shifted = input_track[half:n_batches * dim + half].reshape((-1, dim))

    fade = self.x_fade_profile(dim)
    x0 = np.array(
        [fade * frag for frag in self.model.predict(straight)]).reshape(-1)
    x1 = np.array(
        [fade * frag for frag in self.model.predict(shifted)]).reshape(-1)
    return signalproc.mix_at(x0, x1, half)