def convolution(sound, start, end):
    """Convolve the selected region of *sound* with the clipboard clip.

    The result is normalized to peak 1.0 and pasted back over the whole
    sound via ``sound.paste``.

    Parameters
    ----------
    sound : project Sound object; ``sound.frames`` is a numpy array,
        1-D for mono or (length, channels) for multichannel.
    start, end : int
        Frame indices delimiting the region of ``sound`` to convolve.
    """
    x = sound.frames
    # Impulse response comes from the application clipboard.
    h = clipboard.clip
    # ndim == 1 means mono; ndim == 2 means (frames, channels).
    numchan = max(x.ndim, h.ndim)
    if numchan == 1:
        # Both signals mono: single overlap-add FFT convolution.
        y = ola_fftconvolve(x[start:end], h)
    else:
        y = []
        if h.ndim == 1:
            #! H will be computed two times !
            # NOTE(review): the impulse response is duplicated to stereo, so
            # its FFT is recomputed per channel below — known inefficiency.
            h = edit.mix_channels_auto(h, 2)
        if x.ndim == 1:
            x = edit.mix_channels_auto(x, 2)
        # Transpose to (channels, frames) so each channel is a row.
        h = h.transpose()
        x = x.transpose()
        # NOTE(review): loop is hard-coded to 2 channels; assumes stereo —
        # sounds with more channels would lose channels 3+. TODO confirm.
        for nchan in range(2):
            tmp = ola_fftconvolve(x[nchan][start:end], h[nchan])
            y.append(tmp)
        y = numpy.array(y)
        # Back to (frames, channels) layout.
        y = y.transpose()
    # normalize
    if y.any():
        M = abs(y).max()
        factor = 1. / M
        y = y * factor
    # Replace the entire sound (0..len) with the convolved result, not just
    # the selection — presumably intentional since convolution lengthens
    # the signal; verify against caller expectations.
    l = len(sound.frames)
    sound.paste(0, l, y)
def monoize(sound, start, end):
    """Mix the sound down to a single channel, with undo support.

    ``start`` and ``end`` are accepted for interface parity with the other
    editing tools; the whole sound is mixed down regardless.
    """
    before = sound.frames
    after = edit.mix_channels_auto(sound.frames, 1)
    redo_action = (replace_frames, [sound, after])
    undo_action = (replace_frames, [sound, before])
    sound.history.add(redo_action, undo_action)
    sound.changed()
def _do_paste(self, start, end, clip):
    """Replace frames[start:end] with *clip*, matching the channel count.

    An empty sound simply adopts the clip as its frames.
    """
    if self.is_empty():
        self.frames = clip
        return
    # FIXME: should resample
    matched = edit.mix_channels_auto(clip, self.numchan())
    old = self.frames
    self.frames = numpy.concatenate((old[:start], matched, old[end:]))
def _do_mix(self, start, end, clip):
    """Additively mix *clip* into the frames starting at *start*.

    With a non-empty selection (start != end) the mix is clipped to the
    selection/clip overlap, in place. With an empty selection the sound is
    grown as needed so the whole clip fits, then mixed in.
    """
    if self.is_empty():
        self.frames = clip
        return
    # FIXME: should resample
    mixed = edit.mix_channels_auto(clip, self.numchan())
    if start == end:
        # Cursor mix: extend the buffer so clip fits entirely.
        existing = self.frames
        total = max(len(existing), start + len(mixed))
        channels = self.numchan()
        if channels > 1:
            grown = numpy.zeros((total, channels))
        else:
            grown = numpy.zeros(total)
        grown[:len(existing)] = existing
        grown[start:start + len(mixed)] += mixed
        self.frames = grown
    else:
        # Selection mix: only as many frames as both ranges cover.
        overlap = min(end - start, len(mixed))
        self.frames[start:start + overlap] += mixed[:overlap]