def render(self, chunk_size=None):
    gain = getattr(self.track, 'gain', None)
    if chunk_size is None:
        # self has start and duration, so it is a valid index into track.
        output = self.track[self].data
        # Normalize volume if necessary
        if gain is not None:
            # limit expects a float32 vector
            output = limit(multiply(output, float32(gain)))
        yield output
    else:
        # Convert a start/duration given in seconds to sample offsets;
        # otherwise assume start and end are already sample indices.
        if isinstance(self.start, float):
            start = int(self.start * 44100)
            end = int((self.start + self.duration) * 44100)
        else:
            start, end = self.start, self.end
        # Render the region one chunk_size slice at a time so long
        # segments never have to be held in memory all at once.
        for i in xrange(start, end, chunk_size):
            if gain is not None:
                yield limit(multiply(
                    self.track[i:min(end, i + chunk_size)].data,
                    float32(gain)
                )).astype(numpy.int16)
            else:
                yield self.track[i:min(end, i + chunk_size)].data
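# The render paths above lean on limit, multiply, and float32 (presumably numpy's
# multiply and float32, plus a hard limiter) to scale samples without letting them
# wrap around. As a rough stand-in for that pattern, not the library's actual
# limit helper, the gain step might look like this hypothetical sketch:

import numpy

def apply_gain(samples, gain):
    """Sketch: scale int16 samples by gain and hard-clip back into 16-bit range."""
    scaled = samples.astype(numpy.float32) * numpy.float32(gain)
    return numpy.clip(scaled, -32768, 32767).astype(numpy.int16)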
def g(self, d, gain, rate):
    # Time-scale the audio data d by a single rate, then apply gain
    # (with limiting) if one is set.
    s = 44100
    if gain is not None:
        return limit(multiply(dirac.timeScale(d, rate, s, self.quality),
                              float32(gain)))
    else:
        return dirac.timeScale(d, rate, s, self.quality)
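# Unlike the batch stretch functions below, g takes one scalar rate per call, so a
# caller that wants a gradual tempo ramp has to vary the rate from chunk to chunk
# itself. A hypothetical sketch of that bookkeeping (just the rate schedule, no
# audio):

def chunk_rates(rate_start, rate_end, n_chunks):
    """Linearly interpolate a per-chunk rate from rate_start to rate_end."""
    if n_chunks == 1:
        return [rate_start]
    step = (rate_end - rate_start) / float(n_chunks - 1)
    return [rate_start + i * step for i in range(n_chunks)]

# chunk_rates(1.0, 0.5, 5) -> [1.0, 0.875, 0.75, 0.625, 0.5]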
def render(self):
    # self has start and duration, so it is a valid index into track.
    output = self.track[self]
    # Normalize volume if necessary
    gain = getattr(self.track, 'gain', None)
    if gain is not None:
        # limit expects a float32 vector
        output.data = limit(multiply(output.data, float32(gain)))
    return output
def stretch(self, t, l):
    """t is a track; l is a list of (start, duration) pairs in seconds."""
    signal_start = int(l[0][0] * t.sampleRate)
    signal_duration = int((sum(l[-1]) - l[0][0]) * t.sampleRate)
    vecin = t.data[signal_start:signal_start + signal_duration, :]
    # One (sample_offset, rate) pair per beat: the offset is relative to the
    # start of the excerpt, the rate is target duration / source duration.
    rates = []
    for i in xrange(len(l)):
        rates.append((int(l[i][0] * t.sampleRate) - signal_start,
                      self.durations[i] / l[i][1]))
    vecout = dirac.timeScale(vecin, rates, t.sampleRate, 0)
    if hasattr(t, 'gain'):
        vecout = limit(multiply(vecout, float32(t.gain)))
    return AudioData(ndarray=vecout, shape=vecout.shape,
                     sampleRate=t.sampleRate, numChannels=vecout.shape[1])
def stretch(self, t, l):
    """t is a track; l is a list of (start, duration) pairs in seconds."""
    signal_start = int(l[0][0] * t.sampleRate)
    signal_duration = int((sum(l[-1]) - l[0][0]) * t.sampleRate)
    vecin = t.data[signal_start:signal_start + signal_duration, :]
    rates = []
    for i in xrange(len(l)):
        rate = (int(l[i][0] * t.sampleRate) - signal_start,
                self.durations[i] / l[i][1])
        rates.append(rate)
    vecout = dirac.timeScale(list(vecin), rates, t.sampleRate, 0)
    if hasattr(t, 'gain'):
        vecout = limit(multiply(vecout, float32(t.gain)))
    audio_out = AudioData(ndarray=vecout, shape=vecout.shape,
                          sampleRate=t.sampleRate,
                          numChannels=vecout.shape[1])
    return audio_out
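# Both stretch variants build rates the same way: one (sample_offset, rate) pair
# per beat, with the offset measured from the start of the excerpt and the rate
# equal to the desired duration divided by the source duration. A standalone
# sketch of that bookkeeping (hypothetical helper, plain Python, no audio):

def build_rates(beats, target_durations, sample_rate=44100):
    """beats is a list of (start, duration) pairs in seconds;
    target_durations gives the desired length of each beat in seconds."""
    signal_start = int(beats[0][0] * sample_rate)
    rates = []
    for (start, duration), target in zip(beats, target_durations):
        offset = int(start * sample_rate) - signal_start
        rates.append((offset, target / duration))
    return rates

# Example: stretch two half-second beats to 0.6 s and 0.4 s respectively.
# build_rates([(10.0, 0.5), (10.5, 0.5)], [0.6, 0.4]) -> [(0, 1.2), (22050, 0.8)]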