import audio                            # device module used throughout these helpers (chr2num/num2chr, recording calls)


def _make_ydata(chunk, step):
        # Reduce a raw audio chunk to one peak amplitude per 'step' samples,
        # e.g. for a coarse waveform or VU display.
        ydata = []
        for i in range(0, len(chunk), step):
                piece = audio.chr2num(chunk[i:i+step])
                mi, ma = min(piece), max(piece)
                y = max(abs(mi), abs(ma))       # peak absolute amplitude in this slice
                ydata.append(y)
        return ydata
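# Hedged usage sketch, not part of the original file; the helper name below is
# made up for illustration.  It builds a synthetic sine-wave chunk and reduces
# it to one peak value per 100 samples, the kind of data a display would plot.
def _demo_make_ydata():
        import math
        nums = []
        for k in range(800):
                nums.append(int(100 * math.sin(k * 0.1)))
        chunk = audio.num2chr(nums)
        print _make_ydata(chunk, 100)   # eight peak values, each close to 100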
def c2i(data):
        # Convert a 4-byte big-endian string into an unsigned integer.
        if type(data) != type('') or len(data) != 4:
                raise error, 'c2i: bad arg (not string[4])'     # 'error' is assumed to be defined elsewhere in the original module
        bytes = audio.chr2num(data)
        for i in (1, 2, 3):
                if bytes[i] < 0:
                        bytes[i] = bytes[i] + 256       # chr2num returns signed bytes; make the low bytes unsigned
        return ((bytes[0]*256 + bytes[1])*256 + bytes[2])*256 + bytes[3]
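# Hedged usage sketch, not part of the original file: c2i() decodes a 4-byte
# big-endian field such as a chunk length read from a file header.
def _demo_c2i():
        print c2i('\000\000\001\000')   # prints 256
        print c2i('\001\002\003\004')   # prints 16909060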
def softclip(s):
        # Soften clipped samples: any sample at the 8-bit extremes (-128 or 127)
        # is replaced by the average of its two neighbours.
        if '\177' not in s and '\200' not in s:
                return s                # no clipped samples at all
        num = audio.chr2num(s)
        extremes = (-128, 127)
        for i in range(1, len(num)-1):
                if num[i] in extremes:
                        num[i] = (num[i-1] + num[i+1]) / 2
        return audio.num2chr(num)
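# Hedged usage sketch, not part of the original file: a sample stuck at the
# positive extreme (127) is replaced by the average of its two neighbours.
def _demo_softclip():
        clipped = audio.num2chr([50, 127, 70])
        print audio.chr2num(softclip(clipped))  # prints [50, 60, 70]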
def unbias(s):
        # Remove the DC bias (rounded average value) from a chunk of samples.
        if not s:
                return s
        a = audio.chr2num(s)
        sum = 0
        for i in a:
                sum = sum + i
        bias = (sum + len(a)/2) / len(a)        # rounded integer average
        print 'Bias value:', bias
        if bias:
                for i in range(len(a)):
                        a[i] = a[i] - bias
                s = audio.num2chr(a)
        return s
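# Hedged usage sketch, not part of the original file: a chunk riding on a
# constant offset of 10 comes back centred around zero (unbias() itself also
# prints the bias value it found).
def _demo_unbias():
        biased = audio.num2chr([10, 20, 0, 10])
        print audio.chr2num(unbias(biased))     # prints [0, 10, -10, 0]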
def timer(self):
        # Periodic callback: finish the previous recording (if any) and log its
        # peak amplitude, then start recording the next 1/10 second sample.
        if self.sampling:
                chunk = audio.wait_recording()
                self.sampling = 0
                nums = audio.chr2num(chunk)
                ampl = max(abs(min(nums)), abs(max(nums)))
                self.append(ampl)
        if self.enabled and not self.sampling:
                audio.setrate(self.rate)
                size = Rates[self.rate]/10      # samples in 1/10 second at this rate
                size = size/48*48               # round down to a multiple of 48
                audio.start_recording(size)
                self.sampling = 1
        if self.sampling:
                self.parent.settimer(1)
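# Hedged sketch, not part of the original class: the buffer-size arithmetic
# used in timer() above, with a made-up Rates entry mapping a rate code to
# 8000 samples/second.  A 1/10 second buffer rounds down from 800 to 768
# samples, the nearest multiple of 48.
def _demo_buffer_size(rate):
        Rates = {3: 8000}               # hypothetical table, for illustration only
        size = Rates[rate]/10
        size = size/48*48
        return size                     # _demo_buffer_size(3) == 768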
def stretch(s, a, b):
        # Stretch (resample) the chunk s by a factor b/a, using linear
        # interpolation between neighbouring input samples.
        y = audio.chr2num(s)
        m = len(y)
        out = []
        n = m * b / a
        # i, j will walk through y and out (step 1)
        # ib, ja are i*b, j*a and are kept as close together as possible
        i, ib = 0, 0
        j, ja = 0, 0
        for j in range(n):
                ja = ja+a
                while ib < ja:
                        i = i+1
                        ib = ib+b
                if i >= m:
                        break
                if ib == ja:
                        out.append(y[i])
                else:
                        out.append((y[i]*(ja-(ib-b)) + y[i-1]*(ib-ja)) / b)
        return audio.num2chr(out)       # back to a byte string, like softclip() and unbias()
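# Hedged usage sketch, not part of the original file: stretch an 8-sample ramp
# by a factor of 2 (a=1, b=2).  The result roughly doubles the chunk,
# alternating interpolated midpoints with the original samples.
def _demo_stretch():
        ramp = audio.num2chr([0, 10, 20, 30, 40, 50, 60, 70])
        longer = stretch(ramp, 1, 2)
        print len(longer), audio.chr2num(longer)        # prints 14 and the resampled values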