def mul_stereo(fileName, width, lfactor, rfactor):
    # Split the stereo fragment into its left and right channels.
    lsample = audioop.tomono(fileName, width, 1, 0)
    rsample = audioop.tomono(fileName, width, 0, 1)
    # Scale each channel by its own factor.
    lsample = audioop.mul(lsample, width, lfactor)
    rsample = audioop.mul(rsample, width, rfactor)
    # Re-expand each channel to stereo, hard-panned to its side, and mix.
    lsample = audioop.tostereo(lsample, width, 1, 0)
    rsample = audioop.tostereo(rsample, width, 0, 1)
    return audioop.add(lsample, rsample, width)
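# A minimal usage sketch (an assumption, not from the original source):
# apply mul_stereo above to the frames of a 16-bit stereo WAV file.
# "input.wav" is a placeholder file name.
import audioop
import wave

with wave.open("input.wav", "rb") as wf:
    width = wf.getsampwidth()  # bytes per sample; 2 for 16-bit audio
    frames = wf.readframes(wf.getnframes())

# Halve the left channel, leave the right channel untouched.
balanced = mul_stereo(frames, width, 0.5, 1.0)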
def test_tostereo(self):
    for w in 1, 2, 4:
        data1 = datas[w]
        data2 = bytearray(2 * len(data1))
        for k in range(w):
            data2[k::2*w] = data1[k::w]
        self.assertEqual(audioop.tostereo(data1, w, 1, 0), data2)
        self.assertEqual(audioop.tostereo(data1, w, 0, 0),
                         b'\0' * len(data2))
        for k in range(w):
            data2[k+w::2*w] = data1[k::w]
        self.assertEqual(audioop.tostereo(data1, w, 1, 1), data2)
def pan(slice, pan_pos=0.5, amp=1.0):
    amps = pantamp(pan_pos)
    # Extract each channel, then re-expand it with its panned amplitude.
    lslice = audioop.tomono(slice, audio_params[1], 1, 0)
    lslice = audioop.tostereo(lslice, audio_params[1], amps[0], 0)
    rslice = audioop.tomono(slice, audio_params[1], 0, 1)
    rslice = audioop.tostereo(rslice, audio_params[1], 0, amps[1])
    slice = audioop.add(lslice, rslice, audio_params[1])
    return audioop.mul(slice, audio_params[1], amp)
def _convert_data(self, data: bytes, to_depth: int, to_channels: int,
                  to_rate: int, to_unsigned: bool = False) -> bytes:
    """Convert audio data."""
    out_width = to_depth // 8
    if self._from_float:
        ldata = audioop.tomono(data, self._width, 1, 0)
        rdata = audioop.tomono(data, self._width, 0, 1)
        # Convert each float channel to integer samples and keep the result.
        converted = []
        for mono_data in (ldata, rdata):
            float_array = array('f', mono_data)
            out_array = array('i' if self._out_depth > 16 else 'h')
            for i in float_array:
                # Clamp to [-1.0, 1.0] before scaling to integer range.
                if i > 1.0:
                    i = 1.0
                elif i < -1.0:
                    i = -1.0
                out_array.append(round(i * 32767.0))
            converted.append(out_array.tobytes())
        ldata, rdata = converted
        ldata = audioop.tostereo(ldata, self._width, 1, 0)
        rdata = audioop.tostereo(rdata, self._width, 0, 1)
        data = audioop.add(ldata, rdata, self._width)
    if self._to_alaw:
        data = audioop.lin2alaw(data, self._width)
    if self._depth != to_depth:
        data = audioop.lin2lin(data, self._width, out_width)
    if self._unsigned != to_unsigned:
        data = audioop.bias(data, out_width, 128)
    # Make it stereo
    if self._channels < to_channels:
        data = audioop.tostereo(data, out_width, 1, 1)
    # Make it mono
    elif self._channels > to_channels:
        data = audioop.tomono(data, out_width, 1, 1)
    # Convert the sample rate of the data to the requested rate.
    if self._rate != to_rate and data:
        data, self._state = audioop.ratecv(
            data, out_width, to_channels, self._rate, to_rate, self._state,
        )
    return data
def testtostereo(data):
    data2 = ''
    for d in data[0]:
        data2 = data2 + d + d
    if audioop.tostereo(data[0], 1, 1, 1) != data2:
        return 0
    return 1
def raw_read(self):
    """Return some amount of data as a raw audio string"""
    buf = self.source.raw_read()
    if buf is None:
        self.eof = True
        return None
    # Convert channels as needed
    if self.set_channels and self.source.channels() != self.set_channels:
        if self.set_channels == 1:
            buf = audioop.tomono(buf, self.source.raw_width(), .5, .5)
        else:
            buf = audioop.tostereo(buf, self.source.raw_width(), 1, 1)
    # Convert sampling rate as needed
    if self.set_sampling_rate and \
            self.source.sampling_rate() != self.set_sampling_rate:
        (buf, self.ratecv_state) = audioop.ratecv(
            buf, self.source.raw_width(), self.channels(),
            self.source.sampling_rate(), self.set_sampling_rate,
            self.ratecv_state)
    # Convert sample width as needed
    if self.set_raw_width and self.source.raw_width() != self.set_raw_width:
        if self.source.raw_width() == 1 and self.source.has_unsigned_singles():
            buf = audioop.bias(buf, 1, -128)
        buf = audioop.lin2lin(buf, self.source.raw_width(), self.set_raw_width)
        if self.set_raw_width == 1 and self.source.has_unsigned_singles():
            buf = audioop.bias(buf, 1, 128)
    return buf
def to44KStereo(self, buffer):
    try:
        b = audioop.tostereo(buffer.data, 2, 1, 1)
        b, self.fromstate = audioop.ratecv(b, 2, 2, 8000, 44100,
                                           self.fromstate)
    except audioop.error:
        return b''
    return b
def coerce_lin(source_aiff, template_obj):
    '''Read data from source, and convert it to match template's params.'''
    import audioop
    frag = source_aiff.read_lin()
    Ss = source_aiff.stream
    St = template_obj.stream
    # Sample width
    if Ss.getsampwidth() != St.getsampwidth():
        print('coerce sampwidth %i -> %i' % (Ss.getsampwidth(),
                                             St.getsampwidth()))
        frag = audioop.lin2lin(frag, Ss.getsampwidth(), St.getsampwidth())
    width = St.getsampwidth()
    # Channels
    if Ss.getnchannels() != St.getnchannels():
        print('coerce nchannels %i -> %i' % (Ss.getnchannels(),
                                             St.getnchannels()))
        if Ss.getnchannels() == 2 and St.getnchannels() == 1:
            frag = audioop.tomono(frag, width, 0.5, 0.5)
        elif Ss.getnchannels() == 1 and St.getnchannels() == 2:
            frag = audioop.tostereo(frag, width, 1.0, 1.0)
        else:
            print("Err: can't match channels")
    # Frame rate
    if Ss.getframerate() != St.getframerate():
        print('coerce framerate %i -> %i' % (Ss.getframerate(),
                                             St.getframerate()))
        frag, state = audioop.ratecv(
            frag, width, St.getnchannels(),
            Ss.getframerate(),  # in rate
            St.getframerate(),  # out rate
            None, 2, 1)
    return frag
def encode(self, frame, force_keyframe=False):
    assert frame.format.name == 's16'
    assert frame.layout.name in ['mono', 'stereo']
    channels = len(frame.layout.channels)
    data = bytes(frame.planes[0])
    timestamp = frame.pts

    # resample at 48 kHz
    if frame.sample_rate != SAMPLE_RATE:
        data, self.rate_state = audioop.ratecv(
            data, SAMPLE_WIDTH, channels, frame.sample_rate, SAMPLE_RATE,
            self.rate_state)
        timestamp = (timestamp * SAMPLE_RATE) // frame.sample_rate

    # convert to stereo
    if channels == 1:
        data = audioop.tostereo(data, SAMPLE_WIDTH, 1, 1)

    length = lib.opus_encode(self.encoder,
                             ffi.cast('int16_t*', ffi.from_buffer(data)),
                             SAMPLES_PER_FRAME, self.cdata, len(self.cdata))
    assert length > 0
    return [self.buffer[0:length]], timestamp
def convert_to(self, data: bytes, to_depth: int, to_channels: int,
               to_rate: int, to_unsigned: bool = False) -> bytes:
    """Convert audio data."""
    dest_width = to_depth // 8
    if self._depth != to_depth:
        # 8-bit data is unsigned; re-center it before and after resizing.
        if self._depth == 8:
            data = audioop.bias(data, 1, 128)
        data = audioop.lin2lin(data, self._width, dest_width)
        if to_depth == 8:
            data = audioop.bias(data, 1, 128)
    if self._unsigned != to_unsigned:
        data = audioop.bias(data, dest_width, 128)
    # Make it stereo
    if self._channels < to_channels:
        data = audioop.tostereo(data, dest_width, 1, 1)
    # Make it mono
    elif self._channels > to_channels:
        data = audioop.tomono(data, dest_width, 1, 1)
    # Convert the sample rate of the data to the requested rate.
    if self._rate != to_rate and data:
        data, self._state = audioop.ratecv(data, dest_width, to_channels,
                                           self._rate, to_rate, self._state,
                                           2, 1)
    return data
def to_stereo(self):
    if self.channels == 2:
        return self
    elif self.channels == 1:
        # Duplicate the mono channel into both channels at half amplitude.
        return Audio(channels=2, width=self.width, rate=self.rate,
                     data=audioop.tostereo(self.data, self.width, 0.5, 0.5))
    else:
        raise ValueError(f"Can't convert audio with channels={self.channels}")
def _combine_audio(self):
    """Combines all audio in self.files into one song of raw audio."""
    if len(self.files) == 0:
        return None
    elif len(self.files) == 1:
        return self.files[0].data

    # Find length of longest audiofile.
    longest = 0
    for file in self.files:
        length = len(file.data)
        if file.info.get('channels', 2) == 1:
            # Mono segments will be doubled when converted to stereo.
            length *= 2
        if length > longest:
            longest = length

    combined = bytes(longest)
    for file in self.files:
        data = file.data
        # Convert to stereo if mono.
        if file.info.get('channels', 2) == 1:
            data = audioop.tostereo(data, int(IMPORT_WIDTH / 8), 1, 1)
        data += bytes(longest - len(data))
        combined = audioop.add(combined, data, int(IMPORT_WIDTH / 8))
    return combined
def normalize(self) -> 'Sample':
    """
    Normalize the sample, meaning: convert it to the default samplerate,
    sample width and number of channels. When mixing samples, they should
    all have the same properties, and this method is ideal to make sure
    of that.
    """
    if self.__locked:
        raise RuntimeError("cannot modify a locked sample")
    self.resample(params.norm_samplerate)
    if self.samplewidth != params.norm_samplewidth:
        # Convert to desired sample size.
        self.__frames = audioop.lin2lin(self.__frames, self.samplewidth,
                                        params.norm_samplewidth)
        self.__samplewidth = params.norm_samplewidth
    if params.norm_nchannels not in (1, 2):
        raise ValueError("norm_nchannels has invalid value, can only be 1 or 2")
    if self.nchannels == 1 and params.norm_nchannels == 2:
        # convert to stereo
        self.__frames = audioop.tostereo(self.__frames, self.samplewidth, 1, 1)
        self.__nchannels = 2
    elif self.nchannels == 2 and params.norm_nchannels == 1:
        # convert to mono
        self.__frames = audioop.tomono(self.__frames, self.__samplewidth, 1, 1)
        self.__nchannels = 1
    return self
def write(self, data):
    if not hasattr(self, 'LC'):
        return
    assert self.isOpen(), "calling write() on closed %s" % (self,)
    if self.writechannels == 2:
        data = audioop.tostereo(data, 2, 1, 1)
    wrote = self.writedev.write(data)
    if not wrote:
        log.msg("ALSA overrun")
def testtostereo(data):
    if verbose:
        print('tostereo')
    data2 = ''
    for d in data[0]:
        data2 = data2 + d + d
    if audioop.tostereo(data[0], 1, 1, 1) != data2:
        return 0
    return 1
def doPanning(self, buf):
    """
    Calculate panning levels for the specified buffer.

    Will return a new bytes-like object of the same length as buf.
    """
    if self.panning == 0.0:
        return buf
    left = audioop.tomono(buf, SAMPLE_WIDTH, 1, 0)
    right = audioop.tomono(buf, SAMPLE_WIDTH, 0, 1)
    # panning ranges over [-1.0, 1.0]; the two gains always sum to 1.
    left = audioop.mul(left, SAMPLE_WIDTH, (1 - self.panning) / 2)
    right = audioop.mul(right, SAMPLE_WIDTH, (self.panning + 1) / 2)
    left = audioop.tostereo(left, SAMPLE_WIDTH, 1, 0)
    right = audioop.tostereo(right, SAMPLE_WIDTH, 0, 1)
    return audioop.add(left, right, SAMPLE_WIDTH)
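# Quick check of the constant-sum pan law above (a sketch; assumes panning
# values in [-1.0, 1.0]): the left and right gains always add up to 1, so
# the overall level stays constant as the sound moves across the field.
for panning in (-1.0, -0.5, 0.0, 0.5, 1.0):
    left_gain = (1 - panning) / 2
    right_gain = (panning + 1) / 2
    assert abs(left_gain + right_gain - 1.0) < 1e-9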
def _monoToStereo(fn, leftBalance, rightBalance):
    audiofile = wave.open(fn, "r")
    params = audiofile.getparams()
    sampwidth = params[1]
    nframes = params[3]
    waveData = audiofile.readframes(nframes)
    sample = audioop.tostereo(waveData, sampwidth, leftBalance, rightBalance)
    return sample, params
async def source(self, session):
    data = await fetch_voice_data(
        session=session,
        token=self.google_cloud_token,
        text=self.text,
        language_code=self.language,
        name=self.voice_setting.voice[self.language],
        rate=self.voice_setting.speed,
        pitch=self.voice_setting.pitch,
    )
    return discord.PCMAudio(io.BytesIO(audioop.tostereo(data, 2, 1, 1)))
def read(self, size: int = -1) -> bytes:
    """Convert audio.

    Convert the samples to the given rate and make the data mono or
    stereo depending on the channels value.
    """
    data = self._buffer
    while len(data) < size:
        temp_data = self._source.read()
        if not temp_data:
            if len(data) != 0:
                data += b'\x00' * (size - len(data))
            break
        if self._source._floatp and not self._floatp:
            # Convert 32-bit float samples to clamped integer samples.
            in_array = array('f', temp_data)
            out_array = array(f"{'h' if self._depth <= 16 else 'i'}")
            for i in in_array:
                if i >= 1.0:
                    out_array.append(32767)
                elif i <= -1.0:
                    out_array.append(-32768)
                else:
                    out_array.append(math.floor(i * 32768.0))
            temp_data = out_array.tobytes()
        # Convert audio data from source width to self._width.
        if self._depth != self._source.depth:
            temp_data = audioop.lin2lin(temp_data, self._source._width,
                                        self._width)
        if self._unsigned != self._source.unsigned:
            temp_data = audioop.bias(temp_data, self._source._width, 128)
        # Make it stereo
        if self._source.channels < self._channels:
            temp_data = audioop.tostereo(temp_data, self._width, 1, 1)
        # Make it mono
        elif self._source.channels > self._channels:
            temp_data = audioop.tomono(temp_data, self._width, 1, 1)
        # Convert the sample rate of the data to the requested rate.
        if self._rate != self._source.rate and temp_data:
            temp_data, self._state = audioop.ratecv(
                temp_data, self._width, self._channels,
                self._source.rate, self._rate, self._state)
        data += temp_data
    self._buffer = data[size:]
    return data[:size]
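# Standalone sketch of the clamp-and-scale float conversion used above
# (a simplified variant; assumes 32-bit float samples in [-1.0, 1.0] and
# 16-bit signed output):
from array import array

floats = array('f', [0.5, 1.5, -2.0])
ints = array('h')
for sample in floats:
    clamped = max(-1.0, min(1.0, sample))
    ints.append(int(clamped * 32767))
# ints is now array('h', [16383, 32767, -32767])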
def write_audio(audio_buf, filename, sample_rate=RATE, stereo=True):
    """parameter is a mono audio file.  If flag @stereo is set, then the
    output file is stereo with one silent channel (this is useful if the
    file is going to be used as a sonar emission)."""
    # convert mono audio buffer to stereo
    # below, parameters are (buffer, width, lfactor, rfactor)
    if stereo:
        if SPEAKER == 'right':
            audio_buf = audioop.tostereo(audio_buf, 2, 0, 1)
        else:  # SPEAKER == 'left'
            audio_buf = audioop.tostereo(audio_buf, 2, 1, 0)
    wfile = wave.open(filename, 'w')
    if stereo:
        wfile.setnchannels(2)
    else:
        wfile.setnchannels(1)
    wfile.setsampwidth(2)  # two bytes == 16 bit
    wfile.setframerate(sample_rate)
    wfile.writeframes(audio_buf)
    wfile.close()
async def source(self, session):
    data = await fetch_voice_data(
        session=session,
        token=self.bot.google_cloud_token,
        text=self.text,
        language_code=LANGUAGES[self.language],
        name=convert_voice_name(LANGUAGES[self.language],
                                self.voice_setting.voice[self.language]),
        rate=self.voice_setting.speed,
        pitch=self.voice_setting.pitch,
    )
    source = discord.PCMAudio(io.BytesIO(audioop.tostereo(data, 2, 1, 1)))
    return discord.PCMVolumeTransformer(source, volume=0.4)
def dump_wav(fn, samples, params, stereo=True):
    with wave.open(fn, mode='wb') as audio_file:
        audio_file.setparams(params)
        if stereo:
            audio_file.setnchannels(2)
            # Split the (left, right) sample pairs and interleave them.
            ch_left, ch_right = list(zip(*samples))
            left_bytes = audioop.tostereo(array.array('h', ch_left),
                                          params.sampwidth, 1, 0)
            right_bytes = audioop.tostereo(array.array('h', ch_right),
                                           params.sampwidth, 0, 1)
            sample_bytes = audioop.add(left_bytes, right_bytes,
                                       params.sampwidth)
        else:
            audio_file.setnchannels(1)
            sample_bytes = array.array('h', samples)
        audio_file.writeframesraw(sample_bytes)
def resample(self, data, freq=44100, bits=8, signed=0, channels=1,
             byteorder=None):
    "Convert a sample to the mixer's own format."
    bytes = bits // 8
    byteorder = byteorder or sys.byteorder
    if (freq, bytes, signed, channels, byteorder) == self.parameters:
        return data
    # convert to native endianness
    if byteorder != sys.byteorder:
        data = byteswap(data, bytes)
        byteorder = sys.byteorder
    # convert unsigned -> signed for the next operations
    if not signed:
        data = audioop.bias(data, bytes, -(1 << (bytes * 8 - 1)))
        signed = 1
    # convert stereo -> mono
    while channels > self.channels:
        assert channels % 2 == 0
        data = audioop.tomono(data, bytes, 0.5, 0.5)
        channels //= 2
    # resample to self.freq
    if freq != self.freq:
        data, ignored = audioop.ratecv(data, bytes, channels, freq,
                                       self.freq, None)
        freq = self.freq
    # convert between 8bits and 16bits
    if bytes != self.bytes:
        data = audioop.lin2lin(data, bytes, self.bytes)
        bytes = self.bytes
    # convert mono -> stereo
    while channels < self.channels:
        data = audioop.tostereo(data, bytes, 1.0, 1.0)
        channels *= 2
    # convert signed -> unsigned
    if not self.signed:
        data = audioop.bias(data, bytes, 1 << (bytes * 8 - 1))
        signed = 0
    # convert to mixer endianness
    if byteorder != self.byteorder:
        data = byteswap(data, bytes)
        byteorder = self.byteorder
    # done
    if (freq, bytes, signed, channels, byteorder) != self.parameters:
        raise ValueError('sound sample conversion failed')
    return data
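# Side sketch of the signed/unsigned flip used above: audioop.bias adds a
# constant to every sample and wraps on overflow, so an offset of 128
# (1 << 7) toggles 8-bit data between unsigned and two's-complement form.
import audioop

unsigned = bytes([0, 128, 255])            # unsigned 8-bit samples
signed = audioop.bias(unsigned, 1, -128)   # now signed 8-bit samples
assert signed == bytes([128, 0, 127])      # -128, 0, 127 as raw bytes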
def to_stereo(self, right=None):
    'Convert mono audio to stereo'
    if right is None:
        right = self
    if self.params.nchannels != 1 or right.params.nchannels != 1:
        raise PcmValueError('Mono audio required')
    if self.params.framerate != right.params.framerate:
        raise PcmValueError('Channels have different frame rate')
    if self.params.nframes != right.params.nframes:
        raise PcmValueError('Channels have different length')
    left, right = self._adjust_both(right)
    return self.__class__(
        left.params,
        audioop.add(
            audioop.tostereo(left.frames, left.params.sampwidth, 1, 0),
            audioop.tostereo(right.frames, right.params.sampwidth, 0, 1),
            left.params.sampwidth),
        nchannels=2)
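# Illustrative sketch of the interleaving idiom used above, with plain
# audioop calls (assumes two equal-length 16-bit mono fragments):
import audioop

left = b'\x00\x10' * 4    # four dummy 16-bit samples
right = b'\x00\xf0' * 4

stereo = audioop.add(
    audioop.tostereo(left, 2, 1, 0),   # left data, right channel silent
    audioop.tostereo(right, 2, 0, 1),  # right data, left channel silent
    2)
assert len(stereo) == len(left) + len(right)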
def audio_stereo_out(sig_meas, fs, n_chan):
    global stream
    # converts the signal to a stereo signal
    stereoaudio = audioop.tostereo(sig_meas, 2, 1, 1)
    # save .wav file
    sf = wave.open("/tmp/amg2025/meas_stereo.wav", 'w')
    sf.setparams((n_chan, 2, fs, 0, 'NONE', 'no compression'))
    sf.writeframesraw(stereoaudio)
    sf.close()
    stream.write(stereoaudio)
def normalize(self):
    """
    Normalize the sample, meaning: convert it to the default samplerate,
    sample width and number of channels. When mixing samples, they should
    all have the same properties, and this method is ideal to make sure
    of that.
    """
    assert not self.__locked
    self.resample(self.norm_samplerate)
    if self.samplewidth != self.norm_samplewidth:
        # Convert to the normalized sample size.
        self.__frames = audioop.lin2lin(self.__frames, self.samplewidth,
                                        self.norm_samplewidth)
        self.__samplewidth = self.norm_samplewidth
    if self.nchannels == 1:
        # convert to stereo
        self.__frames = audioop.tostereo(self.__frames, self.samplewidth, 1, 1)
        self.__nchannels = 2
    return self
def speeker():
    global is_stopped
    try:
        while True:
            try:
                data = audioChunks.get(True, 1)
            except Queue.Empty:
                if is_stopped:
                    is_stopped = False
            else:
                stream.write(audioop.tostereo(data, 2, 1, 1))
                audioChunks.task_done()
                if stop_check():
                    stop()
    finally:
        print("closing")
        stream.close()
        p.terminate()
def read(self, size=None):
    """
    Convert the samples to the given rate and make the data mono or
    stereo depending on the channels value.  The data is buffered so
    reads return the requested size whenever possible.
    """
    if not audioop:
        print("audioop not found so returning empty byte")
        return b'\x00'
    data = self._buffer
    while len(data) < size:
        temp_data = self._source.read()
        if not temp_data:
            if len(data) != 0:
                data += b'\x00' * (size - len(data))
            break
        if self._depth != self._source.depth:
            temp_data = audioop.lin2lin(temp_data, self._source._width,
                                        self._width)
        if self._unsigned != self._source.unsigned:
            temp_data = audioop.bias(temp_data, self._source._width, 128)
        # Make it stereo
        if self._source.channels < self._channels:
            temp_data = audioop.tostereo(temp_data, self._width, 1, 1)
        # Make it mono
        elif self._source.channels > self._channels:
            temp_data = audioop.tomono(temp_data, self._width, 1, 1)
        # Convert the sample rate of the data to the requested rate.
        if self._rate != self._source.rate and temp_data:
            temp_data, self._state = audioop.ratecv(
                temp_data, self._width, self._channels,
                self._source.rate, self._rate, self._state)
        data += temp_data
    self._buffer = data[size:]
    return data[:size]
def make_pcm(content: bytes) -> io.BytesIO:
    """
    Strip the header from a WAV file and normalize the sample width,
    channel count and frame rate.

    :param content: the WAV data
    :return: the resulting PCM stream
    """
    with wave.open(io.BytesIO(content)) as wav:
        bit = wav.getsampwidth()
        pcm = wav.readframes(wav.getnframes())
        if bit != 2:
            pcm = audioop.lin2lin(pcm, bit, 2)
        if wav.getnchannels() == 1:
            pcm = audioop.tostereo(pcm, 2, 1, 1)
        if wav.getframerate() != 48000:
            pcm = audioop.ratecv(pcm, 2, 2, wav.getframerate(), 48000, None)[0]
    return io.BytesIO(pcm)
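# Hypothetical usage ("speech.wav" is a placeholder): convert any WAV file
# to the 48 kHz / 16-bit / stereo PCM stream that make_pcm produces.
with open("speech.wav", "rb") as f:
    pcm_stream = make_pcm(f.read())
print(pcm_stream.getbuffer().nbytes, "bytes of 48 kHz stereo PCM")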
def stereo(self, left_factor=1.0, right_factor=1.0):
    """
    Turn a mono sample into a stereo one with given factors/amplitudes for
    left and right channels. Note that it is a fast but simplistic
    conversion; the waveform in both channels is identical so you may
    suffer from phase cancellation when playing the resulting stereo
    sample. If the sample is already stereo, the left/right channel
    separation is changed instead.
    """
    assert not self.__locked
    if self.__nchannels == 2:
        # first split the left and right channels and then remix them
        right = self.copy().right()
        self.left().amplify(left_factor)
        return self.stereo_mix(right, 'R', right_factor)
    if self.__nchannels == 1:
        self.__frames = audioop.tostereo(self.__frames, self.__samplewidth,
                                         left_factor, right_factor)
        self.__nchannels = 2
        return self
    raise ValueError("sample must be mono or stereo already")
def normalize(self) -> 'Sample':
    """
    Normalize the sample, meaning: convert it to the default samplerate,
    sample width and number of channels. When mixing samples, they should
    all have the same properties, and this method is ideal to make sure
    of that.
    """
    if self.__locked:
        raise RuntimeError("cannot modify a locked sample")
    self.resample(params.norm_samplerate)
    if self.samplewidth != params.norm_samplewidth:
        # Convert to the normalized sample size.
        self.__frames = audioop.lin2lin(self.__frames, self.samplewidth,
                                        params.norm_samplewidth)
        self.__samplewidth = params.norm_samplewidth
    if self.nchannels == 1:
        # convert to stereo
        self.__frames = audioop.tostereo(self.__frames, self.samplewidth, 1, 1)
        self.__nchannels = 2
    return self
def Removevoice(self):
    self.createMessage("")
    f = self.selectedFile
    if f is None:
        showinfo("Warning!", "Please upload or create a file first")
        return
    if not f.removevoiceval:
        rate = 44100
        self.removevoice["bg"] = "dark green"
        wf = wave.open(f.file_path)
        data = numpy.frombuffer(wf.readframes(wf.getnframes()),
                                dtype=numpy.int16)
        CHANNELS = wf.getnchannels()
        if CHANNELS != 2:
            showinfo("Warning!", "File must be Stereo and not Mono")
            self.removevoice["bg"] = "black"
        else:
            f.tempFile = True
            self.createMessage("Processing...")
            # Each channel is saved alternately and hence, this idea
            # can be used in conjunction with the fact that voice
            # samples are generally present in all channels to eliminate
            # the voice. Here, I use stereo, and hence the below method.
            f.tempVoiceRemovedMono = data[0::2] - data[1::2]
            f.tempVoiceRemovedStereo = audioop.tostereo(
                f.tempVoiceRemovedMono, 2, 1, 1)
            f.tempFileArray = numpy.frombuffer(f.tempVoiceRemovedStereo,
                                               dtype=numpy.int16)
            f.removevoiceval = True
            self.createMessage("Done!")
            (nchannels, sampwidth, framerate, nframes, comptype, compname) = \
                (2, 2, 44100, len(f.tempVoiceRemovedStereo), "NONE",
                 "not compressed")
            f.tempfileparameters = (nchannels, sampwidth, framerate,
                                    nframes, comptype, compname)
            wf.close()
            f.destroy()
            f.plot(self, rate, 'temp')
            self.removevoice["bg"] = "black"
    else:
        self.removevoiceval = True
        self.removevoice["bg"] = "black"
def read(filename):
    """
    Read a 44.1k / 16bit WAV file from disk with the Python wave module.
    Mono files are converted to stereo automatically.
    """
    if not quiet:
        print('loading', filename)
    file = wave.open(filename, "r")
    file_frames = file.readframes(file.getnframes())
    snd = Sound()
    # check for mono files
    if file.getnchannels() == 1:
        file_frames = audioop.tostereo(file_frames, file.getsampwidth(),
                                       0.5, 0.5)
        snd.params = file.getparams()
        snd.params = (2, snd.params[1], snd.params[2], snd.params[3],
                      snd.params[4], snd.params[5])
    else:
        snd.params = file.getparams()
    snd.data = file_frames
    return snd
def _tostereo(self, output):
    # Pan fragments alternately: even indices hard right, odd indices
    # hard left.
    return [tostereo(o, self.bytes, n % 2, (n + 1) % 2)
            for n, o in enumerate(output)]
def write(self, data):
    from audioop import tostereo
    if self._channels == 2:
        data = tostereo(data, 2, 1, 1)
    self.dev.write(data)
def test_tostereo(self):
    data2 = ''
    for d in data[0]:
        data2 = data2 + d + d
    self.assertEqual(audioop.tostereo(data[0], 1, 1, 1), data2)
def loop(self): """Master loop""" import os.path import audioop silent = "\x00" * settings.STEREO_CHUNK_SIZE self.mumble.users.myself.texture(self.load_bitmap(settings.STOP_BITMAP)) while self.mumble.is_alive() and not self.exit: while not self.stdin_q.empty(): line = self.stdin_q.get() self.cmd(line) if ( ( self.recording and not self.force_stop ) or self.force_start ) and not self.force_newfile: if not self.audio_file: # Start recording # ask the pymumble library to handle incoming audio self.mumble.set_receive_sound(True) # signal the others I'm recording (to be fair) self.mumble.users.myself.recording() # Change the recorder avatar self.mumble.users.myself.texture(self.load_bitmap(settings.START_BITMAP)) # time of the start of the recording self.cursor_time = time.time() - settings.BUFFER # Assemble file name audio_file_name = os.path.join(settings.SAVEDIR, "mumble-%s" % time.strftime("%Y%m%d-%H%M%S")) print "Starting new Recording" self.audio_file = AudioFile(audio_file_name, self.settings["OUTPUT"]) self.cmd_log.info("RECORDING START") if self.cursor_time < time.time() - settings.BUFFER: # it's time to check audio base_sound = copy.copy(silent) for user in self.mumble.users.values(): # check the audio queue of each users session = user["session"] while ( user.sound.is_sound() and user.sound.first_sound().time < self.cursor_time): user.sound.get_sound(settings.FLOAT_RESOLUTION) # forget about too old sounds if user.sound.is_sound(): if ( user.sound.first_sound().time >= self.cursor_time and user.sound.first_sound().time < self.cursor_time + settings.FLOAT_RESOLUTION ): # available sound is to be treated now and not later sound = user.sound.get_sound(settings.FLOAT_RESOLUTION) if sound.target == 0: # take care of the stereo feature stereo_pcm = audioop.tostereo(sound.pcm, 2, *self.users[session]["stereo"]) else: stereo_pcm = audioop.tostereo(sound.pcm, 2, 1, 1) base_sound = self.add_sound(base_sound, stereo_pcm) if not self.audio_file.write(base_sound): self.cmd_log.error("ERROR 2002 ENCODER closed while write") self.force_stop = True self.force_start = False self.cursor_time += settings.FLOAT_RESOLUTION else: time.sleep(settings.FLOAT_RESOLUTION) else: if self.audio_file: # finish recording self.mumble.users.myself.unrecording() self.mumble.users.myself.texture(self.load_bitmap(settings.STOP_BITMAP)) self.mumble.set_receive_sound(False) self.cursor_time = None self.audio_file.close() self.audio_file = None self.cmd_log.info("RECORDING STOP") self.force_newfile = False time.sleep(0.5)
def loop(self): """Master loop""" import os.path import audioop silent = "\x00" * STEREO_CHUNK_SIZE self.mumble.users.myself.comment("Auto mode (starting with %i users)." % USER_COUNT + COMMENT_SUFFIX) self.mumble.users.myself.texture(self.load_bitmap(STOP_BITMAP)) while self.mumble.is_alive() and not self.exit: if ( ( self.recording and not self.force_stop ) or self.force_start ) and not self.force_newfile: if not self.audio_file: # Start recording self.mumble.set_receive_sound(True) # ask the pymumble library to handle incoming audio self.mumble.users.myself.recording() # signal the others I'm recording (to be fair) self.mumble.users.myself.texture(self.load_bitmap(START_BITMAP)) # Change the recorder avatar self.cursor_time = time.time() - BUFFER # time of the start of the recording #create the files audio_file_name = os.path.join(SAVEDIR, "mumble-%s" % time.strftime("%Y%m%d-%H%M%S")) self.audio_file = AudioFile(audio_file_name) if CREATE_WEBVTT: self.chapters = webvtt.WebVtt(audio_file_name + "-chapters.vtt") self.captions = webvtt.WebVtt( audio_file_name + "-captions.vtt", regions=[ "Region: id=left width=50% regionanchor=0%,100% viewportanchor=0%,100%", "Region: id=right width=50% regionanchor=100%,100% viewportanchor=100%,100%", ] ) usernames = list() for user in self.mumble.users.values(): if user["name"] != USER: usernames.append(user["name"]) title = "<c.system>Recording started with users {users}".format(users=",".join(usernames)) self.captions.add_cue(title, duration=2) if self.cursor_time < time.time() - BUFFER: # it's time to check audio base_sound = None for user in self.mumble.users.values(): # check the audio queue of each users session = user["session"] while ( user.sound.is_sound() and user.sound.first_sound().time < self.cursor_time): user.sound.get_sound(FLOAT_RESOLUTION) # forget about too old sounds if user.sound.is_sound(): if self.captions is not None and "caption" not in self.users[session]: self.users[session]["caption"] = self.captions.add_cue("<v {user}>{user}".format(user=user["name"])) if ( user.sound.first_sound().time >= self.cursor_time and user.sound.first_sound().time < self.cursor_time + FLOAT_RESOLUTION ): # available sound is to be treated now and not later sound = user.sound.get_sound(FLOAT_RESOLUTION) if sound.target == 0: # take care of the stereo feature stereo_pcm = audioop.tostereo(sound.pcm, 2, *self.users[session]["stereo"]) if self.captions is not None: self.users[session]["caption"].set_region(self.users[session]["region"]) else: stereo_pcm = audioop.tostereo(sound.pcm, 2, 1, 1) if base_sound == None: base_sound = stereo_pcm else: #base_sound = audioop.add(base_sound, sound.pcm, 2) base_sound = self.add_sound(base_sound, stereo_pcm) else: if self.captions is not None and "caption" in self.users[session]: self.users[session]["caption"].end() del self.users[session]["caption"] if base_sound: self.audio_file.write(base_sound) else: self.audio_file.write(silent) self.cursor_time += FLOAT_RESOLUTION else: time.sleep(FLOAT_RESOLUTION) else: if self.audio_file: # finish recording self.mumble.users.myself.unrecording() self.mumble.users.myself.texture(self.load_bitmap(STOP_BITMAP)) self.mumble.set_receive_sound(False) self.cursor_time = None self.audio_file.close() self.audio_file = None if self.current_chapter is not None: self.current_chapter.end() self.current_chapter = None self.chapters = None for user in self.users.values(): if "caption" in user: user["caption"].end() del user["caption"] self.captions = None self.force_newfile = False 
time.sleep(0.5)
def mixstereo(chans):
    """ mix a list of two mono sounds into a stereo sound """
    chans[0] = audioop.tostereo(chans[0], audio_params[1], 1, 0)
    chans[1] = audioop.tostereo(chans[1], audio_params[1], 0, 1)
    return mix(chans)
def __init__(self, filename, mode='r', depth=16, rate=44100, channels=2,
             bigendian=False, unsigned=False, **kwargs):
    """ AllFile(self, filename, mode='r', depth=16, rate=44100, channels=2,
    bigendian=False, unsigned=False, **kwargs) -> Loads the correct codec
    for the file and acts as a wrapper providing additional functionality.
    """
    codec = get_codec(filename, blacklist=[os_basename(__file__)])
    self._supported_modes = getattr(codec, '_supported_modes', 'r')
    source = codec(filename, mode=mode, **kwargs)

    super(AllFile, self).__init__(filename, mode, source.depth,
                                  source.rate, source.channels)

    self._source = source
    self._bigendian = bigendian
    self._unsigned = unsigned
    self._state = None

    annotations = getattr(codec.read, '__annotations__')
    self.read.__annotations__.update(annotations)

    self._buffer = annotations.get('return', bytes)()
    self._buffer_size = self._source.buffer_size
    self._length = self._source.length
    self._info_dict = self._source._info_dict
    self.write = self._source.write
    self._closed = False

    if self._depth != self._source.depth:
        self._convert_depth = lambda data: \
            audioop.lin2lin(data, self._source._width, self._width)
    else:
        self._convert_depth = lambda data: data

    if self._unsigned != self._source.unsigned:
        self._convert_unsigned = lambda data: \
            audioop.bias(data, self._source._width, 128)
    else:
        self._convert_unsigned = lambda data: data

    # Make it stereo
    if self._source.channels < self._channels:
        self._convert_channels = lambda data: \
            audioop.tostereo(data, self._width, 1, 1)
    # Make it mono
    elif self._source.channels > self._channels:
        self._convert_channels = lambda data: \
            audioop.tomono(data, self._width, 1, 1)
    else:
        self._convert_channels = lambda data: data

    # Convert the sample rate of the data to the requested rate.
    if self._rate != self._source.rate:
        self._convert_rate = lambda data: \
            audioop.ratecv(data, self._width, self._channels,
                           self._source.rate, self._rate, self._state)
    else:
        self._convert_rate = lambda data: (data, self._state)

    if self._bigendian != self._source.bigendian:
        self._convert_endian = swap_endian
    else:
        self._convert_endian = lambda data: data
def test_tostereo(self):
    data2 = bytearray()
    for d in data[0]:
        data2.append(d)
        data2.append(d)
    self.assertEqual(audioop.tostereo(data[0], 1, 1, 1), data2)
def stereo(snd):
    """
    Just a wrapper for audioop.tostereo from the standard library.
    Pass it a mono sound and it will convert it to stereo.
    """
    return audioop.tostereo(snd, audio_params[1], 0.5, 0.5)
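# A minimal sketch of calling the wrapper above, assuming audio_params is a
# wave-style parameter tuple whose index 1 is the sample width in bytes:
audio_params = (1, 2, 44100, 0, 'NONE', 'not compressed')
mono = b'\x01\x00\x02\x00'       # two 16-bit mono samples
centered = stereo(mono)          # both channels at half amplitude
assert len(centered) == 2 * len(mono)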