def copy_wave(src, dest):
    """Copy the wave file `src` (resolved via dota_file) to `dest` under
    the nohats tree, zero-padding the audio to at least two seconds.

    When `nohats_dir` is None, only reads the source (dry-run) and
    writes nothing.
    """
    print("copy wave '{}' to '{}'".format(src, dest))
    src = dota_file(src)
    # Open before the try block: in the original a failed open left the
    # handle name unbound and the finally clause raised NameError.
    wav_in = wave_open(src, "rb")
    try:
        frames_available = wav_in.getnframes()
        # Pad short clips out to two seconds of silence; replacement
        # sounds shorter than the original otherwise leave audible noise.
        frames_needed = 2 * wav_in.getframerate()
        empty_frame = b"\0" * wav_in.getsampwidth() * wav_in.getnchannels()
        filler_frames = empty_frame * max(frames_needed - frames_available, 0)
        if nohats_dir is None:
            return
        dest = nohats_file(dest)
        dest_dir = dirname(dest)
        if not exists(dest_dir):
            makedirs(dest_dir)
        wav_out = wave_open(dest, "wb")
        try:
            wav_out.setparams(wav_in.getparams())
            # writeframes patches up the frame count copied by setparams.
            wav_out.writeframes(wav_in.readframes(frames_available) + filler_frames)
        finally:
            wav_out.close()
    finally:
        wav_in.close()
def __init__(self, filename=None, sampleWidth=2, channels=1, name=None):
    """Create a wav-file sink unit generator.

    filename    -- path of the wav file to write; None raises UGenError
                   (the default exists only to make the error explicit)
    sampleWidth -- bytes per sample, 1 or 2
    channels    -- number of output channels
    name        -- debug name passed to the UGen base class

    Raises UGenError for a missing filename or unsupported sample width.
    """
    super(WavOut, self).__init__(channels=channels, name=name)
    if ("constructors" in UGen.debug):
        print >>stderr, "WavOut.__init__(%s)" % name
    # Validate arguments before touching the filesystem.
    # (Fixed "filename == None" to the identity test "is None".)
    if (filename is None):
        msg = "can't write to an unnamed wav file (for %s)" % self
        raise UGenError(msg)
    if (sampleWidth not in [1, 2]):
        msg = "sampleWidth=%s is not supported (for %s)" % (sampleWidth, self)
        raise UGenError(msg)
    self.ignoreInputlessSink = True
    self.sampleWidth = sampleWidth
    # Largest positive sample value for this width (e.g. 32767 for 16-bit).
    self.sampleScale = (1 << (8*sampleWidth-1)) - 1
    if ("resolution" in UGen.debug):
        print >>stderr, "%s sampleWidth=%s sampleScale=%s" \
            % (self, self.sampleWidth, self.sampleScale)
    # struct pack format: signed byte for 8-bit, signed short for 16-bit.
    if (sampleWidth == 1):
        self.packFormat = "b"
    else:
        self.packFormat = "h"
    self.wavFile = wavFile = wave_open(filename, "wb")
    wavFile.setparams((self.outChannels, self.sampleWidth, UGen.samplingRate, 1,
                       "NONE", "not compressed"))
    UGen.add_sink(self)
def _load_from_file(self, source):
    """Load samples from the wav file at `source` into self._buffer
    (and self._buffer2 for stereo), normalized to [-1.0, 1.0].

    Raises UGenError for unsupported channel counts or sample widths,
    a sampling-rate mismatch, or compressed data.
    """
    self.filename = source
    self.wavFile = wavFile = wave_open(self.filename, "rb")
    channels = wavFile.getnchannels()
    sampleWidth = wavFile.getsampwidth()
    samplingRate = wavFile.getframerate()
    numSamples = wavFile.getnframes()
    compName = wavFile.getcompname()
    if (channels not in [1, 2]):
        msg = "for \"%s\", channels=%d is not supported" \
            % (self.filename, channels)
        raise UGenError(msg)
    if (sampleWidth not in [1, 2]):
        msg = "for \"%s\", sampleWidth=%d is not supported" \
            % (self.filename, sampleWidth)
        raise UGenError(msg)
    if (samplingRate != UGen.samplingRate):
        # Bug fix: the format string has three placeholders but only two
        # arguments were supplied, so reporting this error raised
        # TypeError instead of the intended UGenError.
        msg = "for \"%s\", samplingRate=%s does not match %s" \
            % (self.filename, samplingRate, UGen.samplingRate)
        raise UGenError(msg)
    if (compName != "not compressed"):
        msg = "for \"%s\", compName=\"%s\" is not supported" \
            % (self.filename, compName)
        raise UGenError(msg)
    self.inChannels = 0
    self.outChannels = channels
    # Largest positive sample value for this width (e.g. 32767 for 16-bit).
    sampleScale = (1 << (8*sampleWidth-1)) - 1
    # NOTE(review): 8-bit WAV data is conventionally unsigned; unpacking
    # with signed "b" assumes signed samples -- confirm for sampleWidth 1.
    if (sampleWidth == 1):
        packFormat = "b"
    else:
        packFormat = "h"
    if (channels == 2):
        packFormat = "2%s" % packFormat
    self._allocate(numSamples)
    if ("Clip" in UGen.debug):
        print >>stderr, "Clip.load(%s)" % self
        print >>stderr, "  channels    = %s" % channels
        print >>stderr, "  numSamples  = %s" % numSamples
        print >>stderr, "  sampleWidth = %s" % sampleWidth
        print >>stderr, "  sampleScale = %s" % sampleScale
        print >>stderr, "  packFormat  = %s" % packFormat
    if (channels == 1):
        for ix in xrange(numSamples):
            sample = wavFile.readframes(1)
            sample = struct_unpack(packFormat, sample)[0]
            # Clamp the single out-of-range value (-32768 for 16-bit) so
            # normalization stays within [-1, 1].
            if (sample < -sampleScale):
                sample = -sampleScale
            self._buffer[ix] = sample / float(sampleScale)
    else:  # (channels == 2):
        for ix in xrange(numSamples):
            sample = wavFile.readframes(1)
            (sample, sample2) = struct_unpack(packFormat, sample)
            if (sample < -sampleScale):
                sample = -sampleScale
            if (sample2 < -sampleScale):
                sample2 = -sampleScale
            self._buffer[ix] = sample / float(sampleScale)
            self._buffer2[ix] = sample2 / float(sampleScale)
    wavFile.close()
def _Stop() -> bytes:
    """Stop the recorder thread and return the captured audio as WAV bytes.

    Returns False (falsy, not bytes) when no recording is in progress;
    callers should test the result for truthiness.
    """
    global r, p, t, stream, frames
    if not r:
        return False
    r = False
    t.join()  # wait for the capture thread to finish appending frames
    # Assemble a complete WAV file in memory; the context manager closes
    # the writer even if a setter raises (the original leaked it then).
    obj = BytesIO()
    with wave_open(obj, "wb") as wf:
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(p.get_sample_size(FORMAT))
        wf.setframerate(RATE)
        wf.writeframes(b''.join(frames))
    return obj.getvalue()
def __init__(self, audio_path):
    """Open `audio_path` as a wav file and prepare a matching PyAudio
    output stream for playback."""
    super(Player, self).__init__()
    self.audio_path = audio_path
    # Frames read per playback iteration.
    self.chunk = 1024
    # Open the wav source and a PyAudio instance.
    wav = wave_open(audio_path, "rb")
    self.wave_obj = wav
    pa = PyAudio()
    self.p = pa
    # Configure the output stream from the wav file's own parameters.
    sample_format = pa.get_format_from_width(wav.getsampwidth())
    self.stream = pa.open(
        format=sample_format,
        channels=wav.getnchannels(),
        rate=wav.getframerate(),
        output=True,
    )
def perform_export(self, instance: WAV) -> bytes:
    """Render `instance` as a complete WAV byte string.

    Raises ExportingError for ADPCM-packed sounds unless self.enforce
    is set.
    """
    adpcm_packs = (SoundPack.ADPCM, SoundPack.ADPCM_RESIDENT)
    if not self.enforce and instance.sound_pack in adpcm_packs:
        raise ExportingError(
            f"Can not unpack {SoundPack(instance.sound_pack).name}")
    with BytesIO() as buffer:
        writer: Wave_write
        with wave_open(buffer, 'w') as writer:  # noqa
            writer.setnchannels(instance.channels)
            # sample_width is stored in bits; the wave module wants bytes.
            writer.setsampwidth(instance.sample_width // 8)
            writer.setframerate(float(instance.frame_rate))
            writer.writeframesraw(instance.frames)
        buffer.seek(0)
        return buffer.read()
def __call__(self, file_path):
    """Play the wav file at `file_path` through /dev/dsp via OSS."""
    from wave import open as wave_open
    from ossaudiodev import open as oss_open
    s = wave_open(file_path, 'rb')
    (nc, sw, fr, nf, comptype, compname) = s.getparams()
    dsp = oss_open('/dev/dsp', 'w')
    try:
        from ossaudiodev import AFMT_S16_NE
    except ImportError:
        # AFMT_S16_NE is missing on some platforms; pick the native-endian
        # 16-bit format from the byte order.  Bug fix: the original
        # referenced `ossaudiodev.AFMT_S16_LE` here, but only `open` had
        # been imported, so the module name was unbound -> NameError.
        from ossaudiodev import AFMT_S16_BE, AFMT_S16_LE
        from sys import byteorder
        if byteorder == "little":
            AFMT_S16_NE = AFMT_S16_LE
        else:
            AFMT_S16_NE = AFMT_S16_BE
    dsp.setparameters(AFMT_S16_NE, nc, fr)
    data = s.readframes(nf)
    s.close()
    dsp.write(data)
    dsp.close()
def set_chattime():
    """Set mp_chattime to the duration of the current winner sound,
    capped at 30 seconds; fall back to the server default when the
    sound is missing or its duration cannot be read."""
    # defaultChatTime is module state we may initialize on first call.
    global defaultChatTime
    # Remember the server's original chat time the first time through.
    if defaultChatTime == -1:
        defaultChatTime = int(mp_chattime)
    # If the sound does not exist on the server, use the defaultChatTime.
    if not sound_exists(winnerSounds[0]):
        mp_chattime.set(defaultChatTime)
        return
    # Get the path and extension of the sound file.
    soundPath = soundDir.joinpath(winnerSounds[0])
    extension = winnerSounds[0].split(".")[-1]
    duration = defaultChatTime
    if extension == 'mp3':
        # Best effort: keep the default duration if mp3info fails.
        try:
            info = mp3info(soundPath)
            duration = info['MM'] * 60 + info['SS']
        except Exception:
            pass
    elif extension == 'wav':
        # Track the handle explicitly so the cleanup below cannot hit an
        # unbound name when wave_open itself fails (the original raised
        # UnboundLocalError in its finally clause in that case).
        w = None
        try:
            w = wave_open(soundPath, 'rb')
            duration = float(w.getnframes()) / w.getframerate()
        except Exception:
            pass
        finally:
            if w is not None:
                w.close()
    # Never hold the intermission longer than 30 seconds.
    if duration > 30:
        duration = 30
    # Apply the new mp_chattime after a short delay.
    gamethread.delayed(5, mp_chattime.set, duration)
def set_chattime():
    """Set mp_chattime to the duration of the current winner sound,
    capped at 30 seconds; fall back to the server default when the
    sound is missing or its duration cannot be read."""
    # defaultChatTime is module state we may initialize on first call.
    global defaultChatTime
    # Remember the server's original chat time the first time through.
    if defaultChatTime == -1:
        defaultChatTime = int(mp_chattime)
    # If the sound does not exist on the server, use the defaultChatTime.
    if not sound_exists(winnerSounds[0]):
        mp_chattime.set(defaultChatTime)
        return
    # Get the path and extension of the sound file.
    soundPath = soundDir.joinpath(winnerSounds[0])
    extension = winnerSounds[0].split(".")[-1]
    duration = defaultChatTime
    if extension == 'mp3':
        # Best effort: keep the default duration if mp3info fails.
        try:
            info = mp3info(soundPath)
            duration = info['MM'] * 60 + info['SS']
        except Exception:
            pass
    elif extension == 'wav':
        # Track the handle explicitly so the cleanup below cannot hit an
        # unbound name when wave_open itself fails (the original raised
        # UnboundLocalError in its finally clause in that case).
        w = None
        try:
            w = wave_open(soundPath, 'rb')
            duration = float(w.getnframes()) / w.getframerate()
        except Exception:
            pass
        finally:
            if w is not None:
                w.close()
    # Never hold the intermission longer than 30 seconds.
    if duration > 30:
        duration = 30
    # Apply the new mp_chattime after a short delay.
    gamethread.delayed(5, mp_chattime.set, duration)
def play_with_ossaudiodev(file_name): import sys import sndhdr from contextlib import closing, nested from ossaudiodev import open as oss_open from wave import open as wave_open file_info = sndhdr.what(file_name) if not file_info or file_info[0] != "wav": print "Not supported audio file type" return with nested(closing(wave_open(file_name, "rb")), closing(oss_open("w"))) as (wav, dev): nc, sw, fr, nf, comptype, compname = wav.getparams() try: from ossaudiodev import (AFMT_S16_NE, AFMT_S16_BE, AFMT_S16_LE) except ImportError: AFMT_S16_NE = AFMT_S16_BE if sys.byteorder == "little": AFMT_S16_NE = AFMT_S16_LE dev.setparameters(AFMT_S16_NE, nc, fr) data = wav.readframes(nf) dev.write(data)
def write_sample(sample, fname):
    """Write `sample.arr` to `fname` as a mono, 16-bit wav file at the
    module-level SAMPLE_RATE."""
    with wave_open(fname, 'wb') as wav:
        wav.setnchannels(1)   # mono
        wav.setsampwidth(2)   # 16-bit samples
        wav.setframerate(SAMPLE_RATE)
        # The int16 ndarray is bytes-like, so it can be written directly.
        wav.writeframes(sample.arr.astype(np.int16))
def copy_wave(src, dest):
    """Copy wav `src` over `dest`, converting the channel count and
    padding the length so the replacement matches the destination sound.

    Prints a warning and returns without writing on framerate or sample
    width mismatches.  Writes nothing when `nohats_dir` is None
    (dry-run mode).
    """
    print("copy wave '{}' to '{}'".format(src, dest))
    # Read the destination's parameters first; the copy must match them.
    orig_file = source_file(dest)
    orig_input = wave_open(orig_file, "rb")
    try:
        orig_nframes = orig_input.getnframes()
        orig_nchannels = orig_input.getnchannels()
        orig_framerate = orig_input.getframerate()
        orig_sampwidth = orig_input.getsampwidth()
    finally:
        orig_input.close()
    src_file = source_file(src)
    wav_in = wave_open(src_file, "rb")
    try:
        nframes = wav_in.getnframes()
        frames = wav_in.readframes(nframes)
        nchannels = wav_in.getnchannels()
        sampwidth = wav_in.getsampwidth()
        framerate = wav_in.getframerate()
        # Convert null.wav into an empty copy of the original destination.
        if src == "sound/null.wav":
            framerate = orig_framerate
            sampwidth = orig_sampwidth
            frames = bytearray()
            nframes = 0
        # Sanity: we do not resample or requantize.
        if framerate != orig_framerate:
            print("Warning: source {} has framerate {} but destination {} has framerate {}".format(src, framerate, dest, orig_framerate), file=stderr)
            return
        if sampwidth != orig_sampwidth:
            print("Warning: source {} has sampwidth {} but destination {} has sampwidth {}".format(src, sampwidth, dest, orig_sampwidth), file=stderr)
            return
        # Fix the number of channels.
        if nchannels == orig_nchannels:
            pass
        elif nchannels == 1 and orig_nchannels == 2:
            # Mono to stereo: duplicate each sample.
            new_frames = bytearray()
            i = 0
            while i < len(frames):
                new_frames += frames[i:i+sampwidth]
                new_frames += frames[i:i+sampwidth]
                i += sampwidth
            assert i == len(frames)
            assert len(new_frames) == 2 * len(frames)
            frames = new_frames
            nchannels = orig_nchannels
        elif nchannels == 2 and orig_nchannels == 1:
            # Stereo to mono: average left and right (16-bit only).
            assert sampwidth == 2, sampwidth
            new_frames = bytearray()
            for i in range(nframes):
                frame_i = i*2*sampwidth
                frame_left = int.from_bytes(frames[frame_i:frame_i+sampwidth], byteorder="little", signed=True)
                frame_right = int.from_bytes(frames[frame_i+sampwidth:frame_i+sampwidth*2], byteorder="little", signed=True)
                new_frame = (frame_left + frame_right) // 2
                new_frames += new_frame.to_bytes(2, byteorder="little", signed=True)
            assert len(new_frames) == len(frames) / 2
            frames = new_frames
            nchannels = orig_nchannels
        else:
            assert False, "Don't know how to convert from {} channels to {} channels".format(nchannels, orig_nchannels)
        # Fill to original length, or at least two seconds, because of
        # static noise at the tail of too-short replacements.
        frames_needed = max(orig_nframes, 2 * framerate)
        nfiller_frames = max(frames_needed - nframes, 0)
        nfiller_bytes = nfiller_frames * sampwidth * nchannels
        if "_loop" in src:
            # Looping sounds: repeat the clip itself as filler.
            # Bug fix: the original appended to the unbound name
            # `filler_frame`, raising NameError on every looping sound.
            # NOTE(review): this loops forever if `frames` is empty while
            # filler is needed -- confirm looping sources are never empty.
            filler_frames = bytearray()
            while len(filler_frames) < nfiller_bytes:
                filler_frames += frames
            filler_frames = filler_frames[:nfiller_bytes]
        else:
            # Silence: 0x80 is the zero level for unsigned 8-bit audio,
            # 0x00 for signed 16-bit.
            if sampwidth == 1:
                filler_b = b"\x80"
            else:
                filler_b = b"\0"
            filler_frames = filler_b * nfiller_bytes
        frames += filler_frames
        orig_frames_len = orig_nframes * orig_sampwidth * orig_nchannels
        assert len(frames) >= orig_frames_len, "Converted {} has {} data bytes, but destination {} has {} data bytes".format(src, len(frames), dest, orig_frames_len)
        if nohats_dir is None:
            return
        dest_file = nohats_file(dest)
        dest_dir = dirname(dest_file)
        if not exists(dest_dir):
            makedirs(dest_dir)
        output = wave_open(dest_file, "wb")
        try:
            output.setparams(wav_in.getparams())
            # Override the params the conversion may have changed;
            # writeframes fixes the frame count afterwards.
            output.setnchannels(nchannels)
            output.setframerate(framerate)
            output.setsampwidth(sampwidth)
            output.writeframes(frames)
        finally:
            output.close()
    finally:
        wav_in.close()