Example #1
    def mix(destination_client: ClientObject, all_voice_frames: dict,
            settings: Settings):
        frames, gains = ArrayMixer.get_frames_and_gains(
            destination_client, all_voice_frames, settings)

        if len(frames) == 0:
            return None

        ratio = 1 / len(frames)
        final_sample = None
        for sample, gain in zip(frames, gains):
            fragment = audioop.mul(sample, consts.BYTES_PER_SAMPLE,
                                   gain * ratio)
            if final_sample is None:
                final_sample = fragment
            else:
                delta = len(final_sample) - len(fragment)

                # Delta==0 is the 99.9999% case. Run it first to save a few checks.
                if delta == 0:
                    final_sample = audioop.add(final_sample, fragment,
                                               consts.BYTES_PER_SAMPLE)
                elif delta > 0:  # final sample bigger
                    final_sample = audioop.add(final_sample,
                                               fragment + bytes(delta),
                                               consts.BYTES_PER_SAMPLE)
                elif delta < 0:  # fragment bigger
                    final_sample = audioop.add(final_sample + bytes(-delta),
                                               fragment,
                                               consts.BYTES_PER_SAMPLE)

        return audioop.mul(final_sample, consts.BYTES_PER_SAMPLE,
                           destination_client.volume * len(frames))
Example #2
def play(event):
    global data, state, layers
    state = "play"
    var.set("playing")

    while canPlay():
        print "playing"
        p = pyaudio.PyAudio()

        # wf = wave.open(WAVE_OUTPUT_FILENAME, 'rb')
        wf = wave.open("output1.wav", 'rb')
        if layers >= 2:
            wf2 = wave.open("output2.wav", 'rb')
            if layers >= 3:
                wf3 = wave.open("output3.wav", 'rb')
                if layers >= 4:
                    wf4 = wave.open("output4.wav", 'rb')

        # open stream for wav file playback
        stream = p.open(format =
                    p.get_format_from_width(wf.getsampwidth()),
                    channels = wf.getnchannels(),
                    rate = wf.getframerate(),
                    output = True)

        # read data from wav file
        data = wf.readframes(chunk)
        if layers >= 2:
            data2 = wf2.readframes(chunk)
            if layers >= 3:
                data3 = wf3.readframes(chunk)
                if layers >= 4:
                    data4 = wf4.readframes(chunk)

        # play stream from wav file
        while data != '' and canPlay():

            if layers >= 2:
                # note: audioop.add's third argument is the sample width in
                # bytes (wf.getsampwidth()), not the channel count
                data = audioop.add(data, data2, wf.getsampwidth())
                if layers >= 3:
                    data = audioop.add(data, data3, wf.getsampwidth())
                    if layers >= 4:
                        data = audioop.add(data, data4, wf.getsampwidth())

            stream.write(data)

            data = wf.readframes(chunk)
            if layers >= 2:
                data2 = wf2.readframes(chunk)
                if layers >= 3:
                    data3 = wf3.readframes(chunk)
                    if layers >= 4:
                        data4 = wf4.readframes(chunk)

            frame.update()
        stream.close()
        p.terminate()
Example #3
 def test_add(self):
     data2 = []
     for d in data:
         str = ''
         for s in d:
             str = str + chr(ord(s)*2)
         data2.append(str)
     self.assertEqual(audioop.add(data[0], data[0], 1), data2[0])
     self.assertEqual(audioop.add(data[1], data[1], 2), data2[1])
     self.assertEqual(audioop.add(data[2], data[2], 4), data2[2])
Example #4
 def test_add(self):
     data2 = []
     for d in data:
         str = bytearray(len(d))
         for i,b in enumerate(d):
             str[i] = 2*b
         data2.append(str)
     self.assertEqual(audioop.add(data[0], data[0], 1), data2[0])
     self.assertEqual(audioop.add(data[1], data[1], 2), data2[1])
     self.assertEqual(audioop.add(data[2], data[2], 4), data2[2])
Example #5
 def test_add(self):
     for w in 1, 2, 4:
         self.assertEqual(audioop.add(b"", b"", w), b"")
         self.assertEqual(audioop.add(datas[w], b"\0" * len(datas[w]), w), datas[w])
     self.assertEqual(audioop.add(datas[1], datas[1], 1), b"\x00\x24\x7f\x80\x7f\x80\xfe")
     self.assertEqual(audioop.add(datas[2], datas[2], 2), packs[2](0, 0x2468, 0x7FFF, -0x8000, 0x7FFF, -0x8000, -2))
     self.assertEqual(
         audioop.add(datas[4], datas[4], 4),
         packs[4](0, 0x2468ACF0, 0x7FFFFFFF, -0x80000000, 0x7FFFFFFF, -0x80000000, -2),
     )
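The expected values in Example #5 rely on audioop.add saturating rather than wrapping: sums outside the sample range clamp to the extremes for the given width (0x7f/-0x80 for width 1, 0x7fff/-0x8000 for width 2, and so on). A small standalone check of that clipping behavior (the sample values are illustrative):

import audioop
import struct

# Two pairs of 16-bit samples whose sums would overflow; "2h" uses native
# byte order, which is what audioop assumes.
a = struct.pack("2h", 30000, -30000)
b = struct.pack("2h", 10000, -10000)
mixed = audioop.add(a, b, 2)
print(struct.unpack("2h", mixed))  # (32767, -32768): clamped, not wrapped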
Example #6
 def test_add(self):
     data2 = []
     for d in data:
         str = ''
         for s in d:
             str = str + chr(ord(s) * 2)
         data2.append(str)
     self.assertEqual(audioop.add(data[0], data[0], 1), data2[0])
     self.assertEqual(audioop.add(data[1], data[1], 2), data2[1])
     self.assertEqual(audioop.add(data[2], data[2], 4), data2[2])
Example #7
 def test_add(self):
     data2 = []
     for d in data:
         str = bytearray(len(d))
         for i, b in enumerate(d):
             str[i] = 2 * b
         data2.append(str)
     self.assertEqual(audioop.add(data[0], data[0], 1), data2[0])
     self.assertEqual(audioop.add(data[1], data[1], 2), data2[1])
     self.assertEqual(audioop.add(data[2], data[2], 4), data2[2])
Example #8
 def test_add(self):
     for w in 1, 2, 4:
         self.assertEqual(audioop.add(b'', b'', w), b'')
         self.assertEqual(audioop.add(datas[w], b'\0' * len(datas[w]), w),
                          datas[w])
     self.assertEqual(audioop.add(datas[1], datas[1], 1),
                      b'\x00\x24\x7f\x80\x7f\x80\xfe')
     self.assertEqual(audioop.add(datas[2], datas[2], 2),
             packs[2](0, 0x2468, 0x7fff, -0x8000, 0x7fff, -0x8000, -2))
     self.assertEqual(audioop.add(datas[4], datas[4], 4),
             packs[4](0, 0x2468acf0, 0x7fffffff, -0x80000000,
                    0x7fffffff, -0x80000000, -2))
Example #9
def testadd(data):
	data2 = []
	for d in data:
		str = ''
		for s in d:
			str = str + chr(ord(s)*2)
		data2.append(str)
	if audioop.add(data[0], data[0], 1) <> data2[0] or \
		  audioop.add(data[1], data[1], 2) <> data2[1] or \
		  audioop.add(data[2], data[2], 4) <> data2[2]:
		return 0
	return 1
Example #10
 def test_add(self):
     for w in 1, 2, 4:
         self.assertEqual(audioop.add(b'', b'', w), b'')
         self.assertEqual(audioop.add(datas[w], b'\0' * len(datas[w]), w),
                          datas[w])
     self.assertEqual(audioop.add(datas[1], datas[1], 1),
                      b'\x00\x24\x7f\x80\x7f\x80\xfe')
     self.assertEqual(audioop.add(datas[2], datas[2], 2),
             packs[2](0, 0x2468, 0x7fff, -0x8000, 0x7fff, -0x8000, -2))
     self.assertEqual(audioop.add(datas[4], datas[4], 4),
             packs[4](0, 0x2468acf0, 0x7fffffff, -0x80000000,
                    0x7fffffff, -0x80000000, -2))
Example #11
def bass():
    framerate = 44100
    bass = gen_bass_track(framerate)
    hat = gen_hats_track(framerate)
    tom = gen_tom_track(framerate)
    outdata = audioop.add(bass, hat, 2)
    outdata = audioop.add(outdata, tom, 2)

    nframes = len(outdata) // 2
    params = (1, 2, framerate, nframes, "NONE", "not compressed")
    waveout = wave.open("out.wav", "wb")
    waveout.setparams(params)
    waveout.writeframes(outdata)
Example #12
def testadd(data):
    if verbose:
        print 'add'
    data2 = []
    for d in data:
        str = ''
        for s in d:
            str = str + chr(ord(s)*2)
        data2.append(str)
    if audioop.add(data[0], data[0], 1) != data2[0] or \
              audioop.add(data[1], data[1], 2) != data2[1] or \
              audioop.add(data[2], data[2], 4) != data2[2]:
        return 0
    return 1
Example #13
def testadd(data):
    if verbose:
        print 'add'
    data2 = []
    for d in data:
        str = ''
        for s in d:
            str = str + chr(ord(s) * 2)
        data2.append(str)
    if audioop.add(data[0], data[0], 1) != data2[0] or \
              audioop.add(data[1], data[1], 2) != data2[1] or \
              audioop.add(data[2], data[2], 4) != data2[2]:
        return 0
    return 1
Example #14
    def read(self):
        if self.overlay_source is None:
            return self.current_source.read()

        current = self.current_source.read()
        overlay = self.overlay_source.read()

        if not current:
            self.current_source = self.overlay_source
            self.overlay_source = None
            return overlay

        if not overlay:
            self.overlay_source = None
            return current

        if self.fade:
            vol = self.step * self._pos
            sub = 1 - vol
            self._pos += 1

            current = audioop.mul(current, 2, sub)
            overlay = audioop.mul(overlay, 2, vol)

        return audioop.add(current, overlay, 2)
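Example #14 is the usual crossfade recipe: scale each 16-bit source with audioop.mul and sum with audioop.add, ramping vol from 0 to 1 across successive read() calls. A reduced sketch of the same pattern over two in-memory PCM buffers (the chunk size, generator shape, and names are assumptions, not taken from the example):

import audioop

def crossfade(outgoing: bytes, incoming: bytes, steps: int, chunk: int = 3840):
    """Yield mixed chunks that fade `outgoing` down and `incoming` up."""
    for i in range(steps):
        vol = i / steps
        a = outgoing[i * chunk:(i + 1) * chunk]
        b = incoming[i * chunk:(i + 1) * chunk]
        if len(a) != len(b):  # audioop.add requires equal-length fragments
            n = min(len(a), len(b))
            a, b = a[:n], b[:n]
        yield audioop.add(audioop.mul(a, 2, 1 - vol),
                          audioop.mul(b, 2, vol), 2)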
Example #15
    def aw_callback(self, in_data, frame_count, time_info, status_flags):
        self._rms = self._rms * 0.7 + audioop.rms(in_data,
                                                  2) / 32768 * 100 * 0.3
        if self._rms < 5:
            in_data = audioop.mul(in_data, 2, (self._rms / 5)**8)

        if self._state & 2:  # LOOPER_PLAY & LOOPER_OVERDUB
            offset = self._bufsize * self._chunk_ptr
            if self._state == LOOPER_OVERDUB:
                recoffset = self._bufsize * (
                    (self._chunk_ptr - self.chunk_latency) % self._chunk_cnt)
                mixsrc = self._aud_buf[recoffset:recoffset +
                                       self._bufsize].tobytes()
                self._aud_buf[recoffset:recoffset +
                              self._bufsize] = array.array(
                                  'h', audioop.add(mixsrc, in_data, 2))
            data = self._aud_buf[offset:offset + self._bufsize].tobytes()
            self._chunk_ptr = (self._chunk_ptr + 1) % self._chunk_cnt
        else:  # LOOPER_STOP & LOOPER_RECORD
            if self._state == LOOPER_RECORD:
                self._aud_buf.frombytes(in_data)
                self._chunk_cnt += 1
                self._is_empty = False
            data = bytes(self._bufsize * 2)
        return (data, pyaudio.paContinue)
Example #16
    def read(self):
        if not self._run:
            return b""

        source_data = self.source.read()
        overlay_data = self._overlay_source.read()

        if not source_data:
            self.player.source = self._overlay_source
            self.vc.source = self._overlay_source
            self.cleanup()
            return overlay_data

        if not overlay_data:
            self.player.source = self.source
            self.vc.source = self.source
            self._overlay_source.cleanup()
            return source_data

        source_data = audioop.mul(source_data, 2,
                                  self.vol * (1 - self.vol_step))
        overlay_data = audioop.mul(overlay_data, 2, self.vol * self.vol_step)

        self.vol_change_step()

        return audioop.add(source_data, overlay_data, 2)
Example #17
 def writeframes(self, data):
     import time
     from SoundMgr import *
     import struct
     if not self._chan:
         import MacOS
         self._chan = MacOS.SndNewChannel(5, 0, self._callback)
     nframes = len(data) / self._nchannels / self._sampwidth
     if len(data) != nframes * self._nchannels * self._sampwidth:
         raise ValueError, 'data is not a whole number of frames'
     while self._gc and \
        self.getfilled() + nframes > \
       self._qsize / self._nchannels / self._sampwidth:
         time.sleep(0.1)
     if self._sampwidth == 1:
         import audioop
         data = audioop.add(data, '\x80' * len(data), 1)
     h1 = struct.pack('llhhllbbl',
                      id(data) + 12, self._nchannels, self._outrate, 0, 0,
                      0, extSH, 60, nframes)
     h2 = 22 * '\0'
     h3 = struct.pack('hhlll', self._sampwidth * 8, 0, 0, 0, 0)
     header = h1 + h2 + h3
     self._gc.append((header, data))
     self._chan.SndDoCommand((bufferCmd, 0, header))
     self._chan.SndDoCommand((callBackCmd, 0, 0))
Example #18
    def read(self) -> bytes:
        PCM: bytes = None

        DONE_SOURCES: List[int] = []
        for index, Source in self.Tracks:
            if len(self.Tracks) != 1 and Source.AudioFifo.samples < 960:
                continue

            Data: bytes = Source.read()

            if not Data:
                DONE_SOURCES.append(index)
                continue

            Data = audioop.mul(Data, 2, min(self.VOLUME_PER_SOURCE, 2.0))

            PCM = audioop.add(PCM, Data, 2) if PCM is not None else Data

        # delete from the highest index down so earlier indexes stay valid
        for index in sorted(DONE_SOURCES, reverse=True):
            del self._Tracks[index]

        if PCM is not None and self._volume != 1.0:
            PCM = audioop.mul(PCM, 2, min(self._volume, 2.0))

        return PCM
Example #19
 def run(self):
     if self.what != "None":
         bgm = waveOpen("bgm_mouthmoney.wav", "rb")
         self.keeprunning = True
         reses = []
         while (self.keeprunning):
             if len(reses) >= 128:  #TODO
                 # play back
                 self.soundout.stdin.write(b"".join(reses))
                 reses = []
             if self.scheduled and self.what != "bgm":
                 scheduled = self.scheduled
                 read0 = bgm.readframes(4)
                 if not read0:
                     bgm.rewind()
                     continue
                 read1 = scheduled.readframes(4)
                 if not read1:
                     self.scheduled = None
                     reses.append(read0)
                     continue
                 try:
                     reses.append(add(read0, read1, 4))
                 except:
                     pass  #really?
             else:
                 read = bgm.readframes(256)
                 if not read:
                     bgm.rewind()
                     continue
                 reses.append(read)
Example #20
    def read(self) -> bytes:
        fragments = []

        # TODO: We need to fix this somehow...
        # Copying dict each time is not a good way
        for source in self._audio_sources.copy():
            fragment = source.read()
            if len(fragment) == 0:
                self._loop.call_soon_threadsafe(
                    functools.partial(
                        self.remove_source,
                        source,
                        reason=AudioStatus.SOURCE_ENDED,
                    )
                )
                continue
            fragments.append(fragment)

        if len(fragments) == 0:
            return b""
        min_size = functools.reduce(
            lambda x, y: min(x, len(y)), fragments, len(fragments[0])
        )
        fragments = [
            fragment[0:min_size] if len(fragment) > min_size else fragment
            for fragment in fragments
        ]

        return functools.reduce(lambda x, y: audioop.add(x, y, 2), fragments)
Example #21
def gerarEcoWav(offset_ms=1000, factor=1):
    ## open the audio file and get its parameters
    [params, audio] = openWave(path_origwav, frames)
    """ 'num' delays after 'offset_ms' milliseconds, amplified by 'factor'. """
    if factor > 1:
        print('The resulting audio will be loud')

    # compute the number of bytes corresponding to the offset in milliseconds
    offset = params.sampwidth * offset_ms * int(params.framerate / 1000)

    # add extra space at the end for the delay
    audio_bytes = audio + (b'\0' * offset)

    # make a copy of the original audio to apply the delay to
    delayed_bytes = audio_bytes

    # create silence
    inicio = b'\0' * offset

    # remove space from the end
    fim = audio_bytes[:-offset]

    # multiply by the factor
    multiplied_end = mul(fim, params.sampwidth, factor**2)

    # add the two signals to produce the echoed signal
    delayed_audio = add(delayed_bytes, inicio + multiplied_end,
                        params.sampwidth)

    ## save the echoed audio
    output_wave(params, delayed_audio, path_echowav)
Example #22
	def loop(self, layers=3, keep = True, verbose = False):
		self._loop = []
		if verbose: print "chunk size: %s" % self.chunk
		if verbose: print "chunkcount: %s" % self.chunkcount
		if verbose: print "No. beats per chunk: %s" % (self.measures * self.sig / float(self.chunkcount))
		
		print_beats = self.chunkcount / (self.sig * self.measures)
		
		try:
			for layer in xrange(layers):
				if verbose: print
				for i in range(0, self.chunkcount):
					#track = []
					#sys.stderr.write("*")
					data = self._stream.read(self.chunk)
					#track.append(data)
					if i % print_beats == 0 and i != print_beats*4:
						if verbose: sys.stderr.write("*")
					if layer == 0:
						self._loop.append(data)
					else:
						if verbose: self._stream.write(self._loop[i], self.chunk)
						self._loop[i] = audioop.add(data, self._loop[i], self.CHANNELS)
				#self.tracks.append(track)
		except IOError, msg:
			print "IOERROR.  Typical.", msg
			self._stream.stop_stream()
			self._stream.close()
			self.p.terminate()
Example #23
 def readframes(self, nframes=-1):
     stretches = []
     counts = []
     width = self.__format.getbps() / 8
     for i in range(len(self.__readers)):
         rdr, cb = self.__readers[i]
         data, nf = rdr.readframes(nframes)
         if not data:
             # got to the end of this one
             self.__readers[i] = rdr, None
             if cb:
                 apply(cb[0], cb[1])
             continue
         newstretches = []
         newcounts = []
         for i in range(len(counts)):
             mixed = stretches[i]
             count = counts[i]
             if data:
                 minlen = min(len(data), len(mixed))
                 data0 = data[:minlen]
                 data = data[minlen:]
                 mixed0 = mixed[:minlen]
                 mixed = mixed[minlen:]
                 newstretches.append(audioop.add(mixed0, data0, width))
                 newcounts.append(count + 1)
             if mixed:
                 newstretches.append(mixed)
                 newcounts.append(count)
         newstretches.append(data)
         newcounts.append(1)
         stretches = newstretches
         counts = newcounts
     data = string.joinfields(stretches, '')
     return data, len(data) / (width * self.__format.getnchannels())
Example #24
    def writeframes(self, data):
        import time
        from Carbon.Sound import bufferCmd, callBackCmd, extSH
        import struct
        import MacOS
        if not self._chan:
            from Carbon import Snd
            self._chan = Snd.SndNewChannel(5, 0, self._callback)
        nframes = len(data) / self._nchannels / self._sampwidth
        if len(data) != nframes * self._nchannels * self._sampwidth:
            raise error, 'data is not a whole number of frames'
        while self._gc and self.getfilled(
        ) + nframes > self._qsize / self._nchannels / self._sampwidth:
            time.sleep(0.1)

        if self._sampwidth == 1:
            import audioop
            data = audioop.add(data, '\x80' * len(data), 1)
        h1 = struct.pack('llHhllbbl',
                         id(data) + MacOS.string_id_to_buffer, self._nchannels,
                         self._outrate, 0, 0, 0, extSH, 60, nframes)
        h2 = 22 * '\x00'
        h3 = struct.pack('hhlll', self._sampwidth * 8, 0, 0, 0, 0)
        header = h1 + h2 + h3
        self._gc.append((header, data))
        self._chan.SndDoCommand((bufferCmd, 0, header), 0)
        self._chan.SndDoCommand((callBackCmd, 0, 0), 0)
Example #25
def monoToStereo(fnL, fnR, outputFN, lfactor=1.0, rfactor=1.0):
    '''
    Given two audio files, combines them into a stereo audio file
    
    Derived mostly from the official python documentation
    https://docs.python.org/2/library/audioop.html
    '''
    
    def _monoToStereo(fn, leftBalance, rightBalance):
        audiofile = wave.open(fn, "r")
        params = audiofile.getparams()
        sampwidth = params[1]
        nframes = params[3]
        
        waveData = audiofile.readframes(nframes)
        sample = audioop.tostereo(waveData, sampwidth,
                                  leftBalance, rightBalance)
    
        return sample, params
    
    lsample, params = _monoToStereo(fnL, lfactor, 1 - lfactor)
    rsample = _monoToStereo(fnR, 1 - rfactor, rfactor)[0]

    sampwidth, framerate, nframes, comptype, compname = params[1:]
    
    stereoSamples = audioop.add(lsample, rsample, sampwidth)
    
    outputAudiofile = wave.open(outputFN, "w")

    params = [2, sampwidth, framerate, nframes, comptype, compname]
    outputAudiofile.setparams(params)
    outputAudiofile.writeframes(stereoSamples)
Example #26
 def chunks(self) -> Generator[memoryview, None, None]:
     silence = b"\0" * self.chunksize
     while not self._closed:
         chunks_to_mix = []
         active_samples = self.determine_samples_to_mix()
         for i, (name, s) in active_samples:
             try:
                 chunk = next(s)
                 if len(chunk) > self.chunksize:
                     raise ValueError("chunk from sample is larger than chunksize from mixer (" +
                                      str(len(chunk)) + " vs " + str(self.chunksize) + ")")
                 if len(chunk) < self.chunksize:
                     # pad the chunk with some silence
                     chunk = memoryview(chunk.tobytes() + silence[len(chunk):])
                 chunks_to_mix.append(chunk)
             except StopIteration:
                 self.remove_sample(i, True)
         chunks_to_mix = chunks_to_mix or [silence]      # type: ignore
         assert all(len(c) == self.chunksize for c in chunks_to_mix)
         mixed = chunks_to_mix[0]
         if len(chunks_to_mix) > 1:
             for to_mix in chunks_to_mix[1:]:
                 mixed = audioop.add(mixed, to_mix, params.norm_nchannels)
             mixed = memoryview(mixed)
         self.chunks_mixed += 1
         yield mixed
Example #27
def monoToStereo(fnL, fnR, outputFN, lfactor=1.0, rfactor=1.0):
    '''
    Given two audio files, combines them into a stereo audio file
    
    Derived mostly from the official python documentation
    https://docs.python.org/2/library/audioop.html
    '''
    def _monoToStereo(fn, leftBalance, rightBalance):
        audiofile = wave.open(fn, "r")
        params = audiofile.getparams()
        sampwidth = params[1]
        nframes = params[3]

        waveData = audiofile.readframes(nframes)
        sample = audioop.tostereo(waveData, sampwidth, leftBalance,
                                  rightBalance)

        return sample, params

    lsample, params = _monoToStereo(fnL, lfactor, 1 - lfactor)
    rsample = _monoToStereo(fnR, 1 - rfactor, rfactor)[0]

    sampwidth, framerate, nframes, comptype, compname = params[1:]

    stereoSamples = audioop.add(lsample, rsample, sampwidth)

    outputAudiofile = wave.open(outputFN, "w")

    params = [2, sampwidth, framerate, nframes, comptype, compname]
    outputAudiofile.setparams(params)
    outputAudiofile.writeframes(stereoSamples)
Example #28
 def mix(self,
         other: 'Sample',
         other_seconds: Optional[float] = None,
         pad_shortest: bool = True) -> 'Sample':
     """
     Mix another sample into the current sample.
     You can limit the length taken from the other sample.
     When pad_shortest is False, no sample length adjustment is done.
     """
     if self.__locked:
         raise RuntimeError("cannot modify a locked sample")
     assert self.samplewidth == other.samplewidth
     assert self.samplerate == other.samplerate
     assert self.nchannels == other.nchannels
     frames1 = self.__frames
     if other_seconds:
         frames2 = other.__frames[:other.frame_idx(other_seconds)]
     else:
         frames2 = other.__frames
     if pad_shortest:
         if len(frames1) < len(frames2):
             frames1 += b"\0" * (len(frames2) - len(frames1))
         elif len(frames2) < len(frames1):
             frames2 += b"\0" * (len(frames1) - len(frames2))
     self.__frames = audioop.add(frames1, frames2, self.samplewidth)
     return self
Example #29
 def mix_at(self,
            seconds: float,
            other: 'Sample',
            other_seconds: Optional[float] = None) -> 'Sample':
     """
     Mix another sample into the current sample at a specific time point.
     You can limit the length taken from the other sample.
     """
     if seconds == 0.0:
         return self.mix(other, other_seconds)
     if self.__locked:
         raise RuntimeError("cannot modify a locked sample")
     assert self.samplewidth == other.samplewidth
     assert self.samplerate == other.samplerate
     assert self.nchannels == other.nchannels
     start_frame_idx = self.frame_idx(seconds)
     if other_seconds:
         other_frames = other.__frames[:other.frame_idx(other_seconds)]
     else:
         other_frames = other.__frames
     # Mix the frames. Unfortunately audioop requires splitting and copying the sample data, which is slow.
     pre, to_mix, post = self._mix_split_frames(len(other_frames),
                                                start_frame_idx)
     self.__frames = b""  # allow for garbage collection
     mixed = audioop.add(to_mix, other_frames, self.samplewidth)
     del to_mix  # more garbage collection
     self.__frames = self._mix_join_frames(pre, mixed, post)
     return self
Example #30
    def writeframes(self, data):
        import time
        from Carbon.Sound import bufferCmd, callBackCmd, extSH
        import struct
        import MacOS
        if not self._chan:
            from Carbon import Snd
            self._chan = Snd.SndNewChannel(5, 0, self._callback)
        nframes = len(data) / self._nchannels / self._sampwidth
        if len(data) != nframes * self._nchannels * self._sampwidth:
            raise error, 'data is not a whole number of frames'
        while self._gc and self.getfilled(
        ) + nframes > self._qsize / self._nchannels / self._sampwidth:
            time.sleep(0.1)

        if self._sampwidth == 1:
            import audioop
            data = audioop.add(data, '\x80' * len(data), 1)
        h1 = struct.pack('llHhllbbl',
                         id(data) + MacOS.string_id_to_buffer, self._nchannels,
                         self._outrate, 0, 0, 0, extSH, 60, nframes)
        h2 = 22 * '\x00'
        h3 = struct.pack('hhlll', self._sampwidth * 8, 0, 0, 0, 0)
        header = h1 + h2 + h3
        self._gc.append((header, data))
        self._chan.SndDoCommand((bufferCmd, 0, header), 0)
        self._chan.SndDoCommand((callBackCmd, 0, 0), 0)
Example #31
	def run(self):
		if self.what != "None":
			bgm = waveOpen("bgm_mouthmoney.wav", "rb")
			self.keeprunning = True
			reses = []
			while(self.keeprunning):
				if len(reses) >= 128: #TODO
					# play back
					self.soundout.stdin.write(b"".join(reses))
					reses = []
				if self.scheduled and self.what != "bgm":
					scheduled = self.scheduled
					read0 = bgm.readframes(4)
					if not read0:
						bgm.rewind()
						continue
					read1 = scheduled.readframes(4)
					if not read1:
						self.scheduled = None
						reses.append(read0)
						continue
					try:
						reses.append(add(read0, read1, 4))
					except:
						pass #really?
				else:
					read = bgm.readframes(256)
					if not read:
						bgm.rewind()
						continue
					reses.append(read)
Example #32
	def loop(self):
		"""Master loop""" 
		import audioop

		self.cursor_time=time.time()
		while self.mumble.is_alive():
			if self.cursor_time < time.time() - BUFFER:  # it's time to check audio
				base_sound = None

				for user in self.mumble.users.values():  # check the audio queue of each user

					if user.sound.is_sound():
						# available sound is to be treated now and not later
						sound = user.sound.get_sound(FLOAT_RESOLUTION)
						
						if base_sound is None:
							base_sound = sound.pcm
						else:
							base_sound = audioop.add(base_sound, sound.pcm, 2)

				if base_sound:
					self.mumble.sound_output.add_sound(base_sound)

				self.cursor_time += FLOAT_RESOLUTION
			else:
				time.sleep(FLOAT_RESOLUTION)
Example #33
    def compile(self) -> AudioSegment:
        result = AudioSegment.empty()
        result._data = bytearray(result._data)

        for note in self.notes:
            if len(note.sound) == 0:
                continue

            sync_result, note_snd = AudioSegment._sync(result, note.sound)
            if sync_result is not result:
                result = sync_result
                result._data = bytearray(result._data)

            note_snd = audioseg_adjust_volume(note_snd, note.volume)

            time_ms = note.timestamp.total_seconds() * 1000
            byte_offset = result._parse_position(time_ms) * result.frame_width
            new_data = note_snd._data
            min_len = byte_offset + len(new_data)

            if len(result._data) < min_len:
                result._data += bytearray(min_len - len(result._data))

            cur_data = result._data[byte_offset:min_len]
            mixed_data = audioop.add(cur_data, new_data, result.sample_width)

            result._data[byte_offset:min_len] = mixed_data

        return result
Example #34
    def overlay(self, seg, position=0, loop=False):
        output = TemporaryFile()

        seg1, seg2 = AudioSegment._sync(self, seg)
        sample_width = seg1.sample_width
        spawn = seg1._spawn

        output.write(seg1[:position]._data)

        # drop down to the raw data
        seg1 = seg1[position:]._data
        seg2 = seg2._data
        pos = 0
        seg1_len = len(seg1)
        seg2_len = len(seg2)
        while True:
            remaining = max(0, seg1_len - pos)
            if seg2_len >= remaining:
                seg2 = seg2[:remaining]
                seg2_len = remaining
                loop = False

            output.write(audioop.add(seg1[pos:pos + seg2_len], seg2,
                sample_width))
            pos += seg2_len

            if not loop:
                break

        output.write(seg1[pos:])

        return spawn(data=output)
Example #35
    def overlay(self, seg, position=0, loop=False):
        output = TemporaryFile()

        seg1, seg2 = AudioSegment._sync(self, seg)
        sample_width = seg1.sample_width
        spawn = seg1._spawn

        output.write(seg1[:position]._data)

        # drop down to the raw data
        seg1 = seg1[position:]._data
        seg2 = seg2._data
        pos = 0
        seg1_len = len(seg1)
        seg2_len = len(seg2)
        while True:
            remaining = max(0, seg1_len - pos)
            if seg2_len >= remaining:
                seg2 = seg2[:remaining]
                seg2_len = remaining
                loop = False

            output.write(audioop.add(seg1[pos:pos + seg2_len], seg2,
                                     sample_width))
            pos += seg2_len

            if not loop:
                break

        output.write(seg1[pos:])

        return spawn(data=output)
Example #36
    def _combine_audio(self):
        """Combines all audio in self.files into one song of raw audio."""
        if len(self.files) == 0:
            return None
        elif len(self.files) == 1:
            return self.files[0].data

        # Find length of longest audiofile.
        longest = 0
        for file in self.files:
            length = len(file.data)
            if file.info.get('channels', 2) == 1:
                # Mono segments will be doubled when converted to stereo.
                length *= 2

            if length > longest:
                longest = length

        combined = bytes(longest)
        for file in self.files:
            data = file.data

            # Convert to stereo if mono.
            if file.info.get('channels', 2) == 1:
                data = audioop.tostereo(data, int(IMPORT_WIDTH / 8), 1, 1)

            data += bytes(longest - len(data))
            combined = audioop.add(combined, data, int(IMPORT_WIDTH / 8))

        return combined
Example #37
def mix(layers, leftalign=True, boost=2.0):
    """ mixes N stereo audio strings """
    attenuation = 1.0 / len(layers)
    attenuation *= boost 
    layers.sort(key = len)
    output_length = flen(layers[-1])

    out = pad('', output_length, 0) 

    for layer in layers:
        padding = output_length - flen(layer) 

        if leftalign:
            layer = pad(layer, 0, padding)
        else:
            layer = pad(layer, padding, 0)

        layer = audioop.mul(layer, audio_params[1], attenuation)

        if len(layer) != ftc(output_length) or len(out) != ftc(output_length):
            dif = int(math.fabs(len(layer) - len(out)))
            log('unequal'+str(dif))
            if len(out) < len(layer):
                layer = layer[:len(layer) - dif]
            else:
                out = out[:len(out) - dif]

        out = audioop.add(out, layer, audio_params[1])

    return out 
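Like Example #1, this mixer attenuates each layer by 1/N before summing, so N full-scale inputs cannot push audioop.add into clipping. A compact sketch of that N-way pattern (names are illustrative; the fragments are assumed non-empty, equal-length, and 16-bit):

import audioop
from functools import reduce

def mix_layers(layers, width=2):
    """Mix equal-length fragments, scaling each by 1/N to avoid clipping."""
    gain = 1.0 / len(layers)
    scaled = [audioop.mul(layer, width, gain) for layer in layers]
    return reduce(lambda a, b: audioop.add(a, b, width), scaled)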
Example #38
 def mix(self, mixer_channels, bufsize):
     """Mix the next batch buffer.
     Each object in the mixer_channels list must be a file-like object
     with a 'read(size)' method."""
     data = ''
     already_seen = {}
     channels = mixer_channels[:]
     channels.reverse()
     for c in channels:
         if already_seen.has_key(c):
             data1 = ''
         else:
             data1 = c.read(bufsize)
             already_seen[c] = 1
         if data1:
             l = min(len(data), len(data1))
             data = (audioop.add(data[:l], data1[:l], 1) +
                     (data1[l:] or data[l:]))
         else:
             try:
                 mixer_channels.remove(c)
             except ValueError:
                 pass
     data += self.queue * ((bufsize - len(data)) / self.bytes)
     self.queue = data[-self.bytes:]
     return data
Example #39
def mul_stereo(fileName, width, lfactor, rfactor):
    lsample = audioop.tomono(fileName, width, 1, 0)
    rsample = audioop.tomono(fileName, width, 0, 1)
    lsample = audioop.mul(lsample, width, lfactor)
    rsample = audioop.mul(rsample, width, rfactor)
    lsample = audioop.tostereo(lsample, width, 1, 0)
    rsample = audioop.tostereo(rsample, width, 0, 1)
    return audioop.add(lsample, rsample, width)
Example #40
def mul_stereo(fileName, width, lfactor, rfactor):
    lsample = audioop.tomono(fileName, width, 1, 0)
    rsample = audioop.tomono(fileName, width, 0, 1)
    lsample = audioop.mul(lsample, width, lfactor)
    rsample = audioop.mul(rsample, width, rfactor)
    lsample = audioop.tostereo(lsample, width, 1, 0)
    rsample = audioop.tostereo(rsample, width, 0, 1)
    return audioop.add(lsample, rsample, width)
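mul_stereo shows the standard audioop recipe for per-channel gain: split the stereo fragment into two mono fragments with tomono, scale each with mul, expand each back into a stereo fragment carrying only its own channel with tostereo, and recombine with add. A hedged usage sketch around WAV files (the file names are placeholders and the input is assumed to be stereo):

import wave
import audioop

with wave.open("in.wav", "rb") as f:       # placeholder input path
    params = f.getparams()
    frames = f.readframes(params.nframes)

quieter_left = mul_stereo(frames, params.sampwidth, 0.25, 1.0)

with wave.open("out.wav", "wb") as f:      # placeholder output path
    f.setparams(params)
    f.writeframes(quieter_left)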
Example #41
def create(t):
    count = 0
    countList = []
    countList.append(0)
    x = 0
    l = []
    l.append(begin())
    print(l[0].getparams()[3])
    w = wave.open("mix.wav", 'w')
    fs = l[0].getparams()
    w.setparams(fs)
    while (x < t):
        l.extend(tbb(count, countList))
        x += 1
    countList.append(len(l))
    l.append(end())
    counter = 0
    for x in l:
        f = None
        b = overAny()
        two = x.readframes(fs)
        if (b != 'no'):
            one = b.readframes(fs)
            #this 'if' is for unequal lengths
            #i fixed the only problem identified
            #but im leaving it here for now
            if (len(one) != len(two)):
                two = two[:len(one)]
            f = audioop.add(one, two, x.getparams()[1])
            w.writeframes(f)
        else:
            if ((counter in countList) or (counter % 2 == 1)):
                print('ouch!\n')
                w.writeframes(two)
            else:
                ra = Brandon()
                three = ra.readframes(fs)
                two = audioop.add(two, three, x.getparams()[1])
                w.writeframes(two)
        counter += 1
    w.close()
    for x in l:
        x.close()

    return "Success :P"
Example #42
def echocancel(outputdata, inputdata):
    pos = audioop.findmax(outputdata, 800)  # one tenth second
    out_test = outputdata[pos * 2:]
    in_test = inputdata[pos * 2:]
    ipos, factor = audioop.findfit(in_test, out_test)
    prefill = '\0' * (pos + ipos) * 2
    postfill = '\0' * (len(inputdata) - len(prefill) - len(outputdata))
    outputdata = prefill + audioop.mul(outputdata, 2, -factor) + postfill
    return audioop.add(inputdata, outputdata, 2)
Example #43
def echocancel(outputdata, inputdata):
    pos = audioop.findmax(outputdata, 800)   # one tenth second
    out_test = outputdata[pos*2:]
    in_test = inputdata[pos*2:]
    ipos, factor = audioop.findfit(in_test, out_test)
    prefill = '\0' * (pos + ipos) * 2
    postfill = '\0' * (len(inputdata) - len(prefill) - len(outputdata))
    outputdata = prefill + audioop.mul(outputdata, 2, -factor) + postfill
    return audioop.add(inputdata, outputdata, 2)
Example #44
def delay(audio_bytes, params, offset_ms):
    """version 1: delay after 'offset_ms' milliseconds"""
    # calculate the number of bytes which corresponds to the offset in milliseconds
    offset = params.sampwidth * offset_ms * int(params.framerate / 1000)
    # create some silence
    beginning = b'\0' * offset
    # remove space from the end
    end = audio_bytes[:-offset]
    return add(audio_bytes, beginning + end, params.sampwidth)
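The add works because `beginning + end` has exactly the same length as audio_bytes: offset bytes of silence are prepended and the same number trimmed from the end. One subtlety: for multi-channel audio the offset should be a whole number of frames (a multiple of sampwidth * nchannels), or the delayed copy lands mid-frame and the channels swap. A hedged variant that keeps the offset frame-aligned (it assumes params has an nchannels field, as wave's getparams() result does):

from audioop import add

def delay_frames(audio_bytes, params, offset_ms):
    """Like delay(), but frame-aligned for any channel count."""
    frame_size = params.sampwidth * params.nchannels
    offset = int(params.framerate * offset_ms / 1000) * frame_size
    if offset == 0:
        return audio_bytes
    beginning = b'\0' * offset
    end = audio_bytes[:-offset]
    return add(audio_bytes, beginning + end, params.sampwidth)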
Example #45
def pan(slice, pan_pos=0.5, amp=1.0):
    amps = pantamp(pan_pos)

    lslice = audioop.tomono(slice, audio_params[1], 1, 0)
    lslice = audioop.tostereo(lslice, audio_params[1], amps[0], 0)

    rslice = audioop.tomono(slice, audio_params[1], 0, 1)
    rslice = audioop.tostereo(rslice, audio_params[1], 0, amps[1])

    slice = audioop.add(lslice, rslice, audio_params[1])
    return audioop.mul(slice, audio_params[1], amp)
Example #46
 def echocancel(outputdata, inputdata):
     """Try to identify an echo and remove it.
     Should contain 2-byte samples"""
     pos = audioop.findmax(outputdata, 800)
     out_test = outputdata[pos*2:]
     in_test = inputdata[pos*2:]
     ipos, factor = audioop.findfit(in_test, out_test)
     factor = audioop.findfactor(in_test[ipos*2:ipos*2+len(out_test)], out_test)
     prefill = '\0'*(pos+ipos)*2
     postfill = '\0'*(len(inputdata) - len(prefill) - len(outputdata))
     outputdata = prefill + audioop.mul(outputdata, 2, -factor) + postfill
     return audioop.add(inputdata, outputdata, 2)
Example #47
	def send_audio(self, buffer, ctcss=None):
		"""Send audio to radio transceiver using the soundcard
		
		ctcss -- Tuple containing (frequecy, amplitude) for CTCSS code generation
		"""
		if not self.soundcard: self.debug("soundcard not opened"); return
		if not buffer: return
				
		if ctcss and self.ctcss_generator:
			freq, amplitude = ctcss
			ctcss_buffer = self.ctcss_generator.generate(len(buffer), amplitude, freq)
			buffer = audioop.add(buffer, ctcss_buffer, self.sample_width)

		self.soundcard.write(buffer)
Example #48
 def mixAudio(self):
     # XXX see the comment above about storing a decaying number for the
     # volume. For instance, each time round the loop, take the calculated
     # volume, and the stored volume, and do something like:
     # newStoredVolume = (oldStoredVolume * 0.33) + (thisPacketVolume * 0.66)
     import audioop
     self._audioOut = {}
     if not self._open:
         log.msg('mixing closed room %r'%(self,), system='doug')
         return
     audioIn = {}
     for m in self._members:
         bytes = m.getAudioForRoom()
         if bytes: audioIn[m] = bytes
     if CONFDEBUG:
         print "room %r has %d members"%(self, len(self._members))
         print "got %d samples this time"%len(audioIn)
         print "samples: %r"%(audioIn.items(),)
     # short-circuit this case
     if len(self._members) < 2:
         if CONFDEBUG:
             print "less than 2 members, no sound"
         self._audioOutDefault = ''
         return
     # Samples is (confsource, audio)
     samples = audioIn.items()
     # power is three-tuples of (rms,audio,confsource)
     power = [ (audioop.rms(x[1],2),x[1], x[0]) for x in samples ]
     power.sort(); power.reverse()
     if CONFDEBUG:
         for rms,audio,confsource in power:
             print confsource, rms
     # Speakers is a list of the _maxSpeakers loudest speakers
     speakers = Set([x[2] for x in power[:self._maxSpeakers]])
     # First we calculate the 'default' audio. Used for everyone who's
     # not a speaker in the room.
     samples = [ x[1] for x in power[:self._maxSpeakers] ]
     scaledsamples = [ audioop.mul(x, 2, 1.0/len(samples)) for x in samples ]
     if scaledsamples:
         # ooo. a use of reduce. first time for everything...
         try:
             combined = reduce(lambda x,y: audioop.add(x, y, 2), scaledsamples)
         except audioop.error, exc:
             # XXX tofix!
             print "combine got error %s"%(exc,)
             print "lengths", [len(x) for x in scaledsamples]
             combined = ''
Example #49
	def run(self):
		while 1:
			olddata = data = self.iport.readsamps(600)
			if self.do_ulaw:
				data = audioop.lin2ulaw(data, 2)
				data = audioop.ulaw2lin(data, 2)
			if self.do_adpcm:
				data, nacstate = audioop.lin2adpcm(data, 2, \
					  self.acstate)
				data, dummy = audioop.adpcm2lin(data, 2, \
					  self.acstate)
				self.acstate = nacstate
			if self.do_diff:
				olddata = audioop.mul(olddata, 2, -1)
				data = audioop.add(olddata, data, 2)
			self.oport.writesamps(data)
			fl.check_forms()
Example #50
def record_handler():
    global song
    global current_song_filename
    global num_tracks
    r = twiml.Response()
    phone_number = request.values.get('From')

    flask_dir = os.path.dirname(os.path.abspath(__file__))
    static_dir = os.path.join(flask_dir, "static/")
    rec_string = urllib.urlopen(request.values.get('RecordingUrl')).read()

    while string.find(rec_string, 'RestException') != -1:
        rec_string = urllib.urlopen(request.values.get('RecordingUrl')).read()

    rec_file = StringIO.StringIO(rec_string)
    recording = wave.open(rec_file)
    s = recording.readframes(NUM_FRAMES)
    if len(s) == 0:
        s = '\x00' * NUM_FRAMES
    elif len(s) < NUM_FRAMES:
        s = (s * ((NUM_FRAMES / len(s)) + 1))
    song[phone_number] = audioop.add(s[:NUM_FRAMES], song[phone_number], 2)

    # Need a unique filename so Twilio won't cache it
    if current_song_filename[phone_number] != '':
        os.remove(os.path.join(static_dir, current_song_filename[phone_number]))
    current_song_filename[phone_number] = str(num_tracks[phone_number]) + base_song_filename[phone_number]

    song_file_full_path = os.path.join(static_dir, current_song_filename[phone_number])
    song_file = wave.open(song_file_full_path, 'w')
    song_file.setnchannels(1)
    song_file.setsampwidth(2)
    song_file.setframerate(8000)
    song_file.writeframes(song[phone_number])
    song_file.close()
    num_tracks[phone_number] += 1
    r.play(url_for('static', filename=current_song_filename[phone_number]))

    with r.gather(method='GET', numDigits=1, action='/user_option') as g:
        g.say('Press 1 to record another track or 2 to finish')
    return str(r)
Example #51
 def mix(self, other, other_seconds=None, pad_shortest=True):
     """
     Mix another sample into the current sample.
     You can limit the length taken from the other sample.
     When pad_shortest is False, no sample length adjustment is done.
     """
     assert not self.__locked
     assert self.samplewidth == other.samplewidth
     assert self.samplerate == other.samplerate
     assert self.nchannels == other.nchannels
     frames1 = self.__frames
     if other_seconds:
         frames2 = other.__frames[:other.frame_idx(other_seconds)]
     else:
         frames2 = other.__frames
     if pad_shortest:
         if len(frames1) < len(frames2):
             frames1 += b"\0"*(len(frames2)-len(frames1))
         elif len(frames2) < len(frames1):
             frames2 += b"\0"*(len(frames1)-len(frames2))
     self.__frames = audioop.add(frames1, frames2, self.samplewidth)
     return self
Example #52
 def mix_at(self, seconds, other, other_seconds=None):
     """
     Mix another sample into the current sample at a specific time point.
     You can limit the length taken from the other sample.
     """
     if seconds == 0.0:
         return self.mix(other, other_seconds)
     assert not self.__locked
     assert self.samplewidth == other.samplewidth
     assert self.samplerate == other.samplerate
     assert self.nchannels == other.nchannels
     start_frame_idx = self.frame_idx(seconds)
     if other_seconds:
         other_frames = other.__frames[:other.frame_idx(other_seconds)]
     else:
         other_frames = other.__frames
     # Mix the frames. Unfortunately audioop requires splitting and copying the sample data, which is slow.
     pre, to_mix, post = self._mix_split_frames(len(other_frames), start_frame_idx)
     self.__frames = None  # allow for garbage collection
     mixed = audioop.add(to_mix, other_frames, self.samplewidth)
     del to_mix  # more garbage collection
     self.__frames = self._mix_join_frames(pre, mixed, post)
     return self
Example #53
	def writeframes(self, data):
		import time
		from SoundMgr import *
		import struct
		if not self._chan:
			import MacOS
			self._chan = MacOS.SndNewChannel(5, 0, self._callback)
		nframes = len(data) / self._nchannels / self._sampwidth
		if len(data) != nframes * self._nchannels * self._sampwidth:
			raise ValueError, 'data is not a whole number of frames'
		while self._gc and \
			  self.getfilled() + nframes > \
				self._qsize / self._nchannels / self._sampwidth:
			time.sleep(0.1)
		if self._sampwidth == 1:
			import audioop
			data = audioop.add(data, '\x80'*len(data), 1)
		h1 = struct.pack('llhhllbbl',
			id(data)+12,
			self._nchannels,
			self._outrate, 0,
			0,
			0,
			extSH,
			60,
			nframes)
		h2 = 22*'\0'
		h3 = struct.pack('hhlll',
			self._sampwidth*8,
			0,
			0,
			0,
			0)
		header = h1+h2+h3
		self._gc.append((header, data))
		self._chan.SndDoCommand((bufferCmd, 0, header))
		self._chan.SndDoCommand((callBackCmd, 0, 0))
Example #54
def fx_echo(chunk_p):
    global time
    global loop_frames
    global loop_times
    global delay
    global intense
    global setup

    if len(chunk_p) != 2 * CHUNK:
        print('[echo] chunk size is not %d but %d' % (2 * CHUNK, len(chunk_p)))
        return chunk_p

    if not setup:
        print('[echo] variables are not set')
        return chunk_p

    #save data
    loop_times.append(time)    
    loop_frames.append(chunk_p)

    #count value
    i = 0
    while i < len(loop_frames):
        if (time - loop_times[i]) % delay == 0 and time != loop_times[i]:
            loop_frames[i] = audioop.mul(loop_frames[i], 2, intense)
            if audioop.rms(loop_frames[i], 2) < 1:
                del loop_frames[i]
                del loop_times[i]
                i = i - 1
            else:
                chunk_p = audioop.add(chunk_p, loop_frames[i], 2)
        i = i + 1
    del i

    time = time + 1
    return chunk_p
Example #55
QSIZE = 100000
Example #56
    # e.volume = 1000
    data = e.read()
    with open_device(e) as d:
        print(d)
        print(repr(e))
        while True:
            try:
                d.stop_stream()
                text = input()
                d.start_stream()
            except:
                break
            print(text)
            e.pitch = 50
            e.range = 50
            e.write(text)
            data = e.read()
            e.pitch = 100
            e.range = 20
            e.write(text)
            data2 = e.read()
            if len(data) > len(data2):
                data2 += bytearray(len(data) - len(data2))
            elif len(data2) > len(data):
                data += bytearray(len(data2) - len(data))
            data3 = add(data, data2, 1)
            d.write(data3)
Example #57
from Carbon.Sound import *
from Carbon import Snd

import aifc, audioop

fn = 'f:just samples:2ndbeat.aif'
af = aifc.open(fn, 'r')
print af.getparams()
print 'nframes  =', af.getnframes()
print 'nchannels =', af.getnchannels()
print 'framerate =', af.getframerate()
nframes = min(af.getnframes(), 100000)
frames = af.readframes(nframes)
print 'len(frames) =', len(frames)
print repr(frames[:100])
frames = audioop.add(frames, '\x80'*len(frames), 1)
print repr(frames[:100])

import struct

header1 = struct.pack('llhhllbbl',
                      0,
                      af.getnchannels(),
                      af.getframerate(),0,
                      0,
                      0,
                      0xFF,
                      60,
                      nframes)
print repr(header1)
header2 = struct.pack('llhlll', 0, 0, 0, 0, 0, 0)
Example #58
        allsamples = {}
        for p,sample,speaker in power:
            allsamples[speaker] = p, sample
        for s in speakers:
            # For each speaker, take the set of (other speakers), grab
            # the top N speakers, and combine them. Add to the _audioOut
            # dictionary
            all = allsamples.copy()
            del all[s]
            power = all.values()
            power.sort() ; power.reverse()
            samples = [ x[1] for x in power[:self._maxSpeakers] ]
            if samples:
                scaled = [ audioop.mul(x, 2, 1.0/len(samples)) for x in samples]
                try:
                    out = reduce(lambda x,y: audioop.add(x, y, 2), scaled)
                except audioop.error, exc:
                    # XXX tofix!
                    print "combine got error %s"%(exc,)
                    print "lengths", [len(x) for x in scaled]
                    out = ''
            else:
                out = ''
            if CONFDEBUG:
                print "calc for", s, "is", audioop.rms(out, 2)
            self._audioOut[s] = out

_RegisterOfAllRooms = {}

_StickyRoomNames = {}