Example no. 1
        note = np.sin(2 * np.pi * t * freq)

        if tone[2]:
            note *= np.exp(-coeff_exp * (t - N_note / fs))
        outdata += note
    outdata = outdata.reshape(-1, 1)
    return outdata


def callback(indata, outdata, frames, time, status):
    if status:
        print(status)
    global start_idx
    global tone

    note = generer_son(tone, start_idx, frames, N_cross, fs)

    outdata[:] = note

    start_idx += frames


tone = Tone()

tone.start_tone("Fa", 44100)

with sd.Stream(channels=2, callback=callback, blocksize=blocksize):
    sd.sleep(int(duration * 1000))

print(tone.list_tone)
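
The snippet above begins mid-function, so the surrounding setup is not shown. A minimal self-contained sketch of the same idea (a sine generator driven from a duplex sounddevice callback); the frequency value and names here are assumptions, not taken from the original:

import numpy as np
import sounddevice as sd

fs = 44100        # sample rate in Hz; assumed
freq = 349.23     # F4 ("Fa"); assumed value
start_idx = 0

def callback(indata, outdata, frames, time, status):
    global start_idx
    if status:
        print(status)
    # Sample times for this block, continuing from the previous block.
    t = (start_idx + np.arange(frames)) / fs
    outdata[:] = 0.2 * np.sin(2 * np.pi * freq * t).reshape(-1, 1)
    start_idx += frames

with sd.Stream(channels=1, callback=callback):
    sd.sleep(2000)  # play for two seconds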
Example no. 2
import sounddevice as sd
import numpy as np

duration = 200


def print_sound(indata, outdata, frames, time, status):
    samples = indata[::8]  # print every eighth sample of the block
    print(*samples)


with sd.Stream(callback=print_sound, channels=1):
    sd.sleep(int(duration * 1000))  # run for `duration` seconds
Example no. 3
import sounddevice as sd
import numpy as np

duration = 10  # seconds


def print_sound(indata, outdata, frames, time, status):
    volume_norm = np.linalg.norm(indata) * 10
    print('|' * int(volume_norm))


with sd.Stream(callback=print_sound):
    sd.sleep(duration * 1000)
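
Reusing the imports above, a hedged variant of the same meter that reports the level in dBFS instead of a raw norm (the floor value avoids log10(0) on silence):

def print_sound_db(indata, outdata, frames, time, status):
    rms = np.sqrt(np.mean(indata ** 2))   # root-mean-square level of the block
    db = 20 * np.log10(max(rms, 1e-10))   # clamp to avoid log10(0)
    print(f'{db:6.1f} dBFS')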
Example no. 4
    pid.set_point = mapp(DESIRED_SNR, MIN_SNR, MAX_SNR, -1, 1)
    pid.print_config()

    def callback(indata, outdata, frames, time_,
                 status):  # The sounddevice duplex-callback signature.
        # ~ print('---')
        SNR = calculate_SNR(indata.transpose()[0])
        amplitude = mapp(pid.get_control(mapp(SNR, MIN_SNR, MAX_SNR, -1, 1)),
                         -1, 1, 0, 1)
        outdata[:] = amplitude * PURE_SAMPLES.reshape(len(PURE_SAMPLES), 1)
        # ~ print('SP={:.2f}   '.format(DESIRED_SNR) + 'SNR={:.2f}   '.format(SNR) + 'eSNR={:.2f}   '.format(DESIRED_SNR - SNR) + 'a={:.3f}   '.format(amplitude))
        print(
            str(time.time() - t_start) + '\t' + str(DESIRED_SNR) + '\t' +
            str(SNR) + '\t' + str(DESIRED_SNR - SNR) + '\t' + str(amplitude))

    return callback


print('# ' + get_timestamp())
print('# time \t SNR_setpoint \t SNR_measured \t SNR_error \t amplitude')

stream = sd.Stream(samplerate=SAMPLING_FREQUENCY,
                   callback=create_callback(),
                   blocksize=len(PURE_SAMPLES),
                   channels=1)

stream.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    stream.stop()
Example no. 5
def execute():
    while True:
        with sd.Stream(callback=print_sound):
            sd.sleep(duration * 5000)
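
Re-opening the stream on every loop iteration re-initializes the audio device each time; a sketch that opens it once and keeps sleeping inside it instead:

def execute():
    with sd.Stream(callback=print_sound):
        while True:
            sd.sleep(duration * 5000)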
Example no. 6
def _in(ctx):
    _config = DEFAULT_CONFIG.copy()

    seconds_per_buffer = _config.get("chunk") / _config.get("sample_rate")
    # Number of buffers of non-speaking audio during a phrase before the phrase should be considered complete.
    pause_buffer_count = math.ceil(_config.get("pause_threshold") / seconds_per_buffer)
    # Minimum number of buffers of speaking audio before we consider the speaking audio a phrase.
    phrase_buffer_count = math.ceil(_config.get("phrase_threshold") / seconds_per_buffer)
    # Maximum number of buffers of non-speaking audio to retain before and after a phrase.
    non_speaking_buffer_count = math.ceil(_config.get("non_speaking_duration") / seconds_per_buffer)

    stream = sounddevice.Stream(samplerate=_config.get("sample_rate"), channels=_config.get("channels"), dtype='int16')
    with stream:
        while not ctx.finished.is_set():
            elapsed_time = 0  # Number of seconds of audio read
            buf = b""  # An empty buffer means that the stream has ended and there is no data left to read.
            while not ctx.finished.is_set():
                frames = collections.deque()

                # Store audio input until the phrase starts
                while not ctx.finished.is_set():
                    # Handle waiting too long for phrase by raising an exception
                    elapsed_time += seconds_per_buffer
                    if _config.get("timeout") and (elapsed_time > _config.get("timeout")):
                        raise Exception("Listening timed out while waiting for phrase to start.")

                    buf = stream.read(_config.get("chunk"))[0]
                    frames.append(buf)
                    # Ensure we only keep the required amount of non-speaking buffers.
                    if len(frames) > non_speaking_buffer_count:
                        frames.popleft()

                    # Detect whether speaking has started on audio input.
                    energy = audioop.rms(buf, _config.get("sample_width"))  # Energy of the audio signal.
                    if energy > _config.get("energy_threshold"):
                        break

                    # Dynamically adjust the energy threshold using asymmetric weighted average.
                    if _config.get("dynamic_energy_threshold"):
                        damping = _config.get("dynamic_energy_adjustment_damping") ** seconds_per_buffer  # Account for different chunk sizes and rates.
                        target_energy = energy * _config.get("dynamic_energy_ratio")
                        _config["energy_threshold"] = _config.get("energy_threshold") * damping + target_energy * (1 - damping)

                # Read audio input until the phrase ends.
                pause_count, phrase_count = 0, 0
                phrase_start_time = elapsed_time
                while not ctx.finished.is_set():
                    # Handle phrase being too long by cutting off the audio.
                    elapsed_time += seconds_per_buffer
                    if _config.get("timeout") and (elapsed_time - phrase_start_time > _config.get("timeout")):
                        break

                    buf = stream.read(_config.get("chunk"))[0]
                    frames.append(buf)
                    phrase_count += 1

                    # Check if speaking has stopped for longer than the pause threshold on the audio input.
                    energy = audioop.rms(buf, _config.get("sample_width"))  # unit energy of the audio signal within the buffer.
                    if energy > _config.get("energy_threshold"):
                        pause_count = 0
                    else:
                        pause_count += 1
                    if pause_count > pause_buffer_count:  # End of the phrase.
                        break

                # Check how long the detected phrase is and retry listening if the phrase is too short.
                phrase_count -= pause_count  # Exclude the buffers for the pause before the phrase.
                if phrase_count >= phrase_buffer_count or len(buf) == 0:
                    break  # Phrase is long enough or we've reached the end of the stream, so stop listening.

            # Obtain frame data.
            for _ in range(pause_count - non_speaking_buffer_count):
                frames.pop()  # Remove extra non-speaking frames at the end.
            frame_data = numpy.concatenate(frames)
            yield frame_data
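
A minimal usage sketch for the generator above, assuming a hypothetical ctx object whose finished attribute is a threading.Event (the generator yields one concatenated phrase per iteration):

import threading

class Ctx:
    def __init__(self):
        self.finished = threading.Event()

ctx = Ctx()
for phrase in _in(ctx):
    print(len(phrase), "samples captured")  # each yielded item is one detected phrase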
Example no. 7
parser.add_argument('-t', '--dtype', help='audio data type')
parser.add_argument('-s', '--samplerate', type=float, help='sampling rate')
parser.add_argument('-b', '--blocksize', type=int, help='block size')
parser.add_argument('-l', '--latency', type=float, help='latency in seconds')
args = parser.parse_args()

try:
    import sounddevice as sd

    def callback(indata, outdata, frames, time, status):
        if status:
            print(status)
        outdata[:] = indata

    with sd.Stream(device=(args.input_device, args.output_device),
                   samplerate=args.samplerate,
                   blocksize=args.blocksize,
                   dtype=args.dtype,
                   latency=args.latency,
                   channels=args.channels,
                   callback=callback) as s:
        print('#' * 80)
        print('press Return to quit')
        print('#' * 80)
        input()
except KeyboardInterrupt:
    parser.exit('\nInterrupted by user')
except Exception as e:
    parser.exit(type(e).__name__ + ': ' + str(e))
Example no. 8
    # Colors
    if len(args.channels) > 1:
        ax.legend(['channel {}'.format(c) for c in args.channels],
                  loc='lower left',
                  ncol=len(args.channels),
                  bbox_to_anchor=(0., 1.02, 1., .102))
        fx.legend(['channel {}'.format(c) for c in args.channels],
                  loc='lower left',
                  ncol=len(args.channels),
                  bbox_to_anchor=(0., 1.02, 1., .102))

    ax.axis((0, len(plotdata), -1, 1))
    fx.axis((0, 44100, -0.5, 1.5))
    # ax.set_yticks([0])
    ax.yaxis.grid(True)
    fx.yaxis.grid(True)
    # ax.tick_params(bottom='off', top='off', labelbottom='off',
    #                right='off', left='off', labelleft='off')
    fig.tight_layout(pad=0)

    stream = sd.Stream(device=(0, 1),
                       channels=max(args.channels),
                       samplerate=args.samplerate,
                       callback=audio_callback,
                       blocksize=960)
    ani = FuncAnimation(fig, update_plot, interval=args.interval, blit=False)
    with stream:
        plt.show()
except Exception as e:
    parser.exit(type(e).__name__ + ': ' + str(e))
Example no. 9
    def collect_samples(self):
        with sounddevice.Stream(callback=self.messure):
            sounddevice.sleep(100)
        self.check_sound_level()
Example no. 10
plt.show()
blackman = scipy.signal.windows.blackman(rate)  # scipy.blackman was removed; the windows module is its current home

A4 = 440
C0 = A4 * pow(2, -4.75)
name = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]


def pitch(freq):
    h = round(12 * log2(freq / C0))
    octave = h // 12
    n = h % 12
    return name[n] + str(octave)


with sd.Stream(channels=1, samplerate=rate) as s:
    playback_speed_multiplier = 1
    for i in range(duration * playback_speed_multiplier):
        print("start reading")
        print(datetime.datetime.now())
        data, b = s.read(int(rate / playback_speed_multiplier))
        # data = data + samples
        # data = data
        print("end reading")
        print(datetime.datetime.now())

        time_audiodata_ax.clear()
        time_audiodata_ax.set_ylim(-1, 1)
        time_audiodata_ax.plot(data)
        time_audiodata_ax.set_title(datetime.datetime.now())
Example no. 11
import sounddevice as sd
import numpy as np
''' NOTES:

    TO DO:

        fix it so sound comes from both headphone ears

        figure out what the 'data' numpy array represents

'''

s = sd.Stream(device=(4, 0), samplerate=44100, channels=2)
# s = sd.Stream(
# 	device=(4, 0),
# 	samplerate=44100,
# 	blocksize=1024,
# 	dtype='int16',
# 	latency=None,
# 	channels=2)

# how to use 2 devices
# source: https://github.com/spatialaudio/python-sounddevice/issues/29
# sd.Stream(device=('hw:0,1', 'hw:1,0'), samplerate=(48000, 48000), blocksize=1024, dtype=('int16', 'int16'), latency=None, channels=2):

with s:
    while True:
        data, overflowed = s.read(s.read_available)
        s.write(data)

        # for some reason this doesn't work :(
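
The read/write loop above copies audio through Python with a block size that varies with read_available, which underruns easily; the usual fix is to let PortAudio drive the copy from a callback. A sketch under the same device assumptions:

def wire_callback(indata, outdata, frames, time, status):
    if status:
        print(status)
    outdata[:] = indata  # copy input to output inside the audio thread

with sd.Stream(device=(4, 0), samplerate=44100, channels=2,
               callback=wire_callback):
    sd.sleep(10000)  # pass audio through for ten seconds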
Example no. 12
    def __init__(self):
        """initialization"""
        self.mode = "krl"
        self.data_in = []
        self.count_krl = 0
        self.num_bit = 0
        self.krl_freq = 475
        self.krl_ampl = 0.1
        self.krl_code = 0x2C
        self.krl_fdev = f_dev
        self.krl_speed = f_mod
        for j in range(7, -1, -1):
            self.data_in.append((self.krl_code & (1 << j)) >> j)  # extract bit j of the code

        self.count_alsn = 0
        self.alsn_freq = 50
        self.alsn_ampl = 0.1
        self.alsn_code = "RedYellow"
        self.alsn_green = {
                'pause1': 0.03,
                'pulse1': 0.38,
                'pause2': 0.5,
                'pulse2': 0.72,
                'pause3': 0.84,
                'pulse3': 1.06
                }
        self.alsn_yellow = {
                'pause1': 0.03,
                'pulse1': 0.41,
                'pause2': 0.53,
                'pulse2': 0.91,
                }
        self.alsn_redyellow = {
                'pulse1': 0.23,
                'pause1': 0.80
                }

        self.count_alsen = 0
        self.alsen_freq = 175
        self.alsen_ampl = 0.1
        self.alsen_data = [0x2C,0x2C]

        self.count_ars = 0
        self.ars_freq1 = 75
        self.ars_freq2 = 0
        self.ars_ampl1 = 0.1
        self.ars_ampl2 = 0.1
        self.sao = False
        self.sao_param = {
                'pulse1': 0.5,
                'pause1': 1.0
                }

        self.downsample = 1
        self.start_idx = 0
        self.channels = [1,2]
        self.fs = fs
        self.bs = 1024
        sd.default.blocksize = self.bs
        sd.default.samplerate = self.fs
        sd.default.channels = 2
        self.q = queue.Queue()
        self.generator = sd.Stream(device=(sd.default.device,
                                           sd.default.device),
                                   callback=self.__audio_callback)
#        self.generator.start()
        self.mapping = [c - 1 for c in self.channels]
Example no. 13
    def __init__(self,
                 ioDevice=None,
                 iDevice=None,
                 oDevice=None,
                 iChannels=None,
                 oChannels=None,
                 blockSize=None):
        if ioDevice is not None:
            self.device = sd.query_devices(ioDevice)
            print(self.device)

            self.status = 0

            if iChannels is not None and 2 * iChannels <= self.device[
                    'max_input_channels']:
                self.ninputs = iChannels
            else:
                self.ninputs = int(self.device['max_input_channels'] / 2)

            if oChannels is not None and 2 * oChannels <= self.device[
                    'max_output_channels']:
                self.noutputs = oChannels
            else:
                self.noutputs = int(self.device['max_output_channels'] / 2)

            self.stream = sd.Stream(
                device=self.device['name'],
                channels=(2 * self.ninputs, 2 * self.noutputs),
                latency=(self.device['default_low_input_latency'],
                         self.device['default_low_output_latency']),
                callback=self.ioCallback,
                blocksize=blockSize)
        elif ioDevice is None and iDevice is not None and oDevice is not None:
            self.idevice = sd.query_devices(iDevice)
            self.odevice = sd.query_devices(oDevice)

            self.status = 1

            if iChannels is not None and 2 * iChannels <= self.idevice[
                    'max_input_channels']:
                self.ninputs = iChannels
            else:
                self.ninputs = int(self.idevice['max_input_channels'] / 2)

            if oChannels is not None and 2 * oChannels <= self.odevice[
                    'max_output_channels']:
                self.noutputs = oChannels
            else:
                self.noutputs = int(self.odevice['max_output_channels'] / 2)

            # Use the lower of the two devices' default sample rates.
            if self.idevice['default_samplerate'] <= self.odevice[
                    'default_samplerate']:
                samplerate = int(self.idevice['default_samplerate'])
            else:
                samplerate = int(self.odevice['default_samplerate'])

            if blockSize is None:
                blockSize = 255

            sd.default.device = iDevice, oDevice
            sd.default.samplerate = samplerate
            sd.default.channels = 2 * self.ninputs, 2 * self.noutputs
            sd.default.latency = 'low', 'low'
            sd.default.blocksize = blockSize
            sd.default.never_drop_input = True

            self.istream = sd.InputStream(device=iDevice,
                                          callback=self.iCallback)
            self.ostream = sd.OutputStream(device=oDevice,
                                           callback=self.oCallback)

            self.iEvent = threading.Event()
            self.oEvent = threading.Event()
            self.oEvent.set()

        self.runEvent = threading.Event()
        self.outputBuff = np.empty([0, 2 * self.noutputs])
        self.modulos = []
        self.conexiones = []
Example no. 14
mouthVel = 150  # scale according to mechanics

sd.play(data, fs)

#arduino = True
arduino = False
while (not arduino):
    try:
        #TODO: write getter()
        USB_PORT = "/dev/ttyACM1"
        arduino = serial.Serial(USB_PORT, 9600, timeout=1)
    except serial.SerialException:
        print('Connecting USB...')

while sd.get_stream().active:
    with sd.Stream(sd.default.samplerate, 0, sd.default.device, 2) as stream:
        amp = stream.read(128)[0]  # increase blocksize for better accuracy
        #print(amp)
        L = []
        R = []
        for i in range(len(amp)):
            L.append(amp[i][0])
            R.append(amp[i][1])
        amp_L = round(max(L) * mouthVel, 1)
        amp_R = round(max(R) * mouthVel, 1)

        print('L:', amp_L, 'R:', amp_R)
        # Left audio channel
        if (amp_L):
            arduino.write('ml'.encode())
            arduino.write(str(amp_L).encode())
Example no. 15
    indexFrame += 1


import time

import pylab as pl

import ryLab01_1 as ry

spDuration = 10 * bufferTime  # bufferTime= 10 seconds

ryClearBuffer()

with sd.Stream(
        callback=ryCallback,
        channels=nChannel,  # 1 for mono, 2 for stereo
        samplerate=nSamplePerSec,  # sample/sec
        blocksize=nSamplePerFrame  #1000   # frame_size_in_sample, sample/frame
) as ryStream:
    t0 = time.time()
    #sd.sleep(int(duration * 1000))
    #time.sleep(spDuration)

    t = 0
    while t < spDuration:

        print(' {:.1f}, '.format(t), end='', flush=True)
        dt = .2  # sec
        '''
        x= ryBuffer.flatten()#.shape
        
        #t2= BufferSize*nSamplePerFrame
Example no. 16
def startMonitor():
    if started:
        with sd.Stream(callback=monitor_sound):
            sd.sleep(100)
    root.after(1, startMonitor)
Example no. 17
    def run(self):
        sending_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        receiving_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        listening_endpoint = ("0.0.0.0", self.listening_port)
        receiving_sock.bind(listening_endpoint)

        def receive_and_buffer():
            messagepack, source_address = receiving_sock.recvfrom(
                Intercom.max_packet_size)

            out = numpy.frombuffer(messagepack,
                                   numpy.int16)  # 1-D view of the message
            idx = out[0]  # first element carries the packet index
            # Save the message in the buffer at position (index modulo buffer_size).
            self.packet_list[idx % self.buffer_size] = out
            self.cycle += 1
            sys.stderr.write("\nMSGSIZE" + str(sys.getsizeof(messagepack)))
            sys.stderr.flush()
            cpu = psutil.cpu_percent() / psutil.cpu_count()
            if self.cpu_max < cpu:
                self.cpu_max = cpu
            self.cpu_load += cpu
            sys.stderr.write("\nCPU_LOAD" + str(self.cpu_load / self.cycle))
            sys.stderr.flush()
            sys.stderr.write("\nCPU_MAX" + str(self.cpu_max))
            sys.stderr.flush()

            #sys.stderr.write("\nIDX_REC:" + str(idx)); sys.stderr.flush()

        def record_send_and_play(indata, outdata, frames, time, status):

            # Create the data packet: the message with the packet index
            # inserted in the first position.
            datapack = numpy.insert(numpy.frombuffer(indata, numpy.int16), 0,
                                    self.packet_send)

            sending_sock.sendto(
                datapack, (self.destination_IP_addr, self.destination_port))
            self.packet_send = (self.packet_send + 1) % (2**16)
            #sys.stderr.write("\nIDX_SEND:" + str(self.packet_send-1)); sys.stderr.flush()

            if self.packet_received < 0:
                nonzeros = []  # buffer positions that already hold a packet
                for s in range(self.buffer_size):
                    if len(self.packet_list[s]) != 0:
                        nonzeros.append(s)

                # If the distance between the lowest and highest received
                # packets exceeds half the buffer size, the buffer is half
                # filled: stop buffering and start playing.
                if len(nonzeros) > 0 and (max(nonzeros) - min(nonzeros)
                                          >= math.ceil(self.buffer_size / 2)):
                    self.packet_received = nonzeros[0]  # buffer index of the lowest received packet
                    print("\nBUFFERING FINISHED - START PLAYING")

            try:
                message = self.packet_list[self.packet_received]

                if len(message) <= 1:
                    raise ValueError

                idx = message[0]
                message = numpy.delete(message, 0)
                sys.stderr.write("\nmessage:" + str(message))
                sys.stderr.flush()

            except ValueError:
                message = numpy.zeros(
                    (self.samples_per_chunk, self.bytes_per_sample),
                    self.dtype)

            if self.packet_received >= 0:  #if we started playing (after buffer was filled more than half-size)
                self.packet_received = (self.packet_received +
                                        1) % self.buffer_size

            outdata[:] = numpy.reshape(
                message, (self.samples_per_chunk, self.number_of_channels))

            if __debug__:
                sys.stderr.write(".")
                sys.stderr.flush()

        with sd.Stream(samplerate=self.samples_per_second,
                       blocksize=self.samples_per_chunk,
                       dtype=self.dtype,
                       channels=self.number_of_channels,
                       callback=record_send_and_play):
            print('-=- Press <CTRL> + <C> to quit -=-')
            while True:
                receive_and_buffer()
Example no. 18
    def __init__(self):
        self.sound_volume = 0
        self.stream = sd.Stream(channels=2, callback=self.sound_callback)
        self.stream.start()
Example no. 19
    def run(self):
        with sd.Stream(samplerate=self.frames_per_second, blocksize=self.frames_per_chunk, dtype=np.int16, channels=self.number_of_channels, callback=self.record_send_and_play):
            print("-=- Press CTRL + c to quit -=-")
            while True:
                self.receive_and_buffer()
Example no. 20
    def run(self):

        self.recorded_chunk_number = 0
        self.played_chunk_number = 0

        def receive_and_buffer():

            #Same as intercom_buffer, but adding the most significant column with an "or" operation,
            #in order to place the column of most significant bits correctly.
            #The operation is repeated for as many columns and channels as there are.

            message, source_address = self.receiving_sock.recvfrom(
                Intercom.MAX_MESSAGE_SIZE)
            chunk_number, significantCol, channelNum, *bitplane = struct.unpack(
                self.packet_format, message)

            bitplane8 = np.asarray(bitplane, dtype=np.uint8)
            bitplane_unpack = np.unpackbits(bitplane8)
            bitplane16 = bitplane_unpack.astype(
                np.int16)  #Unpacking and final conversion to int16

            #We store bitplane16 in a specific buffer and channel position
            self._buffer[chunk_number %
                         self.cells_in_buffer][:, channelNum] |= (
                             bitplane16 << significantCol)

            return chunk_number

        def record_send_and_play(indata, outdata, frames, time, status):
            for significantCol in range(15, -1, -1):

                #For each bitplane (significantCol) we extract, per channel,
                #the bits of indata at that position. For instance, for
                #bitplane 15 we collect bit 15 of every sample in each channel.
                bitArray = (indata & (1 << significantCol)) >> significantCol
                #print(indata)

                for channelNum in range(self.number_of_channels):
                    channelArray = bitArray[:, channelNum]

                    int8 = channelArray.astype(
                        np.uint8)  #channel conversion to 8bit integer
                    channelpack8 = np.packbits(int8)  #packing
                    message = struct.pack(self.packet_format,
                                          self.recorded_chunk_number,
                                          significantCol, channelNum,
                                          *channelpack8)
                    self.sending_sock.sendto(
                        message,
                        (self.destination_IP_addr, self.destination_port))

            self.recorded_chunk_number = (self.recorded_chunk_number +
                                          1) % self.MAX_CHUNK_NUMBER
            chunk = self._buffer[self.played_chunk_number %
                                 self.cells_in_buffer]
            self._buffer[self.played_chunk_number %
                         self.cells_in_buffer] = self.generate_zero_chunk()
            self.played_chunk_number = (self.played_chunk_number +
                                        1) % self.cells_in_buffer
            outdata[:] = chunk
            #print(outdata)

            if __debug__:
                sys.stderr.write(".")
                sys.stderr.flush()

        with sd.Stream(samplerate=self.frames_per_second,
                       blocksize=self.frames_per_chunk,
                       dtype=np.int16,
                       channels=self.number_of_channels,
                       callback=record_send_and_play):
            print("-=- Press CTRL + c to quit -=-")
            first_received_chunk_number = receive_and_buffer()
            self.played_chunk_number = (
                first_received_chunk_number -
                self.chunks_to_buffer) % self.cells_in_buffer
            while True:
                receive_and_buffer()
Example no. 21
def record(callback, samplerate, record_time):
    global run_record
    with sd.Stream(channels=2, callback=callback, samplerate=samplerate):
        while run_record or current_frame < min_frame:
            sd.sleep(100)
Example no. 22
# socket connect to the server



SERVER_IP = '34.70.181.155'
# SERVER_IP = '0.0.0.0'
SERVER_PORT = 9001
BUFMAX = 512
running = True
mutex_t = Lock()
item_available = Condition()
# SLEEPTIME = 0.00001
SLEEPTIME = 0.000001
audio_available = Condition()

sdstream = sd.Stream(samplerate=44100, channels=1, dtype='float32')
sdstream.start()

key = b'thisisthepasswordforAESencryptio'
# random.seed(input("ENTER RANDOM SEED :"))
random.seed('changethisrandomseed')
# iv_seed = hash(hash(key))
# random.seed(iv_seed)
iv = ''.join([chr(random.randint(0, 0xFF)) for i in range(16)])
iv = iv.encode()
cipher = AES.new(key, AES.MODE_CBC, iv[:16])
# nonce = cipher.nonce
# ciphertext, tag = cipher.encrypt_and_digest(data)
def get_iv():
    return (''.join([chr(random.randint(0, 0xFF)) for i in range(16)])).encode()[:16]
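
Seeding random with a fixed string makes the IV predictable; when both peers do not need to reproduce the same IV, a sketch using the operating system's entropy source instead (get_random_iv is a hypothetical replacement, not part of the original code):

import os

def get_random_iv():
    return os.urandom(16)  # cryptographically secure 16-byte IV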
Example no. 23
def _in():
    timeout = None
    channels = 1
    #The ``phrase_time_limit`` parameter is the maximum number of seconds that this will allow a phrase to continue before stopping and returning the part of the phrase processed before the time limit was reached. The resulting audio will be the phrase cut off at the time limit. If ``phrase_time_limit`` is ``None`` there will be no phrase time limit.
    phrase_time_limit = None
    dynamic_energy_adjustment_damping = 0.15
    dynamic_energy_ratio = 1.5
    dynamic_energy_threshold = True
    energy_threshold = 3000  # minimum audio energy to consider for recording
    pause_threshold = 0.5  # seconds of non-speaking audio before a phrase is considered complete
    phrase_threshold = 0.3  # minimum seconds of speaking audio before we consider the speaking audio a phrase - values below this are ignored (for filtering out clicks and pops)
    non_speaking_duration = 0.5  # seconds of non-speaking audio to keep on both sides of the recording
    chunk = 1024  # number of frames stored in each buffer
    sample_rate = 16000  # sampling rate in Hertz
    ##        pa_format=pyaudio.paInt16 # 16-bit int sampling
    sample_width = 2  #pyaudio.get_sample_size(pa_format) # size of each sample
    seconds_per_buffer = float(chunk) / sample_rate
    pause_buffer_count = int(
        math.ceil(pause_threshold / seconds_per_buffer)
    )  # number of buffers of non-speaking audio during a phrase before the phrase should be considered complete
    phrase_buffer_count = int(
        math.ceil(phrase_threshold / seconds_per_buffer)
    )  # minimum number of buffers of speaking audio before we consider the speaking audio a phrase
    non_speaking_buffer_count = int(
        math.ceil(non_speaking_duration / seconds_per_buffer)
    )  # maximum number of buffers of non-speaking audio to retain before and after a phrase
    stream = sounddevice.Stream(samplerate=sample_rate,
                                channels=channels,
                                dtype='int16')
    with stream:
        while oa.alive:
            elapsed_time = 0  # number of seconds of audio read
            buf = b""  # an empty buffer means that the stream has ended and there is no data left to read
            #            energy_threshold = 300  # minimum audio energy to consider for recording
            while oa.alive:
                frames = collections.deque()

                # store audio input until the phrase starts
                while oa.alive:
                    # handle waiting too long for phrase by raising an exception
                    elapsed_time += seconds_per_buffer
                    if timeout and elapsed_time > timeout:
                        raise Exception(
                            "listening timed out while waiting for phrase to start"
                        )

                    buf = stream.read(chunk)[0]
                    #                if len(buffer) == 0: break  # reached end of the stream
                    frames.append(buf)
                    # ensure we only keep the needed amount of non-speaking buffers
                    if len(frames) > non_speaking_buffer_count:
                        frames.popleft()

                    # detect whether speaking has started on audio input
                    energy = audioop.rms(buf, sample_width)  # energy of the audio signal
                    if energy > energy_threshold:
                        break

                    # dynamically adjust the energy threshold using asymmetric weighted average
                    if dynamic_energy_threshold:
                        damping = dynamic_energy_adjustment_damping**seconds_per_buffer  # account for different chunk sizes and rates
                        target_energy = energy * dynamic_energy_ratio
                        energy_threshold = energy_threshold * damping + target_energy * (
                            1 - damping)

                # read audio input until the phrase ends
                pause_count, phrase_count = 0, 0
                phrase_start_time = elapsed_time
                while oa.alive:
                    # handle phrase being too long by cutting off the audio
                    elapsed_time += seconds_per_buffer
                    if phrase_time_limit and elapsed_time - phrase_start_time > phrase_time_limit:
                        break

                    buf = stream.read(chunk)[0]
                    #                if len(buffer) == 0: break  # reached end of the stream
                    frames.append(buf)
                    phrase_count += 1

                    # check if speaking has stopped for longer than the pause threshold on the audio input
                    energy = audioop.rms(buf, sample_width)  # unit energy of the audio signal within the buffer
                    if energy > energy_threshold:
                        pause_count = 0
                    else:
                        pause_count += 1
                    if pause_count > pause_buffer_count:  # end of the phrase
                        break

                # check how long the detected phrase is, and retry listening if the phrase is too short
                phrase_count -= pause_count  # exclude the buffers for the pause before the phrase
                if phrase_count >= phrase_buffer_count or len(buf) == 0:
                    break  # phrase is long enough or we've reached the end of the stream, so stop listening

            # obtain frame data
            for i in range(pause_count - non_speaking_buffer_count):
                frames.pop()  # remove extra non-speaking frames at the end
            #            frame_data = b"".join(frames)
            frame_data = numpy.concatenate(frames)
            yield frame_data


#for x in _in():
#    print(len(x))
#no output
#def _out():
Example no. 24
    def stream(self):
        return sd.Stream(sd.default.samplerate, 0, sd.default.device, 2)
Example no. 25
        corr_buffer1 = np.array([])
        corr_buffer2 = np.array([])
        correlation_msg = OSC.OSCMessage()
        correlation_msg.setAddress('/correlation')
        correlation_msg.append(pearson)
        client.send(correlation_msg)
        print(pearson)



server.addMsgHandler('/buffer_entropy', buffer_entropy)
server.addMsgHandler('/predict', predict_all_buffers)
server.addMsgHandler('/gen_settings', gen_class_settings)
server.addMsgHandler('/play', play)
server.addMsgHandler('/gen_preset', gen_preset)
server.addMsgHandler('/meter', meter)
server.addMsgHandler('/quit', quit_handler)
server.addMsgHandler('/create_synth_datapoint', create_synth_datapoint)
server.addMsgHandler('/compute_pearson', compute_pearson)


th = threading.Thread(target = server.serve_forever)
th.daemon = True  #automatically kills thread when quitting server
th.start()

if __name__ == '__main__':
    with sd.Stream(channels=AUDIO_IN_CHANNELS, blocksize=BLOCKSIZE, callback=rec_callback):
        print ""
        print "Audio stream started"  #audio recording stream
        input()
Example no. 26
    def run(self):

        sending_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        receiving_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        listening_endpoint = ("0.0.0.0", self.listening_port)
        receiving_sock.bind(listening_endpoint)

        def receive_and_buffer():
            # Unpack the received message.
            message, source_address = receiving_sock.recvfrom(
                Intercom.max_packet_size)
            unpack = struct.unpack(self.packet_format, message)

            # message = numpy.frombuffer(
            #     message,
            #     numpy.int16).reshape(
            #         (self.samples_per_chunk), self.number_of_channels)
            # Retrieve the chunk number inserted in the audio packet:
            # numero = message[1024][0]
            # indata would hold the message minus the last element, which is the chunk number:
            # indata = numpy.delete(message, 1024, axis=0)

            # Store the audio payload (everything after the chunk number) in the
            # buffer at position (chunk number modulo buffer size).
            size = len(self.buffer)
            self.buffer[unpack[0] % size] = unpack[1:]

        def record_send_and_play(indata, outdata, frames, time, status):
            # Pack the recorded chunk, prefixed with its chunk number.
            paquete = numpy.array(indata, numpy.int16)
            pack = struct.pack(self.packet_format, self.chunk_number,
                               *paquete.flatten())
            # pack = struct.pack(self.packet_format, 1, *indata)

            sending_sock.sendto(
                pack, (self.destination_IP_addr, self.destination_port))

            self.chunk_number += 1
            message = indata
            outdata[:] = numpy.frombuffer(
                message,
                numpy.int16).reshape(
                    self.samples_per_chunk, self.number_of_channels)

            if __debug__:
                sys.stderr.write(".")
                sys.stderr.flush()

        with sd.Stream(
                samplerate=self.samples_per_second,
                blocksize=self.samples_per_chunk,
                dtype=self.dtype,
                channels=self.number_of_channels,
                callback=record_send_and_play):
            print('-=- Press <CTRL> + <C> to quit -=-')
            while True:
                receive_and_buffer()
Example no. 27
# Moving the computation to a separate process: the result is one fully loaded
# core, while the audio loop keeps running without any special issue.

import multiprocessing
import sounddevice as sd
import numpy as np


def computation():
    while True:
        x = 0
        for i in range(1000000):
            x += 1


p = multiprocessing.Process(target=computation)
p.start()

CHUNK_SIZE = 1024

stream = sd.Stream(samplerate=44100, channels=2, dtype='int16')
stream.start()
while True:
    chunk, overflowed = stream.read(CHUNK_SIZE)
    if overflowed:
        print("Overflow")
    stream.write(chunk)
Example no. 28
def measure_latency_phase_one(levels,
                              desired_latency="high",
                              samples_per_second=48000,
                              channels=(1, 1)):
    """Channels are specified as a tuple of (input channels, output channels)."""
    input_channels, output_channels = channels

    # Class to describe the current state of the process
    class ProcessState(Enum):
        RESET = 0
        DETECT_SILENCE_START = 10
        START_TONE = 20
        DETECT_TONE = 30
        STOP_TONE = 35
        DETECT_SILENCE_END = 40
        CLEANUP = 50
        COMPLETING = 60
        COMPLETED = 70
        ABORTED = 80

    # Create a class to hold the shared variables
    class SharedVariables:
        def __init__(self, samples_per_second):
            # DEBUG
            # Create a buffer to store the outdata for analysis
            duration = 20  # seconds
            self.out_pcm = numpy.zeros(
                (duration * samples_per_second, output_channels))
            self.out_pcm_pos = 0

            # Threading event on which the stream can wait
            self.event = threading.Event()

            # Queue to save output data for debugging
            self.q = queue.Queue()

            # Queues for off-thread processing
            self.q_process = queue.Queue()

            # Store samples per second parameter
            self.samples_per_second = samples_per_second

            # Allocate space for recording
            max_recording_duration = 10  # seconds
            max_recording_samples = (max_recording_duration *
                                     samples_per_second)
            self.rec_pcm = numpy.zeros((max_recording_samples, input_channels))

            # Initialise recording position
            self.rec_position = 0

            # Current state of the process
            self.process_state = ProcessState.RESET

            # Instance of the Fast Fourier Transform (FFT) analyser
            self.fft_analyser = FFTAnalyser(
                array=self.rec_pcm, samples_per_second=samples_per_second)

            # Variable to record when we entered the current state
            self.state_start_time = None

            # Variables for tone
            assert self.fft_analyser.n_freqs == 1
            tone_duration = 1  # second
            self.tone = Tone(self.fft_analyser.freqs[0],
                             self.samples_per_second,
                             channels=output_channels,
                             max_level=1,
                             duration=tone_duration)

            # Variables for levels
            self.silence_mean = levels["silence_mean"]
            self.silence_sd = levels["silence_sd"]
            self.tone0_mean = levels["tone0_mean"]
            self.tone0_sd = levels["tone0_sd"]

            # Variables for DETECT_SILENCE_START
            self.detect_silence_start_threshold_levels = (
                (self.tone0_mean[0] + self.silence_mean[0]) / 2)
            self.detect_silence_start_samples = 0
            self.detect_silence_start_threshold_samples = 100  # samples
            self.detect_silence_start_detected = False

            # Variables for START_TONE
            self.start_tone_click_duration = 75 / 1000  # seconds

            # Variables for DETECT_TONE
            self.detect_tone_threshold = (
                (self.tone0_mean[0] + self.silence_mean[0]) / 2)
            self.detect_tone_start_detect_time = None
            self.detect_tone_threshold_duration = 50 / 1000  # seconds
            self.detect_tone_detected = False
            self.detect_tone_max_time_in_state = 5  # seconds

            # Variables for STOP_TONE
            self.stop_tone_fadeout_duration = 20 / 1000  # seconds

            # Variables for DETECT_SILENCE_END
            self.detect_silence_end_threshold_levels = (
                (self.tone0_mean[0] + self.silence_mean[0]) / 2)
            self.detect_silence_end_samples = 0
            self.detect_silence_end_threshold_samples = 10
            self.detect_silence_end_detected = False

            # Variables for CLEANUP
            self.cleanup_cycles = 0
            self.cleanup_cycles_threshold = 3

            # =======

            # Variables for START_TONE0
            self.start_tone0_start_play_time = None

            # Variables for DETECT_TONE0
            self.detect_tone0_threshold_num_sd = 4

            # Variables for START_TONE0_TONE1
            self.start_tone0_tone1_start_play_time = None
            self.start_tone0_tone1_fadein_duration = 5 / 1000  # seconds

            # Variables for DETECT_TONE0_TONE1
            self.detect_tone0_tone1_start_detect_time = None
            self.detect_tone0_tone1_threshold_num_sd = 4
            self.detect_tone0_tone1_threshold_duration = 50 / 1000  # seconds
            self.detect_tone0_tone1_max_time_in_state = 5  # seconds
            self.detect_tone0_tone1_detected = False

    # Create an instance of the shared variables
    v = SharedVariables(samples_per_second)

    # # Check that tone0 and tone0_tone1 are not overlapping in terms of
    # # the thresholds defined above.  We are only concerned with tone1
    # # and not-tone1 being mutually exclusive.
    # x = numpy.array([-1,1])
    # range_not_tone1 = v.tone0_mean[1] + x * v.tone0_sd[1]
    # range_tone1 = v.tone0_tone1_mean[1] + x * v.tone0_tone1_sd[1]
    # if min(range_not_tone1) < max(range_tone1) and \
    #    min(range_tone1) < max(range_not_tone1):
    #     print("range_not_tone1:", range_not_tone1)
    #     print("range_tone1:", range_tone1)
    #     raise Exception("ERROR: The expected ranges of the two tones overlap. "+
    #                     "Try increasing the system volume and try again")

    # Callback for when the recording buffer is ready.  The size of the
    # buffer depends on the latency requested.
    def callback(indata, outdata, samples, time, status):
        nonlocal v

        # Store any exceptions to be raised
        exception = None

        # Store Recording
        # ===============

        v.rec_pcm[v.rec_position:v.rec_position + samples] = indata[:]
        v.rec_position += samples

        # Analysis
        # ========

        assert v.rec_pcm is v.fft_analyser._pcm
        analyses = v.fft_analyser.run(v.rec_position)
        #print(tones_level)

        # # Clear the first half second of the recording buffer if we've
        # # recorded more than one second
        # if v.rec_position > v.samples_per_second:
        #     seconds_to_clear = 0.5 # seconds
        #     samples_to_clear = int(seconds_to_clear * v.samples_per_second)
        #     v.rec_pcm = numpy.roll(v.rec_pcm, -samples_to_clear, axis=0)
        #     v.rec_position -= samples_to_clear

        # Transitions
        # ===========

        previous_state = v.process_state

        if v.process_state == ProcessState.RESET:
            v.process_state = ProcessState.DETECT_SILENCE_START

        elif v.process_state == ProcessState.DETECT_SILENCE_START:
            if v.detect_silence_start_detected:
                v.process_state = ProcessState.START_TONE

        elif v.process_state == ProcessState.START_TONE:
            v.process_state = ProcessState.DETECT_TONE

        elif v.process_state == ProcessState.DETECT_TONE:
            if v.detect_tone_detected:
                v.process_state = ProcessState.STOP_TONE

            if time.currentTime - v.state_start_time > v.detect_tone_max_time_in_state:
                print(
                    "ERROR: We've spent too long listening for tone.  Aborting."
                )
                v.process_state = ProcessState.ABORTED

        elif v.process_state == ProcessState.STOP_TONE:
            if v.tone.inactive:
                v.process_state = ProcessState.DETECT_SILENCE_END

        elif v.process_state == ProcessState.DETECT_SILENCE_END:
            if v.detect_silence_end_detected:
                v.process_state = ProcessState.CLEANUP

        elif v.process_state == ProcessState.CLEANUP:
            if v.tone.inactive:
                if v.cleanup_cycles >= v.cleanup_cycles_threshold:
                    v.process_state = ProcessState.COMPLETED
                else:
                    v.process_state = ProcessState.DETECT_SILENCE_START

        elif v.process_state == ProcessState.COMPLETED:
            pass

        elif v.process_state == ProcessState.ABORTED:
            pass

        # Set state start time
        if previous_state != v.process_state:
            v.state_start_time = time.currentTime

        # States
        # ======

        if v.process_state == ProcessState.RESET:
            # Ensure tone #0 is stopped
            v.tone.stop()
            v.tone.output(outdata)

        if v.process_state == ProcessState.DETECT_SILENCE_START:
            # The tone was stopped in the previous state
            v.tone.output(outdata)

            for analysis in analyses:
                tones_level = analysis["freq_levels"]

                # Ensure that the levels are below the threshold
                if tones_level is not None:
                    # Check if levels are below the silence threshold
                    if tones_level[0] < v.detect_silence_start_threshold_levels:
                        v.detect_silence_start_samples += 1
                        if v.detect_silence_start_samples >= v.detect_silence_start_threshold_samples:
                            #print("Silence detected")
                            v.detect_silence_start_detected = True
                    else:
                        # Restart the counter
                        v.detect_silence_start_samples = 0
                else:
                    print("tones_levels was None")

        elif v.process_state == ProcessState.START_TONE:
            # Play tone #0
            v.tone.click(v.start_tone_click_duration)
            v.tone.output(outdata)

            # Start the timer from the moment the system says it will
            # play the audio.
            v.start_tone0_start_play_time = time.outputBufferDacTime

            # Send the data for off-thread analysis
            v.q_process.put_nowait({
                "start": True,
                "out": outdata.copy(),
                "out_time": v.start_tone0_start_play_time,
                "in": indata.copy(),
                "in_time": time.inputBufferAdcTime
            })

        elif v.process_state == ProcessState.DETECT_TONE:
            # Output tone, which may or may not be active
            v.tone.output(outdata)

            for analysis in analyses:
                tones_level = analysis["freq_levels"]

                if tones_level is not None:
                    # Are we hearing the tone?
                    if tones_level[0] > v.detect_tone_threshold:
                        #print("Tone detected")
                        v.detect_tone_detected = True

                        # No need to process the remaining samples
                        break
                else:
                    print("tones_levels was None")

            # Send the data for off-thread analysis
            v.q_process.put_nowait({
                "out": outdata.copy(),
                "out_time": v.start_tone0_start_play_time,
                "in": indata.copy(),
                "in_time": time.inputBufferAdcTime
            })

        elif v.process_state == ProcessState.STOP_TONE:
            # Fadeout tone
            #v.tone.fadeout(v.stop_tone_fadeout_duration)
            v.tone.output(outdata)

            # Send the data for off-thread analysis
            v.q_process.put_nowait({
                "out": outdata.copy(),
                "out_time": v.start_tone0_start_play_time,
                "in": indata.copy(),
                "in_time": time.inputBufferAdcTime
            })

        elif v.process_state == ProcessState.DETECT_SILENCE_END:
            # The tone was stopped in the previous state
            v.tone.output(outdata)

            for analysis in analyses:
                tones_level = analysis["freq_levels"]

                # Ensure that the levels are below the threshold
                if tones_level is not None:
                    if tones_level[0] < v.detect_silence_end_threshold_levels:
                        v.detect_silence_end_samples += 1
                        if v.detect_silence_end_samples >= v.detect_silence_end_threshold_samples:
                            #print("Silence detected")
                            v.detect_silence_end_detected = True
                    else:
                        # Restart the timer
                        v.detect_silence_end_samples = 0
                else:
                    print("tones_levels was None")

            # Send the data for off-thread analysis
            v.q_process.put_nowait({
                "out": outdata.copy(),
                "out_time": v.start_tone0_start_play_time,
                "in": indata.copy(),
                "in_time": time.inputBufferAdcTime
            })

        elif v.process_state == ProcessState.CLEANUP:
            # Keep outputting tone until it's inactive
            v.tone.output(outdata)

            # Send the data for off-thread analysis
            v.q_process.put_nowait({"end": True})

            # Reset key variables
            v.detect_silence_start_start_time = None
            v.detect_silence_start_detected = False
            v.detect_silence_end_start_time = None
            v.detect_silence_end_detected = False
            v.detect_tone_start_detect_time = None
            v.detect_tone_detected = False

            # Increment the number of cleanup cycles
            v.cleanup_cycles += 1

        elif v.process_state == ProcessState.COMPLETED:
            # Actively fill outdata with zeros
            outdata.fill(0)
            print("Completed phase one latency measurement")
            exception = sd.CallbackStop

        elif v.process_state == ProcessState.ABORTED:
            # Actively fill outdata with zeros
            outdata.fill(0)
            print("Aborted phase one latency measurement")
            exception = sd.CallbackAbort

        # Store output
        # ============
        v.q.put(outdata.copy())

        # Terminate if required
        # =====================
        if exception is not None:
            raise exception

    # Play first tone
    # Open a read-write stream
    stream = sd.Stream(samplerate=samples_per_second,
                       channels=channels,
                       dtype=numpy.float32,
                       latency=desired_latency,
                       callback=callback,
                       finished_callback=v.event.set)

    print("Measuring latency...")
    with stream:
        print("Stated stream latency:", stream.latency)
        v.event.wait()  # Wait until measurement is finished

    print("Processing collected samples...")
    latencies = process_samples(v.q_process)

    # # Save output as wave file
    # print("Writing wave file")
    # wave_file = wave.open("out.wav", "wb")
    # wave_file.setnchannels(2) #FIXME
    # wave_file.setsampwidth(2)
    # wave_file.setframerate(samples_per_second)
    # while True:
    #     try:
    #         data = v.q.get_nowait()
    #     except:
    #         break
    #     data = data * (2**15-1)
    #     data = data.astype(numpy.int16)
    #     wave_file.writeframes(data)
    # wave_file.close()

    # Done!
    print("Finished measurement of latency.")

    return latencies
Example no. 29
from pylab import *
from scipy import *
from scipy import signal
from scipy.io import wavfile
import sounddevice as sd
import numpy as np
import random
import time

duration = 5.5  # seconds

def callback(indata, outdata, frames, time, status):
    if status:
        print(status)
    outdata[:] = np.random.uniform(-1, 1, outdata.shape) * 0.4  # white noise, one value per sample

with sd.Stream(channels=2, callback=callback):
    sd.sleep(int(duration * 1000))
Example no. 30
    def run(self):

        #self.packet_format = f"HH{1024}H"
        self.packet_format = f"HHH{128}B"
        self.recorded_chunk_number = 0
        self.played_chunk_number = 0

        def receive_and_buffer():
            message, source_address = self.receiving_sock.recvfrom(
                Intercom_buffer.MAX_MESSAGE_SIZE)
            #Now we need the chunk number, the bitplane, the channel and the payload.
            chunk_number, bitplane, channel, *chunk = struct.unpack(
                self.packet_format, message)
            #unpacked = np.array(chunk, dtype= np.uint16).view('uint8')
            chunk = np.array(chunk, dtype=np.uint8)
            unpacked = np.unpackbits(chunk)
            unpacked16 = np.asarray(unpacked, dtype=np.uint16)
            #print(unpacked)
            #print(*chunk)
            #Store the packet body in the buffer, in the corresponding channel.
            self._buffer[chunk_number %
                         self.cells_in_buffer][:,
                                               channel] |= unpacked << bitplane

            return chunk_number

        def record_send_and_play(indata, outdata, frames, time, status):
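            # Only the 15 most significant bitplanes are sent (shifts 15 down to 1); the LSB plane is skipped.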
            for i in range(1, 16):
                bitsCanal0 = np.packbits(indata[:, 0] >> (16 - i) & 1)
                bitsCanal1 = np.packbits(indata[:, 1] >> (16 - i) & 1)
                #bitsCanal0 = np.array(bitsCanal0, dtype=np.uint8)
                #bitsCanal1 = np.array(bitsCanal0, dtype=np.uint8)
                message = struct.pack(self.packet_format,
                                      self.recorded_chunk_number, i, 0,
                                      *bitsCanal0)
                self.sending_sock.sendto(
                    message, (self.destination_IP_addr, self.destination_port))

                message = struct.pack(self.packet_format,
                                      self.recorded_chunk_number, i, 1,
                                      *bitsCanal1)
                self.sending_sock.sendto(
                    message, (self.destination_IP_addr, self.destination_port))

                #print("bit de indata enviado:", 16-i, "\ncanal izquierdo:\n", bitsCanal0, "\ncanal derecho:\n", bitsCanal1)
                #print("--------------------------------------------------------")

            self.recorded_chunk_number = (self.recorded_chunk_number +
                                          1) % self.MAX_CHUNK_NUMBER
            chunk = self._buffer[self.played_chunk_number %
                                 self.cells_in_buffer]
            self._buffer[self.played_chunk_number %
                         self.cells_in_buffer] = self.generate_zero_chunk()
            self.played_chunk_number = (self.played_chunk_number +
                                        1) % self.cells_in_buffer
            outdata[:] = chunk
            if __debug__:
                sys.stderr.write(".")
                sys.stderr.flush()

        with sd.Stream(samplerate=self.frames_per_second,
                       blocksize=self.frames_per_chunk,
                       dtype=np.int16,
                       channels=self.number_of_channels,
                       callback=record_send_and_play):
            print("-=- Press CTRL + c to quit -=-")
            first_received_chunk_number = receive_and_buffer()
            self.played_chunk_number = (
                first_received_chunk_number -
                self.chunks_to_buffer) % self.cells_in_buffer
            while True:
                receive_and_buffer()