示例#1
0
 def play(self, snd_idx, loop=True):
     """Start playback of the pre-loaded wav at index *snd_idx*.

     A fresh SoundSource is created on every call; the previous source
     (if any) is simply dropped. The event is appended to the parent's
     log file.
     """
     self.snd_src = SoundSource(gain=0.25, position=[0,0,0])
     self.snd_src.looping = loop
     # snd_data holds the decoded wav buffers, indexed like wav_file_paths.
     self.snd_src.queue( self.snd_data[snd_idx] )
     self.sink.play(self.snd_src)
     self.sink.update()
     writeFile(self.parent.log_file_path, '%s, [audioOut], sound (%s) play starts.\n'%(get_time_stamp(), self.wav_file_paths[snd_idx]))
def run():
    """Loop a wav file and sweep its source from x=10 down past x=-10."""
    if len(sys.argv) >= 2:
        wav_path = sys.argv[1]
    else:
        print("Usage: %s wavefile" % os.path.basename(sys.argv[0]))
        print("    Using an example wav file...")
        wav_path = os.path.join(os.path.dirname(__file__), "hey.wav")

    sink = SoundSink()
    sink.activate()

    src = SoundSource(position=[10, 3, 3])
    src.looping = True
    src.queue(load_wav_file(wav_path))

    sink.play(src)
    # Move one unit left per second until the source passes x = -10.
    while src.position[0] > -10:
        px, py, pz = src.position
        src.position = [px - 1, py, pz]
        sink.update()
        print("playing at %r" % src.position)
        time.sleep(1)
    print("done")
示例#3
0
 def __init__(self):
     """Set up the OpenAL sink, listener and the left/right beep sources."""
     self.sink = SoundSink()
     self.sink.activate()
     self.listener = SoundListener()
     self.listener.orientation = (0,0,1,0,0,1)
     # One source per side: x = -1 (left) and x = +1 (right).
     side_xs = range(-1, 2, 2)
     self.sources = [SoundSource(position=[x, 0, 0], pitch=1) for x in side_xs]
     self.intense_sources = [SoundSource(position=[x, 0, 0], pitch=1) for x in side_xs]
     #pitch: 5,4,3,2,1
     self.data = load_wav_file("./beep.wav")
     self.intense_data = load_wav_file("./high_beep.wav")
     # (disabled) the mild sources used to loop self.data:
     # for source in self.sources:
     #     source.looping = True
     #     source.queue(self.data)
     for intense in self.intense_sources:
         intense.looping = True
         intense.queue(self.intense_data)
     # Thread-wrapper pool order: mild left, mild right, intense left, intense right.
     self.threading_pool = [sound_source(self.sink, src)
                            for src in self.sources + self.intense_sources]
     #self.sink.play(source1)
     #self.sink.play(source2)
     self.cutoff = [i * (config.MAX_DISTANCE / len(self.sources))
                    for i in range(len(self.sources))]
示例#4
0
def run():
    """Play a wav once at the position read from the x/y/z input widgets."""
    if len(sys.argv) >= 2:
        wav_path = sys.argv[1]
    else:
        print("Usage: %s wavefile" % os.path.basename(sys.argv[0]))
        print("    Using an example wav file...")
        wav_path = os.path.join(os.path.dirname(__file__), "hey.wav")

    sink = SoundSink()
    sink.activate()

    # Source position comes from the x/y/z entry widgets (defined elsewhere);
    # .get() is called left-to-right, preserving the original evaluation order.
    pos = [int(widget.get()) for widget in (x, y, z)]
    src = SoundSource(position=pos)
    src.looping = False

    src.queue(load_wav_file(wav_path))

    sink.play(src)

    sink.update()
    time.sleep(2)
    print("done")
示例#5
0
            def start():
                """Initialise the OpenAL audio system and the global ping state."""
                # Prepare audio feedback
                try:
                    global sink
                    global source
                    global data
                    global ping_delay
                    global ping_pos_x
                    global ping_pos_z
                    global ping_pitch
                    global ping_delay_mult
                    sound_beep = resource_path("beep.wav")
                    # Initial ping parameters; consumed elsewhere via globals.
                    ping_delay = 1000
                    ping_pos_x = 0.0
                    ping_pos_z = 0.0
                    ping_pitch = 1.0
                    ping_delay_mult = 2

                    sink = SoundSink()
                    sink.activate()
                    source = SoundSource(position=[ping_pos_x, 0, ping_pos_z])
                    # source.looping = False
                    source.gain = 50.0
                    data = load_wav_file(sound_beep)
                    # NOTE(review): `data` is loaded but never queued here —
                    # presumably the ping loop queues it later; confirm.
                    sink.play(source)
                    print("Audio system started")
                except Exception as e:
                    print("E.Starting Audio: " + str(e))
                    addLogEntry(e)
def setup(retinal_encoded_image_cv2_format):
    """Create one looping sound source per retinal-encoded pixel and start
    them all playing through the module-level `soundsink`.

    Python 2 code (uses `xrange`); relies on module globals for the sink,
    the source grid, and the unit-vector map.
    """
    global sound_folder_location, is_setup, sound_sources, unit_vector_map

    retinal_encoded_image_width = len(retinal_encoded_image_cv2_format[0])
    retinal_encoded_image_height = len(retinal_encoded_image_cv2_format)

    # Generate unit vector map
    distance_to_near_plane = 0.5  # arbitrary distance
    pixel_width, pixel_height = ssf_core.calc_pixel_size(
        distance_to_near_plane, retinal_encoded_image_h_fov_rad,
        retinal_encoded_image_v_fov_rad, retinal_encoded_image_width,
        retinal_encoded_image_height)
    unit_vector_map = ssf_core.generate_unit_vector_map(
        pixel_width, pixel_height, retinal_encoded_image_width,
        retinal_encoded_image_height, distance_to_near_plane)

    sound_folder_location = get_current_path() + "/sound_files/"
    soundsink.activate()

    # Setting up the listner using the defaults specified
    # here: https://media.readthedocs.org/pdf/pyal/latest/pyal.pdf
    listener = SoundListener()
    listener.position = (0, 0, 0)
    listener.velocity = (0, 0, 0)
    # (x-direction, y-direction, z-direction, x-rotation, y-rotation, z-rotation)
    listener.orientation = (0, 0, -1, 0, 1, 0)

    # Load the audio
    sample_audio = load_wav_file(sound_folder_location + "sample_audio.wav")

    # Setting up the sound sources for each receptive field (i.e. each
    # pixel in the retinal encoded image)
    for row in xrange(retinal_encoded_image_height):
        sound_sources.append([])
        for column in xrange(retinal_encoded_image_width):
            # A sound source is an object that emits sounds
            sound_sources[row].append(SoundSource(position=[0, 0, -1]))

            # Specifying if the source should loop the sound
            sound_sources[row][column].looping = True

            # Queueing appends the sound to the source for
            # processing and playback
            sound_sources[row][column].queue(sample_audio)

            # Setting the gain for each source:
            #   Assuming the total gain should sum up to the max of a single
            #   sample_audio file, then each sound sources gain should be
            #   divided by the number of sound emitters.
            sound_sources[row][column].gain = sound_sources[row][column].gain       \
                                              / (retinal_encoded_image_height       \
                                              * retinal_encoded_image_width)

            soundsink.play(sound_sources[row][column])

    soundsink.update()

    is_setup = True
    # NOTE(review): this nested Start() is never called within this chunk;
    # it builds an unrelated global sink/source pair.
    def Start():
        #Prepare audio feedback
        try:
            global sink
            global source
            global data

            sink = SoundSink()
            sink.activate()
            source = SoundSource(position=[0, 0, 50])
            source.looping = False
            source.gain = 50.0
            # NOTE(review): `sound_beep` is not defined in this scope —
            # presumably a module global. `data` is loaded but the queue
            # call below is commented out, so nothing is enqueued.
            data = load_wav_file(sound_beep)
            #source.queue(data)
            sink.play(source)
            print("Audio system started")
        except Exception as e:
            print("E.Starting Audio:" + str(e))
            AddLogEntry(e)
示例#8
0
 def play_sfx(self, name, offset=(0., 0., 0.), volume=1.):
     """Play the named sound effect at *offset* relative to the listener."""
     sfx = SoundSource()
     sfx.queue(load_wav_file(get_sfx(name)))
     sfx.gain = volume
     # Position the effect relative to where the listener currently is.
     listener_pt = new_pt(*self.sink.listener.position)
     sfx.position = tuple(listener_pt + new_pt(*offset))
     self.sink.play(sfx)
def run():
    """Loop a wav file and slide its source along the x axis to -10."""
    if len(sys.argv) < 2:
        print("Usage: %s wavefile" % os.path.basename(sys.argv[0]))
        print("    Using an example wav file...")
        fname = os.path.join(os.path.dirname(__file__), "hey.wav")
    else:
        fname = sys.argv[1]

    sink = SoundSink()
    sink.activate()

    moving_src = SoundSource(position=[10, 3, 3])
    moving_src.looping = True
    moving_src.queue(load_wav_file(fname))
    sink.play(moving_src)

    # Step the source one unit left per second until it passes x = -10.
    while moving_src.position[0] > -10:
        cur_x, cur_y, cur_z = moving_src.position
        moving_src.position = [cur_x - 1, cur_y, cur_z]
        sink.update()
        print("playing at %r" % moving_src.position)
        time.sleep(1)
    print("done")
示例#10
0
class AudioOut:
    """Plays pre-loaded wav files through OpenAL and logs events to the
    parent's log file."""

    def __init__(self, parent, wav_file_paths):
        """Pre-decode every wav file and activate the sound sink.

        parent: owner object; only its `log_file_path` attribute is used.
        wav_file_paths: list of wav file paths to pre-load.
        """
        self.parent = parent
        self.wav_file_paths = wav_file_paths
        self.snd_src = None  # current SoundSource; None while idle
        self.sink = SoundSink()
        self.sink.activate()
        self.listener = SoundListener()
        self.sink.listener = self.listener
        # Decode all wav files up-front so play() never touches the disk.
        # (FIX: replaced the manual append loop with a comprehension and
        # removed a duplicate `self.wav_file_paths` assignment.)
        self.snd_data = [load_wav_file(fp) for fp in wav_file_paths]
        writeFile(self.parent.log_file_path, '%s, [audioOut], audioOut mod init.\n'%(get_time_stamp()))

    # --------------------------------------------------

    def play(self, snd_idx, loop=True):
        """Start playing the pre-loaded sound at index *snd_idx*."""
        self.snd_src = SoundSource(gain=0.25, position=[0,0,0])
        self.snd_src.looping = loop
        self.snd_src.queue( self.snd_data[snd_idx] )
        self.sink.play(self.snd_src)
        self.sink.update()
        writeFile(self.parent.log_file_path, '%s, [audioOut], sound (%s) play starts.\n'%(get_time_stamp(), self.wav_file_paths[snd_idx]))

    # --------------------------------------------------

    def move(self, pos):
        """Move the current source to *pos*; gain is halved when centred."""
        self.snd_src.position = [ pos[0], pos[1], pos[2] ]
        if pos[0] == 0: self.snd_src.gain = 0.25
        else: self.snd_src.gain = 0.5
        self.sink.update()

    # --------------------------------------------------

    def stop(self):
        """Stop the current source (if any) and log the stop event."""
        # FIX: identity comparison with None (`is not None`), not `!= None`.
        if self.snd_src is not None:
            self.sink.stop(self.snd_src)
            self.sink.update()
            self.snd_src = None
        writeFile(self.parent.log_file_path, '%s, [audioOut], sound stopped.\n'%(get_time_stamp()))
    def __init__(self, conn: DirectConnection):
        """Open the audio stack, register cleanup handlers and start the
        playback source.

        conn: connection used to ship encoded mic audio to the peer.
        """
        self.p = PyAudio()
        atexit.register(self.p.terminate)
        self.stream = None  # type: Stream

        self.sink = SoundSink()
        self.sink.activate()

        self.source = SoundSource()

        # Dropping the sink at interpreter exit releases the OpenAL device.
        def close():
            del self.sink

        atexit.register(close)

        self.source.gain = 1.0
        # Random initial position; `pos` is presumably a property whose
        # setter also moves the OpenAL source — confirm on the class.
        self.pos = self._pos = (random(), random())
        self.sink.play(self.source)

        self.conn = conn
        self.mic_data = Queue()
        self.speaker_data = Queue()
        self.energy_tracker = EnergyTracker(self.rate)
示例#12
0
def make_sounds(items):
    """Announce each detected item via TTS, spatialised by its bounding box."""
    sink = SoundSink()
    sink.activate()

    print("Frame with these items: " + str([item.label for item in items]))

    for item in items:
        label = item.label.strip()
        center = item.box.center
        box_area = item.box.area

        # Map horizontal box centre to roughly [-5, 5] on x, and relative
        # box area to distance on -z (bigger box -> closer source).
        src_x = (center[0] - frame_width / 2) / (frame_width / 2) * 5
        src_z = -1 / math.sqrt(box_area / frame_area)

        print("{label} @ ({x:2f}, {z:2f})".format(label=label,
                                                  x=src_x,
                                                  z=src_z))

        base_name = os.path.join(temp_path, label)
        wav_file = base_name + ".wav"
        mp3_file = base_name + '.mp3'

        if not os.path.exists(wav_file):
            # Synthesise the label once and cache a wav conversion of it.
            gTTS(label).save(mp3_file)
            AudioSegment.from_mp3(mp3_file).export(wav_file, format="wav")

        data = load_wav_file(wav_file)

        # Read the clip length so we can block until playback finishes.
        with contextlib.closing(wave.open(wav_file, 'r')) as f:
            duration = f.getnframes() / float(f.getframerate())

        src = SoundSource(position=[0, 0, 0])
        src.looping = False
        src.queue(data)

        sink.play(src)
        src.position = [src_x, 0, src_z]
        sink.update()
        time.sleep(duration + 0.1)
def setup(retinal_encoded_image_cv2_format):
    """Build a grid of looping, row-specific ambient sound sources — one per
    retinal-encoded pixel — and start them all through the module-level
    `soundsink`.

    Python 2 code (uses `xrange`); state is kept in module globals.
    """
    global sound_folder_location, is_setup, sound_sources, unit_vector_map, gain_scaled, alert_sound_sources

    retinal_encoded_image_width = len(retinal_encoded_image_cv2_format[0])
    retinal_encoded_image_height = len(retinal_encoded_image_cv2_format)

    # Generate unit vector map
    distance_to_near_plane = 0.5 # arbitrary distance
    pixel_width, pixel_height = ssf_core.calc_pixel_size(distance_to_near_plane,
                                                         retinal_encoded_image_h_fov_rad,
                                                         retinal_encoded_image_v_fov_rad,
                                                         retinal_encoded_image_width,
                                                         retinal_encoded_image_height)
    unit_vector_map = ssf_core.generate_unit_vector_map(pixel_width,
                                                        pixel_height,
                                                        retinal_encoded_image_width,
                                                        retinal_encoded_image_height,
                                                        distance_to_near_plane)

    sound_folder_location = get_current_path() + "/sound_files/"
    soundsink.activate()

    # Setting up the listner using the defaults specified
    # here: https://media.readthedocs.org/pdf/pyal/latest/pyal.pdf
    listener = SoundListener()
    listener.position = (0, 0, 0)
    listener.velocity = (0, 0, 0)
    # (x-direction, y-direction, z-direction, x-rotation, y-rotation, z-rotation)
    listener.orientation = (0, 0, -1, 0, 1, 0)

    # Load the audio
    # NOTE(review): `top_middle`..`large_water_sample` are queued below;
    # `bottom_middle`/`bottom` are loaded but unused here, and `beep_short`
    # is only referenced by the disabled alert code at the bottom.
    large_water_sample = load_wav_file(sound_folder_location + "large_water_sample.wav")
    water_lapping_wind_sample = load_wav_file(sound_folder_location + "water_lapping_wind_sample.wav")
    top = load_wav_file(sound_folder_location + "top.wav")
    top_middle = load_wav_file(sound_folder_location + "top_middle.wav")
    middle = load_wav_file(sound_folder_location + "middle.wav")
    bottom_middle = load_wav_file(sound_folder_location + "bottom_middle.wav")
    bottom = load_wav_file(sound_folder_location + "bottom.wav")
    beep_short = load_wav_file(sound_folder_location + "beep_short.wav")

    # To avoid clipping, the gain for each sound source needs to be
    # scaled down relative to the number of sound emitters
    gain_scaled = 1.0 / (retinal_encoded_image_width * retinal_encoded_image_height)
    gain_scaled = gain_scaled + 0.02

    # Setting up the sound sources for each receptive field (i.e. each
    # pixel in the retinal encoded image)
    for row in xrange(retinal_encoded_image_height):
        sound_sources.append([])
        for column in xrange(retinal_encoded_image_width):
            # A sound source is an object that emits sounds
            sound_sources[row].append(SoundSource(position=[0, 0, 0]))

            # Specifying if the source should loop the sound
            sound_sources[row][column].looping = True

            # Queueing appends the sound to the source for
            # processing and playback
            # Each row gets its own sample; rows beyond 4 (if any) get none.
            if row == 0:
                sound_sources[row][column].queue(top)
            elif row == 1:
                sound_sources[row][column].queue(top_middle)
            elif row == 2:
                sound_sources[row][column].queue(middle)
            elif row == 3:
                sound_sources[row][column].queue(water_lapping_wind_sample)
            elif row == 4:
                sound_sources[row][column].queue(large_water_sample)

            # Scale gain
            sound_sources[row][column].gain = gain_scaled

            # Play the sound
            soundsink.play(sound_sources[row][column])

    # Setting up the sound sources for the minimum distance alert
    # 0 is left, 1 is right
    # alert_sound_sources.append(SoundSource(position=[0, 0, 0]))
    # alert_sound_sources[0].looping = True
    # alert_sound_sources[0].queue(beep_short)
    # alert_sound_sources[0].gain = 0.0
    # soundsink.play(alert_sound_sources[0])
    # alert_sound_sources.append(SoundSource(position=[0, 0, 0]))
    # alert_sound_sources[1].looping = True
    # alert_sound_sources[1].queue(beep_short)
    # alert_sound_sources[1].gain = 0.0
    # soundsink.play(alert_sound_sources[1])

    soundsink.update()

    is_setup = True
示例#14
0
"""Utility functions for loading sounds."""
import os
import sys
import wave
from openal.audio import SoundData
from openal.loaders import load_wav_file
from openal.audio import SoundSink, SoundSource, SoundListener
import time
import math

if __name__ == "__main__":
    # Demo: two copies of the same clip, one at normal pitch sweeping
    # left-right, one at double pitch fixed at the centre.
    sink = SoundSink()
    sink.activate()
    listener = SoundListener()
    listener.orientation = (0,0,1,0,0,1)
    source1 = SoundSource(position=[0, 0, 3])
    source1.looping = True
    source2 = SoundSource(position=[0, 0, 3],pitch=2.0)
    source2.looping = True
    data2 = load_wav_file("./hey.wav")
    source1.queue(data2)
    source2.queue(data2)
    sink.play(source1)
    sink.play(source2)
    t = 0
    while True:
        x_pos = 5*math.sin(math.radians(t))
        source1.position = [x_pos, source1.position[1], source1.position[2]]
        source2.position = [0, source2.position[1], source2.position[2]]
        sink.update()
        print("playing source 1 at %r" % source1.position)
        # FIX: `t` was never advanced and the loop never slept, so the
        # source was stuck at x == 0 while the loop busy-waited and
        # spammed output. Advance the angle and pace the loop.
        time.sleep(0.1)
        t += 5
示例#15
0
# Command line: dae model file, sample count, lowest/highest frequency.
if len(sys.argv) < 5:
    print(
        'Usage: python3 player.py filename.dae samples lowest_frequency highest_frequency'
    )
    sys.exit(-1)

# magic for openal to use default audio output
device_name = audio.alc.alcGetString(None,
                                     audio.alc.ALC_DEFAULT_DEVICE_SPECIFIER)
device = audio.alc.alcOpenDevice(device_name)

# cleanup device and temp files
atexit.register(exit_handler)

loader = LoaderDae(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]),
                   int(sys.argv[4]))
res = gen_waves(loader.load())

# One looping source per generated wave, positioned at the wave's point
# (each `res` entry holds the wav file at index 0 and the position at 1).
sources = [SoundSource(position=p[1]) for p in res]
for s in sources:
    s.looping = True
# FIX: side effects belong in plain loops, not throwaway list
# comprehensions; pair each source with its wave entry via zip.
for src, entry in zip(sources, res):
    src.queue(load_wav_file(entry[0]))
sink = SoundSink()
sink.activate()
print(len(sources))
print(res)
for s in sources:
    sink.play(s)
sink.update()
# Keep the process alive; OpenAL does the mixing in the background.
while True:
    sleep(0.1)
import pygame
import random
from openal.audio import SoundSink, SoundSource, SoundListener
from openal.loaders import load_wav_file
from pygame.locals import QUIT, K_LEFT, K_RIGHT, KEYDOWN
import sys

__author__ = 'vamc'

#Initialize OpenAL related components
sound_sink = SoundSink()
sound_source = SoundSource()
listener = SoundListener()
sound_sink.activate()
# NOTE(review): assigns the private `_listener` attribute directly; the
# public `listener` attribute may be the intended interface — confirm.
sound_sink._listener = listener

# Queue the looping background clip on the single source.
source_sound_file = "asw.wav"
sound_data = load_wav_file(source_sound_file)
sound_source.queue(sound_data)
sound_source.looping = True

#initialize pygame and screen
pygame.init()
screen_width = 600
screen_height = 600
screen = pygame.display.set_mode((screen_width, screen_height))
screen.fill((0, 255, 0))
pygame.display.set_caption('Snake to the sound')

#Create Snake
# Initial x coordinates of the snake segments — presumably paired with y
# coordinates defined past the end of this chunk.
snake_xpos = [300, 300, 300]
示例#17
0
import time
import math
from openal.audio import SoundSink, SoundSource
from openal.loaders import load_wav_file

if __name__ == "__main__":
    # Demo: a single looping blip sweeping side-to-side on a sine path.
    sink = SoundSink()
    sink.activate()
    blip = SoundSource(position=[0, 0, 0])
    blip.looping = True
    blip.queue(load_wav_file("./sounds/Blip_Select.wav"))
    sink.play(blip)
    angle_deg = 0
    while True:
        swept_x = 5 * math.sin(math.radians(angle_deg))
        blip.position = [swept_x, blip.position[1], blip.position[2]]
        sink.update()
        print("playing at %r" % blip.position)
        time.sleep(0.1)
        angle_deg += 5
示例#18
0
def place_sound(pos, wav, loop_bool):
    """Return a new SoundSource at *pos* with *wav* queued.

    loop_bool: whether the source should loop its queued sound.
    """
    new_source = SoundSource(position=pos)
    new_source.queue(wav)
    new_source.looping = loop_bool
    return new_source
示例#19
0
def callback(retinal_encoded_data):
    """ROS callback: map an 8x8 retinal-encoded depth image onto a grid of
    positioned, looping musical-note sources.

    First invocation builds the source grid (guarded by the module-level
    `soundSourcesSetup` flag); every invocation then repositions sources
    and gates their gain by the measured depth. Python 2 code (`xrange`).
    """
    retinal_encoded_image = bridge.imgmsg_to_cv2(retinal_encoded_data,
                                                 desired_encoding="32FC1")

    retinal_encoded_image_width = len(retinal_encoded_image[0])
    retinal_encoded_image_height = len(retinal_encoded_image)

    if (retinal_encoded_image_width != 8 or retinal_encoded_image_height != 8):
        rospy.logerr("The retinal_encoded_image must be an 8 x 8 image!!!")

    # Loading the audio data
    # NOTE(review): these eight files are reloaded on EVERY callback even
    # though they are only consumed during the one-time setup below.
    row_one_audio = load_wav_file(sound_folder_location +
                                  generate_sound_file_name(C_5))  # top
    row_two_audio = load_wav_file(sound_folder_location +
                                  generate_sound_file_name(B))
    row_three_audio = load_wav_file(sound_folder_location +
                                    generate_sound_file_name(A))
    row_four_audio = load_wav_file(sound_folder_location +
                                   generate_sound_file_name(G))
    row_five_audio = load_wav_file(sound_folder_location +
                                   generate_sound_file_name(F))
    row_six_audio = load_wav_file(sound_folder_location +
                                  generate_sound_file_name(E))
    row_seven_audio = load_wav_file(sound_folder_location +
                                    generate_sound_file_name(D))
    row_eight_audio = load_wav_file(sound_folder_location +
                                    generate_sound_file_name(C_4))  # bottom

    global soundSources
    global soundSourcesSetup
    if not soundSourcesSetup:
        soundsink.activate()

        # Setting up the listner (can actually comment this all out)
        listener = SoundListener()
        listener.position = (0, 0, 0)  # default = (0, 0, 0)
        listener.velocity = (0, 0, 0)  # default = (0, 0, 0)
        # (x-direction, y-direction, z-direction, x-rotation, y-rotation, z-rotation)
        # default = (0, 0, -1, 0, 1, 0)
        listener.orientation = (0, 0, -1, 0, 1, 0)

        # Setup sound sources for each "receptive field"
        # Create array of sound sources
        for y in xrange(retinal_encoded_image_height):
            soundSources.append([])
            for x in xrange(retinal_encoded_image_width):
                # A SoundSource is an object that emits sounds
                soundSources[y].append(SoundSource(position=[0, 0, 0]))
                # Specifying if the source should loop the sound
                soundSources[y][x].looping = True
                # Queueing appends the sound to the source for processing and playback
                if y == 0:
                    soundSources[y][x].queue(row_one_audio)
                elif y == 1:
                    soundSources[y][x].queue(row_two_audio)
                elif y == 2:
                    soundSources[y][x].queue(row_three_audio)
                elif y == 3:
                    soundSources[y][x].queue(row_four_audio)
                elif y == 4:
                    soundSources[y][x].queue(row_five_audio)
                elif y == 5:
                    soundSources[y][x].queue(row_six_audio)
                elif y == 6:
                    soundSources[y][x].queue(row_seven_audio)
                elif y == 7:
                    soundSources[y][x].queue(row_eight_audio)
                # Informing the SoundSink about the SoundSource so it knows a new sound emitter is available
                soundsink.play(soundSources[y][x])

                # TODO: fix start position
                soundSources[y][x].position = [
                    x - (retinal_encoded_image_width / 2),
                    y - (retinal_encoded_image_height / 2),
                    -random.randint(1, 9)
                ]

        soundsink.update()
        print('soundSources have been setup')

        soundSourcesSetup = True

    # TODO: update positions of sound sources
    x_scale_factor = 0.5
    z_power_scale_factor = 2.0

    # Per-source gain so the mix does not clip when all emitters play.
    gain_scaled = 1.0 / (retinal_encoded_image_width *
                         retinal_encoded_image_height)

    x_pos = 0
    for row in xrange(retinal_encoded_image_height):
        for column in xrange(retinal_encoded_image_width):
            # center x
            x_pos = column - ((retinal_encoded_image_width - 1.0) / 2.0)
            # scale x
            x_pos = x_pos * x_scale_factor  # right is positive

            # set to zero, since MeloSee doesn't use height
            y_pos = 0.0

            # distance
            z_pos = retinal_encoded_image[row][column]

            # Gain settings, dependant on z
            # Mute sources whose depth reading is missing, zero or beyond
            # the camera's maximum range.
            if math.isnan(z_pos) or                     \
               (z_pos == 0.0) or                        \
               (z_pos >= depth_camera_max_depth):
                soundSources[row][column].gain = 0.0
            else:
                soundSources[row][column].gain = gain_scaled

                # NB: z scaling is done after gain settings
                z_pos = depth_camera_min_depth + (
                    (z_pos - depth_camera_min_depth)
                    **(z_power_scale_factor * 1.0))

                soundSources[row][column].position = [x_pos, y_pos, -z_pos]

    soundsink.update()
class AudioManager:
    """Full-duplex audio bridge: records mic chunks with PyAudio, ships
    them over `conn` as base64-encoded JSON, and plays chunks received
    from the peer through a positional OpenAL source.
    """

    format = paInt16    # 16-bit signed integer samples
    chunk_size = 1024   # frames per PyAudio callback buffer
    rate = 44100        # sample rate, Hz

    # Multiplier mapping the logical `pos` point onto OpenAL coordinates.
    audio_scale = 10.

    def __init__(self, conn: DirectConnection):
        """Open the audio stack, register cleanup and start the source.

        conn: connection used to send encoded mic audio to the peer.
        """
        self.p = PyAudio()
        atexit.register(self.p.terminate)
        self.stream = None  # type: Stream

        self.sink = SoundSink()
        self.sink.activate()

        self.source = SoundSource()

        # Dropping the sink at interpreter exit releases the OpenAL device.
        def close():
            del self.sink

        atexit.register(close)

        self.source.gain = 1.0
        # Random initial position; the `pos` setter also moves the source.
        self.pos = self._pos = (random(), random())
        self.sink.play(self.source)

        self.conn = conn
        self.mic_data = Queue()
        self.speaker_data = Queue()
        self.energy_tracker = EnergyTracker(self.rate)

    @property
    def pos(self):
        """Logical 2-D position of this audio peer."""
        return self._pos

    @pos.setter
    def pos(self, pos):
        # Project the 2-D point onto the OpenAL x/z plane (y stays 0).
        self.source.position = (self.audio_scale * pos[0], 0,
                                self.audio_scale * pos[1])
        self._pos = pos

    def create_stream(self):
        """Start the PyAudio input stream and the forwarding thread.

        Raises RuntimeError if a stream was already created.
        """
        if self.stream:
            raise RuntimeError('Already created stream!')
        stream = self.p.open(format=self.format,
                             channels=1,
                             rate=self.rate,
                             input=True,
                             output=False,
                             stream_callback=self.callback,
                             frames_per_buffer=self.chunk_size)
        atexit.register(stream.close)
        atexit.register(stream.stop_stream)
        stream.start_stream()
        self.stream = stream
        Thread(target=self._stream_thread, daemon=True).start()

    def _stream_thread(self):
        """Forward mic chunks that pass the energy gate to the peer."""
        while True:
            new_chunk = self.mic_data.get()
            # FIX: np.fromstring is deprecated for binary input (removed in
            # newer NumPy); np.frombuffer is the supported equivalent.
            audio = np.frombuffer(new_chunk, dtype=np.int16)
            # Normalise to [-1, 1]; np.iinfo sees the original int16 dtype
            # because the RHS is evaluated before `audio` is rebound.
            audio = audio.astype(np.float32) / float(np.iinfo(audio.dtype).max)
            if not self.energy_tracker.update(audio):
                continue

            self.conn.send(
                json.dumps({
                    'type': 'audio',
                    'audio': b64encode(new_chunk).decode('ascii')
                }))

    def update(self):
        """Flush pending OpenAL state changes."""
        self.sink.update()

    def process_audio_message(self, message):
        """Queue a received base64 audio chunk on the playback source."""
        new_chunk = b64decode(message['audio'])
        print('Add', len(new_chunk))
        self.source.queue(
            SoundData(new_chunk, 1, 16, len(new_chunk), self.rate))

    def callback(self, in_data, frame_count, time_info, status):
        """PyAudio input callback: hand the raw chunk to the worker thread."""
        self.mic_data.put(in_data)
        return None, paContinue