Example #1
 def __init__(self, file="audio"):
     self.format = paInt16
     audio = PyAudio()
     if hasattr(settings, 'AUDIO_DEVICE_INDEX'):
         self.device_index = settings.AUDIO_DEVICE_INDEX
     elif hasattr(settings, 'AUDIO_DEVICE'):
         for i in range(audio.get_device_count()):
             curr_device = audio.get_device_info_by_index(i)
             print 'Found device: %s' % curr_device['name']
             if curr_device['name'] == settings.AUDIO_DEVICE:
                 print 'Assigning %s (Index: %s)' % (
                     settings.AUDIO_DEVICE, i
                 )
                 self.device_index = i
     elif not hasattr(self, 'device_index'):
         print 'No Audio device specified. Discovering...'
         for i in range(audio.get_device_count()):
             curr_device = audio.get_device_info_by_index(i)
             print 'Found device: %s' % curr_device['name']
             if curr_device['maxInputChannels'] > 0:
                 self.device_index = curr_device['index']
                 print 'Using device: %s' % curr_device['name']
                 break
     print audio.get_device_info_by_index(self.device_index)
     try:
         device = audio.get_device_info_by_index(self.device_index)
         calc_rate = device['defaultSampleRate']
         print 'Discovered Sample Rate: %s' % calc_rate
         self.rate = int(calc_rate)
     except:
         print 'Guessing sample rate of 44100'
         self.rate = 44100
     self.channel = 1
     self.chunk = 1024
     self.file = file
Example #3
def ask_for_device(
    p: pyaudio.PyAudio,
    as_input: bool = False
) -> Optional[int]:  # todo return device_info? and filter for input/output
    """returns the device_id selected by the user"""

    if p.get_device_count() == 0:
        raise Exception("No devices available")

    default_device_index = p.get_default_input_device_info(
    )["index"] if as_input else p.get_default_output_device_info()["index"]

    for i in (get_input_device_indexes(p)
              if as_input else get_output_device_indexes(p)):
        info = p.get_device_info_by_index(i)
        index = info["index"]
        api_name = p.get_host_api_info_by_index(info["hostApi"])["name"]
        device_name = info["name"]
        print(f"{index}:  \t {api_name} \t {device_name}")

    device_id = None
    while device_id is None:
        user_input = input("Choose device index: ")
        if user_input:
            try:
                device_id = int(user_input)
            except ValueError:
                print(f"Could not cast to int: {user_input}")
        else:
            print(f"Using default device index: {default_device_index}")
            device_id = default_device_index

    return device_id
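
Note: Example #3 calls get_input_device_indexes and get_output_device_indexes, which are not shown here. A minimal sketch of what such helpers might look like, assuming they simply filter devices by channel count:

from typing import List
import pyaudio

def get_input_device_indexes(p: pyaudio.PyAudio) -> List[int]:
    # Keep only devices that expose at least one input channel.
    return [i for i in range(p.get_device_count())
            if p.get_device_info_by_index(i)["maxInputChannels"] > 0]

def get_output_device_indexes(p: pyaudio.PyAudio) -> List[int]:
    # Keep only devices that expose at least one output channel.
    return [i for i in range(p.get_device_count())
            if p.get_device_info_by_index(i)["maxOutputChannels"] > 0]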
Example #4
 def __init__(self):
     super(VCGame, self).__init__(255, 255, 255, 255, 800, 600)
     # frames_per_buffer
     self.num_samples = 1000
     # voice-control bar
     self.vbar = Sprite(cfg.BLOCK_IMAGE_PATH)
     self.vbar.position = 20, 450
     self.vbar.scale_y = 0.1
     self.vbar.image_anchor = 0, 0
     self.add(self.vbar)
     # Pikachu
     self.pikachu = Pikachu(cfg.PIKACHU_IMAGE_PATH)
     self.add(self.pikachu)
     # floor
     self.floor = cocos.cocosnode.CocosNode()
     self.add(self.floor)
     position = 0, 100
     for i in range(120):
         b = Block(cfg.BLOCK_IMAGE_PATH, position)
         self.floor.add(b)
         position = b.x + b.width, b.height
     # audio input
     audio = PyAudio()
     self.stream = audio.open(
         format=paInt16,
         channels=1,
         rate=int(audio.get_device_info_by_index(0)['defaultSampleRate']),
         input=True,
         frames_per_buffer=self.num_samples)
     # screen update
     self.schedule(self.update)
Example #5
File: game.py, Project: xfause/VoiceGame
    def __init__(self):
        super(VoiceGame, self).__init__(255, 255, 255, 255, 800, 600)

        # init voice
        self.NUM_SAMPLES = 1000
        self.LEVEL = 1500

        self.voicebar = Sprite('block.png', color=(0, 0, 255))
        self.voicebar.position = 20, 450
        self.voicebar.scale_y = 0.1
        self.voicebar.image_anchor = 0, 0
        self.add(self.voicebar)

        self.player = Player()
        self.add(self.player)

        self.floor = cocos.cocosnode.CocosNode()
        self.add(self.floor)
        pos = 0, 100
        for i in range(100):
            b = Block(pos)
            self.floor.add(b)
            pos = b.x + b.width, b.height
        
        # voice input
        pa = PyAudio()
        SAMPLING_RATE = int(pa.get_device_info_by_index(0)['defaultSampleRate'])
        self.stream = pa.open(format=paInt16, channels=1, rate=SAMPLING_RATE, input=True, frames_per_buffer=self.NUM_SAMPLES)

        self.schedule(self.update)
Example #6
File: Game2.py, Project: Guaderxx/Games
	def __init__(self):
		super(VCGame, self).__init__(255, 255, 255, 255, 800, 600)
		# initialize parameters
		# frames_per_buffer
		self.numSamples = 1000
		# voice-control bar
		self.vbar = Sprite('black.png')
		self.vbar.position = 20, 450
		self.vbar.scale_y = 0.1
		self.vbar.image_anchor = 0, 0
		self.add(self.vbar)
		# Pikachu class
		self.pikachu = Pikachu()
		self.add(self.pikachu)
		# cocosnode sprite container
		self.floor = cocos.cocosnode.CocosNode()
		self.add(self.floor)
		position = 0, 100
		for i in range(120):
			b = Block(position)
			self.floor.add(b)
			position = b.x + b.width, b.height
		# audio input
		audio = PyAudio()
		SampleRate = int(audio.get_device_info_by_index(0)['defaultSampleRate'])
		self.stream = audio.open(format=paInt16, 
								 channels=1, 
								 rate=SampleRate, 
								 input=True, 
								 frames_per_buffer=self.numSamples)
		self.schedule(self.update)
Example #7
def Voice_Game():
    button_1 = PyKeyboard()
    NUM_SAMPLES = 1000
    pa = PyAudio()
    SAMPLING_RATE = int(
        pa.get_device_info_by_index(0)['defaultSampleRate'])
    # Open the input stream once, outside the loop, instead of re-opening it
    # on every iteration.
    stream = pa.open(format=paInt16,
                     channels=1,
                     rate=SAMPLING_RATE,
                     input=True,
                     frames_per_buffer=NUM_SAMPLES)
    while True:
        string_audio_data = stream.read(NUM_SAMPLES)
        k = max(struct.unpack('1000h', string_audio_data))
        if k > 3000:
            button_1.press_key("a")
            time.sleep(1)
            button_1.release_key("a")
        if k > 8000:
            #    button_1.tap_key("^[[D")
            button_1.tap_key("w")
            time.sleep(1)
            button_1.release_key("w")
        if k > 13000:
            #    button_1.tap_key("^[[D")
            button_1.tap_key("s")
            time.sleep(1)
            button_1.release_key("s")
        time.sleep(0.1)
    return 0
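
Note: struct.unpack('1000h', ...) in Example #7 assumes exactly NUM_SAMPLES (1000) 16-bit samples per read. A slightly more general sketch (an assumption, not part of the original) derives the sample count from the buffer length:

import struct

def peak_level(data: bytes) -> int:
    # Interpret the buffer as little-endian signed 16-bit samples and return
    # the largest amplitude observed (0..32767).
    count = len(data) // 2
    return max(abs(s) for s in struct.unpack(f'<{count}h', data))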
Example #8
 def __init__(self):
     super(VoiceGame, self).__init__(255, 255, 255, 255)
     # add the info label
     self.info = cocos.text.Label('Volume level',
                                  font_size=18,
                                  bold=True,
                                  color=(255, 73, 49, 255))
     self.info.position = 50, 420
     self.add(self.info)
     # add the volume bar
     self.voice = cocos.text.Label('▉',
                                   font_size=20,
                                   color=(94, 245, 94, 255))
     self.voice.position = 50, 380
     self.add(self.voice, z=2)
     # add the background
     # self.bg = BackGround()
     # self.add(self.bg)
     # add Pikachu
     self.pika = Pika()
     self.add(self.pika, z=1)
     # audio input (this part is still unfamiliar)
     self.NUM_SAMPLES = 1000  # internal buffer block size
     self.LEVEL = 1500  # volume threshold for saving audio
     pa = PyAudio()
     SAMPLING_RATE = int(
         pa.get_device_info_by_index(0)['defaultSampleRate'])
     self.stream = pa.open(format=paInt16,
                           channels=1,
                           rate=SAMPLING_RATE,
                           input=True,
                           frames_per_buffer=self.NUM_SAMPLES)
     # update in real time
     self.schedule(self.update)
Example #9
File: Main.py, Project: Gini5/Game
    def __init__(self):
        #initialize canvas
        super(JumpPikachu, self).__init__(255, 255, 255, 255, 800, 1000)
        # frames per buffer
        self.numSamples = 1000

        #voice control bar
        self.vbar = Sprite('block.png')
        self.vbar.position = 20,450
        self.vbar.scale_y = 0.1     #bar height
        self.vbar.image_anchor = 0,0
        self.add(self.vbar)
        #Pikachu instance
        self.pikachu = Pikachu()
        self.add(self.pikachu)

        #cocosnode, draw blocks
        self.floor = cocos.cocosnode.CocosNode()
        self.add(self.floor)
        position = 0, 100
        for i in range(10):
            block = Block(position)
            self.floor.add(block)
            position = block.x + block.width, block.height

        # audio input
        audio = PyAudio()
        SampleRate = int(audio.get_device_info_by_index(0)['defaultSampleRate'])
        self.stream = audio.open(format = paInt16,
                                 channels = 1,
                                 rate = SampleRate,
                                 input = True,
                                 frames_per_buffer = self.numSamples)
        self.schedule(self.update)
Example #10
File: pygsr.py, Project: royalbhati/PyGSR
 def record(self, time, device_i=None):
     audio = PyAudio()
     print(audio.get_device_info_by_index(1))
     stream = audio.open(input_device_index=device_i,
                         output_device_index=device_i,
                         format=self.format,
                         channels=self.channel,
                         rate=self.rate,
                         input=True,
                         frames_per_buffer=self.chunk)
     print("REC: ")
     frames = []
     for i in range(0, int(self.rate / self.chunk * time)):
         data = stream.read(self.chunk)
         frames.append(data)
     stream.stop_stream()
     print("END")
     stream.close()
     audio.terminate()
     write_frames = open_audio(self.file, 'wb')
     write_frames.setnchannels(self.channel)
     write_frames.setsampwidth(audio.get_sample_size(self.format))
     write_frames.setframerate(self.rate)
     write_frames.writeframes(b''.join(frames))
     write_frames.close()
     self.convert()
Example #11
 def __init__(self):
     super(VCGame, self).__init__(255, 255, 255, 255, 800, 600)
     # initialize parameters
     self.numSamples = 1000
     # voice-control bar
     self.vbar = Sprite('black.png')
     self.vbar.position = 20, 450
     self.vbar.scale_y = 0.1
     self.vbar.image_anchor = 0, 0
     self.add(self.vbar)
     # Pikachu class
     self.pikachu = Pikachu()
     self.add(self.pikachu)
     # cocosnode sprite container
     self.floor = cocos.cocosnode.CocosNode()
     self.add(self.floor)
     position = 0, 100
     for i in range(120):
         b = Block(position)
         self.floor.add(b)
         position = b.x + b.width, b.height
     # audio input
     audio = PyAudio()
     SampleRate = int(audio.get_device_info_by_index(0)['defaultSampleRate'])
     self.stream = audio.open(format=paInt16, channels=1, rate=SampleRate,
                              input=True, frames_per_buffer=self.numSamples)
     self.schedule(self.update)
Example #12
def get_microphone_index(audio: pyaudio.PyAudio, name: str) -> int:
    for i in range(audio.get_device_count()):
        dev = audio.get_device_info_by_index(i)
        if dev["name"].lower() != name.lower():
            logger.debug(f"Skipping audio device '{dev['name']}' (index {i})")
            continue
        logger.debug(f"Using audio device '{dev['name']}' (index {i})")
        return i
Example #13
    def __init__(self, level):
        super(HowlGame, self).__init__(255, 255, 255, 255, 4000, 2000)

        # init voice
        self.NUM_SAMPLES = 1000  # pyAudio internal buffer block size
        self.LEVEL = 1500  # volume threshold for saving audio
        self.sample_count = 0  # number of samples taken
        self.average_volume = 0  # average volume

        # init floor
        self.floor = Floor(self, level)
        self.add(self.floor, 0)

        # init voiceBar
        self.voiceBar = Sprite('ground/black.png', color=(0, 0, 255))
        # self.voiceBar.position = 100, 460
        self.voiceBar.scale_y = 0.1
        self.voiceBar.image_anchor = 0, 0
        self.add(self.voiceBar, 1)

        # init role
        self.role = Role(self)
        self.role_run_to_right = False
        self.role_run_to_left = False
        self.add(self.role, 2)
        self.action = FadeOut(0.5)

        # init monster
        # self.monster_node = cocos.cocosnode.CocosNode()
        # for i in range(5):
        #     self.monster_node.add(Monster(self))
        # self.add(self.monster_node)

        # init flag
        # flag = cocos.sprite.Sprite('scene/flag.png')
        # flag.position = 3500, 120
        # flag.scale = 0.5
        # self.flag = flag
        # self.add(flag)

        # init stone
        self.stone = None
        self.boom = cocos.sprite.Sprite('scene/boom.png')

        # init gameoverlayer
        self.gameover = None

        # Open Audio Input
        pa = PyAudio()
        SAMPLING_RATE = int(
            pa.get_device_info_by_index(0)['defaultSampleRate'])
        self.stream = pa.open(format=paInt16,
                              channels=1,
                              rate=SAMPLING_RATE,
                              input=True,
                              frames_per_buffer=self.NUM_SAMPLES)

        self.schedule(self.update)
Example #14
def find_tigerjet_audio_device():
    p = PyAudio()
    for dev_idx in range(0, p.get_device_count()):
        if 'TigerJet' in p.get_device_info_by_index(dev_idx).get('name'):
            global TJ_DEV_INDEX
            TJ_DEV_INDEX = dev_idx
            break
    else:
        raise RuntimeError('TigerJet audio output device not found!')
Example #15
def __getAudioDevice(audio: pyaudio.PyAudio, deviceIndex, isInput: bool):
    parameter = f'Audio{("Input" if isInput else "Output")}Device'
    # Use default device if not specified
    if deviceIndex is None:
        try:
            if isInput:
                deviceIndex = audio.get_default_input_device_info().get(
                    'index')
            else:
                deviceIndex = audio.get_default_output_device_info().get(
                    'index')
        except Exception:
            deviceIndex = 0

    if deviceIndex is None:
        __error('Invalid audio device name or index', parameter)
        return (None, None)

    # Extract device index if possible
    try:
        deviceIndex = int(deviceIndex)
    except (TypeError, ValueError):
        deviceIndex = str(deviceIndex)

    # Resolve audio input & output devices over the list of devices
    # available
    deviceName = None
    device = 0
    for i in range(audio.get_device_count()):
        #print(audio.get_device_info_by_index( i ))
        device = audio.get_device_info_by_index(i)
        name = str(device.get("name"))
        # Resolve index by device name
        if isinstance(deviceIndex, str) and name.lower().startswith(
                deviceIndex.lower()):
            deviceIndex = i
        # Assign original device name
        if isinstance(deviceIndex, int) and (deviceIndex == i):
            deviceName = name
            break

    # check if device was resolved
    if deviceIndex is None or deviceName is None:
        __error('Invalid audio device name or index', parameter)

    channels = device.get('maxInputChannels') if isInput else device.get(
        'maxOutputChannels')
    if channels <= 0:
        __error(
            f'Device has no {"audio inputs" if isInput else "audio outputs"}',
            parameter)

    return (deviceIndex, deviceName)
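
Note: Example #15 relies on a module-level __error helper that is not shown. A minimal sketch, assuming it only reports the problem together with the configuration parameter it relates to:

def __error(message: str, parameter: str) -> None:
    # Hypothetical helper assumed by Example #15: report a configuration
    # problem and the parameter it concerns.
    print(f"{parameter}: {message}")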
Example #16
File: pygsr.py, Project: drneox/PyGSR
 def record(self, time, device_i=None):
     audio = PyAudio()
     print audio.get_device_info_by_index(1)
     stream = audio.open(input_device_index=device_i,output_device_index=device_i,format=self.format, channels=self.channel,
                         rate=self.rate, input=True,
                         frames_per_buffer=self.chunk)
     print "REC: "
     frames = []
     for i in range(0, self.rate / self.chunk * time):
         data = stream.read(self.chunk)
         frames.append(data)
     stream.stop_stream()
     print "END"
     stream.close()
     audio.terminate()
     write_frames = open_audio(self.file, 'wb')
     write_frames.setnchannels(self.channel)
     write_frames.setsampwidth(audio.get_sample_size(self.format))
     write_frames.setframerate(self.rate)
     write_frames.writeframes(''.join(frames))
     write_frames.close()
     self.convert()
Example #17
def get_device_info(p: pyaudio.PyAudio,
                    as_input: bool = False,
                    device_id: Optional[int] = None):
    while True:
        if device_id is None:
            device_id = ask_for_device(p, as_input)
        try:
            device_info = p.get_device_info_by_index(device_id)
            return device_info
        except IOError:
            print(f"Failed to open device index {device_id}!"
                  )  # todo improve message with device index and name
            device_id = None
        print("Please select another device")
Example #18
    def __init__(self):
        super(VoiceGame, self).__init__(255, 255, 255, 255, 2000, 2000)
        self.init_time = time.time()
        self.NUM_SAMPLES = 2048
        self.LEVEL = 1500
        self.highest_score = 0
        self.score = 0
        self.txt_score = cocos.text.Label(u'score:0',
                                          font_name="Arial",
                                          font_size=20,
                                          color=(0, 0, 0, 255))
        self.txt_score.position = 400, 440
        self.add(self.txt_score, 99999)

        self.highest_score = 0
        self.txt_highest_score = cocos.text.Label(u'High Score:0',
                                                  font_name="Arial",
                                                  font_size=20,
                                                  color=(0, 0, 0, 255))
        self.txt_highest_score.position = 200, 440
        self.add(self.txt_highest_score, 99999)

        self.voice_bar = Sprite('block.png', color=(0, 0, 255))
        self.voice_bar.position = 20, 450
        self.voice_bar.scale_y = 0.1
        self.voice_bar.image_anchor = 0, 0
        self.add(self.voice_bar)

        self.object = Object()
        self.add(self.object)

        self.floor = cocos.cocosnode.CocosNode()
        self.add(self.floor)
        pos = 0, 100
        for i in range(100):
            b1 = Block(pos)
            b2 = Block(pos)
            b3 = Block(pos)
            self.floor.add(b1)
            self.floor.add(b2)
            self.floor.add(b3)
            pos = b1.x + b1.width, b1.height

        pa = PyAudio()
        sampling_rate = int(pa.get_device_info_by_index(0)['defaultSampleRate'])
        self.stream = pa.open(format=paInt16, channels=1, rate=sampling_rate, input=True, frames_per_buffer=self.NUM_SAMPLES)

        self.schedule(self.update)
Example #19
File: game.py, Project: zybing/ppx
    def __init__(self):
        super(VoiceGame, self).__init__(255, 255, 255, 255, 800, 600)
        # init voice
        self.NUM_SAMPLES = 1000
        par = {"image": "black.png"}
        self.voicebar = Sprite(**par)
        self.voicebar.position = 20, 450
        self.voicebar.scale_y = 0.1
        self.voicebar.image_anchor = 0, 0
        self.add(self.voicebar)
        self.label = cocos.text.RichLabel('3 LIVES',
                                          font_name='Times New Roman',
                                          font_size=32,
                                          anchor_x='center',
                                          anchor_y='center',
                                          color=(255, 0, 0, 255))

        self.label.position = 330, 400

        self.add(self.label)

        self.ppx = PPX()
        self.add(self.ppx)

        # layer order follows the order in which layers are added
        self.floor = cocos.cocosnode.CocosNode()
        self.add(self.floor)
        pos = 0, 100
        self.maxx = 0
        for i in range(30):
            b = Block(pos)
            self.floor.add(b)
            pos = b.x + b.width, b.height
            if i == 29:
                self.maxx = b.x

        # open sound input
        pa = PyAudio()
        sampling_rate = int(
            pa.get_device_info_by_index(0)['defaultSampleRate'])
        self.stream = pa.open(format=paInt16,
                              channels=1,
                              rate=sampling_rate,
                              input=True,
                              frames_per_buffer=self.NUM_SAMPLES)

        self.schedule(self.update)
Example #20
class AfWidget(GridLayout):
    def __init__(self, **kwargs):
        self.p = PyAudio()
        self.rows = 1
        self.cols = 1
        GridLayout.__init__(self, **kwargs)

        self.mainPanel = TabbedPanel()
        print "WIDTH", self.width
        self.mainPanel.default_tab_text = "AF Output Devices"

        self.add_widget(self.mainPanel)
        self.inputPanel = TabbedPanelHeader(text="AF Input Devices")
        self.inputPanel.content = AfInputManager()
        self.mainPanel.add_widget(self.inputPanel)
        self.mainPanel.tab_width = 200
        #topLayout = BoxLayout(orientation = "vertical")

        #topLayout.add_widget(Label(text="Input device", ))
        #self.inputDevs = Spinner(text = "Select input")
        #topLayout.add_widget(self.inputDevs)

        #topLayout.add_widget(Label(text="Output device", ))
        #self.outputDevs = Spinner(text = "Select output")
        #topLayout.add_widget(self.outputDevs)

        #self.updateSoundDevices()
        #self.add_widget(topLayout)

    def updateSoundDevices(self):
        api_cnt = self.p.get_host_api_count()
        dev_cnt = self.p.get_device_count()
        inputs = []
        outputs = []
        print "Number of API's", api_cnt, "Number of sound devices", dev_cnt
        for i in range(dev_cnt):
            d = self.p.get_device_info_by_index(i)
            if d['maxInputChannels'] > 0:
                inputs.append(d['name'])
            if d['maxOutputChannels'] > 0:
                outputs.append(d['name'])

        print "inputs", inputs
        print "outputs", outputs
        self.inputDevs.values = inputs
        self.outputDevs.values = outputs
Example #21
File: ears.py, Project: fangfx/JarvisTuring
def ears_setup():
    p = PyAudio()
    count = p.get_device_count()
    device = [i for i in range(count) if "Logitech" in p.get_device_info_by_index(i)["name"]][0]

    source = Microphone(device_index=device)
    # yup, I'm playing with the internals of this class.
    source.CHUNK = 512
    source.RATE = 8000
    source.CHANNELS = 1
    try:
        source.__enter__()
        source.stream.stop_stream()
    except Exception:
        vprint(1, "Microphone initialization failed.")
        source.__exit__(None, None, None)

    return source
Example #22
def RecordWave():
    pa = PyAudio()
    devinfo = pa.get_device_info_by_index(1)
    '''
    if pa.is_format_supported(8000, input_device=devinfo['index'],
                              input_channels=devinfo['maxInputChannels'],
                              input_format=paInt16):
        print 'Yes'
    '''
    stream = pa.open(format=paInt16, channels=1, rate=framerate, input=True,
                     frames_per_buffer = NUM_SAMPLES)
    saveBuffer = []
    count = 0
    print 'please say anything'
    while count < TIME*2:
        stringAudioData = stream.read(NUM_SAMPLES)
        saveBuffer.append(stringAudioData)
        count += 1
    filename = datetime.now().strftime("2")+".wav"
    SaveWaveFile(filename, saveBuffer)
    print filename, "saved"
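
Note: SaveWaveFile is not shown in Example #22. A minimal sketch of what it might do with the buffered chunks, assuming 16-bit mono audio; the default framerate of 8000 here is an assumption and should match the rate used for recording:

import wave

def SaveWaveFile(filename, frames, channels=1, sampwidth=2, framerate=8000):
    # Hypothetical helper: write the buffered chunks out as a standard WAV file.
    wf = wave.open(filename, 'wb')
    wf.setnchannels(channels)
    wf.setsampwidth(sampwidth)
    wf.setframerate(framerate)
    wf.writeframes(b''.join(frames))
    wf.close()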
Example #23
    def run(self):
        audio = PyAudio()
        print("Sound device:", self.dev_idx)
        device_info = audio.get_device_info_by_index(self.dev_idx)
        self.channels = device_info["maxInputChannels"] if (
            device_info["maxOutputChannels"] < device_info["maxInputChannels"]
        ) else device_info["maxOutputChannels"]
        self.rate = int(device_info["defaultSampleRate"])
        print(color.yellow(str(device_info)))
        wavstream = audio.open(format=self.format,
                               channels=self.channels,
                               rate=self.rate,
                               input=True,
                               frames_per_buffer=self.chunk,
                               input_device_index=device_info["index"],
                               # 'as_loopback' is not part of stock PyAudio;
                               # it requires a WASAPI-loopback build/fork.
                               as_loopback=True)

        # wavstream = audio.open(format=self.format,
        #                        channels=self.channels,
        #                        rate=self.rate,
        #                        input=True,
        #                        frames_per_buffer=self.chunk)
        # If nothing is being played back, the loopback stream receives no
        # data and read() blocks.
        # Read the input stream in a loop
        while self.bRecord:
            data = wavstream.read(self.chunk)
            self._frames.append(data)

        self._status = 1
        wavstream.stop_stream()
        wavstream.close()
        # save to file
        print("Saving ....", self.audiofile)
        with wave.open(self.audiofile, 'wb') as wavfile:
            wavfile.setnchannels(self.channels)
            wavfile.setsampwidth(audio.get_sample_size(self.format))
            wavfile.setframerate(self.rate)
            wavfile.writeframes(b''.join(self._frames))
        audio.terminate()
        self._status = 2
Example #24
 def __init__(self, config):
     super(VoiceControlPikachuLayer,
           self).__init__(255, 255, 255, 255, config.SCREENSIZE[0],
                          config.SCREENSIZE[1])
     pyglet.resource.path = [
         os.path.split(config.IMAGE_PATHS_DICT['block'])[0]
     ]
     pyglet.resource.reindex()
     # frames_per_buffer
     self.num_samples = 1000
     # voice-control bar
     self.vbar = Sprite(os.path.split(config.IMAGE_PATHS_DICT['block'])[1])
     self.vbar.position = 20, 450
     self.vbar.scale_y = 0.1
     self.vbar.image_anchor = 0, 0
     self.add(self.vbar)
     # Pikachu
     self.pikachu = Pikachu(
         os.path.split(config.IMAGE_PATHS_DICT['pikachu'])[1])
     self.add(self.pikachu)
     # floor
     self.floor = cocos.cocosnode.CocosNode()
     self.add(self.floor)
     position = 0, 100
     for i in range(120):
         b = Block(
             os.path.split(config.IMAGE_PATHS_DICT['block'])[1], position)
         self.floor.add(b)
         position = b.x + b.width, b.height
     # audio input
     audio = PyAudio()
     self.stream = audio.open(
         format=paInt16,
         channels=1,
         rate=int(audio.get_device_info_by_index(0)['defaultSampleRate']),
         input=True,
         frames_per_buffer=self.num_samples)
     # screen update
     self.schedule(self.update)
Example #25
    def __init__(self):
        super(VoiceGame, self).__init__(255, 255, 255, 255, 800, 600)

        # logo in the top-right corner
        # file paths in Python use '/' as the separator; a path starting with
        # '.' is relative, while one like 'c:/' is absolute
        self.logo = Sprite('static/img/ppx.png')
        self.logo.position = 550, 400
        self.add(self.logo, 9999)

        # init voice
        # size of pyAudio's internal buffer block
        self.NUM_SAMPLES = 1000
        # volume threshold for saving audio
        self.LEVEL = 1500

        # volume bar in the top-left corner
        self.voicebar = Sprite('img/black.png', color=(0, 0, 255))
        self.voicebar.position = 20, 450
        self.voicebar.scale_y = 0.1
        self.voicebar.image_anchor = 0, 0
        self.add(self.voicebar)

        self.ppx = PPX()
        self.add(self.ppx)

        self.floor = cocos.cocosnode.CocosNode()
        self.add(self.floor)
        pos = 0, 100
        for i in range(100):
            b = Block(pos)
            self.floor.add(b)
            pos = b.x + b.width, b.height

        # open audio input
        pa = PyAudio()
        SAMPLING_RATE = int(pa.get_device_info_by_index(0)['defaultSampleRate'])
        self.stream = pa.open(format=paInt16, channels=1,
                              rate=SAMPLING_RATE, input=True, frames_per_buffer=self.NUM_SAMPLES)
        self.schedule(self.update)
Example #26
def get_input_device(p: pyaudio.PyAudio, name: str):
    """
    Returns Example:
        {'index': 1, 'structVersion': 2, 'name': 'MacBook Pro麦克风', 
        'hostApi': 0, 'maxInputChannels': 1, 'maxOutputChannels': 0, 
        'defaultLowInputLatency': 0.04852607709750567, 
        'defaultLowOutputLatency': 0.01, 
        'defaultHighInputLatency': 0.05868480725623583, 
        'defaultHighOutputLatency': 0.1, 
        'defaultSampleRate': 44100.0}
    """
    device_info = None
    for idx in range(p.get_device_count()):
        info = p.get_device_info_by_index(idx)
        channels = info["maxInputChannels"]
        if channels == 0:
            continue
        logger.debug("device name: %s", info['name'])
        if info['name'] == name:
            device_info = info

    if not device_info:
        sys.exit("Missing iShowU Audio Capture")
    return device_info
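
Note: the dict returned by get_input_device can be passed straight to PyAudio's open(); a minimal usage sketch (the device name below is only an assumption carried over from the example's error message):

import pyaudio

p = pyaudio.PyAudio()
info = get_input_device(p, "iShowU Audio Capture")
stream = p.open(format=pyaudio.paInt16,
                channels=1,
                rate=int(info["defaultSampleRate"]),
                input=True,
                input_device_index=info["index"],
                frames_per_buffer=1024)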
Example #27
class RadioChronicle:
    # Default parameter values
    fileNameFormat = './RC-%Y%m%d-%H%M%S.wav'
    monitor = False
    volumeTreshold = 5.0
    maxPauseLength = 1.0
    trailLength = 1.0
    minRecordingLength = 0.5
    chunkSize = 1024
    inputDevice: Optional[int] = None
    outputDevice: Optional[int] = None
    audioBits = 16
    sampleRate = 44100
    inputStream: Optional[AudioStream] = None
    outputStream: Optional[AudioStream] = None

    audio: PyAudio
    logger: Logger

    audioFile: Optional[WaveWriter]
    sample: bytes
    sampleLength: int
    audioFileLength: int
    inLoop: bool
    recording: bool
    quitAfterRecording: bool
    lastSecondVolumes: List[float]

    fileName: str
    localMaxVolume: float

    def __init__(self) -> None:  # pylint: disable=too-complex, too-many-statements
        '''Fully constructs class instance, including reading configuration file and configuring audio devices.'''
        try:  # Reading command line options
            configFileName = DEFAULT_CONFIG_FILE_NAME
            (options, _args) = getopt(argv[1:], 'c:h', ['config=', 'help'])
            for (option, optionValue) in options:
                if option in ('-c', '--config'):
                    configFileName = optionValue.strip()
                else:
                    usage()
        except Exception as e:
            usage(e)
        try:  # Reading config file and configuring logging
            config = ConfigParser(interpolation=None,
                                  inline_comment_prefixes=(';', ))
            config.read_file(
                open(configFileName
                     ))  # Using read_file(open()) to make sure file exists
            if config.has_section('loggers'):
                fileConfig(config)
            self.logger = getLogger()
            if not self.logger.handlers:  # Provide default logger
                self.logger.addHandler(StreamHandler())
                self.logger.setLevel(NOTSET)
            signal(SIGTERM, self.sigTerm)
        except Exception as e:
            print(f"{TITLE}\n\nConfig error: {e}")
            print(format_exc())
            sysExit(1)
        # Above this point, use print for diagnostics
        # From this point on, we have self.logger to use instead
        self.logger.info(TITLE)
        self.logger.info(f"Using {configFileName}")
        print()  # Empty line to console only
        try:  # Applying configuration
            channel = 'MONO'
            value: str
            try:
                section = 'general'
                try:
                    self.fileNameFormat = config.get(section,
                                                     'fileNameFormat').strip()
                except NoOptionError:
                    pass
                try:
                    self.monitor = config.getboolean(section, 'monitor')
                except NoOptionError:
                    pass
                except ValueError as e:
                    raise ValueError(
                        f"Bad value for [{section}].monitor: '{config.get(section, 'monitor')}', must be 1/yes/true/on or 0/no/false/off"
                    ) from e
            except NoSectionError:
                pass
            try:
                section = 'tuning'
                try:
                    value = config.get(section, 'volumeTreshold')
                    self.volumeTreshold = float(value)
                except NoOptionError:
                    pass
                except ValueError as e:
                    raise ValueError(
                        f"Bad value for [{section}].volumeTreshold: '{value}', must be a float"
                    ) from e
                try:
                    value = config.get(section, 'maxPauseLength')
                    self.maxPauseLength = float(value)
                except NoOptionError:
                    pass
                except ValueError as e:
                    raise ValueError(
                        f"Bad value for [{section}].maxPauseLength: '{value}', must be a float"
                    ) from e
                try:
                    value = config.get(section, 'minRecordingLength')
                    self.minRecordingLength = float(value)
                except NoOptionError:
                    pass
                except ValueError as e:
                    raise ValueError(
                        f"Bad value for [{section}].minRecordingLength: '{value}', must be a float"
                    ) from e
                try:
                    value = config.get(section, 'trailLength')
                    self.trailLength = float(value)
                except NoOptionError:
                    pass
                except ValueError as e:
                    raise ValueError(
                        f"Bad value for [{section}].trailLength: '{value}', must be a float"
                    ) from e
            except NoSectionError:
                pass
            try:
                section = 'device'
                try:
                    value = config.get(section, 'chunkSize')
                    self.chunkSize = int(value)
                except NoOptionError:
                    pass
                except ValueError as e:
                    raise ValueError(
                        f"Bad value for [{section}].chunkSize: '{value}', must be an integer"
                    ) from e
                try:
                    value = config.get(section, 'inputDevice')
                    self.inputDevice = int(value)
                except NoOptionError:
                    pass
                except ValueError as e:
                    raise ValueError(
                        f"Bad value for [{section}].inputDevice: '{value}', must be an integer"
                    ) from e
                try:
                    value = config.get(section, 'outputDevice')
                    self.outputDevice = int(value)
                except NoOptionError:
                    pass
                except ValueError as e:
                    raise ValueError(
                        f"Bad value for [{section}].outputDevice: '{value}', must be an integer"
                    ) from e
                try:
                    value = config.get(section, 'audioBits')
                    self.audioBits = int(value)
                except NoOptionError:
                    pass
                except ValueError as e:
                    raise ValueError(
                        f"Bad value for [{section}].audioBits: '{value}', must be an integer"
                    ) from e
                try:
                    value = config.get(section, 'sampleRate')
                    self.sampleRate = int(value)
                except NoOptionError:
                    pass
                except ValueError as e:
                    raise ValueError(
                        f"Bad value for [{section}].sampleRate: '{value}', must be an integer"
                    ) from e
                try:
                    channel = config.get(section, 'channel')  # pylint: disable=redefined-variable-type # Will be processed later
                except NoOptionError:
                    pass
            except NoSectionError:
                pass

            # Validating configuration parameters
            if not self.fileNameFormat:
                raise ValueError(
                    "Bad value for fileNameFormat: must be not empty")
            if not 0 <= self.volumeTreshold <= 100:
                raise ValueError(
                    f"Bad value for volumeTreshold: {self.volumeTreshold:.2f}, must be 0-100"
                )
            if self.maxPauseLength < 0:
                self.maxPauseLength = 0.0
            if self.minRecordingLength < 0:
                self.minRecordingLength = 0.0
            if self.trailLength < 0:
                self.trailLength = 0.0
            if self.chunkSize < 1:
                raise ValueError(
                    f"Bad value for chunkSize: {self.chunkSize}, must be 1 or more"
                )
            if self.inputDevice:
                if self.inputDevice == -1:
                    self.inputDevice = None
                elif self.inputDevice < -1:
                    raise ValueError(
                        f"Bad value for input device: {self.inputDevice}, must be -1 or more"
                    )
            if self.outputDevice:
                if self.outputDevice == -1:
                    self.outputDevice = None
                elif self.outputDevice < -1:
                    raise ValueError(
                        f"Bad value for output device: {self.outputDevice}, must be -1 or more"
                    )
            if self.audioBits not in (8, 16, 32):
                raise ValueError(
                    f"Bad value for audioBits: {self.audioBits}, must be 8, 16, or 32"
                )
            if self.sampleRate < 1:
                raise ValueError(
                    f"Bad value for sampleRate: {self.sampleRate}, must be positive"
                )
            try:
                intChannel: Optional[int] = int(channel)
                assert intChannel is not None
                if intChannel <= 0:
                    intChannel = None  # Exception will be thrown below
            except ValueError:
                intChannel = CHANNEL_NUMBERS.get(
                    channel.strip().upper())  # Would be None if not found
            if intChannel is None:
                raise ValueError(
                    f"Bad value for channel: {channel}, must be LEFT/RIGHT/STEREO/ALL/MONO or a number of 1 or more"
                )
            self.channel = intChannel

            # Accessing PyAudio engine
            self.audio = PyAudio()
            print(f"{self.deviceInfo()}\n"
                  )  # Using print for non-functional logging

            # Accessing audio devices
            try:
                if self.inputDevice is not None:
                    inputDeviceInfo = self.audio.get_device_info_by_index(
                        self.inputDevice)
                    self.logger.info(
                        f"Using input device {self.deviceInfo(inputDeviceInfo, False)}"
                    )
                else:
                    inputDeviceInfo = self.audio.get_default_input_device_info(
                    )
                    self.logger.info(
                        f"Using default input device {self.deviceInfo(inputDeviceInfo, False)}"
                    )
            except ValueError as e:
                raise ValueError(
                    f"{f'Input device {self.inputDevice}' if self.inputDevice is not None else 'Default input device'} is not in fact an input device"
                ) from e
            except IOError as e:
                raise IOError(
                    f"Can't access {f'input device {self.inputDevice}' if self.inputDevice is not None else 'default input device'}: {e}"
                ) from e
            try:
                if self.outputDevice is not None:
                    outputDeviceInfo = self.audio.get_device_info_by_index(
                        self.outputDevice)
                    self.logger.info(
                        f"Using output device {self.deviceInfo(outputDeviceInfo, True)}"
                    )
                else:
                    outputDeviceInfo = self.audio.get_default_output_device_info(
                    )
                    self.logger.info(
                        f"Using default output device {self.deviceInfo(outputDeviceInfo, True)}"
                    )
            except ValueError as e:
                raise ValueError(
                    f"{f'output device {self.outputDevice}' if self.outputDevice is not None else 'Default output device'} is not in fact an output device"
                ) from e
            except IOError as e:
                raise IOError(
                    f"Can't access {f'output device {self.outputDevice}' if self.outputDevice is not None else 'default output device'}: {e}"
                ) from e
            print()  # Empty line to console only

            # Calculating derived parameters
            self.numInputChannels = 1 if self.channel == MONO else cast(
                int, inputDeviceInfo['maxInputChannels'])
            assert self.numInputChannels > 0
            if self.channel > self.numInputChannels:
                raise ValueError(
                    f"Bad value for channel: {self.channel}, must be no more than {self.numInputChannels}"
                )
            self.numOutputChannels = self.numInputChannels if self.channel == STEREO else 1
            assert self.numOutputChannels > 0

            self.audioBytes = self.audioBits // 8
            self.maxVolume = 1 << (self.audioBits - 1)
            self.audioFormat = self.audio.get_format_from_width(
                self.audioBytes, False)
            self.packFormat = PACK_FORMATS[self.audioBits]

            self.inputBlockSize = self.numInputChannels * self.chunkSize * self.audioBytes
            self.outputBlockSize = self.numOutputChannels * self.chunkSize * self.audioBytes
            self.inputSecondSize = self.numInputChannels * self.sampleRate * self.audioBytes
            self.outputSecondSize = self.numOutputChannels * self.sampleRate * self.audioBytes
            self.chunksInSecond = self.sampleRate // self.chunkSize
            self.chunksToStop = self.chunksInSecond * self.maxPauseLength
            self.chunksOfFadeout = self.chunksInSecond * self.trailLength

            # Diagnosing audio devices
            if not self.createInputStream():
                raise Exception("Can't create input stream")
            self.closeInputStream()
            if not self.createOutputStream():
                raise Exception("Can't create output stream")
            self.closeOutputStream()

            # Printing configuration info
            self.logger.info(
                f"Recording {self.sampleRate}Hz/{self.audioBits}-bit/{CHANNEL_NAMES.get(self.channel) or f'channel {self.channel}'} to {self.fileNameFormat}"
            )
            self.logger.info(
                f"Volume threshold {self.volumeTreshold:.2f}%, max pause {self.maxPauseLength:.1f} seconds, min recording length {self.minRecordingLength:.1f} seconds, trail {self.trailLength:.1f} seconds"
            )
            self.logger.info(f"Monitor is {'ON' if self.monitor else 'OFF'}")
            print("Type 'help' for console commands reference"
                  )  # Using print for non-functional logging
            print()  # Empty line to console only
        except Exception as e:
            self.logger.error(f"Configuration error: {e}")
            print(format_exc())
            sysExit(1)

    def __del__(self) -> None:
        '''Frees the PyAudio resources.'''
        if self.audio:
            self.closeInputStream()
            self.closeOutputStream()
            self.audio.terminate()
            self.logger.debug("destroyed")

    def deviceInfo(self,
                   device: Union[int, Mapping[str, Union[str, int, float]],
                                 None] = None,
                   expectOutput: Optional[bool] = None) -> str:
        '''Provides string information about system audio device(s).'''
        if device is None:
            # Return info on all available devices
            inputDevices = []
            outputDevices = []
            for i in range(self.audio.get_device_count()):
                device = self.audio.get_device_info_by_index(i)
                if device['maxOutputChannels']:
                    outputDevices.append(device)
                if device['maxInputChannels']:
                    inputDevices.append(device)
            return '\n'.join(
                ("Detected audio input devices:",
                 '\n'.join(self.deviceInfo(device) for device in inputDevices),
                 "\nDetected audio output devices:", '\n'.join(
                     self.deviceInfo(device) for device in outputDevices)))
        # else Return info on a particular device
        if isinstance(device, int):
            device = self.audio.get_device_info_by_index(device)
        inputChannels = device['maxInputChannels']
        outputChannels = device['maxOutputChannels']
        if expectOutput is not None and not bool(
                outputChannels if expectOutput else inputChannels):
            raise ValueError
        return f"{device['index']}: {device['name']} ({inputChannels}/{outputChannels} channels)"

    def createInputStream(self) -> bool:
        '''Creates an input stream if it doesn't already exist.
           Returns True if the stream already exists or was created successfully, False otherwise.'''
        if self.inputStream:
            return True
        try:
            self.inputStream = self.audio.open(self.sampleRate,
                                               self.numInputChannels,
                                               self.audioFormat, True, False,
                                               self.inputDevice, None,
                                               self.chunkSize)
            return True
        except Exception as e:
            self.logger.warning(
                f"Error creating input stream: {(type(e).__name__)}: {e}")
            return False

    def createOutputStream(self) -> bool:
        '''Creates an output stream if it doesn't already exist.
           Returns True if the stream already exists or was created successfully, False otherwise.'''
        if self.outputStream:
            return True
        try:
            self.outputStream = self.audio.open(self.sampleRate,
                                                self.numOutputChannels,
                                                self.audioFormat, False, True,
                                                None, self.outputDevice,
                                                self.chunkSize)
            return True
        except Exception as e:
            self.logger.warning(
                f"Error creating output stream: {(type(e).__name__)}: {e}")
            return False

    def closeInputStream(self) -> None:
        if self.inputStream:
            self.inputStream.close()
            self.inputStream = None

    def closeOutputStream(self) -> None:
        if self.outputStream:
            self.outputStream.close()
            self.outputStream = None

    def readAudioData(self) -> Optional[bytes]:
        '''Reads a chunk of audio data from the input stream.
           Returns the retrieved data if successful, None otherwise.'''
        if not self.createInputStream():
            return None
        try:
            assert self.inputStream
            data = self.inputStream.read(self.chunkSize)
            return data
        except Exception as e:
            # Note: IOError: [Errno Input overflowed] -9981 often occurs when running under debugger
            # Note: IOError: [Errno Unanticipated host error] -9999 occurs when audio device is removed (cable unplugged)
            # Note: After 5-10 occurrences of the above exception the system hangs, so re-creating the stream seems necessary
            self.logger.warning(
                f"Audio input error: {(type(e).__name__)}: {e}")
            self.closeInputStream()
            self.saveSample()
            return None

    def writeAudioData(self, data: bytes) -> bool:
        '''Writes a chunk of audio data to the output stream.
           Returns True if successful, False otherwise.'''
        if not self.createOutputStream():
            return False
        try:
            assert self.outputStream
            self.outputStream.write(data)
            return True
        except Exception as e:
            self.logger.warning(
                f"Audio output error: {(type(e).__name__)}: {e}")
            self.closeOutputStream()
            return False

    def saveSample(self) -> bool:
        '''Saves the curent sample to the audio file.
           If the file does not exists, it is created.
           If the sample length is not equal to the self.sampleLength value, it means, we've cut
           the silence at the end of the sample, so it's the end of the file and it should be closed.
           The function returns True on success or if the recording is off, False otherwise.'''
        if not self.recording:
            return True
        try:
            if self.sampleLength:
                finalSample = True
            else:
                # If sampleLength wasn't set manually, the whole sample is saved.
                # It means the recording isn't over yet.
                self.sampleLength = len(self.sample)
                finalSample = False

            self.audioFileLength += self.sampleLength
            recordLength = (float(self.audioFileLength) /
                            self.outputSecondSize)

            if recordLength > self.minRecordingLength:  # The save-to-file process starts only when the sample is long enough
                if not self.audioFile:  # Creating the file if necessary
                    self.audioFile = waveOpen(self.fileName, 'wb')
                    assert self.audioFile
                    self.audioFile.setnchannels(self.numOutputChannels)
                    self.audioFile.setsampwidth(self.audioBytes)
                    self.audioFile.setframerate(self.sampleRate)

                self.audioFile.writeframes(
                    self.sample[:self.sampleLength]
                )  # Removing extra silence at the end, if needed

                self.sample = b''
                self.sampleLength = 0

                if finalSample or not self.inLoop:
                    self.recording = False
                    self.audioFile.close()
                    self.audioFile = None
                    self.logger.info(
                        f"Recording finished, max volume {self.localMaxVolume:.2f}%, {recordLength:.1f} seconds"
                    )

                return True
            if finalSample or not self.inLoop:
                self.recording = False
                self.logger.info(
                    f"Recording discarded as it's too short ({recordLength:.1f} seconds)"
                )
            else:
                self.audioFileLength -= self.sampleLength  # If the sample is short we do not operate with it, so param changes should be undone
            return True
        except Exception as e:
            self.logger.warning(
                f"File output error: {(type(e).__name__)}: {e}")
            return False

    def run(self) -> None:
        '''Runs main audio processing loop.'''
        self.audioFile = None
        self.sampleLength = 0
        self.audioFileLength = 0
        self.inLoop = True
        self.recording = False
        self.quitAfterRecording = False
        self.lastSecondVolumes = [0.0] * self.chunksInSecond
        chunkInSecond = 0
        start_new_thread(self.commandConsole,
                         ())  # Start command console thread
        self.logger.info("Listening started")

        # Main audio processing loop
        try:
            while self.inLoop:
                # Retrieve next chunk of audio data
                data = self.readAudioData()
                if not data:  # Error occurred
                    sleep(1.0 / self.chunksInSecond
                          )  # Avoid querying malfunctioning device too often
                    continue
                assert len(data) == self.inputBlockSize

                if self.channel not in (
                        MONO,
                        STEREO):  # Extract the data for particular channel
                    data = b''.join(
                        data[i:i + self.audioBytes]
                        for i in range((self.channel - 1) * self.audioBytes,
                                       len(data), self.numInputChannels *
                                       self.audioBytes))
                assert len(data) == self.outputBlockSize

                if self.monitor:  # Provide monitor output
                    self.writeAudioData(data)

                # Gathering volume statistics
                volume = (mean(
                    abs(
                        cast(
                            int,
                            unpack(self.packFormat, data[i:i +
                                                         self.audioBytes])[0]))
                    for i in range(0, len(data), self.audioBytes)) * 100 +
                          self.maxVolume // 2) / self.maxVolume  # pylint: disable=old-division
                self.lastSecondVolumes[
                    chunkInSecond] = volume  # Logging the sound volume during the last second
                chunkInSecond = (chunkInSecond + 1) % self.chunksInSecond

                if volume >= self.volumeTreshold:  # The chunk is loud enough
                    if not self.recording:  # Start recording
                        # ToDo: check inputStream.get_time(), latency etc. to provide exact time stamp for file naming
                        self.fileName = strftime(self.fileNameFormat)
                        self.logger.info(f"{self.fileName} recording started")
                        self.recording = True
                        self.sample = b''
                        self.localMaxVolume = volume
                        self.audioFileLength = 0
                    elif volume > self.localMaxVolume:
                        self.localMaxVolume = volume
                    self.sampleLength = 0
                    chunksOfSilence = 0
                    self.sample += data
                    self.saveSample()
                elif self.recording:  # Check for stop recording
                    self.sample += data
                    chunksOfSilence += 1
                    if not self.sampleLength and chunksOfSilence > self.chunksOfFadeout:  # Enough silence for a trail
                        self.sampleLength = len(
                            self.sample)  # Removing extra silence at the end
                    if chunksOfSilence > self.chunksToStop:  # Enough silence to stop recording
                        self.saveSample()  # Stopping recording
                        if self.quitAfterRecording:
                            self.inLoop = False
        except Exception as e:
            self.logger.warning(f"Processing error: {(type(e).__name__)}: {e}")
        except KeyboardInterrupt:
            self.logger.warning("Ctrl-C detected at input, exiting")
        self.inLoop = False
        self.saveSample()
        self.closeInputStream()
        self.closeOutputStream()
        self.logger.info("Done")

    def commandConsole(self) -> None:
        '''Runs in a separate thread to allow run-time adjustments from the command line.'''
        try:
            while self.inLoop:
                inp = input().split(' ')
                command = inp[0].lower()
                if 'help'.startswith(command):
                    print(
                        """\nAvailable console commands (first letter is enough):
Help               - Show this information
EXit/Quit          - Exit the program immediately
Last               - Exit the program after completion of the current file
Volume             - Print the current mean volume level
Monitor [on/off]   - Show or set the monitor state
Threshold [value]  - Show or set the volume threshold level\n""")
                elif 'exit'.startswith(
                        command) or command == 'x' or 'quit'.startswith(
                            command):
                    self.logger.info("Exiting")
                    self.inLoop = False
                elif 'volume'.startswith(command):
                    print(f"{mean(self.lastSecondVolumes):.2f}%"
                          )  # Using print for non-functional logging
                elif 'monitor'.startswith(command):
                    if len(inp) < 2:
                        print(f"Monitor is {'ON' if self.monitor else 'OFF'}"
                              )  # Using print for non-functional logging
                    else:
                        self.monitor = inp[1].lower().strip() in ('true',
                                                                  'yes', 'on',
                                                                  '1')
                        self.logger.info(
                            f"Monitor is set to {'ON' if self.monitor else 'OFF'}"
                        )
                elif 'last'.startswith(command):
                    if self.recording:
                        self.quitAfterRecording = True
                        self.logger.info(
                            "Going to exit after the end of the recording")
                    else:
                        self.logger.info("Exiting")
                        self.inLoop = False
                elif 'threshold'.startswith(command):
                    if len(inp) < 2:
                        print(
                            f"Current volume threshold: {self.volumeTreshold:.2f}%"
                        )  # Using print for non-functional logging
                    else:
                        try:
                            self.volumeTreshold = float(inp[1])
                            if not 0 <= self.volumeTreshold <= 100:
                                raise ValueError()
                            self.logger.info(
                                f"New volume threshold: {self.volumeTreshold:.2f}%"
                            )
                        except ValueError:
                            print("Bad value, expected 0-100"
                                  )  # Using print for non-functional logging
        except EOFError:
            self.logger.warning("Console EOF detected")
        except Exception as e:
            self.logger.warning(
                f"Console error: {type(e).__name__}: {e}\n{format_exc()}")
            self.inLoop = False
        except KeyboardInterrupt:
            self.logger.warning("Ctrl-C detected at console, exiting")
            self.inLoop = False

    def sigTerm(self, _signum: int, _frame: FrameType) -> None:
        '''SIGTERM handler.'''
        self.logger.warning("SIGTERM caught, exiting")
        self.inLoop = False
Example #28
0
class AudioDevice(QtCore.QObject):
    def __init__(self, logger):
        QtCore.QObject.__init__(self)
        self.logger = logger
        self.duo_input = False
        self.logger.push("Initializing PyAudio")
        self.pa = PyAudio()

        # look for devices
        self.input_devices = self.get_input_devices()
        self.output_devices = self.get_output_devices()

        for device in self.input_devices:
            self.logger.push("Opening the stream")
            self.stream = self.open_stream(device)
            self.device = device

            self.logger.push("Trying to read from input device %d" % device)
            if self.try_input_stream(self.stream):
                self.logger.push("Success")
                break
            else:
                self.logger.push("Fail")

        self.first_channel = 0
        nchannels = self.get_current_device_nchannels()
        if nchannels == 1:
            self.second_channel = 0
        else:
            self.second_channel = 1

        # counter for the number of input buffer overflows
        self.xruns = 0

    # method

    def get_readable_devices_list(self):
        devices_list = []

        default_device_index = self.get_default_input_device()

        for device in self.input_devices:
            dev_info = self.pa.get_device_info_by_index(device)
            api = self.pa.get_host_api_info_by_index(
                dev_info['hostApi'])['name']

            if device == default_device_index:
                extra_info = ' (system default)'
            else:
                extra_info = ''

            nchannels = self.pa.get_device_info_by_index(
                device)['maxInputChannels']

            desc = "%s (%d channels) (%s) %s" % (dev_info['name'], nchannels,
                                                 api, extra_info)

            devices_list += [desc]

        return devices_list

    # method
    def get_readable_output_devices_list(self):
        devices_list = []

        default_device_index = self.get_default_output_device()

        for device in self.output_devices:
            dev_info = self.pa.get_device_info_by_index(device)
            api = self.pa.get_host_api_info_by_index(
                dev_info['hostApi'])['name']

            if device == default_device_index:
                extra_info = ' (system default)'
            else:
                extra_info = ''

            nchannels = self.pa.get_device_info_by_index(
                device)['maxOutputChannels']

            desc = "%s (%d channels) (%s) %s" % (dev_info['name'], nchannels,
                                                 api, extra_info)

            devices_list += [desc]

        return devices_list

    # method
    def get_default_input_device(self):
        return self.pa.get_default_input_device_info()['index']

    # method
    def get_default_output_device(self):
        return self.pa.get_default_output_device_info()['index']

    # method
    def get_device_count(self):
        # FIXME only input devices should be chosen, not all of them !
        return self.pa.get_device_count()

    # method
    # returns a list of input devices index, starting with the system default
    def get_input_devices(self):
        device_count = self.get_device_count()
        default_input_device = self.get_default_input_device()

        device_range = list(range(0, device_count))
        # start by the default input device
        device_range.remove(default_input_device)
        device_range = [default_input_device] + device_range

        # select only the input devices by looking at the number of input channels
        input_devices = []
        for device in device_range:
            n_input_channels = self.pa.get_device_info_by_index(
                device)['maxInputChannels']
            if n_input_channels > 0:
                input_devices += [device]

        return input_devices

    # method
    # returns a list of output devices index, starting with the system default
    def get_output_devices(self):
        device_count = self.get_device_count()
        default_output_device = self.get_default_output_device()

        device_range = list(range(0, device_count))
        # start by the default output device
        device_range.remove(default_output_device)
        device_range = [default_output_device] + device_range

        # select only the output devices by looking at the number of output channels
        output_devices = []
        for device in device_range:
            n_output_channels = self.pa.get_device_info_by_index(
                device)['maxOutputChannels']
            if n_output_channels > 0:
                output_devices += [device]

        return output_devices

    # method
    def select_input_device(self, device):
        # save current stream in case we need to restore it
        previous_stream = self.stream
        previous_device = self.device

        self.stream = self.open_stream(device)
        self.device = device

        self.logger.push("Trying to read from input device #%d" % (device))
        if self.try_input_stream(self.stream):
            self.logger.push("Success")
            previous_stream.close()
            success = True
            self.first_channel = 0
            nchannels = self.get_current_device_nchannels()
            if nchannels == 1:
                self.second_channel = 0
            else:
                self.second_channel = 1
        else:
            self.logger.push("Fail")
            self.stream.close()
            self.stream = previous_stream
            self.device = previous_device
            success = False

        return success, self.device

    # method
    def select_first_channel(self, index):
        self.first_channel = index
        success = True
        return success, self.first_channel

    # method
    def select_second_channel(self, index):
        self.second_channel = index
        success = True
        return success, self.second_channel

    # method
    def open_stream(self, device):
        '''By default we open the device stream with all the channels
        (interleaved in the data buffer).'''
        maxInputChannels = self.pa.get_device_info_by_index(
            device)['maxInputChannels']
        stream = self.pa.open(format=paInt32,
                              channels=maxInputChannels,
                              rate=SAMPLING_RATE,
                              input=True,
                              frames_per_buffer=FRAMES_PER_BUFFER,
                              input_device_index=device)
        return stream

    # method
    # return the index of the current input device in the input devices list
    # (not the same as the PortAudio index, since the latter is the index
    # in the list of *all* devices, not only input ones)
    def get_readable_current_device(self):
        i = 0
        for device in self.input_devices:
            if device == self.device:
                break
            else:
                i += 1
        return i

    # method
    def get_readable_current_channels(self):
        dev_info = self.pa.get_device_info_by_index(self.device)
        nchannels = dev_info['maxInputChannels']

        if nchannels == 2:
            channels = ['L', 'R']
        else:
            channels = []
            for channel in range(0, dev_info['maxInputChannels']):
                channels += ["%d" % channel]

        return channels

    # method
    def get_current_first_channel(self):
        return self.first_channel

    # method
    def get_current_second_channel(self):
        return self.second_channel

    # method
    def get_current_device_nchannels(self):
        return self.pa.get_device_info_by_index(self.device)[
                                                'maxInputChannels']

    # method
    # return True on success
    def try_input_stream(self, stream):
        n_try = 0
        while (stream.get_read_available() < FRAMES_PER_BUFFER and
                n_try < 1000000):
            n_try += 1

        if n_try == 1000000:
            return False
        else:
            lat_ms = 1000 * stream.get_input_latency()
            self.logger.push("Device claims %d ms latency" % (lat_ms))
            return True

    # try to update the audio buffer
    # return the number of chunks retrieved, and the time elapsed
    def update(self, ringbuffer):
        t = QtCore.QTime()
        t.start()

        channel = self.get_current_first_channel()
        nchannels = self.get_current_device_nchannels()
        if self.duo_input:
            channel_2 = self.get_current_second_channel()

        chunks = 0
        available = self.stream.get_read_available()
        available = int(floor(available / FRAMES_PER_BUFFER))
        for _ in range(0, available):
            try:
                rawdata = self.stream.read(FRAMES_PER_BUFFER)
            except IOError as inst:
                # FIXME specialize this exception handling code
                # to treat overflow errors particularly
                self.xruns += 1
                print "Caught an IOError on stream read.", inst
                break
            intdata_all_channels = fromstring(rawdata, int32)
            int32info = iinfo(int32)
            norm_coeff = max(abs(int32info.min), int32info.max)
            floatdata_all_channels = (intdata_all_channels.astype(float64) /
                                       float(norm_coeff))

            floatdata1 = floatdata_all_channels[channel::nchannels]
            if self.duo_input:
                floatdata2 = floatdata_all_channels[channel_2::nchannels]
                floatdata = vstack((floatdata1, floatdata2))
            else:
                floatdata = floatdata1
                floatdata.shape = (1, FRAMES_PER_BUFFER)
            # update the circular buffer
            ringbuffer.push(floatdata)
            chunks += 1
        return (chunks, t.elapsed(), chunks * FRAMES_PER_BUFFER)

    def set_single_input(self):
        self.duo_input = False

    def set_duo_input(self):
        self.duo_input = True

    # returns the stream time in seconds
    def get_stream_time(self):
        return self.stream.get_time()
Example #29
0
    def run(self):
        playing = True
        pa = PyAudio()
        sampling_rate = int(
            pa.get_device_info_by_index(0)['defaultSampleRate'])
        self.stream = pa.open(format=paInt16,
                              channels=1,
                              rate=sampling_rate,
                              input=True,
                              frames_per_buffer=self.NUM_SAMPLES)

        while playing:
            print(self.PIPEGAP)
            FPSCLOCK = pygame.time.Clock()
            SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))

            for event in pygame.event.get():
                # print("pygame.event.get() == ", pygame.event.get())
                if event.type == QUIT or (event.type == KEYDOWN
                                          and event.key == K_ESCAPE):
                    return 'quit'
                if event.type == pygame.MOUSEBUTTONDOWN:
                    (x, y) = event.pos
                    if self.isInBound(x, y):
                        self.pause = True
                if event.type == KEYDOWN and (event.key == K_SPACE
                                              or event.key == K_UP):
                    if not self.pause:
                        if self.birdy > -2 * IMAGES['bird'][0].get_height():
                            self.playerVelY = self.playerFlapAcc
                            self.playerFlapped = True
                    else:
                        self.pause = False

            string_audio_data = self.stream.read(self.NUM_SAMPLES)
            volume = max(struct.unpack('2048h', string_audio_data))
            volume -= 6000

            # soundbar graph feature
            if volume <= 0:
                xlength = 1
            else:
                xlength = int(100 * volume / 20000)
            if xlength >= 100:
                xlength = 99
            newGraph = pygame.transform.scale(IMAGES['graph'], (xlength, 10))

            # pause feature
            if not self.pause:

                # volume control feature
                if volume > 1000:
                    if self.birdy > -2 * IMAGES['bird'][0].get_height():
                        self.playerVelY = -(volume // 2000)
                        self.playerFlapped = True

                crashTest = self.checkCrash()
                if crashTest[0]:
                    return ('gameover', self.score)

                birdMid = self.birdx + IMAGES['bird'][0].get_width() / 2
                for pipe in self.upperPipes:
                    pipeMid = pipe['x'] + IMAGES['pipe'][0].get_width() / 2
                    if pipeMid <= birdMid < pipeMid + 4:
                        self.score += 1
                if (self.loopIter + 1) % 3 == 0:
                    self.birdIndex = next(self.birdIndexGen)
                self.loopIter = (self.loopIter + 1) % 30
                self.basex = -((-self.basex + 100) % self.baseShift)

                if self.playerVelY < self.playerMaxVelY and not self.playerFlapped:
                    self.playerVelY += self.playerAccY
                if self.playerFlapped:
                    self.playerFlapped = False
                playerHeight = IMAGES['bird'][self.birdIndex].get_height()
                self.birdy += min(self.playerVelY,
                                  BASEY - self.birdy - playerHeight)

                for uPipe in self.upperPipes:
                    uPipe['x'] += self.pipeVelX

                for lPipe in self.lowerPipes:
                    lPipe['x'] += self.pipeVelX

                if 0 < self.upperPipes[0]['x'] < 5:
                    newPipe = self.getRandomPipe()
                    self.upperPipes.append(newPipe[0])
                    self.lowerPipes.append(newPipe[1])

                if self.upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width():
                    self.upperPipes.pop(0)
                    self.lowerPipes.pop(0)
                SCREEN.blit(IMAGES['background'], (0, 0))

                count = 0
                for uPipe in self.upperPipes:
                    SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
                    count += 1
                    if count % 2 == 1:
                        self.firstPipe = uPipe['x']
                    else:
                        self.secondPipe = uPipe['x']

                for lPipe in self.lowerPipes:
                    SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))

                SCREEN.blit(IMAGES['base'], (self.basex, BASEY))
                self.showScore(SCREEN)
                SCREEN.blit(IMAGES['bird'][self.birdIndex],
                            (self.birdx, self.birdy))

                SCREEN.blit(newGraph, (20, 20))
                SCREEN.blit(IMAGES['pause'], (self.pausex, self.pausey))
                pygame.display.update()
                FPSCLOCK.tick(FPS)
            else:
                # paused mode
                SCREEN.blit(IMAGES['background'], (0, 0))
                for uPipe, lPipe in zip(self.upperPipes, self.lowerPipes):
                    SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
                    SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
                SCREEN.blit(IMAGES['base'], (self.basex, BASEY))
                self.showScore(SCREEN)
                SCREEN.blit(IMAGES['bird'][self.birdIndex],
                            (self.birdx, self.birdy))
                SCREEN.blit(newGraph, (20, 20))
                SCREEN.blit(IMAGES['pause'], (self.pausex, self.pausey))
                SCREEN.blit(IMAGES['paused'],
                            (self.messagex, self.messagey + 50))
                pygame.display.update()
Example #30
0
    def __init__(self):
        super(VoiceGame, self).__init__(255, 255, 255, 255, WIDTH, HEIGHT)
        pygame.mixer.init()

        self.gameover = None
        self.billboard = None

        self.score = 0  # score counter
        self.txt_score = cocos.text.Label(u'分数:0',
                                          font_name=FONTS,
                                          font_size=24,
                                          color=BLACK)
        self.txt_score.position = 500, 440
        self.add(self.txt_score, 99999)

        self.top = '', 0
        self.top_notice = cocos.text.Label(u'',
                                           font_name=FONTS,
                                           font_size=18,
                                           color=BLACK)
        self.top_notice.position = 400, 410
        self.add(self.top_notice, 99999)

        self.name = ''

        # init voice
        self.NUM_SAMPLES = 2048  # size of the blocks pyAudio buffers internally
        self.LEVEL = 1500  # volume threshold for saving sound

        self.voicebar = Sprite('black.png', color=(0, 0, 255))
        self.voicebar.position = 20, 450
        self.voicebar.scale_y = 0.1
        self.voicebar.image_anchor = 0, 0
        self.add(self.voicebar)

        self.ppx = PPX(self)
        self.add(self.ppx)

        self.floor = cocos.cocosnode.CocosNode()
        self.add(self.floor)
        self.last_block = 0, 100
        for i in range(5):
            b = Block(self)
            self.floor.add(b)
            pos = b.x + b.width, b.height

        # start capturing sound input
        pa = PyAudio()
        SAMPLING_RATE = int(
            pa.get_device_info_by_index(0)['defaultSampleRate'])
        self.stream = pa.open(format=paInt16,
                              channels=1,
                              rate=SAMPLING_RATE,
                              input=True,
                              frames_per_buffer=self.NUM_SAMPLES)
        self.stream.stop_stream()

        pygame.mixer.music.load('bgm.wav')
        pygame.mixer.music.play(-1)

        self.schedule(self.update)
Example #31
0
from pyaudio import PyAudio, paInt16
import time
import struct
import config
pa = PyAudio()
data = list()
data.append(1)
sampling_rate = int(pa.get_device_info_by_index(0)['defaultSampleRate'])
print("collecting data")
stream = pa.open(format=paInt16,
                 channels=1,
                 rate=sampling_rate,
                 input=True,
                 frames_per_buffer=1000)
init_time = time.time()
while (time.time() - init_time) <= 3:
    string_audio_data = stream.read(1000)
    volume = max(struct.unpack('1000h', string_audio_data))
    data.append(int(volume))
    time.sleep(0.5)
noise_level = sum(data) / len(data)
print(noise_level)
print("data collected")
config.sensitivity = noise_level
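A hypothetical follow-up (not part of the original script): once the measured ambient level is stored in config.sensitivity, a caller could keep polling the same stream and treat any chunk whose peak rises well above that level as a trigger. The 1.5 margin below is an assumed illustration value.

# Illustration only: reuse the open stream and the calibrated level from above.
TRIGGER_MARGIN = 1.5  # assumed safety factor above the measured ambient level
while True:
    chunk = stream.read(1000)
    peak = max(struct.unpack('1000h', chunk))
    if peak > config.sensitivity * TRIGGER_MARGIN:
        print("sound detected, peak =", peak)
        break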
Example #32
0
class AudioBackend(QtCore.QObject):

	underflow = QtCore.pyqtSignal()
	new_data_available_from_callback = QtCore.pyqtSignal(bytes, int, float, int)
	new_data_available = QtCore.pyqtSignal(ndarray, float, int)

	def callback(self, in_data, frame_count, time_info, status):
		#do the minimum from here to prevent overflows, just pass the data to the main thread

		input_time = time_info['input_buffer_adc_time']

		# some API drivers in PortAudio do not return a valid time, so fallback to the current stream time
		if input_time == 0.:
			input_time = time_info['current_time']
		if input_time == 0.:
			input_time = self.stream.get_time()

		self.new_data_available_from_callback.emit(in_data, frame_count, input_time, status)

		return (None, 0)

	def __init__(self, logger):
		QtCore.QObject.__init__(self)

		self.logger = logger

		self.duo_input = False

		self.logger.push("Initializing PyAudio")
		self.pa = PyAudio()

		# look for devices
		self.input_devices = self.get_input_devices()
		self.output_devices = self.get_output_devices()

		self.device = None
		self.first_channel = None
		self.second_channel = None

		# we will try to open all the input devices until one
		# works, starting by the default input device
		for device in self.input_devices:
			self.logger.push("Opening the stream")
			try:
				self.stream = self.open_stream(device)
				self.stream.start_stream()
				self.device = device
				self.logger.push("Success")
				break
			except:
				self.logger.push("Fail")

		if self.device is not None:
			self.first_channel = 0
			nchannels = self.get_current_device_nchannels()
			if nchannels == 1:
				self.second_channel = 0
			else:
				self.second_channel = 1

		# counter for the number of input buffer overflows
		self.xruns = 0

		self.chunk_number = 0

		self.new_data_available_from_callback.connect(self.handle_new_data)

	def close(self):
		self.stream.stop_stream()
		self.stream.close()
		self.stream = None

	# method
	def get_readable_devices_list(self):
		devices_list = []
		
		default_device_index = self.get_default_input_device()
		
		for device in self.input_devices:
			dev_info = self.pa.get_device_info_by_index(device)
			api = self.pa.get_host_api_info_by_index(dev_info['hostApi'])['name']

			if device == default_device_index:
				extra_info = ' (system default)'
			else:
				extra_info = ''
			
			nchannels = self.pa.get_device_info_by_index(device)['maxInputChannels']

			desc = "%s (%d channels) (%s) %s" %(dev_info['name'], nchannels, api, extra_info)
			
			devices_list += [desc]

		return devices_list

	# method
	def get_readable_output_devices_list(self):
		devices_list = []
		
		default_device_index = self.get_default_output_device()
		
		for device in self.output_devices:
			dev_info = self.pa.get_device_info_by_index(device)
			api = self.pa.get_host_api_info_by_index(dev_info['hostApi'])['name']

			if device == default_device_index:
				extra_info = ' (system default)'
			else:
				extra_info = ''
			
			nchannels = self.pa.get_device_info_by_index(device)['maxOutputChannels']

			desc = "%s (%d channels) (%s) %s" %(dev_info['name'], nchannels, api, extra_info)
			
			devices_list += [desc]

		return devices_list

	# method
	def get_default_input_device(self):
		try:
			index = self.pa.get_default_input_device_info()['index']
		except IOError:
			index = None

		return index

	# method
	def get_default_output_device(self):
		try:
			index = self.pa.get_default_output_device_info()['index']
		except IOError:
			index = None
		return index

	# method
	def get_device_count(self):
		# FIXME only input devices should be chosen, not all of them !
		return self.pa.get_device_count()

	# method
	# returns a list of input devices index, starting with the system default
	def get_input_devices(self):
		device_count = self.get_device_count()
		device_range = list(range(0, device_count))

		default_input_device = self.get_default_input_device()

		if default_input_device	is not None:
			# start by the default input device
			device_range.remove(default_input_device)
			device_range = [default_input_device] + device_range

		# select only the input devices by looking at the number of input channels
		input_devices = []
		for device in device_range:
			n_input_channels = self.pa.get_device_info_by_index(device)['maxInputChannels']
			if n_input_channels > 0:
				input_devices += [device]

		return input_devices

	# method
	# returns a list of output devices index, starting with the system default
	def get_output_devices(self):
		device_count = self.get_device_count()
		device_range = list(range(0, device_count))

		default_output_device = self.get_default_output_device()

		if default_output_device is not None:
			# start by the default output device
			device_range.remove(default_output_device)
			device_range = [default_output_device] + device_range

		# select only the output devices by looking at the number of output channels
		output_devices = []
		for device in device_range:
			n_output_channels = self.pa.get_device_info_by_index(device)['maxOutputChannels']
			if n_output_channels > 0:
				output_devices += [device]
		
		return output_devices

	# method.
	# The index parameter is the index in the self.input_devices list of devices !
	# The return parameter is also an index in the same list.
	def select_input_device(self, index):
		device = self.input_devices[index]

		# save current stream in case we need to restore it
		previous_stream = self.stream
		previous_device = self.device

		self.logger.push("Trying to open input device #%d" % (index))

		try:
			self.stream = self.open_stream(device)
			self.device = device
			self.stream.start_stream()
			success = True
		except:
			self.logger.push("Fail")
			success = False
			if self.stream is not None:
				self.stream.close()
			# restore previous stream
			self.stream = previous_stream
			self.device = previous_device

		if success:
			self.logger.push("Success")
			previous_stream.close()

			self.first_channel = 0
			nchannels = self.get_current_device_nchannels()
			if nchannels == 1:
				self.second_channel = 0
			else:
				self.second_channel = 1

		return success, self.input_devices.index(self.device)

	# method
	def select_first_channel(self, index):
		self.first_channel = index
		success = True
		return success, self.first_channel

	# method
	def select_second_channel(self, index):
		self.second_channel = index
		success = True
		return success, self.second_channel

	# method
	def open_stream(self, device):
		# by default we open the device stream with all the channels
		# (interleaved in the data buffer)
		maxInputChannels = self.pa.get_device_info_by_index(device)['maxInputChannels']
		stream = self.pa.open(format=paInt16, channels=maxInputChannels, rate=SAMPLING_RATE, input=True,
				input_device_index=device, stream_callback=self.callback,
				frames_per_buffer = FRAMES_PER_BUFFER)

		lat_ms = 1000*stream.get_input_latency()
		self.logger.push("Device claims %d ms latency" %(lat_ms))

		return stream

	# method
	# return the index of the current input device in the input devices list
	# (not the same as the PortAudio index, since the latter is the index
	# in the list of *all* devices, not only input ones)
	def get_readable_current_device(self):
		return self.input_devices.index(self.device)

	# method
	def get_readable_current_channels(self):			
		dev_info = self.pa.get_device_info_by_index(self.device)  
		nchannels = dev_info['maxInputChannels']

		if nchannels == 2:
			channels = ['L', 'R']
		else:
			channels = []
			for channel in range(0, dev_info['maxInputChannels']):
				channels += ["%d" %channel]			
			
		return channels

	# method
	def get_current_first_channel(self):
		return self.first_channel

	# method
	def get_current_second_channel(self):
		return self.second_channel

	# method	
	def get_current_device_nchannels(self):
		return self.pa.get_device_info_by_index(self.device)['maxInputChannels']

	def handle_new_data(self, in_data, frame_count, input_time, status):
		if (status & paInputOverflow):
			print("Stream overflow!")
			self.xruns += 1
			self.underflow.emit()

		intdata_all_channels = fromstring(in_data, int16)

		int16info = iinfo(int16)
		norm_coeff = max(abs(int16info.min), int16info.max)
		floatdata_all_channels = intdata_all_channels.astype(float64)/float(norm_coeff)

		channel = self.get_current_first_channel()
		nchannels = self.get_current_device_nchannels()
		if self.duo_input:
			channel_2 = self.get_current_second_channel()

		floatdata1 = floatdata_all_channels[channel::nchannels]

		if self.duo_input:
			floatdata2 = floatdata_all_channels[channel_2::nchannels]
			floatdata = vstack((floatdata1, floatdata2))
		else:
			floatdata = floatdata1
			floatdata.shape = (1, floatdata.size)

		self.new_data_available.emit(floatdata, input_time, status)

		self.chunk_number += 1

	def set_single_input(self):
		self.duo_input = False

	def set_duo_input(self):
		self.duo_input = True

	# returns the stream time in seconds
	def get_stream_time(self):
		return self.stream.get_time()

	def pause(self):
		self.stream.stop_stream()

	def restart(self):
		self.stream.start_stream()
Example #33
0
class music_visualizer:
    FRAME_WIDTH, FRAME_HEIGHT = (8, 32)
    NUM_COLOR_CHANNELS = 3
    NUM_AUDIO_CHANNELS = 2
    NUM_AUDIO_FRAMES_PER_BUFFER = 128  # This value is a power of 2 (optimal for FFT) and good for 44.1-96 kHz sampling rates
    # NOTE: Thanks to 'http://www.perbang.dk/rgbgradient/' for HSV gradient wheel color generation
    #       Start color: FF293B, End Color: 01156A
    COLOR_GRADIENT_WHEEL = \
        ['800000', '810F00', '832000', '843100', '864200', '885300', '896500', '8B7800',
         '8D8A00', '7F8E00', '6F9000', '5E9200', '4D9300', '3C9500', '2A9700', '179800',
         '059A00', '009B0D', '009D21', '009F35', '00A049', '00A25E', '00A473', '00A589',
         '00A79F', '009CA9', '0089AA', '0075AC', '0061AE', '004CAF', '0037B1', '0021B2']
    #['FF283B', 'FA2F27', 'F54725', 'F05F23', 'EB7521', 'E68B1F', 'E2A11D', 'DDB51C',
    # 'D8C91A', 'CBD318', 'AFCE17', '94CA15', '7AC514', '61C012', '48BB11', '31B610',
    # '1AB20E', '0DAD15', '0CA827', '0BA339', '0A9E49', '089A59', '079567', '079075',
    # '068B82', '057F86', '046B82', '03577D', '024578', '023473', '01246E', '00146A']

    @staticmethod
    def name():
        return 'Music visualizer'

    def _readRawAudioDataIntoAudioChannelFrames(self, audio_frames,
                                                frames_per_buffer, sample_size,
                                                signed):
        # Convert it to proper sample size
        left_audio_frames = [0] * frames_per_buffer
        right_audio_frames = [0] * frames_per_buffer

        for i in range(0, len(audio_frames), sample_size):
            frame_channel_data = int.from_bytes(audio_frames[i:(i +
                                                                sample_size)],
                                                byteorder='little',
                                                signed=signed)

            j = (i // sample_size)
            if j % 2 == 0:
                left_audio_frames[j //
                                  self.NUM_AUDIO_CHANNELS] = frame_channel_data
            else:
                right_audio_frames[
                    j // self.NUM_AUDIO_CHANNELS] = frame_channel_data

        return left_audio_frames, right_audio_frames

    def _getFFTAmplitudes(self, audio_frames):
        audio_frames = (np.abs(
            np.fft.fft(audio_frames)[:self.FRAME_HEIGHT])) / len(audio_frames)
        assert len(
            audio_frames
        ) == self.FRAME_HEIGHT, "BUG: 'getFFT()' must return DC and positive frequencies."

        return audio_frames

    def _scaleFFTAmplitudes(self, fft_amplitudes, max_limit):
        # NOTE: Here we log rescale the FFTs amplitudes such that it looks good. Nothing fancy about this algorithm.
        MIN_LOG_BOUND = 1.2
        MAGIC_NUMBER = 10.0
        scaler_func = lambda x: min(
            max(math.log(x + 0.00001) - MIN_LOG_BOUND, 0.0) / MAGIC_NUMBER *
            max_limit, max_limit)
        fft_amplitudes = list(
            map(lambda x: int(scaler_func(x)), fft_amplitudes))

        assert all((x >= 0 and x <= max_limit)
                   for x in fft_amplitudes), "FFT Amplitude scaling is buggy."

        return fft_amplitudes

    def __init__(self, path_prefix):
        assert len(self.COLOR_GRADIENT_WHEEL) == self.FRAME_HEIGHT, \
            "Need exactly {} colors in 'COLOR_GRADIENT_WHEEL'".format(self.FRAME_HEIGHT)
        # Convert hex string (for easy programmer modification) to bytearrays in 'COLOR_GRADIENT_WHEEL'
        for i, color_str in enumerate(self.COLOR_GRADIENT_WHEEL):
            self.COLOR_GRADIENT_WHEEL[i] = np.frombuffer(
                bytes.fromhex(color_str), dtype=np.uint8)

        self.template = np.zeros(
            (self.FRAME_HEIGHT, self.FRAME_WIDTH, self.NUM_COLOR_CHANNELS),
            dtype=np.uint8)
        self.pyaudio = PyAudio()
        audio_device_index = settings(
            path_prefix).get_selected_audio_device_index()
        self.audio_device_info = self.pyaudio.get_device_info_by_index(
            audio_device_index)
        if self.audio_device_info[
                'maxOutputChannels'] < self.NUM_AUDIO_CHANNELS:
            raise Exception("Audio output device should be at least stereo.")

        self.format = pyaudio.paInt16
        self.sample_size = pyaudio.get_sample_size(self.format)
        self.stream = None
        self.raw_audio_frames = b'\x00' * (self.NUM_AUDIO_CHANNELS *
                                           self.NUM_AUDIO_FRAMES_PER_BUFFER *
                                           self.sample_size)

    def __enter__(self):
        def audiodata_arrived(data, frame_count, time_info, status):
            self.raw_audio_frames = data
            return (data, pyaudio.paContinue)

        self.stream = self.pyaudio.open(
            format=self.format,
            channels=self.NUM_AUDIO_CHANNELS,
            rate=int(self.audio_device_info['defaultSampleRate']),
            input=True,
            frames_per_buffer=self.NUM_AUDIO_FRAMES_PER_BUFFER,
            input_device_index=self.audio_device_info['index'],
            stream_callback=audiodata_arrived,
            as_loopback=True)
        return self

    def __exit__(self, type, value, traceback):
        if self.stream is not None:
            self.stream.stop_stream()
            self.stream.close()
        self.pyaudio.terminate()

    def get_interval(self):
        return IntervalEnum.MSECS_100

    def get_frame(self):
        left_audio_frames, right_audio_frames = self._readRawAudioDataIntoAudioChannelFrames(
            copy(self.raw_audio_frames), self.NUM_AUDIO_FRAMES_PER_BUFFER,
            self.sample_size, True)

        # Get FFT Amplitudes of each channel and rescale them from 0 to 'FRAME_WIDTH'
        left_channel_FFTAmp = self._getFFTAmplitudes(left_audio_frames)
        left_channel_FFTAmp = self._scaleFFTAmplitudes(left_channel_FFTAmp,
                                                       self.FRAME_WIDTH)
        right_channel_FFTAmp = self._getFFTAmplitudes(right_audio_frames)
        right_channel_FFTAmp = self._scaleFFTAmplitudes(
            right_channel_FFTAmp, self.FRAME_WIDTH)

        # Create a deep copy of template to work on
        frame = deepcopy(self.template)

        # Draw rescaled amplitude bars on to 'frame'
        for i in range(self.FRAME_HEIGHT):
            # For left channel
            FFTAmp = left_channel_FFTAmp[i]
            if FFTAmp > 0:
                frame[i, 0:FFTAmp, :] = self.COLOR_GRADIENT_WHEEL[i]

            # For right channel
            FFTAmp = right_channel_FFTAmp[i]
            if FFTAmp > 0:
                frame[-(i + 1),
                      -1:-(FFTAmp + 1):-1, :] = self.COLOR_GRADIENT_WHEEL[i]

        return frame.flatten()
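The class above is written as a context manager, so a hypothetical driver would look roughly like the sketch below (the path_prefix value is an assumption, and the surrounding imports plus the settings helper and a loopback-capable PyAudio build are taken for granted).

# Illustration only: open the loopback stream and pull one visualizer frame.
with music_visualizer('/opt/visualizer/') as viz:
    print(viz.get_interval())  # refresh interval the caller's loop should use
    frame = viz.get_frame()    # flat uint8 array of 32 * 8 * 3 RGB values
    print(len(frame))          # 768 values: one per LED color channel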
Example #34
0
    def __init__(self):
        super(VoiceGame2, self).__init__(255, 255, 255, 255, WIDTH, HEIGHT)
        pygame.mixer.init()
        self.cloud = cocos.sprite.Sprite('fla.png')
        self.cloud.scale_x = 1.5
        self.cloud.scale_y = 1.83
        self.cloud.position = 300, 240
        self.add(self.cloud)
        self.gameover = None
        self.score = 0  #count score
        self.txt_score = cocos.text.Label(u'Score:0',
                                          font_name=FONTS,
                                          font_size=16,
                                          color=BLACK)
        self.txt_score.position = 510, 240
        self.add(self.txt_score, 99999)

        self.top = '', 0
        self.top_notice = cocos.text.Label(u'',
                                           font_name=FONTS,
                                           font_size=18,
                                           color=BLACK)
        self.top_notice.position = 400, 410
        self.add(self.top_notice, 99999)

        self.name = ''

        # init voice
        self.NUM_SAMPLES = 2048  # pyAudio cache size
        self.LEVEL = 1500  # sound threshold
        '''self.voicebar = Sprite('black.png', color=(0, 0, 255))
        self.voicebar.position = 20, 450
        self.voicebar.scale_y = 0.1
        self.voicebar.image_anchor = 0, 0
        self.add(self.voicebar)'''

        self.ppx = Flappy(self)
        self.add(self.ppx)
        self.floor2 = cocos.cocosnode.CocosNode()
        self.floor = cocos.cocosnode.CocosNode()
        self.add(self.floor)
        self.add(self.floor2)
        self.last_block = 0, 100
        for i in range(5):
            b = Tube(self)
            u = upTube(self)
            self.floor.add(b)
            self.floor.add(u)
            self.pitch_pic(u)
            pos = b.x + b.width, b.height

        # start inputing sound
        pa = PyAudio()
        SAMPLING_RATE = int(
            pa.get_device_info_by_index(0)['defaultSampleRate'])
        self.stream = pa.open(format=paInt16,
                              channels=1,
                              rate=SAMPLING_RATE,
                              input=True,
                              frames_per_buffer=self.NUM_SAMPLES)
        self.stream.stop_stream()

        pygame.mixer.music.load('intro.wav'.encode())
        pygame.mixer.music.play(1)

        self.schedule(self.update)
Example #35
0
class AudioDevice(QtCore.QObject):
    def __init__(self, logger):
        QtCore.QObject.__init__(self)
        self.logger = logger
        self.duo_input = False
        self.logger.push("Initializing PyAudio")
        self.pa = PyAudio()

        # look for devices
        self.input_devices = self.get_input_devices()
        self.output_devices = self.get_output_devices()

        for device in self.input_devices:
            self.logger.push("Opening the stream")
            self.stream = self.open_stream(device)
            self.device = device

            self.logger.push("Trying to read from input device %d" % device)
            if self.try_input_stream(self.stream):
                self.logger.push("Success")
                break
            else:
                self.logger.push("Fail")

        self.first_channel = 0
        nchannels = self.get_current_device_nchannels()
        if nchannels == 1:
            self.second_channel = 0
        else:
            self.second_channel = 1

        # counter for the number of input buffer overflows
        self.xruns = 0

    # method

    def get_readable_devices_list(self):
        devices_list = []

        default_device_index = self.get_default_input_device()

        for device in self.input_devices:
            dev_info = self.pa.get_device_info_by_index(device)
            api = self.pa.get_host_api_info_by_index(
                dev_info['hostApi'])['name']

            if device == default_device_index:
                extra_info = ' (system default)'
            else:
                extra_info = ''

            nchannels = self.pa.get_device_info_by_index(
                device)['maxInputChannels']

            desc = "%s (%d channels) (%s) %s" % (dev_info['name'], nchannels,
                                                 api, extra_info)

            devices_list += [desc]

        return devices_list

    # method
    def get_readable_output_devices_list(self):
        devices_list = []

        default_device_index = self.get_default_output_device()

        for device in self.output_devices:
            dev_info = self.pa.get_device_info_by_index(device)
            api = self.pa.get_host_api_info_by_index(
                dev_info['hostApi'])['name']

            if device == default_device_index:
                extra_info = ' (system default)'
            else:
                extra_info = ''

            nchannels = self.pa.get_device_info_by_index(
                device)['maxOutputChannels']

            desc = "%s (%d channels) (%s) %s" % (dev_info['name'], nchannels,
                                                 api, extra_info)

            devices_list += [desc]

        return devices_list

    # method
    def get_default_input_device(self):
        return self.pa.get_default_input_device_info()['index']

    # method
    def get_default_output_device(self):
        return self.pa.get_default_output_device_info()['index']

    # method
    def get_device_count(self):
        # FIXME only input devices should be chosen, not all of them !
        return self.pa.get_device_count()

    # method
    # returns a list of input devices index, starting with the system default
    def get_input_devices(self):
        device_count = self.get_device_count()
        default_input_device = self.get_default_input_device()

        device_range = list(range(0, device_count))
        # start by the default input device
        device_range.remove(default_input_device)
        device_range = [default_input_device] + device_range

        # select only the input devices by looking at the number of input channels
        input_devices = []
        for device in device_range:
            n_input_channels = self.pa.get_device_info_by_index(
                device)['maxInputChannels']
            if n_input_channels > 0:
                input_devices += [device]

        return input_devices

    # method
    # returns a list of output devices index, starting with the system default
    def get_output_devices(self):
        device_count = self.get_device_count()
        default_output_device = self.get_default_output_device()

        device_range = list(range(0, device_count))
        # start by the default output device
        device_range.remove(default_output_device)
        device_range = [default_output_device] + device_range

        # select only the output devices by looking at the number of output channels
        output_devices = []
        for device in device_range:
            n_output_channels = self.pa.get_device_info_by_index(
                device)['maxOutputChannels']
            if n_output_channels > 0:
                output_devices += [device]

        return output_devices

    # method
    def select_input_device(self, device):
        # save current stream in case we need to restore it
        previous_stream = self.stream
        previous_device = self.device

        self.stream = self.open_stream(device)
        self.device = device

        self.logger.push("Trying to read from input device #%d" % (device))
        if self.try_input_stream(self.stream):
            self.logger.push("Success")
            previous_stream.close()
            success = True
            self.first_channel = 0
            nchannels = self.get_current_device_nchannels()
            if nchannels == 1:
                self.second_channel = 0
            else:
                self.second_channel = 1
        else:
            self.logger.push("Fail")
            self.stream.close()
            self.stream = previous_stream
            self.device = previous_device
            success = False

        return success, self.device

    # method
    def select_first_channel(self, index):
        self.first_channel = index
        success = True
        return success, self.first_channel

    # method
    def select_second_channel(self, index):
        self.second_channel = index
        success = True
        return success, self.second_channel

    # method
    def open_stream(self, device):
        ''' by default we open the device stream with all the channels
        # (interleaved in the data buffer)'''
        maxInputChannels = self.pa.get_device_info_by_index(
            device)['maxInputChannels']
        stream = self.pa.open(format=paInt32,
                              channels=maxInputChannels,
                              rate=SAMPLING_RATE,
                              input=True,
                              frames_per_buffer=FRAMES_PER_BUFFER,
                              input_device_index=device)
        return stream

    # method
    # return the index of the current input device in the input devices list
    # (not the same as the PortAudio index, since the latter is the index
    # in the list of *all* devices, not only input ones)
    def get_readable_current_device(self):
        i = 0
        for device in self.input_devices:
            if device == self.device:
                break
            else:
                i += 1
        return i

    # method
    def get_readable_current_channels(self):
        dev_info = self.pa.get_device_info_by_index(self.device)
        nchannels = dev_info['maxInputChannels']

        if nchannels == 2:
            channels = ['L', 'R']
        else:
            channels = []
            for channel in range(0, dev_info['maxInputChannels']):
                channels += ["%d" % channel]

        return channels

    # method
    def get_current_first_channel(self):
        return self.first_channel

    # method
    def get_current_second_channel(self):
        return self.second_channel

    # method
    def get_current_device_nchannels(self):
        return self.pa.get_device_info_by_index(
            self.device)['maxInputChannels']

    # method
    # return True on success
    def try_input_stream(self, stream):
        n_try = 0
        while (stream.get_read_available() < FRAMES_PER_BUFFER
               and n_try < 1000000):
            n_try += 1

        if n_try == 1000000:
            return False
        else:
            lat_ms = 1000 * stream.get_input_latency()
            self.logger.push("Device claims %d ms latency" % (lat_ms))
            return True

    # try to update the audio buffer
    # return the number of chunks retrieved, and the time elapsed
    def update(self, ringbuffer):
        t = QtCore.QTime()
        t.start()

        channel = self.get_current_first_channel()
        nchannels = self.get_current_device_nchannels()
        if self.duo_input:
            channel_2 = self.get_current_second_channel()

        chunks = 0
        available = self.stream.get_read_available()
        available = int(floor(available / FRAMES_PER_BUFFER))
        for _ in range(0, available):
            try:
                rawdata = self.stream.read(FRAMES_PER_BUFFER)
            except IOError as inst:
                # FIXME specialize this exception handling code
                # to treat overflow errors particularly
                self.xruns += 1
                print "Caught an IOError on stream read.", inst
                break
            intdata_all_channels = fromstring(rawdata, int32)
            int32info = iinfo(int32)
            norm_coeff = max(abs(int32info.min), int32info.max)
            floatdata_all_channels = (intdata_all_channels.astype(float64) /
                                      float(norm_coeff))

            floatdata1 = floatdata_all_channels[channel::nchannels]
            if self.duo_input:
                floatdata2 = floatdata_all_channels[channel_2::nchannels]
                floatdata = vstack((floatdata1, floatdata2))
            else:
                floatdata = floatdata1
                floatdata.shape = (1, FRAMES_PER_BUFFER)
            # update the circular buffer
            ringbuffer.push(floatdata)
            chunks += 1
        return (chunks, t.elapsed(), chunks * FRAMES_PER_BUFFER)

    def set_single_input(self):
        self.duo_input = False

    def set_duo_input(self):
        self.duo_input = True

    # returns the stream time in seconds
    def get_stream_time(self):
        return self.stream.get_time()
Example #36
0
class AudioRecorder(DIWA_THREAD):
    """
    A thread for capturing audio continuously.
    It keeps a buffer that can be saved to a file.
    By convention AudioRecorder is written in mixed case even though
    we prefer upper case for threading types.

    :param parent: Parent of the thread.
    :type parent: :py:class:`diwacs.GraphicalUserInterface`

    """
    def __init__(self, parent):
        DIWA_THREAD.__init__(self, name='AudioRecorder')
        self.parent = parent
        self.py_audio = PyAudio()
        self.stream = self.open_mic_stream()
        self.buffer = deque(maxlen=diwavars.MAX_LENGTH)

    def stop(self):
        """
        Stop the audio recorder thread.

        """
        DIWA_THREAD.stop(self)
        sleep(0.1)
        self.stream.close()

    def find_input_device(self):
        """
        Find a microphone device.

        """
        for i in range(self.py_audio.get_device_count()):
            # Internationalization hack...
            # LOGGER.debug("Selecting audio device %s / %s " %
            # (str(i),str(self.py_audio.get_device_count())))
            # device_index = i
            # return device_index
            devinfo = self.py_audio.get_device_info_by_index(i)
            for keyword in ['microphone']:
                if keyword in devinfo['name'].lower():
                    return i

        default_device = self.py_audio.get_default_input_device_info()
        if default_device:
            return default_device['index']
        return None

    def open_mic_stream(self):
        """
        Opens the stream object for microphone.

        """
        device_index = None
        # uncomment the next line to search for a device.
        # device_index = self.find_input_device()
        stream = self.py_audio.open(
            format=diwavars.FORMAT,
            channels=diwavars.CHANNELS,
            rate=diwavars.RATE,
            input=True,
            input_device_index=device_index,
            frames_per_buffer=diwavars.INPUT_FRAMES_PER_BLOCK)
        return stream

    def run(self):
        """
        Continuously record from the microphone to the buffer.

        The size should be limited at diwavars.MAX_LENGTH constant.
        The implementation keeps only the most recent data in the
        case that there's too much data to store.

        """
        while not self._stop.is_set():
            try:
                data = self.stream.read(diwavars.INPUT_FRAMES_PER_BLOCK)
                while len(self.buffer) >= self.buffer.maxlen:
                    element = self.buffer.popleft()
                    del element
                self.buffer.append(data)
            except IOError as excp:
                _logger().exception('Error recording: {0!s}'.format(excp))

    def save(self, event_id, path):
        """
        Save the buffer to a file.

        """
        try:
            _logger().debug('Saving audio buffer')
            date_string = datetime.now().strftime('%d%m%Y%H%M')
            filename = '{0}_{1}.wav'.format(event_id, date_string)
            filepath = os.path.join(path, 'Audio')
            if not os.path.exists(filepath):
                os.makedirs(filepath)
            filepath = os.path.join(filepath, filename)
            sample_size = self.py_audio.get_sample_size(diwavars.FORMAT)
            wave_file = wave.open(filepath, 'wb')
            wave_file.setnchannels(diwavars.CHANNELS)
            wave_file.setsampwidth(sample_size)
            wave_file.setframerate(diwavars.RATE)
            wave_file.writeframes(b''.join(self.buffer))
            wave_file.close()
        except:
            _logger().exception('audio save exception')
        #CallAfter(self.parent.ClearStatusText)
        self.parent.diwa_state.remove_from_swnp_data('audio')
        CallAfter(self.parent.UpdateScreens, update=True)
Example #37
0
class RTAudio(object):

    def __init__(self, input_device_index, output_device_index, fs=48000, frame_length=1024,
                 channels=1, callback=None):
        self.input_device_index = input_device_index
        self.output_device_index = output_device_index
        self.fs = fs
        self.stream_callback = callback
        self.p = PyAudio()
        self.frame_length = frame_length
        self.channels = channels
        self.do_stop = False
        self.sleeptime = 0.1
        self.frames = 0
        
    def run(self):

        self.stream_start()

        if False:
            self.stream_run()
        else:
            t = Thread(target=self.stream_run)
            t.start()

    def stop(self):
        self.do_stop = True        

    def _callback(self, in_data, frame_count, time_info, status):        

        self.frames += 1

        in_data = np.frombuffer(in_data, dtype=np.int16)
        in_data = in_data.astype(np.float32) / 32767
        
        out_data = self(in_data) * 32767
        
        out_data = out_data.astype(np.int16)
        return out_data.tobytes(), paContinue

    def stream_start(self):

        self.stream = self.p.open(format=paInt16, channels=self.channels,
                                  rate=self.fs, input=True, output=True,
                                  input_device_index=self.input_device_index,
                                  output_device_index=self.output_device_index,
                                  frames_per_buffer=self.frame_length,
                                  stream_callback=self._callback)

        self.stream.start_stream()

    def stream_run(self):
        
        self.do_stop = False
        while self.stream.is_active() and not self.do_stop:
            time.sleep(self.sleeptime)

        self.stream_stop()


    def stream_stop(self):
            
        self.stream.stop_stream()
        self.stream.close()

        #self.p.terminate()
        
    def devices(self):

        devices = []
        for m in range(self.p.get_device_count()):
            dev = self.p.get_device_info_by_index(m)
            
            devices.append({'name': dev['name'], 'inputs': dev['maxInputChannels'], 'outputs': dev['maxOutputChannels']})
        return devices
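RTAudio._callback above calls self(in_data), so the class is meant to be subclassed with a __call__ that maps one float32 input block (scaled to [-1, 1]) to an output block of the same length. A minimal pass-through sketch follows; the Passthrough subclass and the use of the default devices (index None) are assumptions for illustration.

class Passthrough(RTAudio):
    """Hypothetical subclass: echo each input block to the output unchanged."""
    def __call__(self, in_data):
        return in_data

pt = Passthrough(input_device_index=None, output_device_index=None)
print(pt.devices())  # list device names with their input/output channel counts
pt.run()             # open the duplex stream and poll it from a background thread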