def __init__(self, wavefile, volume_factor=0.2, pitch_factor=2.5, base_pitch=1):
    """Open *wavefile* as an OpenAL source and remember the tuning factors."""
    self.volume_factor = volume_factor
    self.pitch_factor = pitch_factor
    self.base_pitch = base_pitch
    self.source = openal.oalOpen(wavefile)
    self.source.set_gain(2)
    # position the source relative to the listener rather than in world space
    self.source.set_source_relative(True)
def on_key(self, _win, key, _scancode, action, _mods):
    """'Q' or 'Escape' quits."""
    if action in (glfw.PRESS, glfw.REPEAT):
        if key in (glfw.KEY_ESCAPE, glfw.KEY_Q):
            glfw.set_window_should_close(self.win, True)
        if key == glfw.KEY_W:
            # cycle through the polygon fill modes
            GL.glPolygonMode(GL.GL_FRONT_AND_BACK, next(self.fill_modes))
        if key == glfw.KEY_SPACE:
            # rewind the animation clock
            glfw.set_time(0.0)
        if key == glfw.KEY_A:
            AL.oalQuit()
        if key == glfw.KEY_S:
            sound = AL.oalOpen("lotr.wav")
            sound.play()
        # forward to Node.key_handler, which notifies every drawable
        self.key_handler(key)
def run(self):
    """Main render loop for this OpenGL window."""
    self.text_help()
    # start background music; it keeps playing while the loop runs
    music = AL.oalOpen("lotr.wav")
    music.play()
    while not glfw.window_should_close(self.win):
        # clear colour and depth buffers before drawing the next frame
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)

        size = glfw.get_window_size(self.win)
        view = self.trackball.view_matrix()
        projection = self.trackball.projection_matrix(size)

        # draw our scene objects
        self.draw(projection, view, identity())

        # present the frame, then process pending window events
        glfw.swap_buffers(self.win)
        glfw.poll_events()
    AL.oalQuit()
"""Play an audio file with OpenAL while rotating it in space, to test HRTF."""
import openal
import importlib
import time
import math
import sys
import os

os.chdir(os.path.dirname(os.path.abspath(__file__)))

RADIUS = 1  # distance of the source from the listener
source = openal.oalOpen('lone_ranger_left.wav')
source.play()

# sweep the source through two full circles, one degree per step
for angle in range(360 * 2):
    theta = (angle + 60) / 180 * math.pi
    source.set_position((RADIUS * math.cos(theta), RADIUS * math.sin(theta), 0))
    time.sleep(3 / 360)  # one revolution every 3 seconds

openal.oalQuit()
# -*- coding: utf-8 -*- import openal import time import pyautogui from msvcrt import getch, kbhit volume_factor = 0.01 pitch_factor = 0.01 z_pos = 0.0 z_speed = 0.5 source = openal.oalOpen("sound.wav") source.play() [mouse_x, mouse_y] = pyautogui.position() while True: if kbhit(): key = ord(getch()) if key == 80: #down-arrow z_pos += z_speed elif key == 72: #up-arrow z_pos -= z_speed [curr_x, curr_y] = pyautogui.position() relative_x, relative_y = curr_x - mouse_x, curr_y - mouse_y
# --- sender side: signal end-of-stream and release resources ---
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto(b"end", addr)
s.close()
f.close()
print("transmit complete")  # fixed typo: was "conplete"

# --- receiver side: collect datagrams until the end marker arrives ---
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((host, port))
while True:
    data = s.recv(packetSize)
    if data == b'end':
        break
    audiof.write(data)
audiof.close()
print("receive complete")
print(time.time() - startTime)  # elapsed transfer time
s.close()

# play back the received file; poll instead of spinning at 100% CPU
source = al.oalOpen(audiof.name)
source.play()
while source.get_state() == al.AL_PLAYING:
    time.sleep(0.05)
al.oalQuit()
cap.release()
import time

import openal as al

# Play the same clip five times, tearing OpenAL down between runs to
# exercise repeated init/quit cycles.
for _ in range(5):
    al.oalInit()
    source = al.oalOpen("audio.wav")
    source.play()
    # poll instead of busy-waiting at 100% CPU while the clip plays
    while source.get_state() == al.AL_PLAYING:
        time.sleep(0.05)
    al.oalQuit()
# Trigger helpers: each plays one pre-loaded drum sample (run on a thread so
# keyboard polling is not blocked by playback).
def playHhOpen():
    hhOpen.play()

def playTomOne():
    tomOne.play()

def playTomTwo():
    tomTwo.play()

def playTomThree():
    tomThree.play()

def playSplash():
    splash.play()

# Pre-load every drum sample once at startup.
caixa = oalOpen("caixa.wav")
bumbo = oalOpen("bumbo.wav")
hhClose = oalOpen("hhClose.wav")
hhOpen = oalOpen("hhOpen.wav")
tomOne = oalOpen("tom1.wav")
tomTwo = oalOpen("tom2.wav")
tomThree = oalOpen("tom3.wav")
splash = oalOpen("splash.wav")

# Poll the keyboard every 20 ms and fire the matching sample on a thread.
# NOTE(review): playCaixa is not defined in this chunk — presumably defined
# elsewhere in the file alongside the helpers above; verify.
# NOTE(review): holding a key spawns a new thread on every 20 ms tick —
# confirm that retriggering is intended.
while True:
    time.sleep(0.02)
    # snare (caixa) detection
    if keyboard.is_pressed('v'):
        t = threading.Thread(target=playCaixa)
        t.start()
def __init__(self, path):
    """Open the audio file at *path*; playback starts in the un-paused state."""
    self.paused = False
    self.handle = openal.oalOpen(path)
# -*- coding: utf-8 -*-
# Follow the mouse: pan the sound by horizontal/vertical offset and raise the
# pitch with distance from the starting point.
import openal
import time
import pyautogui

volume_factor = 0.02   # mouse pixels -> position units
pitch_factor = 0.001   # mouse pixels -> pitch increase

source = openal.oalOpen("audio_playground/sound.wav")
start_x, start_y = pyautogui.position()  # reference point at startup

while True:
    cur_x, cur_y = pyautogui.position()
    dx = cur_x - start_x
    dy = cur_y - start_y
    source.set_position([dx * volume_factor, dy * volume_factor, 0])
    source.set_pitch(1 + (abs(dx) + abs(dy)) * pitch_factor)
    time.sleep(0.01)
    print("X: {}, Y: {}".format(dx, dy))
    print("Current pos and pitch: ", source.position, source.pitch)
    # restart the clip whenever it finishes
    if source.get_state() != openal.AL_PLAYING:
        source.play()

# unreachable (the loop above never exits) — kept to mirror the original
openal.oalQuit()
def play_sound(self, file_path: Path):
    """Start asynchronous playback of the sound file at *file_path*."""
    logger.debug(f"Playing sound at {file_path}")
    sound = oalOpen(str(file_path))
    # keep a reference so the source is not garbage-collected mid-playback
    self._audio_threads.append(sound)
    sound.play()
#!/usr/bin/python
# Simple multiplication quiz: plays a cheer for a right answer and breaking
# glass for a wrong one.
import random
import openal

youpie = openal.oalOpen("cheer.wav")
fail = openal.oalOpen("glass.wav")

while True:
    a = random.randrange(0, 12)
    b = random.randrange(0, 12)
    print("How much is {} * {} ?".format(a, b))
    c = input()
    # Non-numeric input used to crash with ValueError; treat it as a wrong
    # answer instead.
    try:
        guess = int(c)
    except ValueError:
        guess = None
    if guess == a * b:
        print("Well done!")
        youpie.play()
    else:
        print("No, sorry it is wrong...")
        fail.play()
# Load a stereo WAV both through OpenAL (for playback) and through audioread
# (to get the raw samples as a normalised float32 buffer).
import audioread
import numpy as np
import openal

audioBuffer = []          # raw int16 chunks as they are decoded
sampleRate = None
audioLength = None        # duration in seconds
soundFile = 'Jerobeam Fenderson - Planets.wav'

# open with OpenAL
alSound = openal.oalOpen(soundFile)

# load audio: decode the whole file into interleaved int16 sample chunks
with audioread.audio_open(soundFile) as inaudio:
    assert inaudio.channels == 2  # stereo input required
    sampleRate = inaudio.samplerate
    audioLength = inaudio.duration
    for buf in inaudio:
        data = np.frombuffer(buf, dtype=np.int16)
        audioBuffer.append(data)

# (samples, 2) float32 array, then normalised to roughly [-1, 1]
dataBuffer = np.concatenate(audioBuffer).reshape((-1, 2)).astype(np.float32)
numTotalSamples = dataBuffer.shape[0]
# NOTE(review): int16 max is 0x7fff (32767); dividing by 0x7fff - 1 = 32766
# is an odd normaliser — confirm the off-by-one is intentional.
dataBuffer /= (0x7fff - 1)  # max value of int16
dataBuffer = dataBuffer.flatten()  # back to interleaved L/R
'''
# shows Lissajous curve
#tArray = np.arange(0, 1000000).astype(np.float)
tArray = np.linspace(0, 1000000, 1000000).astype(np.float)
numTotalSamples = tArray.size
args = parser.parse_args() # 检查音频文件是否存在 音频文件 = args.Media if not os.path.exists(音频文件): print('音频文件不存在') exit() # 将音频文件分成两个单独的单声道文件 临时左声道文件, 临时右声道文件 = 得到临时左右音频文件(音频文件) # 设置距离直径 r = 1 # 打开两个声音源 左后源 = openal.oalOpen(临时左声道文件) 右后源 = openal.oalOpen(临时右声道文件) # 初始化两个音源的三维位置 左后源.set_position((r, r * 1, 0)) 右后源.set_position((r, -r * 1, 0)) # 开始播放 左后源.play() 右后源.play() # 在这里睡眠,直到播放完毕 # while 左后源.get_state() == openal.AL_PLAYING or \ # 右后源.get_state() == openal.AL_PLAYING: # time.sleep(1)