def testStep(self):
    """Advance the acoustics engine in large time steps and check that the
    attached sound transitions from PLAYING to READY once its single loop
    has been consumed, then plot the input/output buffers."""
    house = SunCgSceneLoader.loadHouseFromJson(
        "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
    listener = house.agents[0]
    listener.setPos(LVecBase3f(45, -42.5, 1.6))
    listener.setHpr(45, 0, 0)

    # Build a small sphere node tagged as an acoustic source.
    srcScale = 0.25
    srcId = 'source-0'
    spherePath = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
    objectsRoot = house.scene.attachNewNode('objects')
    objectsRoot.setTag('acoustics-mode', 'source')
    sourceNp = objectsRoot.attachNewNode('object-' + srcId)
    sphere = loadModel(spherePath)
    sphere.setName('model-' + srcId)
    sphere.setTransform(TransformState.makeScale(srcScale))
    sphere.reparentTo(sourceNp)
    sourceNp.setPos(LVecBase3f(39, -40.5, 1.5))

    fs = 16000.0
    hrtf = CipicHRTF(
        os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'), fs)
    acoustics = EvertAcoustics(
        house, hrtf, fs, maximumOrder=2, maxBufferLength=30.0)

    # Attach the audio clip to the source and start a single-loop playback.
    clipPath = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
    sound = EvertAudioSound(clipPath)
    acoustics.attachSoundToObject(sound, sourceNp)
    sound.setLoop(True)
    sound.setLoopCount(1)
    sound.play()

    # After 5 s the clip must still be playing; after the full 35 s it must
    # be finished. NOTE(review): the middle step (i == 1) is intentionally
    # not asserted — status there depends on the clip length.
    for stepIdx, dt in enumerate([5.0, 20.0, 10.0]):
        acoustics.step(dt)
        if stepIdx == 0:
            self.assertTrue(sound.status() == AudioSound.PLAYING)
        elif stepIdx > 1:
            self.assertTrue(sound.status() == AudioSound.READY)

    # Visualize the mono source buffer next to the binaural output buffer.
    inbuf = acoustics.srcBuffers[sound]
    outbuf = acoustics.outBuffers[listener.getName()]
    fig = plt.figure()
    plt.subplot(121)
    plt.plot(inbuf)
    plt.subplot(122)
    plt.plot(outbuf.T)
    plt.show(block=False)
    time.sleep(4.0)
    plt.close(fig)
def testUpdate(self):
    """Drive the acoustics engine through the Panda3D task manager via an
    AudioPlayer while an ambient sound loops forever."""
    house = SunCgSceneLoader.loadHouseFromJson(
        "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
    listener = house.agents[0]
    listener.setPos(LVecBase3f(45, -42.5, 1.6))
    listener.setHpr(45, 0, 0)

    fs = 16000.0
    hrtf = CipicHRTF(
        os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'), fs)
    acoustics = EvertAcoustics(
        house, hrtf, fs, maximumOrder=2, maxBufferLength=30.0)

    # Ambient sounds are position-independent; loop count 0 = loop forever.
    ambientPath = os.path.join(TEST_DATA_DIR, 'audio', 'radio.ogg')
    ambient = EvertAudioSound(ambientPath)
    ambient.setLoop(True)
    ambient.setLoopCount(0)
    acoustics.addAmbientSound(ambient)
    ambient.play()
    acoustics.step(0.0)

    # The player registers itself with the global task manager; keep the
    # reference alive while the tasks run.
    player = AudioPlayer(acoustics)
    for _ in range(10):
        taskMgr.step()
def testMultipleSources(self):
    """Render two simultaneous sources and check the binaural observation
    buffers for the agent have matching left/right shapes."""
    house = SunCgSceneLoader.loadHouseFromJson(
        "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
    listener = house.agents[0]
    listener.setPos(LVecBase3f(45, -42.5, 1.6))
    listener.setHpr(45, 0, 0)

    # Create one sphere source per position. NOTE(review): a fresh
    # 'objects' root node is attached for each source — preserved as-is.
    sourceNps = []
    for idx, position in enumerate([(39, -40.5, 1.5), (45.5, -42.5, 0.5)]):
        srcScale = 0.25
        srcId = 'source-%d' % (idx)
        spherePath = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
        objectsRoot = house.scene.attachNewNode('objects')
        objectsRoot.setTag('acoustics-mode', 'source')
        sourceNp = objectsRoot.attachNewNode('object-' + srcId)
        sphere = loadModel(spherePath)
        sphere.setName('model-' + srcId)
        sphere.setTransform(TransformState.makeScale(srcScale))
        sphere.reparentTo(sourceNp)
        sourceNp.setPos(LVecBase3f(*position))
        sourceNps.append(sourceNp)

    fs = 16000.0
    hrtf = CipicHRTF(
        os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'), fs)
    acoustics = EvertAcoustics(house, hrtf, fs, maximumOrder=2)

    # Attach a distinct audio clip to each source and start playback.
    clipNames = ['toilet.ogg', 'radio.ogg']
    for clipName, sourceNp in zip(clipNames, sourceNps):
        clipPath = os.path.join(TEST_DATA_DIR, 'audio', clipName)
        sound = EvertAudioSound(clipPath)
        acoustics.attachSoundToObject(sound, sourceNp)
        sound.setLoop(True)
        sound.setLoopCount(1)
        sound.play()

    # Simulate 2 s of audio in 100 ms increments.
    for _ in range(20):
        acoustics.step(dt=0.1)

    obs = acoustics.getObservationsForAgent(listener.getName())
    self.assertTrue('audio-buffer-right' in obs)
    self.assertTrue('audio-buffer-left' in obs)
    self.assertTrue(
        np.array_equal(obs['audio-buffer-right'].shape,
                       obs['audio-buffer-left'].shape))
def testAttachSoundToObject(self):
    """Smoke test: attaching a sound to an existing scene object must not
    raise."""
    fs = 16000.0
    house = SunCgSceneLoader.loadHouseFromJson(
        "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
    acoustics = EvertAcoustics(house, samplingRate=fs, maximumOrder=2)

    clipPath = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
    sound = EvertAudioSound(clipPath)
    # Pick an arbitrary object already present in the loaded house.
    target = house.scene.find('**/object-83*')
    acoustics.attachSoundToObject(sound, target)
def testInit(self):
    """Check that EvertAudioSound loads a mono float buffer at the expected
    sampling rate, and that resampling preserves duration.

    Fix: ``np.float`` was a deprecated alias of the builtin ``float``
    (i.e. float64) and was removed in NumPy 1.24 — comparing against it now
    raises AttributeError. Use ``np.float64`` instead, which is what the
    alias resolved to.
    """
    filename = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
    sound = EvertAudioSound(filename)

    # Loaded data is a 1-D float64 buffer at 16 kHz, ~15.85 s long.
    self.assertTrue(sound.data.ndim == 1)
    self.assertTrue(sound.data.dtype == np.float64)
    self.assertTrue(sound.samplingRate == 16000.0)
    self.assertTrue(np.allclose(sound.length(), 15.846, atol=1e-2))

    # Downsampling must keep the signal mono, float64, and same duration.
    sound.resample(8000.0)
    self.assertTrue(sound.data.ndim == 1)
    self.assertTrue(sound.data.dtype == np.float64)
    self.assertTrue(np.allclose(sound.length(), 15.846, atol=1e-2))
def testAddAmbientSound(self):
    """Mix an ambient loop with a positional source and check that 10 s of
    stepping yields 10 s of binaural output."""
    house = SunCgSceneLoader.loadHouseFromJson(
        "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
    listener = house.agents[0]
    listener.setPos(LVecBase3f(45, -42.5, 1.6))
    listener.setHpr(45, 0, 0)

    # Build a small sphere node tagged as an acoustic source.
    srcScale = 0.25
    srcId = 'source-0'
    spherePath = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
    objectsRoot = house.scene.attachNewNode('objects')
    objectsRoot.setTag('acoustics-mode', 'source')
    sourceNp = objectsRoot.attachNewNode('object-' + srcId)
    sphere = loadModel(spherePath)
    sphere.setName('model-' + srcId)
    sphere.setTransform(TransformState.makeScale(srcScale))
    sphere.reparentTo(sourceNp)
    sourceNp.setPos(LVecBase3f(39, -40.5, 1.5))

    fs = 16000.0
    hrtf = CipicHRTF(
        os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'), fs)
    acoustics = EvertAcoustics(
        house, hrtf, fs, maximumOrder=2, maxBufferLength=30.0)

    # Positional source: single-loop toilet clip.
    clipPath = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
    sound = EvertAudioSound(clipPath)
    acoustics.attachSoundToObject(sound, sourceNp)
    sound.setLoop(True)
    sound.setLoopCount(1)

    # Ambient bed: quiet radio clip looping forever (loop count 0).
    ambientPath = os.path.join(TEST_DATA_DIR, 'audio', 'radio.ogg')
    ambient = EvertAudioSound(ambientPath)
    ambient.setLoop(True)
    ambient.setLoopCount(0)
    ambient.setVolume(0.25)
    acoustics.addAmbientSound(ambient)
    ambient.play()

    # 5 s of ambient alone, then 5 s with the positional source added.
    acoustics.step(dt=5.0)
    sound.play()
    acoustics.step(dt=5.0)

    obs = acoustics.getObservationsForAgent(listener.getName())
    stereo = np.array([obs['audio-buffer-left'],
                       obs['audio-buffer-right']],
                      dtype=np.float32).T
    # Total rendered duration must equal the 10 s of simulation stepped.
    self.assertTrue(
        np.allclose(stereo.shape[0] / fs, 10.0, atol=1e-3))

    #from scipy.io import wavfile
    #wavfile.write('output.wav', samplingRate, data)

    fig = plt.figure()
    plt.plot(stereo)
    plt.show(block=False)
    time.sleep(1.0)
    plt.close(fig)
def testRenderHouseWithAcousticsPath(self):
    """Render the house with acoustics debug geometry visible from a
    top-down camera, then compute and plot the binaural impulse response
    between the source and the agent."""
    house = SunCgSceneLoader.loadHouseFromJson(
        "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
    listener = house.agents[0]
    listener.setPos(LVecBase3f(45, -42.5, 1.6))
    listener.setHpr(45, 0, 0)

    # Build a small sphere node tagged as an acoustic source.
    srcScale = 0.25
    srcId = 'source-0'
    spherePath = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
    objectsRoot = house.scene.attachNewNode('objects')
    objectsRoot.setTag('acoustics-mode', 'source')
    sourceNp = objectsRoot.attachNewNode('object-' + srcId)
    sphere = loadModel(spherePath)
    sphere.setName('model-' + srcId)
    sphere.setTransform(TransformState.makeScale(srcScale))
    sphere.reparentTo(sourceNp)
    sourceNp.setPos(LVecBase3f(39, -40.5, 1.5))

    fs = 16000.0
    hrtf = CipicHRTF(
        os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'), fs)
    # debug=True makes the engine draw the computed acoustic paths.
    acoustics = EvertAcoustics(house, hrtf, fs, maximumOrder=2, debug=True)

    # Start playback so that paths exist before the first step.
    clipPath = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
    sound = EvertAudioSound(clipPath)
    acoustics.attachSoundToObject(sound, sourceNp)
    sound.play()
    acoustics.step(0.0)

    # Hide ceilings so the top-down camera can see inside the rooms.
    for ceilingNp in house.scene.findAllMatches(
            '**/layouts/*/acoustics/*c'):
        ceilingNp.hide(BitMask32.allOn())

    viewer = Viewer(house, interactive=False)

    # Configure the camera
    # NOTE: in Panda3D, the X axis points to the right, the Y axis is forward, and Z is up
    center = listener.getNetTransform().getPos()
    camMat = np.array([[1.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, -1.0, 0.0],
                       [0.0, 1.0, 0.0, 0.0],
                       [center.x, center.y, 20, 1]])
    viewer.cam.setMat(LMatrix4f(*camMat.ravel()))

    for _ in range(20):
        viewer.step()
    time.sleep(1.0)

    viewer.destroy()
    viewer.graphicsEngine.removeAllWindows()

    # Calculate and show impulse responses
    impulse = acoustics.calculateImpulseResponse(
        sourceNp.getName(), listener.getName())

    fig = plt.figure()
    plt.plot(impulse.impulse[0], color='b', label='Left channel')
    plt.plot(impulse.impulse[1], color='g', label='Right channel')
    plt.legend()
    plt.show(block=False)
    time.sleep(1.0)
    plt.close(fig)

    acoustics.destroy()
def testRenderSimpleCubeRoom(self):
    """Build a bare 10 m cube room with a single source, verify the EVERT
    world geometry (size, center, element counts), move the agent around
    while rendering, and plot the resulting impulse response."""
    fs = 16000.0
    scene = Scene()
    hrtf = CipicHRTF(
        os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'), fs)
    viewer = Viewer(scene, interactive=False)

    # Room geometry: a wireframe cube scaled to 10 m, tagged as obstacle.
    roomSize = 10.0
    roomId = 'room-0'
    cubePath = os.path.join(TEST_DATA_DIR, 'models', 'cube.egg')
    layoutRoot = scene.scene.attachNewNode('layouts')
    roomNp = layoutRoot.attachNewNode('object-' + roomId)
    roomNp.setTag('acoustics-mode', 'obstacle')
    cube = loadModel(cubePath)
    cube.setName('model-' + roomId)
    cube.setTransform(TransformState.makeScale(roomSize))
    cube.setRenderModeWireframe()
    cube.reparentTo(roomNp)
    roomNp.setPos(LVecBase3f(0.0, 0.0, 0.0))

    # Sound source: a small sphere at the room center.
    srcScale = 0.25
    srcId = 'source-0'
    spherePath = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
    objectsRoot = scene.scene.attachNewNode('objects')
    sourceNp = objectsRoot.attachNewNode('object-' + srcId)
    sourceNp.setTag('acoustics-mode', 'source')
    sphere = loadModel(spherePath)
    sphere.setName('model-' + srcId)
    sphere.setTransform(TransformState.makeScale(srcScale))
    sphere.reparentTo(sourceNp)
    sourceNp.setPos(LVecBase3f(0.0, 0.0, 0.0))

    acoustics = EvertAcoustics(scene, hrtf, fs, maximumOrder=3,
                               materialAbsorption=False,
                               frequencyDependent=False, debug=True)

    clipPath = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
    sound = EvertAudioSound(clipPath)
    acoustics.attachSoundToObject(sound, sourceNp)
    acoustics.step(0.1)

    # The EVERT world works in millimetres; the cube must come out centered
    # with 12 triangles (2 per face).
    center = acoustics.world.getCenter()
    self.assertTrue(
        np.allclose(acoustics.world.getMaxLength() / 1000.0, roomSize))
    self.assertTrue(
        np.allclose([center.x, center.y, center.z], [0.0, 0.0, 0.0]))
    self.assertTrue(acoustics.world.numElements() == 12)
    self.assertTrue(acoustics.world.numConvexElements() == 12)

    # Configure the camera
    # NOTE: in Panda3D, the X axis points to the right, the Y axis is forward, and Z is up
    camMat = np.array([
        0.999992, 0.00394238, 0, 0,
        -0.00295702, 0.750104, -0.661314, 0,
        -0.00260737, 0.661308, 0.75011, 0,
        0.0, -25.0, 22, 1
    ])
    viewer.cam.setMat(LMatrix4f(*camMat.ravel()))

    # Move the agent to three positions, rendering a few frames at each.
    listener = scene.agents[0]
    positions = [
        LVecBase3f(0.25 * roomSize, -0.25 * roomSize, 0.3 * roomSize),
        LVecBase3f(0.35 * roomSize, -0.35 * roomSize, 0.4 * roomSize),
        LVecBase3f(-0.25 * roomSize, 0.25 * roomSize, -0.3 * roomSize),
    ]
    for pos in positions:
        listener.setPos(pos)
        for _ in range(10):
            viewer.step()
        time.sleep(1.0)

    # Calculate and show impulse responses
    impulse = acoustics.calculateImpulseResponse(
        sourceNp.getName(), listener.getName())

    fig = plt.figure()
    plt.plot(impulse.impulse[0], color='b', label='Left channel')
    plt.plot(impulse.impulse[1], color='g', label='Right channel')
    plt.legend()
    plt.show(block=False)
    time.sleep(1.0)
    plt.close(fig)

    acoustics.destroy()
    viewer.destroy()
    viewer.graphicsEngine.removeAllWindows()