Example #1
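The examples below are test methods lifted from a larger test module, so the module-level imports and constants are not shown. The following is a minimal sketch of what they rely on; the non-Panda3D module paths and the TEST_DATA_DIR / TEST_SUNCG_DATA_DIR placeholders are assumptions for illustration, not part of the original code.

    import os
    import time

    import numpy as np
    import matplotlib.pyplot as plt

    from panda3d.core import (AudioSound, BitMask32, LMatrix4f, LVecBase3f,
                              TransformState)

    # Assumed package layout for the scene/acoustics helpers used below
    from home_platform.suncg import SunCgSceneLoader
    from home_platform.acoustics import (CipicHRTF, EvertAcoustics,
                                         EvertAudioSound, AudioPlayer)
    from home_platform.rendering import Viewer
    from home_platform.utils import loadModel

    # Test-suite constants: directories holding the test assets and SUNCG data
    TEST_DATA_DIR = '...'
    TEST_SUNCG_DATA_DIR = '...'

The taskMgr used in Example #1 refers to Panda3D's global task manager; it can be imported from direct.task.TaskManagerGlobal and is also installed as a builtin once a ShowBase application exists.
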
    def testUpdate(self):

        scene = SunCgSceneLoader.loadHouseFromJson(
            "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)

        agentNp = scene.agents[0]
        agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
        agentNp.setHpr(45, 0, 0)

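        # Load the CIPIC HRTF and set up the acoustics engine for the scene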
        samplingRate = 16000.0
        hrtf = CipicHRTF(os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
                         samplingRate)
        acoustics = EvertAcoustics(scene,
                                   hrtf,
                                   samplingRate,
                                   maximumOrder=2,
                                   maxBufferLength=30.0)

        # Add ambient sound
        filename = os.path.join(TEST_DATA_DIR, 'audio', 'radio.ogg')
        ambientSound = EvertAudioSound(filename)
        ambientSound.setLoop(True)
        ambientSound.setLoopCount(0)
        acoustics.addAmbientSound(ambientSound)
        ambientSound.play()

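        # Advance the simulation once, then let the AudioPlayer render audio by stepping the task manager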
        acoustics.step(0.0)

        player = AudioPlayer(acoustics)
        for _ in range(10):
            taskMgr.step()
Example #2
    def testStep(self):

        scene = SunCgSceneLoader.loadHouseFromJson(
            "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)

        agentNp = scene.agents[0]
        agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
        agentNp.setHpr(45, 0, 0)

        # Define a sound source
        sourceSize = 0.25
        modelId = 'source-0'
        modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
        objectsNp = scene.scene.attachNewNode('objects')
        objectsNp.setTag('acoustics-mode', 'source')
        objectNp = objectsNp.attachNewNode('object-' + modelId)
        model = loadModel(modelFilename)
        model.setName('model-' + modelId)
        model.setTransform(TransformState.makeScale(sourceSize))
        model.reparentTo(objectNp)
        objectNp.setPos(LVecBase3f(39, -40.5, 1.5))

        samplingRate = 16000.0
        hrtf = CipicHRTF(os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
                         samplingRate)
        acoustics = EvertAcoustics(scene,
                                   hrtf,
                                   samplingRate,
                                   maximumOrder=2,
                                   maxBufferLength=30.0)

        # Attach sound to object
        filename = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
        sound = EvertAudioSound(filename)
        acoustics.attachSoundToObject(sound, objectNp)
        sound.setLoop(True)
        sound.setLoopCount(1)
        sound.play()

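        # Step the simulation over several intervals, checking playback status
        # and plotting the source and output buffers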
        for i, dt in enumerate([5.0, 20.0, 10.0]):

            acoustics.step(dt)
            if i == 0:
                self.assertTrue(sound.status() == AudioSound.PLAYING)
            elif i > 1:
                self.assertTrue(sound.status() == AudioSound.READY)
            inbuf = acoustics.srcBuffers[sound]
            outbuf = acoustics.outBuffers[agentNp.getName()]

            fig = plt.figure()
            plt.subplot(121)
            plt.plot(inbuf)
            plt.subplot(122)
            plt.plot(outbuf.T)
            plt.show(block=False)
            time.sleep(4.0)
            plt.close(fig)
Example #3
    def testMultipleSources(self):

        scene = SunCgSceneLoader.loadHouseFromJson(
            "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)

        agentNp = scene.agents[0]
        agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
        agentNp.setHpr(45, 0, 0)

        # Define multiple sound sources
        sources = []
        for i, pos in enumerate([(39, -40.5, 1.5), (45.5, -42.5, 0.5)]):
            sourceSize = 0.25
            modelId = 'source-%d' % (i)
            modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
            objectsNp = scene.scene.attachNewNode('objects')
            objectsNp.setTag('acoustics-mode', 'source')
            objectNp = objectsNp.attachNewNode('object-' + modelId)
            model = loadModel(modelFilename)
            model.setName('model-' + modelId)
            model.setTransform(TransformState.makeScale(sourceSize))
            model.reparentTo(objectNp)
            objectNp.setPos(LVecBase3f(*pos))
            sources.append(objectNp)

        samplingRate = 16000.0
        hrtf = CipicHRTF(os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
                         samplingRate)
        acoustics = EvertAcoustics(scene, hrtf, samplingRate, maximumOrder=2)

        audioFilenames = ['toilet.ogg', 'radio.ogg']
        for audioFilename, source in zip(audioFilenames, sources):
            # Attach sound to object
            filename = os.path.join(TEST_DATA_DIR, 'audio', audioFilename)
            sound = EvertAudioSound(filename)
            acoustics.attachSoundToObject(sound, source)
            sound.setLoop(True)
            sound.setLoopCount(1)
            sound.play()

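        # Step the simulation and check that the left/right observation buffers have matching shapes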
        for _ in range(20):
            acoustics.step(dt=0.1)
            obs = acoustics.getObservationsForAgent(agentNp.getName())
            self.assertTrue('audio-buffer-right' in obs)
            self.assertTrue('audio-buffer-left' in obs)
            self.assertTrue(
                np.array_equal(obs['audio-buffer-right'].shape,
                               obs['audio-buffer-left'].shape))
Example #4
    def testAddAmbientSound(self):

        scene = SunCgSceneLoader.loadHouseFromJson(
            "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)

        agentNp = scene.agents[0]
        agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
        agentNp.setHpr(45, 0, 0)

        # Define a sound source
        sourceSize = 0.25
        modelId = 'source-0'
        modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
        objectsNp = scene.scene.attachNewNode('objects')
        objectsNp.setTag('acoustics-mode', 'source')
        objectNp = objectsNp.attachNewNode('object-' + modelId)
        model = loadModel(modelFilename)
        model.setName('model-' + modelId)
        model.setTransform(TransformState.makeScale(sourceSize))
        model.reparentTo(objectNp)
        objectNp.setPos(LVecBase3f(39, -40.5, 1.5))

        samplingRate = 16000.0
        hrtf = CipicHRTF(os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
                         samplingRate)
        acoustics = EvertAcoustics(scene,
                                   hrtf,
                                   samplingRate,
                                   maximumOrder=2,
                                   maxBufferLength=30.0)

        # Attach sound to object
        filename = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
        sound = EvertAudioSound(filename)
        acoustics.attachSoundToObject(sound, objectNp)
        sound.setLoop(True)
        sound.setLoopCount(1)

        # Add ambient sound
        filename = os.path.join(TEST_DATA_DIR, 'audio', 'radio.ogg')
        ambientSound = EvertAudioSound(filename)
        ambientSound.setLoop(True)
        ambientSound.setLoopCount(0)
        ambientSound.setVolume(0.25)
        acoustics.addAmbientSound(ambientSound)

        ambientSound.play()
        acoustics.step(dt=5.0)
        sound.play()
        acoustics.step(dt=5.0)

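        # Retrieve the agent's binaural buffers; the total duration should match the 10 s simulated above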
        obs = acoustics.getObservationsForAgent(agentNp.getName())
        data = np.array([obs['audio-buffer-left'], obs['audio-buffer-right']],
                        dtype=np.float32).T
        self.assertTrue(
            np.allclose(data.shape[0] / samplingRate, 10.0, atol=1e-3))

        #from scipy.io import wavfile
        #wavfile.write('output.wav', samplingRate, data)

        fig = plt.figure()
        plt.plot(data)
        plt.show(block=False)
        time.sleep(1.0)
        plt.close(fig)
Example #5
    def testRenderHouseWithAcousticsPath(self):

        scene = SunCgSceneLoader.loadHouseFromJson(
            "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)

        agentNp = scene.agents[0]
        agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
        agentNp.setHpr(45, 0, 0)

        # Define a sound source
        sourceSize = 0.25
        modelId = 'source-0'
        modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
        objectsNp = scene.scene.attachNewNode('objects')
        objectsNp.setTag('acoustics-mode', 'source')
        objectNp = objectsNp.attachNewNode('object-' + modelId)
        model = loadModel(modelFilename)
        model.setName('model-' + modelId)
        model.setTransform(TransformState.makeScale(sourceSize))
        model.reparentTo(objectNp)
        objectNp.setPos(LVecBase3f(39, -40.5, 1.5))

        samplingRate = 16000.0
        hrtf = CipicHRTF(os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
                         samplingRate)
        acoustics = EvertAcoustics(scene,
                                   hrtf,
                                   samplingRate,
                                   maximumOrder=2,
                                   debug=True)

        # Attach sound to object
        filename = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
        sound = EvertAudioSound(filename)
        acoustics.attachSoundToObject(sound, objectNp)
        sound.play()

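        # Step once so the acoustics engine (debug mode) builds the path geometry shown by the viewer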
        acoustics.step(0.0)

        # Hide ceilings
        for nodePath in scene.scene.findAllMatches(
                '**/layouts/*/acoustics/*c'):
            nodePath.hide(BitMask32.allOn())

        viewer = Viewer(scene, interactive=False)

        # Configure the camera
        # NOTE: in Panda3D, the X axis points to the right, the Y axis is forward, and Z is up
        center = agentNp.getNetTransform().getPos()
        mat = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, -1.0, 0.0],
                        [0.0, 1.0, 0.0, 0.0], [center.x, center.y, 20, 1]])
        mat = LMatrix4f(*mat.ravel())
        viewer.cam.setMat(mat)

        for _ in range(20):
            viewer.step()
        time.sleep(1.0)

        viewer.destroy()
        viewer.graphicsEngine.removeAllWindows()

        # Calculate and show impulse responses
        impulse = acoustics.calculateImpulseResponse(objectNp.getName(),
                                                     agentNp.getName())

        fig = plt.figure()
        plt.plot(impulse.impulse[0], color='b', label='Left channel')
        plt.plot(impulse.impulse[1], color='g', label='Right channel')
        plt.legend()
        plt.show(block=False)
        time.sleep(1.0)
        plt.close(fig)

        acoustics.destroy()