Example #1
    def testGetColorsFromObjectBasic(self):
        modelId = '317'
        modelFilename = os.path.join(TEST_SUNCG_DATA_DIR, "object",
                                     str(modelId),
                                     str(modelId) + ".egg")
        assert os.path.exists(modelFilename)
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        obj = NodePath('object-' + str(modelId))
        model.reparentTo(obj)
        colorDescriptions = MaterialColorTable.getColorsFromObject(
            obj, mode='basic')
        self.assertTrue(len(colorDescriptions) == 1)
        self.assertTrue(colorDescriptions[0] == "silver")

        modelId = '83'
        modelFilename = os.path.join(TEST_SUNCG_DATA_DIR, "object",
                                     str(modelId),
                                     str(modelId) + ".egg")
        assert os.path.exists(modelFilename)
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        obj = NodePath('object-' + str(modelId))
        model.reparentTo(obj)
        colorDescriptions = MaterialColorTable.getColorsFromObject(
            obj, mode='basic')
        self.assertTrue(len(colorDescriptions) == 1)
        self.assertTrue(colorDescriptions[0] == "white")
Example #2
    def testGetColorsFromObjectAdvanced(self):
        modelId = '317'
        modelFilename = os.path.join(TEST_SUNCG_DATA_DIR, "object",
                                     str(modelId),
                                     str(modelId) + ".egg")
        assert os.path.exists(modelFilename)
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        obj = NodePath('object-' + str(modelId))
        model.reparentTo(obj)
        colorDescriptions = MaterialColorTable.getColorsFromObject(
            obj, mode='advanced')
        self.assertTrue(len(colorDescriptions) == 1)
        self.assertTrue(colorDescriptions[0] == "navajo white")

        colorDescriptions = MaterialColorTable.getColorsFromObject(
            obj, mode='advanced', thresholdRelArea=0.0)
        self.assertTrue(len(colorDescriptions) == 2)
        self.assertTrue("navajo white" in colorDescriptions)
        self.assertTrue("dark slate gray" in colorDescriptions)

        modelId = '210'
        modelFilename = os.path.join(TEST_SUNCG_DATA_DIR, "object",
                                     str(modelId),
                                     str(modelId) + ".egg")
        assert os.path.exists(modelFilename)
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        obj = NodePath('object-' + str(modelId))
        model.reparentTo(obj)
        colorDescriptions = MaterialColorTable.getColorsFromObject(
            obj, mode='advanced')
        self.assertTrue(len(colorDescriptions) == 2)
        self.assertTrue("dark gray" in colorDescriptions)
        self.assertTrue("cadet blue" in colorDescriptions)
Example #3
    def testStep(self):

        scene = SunCgSceneLoader.loadHouseFromJson(
            "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)

        agentNp = scene.agents[0]
        agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
        agentNp.setHpr(45, 0, 0)

        # Define a sound source
        sourceSize = 0.25
        modelId = 'source-0'
        modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
        objectsNp = scene.scene.attachNewNode('objects')
        objectsNp.setTag('acoustics-mode', 'source')
        objectNp = objectsNp.attachNewNode('object-' + modelId)
        model = loadModel(modelFilename)
        model.setName('model-' + modelId)
        model.setTransform(TransformState.makeScale(sourceSize))
        model.reparentTo(objectNp)
        objectNp.setPos(LVecBase3f(39, -40.5, 1.5))

        samplingRate = 16000.0
        hrtf = CipicHRTF(os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
                         samplingRate)
        acoustics = EvertAcoustics(scene,
                                   hrtf,
                                   samplingRate,
                                   maximumOrder=2,
                                   maxBufferLength=30.0)

        # Attach sound to object
        filename = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
        sound = EvertAudioSound(filename)
        acoustics.attachSoundToObject(sound, objectNp)
        sound.setLoop(True)
        sound.setLoopCount(1)
        sound.play()

        for i, dt in enumerate([5.0, 20.0, 10.0]):

            acoustics.step(dt)
            if i == 0:
                self.assertTrue(sound.status() == AudioSound.PLAYING)
            elif i > 1:
                self.assertTrue(sound.status() == AudioSound.READY)
            inbuf = acoustics.srcBuffers[sound]
            outbuf = acoustics.outBuffers[agentNp.getName()]

            fig = plt.figure()
            plt.subplot(121)
            plt.plot(inbuf)
            plt.subplot(122)
            plt.plot(outbuf.T)
            plt.show(block=False)
            time.sleep(4.0)
            plt.close(fig)
Example #4
    def testGetColorsFromObjectTransparent(self):
        modelId = 'sphere'
        modelFilename = os.path.join(TEST_DATA_DIR, "models", "sphere.egg")
        assert os.path.exists(modelFilename)
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        obj = NodePath('object-' + str(modelId))
        model.reparentTo(obj)
        colorDescriptions = MaterialColorTable.getColorsFromObject(
            obj, mode='basic')
        self.assertTrue(len(colorDescriptions) == 1)
        self.assertTrue(colorDescriptions[0] == "maroon")
Example #5
    def testObj2bam(self):
        inputFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.obj')
        with tempfile.NamedTemporaryFile(suffix='.bam', delete=False) as f:
            outputFilename = f.name
        obj2bam(inputFilename, outputFilename)
        model = loadModel(outputFilename)
        os.remove(outputFilename)
        self.assertTrue(not model.isEmpty())

        with tempfile.NamedTemporaryFile(suffix='.bam', delete=False) as f:
            outputFilename = f.name
        obj2bam(inputFilename,
                outputFilename,
                coordinateSystem='z-up',
                recomputeVertexNormals=True,
                recomputeTangentBinormal=True,
                recomputePolygonNormals=True,
                triangulatePolygons=True,
                degreeSmoothing=30.0)
        model = loadModel(outputFilename)
        os.remove(outputFilename)
        self.assertTrue(not model.isEmpty())
Example #6
    def testRenderWithModelLights(self):

        filename = os.path.join(TEST_SUNCG_DATA_DIR, 'metadata',
                                'suncgModelLights.json')
        info = SunCgModelLights(filename)

        scene = Scene()

        modelId = 's__1296'
        modelFilename = os.path.join(TEST_SUNCG_DATA_DIR, "object",
                                     str(modelId),
                                     str(modelId) + ".egg")
        assert os.path.exists(modelFilename)
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        model.show(BitMask32.allOn())

        objectsNp = scene.scene.attachNewNode('objects')
        objNp = objectsNp.attachNewNode('object-' + str(modelId))
        model.reparentTo(objNp)

        # Calculate the center of this object
        minBounds, maxBounds = model.getTightBounds()
        centerPos = minBounds + (maxBounds - minBounds) / 2.0

        # Add offset transform to make position relative to the center
        model.setTransform(TransformState.makePos(-centerPos))

        # Add lights to model
        for lightNp in info.getLightsForModel(modelId):
            lightNp.node().setShadowCaster(True, 512, 512)
            lightNp.reparentTo(model)
            scene.scene.setLight(lightNp)

        viewer = None

        try:
            viewer = Viewer(scene, interactive=False, shadowing=True)

            viewer.cam.setTransform(
                TransformState.makePos(LVector3f(0.5, 0.5, 3.0)))
            viewer.cam.lookAt(lightNp)

            for _ in range(20):
                viewer.step()
            time.sleep(1.0)

        finally:
            if viewer is not None:
                viewer.destroy()
                viewer.graphicsEngine.removeAllWindows()
Example #7
    def testGetDimensionsFromObject(self):

        modelId = '274'
        modelFilename = os.path.join(TEST_SUNCG_DATA_DIR, "object",
                                     str(modelId),
                                     str(modelId) + ".egg")
        assert os.path.exists(modelFilename)
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        obj = NodePath('object-' + str(modelId))
        model.reparentTo(obj)

        # XXX: should use the full metadata files if descriptors are not precomputed
        modelInfoFilename = os.path.join(TEST_SUNCG_DATA_DIR, "metadata",
                                         "models.csv")
        modelCatFilename = os.path.join(TEST_SUNCG_DATA_DIR, "metadata",
                                        "ModelCategoryMapping.csv")
        dimensionDescription = DimensionTable().getDimensionsFromModelId(
            modelId, modelInfoFilename, modelCatFilename)
        self.assertTrue(dimensionDescription == 'normal')
Example #8
    def testMultipleSources(self):

        scene = SunCgSceneLoader.loadHouseFromJson(
            "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)

        agentNp = scene.agents[0]
        agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
        agentNp.setHpr(45, 0, 0)

        # Define multiple sound sources
        sources = []
        for i, pos in enumerate([(39, -40.5, 1.5), (45.5, -42.5, 0.5)]):
            sourceSize = 0.25
            modelId = 'source-%d' % (i)
            modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
            objectsNp = scene.scene.attachNewNode('objects')
            objectsNp.setTag('acoustics-mode', 'source')
            objectNp = objectsNp.attachNewNode('object-' + modelId)
            model = loadModel(modelFilename)
            model.setName('model-' + modelId)
            model.setTransform(TransformState.makeScale(sourceSize))
            model.reparentTo(objectNp)
            objectNp.setPos(LVecBase3f(*pos))
            sources.append(objectNp)

        samplingRate = 16000.0
        hrtf = CipicHRTF(os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
                         samplingRate)
        acoustics = EvertAcoustics(scene, hrtf, samplingRate, maximumOrder=2)

        audioFilenames = ['toilet.ogg', 'radio.ogg']
        for audioFilename, source in zip(audioFilenames, sources):
            # Attach sound to object
            filename = os.path.join(TEST_DATA_DIR, 'audio', audioFilename)
            sound = EvertAudioSound(filename)
            acoustics.attachSoundToObject(sound, source)
            sound.setLoop(True)
            sound.setLoopCount(1)
            sound.play()

        for _ in range(20):
            acoustics.step(dt=0.1)
            obs = acoustics.getObservationsForAgent(agentNp.getName())
            self.assertTrue('audio-buffer-right' in obs)
            self.assertTrue('audio-buffer-left' in obs)
            self.assertTrue(
                np.array_equal(obs['audio-buffer-right'].shape,
                               obs['audio-buffer-left'].shape))
Example #9
    def testInit(self):
        scene = Scene()

        # Load object to scene
        modelId = '126'
        modelFilename = os.path.join(TEST_SUNCG_DATA_DIR, "object",
                                     str(modelId),
                                     str(modelId) + ".egg")
        assert os.path.exists(modelFilename)
        model = loadModel(modelFilename)
        model.setName('object-' + str(modelId))
        model.reparentTo(scene.scene)

        self.assertTrue(scene.getTotalNbHouses() == 0)
        self.assertTrue(scene.getTotalNbRooms() == 0)
        self.assertTrue(scene.getTotalNbObjects() == 1)
Example #10
    def testGetMaterialNameFromObject(self):

        modelId = '317'
        modelFilename = os.path.join(TEST_SUNCG_DATA_DIR, "object",
                                     str(modelId),
                                     str(modelId) + ".egg")
        assert os.path.exists(modelFilename)
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        obj = NodePath('object-' + str(modelId))
        model.reparentTo(obj)
        materialDescriptions = MaterialTable.getMaterialNameFromObject(obj)
        self.assertTrue(len(materialDescriptions) == 1)
        self.assertTrue(materialDescriptions[0] == "wood")

        materialDescriptions = MaterialTable.getMaterialNameFromObject(
            obj, thresholdRelArea=0.0)
        self.assertTrue(len(materialDescriptions) == 1)
        self.assertTrue(materialDescriptions[0] == "wood")
Example #11
    def testObjectWithViewer(self):

        scene = Scene()

        modelId = '83'
        modelFilename = os.path.join(TEST_SUNCG_DATA_DIR, "object",
                                     str(modelId),
                                     str(modelId) + ".egg")
        assert os.path.exists(modelFilename)
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        model.show(BitMask32.allOn())

        objectsNp = scene.scene.attachNewNode('objects')
        objNp = objectsNp.attachNewNode('object-' + str(modelId))
        model.reparentTo(objNp)

        # Calculate the center of this object
        minBounds, maxBounds = model.getTightBounds()
        centerPos = minBounds + (maxBounds - minBounds) / 2.0

        # Add offset transform to make position relative to the center
        model.setTransform(TransformState.makePos(-centerPos))

        renderer = None
        viewer = None

        try:
            renderer = Panda3dRenderer(scene, shadowing=False)

            viewer = Viewer(scene, interactive=False)
            viewer.disableMouse()

            viewer.cam.setTransform(
                TransformState.makePos(LVecBase3(5.0, 0.0, 0.0)))
            viewer.cam.lookAt(model)

            for _ in range(20):
                viewer.step()
            time.sleep(1.0)

        finally:
            if renderer is not None:
                renderer.destroy()
            if viewer is not None:
                viewer.destroy()
                viewer.graphicsEngine.removeAllWindows()
Example #12
def getApproximationForModel(model, mode='box'):

    if mode == 'mesh':
        transform = model.getNetTransform()
        approxModel = model.copyTo(model.getParent())
        approxModel.detachNode()
        approxModel.setTransform(transform)

    elif mode == 'box':
        # Bounding box approximation
        # FIXME: taking the tight bounds after the transform does not fit well models
        #        that are rotated (e.g. diagonal orientation).
        minRefBounds, maxRefBounds = model.getTightBounds()
        refDims = maxRefBounds - minRefBounds
        refPos = model.getPos()
        refCenter = minRefBounds + (maxRefBounds - minRefBounds) / 2.0
        refDeltaCenter = refCenter - refPos

        approxModel = loadModel(os.path.join(MODEL_DATA_DIR, 'cube.egg'))

        # Rescale the cube model to match the bounding box of the original
        # model
        minBounds, maxBounds = approxModel.getTightBounds()
        dims = maxBounds - minBounds
        pos = approxModel.getPos()
        center = minBounds + (maxBounds - minBounds) / 2.0
        deltaCenter = center - pos

        position = refPos + refDeltaCenter - deltaCenter
        scale = LVector3f(refDims.x / dims.x, refDims.y / dims.y,
                          refDims.z / dims.z)

        # NOTE: remove the local transform used to center the model
        transform = model.getNetTransform().compose(
            model.getTransform().getInverse())
        transform = transform.compose(
            TransformState.makePosHprScale(position, LVector3f(0, 0, 0),
                                           scale))
        approxModel.setTransform(transform)
    else:
        raise Exception('Unknown mode type for object shape: %s' % (mode))

    approxModel.setName(model.getName())

    return approxModel
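
A minimal usage sketch for getApproximationForModel, assuming MODEL_DATA_DIR also provides a sphere.egg model (only cube.egg is referenced inside the function): 'box' mode yields a cube proxy scaled to the original model's tight bounds, while 'mesh' mode returns a detached copy of the original geometry.

# Usage sketch (assumption: 'sphere.egg' exists under MODEL_DATA_DIR).
scene = NodePath('scene')
model = loadModel(os.path.join(MODEL_DATA_DIR, 'sphere.egg'))
model.setName('model-sphere')
model.reparentTo(scene)

# Replace the detailed mesh with an axis-aligned bounding-box proxy.
boxApprox = getApproximationForModel(model, mode='box')
boxApprox.reparentTo(scene)

# 'mesh' mode instead returns a detached, transformed copy of the geometry.
meshApprox = getApproximationForModel(model, mode='mesh')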
Example #13
    def testDebugObjectWithRender(self):

        scene = Scene()

        modelId = '83'
        modelFilename = os.path.join(TEST_SUNCG_DATA_DIR, "object",
                                     str(modelId),
                                     str(modelId) + ".egg")
        assert os.path.exists(modelFilename)
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        model.hide()

        objectsNp = scene.scene.attachNewNode('objects')
        objNp = objectsNp.attachNewNode('object-' + str(modelId))
        model.reparentTo(objNp)

        # Calculate the center of this object
        minBounds, maxBounds = model.getTightBounds()
        centerPos = minBounds + (maxBounds - minBounds) / 2.0

        # Add offset transform to make position relative to the center
        model.setTransform(TransformState.makePos(-centerPos))

        renderer = None
        physics = None
        viewer = None

        try:
            renderer = Panda3dRenderer(scene, shadowing=False)
            physics = Panda3dBulletPhysics(scene, debug=True)

            viewer = Viewer(scene, interactive=False)
            viewer.disableMouse()

            viewer.cam.setTransform(
                TransformState.makePos(LVecBase3(5.0, 0.0, 0.0)))
            viewer.cam.lookAt(model)

            for _ in range(20):
                viewer.step()
            time.sleep(1.0)

        finally:
            self.hulkSmash(renderer, physics, viewer)
Example #14
    def __init__(self, scene, agentId, agentRadius=0.25):
        self.scene = scene
        self.agentId = agentId

        agentsNp = self.scene.scene.find('**/agents')
        agentNp = agentsNp.attachNewNode(agentId)
        agentNp.setTag('agent-id', agentId)
        scene.agents.append(agentNp)

        # Define a simple sphere model
        modelId = 'sphere-0'
        modelFilename = os.path.join(CDIR, 'sphere.egg')

        agentNp.setTag('model-id', modelId)
        model = loadModel(modelFilename)
        model.setColor(
            LVector4f(np.random.uniform(), np.random.uniform(),
                      np.random.uniform(), 1.0))
        model.setName('model-' + os.path.basename(modelFilename))
        model.setTransform(TransformState.makeScale(agentRadius))
        model.reparentTo(agentNp)
        model.hide(BitMask32.allOn())

        # Calculate the center of this object
        minBounds, maxBounds = model.getTightBounds()
        centerPos = minBounds + (maxBounds - minBounds) / 2.0

        # Add offset transform to make position relative to the center
        agentNp.setTransform(TransformState.makePos(centerPos))
        model.setTransform(model.getTransform().compose(
            TransformState.makePos(-centerPos)))

        self.agentNp = agentNp
        self.model = model
        self.agentRbNp = None

        self.rotationStepCounter = -1
        self.rotationsStepDuration = 40
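
A hypothetical instantiation of the agent class this constructor belongs to (the class name Agent below is assumed, since the listing only shows __init__): it attaches a new sphere-model agent to a scene that already exposes an '**/agents' node, as the SunCG scenes loaded in the tests above do.

# Hypothetical usage; 'Agent' is an assumed name for the enclosing class.
scene = SunCgSceneLoader.loadHouseFromJson(
    "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
agent = Agent(scene, agentId='agent-1', agentRadius=0.25)
agent.agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
agent.agentNp.setHpr(45, 0, 0)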
Example #15
    def testAddAmbientSound(self):

        scene = SunCgSceneLoader.loadHouseFromJson(
            "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)

        agentNp = scene.agents[0]
        agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
        agentNp.setHpr(45, 0, 0)

        # Define a sound source
        sourceSize = 0.25
        modelId = 'source-0'
        modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
        objectsNp = scene.scene.attachNewNode('objects')
        objectsNp.setTag('acoustics-mode', 'source')
        objectNp = objectsNp.attachNewNode('object-' + modelId)
        model = loadModel(modelFilename)
        model.setName('model-' + modelId)
        model.setTransform(TransformState.makeScale(sourceSize))
        model.reparentTo(objectNp)
        objectNp.setPos(LVecBase3f(39, -40.5, 1.5))

        samplingRate = 16000.0
        hrtf = CipicHRTF(os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
                         samplingRate)
        acoustics = EvertAcoustics(scene,
                                   hrtf,
                                   samplingRate,
                                   maximumOrder=2,
                                   maxBufferLength=30.0)

        # Attach sound to object
        filename = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
        sound = EvertAudioSound(filename)
        acoustics.attachSoundToObject(sound, objectNp)
        sound.setLoop(True)
        sound.setLoopCount(1)

        # Add ambient sound
        filename = os.path.join(TEST_DATA_DIR, 'audio', 'radio.ogg')
        ambientSound = EvertAudioSound(filename)
        ambientSound.setLoop(True)
        ambientSound.setLoopCount(0)
        ambientSound.setVolume(0.25)
        acoustics.addAmbientSound(ambientSound)

        ambientSound.play()
        acoustics.step(dt=5.0)
        sound.play()
        acoustics.step(dt=5.0)

        obs = acoustics.getObservationsForAgent(agentNp.getName())
        data = np.array([obs['audio-buffer-left'], obs['audio-buffer-right']],
                        dtype=np.float32).T
        self.assertTrue(
            np.allclose(data.shape[0] / samplingRate, 10.0, atol=1e-3))

        #from scipy.io import wavfile
        #wavfile.write('output.wav', samplingRate, data)

        fig = plt.figure()
        plt.plot(data)
        plt.show(block=False)
        time.sleep(1.0)
        plt.close(fig)
Example #16
    def testRenderSimpleCubeRoom(self):

        samplingRate = 16000.0
        scene = Scene()
        hrtf = CipicHRTF(os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
                         samplingRate)

        viewer = Viewer(scene, interactive=False)

        # Define a simple cube (10 x 10 x 10 m) as room geometry
        roomSize = 10.0
        modelId = 'room-0'
        modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'cube.egg')
        layoutNp = scene.scene.attachNewNode('layouts')
        objectNp = layoutNp.attachNewNode('object-' + modelId)
        objectNp.setTag('acoustics-mode', 'obstacle')
        model = loadModel(modelFilename)
        model.setName('model-' + modelId)
        model.setTransform(TransformState.makeScale(roomSize))
        model.setRenderModeWireframe()
        model.reparentTo(objectNp)
        objectNp.setPos(LVecBase3f(0.0, 0.0, 0.0))

        # Define a sound source
        sourceSize = 0.25
        modelId = 'source-0'
        modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
        objectsNp = scene.scene.attachNewNode('objects')
        objectNp = objectsNp.attachNewNode('object-' + modelId)
        objectNp.setTag('acoustics-mode', 'source')
        model = loadModel(modelFilename)
        model.setName('model-' + modelId)
        model.setTransform(TransformState.makeScale(sourceSize))
        model.reparentTo(objectNp)
        objectNp.setPos(LVecBase3f(0.0, 0.0, 0.0))

        acoustics = EvertAcoustics(scene,
                                   hrtf,
                                   samplingRate,
                                   maximumOrder=3,
                                   materialAbsorption=False,
                                   frequencyDependent=False,
                                   debug=True)

        # Attach sound to object
        filename = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
        sound = EvertAudioSound(filename)
        acoustics.attachSoundToObject(sound, objectNp)

        acoustics.step(0.1)
        center = acoustics.world.getCenter()
        self.assertTrue(
            np.allclose(acoustics.world.getMaxLength() / 1000.0, roomSize))
        self.assertTrue(
            np.allclose([center.x, center.y, center.z], [0.0, 0.0, 0.0]))
        self.assertTrue(acoustics.world.numElements() == 12)
        self.assertTrue(acoustics.world.numConvexElements() == 12)

        # Configure the camera
        # NOTE: in Panda3D, the X axis points to the right, the Y axis is forward, and Z is up
        mat = np.array([
            0.999992, 0.00394238, 0, 0, -0.00295702, 0.750104, -0.661314, 0,
            -0.00260737, 0.661308, 0.75011, 0, 0.0, -25.0, 22, 1
        ])
        mat = LMatrix4f(*mat.ravel())
        viewer.cam.setMat(mat)

        agentNp = scene.agents[0]
        agentNp.setPos(
            LVecBase3f(0.25 * roomSize, -0.25 * roomSize, 0.3 * roomSize))
        for _ in range(10):
            viewer.step()
        time.sleep(1.0)

        agentNp.setPos(
            LVecBase3f(0.35 * roomSize, -0.35 * roomSize, 0.4 * roomSize))
        for _ in range(10):
            viewer.step()
        time.sleep(1.0)

        agentNp.setPos(
            LVecBase3f(-0.25 * roomSize, 0.25 * roomSize, -0.3 * roomSize))
        for _ in range(10):
            viewer.step()
        time.sleep(1.0)

        # Calculate and show impulse responses
        impulse = acoustics.calculateImpulseResponse(objectNp.getName(),
                                                     agentNp.getName())

        fig = plt.figure()
        plt.plot(impulse.impulse[0], color='b', label='Left channel')
        plt.plot(impulse.impulse[1], color='g', label='Right channel')
        plt.legend()
        plt.show(block=False)
        time.sleep(1.0)
        plt.close(fig)

        acoustics.destroy()
        viewer.destroy()
        viewer.graphicsEngine.removeAllWindows()
Example #17
    def testRenderHouseWithAcousticsPath(self):

        scene = SunCgSceneLoader.loadHouseFromJson(
            "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)

        agentNp = scene.agents[0]
        agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
        agentNp.setHpr(45, 0, 0)

        # Define a sound source
        sourceSize = 0.25
        modelId = 'source-0'
        modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
        objectsNp = scene.scene.attachNewNode('objects')
        objectsNp.setTag('acoustics-mode', 'source')
        objectNp = objectsNp.attachNewNode('object-' + modelId)
        model = loadModel(modelFilename)
        model.setName('model-' + modelId)
        model.setTransform(TransformState.makeScale(sourceSize))
        model.reparentTo(objectNp)
        objectNp.setPos(LVecBase3f(39, -40.5, 1.5))

        samplingRate = 16000.0
        hrtf = CipicHRTF(os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
                         samplingRate)
        acoustics = EvertAcoustics(scene,
                                   hrtf,
                                   samplingRate,
                                   maximumOrder=2,
                                   debug=True)

        # Attach sound to object
        filename = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
        sound = EvertAudioSound(filename)
        acoustics.attachSoundToObject(sound, objectNp)
        sound.play()

        acoustics.step(0.0)

        # Hide ceilings
        for nodePath in scene.scene.findAllMatches(
                '**/layouts/*/acoustics/*c'):
            nodePath.hide(BitMask32.allOn())

        viewer = Viewer(scene, interactive=False)

        # Configure the camera
        # NOTE: in Panda3D, the X axis points to the right, the Y axis is forward, and Z is up
        center = agentNp.getNetTransform().getPos()
        mat = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, -1.0, 0.0],
                        [0.0, 1.0, 0.0, 0.0], [center.x, center.y, 20, 1]])
        mat = LMatrix4f(*mat.ravel())
        viewer.cam.setMat(mat)

        for _ in range(20):
            viewer.step()
        time.sleep(1.0)

        viewer.destroy()
        viewer.graphicsEngine.removeAllWindows()

        # Calculate and show impulse responses
        impulse = acoustics.calculateImpulseResponse(objectNp.getName(),
                                                     agentNp.getName())

        fig = plt.figure()
        plt.plot(impulse.impulse[0], color='b', label='Left channel')
        plt.plot(impulse.impulse[1], color='g', label='Right channel')
        plt.legend()
        plt.show(block=False)
        time.sleep(1.0)
        plt.close(fig)

        acoustics.destroy()