def testStep(self):
    """Step the acoustics engine with varying dt values and check that the
    source sound transitions from PLAYING to READY once it has finished."""
    # Load the test house and place the agent at a known pose
    scene = SunCgSceneLoader.loadHouseFromJson(
        "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
    agentNp = scene.agents[0]
    agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
    agentNp.setHpr(45, 0, 0)

    # Build a small sphere that acts as the sound source
    srcId = 'source-0'
    objectsNp = scene.scene.attachNewNode('objects')
    objectsNp.setTag('acoustics-mode', 'source')
    objectNp = objectsNp.attachNewNode('object-' + srcId)
    srcModel = loadModel(os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg'))
    srcModel.setName('model-' + srcId)
    srcModel.setTransform(TransformState.makeScale(0.25))
    srcModel.reparentTo(objectNp)
    objectNp.setPos(LVecBase3f(39, -40.5, 1.5))

    # Acoustics engine using a CIPIC HRTF at 16 kHz
    fs = 16000.0
    hrtf = CipicHRTF(
        os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'), fs)
    acoustics = EvertAcoustics(
        scene, hrtf, fs, maximumOrder=2, maxBufferLength=30.0)

    # Attach an audio clip to the source and start playback (single loop)
    sound = EvertAudioSound(
        os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg'))
    acoustics.attachSoundToObject(sound, objectNp)
    sound.setLoop(True)
    sound.setLoopCount(1)
    sound.play()

    # After the first step the sound must still be playing; by the third
    # step enough simulated time has elapsed that it must have finished.
    for step, dt in enumerate([5.0, 20.0, 10.0]):
        acoustics.step(dt)
        if step == 0:
            self.assertTrue(sound.status() == AudioSound.PLAYING)
        elif step > 1:
            self.assertTrue(sound.status() == AudioSound.READY)

    # Visual sanity check: source input buffer vs rendered agent output
    inbuf = acoustics.srcBuffers[sound]
    outbuf = acoustics.outBuffers[agentNp.getName()]
    fig = plt.figure()
    plt.subplot(121)
    plt.plot(inbuf)
    plt.subplot(122)
    plt.plot(outbuf.T)
    plt.show(block=False)
    time.sleep(4.0)
    plt.close(fig)
def testMultipleSources(self):
    """Render audio for an agent while two sources play different clips
    simultaneously, and check the stereo observation buffers."""
    scene = SunCgSceneLoader.loadHouseFromJson(
        "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
    agentNp = scene.agents[0]
    agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
    agentNp.setHpr(45, 0, 0)

    # Create one small sphere source per requested position
    sphereFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
    sourceNps = []
    for idx, position in enumerate([(39, -40.5, 1.5), (45.5, -42.5, 0.5)]):
        srcId = 'source-%d' % (idx)
        objectsNp = scene.scene.attachNewNode('objects')
        objectsNp.setTag('acoustics-mode', 'source')
        srcNp = objectsNp.attachNewNode('object-' + srcId)
        srcModel = loadModel(sphereFilename)
        srcModel.setName('model-' + srcId)
        srcModel.setTransform(TransformState.makeScale(0.25))
        srcModel.reparentTo(srcNp)
        srcNp.setPos(LVecBase3f(*position))
        sourceNps.append(srcNp)

    fs = 16000.0
    hrtf = CipicHRTF(
        os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'), fs)
    acoustics = EvertAcoustics(scene, hrtf, fs, maximumOrder=2)

    # Attach a distinct clip to each source and start playback (single loop)
    for clipName, srcNp in zip(['toilet.ogg', 'radio.ogg'], sourceNps):
        sound = EvertAudioSound(
            os.path.join(TEST_DATA_DIR, 'audio', clipName))
        acoustics.attachSoundToObject(sound, srcNp)
        sound.setLoop(True)
        sound.setLoopCount(1)
        sound.play()

    # Advance the simulation by 2 s (20 steps of 100 ms)
    for _ in range(20):
        acoustics.step(dt=0.1)

    # The observation must contain equally-shaped left/right buffers
    obs = acoustics.getObservationsForAgent(agentNp.getName())
    self.assertTrue('audio-buffer-right' in obs)
    self.assertTrue('audio-buffer-left' in obs)
    self.assertTrue(
        np.array_equal(obs['audio-buffer-right'].shape,
                       obs['audio-buffer-left'].shape))
def __init__(self, scene, agentId, agentRadius=0.25):
    """Create an agent node in the scene graph, represented by a sphere.

    The agent is attached under the scene's '**/agents' node, tagged with
    its id, and appended to ``scene.agents``. Its sphere model is hidden
    from rendering and re-centered so the node position refers to the
    model's geometric center rather than its origin.

    :param scene: scene container exposing ``scene.scene`` (a NodePath)
        and the ``scene.agents`` list this agent is appended to.
    :param agentId: unique identifier; used as node name and 'agent-id' tag.
    :param agentRadius: uniform scale applied to the sphere model.
    """
    self.scene = scene
    self.agentId = agentId
    agentsNp = self.scene.scene.find('**/agents')
    agentNp = agentsNp.attachNewNode(agentId)
    agentNp.setTag('agent-id', agentId)
    scene.agents.append(agentNp)

    # Define a simple sphere model
    modelId = 'sphere-0'
    modelFilename = os.path.join(CDIR, 'sphere.egg')
    agentNp.setTag('model-id', modelId)
    model = loadModel(modelFilename)
    # Random color so multiple agents are visually distinguishable
    model.setColor(
        LVector4f(np.random.uniform(), np.random.uniform(),
                  np.random.uniform(), 1.0))
    model.setName('model-' + os.path.basename(modelFilename))
    model.setTransform(TransformState.makeScale(agentRadius))
    model.reparentTo(agentNp)
    # Hide the model from every camera bitplane
    model.hide(BitMask32.allOn())

    # Calculate the center of this object
    minBounds, maxBounds = model.getTightBounds()
    centerPos = minBounds + (maxBounds - minBounds) / 2.0

    # Add offset transform to make position relative to the center:
    # shift the agent node to the center, and compensate on the model so
    # the world-space geometry is unchanged.
    agentNp.setTransform(TransformState.makePos(centerPos))
    model.setTransform(model.getTransform().compose(
        TransformState.makePos(-centerPos)))

    self.agentNp = agentNp
    self.model = model
    # NOTE(review): presumably a rigid-body NodePath assigned later by
    # physics code — confirm against callers.
    self.agentRbNp = None
    # Rotation bookkeeping; -1 means no rotation in progress.
    self.rotationStepCounter = -1
    self.rotationsStepDuration = 40
def testAddAmbientSound(self):
    """Mix a looping ambient sound with a spatialized source and verify
    the agent's stereo observation spans the full 10 s of simulation."""
    # House scene with the agent at a known pose
    scene = SunCgSceneLoader.loadHouseFromJson(
        "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
    agentNp = scene.agents[0]
    agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
    agentNp.setHpr(45, 0, 0)

    # Spatialized source: a small sphere placed in a nearby room
    srcId = 'source-0'
    objectsNp = scene.scene.attachNewNode('objects')
    objectsNp.setTag('acoustics-mode', 'source')
    objectNp = objectsNp.attachNewNode('object-' + srcId)
    srcModel = loadModel(os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg'))
    srcModel.setName('model-' + srcId)
    srcModel.setTransform(TransformState.makeScale(0.25))
    srcModel.reparentTo(objectNp)
    objectNp.setPos(LVecBase3f(39, -40.5, 1.5))

    fs = 16000.0
    hrtf = CipicHRTF(
        os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'), fs)
    acoustics = EvertAcoustics(
        scene, hrtf, fs, maximumOrder=2, maxBufferLength=30.0)

    # Attach the clip to the source; not started yet (single loop)
    sound = EvertAudioSound(
        os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg'))
    acoustics.attachSoundToObject(sound, objectNp)
    sound.setLoop(True)
    sound.setLoopCount(1)

    # Ambient sound: loops forever at reduced volume, not spatialized
    ambientSound = EvertAudioSound(
        os.path.join(TEST_DATA_DIR, 'audio', 'radio.ogg'))
    ambientSound.setLoop(True)
    ambientSound.setLoopCount(0)
    ambientSound.setVolume(0.25)
    acoustics.addAmbientSound(ambientSound)

    # 5 s of ambient alone, then 5 s with the source playing as well
    ambientSound.play()
    acoustics.step(dt=5.0)
    sound.play()
    acoustics.step(dt=5.0)

    # Stereo observation should cover 10 s of simulated audio
    obs = acoustics.getObservationsForAgent(agentNp.getName())
    data = np.array(
        [obs['audio-buffer-left'], obs['audio-buffer-right']],
        dtype=np.float32).T
    self.assertTrue(
        np.allclose(data.shape[0] / fs, 10.0, atol=1e-3))

    # To dump the rendered audio for manual inspection:
    #   from scipy.io import wavfile
    #   wavfile.write('output.wav', fs, data)
    fig = plt.figure()
    plt.plot(data)
    plt.show(block=False)
    time.sleep(1.0)
    plt.close(fig)
def testRenderHouseWithAcousticsPath(self):
    """Render the house with acoustics debug geometry visible, then plot
    the left/right impulse responses between the source and the agent."""
    scene = SunCgSceneLoader.loadHouseFromJson(
        "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
    agentNp = scene.agents[0]
    agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
    agentNp.setHpr(45, 0, 0)

    # Define a sound source
    sourceSize = 0.25
    modelId = 'source-0'
    modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
    objectsNp = scene.scene.attachNewNode('objects')
    objectsNp.setTag('acoustics-mode', 'source')
    objectNp = objectsNp.attachNewNode('object-' + modelId)
    model = loadModel(modelFilename)
    model.setName('model-' + modelId)
    model.setTransform(TransformState.makeScale(sourceSize))
    model.reparentTo(objectNp)
    objectNp.setPos(LVecBase3f(39, -40.5, 1.5))

    samplingRate = 16000.0
    hrtf = CipicHRTF(
        os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
        samplingRate)
    # debug=True so the engine adds visible acoustic-path geometry
    acoustics = EvertAcoustics(
        scene, hrtf, samplingRate, maximumOrder=2, debug=True)

    # Attach sound to object
    filename = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
    sound = EvertAudioSound(filename)
    acoustics.attachSoundToObject(sound, objectNp)
    sound.play()
    # Zero-length step to force an initial update of the acoustic paths
    acoustics.step(0.0)

    # Hide ceilings (node names ending in 'c') so the camera can look
    # down into the rooms from above
    for nodePath in scene.scene.findAllMatches(
            '**/layouts/*/acoustics/*c'):
        nodePath.hide(BitMask32.allOn())

    viewer = Viewer(scene, interactive=False)

    # Configure the camera
    # NOTE: in Panda3D, the X axis points to the right, the Y axis is forward, and Z is up
    # Top-down view centered on the agent, 20 units above
    center = agentNp.getNetTransform().getPos()
    mat = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, -1.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0], [center.x, center.y, 20, 1]])
    mat = LMatrix4f(*mat.ravel())
    viewer.cam.setMat(mat)

    # Render a few frames, then tear the viewer down
    for _ in range(20):
        viewer.step()
    time.sleep(1.0)
    viewer.destroy()
    viewer.graphicsEngine.removeAllWindows()

    # Calculate and show impulse responses
    impulse = acoustics.calculateImpulseResponse(objectNp.getName(),
                                                 agentNp.getName())

    fig = plt.figure()
    plt.plot(impulse.impulse[0], color='b', label='Left channel')
    plt.plot(impulse.impulse[1], color='g', label='Right channel')
    plt.legend()
    plt.show(block=False)
    time.sleep(1.0)
    plt.close(fig)

    acoustics.destroy()
def testRenderSimpleCubeRoom(self):
    """Build a 10 m wireframe cube room with a source at its center,
    check the acoustics world geometry, move the agent around while
    rendering, then plot the resulting impulse responses."""
    samplingRate = 16000.0
    scene = Scene()
    hrtf = CipicHRTF(
        os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
        samplingRate)
    viewer = Viewer(scene, interactive=False)

    # Define a simple cube (10 x 10 x 10 m) as room geometry
    roomSize = 10.0
    modelId = 'room-0'
    modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'cube.egg')
    layoutNp = scene.scene.attachNewNode('layouts')
    objectNp = layoutNp.attachNewNode('object-' + modelId)
    objectNp.setTag('acoustics-mode', 'obstacle')
    model = loadModel(modelFilename)
    model.setName('model-' + modelId)
    model.setTransform(TransformState.makeScale(roomSize))
    model.setRenderModeWireframe()
    model.reparentTo(objectNp)
    objectNp.setPos(LVecBase3f(0.0, 0.0, 0.0))

    # Define a sound source
    sourceSize = 0.25
    modelId = 'source-0'
    modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
    objectsNp = scene.scene.attachNewNode('objects')
    objectNp = objectsNp.attachNewNode('object-' + modelId)
    # NOTE(review): here the 'acoustics-mode' tag is set on the object
    # node, whereas other tests in this file set it on the parent
    # 'objects' node — confirm both are accepted by EvertAcoustics.
    objectNp.setTag('acoustics-mode', 'source')
    model = loadModel(modelFilename)
    model.setName('model-' + modelId)
    model.setTransform(TransformState.makeScale(sourceSize))
    model.reparentTo(objectNp)
    objectNp.setPos(LVecBase3f(0.0, 0.0, 0.0))

    acoustics = EvertAcoustics(scene,
                               hrtf,
                               samplingRate,
                               maximumOrder=3,
                               materialAbsorption=False,
                               frequencyDependent=False,
                               debug=True)

    # Attach sound to object
    filename = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
    sound = EvertAudioSound(filename)
    acoustics.attachSoundToObject(sound, objectNp)

    acoustics.step(0.1)
    # The acoustics world (millimeter units) must match the cube: centered
    # at the origin, 12 triangles (2 per face), all convex.
    center = acoustics.world.getCenter()
    self.assertTrue(
        np.allclose(acoustics.world.getMaxLength() / 1000.0, roomSize))
    self.assertTrue(
        np.allclose([center.x, center.y, center.z], [0.0, 0.0, 0.0]))
    self.assertTrue(acoustics.world.numElements() == 12)
    self.assertTrue(acoustics.world.numConvexElements() == 12)

    # Configure the camera
    # NOTE: in Panda3D, the X axis points to the right, the Y axis is forward, and Z is up
    mat = np.array([
        0.999992, 0.00394238, 0, 0, -0.00295702, 0.750104, -0.661314, 0,
        -0.00260737, 0.661308, 0.75011, 0, 0.0, -25.0, 22, 1
    ])
    mat = LMatrix4f(*mat.ravel())
    viewer.cam.setMat(mat)

    # Move the agent to three different spots, rendering 10 frames each
    agentNp = scene.agents[0]
    agentNp.setPos(
        LVecBase3f(0.25 * roomSize, -0.25 * roomSize, 0.3 * roomSize))
    for _ in range(10):
        viewer.step()
    time.sleep(1.0)

    agentNp.setPos(
        LVecBase3f(0.35 * roomSize, -0.35 * roomSize, 0.4 * roomSize))
    for _ in range(10):
        viewer.step()
    time.sleep(1.0)

    agentNp.setPos(
        LVecBase3f(-0.25 * roomSize, 0.25 * roomSize, -0.3 * roomSize))
    for _ in range(10):
        viewer.step()
    time.sleep(1.0)

    # Calculate and show impulse responses
    impulse = acoustics.calculateImpulseResponse(objectNp.getName(),
                                                 agentNp.getName())

    fig = plt.figure()
    plt.plot(impulse.impulse[0], color='b', label='Left channel')
    plt.plot(impulse.impulse[1], color='g', label='Right channel')
    plt.legend()
    plt.show(block=False)
    time.sleep(1.0)
    plt.close(fig)

    acoustics.destroy()
    viewer.destroy()
    viewer.graphicsEngine.removeAllWindows()
def calculateObstacleMap(self,
                         resolution=0.1,
                         level=0,
                         zlim=(0.15, 1.50),
                         xlim=None,
                         ylim=None,
                         roomIds=None,
                         layoutOnly=False):
    """Rasterize walls, doors and objects of a house level into a 2D
    binary obstacle map.

    Walls and (optionally) object footprints are drawn into an off-screen
    matplotlib figure; opened doors are drawn inverted (as free space)
    after a per-door-type adjustment transform. The canvas is then
    converted to an image and thresholded.

    :param resolution: target map resolution in scene units per pixel.
    :param level: house level index to rasterize.
    :param zlim: (min, max) height slice, relative to the floor reference
        height, used to select geometry.
    :param xlim, ylim: optional axis limits; if both given they are used
        directly, otherwise limits are autoscaled and equalized.
    :param roomIds: optional list of room ids to restrict the search to.
    :param layoutOnly: if True, skip non-door objects.
    :return: (obstacleMap, xlim, ylim) where obstacleMap is a 2D array
        with obstacles as 1.0 and free space as 0.0.
    """
    # Off-screen figure with no margins so the axes fill the canvas
    figSize = (10, 10)
    fig = Figure(figsize=figSize, dpi=100, frameon=False)
    canvas = FigureCanvas(fig)
    ax = fig.add_subplot(111, aspect='equal')
    fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0,
                        hspace=0)
    ax.axis('off')
    ax.set_aspect('equal')

    # Convert the height slice to absolute scene coordinates
    floorZ = self._getFloorReferenceZ(level)
    zlim = np.array(zlim) + floorZ

    # Collect layout (wall) and object models, either per room or for the
    # whole level
    if roomIds is not None:
        layoutModels = []
        objModels = []
        for roomId in roomIds:
            layoutModels.extend([
                model for model in self.scene.scene.findAllMatches(
                    '**/level-%d/room-%s/layouts/object-*/+ModelNode' %
                    (level, roomId))
            ])
            objModels.extend([
                model for model in self.scene.scene.findAllMatches(
                    '**/level-%d/room-%s/objects/object-*/+ModelNode' %
                    (level, roomId))
            ])
    else:
        layoutModels = [
            model for model in self.scene.scene.findAllMatches(
                '**/level-%d/**/layouts/object-*/+ModelNode' % level)
        ]
        objModels = [
            model for model in self.scene.scene.findAllMatches(
                '**/level-%d/**/objects/object-*/+ModelNode' % level)
        ]

    # Loop for all walls in the scene:
    # only model ids ending with 'w' are treated as walls here
    for model in layoutModels:
        modelId = model.getNetTag('model-id')
        if not modelId.endswith('w'):
            continue
        addModelTriangles(ax, model, zlim=zlim)

    # Loop for all doors in the scene:
    # opened doors are drawn inverted so they punch free space through
    # the wall footprint
    for model in objModels:
        modelId = model.getNetTag('model-id')
        if self._isDoor(modelId):
            if modelId in self.openedStandardDoorModelIds:
                # Shift the model a little more to the wall
                transform = TransformState.makePos(
                    LVector3f(0.0, -0.10, 0.0))
                # Reduce width by 25% not to mess with close corner walls
                transform = transform.compose(
                    TransformState.makeScale(LVector3f(0.75, 1.0, 1.0)))
            elif modelId in self.openedThinDoorModelIds:
                # Rescale the model to be able to cover the entire depth of walls
                # Reduce width by 25% not to mess with close corner walls
                transform = TransformState.makeScale(
                    LVector3f(0.75, 4.0, 1.0))
            elif modelId in self.openedGarageDoorModelIds:
                # Shift the model a little more to the wall
                transform = TransformState.makePos(
                    LVector3f(0.0, 0.10, 0.0))
                # Reduce width by 10% not to mess with close corner walls
                transform = transform.compose(
                    TransformState.makeScale(LVector3f(0.90, 1.0, 1.0)))
            else:
                raise Exception('Unsupported model id: %s' % (modelId))

            # TODO: would be more efficient if it needed not copying the
            # model
            # Rebuild the door under temporary nodes so the adjustment
            # transform applies in the door's local frame
            parentNp = NodePath('tmp-objnode')
            parentNp.setTransform(model.getParent().getNetTransform())
            midNp = parentNp.attachNewNode('tmp-transform')
            midNp.setTransform(transform)
            model = model.copyTo(midNp)
            approxModel = getApproximationForModel(model, mode='box')
            addModelTriangles(ax, approxModel, invert=True, zlim=zlim)
            # Clean up the temporary copies
            approxModel.removeNode()
            midNp.removeNode()
            parentNp.removeNode()

    # Loop for all objects in the scene:
    # doors were already handled above
    if not layoutOnly:
        for model in objModels:
            modelId = model.getNetTag('model-id')
            if self._isDoor(modelId):
                continue
            approxModel = getApproximationForModel(model, mode='box')
            addModelTriangles(ax, approxModel, zlim=zlim)

    if xlim is not None and ylim is not None:
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
    else:
        ax.autoscale(True)
        set_axes_equal(ax)
        xlim, ylim = ax.get_xlim(), ax.get_ylim()

    # The map must be square for the dpi-based resolution trick below
    xrange = xlim[1] - xlim[0]
    yrange = ylim[1] - ylim[0]
    assert np.allclose(xrange, yrange, atol=1e-6)

    # Choose the dpi so one pixel covers `resolution` scene units
    dpi = (xrange / resolution) / figSize[0]
    fig.set_dpi(dpi)

    obstacleMap = canvas2image(canvas)
    plt.close(fig)

    # RGB to binary
    obstacleMap = np.round(np.mean(obstacleMap[:, :], axis=-1))

    # NOTE: inverse image so that obstacle areas are shown in white
    obstacleMap = 1.0 - obstacleMap

    return obstacleMap, xlim, ylim