def lookAt(pos, at):
    """Build a camera look-at transform.

    Parameters
    ----------
    pos : LVector3f
        Eye position.
    at : LVector3f
        Target point the camera should face.

    Returns
    -------
    LMatrix4f
        Rotation matrix composed with the translation to ``pos``.
        (The old docstring said "Mat3d"; the code returns an LMatrix4f.)
    """
    # Forward axis: from the eye towards the target.
    yaxis = at - pos
    yaxis.normalize()
    # Right axis: perpendicular to world-up and forward.
    xaxis = LVector3f.up().cross(yaxis)
    xaxis.normalize()
    # Up axis: perpendicular to right and forward.
    zaxis = xaxis.cross(yaxis)
    zaxis.normalize()
    # BUGFIX: LMatrix4f() is left *uninitialized* by Panda3D's default
    # constructor, so the fourth row/column held garbage.  Start from a
    # copy of the identity before overwriting the first three rows.
    rmat = LMatrix4f(LMatrix4f.identMat())
    rmat.setRow(0, xaxis)
    # NOTE(review): rows are ordered (right, up, forward); confirm this
    # matches the convention expected by callers.
    rmat.setRow(1, zaxis)
    rmat.setRow(2, yaxis)
    tmat = LMatrix4f.translateMat(pos)
    return rmat * tmat
def change_transform(self, transform, face, type='shape'):
    """Re-express *transform* so that it sits on the given face.

    Applies the face rotation taken from ``self.quat_dict`` followed by
    a one-unit translation along +X, then wraps the combined matrix
    back into a transform of the same class.
    """
    rotation = LMatrix4f.rotateMat(*self.quat_dict[face])
    offset = LMatrix4f.translateMat(Vec3(1.0, 0, 0))
    combined = offset * (rotation * transform.getMat())
    return transform.makeMat(combined)
def change_back_transform(self, transform, face, type='bloc'):
    """Undo change_transform: translate back along -X, then apply the
    inverse of the face rotation from ``self.quat_dict``."""
    inverse_rotation = LMatrix4f.rotateMat(*self.quat_dict[face])
    inverse_rotation.invertInPlace()
    shifted = LMatrix4f.translateMat(Vec3(-1.0, 0, 0)) * transform.getMat()
    return transform.makeMat(inverse_rotation * shifted)
def build_link(self, node):
    """Create and register a hinge joint (plus PID motor) linking the
    two multiboxes recorded for *node* in ``link_building_status``.

    Positions body B relative to body A using the stored local
    transforms ``ta``/``tb``, then attaches an OdeHingeJoint whose
    anchor sits midway between the two boxes and whose axis comes from
    the orientation-dependent rotation of the anchor frame.

    PY3 FIX: the Python-2-only ``print`` statements were converted to
    ``print()`` calls, consistent with the rest of the file.
    """
    (id_bda, ta), (id_bdb, tb) = self.link_building_status[node]
    bda = self.factory.multiboxes[id_bda]
    bdb = self.factory.multiboxes[id_bdb]
    # Absolute transform of body A, composed with its local attachment.
    pos = bda.body.getPosition()
    quat = LQuaternionf(bda.body.getQuaternion())
    mat = TransformState.makePosQuatScale(pos, quat, Vec3(1, 1, 1)).getMat()
    mat = ta.getMat() * mat
    print("ta", ta)
    print("absol", TransformState.makeMat(mat))
    # The joint anchor sits half a unit along +X from A's frame.
    mat = LMatrix4f.translateMat(Vec3(0.5, 0, 0)) * mat
    anchor = TransformState.makeMat(mat).getPos()
    print("absol", TransformState.makeMat(mat))
    axis = self.quat_dict[1][1]
    # Hinge axis depends on the node's orientation.
    if node.orientation == 1:
        t = LMatrix4f.rotateMat(*self.quat_dict[4]) * mat
    else:
        t = LMatrix4f.rotateMat(*self.quat_dict[2]) * mat
    row = t.getRow(0)
    print("rotation", t.getRow(0), type(t.getRow(0)))
    #axis = t.getQuat().getAxis()
    axis = Vec3(row.getX(), row.getY(), row.getZ())
    print("axis", axis)
    print("anchor", anchor)
    # Place body B: another half unit along +X, then B's inverse local
    # transform gives B's absolute pose.
    mat = LMatrix4f.translateMat(Vec3(0.5, 0, 0)) * mat
    mat = tb.getInverse().getMat() * mat
    t = TransformState.makeMat(mat)
    posb = t.getPos()
    quatb = t.getQuat()
    bdb.body.setPosition(posb)
    bdb.body.setQuaternion(quatb)
    cs = OdeHingeJoint(self.physics.world, self.physics.servogroup)
    cs.attach(bda.body, bdb.body)
    cs.setAxis(axis)
    cs.setAnchor(anchor)
    # Add the motor.
    cs.setParamFMax(self.satu_cmd)
    cs.setParamFudgeFactor(0.5)
    cs.setParamCFM(11.1111)
    cs.setParamStopCFM(11.1111)
    cs.setParamStopERP(0.444444)
    cs.setParamLoStop(- math.pi * 0.5)
    cs.setParamHiStop(math.pi * 0.5)
    pid = PID()
    self.dof_motors[node] = (cs, pid)
    print("add constraint")
def stop(self):
    """Tear down mouse-look mode.

    Restores the camera/mouse-interface transforms, re-enables the
    default mouse control, shows the cursor, clears all movement flags,
    unbinds every key event, and removes camera collision handling if
    it was set up.
    """
    self.gameApp.taskMgr.remove("HxMouseLook::cameraTask")
    # Hand the accumulated camera transform back to the trackball node.
    inverse = LMatrix4f(self.camera.getTransform(self.refNode).getMat())
    inverse.invertInPlace()
    self.camera.setMat(LMatrix4f.identMat())
    self.gameApp.mouseInterfaceNode.setMat(inverse)
    self.gameApp.enableMouse()
    # Make the cursor visible again.
    props = WindowProperties()
    props.setCursorHidden(False)
    self.gameApp.win.requestProperties(props)
    # Reset every movement flag.
    self.forward = False
    self.backward = False
    self.left = False
    self.right = False
    self.up = False
    self.down = False
    self.rollLeft = False
    # Unbind movement keys (plain, shifted, and key-up events), in the
    # same order the original individual calls used.
    for key in ("w", "s", "a", "d", "r", "f"):
        self.ignore(key)
        self.ignore("shift-" + key)
        self.ignore(key + "-up")
    for key in ("q", "e"):
        self.ignore(key)
        self.ignore(key + "-up")
    self.ignore("shift")
    self.ignore("shift-up")
    # Un-setup collisions.
    if self.collisionHandler != None:
        # Remove camera from the collision system.
        self.gameApp.cTrav.removeCollider(self.collisionNP)
        # Remove from collisionHandler (Pusher).
        self.collisionHandler.removeCollider(self.collisionNP)
        # Remove the collision node.
        self.collisionNP.removeNode()
def spinCameraTask(self, task):
    """Per-frame task: advance the physics one step, sync the Panda
    nodes to the simulated poses, update the camera and HUD text, and
    exit once t > 80 s.

    PY3 FIX: ``iteritems()`` and the Python-2-only ``print`` statement
    were replaced by ``items()`` and ``print()``.
    """
    self.t += self.physics.DT
    # Sinusoidal motor drive with a 2.5 s period.
    ph = self.t * np.float32(2 * np.pi / 2.5)
    import math  # local import kept from the original
    self.state = self.physics.do_time_step(
        self.state, motor_signals=[math.sin(ph), math.sin(ph), 2, 2])
    positions, velocity, rotations = self.state
    for obj_name, obj in self.objects.items():
        obj_id = self.physics.get_object_index(obj_name)
        # Flag simulation blow-ups.
        if (abs(positions[obj_id, :]) > 10**5).any():
            print("problem with", obj_name)
        sc = obj.getScale()
        # setMat overwrites scale, so restore it afterwards.
        obj.setMat(self.render,
                   LMatrix4f(LMatrix3f(*rotations[obj_id, :, :].flatten())))
        obj.setPos(*positions[obj_id, :])
        obj.setScale(sc)
    # Fixed camera looking at the origin.
    self.camera.setPos(-10, 0, 1.5)
    self.camera.lookAt(0, 0, 0)
    real_time = time.time() - self.starttime
    self.textObject.setText('Time: %3.3f s\n%3.3fx real time\n%s' % (
        self.t, self.t / real_time, ""))
    time.sleep(0.0001)
    if self.t > 80:
        self.userExit()
    return Task.cont
def testStep(self):
    """Render one step of a SUNCG house offscreen with model lights and
    sanity-check the RGB and depth outputs, then display them briefly.
    """
    scene = SunCgSceneLoader.loadHouseFromJson(
        "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
    modelLightsInfo = SunCgModelLights(
        os.path.join(TEST_SUNCG_DATA_DIR, 'metadata',
                     'suncgModelLights.json'))
    renderer = Panda3dRenderer(scene, shadowing=True, mode='offscreen',
                               modelLightsInfo=modelLightsInfo)
    renderer.showRoomLayout(showCeilings=False)

    # Place the agent using a fixed flattened 4x4 matrix.
    agentMat = np.array([0.999992, 0.00394238, 0, 0,
                         -0.00295702, 0.750104, -0.661314, 0,
                         -0.00260737, 0.661308, 0.75011, 0,
                         43.621, -55.7499, 12.9722, 1])
    scene.agents[0].setMat(LMatrix4f(*agentMat.ravel()))

    renderer.step(dt=0.1)
    image = renderer.getRgbImages()['agent-0']
    depth = renderer.getDepthImages(mode='distance')['agent-0']
    # Rendered distances must lie within the clipping range.
    self.assertTrue(np.min(depth) >= renderer.zNear)
    self.assertTrue(np.max(depth) <= renderer.zFar)

    # Show RGB and normalized depth side by side for one second.
    fig = plt.figure(figsize=(16, 8))
    plt.axis("off")
    ax = plt.subplot(121)
    ax.imshow(image)
    ax = plt.subplot(122)
    ax.imshow(depth / np.max(depth), cmap='binary')
    plt.show(block=False)
    time.sleep(1.0)
    plt.close(fig)

    renderer.destroy()
def testAgent(self):
    """Spawn the default agent with an initial velocity under Bullet
    physics, run the viewer for 50 steps, and always clean up through
    hulkSmash()."""
    physics = None
    viewer = None
    try:
        scene = Scene()
        physics = Panda3dBulletPhysics(scene, debug=True)
        agent = scene.agents[0].find('**/+BulletRigidBodyNode')
        agent.setPos(LVector3f(0, 0, 1.0))
        agent.node().setLinearVelocity(LVector3f(1, 0, 0))
        agent.node().setAngularVelocity(LVector3f(0, 0, 1))
        agent.node().setActive(True)

        viewer = Viewer(scene, interactive=False)
        viewer.disableMouse()
        # Identity rotation, camera offset 10 units along -Y.
        camValues = np.array([1, 0, 0, 0,
                              0, 1, 0, 0,
                              0, 0, 1, 0,
                              0, -10, 0, 1])
        viewer.cam.setMat(LMatrix4f(*camValues.ravel()))

        for _ in range(50):
            viewer.step()
        time.sleep(1.0)
    finally:
        self.hulkSmash(None, physics, viewer)
def testStep(self):
    """Load a SUNCG house, show its models, and run the interactive
    viewer for 20 steps with a fixed camera.

    BUGFIX: ``viewer`` is initialized to None before the try block and
    the cleanup is guarded; previously an exception raised before the
    Viewer was constructed (e.g. while loading the house) caused a
    NameError in ``finally`` that masked the original error.
    """
    viewer = None
    try:
        scene = SunCgSceneLoader.loadHouseFromJson(
            "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)

        #NOTE: show initial models loaded into the scene
        for model in scene.scene.findAllMatches('**/+ModelNode'):
            model.show()

        viewer = Viewer(scene, shadowing=True)

        # Configure the camera
        #NOTE: in Panda3D, the X axis points to the right, the Y axis is
        # forward, and Z is up
        mat = np.array([
            0.999992, 0.00394238, 0, 0, -0.00295702, 0.750104, -0.661314, 0,
            -0.00260737, 0.661308, 0.75011, 0, 43.621, -55.7499, 12.9722, 1
        ])
        mat = LMatrix4f(*mat.ravel())
        viewer.cam.setMat(mat)

        for _ in range(20):
            viewer.step()
        time.sleep(1.0)
    finally:
        if viewer is not None:
            viewer.destroy()
def spinCameraTask(self, task):
    """Per-frame task: step the simulation, mirror the simulated poses
    onto the Panda nodes, track the "spine" object with the camera, and
    stop after 100 s of wall-clock time.

    PY3 FIX: uses dict.items() instead of the Python-2-only
    iteritems().
    """
    if self.starttime is None:
        self.starttime = time.time()
    DT = 0.01
    self.t += DT
    self.positions, self.velocities, self.rotations = self.timestep(
        self.positions, self.velocities, self.rotations)
    for obj_name, obj in self.objects.items():
        sc = obj.getScale()
        idx = self.physics.get_object_index(obj_name)
        # setMat overwrites scale, so restore it afterwards.
        obj.setMat(
            self.render,
            LMatrix4f(LMatrix3f(*self.rotations[idx, :, :].flatten())))
        obj.setPos(*self.positions[idx, :])
        obj.setScale(sc)
    # Camera tracks the spine object.
    self.camera.setPos(0, 2, 0.3)
    self.camera.lookAt(
        *self.positions[self.physics.get_object_index("spine"), :])
    real_time = time.time() - self.starttime
    self.textObject.setText('Time: %3.3f s\n%3.3fx real time\n%s' %
                            (self.t, self.t / real_time, ""))
    if real_time > 100:
        self.userExit()
    return Task.cont
def testDebugHouseWithViewer(self):
    """Load a SUNCG house with debug Bullet physics and run the viewer
    for 20 steps with a fixed camera.

    BUGFIX: ``physics`` and ``viewer`` are initialized to None and the
    cleanup is guarded; previously a failure before either object was
    constructed raised a NameError inside ``finally`` that masked the
    original exception.
    """
    physics = None
    viewer = None
    try:
        scene = SunCgSceneLoader.loadHouseFromJson(
            "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)

        physics = Panda3dBulletPhysics(
            scene, suncgDatasetRoot=TEST_SUNCG_DATA_DIR, debug=True)

        viewer = Viewer(scene, interactive=False)
        viewer.disableMouse()
        mat = np.array([
            0.999992, 0.00394238, 0, 0, -0.00295702, 0.750104, -0.661314, 0,
            -0.00260737, 0.661308, 0.75011, 0, 43.621, -55.7499, 12.9722, 1
        ])
        mat = LMatrix4f(*mat.ravel())
        viewer.cam.setMat(mat)

        for _ in range(20):
            viewer.step()
        time.sleep(1.0)
    finally:
        if physics is not None:
            physics.destroy()
        if viewer is not None:
            viewer.destroy()
            viewer.graphicsEngine.removeAllWindows()
def testStep(self):
    """Render one semantics frame offscreen and verify that every
    fully-opaque rendered pixel is close to one of the category
    colors, then display the image briefly."""
    scene = SunCgSceneLoader.loadHouseFromJson(
        "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
    renderer = Panda3dSemanticsRenderer(scene, TEST_SUNCG_DATA_DIR,
                                        mode='offscreen')
    renderer.showRoomLayout(showCeilings=False)

    agentMat = np.array([0.999992, 0.00394238, 0, 0,
                         -0.00295702, 0.750104, -0.661314, 0,
                         -0.00260737, 0.661308, 0.75011, 0,
                         43.621, -55.7499, 12.9722, 1])
    scene.agents[0].setMat(LMatrix4f(*agentMat.ravel()))

    renderer.step(dt=0.1)
    image = renderer.getRgbaImages()['agent-0']

    # Validate that all rendered colors map to original values, up to
    # some tolerance.
    eps = 1e-2
    colors = np.stack(MODEL_CATEGORY_COLOR_MAPPING.values())
    for pixel in image.reshape((-1, image.shape[-1])):
        alpha = pixel[-1]
        if alpha == 1.0:
            distances = np.sum(np.abs(colors - pixel[:3]), axis=1)
            self.assertTrue(np.min(distances) < eps)

    fig = plt.figure(figsize=(8, 8))
    plt.axis("off")
    ax = plt.subplot(111)
    ax.imshow(image)
    plt.show(block=False)
    time.sleep(1.0)
    plt.close(fig)

    renderer.destroy()
def testDebugHouseWithRender(self):
    """Load a SUNCG house with both an offscreen renderer and debug
    Bullet physics, run the viewer for 20 steps, and always clean up
    through hulkSmash()."""
    renderer = None
    physics = None
    viewer = None
    try:
        scene = SunCgSceneLoader.loadHouseFromJson(
            "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)

        renderer = Panda3dRenderer(scene, shadowing=False, mode='offscreen')
        renderer.showRoomLayout(showCeilings=False)

        physics = Panda3dBulletPhysics(
            scene, suncgDatasetRoot=TEST_SUNCG_DATA_DIR, debug=True)

        viewer = Viewer(scene, interactive=False)
        viewer.disableMouse()
        camValues = np.array([0.999992, 0.00394238, 0, 0,
                              -0.00295702, 0.750104, -0.661314, 0,
                              -0.00260737, 0.661308, 0.75011, 0,
                              43.621, -55.7499, 12.9722, 1])
        viewer.cam.setMat(LMatrix4f(*camValues.ravel()))

        for _ in range(20):
            viewer.step()
        time.sleep(1.0)
    finally:
        self.hulkSmash(renderer, physics, viewer)
def load_shader(self):
    """Load the GLSL vertex/fragment shader pair onto the model.

    Also seeds the ``my_ModelViewProjectionMatrix`` shader input with a
    default LMatrix4f, illustrating how the matrix is sent to the
    shader program when it is calculated.
    """
    shader = Shader.load(Shader.SL_GLSL, "vertex.glsl", "fragment.glsl")
    self.shader = shader
    self.model.set_shader(shader)
    self.model.set_shader_input("my_ModelViewProjectionMatrix", LMatrix4f())
def set_world_matrix(self, matrix):
    """Store the 4x4 nested *matrix* and apply it to the geometry node.

    The Panda matrix is built with the element order transposed
    relative to *matrix* (column index varies fastest), matching the
    layout Panda expects here.
    """
    self.matrix = matrix
    values = [matrix[col][row] for row in range(4) for col in range(4)]
    panda_mat = LMatrix4f(*values)
    self.geom_path.setTransform(TransformState.makeMat(panda_mat))
def spinCameraTask(self, task):
    """Per-frame task: drive the motors with sinusoidal signals, sync
    node poses from the physics, follow the camera focus object, and
    exit after t > 80 s.

    PY3 FIX: ``print()``/``dict.items()`` replace the Python-2-only
    print statement and ``iteritems()``; the unused locals p1..p4 were
    removed.
    """
    self.t += self.physics.DT
    ph = self.t * np.float32(2 * np.pi * 1.5) / 15
    # Random-walk term (not fed into the motor call below).
    ALPHA = 1.00
    self.step = (1 - ALPHA) * self.step + ALPHA * np.random.randn(16) * 30
    A1, A2, A3, A4, B1, B2, B3, B4 = 0.8, 0.8, 0.5, 0.5, 0.5, 0.5, 0, 0
    self.physics.do_time_step(motor_signals=np.array([
        A1 * sin(ph) + B1, A1 * sin(ph) + B1, -A2 * sin(ph) - B2,
        -A2 * sin(-ph) - B2
    ], dtype='float32'))
    for obj_name, obj in self.objects.items():
        # Flag simulation blow-ups.
        if (abs(self.physics.getPosition(obj_name)) > 10**5).any():
            print("problem with", obj_name)
        sc = obj.getScale()
        # setMat overwrites scale, so restore it afterwards.
        obj.setMat(
            self.render,
            LMatrix4f(
                LMatrix3f(
                    *self.physics.getRotationMatrix(obj_name).flatten())))
        obj.setPos(*self.physics.getPosition(obj_name)[:3])
        obj.setScale(sc)
    # Camera follows the configured focus object.
    self.camera.setPos(1.5, 3.5, 1.5)
    self.camera.lookAt(
        *self.physics.getPosition(self.physics.camera_focus)[:3])
    real_time = time.time() - self.starttime
    if self.textObject:
        self.textObject.setText('Time: %3.3f s\n%3.3fx real time\n%s' %
                                (self.t, self.t / real_time, ""))
    time.sleep(0.0001)
    if self.t > 80:
        self.userExit()
    return Task.cont
def rotateSO(self, x, y, z):
    """Record the Euler angles (x, y, z) and apply the rotation built
    from the stored basis vectors a0/a1/a2 to the node path, restoring
    its position afterwards."""
    if self.nodePath != None:
        self.eAng = [x, y, z]
        # Build the rotation rows from the three stored basis vectors.
        row0, row1, row2 = (
            LVecBase3f(axis[0], axis[1], axis[2])
            for axis in (self.a0, self.a1, self.a2))
        rotation = LMatrix4f(LMatrix3f(row0, row1, row2))
        self.nodePath.setMat(rotation)
        self.nodePath.setPos(self.pos[0], self.pos[1], self.pos[2])
        # Why would the center of rotation be the first corner?
        # self.nodePath.setHpr(0, 0, 90)
    else:
        print('Non-existing Node Path to rotate')
def testRenderHouse(self):
    """Load a SUNCG house with acoustics enabled, hide the ceilings,
    and step viewer and acoustics together for 20 frames."""
    scene = SunCgSceneLoader.loadHouseFromJson(
        "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)

    samplingRate = 16000.0
    hrtf = CipicHRTF(os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
                     samplingRate)
    acoustics = EvertAcoustics(scene, hrtf, samplingRate, maximumOrder=2,
                               debug=True)
    acoustics.step(0.0)

    # Hide ceilings
    for nodePath in scene.scene.findAllMatches('**/layouts/*/acoustics/*c'):
        nodePath.hide(BitMask32.allOn())

    viewer = Viewer(scene, interactive=False)

    # Configure the camera
    # NOTE: in Panda3D, the X axis points to the right, the Y axis is
    # forward, and Z is up
    camValues = np.array([0.999992, 0.00394238, 0, 0,
                          -0.00295702, 0.750104, -0.661314, 0,
                          -0.00260737, 0.661308, 0.75011, 0,
                          43.621, -55.7499, 12.9722, 1])
    viewer.cam.setMat(LMatrix4f(*camValues.ravel()))

    for _ in range(20):
        acoustics.step(dt=0.1)
        viewer.step()
    time.sleep(1.0)

    acoustics.destroy()
    viewer.destroy()
    viewer.graphicsEngine.removeAllWindows()
def spinCameraTask(self, task):
    """Playback task: step through pre-recorded simulation states
    (positions, velocities, rotations), wrapping both the playback time
    and the robot index, and update every known object's pose.

    PY3 FIX: ``print()`` replaces the Python-2-only print statement;
    stale commented-out code was removed.
    """
    frame_step = 0.01
    self.t += frame_step
    positions, velocities, rotations = (self.states[0], self.states[1],
                                        self.states[2])
    # Wrap playback around the recorded horizon and robot count.
    step = int(self.t / self.dt)
    step = step % positions.shape[0]
    self.t = self.t % (self.dt * positions.shape[0])
    robot_id = self.robot_id % positions.shape[1]
    print(np.linalg.norm(velocities[step, robot_id, 0, :3]))
    for idx, name in enumerate(self.names):
        if name in self.objects:
            obj = self.objects[name]
            sc = obj.getScale()
            # Skip NaN/inf frames so the scene graph never receives an
            # invalid transform.
            if (np.isfinite(positions[step, robot_id, idx]).all()
                    and np.isfinite(rotations[step, robot_id, idx]).all()):
                # setMat overwrites scale, so restore it afterwards.
                obj.setMat(
                    self.render,
                    LMatrix4f(LMatrix3f(
                        *rotations[step, robot_id, idx].flatten())))
                obj.setPos(*positions[step, robot_id, idx])
                obj.setScale(sc)
    time.sleep(frame_step)
    return Task.cont
def np_mat4_to_panda(np_mat):
    """Convert a 4x4 indexable matrix to an LMatrix4f, transposing the
    element order (the first index of *np_mat* varies fastest)."""
    flattened = [np_mat[col][row] for row in range(4) for col in range(4)]
    return LMatrix4f(*flattened)
def loadHouseFromJson(houseId, datasetRoot):
    """Build a Scene from a SUNCG house JSON description.

    Walks every level/node of the house file, attaching Room, Object
    and Ground instances (loaded from pre-converted EGG models) to a
    scene-graph rooted at a ``house-<id>`` NodePath, then recenters
    rooms and grounds so each node's transform is relative to its own
    bounding-box center.

    Raises an AssertionError if the JSON id does not match *houseId*,
    and an Exception if an EGG model is missing or a node type is
    unknown.
    """
    filename = SunCgSceneLoader.getHouseJsonPath(datasetRoot, houseId)
    with open(filename) as f:
        data = json.load(f)
    assert houseId == data['id']
    houseId = str(data['id'])

    # Create new node for house instance
    houseNp = NodePath('house-' + str(houseId))

    # Per-modelId instance counters, used to give objects unique names.
    objectIds = {}
    for levelId, level in enumerate(data['levels']):
        logger.debug('Loading Level %s to scene' % (str(levelId)))

        # Create new node for level instance
        levelNp = houseNp.attachNewNode('level-' + str(levelId))
        roomNpByNodeIndex = {}
        for nodeIndex, node in enumerate(level['nodes']):
            # Skip nodes flagged invalid in the dataset.
            if not node['valid'] == 1: continue

            modelId = str(node['modelId'])

            if node['type'] == 'Room':
                logger.debug('Loading Room %s to scene' % (modelId))

                # Create new nodes for room instance
                roomNp = levelNp.attachNewNode('room-' + str(modelId))
                roomLayoutsNp = roomNp.attachNewNode('layouts')
                roomObjectsNp = roomNp.attachNewNode('objects')

                # Load models defined for this room
                for roomObjFilename in reglob(
                        os.path.join(datasetRoot, 'room', houseId),
                        modelId + '[a-z].obj'):

                    # Convert extension from OBJ + MTL to EGG format
                    # NOTE(review): ``f`` shadows the earlier file
                    # handle name; it is only a path prefix here.
                    f, _ = os.path.splitext(roomObjFilename)
                    modelFilename = f + ".egg"
                    if not os.path.exists(modelFilename):
                        raise Exception(
                            'The SUNCG dataset object models need to be convert to Panda3D EGG format!')

                    # Create new node for object instance
                    objectNp = NodePath('object-' + str(modelId) + '-0')
                    objectNp.reparentTo(roomLayoutsNp)

                    model = loadModel(modelFilename)
                    model.setName('model-' + os.path.basename(f))
                    model.reparentTo(objectNp)
                    model.hide()

                # Remember which objects (by node index) belong to
                # this room, so they can be parented under it later.
                if 'nodeIndices' in node:
                    for childNodeIndex in node['nodeIndices']:
                        roomNpByNodeIndex[childNodeIndex] = roomObjectsNp

            elif node['type'] == 'Object':
                logger.debug('Loading Object %s to scene' % (modelId))

                # Instance identification
                if modelId in objectIds:
                    objectIds[modelId] = objectIds[modelId] + 1
                else:
                    objectIds[modelId] = 0

                # Create new node for object instance
                objectNp = NodePath('object-' + str(modelId) + '-' +
                                    str(objectIds[modelId]))

                #TODO: loading the BAM format would be much more efficient
                # Convert extension from OBJ + MTL to EGG format
                objFilename = os.path.join(datasetRoot, 'object',
                                           node['modelId'],
                                           node['modelId'] + '.obj')
                assert os.path.exists(objFilename)
                f, _ = os.path.splitext(objFilename)
                modelFilename = f + ".egg"
                if not os.path.exists(modelFilename):
                    raise Exception(
                        'The SUNCG dataset object models need to be convert to Panda3D EGG format!')

                model = loadModel(modelFilename)
                model.setName('model-' + os.path.basename(f))
                model.reparentTo(objectNp)
                model.hide()

                # 4x4 column-major transformation matrix from object
                # coordinates to scene coordinates
                transform = np.array(node['transform']).reshape((4, 4))

                # Transform from Y-UP to Z-UP coordinate systems
                #TODO: use Mat4.convertMat(CS_zup_right, CS_yup_right)
                yupTransform = np.array([[1, 0, 0, 0], [0, 0, -1, 0],
                                         [0, 1, 0, 0], [0, 0, 0, 1]])
                zupTransform = np.array([[1, 0, 0, 0], [0, 0, 1, 0],
                                         [0, -1, 0, 0], [0, 0, 0, 1]])
                transform = np.dot(np.dot(yupTransform, transform),
                                   zupTransform)
                transform = TransformState.makeMat(
                    LMatrix4f(*transform.ravel()))

                # Calculate the center of this object
                minBounds, maxBounds = model.getTightBounds()
                centerPos = minBounds + (maxBounds - minBounds) / 2.0

                # Add offset transform to make position relative to the center
                objectNp.setTransform(
                    transform.compose(TransformState.makePos(centerPos)))
                model.setTransform(TransformState.makePos(-centerPos))

                # Get the parent nodepath for the object (room or level)
                if nodeIndex in roomNpByNodeIndex:
                    objectNp.reparentTo(roomNpByNodeIndex[nodeIndex])
                else:
                    objectNp.reparentTo(levelNp)

                # Validation: the net transform must match the intended
                # object-to-scene transform despite the recentering.
                assert np.allclose(mat4ToNumpyArray(
                    model.getNetTransform().getMat()),
                    mat4ToNumpyArray(transform.getMat()),
                    atol=1e-6)

                objectNp.setTag('model-id', str(modelId))
                objectNp.setTag('level-id', str(levelId))
                objectNp.setTag('house-id', str(houseId))

            elif node['type'] == 'Ground':
                logger.debug('Loading Ground %s to scene' % (modelId))

                # Create new nodes for ground instance
                groundNp = levelNp.attachNewNode('ground-' + str(modelId))
                groundLayoutsNp = groundNp.attachNewNode('layouts')

                # Load model defined for this ground
                for groundObjFilename in reglob(
                        os.path.join(datasetRoot, 'room', houseId),
                        modelId + '[a-z].obj'):

                    # Convert extension from OBJ + MTL to EGG format
                    f, _ = os.path.splitext(groundObjFilename)
                    modelFilename = f + ".egg"
                    if not os.path.exists(modelFilename):
                        raise Exception(
                            'The SUNCG dataset object models need to be convert to Panda3D EGG format!')

                    objectNp = NodePath('object-' + str(modelId) + '-0')
                    objectNp.reparentTo(groundLayoutsNp)

                    model = loadModel(modelFilename)
                    model.setName('model-' + os.path.basename(f))
                    model.reparentTo(objectNp)
                    model.hide()

            else:
                raise Exception('Unsupported node type: %s' % (node['type']))

    scene = Scene()
    houseNp.reparentTo(scene.scene)

    # Recenter objects in rooms
    for room in scene.scene.findAllMatches('**/room*'):

        # Calculate the center of this room
        minBounds, maxBounds = room.getTightBounds()
        centerPos = minBounds + (maxBounds - minBounds) / 2.0

        # Add offset transform to room node
        room.setTransform(TransformState.makePos(centerPos))

        # Add recentering transform to all children nodes
        for childNp in room.getChildren():
            childNp.setTransform(TransformState.makePos(-centerPos))

    # Recenter objects in grounds
    for ground in scene.scene.findAllMatches('**/ground*'):

        # Calculate the center of this ground
        minBounds, maxBounds = ground.getTightBounds()
        centerPos = minBounds + (maxBounds - minBounds) / 2.0

        # Add offset transform to ground node
        ground.setTransform(TransformState.makePos(centerPos))

        # Add recentering transform to all children nodes
        for childNp in ground.getChildren():
            childNp.setTransform(TransformState.makePos(-centerPos))

    return scene
def getLightsForModel(self, modelId):
    """Instantiate the Panda3D light NodePaths defined for *modelId*.

    Reads the per-model light records from ``self.data`` (skipping
    models not listed in ``self.supportedModelIds``), converts their
    Y-up positions/directions to Z-up, parses the HTML hex color, and
    builds a Spotlight or PointLight per record.

    Returns a list of NodePaths (possibly empty), each positioned and,
    for directional lights, aimed at position + direction.

    Raises an Exception for an unsupported light type.
    """
    lights = []

    if modelId in self.supportedModelIds:
        for n, lightData in enumerate(self.data[modelId]):
            attenuation = LVector3f(*lightData['attenuation'])

            #TODO: implement light power
            #power = float(lightData['power'])

            # SUNCG light data is Y-up; convert to Panda's Z-up.
            positionYup = LVector3f(*lightData['position'])
            yupTozupMat = LMatrix4f.convertMat(CS_yup_right, CS_zup_right)
            position = yupTozupMat.xformVec(positionYup)

            # Parse '#rrggbb' into a normalized [0,1] RGB vector.
            colorHtml = lightData['color']
            color = LVector3f(*[
                int('0x' + colorHtml[i:i + 2], 16)
                for i in range(1, len(colorHtml), 2)
            ]) / 255.0

            direction = None
            lightType = lightData['type']
            lightName = modelId + '-light-' + str(n)
            if lightType == 'SpotLight':
                light = Spotlight(lightName)
                light.setAttenuation(attenuation)
                light.setColor(color)

                cutoffAngle = float(lightData['cutoffAngle'])
                lens = PerspectiveLens()
                # cutoffAngle is in radians; lens FOV is in degrees.
                lens.setFov(cutoffAngle / np.pi * 180.0)
                light.setLens(lens)

                # NOTE: unused attributes
                #dropoffRate = float(lightData['dropoffRate'])

                directionYup = LVector3f(*lightData['direction'])
                direction = yupTozupMat.xformVec(directionYup)

            elif lightType == 'PointLight':
                light = PointLight(lightName)
                light.setAttenuation(attenuation)
                light.setColor(color)

            elif lightType == 'LineLight':
                #XXX: we may wish to use RectangleLight from the devel
                # branch of Panda3D
                light = PointLight(lightName)
                light.setAttenuation(attenuation)
                light.setColor(color)

                # NOTE: unused attributes
                #dropoffRate = float(lightData['dropoffRate'])
                #cutoffAngle = float(lightData['cutoffAngle'])
                #position2Yup = LVector3f(*lightData['position2'])
                #position2 = yupTozupMat.xformVec(position2Yup)
                #directionYup = LVector3f(*lightData['direction'])
                #direction = yupTozupMat.xformVec(directionYup)

            else:
                raise Exception('Unsupported light type: %s' % (lightType))

            lightNp = NodePath(light)

            # Set position and direction of light
            lightNp.setPos(position)
            if direction is not None:
                targetPos = position + direction
                lightNp.look_at(targetPos, LVector3f.up())

            lights.append(lightNp)

    return lights
def testRenderSimpleCubeRoom(self):
    """Exercise EVERT acoustics inside a simple 10 m cube room.

    Builds a wireframe cube room and a spherical sound source, attaches
    an audio file to the source, verifies the acoustics world geometry
    (size, center, element counts), moves the agent to three listening
    positions while rendering, and finally plots the computed binaural
    impulse response.
    """
    samplingRate = 16000.0
    scene = Scene()
    hrtf = CipicHRTF(
        os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
        samplingRate)
    viewer = Viewer(scene, interactive=False)

    # Define a simple cube (10 x 10 x 10 m) as room geometry
    roomSize = 10.0
    modelId = 'room-0'
    modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'cube.egg')
    layoutNp = scene.scene.attachNewNode('layouts')
    objectNp = layoutNp.attachNewNode('object-' + modelId)
    # Tag so the acoustics engine treats the cube as an obstacle.
    objectNp.setTag('acoustics-mode', 'obstacle')
    model = loadModel(modelFilename)
    model.setName('model-' + modelId)
    model.setTransform(TransformState.makeScale(roomSize))
    model.setRenderModeWireframe()
    model.reparentTo(objectNp)
    objectNp.setPos(LVecBase3f(0.0, 0.0, 0.0))

    # Define a sound source
    sourceSize = 0.25
    modelId = 'source-0'
    modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
    objectsNp = scene.scene.attachNewNode('objects')
    objectNp = objectsNp.attachNewNode('object-' + modelId)
    objectNp.setTag('acoustics-mode', 'source')
    model = loadModel(modelFilename)
    model.setName('model-' + modelId)
    model.setTransform(TransformState.makeScale(sourceSize))
    model.reparentTo(objectNp)
    objectNp.setPos(LVecBase3f(0.0, 0.0, 0.0))

    acoustics = EvertAcoustics(scene, hrtf, samplingRate, maximumOrder=3,
                               materialAbsorption=False,
                               frequencyDependent=False, debug=True)

    # Attach sound to object
    filename = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
    sound = EvertAudioSound(filename)
    acoustics.attachSoundToObject(sound, objectNp)

    acoustics.step(0.1)

    # Check the acoustics world geometry.
    # NOTE: getMaxLength() is in millimeters, hence the /1000.0.
    center = acoustics.world.getCenter()
    self.assertTrue(
        np.allclose(acoustics.world.getMaxLength() / 1000.0, roomSize))
    self.assertTrue(
        np.allclose([center.x, center.y, center.z], [0.0, 0.0, 0.0]))
    # A cube has 6 faces, each triangulated into 2 elements.
    self.assertTrue(acoustics.world.numElements() == 12)
    self.assertTrue(acoustics.world.numConvexElements() == 12)

    # Configure the camera
    # NOTE: in Panda3D, the X axis points to the right, the Y axis is
    # forward, and Z is up
    mat = np.array([
        0.999992, 0.00394238, 0, 0,
        -0.00295702, 0.750104, -0.661314, 0, -0.00260737, 0.661308,
        0.75011, 0, 0.0, -25.0, 22, 1
    ])
    mat = LMatrix4f(*mat.ravel())
    viewer.cam.setMat(mat)

    # Render from three different agent (listener) positions.
    agentNp = scene.agents[0]
    agentNp.setPos(
        LVecBase3f(0.25 * roomSize, -0.25 * roomSize, 0.3 * roomSize))
    for _ in range(10):
        viewer.step()
    time.sleep(1.0)

    agentNp.setPos(
        LVecBase3f(0.35 * roomSize, -0.35 * roomSize, 0.4 * roomSize))
    for _ in range(10):
        viewer.step()
    time.sleep(1.0)

    agentNp.setPos(
        LVecBase3f(-0.25 * roomSize, 0.25 * roomSize, -0.3 * roomSize))
    for _ in range(10):
        viewer.step()
    time.sleep(1.0)

    # Calculate and show impulse responses
    impulse = acoustics.calculateImpulseResponse(objectNp.getName(),
                                                 agentNp.getName())

    fig = plt.figure()
    plt.plot(impulse.impulse[0], color='b', label='Left channel')
    plt.plot(impulse.impulse[1], color='g', label='Right channel')
    plt.legend()
    plt.show(block=False)
    time.sleep(1.0)
    plt.close(fig)

    acoustics.destroy()
    viewer.destroy()
    viewer.graphicsEngine.removeAllWindows()
def __init__(self, bam_file, bam_version):
    """Initialize the moving part from the BAM stream and give it
    default current/initial transform matrices."""
    MovingPartBase.__init__(self, bam_file, bam_version)
    # Both matrices start as default LMatrix4f instances; they are
    # assigned real values elsewhere — TODO confirm against callers.
    self.matrix = LMatrix4f()
    self.initial_matrix = LMatrix4f()
def testRenderHouseWithAcousticsPath(self):
    """Exercise EVERT acoustics in a full SUNCG house.

    Places the agent and a spherical sound source in the house, plays
    an audio file through the acoustics engine, renders the scene from
    a top-down camera for 20 steps, and finally plots the computed
    binaural impulse response between source and agent.
    """
    scene = SunCgSceneLoader.loadHouseFromJson(
        "0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)

    agentNp = scene.agents[0]
    agentNp.setPos(LVecBase3f(45, -42.5, 1.6))
    agentNp.setHpr(45, 0, 0)

    # Define a sound source
    sourceSize = 0.25
    modelId = 'source-0'
    modelFilename = os.path.join(TEST_DATA_DIR, 'models', 'sphere.egg')
    objectsNp = scene.scene.attachNewNode('objects')
    # Tag so the acoustics engine treats this subtree as a source.
    objectsNp.setTag('acoustics-mode', 'source')
    objectNp = objectsNp.attachNewNode('object-' + modelId)
    model = loadModel(modelFilename)
    model.setName('model-' + modelId)
    model.setTransform(TransformState.makeScale(sourceSize))
    model.reparentTo(objectNp)
    objectNp.setPos(LVecBase3f(39, -40.5, 1.5))

    samplingRate = 16000.0
    hrtf = CipicHRTF(
        os.path.join(TEST_DATA_DIR, 'hrtf', 'cipic_hrir.mat'),
        samplingRate)
    acoustics = EvertAcoustics(scene, hrtf, samplingRate, maximumOrder=2,
                               debug=True)

    # Attach sound to object
    filename = os.path.join(TEST_DATA_DIR, 'audio', 'toilet.ogg')
    sound = EvertAudioSound(filename)
    acoustics.attachSoundToObject(sound, objectNp)
    sound.play()

    acoustics.step(0.0)

    # Hide ceilings
    for nodePath in scene.scene.findAllMatches(
            '**/layouts/*/acoustics/*c'):
        nodePath.hide(BitMask32.allOn())

    viewer = Viewer(scene, interactive=False)

    # Configure the camera
    # NOTE: in Panda3D, the X axis points to the right, the Y axis is
    # forward, and Z is up
    # Top-down view centered above the agent, 20 units up.
    center = agentNp.getNetTransform().getPos()
    mat = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, -1.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0], [center.x, center.y, 20, 1]])
    mat = LMatrix4f(*mat.ravel())
    viewer.cam.setMat(mat)

    for _ in range(20):
        viewer.step()
    time.sleep(1.0)

    viewer.destroy()
    viewer.graphicsEngine.removeAllWindows()

    # Calculate and show impulse responses
    impulse = acoustics.calculateImpulseResponse(objectNp.getName(),
                                                 agentNp.getName())

    fig = plt.figure()
    plt.plot(impulse.impulse[0], color='b', label='Left channel')
    plt.plot(impulse.impulse[1], color='g', label='Right channel')
    plt.legend()
    plt.show(block=False)
    time.sleep(1.0)
    plt.close(fig)

    acoustics.destroy()