def __init__(self, context, name, parent,
             numberOfObjects,
             port, baud,
             maxHoaOrder,
             sofaFile,
             interpolationSteps,
             headTracking,
             udpReceivePort=4242,
             headTrackingCalibrationPort=None):
    super(HoaBinauralRendererSAW, self).__init__(context, name, parent)
    self.hoaBinauralRenderer = HoaBinauralRendererSerial(
        context, "HoaBinauralRendererSerial", self,
        numberOfObjects, port, baud, maxHoaOrder, sofaFile,
        interpolationSteps, headTracking,
        headTrackingCalibrationPort=headTrackingCalibrationPort)
    self.sceneReceiver = rcl.UdpReceiver(
        context, "SceneReceiver", self,
        port=udpReceivePort,
        mode=rcl.UdpReceiver.Mode.Asynchronous)
    self.sceneDecoder = rcl.SceneDecoder(context, "SceneDecoder", self)
    self.parameterConnection(
        self.sceneReceiver.parameterPort("messageOutput"),
        self.sceneDecoder.parameterPort("datagramInput"))
    self.parameterConnection(
        self.sceneDecoder.parameterPort("objectVectorOutput"),
        self.hoaBinauralRenderer.parameterPort("objectVector"))
    self.objectSignalInput = visr.AudioInputFloat("audioIn", self, numberOfObjects)
    self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)
    self.audioConnection(self.objectSignalInput,
                         self.hoaBinauralRenderer.audioPort("audioIn"))
    self.audioConnection(self.hoaBinauralRenderer.audioPort("audioOut"),
                         self.binauralOutput)
    if headTrackingCalibrationPort is not None:
        self.calibrationTriggerReceiver = rcl.UdpReceiver(
            context, "CalibrationTriggerReceiver", self,
            port=headTrackingCalibrationPort)
        self.parameterConnection(
            self.calibrationTriggerReceiver.parameterPort("messageOutput"),
            self.hoaBinauralRenderer.parameterPort("headTrackingCalibration"))
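# Usage sketch for the renderer above, following the usual VISR top-level
# setup. The serial port name, SOFA file path, and parameter values are
# placeholders; the import path of HoaBinauralRendererSAW depends on the
# local module layout.
import visr
import rrl

blockSize = 1024
samplingFrequency = 48000
context = visr.SignalFlowContext(period=blockSize,
                                 samplingFrequency=samplingFrequency)
renderer = HoaBinauralRendererSAW(context, "HoaRenderer", None,
                                  numberOfObjects=4,
                                  port="/dev/ttyUSB0", baud=57600,
                                  maxHoaOrder=3,
                                  sofaFile="hoaDecodingFilters.sofa",
                                  interpolationSteps=blockSize,
                                  headTracking=True)
flow = rrl.AudioSignalFlow(renderer)  # executable flow for real-time or offline use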
def __init__(self, context, name, parent,
             numLoudspeakers,
             port, baud,
             sofaFile,
             enableSerial=True,
             dynITD=True,
             hrirInterp=True,
             irTruncationLength=None,
             headTrackingCalibrationPort=None,
             switchUdpPort=12345):
    super(Comparison, self).__init__(context, name, parent)
    self.input = visr.AudioInputFloat("in", self, 2)
    self.output = visr.AudioOutputFloat("out", self, 2)
    self.renderer = VirtualLoudspeakerRendererSerial(
        context, "renderer", self,
        numLoudspeakers, port, baud, sofaFile,
        enableSerial=enableSerial,
        dynITD=dynITD,
        hrirInterp=hrirInterp,
        irTruncationLength=irTruncationLength,
        headTrackingCalibrationPort=headTrackingCalibrationPort)
    # A network-controlled switch toggles between the rendered signal and
    # the unprocessed input.
    self.controlReceiver = rcl.UdpReceiver(
        context, "ControlReceiver", self,
        port=switchUdpPort,
        mode=rcl.UdpReceiver.Mode.Asynchronous)
    self.switch = AudioSignalSwitch(context, "OutputSwitch", self,
                                    numberOfChannels=2,
                                    numberOfInputs=2,
                                    controlDataType=pml.Float)
    self.controlDecoder = rcl.ScalarOscDecoder(context, "ControlDecoder", self)
    self.controlDecoder.setup(dataType='float')
    self.parameterConnection(
        self.controlReceiver.parameterPort("messageOutput"),
        self.controlDecoder.parameterPort("datagramInput"))
    self.parameterConnection(self.controlDecoder.parameterPort("dataOut"),
                             self.switch.parameterPort("controlIn"))
    self.audioConnection(self.input, self.renderer.audioPort("audioIn"))
    self.audioConnection(self.renderer.audioPort("audioOut"),
                         self.switch.audioPort("in_0"))
    self.audioConnection(self.input, self.switch.audioPort("in_1"))
    self.audioConnection(self.switch.audioPort("out"), self.output)
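# Control sketch for the A/B switch above. Assumptions: the python-osc
# package is used on the sender side, and rcl.ScalarOscDecoder accepts an
# OSC message carrying a single float regardless of its address; the
# address "/switch" is hypothetical.
from pythonosc.udp_client import SimpleUDPClient

client = SimpleUDPClient("127.0.0.1", 12345)  # matches the switchUdpPort default
client.send_message("/switch", 0.0)  # 0.0 selects the rendered signal (in_0)
client.send_message("/switch", 1.0)  # 1.0 selects the unprocessed input (in_1)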
def __init__(self, context, name, parent,
             loudspeakerConfigFiles,
             numberOfInputs,
             numberOfOutputs,
             interpolationPeriod,
             diffusionFilterFile,
             trajectoryPositions,
             trajectoryUpdateSamples=None,
             sourceLevel=1.0,
             sourceObjectId=0,
             controlReceivePort=8889,
             trackingConfiguration=''):
    super(RealTimeMultiRendererTrajectory, self).__init__(context, name, parent)
    self.input = visr.AudioInputFloat("in", self, numberOfInputs)
    self.output = visr.AudioOutputFloat("out", self, numberOfOutputs)
    rendererConfigs = []
    for cfgFile in loudspeakerConfigFiles:
        rendererConfigs.append(panning.LoudspeakerArray(cfgFile))
    diffFilters = np.array(
        pml.MatrixParameterFloat.fromAudioFile(diffusionFilterFile))
    if trajectoryUpdateSamples is None:
        trajectoryUpdateSamples = self.period()
    self.multiRenderer = MultiRenderer(
        context, name, self,
        loudspeakerConfigs=rendererConfigs,
        numberOfInputs=numberOfInputs,
        numberOfOutputs=numberOfOutputs,
        interpolationPeriod=trajectoryUpdateSamples,
        diffusionFilters=diffFilters,
        trackingConfiguration=trackingConfiguration,
        controlDataType=pml.Float)
    self.audioConnection(self.input, self.multiRenderer.audioPort("in"))
    self.audioConnection(self.multiRenderer.audioPort("out"), self.output)
    self.sceneGenerator = PointSourceTrajectoryGenerator(
        context, "SceneDecoder", self,
        positions=trajectoryPositions,
        updateRateSamples=trajectoryUpdateSamples,
        objectId=sourceObjectId,
        groupId=0,
        priority=0,
        objectLevel=sourceLevel)
    self.parameterConnection(
        self.sceneGenerator.parameterPort("objectVectorOutput"),
        self.multiRenderer.parameterPort("objectIn"))
    self.controlReceiver = rcl.UdpReceiver(
        context, "ControlReceiver", self,
        port=controlReceivePort,
        mode=rcl.UdpReceiver.Mode.Asynchronous)
    self.controlDecoder = rcl.ScalarOscDecoder(context, "ControlDecoder", self)
    self.controlDecoder.setup(dataType='float')
    self.parameterConnection(
        self.controlReceiver.parameterPort("messageOutput"),
        self.controlDecoder.parameterPort("datagramInput"))
    self.parameterConnection(self.controlDecoder.parameterPort("dataOut"),
                             self.multiRenderer.parameterPort("controlIn"))
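# Sketch for constructing the trajectoryPositions argument, assuming the
# PointSourceTrajectoryGenerator cycles through an N x 3 array of Cartesian
# positions at the configured update rate (the exact expected layout should
# be checked against that component). Creates a horizontal circle of radius
# 1 m around the listener.
import numpy as np

numSteps = 360
azimuths = np.linspace(0.0, 2.0 * np.pi, numSteps, endpoint=False)
trajectoryPositions = np.stack([np.cos(azimuths),     # x
                                np.sin(azimuths),     # y
                                np.zeros(numSteps)],  # z (horizontal plane)
                               axis=1)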
def __init__(self, context, name, parent, numberOfObjects, lspConfig, nwPort):
    """
    Constructor, instantiates the component, all contained sub-components,
    and their connections.

    Parameters
    ----------
    self: VbapRenderer
        self argument, mandatory for Python methods.
    context: visr.SignalFlowContext
        A context object containing the sampling frequency and the block
        size. This is a mandatory parameter for VISR components.
    name: string
        Name of the component to be identified within a containing component.
    parent: visr.CompositeComponent
        A containing component, or None if this is the top-level component.
    numberOfObjects: int
        The maximum number of objects to be rendered.
    lspConfig: panning.LoudspeakerArray
        Object containing the loudspeaker positions.
    nwPort: int
        Port number of a UDP connection to receive object metadata messages.
    """
    super().__init__(context, name, parent)
    # Accept either a LoudspeakerArray object or a path to a configuration file.
    if not isinstance(lspConfig, panning.LoudspeakerArray):
        lspConfig = panning.LoudspeakerArray(lspConfig)
    self.audioIn = visr.AudioInputFloat("in", self, numberOfObjects)
    self.audioOut = visr.AudioOutputFloat(
        "out", self, lspConfig.numberOfRegularLoudspeakers)
    self.receiver = rcl.UdpReceiver(context, "NetworkReceiver", self, port=nwPort)
    self.decoder = rcl.SceneDecoder(context, "SceneDecoder", self)
    self.panner = VbapRenderer(context, "VbapPanner", self, numberOfObjects,
                               lspConfig)
    self.audioConnection(self.audioIn, self.panner.audioPort("in"))
    self.audioConnection(self.panner.audioPort("out"), self.audioOut)
    self.parameterConnection(self.receiver.parameterPort("messageOutput"),
                             self.decoder.parameterPort("datagramInput"))
    self.parameterConnection(
        self.decoder.parameterPort("objectVectorOutput"),
        self.panner.parameterPort("objects"))
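# Usage sketch: because the constructor converts a non-LoudspeakerArray
# argument itself, a path to a loudspeaker configuration file can be passed
# directly. The enclosing class name RealtimeVbapRenderer and the file name
# are assumptions.
import visr
import rrl

context = visr.SignalFlowContext(period=64, samplingFrequency=48000)
renderer = RealtimeVbapRenderer(context, "renderer", None,
                                numberOfObjects=2,
                                lspConfig="stereo_config.xml",
                                nwPort=4242)
flow = rrl.AudioSignalFlow(renderer)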
def __init__(self, context, name, parent, numberOfObjects, lspArray, nwPort):
    super().__init__(context, name, parent)
    if not isinstance(lspArray, panning.LoudspeakerArray):
        lspArray = panning.LoudspeakerArray(lspArray)
    self.audioIn = visr.AudioInputFloat("in", self, numberOfObjects)
    self.audioOut = visr.AudioOutputFloat(
        "out", self, lspArray.numberOfRegularLoudspeakers)
    self.receiver = rcl.UdpReceiver(context, "NetworkReceiver", self, port=nwPort)
    self.decoder = rcl.SceneDecoder(context, "SceneDecoder", self)
    self.panner = VbapL2Renderer(context, "VbapPanner", self, numberOfObjects,
                                 lspArray)
    self.audioConnection(self.audioIn, self.panner.audioPort("in"))
    self.audioConnection(self.panner.audioPort("out"), self.audioOut)
    self.parameterConnection(self.receiver.parameterPort("messageOutput"),
                             self.decoder.parameterPort("datagramInput"))
    self.parameterConnection(
        self.decoder.parameterPort("objectVectorOutput"),
        self.panner.parameterPort("objects"))
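# Metadata sketch: sending a single point-source object to the UDP port of
# one of the VBAP renderers above. The JSON field layout is a best-effort
# rendition of the S3A object metadata format accepted by rcl.SceneDecoder
# and may need adjusting against the actual schema.
import json
import socket

message = {"objects": [{"id": "0", "channels": "0", "type": "point",
                        "group": 0, "priority": "0", "level": 0.5,
                        "position": {"az": 30.0, "el": 0.0, "refdist": 1.0}}]}
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(json.dumps(message).encode("utf-8"), ("127.0.0.1", 4242))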
def __init__(self, context, name, parent,
             *,  # Only keyword arguments after this point.
             numberOfObjects,
             maxHoaOrder,
             sofaFile=None,
             decodingFilters=None,
             interpolationSteps=None,
             headTracking=True,
             headOrientation=None,
             objectChannelAllocation=False,
             fftImplementation="default",
             headTrackingReceiver=None,
             headTrackingPositionalArguments=None,
             headTrackingKeywordArguments=None,
             sceneReceiveUdpPort=None):
    """
    Constructor.

    Parameters
    ----------
    context : visr.SignalFlowContext
        Standard visr.Component construction argument, holds the block size
        and the sampling frequency.
    name : string
        Name of the component, standard visr.Component construction argument.
    parent : visr.CompositeComponent
        Containing component if there is one, None if this is a top-level
        component of the signal flow.
    numberOfObjects : int
        The number of audio objects to be rendered.
    maxHoaOrder: int
        HOA order used for encoding the point source and plane wave objects.
    sofaFile: string, optional
        A SOFA file containing the HOA decoding filters. These are expected
        as a 2 x (maxHoaOrder+1)^2 array in the field Data.IR.
    decodingFilters : numpy.ndarray, optional
        Alternative way to provide the HOA decoding filters. Expects a
        2 x (maxHoaOrder+1)^2 matrix containing FIR coefficients.
    interpolationSteps: int, optional
        Number of samples to transition to new object positions after an
        update.
    headOrientation : array-like
        Head orientation in spherical coordinates (2- or 3-element vector or
        list). Either a static orientation (when no tracking is used), or
        the initial view direction.
    headTracking: bool
        Whether dynamic head tracking is active.
    objectChannelAllocation: bool
        Whether the processing resources are allocated from a pool of
        resources (True), or whether fixed processing resources statically
        tied to the audio signal channels are used. Not implemented at the
        moment, so leave the default value (False).
    fftImplementation: string, optional
        The FFT implementation to use. The default value enables VISR's
        default FFT library for the platform.
    headTrackingReceiver: class type, optional
        Class of the head tracking receiver; None (default value) disables
        dynamic head tracking.
    headTrackingPositionalArguments: tuple, optional
        Positional arguments passed to the constructor of the head tracking
        receiver object. Must be a tuple. If there is only a single
        argument, a trailing comma must be added.
    headTrackingKeywordArguments: dict, optional
        Keyword arguments passed to the constructor of the head tracking
        receiver. Must be a dictionary (dict).
    sceneReceiveUdpPort: int, optional
        A UDP port number where scene object metadata (in the S3A JSON
        format) is to be received. If not given (default), no network
        receiver is instantiated, and the object exposes a top-level
        parameter input port "objectVector".
    """
    super(RealtimeHoaObjectToBinauralRenderer, self).__init__(context, name, parent)
    self.objectSignalInput = visr.AudioInputFloat("audioIn", self, numberOfObjects)
    self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)
    enableTracking = (headTrackingReceiver is not None)
    self.hoaBinauralRenderer = HoaObjectToBinauralRenderer(
        context, "HoaBinauralRenderer", self,
        numberOfObjects=numberOfObjects,
        maxHoaOrder=maxHoaOrder,
        sofaFile=sofaFile,
        decodingFilters=decodingFilters,
        interpolationSteps=interpolationSteps,
        headTracking=headTracking,
        headOrientation=headOrientation,
        fftImplementation=fftImplementation)
    if sceneReceiveUdpPort is None:
        # Without a network receiver, expose the object vector as a
        # top-level parameter input.
        self.objectVectorInput = visr.ParameterInput(
            "objectVector", self,
            pml.ObjectVector.staticType,
            pml.DoubleBufferingProtocol.staticType,
            pml.EmptyParameterConfig())
        self.parameterConnection(
            self.objectVectorInput,
            self.hoaBinauralRenderer.parameterPort("objects"))
    else:
        self.sceneReceiver = rcl.UdpReceiver(context, "SceneReceiver", self,
                                             port=int(sceneReceiveUdpPort))
        self.sceneDecoder = rcl.SceneDecoder(context, "SceneDecoder", self)
        self.parameterConnection(
            self.sceneReceiver.parameterPort("messageOutput"),
            self.sceneDecoder.parameterPort("datagramInput"))
        self.parameterConnection(
            self.sceneDecoder.parameterPort("objectVectorOutput"),
            self.hoaBinauralRenderer.parameterPort("objects"))
    if enableTracking:
        if headTrackingPositionalArguments is None:
            headTrackingPositionalArguments = ()
        if headTrackingKeywordArguments is None:
            headTrackingKeywordArguments = {}
        self.trackingDevice = headTrackingReceiver(
            context, "HeadTrackingReceiver", self,
            *headTrackingPositionalArguments,
            **headTrackingKeywordArguments)
        self.parameterConnection(
            self.trackingDevice.parameterPort("orientation"),
            self.hoaBinauralRenderer.parameterPort("tracking"))
    self.audioConnection(self.objectSignalInput,
                         self.hoaBinauralRenderer.audioPort("audioIn"))
    self.audioConnection(self.hoaBinauralRenderer.audioPort("audioOut"),
                         self.binauralOutput)
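# Instantiation sketch showing how the head tracking hooks are used; context
# is a visr.SignalFlowContext as in the earlier sketches.
# SerialOrientationReceiver is a hypothetical component class; any component
# exposing an "orientation" parameter output port fits the interface.
renderer = RealtimeHoaObjectToBinauralRenderer(
    context, "renderer", None,
    numberOfObjects=2,
    maxHoaOrder=3,
    sofaFile="hoaDecodingFilters.sofa",
    headTrackingReceiver=SerialOrientationReceiver,
    headTrackingPositionalArguments=("/dev/ttyUSB0",),  # note the trailing comma
    headTrackingKeywordArguments={"baudRate": 57600})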
def __init__(self, context, name, parent,
             loudspeakerConfig,
             numberOfInputs,
             numberOfOutputs,
             trajectoryPositions,
             trajectoryUpdateSamples=None,
             sourceLevel=1.0,
             sourceObjectId=0,
             controlDataType=pml.Float,
             controlReceivePort=8889):
    if trajectoryUpdateSamples is None:
        trajectoryUpdateSamples = context.period
    super(RealTimePanningComparisonTrajectory, self).__init__(context, name, parent)
    self.input = visr.AudioInputFloat("in", self, numberOfInputs)
    self.output = visr.AudioOutputFloat("out", self, numberOfOutputs)
    self.multiRenderer = PanningComparison(
        context, "MultiRenderer", self,
        loudspeakerConfig=loudspeakerConfig,
        numberOfInputs=numberOfInputs,
        numberOfOutputs=numberOfOutputs,
        interpolationSteps=trajectoryUpdateSamples,
        controlDataType=controlDataType)
    self.audioConnection(self.input, self.multiRenderer.audioPort("in"))
    self.audioConnection(self.multiRenderer.audioPort("out"), self.output)
    self.sceneGenerator = PointSourceTrajectoryGenerator(
        context, "SceneDecoder", self,
        positions=trajectoryPositions,
        updateRateSamples=trajectoryUpdateSamples,
        objectId=sourceObjectId,
        groupId=0,
        priority=0,
        objectLevel=sourceLevel)
    self.parameterConnection(
        self.sceneGenerator.parameterPort("objectVectorOutput"),
        self.multiRenderer.parameterPort("objectIn"))
    self.controlReceiver = rcl.UdpReceiver(
        context, "ControlReceiver", self,
        port=controlReceivePort,
        mode=rcl.UdpReceiver.Mode.Asynchronous)
    self.controlDecoder = rcl.ScalarOscDecoder(context, "ControlDecoder", self)
    self.controlDecoder.setup(dataType='float')
    self.parameterConnection(
        self.controlReceiver.parameterPort("messageOutput"),
        self.controlDecoder.parameterPort("datagramInput"))
    self.parameterConnection(self.controlDecoder.parameterPort("dataOut"),
                             self.multiRenderer.parameterPort("controlIn"))
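# Offline processing sketch: rrl.AudioSignalFlow.process consumes one block
# of input samples (channels x blockSize) and returns the corresponding
# output block, so the trajectory comparison can be rendered without a sound
# card. comparisonRenderer is assumed to be an instance of the class above;
# the channel count and block size are examples.
import numpy as np
import rrl

flow = rrl.AudioSignalFlow(comparisonRenderer)
inputBlock = np.zeros((numberOfInputs, blockSize), dtype=np.float32)
outputBlock = flow.process(inputBlock)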
def __init__(self, context, name, parent,
             loudspeakerConfigFiles,
             numberOfInputs,
             numberOfOutputs,
             interpolationPeriod,
             diffusionFilterFile,
             udpReceivePort=8888,
             controlReceivePort=8889,
             trackingConfiguration=''):
    super(RealTimeMultiRenderer, self).__init__(context, name, parent)
    self.input = visr.AudioInputFloat("in", self, numberOfInputs)
    self.output = visr.AudioOutputFloat("out", self, numberOfOutputs)
    rendererConfigs = []
    for cfgFile in loudspeakerConfigFiles:
        rendererConfigs.append(panning.LoudspeakerArray(cfgFile))
    diffFilters = np.array(
        pml.MatrixParameterFloat.fromAudioFile(diffusionFilterFile))
    self.multiRenderer = MultiRenderer(
        context, name, self,
        loudspeakerConfigs=rendererConfigs,
        numberOfInputs=numberOfInputs,
        numberOfOutputs=numberOfOutputs,
        interpolationPeriod=interpolationPeriod,
        diffusionFilters=diffFilters,
        trackingConfiguration=trackingConfiguration,
        controlDataType=pml.Float)
    self.audioConnection(self.input, self.multiRenderer.audioPort("in"))
    self.audioConnection(self.multiRenderer.audioPort("out"), self.output)
    self.sceneReceiver = rcl.UdpReceiver(
        context, "SceneReceiver", self,
        port=udpReceivePort,
        mode=rcl.UdpReceiver.Mode.Asynchronous)
    self.sceneDecoder = rcl.SceneDecoder(context, "SceneDecoder", self)
    self.parameterConnection(
        self.sceneReceiver.parameterPort("messageOutput"),
        self.sceneDecoder.parameterPort("datagramInput"))
    self.parameterConnection(
        self.sceneDecoder.parameterPort("objectVectorOutput"),
        self.multiRenderer.parameterPort("objectIn"))
    self.controlReceiver = rcl.UdpReceiver(
        context, "ControlReceiver", self,
        port=controlReceivePort,
        mode=rcl.UdpReceiver.Mode.Asynchronous)
    self.controlDecoder = rcl.ScalarOscDecoder(context, "ControlDecoder", self)
    self.controlDecoder.setup(dataType='float')
    self.parameterConnection(
        self.controlReceiver.parameterPort("messageOutput"),
        self.controlDecoder.parameterPort("datagramInput"))
    self.parameterConnection(self.controlDecoder.parameterPort("dataOut"),
                             self.multiRenderer.parameterPort("controlIn"))
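# Real-time playback sketch using VISR's audiointerfaces module, assuming
# the PortAudio backend is available in the local installation; flow is an
# rrl.AudioSignalFlow wrapping the renderer above.
import audiointerfaces as ai

aiConfig = ai.AudioInterface.Configuration(flow.numberOfCaptureChannels,
                                           flow.numberOfPlaybackChannels,
                                           48000,  # sampling frequency
                                           1024)   # block size (period)
audioIfc = ai.AudioInterfaceFactory.create("PortAudio", aiConfig, "")
audioIfc.registerCallback(flow)  # connect the signal flow to the audio callback
audioIfc.start()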
def __init__(self, context, name, parent,
             *,
             numberOfObjects,
             sofaFile=None,
             hrirPositions=None,
             hrirData=None,
             hrirDelays=None,
             headOrientation=None,
             dynamicITD=False,
             dynamicILD=False,
             hrirInterpolation=False,
             filterCrossfading=False,
             fftImplementation="default",
             headTrackingReceiver=None,
             headTrackingPositionalArguments=None,
             headTrackingKeywordArguments=None,
             sceneReceiveUdpPort=None):
    """
    Constructor.

    Parameters
    ----------
    context : visr.SignalFlowContext
        Standard visr.Component construction argument, holds the block size
        and the sampling frequency.
    name : string
        Name of the component, standard visr.Component construction argument.
    parent : visr.CompositeComponent
        Containing component if there is one, None if this is a top-level
        component of the signal flow.
    numberOfObjects: int
        Maximum number of audio objects.
    sofaFile: str, optional
        Optional SOFA file from which the HRIR data and associated
        information (HRIR measurement positions and delays) are loaded.
        If not provided, the information must be provided by the
        hrirPositions and hrirData arguments.
    hrirPositions: numpy.ndarray, optional
        Optional way to provide the measurement grid for the BRIR listener
        view directions. If a SOFA file is provided, this is optional and
        overrides the listener view data in the file. Otherwise this
        argument is mandatory. Dimension: #grid directions x (dimension of
        position argument).
    hrirData: numpy.ndarray, optional
        Optional way to provide the BRIR data. Dimension: #grid directions
        x #ears (2) x #loudspeakers x #ir length.
    hrirDelays: numpy.ndarray, optional
        Optional BRIR delays. If a SOFA file is given, this argument
        overrides a potential delay setting from the file. Otherwise, no
        extra delays are applied unless this option is provided. Dimension:
        #grid directions x #ears (2) x #loudspeakers.
    headOrientation: array-like, optional
        Head orientation in spherical coordinates (2- or 3-element vector or
        list). Either a static orientation (when no tracking is used), or
        the initial view direction.
    dynamicITD: bool, optional
        Whether the ITD is applied separately. This requires preprocessed
        HRIR data.
    dynamicILD: bool, optional
        Whether the ILD is computed and applied separately. At the moment
        this feature is not used (apart from applying the object gains).
    hrirInterpolation: bool, optional
        Whether the controller supports interpolation between neighbouring
        HRTF grid points. False means nearest neighbour (no interpolation),
        True enables barycentric interpolation.
    filterCrossfading: bool, optional
        Use a crossfading FIR filter matrix to avoid switching artifacts.
    fftImplementation: string, optional
        The FFT implementation to use. The default value enables VISR's
        default FFT library for the platform.
    headTrackingReceiver: class type, optional
        Class of the head tracking receiver; None (default value) disables
        dynamic head tracking.
    headTrackingPositionalArguments: tuple, optional
        Positional arguments passed to the constructor of the head tracking
        receiver object. Must be a tuple. If there is only a single
        argument, a trailing comma must be added.
    headTrackingKeywordArguments: dict, optional
        Keyword arguments passed to the constructor of the head tracking
        receiver. Must be a dictionary (dict).
    sceneReceiveUdpPort: int, optional
        A UDP port number where scene object metadata (in the S3A JSON
        format) is to be received. If not given (default), no network
        receiver is instantiated, and the object exposes a top-level
        parameter input port "objectVector".
    """
    super(RealtimeDynamicHrirRenderer, self).__init__(context, name, parent)
    self.objectSignalInput = visr.AudioInputFloat("audioIn", self, numberOfObjects)
    self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)
    enableTracking = (headTrackingReceiver is not None)
    # Handle loading of HRIR data from either a SOFA file or the matrix arguments.
    if (hrirData is not None) == (sofaFile is not None):
        raise ValueError("Exactly one of the arguments sofaFile and hrirData must be present.")
    if sofaFile is not None:
        [sofaHrirPositions, hrirData, sofaHrirDelays] = readSofaFile(sofaFile)
        # If hrirDelays is not provided as an argument, use the delays
        # retrieved from the SOFA file.
        if hrirDelays is None:
            hrirDelays = sofaHrirDelays
        # Use the positions obtained from the SOFA file only if the argument
        # is not set.
        if hrirPositions is None:
            hrirPositions = sofaHrirPositions
    self.dynamicHrirRenderer = DynamicHrirRenderer(
        context, "DynamicBinauralRenderer", self,
        numberOfObjects=numberOfObjects,
        hrirPositions=hrirPositions,
        hrirData=hrirData,
        hrirDelays=hrirDelays,
        headOrientation=headOrientation,
        headTracking=enableTracking,
        dynamicITD=dynamicITD,
        dynamicILD=dynamicILD,
        hrirInterpolation=hrirInterpolation,
        filterCrossfading=filterCrossfading,
        fftImplementation=fftImplementation)
    if sceneReceiveUdpPort is None:
        self.objectVectorInput = visr.ParameterInput(
            "objectVector", self,
            pml.ObjectVector.staticType,
            pml.DoubleBufferingProtocol.staticType,
            pml.EmptyParameterConfig())
        self.parameterConnection(
            self.objectVectorInput,
            self.dynamicHrirRenderer.parameterPort("objectVector"))
    else:
        self.sceneReceiver = rcl.UdpReceiver(context, "SceneReceiver", self,
                                             port=int(sceneReceiveUdpPort))
        self.sceneDecoder = rcl.SceneDecoder(context, "SceneDecoder", self)
        self.parameterConnection(
            self.sceneReceiver.parameterPort("messageOutput"),
            self.sceneDecoder.parameterPort("datagramInput"))
        self.parameterConnection(
            self.sceneDecoder.parameterPort("objectVectorOutput"),
            self.dynamicHrirRenderer.parameterPort("objectVector"))
    if enableTracking:
        if headTrackingPositionalArguments is None:
            headTrackingPositionalArguments = ()
        if headTrackingKeywordArguments is None:
            headTrackingKeywordArguments = {}
        self.trackingDevice = headTrackingReceiver(
            context, "HeadTrackingReceiver", self,
            *headTrackingPositionalArguments,
            **headTrackingKeywordArguments)
        self.parameterConnection(
            self.trackingDevice.parameterPort("orientation"),
            self.dynamicHrirRenderer.parameterPort("tracking"))
    self.audioConnection(self.objectSignalInput,
                         self.dynamicHrirRenderer.audioPort("audioIn"))
    self.audioConnection(self.dynamicHrirRenderer.audioPort("audioOut"),
                         self.binauralOutput)
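# Construction sketch: exactly one of sofaFile and hrirData must be given,
# as enforced by the check in the constructor above. Here the SOFA variant
# is used; the file name is a placeholder and context is a
# visr.SignalFlowContext as in the earlier sketches.
renderer = RealtimeDynamicHrirRenderer(
    context, "DynamicHrirRenderer", None,
    numberOfObjects=4,
    sofaFile="hrir_measurement.sofa",
    dynamicITD=False,
    hrirInterpolation=True,
    filterCrossfading=True)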
def __init__(self, context, name, parent,
             numberOfObjects,
             port, baud,
             sofaFile,
             enableSerial=True,
             dynamicITD=True,
             dynamicILD=True,
             hrirInterpolation=False,
             udpReceivePort=4242,
             headTrackingCalibrationPort=None,
             filterCrossfading=False):
    super(DynamicBinauralRendererSAW, self).__init__(context, name, parent)
    self.dynamicBinauralRenderer = DynamicBinauralRendererSerial(
        context, "DynamicBinauralRenderer", self,
        numberOfObjects, port, baud, sofaFile,
        enableSerial=enableSerial,
        dynITD=dynamicITD,
        dynILD=dynamicILD,
        hrirInterp=hrirInterpolation,
        headTrackingCalibrationPort=headTrackingCalibrationPort,
        filterCrossfading=filterCrossfading)
    self.sceneReceiver = rcl.UdpReceiver(
        context, "SceneReceiver", self,
        port=udpReceivePort,
        mode=rcl.UdpReceiver.Mode.Asynchronous)
    self.sceneDecoder = rcl.SceneDecoder(context, "SceneDecoder", self)
    self.parameterConnection(
        self.sceneReceiver.parameterPort("messageOutput"),
        self.sceneDecoder.parameterPort("datagramInput"))
    self.parameterConnection(
        self.sceneDecoder.parameterPort("objectVectorOutput"),
        self.dynamicBinauralRenderer.parameterPort("objectVector"))
    self.objectSignalInput = visr.AudioInputFloat("audioIn", self, numberOfObjects)
    self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)
    self.audioConnection(self.objectSignalInput,
                         self.dynamicBinauralRenderer.audioPort("audioIn"))
    self.audioConnection(self.dynamicBinauralRenderer.audioPort("audioOut"),
                         self.binauralOutput)
    if headTrackingCalibrationPort is not None:
        self.calibrationTriggerReceiver = rcl.UdpReceiver(
            context, "CalibrationTriggerReceiver", self,
            port=headTrackingCalibrationPort)
        self.parameterConnection(
            self.calibrationTriggerReceiver.parameterPort("messageOutput"),
            self.dynamicBinauralRenderer.parameterPort("headTrackingCalibration"))
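# Calibration trigger sketch: the CalibrationTriggerReceiver above forwards
# received datagrams to the renderer's headTrackingCalibration port.
# Assumption: the payload shown is a placeholder; the serial renderer may
# expect a specific message content.
import socket

headTrackingCalibrationPort = 8890  # example port number
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(b"calibrate", ("127.0.0.1", headTrackingCalibrationPort))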