Example #1
    def __init__(
        self,
        context,
        name,
        parent,  # Standard visr component constructor arguments
        numberOfObjects,  # The number of point source objects rendered.
        hoaOrder,  # The Ambisonics order for encoding the objects
        channelAllocation=False  # Whether to allocate object channels dynamically (not used at the moment)
    ):
        """
        Constructor.

        Parameters
        ----------
        numberOfObjects: int
            The maximum number of audio objects to be rendered.
        hoaOrder: int
            The Ambisonics order for encoding the objects.
        channelAllocation: bool, optional
            Whether to send dynamic channel allocation data. Not used at the moment.
            Default value means that the object channels are allocated statically and correspond to the
            object's channel id.
        """
        # Call base class (AtomicComponent) constructor
        super(HoaObjectEncoder, self).__init__(context, name, parent)
        self.numberOfObjects = numberOfObjects
        self.hoaOrder = hoaOrder
        self.numHoaCoeffs = (self.hoaOrder + 1)**2

        # %% Define parameter ports
        self.objectInput = visr.ParameterInput(
            "objectVector", self, pml.ObjectVector.staticType,
            pml.DoubleBufferingProtocol.staticType, pml.EmptyParameterConfig())
        self.objectInputProtocol = self.objectInput.protocolInput()

        matrixConfig = pml.MatrixParameterConfig(self.numHoaCoeffs,
                                                 self.numberOfObjects)
        self.coefficientOutput = visr.ParameterOutput(
            "coefficientOutput", self, pml.MatrixParameterFloat.staticType,
            pml.SharedDataProtocol.staticType, matrixConfig)
        self.coefficientOutputProtocol = self.coefficientOutput.protocolOutput()

        if channelAllocation:
            self.channelAllocator = rbbl.ObjectChannelAllocator(
                self.numberOfObjects)
            self.usedChannels = set()  # Initialised with an empty set.
            self.routingOutput = visr.ParameterOutput(
                "routingOutput", self, pml.SignalRoutingParameter.staticType,
                pml.DoubleBufferingProtocol.staticType,
                pml.EmptyParameterConfig())
            self.routingOutputProtocol = self.routingOutput.protocolOutput()
        else:
            self.routingOutputProtocol = None
            self.channelAllocator = None
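
The (hoaOrder + 1)**2 term is the number of real spherical harmonics up to the given order, so order 3 yields 16 coefficients and the shared coefficient matrix is 16 x numberOfObjects. A minimal instantiation sketch, assuming the visr Python bindings are importable and that visr.SignalFlowContext takes the block size and sampling frequency, as in the VISR documentation (values are illustrative):

import visr

ctxt = visr.SignalFlowContext(64, 48000)  # block size (period), sampling frequency
encoder = HoaObjectEncoder(ctxt, "HoaEncoder", None,
                           numberOfObjects=8, hoaOrder=3)
# The "coefficientOutput" port now exposes a 16 x 8 shared matrix parameter.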
Example #2
    def __init__(
            self,
            context,
            name,
            parent,
            numberOfChannels,
            numberOfInputs,
            initialInput=0,
            controlDataType=pml.UnsignedInteger  # Parameter type of the control messages.
    ):
        super(AudioSignalSwitch, self).__init__(context, name, parent)
        self.inputs = []
        for inIdx in range(0, numberOfInputs):
            portName = "in_%d" % inIdx
            self.inputs.append(
                visr.AudioInputFloat(portName, self, numberOfChannels))

        self.output = visr.AudioOutputFloat("out", self, numberOfChannels)
        self.controlInput = visr.ParameterInput(
            "controlIn",
            self,
            protocolType=pml.MessageQueueProtocol.staticType,
            parameterType=controlDataType.staticType,
            parameterConfig=pml.EmptyParameterConfig())
        self.activeInput = initialInput
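
A sketch of the process() method that would complete this switch: drain the control message queue to pick the active input, then copy that input's samples to the output. The protocol accessors empty(), front() and pop() and the .value attribute of the received parameter are assumptions based on the patterns in the other examples, not verified API:

    def process(self):
        protocolIn = self.controlInput.protocolInput()
        while not protocolIn.empty():
            self.activeInput = int(protocolIn.front().value)  # assumed accessor
            protocolIn.pop()
        # Forward the currently selected input; indices are not range-checked here.
        self.output.set(self.inputs[self.activeInput].data())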
Example #3
    def __init__(self, context, name, parent, *, calibrationPort, **razorArgs):
        """
        Constructor.

        context : visr.SignalFlowContext
            Standard visr.Component construction argument, a structure holding the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        calibrationPort: int
            A UDP port number. Packets sent to this port trigger the calibration.
        razorArg: keyword list
            Set of parameters to the RazorAHRS. See this class for parameter documentation.
        """
        super(RazorAHRSWithUdpCalibrationTrigger,
              self).__init__(context, name, parent)
        self.trackingOutput = visr.ParameterOutput(
            "orientation", self, pml.ListenerPosition.staticType,
            pml.DoubleBufferingProtocol.staticType, pml.EmptyParameterConfig())

        razorArgs['calibrationInput'] = True  # Set the keyword argument (overriding any value already present).
        self.tracker = RazorAHRS(context, "Tracker", self, **razorArgs)
        self.triggerReceiver = UdpReceiver(context,
                                           "CalibrationTriggerReceiver",
                                           self,
                                           port=calibrationPort)
        self.parameterConnection(
            self.triggerReceiver.parameterPort("messageOutput"),
            self.tracker.parameterPort("calibration"))
        self.parameterConnection(self.tracker.parameterPort("orientation"),
                                 self.trackingOutput)
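
Since any UDP packet arriving at calibrationPort triggers the calibration, a trigger can be sent from a separate Python shell with the standard library alone (host and port are illustrative):

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(b"calibrate", ("127.0.0.1", 8889))  # payload content is irrelevant; arrival triggers calibration
sock.close()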
Example #4
    def __init__(self,
                 context,
                 name,
                 parent,
                 numberOfChannels,
                 measurePeriod=0.4,
                 channelWeights=None,
                 audioOut=False):
        # Call the base class constructor
        super(LoudnessMeter, self).__init__(context, name, parent)
        # Define an audio input port with name "audioIn" and width (number of signal waveforms) numberOfChannels
        self.audioInput = visr.AudioInputFloat("audioIn", self,
                                               numberOfChannels)

        # If the option is set, add an audio output to put out the K-weighted input signals.
        # Some audio interfaces don't like configs with no outputs.
        if audioOut:
            self.audioOutput = visr.AudioOutputFloat("audioOut", self,
                                                     numberOfChannels)
        else:
            self.audioOutput = None

        # Define a parameter output port with type "Float" and communication protocol "MessageQueue"
        # MessageQueue means that all computed data are held in a first-in-first-out queue,
        # which decouples the parameter update rate from the buffer size.
        self.loudnessOut = visr.ParameterOutput(
            "loudnessOut", self, pml.Float.staticType,
            pml.MessageQueueProtocol.staticType, pml.EmptyParameterConfig())

        # %% Setup data used in the process() function.

        # Round the measurement period to the next multiple of the buffer period
        numMeanBlocks = int(
            np.ceil(
                (measurePeriod * context.samplingFrequency) / context.period))
        self.pastPower = np.zeros(numMeanBlocks, dtype=np.float32)

        # IIR filter state to be preserved between process() calls
        self.filterState = np.zeros((2, numberOfChannels, 2), dtype=np.float32)

        # IIR coefficients for K-weighting (first row: high-frequency shelving filter,
        # second row: high-pass filter), taken from ITU-R BS.1770-4, which specifies them
        # for a 48 kHz sampling rate. Each row holds one biquad as [b0 b1 b2 a0 a1 a2].
        # https://www.itu.int/dms_pubrec/itu-r/rec/bs/R-REC-BS.1770-4-201510-I!!PDF-E.pdf
        self.Kweighting = np.asarray(
            [[1.53512485958697, -2.69169618940638, 1.19839281085285,
              1.0, -1.69065929318241, 0.73248077421585],
             [1.0, -2.0, 1.0,
              1.0, -1.99004745483398, 0.99007225036621]],
            dtype=np.float32)

        # Initialise weightings for the channels.
        # Use unit weighting if none are given
        if channelWeights is not None:
            self.channelWeights = np.asarray(channelWeights, dtype=np.float32)
            if self.channelWeights.shape[0] != numberOfChannels:
                raise ValueError(
                    "The channelWeights argument does not match the number of channels"
                )
        else:
            self.channelWeights = np.ones(numberOfChannels, dtype=np.float32)
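
The two K-weighting rows are already laid out as [b0 b1 b2 a0 a1 a2] second-order sections, and the (2, numberOfChannels, 2) filter state matches the state shape of scipy.signal.sosfilt. A sketch of the per-block update in process(), omitting the gating of BS.1770 and assuming that the MessageQueue output supports enqueue() and that pml.Float can be constructed from a scalar:

    def process(self):
        from scipy.signal import sosfilt  # local import keeps the sketch self-contained
        x = self.audioInput.data()  # shape: (numberOfChannels, blockSize)
        y, self.filterState = sosfilt(self.Kweighting, x, axis=-1,
                                      zi=self.filterState)
        if self.audioOutput is not None:
            self.audioOutput.set(y)
        # Channel-weighted mean-square power of this block, kept in a ring of past blocks.
        blockPower = float(np.dot(self.channelWeights, np.mean(y * y, axis=-1)))
        self.pastPower = np.roll(self.pastPower, 1)
        self.pastPower[0] = blockPower
        # Ungated loudness per ITU-R BS.1770: -0.691 + 10*log10(weighted mean-square power).
        loudness = -0.691 + 10.0 * np.log10(np.mean(self.pastPower) + 1.0e-12)
        self.loudnessOut.protocolOutput().enqueue(pml.Float(loudness))  # assumed enqueue API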
Example #5
 def __init__(self,
              context,
              name,
              parent,
              loudspeakerConfig,
              numberOfInputs,
              rendererOutputs,
              interpolationPeriod,
              diffusionFilters,
              trackingConfiguration,
              brirRouting,
              brirFilters,
              scenePort=4242,
              reverbConfiguration=''):
     super(ReverbToBinaural, self).__init__(context, name, parent)
     self.coreRenderer = signalflows.BaselineRenderer(
         context,
         'renderer',
         self,
         loudspeakerConfig=loudspeakerConfig,
         numberOfInputs=numberOfInputs,
         numberOfOutputs=rendererOutputs,
         interpolationPeriod=interpolationPeriod,
         diffusionFilters=diffusionFilters,
         reverbConfig=reverbConfiguration,
         sceneReceiverPort=scenePort,
         trackingConfiguration=trackingConfiguration)
     numFilters = brirFilters.numberOfRows
     firLength = brirFilters.numberOfColumns
     numRoutings = brirRouting.size
     self.convolver = rcl.FirFilterMatrix(
         context,
         'convolver',
         self,
         numberOfInputs=rendererOutputs,
         numberOfOutputs=2,
         maxFilters=numFilters,
         filterLength=firLength,
         maxRoutings=numRoutings,
         filters=brirFilters,
         routings=brirRouting,
         controlInputs=rcl.FirFilterMatrix.ControlPortConfig.NoInputs)
     self.audioIn = visr.AudioInputFloat("audioIn", self, numberOfInputs)
     self.audioOut = visr.AudioOutputFloat("audioOut", self, 2)
     self.audioConnection(self.audioIn,
                          self.coreRenderer.audioPort("input"))
     self.audioConnection(self.coreRenderer.audioPort("output"),
                          self.convolver.audioPort("in"))
     self.audioConnection(self.convolver.audioPort("out"), self.audioOut)
     if len(trackingConfiguration) > 0:
         self.posIn = visr.ParameterInput(
             "posIn", self, pml.ListenerPosition.staticType,
             pml.DoubleBufferingProtocol.staticType,
             pml.EmptyParameterConfig())
         self.parameterConnection(
             self.posIn,
             self.coreRenderer.parameterPort("trackingPositionInput"))
Example #6
    def __init__( self,
                  context, name, parent,
                  numberOfObjects,
                  hoaOrder,
                  dynamicUpdates = False,
                  headOrientation = None
                  ):
        """
        Constructor.

        Parameters
        ----------
        context: visr.SignalFlowContext
            Structure containing block size and sampling frequency, standard visr component construction parameter.
        name: string
            Name of the component, can be chosen freely as long as it is unique within the containing component.
        parent: visr.CompositeComponent or NoneType
            The containing composite component, or None for a top-level component.
        numberOfObjects: int
            The number of objects to be rendered, i.e., columns in the received spherical harmonics matrices.
        hoaOrder: int
            The order of the spherical harmonics. Defines the number of rows of the processed matrices ((hoaOrder+1)^2)
        dynamicUpdates: bool, optional
            Whether the rotation matrices are updated dynamically through the head tracking input.
        headOrientation: array-like (2- or 3-element) or NoneType
            The initial head rotation, or the static head orientation if dynamic updates are deactivated. Given as yaw, pitch, roll.
        """
        # Call base class (AtomicComponent) constructor
        super( HoaCoefficientRotation, self ).__init__( context, name, parent )
        self.numberOfObjects = numberOfObjects
        self.hoaOrder = hoaOrder
        self.numHoaCoeffs = (self.hoaOrder+1)**2

        # %% Define parameter ports
        matrixConfig = pml.MatrixParameterConfig( self.numHoaCoeffs, self.numberOfObjects )
        self.coefficientInput = visr.ParameterInput( "coefficientInput", self,
                                                pml.MatrixParameterFloat.staticType,
                                                pml.SharedDataProtocol.staticType,
                                                matrixConfig )
        self.coefficientInputProtocol = self.coefficientInput.protocolInput()
        self.coefficientOutput = visr.ParameterOutput( "coefficientOutput", self,
                                                pml.MatrixParameterFloat.staticType,
                                                pml.SharedDataProtocol.staticType,
                                                matrixConfig )
        self.coefficientOutputProtocol = self.coefficientOutput.protocolOutput()

        # Instantiate the head tracker input.
        self.trackingInput = visr.ParameterInput( "tracking", self, pml.ListenerPosition.staticType,
                                                  pml.DoubleBufferingProtocol.staticType,
                                                  pml.EmptyParameterConfig() )
        self.trackingInputProtocol = self.trackingInput.protocolInput()

        if headOrientation is None:
            headOrientation = np.zeros( (3), np.float32 )
        R1 = rotationMatrixReorderingACN( calcRotationMatrix( headOrientation ) )
        self.rotationMatrices = allSphericalHarmonicsRotationMatrices( self.hoaOrder, R1 )
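
For reference, the kind of 3x3 matrix that calcRotationMatrix presumably produces from a yaw/pitch/roll triple, written out in plain numpy (intrinsic z-y'-x'' convention; this illustrates the math, it is not the VISR helper itself):

import numpy as np

def yprRotationMatrix(yaw, pitch, roll):
    """Rotation matrix for yaw, pitch, roll angles given in radians."""
    cy, sy = np.cos(yaw), np.sin(yaw)
    cp, sp = np.cos(pitch), np.sin(pitch)
    cr, sr = np.cos(roll), np.sin(roll)
    Rz = np.array([[cy, -sy, 0.0], [sy, cy, 0.0], [0.0, 0.0, 1.0]])  # yaw about z
    Ry = np.array([[cp, 0.0, sp], [0.0, 1.0, 0.0], [-sp, 0.0, cp]])  # pitch about y
    Rx = np.array([[1.0, 0.0, 0.0], [0.0, cr, -sr], [0.0, sr, cr]])  # roll about x
    return Rz @ Ry @ Rx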
Example #7
    def __init__(self, context, name, parent, numberOfObjects, lspConfig):
        """
        Constructor, instantiates the component, all contained sub-components,
        and their connections.

        Parameters
        ----------

        self: VbapRenderer
            self argument, mandatory for Python methods.
        context: visr.SignalFlowContext
            A context object containing the sampling frequency and the block size.
            That is a mandatory parameter for VISR components.
        name: string
            Name of the component to be identified within a containing component.
        parent: visr.CompositeComponent
            A containing component, or None if this is the top-level component.
        numberOfObjects: int
            The maximum number of objects to be rendered.
        lspConfig: panning.LoudspeakerArray
            Object containing the loudspeaker positions.
        """
        numLsp = lspConfig.numberOfRegularLoudspeakers
        super().__init__(context, name, parent)
        self.audioIn = visr.AudioInputFloat("in", self, numberOfObjects)
        self.audioOut = visr.AudioOutputFloat("out", self, numLsp)
        self.objectIn = visr.ParameterInput(
            "objects", self, pml.ObjectVector.staticType,
            pml.DoubleBufferingProtocol.staticType, pml.EmptyParameterConfig())
        self.calculator = rcl.PanningCalculator(context, "VbapGainCalculator",
                                                self, numberOfObjects,
                                                lspConfig)
        self.matrix = rcl.GainMatrix(context,
                                     "GainMatrix",
                                     self,
                                     numberOfObjects,
                                     numLsp,
                                     interpolationSteps=context.period,
                                     initialGains=0.0)
        # Uncomment this and comment the lines above to use the simple, Python-based
        # GainMatrix class instead.
        #        self.matrix = GainMatrix( context, "GainMatrix", self, numberOfObjects,
        #                                  numLsp )
        self.audioConnection(self.audioIn, self.matrix.audioPort("in"))
        self.audioConnection(self.matrix.audioPort("out"), self.audioOut)
        self.parameterConnection(
            self.objectIn, self.calculator.parameterPort("objectVectorInput"))
        self.parameterConnection(self.calculator.parameterPort("vbapGains"),
                                 self.matrix.parameterPort("gainInput"))
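
A sketch of driving this renderer offline, assuming the rrl runtime module and that panning.LoudspeakerArray can be constructed from a loudspeaker configuration XML file, as in the VISR documentation (file name and sizes are illustrative):

import numpy as np
import visr, panning, rrl

blockSize, fs = 256, 48000
ctxt = visr.SignalFlowContext(blockSize, fs)
lspConfig = panning.LoudspeakerArray("stereo.xml")  # illustrative configuration file
renderer = VbapRenderer(ctxt, "renderer", None, 2, lspConfig)
flow = rrl.AudioSignalFlow(renderer)
inputBlock = np.zeros((2, blockSize), dtype=np.float32)  # one block of object signals
outputBlock = flow.process(inputBlock)                   # one block of loudspeaker signals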
Example #8
    def __init__(
            self,
            context,
            name,
            parent,
            positions,  # 3 x #points matrix of Cartesian object positions, one position per column.
            updateRateSamples=None,
            objectId=0,
            groupId=0,
            priority=0,
            objectLevel=1.0,
            objectChannel=None,
            diffuseness=None):
        super(PointSourceTrajectoryGenerator,
              self).__init__(context, name, parent)
        if updateRateSamples is None:
            updateRateSamples = self.period()
        if updateRateSamples % self.period() != 0:
            raise ValueError(
                "TrajectoryGenerator: The update rate must be a multiple of the period."
            )
        self.updateCycles = updateRateSamples // self.period()

        self.positions = positions
        self.numPositions = self.positions.shape[1]

        self.objectOutput = visr.ParameterOutput(
            "objectVectorOutput",
            self,
            parameterType=pml.ObjectVector.staticType,
            protocolType=pml.DoubleBufferingProtocol.staticType,
            parameterConfig=pml.EmptyParameterConfig())
        self.cycleCounter = 0
        self.positionCounter = 0

        if diffuseness is None:
            self.object = om.PointSource(objectId)
        else:
            raise ValueError("Diffuse point sources are not currently supported.")

        self.object.position = positions[:, 0]
        self.object.level = objectLevel
        self.object.groupId = groupId
        self.object.priority = priority
        if objectChannel is None:
            objectChannel = objectId
        self.object.channels = [objectChannel]
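
Matching the 3 x #points layout, a circular horizontal trajectory for this generator can be built with plain numpy (radius and resolution are arbitrary):

import numpy as np

numSteps = 360
azimuths = np.linspace(0.0, 2.0 * np.pi, numSteps, endpoint=False)
radius = 1.5
positions = np.stack((radius * np.cos(azimuths),
                      radius * np.sin(azimuths),
                      np.zeros(numSteps)))  # shape (3, numSteps), one position per column
# Suitable as the 'positions' argument of PointSourceTrajectoryGenerator above.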
Example #9
 def __init__( self, context, name, parent,
              processorConfig,
              objectVectorInput = True,  # receive input as pml.ObjectVector parameters (as opposed to JSON)
              objectVectorOutput = True, # send output as pml.ObjectVector parameters (as opposed to JSON)
              oscControlPort = False,
              jsonControlPort = False,
              alwaysProcess = True,
              verbose = False):
     """ Construction function. """
     super( Component, self ).__init__( context, name, parent )
     if objectVectorInput:
         self.textInput = False
         self.objectInput = visr.ParameterInput( "objectIn", self,
                                                pml.ObjectVector.staticType,
                                                pml.DoubleBufferingProtocol.staticType,
                                                pml.EmptyParameterConfig() )
     else:
         self.textInput = True
         self.objectInput = visr.ParameterInput( "objectIn", self,
                                                pml.StringParameter.staticType,
                                                pml.MessageQueueProtocol.staticType,
                                                pml.EmptyParameterConfig() )
     if objectVectorOutput:
         self.textOutput = False
         self.objectOutput = visr.ParameterOutput( "objectOut", self,
                                                pml.ObjectVector.staticType,
                                                pml.DoubleBufferingProtocol.staticType,
                                                pml.EmptyParameterConfig() )
     else:
         self.textOutput = True
         self.objectOutput = visr.ParameterOutput( "objectOut", self,
                                                pml.StringParameter.staticType,
                                                pml.MessageQueueProtocol.staticType,
                                                pml.EmptyParameterConfig() )
     if oscControlPort:
         self.oscControlInput = visr.ParameterInput( "oscControlIn", self,
                                                pml.StringParameter.staticType,
                                                pml.MessageQueueProtocol.staticType,
                                                pml.EmptyParameterConfig() )
     else:
         self.oscControlInput = None
     if jsonControlPort:
         self.jsonControlInput = visr.ParameterInput( "jsonControlIn", self,
                                                pml.StringParameter.staticType,
                                                pml.MessageQueueProtocol.staticType,
                                                pml.EmptyParameterConfig() )
     else:
         self.jsonControlInput = None
     self._engine = Engine( processorConfig, alwaysProcess, verbose )
Example #10
    def __init__(self, context, name, parent, numberOfInputs, numberOfOutputs,
                 arrayConfig, interpolationSteps):
        super(StandardVbap, self).__init__(context, name, parent)
        self.input = visr.AudioInputFloat("in", self, numberOfInputs)
        self.output = visr.AudioOutputFloat("out", self, numberOfOutputs)

        self.objectInput = visr.ParameterInput(
            "objectVectorInput",
            self,
            parameterType=pml.ObjectVector.staticType,
            protocolType=pml.DoubleBufferingProtocol.staticType,
            parameterConfig=pml.EmptyParameterConfig())
        self.panningCalculator = rcl.PanningCalculator(
            context,
            "GainCalculator",
            self,
            arrayConfig=arrayConfig,
            numberOfObjects=numberOfInputs,
            separateLowpassPanning=False)

        self.panningMatrix = rcl.GainMatrix(
            context,
            "PanningMatrix",
            self,
            numberOfInputs=numberOfInputs,
            numberOfOutputs=numberOfOutputs,
            interpolationSteps=interpolationSteps,
            initialGains=0.0,
            controlInput=True)

        self.audioConnection(self.input, self.panningMatrix.audioPort("in"))
        self.audioConnection(self.panningMatrix.audioPort("out"), self.output)

        self.parameterConnection(
            self.objectInput,
            self.panningCalculator.parameterPort("objectVectorInput"))
        self.parameterConnection(
            self.panningCalculator.parameterPort("gainOutput"),
            self.panningMatrix.parameterPort("gainInput"))
Example #11
 def __init__(self, context, name, parent, numberOfObjects, lspArray):
     numLsp = lspArray.numberOfRegularLoudspeakers
     super().__init__(context, name, parent)
     self.audioIn = visr.AudioInputFloat("in", self, numberOfObjects)
     self.audioOut = visr.AudioOutputFloat("out", self, numLsp)
     self.objectIn = visr.ParameterInput(
         "objects", self, pml.ObjectVector.staticType,
         pml.DoubleBufferingProtocol.staticType, pml.EmptyParameterConfig())
     self.calculator = VbapL2Panner(context, "VbapGainCalculator", self,
                                    numberOfObjects, lspArray)
     self.matrix = rcl.GainMatrix(context,
                                  "GainMatrix",
                                  self,
                                  numberOfObjects,
                                  numLsp,
                                  interpolationSteps=context.period,
                                  initialGains=0.0)
     self.audioConnection(self.audioIn, self.matrix.audioPort("in"))
     self.audioConnection(self.matrix.audioPort("out"), self.audioOut)
     self.parameterConnection(self.objectIn,
                              self.calculator.parameterPort("objects"))
     self.parameterConnection(self.calculator.parameterPort("gains"),
                              self.matrix.parameterPort("gainInput"))
Example #12
    def __init__(self,
                 context,
                 name,
                 parent,
                 arrayConfiguration,
                 numberOfListeners=2,
                 firLength=1024):
        super(TransauralCoefficientCalculator,
              self).__init__(context, name, parent)  # Call base constructor

        totalNumberOfFilters = numberOfListeners * 2  # Two filters (one per ear) per listener.

        # Create a parameter input for each listener.
        listPosParamType = pml.ListenerPosition.staticType
        doubleBufferProtocolType = pml.DoubleBufferingProtocol.staticType
        emptyCfg = pml.EmptyParameterConfig()  # Need to provide an 'empty' parameter config.
        self.listenerInputs = []
        for listenerIdx in range(0, numberOfListeners):
            portName = "listener%d" % listenerIdx
            self.listenerInputs.append(
                visr.ParameterInput(portName, self, listPosParamType,
                                    doubleBufferProtocolType, emptyCfg))
Example #13
 def __init__(
     self,
     context,
     name,
     parent,
     arrayConfig,
     numberOfObjects=1,
 ):
     super(PythonPanner, self).__init__(context, name, parent)
     self.numLsp = arrayConfig.numberOfRegularLoudspeakers
     self.objectInput = visr.ParameterInput(
         "objectVectorInput",
         self,
         parameterType=pml.ObjectVector.staticType,
         protocolType=pml.DoubleBufferingProtocol.staticType,
         parameterConfig=pml.EmptyParameterConfig())
     self.gainOutput = visr.ParameterOutput(
         "gainOutput",
         self,
         parameterType=pml.MatrixParameterFloat.staticType,
         protocolType=pml.SharedDataProtocol.staticType,
         parameterConfig=pml.MatrixParameterConfig(self.numLsp,
                                                   numberOfObjects))
     self.vbap = panning.VBAP(arrayConfig)
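
As background to what panning.VBAP computes in process(): pairwise 2-D VBAP expresses the source direction in the basis of the active loudspeaker pair and power-normalises the result. A self-contained numpy illustration of that principle (not the VISR class API):

import numpy as np

def vbapPairGains(sourceAzimuth, lspAzimuths):
    """Gains for a loudspeaker pair at lspAzimuths (radians) for a source at sourceAzimuth."""
    L = np.array([[np.cos(a), np.sin(a)] for a in lspAzimuths]).T  # columns: loudspeaker unit vectors
    p = np.array([np.cos(sourceAzimuth), np.sin(sourceAzimuth)])   # source unit vector
    g = np.linalg.solve(L, p)        # unnormalised gains
    return g / np.linalg.norm(g)     # power normalisation: sum of squared gains == 1

print(vbapPairGains(np.radians(10.0), [np.radians(-30.0), np.radians(30.0)]))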
Example #14
    def __init__(self,
                 context,
                 name,
                 parent,
                 loudspeakerConfig,
                 numberOfInputs,
                 numberOfOutputs,
                 interpolationSteps=None,
                 controlDataType=pml.Float):
        super(PanningComparison, self).__init__(context, name, parent)

        if interpolationSteps is None:
            interpolationSteps = context.period

        numberOfRenderers = 2

        self.input = visr.AudioInputFloat("in", self, numberOfInputs)
        self.output = visr.AudioOutputFloat("out", self, numberOfOutputs)
        self.objectInput = visr.ParameterInput(
            "objectIn",
            self,
            protocolType=pml.DoubleBufferingProtocol.staticType,
            parameterType=pml.ObjectVector.staticType,
            parameterConfig=pml.EmptyParameterConfig())
        self.controlInput = visr.ParameterInput(
            "controlIn",
            self,
            protocolType=pml.MessageQueueProtocol.staticType,
            parameterType=controlDataType.staticType,
            parameterConfig=pml.EmptyParameterConfig())

        self.signalSwitch = AudioSignalSwitch(context,
                                              "OutputSwitch",
                                              self,
                                              numberOfChannels=numberOfOutputs,
                                              numberOfInputs=numberOfRenderers,
                                              controlDataType=controlDataType)
        self.parameterConnection(self.controlInput,
                                 self.signalSwitch.parameterPort('controlIn'))
        self.audioConnection(self.signalSwitch.audioPort('out'), self.output)

        self.standardRenderer = StandardVbap(
            context,
            "StandardVbap",
            self,
            arrayConfig=loudspeakerConfig,
            numberOfInputs=numberOfInputs,
            numberOfOutputs=numberOfOutputs,
            interpolationSteps=interpolationSteps)

        self.lfHfRenderer = LfHfVbap(context,
                                     "LfHfVbap",
                                     self,
                                     arrayConfig=loudspeakerConfig,
                                     numberOfInputs=numberOfInputs,
                                     numberOfOutputs=numberOfOutputs,
                                     interpolationSteps=interpolationSteps)
        # Use default filters for the moment
        self.audioConnection(self.input, self.standardRenderer.audioPort('in'))
        self.audioConnection(self.standardRenderer.audioPort('out'),
                             self.signalSwitch.audioPort('in_0'))
        self.parameterConnection(
            self.objectInput,
            self.standardRenderer.parameterPort('objectVectorInput'))

        self.audioConnection(self.input, self.lfHfRenderer.audioPort('in'))
        self.audioConnection(self.lfHfRenderer.audioPort('out'),
                             self.signalSwitch.audioPort('in_1'))
        self.parameterConnection(
            self.objectInput,
            self.lfHfRenderer.parameterPort('objectVectorInput'))
Example #15
    def __init__(
            self,
            context,
            name,
            parent,
            *,  # Only keyword arguments after this point
            numberOfObjects,
            maxHoaOrder,
            sofaFile=None,
            decodingFilters=None,
            interpolationSteps=None,
            headTracking=True,
            headOrientation=None,
            objectChannelAllocation=False,
            fftImplementation="default",
            headTrackingReceiver=None,
            headTrackingPositionalArguments=None,
            headTrackingKeywordArguments=None,
            sceneReceiveUdpPort=None):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, holds the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        numberOfObjects : int
            The number of audio objects to be rendered.
        maxHoaOrder: int
            HOA order used for encoding the point source and plane wave objects.
        sofaFile: string, optional
            A SOFA file containing the HOA decoding filters. These are expected as a
            2 x (maxHoaOrder+1)^2 array in the field Data.IR.
        decodingFilters : numpy.ndarray, optional
            Alternative way to provide the HOA decoding filters. Expects a
            2 x (maxHoaOrder+1)^2 matrix containing FIR coefficients.
        interpolationSteps: int, optional
           Number of samples to transition to new object positions after an update.
        headOrientation : array-like
            Head orientation in spherical coordinates (2- or 3-element vector or list). Either a static orientation (when no tracking is used),
            or the initial view direction
        headTracking: bool
            Whether dynamic head tracking is active.
        objectChannelAllocation: bool
            Whether the processing resources are allocated from a pool of resources
            (True), or whether fixed processing resources statically tied to the audio signal channels are used.
            Not implemented at the moment, so leave the default value (False).
        fftImplementation: string, optional
            The FFT implementation to use. Default value enables VISR's default
            FFT library for the platform.
        headTrackingReceiver: class type, optional
            Class of the head tracking receiver, None (default value) disables dynamic head tracking.
        headTrackingPositionalArguments: tuple, optional
            Positional arguments passed to the constructor of the head tracking receiver object.
            Must be a tuple. If there is only a single argument, a trailing comma must be added.
        headTrackingKeywordArguments: dict, optional
            Keyword arguments passed to the constructor of the head tracking receiver. Must be a dictionary (dict)
        sceneReceiveUdpPort: int, optional
            A UDP port number where scene object metadata (in the S3A JSON format) is to be received.
            If not given (default), no network receiver is instantiated, and the object exposes a
            top-level parameter input port "objectVectorInput"
        """
        super(RealtimeHoaObjectToBinauralRenderer,
              self).__init__(context, name, parent)
        self.objectSignalInput = visr.AudioInputFloat("audioIn", self,
                                                      numberOfObjects)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)

        enableTracking = (headTrackingReceiver is not None)

        self.hoaBinauralRenderer = HoaObjectToBinauralRenderer(
            context,
            "HoaBinauralRenderer",
            self,
            numberOfObjects=numberOfObjects,
            maxHoaOrder=maxHoaOrder,
            sofaFile=sofaFile,
            decodingFilters=decodingFilters,
            interpolationSteps=interpolationSteps,
            headTracking=headTracking,
            headOrientation=headOrientation,
            fftImplementation=fftImplementation)

        if sceneReceiveUdpPort is None:
            self.objectVectorInput = visr.ParameterInput(
                "objectVector", self, pml.ObjectVector.staticType,
                pml.DoubleBufferingProtocol.staticType,
                pml.EmptyParameterConfig())
            self.parameterConnection(
                self.objectVectorInput,
                self.hoaBinauralRenderer.parameterPort("objects"))
        else:
            self.sceneReceiver = rcl.UdpReceiver(context,
                                                 "SceneReceiver",
                                                 self,
                                                 port=int(sceneReceiveUdpPort))
            self.sceneDecoder = rcl.SceneDecoder(context, "SceneDecoder", self)
            self.parameterConnection(
                self.sceneReceiver.parameterPort("messageOutput"),
                self.sceneDecoder.parameterPort("datagramInput"))
            self.parameterConnection(
                self.sceneDecoder.parameterPort("objectVectorOutput"),
                self.hoaBinauralRenderer.parameterPort("objects"))

        if enableTracking:
            if headTrackingPositionalArguments is None:
                headTrackingPositionalArguments = ()
            if headTrackingKeywordArguments is None:
                headTrackingKeywordArguments = {}
            self.trackingDevice = headTrackingReceiver(
                context, "HeadTrackingReceiver", self,
                *headTrackingPositionalArguments,
                **headTrackingKeywordArguments)
            self.parameterConnection(
                self.trackingDevice.parameterPort("orientation"),
                self.hoaBinauralRenderer.parameterPort("tracking"))

        self.audioConnection(self.objectSignalInput,
                             self.hoaBinauralRenderer.audioPort("audioIn"))
        self.audioConnection(self.hoaBinauralRenderer.audioPort("audioOut"),
                             self.binauralOutput)
Example #16
    def __init__( self,
                 context, name, parent,
                 *,
                 numberOfObjects,
                 sofaFile = None,
                 hrirPositions = None,
                 hrirData = None,
                 hrirDelays = None,
                 headOrientation = None,
                 dynamicITD = False,
                 dynamicILD = False,
                 hrirInterpolation = False,
                 filterCrossfading = False,
                 fftImplementation = "default",
                 headTrackingReceiver = None,
                 headTrackingPositionalArguments = None,
                 headTrackingKeywordArguments = None,
                 sceneReceiveUdpPort = None
                 ):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, holds the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        numberOfObjects: int
            Maximum number of audio objects
        sofaFile: str, optional
            Optional SOFA file for loading the HRIR and associated data (HRIR measurement positions and delays).
            If not provided, the information must be provided by the hrirPositions and hrirData arguments.
        hrirPositions: numpy.ndarray, optional
            Optional way to provide the measurement grid for the BRIR listener view directions.
            If a SOFA file is provided, this is optional and overrides the listener view data
            in the file. Otherwise this argument is mandatory.
            Dimension #grid directions x (dimension of position argument)
        hrirData: numpy.ndarray, optional
            Optional way to provide the BRIR data.
            Dimension: #grid directions  x #ears (2) # x #loudspeakers x #ir length
        hrirDelays: numpy.ndarray, optional
            Optional BRIR delays. If a SOFA file is given, this argument overrides
            a potential delay setting from the file. Otherwise, no extra delays
            are applied unless this option is provided.
            Dimension: #grid directions  x #ears(2) x # loudspeakers
        headOrientation: array-like, optional
            Head orientation in spherical coordinates (2- or 3-element vector or list).
            Either a static orientation (when no tracking is used), or the
            initial view direction
        dynamicITD: bool, optional
            Whether the ITD is applied separately. That requires preprocessed HRIR data
        dynamicILD: bool, optional
            Whether the ILD is computed and applied separately. At the moment this feature is not used (apart from applying the object gains)
        hrirInterpolation: bool, optional
            Whether the controller supports interpolation between neighbouring HRTF grid
            points. False means nearest neighbour (no interpolation), True
            enables barycentric interpolation.
        filterCrossfading: bool, optional
            Use a crossfading FIR filter matrix to avoid switching artifacts.
        fftImplementation: string, optional
            The FFT implementation to use. Default value enables VISR's default
            FFT library for the platform.
        headTrackingReceiver: class type, optional
            Class of the head tracking receiver, None (default value) disables dynamic head tracking.
        headTrackingPositionalArguments: tuple, optional
            Positional arguments passed to the constructor of the head tracking receiver object.
            Must be a tuple. If there is only a single argument, a trailing comma must be added.
        headTrackingKeywordArguments: dict, optional
            Keyword arguments passed to the constructor of the head tracking receiver. Must be a dictionary (dict)
        sceneReceiveUdpPort: int, optional
            A UDP port number where scene object metadata (in the S3A JSON format) is to be received.
            If not given (default), no network receiver is instantiated, and the object exposes a
            top-level parameter input port "objectVectorInput"
        """
        super( RealtimeDynamicHrirRenderer, self ).__init__( context, name, parent )
        self.objectSignalInput = visr.AudioInputFloat( "audioIn", self, numberOfObjects )
        self.binauralOutput = visr.AudioOutputFloat( "audioOut", self, 2 )

        enableTracking = (headTrackingReceiver is not None)

        # Handle loading of HRIR data from either a SOFA file or the matrix arguments.
        if (hrirData is not None) == (sofaFile is not None):
            raise ValueError( "Exactly one of the arguments sofaFile and hrirData must be present." )
        if sofaFile is not None:
            [ sofaHrirPositions, hrirData, sofaHrirDelays ] = readSofaFile( sofaFile )
            # If hrirDelays is not provided as an argument, use the one retrieved from the SOFA file
            if hrirDelays is None:
                hrirDelays = sofaHrirDelays
            # Use the positions obtained from the SOFA file only if the argument is not set
            if hrirPositions is None:
                hrirPositions = sofaHrirPositions

        self.dynamicHrirRenderer = DynamicHrirRenderer( context, "DynamicBinauralRenderer", self,
                                                       numberOfObjects = numberOfObjects,
                                                       hrirPositions = hrirPositions,
                                                       hrirData = hrirData,
                                                       hrirDelays = hrirDelays,
                                                       headOrientation = headOrientation,
                                                       headTracking = enableTracking,
                                                       dynamicITD = dynamicITD,
                                                       dynamicILD = dynamicILD,
                                                       hrirInterpolation = hrirInterpolation,
                                                       filterCrossfading = filterCrossfading,
                                                       fftImplementation = fftImplementation
                                                       )

        if sceneReceiveUdpPort is None:
            self.objectVectorInput = visr.ParameterInput( "objectVector", self, pml.ObjectVector.staticType,
                                                         pml.DoubleBufferingProtocol.staticType,
                                                         pml.EmptyParameterConfig() )
            self.parameterConnection( self.objectVectorInput,
                                     self.dynamicHrirRenderer.parameterPort("objectVector"))

        else:
            self.sceneReceiver = rcl.UdpReceiver( context, "SceneReceiver", self,
                                                 port = int(sceneReceiveUdpPort) )
            self.sceneDecoder = rcl.SceneDecoder( context, "SceneDecoder", self )
            self.parameterConnection( self.sceneReceiver.parameterPort("messageOutput"),
                                 self.sceneDecoder.parameterPort("datagramInput") )
            self.parameterConnection( self.sceneDecoder.parameterPort( "objectVectorOutput"),
                                 self.dynamicHrirRenderer.parameterPort("objectVector"))
        if enableTracking:
            if headTrackingPositionalArguments is None:
                headTrackingPositionalArguments = ()
            if headTrackingKeywordArguments is None:
                headTrackingKeywordArguments = {}
            self.trackingDevice = headTrackingReceiver(context, "HeadTrackingReceiver", self,
                                                *headTrackingPositionalArguments,
                                                **headTrackingKeywordArguments )
            self.parameterConnection( self.trackingDevice.parameterPort("orientation"), self.dynamicHrirRenderer.parameterPort("tracking"))

        self.audioConnection(  self.objectSignalInput, self.dynamicHrirRenderer.audioPort("audioIn"))
        self.audioConnection( self.dynamicHrirRenderer.audioPort("audioOut"), self.binauralOutput)
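
SOFA files are HDF5 (netCDF-4) containers, so the fields consumed by readSofaFile can be inspected directly; a sketch using h5py (the file name is illustrative, the dataset names follow the SOFA convention):

import h5py

with h5py.File("hrir.sofa", "r") as f:
    print(f["Data.IR"].shape)        # e.g. (#measurements, #ears, #ir length)
    print(f["SourcePosition"][:5])   # first few measurement grid directions
    if "Data.Delay" in f:
        print(f["Data.Delay"].shape) # optional per-measurement delays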
Example #17
    def __init__(self,
                 context,
                 name,
                 parent,
                 numberOfInputs,
                 numberOfOutputs,
                 arrayConfig,
                 interpolationSteps,
                 lfFilter=rbbl.BiquadCoefficientFloat(0.001921697757295,
                                                      0.003843395514590,
                                                      0.001921697757295,
                                                      -1.824651307057289,
                                                      0.832338098086468),
                 hfFilter=rbbl.BiquadCoefficientFloat(0.914247351285939,
                                                      1.828494702571878,
                                                      -0.914247351285939,
                                                      -1.824651307057289,
                                                      0.832338098086468)):
        super(LfHfVbap, self).__init__(context, name, parent)
        self.input = visr.AudioInputFloat("in", self, numberOfInputs)
        self.output = visr.AudioOutputFloat("out", self, numberOfOutputs)

        self.objectInput = visr.ParameterInput(
            "objectVectorInput",
            self,
            parameterType=pml.ObjectVector.staticType,
            protocolType=pml.DoubleBufferingProtocol.staticType,
            parameterConfig=pml.EmptyParameterConfig())

        self.panningCalculator = rcl.PanningCalculator(
            context,
            "GainCalculator",
            self,
            arrayConfig=arrayConfig,
            numberOfObjects=numberOfInputs,
            separateLowpassPanning=True)

        filterMtx = rbbl.BiquadCoefficientMatrixFloat(2 * numberOfInputs, 1)
        for idx in range(0, numberOfInputs):
            filterMtx[idx, 0] = lfFilter
            filterMtx[idx + numberOfInputs, 0] = hfFilter

        self.filterBank = rcl.BiquadIirFilter(
            context,
            "filterBank",
            self,
            numberOfChannels=2 * numberOfInputs,
            numberOfBiquads=1,  # TODO: allow variable number of sections.
            initialBiquads=filterMtx,
            controlInput=False)
        self.audioConnection(
            self.input,
            [i % numberOfInputs for i in range(0, 2 * numberOfInputs)],
            self.filterBank.audioPort("in"), range(0, 2 * numberOfInputs))

        self.lfMatrix = rcl.GainMatrix(context,
                                       "LfPanningMatrix",
                                       self,
                                       numberOfInputs=numberOfInputs,
                                       numberOfOutputs=numberOfOutputs,
                                       interpolationSteps=interpolationSteps,
                                       initialGains=0.0,
                                       controlInput=True)
        self.audioConnection(self.filterBank.audioPort("out"),
                             range(0, numberOfInputs),
                             self.lfMatrix.audioPort("in"),
                             range(0, numberOfInputs))

        self.hfMatrix = rcl.GainMatrix(context,
                                       "HfPanningMatrix",
                                       self,
                                       numberOfInputs=numberOfInputs,
                                       numberOfOutputs=numberOfOutputs,
                                       interpolationSteps=interpolationSteps,
                                       initialGains=0.0,
                                       controlInput=True)
        self.audioConnection(self.filterBank.audioPort("out"),
                             range(numberOfInputs, 2 * numberOfInputs),
                             self.hfMatrix.audioPort("in"),
                             range(0, numberOfInputs))

        self.signalMix = rcl.Add(context,
                                 "SignalMix",
                                 self,
                                 numInputs=2,
                                 width=numberOfOutputs)
        self.audioConnection(self.signalMix.audioPort("out"), self.output)

        self.audioConnection(self.lfMatrix.audioPort("out"),
                             self.signalMix.audioPort("in0"))
        self.audioConnection(self.hfMatrix.audioPort("out"),
                             self.signalMix.audioPort("in1"))

        self.parameterConnection(
            self.objectInput,
            self.panningCalculator.parameterPort("objectVectorInput"))
        self.parameterConnection(
            self.panningCalculator.parameterPort("lowFrequencyGainOutput"),
            self.lfMatrix.parameterPort("gainInput"))
        self.parameterConnection(
            self.panningCalculator.parameterPort("gainOutput"),
            self.hfMatrix.parameterPort("gainInput"))
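
To see how the two default biquads split and recombine the band, their magnitude responses can be plotted with scipy; the coefficients below are copied from the defaults above, and the 48 kHz axis scaling is an assumption:

import numpy as np
from scipy.signal import freqz

a = [1.0, -1.824651307057289, 0.832338098086468]  # shared denominator
lf_b = [0.001921697757295, 0.003843395514590, 0.001921697757295]
hf_b = [0.914247351285939, 1.828494702571878, -0.914247351285939]
w, hLf = freqz(lf_b, a, fs=48000.0)
_, hHf = freqz(hf_b, a, fs=48000.0)
crossover = w[np.argmin(np.abs(np.abs(hLf) - np.abs(hHf)))]
print("LF/HF responses intersect near %.1f Hz" % crossover)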
Example #18
    def __init__(self,
                 context,
                 name,
                 parent,
                 numberOfObjects,
                 maxHoaOrder=None,
                 sofaFile=None,
                 decodingFilters=None,
                 interpolationSteps=None,
                 headOrientation=None,
                 headTracking=True,
                 objectChannelAllocation=False,
                 fftImplementation='default'):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, holds the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        numberOfObjects : int
            The number of audio objects to be rendered.
        maxHoaOrder: int or None
            The maximum HOA order that can be reproduced. If None, the HOA order is deduced
            from the first dimension of the HOA filters (possibly contained in a SOFA file).
        sofaFile: string or NoneType
            Name of a SOFA file containing the HOA decoding filters (in the field Data.IR).
        decodingFilters : numpy.ndarray or NoneType
            Alternative way to provide the HOA decoding filters.
        interpolationSteps: int, optional
            Number of samples to transition to new coefficient values after an update. Defaults to the block size.
        headOrientation : array-like
            Head orientation in spherical coordinates (2- or 3-element vector or list). Either a static orientation (when no tracking is used),
            or the initial view direction
        headTracking: bool
            Whether dynamic head tracking is active.
        objectChannelAllocation: bool
            Whether the processing resources are allocated from a pool of resources
            (True), or whether fixed processing resources statically tied to the audio signal channels are used.
            Not implemented at the moment, so leave the default value (False).
        fftImplementation: string, optional
            The FFT library to be used in the filtering. The default uses VISR's
            default implementation for the present platform.
        """
        if (decodingFilters is None) == (sofaFile is None):
            raise ValueError(
                "HoaObjectToBinauralRenderer: Either 'decodingFilters' or 'sofaFile' must be provided."
            )
        if sofaFile is None:
            filters = decodingFilters
        else:
            # pos and delays are not used here.
            [pos, filters, delays] = readSofaFile(sofaFile)

        if maxHoaOrder is None:
            numHoaCoeffs = filters.shape[0]
            orderP1 = int(np.floor(np.sqrt(numHoaCoeffs)))
            if orderP1**2 != numHoaCoeffs:
                raise ValueError(
                    "If maxHoaOrder is not given, the number of HOA filters must be a square number"
                )
            maxHoaOrder = orderP1 - 1
        else:
            numHoaCoeffs = (maxHoaOrder + 1)**2

        if (filters.ndim != 3 or filters.shape[1] != 2
                or filters.shape[0] < numHoaCoeffs):
            raise ValueError(
                "HoaObjectToBinauralRenderer: the filter data must be a 3D matrix where the second dimension is 2 and the first dimension is equal or larger than (maxHoaOrder+1)^2."
            )

        super(HoaObjectToBinauralRenderer,
              self).__init__(context, name, parent)
        self.objectSignalInput = visr.AudioInputFloat("audioIn", self,
                                                      numberOfObjects)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)
        self.objectVectorInput = visr.ParameterInput(
            "objectVector", self, pml.ObjectVector.staticType,
            pml.DoubleBufferingProtocol.staticType, pml.EmptyParameterConfig())

        if interpolationSteps is None:
            interpolationSteps = context.period

        self.objectEncoder = HoaObjectEncoder(
            context,
            'HoaEncoder',
            self,
            numberOfObjects=numberOfObjects,
            hoaOrder=maxHoaOrder,
            channelAllocation=objectChannelAllocation)

        self.parameterConnection(
            self.objectVectorInput,
            self.objectEncoder.parameterPort("objectVector"))

        self.encoderMatrix = rcl.GainMatrix(
            context,
            "encoderMatrix",
            self,
            numberOfInputs=numberOfObjects,
            numberOfOutputs=(maxHoaOrder + 1)**2,
            interpolationSteps=interpolationSteps,
            initialGains=0.0,
            controlInput=True)
        self.audioConnection(self.objectSignalInput,
                             self.encoderMatrix.audioPort("in"))

        filterMtx = np.concatenate(
            (filters[0:numHoaCoeffs, 0, :], filters[0:numHoaCoeffs, 1, :]))

        routings = rbbl.FilterRoutingList()
        for idx in range(0, numHoaCoeffs):
            routings.addRouting(idx, 0, idx, 1.0)
            routings.addRouting(idx, 1, idx + numHoaCoeffs, 1.0)

        self.binauralFilterBank = rcl.FirFilterMatrix(
            context,
            'binauralFilterBank',
            self,
            numberOfInputs=numHoaCoeffs,
            numberOfOutputs=2,
            filterLength=filters.shape[-1],
            maxFilters=2 * numHoaCoeffs,
            maxRoutings=2 * numHoaCoeffs,
            filters=filterMtx,
            routings=routings,
            controlInputs=rcl.FirFilterMatrix.ControlPortConfig.NoInputs,
            fftImplementation=fftImplementation)

        self.audioConnection(self.encoderMatrix.audioPort("out"),
                             self.binauralFilterBank.audioPort("in"))
        self.audioConnection(self.binauralFilterBank.audioPort("out"),
                             self.binauralOutput)

        if headTracking:
            self.trackingInput = visr.ParameterInput(
                "tracking", self, pml.ListenerPosition.staticType,
                pml.DoubleBufferingProtocol.staticType,
                pml.EmptyParameterConfig())
            self.coefficientRotator = HoaCoefficientRotation(
                context,
                'coefficientRotator',
                self,
                numberOfObjects=numberOfObjects,
                hoaOrder=maxHoaOrder)
            self.parameterConnection(
                self.trackingInput,
                self.coefficientRotator.parameterPort("tracking"))
            self.parameterConnection(
                self.objectEncoder.parameterPort("coefficientOutput"),
                self.coefficientRotator.parameterPort("coefficientInput"))
            self.parameterConnection(
                self.coefficientRotator.parameterPort("coefficientOutput"),
                self.encoderMatrix.parameterPort("gainInput"))
        else:
            self.parameterConnection(
                self.objectEncoder.parameterPort("coefficientOutput"),
                self.encoderMatrix.parameterPort("gainInput"))
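
Conceptually, the encoder matrix / filter bank chain implements a binaural HOA decode: each of the numHoaCoeffs mixed signals is convolved with one decoding filter per ear and the results are summed. A plain numpy statement of that relationship (illustrative shapes, not the rcl.FirFilterMatrix API):

import numpy as np

def hoaBinauralDecode(hoaSignals, filters):
    """hoaSignals: (numHoaCoeffs, numSamples); filters: (numHoaCoeffs, 2, irLength)."""
    numCoeffs, numSamples = hoaSignals.shape
    irLength = filters.shape[-1]
    out = np.zeros((2, numSamples + irLength - 1))
    for ear in range(2):
        for c in range(numCoeffs):
            out[ear] += np.convolve(hoaSignals[c], filters[c, ear])
    return out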
Example #19
    def __init__(self,
                 context,
                 name,
                 parent,
                 loudspeakerConfigs,
                 numberOfInputs,
                 numberOfOutputs,
                 interpolationPeriod,
                 diffusionFilters,
                 trackingConfiguration='',
                 controlDataType=pml.UnsignedInteger):
        super(MultiRenderer, self).__init__(context, name, parent)
        self.input = visr.AudioInputFloat("in", self, numberOfInputs)
        self.output = visr.AudioOutputFloat("out", self, numberOfOutputs)
        self.objectInput = visr.ParameterInput(
            "objectIn",
            self,
            protocolType=pml.DoubleBufferingProtocol.staticType,
            parameterType=pml.ObjectVector.staticType,
            parameterConfig=pml.EmptyParameterConfig())
        self.controlInput = visr.ParameterInput(
            "controlIn",
            self,
            protocolType=pml.MessageQueueProtocol.staticType,
            parameterType=controlDataType.staticType,
            parameterConfig=pml.EmptyParameterConfig())

        numRenderers = len(loudspeakerConfigs)
        self.outputSwitch = AudioSignalSwitch(context,
                                              "OutputSwitch",
                                              self,
                                              numberOfChannels=numberOfOutputs,
                                              numberOfInputs=numRenderers,
                                              controlDataType=controlDataType)
        self.parameterConnection(self.controlInput,
                                 self.outputSwitch.parameterPort('controlIn'))
        self.audioConnection(self.outputSwitch.audioPort('out'), self.output)

        self.renderers = []
        for rendererIdx in range(0, numRenderers):
            rendererName = "renderer%d" % rendererIdx
            config = loudspeakerConfigs[rendererIdx]

            decorrFilters = pml.MatrixParameterFloat(
                diffusionFilters[0:config.numberOfRegularLoudspeakers, :])

            renderer = signalflows.CoreRenderer(
                context,
                rendererName,
                self,
                loudspeakerConfiguration=config,
                numberOfInputs=numberOfInputs,
                numberOfOutputs=numberOfOutputs,
                interpolationPeriod=interpolationPeriod,
                diffusionFilters=decorrFilters,
                trackingConfiguration=trackingConfiguration)
            self.audioConnection(self.input, renderer.audioPort('audioIn'))
            self.audioConnection(
                renderer.audioPort('audioOut'),
                self.outputSwitch.audioPort('in_%d' % rendererIdx))
            self.parameterConnection(self.objectInput,
                                     renderer.parameterPort('objectDataInput'))
            self.renderers.append(renderer)
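
# Hedged sketch (hypothetical class, no VISR dependency) of the pattern used by
# the MultiRenderer above: all renderers process in parallel, and a control
# message selects which renderer's output block reaches the downstream output.
import numpy as np

class OutputSwitchSketch:
    def __init__(self, numberOfInputs, initialInput=0):
        self.numberOfInputs = numberOfInputs
        self.activeInput = initialInput

    def control(self, index):  # corresponds to the "controlIn" message port
        if not 0 <= index < self.numberOfInputs:
            raise ValueError("Switch index out of range.")
        self.activeInput = index

    def process(self, inputBlocks):  # one audio block per renderer
        return inputBlocks[self.activeInput]

switch = OutputSwitchSketch(numberOfInputs=3)
blocks = [np.full((2, 64), float(i)) for i in range(3)]
switch.control(2)
assert np.all(switch.process(blocks) == 2.0)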
    def __init__(
            self,
            context,
            name,
            parent,  # Standard arguments for a VISR component
            numberOfObjects,  # Number of audio objects to be rendered.
            *,  # No positional arguments beyond this point
            sofaFile=None,  # Optional SOFA file from which the HRIR data is loaded.
            hrirPositions=None,  # Optional measurement grid for the BRIR listener view directions. If a SOFA file is
            # provided, this is optional and overrides the listener view data in the file. Otherwise this
            # argument is mandatory. Dimension: #grid directions x (dimension of position argument)
            hrirData=None,  # Optional way to provide the BRIR data. Dimension: #grid directions x #ears (2) x #loudspeakers x #ir length
            hrirDelays=None,  # Optional BRIR delays. If a SOFA file is given, this argument overrides a potential delay
            # setting from the file. Otherwise, no extra delays are applied unless this option is provided.
            # Dimension: #grid directions x #ears (2) x #loudspeakers
            headOrientation=None,  # Head orientation in spherical coordinates (2- or 3-element vector or list). Either a
            # static orientation (when no tracking is used), or the initial view direction.
            headTracking=True,  # Whether dynamic head tracking is used.
            dynamicITD=True,  # Whether the ITD is applied separately. This requires preprocessed HRIR data.
            dynamicILD=True,  # Whether the ILD is computed and applied separately. At the moment this feature is not used (apart from applying the object gains).
            hrirInterpolation=True,  # Whether the controller supports interpolation between neighbouring HRTF grid points.
            # False means nearest neighbour (no interpolation), True enables barycentric interpolation.
            filterCrossfading=False,  # Use a crossfading FIR filter matrix to avoid switching artifacts.
            interpolatingConvolver=False,  # Whether the interpolating convolver (storing all filters statically) is used.
            fftImplementation="default"  # The FFT implementation to use.
    ):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, holds the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        numberOfObjects: int
            Maximum number of audio objects
        sofaFile: str, optional
            Optional SOFA file for loading the HRIR and associated data (HRIR measurement positions and delays).
            If not provided, the information must be provided by the hrirPositions and hrirData arguments.
        hrirPositions: numpy.ndarray, optional
            Optional way to provide the measurement grid for the BRIR listener view directions.
            If a SOFA file is provided, this is optional and overrides the listener view data
            in the file. Otherwise this argument is mandatory.
            Dimension #grid directions x (dimension of position argument)
        hrirData: numpy.ndarray, optional
            Optional way to provide the BRIR data.
            Dimension: #grid directions  x #ears (2) # x #loudspeakers x #ir length
        hrirDelays: numpy.ndarray, optional
            Optional BRIR delays. If a SOFA file is given, this  argument overrides
            a potential delay setting from the file. Otherwise, no extra delays
            are applied unless this option is provided.
            Dimension: #grid directions  x #ears(2) x # loudspeakers
        headOrientation: array-like, optional
            Head orientation in spherical coordinates (2- or 3-element vector or list).
            Either a static orientation (when no tracking is used), or the
            initial view direction
        headTracking: bool
            Whether dynamic head tracking is supported. If True, a parameter input with type
            pml.ListenerPosition and protocol pml.DoubleBuffering is created.
        dynamicITD: bool, optional
            Whether the ITD is applied separately. That requires preprocessed HRIR data
        dynamicILD: bool, optional
            Whether the ILD is computed and applied separately. At the moment this feature is not used (apart from applying the object gains)
        hrirInterpolation: bool, optional
            Whether the controller supports interpolation between neighbouring HRTF grid
            points. False means nearest neighbour (no interpolation), True
            enables barycentric interpolation.
        filterCrossfading: bool, optional
            Use a crossfading FIR filter matrix to avoid switching artifacts.
        fftImplementation: string, optional
            The FFT implementation to use. Default value enables VISR's default
            FFT library for the platform.
        """
        super(DynamicHrirRenderer, self).__init__(context, name, parent)
        self.objectSignalInput = visr.AudioInputFloat("audioIn", self,
                                                      numberOfObjects)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)
        self.objectVectorInput = visr.ParameterInput(
            "objectVector", self, pml.ObjectVector.staticType,
            pml.DoubleBufferingProtocol.staticType, pml.EmptyParameterConfig())
        if headTracking:
            self.trackingInput = visr.ParameterInput(
                "tracking", self, pml.ListenerPosition.staticType,
                pml.DoubleBufferingProtocol.staticType,
                pml.EmptyParameterConfig())

        if (hrirData is None) == (sofaFile is None):
            raise ValueError(
                "Exactly one of the arguments sofaFile and hrirData must be present."
            )
        if sofaFile is not None:
            # We don't support HRIR truncation here because HRIRs are usually quite short.
            [sofaHrirPositions, hrirData,
             sofaHrirDelays] = readSofaFile(sofaFile)
            # If hrirDelays is not provided as an argument, use the one retrieved from the SOFA file
            if hrirDelays is None:
                hrirDelays = sofaHrirDelays
            # Use the positions obtained from the SOFA file only if the argument is not set
            if hrirPositions is None:
                hrirPositions = sofaHrirPositions

        if dynamicITD:
            if (hrirDelays is None) or (hrirDelays.ndim !=
                                        2) or (hrirDelays.shape !=
                                               (hrirData.shape[0], 2)):
                raise ValueError(
                    'If the "dynamicITD" option is given, the parameter "delays" must be a #hrirs x 2 matrix.'
                )

        self.dynamicHrirController = DynamicHrirController(
            context,
            "DynamicHrirController",
            self,
            numberOfObjects,
            hrirPositions,
            hrirData,
            useHeadTracking=headTracking,
            dynamicITD=dynamicITD,
            dynamicILD=dynamicILD,
            hrirInterpolation=hrirInterpolation,
            interpolatingConvolver=interpolatingConvolver,
            hrirDelays=hrirDelays)

        self.parameterConnection(
            self.objectVectorInput,
            self.dynamicHrirController.parameterPort("objectVector"))
        if headTracking:
            self.parameterConnection(
                self.trackingInput,
                self.dynamicHrirController.parameterPort("headTracking"))

        firLength = hrirData.shape[-1]

        # Used if the InterpolatingConvolver is selected.
        numberOfInterpolants = 3 if hrirInterpolation else 1
        interpolationSteps = context.period if filterCrossfading else 0

        if dynamicITD or dynamicILD:
            if dynamicITD:
                delayControls = rcl.DelayVector.ControlPortConfig.Delay
            else:
                delayControls = rcl.DelayVector.ControlPortConfig.No
            if dynamicILD:
                delayControls = delayControls | rcl.DelayVector.ControlPortConfig.Gain
                initialGain = 0.0  # If the ILD is applied in the DelayVector, start from zero.
            else:
                initialGain = 1.0  # Fixed setting as the gain of the delay vector is not used

            self.delayVector = rcl.DelayVector(
                context,
                "delayVector",
                self,
                numberOfObjects * 2,
                interpolationType="lagrangeOrder3",
                initialDelay=0,
                controlInputs=delayControls,
                methodDelayPolicy=rcl.DelayMatrix.MethodDelayPolicy.Add,
                initialGain=initialGain,
                interpolationSteps=context.period)

            inConnections = [
                i % numberOfObjects for i in range(numberOfObjects * 2)
            ]
            self.audioConnection(self.objectSignalInput, inConnections,
                                 self.delayVector.audioPort("in"),
                                 range(2 * numberOfObjects))

            # Define the routing for the binaural convolver such that it matches the layout of the
            # flat BRIR matrix.
            filterRouting = rbbl.FilterRoutingList()
            for idx in range(0, numberOfObjects):
                filterRouting.addRouting(idx, 0, idx, 1.0)
                filterRouting.addRouting(idx + numberOfObjects, 1,
                                         idx + numberOfObjects, 1.0)
            numMatrixInputs = 2 * numberOfObjects
        else:
            filterRouting = rbbl.FilterRoutingList()
            for idx in range(0, numberOfObjects):
                filterRouting.addRouting(idx, 0, idx, 1.0)
                filterRouting.addRouting(idx, 1, idx + numberOfObjects, 1.0)
            numMatrixInputs = numberOfObjects

        if interpolatingConvolver:
            numFilters = np.prod(np.array(hrirData.shape[0:-1]))
            filterReshaped = np.reshape(hrirData, (numFilters, firLength), 'C')
            self.convolver = rcl.InterpolatingFirFilterMatrix(
                context,
                'convolutionEngine',
                self,
                numberOfInputs=numMatrixInputs,
                numberOfOutputs=2,
                maxFilters=numFilters,
                filterLength=firLength,
                maxRoutings=2 * numberOfObjects,
                numberOfInterpolants=numberOfInterpolants,
                transitionSamples=interpolationSteps,
                filters=filterReshaped,
                routings=filterRouting,
                controlInputs=rcl.InterpolatingFirFilterMatrix.
                ControlPortConfig.Interpolants,
                fftImplementation=fftImplementation)
        elif filterCrossfading:
            self.convolver = rcl.CrossfadingFirFilterMatrix(
                context,
                'convolutionEngine',
                self,
                numberOfInputs=numMatrixInputs,
                numberOfOutputs=2,
                maxFilters=2 * numberOfObjects,
                filterLength=firLength,
                maxRoutings=2 * numberOfObjects,
                routings=filterRouting,
                transitionSamples=context.period,
                controlInputs=rcl.CrossfadingFirFilterMatrix.ControlPortConfig.
                Filters,
                fftImplementation=fftImplementation)
        else:
            self.convolver = rcl.FirFilterMatrix(
                context,
                'convolutionEngine',
                self,
                numberOfInputs=numMatrixInputs,
                numberOfOutputs=2,
                maxFilters=2 * numberOfObjects,
                filterLength=firLength,
                maxRoutings=2 * numberOfObjects,
                routings=filterRouting,
                controlInputs=rcl.FirFilterMatrix.ControlPortConfig.Filters,
                fftImplementation=fftImplementation)
        if dynamicITD or dynamicILD:
            self.audioConnection(self.delayVector.audioPort("out"),
                                 self.convolver.audioPort("in"))
            if dynamicITD:
                self.parameterConnection(
                    self.dynamicHrirController.parameterPort("delayOutput"),
                    self.delayVector.parameterPort("delayInput"))
            if dynamicILD:
                self.parameterConnection(
                    self.dynamicHrirController.parameterPort("gainOutput"),
                    self.delayVector.parameterPort("gainInput"))
        else:
            self.audioConnection(self.objectSignalInput,
                                 self.convolver.audioPort("in"))

        self.audioConnection(self.convolver.audioPort("out"),
                             self.binauralOutput)
        if interpolatingConvolver:
            self.parameterConnection(
                self.dynamicHrirController.parameterPort("interpolatorOutput"),
                self.convolver.parameterPort("interpolantInput"))
        else:
            self.parameterConnection(
                self.dynamicHrirController.parameterPort("filterOutput"),
                self.convolver.parameterPort("filterInput"))
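
# Hedged numpy sketch (shapes are illustrative) of the dynamic ITD/ILD signal
# path above: each object signal is duplicated to 2*N channels via the
# [i % N for i in range(2*N)] connection, per-ear gains (the ILD part) are
# applied in the delay vector stage, and the convolver then sees the routing
# (idx, ear, idx + ear*N). The fractional delays (the ITD part) are omitted.
import numpy as np

numberOfObjects, blockSize = 3, 16
objects = np.random.randn(numberOfObjects, blockSize)

inConnections = [i % numberOfObjects for i in range(2 * numberOfObjects)]
delayVectorIn = objects[inConnections]  # shape (2*N, blockSize)

gains = np.random.rand(2 * numberOfObjects)  # as sent via the "gainOutput" port
delayVectorOut = delayVectorIn * gains[:, np.newaxis]
assert delayVectorOut.shape == (2 * numberOfObjects, blockSize)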
Exemplo n.º 21
0
    def __init__(self,
                 context,
                 name,
                 parent,
                 hoaOrder,
                 dynamicOrientation=False,
                 initialOrientation=None):
        """
        Constructor.

        Parameters
        ----------
        context: visr.SignalFlowContext
            Structure containing block size and sampling frequency, standard visr component construction parameter.
        name: string
            Name of the component, can be chosen freely as long as it is unique within the containing component.
        parent: visr.CompositeComponent or NoneType
            The containing composite component, or None for a top-level component.
        hoaOrder: int
            The spherical harmonics order, determines the size of the output matrix.
        dynamicOrientation: bool
            Whether the orientation is updated at runtime. If True, a parameter input
            "orientation" is instantiated that receives pml.ListenerPosition parameters.
        initialOrientation: array-like (2- or 3- element) or NoneType
            The initial head rotation or the static head orientation if dynamic updates are deactivated. Given as yaw, pitch, roll.
        """
        # Call base class (AtomicComponent) constructor
        super(HoaRotationMatrixCalculator,
              self).__init__(context, name, parent)
        self.hoaOrder = hoaOrder
        self.numHoaCoeffs = (self.hoaOrder + 1)**2

        # Number of nonzero matrix coefficients.
        # Explicit formula: sum( (2i+1)^2 ) for i = 0 .. hoaOrder.
        # The product is always a multiple of 3, so the integer division below is exact.
        self.numMatrixCoeffs = ((hoaOrder + 1) * (2 * hoaOrder + 1) *
                                (2 * hoaOrder + 3)) // 3

        # %% Define parameter ports
        outConfig = pml.VectorParameterConfig(self.numMatrixCoeffs)
        self.coeffOutput = visr.ParameterOutput(
            "coefficients", self, pml.VectorParameterFloat.staticType,
            pml.DoubleBufferingProtocol.staticType, outConfig)
        self.coeffOutputProtocol = self.coeffOutput.protocolOutput()

        if dynamicOrientation:
            self.orientationInput = visr.ParameterInput(
                "orientation", self, pml.ListenerPosition.staticType,
                pml.DoubleBufferingProtocol.staticType,
                pml.EmptyParameterConfig())
            self.orientationInputProtocol = self.orientationInput.protocolInput(
            )
        else:
            self.orientationInputProtocol = None

        if initialOrientation is None:
            initialOrientation = np.zeros((3), np.float32)
        else:
            initialOrientation = np.asarray(initialOrientation,
                                            dtype=np.float32)
            if initialOrientation.size < 3:
                initialOrientation = np.concatenate(
                    (initialOrientation,
                     np.zeros(3 - initialOrientation.size, dtype=np.float32)))
        R1 = rotationMatrixReorderingACN(
            calcRotationMatrix(initialOrientation))
        self.rotationMatrices = allSphericalHarmonicsRotationMatrices(
            self.hoaOrder, R1)
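
# Quick standalone check of the coefficient-count formula used above: the
# block-diagonal HOA rotation matrix has one (2i+1) x (2i+1) block per order i,
# and sum( (2i+1)^2 ) for i = 0 .. P equals (P+1)(2P+1)(2P+3)/3 exactly.
for hoaOrder in range(8):
    direct = sum((2 * i + 1) ** 2 for i in range(hoaOrder + 1))
    closed = ((hoaOrder + 1) * (2 * hoaOrder + 1) * (2 * hoaOrder + 3)) // 3
    assert direct == closed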
Exemplo n.º 22
0
    def __init__(self,
                 context,
                 name,
                 parent,
                 hoaOrder=None,
                 sofaFile=None,
                 decodingFilters=None,
                 interpolationSteps=None,
                 headOrientation=None,
                 headTracking=True,
                 fftImplementation='default'):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, holds the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        hoaOrder: int or None
            The maximum HOA order that can be reproduced. If None, the HOA order is deduced
            from the first dimension of the HOA filters (possibly contained in a SOFA file).
        sofaFile: string or NoneType
            A file in SOFA format containing the decoding filters. This expects the filters in the
            field 'Data.IR', dimensions (hoaOrder+1)**2 x 2 x irLength. If None, then the filters
            must be provided in 'decodingFilters' parameter.
        decodingFilters : numpy.ndarray or NoneType
            Alternative way to provide the HOA decoding filters.
        interpolationSteps: int, optional
            Number of samples to transition to new rotation gains after an update.
        headOrientation : array-like
            Head orientation in spherical coordinates (2- or 3-element vector or list). Either a static orientation (when no tracking is used),
            or the initial view direction
        headTracking: bool
            Whether dynamic head tracking is active.
        fftImplementation: string, optional
            The FFT library to be used in the filtering. The default uses VISR's
            default implementation for the present platform.
        """
        if (decodingFilters is None) == (sofaFile is None):
            raise ValueError(
                "HoaObjectToBinauralRenderer: Either 'decodingFilters' or 'sofaFile' must be provided."
            )
        if sofaFile is None:
            filters = decodingFilters
        else:
            # pos and delays are not used here.
            [pos, filters, delays] = readSofaFile(sofaFile)

        if hoaOrder is None:
            numHoaCoeffs = filters.shape[0]
            orderP1 = int(np.floor(np.sqrt(numHoaCoeffs)))
            if orderP1**2 != numHoaCoeffs:
                raise ValueError(
                    "If hoaOrder is not given, the number of HOA filters must be a square number"
                )
            hoaOrder = orderP1 - 1
        else:
            numHoaCoeffs = (hoaOrder + 1)**2

        if filters.ndim != 3 or filters.shape[1] != 2 or filters.shape[
                0] < numHoaCoeffs:
            raise ValueError(
                "HoaObjectToBinauralRenderer: the filter data must be a 3D matrix where the second dimension is 2 and the first dimension is equal or larger than (hoaOrder+1)^2."
            )

        # Set default value for fading between rotation matrix updates.
        if interpolationSteps is None:
            interpolationSteps = context.period

        super(HoaBinauralRenderer, self).__init__(context, name, parent)
        self.hoaSignalInput = visr.AudioInputFloat("audioIn", self,
                                                   numHoaCoeffs)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)

        filterMtx = np.concatenate(
            (filters[0:numHoaCoeffs, 0, :], filters[0:numHoaCoeffs, 1, :]))
        routings = rbbl.FilterRoutingList()
        for idx in range(0, numHoaCoeffs):
            routings.addRouting(idx, 0, idx, 1.0)
            routings.addRouting(idx, 1, idx + numHoaCoeffs, 1.0)

        self.binauralFilterBank = rcl.FirFilterMatrix(
            context,
            'binauralFilterBank',
            self,
            numberOfInputs=numHoaCoeffs,
            numberOfOutputs=2,
            filterLength=filters.shape[-1],
            maxFilters=2 * numHoaCoeffs,
            maxRoutings=2 * numHoaCoeffs,
            filters=filterMtx,
            routings=routings,
            controlInputs=rcl.FirFilterMatrix.ControlPortConfig.NoInputs,
            fftImplementation=fftImplementation)

        if headTracking or (headOrientation is not None):

            numMatrixCoeffs = ((hoaOrder + 1) * (2 * hoaOrder + 1) *
                               (2 * hoaOrder + 3)) // 3

            self.rotationCalculator = HoaRotationMatrixCalculator(
                context,
                "RotationCalculator",
                self,
                hoaOrder,
                dynamicOrientation=headTracking,
                initialOrientation=headOrientation)

            rotationMatrixRoutings = rbbl.SparseGainRoutingList()
            for oIdx in range(hoaOrder + 1):
                entryStart = (oIdx * (2 * oIdx - 1) * (2 * oIdx + 1)) // 3
                diagStart = oIdx**2
                for rowIdx in range(2 * oIdx + 1):
                    row = diagStart + rowIdx
                    colsPerRow = 2 * oIdx + 1
                    for colIdx in range(2 * oIdx + 1):
                        col = diagStart + colIdx
                        entryIdx = entryStart + rowIdx * colsPerRow + colIdx
                        rotationMatrixRoutings.addRouting(
                            entryIdx, row, col, 0.0)

            self.rotationMatrix = rcl.SparseGainMatrix(
                context,
                "rotationMatrix",
                self,
                numberOfInputs=numHoaCoeffs,
                numberOfOutputs=numHoaCoeffs,
                interpolationSteps=interpolationSteps,
                maxRoutingPoints=numMatrixCoeffs,
                initialRoutings=rotationMatrixRoutings,
                controlInputs=rcl.SparseGainMatrix.ControlPortConfig.Gain)
            self.audioConnection(self.hoaSignalInput,
                                 self.rotationMatrix.audioPort("in"))
            self.audioConnection(self.rotationMatrix.audioPort("out"),
                                 self.binauralFilterBank.audioPort("in"))
            self.parameterConnection(
                self.rotationCalculator.parameterPort("coefficients"),
                self.rotationMatrix.parameterPort("gainInput"))

            if headTracking:
                self.trackingInput = visr.ParameterInput(
                    "tracking", self, pml.ListenerPosition.staticType,
                    pml.DoubleBufferingProtocol.staticType,
                    pml.EmptyParameterConfig())
                self.parameterConnection(
                    self.trackingInput,
                    self.rotationCalculator.parameterPort("orientation"))
        else:
            self.audioConnection(self.hoaSignalInput,
                                 self.binauralFilterBank.audioPort("in"))

        self.audioConnection(self.binauralFilterBank.audioPort("out"),
                             self.binauralOutput)
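
# Standalone sketch reproducing the sparse routing enumeration above without
# VISR: per order, the entries fill a (2*oIdx+1) x (2*oIdx+1) block on the
# diagonal, and the flat entry indices come out contiguous, totalling
# numMatrixCoeffs.
def blockDiagonalRoutings(hoaOrder):
    routings = []
    for oIdx in range(hoaOrder + 1):
        entryStart = (oIdx * (2 * oIdx - 1) * (2 * oIdx + 1)) // 3
        diagStart = oIdx ** 2
        colsPerRow = 2 * oIdx + 1
        for rowIdx in range(colsPerRow):
            for colIdx in range(colsPerRow):
                entryIdx = entryStart + rowIdx * colsPerRow + colIdx
                routings.append((entryIdx, diagStart + rowIdx, diagStart + colIdx))
    return routings

hoaOrder = 3
numMatrixCoeffs = ((hoaOrder + 1) * (2 * hoaOrder + 1) * (2 * hoaOrder + 3)) // 3
assert [r[0] for r in blockDiagonalRoutings(hoaOrder)] == list(range(numMatrixCoeffs))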
    def __init__( self,
                  context, name, parent,    # Standard visr component constructor arguments
                  numberOfObjects,          # The number of point source objects rendered.
                  hrirPositions,            # The directions of the HRTF measurements, given as a Nx3 array
                  hrirData,                 # The HRTF data as a 3D Nx2xL matrix, with L as the FIR length.
                  headRadius = 0.0875,      # Head radius, optional. Might be used in a dynamic ITD/ILD individualisation algorithm.
                  useHeadTracking = False,        # Whether head tracking data is provided via a self.headOrientation port.
                  dynamicITD = False,             # Whether ITD delays are calculated and sent via a "delays" port.
                  dynamicILD = False,             # Whether ILD gains are calculated and sent via a "gains" port.
                  interpolatingConvolver = False, # Whether to transmit interpolation parameters (True) or complete interpolated filters (False).
                  hrirInterpolation = False, # HRTF interpolation selection: False: Nearest neighbour, True: Barycentric (3-point) interpolation
                  channelAllocation = False, # Whether to allocate object channels dynamically (not tested yet)
                  hrirDelays = None,         # Matrix of delays associated with filter dataset. Dimension: # filters * 2
                  ):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, a structure holding the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        numberOfObjects: int
            The number of point source objects rendered.
        hrirPositions : numpy.ndarray
            The directions of the HRTF measurements, given as a Nx3 array
        hrirData : numpy.ndarray
            The HRTF data as a 3D Nx2xL matrix, with L as the FIR length.
        headRadius: float
            Head radius, optional and not currently used. Might be used in a dynamic ITD/ILD individualisation algorithm.
        useHeadTracking: bool
            Whether head tracking data is provided via a self.headOrientation port.
        dynamicITD: bool
            Whether ITD delays are calculated and sent via a "delays" port.
        dynamicILD: bool
            Whether ILD gains are calculated and sent via a "gains" port.
        hrirInterpolation: bool
            HRTF interpolation selection: False: Nearest neighbour, True: Barycentric (3-point) interpolation
        channelAllocation: bool
            Whether to allocate object channels dynamically (not tested yet)
        hrirDelays: numpy.ndarray
            Matrix of delays associated with filter dataset. Dimension: # filters * 2. Default None means there are no separate
            delays, i.e., they must be contained in the HRIR data.
        """
        # Call base class (AtomicComponent) constructor
        super( DynamicHrirController, self ).__init__( context, name, parent )
        self.numberOfObjects = numberOfObjects
        self.dynamicITD = dynamicITD
        self.dynamicILD = dynamicILD
        # %% Define parameter ports
        self.objectInput = visr.ParameterInput( "objectVector", self, pml.ObjectVector.staticType,
                                              pml.DoubleBufferingProtocol.staticType,
                                              pml.EmptyParameterConfig() )
        self.objectInputProtocol = self.objectInput.protocolInput()

        if useHeadTracking:
            self.useHeadTracking = True
            self.trackingInput = visr.ParameterInput( "headTracking", self, pml.ListenerPosition.staticType,
                                              pml.DoubleBufferingProtocol.staticType,
                                              pml.EmptyParameterConfig() )
            self.trackingInputProtocol = self.trackingInput.protocolInput()

        else:
            self.useHeadTracking = False
            self.trackingInputProtocol = None # Flag that head tracking is not used.
        self.rotationMatrix = np.identity( 3, dtype=np.float32 )

        self.interpolatingConvolver = interpolatingConvolver
        if interpolatingConvolver:
            self.filterOutputProtocol = None # Used as flag to distinguish between the output modes.

            if not hrirInterpolation:
                numInterpolants = 1
            elif hrirPositions.shape[-1] == 2:
                numInterpolants = 2
            else:
                numInterpolants = 3

            self.interpolationOutput = visr.ParameterOutput( "interpolatorOutput", self,
                                                     pml.InterpolationParameter.staticType,
                                                     pml.MessageQueueProtocol.staticType,
                                                     pml.InterpolationParameterConfig(numInterpolants) )
            self.interpolationOutputProtocol = self.interpolationOutput.protocolOutput()
        else:
            self.filterOutput = visr.ParameterOutput( "filterOutput", self,
                                                     pml.IndexedVectorFloat.staticType,
                                                     pml.MessageQueueProtocol.staticType,
                                                     pml.EmptyParameterConfig() )
            self.filterOutputProtocol = self.filterOutput.protocolOutput()
            self.interpolationOutputProtocol = None

        if self.dynamicITD:
            if (hrirDelays is None) or (hrirDelays.ndim != 2) or (hrirDelays.shape != (hrirData.shape[0], 2 ) ):
                raise ValueError( 'If the "dynamicITD" option is given, the parameter "delays" must be a #hrirs x 2 matrix.' )
            self.dynamicDelays = np.array(hrirDelays, copy=True)
            self.delayOutput = visr.ParameterOutput( "delayOutput", self,
                                                    pml.VectorParameterFloat.staticType,
                                                    pml.DoubleBufferingProtocol.staticType,
                                                    pml.VectorParameterConfig( 2*self.numberOfObjects) )
            self.delayOutputProtocol = self.delayOutput.protocolOutput()

        # If we use dynamic ILD, only the object level is set at the moment.
        if self.dynamicILD:
            self.gainOutput = visr.ParameterOutput( "gainOutput", self,
                                                   pml.VectorParameterFloat.staticType,
                                                   pml.DoubleBufferingProtocol.staticType,
                                                   pml.VectorParameterConfig( 2*self.numberOfObjects) )
            self.gainOutputProtocol = self.gainOutput.protocolOutput()

        if channelAllocation:
            self.routingOutput = visr.ParameterOutput( "routingOutput", self,
                                                     pml.SignalRoutingParameter.staticType,
                                                     pml.DoubleBufferingProtocol.staticType,
                                                     pml.EmptyParameterConfig() )
            self.routingOutputProtocol = self.routingOutput.protocolOutput()
        else:
            self.routingOutputProtocol = None

        # HRIR selection and interpolation data
        # If the interpolating convolver is used, only interpolation parameters are transmitted.
        if interpolatingConvolver:
            self.hrirs = None
        else:
            self.hrirs = np.array( hrirData, copy = True, dtype = np.float32 )

        # Normalise the hrir positions to unit radius (to let the k-d tree
        # lookup work as expected).
        hrirPositions[:,2] = 1.0
        self.hrirPos = sph2cart(np.array( hrirPositions, copy = True, dtype = np.float32 ))
        self.hrirInterpolation = hrirInterpolation
        if self.hrirInterpolation:
            self.lastPosition = np.repeat( [[np.NaN, np.NaN, np.NaN]], self.numberOfObjects, axis=0 )
            self.hrirLookup = ConvexHull( self.hrirPos )
            self.triplets = np.transpose(self.hrirLookup.points[self.hrirLookup.simplices], axes=(0, 2, 1))
            self.inverted = np.asarray( inv(self.triplets), dtype=np.float32 )
        else:
            self.lastFilters = np.repeat( -1, self.numberOfObjects, axis=0 )

        # %% Dynamic allocation of objects to channels
        if channelAllocation:
            self.channelAllocator = rbbl.ObjectChannelAllocator( self.numberOfObjects )
            self.usedChannels = set()
        else:
            self.channelAllocator = None
            self.sourcePos = np.repeat( np.array([[1.0,0.0,0.0]],
                                        dtype = np.float32 ), self.numberOfObjects, axis = 0 )
            self.levels = np.zeros( (self.numberOfObjects), dtype = np.float32 )
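
# Hedged sketch of the barycentric lookup prepared above (scipy/numpy only,
# with synthetic data): the convex hull of the unit-radius measurement points
# yields triangles, and pre-inverting each vertex triplet lets the
# interpolation weights for a query direction be read off as w = inv(T) @ p;
# the containing triangle is the one where all weights are non-negative.
import numpy as np
from numpy.linalg import inv
from scipy.spatial import ConvexHull

rng = np.random.default_rng(0)
points = rng.standard_normal((40, 3))
points /= np.linalg.norm(points, axis=1, keepdims=True)  # unit radius

hull = ConvexHull(points)
triplets = np.transpose(hull.points[hull.simplices], axes=(0, 2, 1))
inverted = np.asarray(inv(triplets), dtype=np.float32)  # as in the controller

query = np.array([0.0, 0.0, 1.0], dtype=np.float32)
weights = inverted @ query                  # one weight triple per triangle
simplexIdx = int(np.argmax(np.all(weights >= -1e-6, axis=1)))
w = weights[simplexIdx]
vertexIds = hull.simplices[simplexIdx]      # HRIR indices to interpolate
assert np.allclose(w @ points[vertexIds], query, atol=1e-5)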
    def __init__(
            self,
            context,
            name,
            parent,
            *,  # This ensures that the remaining arguments are given as keyword arguments.
            numberOfObjects,
            sofaFile=None,
            hrirPositions=None,
            hrirData=None,
            hrirDelays=None,
            headOrientation=None,
            headTracking=True,
            dynamicITD=False,
            hrirInterpolation=False,
            irTruncationLength=None,
            filterCrossfading=False,
            interpolatingConvolver=False,
            staticLateSofaFile=None,
            staticLateFilters=None,
            staticLateDelays=None,
            fftImplementation="default",
            loudspeakerConfiguration=None,
            loudspeakerRouting=None,
            objectRendererOptions={}):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, a structure holding the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        sofaFile: string
            BRIR database provided as a SOFA file. This is an alternative to the hrirPosition, hrirData
            (and optionally hrirDelays) argument. Default None means that hrirData and hrirPosition must be provided.
        hrirPositions : numpy.ndarray
            Optional way to provide the measurement grid for the BRIR listener view directions. If a
            SOFA file is provided, this is optional and overrides the listener view data in the file.
            Otherwise this argument is mandatory. Dimension #grid directions x (dimension of position argument)
        hrirData: numpy.ndarray
            Optional way to provide the BRIR data. Dimension: #grid directions  x #ears (2) # x #loudspeakers x #ir length
        hrirDelays: numpy.ndarray
            Optional BRIR delays. If a SOFA file is given, this  argument overrides a potential delay setting from the file. Otherwise, no extra delays
            are applied unless this option is provided. Dimension: #grid directions  x #ears(2) x # loudspeakers
        headOrientation : array-like
            Head orientation in spherical coordinates (2- or 3-element vector or list). Either a static orientation (when no tracking is used),
            or the initial view direction
        headTracking: bool
            Whether dynamic headTracking is active. If True, a control input "tracking" is created.
        dynamicITD: bool
            Whether the delay part of the BRIRs is applied separately to the (delay-free) BRIRs.
        hrirInterpolation: bool
            Whether BRIRs are interpolated for the current head orientation. If False, a nearest-neighbour selection is used.
        irTruncationLength: int
            Maximum number of samples of the BRIR impulse responses. Functional only if the BRIR is provided in a SOFA file.
        filterCrossfading: bool
            Whether dynamic BRIR changes are crossfaded (True) or switched immediately (False)
        interpolatingConvolver: bool
            Whether the interpolating convolver option is used. If True, the convolver stores all BRIR filters, and the controller sends only
            interpolation coefficient messages to select the BRIR filters and their interpolation ratios.
        staticLateSofaFile: string, optional
            Name of a file containing a static (i.e., head orientation-independent) late part of the BRIRs.
            Optional argument, might be used as an alternative to the staticLateFilters argument, but these options are mutually exclusive.
            If neither is given, no static late part is used. The fields 'Data.IR' and the 'Data.Delay' are used.
        staticLateFilters: numpy.ndarray, optional
            Matrix containing a static, head position-independent part of the BRIRs. This option is mutually exclusive to
            staticLateSofaFile. If none of these is given, no separate static late part  is rendered.
            Dimension: 2 x #numberOfLoudspeakers x firLength
        staticLateDelays: numpy.ndarray, optional
            Time delay of the late static BRIRs per loudspeaker. Optional attribute,
            only used if late static BRIR coefficients are provided.
            Dimension: 2 x #loudspeakers
        fftImplementation: string
            The FFT implementation to be used in the convolver. The default value selects the system default.
        loudspeakerConfiguration: panning.LoudspeakerArray
            Loudspeaker configuration object used in the object renderer. Must not be None.
        loudspeakerRouting: array-like list of integers or None
            Routing indices from the outputs of the object renderer to the inputs of the binaural virtual loudspeaker renderer.
            If empty, the outputs of the object renderer are connected to the first inputs of the virt. lsp renderer.
        objectRendererOptions: dict
            Keyword arguments passed to the object renderer (rcl.CoreRenderer). This may involve all optional
            arguments for this class apart from loudspeakerConfiguration, numberOfInputs, and numberOfOutputs.
            If provided, these parameters are overwritten by the values determined from the binaural renderer's configuration.

        """

        # Parameter checking
        if not isinstance(loudspeakerConfiguration, panning.LoudspeakerArray):
            # Try to convert automatically
            loudspeakerConfiguration = panning.LoudspeakerArray(
                loudspeakerConfiguration)
            # raise ValueError( "'loudspeakerConfiguration' is not a 'panning.LoudspeakerArray' object." )
        numArraySpeakers = loudspeakerConfiguration.numberOfRegularLoudspeakers

        outRoutings = list(range(numArraySpeakers))  # Plain[0,1,...] routing

        super(ObjectToVirtualLoudspeakerRenderer,
              self).__init__(context, name, parent)

        self.objectInput = visr.AudioInputFloat("audioIn", self,
                                                numberOfObjects)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)
        self.objectVectorInput = visr.ParameterInput(
            "objectVector", self, pml.ObjectVector.staticType,
            pml.DoubleBufferingProtocol.staticType, pml.EmptyParameterConfig())
        if headTracking:
            self.trackingInput = visr.ParameterInput(
                "tracking", self, pml.ListenerPosition.staticType,
                pml.DoubleBufferingProtocol.staticType,
                pml.EmptyParameterConfig())

        objectRendererOptions[
            "loudspeakerConfiguration"] = loudspeakerConfiguration
        objectRendererOptions["numberOfInputs"] = numberOfObjects
        objectRendererOptions["numberOfOutputs"] = numArraySpeakers

        if "interpolationPeriod" not in objectRendererOptions:
            objectRendererOptions["interpolationPeriod"] = context.period

        if "diffusionFilters" not in objectRendererOptions:
            diffLen = 512
            fftLen = int(np.ceil(0.5 * (diffLen + 1)))
            H = np.exp(-1j * (np.random.rand(numArraySpeakers, fftLen)))
            h = np.fft.irfft(H, axis=1)
            diffFilters = efl.BasicMatrixFloat(h)
            objectRendererOptions["diffusionFilters"] = diffFilters

        self.objectRenderer = CoreRenderer(context, "ObjectRenderer", self,
                                           **objectRendererOptions)

        self.virtualLoudspeakerRenderer = VirtualLoudspeakerRenderer(
            context,
            "VirtualLoudspeakerRenderer",
            self,
            sofaFile=sofaFile,
            hrirPositions=hrirPositions,
            hrirData=hrirData,
            hrirDelays=hrirDelays,
            headOrientation=headOrientation,
            headTracking=headTracking,
            dynamicITD=dynamicITD,
            hrirInterpolation=hrirInterpolation,
            irTruncationLength=irTruncationLength,
            filterCrossfading=filterCrossfading,
            interpolatingConvolver=interpolatingConvolver,
            staticLateSofaFile=staticLateSofaFile,
            staticLateFilters=staticLateFilters,
            staticLateDelays=staticLateDelays,
            fftImplementation=fftImplementation)

        self.audioConnection(self.objectInput,
                             self.objectRenderer.audioPort("audioIn"))
        numVirtualSpeakers = self.virtualLoudspeakerRenderer.audioPort(
            'audioIn').width

        if loudspeakerRouting is None:
            if numVirtualSpeakers != numArraySpeakers:
                raise ValueError(
                    "If no 'loudspeakerRouting' parameter is provided, the numbers of loudspeakers of the object renderer and the binaural virt. loudspeaker renderer must match."
                )
            loudspeakerRouting = list(
                range(numArraySpeakers))  # Plain[0,1,...] routing

        if numVirtualSpeakers > numArraySpeakers:
            unconnectedSpeakers = list(
                set(range(numVirtualSpeakers)) - set(loudspeakerRouting))
            self.nullSource = NullSource(context, "NullSource", self, width=1)
            self.audioConnection(
                self.nullSource.audioPort("out"),
                [0] * len(unconnectedSpeakers),
                self.virtualLoudspeakerRenderer.audioPort("audioIn"),
                unconnectedSpeakers)
        self.audioConnection(
            self.objectRenderer.audioPort("audioOut"), outRoutings,
            self.virtualLoudspeakerRenderer.audioPort("audioIn"),
            loudspeakerRouting)
        self.audioConnection(
            self.virtualLoudspeakerRenderer.audioPort("audioOut"),
            self.binauralOutput)
        self.parameterConnection(
            self.objectVectorInput,
            self.objectRenderer.parameterPort("objectDataInput"))
        if headTracking:
            self.parameterConnection(
                self.trackingInput,
                self.virtualLoudspeakerRenderer.parameterPort("tracking"))
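
# Standalone sketch of the default diffusion-filter construction above: FIR
# decorrelation filters are obtained by an inverse real FFT of unit-magnitude
# random-phase spectra. (Here the phase is scaled to the full circle, a common
# variant; the code above uses the unscaled np.random.rand values directly.)
import numpy as np

numSpeakers, diffLen = 8, 512
fftLen = int(np.ceil(0.5 * (diffLen + 1)))    # number of rfft bins
H = np.exp(-1j * 2 * np.pi * np.random.rand(numSpeakers, fftLen))
h = np.fft.irfft(H, n=diffLen, axis=1)        # (numSpeakers, diffLen) FIRs
assert h.shape == (numSpeakers, diffLen)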
Exemplo n.º 25
0
    def __init__( self,
                  context, name, parent,
                  port,
                  yawOffset=0,
                  pitchOffset=0,
                  rollOffset=0,
                  yawRightHand=False,
                  pitchRightHand=False,
                  rollRightHand=False,
                  calibrationInput = False # Whether to instantiate an input port to set the orientation.
                  ):
        """
        Constructor.


        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, a structure holding the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        yawOffset: float
            Initial offset for the yaw component, default 0.0
        pitchOffset :  float
            Offset for the pitch value, in degree
        rollOffset : float
            Initial value for the roll component, default 0.0
        yawRightHand: bool
            Whether the yaw coordinate is interpreted as right-hand
            (mathematically negative) rotation. Default: False
        pitchRightHand: bool
            Whether the pitch coordinate is interpreted as right-hand
            (mathematically negative) rotation. Default: False
        rollRightHand: bool
            Whether the roll coordinate is interpreted as right-hand
            (mathematically negative) rotation. Default: False
        calibrationInput: bool
            Flag to determine whether the component has an additional input "calibration"
            that resets the orientation offsets. At the moment, this input is of
            type StringParameter, and the value is ignored.

        TODO: Check whether to support ListenerPosition objects as calibration triggers
        to set the orientation to an arbitrary value
        """


        super( RazorAHRS, self ).__init__( context, name, parent )
        self.yprVec = np.zeros( 3, dtype = np.float32 )
        baudRate = 57600
        self.ser = serial.Serial(port, baudRate, timeout=0)
        self.message = ""
        self.sent = False
        self.trackingOutput = visr.ParameterOutput( "orientation", self,
                                              pml.ListenerPosition.staticType,
                                              pml.DoubleBufferingProtocol.staticType,
                                              pml.EmptyParameterConfig() )
        self.trackingOutputProtocol = self.trackingOutput.protocolOutput()

        if calibrationInput:
            self.calibrationInput = visr.ParameterInput( "calibration", self,
                                                         pml.StringParameter.staticType,
                                                         pml.MessageQueueProtocol.staticType,
                                                         pml.EmptyParameterConfig() )
        else:
            self.calibrationInput = None

        self.sentN = 0
        self.parsedN = 0
        self.ser.read()  # Necessary for .in_waiting to work.
        self.procN = 0
        self.yawOffset = yawOffset
        self.pitchOffset = pitchOffset
        self.rollOffset = rollOffset
        self.yawRightHand = yawRightHand
        self.pitchRightHand = pitchRightHand
        self.rollRightHand = rollRightHand
        self.orientation = np.array( [0.0, 0.0, 0.0] ) # Current orientation, unadjusted, in radian
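
# Hedged sketch (hypothetical helper, not a method of the class) of how the
# offset and handedness settings stored above could be applied to a raw
# yaw/pitch/roll reading before it is emitted on the "orientation" output.
import numpy as np

def adjustOrientation(ypr, offsets, rightHand):
    signs = np.where(np.asarray(rightHand), -1.0, 1.0)
    return signs * np.asarray(ypr, dtype=np.float32) + np.asarray(offsets)

raw = [30.0, -5.0, 0.0]  # degrees, as parsed from the tracker message
adjusted = adjustOrientation(raw, offsets=[10.0, 0.0, 0.0],
                             rightHand=[True, False, False])
assert np.allclose(adjusted, [-20.0, -5.0, 0.0])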
Exemplo n.º 26
0
    def __init__(
            self,
            context,
            name,
            parent,
            *,  # This ensures that the remaining arguments are given as keyword arguments.
            sofaFile=None,
            hrirPositions=None,
            hrirData=None,
            hrirDelays=None,
            headOrientation=None,
            headTracking=True,
            dynamicITD=False,
            hrirInterpolation=False,
            irTruncationLength=None,
            filterCrossfading=False,
            interpolatingConvolver=False,
            staticLateSofaFile=None,
            staticLateFilters=None,
            staticLateDelays=None,
            fftImplementation='default'):
        """
        Constructor.

        Parameters
        ----------
        context : visr.SignalFlowContext
            Standard visr.Component construction argument, a structure holding the block size and the sampling frequency
        name : string
            Name of the component, Standard visr.Component construction argument
        parent : visr.CompositeComponent
            Containing component if there is one, None if this is a top-level component of the signal flow.
        sofaFile: string
            BRIR database provided as a SOFA file. This is an alternative to the hrirPosition, hrirData
            (and optionally hrirDelays) argument. Default None means that hrirData and hrirPosition must be provided.
        hrirPositions : numpy.ndarray
            Optional way to provide the measurement grid for the BRIR listener view directions. If a
            SOFA file is provided, this is optional and overrides the listener view data in the file.
            Otherwise this argument is mandatory. Dimension #grid directions x (dimension of position argument)
        hrirData: numpy.ndarray
            Optional way to provide the BRIR data. Dimension: #grid directions  x #ears (2) # x #loudspeakers x #ir length
        hrirDelays: numpy.ndarray
            Optional BRIR delays. If a SOFA file is given, this  argument overrides a potential delay setting from the file. Otherwise, no extra delays
            are applied unless this option is provided. Dimension: #grid directions  x #ears(2) x # loudspeakers
        headOrientation : array-like
            Head orientation in spherical coordinates (2- or 3-element vector or list). Either a static orientation (when no tracking is used),
            or the initial view direction
        headTracking: bool
            Whether dynamic headTracking is active. If True, a control input "tracking" is created.
        dynamicITD: bool
            Whether the delay part of the BRIRs is applied separately to the (delay-free) BRIRs.
        hrirInterpolation: bool
            Whether BRIRs are interpolated for the current head orientation. If False, a nearest-neighbour selection is used.
        irTruncationLength: int
            Maximum number of samples of the BRIR impulse responses. Functional only if the BRIR is provided in a SOFA file.
        filterCrossfading: bool
            Whether dynamic BRIR changes are crossfaded (True) or switched immediately (False)
        interpolatingConvolver: bool
            Whether the interpolating convolver option is used. If True, the convolver stores all BRIR filters, and the controller sends only
            interpolation coefficient messages to select the BRIR filters and their interpolation ratios.
        staticLateSofaFile: string, optional
            Name of a file containing a static (i.e., head orientation-independent) late part of the BRIRs.
            Optional argument, might be used as an alternative to the staticLateFilters argument, but these options are mutually exclusive.
            If neither is given, no static late part is used. The fields 'Data.IR' and the 'Data.Delay' are used.
        staticLateFilters: numpy.ndarray, optional
            Matrix containing a static, head position-independent part of the BRIRs. This option is mutually exclusive to
            staticLateSofaFile. If none of these is given, no separate static late part  is rendered.
            Dimension: 2 x #numberOfLoudspeakers x firLength
        staticLateDelays: numpy.ndarray, optional
            Time delay of the late static BRIRs per loudspeaker. Optional attribute,
            only used if late static BRIR coefficients are provided.
            Dimension: 2 x #loudspeakers
        fftImplementation: string
            The FFT implementation to be used in the convolver. The default value selects the system default.
        """
        if (hrirData is None) == (sofaFile is None):
            raise ValueError(
                "Exactly one of the arguments sofaFile and hrirData must be present."
            )
        if sofaFile is not None:
            [sofaHrirPositions, hrirData, sofaHrirDelays
             ] = readSofaFile(sofaFile,
                              truncationLength=irTruncationLength,
                              truncationWindowLength=16)
            # If hrirDelays is not provided as an argument, use the one retrieved from the SOFA file
            if hrirDelays is None:
                hrirDelays = sofaHrirDelays
            # Use the positions obtained from the SOFA file only if the argument is not set
            if hrirPositions is None:
                hrirPositions = sofaHrirPositions

        # Crude check for 'horizontal-only' listener view directions
        if np.max(np.abs(hrirPositions[:, 1])) < deg2rad(1):
            hrirPositions = hrirPositions[:, [0, 2]]  # transform to polar coordinates

        numberOfLoudspeakers = hrirData.shape[2]

        super(VirtualLoudspeakerRenderer, self).__init__(context, name, parent)
        self.loudspeakerSignalInput = visr.AudioInputFloat(
            "audioIn", self, numberOfLoudspeakers)
        self.binauralOutput = visr.AudioOutputFloat("audioOut", self, 2)
        if headTracking:
            self.trackingInput = visr.ParameterInput(
                "tracking", self, pml.ListenerPosition.staticType,
                pml.DoubleBufferingProtocol.staticType,
                pml.EmptyParameterConfig())

        # Check consistency between HRIR positions and HRIR data
        if (hrirPositions.shape[0] != hrirData.shape[0]):
            raise ValueError(
                "The number of HRIR positions is inconsistent with the dimension of the HRIR data."
            )

        # Additional safety check (is tested in the controller anyway)
        if dynamicITD:
            if (hrirDelays is
                    None) or (hrirDelays.ndim != hrirData.ndim - 1) or (
                        hrirDelays.shape != hrirData.shape[0:-1]):
                raise ValueError(
                    'If the "dynamicITD" option is given, the parameter "delays" must match the first dimensions of the hrir data matrix.'
                )

        self.virtualLoudspeakerController = VirtualLoudspeakerController(
            context,
            "VirtualLoudspeakerController",
            self,
            hrirPositions=hrirPositions,
            hrirData=hrirData,
            headTracking=headTracking,
            dynamicITD=dynamicITD,
            hrirInterpolation=hrirInterpolation,
            hrirDelays=hrirDelays,
            interpolatingConvolver=interpolatingConvolver)

        if headTracking:
            self.parameterConnection(
                self.trackingInput,
                self.virtualLoudspeakerController.parameterPort(
                    "headTracking"))

        # Define the routing for the binaural convolver such that it matches the organisation of the
        # flat BRIR matrix.
        filterRouting = rbbl.FilterRoutingList()

        firLength = hrirData.shape[-1]

        if dynamicITD:
            self.delayVector = rcl.DelayVector(
                context,
                "delayVector",
                self,
                numberOfLoudspeakers * 2,
                interpolationType="lagrangeOrder3",
                initialDelay=0,
                controlInputs=rcl.DelayVector.ControlPortConfig.Delay,
                methodDelayPolicy=rcl.DelayMatrix.MethodDelayPolicy.Add,
                initialGain=1.0,
                interpolationSteps=context.period)

            self.audioConnection(
                self.loudspeakerSignalInput,
                [i % numberOfLoudspeakers for i in range(numberOfLoudspeakers * 2)],
                self.delayVector.audioPort("in"),
                range(0, 2 * numberOfLoudspeakers))
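            # The index expression i % numberOfLoudspeakers duplicates the N
            # loudspeaker feeds onto 2*N delay channels: [0, N) feeds the
            # left-ear block and [N, 2N) the right-ear block.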

            for idx in range(0, numberOfLoudspeakers):
                filterRouting.addRouting(idx, 0, idx, 1.0)
                filterRouting.addRouting(idx + numberOfLoudspeakers, 1,
                                         idx + numberOfLoudspeakers, 1.0)
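            # The routings above send the left-ear block to convolver output 0
            # and the right-ear block to output 1, each with unit gain.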

            if interpolatingConvolver:
                if filterCrossfading:
                    interpolationSteps = context.period
                else:
                    interpolationSteps = 0

                numFilters = np.prod(hrirData.shape[:-1])
                filterReshaped = np.reshape(hrirData, (numFilters, firLength))
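                # The reshape flattens the HRIR array so that each
                # (position, ear, loudspeaker) impulse response becomes one row
                # of the convolver's filter matrix.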
                self.convolver = rcl.InterpolatingFirFilterMatrix(
                    context,
                    'convolutionEngine',
                    self,
                    numberOfInputs=2 * numberOfLoudspeakers,
                    numberOfOutputs=2,
                    maxFilters=numFilters,
                    filterLength=firLength,
                    maxRoutings=2 * numberOfLoudspeakers,
                    numberOfInterpolants=2,  # TODO: derive this from the interpolation configuration.
                    transitionSamples=interpolationSteps,
                    filters=filterReshaped,
                    routings=filterRouting,
                    # Interpolants (not Filters): the parameter connection below
                    # drives this convolver's "interpolantInput" port.
                    controlInputs=rcl.InterpolatingFirFilterMatrix.ControlPortConfig.Interpolants,
                    fftImplementation=fftImplementation)

            elif filterCrossfading:
                self.convolver = rcl.CrossfadingFirFilterMatrix(
                    context,
                    'convolutionEngine',
                    self,
                    numberOfInputs=2 * numberOfLoudspeakers,
                    numberOfOutputs=2,
                    maxFilters=2 * numberOfLoudspeakers,
                    filterLength=firLength,
                    maxRoutings=2 * numberOfLoudspeakers,
                    transitionSamples=context.period,
                    routings=filterRouting,
                    controlInputs=rcl.CrossfadingFirFilterMatrix.ControlPortConfig.Filters,
                    fftImplementation=fftImplementation)
            else:
                self.convolver = rcl.FirFilterMatrix(
                    context,
                    'convolutionEngine',
                    self,
                    numberOfInputs=2 * numberOfLoudspeakers,
                    numberOfOutputs=2,
                    maxFilters=2 * numberOfLoudspeakers,
                    filterLength=firLength,
                    maxRoutings=2 * numberOfLoudspeakers,
                    routings=filterRouting,
                    controlInputs=rcl.FirFilterMatrix.ControlPortConfig.Filters,
                    fftImplementation=fftImplementation)

            self.audioConnection(
                self.delayVector.audioPort("out"),
                self.convolver.audioPort("in"),
            )
            self.parameterConnection(
                self.virtualLoudspeakerController.parameterPort("delayOutput"),
                self.delayVector.parameterPort("delayInput"))
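            # The controller computes per-channel delays from the current head
            # orientation; applying them ahead of the convolution realises the
            # dynamic ITD.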

        else:  # no dynamic ITD
            for idx in range(0, numberOfLoudspeakers):
                filterRouting.addRouting(idx, 0, idx, 1.0)
                filterRouting.addRouting(idx, 1, idx + numberOfLoudspeakers,
                                         1.0)
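            # Each loudspeaker feed idx is convolved with its left-ear HRIR
            # (output 0) and its right-ear HRIR (output 1); the input channels
            # are shared, so no duplication is needed without dynamic ITD.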
            if interpolatingConvolver:
                if filterCrossfading:
                    interpolationSteps = context.period
                else:
                    interpolationSteps = 0

                numFilters = np.prod(hrirData.shape[:-1])
                filterReshaped = np.reshape(hrirData, (numFilters, firLength))
                self.convolver = rcl.InterpolatingFirFilterMatrix(
                    context,
                    'convolutionEngine',
                    self,
                    numberOfInputs=numberOfLoudspeakers,
                    numberOfOutputs=2,
                    maxFilters=numFilters,
                    filterLength=firLength,
                    maxRoutings=2 * numberOfLoudspeakers,
                    numberOfInterpolants=2,  # TODO: derive this from the interpolation configuration.
                    transitionSamples=interpolationSteps,
                    filters=filterReshaped,
                    routings=filterRouting,
                    controlInputs=rcl.InterpolatingFirFilterMatrix.ControlPortConfig.Interpolants,
                    fftImplementation=fftImplementation)
            elif filterCrossfading:
                self.convolver = rcl.CrossfadingFirFilterMatrix(
                    context,
                    'convolutionEngine',
                    self,
                    numberOfInputs=numberOfLoudspeakers,
                    numberOfOutputs=2,
                    maxFilters=2 * numberOfLoudspeakers,
                    filterLength=firLength,
                    maxRoutings=2 * numberOfLoudspeakers,
                    transitionSamples=context.period,
                    routings=filterRouting,
                    controlInputs=rcl.CrossfadingFirFilterMatrix.ControlPortConfig.Filters,
                    fftImplementation=fftImplementation)
            else:
                self.convolver = rcl.FirFilterMatrix(
                    context,
                    'convolutionEngine',
                    self,
                    numberOfInputs=numberOfLoudspeakers,
                    numberOfOutputs=2,
                    maxFilters=2 * numberOfLoudspeakers,
                    filterLength=firLength,
                    maxRoutings=2 * numberOfLoudspeakers,
                    routings=filterRouting,
                    controlInputs=rcl.FirFilterMatrix.ControlPortConfig.Filters,
                    fftImplementation=fftImplementation)
            self.audioConnection(self.loudspeakerSignalInput,
                                 self.convolver.audioPort("in"))

        if interpolatingConvolver:
            self.parameterConnection(
                self.virtualLoudspeakerController.parameterPort(
                    "interpolatorOutput"),
                self.convolver.parameterPort("interpolantInput"))
        else:
            self.parameterConnection(
                self.virtualLoudspeakerController.parameterPort(
                    "filterOutput"),
                self.convolver.parameterPort("filterInput"))
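        # The interpolating convolver blends between its stored filters itself
        # and is therefore driven by interpolant weights; the crossfading and
        # plain variants receive fully formed filter sets from the controller.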

        # Optionally use static filters for the late part.
        if (staticLateSofaFile is not None) and (staticLateFilters is not None):
            raise ValueError(
                "The arguments 'staticLateSofaFile' and 'staticLateFilters' cannot both be given."
            )
        if staticLateSofaFile is not None:
            latePos, lateFilters, lateDelay = readSofaFile(staticLateSofaFile)
            staticLateDelays = np.squeeze(lateDelay)
            staticLateFilters = np.squeeze(lateFilters)

        if staticLateFilters is not None:
            flatDelays = staticLateDelays.flatten(order='C')
            self.staticLateDelays = rcl.DelayVector(
                context,
                'staticLateDelays',
                self,
                2 * numberOfLoudspeakers,
                interpolationSteps=context.period,
                interpolationType='nearestSample',
                # One gain entry per channel of the 2*N-channel delay line.
                initialGain=np.ones(2 * numberOfLoudspeakers, dtype=np.float32),
                initialDelay=flatDelays)
            lateFilterRouting = rbbl.FilterRoutingList([
                rbbl.FilterRouting(i, i // numberOfLoudspeakers, i, 1.0)
                for i in range(2 * numberOfLoudspeakers)
            ])
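            # Routing: channel i feeds late filter i; i // numberOfLoudspeakers
            # sends the first N channels to output 0 (left) and the remaining N
            # to output 1 (right).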

            flatLateFilters = np.reshape(staticLateFilters,
                                         (2 * numberOfLoudspeakers, -1),
                                         order='C')
            self.staticLateFilters = rcl.FirFilterMatrix(
                context,
                "staticlateFilters",
                self,
                numberOfInputs=2 * numberOfLoudspeakers,
                numberOfOutputs=2,
                filterLength=staticLateFilters.shape[-1],
                maxFilters=2 * numberOfLoudspeakers,
                maxRoutings=2 * numberOfLoudspeakers,
                routings=lateFilterRouting,
                filters=flatLateFilters,
                fftImplementation=fftImplementation)
            self.audioConnection(
                sendPort=self.loudspeakerSignalInput,
                sendIndices=list(range(numberOfLoudspeakers)) +
                list(range(numberOfLoudspeakers)),
                receivePort=self.staticLateDelays.audioPort("in"))
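            # The loudspeaker feeds are duplicated so that each signal passes
            # through both its left-ear and its right-ear late filter.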
            self.audioConnection(
                sendPort=self.staticLateDelays.audioPort("out"),
                receivePort=self.staticLateFilters.audioPort("in"))
            self.earlyLateSum = rcl.Add(context,
                                        "earlyLateSum",
                                        self,
                                        numInputs=2,
                                        width=2)
            self.audioConnection(self.convolver.audioPort("out"),
                                 self.earlyLateSum.audioPort("in0"))
            self.audioConnection(self.staticLateFilters.audioPort("out"),
                                 self.earlyLateSum.audioPort("in1"))
            self.audioConnection(self.earlyLateSum.audioPort("out"),
                                 self.binauralOutput)
        else:
            self.audioConnection(self.convolver.audioPort("out"),
                                 self.binauralOutput)
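
# Minimal usage sketch (hypothetical SOFA file name and parameter values;
# assumes the standard VISR SignalFlowContext constructor):
#
#     context = visr.SignalFlowContext(period=1024, samplingFrequency=48000)
#     renderer = VirtualLoudspeakerRenderer(context, "renderer", None,
#                                           sofaFile="array_brirs.sofa",
#                                           headTracking=True,
#                                           dynamicITD=False,
#                                           hrirInterpolation=True,
#                                           filterCrossfading=True)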